author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-12 05:35:37 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-12 05:35:37 +0000
commit    a90a5cba08fdf6c0ceb95101c275108a152a3aed (patch)
tree      532507288f3defd7f4dcf1af49698bcb76034855 /gfx/wr/webrender
parent    Adding debian version 126.0.1-1. (diff)
Merging upstream version 127.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'gfx/wr/webrender')
-rw-r--r--  gfx/wr/webrender/Cargo.toml | 2
-rw-r--r--  gfx/wr/webrender/res/cs_svg_filter_node.glsl | 859
-rw-r--r--  gfx/wr/webrender/res/ps_quad.glsl | 55
-rw-r--r--  gfx/wr/webrender/res/ps_quad_conic_gradient.glsl | 90
-rw-r--r--  gfx/wr/webrender/res/ps_quad_radial_gradient.glsl | 81
-rw-r--r--  gfx/wr/webrender/res/ps_quad_textured.glsl | 38
-rw-r--r--  gfx/wr/webrender/res/rect.glsl | 9
-rw-r--r--  gfx/wr/webrender/res/sample_color0.glsl | 41
-rw-r--r--  gfx/wr/webrender/src/batch.rs | 69
-rw-r--r--  gfx/wr/webrender/src/frame_builder.rs | 1
-rw-r--r--  gfx/wr/webrender/src/gpu_types.rs | 20
-rw-r--r--  gfx/wr/webrender/src/internal_types.rs | 628
-rw-r--r--  gfx/wr/webrender/src/pattern.rs | 21
-rw-r--r--  gfx/wr/webrender/src/picture.rs | 439
-rw-r--r--  gfx/wr/webrender/src/prepare.rs | 135
-rw-r--r--  gfx/wr/webrender/src/prim_store/gradient/conic.rs | 49
-rw-r--r--  gfx/wr/webrender/src/prim_store/gradient/linear.rs | 6
-rw-r--r--  gfx/wr/webrender/src/prim_store/gradient/radial.rs | 50
-rw-r--r--  gfx/wr/webrender/src/prim_store/image.rs | 6
-rw-r--r--  gfx/wr/webrender/src/prim_store/mod.rs | 16
-rw-r--r--  gfx/wr/webrender/src/prim_store/picture.rs | 768
-rw-r--r--  gfx/wr/webrender/src/quad.rs | 284
-rw-r--r--  gfx/wr/webrender/src/render_target.rs | 196
-rw-r--r--  gfx/wr/webrender/src/render_task.rs | 1295
-rw-r--r--  gfx/wr/webrender/src/render_task_cache.rs | 7
-rw-r--r--  gfx/wr/webrender/src/render_task_graph.rs | 20
-rw-r--r--  gfx/wr/webrender/src/renderer/mod.rs | 49
-rw-r--r--  gfx/wr/webrender/src/renderer/shade.rs | 48
-rw-r--r--  gfx/wr/webrender/src/renderer/vertex.rs | 55
-rw-r--r--  gfx/wr/webrender/src/scene_building.rs | 513
-rw-r--r--  gfx/wr/webrender/src/spatial_node.rs | 59
-rw-r--r--  gfx/wr/webrender/src/spatial_tree.rs | 122
-rw-r--r--  gfx/wr/webrender/src/tile_cache.rs | 8
33 files changed, 5718 insertions, 321 deletions
diff --git a/gfx/wr/webrender/Cargo.toml b/gfx/wr/webrender/Cargo.toml
index b99404de0d..8fab0c63ac 100644
--- a/gfx/wr/webrender/Cargo.toml
+++ b/gfx/wr/webrender/Cargo.toml
@@ -52,7 +52,7 @@ svg_fmt = "0.4"
tracy-rs = "0.1.2"
derive_more = { version = "0.99", default-features = false, features = ["add_assign"] }
etagere = "0.2.6"
-glean = { version = "59.0.0", optional = true }
+glean = { version = "60.0.1", optional = true }
firefox-on-glean = { version = "0.1.0", optional = true }
swgl = { path = "../swgl", optional = true }
topological-sort = "0.1"
diff --git a/gfx/wr/webrender/res/cs_svg_filter_node.glsl b/gfx/wr/webrender/res/cs_svg_filter_node.glsl
new file mode 100644
index 0000000000..39c406eddf
--- /dev/null
+++ b/gfx/wr/webrender/res/cs_svg_filter_node.glsl
@@ -0,0 +1,859 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+Notes about how this shader works and the requirements it faces:
+* Each filter has a _CONVERTSRGB variant that converts to linear before
+ performing the operation and converts back to sRGB for output. Since the
+ inputs and output of this shader are premultiplied alpha, we have to undo the
+ premultiply and then convert the sRGB color to linearRGB color, perform the
+ desired operations, and then convert back to sRGB and then premultiply again.
+* For some operations the _CONVERTSRGB variant is never invoked by WebRender.
+ An example is OPACITY, where the two modes have identical results: scaling
+ RGBA by a single scalar value only changes the opacity without changing color
+ relative to alpha, and the sRGB vs linearRGB gamut mapping is relative to alpha.
+* SVG filters are usually in linear space so the _CONVERTSRGB variant is used
+ heavily in SVG filter graphs, whereas CSS filters use the regular variant.
+* Handling of color-interpolation for feFlood and feDropShadow is out of scope
+ for this shader; the values can be converted in the WebRender or Gecko code if
+ necessary.
+* All SVG filters have a subregion rect to clip the operation to; in many cases
+ this can just be an alteration of the task uvrect in WebRender, but in some
+ cases we might need to enforce it in the shader.
+* All filters have an offset for each input. This is an optimization for folding
+ feOffset into the downstream nodes of the graph, because it is inefficient to
+ copy an image just to scroll it, and feOffset is not rare.
+
+Notes about specific filter kinds:
+* FILTER_BLEND_* kinds follow spec
+ https://drafts.fxtf.org/compositing-1/#blending which says to mix from
+ Rs to B() based on Rb.a, then mix from Rb to that color based on Rs.a.
+* FILTER_COMPOSITE_* kinds use math from Skia as it is elegant.
+* FILTER_COMPONENT_TRANSFER_* kinds assume a [4][256] table in gpucache.
+* FILTER_DROP_SHADOW_* composites the Rs source over the drop shadow in Rb.a;
+ it's not actually a composite per se, and needs to be composited onto
+ the destination using a separate blend.
+*/
+
+#define WR_FEATURE_TEXTURE_2D
+
+#include shared,prim_shared
+
+varying highp vec2 vInput1Uv;
+varying highp vec2 vInput2Uv;
+flat varying highp vec4 vInput1UvRect;
+flat varying highp vec4 vInput2UvRect;
+flat varying mediump ivec4 vData;
+flat varying mediump vec4 vFilterData0;
+flat varying mediump vec4 vFilterData1;
+
+// x: Filter input count, y: Filter kind.
+// Packed into a vector to work around bug 1630356.
+flat varying mediump ivec2 vFilterInputCountFilterKindVec;
+#define vFilterInputCount vFilterInputCountFilterKindVec.x
+#define vFilterKind vFilterInputCountFilterKindVec.y
+// Packed into a vector to work around bug 1630356.
+flat varying mediump vec2 vFloat0;
+
+flat varying mediump mat4 vColorMat;
+flat varying mediump ivec4 vFuncs;
+
+// must match add_svg_filter_node_instances in render_target.rs
+#define FILTER_IDENTITY 0
+#define FILTER_IDENTITY_CONVERTSRGB 1
+#define FILTER_OPACITY 2
+#define FILTER_OPACITY_CONVERTSRGB 3
+#define FILTER_TO_ALPHA 4
+#define FILTER_TO_ALPHA_CONVERTSRGB 5
+#define FILTER_BLEND_COLOR 6
+#define FILTER_BLEND_COLOR_CONVERTSRGB 7
+#define FILTER_BLEND_COLOR_BURN 8
+#define FILTER_BLEND_COLOR_BURN_CONVERTSRGB 9
+#define FILTER_BLEND_COLOR_DODGE 10
+#define FILTER_BLEND_COLOR_DODGE_CONVERTSRGB 11
+#define FILTER_BLEND_DARKEN 12
+#define FILTER_BLEND_DARKEN_CONVERTSRGB 13
+#define FILTER_BLEND_DIFFERENCE 14
+#define FILTER_BLEND_DIFFERENCE_CONVERTSRGB 15
+#define FILTER_BLEND_EXCLUSION 16
+#define FILTER_BLEND_EXCLUSION_CONVERTSRGB 17
+#define FILTER_BLEND_HARD_LIGHT 18
+#define FILTER_BLEND_HARD_LIGHT_CONVERTSRGB 19
+#define FILTER_BLEND_HUE 20
+#define FILTER_BLEND_HUE_CONVERTSRGB 21
+#define FILTER_BLEND_LIGHTEN 22
+#define FILTER_BLEND_LIGHTEN_CONVERTSRGB 23
+#define FILTER_BLEND_LUMINOSITY 24
+#define FILTER_BLEND_LUMINOSITY_CONVERTSRGB 25
+#define FILTER_BLEND_MULTIPLY 26
+#define FILTER_BLEND_MULTIPLY_CONVERTSRGB 27
+#define FILTER_BLEND_NORMAL 28
+#define FILTER_BLEND_NORMAL_CONVERTSRGB 29
+#define FILTER_BLEND_OVERLAY 30
+#define FILTER_BLEND_OVERLAY_CONVERTSRGB 31
+#define FILTER_BLEND_SATURATION 32
+#define FILTER_BLEND_SATURATION_CONVERTSRGB 33
+#define FILTER_BLEND_SCREEN 34
+#define FILTER_BLEND_SCREEN_CONVERTSRGB 35
+#define FILTER_BLEND_SOFT_LIGHT 36
+#define FILTER_BLEND_SOFT_LIGHT_CONVERTSRGB 37
+#define FILTER_COLOR_MATRIX 38
+#define FILTER_COLOR_MATRIX_CONVERTSRGB 39
+#define FILTER_COMPONENT_TRANSFER 40
+#define FILTER_COMPONENT_TRANSFER_CONVERTSRGB 41
+#define FILTER_COMPOSITE_ARITHMETIC 42
+#define FILTER_COMPOSITE_ARITHMETIC_CONVERTSRGB 43
+#define FILTER_COMPOSITE_ATOP 44
+#define FILTER_COMPOSITE_ATOP_CONVERTSRGB 45
+#define FILTER_COMPOSITE_IN 46
+#define FILTER_COMPOSITE_IN_CONVERTSRGB 47
+#define FILTER_COMPOSITE_LIGHTER 48
+#define FILTER_COMPOSITE_LIGHTER_CONVERTSRGB 49
+#define FILTER_COMPOSITE_OUT 50
+#define FILTER_COMPOSITE_OUT_CONVERTSRGB 51
+#define FILTER_COMPOSITE_OVER 52
+#define FILTER_COMPOSITE_OVER_CONVERTSRGB 53
+#define FILTER_COMPOSITE_XOR 54
+#define FILTER_COMPOSITE_XOR_CONVERTSRGB 55
+#define FILTER_CONVOLVE_MATRIX_EDGE_MODE_DUPLICATE 56
+#define FILTER_CONVOLVE_MATRIX_EDGE_MODE_DUPLICATE_CONVERTSRGB 57
+#define FILTER_CONVOLVE_MATRIX_EDGE_MODE_NONE 58
+#define FILTER_CONVOLVE_MATRIX_EDGE_MODE_NONE_CONVERTSRGB 59
+#define FILTER_CONVOLVE_MATRIX_EDGE_MODE_WRAP 60
+#define FILTER_CONVOLVE_MATRIX_EDGE_MODE_WRAP_CONVERTSRGB 61
+#define FILTER_DIFFUSE_LIGHTING_DISTANT 62
+#define FILTER_DIFFUSE_LIGHTING_DISTANT_CONVERTSRGB 63
+#define FILTER_DIFFUSE_LIGHTING_POINT 64
+#define FILTER_DIFFUSE_LIGHTING_POINT_CONVERTSRGB 65
+#define FILTER_DIFFUSE_LIGHTING_SPOT 66
+#define FILTER_DIFFUSE_LIGHTING_SPOT_CONVERTSRGB 67
+#define FILTER_DISPLACEMENT_MAP 68
+#define FILTER_DISPLACEMENT_MAP_CONVERTSRGB 69
+#define FILTER_DROP_SHADOW 70
+#define FILTER_DROP_SHADOW_CONVERTSRGB 71
+#define FILTER_FLOOD 72
+#define FILTER_FLOOD_CONVERTSRGB 73
+#define FILTER_GAUSSIAN_BLUR 74
+#define FILTER_GAUSSIAN_BLUR_CONVERTSRGB 75
+#define FILTER_IMAGE 76
+#define FILTER_IMAGE_CONVERTSRGB 77
+#define FILTER_MORPHOLOGY_DILATE 80
+#define FILTER_MORPHOLOGY_DILATE_CONVERTSRGB 81
+#define FILTER_MORPHOLOGY_ERODE 82
+#define FILTER_MORPHOLOGY_ERODE_CONVERTSRGB 83
+#define FILTER_SPECULAR_LIGHTING_DISTANT 86
+#define FILTER_SPECULAR_LIGHTING_DISTANT_CONVERTSRGB 87
+#define FILTER_SPECULAR_LIGHTING_POINT 88
+#define FILTER_SPECULAR_LIGHTING_POINT_CONVERTSRGB 89
+#define FILTER_SPECULAR_LIGHTING_SPOT 90
+#define FILTER_SPECULAR_LIGHTING_SPOT_CONVERTSRGB 91
+#define FILTER_TILE 92
+#define FILTER_TILE_CONVERTSRGB 93
+#define FILTER_TURBULENCE_WITH_FRACTAL_NOISE_WITH_NO_STITCHING 94
+#define FILTER_TURBULENCE_WITH_FRACTAL_NOISE_WITH_NO_STITCHING_CONVERTSRGB 95
+#define FILTER_TURBULENCE_WITH_FRACTAL_NOISE_WITH_STITCHING 96
+#define FILTER_TURBULENCE_WITH_FRACTAL_NOISE_WITH_STITCHING_CONVERTSRGB 97
+#define FILTER_TURBULENCE_WITH_TURBULENCE_NOISE_WITH_NO_STITCHING 98
+#define FILTER_TURBULENCE_WITH_TURBULENCE_NOISE_WITH_NO_STITCHING_CONVERTSRGB 99
+#define FILTER_TURBULENCE_WITH_TURBULENCE_NOISE_WITH_STITCHING 100
+#define FILTER_TURBULENCE_WITH_TURBULENCE_NOISE_WITH_STITCHING_CONVERTSRGB 101
+
+// All of the _CONVERTSRGB variants match this mask
+#define FILTER_BITFLAGS_CONVERTSRGB 1
+
+#ifdef WR_VERTEX_SHADER
+
+// due to padding around the target rect, we need to know both target and render task rect
+PER_INSTANCE in vec4 aFilterTargetRect;
+PER_INSTANCE in vec4 aFilterInput1ContentScaleAndOffset;
+PER_INSTANCE in vec4 aFilterInput2ContentScaleAndOffset;
+PER_INSTANCE in int aFilterInput1TaskAddress;
+PER_INSTANCE in int aFilterInput2TaskAddress;
+PER_INSTANCE in int aFilterKind;
+PER_INSTANCE in int aFilterInputCount;
+PER_INSTANCE in ivec2 aFilterExtraDataAddress;
+
+// used for feFlood and feDropShadow colors
+// this is based on SrgbToLinear below, but that version hits SWGL compile
+// errors when used in vertex shaders for some reason
+vec3 vertexSrgbToLinear(vec3 color) {
+ vec3 c1 = color * vec3(1.0 / 12.92);
+ vec3 c2;
+ c2.r = pow(color.r * (1.0 / 1.055) + (0.055 / 1.055), 2.4);
+ c2.g = pow(color.g * (1.0 / 1.055) + (0.055 / 1.055), 2.4);
+ c2.b = pow(color.b * (1.0 / 1.055) + (0.055 / 1.055), 2.4);
+ return mix(c1, c2, step(vec3(0.04045), color));
+}
+
+vec4 compute_uv_rect(RectWithEndpoint task_rect, vec2 texture_size) {
+ vec4 uvRect = vec4(task_rect.p0 + vec2(0.5),
+ task_rect.p1 - vec2(0.5));
+ uvRect /= texture_size.xyxy;
+ return uvRect;
+}
+
+vec2 compute_uv(RectWithEndpoint task_rect, vec4 scale_and_offset, vec2 target_size, vec2 texture_size) {
+ // SVG spec dictates that we want to *not* scale coordinates between nodes,
+ // must be able to sample at offsets, and must be able to fetch outside the
+ // clamp rect as transparent black, so we have custom texcoords for the
+ // fetch area, separate from the clamp rect
+ return (task_rect.p0 + scale_and_offset.zw + scale_and_offset.xy * aPosition.xy) / texture_size.xy;
+}
+
+void main(void) {
+ vec2 pos = mix(aFilterTargetRect.xy, aFilterTargetRect.zw, aPosition.xy);
+
+ RectWithEndpoint input_1_task;
+ if (aFilterInputCount > 0) {
+ vec2 texture_size = vec2(TEX_SIZE(sColor0).xy);
+ input_1_task = fetch_render_task_rect(aFilterInput1TaskAddress);
+ vInput1UvRect = compute_uv_rect(input_1_task, texture_size);
+ vInput1Uv = compute_uv(input_1_task, aFilterInput1ContentScaleAndOffset, aFilterTargetRect.zw - aFilterTargetRect.xy, texture_size);
+ }
+
+ RectWithEndpoint input_2_task;
+ if (aFilterInputCount > 1) {
+ vec2 texture_size = vec2(TEX_SIZE(sColor1).xy);
+ input_2_task = fetch_render_task_rect(aFilterInput2TaskAddress);
+ vInput2UvRect = compute_uv_rect(input_2_task, texture_size);
+ vInput2Uv = compute_uv(input_2_task, aFilterInput2ContentScaleAndOffset, aFilterTargetRect.zw - aFilterTargetRect.xy, texture_size);
+ }
+
+ vFilterInputCount = aFilterInputCount;
+ vFilterKind = aFilterKind;
+
+ switch (aFilterKind) {
+ case FILTER_IDENTITY:
+ case FILTER_IDENTITY_CONVERTSRGB:
+ break;
+ case FILTER_OPACITY:
+ case FILTER_OPACITY_CONVERTSRGB:
+ // opacity takes one input and an alpha value, so we just stuffed
+ // that in the unused input 2 content rect
+ vFloat0.x = aFilterInput2ContentScaleAndOffset.x;
+ break;
+ case FILTER_TO_ALPHA:
+ case FILTER_TO_ALPHA_CONVERTSRGB:
+ break;
+ case FILTER_BLEND_COLOR:
+ case FILTER_BLEND_COLOR_CONVERTSRGB:
+ case FILTER_BLEND_COLOR_BURN:
+ case FILTER_BLEND_COLOR_BURN_CONVERTSRGB:
+ case FILTER_BLEND_COLOR_DODGE:
+ case FILTER_BLEND_COLOR_DODGE_CONVERTSRGB:
+ case FILTER_BLEND_DARKEN:
+ case FILTER_BLEND_DARKEN_CONVERTSRGB:
+ case FILTER_BLEND_DIFFERENCE:
+ case FILTER_BLEND_DIFFERENCE_CONVERTSRGB:
+ case FILTER_BLEND_EXCLUSION:
+ case FILTER_BLEND_EXCLUSION_CONVERTSRGB:
+ case FILTER_BLEND_HARD_LIGHT:
+ case FILTER_BLEND_HARD_LIGHT_CONVERTSRGB:
+ case FILTER_BLEND_HUE:
+ case FILTER_BLEND_HUE_CONVERTSRGB:
+ case FILTER_BLEND_LIGHTEN:
+ case FILTER_BLEND_LIGHTEN_CONVERTSRGB:
+ case FILTER_BLEND_LUMINOSITY:
+ case FILTER_BLEND_LUMINOSITY_CONVERTSRGB:
+ case FILTER_BLEND_MULTIPLY:
+ case FILTER_BLEND_MULTIPLY_CONVERTSRGB:
+ case FILTER_BLEND_NORMAL:
+ case FILTER_BLEND_NORMAL_CONVERTSRGB:
+ case FILTER_BLEND_OVERLAY:
+ case FILTER_BLEND_OVERLAY_CONVERTSRGB:
+ case FILTER_BLEND_SATURATION:
+ case FILTER_BLEND_SATURATION_CONVERTSRGB:
+ case FILTER_BLEND_SCREEN:
+ case FILTER_BLEND_SCREEN_CONVERTSRGB:
+ case FILTER_BLEND_SOFT_LIGHT:
+ case FILTER_BLEND_SOFT_LIGHT_CONVERTSRGB:
+ break;
+ case FILTER_COLOR_MATRIX:
+ case FILTER_COLOR_MATRIX_CONVERTSRGB:
+ vec4 mat_data[4] = fetch_from_gpu_cache_4_direct(aFilterExtraDataAddress);
+ vColorMat = mat4(mat_data[0], mat_data[1], mat_data[2], mat_data[3]);
+ vFilterData0 = fetch_from_gpu_cache_1_direct(aFilterExtraDataAddress + ivec2(4, 0));
+ break;
+ case FILTER_COMPONENT_TRANSFER:
+ case FILTER_COMPONENT_TRANSFER_CONVERTSRGB:
+ vData = ivec4(aFilterExtraDataAddress, 0, 0);
+ break;
+ case FILTER_COMPOSITE_ARITHMETIC:
+ case FILTER_COMPOSITE_ARITHMETIC_CONVERTSRGB:
+ // arithmetic parameters
+ vFilterData0 = fetch_from_gpu_cache_1_direct(aFilterExtraDataAddress);
+ break;
+ case FILTER_COMPOSITE_ATOP:
+ case FILTER_COMPOSITE_ATOP_CONVERTSRGB:
+ case FILTER_COMPOSITE_IN:
+ case FILTER_COMPOSITE_IN_CONVERTSRGB:
+ case FILTER_COMPOSITE_LIGHTER:
+ case FILTER_COMPOSITE_LIGHTER_CONVERTSRGB:
+ case FILTER_COMPOSITE_OUT:
+ case FILTER_COMPOSITE_OUT_CONVERTSRGB:
+ case FILTER_COMPOSITE_OVER:
+ case FILTER_COMPOSITE_OVER_CONVERTSRGB:
+ case FILTER_COMPOSITE_XOR:
+ case FILTER_COMPOSITE_XOR_CONVERTSRGB:
+ break;
+ case FILTER_CONVOLVE_MATRIX_EDGE_MODE_DUPLICATE:
+ case FILTER_CONVOLVE_MATRIX_EDGE_MODE_DUPLICATE_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_CONVOLVE_MATRIX_EDGE_MODE_NONE:
+ case FILTER_CONVOLVE_MATRIX_EDGE_MODE_NONE_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_CONVOLVE_MATRIX_EDGE_MODE_WRAP:
+ case FILTER_CONVOLVE_MATRIX_EDGE_MODE_WRAP_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_DIFFUSE_LIGHTING_DISTANT:
+ case FILTER_DIFFUSE_LIGHTING_DISTANT_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_DIFFUSE_LIGHTING_POINT:
+ case FILTER_DIFFUSE_LIGHTING_POINT_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_DIFFUSE_LIGHTING_SPOT:
+ case FILTER_DIFFUSE_LIGHTING_SPOT_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_DISPLACEMENT_MAP:
+ case FILTER_DISPLACEMENT_MAP_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_DROP_SHADOW:
+ vFilterData0 = fetch_from_gpu_cache_1_direct(aFilterExtraDataAddress);
+ // premultiply the color
+ vFilterData0.rgb = vFilterData0.rgb * vFilterData0.a;
+ break;
+ case FILTER_DROP_SHADOW_CONVERTSRGB:
+ vFilterData0 = fetch_from_gpu_cache_1_direct(aFilterExtraDataAddress);
+ // convert from sRGB to linearRGB and premultiply by alpha
+ vFilterData0.rgb = vertexSrgbToLinear(vFilterData0.rgb);
+ vFilterData0.rgb = vFilterData0.rgb * vFilterData0.a;
+ break;
+ case FILTER_FLOOD:
+ // feFlood has no actual input textures, so input 2 rect is color
+ vFilterData0 = aFilterInput2ContentScaleAndOffset;
+ // premultiply the color
+ vFilterData0.rgb = vFilterData0.rgb * vFilterData0.a;
+ break;
+ case FILTER_FLOOD_CONVERTSRGB:
+ // feFlood has no actual input textures, so input 2 rect is color
+ vFilterData0 = aFilterInput2ContentScaleAndOffset;
+ // convert from sRGB to linearRGB and premultiply by alpha
+ vFilterData0.rgb = vertexSrgbToLinear(vFilterData0.rgb);
+ vFilterData0.rgb = vFilterData0.rgb * vFilterData0.a;
+ break;
+ case FILTER_GAUSSIAN_BLUR:
+ case FILTER_GAUSSIAN_BLUR_CONVERTSRGB:
+ break;
+ case FILTER_IMAGE:
+ case FILTER_IMAGE_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_MORPHOLOGY_DILATE:
+ case FILTER_MORPHOLOGY_DILATE_CONVERTSRGB:
+ case FILTER_MORPHOLOGY_ERODE:
+ case FILTER_MORPHOLOGY_ERODE_CONVERTSRGB:
+ // morphology filters have radius values in second input rect
+ vFilterData0 = aFilterInput2ContentScaleAndOffset;
+ break;
+ case FILTER_SPECULAR_LIGHTING_DISTANT:
+ case FILTER_SPECULAR_LIGHTING_DISTANT_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_SPECULAR_LIGHTING_POINT:
+ case FILTER_SPECULAR_LIGHTING_POINT_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_SPECULAR_LIGHTING_SPOT:
+ case FILTER_SPECULAR_LIGHTING_SPOT_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_TILE:
+ case FILTER_TILE_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_TURBULENCE_WITH_FRACTAL_NOISE_WITH_NO_STITCHING:
+ case FILTER_TURBULENCE_WITH_FRACTAL_NOISE_WITH_NO_STITCHING_CONVERTSRGB:
+ case FILTER_TURBULENCE_WITH_FRACTAL_NOISE_WITH_STITCHING:
+ case FILTER_TURBULENCE_WITH_FRACTAL_NOISE_WITH_STITCHING_CONVERTSRGB:
+ case FILTER_TURBULENCE_WITH_TURBULENCE_NOISE_WITH_NO_STITCHING:
+ case FILTER_TURBULENCE_WITH_TURBULENCE_NOISE_WITH_NO_STITCHING_CONVERTSRGB:
+ case FILTER_TURBULENCE_WITH_TURBULENCE_NOISE_WITH_STITCHING:
+ case FILTER_TURBULENCE_WITH_TURBULENCE_NOISE_WITH_STITCHING_CONVERTSRGB:
+ // TODO
+ break;
+ default:
+ break;
+ }
+
+ gl_Position = uTransform * vec4(pos, 0.0, 1.0);
+}
+#endif
+
+#ifdef WR_FRAGMENT_SHADER
+
+vec3 Multiply(vec3 Cb, vec3 Cs) {
+ return Cb * Cs;
+}
+
+vec3 Screen(vec3 Cb, vec3 Cs) {
+ return Cb + Cs - (Cb * Cs);
+}
+
+vec3 HardLight(vec3 Cb, vec3 Cs) {
+ vec3 m = Multiply(Cb, 2.0 * Cs);
+ vec3 s = Screen(Cb, 2.0 * Cs - 1.0);
+ vec3 edge = vec3(0.5, 0.5, 0.5);
+ return mix(m, s, step(edge, Cs));
+}
+
+// TODO: Worth doing with mix/step? Check GLSL output.
+float ColorDodge(float Cb, float Cs) {
+ if (Cb == 0.0)
+ return 0.0;
+ else if (Cs == 1.0)
+ return 1.0;
+ else
+ return min(1.0, Cb / (1.0 - Cs));
+}
+
+// TODO: Worth doing with mix/step? Check GLSL output.
+float ColorBurn(float Cb, float Cs) {
+ if (Cb == 1.0)
+ return 1.0;
+ else if (Cs == 0.0)
+ return 0.0;
+ else
+ return 1.0 - min(1.0, (1.0 - Cb) / Cs);
+}
+
+float SoftLight(float Cb, float Cs) {
+ if (Cs <= 0.5) {
+ return Cb - (1.0 - 2.0 * Cs) * Cb * (1.0 - Cb);
+ } else {
+ float D;
+
+ if (Cb <= 0.25)
+ D = ((16.0 * Cb - 12.0) * Cb + 4.0) * Cb;
+ else
+ D = sqrt(Cb);
+
+ return Cb + (2.0 * Cs - 1.0) * (D - Cb);
+ }
+}
+
+vec3 Difference(vec3 Cb, vec3 Cs) {
+ return abs(Cb - Cs);
+}
+
+vec3 Exclusion(vec3 Cb, vec3 Cs) {
+ return Cb + Cs - 2.0 * Cb * Cs;
+}
+
+// These functions below are taken from the spec.
+// There's probably a much quicker way to implement
+// them in GLSL...
+float Sat(vec3 c) {
+ return max(c.r, max(c.g, c.b)) - min(c.r, min(c.g, c.b));
+}
+
+float Lum(vec3 c) {
+ vec3 f = vec3(0.3, 0.59, 0.11);
+ return dot(c, f);
+}
+
+vec3 ClipColor(vec3 C) {
+ float L = Lum(C);
+ float n = min(C.r, min(C.g, C.b));
+ float x = max(C.r, max(C.g, C.b));
+
+ if (n < 0.0)
+ C = L + (((C - L) * L) / (L - n));
+
+ if (x > 1.0)
+ C = L + (((C - L) * (1.0 - L)) / (x - L));
+
+ return C;
+}
+
+vec3 SetLum(vec3 C, float l) {
+ float d = l - Lum(C);
+ return ClipColor(C + d);
+}
+
+void SetSatInner(inout float Cmin, inout float Cmid, inout float Cmax, float s) {
+ if (Cmax > Cmin) {
+ Cmid = (((Cmid - Cmin) * s) / (Cmax - Cmin));
+ Cmax = s;
+ } else {
+ Cmid = 0.0;
+ Cmax = 0.0;
+ }
+ Cmin = 0.0;
+}
+
+vec3 SetSat(vec3 C, float s) {
+ if (C.r <= C.g) {
+ if (C.g <= C.b) {
+ SetSatInner(C.r, C.g, C.b, s);
+ } else {
+ if (C.r <= C.b) {
+ SetSatInner(C.r, C.b, C.g, s);
+ } else {
+ SetSatInner(C.b, C.r, C.g, s);
+ }
+ }
+ } else {
+ if (C.r <= C.b) {
+ SetSatInner(C.g, C.r, C.b, s);
+ } else {
+ if (C.g <= C.b) {
+ SetSatInner(C.g, C.b, C.r, s);
+ } else {
+ SetSatInner(C.b, C.g, C.r, s);
+ }
+ }
+ }
+ return C;
+}
+
+vec3 Hue(vec3 Cb, vec3 Cs) {
+ return SetLum(SetSat(Cs, Sat(Cb)), Lum(Cb));
+}
+
+vec3 Saturation(vec3 Cb, vec3 Cs) {
+ return SetLum(SetSat(Cb, Sat(Cs)), Lum(Cb));
+}
+
+vec3 Color(vec3 Cb, vec3 Cs) {
+ return SetLum(Cs, Lum(Cb));
+}
+
+vec3 Luminosity(vec3 Cb, vec3 Cs) {
+ return SetLum(Cb, Lum(Cs));
+}
+
+// Based on the Gecko implementation in
+// https://hg.mozilla.org/mozilla-central/file/91b4c3687d75/gfx/src/FilterSupport.cpp#l24
+// These could be made faster by sampling a lookup table stored in a float texture
+// with linear interpolation.
+
+vec3 SrgbToLinear(vec3 color) {
+ vec3 c1 = color / 12.92;
+ vec3 c2 = pow(color / 1.055 + vec3(0.055 / 1.055), vec3(2.4));
+ return if_then_else(lessThanEqual(color, vec3(0.04045)), c1, c2);
+}
+
+vec3 LinearToSrgb(vec3 color) {
+ vec3 c1 = color * 12.92;
+ vec3 c2 = vec3(1.055) * pow(color, vec3(1.0 / 2.4)) - vec3(0.055);
+ return if_then_else(lessThanEqual(color, vec3(0.0031308)), c1, c2);
+}
+
+vec4 sampleInUvRect(sampler2D sampler, vec2 uv, vec4 uvRect) {
+ vec2 clamped = clamp(uv.xy, uvRect.xy, uvRect.zw);
+ return texture(sampler, clamped);
+}
+
+vec4 sampleInUvRectRepeat(sampler2D sampler, vec2 uv, vec4 uvRect) {
+ vec2 size = (uvRect.zw - uvRect.xy);
+ vec2 tiled = uv.xy - floor((uv.xy - uvRect.xy) / size) * size;
+ return texture(sampler, tiled);
+}
+
+void main(void) {
+ // Raw premultiplied color of source texture
+ vec4 Rs = vec4(0.0, 0.0, 0.0, 0.0);
+ // Raw premultiplied color of destination texture
+ vec4 Rb = vec4(0.0, 0.0, 0.0, 0.0);
+ // Normalized (non-premultiplied) color of source texture
+ vec4 Ns = vec4(0.0, 0.0, 0.0, 0.0);
+ // Normalized (non-premultiplied) color of destination texture
+ vec4 Nb = vec4(0.0, 0.0, 0.0, 0.0);
+ // used in FILTER_COMPONENT_TRANSFER
+ ivec4 k;
+ if (vFilterInputCount > 0) {
+ Rs = sampleInUvRect(sColor0, vInput1Uv, vInput1UvRect);
+ Ns.rgb = Rs.rgb * (1.0 / max(0.000001, Rs.a));
+ Ns.a = Rs.a;
+ if ((vFilterKind & FILTER_BITFLAGS_CONVERTSRGB) != 0) {
+ Ns.rgb = SrgbToLinear(Ns.rgb);
+ Rs.rgb = Ns.rgb * Rs.a;
+ }
+ }
+ if (vFilterInputCount > 1) {
+ Rb = sampleInUvRect(sColor1, vInput2Uv, vInput2UvRect);
+ Nb.rgb = Rb.rgb * (1.0 / max(0.000001, Rb.a));
+ Nb.a = Rb.a;
+ if ((vFilterKind & FILTER_BITFLAGS_CONVERTSRGB) != 0) {
+ Nb.rgb = SrgbToLinear(Nb.rgb);
+ Rb.rgb = Nb.rgb * Rb.a;
+ }
+ }
+
+ vec4 result = vec4(1.0, 0.0, 0.0, 1.0);
+
+ switch (vFilterKind) {
+ case FILTER_IDENTITY:
+ case FILTER_IDENTITY_CONVERTSRGB:
+ result = Rs;
+ break;
+ case FILTER_OPACITY:
+ case FILTER_OPACITY_CONVERTSRGB:
+ result = Rs * vFloat0.x;
+ break;
+ case FILTER_TO_ALPHA:
+ case FILTER_TO_ALPHA_CONVERTSRGB:
+ // Just return the alpha; we have literally nothing to do on the RGB
+ // values here, so CONVERTSRGB is irrelevant.
+ oFragColor = vec4(0.0, 0.0, 0.0, Rs.a);
+ return;
+ case FILTER_BLEND_COLOR:
+ case FILTER_BLEND_COLOR_CONVERTSRGB:
+ result.rgb = Color(Nb.rgb, Ns.rgb);
+ result.rgb = (1.0 - Rb.a) * Rs.rgb + (1.0 - Rs.a) * Rb.rgb + Rs.a * Rb.a * result.rgb;
+ result.a = Rb.a * (1.0 - Rs.a) + Rs.a;
+ break;
+ case FILTER_BLEND_COLOR_BURN:
+ case FILTER_BLEND_COLOR_BURN_CONVERTSRGB:
+ result.rgb = vec3(ColorBurn(Nb.r, Ns.r), ColorBurn(Nb.g, Ns.g), ColorBurn(Nb.b, Ns.b));
+ result.rgb = (1.0 - Rb.a) * Rs.rgb + (1.0 - Rs.a) * Rb.rgb + Rs.a * Rb.a * result.rgb;
+ result.a = Rb.a * (1.0 - Rs.a) + Rs.a;
+ break;
+ case FILTER_BLEND_COLOR_DODGE:
+ case FILTER_BLEND_COLOR_DODGE_CONVERTSRGB:
+ result.rgb = vec3(ColorDodge(Nb.r, Ns.r), ColorDodge(Nb.g, Ns.g), ColorDodge(Nb.b, Ns.b));
+ result.rgb = (1.0 - Rb.a) * Rs.rgb + (1.0 - Rs.a) * Rb.rgb + Rs.a * Rb.a * result.rgb;
+ result.a = Rb.a * (1.0 - Rs.a) + Rs.a;
+ break;
+ case FILTER_BLEND_DARKEN:
+ case FILTER_BLEND_DARKEN_CONVERTSRGB:
+ result.rgb = Rs.rgb + Rb.rgb - max(Rs.rgb * Rb.a, Rb.rgb * Rs.a);
+ result.a = Rb.a * (1.0 - Rs.a) + Rs.a;
+ break;
+ case FILTER_BLEND_DIFFERENCE:
+ case FILTER_BLEND_DIFFERENCE_CONVERTSRGB:
+ result.rgb = Rs.rgb + Rb.rgb - 2.0 * min(Rs.rgb * Rb.a, Rb.rgb * Rs.a);
+ result.a = Rb.a * (1.0 - Rs.a) + Rs.a;
+ break;
+ case FILTER_BLEND_EXCLUSION:
+ case FILTER_BLEND_EXCLUSION_CONVERTSRGB:
+ result.rgb = Rs.rgb + Rb.rgb - 2.0 * (Rs.rgb * Rb.rgb);
+ result.a = Rb.a * (1.0 - Rs.a) + Rs.a;
+ break;
+ case FILTER_BLEND_HARD_LIGHT:
+ case FILTER_BLEND_HARD_LIGHT_CONVERTSRGB:
+ result.rgb = HardLight(Nb.rgb, Ns.rgb);
+ result.rgb = (1.0 - Rb.a) * Rs.rgb + (1.0 - Rs.a) * Rb.rgb + Rs.a * Rb.a * result.rgb;
+ result.a = Rb.a * (1.0 - Rs.a) + Rs.a;
+ break;
+ case FILTER_BLEND_HUE:
+ case FILTER_BLEND_HUE_CONVERTSRGB:
+ result.rgb = Hue(Nb.rgb, Ns.rgb);
+ result.rgb = (1.0 - Rb.a) * Rs.rgb + (1.0 - Rs.a) * Rb.rgb + Rs.a * Rb.a * result.rgb;
+ result.a = Rb.a * (1.0 - Rs.a) + Rs.a;
+ break;
+ case FILTER_BLEND_LIGHTEN:
+ case FILTER_BLEND_LIGHTEN_CONVERTSRGB:
+ result.rgb = Rs.rgb + Rb.rgb - min(Rs.rgb * Rb.a, Rb.rgb * Rs.a);
+ result.a = Rb.a * (1.0 - Rs.a) + Rs.a;
+ break;
+ case FILTER_BLEND_LUMINOSITY:
+ case FILTER_BLEND_LUMINOSITY_CONVERTSRGB:
+ result.rgb = Luminosity(Nb.rgb, Ns.rgb);
+ result.rgb = (1.0 - Rb.a) * Rs.rgb + (1.0 - Rs.a) * Rb.rgb + Rs.a * Rb.a * result.rgb;
+ result.a = Rb.a * (1.0 - Rs.a) + Rs.a;
+ break;
+ case FILTER_BLEND_MULTIPLY:
+ case FILTER_BLEND_MULTIPLY_CONVERTSRGB:
+ result.rgb = Rs.rgb * (1.0 - Rb.a) + Rb.rgb * (1.0 - Rs.a) + Rs.rgb * Rb.rgb;
+ result.a = Rb.a * (1.0 - Rs.a) + Rs.a;
+ break;
+ case FILTER_BLEND_NORMAL:
+ case FILTER_BLEND_NORMAL_CONVERTSRGB:
+ result = Rb * (1.0 - Rs.a) + Rs;
+ break;
+ case FILTER_BLEND_OVERLAY:
+ case FILTER_BLEND_OVERLAY_CONVERTSRGB:
+ // Overlay is inverse of Hardlight
+ result.rgb = HardLight(Ns.rgb, Nb.rgb);
+ result.rgb = (1.0 - Rb.a) * Rs.rgb + (1.0 - Rs.a) * Rb.rgb + Rs.a * Rb.a * result.rgb;
+ result.a = Rb.a * (1.0 - Rs.a) + Rs.a;
+ break;
+ case FILTER_BLEND_SATURATION:
+ case FILTER_BLEND_SATURATION_CONVERTSRGB:
+ result.rgb = Saturation(Nb.rgb, Ns.rgb);
+ result.rgb = (1.0 - Rb.a) * Rs.rgb + (1.0 - Rs.a) * Rb.rgb + Rs.a * Rb.a * result.rgb;
+ result.a = Rb.a * (1.0 - Rs.a) + Rs.a;
+ break;
+ case FILTER_BLEND_SCREEN:
+ case FILTER_BLEND_SCREEN_CONVERTSRGB:
+ result.rgb = Rs.rgb + Rb.rgb - (Rs.rgb * Rb.rgb);
+ result.a = Rb.a * (1.0 - Rs.a) + Rs.a;
+ break;
+ case FILTER_BLEND_SOFT_LIGHT:
+ case FILTER_BLEND_SOFT_LIGHT_CONVERTSRGB:
+ result.rgb = vec3(SoftLight(Nb.r, Ns.r), SoftLight(Nb.g, Ns.g), SoftLight(Nb.b, Ns.b));
+ result.rgb = (1.0 - Rb.a) * Rs.rgb + (1.0 - Rs.a) * Rb.rgb + Rs.a * Rb.a * result.rgb;
+ result.a = Rb.a * (1.0 - Rs.a) + Rs.a;
+ break;
+ case FILTER_COLOR_MATRIX:
+ case FILTER_COLOR_MATRIX_CONVERTSRGB:
+ result = vColorMat * Ns + vFilterData0;
+ result = clamp(result, vec4(0.0), vec4(1.0));
+ result.rgb = result.rgb * result.a;
+ break;
+ case FILTER_COMPONENT_TRANSFER:
+ case FILTER_COMPONENT_TRANSFER_CONVERTSRGB:
+ // fetch new value for each channel from the RGBA lookup table.
+ result = floor(clamp(Ns * 255.0, vec4(0.0), vec4(255.0)));
+ // SWGL doesn't have an intrinsic for ivec4(vec4)
+ k = ivec4(int(result.r), int(result.g), int(result.b), int(result.a));
+ result.r = fetch_from_gpu_cache_1_direct(vData.xy + ivec2(k.r, 0)).r;
+ result.g = fetch_from_gpu_cache_1_direct(vData.xy + ivec2(k.g, 0)).g;
+ result.b = fetch_from_gpu_cache_1_direct(vData.xy + ivec2(k.b, 0)).b;
+ result.a = fetch_from_gpu_cache_1_direct(vData.xy + ivec2(k.a, 0)).a;
+ result.rgb = result.rgb * result.a;
+ break;
+ case FILTER_COMPOSITE_ARITHMETIC:
+ case FILTER_COMPOSITE_ARITHMETIC_CONVERTSRGB:
+ result = Rs * Rb * vFilterData0.x + Rs * vFilterData0.y + Rb * vFilterData0.z + vec4(vFilterData0.w);
+ result = clamp(result, vec4(0.0), vec4(1.0));
+ break;
+ case FILTER_COMPOSITE_ATOP:
+ case FILTER_COMPOSITE_ATOP_CONVERTSRGB:
+ result = Rs * Rb.a + Rb * (1.0 - Rs.a);
+ break;
+ case FILTER_COMPOSITE_IN:
+ case FILTER_COMPOSITE_IN_CONVERTSRGB:
+ result = Rs * Rb.a;
+ break;
+ case FILTER_COMPOSITE_LIGHTER:
+ case FILTER_COMPOSITE_LIGHTER_CONVERTSRGB:
+ result = Rs + Rb;
+ result = clamp(result, vec4(0.0), vec4(1.0));
+ break;
+ case FILTER_COMPOSITE_OUT:
+ case FILTER_COMPOSITE_OUT_CONVERTSRGB:
+ result = Rs * (1.0 - Rb.a);
+ break;
+ case FILTER_COMPOSITE_OVER:
+ case FILTER_COMPOSITE_OVER_CONVERTSRGB:
+ result = Rs + Rb * (1.0 - Rs.a);
+ break;
+ case FILTER_COMPOSITE_XOR:
+ case FILTER_COMPOSITE_XOR_CONVERTSRGB:
+ result = Rs * (1.0 - Rb.a) + Rb * (1.0 - Rs.a);
+ break;
+ case FILTER_CONVOLVE_MATRIX_EDGE_MODE_DUPLICATE:
+ case FILTER_CONVOLVE_MATRIX_EDGE_MODE_DUPLICATE_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_CONVOLVE_MATRIX_EDGE_MODE_NONE:
+ case FILTER_CONVOLVE_MATRIX_EDGE_MODE_NONE_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_CONVOLVE_MATRIX_EDGE_MODE_WRAP:
+ case FILTER_CONVOLVE_MATRIX_EDGE_MODE_WRAP_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_DIFFUSE_LIGHTING_DISTANT:
+ case FILTER_DIFFUSE_LIGHTING_DISTANT_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_DIFFUSE_LIGHTING_POINT:
+ case FILTER_DIFFUSE_LIGHTING_POINT_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_DIFFUSE_LIGHTING_SPOT:
+ case FILTER_DIFFUSE_LIGHTING_SPOT_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_DISPLACEMENT_MAP:
+ case FILTER_DISPLACEMENT_MAP_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_DROP_SHADOW:
+ case FILTER_DROP_SHADOW_CONVERTSRGB:
+ // First input is the original image, second input is the offset and blurred
+ // image; we replace the color of the second input with vFilterData0.rgb and
+ // composite with mode OVER.
+ // This color is already premultiplied, so it's ready to use
+ result = Rs + vFilterData0 * (Rb.a * (1.0 - Rs.a));
+ break;
+ case FILTER_FLOOD:
+ case FILTER_FLOOD_CONVERTSRGB:
+ result = vFilterData0;
+ break;
+ case FILTER_GAUSSIAN_BLUR:
+ case FILTER_GAUSSIAN_BLUR_CONVERTSRGB:
+ // unused - the IDENTITY filter is used for composing this
+ break;
+ case FILTER_IMAGE:
+ case FILTER_IMAGE_CONVERTSRGB:
+ // TODO - we need to get the uvrect set up in the code before
+ // this shader case will matter, best to leave it at the fallback
+ // color for now when it is known to be broken.
+ break;
+ case FILTER_MORPHOLOGY_DILATE:
+ case FILTER_MORPHOLOGY_DILATE_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_MORPHOLOGY_ERODE:
+ case FILTER_MORPHOLOGY_ERODE_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_SPECULAR_LIGHTING_DISTANT:
+ case FILTER_SPECULAR_LIGHTING_DISTANT_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_SPECULAR_LIGHTING_POINT:
+ case FILTER_SPECULAR_LIGHTING_POINT_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_SPECULAR_LIGHTING_SPOT:
+ case FILTER_SPECULAR_LIGHTING_SPOT_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_TILE:
+ case FILTER_TILE_CONVERTSRGB:
+ // TODO
+ // we can just return the texel without doing anything else
+ vec2 tileUv = rect_repeat(vInput1Uv, vInput1UvRect.xy, vInput1UvRect.zw);
+ oFragColor = sampleInUvRect(sColor0, tileUv, vInput1UvRect);
+ return;
+ case FILTER_TURBULENCE_WITH_FRACTAL_NOISE_WITH_NO_STITCHING:
+ case FILTER_TURBULENCE_WITH_FRACTAL_NOISE_WITH_NO_STITCHING_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_TURBULENCE_WITH_FRACTAL_NOISE_WITH_STITCHING:
+ case FILTER_TURBULENCE_WITH_FRACTAL_NOISE_WITH_STITCHING_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_TURBULENCE_WITH_TURBULENCE_NOISE_WITH_NO_STITCHING:
+ case FILTER_TURBULENCE_WITH_TURBULENCE_NOISE_WITH_NO_STITCHING_CONVERTSRGB:
+ // TODO
+ break;
+ case FILTER_TURBULENCE_WITH_TURBULENCE_NOISE_WITH_STITCHING:
+ case FILTER_TURBULENCE_WITH_TURBULENCE_NOISE_WITH_STITCHING_CONVERTSRGB:
+ // TODO
+ break;
+ default:
+ break;
+ }
+
+ if ((vFilterKind & FILTER_BITFLAGS_CONVERTSRGB) != 0) {
+ // convert back to sRGB in unmultiplied color space
+ result.rgb = LinearToSrgb(result.rgb * (1.0 / max(0.000001, result.a))) * result.a;
+ }
+
+ oFragColor = result;
+}
+#endif
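
For reference, the _CONVERTSRGB round trip described in the notes above (unpremultiply, convert sRGB to linear, apply the operation, convert back, re-premultiply) can be summarized outside the shader. A minimal Rust sketch using the same piecewise curves as SrgbToLinear/LinearToSrgb; the function names are illustrative and not part of this patch:

// Illustrative sketch of the _CONVERTSRGB round trip (not from the patch).
fn srgb_to_linear(c: f32) -> f32 {
    if c <= 0.04045 { c / 12.92 } else { ((c + 0.055) / 1.055).powf(2.4) }
}

fn linear_to_srgb(c: f32) -> f32 {
    if c <= 0.0031308 { c * 12.92 } else { 1.055 * c.powf(1.0 / 2.4) - 0.055 }
}

// `rs` is a premultiplied sRGB pixel; `op` works on unpremultiplied linear RGB.
fn convert_srgb_op(rs: [f32; 4], op: impl Fn([f32; 3]) -> [f32; 3]) -> [f32; 4] {
    let a = rs[3].max(1e-6);
    let n = [rs[0] / a, rs[1] / a, rs[2] / a];                // unpremultiply
    let lin = [srgb_to_linear(n[0]), srgb_to_linear(n[1]), srgb_to_linear(n[2])];
    let out = op(lin);                                        // the filter operation
    let srgb = [linear_to_srgb(out[0]), linear_to_srgb(out[1]), linear_to_srgb(out[2])];
    [srgb[0] * rs[3], srgb[1] * rs[3], srgb[2] * rs[3], rs[3]] // re-premultiply
}
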
diff --git a/gfx/wr/webrender/res/ps_quad.glsl b/gfx/wr/webrender/res/ps_quad.glsl
index 3565c28afc..dfde43045c 100644
--- a/gfx/wr/webrender/res/ps_quad.glsl
+++ b/gfx/wr/webrender/res/ps_quad.glsl
@@ -37,12 +37,9 @@
#include shared,rect,transform,render_task,gpu_buffer
flat varying mediump vec4 v_color;
-flat varying mediump vec4 v_uv_sample_bounds;
-// x: (in ps_quad_textured) has edge flags
-// y: has uv rect
-// z: (in ps_quad_textured) sample as mask
+// w: has edge flags
+// x, y, z are available for patterns to use.
flat varying lowp ivec4 v_flags;
-varying highp vec2 v_uv;
#ifndef SWGL_ANTIALIAS
varying highp vec2 vLocalPos;
@@ -74,22 +71,24 @@ varying highp vec2 vLocalPos;
PER_INSTANCE in ivec4 aData;
+struct QuadSegment {
+ RectWithEndpoint rect;
+ RectWithEndpoint uv_rect;
+};
+
struct PrimitiveInfo {
vec2 local_pos;
RectWithEndpoint local_prim_rect;
RectWithEndpoint local_clip_rect;
+ QuadSegment segment;
+
int edge_flags;
int quad_flags;
ivec2 pattern_input;
};
-struct QuadSegment {
- RectWithEndpoint rect;
- vec4 uv_rect;
-};
-
struct QuadPrimitive {
RectWithEndpoint bounds;
RectWithEndpoint clip;
@@ -102,7 +101,7 @@ QuadSegment fetch_segment(int base, int index) {
vec4 texels[2] = fetch_from_gpu_buffer_2f(base + 3 + index * 2);
seg.rect = RectWithEndpoint(texels[0].xy, texels[0].zw);
- seg.uv_rect = texels[1];
+ seg.uv_rect = RectWithEndpoint(texels[1].xy, texels[1].zw);
return seg;
}
@@ -232,7 +231,7 @@ PrimitiveInfo quad_primive_info(void) {
QuadSegment seg;
if (qi.segment_index == INVALID_SEGMENT_INDEX) {
seg.rect = prim.bounds;
- seg.uv_rect = vec4(0.0);
+ seg.uv_rect = RectWithEndpoint(vec2(0.0), vec2(0.0));
} else {
seg = fetch_segment(qi.prim_address_f, qi.segment_index);
}
@@ -325,35 +324,13 @@ PrimitiveInfo quad_primive_info(void) {
qi.quad_flags
);
- if (seg.uv_rect.xy == seg.uv_rect.zw) {
- v_color = prim.color;
- v_flags.y = 0;
- } else {
- v_color = vec4(1.0);
- v_flags.y = 1;
-
- vec2 f = (vi.local_pos - seg.rect.p0) / (seg.rect.p1 - seg.rect.p0);
-
- vec2 uv = mix(
- seg.uv_rect.xy,
- seg.uv_rect.zw,
- f
- );
-
- vec2 texture_size = vec2(TEX_SIZE(sColor0));
-
- v_uv = uv / texture_size;
-
- v_uv_sample_bounds = vec4(
- seg.uv_rect.xy + vec2(0.5),
- seg.uv_rect.zw - vec2(0.5)
- ) / texture_size.xyxy;
- }
+ v_color = prim.color;
return PrimitiveInfo(
vi.local_pos,
prim.bounds,
prim.clip,
+ seg,
qi.edge_flags,
qi.quad_flags,
qh.pattern_input
@@ -372,9 +349,9 @@ void antialiasing_vertex(PrimitiveInfo prim) {
vLocalPos = prim.local_pos;
if (prim.edge_flags == 0) {
- v_flags.x = 0;
+ v_flags.w = 0;
} else {
- v_flags.x = 1;
+ v_flags.w = 1;
}
#endif
}
@@ -392,7 +369,7 @@ vec4 pattern_fragment(vec4 base_color);
float antialiasing_fragment() {
float alpha = 1.0;
#ifndef SWGL_ANTIALIAS
- if (v_flags.x != 0) {
+ if (v_flags.w != 0) {
alpha = init_transform_fs(vLocalPos);
}
#endif
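
As an aside, the segment layout consumed by fetch_segment above is two vec4 texels per segment (texel 0 = local rect, texel 1 = uv rect), starting at prim_address_f + 3 + segment_index * 2. A hedged sketch of the matching CPU-side packing, with hypothetical names that are not from this patch:

// Hypothetical packing matching fetch_segment: two vec4 texels per segment.
struct RectF { p0: [f32; 2], p1: [f32; 2] }

fn pack_quad_segment(rect: &RectF, uv_rect: &RectF) -> [[f32; 4]; 2] {
    [
        // texel 0: local rect (p0, p1)
        [rect.p0[0], rect.p0[1], rect.p1[0], rect.p1[1]],
        // texel 1: uv rect (p0, p1)
        [uv_rect.p0[0], uv_rect.p0[1], uv_rect.p1[0], uv_rect.p1[1]],
    ]
}
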
diff --git a/gfx/wr/webrender/res/ps_quad_conic_gradient.glsl b/gfx/wr/webrender/res/ps_quad_conic_gradient.glsl
new file mode 100644
index 0000000000..afd02b2776
--- /dev/null
+++ b/gfx/wr/webrender/res/ps_quad_conic_gradient.glsl
@@ -0,0 +1,90 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/// This shader renders conic gradients in a color or alpha target.
+
+#include ps_quad,gradient
+
+#define PI 3.141592653589793
+
+// x: start offset, y: offset scale, z: angle
+// Packed into a vector to work around bug 1630356.
+flat varying highp vec3 v_start_offset_offset_scale_angle_vec;
+#define v_start_offset v_start_offset_offset_scale_angle_vec.x
+#define v_offset_scale v_start_offset_offset_scale_angle_vec.y
+#define v_angle v_start_offset_offset_scale_angle_vec.z
+
+varying highp vec2 v_dir;
+
+#ifdef WR_VERTEX_SHADER
+struct ConicGradient {
+ vec2 center;
+ vec2 scale;
+ float start_offset;
+ float end_offset;
+ float angle;
+ // 1.0 if the gradient should be repeated, 0.0 otherwise.
+ float repeat;
+};
+
+ConicGradient fetch_conic_gradient(int address) {
+ vec4[2] data = fetch_from_gpu_buffer_2f(address);
+
+ return ConicGradient(
+ data[0].xy,
+ data[0].zw,
+ data[1].x,
+ data[1].y,
+ data[1].z,
+ data[1].w
+ );
+}
+
+void pattern_vertex(PrimitiveInfo info) {
+ ConicGradient gradient = fetch_conic_gradient(info.pattern_input.x);
+ v_gradient_address.x = info.pattern_input.y;
+ v_gradient_repeat.x = gradient.repeat;
+
+ // Store 1/d where d = end_offset - start_offset
+ // If d = 0, we can't get its reciprocal. Instead, just use a zero scale.
+ float d = gradient.end_offset - gradient.start_offset;
+ v_offset_scale = d != 0.0 ? 1.0 / d : 0.0;
+
+ v_angle = PI / 2.0 - gradient.angle;
+ v_start_offset = gradient.start_offset * v_offset_scale;
+ v_dir = ((info.local_pos - info.local_prim_rect.p0) * gradient.scale - gradient.center);
+}
+
+#endif
+
+
+#ifdef WR_FRAGMENT_SHADER
+
+// From https://math.stackexchange.com/questions/1098487/atan2-faster-approximation
+float approx_atan2(float y, float x) {
+ vec2 a = abs(vec2(x, y));
+ float slope = min(a.x, a.y) / max(a.x, a.y);
+ float s2 = slope * slope;
+ float r = ((-0.0464964749 * s2 + 0.15931422) * s2 - 0.327622764) * s2 * slope + slope;
+
+ r = if_then_else(float(a.y > a.x), 1.57079637 - r, r);
+ r = if_then_else(float(x < 0.0), 3.14159274 - r, r);
+ // To match atan2's behavior, -0.0 should count as negative and flip the sign of r.
+ // Does this matter in practice in the context of conic gradients?
+ r = r * sign(y);
+
+ return r;
+}
+
+vec4 pattern_fragment(vec4 color) {
+ // Use inverse trig to find the angle offset from the relative position.
+ vec2 current_dir = v_dir;
+ float current_angle = approx_atan2(current_dir.y, current_dir.x) + v_angle;
+ float offset = fract(current_angle / (2.0 * PI)) * v_offset_scale - v_start_offset;
+
+ color *= sample_gradient(offset);
+ return color;
+}
+
+#endif
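
The conic pattern above maps a direction vector to a gradient offset via atan2, a quarter-turn bias (v_angle = PI/2 - angle), and the precomputed 1/(end - start) scale. A small Rust sketch of the same math, using the exact atan2 rather than the shader's polynomial approximation; the names are illustrative:

// Illustrative Rust version of the conic offset math above.
use std::f32::consts::PI;

fn conic_offset(dir: [f32; 2], gradient_angle: f32, start: f32, end: f32) -> f32 {
    let d = end - start;
    let offset_scale = if d != 0.0 { 1.0 / d } else { 0.0 };
    // v_angle = PI/2 - angle in the vertex shader.
    let angle = dir[1].atan2(dir[0]) + (PI / 2.0 - gradient_angle);
    // fract(angle / 2*PI) maps the angle to a 0..1 turn, then into stop space.
    let turn = (angle / (2.0 * PI)).rem_euclid(1.0);
    turn * offset_scale - start * offset_scale
}
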
diff --git a/gfx/wr/webrender/res/ps_quad_radial_gradient.glsl b/gfx/wr/webrender/res/ps_quad_radial_gradient.glsl
new file mode 100644
index 0000000000..05b4dd2aa8
--- /dev/null
+++ b/gfx/wr/webrender/res/ps_quad_radial_gradient.glsl
@@ -0,0 +1,81 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/// This shader renders radial gradients in a color or alpha target.
+
+#include ps_quad,gradient
+
+// Start radius. Packed into a vector to work around bug 1630356.
+flat varying highp vec2 v_start_radius;
+varying highp vec2 v_pos;
+
+struct RadialGradient {
+ vec2 center;
+ vec2 scale;
+ float start_radius;
+ float end_radius;
+ float xy_ratio;
+ // 1.0 if the gradient should be repeated, 0.0 otherwise.
+ float repeat;
+};
+
+RadialGradient fetch_radial_gradient(int address) {
+ vec4[2] data = fetch_from_gpu_buffer_2f(address);
+
+ return RadialGradient(
+ data[0].xy,
+ data[0].zw,
+ data[1].x,
+ data[1].y,
+ data[1].z,
+ data[1].w
+ );
+}
+
+#ifdef WR_VERTEX_SHADER
+void pattern_vertex(PrimitiveInfo info) {
+ RadialGradient gradient = fetch_radial_gradient(info.pattern_input.x);
+ v_gradient_address.x = info.pattern_input.y;
+
+ // Store 1/rd where rd = end_radius - start_radius
+ // If rd = 0, we can't get its reciprocal. Instead, just use a zero scale.
+ float rd = gradient.end_radius - gradient.start_radius;
+ float radius_scale = rd != 0.0 ? 1.0 / rd : 0.0;
+
+ v_start_radius.x = gradient.start_radius * radius_scale;
+
+ // Transform all coordinates by the y scale so the
+ // fragment shader can work with circles
+
+ // v_pos is in a coordinate space relative to the task rect
+ // (so it is independent of the task origin).
+ v_pos = ((info.local_pos - info.local_prim_rect.p0) * gradient.scale - gradient.center) * radius_scale;
+ v_pos.y *= gradient.xy_ratio;
+
+ v_gradient_repeat.x = gradient.repeat;
+}
+#endif
+
+#ifdef WR_FRAGMENT_SHADER
+vec4 pattern_fragment(vec4 color) {
+ // Solve for t in length(pd) = v_start_radius + t * rd
+ float offset = length(v_pos) - v_start_radius.x;
+ color *= sample_gradient(offset);
+
+ return color;
+}
+
+#if defined(SWGL_DRAW_SPAN)
+void swgl_drawSpanRGBA8() {
+ int address = swgl_validateGradient(sGpuBufferF, get_gpu_buffer_uv(v_gradient_address.x),
+ int(GRADIENT_ENTRIES + 2.0));
+ if (address < 0) {
+ return;
+ }
+ swgl_commitRadialGradientRGBA8(sGpuBufferF, address, GRADIENT_ENTRIES, v_gradient_repeat.x != 0.0,
+ v_pos, v_start_radius.x);
+}
+#endif
+
+#endif
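
Likewise, the radial pattern solves length(pd) = start_radius + t * rd for t after pre-scaling positions by 1/rd and by xy_ratio on the y axis. A minimal Rust sketch of that mapping, folding the vertex-shader scaling into one function; names are illustrative and the input is assumed to already be relative to the gradient center:

// Illustrative Rust version of the radial offset math above.
fn radial_offset(center_relative_pos: [f32; 2], xy_ratio: f32,
                 start_radius: f32, end_radius: f32) -> f32 {
    let rd = end_radius - start_radius;
    let radius_scale = if rd != 0.0 { 1.0 / rd } else { 0.0 };
    // Scale by 1/rd and squash y so the fragment math works with circles.
    let x = center_relative_pos[0] * radius_scale;
    let y = center_relative_pos[1] * radius_scale * xy_ratio;
    // Solve length(pd) = start_radius + t * rd for t.
    (x * x + y * y).sqrt() - start_radius * radius_scale
}
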
diff --git a/gfx/wr/webrender/res/ps_quad_textured.glsl b/gfx/wr/webrender/res/ps_quad_textured.glsl
index b405ccac2c..82868d68df 100644
--- a/gfx/wr/webrender/res/ps_quad_textured.glsl
+++ b/gfx/wr/webrender/res/ps_quad_textured.glsl
@@ -4,24 +4,42 @@
/// This shader renders solid colors or simple images in a color or alpha target.
-#include ps_quad
+#include ps_quad,sample_color0
+
+#define v_flags_textured v_flags.x
+#define v_flags_sample_as_mask v_flags.y
#ifdef WR_VERTEX_SHADER
+
void pattern_vertex(PrimitiveInfo info) {
+ // Note: Since the uv rect is passed via segments, this shader cannot sample from a
+ // texture if no segments are provided.
+ if (info.segment.uv_rect.p0 != info.segment.uv_rect.p1) {
+ // Textured
+ v_flags_textured = 1;
+
+ vec2 f = (info.local_pos - info.segment.rect.p0) / rect_size(info.segment.rect);
+ vs_init_sample_color0(f, info.segment.uv_rect);
+ } else {
+ // Solid color
+ v_flags_textured = 0;
+ }
+
if ((info.quad_flags & QF_SAMPLE_AS_MASK) != 0) {
- v_flags.z = 1;
+ v_flags_sample_as_mask = 1;
} else {
- v_flags.z = 0;
+ v_flags_sample_as_mask = 0;
}
}
+
#endif
#ifdef WR_FRAGMENT_SHADER
+
vec4 pattern_fragment(vec4 color) {
- if (v_flags.y != 0) {
- vec2 uv = clamp(v_uv, v_uv_sample_bounds.xy, v_uv_sample_bounds.zw);
- vec4 texel = TEX_SAMPLE(sColor0, uv);
- if (v_flags.z != 0) {
+ if (v_flags_textured != 0) {
+ vec4 texel = fs_sample_color0();
+ if (v_flags_sample_as_mask != 0) {
texel = texel.rrrr;
}
color *= texel;
@@ -32,12 +50,12 @@ vec4 pattern_fragment(vec4 color) {
#if defined(SWGL_DRAW_SPAN)
void swgl_drawSpanRGBA8() {
- if (v_flags.y != 0) {
- if (v_flags.z != 0) {
+ if (v_flags_textured != 0) {
+ if (v_flags_sample_as_mask != 0) {
// Fall back to fragment shader as we don't specialize for mask yet. Perhaps
// we can use an existing swgl commit or add a new one though?
} else {
- swgl_commitTextureLinearColorRGBA8(sColor0, v_uv, v_uv_sample_bounds, v_color);
+ swgl_commitTextureLinearColorRGBA8(sColor0, v_uv0, v_uv0_sample_bounds, v_color);
}
} else {
swgl_commitSolidRGBA8(v_color);
diff --git a/gfx/wr/webrender/res/rect.glsl b/gfx/wr/webrender/res/rect.glsl
index 2a080ee393..4449d1a565 100644
--- a/gfx/wr/webrender/res/rect.glsl
+++ b/gfx/wr/webrender/res/rect.glsl
@@ -38,3 +38,12 @@ vec2 rect_clamp(RectWithEndpoint rect, vec2 pt) {
vec2 rect_size(RectWithEndpoint rect) {
return rect.p1 - rect.p0;
}
+
+// this is similar to rect_clamp but repeats the image for coordinates outside
+// the rect, used in SVG feTile filter
+vec2 rect_repeat(vec2 p, vec2 p0, vec2 p1) {
+ vec2 r = p - p0;
+ vec2 s = p1 - p0;
+ vec2 is = 1.0 / max(s, vec2(0.000001));
+ return p0 + s * fract(is * r);
+}
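
A quick sanity check of rect_repeat above, sketched in Rust with a worked example; the helper is illustrative, not part of this patch:

// Illustrative Rust equivalent of rect_repeat, with a worked example:
// rect_repeat([12.0, -3.0], [0.0, 0.0], [10.0, 10.0]) == [2.0, 7.0]
fn rect_repeat(p: [f32; 2], p0: [f32; 2], p1: [f32; 2]) -> [f32; 2] {
    let mut out = [0.0f32; 2];
    for i in 0..2 {
        let s = (p1[i] - p0[i]).max(1e-6);
        out[i] = p0[i] + s * ((p[i] - p0[i]) / s).rem_euclid(1.0);
    }
    out
}
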
diff --git a/gfx/wr/webrender/res/sample_color0.glsl b/gfx/wr/webrender/res/sample_color0.glsl
new file mode 100644
index 0000000000..fa8d85961d
--- /dev/null
+++ b/gfx/wr/webrender/res/sample_color0.glsl
@@ -0,0 +1,41 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/// This file provides the boilerplate for sampling from sColor0 with strict sample bounds.
+
+#include shared
+
+flat varying mediump vec4 v_uv0_sample_bounds;
+varying highp vec2 v_uv0;
+
+#ifdef WR_VERTEX_SHADER
+
+/// sample_pos is in 0..1 normalized coordinates
+/// uv_rect is in pixels.
+void vs_init_sample_color0(vec2 sample_pos, RectWithEndpoint uv_rect) {
+ vec2 uv = mix(uv_rect.p0, uv_rect.p1, sample_pos);
+
+ vec2 texture_size = vec2(TEX_SIZE(sColor0));
+
+ v_uv0 = uv / texture_size;
+
+ v_uv0_sample_bounds = vec4(
+ uv_rect.p0 + vec2(0.5),
+ uv_rect.p1 - vec2(0.5)
+ ) / texture_size.xyxy;
+}
+
+#endif
+
+#ifdef WR_FRAGMENT_SHADER
+
+/// The vertex shader must have called vs_init_sample_color0
+vec4 fs_sample_color0() {
+ vec2 uv = clamp(v_uv0, v_uv0_sample_bounds.xy, v_uv0_sample_bounds.zw);
+ vec4 texel = TEX_SAMPLE(sColor0, uv);
+
+ return texel;
+}
+
+#endif
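
The half-texel inset computed by vs_init_sample_color0 above keeps bilinear filtering from bleeding outside the uv rect. A minimal Rust sketch of the bounds computation, with illustrative names:

// Illustrative bounds computation matching vs_init_sample_color0: normalize by
// the texture size and inset the uv rect by half a texel on each side.
fn sample_bounds(uv_p0: [f32; 2], uv_p1: [f32; 2], tex_size: [f32; 2]) -> [f32; 4] {
    [
        (uv_p0[0] + 0.5) / tex_size[0],
        (uv_p0[1] + 0.5) / tex_size[1],
        (uv_p1[0] - 0.5) / tex_size[0],
        (uv_p1[1] - 0.5) / tex_size[1],
    ]
}
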
diff --git a/gfx/wr/webrender/src/batch.rs b/gfx/wr/webrender/src/batch.rs
index 7cf9341515..78adcd036b 100644
--- a/gfx/wr/webrender/src/batch.rs
+++ b/gfx/wr/webrender/src/batch.rs
@@ -437,16 +437,19 @@ impl OpaqueBatchList {
// `current_batch_index` instead of iterating the batches.
z_bounding_rect: &PictureRect,
) -> &mut Vec<PrimitiveInstanceData> {
- if self.current_batch_index == usize::MAX ||
+ // If the area of this primitive is larger than the given threshold,
+ // then it is large enough to warrant breaking a batch for. In this
+ // case we just see if it can be added to the existing batch or
+ // create a new one.
+ let is_large_occluder = z_bounding_rect.area() > self.pixel_area_threshold_for_new_batch;
+ // Since primitives of the same kind tend to come in succession, we keep track
+ // of the current batch index to skip the search in some cases. We ignore the
+ // current batch index in the case of large occluders to make sure they get added
+ // at the top of the batch list.
+ if is_large_occluder || self.current_batch_index == usize::MAX ||
!self.batches[self.current_batch_index].key.is_compatible_with(&key) {
let mut selected_batch_index = None;
- let item_area = z_bounding_rect.area();
-
- // If the area of this primitive is larger than the given threshold,
- // then it is large enough to warrant breaking a batch for. In this
- // case we just see if it can be added to the existing batch or
- // create a new one.
- if item_area > self.pixel_area_threshold_for_new_batch {
+ if is_large_occluder {
if let Some(batch) = self.batches.last() {
if batch.key.is_compatible_with(&key) {
selected_batch_index = Some(self.batches.len() - 1);
@@ -1742,7 +1745,8 @@ impl BatchBuilder {
Filter::ComponentTransfer |
Filter::Blur { .. } |
Filter::DropShadows(..) |
- Filter::Opacity(..) => unreachable!(),
+ Filter::Opacity(..) |
+ Filter::SVGGraphNode(..) => unreachable!(),
};
// Other filters that may introduce opacity are handled via different
@@ -2183,6 +2187,53 @@ impl BatchBuilder {
uv_rect_address.as_int(),
);
}
+ PictureCompositeMode::SVGFEGraph(..) => {
+ let (clip_task_address, clip_mask_texture_id) = ctx.get_prim_clip_task_and_texture(
+ prim_info.clip_task_index,
+ render_tasks,
+ ).unwrap();
+
+ let kind = BatchKind::Brush(
+ BrushBatchKind::Image(ImageBufferKind::Texture2D)
+ );
+ let (uv_rect_address, texture) = render_tasks.resolve_location(
+ pic_task_id,
+ gpu_cache,
+ ).unwrap();
+ let textures = BatchTextures::prim_textured(
+ texture,
+ clip_mask_texture_id,
+ );
+ let key = BatchKey::new(
+ kind,
+ blend_mode,
+ textures,
+ );
+ let prim_header_index = prim_headers.push(
+ &prim_header,
+ z_id,
+ self.batcher.render_task_address,
+ ImageBrushData {
+ color_mode: ShaderColorMode::Image,
+ alpha_type: AlphaType::PremultipliedAlpha,
+ raster_space: RasterizationSpace::Screen,
+ opacity: 1.0,
+ }.encode(),
+ );
+
+ self.add_brush_instance_to_batches(
+ key,
+ batch_features,
+ bounding_rect,
+ z_id,
+ INVALID_SEGMENT_INDEX,
+ EdgeAaSegmentMask::all(),
+ clip_task_address,
+ brush_flags,
+ prim_header_index,
+ uv_rect_address.as_int(),
+ );
+ }
}
}
None => {
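
The reorganized condition in the batch.rs hunk above boils down to: a primitive whose area exceeds the threshold (a "large occluder") bypasses the cached current_batch_index and is only ever appended to the batch at the top of the list. A simplified Rust sketch of just that predicate, with illustrative names for the extracted parameters:

// Illustrative predicate: does this primitive need a fresh batch lookup?
fn needs_batch_search(
    item_area: f32,
    area_threshold: f32,
    current_batch_index: usize,
    current_batch_compatible: bool,
) -> bool {
    // Large occluders ignore the cached batch index so they only ever join
    // the batch at the top of the list (or start a new one).
    let is_large_occluder = item_area > area_threshold;
    is_large_occluder || current_batch_index == usize::MAX || !current_batch_compatible
}
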
diff --git a/gfx/wr/webrender/src/frame_builder.rs b/gfx/wr/webrender/src/frame_builder.rs
index b975c960eb..c9e66d2aff 100644
--- a/gfx/wr/webrender/src/frame_builder.rs
+++ b/gfx/wr/webrender/src/frame_builder.rs
@@ -453,6 +453,7 @@ impl FrameBuilder {
SubpixelMode::Allow,
&mut frame_state,
&frame_context,
+ data_stores,
&mut scratch.primitive,
tile_caches,
)
diff --git a/gfx/wr/webrender/src/gpu_types.rs b/gfx/wr/webrender/src/gpu_types.rs
index e222ebed04..38e7fbb717 100644
--- a/gfx/wr/webrender/src/gpu_types.rs
+++ b/gfx/wr/webrender/src/gpu_types.rs
@@ -129,6 +129,21 @@ pub struct SvgFilterInstance {
pub extra_data_address: GpuCacheAddress,
}
+#[derive(Clone, Debug)]
+#[repr(C)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct SVGFEFilterInstance {
+ pub target_rect: DeviceRect,
+ pub input_1_content_scale_and_offset: [f32; 4],
+ pub input_2_content_scale_and_offset: [f32; 4],
+ pub input_1_task_address: RenderTaskAddress,
+ pub input_2_task_address: RenderTaskAddress,
+ pub kind: u16,
+ pub input_count: u16,
+ pub extra_data_address: GpuCacheAddress,
+}
+
#[derive(Copy, Clone, Debug, Hash, MallocSizeOf, PartialEq, Eq)]
#[repr(C)]
#[cfg_attr(feature = "capture", derive(Serialize))]
@@ -535,7 +550,7 @@ impl From<SplitCompositeInstance> for PrimitiveInstanceData {
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct QuadInstance {
- pub render_task_address: RenderTaskAddress,
+ pub dst_task_address: RenderTaskAddress,
pub prim_address_i: GpuBufferAddress,
pub prim_address_f: GpuBufferAddress,
pub z_id: ZBufferId,
@@ -565,12 +580,13 @@ impl From<QuadInstance> for PrimitiveInstanceData {
((instance.part_index as i32) << 8) |
((instance.segment_index as i32) << 0),
- instance.render_task_address.0,
+ instance.dst_task_address.0,
],
}
}
}
+#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct QuadSegment {
pub rect: LayoutRect,
diff --git a/gfx/wr/webrender/src/internal_types.rs b/gfx/wr/webrender/src/internal_types.rs
index 97827a98fe..660f8d6da1 100644
--- a/gfx/wr/webrender/src/internal_types.rs
+++ b/gfx/wr/webrender/src/internal_types.rs
@@ -3,7 +3,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use api::{ColorF, DocumentId, ExternalImageId, PrimitiveFlags, Parameter, RenderReasons};
-use api::{ImageFormat, NotificationRequest, Shadow, FilterOp, ImageBufferKind};
+use api::{ImageFormat, NotificationRequest, Shadow, FilterOpGraphPictureBufferId, FilterOpGraphPictureReference, FilterOpGraphNode, FilterOp, ImageBufferKind};
use api::FramePublishId;
use api::units::*;
use crate::render_api::DebugCommand;
@@ -15,6 +15,7 @@ use crate::frame_builder::Frame;
use crate::profiler::TransactionProfile;
use crate::spatial_tree::SpatialNodeIndex;
use crate::prim_store::PrimitiveInstanceIndex;
+use crate::filterdata::FilterDataHandle;
use fxhash::FxHasher;
use plane_split::BspSplitter;
use smallvec::SmallVec;
@@ -208,8 +209,557 @@ pub struct PlaneSplitterIndex(pub usize);
/// An arbitrary number which we assume opacity is invisible below.
const OPACITY_EPSILON: f32 = 0.001;
+#[derive(Clone, Copy, Debug)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct FilterGraphPictureReference {
+ /// Id of the picture in question in a namespace unique to this filter DAG,
+ /// some are special values like
+ /// FilterPrimitiveDescription::kPrimitiveIndexSourceGraphic.
+ pub buffer_id: FilterOpGraphPictureBufferId,
+ /// Set by wrap_prim_with_filters to the subregion of the input node, may
+ /// also have been offset for feDropShadow or feOffset
+ pub subregion: LayoutRect,
+ /// During scene build this is the offset to apply to the input subregion
+ /// for feOffset, which can be optimized away by pushing its offset and
+ /// subregion crop to downstream nodes. This is always zero in render tasks
+ /// where it has already been applied to subregion by that point. Not used
+ /// in get_coverage_svgfe because source_padding/target_padding represent
+ /// the offset there.
+ pub offset: LayoutVector2D,
+ /// Equal to the inflate value of the referenced buffer, or 0
+ pub inflate: i16,
+ /// Padding on each side to represent how this input is read relative to the
+ /// node's output subregion; this represents what the operation needs to
+ /// read from this input, which may be blurred or offset.
+ pub source_padding: LayoutRect,
+ /// Padding on each side to represent how this input affects the node's
+ /// subregion; this can be used to calculate target subregion based on
+ /// SourceGraphic subregion. This is usually equal to source_padding except
+ /// offset in the opposite direction; inflates typically do the same thing
+ /// to both types of padding.
+ pub target_padding: LayoutRect,
+}
+
+impl From<FilterOpGraphPictureReference> for FilterGraphPictureReference {
+ fn from(pic: FilterOpGraphPictureReference) -> Self {
+ FilterGraphPictureReference{
+ buffer_id: pic.buffer_id,
+ // All of these are set by wrap_prim_with_filters
+ subregion: LayoutRect::zero(),
+ offset: LayoutVector2D::zero(),
+ inflate: 0,
+ source_padding: LayoutRect::zero(),
+ target_padding: LayoutRect::zero(),
+ }
+ }
+}
+
+pub const SVGFE_CONVOLVE_DIAMETER_LIMIT: usize = 5;
+pub const SVGFE_CONVOLVE_VALUES_LIMIT: usize = SVGFE_CONVOLVE_DIAMETER_LIMIT *
+ SVGFE_CONVOLVE_DIAMETER_LIMIT;
+
+#[derive(Clone, Debug)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub enum FilterGraphOp {
+ /// Filter that copies the SourceGraphic image into the specified subregion,
+ /// This is intentionally the only way to get SourceGraphic into the graph,
+ /// as the filter region must be applied before it is used.
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - no inputs, no linear
+ SVGFESourceGraphic,
+ /// Filter that copies the SourceAlpha image into the specified subregion,
+ /// This is intentionally the only way to get SourceAlpha into the graph,
+ /// as the filter region must be applied before it is used.
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - no inputs, no linear
+ SVGFESourceAlpha,
+ /// Filter that does no transformation of the colors, used to implement a
+ /// few things like SVGFEOffset, and this is the default value in
+ /// impl_default_for_enums.
+ /// parameters: FilterGraphNode
+ /// SVG filter semantics - selectable input with offset
+ SVGFEIdentity,
+ /// represents CSS opacity property as a graph node like the rest of the
+ /// SVGFE* filters
+ /// parameters: FilterGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ SVGFEOpacity{valuebinding: api::PropertyBinding<f32>, value: f32},
+ /// convert a color image to an alpha channel - internal use; generated by
+ /// SVGFilterInstance::GetOrCreateSourceAlphaIndex().
+ SVGFEToAlpha,
+ /// combine 2 images with SVG_FEBLEND_MODE_DARKEN
+ /// parameters: FilterGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feBlendElement
+ SVGFEBlendDarken,
+ /// combine 2 images with SVG_FEBLEND_MODE_LIGHTEN
+ /// parameters: FilterGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feBlendElement
+ SVGFEBlendLighten,
+ /// combine 2 images with SVG_FEBLEND_MODE_MULTIPLY
+ /// parameters: FilterGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feBlendElement
+ SVGFEBlendMultiply,
+ /// combine 2 images with SVG_FEBLEND_MODE_NORMAL
+ /// parameters: FilterGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feBlendElement
+ SVGFEBlendNormal,
+ /// combine 2 images with SVG_FEBLEND_MODE_SCREEN
+ /// parameters: FilterGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feBlendElement
+ SVGFEBlendScreen,
+ /// combine 2 images with SVG_FEBLEND_MODE_OVERLAY
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendOverlay,
+ /// combine 2 images with SVG_FEBLEND_MODE_COLOR_DODGE
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendColorDodge,
+ /// combine 2 images with SVG_FEBLEND_MODE_COLOR_BURN
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendColorBurn,
+ /// combine 2 images with SVG_FEBLEND_MODE_HARD_LIGHT
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendHardLight,
+ /// combine 2 images with SVG_FEBLEND_MODE_SOFT_LIGHT
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendSoftLight,
+ /// combine 2 images with SVG_FEBLEND_MODE_DIFFERENCE
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendDifference,
+ /// combine 2 images with SVG_FEBLEND_MODE_EXCLUSION
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendExclusion,
+ /// combine 2 images with SVG_FEBLEND_MODE_HUE
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendHue,
+ /// combine 2 images with SVG_FEBLEND_MODE_SATURATION
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendSaturation,
+ /// combine 2 images with SVG_FEBLEND_MODE_COLOR
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendColor,
+ /// combine 2 images with SVG_FEBLEND_MODE_LUMINOSITY
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendLuminosity,
+ /// transform colors of image through 5x4 color matrix (transposed for
+ /// efficiency)
+ /// parameters: FilterGraphNode, matrix[5][4]
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feColorMatrixElement
+ SVGFEColorMatrix{values: [f32; 20]},
+ /// transform colors of image through configurable gradients with component
+ /// swizzle
+ /// parameters: FilterGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feComponentTransferElement
+ SVGFEComponentTransfer,
+    /// Processed version of SVGFEComponentTransfer with the FilterData
+    /// replaced by an interned handle; this is created in wrap_prim_with_filters.
+    /// Aside from the interned handle, creates_pixels indicates whether the
+    /// transfer parameters will probably fill the entire subregion with
+    /// non-zero alpha.
+ SVGFEComponentTransferInterned{handle: FilterDataHandle, creates_pixels: bool},
+ /// composite 2 images with chosen composite mode with parameters for that
+ /// mode
+ /// parameters: FilterGraphNode, k1, k2, k3, k4
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feCompositeElement
+ SVGFECompositeArithmetic{k1: f32, k2: f32, k3: f32, k4: f32},
+ /// composite 2 images with chosen composite mode with parameters for that
+ /// mode
+ /// parameters: FilterGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feCompositeElement
+ SVGFECompositeATop,
+ /// composite 2 images with chosen composite mode with parameters for that
+ /// mode
+ /// parameters: FilterGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feCompositeElement
+ SVGFECompositeIn,
+ /// composite 2 images with chosen composite mode with parameters for that
+ /// mode
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Docs: https://developer.mozilla.org/en-US/docs/Web/SVG/Element/feComposite
+ SVGFECompositeLighter,
+ /// composite 2 images with chosen composite mode with parameters for that
+ /// mode
+ /// parameters: FilterGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feCompositeElement
+ SVGFECompositeOut,
+ /// composite 2 images with chosen composite mode with parameters for that
+ /// mode
+ /// parameters: FilterGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feCompositeElement
+ SVGFECompositeOver,
+ /// composite 2 images with chosen composite mode with parameters for that
+ /// mode
+ /// parameters: FilterGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feCompositeElement
+ SVGFECompositeXOR,
+ /// transform image through convolution matrix of up to 25 values (spec
+ /// allows more but for performance reasons we do not)
+ /// parameters: FilterGraphNode, orderX, orderY, kernelValues[25], divisor,
+ /// bias, targetX, targetY, kernelUnitLengthX, kernelUnitLengthY,
+ /// preserveAlpha
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feConvolveMatrixElement
+ SVGFEConvolveMatrixEdgeModeDuplicate{order_x: i32, order_y: i32,
+ kernel: [f32; SVGFE_CONVOLVE_VALUES_LIMIT], divisor: f32, bias: f32,
+ target_x: i32, target_y: i32, kernel_unit_length_x: f32,
+ kernel_unit_length_y: f32, preserve_alpha: i32},
+ /// transform image through convolution matrix of up to 25 values (spec
+ /// allows more but for performance reasons we do not)
+ /// parameters: FilterGraphNode, orderX, orderY, kernelValues[25], divisor,
+ /// bias, targetX, targetY, kernelUnitLengthX, kernelUnitLengthY,
+ /// preserveAlpha
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feConvolveMatrixElement
+ SVGFEConvolveMatrixEdgeModeNone{order_x: i32, order_y: i32,
+ kernel: [f32; SVGFE_CONVOLVE_VALUES_LIMIT], divisor: f32, bias: f32,
+ target_x: i32, target_y: i32, kernel_unit_length_x: f32,
+ kernel_unit_length_y: f32, preserve_alpha: i32},
+ /// transform image through convolution matrix of up to 25 values (spec
+ /// allows more but for performance reasons we do not)
+ /// parameters: FilterGraphNode, orderX, orderY, kernelValues[25], divisor,
+ /// bias, targetX, targetY, kernelUnitLengthX, kernelUnitLengthY,
+ /// preserveAlpha
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feConvolveMatrixElement
+ SVGFEConvolveMatrixEdgeModeWrap{order_x: i32, order_y: i32,
+ kernel: [f32; SVGFE_CONVOLVE_VALUES_LIMIT], divisor: f32, bias: f32,
+ target_x: i32, target_y: i32, kernel_unit_length_x: f32,
+ kernel_unit_length_y: f32, preserve_alpha: i32},
+ /// calculate lighting based on heightmap image with provided values for a
+ /// distant light source with specified direction
+ /// parameters: FilterGraphNode, surfaceScale, diffuseConstant,
+ /// kernelUnitLengthX, kernelUnitLengthY, azimuth, elevation
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEDiffuseLightingElement
+ /// https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEDistantLightElement
+ SVGFEDiffuseLightingDistant{surface_scale: f32, diffuse_constant: f32,
+ kernel_unit_length_x: f32, kernel_unit_length_y: f32, azimuth: f32,
+ elevation: f32},
+ /// calculate lighting based on heightmap image with provided values for a
+ /// point light source at specified location
+ /// parameters: FilterGraphNode, surfaceScale, diffuseConstant,
+ /// kernelUnitLengthX, kernelUnitLengthY, x, y, z
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEDiffuseLightingElement
+ /// https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEPointLightElement
+ SVGFEDiffuseLightingPoint{surface_scale: f32, diffuse_constant: f32,
+ kernel_unit_length_x: f32, kernel_unit_length_y: f32, x: f32, y: f32,
+ z: f32},
+ /// calculate lighting based on heightmap image with provided values for a
+ /// spot light source at specified location pointing at specified target
+ /// location with specified hotspot sharpness and cone angle
+ /// parameters: FilterGraphNode, surfaceScale, diffuseConstant,
+ /// kernelUnitLengthX, kernelUnitLengthY, x, y, z, pointsAtX, pointsAtY,
+ /// pointsAtZ, specularExponent, limitingConeAngle
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEDiffuseLightingElement
+ /// https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFESpotLightElement
+ SVGFEDiffuseLightingSpot{surface_scale: f32, diffuse_constant: f32,
+ kernel_unit_length_x: f32, kernel_unit_length_y: f32, x: f32, y: f32,
+ z: f32, points_at_x: f32, points_at_y: f32, points_at_z: f32,
+ cone_exponent: f32, limiting_cone_angle: f32},
+ /// calculate a distorted version of first input image using offset values
+ /// from second input image at specified intensity
+ /// parameters: FilterGraphNode, scale, xChannelSelector, yChannelSelector
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEDisplacementMapElement
+ SVGFEDisplacementMap{scale: f32, x_channel_selector: u32,
+ y_channel_selector: u32},
+ /// create and merge a dropshadow version of the specified image's alpha
+ /// channel with specified offset and blur radius
+ /// parameters: FilterGraphNode, flood_color, flood_opacity, dx, dy,
+ /// stdDeviationX, stdDeviationY
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEDropShadowElement
+ SVGFEDropShadow{color: ColorF, dx: f32, dy: f32, std_deviation_x: f32,
+ std_deviation_y: f32},
+ /// synthesize a new image of specified size containing a solid color
+ /// parameters: FilterGraphNode, color
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEFloodElement
+ SVGFEFlood{color: ColorF},
+ /// create a blurred version of the input image
+ /// parameters: FilterGraphNode, stdDeviationX, stdDeviationY
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEGaussianBlurElement
+ SVGFEGaussianBlur{std_deviation_x: f32, std_deviation_y: f32},
+ /// synthesize a new image based on a url (i.e. blob image source)
+ /// parameters: FilterGraphNode,
+ /// samplingFilter (see SamplingFilter in Types.h), transform
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEImageElement
+ SVGFEImage{sampling_filter: u32, matrix: [f32; 6]},
+ /// create a new image based on the input image with the contour stretched
+ /// outward (dilate operator)
+ /// parameters: FilterGraphNode, radiusX, radiusY
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEMorphologyElement
+ SVGFEMorphologyDilate{radius_x: f32, radius_y: f32},
+ /// create a new image based on the input image with the contour shrunken
+ /// inward (erode operator)
+ /// parameters: FilterGraphNode, radiusX, radiusY
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEMorphologyElement
+ SVGFEMorphologyErode{radius_x: f32, radius_y: f32},
+ /// calculate lighting based on heightmap image with provided values for a
+ /// distant light source with specified direction
+    /// parameters: FilterGraphNode, surfaceScale, specularConstant, specularExponent,
+ /// kernelUnitLengthX, kernelUnitLengthY, azimuth, elevation
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFESpecularLightingElement
+ /// https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEDistantLightElement
+ SVGFESpecularLightingDistant{surface_scale: f32, specular_constant: f32,
+ specular_exponent: f32, kernel_unit_length_x: f32,
+ kernel_unit_length_y: f32, azimuth: f32, elevation: f32},
+ /// calculate lighting based on heightmap image with provided values for a
+ /// point light source at specified location
+ /// parameters: FilterGraphNode, surfaceScale, specularConstant,
+ /// specularExponent, kernelUnitLengthX, kernelUnitLengthY, x, y, z
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFESpecularLightingElement
+ /// https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEPointLightElement
+ SVGFESpecularLightingPoint{surface_scale: f32, specular_constant: f32,
+ specular_exponent: f32, kernel_unit_length_x: f32,
+ kernel_unit_length_y: f32, x: f32, y: f32, z: f32},
+ /// calculate lighting based on heightmap image with provided values for a
+ /// spot light source at specified location pointing at specified target
+ /// location with specified hotspot sharpness and cone angle
+ /// parameters: FilterGraphNode, surfaceScale, specularConstant,
+ /// specularExponent, kernelUnitLengthX, kernelUnitLengthY, x, y, z,
+ /// pointsAtX, pointsAtY, pointsAtZ, specularExponent, limitingConeAngle
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFESpecularLightingElement
+ /// https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFESpotLightElement
+ SVGFESpecularLightingSpot{surface_scale: f32, specular_constant: f32,
+ specular_exponent: f32, kernel_unit_length_x: f32,
+ kernel_unit_length_y: f32, x: f32, y: f32, z: f32, points_at_x: f32,
+ points_at_y: f32, points_at_z: f32, cone_exponent: f32,
+ limiting_cone_angle: f32},
+ /// create a new image based on the input image, repeated throughout the
+ /// output rectangle
+ /// parameters: FilterGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFETileElement
+ SVGFETile,
+ /// synthesize a new image based on Fractal Noise (Perlin) with the chosen
+ /// stitching mode
+ /// parameters: FilterGraphNode, baseFrequencyX, baseFrequencyY, numOctaves,
+ /// seed
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFETurbulenceElement
+ SVGFETurbulenceWithFractalNoiseWithNoStitching{base_frequency_x: f32,
+ base_frequency_y: f32, num_octaves: u32, seed: u32},
+ /// synthesize a new image based on Fractal Noise (Perlin) with the chosen
+ /// stitching mode
+ /// parameters: FilterGraphNode, baseFrequencyX, baseFrequencyY, numOctaves,
+ /// seed
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFETurbulenceElement
+ SVGFETurbulenceWithFractalNoiseWithStitching{base_frequency_x: f32,
+ base_frequency_y: f32, num_octaves: u32, seed: u32},
+ /// synthesize a new image based on Turbulence Noise (offset vectors)
+ /// parameters: FilterGraphNode, baseFrequencyX, baseFrequencyY, numOctaves,
+ /// seed
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFETurbulenceElement
+ SVGFETurbulenceWithTurbulenceNoiseWithNoStitching{base_frequency_x: f32,
+ base_frequency_y: f32, num_octaves: u32, seed: u32},
+ /// synthesize a new image based on Turbulence Noise (offset vectors)
+ /// parameters: FilterGraphNode, baseFrequencyX, baseFrequencyY, numOctaves,
+ /// seed
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFETurbulenceElement
+ SVGFETurbulenceWithTurbulenceNoiseWithStitching{base_frequency_x: f32,
+ base_frequency_y: f32, num_octaves: u32, seed: u32},
+}
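+
+// Editorial note (illustrative, inferred from the subregion checks in
+// picture.rs rather than stated explicitly here): the SVGFEColorMatrix values
+// are stored transposed as values[input * 4 + output] with inputs ordered
+// (r, g, b, a, 1). The alpha-output entries are therefore values[3],
+// values[7], values[11], values[15] and values[19]; a nonzero entry there
+// other than the a->a term (values[15]) can introduce alpha outside the input
+// subregions, which is why get_coverage_svgfe falls back to the full node
+// subregion in that case.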
+
+impl FilterGraphOp {
+ pub fn kind(&self) -> &'static str {
+ match *self {
+ FilterGraphOp::SVGFEBlendColor => "SVGFEBlendColor",
+ FilterGraphOp::SVGFEBlendColorBurn => "SVGFEBlendColorBurn",
+ FilterGraphOp::SVGFEBlendColorDodge => "SVGFEBlendColorDodge",
+ FilterGraphOp::SVGFEBlendDarken => "SVGFEBlendDarken",
+ FilterGraphOp::SVGFEBlendDifference => "SVGFEBlendDifference",
+ FilterGraphOp::SVGFEBlendExclusion => "SVGFEBlendExclusion",
+ FilterGraphOp::SVGFEBlendHardLight => "SVGFEBlendHardLight",
+ FilterGraphOp::SVGFEBlendHue => "SVGFEBlendHue",
+ FilterGraphOp::SVGFEBlendLighten => "SVGFEBlendLighten",
+ FilterGraphOp::SVGFEBlendLuminosity => "SVGFEBlendLuminosity",
+ FilterGraphOp::SVGFEBlendMultiply => "SVGFEBlendMultiply",
+ FilterGraphOp::SVGFEBlendNormal => "SVGFEBlendNormal",
+ FilterGraphOp::SVGFEBlendOverlay => "SVGFEBlendOverlay",
+ FilterGraphOp::SVGFEBlendSaturation => "SVGFEBlendSaturation",
+ FilterGraphOp::SVGFEBlendScreen => "SVGFEBlendScreen",
+ FilterGraphOp::SVGFEBlendSoftLight => "SVGFEBlendSoftLight",
+ FilterGraphOp::SVGFEColorMatrix{..} => "SVGFEColorMatrix",
+ FilterGraphOp::SVGFEComponentTransfer => "SVGFEComponentTransfer",
+ FilterGraphOp::SVGFEComponentTransferInterned{..} => "SVGFEComponentTransferInterned",
+ FilterGraphOp::SVGFECompositeArithmetic{..} => "SVGFECompositeArithmetic",
+ FilterGraphOp::SVGFECompositeATop => "SVGFECompositeATop",
+ FilterGraphOp::SVGFECompositeIn => "SVGFECompositeIn",
+ FilterGraphOp::SVGFECompositeLighter => "SVGFECompositeLighter",
+ FilterGraphOp::SVGFECompositeOut => "SVGFECompositeOut",
+ FilterGraphOp::SVGFECompositeOver => "SVGFECompositeOver",
+ FilterGraphOp::SVGFECompositeXOR => "SVGFECompositeXOR",
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeDuplicate{..} => "SVGFEConvolveMatrixEdgeModeDuplicate",
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeNone{..} => "SVGFEConvolveMatrixEdgeModeNone",
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeWrap{..} => "SVGFEConvolveMatrixEdgeModeWrap",
+ FilterGraphOp::SVGFEDiffuseLightingDistant{..} => "SVGFEDiffuseLightingDistant",
+ FilterGraphOp::SVGFEDiffuseLightingPoint{..} => "SVGFEDiffuseLightingPoint",
+ FilterGraphOp::SVGFEDiffuseLightingSpot{..} => "SVGFEDiffuseLightingSpot",
+ FilterGraphOp::SVGFEDisplacementMap{..} => "SVGFEDisplacementMap",
+ FilterGraphOp::SVGFEDropShadow{..} => "SVGFEDropShadow",
+ FilterGraphOp::SVGFEFlood{..} => "SVGFEFlood",
+ FilterGraphOp::SVGFEGaussianBlur{..} => "SVGFEGaussianBlur",
+ FilterGraphOp::SVGFEIdentity => "SVGFEIdentity",
+ FilterGraphOp::SVGFEImage{..} => "SVGFEImage",
+ FilterGraphOp::SVGFEMorphologyDilate{..} => "SVGFEMorphologyDilate",
+ FilterGraphOp::SVGFEMorphologyErode{..} => "SVGFEMorphologyErode",
+ FilterGraphOp::SVGFEOpacity{..} => "SVGFEOpacity",
+ FilterGraphOp::SVGFESourceAlpha => "SVGFESourceAlpha",
+ FilterGraphOp::SVGFESourceGraphic => "SVGFESourceGraphic",
+ FilterGraphOp::SVGFESpecularLightingDistant{..} => "SVGFESpecularLightingDistant",
+ FilterGraphOp::SVGFESpecularLightingPoint{..} => "SVGFESpecularLightingPoint",
+ FilterGraphOp::SVGFESpecularLightingSpot{..} => "SVGFESpecularLightingSpot",
+ FilterGraphOp::SVGFETile => "SVGFETile",
+ FilterGraphOp::SVGFEToAlpha => "SVGFEToAlpha",
+ FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithNoStitching{..} => "SVGFETurbulenceWithFractalNoiseWithNoStitching",
+ FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithStitching{..} => "SVGFETurbulenceWithFractalNoiseWithStitching",
+ FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithNoStitching{..} => "SVGFETurbulenceWithTurbulenceNoiseWithNoStitching",
+ FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithStitching{..} => "SVGFETurbulenceWithTurbulenceNoiseWithStitching",
+ }
+ }
+}
+
+#[derive(Clone, Debug)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct FilterGraphNode {
+ /// Indicates this graph node was marked as necessary by the DAG optimizer
+ pub kept_by_optimizer: bool,
+ /// true if color_interpolation_filter == LinearRgb; shader will convert
+ /// sRGB texture pixel colors on load and convert back on store, for correct
+ /// interpolation
+ pub linear: bool,
+ /// padding for output rect if we need a border to get correct clamping, or
+ /// to account for larger final subregion than source rect (see bug 1869672)
+ pub inflate: i16,
+    /// virtualized picture input bindings; these refer to other filter outputs
+    /// by number within the graph. Usually there is one element.
+ pub inputs: Vec<FilterGraphPictureReference>,
+ /// clipping rect for filter node output
+ pub subregion: LayoutRect,
+}
+
+impl From<FilterOpGraphNode> for FilterGraphNode {
+ fn from(node: FilterOpGraphNode) -> Self {
+ let mut inputs: Vec<FilterGraphPictureReference> = Vec::new();
+ if node.input.buffer_id != FilterOpGraphPictureBufferId::None {
+ inputs.push(node.input.into());
+ }
+ if node.input2.buffer_id != FilterOpGraphPictureBufferId::None {
+ inputs.push(node.input2.into());
+ }
+ // If the op used by this node is a feMerge, it will add more inputs
+ // after this invocation.
+ FilterGraphNode{
+ linear: node.linear,
+ inputs,
+ subregion: node.subregion,
+ // These are computed later in scene_building
+ kept_by_optimizer: true,
+ inflate: 0,
+ }
+ }
+}
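+
+// Small sketch of the conversion above (example values hypothetical): a blend
+// node built from a FilterOpGraphNode with input = BufferId(0) and
+// input2 = BufferId(1) converts to inputs.len() == 2, while a source-less node
+// such as an feFlood, with both set to FilterOpGraphPictureBufferId::None,
+// converts to an empty inputs vec; feMerge pushes any further inputs
+// afterwards, as noted in the comment above.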
+
+
/// Equivalent to api::FilterOp with added internal information
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum Filter {
@@ -233,6 +783,7 @@ pub enum Filter {
LinearToSrgb,
ComponentTransfer,
Flood(ColorF),
+ SVGGraphNode(FilterGraphNode, FilterGraphOp),
}
impl Filter {
@@ -258,6 +809,7 @@ impl Filter {
Filter::Flood(color) => {
color.a > OPACITY_EPSILON
}
+ Filter::SVGGraphNode(..) => true,
}
}
@@ -296,6 +848,7 @@ impl Filter {
Filter::LinearToSrgb |
Filter::ComponentTransfer |
Filter::Flood(..) => false,
+ Filter::SVGGraphNode(..) => false,
}
}
@@ -319,6 +872,7 @@ impl Filter {
Filter::Blur { .. } => 12,
Filter::DropShadows(..) => 13,
Filter::Opacity(..) => 14,
+ Filter::SVGGraphNode(..) => unreachable!("SVGGraphNode handled elsewhere"),
}
}
}
@@ -342,6 +896,76 @@ impl From<FilterOp> for Filter {
FilterOp::ComponentTransfer => Filter::ComponentTransfer,
FilterOp::DropShadow(shadow) => Filter::DropShadows(smallvec![shadow]),
FilterOp::Flood(color) => Filter::Flood(color),
+ FilterOp::SVGFEBlendColor{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEBlendColor),
+ FilterOp::SVGFEBlendColorBurn{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEBlendColorBurn),
+ FilterOp::SVGFEBlendColorDodge{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEBlendColorDodge),
+ FilterOp::SVGFEBlendDarken{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEBlendDarken),
+ FilterOp::SVGFEBlendDifference{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEBlendDifference),
+ FilterOp::SVGFEBlendExclusion{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEBlendExclusion),
+ FilterOp::SVGFEBlendHardLight{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEBlendHardLight),
+ FilterOp::SVGFEBlendHue{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEBlendHue),
+ FilterOp::SVGFEBlendLighten{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEBlendLighten),
+ FilterOp::SVGFEBlendLuminosity{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEBlendLuminosity),
+ FilterOp::SVGFEBlendMultiply{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEBlendMultiply),
+ FilterOp::SVGFEBlendNormal{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEBlendNormal),
+ FilterOp::SVGFEBlendOverlay{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEBlendOverlay),
+ FilterOp::SVGFEBlendSaturation{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEBlendSaturation),
+ FilterOp::SVGFEBlendScreen{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEBlendScreen),
+ FilterOp::SVGFEBlendSoftLight{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEBlendSoftLight),
+ FilterOp::SVGFEColorMatrix{node, values} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEColorMatrix{values}),
+ FilterOp::SVGFEComponentTransfer{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEComponentTransfer),
+ FilterOp::SVGFECompositeArithmetic{node, k1, k2, k3, k4} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFECompositeArithmetic{k1, k2, k3, k4}),
+ FilterOp::SVGFECompositeATop{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFECompositeATop),
+ FilterOp::SVGFECompositeIn{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFECompositeIn),
+ FilterOp::SVGFECompositeLighter{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFECompositeLighter),
+ FilterOp::SVGFECompositeOut{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFECompositeOut),
+ FilterOp::SVGFECompositeOver{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFECompositeOver),
+ FilterOp::SVGFECompositeXOR{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFECompositeXOR),
+ FilterOp::SVGFEConvolveMatrixEdgeModeDuplicate{node, order_x, order_y, kernel, divisor, bias, target_x, target_y, kernel_unit_length_x, kernel_unit_length_y, preserve_alpha} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEConvolveMatrixEdgeModeDuplicate{order_x, order_y, kernel, divisor, bias, target_x, target_y, kernel_unit_length_x, kernel_unit_length_y, preserve_alpha}),
+ FilterOp::SVGFEConvolveMatrixEdgeModeNone{node, order_x, order_y, kernel, divisor, bias, target_x, target_y, kernel_unit_length_x, kernel_unit_length_y, preserve_alpha} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEConvolveMatrixEdgeModeNone{order_x, order_y, kernel, divisor, bias, target_x, target_y, kernel_unit_length_x, kernel_unit_length_y, preserve_alpha}),
+ FilterOp::SVGFEConvolveMatrixEdgeModeWrap{node, order_x, order_y, kernel, divisor, bias, target_x, target_y, kernel_unit_length_x, kernel_unit_length_y, preserve_alpha} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEConvolveMatrixEdgeModeWrap{order_x, order_y, kernel, divisor, bias, target_x, target_y, kernel_unit_length_x, kernel_unit_length_y, preserve_alpha}),
+ FilterOp::SVGFEDiffuseLightingDistant{node, surface_scale, diffuse_constant, kernel_unit_length_x, kernel_unit_length_y, azimuth, elevation} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEDiffuseLightingDistant{surface_scale, diffuse_constant, kernel_unit_length_x, kernel_unit_length_y, azimuth, elevation}),
+ FilterOp::SVGFEDiffuseLightingPoint{node, surface_scale, diffuse_constant, kernel_unit_length_x, kernel_unit_length_y, x, y, z} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEDiffuseLightingPoint{surface_scale, diffuse_constant, kernel_unit_length_x, kernel_unit_length_y, x, y, z}),
+ FilterOp::SVGFEDiffuseLightingSpot{node, surface_scale, diffuse_constant, kernel_unit_length_x, kernel_unit_length_y, x, y, z, points_at_x, points_at_y, points_at_z, cone_exponent, limiting_cone_angle} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEDiffuseLightingSpot{surface_scale, diffuse_constant, kernel_unit_length_x, kernel_unit_length_y, x, y, z, points_at_x, points_at_y, points_at_z, cone_exponent, limiting_cone_angle}),
+ FilterOp::SVGFEDisplacementMap{node, scale, x_channel_selector, y_channel_selector} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEDisplacementMap{scale, x_channel_selector, y_channel_selector}),
+ FilterOp::SVGFEDropShadow{node, color, dx, dy, std_deviation_x, std_deviation_y} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEDropShadow{color, dx, dy, std_deviation_x, std_deviation_y}),
+ FilterOp::SVGFEFlood{node, color} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEFlood{color}),
+ FilterOp::SVGFEGaussianBlur{node, std_deviation_x, std_deviation_y} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEGaussianBlur{std_deviation_x, std_deviation_y}),
+ FilterOp::SVGFEIdentity{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEIdentity),
+ FilterOp::SVGFEImage{node, sampling_filter, matrix} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEImage{sampling_filter, matrix}),
+ FilterOp::SVGFEMorphologyDilate{node, radius_x, radius_y} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEMorphologyDilate{radius_x, radius_y}),
+ FilterOp::SVGFEMorphologyErode{node, radius_x, radius_y} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEMorphologyErode{radius_x, radius_y}),
+ FilterOp::SVGFEOffset{node, offset_x, offset_y} => {
+ Filter::SVGGraphNode(
+ FilterGraphNode {
+ kept_by_optimizer: true, // computed later in scene_building
+ linear: node.linear,
+ inflate: 0, // computed later in scene_building
+ inputs: [FilterGraphPictureReference {
+ buffer_id: node.input.buffer_id,
+ offset: LayoutVector2D::new(offset_x, offset_y),
+ subregion: LayoutRect::zero(),
+ inflate: 0,
+ source_padding: LayoutRect::zero(),
+ target_padding: LayoutRect::zero(),
+ }].to_vec(),
+ subregion: node.subregion,
+ },
+ FilterGraphOp::SVGFEIdentity,
+ )
+ },
+ FilterOp::SVGFEOpacity{node, valuebinding, value} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEOpacity{valuebinding, value}),
+ FilterOp::SVGFESourceAlpha{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFESourceAlpha),
+ FilterOp::SVGFESourceGraphic{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFESourceGraphic),
+ FilterOp::SVGFESpecularLightingDistant{node, surface_scale, specular_constant, specular_exponent, kernel_unit_length_x, kernel_unit_length_y, azimuth, elevation} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFESpecularLightingDistant{surface_scale, specular_constant, specular_exponent, kernel_unit_length_x, kernel_unit_length_y, azimuth, elevation}),
+ FilterOp::SVGFESpecularLightingPoint{node, surface_scale, specular_constant, specular_exponent, kernel_unit_length_x, kernel_unit_length_y, x, y, z} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFESpecularLightingPoint{surface_scale, specular_constant, specular_exponent, kernel_unit_length_x, kernel_unit_length_y, x, y, z}),
+ FilterOp::SVGFESpecularLightingSpot{node, surface_scale, specular_constant, specular_exponent, kernel_unit_length_x, kernel_unit_length_y, x, y, z, points_at_x, points_at_y, points_at_z, cone_exponent, limiting_cone_angle} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFESpecularLightingSpot{surface_scale, specular_constant, specular_exponent, kernel_unit_length_x, kernel_unit_length_y, x, y, z, points_at_x, points_at_y, points_at_z, cone_exponent, limiting_cone_angle}),
+ FilterOp::SVGFETile{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFETile),
+ FilterOp::SVGFEToAlpha{node} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFEToAlpha),
+ FilterOp::SVGFETurbulenceWithFractalNoiseWithNoStitching{node, base_frequency_x, base_frequency_y, num_octaves, seed} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithNoStitching{base_frequency_x, base_frequency_y, num_octaves, seed}),
+ FilterOp::SVGFETurbulenceWithFractalNoiseWithStitching{node, base_frequency_x, base_frequency_y, num_octaves, seed} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithStitching{base_frequency_x, base_frequency_y, num_octaves, seed}),
+ FilterOp::SVGFETurbulenceWithTurbulenceNoiseWithNoStitching{node, base_frequency_x, base_frequency_y, num_octaves, seed} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithNoStitching{base_frequency_x, base_frequency_y, num_octaves, seed}),
+ FilterOp::SVGFETurbulenceWithTurbulenceNoiseWithStitching{node, base_frequency_x, base_frequency_y, num_octaves, seed} => Filter::SVGGraphNode(node.into(), FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithStitching{base_frequency_x, base_frequency_y, num_octaves, seed}),
}
}
}
diff --git a/gfx/wr/webrender/src/pattern.rs b/gfx/wr/webrender/src/pattern.rs
index 36a06fa2b9..f4ddd51f9f 100644
--- a/gfx/wr/webrender/src/pattern.rs
+++ b/gfx/wr/webrender/src/pattern.rs
@@ -10,12 +10,14 @@ use api::{ColorF, PremultipliedColorF};
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum PatternKind {
ColorOrTexture = 0,
+ RadialGradient = 1,
+ ConicGradient = 2,
- Mask = 1,
+ Mask = 3,
// When adding patterns, don't forget to update the NUM_PATTERNS constant.
}
-pub const NUM_PATTERNS: u32 = 2;
+pub const NUM_PATTERNS: u32 = 4;
impl PatternKind {
pub fn from_u32(val: u32) -> Self {
@@ -61,8 +63,21 @@ impl Pattern {
Pattern {
kind: PatternKind::ColorOrTexture,
shader_input: PatternShaderInput::default(),
- base_color: PremultipliedColorF::BLACK,
+ base_color: PremultipliedColorF::WHITE,
is_opaque: false,
}
}
+
+ pub fn supports_segmented_rendering(&self) -> bool {
+ match self.kind {
+ PatternKind::ColorOrTexture | PatternKind::Mask => {
+ true
+ }
+ PatternKind::RadialGradient | PatternKind::ConicGradient => {
+ // TODO: We need to fix up the layout coords mismatch in pattern
+ // and quad rendering to allow these to be segmented.
+ false
+ }
+ }
+ }
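+
+    // Illustrative check (hypothetical assertion, not part of the patch): the
+    // renumbered discriminants keep the u32 round-trip stable, e.g.
+    // PatternKind::from_u32(1) is expected to yield PatternKind::RadialGradient
+    // and PatternKind::from_u32(3) PatternKind::Mask.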
}
diff --git a/gfx/wr/webrender/src/picture.rs b/gfx/wr/webrender/src/picture.rs
index 1f1fd5e4f6..f22bcadd06 100644
--- a/gfx/wr/webrender/src/picture.rs
+++ b/gfx/wr/webrender/src/picture.rs
@@ -95,7 +95,7 @@
//! improved as a follow up).
use api::{MixBlendMode, PremultipliedColorF, FilterPrimitiveKind};
-use api::{PropertyBinding, PropertyBindingId, FilterPrimitive, RasterSpace};
+use api::{PropertyBinding, PropertyBindingId, FilterPrimitive, FilterOpGraphPictureBufferId, RasterSpace};
use api::{DebugFlags, ImageKey, ColorF, ColorU, PrimitiveFlags};
use api::{ImageRendering, ColorDepth, YuvRangedColorSpace, YuvFormat, AlphaType};
use api::units::*;
@@ -111,7 +111,7 @@ use euclid::{vec3, Point2D, Scale, Vector2D, Box2D};
use euclid::approxeq::ApproxEq;
use crate::filterdata::SFilterData;
use crate::intern::ItemUid;
-use crate::internal_types::{FastHashMap, FastHashSet, PlaneSplitter, Filter, FrameId};
+use crate::internal_types::{FastHashMap, FastHashSet, PlaneSplitter, FilterGraphOp, FilterGraphNode, Filter, FrameId};
use crate::internal_types::{PlaneSplitterIndex, PlaneSplitAnchor, TextureSource};
use crate::frame_builder::{FrameBuildingContext, FrameBuildingState, PictureState, PictureContext};
use crate::gpu_cache::{GpuCache, GpuCacheAddress, GpuCacheHandle};
@@ -281,9 +281,10 @@ pub const TILE_SIZE_SCROLLBAR_VERTICAL: DeviceIntSize = DeviceIntSize {
_unit: marker::PhantomData,
};
-/// The maximum size per axis of a surface,
-/// in WorldPixel coordinates.
-const MAX_SURFACE_SIZE: usize = 4096;
+/// The maximum size per axis of a surface, in DevicePixel coordinates.
+/// Render tasks larger than this size are scaled down to fit, which may cause
+/// some blurriness.
+pub const MAX_SURFACE_SIZE: usize = 4096;
/// Maximum size of a compositor surface.
const MAX_COMPOSITOR_SURFACES_SIZE: f32 = 8192.0;
@@ -3821,7 +3822,7 @@ pub struct SurfaceIndex(pub usize);
/// frames and display lists.
pub struct SurfaceInfo {
/// A local rect defining the size of this surface, in the
- /// coordinate system of the surface itself. This contains
+ /// coordinate system of the parent surface. This contains
/// the unclipped bounding rect of child primitives.
pub unclipped_local_rect: PictureRect,
/// The local space coverage of child primitives after they are
@@ -4048,6 +4049,8 @@ pub enum PictureCompositeMode {
},
/// Apply an SVG filter
SvgFilter(Vec<FilterPrimitive>, Vec<SFilterData>),
+ /// Apply an SVG filter graph
+ SVGFEGraph(Vec<(FilterGraphNode, FilterGraphOp)>),
/// A surface that is used as an input to another primitive
IntermediateSurface,
}
@@ -4137,6 +4140,9 @@ impl PictureCompositeMode {
}
result_rect
}
+ PictureCompositeMode::SVGFEGraph(ref filters) => {
+ self.get_coverage_svgfe(filters, surface_rect.cast_unit(), true, false).0
+ }
_ => {
surface_rect
}
@@ -4232,11 +4238,338 @@ impl PictureCompositeMode {
}
result_rect
}
+ PictureCompositeMode::SVGFEGraph(ref filters) => {
+ let mut rect = self.get_coverage_svgfe(filters, surface_rect.cast_unit(), true, true).0;
+                // Inflate a bit for invalidation purposes, but we don't do
+                // this in get_surface_rects or get_surface_rect.
+ if !rect.is_empty() {
+ rect = rect.inflate(1.0, 1.0);
+ }
+ rect
+ }
_ => {
surface_rect
}
}
}
+
+ /// Returns a static str describing the type of PictureCompositeMode (and
+ /// filter type if applicable)
+ pub fn kind(&self) -> &'static str {
+ match *self {
+ PictureCompositeMode::Blit(..) => "Blit",
+ PictureCompositeMode::ComponentTransferFilter(..) => "ComponentTransferFilter",
+ PictureCompositeMode::IntermediateSurface => "IntermediateSurface",
+ PictureCompositeMode::MixBlend(..) => "MixBlend",
+ PictureCompositeMode::SVGFEGraph(..) => "SVGFEGraph",
+ PictureCompositeMode::SvgFilter(..) => "SvgFilter",
+ PictureCompositeMode::TileCache{..} => "TileCache",
+ PictureCompositeMode::Filter(Filter::Blur{..}) => "Filter::Blur",
+ PictureCompositeMode::Filter(Filter::Brightness(..)) => "Filter::Brightness",
+ PictureCompositeMode::Filter(Filter::ColorMatrix(..)) => "Filter::ColorMatrix",
+ PictureCompositeMode::Filter(Filter::ComponentTransfer) => "Filter::ComponentTransfer",
+ PictureCompositeMode::Filter(Filter::Contrast(..)) => "Filter::Contrast",
+ PictureCompositeMode::Filter(Filter::DropShadows(..)) => "Filter::DropShadows",
+ PictureCompositeMode::Filter(Filter::Flood(..)) => "Filter::Flood",
+ PictureCompositeMode::Filter(Filter::Grayscale(..)) => "Filter::Grayscale",
+ PictureCompositeMode::Filter(Filter::HueRotate(..)) => "Filter::HueRotate",
+ PictureCompositeMode::Filter(Filter::Identity) => "Filter::Identity",
+ PictureCompositeMode::Filter(Filter::Invert(..)) => "Filter::Invert",
+ PictureCompositeMode::Filter(Filter::LinearToSrgb) => "Filter::LinearToSrgb",
+ PictureCompositeMode::Filter(Filter::Opacity(..)) => "Filter::Opacity",
+ PictureCompositeMode::Filter(Filter::Saturate(..)) => "Filter::Saturate",
+ PictureCompositeMode::Filter(Filter::Sepia(..)) => "Filter::Sepia",
+ PictureCompositeMode::Filter(Filter::SrgbToLinear) => "Filter::SrgbToLinear",
+ PictureCompositeMode::Filter(Filter::SVGGraphNode(..)) => "Filter::SVGGraphNode",
+ }
+ }
+
+ /// Here we compute the source and target rects for SVGFEGraph by walking
+ /// the whole graph and propagating subregions based on the provided
+ /// invalidation rect (in either source or target space), and we want it to
+ /// be a tight fit so we don't waste time applying multiple filters to
+ /// pixels that do not contribute to the invalidated rect.
+ ///
+ /// The interesting parts of the handling of SVG filters are:
+ /// * scene_building.rs : wrap_prim_with_filters
+ /// * picture.rs : get_coverage_svgfe (you are here)
+ /// * render_task.rs : new_svg_filter_graph
+ /// * render_target.rs : add_svg_filter_node_instances
+ pub fn get_coverage_svgfe(
+ &self,
+ filters: &[(FilterGraphNode, FilterGraphOp)],
+ surface_rect: LayoutRect,
+ surface_rect_is_source: bool,
+ skip_subregion_clips: bool,
+ ) -> (LayoutRect, LayoutRect, LayoutRect) {
+
+ // The value of BUFFER_LIMIT here must be the same as in
+ // scene_building.rs, or we'll hit asserts here.
+ const BUFFER_LIMIT: usize = 256;
+
+ fn calc_target_from_source(
+ source_rect: LayoutRect,
+ filters: &[(FilterGraphNode, FilterGraphOp)],
+ skip_subregion_clips: bool,
+ ) -> LayoutRect {
+ // We need to evaluate the subregions based on the proposed
+ // SourceGraphic rect as it isn't known at scene build time.
+ let mut subregion_by_buffer_id: [LayoutRect; BUFFER_LIMIT] = [LayoutRect::zero(); BUFFER_LIMIT];
+ for (id, (node, op)) in filters.iter().enumerate() {
+ let full_subregion = node.subregion;
+ let mut used_subregion = LayoutRect::zero();
+ for input in &node.inputs {
+ match input.buffer_id {
+ FilterOpGraphPictureBufferId::BufferId(id) => {
+ assert!((id as usize) < BUFFER_LIMIT, "BUFFER_LIMIT must be the same in frame building and scene building");
+ // This id lookup should always succeed.
+ let input_subregion = subregion_by_buffer_id[id as usize];
+ // Now add the padding that transforms from
+ // source to target, this was determined during
+ // scene build based on the operation.
+ let input_subregion =
+ LayoutRect::new(
+ LayoutPoint::new(
+ input_subregion.min.x + input.target_padding.min.x,
+ input_subregion.min.y + input.target_padding.min.y,
+ ),
+ LayoutPoint::new(
+ input_subregion.max.x + input.target_padding.max.x,
+ input_subregion.max.y + input.target_padding.max.y,
+ ),
+ );
+ used_subregion = used_subregion
+ .union(&input_subregion);
+ }
+ FilterOpGraphPictureBufferId::None => {
+ panic!("Unsupported BufferId type");
+ }
+ }
+ }
+ // We can clip the used subregion.
+ if !skip_subregion_clips {
+ used_subregion = used_subregion
+ .intersection(&full_subregion)
+ .unwrap_or(LayoutRect::zero());
+ }
+ match op {
+ FilterGraphOp::SVGFEBlendColor => {}
+ FilterGraphOp::SVGFEBlendColorBurn => {}
+ FilterGraphOp::SVGFEBlendColorDodge => {}
+ FilterGraphOp::SVGFEBlendDarken => {}
+ FilterGraphOp::SVGFEBlendDifference => {}
+ FilterGraphOp::SVGFEBlendExclusion => {}
+ FilterGraphOp::SVGFEBlendHardLight => {}
+ FilterGraphOp::SVGFEBlendHue => {}
+ FilterGraphOp::SVGFEBlendLighten => {}
+ FilterGraphOp::SVGFEBlendLuminosity => {}
+ FilterGraphOp::SVGFEBlendMultiply => {}
+ FilterGraphOp::SVGFEBlendNormal => {}
+ FilterGraphOp::SVGFEBlendOverlay => {}
+ FilterGraphOp::SVGFEBlendSaturation => {}
+ FilterGraphOp::SVGFEBlendScreen => {}
+ FilterGraphOp::SVGFEBlendSoftLight => {}
+ FilterGraphOp::SVGFEColorMatrix { values } => {
+ if values[3] != 0.0 ||
+ values[7] != 0.0 ||
+ values[11] != 0.0 ||
+ values[19] != 0.0 {
+ // Manipulating alpha can easily create new
+ // pixels outside of input subregions
+ used_subregion = full_subregion;
+ }
+ }
+ FilterGraphOp::SVGFEComponentTransfer => unreachable!(),
+ FilterGraphOp::SVGFEComponentTransferInterned{handle: _, creates_pixels} => {
+                            // Check if the value of alpha[0] is modified; if so,
+                            // the whole subregion is used because it will be
+                            // creating new pixels outside of input subregions.
+ if *creates_pixels {
+ used_subregion = full_subregion;
+ }
+ }
+ FilterGraphOp::SVGFECompositeArithmetic { k1, k2, k3, k4 } => {
+ // Optimization opportunity - some inputs may be
+ // smaller subregions due to the way the math works,
+ // k1 is the intersection of the two inputs, k2 is
+ // the first input only, k3 is the second input
+ // only, and k4 changes the whole subregion.
+ //
+ // See logic for SVG_FECOMPOSITE_OPERATOR_ARITHMETIC
+ // in FilterSupport.cpp
+ //
+ // We can at least ignore the entire node if
+ // everything is zero.
+ if *k1 <= 0.0 &&
+ *k2 <= 0.0 &&
+ *k3 <= 0.0 {
+ used_subregion = LayoutRect::zero();
+ }
+ // Check if alpha is added to pixels as it means it
+ // can fill pixels outside input subregions
+ if *k4 > 0.0 {
+ used_subregion = full_subregion;
+ }
+ }
+ FilterGraphOp::SVGFECompositeATop => {}
+ FilterGraphOp::SVGFECompositeIn => {}
+ FilterGraphOp::SVGFECompositeLighter => {}
+ FilterGraphOp::SVGFECompositeOut => {}
+ FilterGraphOp::SVGFECompositeOver => {}
+ FilterGraphOp::SVGFECompositeXOR => {}
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeDuplicate{..} => {}
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeNone{..} => {}
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeWrap{..} => {}
+ FilterGraphOp::SVGFEDiffuseLightingDistant{..} => {}
+ FilterGraphOp::SVGFEDiffuseLightingPoint{..} => {}
+ FilterGraphOp::SVGFEDiffuseLightingSpot{..} => {}
+ FilterGraphOp::SVGFEDisplacementMap{..} => {}
+ FilterGraphOp::SVGFEDropShadow{..} => {}
+ FilterGraphOp::SVGFEFlood { color } => {
+ // Subregion needs to be set to the full node
+ // subregion for fills (unless the fill is a no-op)
+ if color.a > 0.0 {
+ used_subregion = full_subregion;
+ }
+ }
+ FilterGraphOp::SVGFEGaussianBlur{..} => {}
+ FilterGraphOp::SVGFEIdentity => {}
+ FilterGraphOp::SVGFEImage { sampling_filter: _sampling_filter, matrix: _matrix } => {
+ // TODO: calculate the actual subregion
+ used_subregion = full_subregion;
+ }
+ FilterGraphOp::SVGFEMorphologyDilate{..} => {}
+ FilterGraphOp::SVGFEMorphologyErode{..} => {}
+ FilterGraphOp::SVGFEOpacity { valuebinding: _valuebinding, value } => {
+ // If fully transparent, we can ignore this node
+ if *value <= 0.0 {
+ used_subregion = LayoutRect::zero();
+ }
+ }
+ FilterGraphOp::SVGFESourceAlpha |
+ FilterGraphOp::SVGFESourceGraphic => {
+ used_subregion = source_rect;
+ }
+ FilterGraphOp::SVGFESpecularLightingDistant{..} => {}
+ FilterGraphOp::SVGFESpecularLightingPoint{..} => {}
+ FilterGraphOp::SVGFESpecularLightingSpot{..} => {}
+ FilterGraphOp::SVGFETile => {
+ // feTile fills the entire output with
+ // source pixels, so it's effectively a flood.
+ used_subregion = full_subregion;
+ }
+ FilterGraphOp::SVGFEToAlpha => {}
+ FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithNoStitching{..} |
+ FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithStitching{..} |
+ FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithNoStitching{..} |
+ FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithStitching{..} => {
+ // Turbulence produces pixel values throughout the
+ // node subregion.
+ used_subregion = full_subregion;
+ }
+ }
+ // Store the subregion so later nodes can refer back
+ // to this and propagate rects properly
+ assert!((id as usize) < BUFFER_LIMIT, "BUFFER_LIMIT must be the same in frame building and scene building");
+ subregion_by_buffer_id[id] = used_subregion;
+ }
+ subregion_by_buffer_id[filters.len() - 1]
+ }
+
+ fn calc_source_from_target(
+ target_rect: LayoutRect,
+ filters: &[(FilterGraphNode, FilterGraphOp)],
+ skip_subregion_clips: bool,
+ ) -> LayoutRect {
+ // We're solving the source rect from target rect (e.g. due
+ // to invalidation of a region, we need to know how much of
+ // SourceGraphic is needed to draw that region accurately),
+ // so we need to walk the DAG in reverse and accumulate the source
+ // subregion for each input onto the referenced node, which can then
+ // propagate that to its inputs when it is iterated.
+ let mut source_subregion = LayoutRect::zero();
+ let mut subregion_by_buffer_id: [LayoutRect; BUFFER_LIMIT] =
+ [LayoutRect::zero(); BUFFER_LIMIT];
+ let final_buffer_id = filters.len() - 1;
+ assert!(final_buffer_id < BUFFER_LIMIT, "BUFFER_LIMIT must be the same in frame building and scene building");
+ subregion_by_buffer_id[final_buffer_id] = target_rect;
+ for (node_buffer_id, (node, op)) in filters.iter().enumerate().rev() {
+                // This is the subregion this node outputs; we can clip
+                // the inputs based on source_padding relative to this
+                // and accumulate a new subregion for them.
+ assert!(node_buffer_id < BUFFER_LIMIT, "BUFFER_LIMIT must be the same in frame building and scene building");
+ let full_subregion = node.subregion;
+ let mut used_subregion =
+ subregion_by_buffer_id[node_buffer_id];
+ // We can clip the used subregion.
+ if !skip_subregion_clips {
+ used_subregion = used_subregion
+ .intersection(&full_subregion)
+ .unwrap_or(LayoutRect::zero());
+ }
+ if !used_subregion.is_empty() {
+ for input in &node.inputs {
+ let input_subregion = LayoutRect::new(
+ LayoutPoint::new(
+ used_subregion.min.x + input.source_padding.min.x,
+ used_subregion.min.y + input.source_padding.min.y,
+ ),
+ LayoutPoint::new(
+ used_subregion.max.x + input.source_padding.max.x,
+ used_subregion.max.y + input.source_padding.max.y,
+ ),
+ );
+ match input.buffer_id {
+ FilterOpGraphPictureBufferId::BufferId(id) => {
+                                    // Add the used area to the input; later,
+                                    // when the referenced node is iterated as a
+                                    // node, it will propagate the used bounds.
+ subregion_by_buffer_id[id as usize] =
+ subregion_by_buffer_id[id as usize]
+ .union(&input_subregion);
+ }
+ FilterOpGraphPictureBufferId::None => {}
+ }
+ }
+ }
+ // If this is the SourceGraphic, we now have the subregion.
+ match op {
+ FilterGraphOp::SVGFESourceAlpha |
+ FilterGraphOp::SVGFESourceGraphic => {
+ source_subregion = used_subregion;
+ }
+ _ => {}
+ }
+ }
+
+ // Note that this can be zero if SourceGraphic is not in the graph.
+ source_subregion
+ }
+
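+        // Worked example (numbers hypothetical, ignoring per-node subregion
+        // clips): for a two-node graph [SourceGraphic, blur] whose blur input
+        // carries symmetric source and target padding of 6px on every edge, a
+        // 100x100 surface_rect treated as source yields target =
+        // source.inflate(6.0, 6.0) (112x112); walking back yields source =
+        // target.inflate(6.0, 6.0) (124x124), and the combined rect returned
+        // below is their union, i.e. the 124x124 rect.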
+ let (source, target) = match surface_rect_is_source {
+ true => {
+ // If we have a surface_rect for SourceGraphic, transform
+ // it to a target rect, and then transform the target
+ // rect back to a source rect (because blurs need the
+ // source to be enlarged).
+ let target = calc_target_from_source(surface_rect, filters, skip_subregion_clips);
+ let source = calc_source_from_target(target, filters, skip_subregion_clips);
+ (source, target)
+ }
+ false => {
+ // If we have a surface_rect for invalidation of target,
+ // we want to calculate the source rect from it
+ let target = surface_rect;
+ let source = calc_source_from_target(target, filters, skip_subregion_clips);
+ (source, target)
+ }
+ };
+
+ // Combine the source and target rect because other code assumes just
+ // a single rect expanded for blurs
+ let combined = source.union(&target);
+
+ (combined, source, target)
+ }
}
/// Enum value describing the place of a picture in a 3D context.
@@ -4500,6 +4833,7 @@ pub struct PicturePrimitive {
/// it will be considered invisible.
pub is_backface_visible: bool,
+ /// All render tasks have 0-2 input tasks.
pub primary_render_task_id: Option<RenderTaskId>,
/// If a mix-blend-mode, contains the render task for
/// the readback of the framebuffer that we use to sample
@@ -4507,6 +4841,8 @@ pub struct PicturePrimitive {
/// For drop-shadow filter, this will store the original
/// picture task which would be rendered on screen after
/// blur pass.
+ /// This is also used by SVGFEBlend, SVGFEComposite and
+ /// SVGFEDisplacementMap filters.
pub secondary_render_task_id: Option<RenderTaskId>,
/// How this picture should be composited.
/// If None, don't composite - just draw directly on parent surface.
@@ -4646,6 +4982,7 @@ impl PicturePrimitive {
parent_subpixel_mode: SubpixelMode,
frame_state: &mut FrameBuildingState,
frame_context: &FrameBuildingContext,
+ data_stores: &mut DataStores,
scratch: &mut PrimitiveScratchBuffer,
tile_caches: &mut FastHashMap<SliceId, Box<TileCacheInstance>>,
) -> Option<(PictureContext, PictureState, PrimitiveList)> {
@@ -4837,7 +5174,7 @@ impl PicturePrimitive {
// Ensure that the dirty rect doesn't extend outside the local valid rect.
tile.local_dirty_rect = tile.local_dirty_rect
.intersection(&tile.current_descriptor.local_valid_rect)
- .unwrap_or_else(PictureRect::zero);
+ .unwrap_or_else(|| { tile.is_valid = true; PictureRect::zero() });
surface_local_dirty_rect = surface_local_dirty_rect.union(&tile.local_dirty_rect);
@@ -5746,6 +6083,47 @@ impl PicturePrimitive {
surface_rects.clipped_local,
);
}
+ PictureCompositeMode::SVGFEGraph(ref filters) => {
+ let cmd_buffer_index = frame_state.cmd_buffers.create_cmd_buffer();
+
+ let picture_task_id = frame_state.rg_builder.add().init(
+ RenderTask::new_dynamic(
+ surface_rects.task_size,
+ RenderTaskKind::new_picture(
+ surface_rects.task_size,
+ surface_rects.needs_scissor_rect,
+ surface_rects.clipped.min,
+ surface_spatial_node_index,
+ raster_spatial_node_index,
+ device_pixel_scale,
+ None,
+ None,
+ None,
+ cmd_buffer_index,
+ can_use_shared_surface,
+ )
+ ).with_uv_rect_kind(surface_rects.uv_rect_kind)
+ );
+
+ let filter_task_id = RenderTask::new_svg_filter_graph(
+ filters,
+ frame_state,
+ data_stores,
+ surface_rects.uv_rect_kind,
+ picture_task_id,
+ surface_rects.task_size,
+ surface_rects.clipped,
+ surface_rects.clipped_local,
+ );
+
+ primary_render_task_id = filter_task_id;
+
+ surface_descriptor = SurfaceDescriptor::new_chained(
+ picture_task_id,
+ filter_task_id,
+ surface_rects.clipped_local,
+ );
+ }
}
let is_sub_graph = self.flags.contains(PictureFlags::IS_SUB_GRAPH);
@@ -5792,7 +6170,8 @@ impl PicturePrimitive {
PictureCompositeMode::Filter(..) |
PictureCompositeMode::MixBlend(..) |
PictureCompositeMode::IntermediateSurface |
- PictureCompositeMode::SvgFilter(..) => {
+ PictureCompositeMode::SvgFilter(..) |
+ PictureCompositeMode::SVGFEGraph(..) => {
// TODO(gw): We can take advantage of the same logic that
// exists in the opaque rect detection for tile
// caches, to allow subpixel text on other surfaces
@@ -6425,6 +6804,18 @@ impl PicturePrimitive {
PictureCompositeMode::Blit(_) |
PictureCompositeMode::IntermediateSurface |
PictureCompositeMode::SvgFilter(..) => {}
+ PictureCompositeMode::SVGFEGraph(ref filters) => {
+ // Update interned filter data
+ for (_node, op) in filters {
+ match op {
+ FilterGraphOp::SVGFEComponentTransferInterned { handle, creates_pixels: _ } => {
+ let filter_data = &mut data_stores.filter_data[*handle];
+ filter_data.update(frame_state);
+ }
+ _ => {}
+ }
+ }
+ }
}
true
@@ -7109,6 +7500,38 @@ fn get_surface_rects(
let surface = &mut surfaces[surface_index.0];
let (clipped_local, unclipped_local) = match composite_mode {
+ PictureCompositeMode::SVGFEGraph(ref filters) => {
+ // We need to get the primitive rect; get_coverage for
+ // SVGFEGraph requires the provided rect to be in user space (as
+ // defined in the SVG spec) for subregion calculations to work properly
+ let clipped: LayoutRect = surface.clipped_local_rect
+ .cast_unit();
+ let unclipped: LayoutRect = surface.unclipped_local_rect
+ .cast_unit();
+
+ // Get the rects of SourceGraphic and target based on the local rect
+ // and clip rect.
+ let (coverage, _source, target) = composite_mode.get_coverage_svgfe(
+ filters, clipped, true, false);
+
+ // If no part of the source rect contributes to target pixels, we're
+ // done here; this is the hot path for quick culling of composited
+ // pictures, where the view doesn't overlap the target.
+ //
+ // Note that the filter may contain fill regions such as feFlood
+ // which do not depend on the source at all, so the source rect is
+ // largely irrelevant to our decision here as it may be empty.
+ if target.is_empty() {
+ return None;
+ }
+
+ // Since the design of WebRender PictureCompositeMode does not
+ // actually permit source and target rects as separate concepts, we
+ // have to use the combined coverage rect.
+ let clipped = coverage;
+
+ (clipped.cast_unit(), unclipped)
+ }
PictureCompositeMode::Filter(Filter::DropShadows(ref shadows)) => {
let local_prim_rect = surface.clipped_local_rect;
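A minimal sketch of the culling logic in the SVGFEGraph arms above, using a simplified Rect type in place of WebRender's LayoutRect/PictureRect; combined_coverage here is a hypothetical stand-in for get_coverage_svgfe plus the empty-target early return, not the actual API.

#[derive(Clone, Copy, Debug, PartialEq)]
struct Rect { min: (f32, f32), max: (f32, f32) }

impl Rect {
    fn is_empty(&self) -> bool {
        self.min.0 >= self.max.0 || self.min.1 >= self.max.1
    }
    // Mirrors LayoutRect::union: the smallest rect containing both inputs.
    fn union(&self, other: &Rect) -> Rect {
        Rect {
            min: (self.min.0.min(other.min.0), self.min.1.min(other.min.1)),
            max: (self.max.0.max(other.max.0), self.max.1.max(other.max.1)),
        }
    }
}

// Cull the picture when the filter produces no target pixels, otherwise hand
// back the single combined rect that the rest of the surface code expects.
fn combined_coverage(source: Rect, target: Rect) -> Option<Rect> {
    if target.is_empty() {
        return None; // quick-cull path: the view does not overlap the output
    }
    Some(source.union(&target))
}

fn main() {
    let source = Rect { min: (0.0, 0.0), max: (100.0, 100.0) };
    let target = Rect { min: (20.0, 20.0), max: (140.0, 120.0) };
    assert_eq!(
        combined_coverage(source, target),
        Some(Rect { min: (0.0, 0.0), max: (140.0, 120.0) })
    );
}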
diff --git a/gfx/wr/webrender/src/prepare.rs b/gfx/wr/webrender/src/prepare.rs
index d9b4521cfc..a7eca830f8 100644
--- a/gfx/wr/webrender/src/prepare.rs
+++ b/gfx/wr/webrender/src/prepare.rs
@@ -28,7 +28,7 @@ use crate::prim_store::line_dec::MAX_LINE_DECORATION_RESOLUTION;
use crate::prim_store::*;
use crate::quad;
use crate::pattern::Pattern;
-use crate::prim_store::gradient::GradientGpuBlockBuilder;
+use crate::prim_store::gradient::{radial_gradient_pattern, conic_gradient_pattern, GradientGpuBlockBuilder};
use crate::render_backend::DataStores;
use crate::render_task_graph::RenderTaskId;
use crate::render_task_cache::RenderTaskCacheKeyKind;
@@ -174,6 +174,7 @@ fn prepare_prim_for_render(
pic_context.subpixel_mode,
frame_state,
frame_context,
+ data_stores,
scratch,
tile_caches,
) {
@@ -211,6 +212,34 @@ fn prepare_prim_for_render(
let prim_instance = &mut prim_instances[prim_instance_index];
if !is_passthrough {
+ fn may_need_repetition(stretch_size: LayoutSize, prim_rect: LayoutRect) -> bool {
+ stretch_size.width < prim_rect.width() ||
+ stretch_size.height < prim_rect.height()
+ }
+ // Bug 1887841: At the moment the quad shader does not support repetitions.
+ // Bug 1888349: Some primitives have brush segments that aren't handled by
+ // the quad infrastructure yet.
+ let disable_quad_path = match &prim_instance.kind {
+ PrimitiveInstanceKind::Rectangle { .. } => false,
+ PrimitiveInstanceKind::LinearGradient { data_handle, .. } => {
+ let prim_data = &data_stores.linear_grad[*data_handle];
+ !prim_data.brush_segments.is_empty() ||
+ may_need_repetition(prim_data.stretch_size, prim_data.common.prim_rect)
+ }
+ PrimitiveInstanceKind::RadialGradient { data_handle, .. } => {
+ let prim_data = &data_stores.radial_grad[*data_handle];
+ !prim_data.brush_segments.is_empty() ||
+ may_need_repetition(prim_data.stretch_size, prim_data.common.prim_rect)
+ }
+ PrimitiveInstanceKind::ConicGradient { .. } => {
+ // TODO(nical) Enable quad conic gradients.
+ true
+ // let prim_data = &data_stores.conic_grad[*data_handle];
+ // !prim_data.brush_segments.is_empty() ||
+ // may_need_repetition(prim_data.stretch_size, prim_data.common.prim_rect)
+ }
+ _ => true,
+ };
// In this initial patch, we only support non-masked primitives through the new
// quad rendering path. Follow up patches will extend this to support masks, and
@@ -218,18 +247,19 @@ fn prepare_prim_for_render(
// to skip the entry point to `update_clip_task` as that does old-style segmenting
// and mask generation.
let should_update_clip_task = match prim_instance.kind {
- PrimitiveInstanceKind::Rectangle { ref mut use_legacy_path, .. } => {
- *use_legacy_path = !can_use_clip_chain_for_quad_path(
+ PrimitiveInstanceKind::Rectangle { use_legacy_path: ref mut no_quads, .. }
+ | PrimitiveInstanceKind::RadialGradient { cached: ref mut no_quads, .. }
+ | PrimitiveInstanceKind::ConicGradient { cached: ref mut no_quads, .. }
+ => {
+ *no_quads = disable_quad_path || !can_use_clip_chain_for_quad_path(
&prim_instance.vis.clip_chain,
frame_state.clip_store,
data_stores,
);
- *use_legacy_path
- }
- PrimitiveInstanceKind::Picture { .. } => {
- false
+ *no_quads
}
+ PrimitiveInstanceKind::Picture { .. } => false,
_ => true,
};
@@ -365,7 +395,7 @@ fn prepare_interned_prim_for_render(
frame_state.rg_builder,
None,
false,
- RenderTaskParent::Surface(pic_context.surface_index),
+ RenderTaskParent::Surface,
&mut frame_state.surface_builder,
|rg_builder, _| {
rg_builder.add().init(RenderTask::new_dynamic(
@@ -520,7 +550,7 @@ fn prepare_interned_prim_for_render(
frame_state.rg_builder,
None,
false, // TODO(gw): We don't calculate opacity for borders yet!
- RenderTaskParent::Surface(pic_context.surface_index),
+ RenderTaskParent::Surface,
&mut frame_state.surface_builder,
|rg_builder, _| {
rg_builder.add().init(RenderTask::new_dynamic(
@@ -669,7 +699,6 @@ fn prepare_interned_prim_for_render(
image_data.update(
common_data,
image_instance,
- pic_context.surface_index,
prim_spatial_node_index,
frame_state,
frame_context,
@@ -692,7 +721,7 @@ fn prepare_interned_prim_for_render(
// Update the template this instance references, which may refresh the GPU
// cache with any shared template data.
- prim_data.update(frame_state, pic_context.surface_index);
+ prim_data.update(frame_state);
if prim_data.stretch_size.width >= prim_data.common.prim_rect.width() &&
prim_data.stretch_size.height >= prim_data.common.prim_rect.height() {
@@ -758,7 +787,7 @@ fn prepare_interned_prim_for_render(
// Update the template this instance references, which may refresh the GPU
// cache with any shared template data.
- prim_data.update(frame_state, pic_context.surface_index);
+ prim_data.update(frame_state);
if prim_data.tile_spacing != LayoutSize::zero() {
prim_data.common.may_need_repetition = false;
@@ -780,16 +809,49 @@ fn prepare_interned_prim_for_render(
}
}
}
- PrimitiveInstanceKind::RadialGradient { data_handle, ref mut visible_tiles_range, .. } => {
+ PrimitiveInstanceKind::RadialGradient { data_handle, ref mut visible_tiles_range, cached, .. } => {
profile_scope!("RadialGradient");
let prim_data = &mut data_stores.radial_grad[*data_handle];
+ if !*cached {
+ // The scaling parameter is used to compensate for when we reduce the size
+ // of the render task for cached gradients. Here we aren't applying any.
+ let no_scale = DeviceVector2D::one();
+
+ let pattern = radial_gradient_pattern(
+ prim_data.center,
+ no_scale,
+ &prim_data.params,
+ prim_data.extend_mode,
+ &prim_data.stops,
+ &mut frame_state.frame_gpu_data,
+ );
+
+ quad::push_quad(
+ &pattern,
+ &prim_data.common.prim_rect,
+ prim_instance_index,
+ prim_spatial_node_index,
+ &prim_instance.vis.clip_chain,
+ device_pixel_scale,
+ frame_context,
+ pic_context,
+ targets,
+ &data_stores.clip,
+ frame_state,
+ pic_state,
+ scratch,
+ );
+
+ return;
+ }
+
prim_data.common.may_need_repetition = prim_data.stretch_size.width < prim_data.common.prim_rect.width()
- || prim_data.stretch_size.height < prim_data.common.prim_rect.height();
+ || prim_data.stretch_size.height < prim_data.common.prim_rect.height();
// Update the template this instance references, which may refresh the GPU
// cache with any shared template data.
- prim_data.update(frame_state, pic_context.surface_index);
+ prim_data.update(frame_state);
if prim_data.tile_spacing != LayoutSize::zero() {
prim_data.common.may_need_repetition = false;
@@ -810,20 +872,50 @@ fn prepare_interned_prim_for_render(
prim_instance.clear_visibility();
}
}
-
- // TODO(gw): Consider whether it's worth doing segment building
- // for gradient primitives.
}
- PrimitiveInstanceKind::ConicGradient { data_handle, ref mut visible_tiles_range, .. } => {
+ PrimitiveInstanceKind::ConicGradient { data_handle, ref mut visible_tiles_range, cached, .. } => {
profile_scope!("ConicGradient");
let prim_data = &mut data_stores.conic_grad[*data_handle];
+ if !*cached {
+ // The scaling parameter is used to compensate for when we reduce the size
+ // of the render task for cached gradients. Here we aren't applying any.
+ let no_scale = DeviceVector2D::one();
+
+ let pattern = conic_gradient_pattern(
+ prim_data.center,
+ no_scale,
+ &prim_data.params,
+ prim_data.extend_mode,
+ &prim_data.stops,
+ &mut frame_state.frame_gpu_data,
+ );
+
+ quad::push_quad(
+ &pattern,
+ &prim_data.common.prim_rect,
+ prim_instance_index,
+ prim_spatial_node_index,
+ &prim_instance.vis.clip_chain,
+ device_pixel_scale,
+ frame_context,
+ pic_context,
+ targets,
+ &data_stores.clip,
+ frame_state,
+ pic_state,
+ scratch,
+ );
+
+ return;
+ }
+
prim_data.common.may_need_repetition = prim_data.stretch_size.width < prim_data.common.prim_rect.width()
|| prim_data.stretch_size.height < prim_data.common.prim_rect.height();
// Update the template this instance references, which may refresh the GPU
// cache with any shared template data.
- prim_data.update(frame_state, pic_context.surface_index);
+ prim_data.update(frame_state);
if prim_data.tile_spacing != LayoutSize::zero() {
prim_data.common.may_need_repetition = false;
@@ -870,7 +962,8 @@ fn prepare_interned_prim_for_render(
// may have changed due to downscaling. We could handle this separate
// case as a follow up.
Some(PictureCompositeMode::Filter(Filter::Blur { .. })) |
- Some(PictureCompositeMode::Filter(Filter::DropShadows { .. })) => {
+ Some(PictureCompositeMode::Filter(Filter::DropShadows { .. })) |
+ Some(PictureCompositeMode::SVGFEGraph( .. )) => {
true
}
_ => {
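A standalone sketch of the repetition check behind disable_quad_path above (the quad shader has no tiling support yet, per bug 1887841). Size is a simplified stand-in for LayoutSize; the comparison itself mirrors may_need_repetition.

#[derive(Clone, Copy)]
struct Size { width: f32, height: f32 }

// One gradient tile (stretch_size) must cover the whole primitive rect in
// both axes, otherwise the pattern repeats and the quad path is skipped.
fn may_need_repetition(stretch_size: Size, prim_size: Size) -> bool {
    stretch_size.width < prim_size.width ||
    stretch_size.height < prim_size.height
}

fn main() {
    let tile = Size { width: 50.0, height: 50.0 };
    let prim = Size { width: 200.0, height: 40.0 };
    // Repeats horizontally, so this primitive would stay on the legacy path.
    assert!(may_need_repetition(tile, prim));
}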
diff --git a/gfx/wr/webrender/src/prim_store/gradient/conic.rs b/gfx/wr/webrender/src/prim_store/gradient/conic.rs
index 2c4818095e..9f993b9758 100644
--- a/gfx/wr/webrender/src/prim_store/gradient/conic.rs
+++ b/gfx/wr/webrender/src/prim_store/gradient/conic.rs
@@ -11,6 +11,7 @@
use euclid::vec2;
use api::{ExtendMode, GradientStop, PremultipliedColorF};
use api::units::*;
+use crate::pattern::{Pattern, PatternKind, PatternShaderInput};
use crate::scene_building::IsVisible;
use crate::frame_builder::FrameBuildingState;
use crate::intern::{Internable, InternDebug, Handle as InternHandle};
@@ -22,8 +23,7 @@ use crate::prim_store::{NinePatchDescriptor, PointKey, SizeKey, InternablePrimit
use crate::render_task::{RenderTask, RenderTaskKind};
use crate::render_task_graph::RenderTaskId;
use crate::render_task_cache::{RenderTaskCacheKeyKind, RenderTaskCacheKey, RenderTaskParent};
-use crate::renderer::GpuBufferAddress;
-use crate::picture::{SurfaceIndex};
+use crate::renderer::{GpuBufferAddress, GpuBufferBuilder};
use std::{hash, ops::{Deref, DerefMut}};
use super::{stops_and_min_alpha, GradientStopKey, GradientGpuBlockBuilder};
@@ -213,7 +213,6 @@ impl ConicGradientTemplate {
pub fn update(
&mut self,
frame_state: &mut FrameBuildingState,
- parent_surface: SurfaceIndex,
) {
if let Some(mut request) =
frame_state.gpu_cache.request(&mut self.common.gpu_cache_handle) {
@@ -258,7 +257,7 @@ impl ConicGradientTemplate {
frame_state.rg_builder,
None,
false,
- RenderTaskParent::Surface(parent_surface),
+ RenderTaskParent::Surface,
&mut frame_state.surface_builder,
|rg_builder, gpu_buffer_builder| {
let stops = GradientGpuBlockBuilder::build(
@@ -329,6 +328,7 @@ impl InternablePrimitive for ConicGradient {
PrimitiveInstanceKind::ConicGradient {
data_handle,
visible_tiles_range: GradientTileRange::empty(),
+ cached: true,
}
}
}
@@ -397,3 +397,44 @@ pub struct ConicGradientCacheKey {
pub stops: Vec<GradientStopKey>,
}
+pub fn conic_gradient_pattern(
+ center: DevicePoint,
+ scale: DeviceVector2D,
+ params: &ConicGradientParams,
+ extend_mode: ExtendMode,
+ stops: &[GradientStop],
+ gpu_buffer_builder: &mut GpuBufferBuilder
+) -> Pattern {
+ let mut writer = gpu_buffer_builder.f32.write_blocks(2);
+ writer.push_one([
+ center.x,
+ center.y,
+ scale.x,
+ scale.y,
+ ]);
+ writer.push_one([
+ params.start_offset,
+ params.end_offset,
+ params.angle,
+ if extend_mode == ExtendMode::Repeat { 1.0 } else { 0.0 }
+ ]);
+ let gradient_address = writer.finish();
+
+ let stops_address = GradientGpuBlockBuilder::build(
+ false,
+ &mut gpu_buffer_builder.f32,
+ &stops,
+ );
+
+ let is_opaque = stops.iter().all(|stop| stop.color.a >= 1.0);
+
+ Pattern {
+ kind: PatternKind::ConicGradient,
+ shader_input: PatternShaderInput(
+ gradient_address.as_int(),
+ stops_address.as_int(),
+ ),
+ base_color: PremultipliedColorF::WHITE,
+ is_opaque,
+ }
+}
\ No newline at end of file
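A rough illustration of the two float4 blocks conic_gradient_pattern writes above, with a plain Vec<[f32; 4]> standing in for the GPU buffer writer; the repeat-flag encoding and the is_opaque check follow the code, everything else is simplified and the names are illustrative.

#[derive(Clone, Copy, PartialEq)]
enum ExtendMode { Clamp, Repeat }

struct Stop { alpha: f32 }

// Block 0: center.xy and per-axis scale.
// Block 1: start/end offsets, angle, and a repeat flag (1.0 or 0.0).
fn pack_conic(
    center: (f32, f32),
    scale: (f32, f32),
    start_offset: f32,
    end_offset: f32,
    angle: f32,
    extend_mode: ExtendMode,
    stops: &[Stop],
) -> (Vec<[f32; 4]>, bool) {
    let blocks = vec![
        [center.0, center.1, scale.0, scale.1],
        [start_offset, end_offset, angle,
         if extend_mode == ExtendMode::Repeat { 1.0 } else { 0.0 }],
    ];
    // Opaque only if every stop is fully opaque.
    let is_opaque = stops.iter().all(|s| s.alpha >= 1.0);
    (blocks, is_opaque)
}

fn main() {
    let stops = [Stop { alpha: 1.0 }, Stop { alpha: 0.5 }];
    let (blocks, is_opaque) =
        pack_conic((40.0, 40.0), (1.0, 1.0), 0.0, 1.0, 0.0, ExtendMode::Repeat, &stops);
    assert_eq!(blocks.len(), 2);
    assert!(!is_opaque);
}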
diff --git a/gfx/wr/webrender/src/prim_store/gradient/linear.rs b/gfx/wr/webrender/src/prim_store/gradient/linear.rs
index 7075daac0d..0fdb268449 100644
--- a/gfx/wr/webrender/src/prim_store/gradient/linear.rs
+++ b/gfx/wr/webrender/src/prim_store/gradient/linear.rs
@@ -26,7 +26,6 @@ use crate::render_task_graph::RenderTaskId;
use crate::render_task_cache::{RenderTaskCacheKeyKind, RenderTaskCacheKey, RenderTaskParent};
use crate::renderer::GpuBufferAddress;
use crate::segment::EdgeAaSegmentMask;
-use crate::picture::{SurfaceIndex};
use crate::util::pack_as_float;
use super::{stops_and_min_alpha, GradientStopKey, GradientGpuBlockBuilder, apply_gradient_local_clip};
use std::ops::{Deref, DerefMut};
@@ -450,7 +449,6 @@ impl LinearGradientTemplate {
pub fn update(
&mut self,
frame_state: &mut FrameBuildingState,
- parent_surface: SurfaceIndex,
) {
if let Some(mut request) = frame_state.gpu_cache.request(
&mut self.common.gpu_cache_handle
@@ -526,7 +524,7 @@ impl LinearGradientTemplate {
frame_state.rg_builder,
None,
false,
- RenderTaskParent::Surface(parent_surface),
+ RenderTaskParent::Surface,
&mut frame_state.surface_builder,
|rg_builder, _| {
rg_builder.add().init(RenderTask::new_dynamic(
@@ -556,7 +554,7 @@ impl LinearGradientTemplate {
frame_state.rg_builder,
None,
false,
- RenderTaskParent::Surface(parent_surface),
+ RenderTaskParent::Surface,
&mut frame_state.surface_builder,
|rg_builder, gpu_buffer_builder| {
let stops = Some(GradientGpuBlockBuilder::build(
diff --git a/gfx/wr/webrender/src/prim_store/gradient/radial.rs b/gfx/wr/webrender/src/prim_store/gradient/radial.rs
index 4d91b28633..4d655ffe7e 100644
--- a/gfx/wr/webrender/src/prim_store/gradient/radial.rs
+++ b/gfx/wr/webrender/src/prim_store/gradient/radial.rs
@@ -11,6 +11,7 @@
use euclid::{vec2, size2};
use api::{ExtendMode, GradientStop, PremultipliedColorF, ColorU};
use api::units::*;
+use crate::pattern::{Pattern, PatternKind, PatternShaderInput};
use crate::scene_building::IsVisible;
use crate::frame_builder::FrameBuildingState;
use crate::intern::{Internable, InternDebug, Handle as InternHandle};
@@ -22,8 +23,7 @@ use crate::prim_store::{NinePatchDescriptor, PointKey, SizeKey, FloatKey};
use crate::render_task::{RenderTask, RenderTaskKind};
use crate::render_task_graph::RenderTaskId;
use crate::render_task_cache::{RenderTaskCacheKeyKind, RenderTaskCacheKey, RenderTaskParent};
-use crate::renderer::GpuBufferAddress;
-use crate::picture::{SurfaceIndex};
+use crate::renderer::{GpuBufferAddress, GpuBufferBuilder};
use std::{hash, ops::{Deref, DerefMut}};
use super::{
@@ -178,7 +178,6 @@ impl RadialGradientTemplate {
pub fn update(
&mut self,
frame_state: &mut FrameBuildingState,
- parent_surface: SurfaceIndex,
) {
if let Some(mut request) =
frame_state.gpu_cache.request(&mut self.common.gpu_cache_handle) {
@@ -224,7 +223,7 @@ impl RadialGradientTemplate {
frame_state.rg_builder,
None,
false,
- RenderTaskParent::Surface(parent_surface),
+ RenderTaskParent::Surface,
&mut frame_state.surface_builder,
|rg_builder, gpu_buffer_builder| {
let stops = GradientGpuBlockBuilder::build(
@@ -295,6 +294,7 @@ impl InternablePrimitive for RadialGradient {
PrimitiveInstanceKind::RadialGradient {
data_handle,
visible_tiles_range: GradientTileRange::empty(),
+ cached: true,
}
}
}
@@ -529,3 +529,45 @@ pub fn optimize_radial_gradient(
tile_spacing.width += l + r;
tile_spacing.height += t + b;
}
+
+pub fn radial_gradient_pattern(
+ center: DevicePoint,
+ scale: DeviceVector2D,
+ params: &RadialGradientParams,
+ extend_mode: ExtendMode,
+ stops: &[GradientStop],
+ gpu_buffer_builder: &mut GpuBufferBuilder
+) -> Pattern {
+ let mut writer = gpu_buffer_builder.f32.write_blocks(2);
+ writer.push_one([
+ center.x,
+ center.y,
+ scale.x,
+ scale.y,
+ ]);
+ writer.push_one([
+ params.start_radius,
+ params.end_radius,
+ params.ratio_xy,
+ if extend_mode == ExtendMode::Repeat { 1.0 } else { 0.0 }
+ ]);
+ let gradient_address = writer.finish();
+
+ let stops_address = GradientGpuBlockBuilder::build(
+ false,
+ &mut gpu_buffer_builder.f32,
+ &stops,
+ );
+
+ let is_opaque = stops.iter().all(|stop| stop.color.a >= 1.0);
+
+ Pattern {
+ kind: PatternKind::RadialGradient,
+ shader_input: PatternShaderInput(
+ gradient_address.as_int(),
+ stops_address.as_int(),
+ ),
+ base_color: PremultipliedColorF::WHITE,
+ is_opaque,
+ }
+}
\ No newline at end of file
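radial_gradient_pattern follows the same two-block layout as the conic sketch earlier; only the second block differs. A hedged outline of that block, with a plain array standing in for the GPU block and names chosen for illustration only.

// Second float4 block for a radial gradient: radii, x/y aspect ratio, and
// the same 1.0 / 0.0 repeat flag as in the conic case.
fn radial_param_block(start_radius: f32, end_radius: f32, ratio_xy: f32, repeat: bool) -> [f32; 4] {
    [start_radius, end_radius, ratio_xy, if repeat { 1.0 } else { 0.0 }]
}

fn main() {
    assert_eq!(radial_param_block(0.0, 32.0, 1.0, true), [0.0, 32.0, 1.0, 1.0]);
}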
diff --git a/gfx/wr/webrender/src/prim_store/image.rs b/gfx/wr/webrender/src/prim_store/image.rs
index 8a05965536..b10695f095 100644
--- a/gfx/wr/webrender/src/prim_store/image.rs
+++ b/gfx/wr/webrender/src/prim_store/image.rs
@@ -13,8 +13,7 @@ use crate::scene_building::{CreateShadow, IsVisible};
use crate::frame_builder::{FrameBuildingContext, FrameBuildingState};
use crate::gpu_cache::{GpuCache, GpuDataRequest};
use crate::intern::{Internable, InternDebug, Handle as InternHandle};
-use crate::internal_types::{LayoutPrimitiveInfo};
-use crate::picture::SurfaceIndex;
+use crate::internal_types::LayoutPrimitiveInfo;
use crate::prim_store::{
EdgeAaSegmentMask, PrimitiveInstanceKind,
PrimitiveOpacity, PrimKey,
@@ -136,7 +135,6 @@ impl ImageData {
&mut self,
common: &mut PrimTemplateCommonData,
image_instance: &mut ImageInstance,
- parent_surface: SurfaceIndex,
prim_spatial_node_index: SpatialNodeIndex,
frame_state: &mut FrameBuildingState,
frame_context: &FrameBuildingContext,
@@ -261,7 +259,7 @@ impl ImageData {
frame_state.rg_builder,
None,
descriptor.is_opaque(),
- RenderTaskParent::Surface(parent_surface),
+ RenderTaskParent::Surface,
&mut frame_state.surface_builder,
|rg_builder, _| {
// Create a task to blit from the texture cache to
diff --git a/gfx/wr/webrender/src/prim_store/mod.rs b/gfx/wr/webrender/src/prim_store/mod.rs
index cc09eab6b1..902ecff43a 100644
--- a/gfx/wr/webrender/src/prim_store/mod.rs
+++ b/gfx/wr/webrender/src/prim_store/mod.rs
@@ -1029,11 +1029,13 @@ pub enum PrimitiveInstanceKind {
/// Handle to the common interned data for this primitive.
data_handle: RadialGradientDataHandle,
visible_tiles_range: GradientTileRange,
+ cached: bool,
},
ConicGradient {
/// Handle to the common interned data for this primitive.
data_handle: ConicGradientDataHandle,
visible_tiles_range: GradientTileRange,
+ cached: bool,
},
/// Clear out a rect, used for special effects.
Clear {
@@ -1214,8 +1216,9 @@ pub struct PrimitiveScratchBuffer {
/// Set of sub-graphs that are required, determined during visibility pass
pub required_sub_graphs: FastHashSet<PictureIndex>,
- /// Temporary buffer for building segments in to during prepare pass
- pub quad_segments: Vec<QuadSegment>,
+ /// Temporary buffers for building segments in to during prepare pass
+ pub quad_direct_segments: Vec<QuadSegment>,
+ pub quad_indirect_segments: Vec<QuadSegment>,
}
impl Default for PrimitiveScratchBuffer {
@@ -1230,7 +1233,8 @@ impl Default for PrimitiveScratchBuffer {
debug_items: Vec::new(),
messages: Vec::new(),
required_sub_graphs: FastHashSet::default(),
- quad_segments: Vec::new(),
+ quad_direct_segments: Vec::new(),
+ quad_indirect_segments: Vec::new(),
}
}
}
@@ -1244,7 +1248,8 @@ impl PrimitiveScratchBuffer {
self.segment_instances.recycle(recycler);
self.gradient_tiles.recycle(recycler);
recycler.recycle_vec(&mut self.debug_items);
- recycler.recycle_vec(&mut self.quad_segments);
+ recycler.recycle_vec(&mut self.quad_direct_segments);
+ recycler.recycle_vec(&mut self.quad_indirect_segments);
}
pub fn begin_frame(&mut self) {
@@ -1253,7 +1258,8 @@ impl PrimitiveScratchBuffer {
// location.
self.clip_mask_instances.clear();
self.clip_mask_instances.push(ClipMaskKind::None);
- self.quad_segments.clear();
+ self.quad_direct_segments.clear();
+ self.quad_indirect_segments.clear();
self.border_cache_handles.clear();
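A small sketch of the per-frame lifecycle the hunks above give the new segment buffers. Scratch and Segment are simplified stand-ins, and the direct/indirect naming is assumed here to distinguish segments drawn straight to the destination from those routed through an intermediate render task.

#[derive(Default)]
struct Segment;

#[derive(Default)]
struct Scratch {
    // Stand-ins for quad_direct_segments / quad_indirect_segments.
    direct: Vec<Segment>,
    indirect: Vec<Segment>,
}

impl Scratch {
    // Mirrors PrimitiveScratchBuffer::begin_frame: both buffers are emptied
    // before the prepare pass repopulates them.
    fn begin_frame(&mut self) {
        self.direct.clear();
        self.indirect.clear();
    }
}

fn main() {
    let mut scratch = Scratch::default();
    scratch.direct.push(Segment);
    scratch.begin_frame();
    assert!(scratch.direct.is_empty() && scratch.indirect.is_empty());
}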
diff --git a/gfx/wr/webrender/src/prim_store/picture.rs b/gfx/wr/webrender/src/prim_store/picture.rs
index c3ec88783a..f8857a4f11 100644
--- a/gfx/wr/webrender/src/prim_store/picture.rs
+++ b/gfx/wr/webrender/src/prim_store/picture.rs
@@ -3,15 +3,17 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use api::{
- ColorU, MixBlendMode, FilterPrimitiveInput, FilterPrimitiveKind, ColorSpace,
- PropertyBinding, PropertyBindingId, CompositeOperator, RasterSpace,
+ ColorU, MixBlendMode, FilterPrimitiveInput, FilterPrimitiveKind,
+ ColorSpace, PropertyBinding, PropertyBindingId, CompositeOperator,
+ RasterSpace, FilterOpGraphPictureBufferId,
};
use api::units::{Au, LayoutVector2D};
use crate::scene_building::IsVisible;
use crate::filterdata::SFilterData;
use crate::intern::ItemUid;
use crate::intern::{Internable, InternDebug, Handle as InternHandle};
-use crate::internal_types::{LayoutPrimitiveInfo, Filter};
+use crate::internal_types::{LayoutPrimitiveInfo, FilterGraphPictureReference,
+ FilterGraphOp, FilterGraphNode, SVGFE_CONVOLVE_VALUES_LIMIT, Filter};
use crate::picture::PictureCompositeMode;
use crate::prim_store::{
PrimitiveInstanceKind, PrimitiveStore, VectorKey,
@@ -69,6 +71,758 @@ pub enum FilterPrimitiveKey {
Composite(ColorSpace, FilterPrimitiveInput, FilterPrimitiveInput, CompositeOperatorKey),
}
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, Copy, Default, MallocSizeOf, PartialEq, Hash, Eq)]
+pub enum FilterGraphPictureBufferIdKey {
+ #[default]
+ /// empty slot in feMerge inputs
+ None,
+ /// reference to another (earlier) node in filter graph
+ BufferId(i16),
+}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, Copy, Default, MallocSizeOf, PartialEq, Hash, Eq)]
+pub struct FilterGraphPictureReferenceKey {
+ /// Id of the picture in question in a namespace unique to this filter DAG,
+ /// some are special values like
+ /// FilterPrimitiveDescription::kPrimitiveIndexSourceGraphic.
+ pub buffer_id: FilterGraphPictureBufferIdKey,
+ /// Place the input image here in Layout space (like node.subregion)
+ pub subregion: [Au; 4],
+ /// Translate the subregion by this amount
+ pub offset: [Au; 2],
+}
+
+impl From<FilterGraphPictureReference> for FilterGraphPictureReferenceKey {
+ fn from(pic: FilterGraphPictureReference) -> Self {
+ FilterGraphPictureReferenceKey{
+ buffer_id: match pic.buffer_id {
+ FilterOpGraphPictureBufferId::None => FilterGraphPictureBufferIdKey::None,
+ FilterOpGraphPictureBufferId::BufferId(id) => FilterGraphPictureBufferIdKey::BufferId(id),
+ },
+ subregion: [
+ Au::from_f32_px(pic.subregion.min.x),
+ Au::from_f32_px(pic.subregion.min.y),
+ Au::from_f32_px(pic.subregion.max.x),
+ Au::from_f32_px(pic.subregion.max.y),
+ ],
+ offset: [
+ Au::from_f32_px(pic.offset.x),
+ Au::from_f32_px(pic.offset.y),
+ ],
+ }
+ }
+}
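The key types above convert every f32 value to Au because f32 implements neither Eq nor Hash; quantizing to app units makes the interning key hashable and stable. A minimal sketch of that idea, assuming the usual 60-app-units-per-CSS-pixel convention behind Au::from_f32_px (rounding details simplified).

use std::collections::HashSet;

// Fixed-point app units: i32, 60 units per CSS pixel.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Au(i32);

impl Au {
    fn from_f32_px(px: f32) -> Au {
        Au((px * 60.0).round() as i32)
    }
}

fn main() {
    let mut keys = HashSet::new();
    // Identical inputs quantize to identical keys, so repeated filter nodes
    // can be deduplicated by the interner.
    keys.insert([Au::from_f32_px(1.25), Au::from_f32_px(2.0)]);
    keys.insert([Au::from_f32_px(1.25), Au::from_f32_px(2.0)]);
    assert_eq!(keys.len(), 1);
}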
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, MallocSizeOf, PartialEq, Hash, Eq)]
+pub enum FilterGraphOpKey {
+ /// combine 2 images with SVG_FEBLEND_MODE_DARKEN
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feBlendElement
+ SVGFEBlendDarken,
+ /// combine 2 images with SVG_FEBLEND_MODE_LIGHTEN
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feBlendElement
+ SVGFEBlendLighten,
+ /// combine 2 images with SVG_FEBLEND_MODE_MULTIPLY
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feBlendElement
+ SVGFEBlendMultiply,
+ /// combine 2 images with SVG_FEBLEND_MODE_NORMAL
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feBlendElement
+ SVGFEBlendNormal,
+ /// combine 2 images with SVG_FEBLEND_MODE_SCREEN
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feBlendElement
+ SVGFEBlendScreen,
+ /// combine 2 images with SVG_FEBLEND_MODE_OVERLAY
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendOverlay,
+ /// combine 2 images with SVG_FEBLEND_MODE_COLOR_DODGE
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendColorDodge,
+ /// combine 2 images with SVG_FEBLEND_MODE_COLOR_BURN
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendColorBurn,
+ /// combine 2 images with SVG_FEBLEND_MODE_HARD_LIGHT
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendHardLight,
+ /// combine 2 images with SVG_FEBLEND_MODE_SOFT_LIGHT
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendSoftLight,
+ /// combine 2 images with SVG_FEBLEND_MODE_DIFFERENCE
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendDifference,
+ /// combine 2 images with SVG_FEBLEND_MODE_EXCLUSION
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendExclusion,
+ /// combine 2 images with SVG_FEBLEND_MODE_HUE
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendHue,
+ /// combine 2 images with SVG_FEBLEND_MODE_SATURATION
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendSaturation,
+ /// combine 2 images with SVG_FEBLEND_MODE_COLOR
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendColor,
+ /// combine 2 images with SVG_FEBLEND_MODE_LUMINOSITY
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Source: https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
+ SVGFEBlendLuminosity,
+ /// transform colors of image through 5x4 color matrix (transposed for
+ /// efficiency)
+ /// parameters: FilterOpGraphNode, matrix[5][4]
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feColorMatrixElement
+ SVGFEColorMatrix{values: [Au; 20]},
+ /// transform colors of image through configurable gradients with component
+ /// swizzle
+ /// parameters: FilterOpGraphNode, FilterData
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feComponentTransferElement
+ SVGFEComponentTransferInterned{handle: ItemUid, creates_pixels: bool},
+ /// composite 2 images with chosen composite mode with parameters for that
+ /// mode
+ /// parameters: FilterOpGraphNode, k1, k2, k3, k4
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feCompositeElement
+ SVGFECompositeArithmetic{k1: Au, k2: Au, k3: Au, k4: Au},
+ /// composite 2 images with chosen composite mode with parameters for that
+ /// mode
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feCompositeElement
+ SVGFECompositeATop,
+ /// composite 2 images with chosen composite mode with parameters for that
+ /// mode
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feCompositeElement
+ SVGFECompositeIn,
+ /// composite 2 images with chosen composite mode with parameters for that
+ /// mode
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Docs: https://developer.mozilla.org/en-US/docs/Web/SVG/Element/feComposite
+ SVGFECompositeLighter,
+ /// composite 2 images with chosen composite mode with parameters for that
+ /// mode
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feCompositeElement
+ SVGFECompositeOut,
+ /// composite 2 images with chosen composite mode with parameters for that
+ /// mode
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feCompositeElement
+ SVGFECompositeOver,
+ /// composite 2 images with chosen composite mode with parameters for that
+ /// mode
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feCompositeElement
+ SVGFECompositeXOR,
+ /// transform image through convolution matrix of up to 25 values (spec
+ /// allows more but for performance reasons we do not)
+ /// parameters: FilterOpGraphNode, orderX, orderY, kernelValues[25],
+ /// divisor, bias, targetX, targetY, kernelUnitLengthX, kernelUnitLengthY,
+ /// preserveAlpha
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feConvolveMatrixElement
+ SVGFEConvolveMatrixEdgeModeDuplicate{order_x: i32, order_y: i32,
+ kernel: [Au; SVGFE_CONVOLVE_VALUES_LIMIT], divisor: Au, bias: Au,
+ target_x: i32, target_y: i32, kernel_unit_length_x: Au,
+ kernel_unit_length_y: Au, preserve_alpha: i32},
+ /// transform image through convolution matrix of up to 25 values (spec
+ /// allows more but for performance reasons we do not)
+ /// parameters: FilterOpGraphNode, orderX, orderY, kernelValues[25],
+ /// divisor, bias, targetX, targetY, kernelUnitLengthX, kernelUnitLengthY,
+ /// preserveAlpha
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feConvolveMatrixElement
+ SVGFEConvolveMatrixEdgeModeNone{order_x: i32, order_y: i32,
+ kernel: [Au; SVGFE_CONVOLVE_VALUES_LIMIT], divisor: Au, bias: Au,
+ target_x: i32, target_y: i32, kernel_unit_length_x: Au,
+ kernel_unit_length_y: Au, preserve_alpha: i32},
+ /// transform image through convolution matrix of up to 25 values (spec
+ /// allows more but for performance reasons we do not)
+ /// parameters: FilterOpGraphNode, orderX, orderY, kernelValues[25],
+ /// divisor, bias, targetX, targetY, kernelUnitLengthX, kernelUnitLengthY,
+ /// preserveAlpha
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#feConvolveMatrixElement
+ SVGFEConvolveMatrixEdgeModeWrap{order_x: i32, order_y: i32,
+ kernel: [Au; SVGFE_CONVOLVE_VALUES_LIMIT], divisor: Au, bias: Au,
+ target_x: i32, target_y: i32, kernel_unit_length_x: Au,
+ kernel_unit_length_y: Au, preserve_alpha: i32},
+ /// calculate lighting based on heightmap image with provided values for a
+ /// distant light source with specified direction
+ /// parameters: FilterOpGraphNode, surfaceScale, diffuseConstant,
+ /// kernelUnitLengthX, kernelUnitLengthY, azimuth, elevation
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEDiffuseLightingElement
+ /// https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEDistantLightElement
+ SVGFEDiffuseLightingDistant{surface_scale: Au, diffuse_constant: Au,
+ kernel_unit_length_x: Au, kernel_unit_length_y: Au, azimuth: Au,
+ elevation: Au},
+ /// calculate lighting based on heightmap image with provided values for a
+ /// point light source at specified location
+ /// parameters: FilterOpGraphNode, surfaceScale, diffuseConstant,
+ /// kernelUnitLengthX, kernelUnitLengthY, x, y, z
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEDiffuseLightingElement
+ /// https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEPointLightElement
+ SVGFEDiffuseLightingPoint{surface_scale: Au, diffuse_constant: Au,
+ kernel_unit_length_x: Au, kernel_unit_length_y: Au, x: Au, y: Au,
+ z: Au},
+ /// calculate lighting based on heightmap image with provided values for a
+ /// spot light source at specified location pointing at specified target
+ /// location with specified hotspot sharpness and cone angle
+ /// parameters: FilterOpGraphNode, surfaceScale, diffuseConstant,
+ /// kernelUnitLengthX, kernelUnitLengthY, x, y, z, pointsAtX, pointsAtY,
+ /// pointsAtZ, specularExponent, limitingConeAngle
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEDiffuseLightingElement
+ /// https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFESpotLightElement
+ SVGFEDiffuseLightingSpot{surface_scale: Au, diffuse_constant: Au,
+ kernel_unit_length_x: Au, kernel_unit_length_y: Au, x: Au, y: Au, z: Au,
+ points_at_x: Au, points_at_y: Au, points_at_z: Au, cone_exponent: Au,
+ limiting_cone_angle: Au},
+ /// calculate a distorted version of first input image using offset values
+ /// from second input image at specified intensity
+ /// parameters: FilterOpGraphNode, scale, xChannelSelector, yChannelSelector
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEDisplacementMapElement
+ SVGFEDisplacementMap{scale: Au, x_channel_selector: u32,
+ y_channel_selector: u32},
+ /// create and merge a dropshadow version of the specified image's alpha
+ /// channel with specified offset and blur radius
+ /// parameters: FilterOpGraphNode, flood_color, flood_opacity, dx, dy,
+ /// stdDeviationX, stdDeviationY
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEDropShadowElement
+ SVGFEDropShadow{color: ColorU, dx: Au, dy: Au, std_deviation_x: Au,
+ std_deviation_y: Au},
+ /// synthesize a new image of specified size containing a solid color
+ /// parameters: FilterOpGraphNode, color
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEFloodElement
+ SVGFEFlood{color: ColorU},
+ /// create a blurred version of the input image
+ /// parameters: FilterOpGraphNode, stdDeviationX, stdDeviationY
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEGaussianBlurElement
+ SVGFEGaussianBlur{std_deviation_x: Au, std_deviation_y: Au},
+ /// Filter that does no transformation of the colors, needed for
+ /// debug purposes, and is the default value in impl_default_for_enums.
+ SVGFEIdentity,
+ /// synthesize a new image based on a url (i.e. blob image source)
+ /// parameters: FilterOpGraphNode, sampling_filter (see SamplingFilter in
+ /// Types.h), transform
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEImageElement
+ SVGFEImage{sampling_filter: u32, matrix: [Au; 6]},
+ /// create a new image based on the input image with the contour stretched
+ /// outward (dilate operator)
+ /// parameters: FilterOpGraphNode, radiusX, radiusY
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEMorphologyElement
+ SVGFEMorphologyDilate{radius_x: Au, radius_y: Au},
+ /// create a new image based on the input image with the contour shrunken
+ /// inward (erode operator)
+ /// parameters: FilterOpGraphNode, radiusX, radiusY
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEMorphologyElement
+ SVGFEMorphologyErode{radius_x: Au, radius_y: Au},
+ /// represents CSS opacity property as a graph node like the rest of the
+ /// SVGFE* filters
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ SVGFEOpacity{value: Au},
+ /// represents CSS opacity property as a graph node like the rest of the
+ /// SVGFE* filters
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ SVGFEOpacityBinding{valuebindingid: PropertyBindingId, value: Au},
+ /// Filter that copies the SourceGraphic image into the specified subregion.
+ /// This is intentionally the only way to get SourceGraphic into the graph,
+ /// as the filter region must be applied before it is used.
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - no inputs, no linear
+ SVGFESourceGraphic,
+ /// Filter that copies the SourceAlpha image into the specified subregion.
+ /// This is intentionally the only way to get SourceAlpha into the graph,
+ /// as the filter region must be applied before it is used.
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - no inputs, no linear
+ SVGFESourceAlpha,
+ /// calculate lighting based on heightmap image with provided values for a
+ /// distant light source with specified direction
+ /// parameters: FilterOpGraphNode, surfaceScale, specularConstant, specularExponent,
+ /// kernelUnitLengthX, kernelUnitLengthY, azimuth, elevation
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFESpecularLightingElement
+ /// https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEDistantLightElement
+ SVGFESpecularLightingDistant{surface_scale: Au, specular_constant: Au,
+ specular_exponent: Au, kernel_unit_length_x: Au,
+ kernel_unit_length_y: Au, azimuth: Au, elevation: Au},
+ /// calculate lighting based on heightmap image with provided values for a
+ /// point light source at specified location
+ /// parameters: FilterOpGraphNode, surfaceScale, specularConstant,
+ /// specularExponent, kernelUnitLengthX, kernelUnitLengthY, x, y, z
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFESpecularLightingElement
+ /// https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFEPointLightElement
+ SVGFESpecularLightingPoint{surface_scale: Au, specular_constant: Au,
+ specular_exponent: Au, kernel_unit_length_x: Au,
+ kernel_unit_length_y: Au, x: Au, y: Au, z: Au},
+ /// calculate lighting based on heightmap image with provided values for a
+ /// spot light source at specified location pointing at specified target
+ /// location with specified hotspot sharpness and cone angle
+ /// parameters: FilterOpGraphNode, surfaceScale, specularConstant,
+ /// specularExponent, kernelUnitLengthX, kernelUnitLengthY, x, y, z,
+ /// pointsAtX, pointsAtY, pointsAtZ, specularExponent, limitingConeAngle
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFESpecularLightingElement
+ /// https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFESpotLightElement
+ SVGFESpecularLightingSpot{surface_scale: Au, specular_constant: Au,
+ specular_exponent: Au, kernel_unit_length_x: Au,
+ kernel_unit_length_y: Au, x: Au, y: Au, z: Au, points_at_x: Au,
+ points_at_y: Au, points_at_z: Au, cone_exponent: Au,
+ limiting_cone_angle: Au},
+ /// create a new image based on the input image, repeated throughout the
+ /// output rectangle
+ /// parameters: FilterOpGraphNode
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFETileElement
+ SVGFETile,
+ /// convert a color image to an alpha channel - internal use; generated by
+ /// SVGFilterInstance::GetOrCreateSourceAlphaIndex().
+ SVGFEToAlpha,
+ /// synthesize a new image based on Fractal Noise (Perlin) with the chosen
+ /// stitching mode
+ /// parameters: FilterOpGraphNode, baseFrequencyX, baseFrequencyY,
+ /// numOctaves, seed
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFETurbulenceElement
+ SVGFETurbulenceWithFractalNoiseWithNoStitching{base_frequency_x: Au,
+ base_frequency_y: Au, num_octaves: u32, seed: u32},
+ /// synthesize a new image based on Fractal Noise (Perlin) with the chosen
+ /// stitching mode
+ /// parameters: FilterOpGraphNode, baseFrequencyX, baseFrequencyY,
+ /// numOctaves, seed
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFETurbulenceElement
+ SVGFETurbulenceWithFractalNoiseWithStitching{base_frequency_x: Au,
+ base_frequency_y: Au, num_octaves: u32, seed: u32},
+ /// synthesize a new image based on Turbulence Noise (offset vectors)
+ /// parameters: FilterOpGraphNode, baseFrequencyX, baseFrequencyY,
+ /// numOctaves, seed
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFETurbulenceElement
+ SVGFETurbulenceWithTurbulenceNoiseWithNoStitching{base_frequency_x: Au,
+ base_frequency_y: Au, num_octaves: u32, seed: u32},
+ /// synthesize a new image based on Turbulence Noise (offset vectors)
+ /// parameters: FilterOpGraphNode, baseFrequencyX, baseFrequencyY,
+ /// numOctaves, seed
+ /// SVG filter semantics - selectable input(s), selectable between linear
+ /// (default) and sRGB color space for calculations
+ /// Spec: https://www.w3.org/TR/filter-effects-1/#InterfaceSVGFETurbulenceElement
+ SVGFETurbulenceWithTurbulenceNoiseWithStitching{base_frequency_x: Au,
+ base_frequency_y: Au, num_octaves: u32, seed: u32},
+}
+
+impl From<FilterGraphOp> for FilterGraphOpKey {
+ fn from(op: FilterGraphOp) -> Self {
+ match op {
+ FilterGraphOp::SVGFEBlendDarken => FilterGraphOpKey::SVGFEBlendDarken,
+ FilterGraphOp::SVGFEBlendLighten => FilterGraphOpKey::SVGFEBlendLighten,
+ FilterGraphOp::SVGFEBlendMultiply => FilterGraphOpKey::SVGFEBlendMultiply,
+ FilterGraphOp::SVGFEBlendNormal => FilterGraphOpKey::SVGFEBlendNormal,
+ FilterGraphOp::SVGFEBlendScreen => FilterGraphOpKey::SVGFEBlendScreen,
+ FilterGraphOp::SVGFEBlendOverlay => FilterGraphOpKey::SVGFEBlendOverlay,
+ FilterGraphOp::SVGFEBlendColorDodge => FilterGraphOpKey::SVGFEBlendColorDodge,
+ FilterGraphOp::SVGFEBlendColorBurn => FilterGraphOpKey::SVGFEBlendColorBurn,
+ FilterGraphOp::SVGFEBlendHardLight => FilterGraphOpKey::SVGFEBlendHardLight,
+ FilterGraphOp::SVGFEBlendSoftLight => FilterGraphOpKey::SVGFEBlendSoftLight,
+ FilterGraphOp::SVGFEBlendDifference => FilterGraphOpKey::SVGFEBlendDifference,
+ FilterGraphOp::SVGFEBlendExclusion => FilterGraphOpKey::SVGFEBlendExclusion,
+ FilterGraphOp::SVGFEBlendHue => FilterGraphOpKey::SVGFEBlendHue,
+ FilterGraphOp::SVGFEBlendSaturation => FilterGraphOpKey::SVGFEBlendSaturation,
+ FilterGraphOp::SVGFEBlendColor => FilterGraphOpKey::SVGFEBlendColor,
+ FilterGraphOp::SVGFEBlendLuminosity => FilterGraphOpKey::SVGFEBlendLuminosity,
+ FilterGraphOp::SVGFEColorMatrix { values: color_matrix } => {
+ let mut quantized_values: [Au; 20] = [Au(0); 20];
+ for (value, result) in color_matrix.iter().zip(quantized_values.iter_mut()) {
+ *result = Au::from_f32_px(*value);
+ }
+ FilterGraphOpKey::SVGFEColorMatrix{values: quantized_values}
+ }
+ FilterGraphOp::SVGFEComponentTransfer => unreachable!(),
+ FilterGraphOp::SVGFEComponentTransferInterned { handle, creates_pixels } => FilterGraphOpKey::SVGFEComponentTransferInterned{
+ handle: handle.uid(),
+ creates_pixels,
+ },
+ FilterGraphOp::SVGFECompositeArithmetic { k1, k2, k3, k4 } => {
+ FilterGraphOpKey::SVGFECompositeArithmetic{
+ k1: Au::from_f32_px(k1),
+ k2: Au::from_f32_px(k2),
+ k3: Au::from_f32_px(k3),
+ k4: Au::from_f32_px(k4),
+ }
+ }
+ FilterGraphOp::SVGFECompositeATop => FilterGraphOpKey::SVGFECompositeATop,
+ FilterGraphOp::SVGFECompositeIn => FilterGraphOpKey::SVGFECompositeIn,
+ FilterGraphOp::SVGFECompositeLighter => FilterGraphOpKey::SVGFECompositeLighter,
+ FilterGraphOp::SVGFECompositeOut => FilterGraphOpKey::SVGFECompositeOut,
+ FilterGraphOp::SVGFECompositeOver => FilterGraphOpKey::SVGFECompositeOver,
+ FilterGraphOp::SVGFECompositeXOR => FilterGraphOpKey::SVGFECompositeXOR,
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeDuplicate { order_x, order_y, kernel, divisor, bias, target_x, target_y, kernel_unit_length_x, kernel_unit_length_y, preserve_alpha } => {
+ let mut values: [Au; SVGFE_CONVOLVE_VALUES_LIMIT] = [Au(0); SVGFE_CONVOLVE_VALUES_LIMIT];
+ for (value, result) in kernel.iter().zip(values.iter_mut()) {
+ *result = Au::from_f32_px(*value)
+ }
+ FilterGraphOpKey::SVGFEConvolveMatrixEdgeModeDuplicate{
+ order_x,
+ order_y,
+ kernel: values,
+ divisor: Au::from_f32_px(divisor),
+ bias: Au::from_f32_px(bias),
+ target_x,
+ target_y,
+ kernel_unit_length_x: Au::from_f32_px(kernel_unit_length_x),
+ kernel_unit_length_y: Au::from_f32_px(kernel_unit_length_y),
+ preserve_alpha,
+ }
+ }
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeNone { order_x, order_y, kernel, divisor, bias, target_x, target_y, kernel_unit_length_x, kernel_unit_length_y, preserve_alpha } => {
+ let mut values: [Au; SVGFE_CONVOLVE_VALUES_LIMIT] = [Au(0); SVGFE_CONVOLVE_VALUES_LIMIT];
+ for (value, result) in kernel.iter().zip(values.iter_mut()) {
+ *result = Au::from_f32_px(*value)
+ }
+ FilterGraphOpKey::SVGFEConvolveMatrixEdgeModeNone{
+ order_x,
+ order_y,
+ kernel: values,
+ divisor: Au::from_f32_px(divisor),
+ bias: Au::from_f32_px(bias),
+ target_x,
+ target_y,
+ kernel_unit_length_x: Au::from_f32_px(kernel_unit_length_x),
+ kernel_unit_length_y: Au::from_f32_px(kernel_unit_length_y),
+ preserve_alpha,
+ }
+ }
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeWrap { order_x, order_y, kernel, divisor, bias, target_x, target_y, kernel_unit_length_x, kernel_unit_length_y, preserve_alpha } => {
+ let mut values: [Au; SVGFE_CONVOLVE_VALUES_LIMIT] = [Au(0); SVGFE_CONVOLVE_VALUES_LIMIT];
+ for (value, result) in kernel.iter().zip(values.iter_mut()) {
+ *result = Au::from_f32_px(*value)
+ }
+ FilterGraphOpKey::SVGFEConvolveMatrixEdgeModeWrap{
+ order_x,
+ order_y,
+ kernel: values,
+ divisor: Au::from_f32_px(divisor),
+ bias: Au::from_f32_px(bias),
+ target_x,
+ target_y,
+ kernel_unit_length_x: Au::from_f32_px(kernel_unit_length_x),
+ kernel_unit_length_y: Au::from_f32_px(kernel_unit_length_y),
+ preserve_alpha,
+ }
+ }
+ FilterGraphOp::SVGFEDiffuseLightingDistant { surface_scale, diffuse_constant, kernel_unit_length_x, kernel_unit_length_y, azimuth, elevation } => {
+ FilterGraphOpKey::SVGFEDiffuseLightingDistant{
+ surface_scale: Au::from_f32_px(surface_scale),
+ diffuse_constant: Au::from_f32_px(diffuse_constant),
+ kernel_unit_length_x: Au::from_f32_px(kernel_unit_length_x),
+ kernel_unit_length_y: Au::from_f32_px(kernel_unit_length_y),
+ azimuth: Au::from_f32_px(azimuth),
+ elevation: Au::from_f32_px(elevation),
+ }
+ }
+ FilterGraphOp::SVGFEDiffuseLightingPoint { surface_scale, diffuse_constant, kernel_unit_length_x, kernel_unit_length_y, x, y, z } => {
+ FilterGraphOpKey::SVGFEDiffuseLightingPoint{
+ surface_scale: Au::from_f32_px(surface_scale),
+ diffuse_constant: Au::from_f32_px(diffuse_constant),
+ kernel_unit_length_x: Au::from_f32_px(kernel_unit_length_x),
+ kernel_unit_length_y: Au::from_f32_px(kernel_unit_length_y),
+ x: Au::from_f32_px(x),
+ y: Au::from_f32_px(y),
+ z: Au::from_f32_px(z),
+ }
+ }
+ FilterGraphOp::SVGFEDiffuseLightingSpot { surface_scale, diffuse_constant, kernel_unit_length_x, kernel_unit_length_y, x, y, z, points_at_x, points_at_y, points_at_z, cone_exponent, limiting_cone_angle } => {
+ FilterGraphOpKey::SVGFEDiffuseLightingSpot{
+ surface_scale: Au::from_f32_px(surface_scale),
+ diffuse_constant: Au::from_f32_px(diffuse_constant),
+ kernel_unit_length_x: Au::from_f32_px(kernel_unit_length_x),
+ kernel_unit_length_y: Au::from_f32_px(kernel_unit_length_y),
+ x: Au::from_f32_px(x),
+ y: Au::from_f32_px(y),
+ z: Au::from_f32_px(z),
+ points_at_x: Au::from_f32_px(points_at_x),
+ points_at_y: Au::from_f32_px(points_at_y),
+ points_at_z: Au::from_f32_px(points_at_z),
+ cone_exponent: Au::from_f32_px(cone_exponent),
+ limiting_cone_angle: Au::from_f32_px(limiting_cone_angle),
+ }
+ }
+ FilterGraphOp::SVGFEDisplacementMap { scale, x_channel_selector, y_channel_selector } => {
+ FilterGraphOpKey::SVGFEDisplacementMap{
+ scale: Au::from_f32_px(scale),
+ x_channel_selector,
+ y_channel_selector,
+ }
+ }
+ FilterGraphOp::SVGFEDropShadow { color, dx, dy, std_deviation_x, std_deviation_y } => {
+ FilterGraphOpKey::SVGFEDropShadow{
+ color: color.into(),
+ dx: Au::from_f32_px(dx),
+ dy: Au::from_f32_px(dy),
+ std_deviation_x: Au::from_f32_px(std_deviation_x),
+ std_deviation_y: Au::from_f32_px(std_deviation_y),
+ }
+ }
+ FilterGraphOp::SVGFEFlood { color } => FilterGraphOpKey::SVGFEFlood{color: color.into()},
+ FilterGraphOp::SVGFEGaussianBlur { std_deviation_x, std_deviation_y } => {
+ FilterGraphOpKey::SVGFEGaussianBlur{
+ std_deviation_x: Au::from_f32_px(std_deviation_x),
+ std_deviation_y: Au::from_f32_px(std_deviation_y),
+ }
+ }
+ FilterGraphOp::SVGFEIdentity => FilterGraphOpKey::SVGFEIdentity,
+ FilterGraphOp::SVGFEImage { sampling_filter, matrix } => {
+ let mut values: [Au; 6] = [Au(0); 6];
+ for (value, result) in matrix.iter().zip(values.iter_mut()) {
+ *result = Au::from_f32_px(*value)
+ }
+ FilterGraphOpKey::SVGFEImage{
+ sampling_filter,
+ matrix: values,
+ }
+ }
+ FilterGraphOp::SVGFEMorphologyDilate { radius_x, radius_y } => {
+ FilterGraphOpKey::SVGFEMorphologyDilate{
+ radius_x: Au::from_f32_px(radius_x),
+ radius_y: Au::from_f32_px(radius_y),
+ }
+ }
+ FilterGraphOp::SVGFEMorphologyErode { radius_x, radius_y } => {
+ FilterGraphOpKey::SVGFEMorphologyErode{
+ radius_x: Au::from_f32_px(radius_x),
+ radius_y: Au::from_f32_px(radius_y),
+ }
+ }
+ FilterGraphOp::SVGFEOpacity{valuebinding: binding, value: _} => {
+ match binding {
+ PropertyBinding::Value(value) => {
+ FilterGraphOpKey::SVGFEOpacity{value: Au::from_f32_px(value)}
+ }
+ PropertyBinding::Binding(key, default) => {
+ FilterGraphOpKey::SVGFEOpacityBinding{valuebindingid: key.id, value: Au::from_f32_px(default)}
+ }
+ }
+ }
+ FilterGraphOp::SVGFESourceAlpha => FilterGraphOpKey::SVGFESourceAlpha,
+ FilterGraphOp::SVGFESourceGraphic => FilterGraphOpKey::SVGFESourceGraphic,
+ FilterGraphOp::SVGFESpecularLightingDistant { surface_scale, specular_constant, specular_exponent, kernel_unit_length_x, kernel_unit_length_y, azimuth, elevation } => {
+ FilterGraphOpKey::SVGFESpecularLightingDistant{
+ surface_scale: Au::from_f32_px(surface_scale),
+ specular_constant: Au::from_f32_px(specular_constant),
+ specular_exponent: Au::from_f32_px(specular_exponent),
+ kernel_unit_length_x: Au::from_f32_px(kernel_unit_length_x),
+ kernel_unit_length_y: Au::from_f32_px(kernel_unit_length_y),
+ azimuth: Au::from_f32_px(azimuth),
+ elevation: Au::from_f32_px(elevation),
+ }
+ }
+ FilterGraphOp::SVGFESpecularLightingPoint { surface_scale, specular_constant, specular_exponent, kernel_unit_length_x, kernel_unit_length_y, x, y, z } => {
+ FilterGraphOpKey::SVGFESpecularLightingPoint{
+ surface_scale: Au::from_f32_px(surface_scale),
+ specular_constant: Au::from_f32_px(specular_constant),
+ specular_exponent: Au::from_f32_px(specular_exponent),
+ kernel_unit_length_x: Au::from_f32_px(kernel_unit_length_x),
+ kernel_unit_length_y: Au::from_f32_px(kernel_unit_length_y),
+ x: Au::from_f32_px(x),
+ y: Au::from_f32_px(y),
+ z: Au::from_f32_px(z),
+ }
+ }
+ FilterGraphOp::SVGFESpecularLightingSpot { surface_scale, specular_constant, specular_exponent, kernel_unit_length_x, kernel_unit_length_y, x, y, z, points_at_x, points_at_y, points_at_z, cone_exponent, limiting_cone_angle } => {
+ FilterGraphOpKey::SVGFESpecularLightingSpot{
+ surface_scale: Au::from_f32_px(surface_scale),
+ specular_constant: Au::from_f32_px(specular_constant),
+ specular_exponent: Au::from_f32_px(specular_exponent),
+ kernel_unit_length_x: Au::from_f32_px(kernel_unit_length_x),
+ kernel_unit_length_y: Au::from_f32_px(kernel_unit_length_y),
+ x: Au::from_f32_px(x),
+ y: Au::from_f32_px(y),
+ z: Au::from_f32_px(z),
+ points_at_x: Au::from_f32_px(points_at_x),
+ points_at_y: Au::from_f32_px(points_at_y),
+ points_at_z: Au::from_f32_px(points_at_z),
+ cone_exponent: Au::from_f32_px(cone_exponent),
+ limiting_cone_angle: Au::from_f32_px(limiting_cone_angle),
+ }
+ }
+ FilterGraphOp::SVGFETile => FilterGraphOpKey::SVGFETile,
+ FilterGraphOp::SVGFEToAlpha => FilterGraphOpKey::SVGFEToAlpha,
+ FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithNoStitching { base_frequency_x, base_frequency_y, num_octaves, seed } => {
+ FilterGraphOpKey::SVGFETurbulenceWithFractalNoiseWithNoStitching {
+ base_frequency_x: Au::from_f32_px(base_frequency_x),
+ base_frequency_y: Au::from_f32_px(base_frequency_y),
+ num_octaves,
+ seed,
+ }
+ }
+ FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithStitching { base_frequency_x, base_frequency_y, num_octaves, seed } => {
+ FilterGraphOpKey::SVGFETurbulenceWithFractalNoiseWithStitching {
+ base_frequency_x: Au::from_f32_px(base_frequency_x),
+ base_frequency_y: Au::from_f32_px(base_frequency_y),
+ num_octaves,
+ seed,
+ }
+ }
+ FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithNoStitching { base_frequency_x, base_frequency_y, num_octaves, seed } => {
+ FilterGraphOpKey::SVGFETurbulenceWithTurbulenceNoiseWithNoStitching {
+ base_frequency_x: Au::from_f32_px(base_frequency_x),
+ base_frequency_y: Au::from_f32_px(base_frequency_y),
+ num_octaves,
+ seed,
+ }
+ }
+ FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithStitching { base_frequency_x, base_frequency_y, num_octaves, seed } => {
+ FilterGraphOpKey::SVGFETurbulenceWithTurbulenceNoiseWithStitching {
+ base_frequency_x: Au::from_f32_px(base_frequency_x),
+ base_frequency_y: Au::from_f32_px(base_frequency_y),
+ num_octaves,
+ seed,
+ }
+ }
+ }
+ }
+}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, MallocSizeOf, PartialEq, Hash, Eq)]
+pub struct FilterGraphNodeKey {
+ /// True if the DAG optimizer kept this node; nodes marked as unnecessary
+ /// (for example an SVGFEOffset that can often be folded into downstream
+ /// nodes) are skipped when building render tasks
+ pub kept_by_optimizer: bool,
+ /// True if color_interpolation_filter == LinearRgb; shader will convert
+ /// sRGB texture pixel colors on load and convert back on store, for correct
+ /// interpolation
+ pub linear: bool,
+ /// padding for output rect if we need a border to get correct clamping, or
+ /// to account for larger final subregion than source rect (see bug 1869672)
+ pub inflate: i16,
+ /// virtualized picture input bindings (i.e. texture sources); most filters
+ /// use one or two of these, but certain filters do not use any
+ pub inputs: Vec<FilterGraphPictureReferenceKey>,
+ /// rect this node will render into, in filter space; does not account for
+ /// inflate or device_pixel_scale
+ pub subregion: [Au; 4],
+}
+
+impl From<FilterGraphNode> for FilterGraphNodeKey {
+ fn from(node: FilterGraphNode) -> Self {
+ FilterGraphNodeKey{
+ kept_by_optimizer: node.kept_by_optimizer,
+ linear: node.linear,
+ inflate: node.inflate,
+ inputs: node.inputs.into_iter().map(|node| {node.into()}).collect(),
+ subregion: [
+ Au::from_f32_px(node.subregion.min.x),
+ Au::from_f32_px(node.subregion.min.y),
+ Au::from_f32_px(node.subregion.max.x),
+ Au::from_f32_px(node.subregion.max.y),
+ ],
+ }
+ }
+}
+
/// Represents a hashable description of how a picture primitive
/// will be composited into its parent.
#[cfg_attr(feature = "capture", derive(Serialize))]
@@ -96,6 +850,7 @@ pub enum PictureCompositeKey {
ComponentTransfer(ItemUid),
Flood(ColorU),
SvgFilter(Vec<FilterPrimitiveKey>),
+ SVGFEGraph(Vec<(FilterGraphNodeKey, FilterGraphOpKey)>),
// MixBlendMode
Multiply,
@@ -180,6 +935,7 @@ impl From<Option<PictureCompositeMode>> for PictureCompositeKey {
}
Filter::ComponentTransfer => unreachable!(),
Filter::Flood(color) => PictureCompositeKey::Flood(color.into()),
+ Filter::SVGGraphNode(_node, _op) => unreachable!(),
}
}
Some(PictureCompositeMode::ComponentTransferFilter(handle)) => {
@@ -222,6 +978,12 @@ impl From<Option<PictureCompositeMode>> for PictureCompositeKey {
}
}).collect())
}
+ Some(PictureCompositeMode::SVGFEGraph(filter_nodes)) => {
+ PictureCompositeKey::SVGFEGraph(
+ filter_nodes.into_iter().map(|(node, op)| {
+ (node.into(), op.into())
+ }).collect())
+ }
Some(PictureCompositeMode::Blit(_)) |
Some(PictureCompositeMode::TileCache { .. }) |
Some(PictureCompositeMode::IntermediateSurface) |
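The picture.rs changes above make PictureCompositeMode::SVGFEGraph hashable by converting every f32 filter parameter to Au (fixed-point app units) inside the key types. A minimal standalone sketch of that quantization pattern, using hypothetical MiniOp / MiniOpKey types in place of the real FilterGraphOp / FilterGraphOpKey enums, and assuming the usual 60-app-units-per-pixel convention:

// Hypothetical MiniOp / MiniOpKey stand-ins for FilterGraphOp / FilterGraphOpKey,
// showing the f32 -> Au quantization used to build a hashable interning key.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Au(i32);

impl Au {
    // Assumes the app_units convention of 60 app units per CSS pixel.
    fn from_f32_px(px: f32) -> Self {
        Au((px * 60.0).round() as i32)
    }
}

// A filter op carries raw f32 parameters...
struct MiniOp { radius_x: f32, radius_y: f32 }

// ...while its key quantizes them so the whole key is Eq + Hash.
#[derive(PartialEq, Eq, Hash, Debug)]
struct MiniOpKey { radius_x: Au, radius_y: Au }

impl From<MiniOp> for MiniOpKey {
    fn from(op: MiniOp) -> Self {
        MiniOpKey {
            radius_x: Au::from_f32_px(op.radius_x),
            radius_y: Au::from_f32_px(op.radius_y),
        }
    }
}

fn main() {
    let key: MiniOpKey = MiniOp { radius_x: 2.5, radius_y: 2.5 }.into();
    assert_eq!(key, MiniOpKey { radius_x: Au(150), radius_y: Au(150) });
}

Hashing the quantized integers sidesteps the fact that f32 implements neither Eq nor Hash, while keeping the composite key stable from frame to frame.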
diff --git a/gfx/wr/webrender/src/quad.rs b/gfx/wr/webrender/src/quad.rs
index 5455611f3f..4e83b0c425 100644
--- a/gfx/wr/webrender/src/quad.rs
+++ b/gfx/wr/webrender/src/quad.rs
@@ -88,6 +88,7 @@ pub fn push_quad(
frame_state.clip_store,
interned_clips,
prim_is_2d_scale_translation,
+ pattern,
frame_context.spatial_tree,
);
@@ -161,10 +162,10 @@ pub fn push_quad(
match strategy {
QuadRenderStrategy::Direct => {}
QuadRenderStrategy::Indirect => {
- let segment = add_segment(
+ let task_id = add_render_task_with_mask(
pattern,
- &clipped_surface_rect,
- true,
+ clipped_surface_rect.size(),
+ clipped_surface_rect.min.to_f32(),
clip_chain,
prim_spatial_node_index,
pic_context.raster_spatial_node_index,
@@ -177,21 +178,23 @@ pub fn push_quad(
frame_state,
);
+ let rect = clipped_surface_rect.to_f32().cast_unit();
+ let is_masked = true;
add_composite_prim(
pattern,
+ is_masked,
prim_instance_index,
- segment.rect,
- quad_flags,
+ rect,
frame_state,
targets,
- &[segment],
+ &[QuadSegment { rect, task_id }],
);
}
QuadRenderStrategy::Tiled { x_tiles, y_tiles } => {
let unclipped_surface_rect = surface
.map_to_device_rect(&clip_chain.pic_coverage_rect, frame_context.spatial_tree);
- scratch.quad_segments.clear();
+ scratch.quad_indirect_segments.clear();
let mut x_coords = vec![clipped_surface_rect.min.x];
let mut y_coords = vec![clipped_surface_rect.min.y];
@@ -225,16 +228,17 @@ pub fn push_quad(
continue;
}
- let create_task = true;
- let rect = DeviceIntRect {
+ let int_rect = DeviceIntRect {
min: point2(x0, y0),
max: point2(x1, y1),
};
- let segment = add_segment(
+ let rect = int_rect.to_f32();
+
+ let task_id = add_render_task_with_mask(
pattern,
- &rect,
- create_task,
+ int_rect.size(),
+ rect.min,
clip_chain,
prim_spatial_node_index,
pic_context.raster_spatial_node_index,
@@ -246,18 +250,20 @@ pub fn push_quad(
needs_scissor,
frame_state,
);
- scratch.quad_segments.push(segment);
+
+ scratch.quad_indirect_segments.push(QuadSegment { rect: rect.cast_unit(), task_id });
}
}
+ let is_masked = true;
add_composite_prim(
pattern,
+ is_masked,
prim_instance_index,
unclipped_surface_rect.cast_unit(),
- quad_flags,
frame_state,
targets,
- &scratch.quad_segments,
+ &scratch.quad_indirect_segments,
);
}
QuadRenderStrategy::NinePatch { clip_rect, radius } => {
@@ -298,7 +304,21 @@ pub fn push_quad(
x_coords.sort_by(|a, b| a.partial_cmp(b).unwrap());
y_coords.sort_by(|a, b| a.partial_cmp(b).unwrap());
- scratch.quad_segments.clear();
+ scratch.quad_direct_segments.clear();
+ scratch.quad_indirect_segments.clear();
+
+ // TODO: re-land clip-out mode.
+ let mode = ClipMode::Clip;
+
+ fn should_create_task(mode: ClipMode, x: usize, y: usize) -> bool {
+ match mode {
+ // Only create render tasks for the corners.
+ ClipMode::Clip => x != 1 && y != 1,
+ // Create render tasks for all segments (the
+ // center will be skipped).
+ ClipMode::ClipOut => true,
+ }
+ }
for y in 0 .. y_coords.len()-1 {
let y0 = y_coords[y];
@@ -309,6 +329,10 @@ pub fn push_quad(
}
for x in 0 .. x_coords.len()-1 {
+ if mode == ClipMode::ClipOut && x == 1 && y == 1 {
+ continue;
+ }
+
let x0 = x_coords[x];
let x1 = x_coords[x+1];
@@ -316,46 +340,68 @@ pub fn push_quad(
continue;
}
- // Only create render tasks for the corners.
- let create_task = x != 1 && y != 1;
-
let rect = DeviceIntRect::new(point2(x0, y0), point2(x1, y1));
- let rect = match rect.intersection(&clipped_surface_rect) {
+ let device_rect = match rect.intersection(&clipped_surface_rect) {
Some(rect) => rect,
None => {
continue;
}
};
- let segment = add_segment(
- pattern,
- &rect,
- create_task,
- clip_chain,
- prim_spatial_node_index,
- pic_context.raster_spatial_node_index,
- main_prim_address,
- transform_id,
- aa_flags,
- quad_flags,
- device_pixel_scale,
- false,
- frame_state,
- );
- scratch.quad_segments.push(segment);
+ if should_create_task(mode, x, y) {
+ let task_id = add_render_task_with_mask(
+ pattern,
+ device_rect.size(),
+ device_rect.min.to_f32(),
+ clip_chain,
+ prim_spatial_node_index,
+ pic_context.raster_spatial_node_index,
+ main_prim_address,
+ transform_id,
+ aa_flags,
+ quad_flags,
+ device_pixel_scale,
+ false,
+ frame_state,
+ );
+ scratch.quad_indirect_segments.push(QuadSegment {
+ rect: device_rect.to_f32().cast_unit(),
+ task_id,
+ });
+ } else {
+ scratch.quad_direct_segments.push(QuadSegment {
+ rect: device_rect.to_f32().cast_unit(),
+ task_id: RenderTaskId::INVALID,
+ });
+ };
}
}
- add_composite_prim(
- pattern,
- prim_instance_index,
- unclipped_surface_rect.cast_unit(),
- quad_flags,
- frame_state,
- targets,
- &scratch.quad_segments,
- );
+ if !scratch.quad_direct_segments.is_empty() {
+ add_pattern_prim(
+ pattern,
+ prim_instance_index,
+ unclipped_surface_rect.cast_unit(),
+ pattern.is_opaque,
+ frame_state,
+ targets,
+ &scratch.quad_direct_segments,
+ );
+ }
+
+ if !scratch.quad_indirect_segments.is_empty() {
+ let is_masked = true;
+ add_composite_prim(
+ pattern,
+ is_masked,
+ prim_instance_index,
+ unclipped_surface_rect.cast_unit(),
+ frame_state,
+ targets,
+ &scratch.quad_indirect_segments,
+ );
+ }
}
}
}
@@ -366,6 +412,7 @@ fn get_prim_render_strategy(
clip_store: &ClipStore,
interned_clips: &DataStore<ClipIntern>,
can_use_nine_patch: bool,
+ pattern: &Pattern,
spatial_tree: &SpatialTree,
) -> QuadRenderStrategy {
if !clip_chain.needs_mask {
@@ -385,6 +432,10 @@ fn get_prim_render_strategy(
return QuadRenderStrategy::Indirect;
}
+ if !pattern.supports_segmented_rendering() {
+ return QuadRenderStrategy::Indirect;
+ }
+
if can_use_nine_patch && clip_chain.clips_range.count == 1 {
let clip_instance = clip_store.get_instance_from_range(&clip_chain.clips_range, 0);
let clip_node = &interned_clips[clip_instance.handle];
@@ -432,10 +483,10 @@ fn get_prim_render_strategy(
}
}
-fn add_segment(
+fn add_render_task_with_mask(
pattern: &Pattern,
- rect: &DeviceIntRect,
- create_task: bool,
+ task_size: DeviceIntSize,
+ content_origin: DevicePoint,
clip_chain: &ClipChainInstance,
prim_spatial_node_index: SpatialNodeIndex,
raster_spatial_node_index: SpatialNodeIndex,
@@ -446,56 +497,86 @@ fn add_segment(
device_pixel_scale: DevicePixelScale,
needs_scissor_rect: bool,
frame_state: &mut FrameBuildingState,
-) -> QuadSegment {
- let task_size = rect.size();
- let rect = rect.to_f32();
- let content_origin = rect.min;
-
- let task_id = if create_task {
- let task_id = frame_state.rg_builder.add().init(RenderTask::new_dynamic(
- task_size,
- RenderTaskKind::new_prim(
- pattern.kind,
- pattern.shader_input,
- prim_spatial_node_index,
- raster_spatial_node_index,
- device_pixel_scale,
- content_origin,
- prim_address_f,
- transform_id,
- aa_flags,
- quad_flags,
- clip_chain.clips_range,
- needs_scissor_rect,
- ),
- ));
-
- let masks = MaskSubPass {
- clip_node_range: clip_chain.clips_range,
- prim_spatial_node_index,
+) -> RenderTaskId {
+ let task_id = frame_state.rg_builder.add().init(RenderTask::new_dynamic(
+ task_size,
+ RenderTaskKind::new_prim(
+ pattern.kind,
+ pattern.shader_input,
+ raster_spatial_node_index,
+ device_pixel_scale,
+ content_origin,
prim_address_f,
- };
+ transform_id,
+ aa_flags,
+ quad_flags,
+ clip_chain.clips_range,
+ needs_scissor_rect,
+ ),
+ ));
- let task = frame_state.rg_builder.get_task_mut(task_id);
- task.add_sub_pass(SubPass::Masks { masks });
+ let masks = MaskSubPass {
+ clip_node_range: clip_chain.clips_range,
+ prim_spatial_node_index,
+ prim_address_f,
+ };
- frame_state
- .surface_builder
- .add_child_render_task(task_id, frame_state.rg_builder);
+ let task = frame_state.rg_builder.get_task_mut(task_id);
+ task.add_sub_pass(SubPass::Masks { masks });
- task_id
- } else {
- RenderTaskId::INVALID
- };
+ frame_state
+ .surface_builder
+ .add_child_render_task(task_id, frame_state.rg_builder);
- QuadSegment { rect: rect.cast_unit(), task_id }
+ task_id
+}
+
+fn add_pattern_prim(
+ pattern: &Pattern,
+ prim_instance_index: PrimitiveInstanceIndex,
+ rect: LayoutRect,
+ is_opaque: bool,
+ frame_state: &mut FrameBuildingState,
+ targets: &[CommandBufferIndex],
+ segments: &[QuadSegment],
+) {
+ let prim_address = write_prim_blocks(
+ &mut frame_state.frame_gpu_data.f32,
+ rect,
+ rect,
+ pattern.base_color,
+ segments,
+ );
+
+ frame_state.set_segments(segments, targets);
+
+ let mut quad_flags = QuadFlags::IGNORE_DEVICE_PIXEL_SCALE
+ | QuadFlags::APPLY_DEVICE_CLIP;
+
+ if is_opaque {
+ quad_flags |= QuadFlags::IS_OPAQUE;
+ }
+
+ frame_state.push_cmd(
+ &PrimitiveCommand::quad(
+ pattern.kind,
+ pattern.shader_input,
+ prim_instance_index,
+ prim_address,
+ TransformPaletteId::IDENTITY,
+ quad_flags,
+ // TODO(gw): No AA on composite, unless we use it to apply 2d clips
+ EdgeAaSegmentMask::empty(),
+ ),
+ targets,
+ );
}
fn add_composite_prim(
pattern: &Pattern,
+ is_masked: bool,
prim_instance_index: PrimitiveInstanceIndex,
rect: LayoutRect,
- quad_flags: QuadFlags,
frame_state: &mut FrameBuildingState,
targets: &[CommandBufferIndex],
segments: &[QuadSegment],
@@ -504,16 +585,17 @@ fn add_composite_prim(
&mut frame_state.frame_gpu_data.f32,
rect,
rect,
- pattern.base_color,
+ PremultipliedColorF::WHITE,
segments,
);
frame_state.set_segments(segments, targets);
- let mut composite_quad_flags =
- QuadFlags::IGNORE_DEVICE_PIXEL_SCALE | QuadFlags::APPLY_DEVICE_CLIP;
- if quad_flags.contains(QuadFlags::IS_OPAQUE) {
- composite_quad_flags |= QuadFlags::IS_OPAQUE;
+ let mut quad_flags = QuadFlags::IGNORE_DEVICE_PIXEL_SCALE
+ | QuadFlags::APPLY_DEVICE_CLIP;
+
+ if pattern.is_opaque && !is_masked {
+ quad_flags |= QuadFlags::IS_OPAQUE;
}
frame_state.push_cmd(
@@ -523,7 +605,7 @@ fn add_composite_prim(
prim_instance_index,
composite_prim_address,
TransformPaletteId::IDENTITY,
- composite_quad_flags,
+ quad_flags,
// TODO(gw): No AA on composite, unless we use it to apply 2d clips
EdgeAaSegmentMask::empty(),
),
@@ -562,13 +644,13 @@ pub fn write_prim_blocks(
pub fn add_to_batch<F>(
kind: PatternKind,
pattern_input: PatternShaderInput,
- render_task_address: RenderTaskAddress,
+ dst_task_address: RenderTaskAddress,
transform_id: TransformPaletteId,
prim_address_f: GpuBufferAddress,
quad_flags: QuadFlags,
edge_flags: EdgeAaSegmentMask,
segment_index: u8,
- task_id: RenderTaskId,
+ src_task_id: RenderTaskId,
z_id: ZBufferId,
render_tasks: &RenderTaskGraph,
gpu_buffer_builder: &mut GpuBufferBuilder,
@@ -596,13 +678,11 @@ pub fn add_to_batch<F>(
]);
let prim_address_i = writer.finish();
- let texture = match task_id {
- RenderTaskId::INVALID => {
- TextureSource::Invalid
- }
+ let texture = match src_task_id {
+ RenderTaskId::INVALID => TextureSource::Invalid,
_ => {
let texture = render_tasks
- .resolve_texture(task_id)
+ .resolve_texture(src_task_id)
.expect("bug: valid task id must be resolvable");
texture
@@ -614,7 +694,7 @@ pub fn add_to_batch<F>(
TextureSource::Invalid,
);
- let default_blend_mode = if quad_flags.contains(QuadFlags::IS_OPAQUE) && task_id == RenderTaskId::INVALID {
+ let default_blend_mode = if quad_flags.contains(QuadFlags::IS_OPAQUE) {
BlendMode::None
} else {
BlendMode::PremultipliedAlpha
@@ -635,7 +715,7 @@ pub fn add_to_batch<F>(
};
let mut instance = QuadInstance {
- render_task_address,
+ dst_task_address,
prim_address_i,
prim_address_f,
z_id,
diff --git a/gfx/wr/webrender/src/render_target.rs b/gfx/wr/webrender/src/render_target.rs
index f53b5dd4f8..c50a1b2303 100644
--- a/gfx/wr/webrender/src/render_target.rs
+++ b/gfx/wr/webrender/src/render_target.rs
@@ -14,10 +14,10 @@ use crate::spatial_tree::SpatialTree;
use crate::clip::{ClipStore, ClipItemKind};
use crate::frame_builder::{FrameGlobalResources};
use crate::gpu_cache::{GpuCache, GpuCacheAddress};
-use crate::gpu_types::{BorderInstance, SvgFilterInstance, BlurDirection, BlurInstance, PrimitiveHeaders, ScalingInstance};
+use crate::gpu_types::{BorderInstance, SvgFilterInstance, SVGFEFilterInstance, BlurDirection, BlurInstance, PrimitiveHeaders, ScalingInstance};
use crate::gpu_types::{TransformPalette, ZBufferIdGenerator, MaskInstance, ClipSpace};
use crate::gpu_types::{ZBufferId, QuadSegment, PrimitiveInstanceData, TransformPaletteId};
-use crate::internal_types::{FastHashMap, TextureSource, CacheTextureId};
+use crate::internal_types::{FastHashMap, TextureSource, CacheTextureId, FilterGraphOp};
use crate::picture::{SliceId, SurfaceInfo, ResolvedSurfaceTexture, TileCacheInstance};
use crate::quad;
use crate::prim_store::{PrimitiveInstance, PrimitiveStore, PrimitiveScratchBuffer};
@@ -28,7 +28,7 @@ use crate::prim_store::gradient::{
use crate::renderer::{GpuBufferAddress, GpuBufferBuilder};
use crate::render_backend::DataStores;
use crate::render_task::{RenderTaskKind, RenderTaskAddress, SubPass};
-use crate::render_task::{RenderTask, ScalingTask, SvgFilterInfo, MaskSubPass};
+use crate::render_task::{RenderTask, ScalingTask, SvgFilterInfo, MaskSubPass, SVGFEFilterTask};
use crate::render_task_graph::{RenderTaskGraph, RenderTaskId};
use crate::resource_cache::ResourceCache;
use crate::spatial_tree::{SpatialNodeIndex};
@@ -226,6 +226,7 @@ pub struct ColorRenderTarget {
pub horizontal_blurs: FastHashMap<TextureSource, Vec<BlurInstance>>,
pub scalings: FastHashMap<TextureSource, Vec<ScalingInstance>>,
pub svg_filters: Vec<(BatchTextures, Vec<SvgFilterInstance>)>,
+ pub svg_nodes: Vec<(BatchTextures, Vec<SVGFEFilterInstance>)>,
pub blits: Vec<BlitJob>,
alpha_tasks: Vec<RenderTaskId>,
screen_size: DeviceIntSize,
@@ -256,6 +257,7 @@ impl RenderTarget for ColorRenderTarget {
horizontal_blurs: FastHashMap::default(),
scalings: FastHashMap::default(),
svg_filters: Vec::new(),
+ svg_nodes: Vec::new(),
blits: Vec::new(),
alpha_tasks: Vec::new(),
screen_size,
@@ -263,7 +265,7 @@ impl RenderTarget for ColorRenderTarget {
used_rect,
resolve_ops: Vec::new(),
clear_color: Some(ColorF::TRANSPARENT),
- prim_instances: [Vec::new(), Vec::new()],
+ prim_instances: [Vec::new(), Vec::new(), Vec::new(), Vec::new()],
prim_instances_with_scissor: FastHashMap::default(),
clip_masks: ClipMaskInstanceList::new(),
}
@@ -438,6 +440,17 @@ impl RenderTarget for ColorRenderTarget {
task_info.extra_gpu_cache_handle.map(|handle| gpu_cache.get_address(&handle)),
)
}
+ RenderTaskKind::SVGFENode(ref task_info) => {
+ add_svg_filter_node_instances(
+ &mut self.svg_nodes,
+ render_tasks,
+ &task_info,
+ task,
+ task.children.get(0).cloned(),
+ task.children.get(1).cloned(),
+ task_info.extra_gpu_cache_handle.map(|handle| gpu_cache.get_address(&handle)),
+ )
+ }
RenderTaskKind::Image(..) |
RenderTaskKind::Cached(..) |
RenderTaskKind::ClipRegion(..) |
@@ -559,7 +572,8 @@ impl RenderTarget for AlphaRenderTarget {
RenderTaskKind::ConicGradient(..) |
RenderTaskKind::TileComposite(..) |
RenderTaskKind::Prim(..) |
- RenderTaskKind::SvgFilter(..) => {
+ RenderTaskKind::SvgFilter(..) |
+ RenderTaskKind::SVGFENode(..) => {
panic!("BUG: should not be added to alpha target!");
}
RenderTaskKind::Empty(..) => {
@@ -799,7 +813,8 @@ impl TextureCacheRenderTarget {
RenderTaskKind::Scaling(..) |
RenderTaskKind::TileComposite(..) |
RenderTaskKind::Empty(..) |
- RenderTaskKind::SvgFilter(..) => {
+ RenderTaskKind::SvgFilter(..) |
+ RenderTaskKind::SVGFENode(..) => {
panic!("BUG: unexpected task kind for texture cache target");
}
#[cfg(test)]
@@ -945,6 +960,175 @@ fn add_svg_filter_instances(
instances.push((textures, vec![instance]));
}
+/// Generates SVGFEFilterInstances from a single SVGFEFilterTask; this is what
+/// prepares vertex data for the shader, and adds it to the appropriate batch.
+///
+/// The interesting parts of the handling of SVG filters are:
+/// * scene_building.rs : wrap_prim_with_filters
+/// * picture.rs : get_coverage_svgfe
+/// * render_task.rs : new_svg_filter_graph
+/// * render_target.rs : add_svg_filter_node_instances (you are here)
+fn add_svg_filter_node_instances(
+ instances: &mut Vec<(BatchTextures, Vec<SVGFEFilterInstance>)>,
+ render_tasks: &RenderTaskGraph,
+ task_info: &SVGFEFilterTask,
+ target_task: &RenderTask,
+ input_1_task: Option<RenderTaskId>,
+ input_2_task: Option<RenderTaskId>,
+ extra_data_address: Option<GpuCacheAddress>,
+) {
+ let node = &task_info.node;
+ let op = &task_info.op;
+ let mut textures = BatchTextures::empty();
+
+ // We have to undo the inflate here as the inflated target rect is meant to
+ // have a blank border
+ let target_rect = target_task
+ .get_target_rect()
+ .inner_box(DeviceIntSideOffsets::new(node.inflate as i32, node.inflate as i32, node.inflate as i32, node.inflate as i32))
+ .to_f32();
+
+ let mut instance = SVGFEFilterInstance {
+ target_rect,
+ input_1_content_scale_and_offset: [0.0; 4],
+ input_2_content_scale_and_offset: [0.0; 4],
+ input_1_task_address: RenderTaskId::INVALID.into(),
+ input_2_task_address: RenderTaskId::INVALID.into(),
+ kind: 0,
+ input_count: node.inputs.len() as u16,
+ extra_data_address: extra_data_address.unwrap_or(GpuCacheAddress::INVALID),
+ };
+
+ // Must match FILTER_* in cs_svg_filter_node.glsl
+ instance.kind = match op {
+ // Identity does not modify color, no linear case
+ FilterGraphOp::SVGFEIdentity => 0,
+ // SourceGraphic does not have its own shader mode, it uses Identity.
+ FilterGraphOp::SVGFESourceGraphic => 0,
+ // SourceAlpha does not have its own shader mode, it uses ToAlpha.
+ FilterGraphOp::SVGFESourceAlpha => 4,
+ // Opacity scales the entire rgba color, so it does not need a linear
+ // case as the rgb / a ratio does not change (sRGB is a curve on the RGB
+ // before alpha multiply, not after)
+ FilterGraphOp::SVGFEOpacity{..} => 2,
+ FilterGraphOp::SVGFEToAlpha => 4,
+ FilterGraphOp::SVGFEBlendColor => {match node.linear {false => 6, true => 7}},
+ FilterGraphOp::SVGFEBlendColorBurn => {match node.linear {false => 8, true => 9}},
+ FilterGraphOp::SVGFEBlendColorDodge => {match node.linear {false => 10, true => 11}},
+ FilterGraphOp::SVGFEBlendDarken => {match node.linear {false => 12, true => 13}},
+ FilterGraphOp::SVGFEBlendDifference => {match node.linear {false => 14, true => 15}},
+ FilterGraphOp::SVGFEBlendExclusion => {match node.linear {false => 16, true => 17}},
+ FilterGraphOp::SVGFEBlendHardLight => {match node.linear {false => 18, true => 19}},
+ FilterGraphOp::SVGFEBlendHue => {match node.linear {false => 20, true => 21}},
+ FilterGraphOp::SVGFEBlendLighten => {match node.linear {false => 22, true => 23}},
+ FilterGraphOp::SVGFEBlendLuminosity => {match node.linear {false => 24, true => 25}},
+ FilterGraphOp::SVGFEBlendMultiply => {match node.linear {false => 26, true => 27}},
+ FilterGraphOp::SVGFEBlendNormal => {match node.linear {false => 28, true => 29}},
+ FilterGraphOp::SVGFEBlendOverlay => {match node.linear {false => 30, true => 31}},
+ FilterGraphOp::SVGFEBlendSaturation => {match node.linear {false => 32, true => 33}},
+ FilterGraphOp::SVGFEBlendScreen => {match node.linear {false => 34, true => 35}},
+ FilterGraphOp::SVGFEBlendSoftLight => {match node.linear {false => 36, true => 37}},
+ FilterGraphOp::SVGFEColorMatrix{..} => {match node.linear {false => 38, true => 39}},
+ FilterGraphOp::SVGFEComponentTransfer => unreachable!(),
+ FilterGraphOp::SVGFEComponentTransferInterned{..} => {match node.linear {false => 40, true => 41}},
+ FilterGraphOp::SVGFECompositeArithmetic{..} => {match node.linear {false => 42, true => 43}},
+ FilterGraphOp::SVGFECompositeATop => {match node.linear {false => 44, true => 45}},
+ FilterGraphOp::SVGFECompositeIn => {match node.linear {false => 46, true => 47}},
+ FilterGraphOp::SVGFECompositeLighter => {match node.linear {false => 48, true => 49}},
+ FilterGraphOp::SVGFECompositeOut => {match node.linear {false => 50, true => 51}},
+ FilterGraphOp::SVGFECompositeOver => {match node.linear {false => 52, true => 53}},
+ FilterGraphOp::SVGFECompositeXOR => {match node.linear {false => 54, true => 55}},
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeDuplicate{..} => {match node.linear {false => 56, true => 57}},
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeNone{..} => {match node.linear {false => 58, true => 59}},
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeWrap{..} => {match node.linear {false => 60, true => 61}},
+ FilterGraphOp::SVGFEDiffuseLightingDistant{..} => {match node.linear {false => 62, true => 63}},
+ FilterGraphOp::SVGFEDiffuseLightingPoint{..} => {match node.linear {false => 64, true => 65}},
+ FilterGraphOp::SVGFEDiffuseLightingSpot{..} => {match node.linear {false => 66, true => 67}},
+ FilterGraphOp::SVGFEDisplacementMap{..} => {match node.linear {false => 68, true => 69}},
+ FilterGraphOp::SVGFEDropShadow{..} => {match node.linear {false => 70, true => 71}},
+ // feFlood takes an sRGB color and does no math on it, no linear case
+ FilterGraphOp::SVGFEFlood{..} => 72,
+ FilterGraphOp::SVGFEGaussianBlur{..} => {match node.linear {false => 74, true => 75}},
+ // feImage does not meaningfully modify the color of its input, though a
+ // case could be made for gamma-correct image scaling; that's a bit out
+ // of scope for now
+ FilterGraphOp::SVGFEImage{..} => 76,
+ FilterGraphOp::SVGFEMorphologyDilate{..} => {match node.linear {false => 80, true => 81}},
+ FilterGraphOp::SVGFEMorphologyErode{..} => {match node.linear {false => 82, true => 83}},
+ FilterGraphOp::SVGFESpecularLightingDistant{..} => {match node.linear {false => 86, true => 87}},
+ FilterGraphOp::SVGFESpecularLightingPoint{..} => {match node.linear {false => 88, true => 89}},
+ FilterGraphOp::SVGFESpecularLightingSpot{..} => {match node.linear {false => 90, true => 91}},
+ // feTile does not modify color, no linear case
+ FilterGraphOp::SVGFETile => 92,
+ FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithNoStitching{..} => {match node.linear {false => 94, true => 95}},
+ FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithStitching{..} => {match node.linear {false => 96, true => 97}},
+ FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithNoStitching{..} => {match node.linear {false => 98, true => 99}},
+ FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithStitching{..} => {match node.linear {false => 100, true => 101}},
+ };
+
+ // This is a bit of an ugly way to do this, but avoids code duplication.
+ let mut resolve_input = |index: usize, src_task: Option<RenderTaskId>| -> (RenderTaskAddress, [f32; 4]) {
+ let mut src_task_id = RenderTaskId::INVALID;
+ let mut resolved_scale_and_offset: [f32; 4] = [0.0; 4];
+ if let Some(input) = node.inputs.get(index) {
+ src_task_id = src_task.unwrap();
+ let src_task = &render_tasks[src_task_id];
+
+ textures.input.colors[index] = src_task.get_texture_source();
+ let src_task_size = src_task.location.size();
+ let src_scale_x = (src_task_size.width as f32 - input.inflate as f32 * 2.0) / input.subregion.width();
+ let src_scale_y = (src_task_size.height as f32 - input.inflate as f32 * 2.0) / input.subregion.height();
+ let scale_x = src_scale_x * node.subregion.width();
+ let scale_y = src_scale_y * node.subregion.height();
+ let offset_x = src_scale_x * (node.subregion.min.x - input.subregion.min.x) + input.inflate as f32;
+ let offset_y = src_scale_y * (node.subregion.min.y - input.subregion.min.y) + input.inflate as f32;
+ resolved_scale_and_offset = [
+ scale_x,
+ scale_y,
+ offset_x,
+ offset_y];
+ }
+ let address: RenderTaskAddress = src_task_id.into();
+ (address, resolved_scale_and_offset)
+ };
+ (instance.input_1_task_address, instance.input_1_content_scale_and_offset) = resolve_input(0, input_1_task);
+ (instance.input_2_task_address, instance.input_2_content_scale_and_offset) = resolve_input(1, input_2_task);
+
+ // Additional instance modifications for certain filters
+ match op {
+ FilterGraphOp::SVGFEOpacity { valuebinding: _, value } => {
+ // opacity only has one input so we can use the other
+ // components to store the opacity value
+ instance.input_2_content_scale_and_offset = [*value, 0.0, 0.0, 0.0];
+ },
+ FilterGraphOp::SVGFEMorphologyDilate { radius_x, radius_y } |
+ FilterGraphOp::SVGFEMorphologyErode { radius_x, radius_y } => {
+ // morphology filters only use one input, so we use the
+ // second offset coord to store the radius values.
+ instance.input_2_content_scale_and_offset = [*radius_x, *radius_y, 0.0, 0.0];
+ },
+ FilterGraphOp::SVGFEFlood { color } => {
+ // flood filters don't use inputs, so we store color here.
+ // We can't do the same trick on DropShadow because it does have two
+ // inputs.
+ instance.input_2_content_scale_and_offset = [color.r, color.g, color.b, color.a];
+ },
+ _ => {},
+ }
+
+ for (ref mut batch_textures, ref mut batch) in instances.iter_mut() {
+ if let Some(combined_textures) = batch_textures.combine_textures(textures) {
+ batch.push(instance);
+ // Update the batch textures to the newly combined batch textures
+ *batch_textures = combined_textures;
+ return;
+ }
+ }
+
+ instances.push((textures, vec![instance]));
+}
+
// Information required to do a blit from a source to a target.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
diff --git a/gfx/wr/webrender/src/render_task.rs b/gfx/wr/webrender/src/render_task.rs
index bf9050712c..5106971591 100644
--- a/gfx/wr/webrender/src/render_task.rs
+++ b/gfx/wr/webrender/src/render_task.rs
@@ -3,19 +3,20 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use api::{CompositeOperator, FilterPrimitive, FilterPrimitiveInput, FilterPrimitiveKind};
-use api::{LineStyle, LineOrientation, ClipMode, MixBlendMode, ColorF, ColorSpace};
+use api::{LineStyle, LineOrientation, ClipMode, MixBlendMode, ColorF, ColorSpace, FilterOpGraphPictureBufferId};
use api::MAX_RENDER_TASK_SIZE;
use api::units::*;
+use crate::box_shadow::BLUR_SAMPLE_SCALE;
use crate::clip::{ClipDataStore, ClipItemKind, ClipStore, ClipNodeRange};
use crate::command_buffer::{CommandBufferIndex, QuadFlags};
use crate::pattern::{PatternKind, PatternShaderInput};
use crate::spatial_tree::SpatialNodeIndex;
use crate::filterdata::SFilterData;
-use crate::frame_builder::FrameBuilderConfig;
+use crate::frame_builder::{FrameBuilderConfig, FrameBuildingState};
use crate::gpu_cache::{GpuCache, GpuCacheAddress, GpuCacheHandle};
use crate::gpu_types::{BorderInstance, ImageSource, UvRectKind, TransformPaletteId};
-use crate::internal_types::{CacheTextureId, FastHashMap, TextureSource, Swizzle};
-use crate::picture::ResolvedSurfaceTexture;
+use crate::internal_types::{CacheTextureId, FastHashMap, FilterGraphNode, FilterGraphOp, FilterGraphPictureReference, SVGFE_CONVOLVE_VALUES_LIMIT, TextureSource, Swizzle};
+use crate::picture::{ResolvedSurfaceTexture, MAX_SURFACE_SIZE};
use crate::prim_store::ClipData;
use crate::prim_store::gradient::{
FastLinearGradientTask, RadialGradientTask,
@@ -24,6 +25,7 @@ use crate::prim_store::gradient::{
use crate::resource_cache::{ResourceCache, ImageRequest};
use std::{usize, f32, i32, u32};
use crate::renderer::{GpuBufferAddress, GpuBufferBuilderF};
+use crate::render_backend::DataStores;
use crate::render_target::{ResolveOp, RenderTargetKind};
use crate::render_task_graph::{PassId, RenderTaskId, RenderTaskGraphBuilder};
use crate::render_task_cache::{RenderTaskCacheEntryHandle, RenderTaskCacheKey, RenderTaskCacheKeyKind, RenderTaskParent};
@@ -190,7 +192,6 @@ pub struct PrimTask {
pub device_pixel_scale: DevicePixelScale,
pub content_origin: DevicePoint,
pub prim_address_f: GpuBufferAddress,
- pub prim_spatial_node_index: SpatialNodeIndex,
pub raster_spatial_node_index: SpatialNodeIndex,
pub transform_id: TransformPaletteId,
pub edge_flags: EdgeAaSegmentMask,
@@ -335,6 +336,16 @@ pub struct SvgFilterTask {
pub extra_gpu_cache_handle: Option<GpuCacheHandle>,
}
+#[derive(Debug)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct SVGFEFilterTask {
+ pub node: FilterGraphNode,
+ pub op: FilterGraphOp,
+ pub content_origin: DevicePoint,
+ pub extra_gpu_cache_handle: Option<GpuCacheHandle>,
+}
+
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ReadbackTask {
@@ -372,6 +383,7 @@ pub enum RenderTaskKind {
RadialGradient(RadialGradientTask),
ConicGradient(ConicGradientTask),
SvgFilter(SvgFilterTask),
+ SVGFENode(SVGFEFilterTask),
TileComposite(TileCompositeTask),
Prim(PrimTask),
Empty(EmptyTask),
@@ -423,6 +435,7 @@ impl RenderTaskKind {
RenderTaskKind::RadialGradient(..) => "RadialGradient",
RenderTaskKind::ConicGradient(..) => "ConicGradient",
RenderTaskKind::SvgFilter(..) => "SvgFilter",
+ RenderTaskKind::SVGFENode(..) => "SVGFENode",
RenderTaskKind::TileComposite(..) => "TileComposite",
RenderTaskKind::Prim(..) => "Prim",
RenderTaskKind::Empty(..) => "Empty",
@@ -448,6 +461,9 @@ impl RenderTaskKind {
RenderTaskKind::SvgFilter(..) => {
RenderTargetKind::Color
}
+ RenderTaskKind::SVGFENode(..) => {
+ RenderTargetKind::Color
+ }
RenderTaskKind::ClipRegion(..) |
RenderTaskKind::CacheMask(..) |
@@ -521,7 +537,6 @@ impl RenderTaskKind {
pub fn new_prim(
pattern: PatternKind,
pattern_input: PatternShaderInput,
- prim_spatial_node_index: SpatialNodeIndex,
raster_spatial_node_index: SpatialNodeIndex,
device_pixel_scale: DevicePixelScale,
content_origin: DevicePoint,
@@ -535,7 +550,6 @@ impl RenderTaskKind {
RenderTaskKind::Prim(PrimTask {
pattern,
pattern_input,
- prim_spatial_node_index,
raster_spatial_node_index,
device_pixel_scale,
content_origin,
@@ -791,6 +805,11 @@ impl RenderTaskKind {
_ => [0.0; 4]
}
}
+ RenderTaskKind::SVGFENode(_task) => {
+ // we don't currently use this for SVGFE filters.
+ // see SVGFEFilterInstance instead
+ [0.0; 4]
+ }
#[cfg(test)]
RenderTaskKind::Test(..) => {
@@ -816,39 +835,138 @@ impl RenderTaskKind {
&mut self,
gpu_cache: &mut GpuCache,
) {
- if let RenderTaskKind::SvgFilter(ref mut filter_task) = self {
- match filter_task.info {
- SvgFilterInfo::ColorMatrix(ref matrix) => {
- let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new);
- if let Some(mut request) = gpu_cache.request(handle) {
- for i in 0..5 {
- request.push([matrix[i*4], matrix[i*4+1], matrix[i*4+2], matrix[i*4+3]]);
+ match self {
+ RenderTaskKind::SvgFilter(ref mut filter_task) => {
+ match filter_task.info {
+ SvgFilterInfo::ColorMatrix(ref matrix) => {
+ let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new);
+ if let Some(mut request) = gpu_cache.request(handle) {
+ for i in 0..5 {
+ request.push([matrix[i*4], matrix[i*4+1], matrix[i*4+2], matrix[i*4+3]]);
+ }
}
}
- }
- SvgFilterInfo::DropShadow(color) |
- SvgFilterInfo::Flood(color) => {
- let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new);
- if let Some(mut request) = gpu_cache.request(handle) {
- request.push(color.to_array());
+ SvgFilterInfo::DropShadow(color) |
+ SvgFilterInfo::Flood(color) => {
+ let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new);
+ if let Some(mut request) = gpu_cache.request(handle) {
+ request.push(color.to_array());
+ }
}
- }
- SvgFilterInfo::ComponentTransfer(ref data) => {
- let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new);
- if let Some(request) = gpu_cache.request(handle) {
- data.update(request);
+ SvgFilterInfo::ComponentTransfer(ref data) => {
+ let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new);
+ if let Some(request) = gpu_cache.request(handle) {
+ data.update(request);
+ }
}
+ SvgFilterInfo::Composite(ref operator) => {
+ if let CompositeOperator::Arithmetic(k_vals) = operator {
+ let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new);
+ if let Some(mut request) = gpu_cache.request(handle) {
+ request.push(*k_vals);
+ }
+ }
+ }
+ _ => {},
}
- SvgFilterInfo::Composite(ref operator) => {
- if let CompositeOperator::Arithmetic(k_vals) = operator {
+ }
+ RenderTaskKind::SVGFENode(ref mut filter_task) => {
+ match filter_task.op {
+ FilterGraphOp::SVGFEBlendDarken => {}
+ FilterGraphOp::SVGFEBlendLighten => {}
+ FilterGraphOp::SVGFEBlendMultiply => {}
+ FilterGraphOp::SVGFEBlendNormal => {}
+ FilterGraphOp::SVGFEBlendScreen => {}
+ FilterGraphOp::SVGFEBlendOverlay => {}
+ FilterGraphOp::SVGFEBlendColorDodge => {}
+ FilterGraphOp::SVGFEBlendColorBurn => {}
+ FilterGraphOp::SVGFEBlendHardLight => {}
+ FilterGraphOp::SVGFEBlendSoftLight => {}
+ FilterGraphOp::SVGFEBlendDifference => {}
+ FilterGraphOp::SVGFEBlendExclusion => {}
+ FilterGraphOp::SVGFEBlendHue => {}
+ FilterGraphOp::SVGFEBlendSaturation => {}
+ FilterGraphOp::SVGFEBlendColor => {}
+ FilterGraphOp::SVGFEBlendLuminosity => {}
+ FilterGraphOp::SVGFEColorMatrix{values: matrix} => {
+ let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new);
+ if let Some(mut request) = gpu_cache.request(handle) {
+ for i in 0..5 {
+ request.push([matrix[i*4], matrix[i*4+1], matrix[i*4+2], matrix[i*4+3]]);
+ }
+ }
+ }
+ FilterGraphOp::SVGFEComponentTransfer => unreachable!(),
+ FilterGraphOp::SVGFEComponentTransferInterned{..} => {}
+ FilterGraphOp::SVGFECompositeArithmetic{k1, k2, k3, k4} => {
+ let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new);
+ if let Some(mut request) = gpu_cache.request(handle) {
+ request.push([k1, k2, k3, k4]);
+ }
+ }
+ FilterGraphOp::SVGFECompositeATop => {}
+ FilterGraphOp::SVGFECompositeIn => {}
+ FilterGraphOp::SVGFECompositeLighter => {}
+ FilterGraphOp::SVGFECompositeOut => {}
+ FilterGraphOp::SVGFECompositeOver => {}
+ FilterGraphOp::SVGFECompositeXOR => {}
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeDuplicate{order_x, order_y, kernel, divisor, bias, target_x, target_y, kernel_unit_length_x, kernel_unit_length_y, preserve_alpha} |
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeNone{order_x, order_y, kernel, divisor, bias, target_x, target_y, kernel_unit_length_x, kernel_unit_length_y, preserve_alpha} |
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeWrap{order_x, order_y, kernel, divisor, bias, target_x, target_y, kernel_unit_length_x, kernel_unit_length_y, preserve_alpha} => {
+ let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new);
+ if let Some(mut request) = gpu_cache.request(handle) {
+ request.push([-target_x as f32, -target_y as f32, order_x as f32, order_y as f32]);
+ request.push([kernel_unit_length_x as f32, kernel_unit_length_y as f32, 1.0 / divisor, bias]);
+ assert!(SVGFE_CONVOLVE_VALUES_LIMIT == 25);
+ request.push([kernel[0], kernel[1], kernel[2], kernel[3]]);
+ request.push([kernel[4], kernel[5], kernel[6], kernel[7]]);
+ request.push([kernel[8], kernel[9], kernel[10], kernel[11]]);
+ request.push([kernel[12], kernel[13], kernel[14], kernel[15]]);
+ request.push([kernel[16], kernel[17], kernel[18], kernel[19]]);
+ request.push([kernel[20], 0.0, 0.0, preserve_alpha as f32]);
+ }
+ }
+ FilterGraphOp::SVGFEDiffuseLightingDistant{..} => {}
+ FilterGraphOp::SVGFEDiffuseLightingPoint{..} => {}
+ FilterGraphOp::SVGFEDiffuseLightingSpot{..} => {}
+ FilterGraphOp::SVGFEDisplacementMap{scale, x_channel_selector, y_channel_selector} => {
+ let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new);
+ if let Some(mut request) = gpu_cache.request(handle) {
+ request.push([x_channel_selector as f32, y_channel_selector as f32, scale, 0.0]);
+ }
+ }
+ FilterGraphOp::SVGFEDropShadow{color, ..} |
+ FilterGraphOp::SVGFEFlood{color} => {
+ let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new);
+ if let Some(mut request) = gpu_cache.request(handle) {
+ request.push(color.to_array());
+ }
+ }
+ FilterGraphOp::SVGFEGaussianBlur{..} => {}
+ FilterGraphOp::SVGFEIdentity => {}
+ FilterGraphOp::SVGFEImage{..} => {}
+ FilterGraphOp::SVGFEMorphologyDilate{radius_x, radius_y} |
+ FilterGraphOp::SVGFEMorphologyErode{radius_x, radius_y} => {
let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new);
if let Some(mut request) = gpu_cache.request(handle) {
- request.push(*k_vals);
+ request.push([radius_x, radius_y, 0.0, 0.0]);
}
}
+ FilterGraphOp::SVGFEOpacity{..} => {}
+ FilterGraphOp::SVGFESourceAlpha => {}
+ FilterGraphOp::SVGFESourceGraphic => {}
+ FilterGraphOp::SVGFESpecularLightingDistant{..} => {}
+ FilterGraphOp::SVGFESpecularLightingPoint{..} => {}
+ FilterGraphOp::SVGFESpecularLightingSpot{..} => {}
+ FilterGraphOp::SVGFETile => {}
+ FilterGraphOp::SVGFEToAlpha{..} => {}
+ FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithNoStitching{..} => {}
+ FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithStitching{..} => {}
+ FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithNoStitching{..} => {}
+ FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithStitching{..} => {}
}
- _ => {},
}
+ _ => {}
}
}
}
@@ -1510,6 +1628,1115 @@ impl RenderTask {
self.sub_pass = Some(sub_pass);
}
+ /// Creates render tasks from PictureCompositeMode::SVGFEGraph.
+ ///
+ /// The interesting parts of the handling of SVG filters are:
+ /// * scene_building.rs : wrap_prim_with_filters
+ /// * picture.rs : get_coverage_svgfe
+ /// * render_task.rs : new_svg_filter_graph (you are here)
+ /// * render_target.rs : add_svg_filter_node_instances
+ pub fn new_svg_filter_graph(
+ filter_nodes: &[(FilterGraphNode, FilterGraphOp)],
+ frame_state: &mut FrameBuildingState,
+ data_stores: &mut DataStores,
+ uv_rect_kind: UvRectKind,
+ original_task_id: RenderTaskId,
+ _surface_rects_task_size: DeviceIntSize,
+ surface_rects_clipped: DeviceRect,
+ surface_rects_clipped_local: PictureRect,
+ ) -> RenderTaskId {
+ const BUFFER_LIMIT: usize = 256;
+ let mut task_by_buffer_id: [RenderTaskId; BUFFER_LIMIT] = [RenderTaskId::INVALID; BUFFER_LIMIT];
+ let mut subregion_by_buffer_id: [LayoutRect; BUFFER_LIMIT] = [LayoutRect::zero(); BUFFER_LIMIT];
+ // If nothing replaces this value (all node subregions are empty), we
+ // can just return the original picture
+ let mut output_task_id = original_task_id;
+
+ // By this point we assume the following about the graph:
+ // * BUFFER_LIMIT here should be >= BUFFER_LIMIT in the scene_building.rs code.
+ // * input buffer id < output buffer id
+ // * output buffer id between 0 and BUFFER_LIMIT
+ // * the number of filter_datas matches the number of kept nodes with op
+ // SVGFEComponentTransfer.
+ //
+ // These assumptions are verified with asserts in this function as
+ // appropriate.
+
+ // Converts a UvRectKind::Quad to a subregion; we need this for
+ // SourceGraphic because it could source from a larger image when doing
+ // a dirty rect update. In theory this can be used for blur output as
+ // well, but it doesn't seem to be necessary from early testing.
+ //
+ // See calculate_uv_rect_kind in picture.rs for how these were generated.
+ fn subregion_for_uvrectkind(kind: &UvRectKind, rect: LayoutRect) -> LayoutRect {
+ let used =
+ match kind {
+ UvRectKind::Quad{top_left: tl, top_right: _tr, bottom_left: _bl, bottom_right: br} => {
+ LayoutRect::new(
+ LayoutPoint::new(
+ rect.min.x + rect.width() * tl.x / tl.w,
+ rect.min.y + rect.height() * tl.y / tl.w,
+ ),
+ LayoutPoint::new(
+ rect.min.x + rect.width() * br.x / br.w,
+ rect.min.y + rect.height() * br.y / br.w,
+ ),
+ )
+ }
+ UvRectKind::Rect => {
+ rect
+ }
+ };
+ // For some reason, the following reftest passes a uv_rect_kind that
+ // resolves to [-.2, -.2, -.2, -.2]:
+ // reftest layout/reftests/svg/filters/dynamic-filter-invalidation-01.svg
+ match used.is_empty() {
+ true => rect,
+ false => used,
+ }
+ }
+
+ // Make a UvRectKind::Quad that represents a task for a node, which may
+ // have an inflate border. It must be a Quad because the surface_rects
+ // compositing shader expects one; we don't actually use this internally,
+ // as we use subregions instead. See calculate_uv_rect_kind for how this
+ // works: it projects from clipped rect to unclipped rect, where our
+ // clipped rect is simply task_size minus the inflate, and unclipped is
+ // our full task_size.
+ fn uv_rect_kind_for_task_size(task_size: DeviceIntSize, inflate: i16) -> UvRectKind {
+ let unclipped = DeviceRect::new(
+ DevicePoint::new(
+ inflate as f32,
+ inflate as f32,
+ ),
+ DevicePoint::new(
+ task_size.width as f32 - inflate as f32,
+ task_size.height as f32 - inflate as f32,
+ ),
+ );
+ let clipped = DeviceRect::new(
+ DevicePoint::zero(),
+ DevicePoint::new(
+ task_size.width as f32,
+ task_size.height as f32,
+ ),
+ );
+ let scale_x = 1.0 / clipped.width();
+ let scale_y = 1.0 / clipped.height();
+ UvRectKind::Quad{
+ top_left: DeviceHomogeneousVector::new(
+ (unclipped.min.x - clipped.min.x) * scale_x,
+ (unclipped.min.y - clipped.min.y) * scale_y,
+ 0.0, 1.0),
+ top_right: DeviceHomogeneousVector::new(
+ (unclipped.max.x - clipped.min.x) * scale_x,
+ (unclipped.min.y - clipped.min.y) * scale_y,
+ 0.0, 1.0),
+ bottom_left: DeviceHomogeneousVector::new(
+ (unclipped.min.x - clipped.min.x) * scale_x,
+ (unclipped.max.y - clipped.min.y) * scale_y,
+ 0.0, 1.0),
+ bottom_right: DeviceHomogeneousVector::new(
+ (unclipped.max.x - clipped.min.x) * scale_x,
+ (unclipped.max.y - clipped.min.y) * scale_y,
+ 0.0, 1.0),
+ }
+ }
+
+ // Determine the local space to device pixel scaling in the most robust
+ // way; this accounts for the local to device transform and
+ // device_pixel_scale (if the task is shrunk in get_surface_rects).
+ //
+ // This has some precision issues because surface_rects_clipped was
+ // rounded already, so it's not exactly the same transform that
+ // get_surface_rects performed, but it is very close. Since it is not
+ // quite the same, we have to round the offset a certain way to avoid
+ // introducing subpixel offsets caused by the slight deviation.
+ let subregion_to_device_scale_x = surface_rects_clipped.width() / surface_rects_clipped_local.width();
+ let subregion_to_device_scale_y = surface_rects_clipped.height() / surface_rects_clipped_local.height();
+ let subregion_to_device_offset_x = surface_rects_clipped.min.x - (surface_rects_clipped_local.min.x * subregion_to_device_scale_x).floor();
+ let subregion_to_device_offset_y = surface_rects_clipped.min.y - (surface_rects_clipped_local.min.y * subregion_to_device_scale_y).floor();
+
+ // We will treat the entire SourceGraphic coordinate space as being this
+ // subregion, which is how large the source picture task is.
+ let filter_subregion: LayoutRect = surface_rects_clipped.cast_unit();
+
+ // Calculate the used subregion (invalidation rect) for SourceGraphic
+ // that we are painting for, the intermediate task sizes are based on
+ // this portion of SourceGraphic, this also serves as a clip on the
+ // SourceGraphic, which is necessary for this reftest:
+ // layout/reftests/svg/filters/svg-filter-chains/clip-original-SourceGraphic.svg
+ let source_subregion =
+ subregion_for_uvrectkind(
+ &uv_rect_kind,
+ surface_rects_clipped.cast_unit(),
+ )
+ .intersection(&filter_subregion)
+ .unwrap_or(LayoutRect::zero())
+ .round_out();
+
+ // This is the rect for the output picture we are producing
+ let output_rect = filter_subregion.to_i32();
+ // Output to the same subregion we were provided
+ let output_subregion = filter_subregion;
+
+ // Iterate the filter nodes and create tasks
+ let mut made_dependency_on_source = false;
+ for (filter_index, (filter_node, op)) in filter_nodes.iter().enumerate() {
+ let node = &filter_node;
+ let is_output = filter_index == filter_nodes.len() - 1;
+
+ // Note that this is never set on the final output by design.
+ if !node.kept_by_optimizer {
+ continue;
+ }
+
+ // Certain ops have parameters that need to be scaled to device
+ // space.
+ let op = match op {
+ FilterGraphOp::SVGFEBlendColor => op.clone(),
+ FilterGraphOp::SVGFEBlendColorBurn => op.clone(),
+ FilterGraphOp::SVGFEBlendColorDodge => op.clone(),
+ FilterGraphOp::SVGFEBlendDarken => op.clone(),
+ FilterGraphOp::SVGFEBlendDifference => op.clone(),
+ FilterGraphOp::SVGFEBlendExclusion => op.clone(),
+ FilterGraphOp::SVGFEBlendHardLight => op.clone(),
+ FilterGraphOp::SVGFEBlendHue => op.clone(),
+ FilterGraphOp::SVGFEBlendLighten => op.clone(),
+ FilterGraphOp::SVGFEBlendLuminosity => op.clone(),
+ FilterGraphOp::SVGFEBlendMultiply => op.clone(),
+ FilterGraphOp::SVGFEBlendNormal => op.clone(),
+ FilterGraphOp::SVGFEBlendOverlay => op.clone(),
+ FilterGraphOp::SVGFEBlendSaturation => op.clone(),
+ FilterGraphOp::SVGFEBlendScreen => op.clone(),
+ FilterGraphOp::SVGFEBlendSoftLight => op.clone(),
+ FilterGraphOp::SVGFEColorMatrix{..} => op.clone(),
+ FilterGraphOp::SVGFEComponentTransfer => unreachable!(),
+ FilterGraphOp::SVGFEComponentTransferInterned{..} => op.clone(),
+ FilterGraphOp::SVGFECompositeArithmetic{..} => op.clone(),
+ FilterGraphOp::SVGFECompositeATop => op.clone(),
+ FilterGraphOp::SVGFECompositeIn => op.clone(),
+ FilterGraphOp::SVGFECompositeLighter => op.clone(),
+ FilterGraphOp::SVGFECompositeOut => op.clone(),
+ FilterGraphOp::SVGFECompositeOver => op.clone(),
+ FilterGraphOp::SVGFECompositeXOR => op.clone(),
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeDuplicate{
+ kernel_unit_length_x, kernel_unit_length_y, order_x,
+ order_y, kernel, divisor, bias, target_x, target_y,
+ preserve_alpha} => {
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeDuplicate{
+ kernel_unit_length_x:
+ (kernel_unit_length_x * subregion_to_device_scale_x).round(),
+ kernel_unit_length_y:
+ (kernel_unit_length_y * subregion_to_device_scale_y).round(),
+ order_x: *order_x, order_y: *order_y, kernel: *kernel,
+ divisor: *divisor, bias: *bias, target_x: *target_x,
+ target_y: *target_y, preserve_alpha: *preserve_alpha}
+ },
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeNone{
+ kernel_unit_length_x, kernel_unit_length_y, order_x,
+ order_y, kernel, divisor, bias, target_x, target_y,
+ preserve_alpha} => {
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeNone{
+ kernel_unit_length_x:
+ (kernel_unit_length_x * subregion_to_device_scale_x).round(),
+ kernel_unit_length_y:
+ (kernel_unit_length_y * subregion_to_device_scale_y).round(),
+ order_x: *order_x, order_y: *order_y, kernel: *kernel,
+ divisor: *divisor, bias: *bias, target_x: *target_x,
+ target_y: *target_y, preserve_alpha: *preserve_alpha}
+ },
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeWrap{
+ kernel_unit_length_x, kernel_unit_length_y, order_x,
+ order_y, kernel, divisor, bias, target_x, target_y,
+ preserve_alpha} => {
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeWrap{
+ kernel_unit_length_x:
+ (kernel_unit_length_x * subregion_to_device_scale_x).round(),
+ kernel_unit_length_y:
+ (kernel_unit_length_y * subregion_to_device_scale_y).round(),
+ order_x: *order_x, order_y: *order_y, kernel: *kernel,
+ divisor: *divisor, bias: *bias, target_x: *target_x,
+ target_y: *target_y, preserve_alpha: *preserve_alpha}
+ },
+ FilterGraphOp::SVGFEDiffuseLightingDistant{
+ surface_scale, diffuse_constant, kernel_unit_length_x,
+ kernel_unit_length_y, azimuth, elevation} => {
+ FilterGraphOp::SVGFEDiffuseLightingDistant{
+ surface_scale: *surface_scale,
+ diffuse_constant: *diffuse_constant,
+ kernel_unit_length_x:
+ (kernel_unit_length_x * subregion_to_device_scale_x).round(),
+ kernel_unit_length_y:
+ (kernel_unit_length_y * subregion_to_device_scale_y).round(),
+ azimuth: *azimuth, elevation: *elevation}
+ },
+ FilterGraphOp::SVGFEDiffuseLightingPoint{
+ surface_scale, diffuse_constant, kernel_unit_length_x,
+ kernel_unit_length_y, x, y, z} => {
+ FilterGraphOp::SVGFEDiffuseLightingPoint{
+ surface_scale: *surface_scale,
+ diffuse_constant: *diffuse_constant,
+ kernel_unit_length_x:
+ (kernel_unit_length_x * subregion_to_device_scale_x).round(),
+ kernel_unit_length_y:
+ (kernel_unit_length_y * subregion_to_device_scale_y).round(),
+ x: x * subregion_to_device_scale_x + subregion_to_device_offset_x,
+ y: y * subregion_to_device_scale_y + subregion_to_device_offset_y,
+ z: *z}
+ },
+ FilterGraphOp::SVGFEDiffuseLightingSpot{
+ surface_scale, diffuse_constant, kernel_unit_length_x,
+ kernel_unit_length_y, x, y, z, points_at_x, points_at_y,
+ points_at_z, cone_exponent, limiting_cone_angle} => {
+ FilterGraphOp::SVGFEDiffuseLightingSpot{
+ surface_scale: *surface_scale,
+ diffuse_constant: *diffuse_constant,
+ kernel_unit_length_x:
+ (kernel_unit_length_x * subregion_to_device_scale_x).round(),
+ kernel_unit_length_y:
+ (kernel_unit_length_y * subregion_to_device_scale_y).round(),
+ x: x * subregion_to_device_scale_x + subregion_to_device_offset_x,
+ y: y * subregion_to_device_scale_y + subregion_to_device_offset_y,
+ z: *z,
+ points_at_x: points_at_x * subregion_to_device_scale_x + subregion_to_device_offset_x,
+ points_at_y: points_at_y * subregion_to_device_scale_y + subregion_to_device_offset_y,
+ points_at_z: *points_at_z,
+ cone_exponent: *cone_exponent,
+ limiting_cone_angle: *limiting_cone_angle}
+ },
+ FilterGraphOp::SVGFEFlood{..} => op.clone(),
+ FilterGraphOp::SVGFEDisplacementMap{
+ scale, x_channel_selector, y_channel_selector} => {
+ FilterGraphOp::SVGFEDisplacementMap{
+ scale: scale * subregion_to_device_scale_x,
+ x_channel_selector: *x_channel_selector,
+ y_channel_selector: *y_channel_selector}
+ },
+ FilterGraphOp::SVGFEDropShadow{
+ color, dx, dy, std_deviation_x, std_deviation_y} => {
+ FilterGraphOp::SVGFEDropShadow{
+ color: *color,
+ dx: dx * subregion_to_device_scale_x,
+ dy: dy * subregion_to_device_scale_y,
+ std_deviation_x: std_deviation_x * subregion_to_device_scale_x,
+ std_deviation_y: std_deviation_y * subregion_to_device_scale_y}
+ },
+ FilterGraphOp::SVGFEGaussianBlur{std_deviation_x, std_deviation_y} => {
+ let std_deviation_x = std_deviation_x * subregion_to_device_scale_x;
+ let std_deviation_y = std_deviation_y * subregion_to_device_scale_y;
+ // For blurs that effectively have no radius in display
+ // space, we can convert to identity.
+ if std_deviation_x + std_deviation_y >= 0.125 {
+ FilterGraphOp::SVGFEGaussianBlur{
+ std_deviation_x,
+ std_deviation_y}
+ } else {
+ FilterGraphOp::SVGFEIdentity
+ }
+ },
+ FilterGraphOp::SVGFEIdentity => op.clone(),
+ FilterGraphOp::SVGFEImage{..} => op.clone(),
+ FilterGraphOp::SVGFEMorphologyDilate{radius_x, radius_y} => {
+ FilterGraphOp::SVGFEMorphologyDilate{
+ radius_x: (radius_x * subregion_to_device_scale_x).round(),
+ radius_y: (radius_y * subregion_to_device_scale_y).round()}
+ },
+ FilterGraphOp::SVGFEMorphologyErode{radius_x, radius_y} => {
+ FilterGraphOp::SVGFEMorphologyErode{
+ radius_x: (radius_x * subregion_to_device_scale_x).round(),
+ radius_y: (radius_y * subregion_to_device_scale_y).round()}
+ },
+ FilterGraphOp::SVGFEOpacity{..} => op.clone(),
+ FilterGraphOp::SVGFESourceAlpha => op.clone(),
+ FilterGraphOp::SVGFESourceGraphic => op.clone(),
+ FilterGraphOp::SVGFESpecularLightingDistant{
+ surface_scale, specular_constant, specular_exponent,
+ kernel_unit_length_x, kernel_unit_length_y, azimuth,
+ elevation} => {
+ FilterGraphOp::SVGFESpecularLightingDistant{
+ surface_scale: *surface_scale,
+ specular_constant: *specular_constant,
+ specular_exponent: *specular_exponent,
+ kernel_unit_length_x:
+ (kernel_unit_length_x * subregion_to_device_scale_x).round(),
+ kernel_unit_length_y:
+ (kernel_unit_length_y * subregion_to_device_scale_y).round(),
+ azimuth: *azimuth, elevation: *elevation}
+ },
+ FilterGraphOp::SVGFESpecularLightingPoint{
+ surface_scale, specular_constant, specular_exponent,
+ kernel_unit_length_x, kernel_unit_length_y, x, y, z } => {
+ FilterGraphOp::SVGFESpecularLightingPoint{
+ surface_scale: *surface_scale,
+ specular_constant: *specular_constant,
+ specular_exponent: *specular_exponent,
+ kernel_unit_length_x:
+ (kernel_unit_length_x * subregion_to_device_scale_x).round(),
+ kernel_unit_length_y:
+ (kernel_unit_length_y * subregion_to_device_scale_y).round(),
+ x: x * subregion_to_device_scale_x + subregion_to_device_offset_x,
+ y: y * subregion_to_device_scale_y + subregion_to_device_offset_y,
+ z: *z }
+ },
+ FilterGraphOp::SVGFESpecularLightingSpot{
+ surface_scale, specular_constant, specular_exponent,
+ kernel_unit_length_x, kernel_unit_length_y, x, y, z,
+ points_at_x, points_at_y, points_at_z, cone_exponent,
+ limiting_cone_angle} => {
+ FilterGraphOp::SVGFESpecularLightingSpot{
+ surface_scale: *surface_scale,
+ specular_constant: *specular_constant,
+ specular_exponent: *specular_exponent,
+ kernel_unit_length_x:
+ (kernel_unit_length_x * subregion_to_device_scale_x).round(),
+ kernel_unit_length_y:
+ (kernel_unit_length_y * subregion_to_device_scale_y).round(),
+ x: x * subregion_to_device_scale_x + subregion_to_device_offset_x,
+ y: y * subregion_to_device_scale_y + subregion_to_device_offset_y,
+ z: *z,
+ points_at_x: points_at_x * subregion_to_device_scale_x + subregion_to_device_offset_x,
+ points_at_y: points_at_y * subregion_to_device_scale_y + subregion_to_device_offset_y,
+ points_at_z: *points_at_z,
+ cone_exponent: *cone_exponent,
+ limiting_cone_angle: *limiting_cone_angle}
+ },
+ FilterGraphOp::SVGFETile => op.clone(),
+ FilterGraphOp::SVGFEToAlpha => op.clone(),
+ FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithNoStitching{
+ base_frequency_x, base_frequency_y, num_octaves, seed} => {
+ FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithNoStitching{
+ base_frequency_x:
+ base_frequency_x * subregion_to_device_scale_x,
+ base_frequency_y:
+ base_frequency_y * subregion_to_device_scale_y,
+ num_octaves: *num_octaves, seed: *seed}
+ },
+ FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithStitching{
+ base_frequency_x, base_frequency_y, num_octaves, seed} => {
+ FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithNoStitching{
+ base_frequency_x:
+ base_frequency_x * subregion_to_device_scale_x,
+ base_frequency_y:
+ base_frequency_y * subregion_to_device_scale_y,
+ num_octaves: *num_octaves, seed: *seed}
+ },
+ FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithNoStitching{
+ base_frequency_x, base_frequency_y, num_octaves, seed} => {
+ FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithNoStitching{
+ base_frequency_x:
+ base_frequency_x * subregion_to_device_scale_x,
+ base_frequency_y:
+ base_frequency_y * subregion_to_device_scale_y,
+ num_octaves: *num_octaves, seed: *seed}
+ },
+ FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithStitching{
+ base_frequency_x, base_frequency_y, num_octaves, seed} => {
+ FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithNoStitching{
+ base_frequency_x:
+ base_frequency_x * subregion_to_device_scale_x,
+ base_frequency_y:
+ base_frequency_y * subregion_to_device_scale_y,
+ num_octaves: *num_octaves, seed: *seed}
+ },
+ };
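
The conversions above follow one rule worth calling out: length-like parameters (radii, standard deviations, dx/dy, kernel unit lengths, base frequencies) are only multiplied by the device scale, while position-like parameters (light source x/y, points_at targets) also pick up the device offset. A minimal sketch of the two mappings, using hypothetical helper names:

    // Length-like values (deviations, radii, dx/dy): scale only.
    fn to_device_length(v: f32, scale: f32) -> f32 {
        v * scale
    }

    // Position-like values (light positions, points_at targets):
    // scale plus the subregion-to-device offset.
    fn to_device_position(v: f32, scale: f32, offset: f32) -> f32 {
        v * scale + offset
    }
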
+
+ // Process the inputs and figure out their new subregion, because
+ // the SourceGraphic subregion is smaller than it was in scene build
+ // now that it reflects the invalidation rect
+ //
+ // Also look up the child tasks while we are here.
+ let mut used_subregion = LayoutRect::zero();
+ let node_inputs: Vec<(FilterGraphPictureReference, RenderTaskId)> = node.inputs.iter().map(|input| {
+ let (subregion, task) =
+ match input.buffer_id {
+ FilterOpGraphPictureBufferId::BufferId(id) => {
+ (subregion_by_buffer_id[id as usize], task_by_buffer_id[id as usize])
+ }
+ FilterOpGraphPictureBufferId::None => {
+ // The task must still resolve, so we use the
+ // SourceGraphic as a placeholder for these; they
+ // don't actually contribute anything to the output.
+ (LayoutRect::zero(), original_task_id)
+ }
+ };
+ // Convert offset to device coordinates.
+ let offset = LayoutVector2D::new(
+ (input.offset.x * subregion_to_device_scale_x).round(),
+ (input.offset.y * subregion_to_device_scale_y).round(),
+ );
+ // To figure out the portion of the node subregion used by this
+ // source image we need to apply the target padding. Note that
+ // this does not affect the subregion of the input itself, which
+ // can't be modified because it is used for placement (offset).
+ let target_padding = input.target_padding
+ .scale(subregion_to_device_scale_x, subregion_to_device_scale_y)
+ .round();
+ let target_subregion =
+ LayoutRect::new(
+ LayoutPoint::new(
+ subregion.min.x + target_padding.min.x,
+ subregion.min.y + target_padding.min.y,
+ ),
+ LayoutPoint::new(
+ subregion.max.x + target_padding.max.x,
+ subregion.max.y + target_padding.max.y,
+ ),
+ );
+ used_subregion = used_subregion.union(&target_subregion);
+ (FilterGraphPictureReference{
+ buffer_id: input.buffer_id,
+ // Apply offset to the placement of the input subregion.
+ subregion: subregion.translate(offset),
+ offset: LayoutVector2D::zero(),
+ inflate: input.inflate,
+ // Nothing past this point uses the padding.
+ source_padding: LayoutRect::zero(),
+ target_padding: LayoutRect::zero(),
+ }, task)
+ }).collect();
+
+ // Convert subregion from PicturePixels to DevicePixels and round.
+ let full_subregion = node.subregion
+ .scale(subregion_to_device_scale_x, subregion_to_device_scale_y)
+ .translate(LayoutVector2D::new(subregion_to_device_offset_x, subregion_to_device_offset_y))
+ .round();
+
+ // Clip the used subregion we calculated from the inputs to fit
+ // within the node's specified subregion.
+ used_subregion = used_subregion
+ .intersection(&full_subregion)
+ .unwrap_or(LayoutRect::zero())
+ .round();
+
+ // Certain filters need to override the used_subregion directly.
+ match op {
+ FilterGraphOp::SVGFEBlendColor => {},
+ FilterGraphOp::SVGFEBlendColorBurn => {},
+ FilterGraphOp::SVGFEBlendColorDodge => {},
+ FilterGraphOp::SVGFEBlendDarken => {},
+ FilterGraphOp::SVGFEBlendDifference => {},
+ FilterGraphOp::SVGFEBlendExclusion => {},
+ FilterGraphOp::SVGFEBlendHardLight => {},
+ FilterGraphOp::SVGFEBlendHue => {},
+ FilterGraphOp::SVGFEBlendLighten => {},
+ FilterGraphOp::SVGFEBlendLuminosity => {},
+ FilterGraphOp::SVGFEBlendMultiply => {},
+ FilterGraphOp::SVGFEBlendNormal => {},
+ FilterGraphOp::SVGFEBlendOverlay => {},
+ FilterGraphOp::SVGFEBlendSaturation => {},
+ FilterGraphOp::SVGFEBlendScreen => {},
+ FilterGraphOp::SVGFEBlendSoftLight => {},
+ FilterGraphOp::SVGFEColorMatrix{values} => {
+ if values[3] != 0.0 ||
+ values[7] != 0.0 ||
+ values[11] != 0.0 ||
+ values[15] != 1.0 ||
+ values[19] != 0.0 {
+ // Manipulating alpha can easily create new
+ // pixels outside of input subregions
+ used_subregion = full_subregion;
+ }
+ },
+ FilterGraphOp::SVGFEComponentTransfer => unreachable!(),
+ FilterGraphOp::SVGFEComponentTransferInterned{handle: _, creates_pixels} => {
+ // Check if the value of alpha[0] is modified; if so,
+ // the whole subregion is used because the filter can
+ // create new pixels outside of the input subregions.
+ if creates_pixels {
+ used_subregion = full_subregion;
+ }
+ },
+ FilterGraphOp::SVGFECompositeArithmetic { k1, k2, k3, k4 } => {
+ // Optimize certain cases of the Arithmetic operator
+ // (see the sketch of the arithmetic formula after this
+ // match for why each case holds).
+ //
+ // See logic for SVG_FECOMPOSITE_OPERATOR_ARITHMETIC
+ // in FilterSupport.cpp for more information.
+ //
+ // Any other case uses the union of input subregions.
+ if k4 != 0.0 {
+ // Can produce pixels anywhere in the subregion.
+ used_subregion = full_subregion;
+ } else if k1 != 0.0 && k2 == 0.0 && k3 == 0.0 && k4 == 0.0 {
+ // Can produce pixels where both exist.
+ used_subregion = full_subregion
+ .intersection(&node_inputs[0].0.subregion)
+ .unwrap_or(LayoutRect::zero())
+ .intersection(&node_inputs[1].0.subregion)
+ .unwrap_or(LayoutRect::zero());
+ }
+ else if k2 != 0.0 && k3 == 0.0 && k4 == 0.0 {
+ // Can produce pixels where source exists.
+ used_subregion = full_subregion
+ .intersection(&node_inputs[0].0.subregion)
+ .unwrap_or(LayoutRect::zero());
+ }
+ else if k2 == 0.0 && k3 != 0.0 && k4 == 0.0 {
+ // Can produce pixels where background exists.
+ used_subregion = full_subregion
+ .intersection(&node_inputs[1].0.subregion)
+ .unwrap_or(LayoutRect::zero());
+ }
+ },
+ FilterGraphOp::SVGFECompositeATop => {
+ // Can only produce pixels where background exists.
+ used_subregion = full_subregion
+ .intersection(&node_inputs[1].0.subregion)
+ .unwrap_or(LayoutRect::zero());
+ },
+ FilterGraphOp::SVGFECompositeIn => {
+ // Can only produce pixels where both exist.
+ used_subregion = used_subregion
+ .intersection(&node_inputs[0].0.subregion)
+ .unwrap_or(LayoutRect::zero())
+ .intersection(&node_inputs[1].0.subregion)
+ .unwrap_or(LayoutRect::zero());
+ },
+ FilterGraphOp::SVGFECompositeLighter => {},
+ FilterGraphOp::SVGFECompositeOut => {
+ // Can only produce pixels where source exists.
+ used_subregion = full_subregion
+ .intersection(&node_inputs[0].0.subregion)
+ .unwrap_or(LayoutRect::zero());
+ },
+ FilterGraphOp::SVGFECompositeOver => {},
+ FilterGraphOp::SVGFECompositeXOR => {},
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeDuplicate{..} => {},
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeNone{..} => {},
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeWrap{..} => {},
+ FilterGraphOp::SVGFEDiffuseLightingDistant{..} => {},
+ FilterGraphOp::SVGFEDiffuseLightingPoint{..} => {},
+ FilterGraphOp::SVGFEDiffuseLightingSpot{..} => {},
+ FilterGraphOp::SVGFEDisplacementMap{..} => {},
+ FilterGraphOp::SVGFEDropShadow{..} => {},
+ FilterGraphOp::SVGFEFlood { color } => {
+ // The subregion needs to be set to the full node
+ // subregion for fills (unless the fill is a no-op).
+ // We know at this point that it has no inputs, so the
+ // used_subregion is empty unless we set it here.
+ if color.a > 0.0 {
+ used_subregion = full_subregion;
+ }
+ },
+ FilterGraphOp::SVGFEIdentity => {},
+ FilterGraphOp::SVGFEImage { sampling_filter: _sampling_filter, matrix: _matrix } => {
+ // TODO: calculate the actual subregion
+ used_subregion = full_subregion;
+ },
+ FilterGraphOp::SVGFEGaussianBlur{..} => {},
+ FilterGraphOp::SVGFEMorphologyDilate{..} => {},
+ FilterGraphOp::SVGFEMorphologyErode{..} => {},
+ FilterGraphOp::SVGFEOpacity{valuebinding: _valuebinding, value} => {
+ // If fully transparent, we can ignore this node
+ if value <= 0.0 {
+ used_subregion = LayoutRect::zero();
+ }
+ },
+ FilterGraphOp::SVGFESourceAlpha |
+ FilterGraphOp::SVGFESourceGraphic => {
+ used_subregion = source_subregion;
+ },
+ FilterGraphOp::SVGFESpecularLightingDistant{..} => {},
+ FilterGraphOp::SVGFESpecularLightingPoint{..} => {},
+ FilterGraphOp::SVGFESpecularLightingSpot{..} => {},
+ FilterGraphOp::SVGFETile => {
+ if !used_subregion.is_empty() {
+ // This fills the entire target, at least if there are
+ // any input pixels to work with.
+ used_subregion = full_subregion;
+ }
+ },
+ FilterGraphOp::SVGFEToAlpha => {},
+ FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithNoStitching{..} |
+ FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithStitching{..} |
+ FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithNoStitching{..} |
+ FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithStitching{..} => {
+ // Turbulence produces pixel values throughout the
+ // node subregion.
+ used_subregion = full_subregion;
+ },
+ }
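
For the SVGFECompositeArithmetic case above, the subregion choices follow directly from the per-pixel formula defined by SVG feComposite with operator="arithmetic". A minimal sketch (one channel shown; per the spec the operation runs on premultiplied RGBA):

    // result = k1*source*backdrop + k2*source + k3*backdrop + k4
    // k4 != 0 emits color even where both inputs are transparent, so the
    // full subregion is kept; a k1-only term needs both inputs present
    // (intersection); k2-only needs the source; k3-only needs the backdrop.
    fn composite_arithmetic(source: f32, backdrop: f32, k: [f32; 4]) -> f32 {
        k[0] * source * backdrop + k[1] * source + k[2] * backdrop + k[3]
    }
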
+
+ // If this is the output node, we have to match the provided filter
+ // subregion as the primitive it is applied to is already placed (it
+ // was calculated in get_surface_rects using get_coverage_svgfe).
+ let node_subregion = match is_output {
+ true => output_subregion,
+ false => used_subregion,
+ };
+
+ // Convert subregion from layout pixels to integer device pixels and
+ // then calculate size afterwards so it reflects the used pixel area
+ //
+ // In case of the output node we preserve the exact filter_subregion
+ // task size.
+ //
+ // This can be an empty rect if the source_subregion invalidation
+ // rect didn't request any pixels of this node, but we can't skip
+ // creating tasks that have no size because they would leak in the
+ // render task graph with no consumers
+ let node_task_rect =
+ match is_output {
+ true => output_rect,
+ false => node_subregion.to_i32(),
+ };
+
+ // The SVG spec requires that a later node sampling pixels outside
+ // this node's subregion receives transparent black for those
+ // samples. We achieve this by adding a 1 pixel border around the
+ // target rect, which works fine with the clamping of the texture
+ // fetch in the shader. To account for the offset we have to make a
+ // UvRectKind::Quad mapping for later nodes to use when sampling
+ // this output; if they use feOffset or have a larger target rect,
+ // those samples will be clamped to the transparent black border
+ // and thus meet the spec.
+ let mut node_task_size = node_task_rect.size().cast_unit();
+
+ // We have to limit the render target sizes we're asking for on the
+ // intermediate nodes; it's not feasible to allocate extremely large
+ // surfaces. Note that the SVGFEFilterTask code can adapt to any
+ // scaling that we use here: input subregions simply have to be in
+ // the same space as the target subregion, which we're not changing,
+ // and operator parameters like kernel_unit_length are also in that
+ // space. Blurs apply this same logic if their intermediate is
+ // too large. We use a simple halving calculation here so that
+ // pixel alignment is still vaguely sensible.
+ while node_task_size.width as usize + node.inflate as usize * 2 > MAX_SURFACE_SIZE ||
+ node_task_size.height as usize + node.inflate as usize * 2 > MAX_SURFACE_SIZE {
+ node_task_size.width >>= 1;
+ node_task_size.height >>= 1;
+ }
+ // Add the inflate border
+ node_task_size.width += node.inflate as i32 * 2;
+ node_task_size.height += node.inflate as i32 * 2;
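
As a worked example of the halving above (the exact limit is whatever MAX_SURFACE_SIZE is defined as; 4096 is used here purely for illustration): a 10000x3000 node with inflate = 1 fails the 10002 > 4096 check and halves to 5000x1500, fails again at 5002 > 4096 and halves to 2500x750, then passes, and finally has the inflate border added to end up as a 2502x752 task.
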
+
+ // Make the uv_rect_kind for this node's task to use, this matters
+ // only on the final node because we don't use it internally
+ let node_uv_rect_kind =
+ uv_rect_kind_for_task_size(node_task_size, node.inflate);
+
+ // Create task for this node
+ let task_id;
+ match op {
+ FilterGraphOp::SVGFEGaussianBlur { std_deviation_x, std_deviation_y } => {
+ // Note: wrap_prim_with_filters copies the SourceGraphic to
+ // a node to apply the transparent border around the image;
+ // we rely on that behavior here, as the Blur filter is a
+ // different shader without awareness of the subregion
+ // rules in the SVG spec.
+
+ // Find the input task id
+ assert!(node_inputs.len() == 1);
+ let blur_input = &node_inputs[0].0;
+ let source_task_id = node_inputs[0].1;
+
+ // We have to make a copy of the input that is padded with
+ // transparent black for the area outside the subregion, so
+ // that the blur task does not duplicate pixels at the edges.
+ // This is also where we have to adjust size to account for
+ // downscaling of the image in the blur task, to avoid
+ // introducing sampling artifacts on the downscale.
+ let mut adjusted_blur_std_deviation = DeviceSize::new(
+ std_deviation_x,
+ std_deviation_y,
+ );
+ let blur_subregion = blur_input.subregion
+ .inflate(
+ std_deviation_x.ceil() * BLUR_SAMPLE_SCALE,
+ std_deviation_y.ceil() * BLUR_SAMPLE_SCALE);
+ let blur_task_size = blur_subregion.size().cast_unit();
+ // Adjust task size to prevent potential sampling errors
+ let mut adjusted_blur_task_size =
+ BlurTask::adjusted_blur_source_size(
+ blur_task_size,
+ adjusted_blur_std_deviation,
+ );
+ // Now change the subregion to match the revised task size;
+ // keeping it centered should keep an animated radius smooth.
+ let corner = LayoutPoint::new(
+ blur_subregion.min.x + ((
+ blur_task_size.width as i32 -
+ adjusted_blur_task_size.width) / 2) as f32,
+ blur_subregion.min.y + ((
+ blur_task_size.height as i32 -
+ adjusted_blur_task_size.height) / 2) as f32,
+ )
+ .floor();
+ // Recalculate the blur_subregion to match. Note that if the
+ // task was downsized it doesn't affect the size of this
+ // rect, so we don't have to scale blur_input.subregion for
+ // input purposes, as they are at the same scale.
+ let blur_subregion = LayoutRect::new(
+ corner,
+ LayoutPoint::new(
+ corner.x + adjusted_blur_task_size.width as f32,
+ corner.y + adjusted_blur_task_size.height as f32,
+ ),
+ );
+ // For extremely large blur radius we have to limit size,
+ // see comments on node_task_size above for more details.
+ while adjusted_blur_task_size.to_i32().width as usize > MAX_SURFACE_SIZE ||
+ adjusted_blur_task_size.to_i32().height as usize > MAX_SURFACE_SIZE {
+ adjusted_blur_task_size.width >>= 1;
+ adjusted_blur_task_size.height >>= 1;
+ adjusted_blur_std_deviation.width *= 0.5;
+ adjusted_blur_std_deviation.height *= 0.5;
+ if adjusted_blur_task_size.width < 2 {
+ adjusted_blur_task_size.width = 2;
+ }
+ if adjusted_blur_task_size.height < 2 {
+ adjusted_blur_task_size.height = 2;
+ }
+ }
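
Note that the standard deviation is halved in lockstep with the task size, so when the downscaled blur result is stretched back up the apparent blur radius is unchanged. For example, with a hypothetical limit of 4096, a 9000x2000 blur target with deviation (40, 40) halves to 4500x1000 with deviation (20, 20), then to 2250x500 with deviation (10, 10); each dimension is clamped to at least 2 pixels along the way.
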
+
+ let input_subregion_task_id = frame_state.rg_builder.add().init(RenderTask::new_dynamic(
+ adjusted_blur_task_size,
+ RenderTaskKind::SVGFENode(
+ SVGFEFilterTask{
+ node: FilterGraphNode{
+ kept_by_optimizer: true,
+ linear: false,
+ inflate: 0,
+ inputs: [blur_input.clone()].to_vec(),
+ subregion: blur_subregion,
+ },
+ op: FilterGraphOp::SVGFEIdentity,
+ content_origin: DevicePoint::zero(),
+ extra_gpu_cache_handle: None,
+ }
+ ),
+ ).with_uv_rect_kind(UvRectKind::Rect));
+ // Adding the dependencies sets the inputs for this task
+ frame_state.rg_builder.add_dependency(input_subregion_task_id, source_task_id);
+
+ // TODO: We should do this blur in the correct
+ // colorspace; linear=true is the default in SVG, and
+ // new_blur does not currently support it. If the nodes
+ // that consume the result only use the alpha channel this
+ // does not matter, but it does when they use the RGB.
+ let blur_task_id =
+ RenderTask::new_blur(
+ adjusted_blur_std_deviation,
+ input_subregion_task_id,
+ frame_state.rg_builder,
+ RenderTargetKind::Color,
+ None,
+ adjusted_blur_task_size,
+ );
+
+ task_id = frame_state.rg_builder.add().init(RenderTask::new_dynamic(
+ node_task_size,
+ RenderTaskKind::SVGFENode(
+ SVGFEFilterTask{
+ node: FilterGraphNode{
+ kept_by_optimizer: true,
+ linear: node.linear,
+ inflate: node.inflate,
+ inputs: [
+ FilterGraphPictureReference{
+ buffer_id: blur_input.buffer_id,
+ subregion: blur_subregion,
+ inflate: 0,
+ offset: LayoutVector2D::zero(),
+ source_padding: LayoutRect::zero(),
+ target_padding: LayoutRect::zero(),
+ }].to_vec(),
+ subregion: node_subregion,
+ },
+ op: FilterGraphOp::SVGFEIdentity,
+ content_origin: DevicePoint::zero(),
+ extra_gpu_cache_handle: None,
+ }
+ ),
+ ).with_uv_rect_kind(node_uv_rect_kind));
+ // Adding the dependencies sets the inputs for this task
+ frame_state.rg_builder.add_dependency(task_id, blur_task_id);
+ }
+ FilterGraphOp::SVGFEDropShadow { color, dx, dy, std_deviation_x, std_deviation_y } => {
+ // Note: wrap_prim_with_filters copies the SourceGraphic to
+ // a node to apply the transparent border around the image;
+ // we rely on that behavior here, as the Blur filter is a
+ // different shader without awareness of the subregion
+ // rules in the SVG spec.
+
+ // Find the input task id
+ assert!(node_inputs.len() == 1);
+ let blur_input = &node_inputs[0].0;
+ let source_task_id = node_inputs[0].1;
+
+ // We have to make a copy of the input that is padded with
+ // transparent black for the area outside the subregion, so
+ // that the blur task does not duplicate pixels at the edges.
+ // This is also where we have to adjust size to account for
+ // downscaling of the image in the blur task, to avoid
+ // introducing sampling artifacts on the downscale.
+ let mut adjusted_blur_std_deviation = DeviceSize::new(
+ std_deviation_x,
+ std_deviation_y,
+ );
+ let blur_subregion = blur_input.subregion
+ .inflate(
+ std_deviation_x.ceil() * BLUR_SAMPLE_SCALE,
+ std_deviation_y.ceil() * BLUR_SAMPLE_SCALE);
+ let blur_task_size = blur_subregion.size().cast_unit();
+ // Adjust task size to prevent potential sampling errors
+ let mut adjusted_blur_task_size =
+ BlurTask::adjusted_blur_source_size(
+ blur_task_size,
+ adjusted_blur_std_deviation,
+ );
+ // Now change the subregion to match the revised task size;
+ // keeping it centered should keep an animated radius smooth.
+ let corner = LayoutPoint::new(
+ blur_subregion.min.x + ((
+ blur_task_size.width as i32 -
+ adjusted_blur_task_size.width) / 2) as f32,
+ blur_subregion.min.y + ((
+ blur_task_size.height as i32 -
+ adjusted_blur_task_size.height) / 2) as f32,
+ )
+ .floor();
+ // Recalculate the blur_subregion to match. Note that if the
+ // task was downsized it doesn't affect the size of this
+ // rect, so we don't have to scale blur_input.subregion for
+ // input purposes, as they are at the same scale.
+ let blur_subregion = LayoutRect::new(
+ corner,
+ LayoutPoint::new(
+ corner.x + adjusted_blur_task_size.width as f32,
+ corner.y + adjusted_blur_task_size.height as f32,
+ ),
+ );
+ // For extremely large blur radius we have to limit size,
+ // see comments on node_task_size above for more details.
+ while adjusted_blur_task_size.to_i32().width as usize > MAX_SURFACE_SIZE ||
+ adjusted_blur_task_size.to_i32().height as usize > MAX_SURFACE_SIZE {
+ adjusted_blur_task_size.width >>= 1;
+ adjusted_blur_task_size.height >>= 1;
+ adjusted_blur_std_deviation.width *= 0.5;
+ adjusted_blur_std_deviation.height *= 0.5;
+ if adjusted_blur_task_size.width < 2 {
+ adjusted_blur_task_size.width = 2;
+ }
+ if adjusted_blur_task_size.height < 2 {
+ adjusted_blur_task_size.height = 2;
+ }
+ }
+
+ let input_subregion_task_id = frame_state.rg_builder.add().init(RenderTask::new_dynamic(
+ adjusted_blur_task_size,
+ RenderTaskKind::SVGFENode(
+ SVGFEFilterTask{
+ node: FilterGraphNode{
+ kept_by_optimizer: true,
+ linear: false,
+ inputs: [
+ FilterGraphPictureReference{
+ buffer_id: blur_input.buffer_id,
+ subregion: blur_input.subregion,
+ offset: LayoutVector2D::zero(),
+ inflate: blur_input.inflate,
+ source_padding: LayoutRect::zero(),
+ target_padding: LayoutRect::zero(),
+ }].to_vec(),
+ subregion: blur_subregion,
+ inflate: 0,
+ },
+ op: FilterGraphOp::SVGFEIdentity,
+ content_origin: DevicePoint::zero(),
+ extra_gpu_cache_handle: None,
+ }
+ ),
+ ).with_uv_rect_kind(UvRectKind::Rect));
+ // Adding the dependencies sets the inputs for this task
+ frame_state.rg_builder.add_dependency(input_subregion_task_id, source_task_id);
+
+ // The shadow compositing only cares about alpha channel
+ // which is always linear, so we can blur this in sRGB or
+ // linear color space and the result is the same as we will
+ // be replacing the rgb completely.
+ let blur_task_id =
+ RenderTask::new_blur(
+ adjusted_blur_std_deviation,
+ input_subregion_task_id,
+ frame_state.rg_builder,
+ RenderTargetKind::Color,
+ None,
+ adjusted_blur_task_size,
+ );
+
+ // Now we make the compositing task; for this we need to put
+ // the blurred shadow image at the correct subregion offset.
+ let blur_subregion = blur_subregion
+ .translate(LayoutVector2D::new(dx, dy));
+ task_id = frame_state.rg_builder.add().init(RenderTask::new_dynamic(
+ node_task_size,
+ RenderTaskKind::SVGFENode(
+ SVGFEFilterTask{
+ node: FilterGraphNode{
+ kept_by_optimizer: true,
+ linear: node.linear,
+ inflate: node.inflate,
+ inputs: [
+ // Original picture
+ *blur_input,
+ // Shadow picture
+ FilterGraphPictureReference{
+ buffer_id: blur_input.buffer_id,
+ subregion: blur_subregion,
+ inflate: 0,
+ offset: LayoutVector2D::zero(),
+ source_padding: LayoutRect::zero(),
+ target_padding: LayoutRect::zero(),
+ }].to_vec(),
+ subregion: node_subregion,
+ },
+ op: FilterGraphOp::SVGFEDropShadow{
+ color,
+ // These parameters don't matter here
+ dx: 0.0, dy: 0.0,
+ std_deviation_x: 0.0, std_deviation_y: 0.0,
+ },
+ content_origin: DevicePoint::zero(),
+ extra_gpu_cache_handle: None,
+ }
+ ),
+ ).with_uv_rect_kind(node_uv_rect_kind));
+ // Adding the dependencies sets the inputs for this task
+ frame_state.rg_builder.add_dependency(task_id, source_task_id);
+ frame_state.rg_builder.add_dependency(task_id, blur_task_id);
+ }
+ FilterGraphOp::SVGFESourceAlpha |
+ FilterGraphOp::SVGFESourceGraphic => {
+ // These copy from the original task; we have to synthesize
+ // a fake input binding to make the shader do the copy. In
+ // the case of SourceAlpha the shader will zero the RGB, but
+ // we don't have to care about that distinction here.
+ task_id = frame_state.rg_builder.add().init(RenderTask::new_dynamic(
+ node_task_size,
+ RenderTaskKind::SVGFENode(
+ SVGFEFilterTask{
+ node: FilterGraphNode{
+ kept_by_optimizer: true,
+ linear: node.linear,
+ inflate: node.inflate,
+ inputs: [
+ FilterGraphPictureReference{
+ buffer_id: FilterOpGraphPictureBufferId::None,
+ // This is what makes the mapping
+ // actually work - this has to be
+ // the subregion of the whole filter
+ // because that is the size of the
+ // input task; it will be cropped to
+ // the used area (source_subregion).
+ subregion: filter_subregion,
+ offset: LayoutVector2D::zero(),
+ inflate: 0,
+ source_padding: LayoutRect::zero(),
+ target_padding: LayoutRect::zero(),
+ }
+ ].to_vec(),
+ subregion: node_subregion,
+ },
+ op: op.clone(),
+ content_origin: DevicePoint::zero(),
+ extra_gpu_cache_handle: None,
+ }
+ ),
+ ).with_uv_rect_kind(node_uv_rect_kind));
+ frame_state.rg_builder.add_dependency(task_id, original_task_id);
+ made_dependency_on_source = true;
+ }
+ FilterGraphOp::SVGFEComponentTransferInterned { handle, creates_pixels: _ } => {
+ // FIXME: Doing this in prepare_interned_prim_for_render
+ // doesn't seem to be enough, where should it be done?
+ let filter_data = &mut data_stores.filter_data[handle];
+ filter_data.update(frame_state);
+ // ComponentTransfer has a gpu_cache_handle that we need to
+ // pass along
+ task_id = frame_state.rg_builder.add().init(RenderTask::new_dynamic(
+ node_task_size,
+ RenderTaskKind::SVGFENode(
+ SVGFEFilterTask{
+ node: FilterGraphNode{
+ kept_by_optimizer: true,
+ linear: node.linear,
+ inputs: node_inputs.iter().map(|input| {input.0}).collect(),
+ subregion: node_subregion,
+ inflate: node.inflate,
+ },
+ op: op.clone(),
+ content_origin: DevicePoint::zero(),
+ extra_gpu_cache_handle: Some(filter_data.gpu_cache_handle),
+ }
+ ),
+ ).with_uv_rect_kind(node_uv_rect_kind));
+
+ // Add the dependencies for inputs of this node, which will
+ // be used by add_svg_filter_node_instances later
+ for (_input, input_task) in &node_inputs {
+ if *input_task == original_task_id {
+ made_dependency_on_source = true;
+ }
+ if *input_task != RenderTaskId::INVALID {
+ frame_state.rg_builder.add_dependency(task_id, *input_task);
+ }
+ }
+ }
+ _ => {
+ // This is the usual case - zero, one or two inputs that
+ // reference earlier node results.
+ task_id = frame_state.rg_builder.add().init(RenderTask::new_dynamic(
+ node_task_size,
+ RenderTaskKind::SVGFENode(
+ SVGFEFilterTask{
+ node: FilterGraphNode{
+ kept_by_optimizer: true,
+ linear: node.linear,
+ inputs: node_inputs.iter().map(|input| {input.0}).collect(),
+ subregion: node_subregion,
+ inflate: node.inflate,
+ },
+ op: op.clone(),
+ content_origin: DevicePoint::zero(),
+ extra_gpu_cache_handle: None,
+ }
+ ),
+ ).with_uv_rect_kind(node_uv_rect_kind));
+
+ // Add the dependencies for inputs of this node, which will
+ // be used by add_svg_filter_node_instances later
+ for (_input, input_task) in &node_inputs {
+ if *input_task == original_task_id {
+ made_dependency_on_source = true;
+ }
+ if *input_task != RenderTaskId::INVALID {
+ frame_state.rg_builder.add_dependency(task_id, *input_task);
+ }
+ }
+ }
+ }
+
+ // We track the tasks we created by output buffer id to make it easy
+ // to look them up quickly, since nodes can only depend on previous
+ // nodes in the same list
+ task_by_buffer_id[filter_index] = task_id;
+ subregion_by_buffer_id[filter_index] = node_subregion;
+
+ if is_output {
+ output_task_id = task_id;
+ }
+ }
+
+ // If no tasks referenced the SourceGraphic, we actually have to create
+ // a fake dependency so that it does not leak.
+ if !made_dependency_on_source && output_task_id != original_task_id {
+ frame_state.rg_builder.add_dependency(output_task_id, original_task_id);
+ }
+
+ output_task_id
+ }
+
pub fn uv_rect_kind(&self) -> UvRectKind {
self.uv_rect_kind
}
@@ -1580,6 +2807,16 @@ impl RenderTask {
}
}
+ pub fn get_target_size(&self) -> DeviceIntSize {
+ match self.location {
+ RenderTaskLocation::Dynamic { rect, .. } => rect.size(),
+ RenderTaskLocation::Static { rect, .. } => rect.size(),
+ RenderTaskLocation::Existing { size, .. } => size,
+ RenderTaskLocation::CacheRequest { size } => size,
+ RenderTaskLocation::Unallocated { size } => size,
+ }
+ }
+
pub fn target_kind(&self) -> RenderTargetKind {
self.kind.target_kind()
}
diff --git a/gfx/wr/webrender/src/render_task_cache.rs b/gfx/wr/webrender/src/render_task_cache.rs
index 2c81a9824f..621a9afd92 100644
--- a/gfx/wr/webrender/src/render_task_cache.rs
+++ b/gfx/wr/webrender/src/render_task_cache.rs
@@ -11,7 +11,6 @@ use crate::device::TextureFilter;
use crate::freelist::{FreeList, FreeListHandle, WeakFreeListHandle};
use crate::gpu_cache::GpuCache;
use crate::internal_types::FastHashMap;
-use crate::picture::SurfaceIndex;
use crate::prim_store::image::ImageCacheKey;
use crate::prim_store::gradient::{
FastLinearGradientCacheKey, LinearGradientCacheKey, RadialGradientCacheKey,
@@ -36,7 +35,7 @@ const MAX_CACHE_TASK_SIZE: f32 = 4096.0;
/// box-shadow input).
pub enum RenderTaskParent {
/// Parent is a surface
- Surface(SurfaceIndex),
+ Surface,
/// Parent is a render task
RenderTask(RenderTaskId),
}
@@ -288,9 +287,7 @@ impl RenderTaskCache {
// an input source.
if let Some(render_task_id) = cache_entry.render_task_id {
match parent {
- // TODO(gw): Remove surface from here as a follow up patch, as it's now implicit
- // due to using SurfaceBuilder
- RenderTaskParent::Surface(_surface_index) => {
+ RenderTaskParent::Surface => {
// If parent is a surface, use helper fn to add this dependency,
// which correctly takes account of the render task configuration
// of the surface.
diff --git a/gfx/wr/webrender/src/render_task_graph.rs b/gfx/wr/webrender/src/render_task_graph.rs
index 6c02de8b65..4422d17e60 100644
--- a/gfx/wr/webrender/src/render_task_graph.rs
+++ b/gfx/wr/webrender/src/render_task_graph.rs
@@ -591,9 +591,12 @@ impl RenderTaskGraphBuilder {
}
}
- // By now, all surfaces that were borrowed from the render target pool must
- // be returned to the resource cache, or we are leaking intermediate surfaces!
- assert!(self.active_surfaces.is_empty());
+ if !self.active_surfaces.is_empty() {
+ graph.print();
+ // By now, all surfaces that were borrowed from the render target pool must
+ // be returned to the resource cache, or we are leaking intermediate surfaces!
+ assert!(self.active_surfaces.is_empty());
+ }
// Each task is now allocated to a surface and target rect. Write that to the
// GPU blocks and task_data. After this point, the graph is returned and is
@@ -656,29 +659,30 @@ impl RenderTaskGraph {
pub fn print(
&self,
) {
- debug!("-- RenderTaskGraph --");
+ print!("-- RenderTaskGraph --\n");
for (i, task) in self.tasks.iter().enumerate() {
- debug!("Task {} [{}]: render_on={} free_after={} children={:?}",
+ print!("Task {} [{}]: render_on={} free_after={} children={:?} target_size={:?}\n",
i,
task.kind.as_str(),
task.render_on.0,
task.free_after.0,
task.children,
+ task.get_target_size(),
);
}
for (p, pass) in self.passes.iter().enumerate() {
- debug!("Pass {}:", p);
+ print!("Pass {}:\n", p);
for (s, sub_pass) in pass.sub_passes.iter().enumerate() {
- debug!("\tSubPass {}: {:?}",
+ print!("\tSubPass {}: {:?}\n",
s,
sub_pass.surface,
);
for task_id in &sub_pass.task_ids {
- debug!("\t\tTask {:?}", task_id.index);
+ print!("\t\tTask {:?}\n", task_id.index);
}
}
}
diff --git a/gfx/wr/webrender/src/renderer/mod.rs b/gfx/wr/webrender/src/renderer/mod.rs
index a70d3eca18..ab3eb956b0 100644
--- a/gfx/wr/webrender/src/renderer/mod.rs
+++ b/gfx/wr/webrender/src/renderer/mod.rs
@@ -69,7 +69,7 @@ use crate::frame_builder::Frame;
use glyph_rasterizer::GlyphFormat;
use crate::gpu_cache::{GpuCacheUpdate, GpuCacheUpdateList};
use crate::gpu_cache::{GpuCacheDebugChunk, GpuCacheDebugCmd};
-use crate::gpu_types::{ScalingInstance, SvgFilterInstance, CopyInstance, PrimitiveInstanceData};
+use crate::gpu_types::{ScalingInstance, SvgFilterInstance, SVGFEFilterInstance, CopyInstance, PrimitiveInstanceData};
use crate::gpu_types::{BlurInstance, ClearInstance, CompositeInstance, CompositorTransform};
use crate::internal_types::{TextureSource, TextureCacheCategory, FrameId};
#[cfg(any(feature = "capture", feature = "replay"))]
@@ -193,11 +193,11 @@ const GPU_TAG_CACHE_LINEAR_GRADIENT: GpuProfileTag = GpuProfileTag {
label: "C_LinearGradient",
color: debug_colors::BROWN,
};
-const GPU_TAG_CACHE_RADIAL_GRADIENT: GpuProfileTag = GpuProfileTag {
+const GPU_TAG_RADIAL_GRADIENT: GpuProfileTag = GpuProfileTag {
label: "C_RadialGradient",
color: debug_colors::BROWN,
};
-const GPU_TAG_CACHE_CONIC_GRADIENT: GpuProfileTag = GpuProfileTag {
+const GPU_TAG_CONIC_GRADIENT: GpuProfileTag = GpuProfileTag {
label: "C_ConicGradient",
color: debug_colors::BROWN,
};
@@ -257,6 +257,10 @@ const GPU_TAG_SVG_FILTER: GpuProfileTag = GpuProfileTag {
label: "SvgFilter",
color: debug_colors::LEMONCHIFFON,
};
+const GPU_TAG_SVG_FILTER_NODES: GpuProfileTag = GpuProfileTag {
+ label: "SvgFilterNodes",
+ color: debug_colors::LEMONCHIFFON,
+};
const GPU_TAG_COMPOSITE: GpuProfileTag = GpuProfileTag {
label: "Composite",
color: debug_colors::TOMATO,
@@ -288,6 +292,8 @@ impl BatchKind {
}
BatchKind::TextRun(_) => GPU_TAG_PRIM_TEXT_RUN,
BatchKind::Quad(PatternKind::ColorOrTexture) => GPU_TAG_PRIMITIVE,
+ BatchKind::Quad(PatternKind::RadialGradient) => GPU_TAG_RADIAL_GRADIENT,
+ BatchKind::Quad(PatternKind::ConicGradient) => GPU_TAG_CONIC_GRADIENT,
BatchKind::Quad(PatternKind::Mask) => GPU_TAG_INDIRECT_MASK,
}
}
@@ -2527,6 +2533,35 @@ impl Renderer {
);
}
+ fn handle_svg_nodes(
+ &mut self,
+ textures: &BatchTextures,
+ svg_filters: &[SVGFEFilterInstance],
+ projection: &default::Transform3D<f32>,
+ stats: &mut RendererStats,
+ ) {
+ if svg_filters.is_empty() {
+ return;
+ }
+
+ let _timer = self.gpu_profiler.start_timer(GPU_TAG_SVG_FILTER_NODES);
+
+ self.shaders.borrow_mut().cs_svg_filter_node.bind(
+ &mut self.device,
+ &projection,
+ None,
+ &mut self.renderer_errors,
+ &mut self.profile,
+ );
+
+ self.draw_instanced_batch(
+ &svg_filters,
+ VertexArrayKind::SvgFilterNode,
+ textures,
+ stats,
+ );
+ }
+
fn handle_resolve(
&mut self,
resolve_op: &ResolveOp,
@@ -3576,6 +3611,10 @@ impl Renderer {
);
}
+ for (ref textures, ref filters) in &target.svg_nodes {
+ self.handle_svg_nodes(textures, filters, projection, stats);
+ }
+
for alpha_batch_container in &target.alpha_batch_containers {
self.draw_alpha_batch_container(
alpha_batch_container,
@@ -4069,7 +4108,7 @@ impl Renderer {
// Draw any radial gradients for this target.
if !target.radial_gradients.is_empty() {
- let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_RADIAL_GRADIENT);
+ let _timer = self.gpu_profiler.start_timer(GPU_TAG_RADIAL_GRADIENT);
self.set_blend(false, FramebufferKind::Other);
@@ -4095,7 +4134,7 @@ impl Renderer {
// Draw any conic gradients for this target.
if !target.conic_gradients.is_empty() {
- let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_CONIC_GRADIENT);
+ let _timer = self.gpu_profiler.start_timer(GPU_TAG_CONIC_GRADIENT);
self.set_blend(false, FramebufferKind::Other);
diff --git a/gfx/wr/webrender/src/renderer/shade.rs b/gfx/wr/webrender/src/renderer/shade.rs
index 96e8982aa0..5463e8eb67 100644
--- a/gfx/wr/webrender/src/renderer/shade.rs
+++ b/gfx/wr/webrender/src/renderer/shade.rs
@@ -263,6 +263,7 @@ impl LazilyCompiledShader {
VertexArrayKind::Scale => &desc::SCALE,
VertexArrayKind::Resolve => &desc::RESOLVE,
VertexArrayKind::SvgFilter => &desc::SVG_FILTER,
+ VertexArrayKind::SvgFilterNode => &desc::SVG_FILTER_NODE,
VertexArrayKind::Composite => &desc::COMPOSITE,
VertexArrayKind::Clear => &desc::CLEAR,
VertexArrayKind::Copy => &desc::COPY,
@@ -601,6 +602,7 @@ pub struct Shaders {
pub cs_radial_gradient: LazilyCompiledShader,
pub cs_conic_gradient: LazilyCompiledShader,
pub cs_svg_filter: LazilyCompiledShader,
+ pub cs_svg_filter_node: LazilyCompiledShader,
// Brush shaders
brush_solid: BrushShader,
@@ -632,6 +634,8 @@ pub struct Shaders {
ps_split_composite: LazilyCompiledShader,
pub ps_quad_textured: LazilyCompiledShader,
+ pub ps_quad_radial_gradient: LazilyCompiledShader,
+ pub ps_quad_conic_gradient: LazilyCompiledShader,
pub ps_mask: LazilyCompiledShader,
pub ps_mask_fast: LazilyCompiledShader,
pub ps_clear: LazilyCompiledShader,
@@ -768,6 +772,16 @@ impl Shaders {
profile,
)?;
+ let cs_svg_filter_node = LazilyCompiledShader::new(
+ ShaderKind::Cache(VertexArrayKind::SvgFilterNode),
+ "cs_svg_filter_node",
+ &[],
+ device,
+ options.precache_flags,
+ &shader_list,
+ profile,
+ )?;
+
let ps_mask = LazilyCompiledShader::new(
ShaderKind::Cache(VertexArrayKind::Mask),
"ps_quad_mask",
@@ -888,6 +902,26 @@ impl Shaders {
profile,
)?;
+ let ps_quad_radial_gradient = LazilyCompiledShader::new(
+ ShaderKind::Primitive,
+ "ps_quad_radial_gradient",
+ &[],
+ device,
+ options.precache_flags,
+ &shader_list,
+ profile,
+ )?;
+
+ let ps_quad_conic_gradient = LazilyCompiledShader::new(
+ ShaderKind::Primitive,
+ "ps_quad_conic_gradient",
+ &[],
+ device,
+ options.precache_flags,
+ &shader_list,
+ profile,
+ )?;
+
let ps_split_composite = LazilyCompiledShader::new(
ShaderKind::Primitive,
"ps_split_composite",
@@ -1107,6 +1141,7 @@ impl Shaders {
cs_border_solid,
cs_scale,
cs_svg_filter,
+ cs_svg_filter_node,
brush_solid,
brush_image,
brush_fast_image,
@@ -1122,6 +1157,8 @@ impl Shaders {
ps_text_run,
ps_text_run_dual_source,
ps_quad_textured,
+ ps_quad_radial_gradient,
+ ps_quad_conic_gradient,
ps_mask,
ps_mask_fast,
ps_split_composite,
@@ -1160,6 +1197,8 @@ impl Shaders {
) -> &mut LazilyCompiledShader {
match pattern {
PatternKind::ColorOrTexture => &mut self.ps_quad_textured,
+ PatternKind::RadialGradient => &mut self.ps_quad_radial_gradient,
+ PatternKind::ConicGradient => &mut self.ps_quad_conic_gradient,
PatternKind::Mask => unreachable!(),
}
}
@@ -1175,6 +1214,12 @@ impl Shaders {
BatchKind::Quad(PatternKind::ColorOrTexture) => {
&mut self.ps_quad_textured
}
+ BatchKind::Quad(PatternKind::RadialGradient) => {
+ &mut self.ps_quad_radial_gradient
+ }
+ BatchKind::Quad(PatternKind::ConicGradient) => {
+ &mut self.ps_quad_conic_gradient
+ }
BatchKind::Quad(PatternKind::Mask) => {
unreachable!();
}
@@ -1268,6 +1313,7 @@ impl Shaders {
self.cs_blur_a8.deinit(device);
self.cs_blur_rgba8.deinit(device);
self.cs_svg_filter.deinit(device);
+ self.cs_svg_filter_node.deinit(device);
self.brush_solid.deinit(device);
self.brush_blend.deinit(device);
self.brush_mix_blend.deinit(device);
@@ -1305,6 +1351,8 @@ impl Shaders {
self.cs_border_segment.deinit(device);
self.ps_split_composite.deinit(device);
self.ps_quad_textured.deinit(device);
+ self.ps_quad_radial_gradient.deinit(device);
+ self.ps_quad_conic_gradient.deinit(device);
self.ps_mask.deinit(device);
self.ps_mask_fast.deinit(device);
self.ps_clear.deinit(device);
diff --git a/gfx/wr/webrender/src/renderer/vertex.rs b/gfx/wr/webrender/src/renderer/vertex.rs
index cd73975ddd..6ee162ae38 100644
--- a/gfx/wr/webrender/src/renderer/vertex.rs
+++ b/gfx/wr/webrender/src/renderer/vertex.rs
@@ -567,6 +567,56 @@ pub mod desc {
],
};
+ pub const SVG_FILTER_NODE: VertexDescriptor = VertexDescriptor {
+ vertex_attributes: &[VertexAttribute {
+ name: "aPosition",
+ count: 2,
+ kind: VertexAttributeKind::U8Norm,
+ }],
+ instance_attributes: &[
+ VertexAttribute {
+ name: "aFilterTargetRect",
+ count: 4,
+ kind: VertexAttributeKind::F32,
+ },
+ VertexAttribute {
+ name: "aFilterInput1ContentScaleAndOffset",
+ count: 4,
+ kind: VertexAttributeKind::F32,
+ },
+ VertexAttribute {
+ name: "aFilterInput2ContentScaleAndOffset",
+ count: 4,
+ kind: VertexAttributeKind::F32,
+ },
+ VertexAttribute {
+ name: "aFilterInput1TaskAddress",
+ count: 1,
+ kind: VertexAttributeKind::I32,
+ },
+ VertexAttribute {
+ name: "aFilterInput2TaskAddress",
+ count: 1,
+ kind: VertexAttributeKind::I32,
+ },
+ VertexAttribute {
+ name: "aFilterKind",
+ count: 1,
+ kind: VertexAttributeKind::U16,
+ },
+ VertexAttribute {
+ name: "aFilterInputCount",
+ count: 1,
+ kind: VertexAttributeKind::U16,
+ },
+ VertexAttribute {
+ name: "aFilterExtraDataAddress",
+ count: 2,
+ kind: VertexAttributeKind::U16,
+ },
+ ],
+ };
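
For reference, the instance attributes above correspond field-for-field to the per-instance data uploaded for each SVG filter node draw. A hypothetical Rust-side mirror (field names are illustrative only; the real type is SVGFEFilterInstance in gpu_types.rs, which this patch also touches):

    #[repr(C)]
    struct SvgFeFilterInstanceSketch {
        target_rect: [f32; 4],                      // aFilterTargetRect
        input_1_content_scale_and_offset: [f32; 4], // aFilterInput1ContentScaleAndOffset
        input_2_content_scale_and_offset: [f32; 4], // aFilterInput2ContentScaleAndOffset
        input_1_task_address: i32,                  // aFilterInput1TaskAddress
        input_2_task_address: i32,                  // aFilterInput2TaskAddress
        kind: u16,                                  // aFilterKind
        input_count: u16,                           // aFilterInputCount
        extra_data_address: [u16; 2],               // aFilterExtraDataAddress
    }
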
+
pub const MASK: VertexDescriptor = VertexDescriptor {
vertex_attributes: &[VertexAttribute {
name: "aPosition",
@@ -780,6 +830,7 @@ pub enum VertexArrayKind {
ConicGradient,
Resolve,
SvgFilter,
+ SvgFilterNode,
Composite,
Clear,
Copy,
@@ -1004,6 +1055,7 @@ pub struct RendererVAOs {
conic_gradient_vao: VAO,
resolve_vao: VAO,
svg_filter_vao: VAO,
+ svg_filter_node_vao: VAO,
composite_vao: VAO,
clear_vao: VAO,
copy_vao: VAO,
@@ -1051,6 +1103,7 @@ impl RendererVAOs {
conic_gradient_vao: device.create_vao_with_new_instances(&desc::CONIC_GRADIENT, &prim_vao),
resolve_vao: device.create_vao_with_new_instances(&desc::RESOLVE, &prim_vao),
svg_filter_vao: device.create_vao_with_new_instances(&desc::SVG_FILTER, &prim_vao),
+ svg_filter_node_vao: device.create_vao_with_new_instances(&desc::SVG_FILTER_NODE, &prim_vao),
composite_vao: device.create_vao_with_new_instances(&desc::COMPOSITE, &prim_vao),
clear_vao: device.create_vao_with_new_instances(&desc::CLEAR, &prim_vao),
copy_vao: device.create_vao_with_new_instances(&desc::COPY, &prim_vao),
@@ -1073,6 +1126,7 @@ impl RendererVAOs {
device.delete_vao(self.border_vao);
device.delete_vao(self.scale_vao);
device.delete_vao(self.svg_filter_vao);
+ device.delete_vao(self.svg_filter_node_vao);
device.delete_vao(self.composite_vao);
device.delete_vao(self.clear_vao);
device.delete_vao(self.copy_vao);
@@ -1098,6 +1152,7 @@ impl ops::Index<VertexArrayKind> for RendererVAOs {
VertexArrayKind::ConicGradient => &self.conic_gradient_vao,
VertexArrayKind::Resolve => &self.resolve_vao,
VertexArrayKind::SvgFilter => &self.svg_filter_vao,
+ VertexArrayKind::SvgFilterNode => &self.svg_filter_node_vao,
VertexArrayKind::Composite => &self.composite_vao,
VertexArrayKind::Clear => &self.clear_vao,
VertexArrayKind::Copy => &self.copy_vao,
diff --git a/gfx/wr/webrender/src/scene_building.rs b/gfx/wr/webrender/src/scene_building.rs
index 00f29f2ce2..4c76c9522e 100644
--- a/gfx/wr/webrender/src/scene_building.rs
+++ b/gfx/wr/webrender/src/scene_building.rs
@@ -45,8 +45,10 @@ use api::{PropertyBinding, ReferenceFrameKind, ScrollFrameDescriptor, ReferenceF
use api::{APZScrollGeneration, HasScrollLinkedEffect, Shadow, SpatialId, StickyFrameDescriptor, ImageMask, ItemTag};
use api::{ClipMode, PrimitiveKeyKind, TransformStyle, YuvColorSpace, ColorRange, YuvData, TempFilterData};
use api::{ReferenceTransformBinding, Rotation, FillRule, SpatialTreeItem, ReferenceFrameDescriptor};
+use api::FilterOpGraphPictureBufferId;
use api::units::*;
use crate::image_tiling::simplify_repeated_primitive;
+use crate::box_shadow::BLUR_SAMPLE_SCALE;
use crate::clip::{ClipItemKey, ClipStore, ClipItemKeyKind, ClipIntern};
use crate::clip::{ClipInternData, ClipNodeId, ClipLeafId};
use crate::clip::{PolygonDataHandle, ClipTreeBuilder};
@@ -56,7 +58,7 @@ use crate::frame_builder::{FrameBuilderConfig};
use glyph_rasterizer::{FontInstance, SharedFontResources};
use crate::hit_test::HitTestingScene;
use crate::intern::Interner;
-use crate::internal_types::{FastHashMap, LayoutPrimitiveInfo, Filter, PlaneSplitterIndex, PipelineInstanceId};
+use crate::internal_types::{FastHashMap, LayoutPrimitiveInfo, Filter, FilterGraphNode, FilterGraphOp, FilterGraphPictureReference, PlaneSplitterIndex, PipelineInstanceId};
use crate::picture::{Picture3DContext, PictureCompositeMode, PicturePrimitive};
use crate::picture::{BlitReason, OrderedPictureChild, PrimitiveList, SurfaceInfo, PictureFlags};
use crate::picture_graph::PictureGraph;
@@ -90,6 +92,7 @@ use std::collections::vec_deque::VecDeque;
use std::sync::Arc;
use crate::util::{VecHelper, MaxRect};
use crate::filterdata::{SFilterDataComponent, SFilterData, SFilterDataKey};
+use log::Level;
/// Offsets primitives (and clips) by the external scroll offset
/// supplied to scroll nodes.
@@ -192,6 +195,7 @@ impl CompositeOps {
return true;
}
}
+ Filter::SVGGraphNode(..) => {return true;}
_ => {
if filter.is_noop() {
continue;
@@ -724,6 +728,7 @@ impl<'a> SceneBuilder<'a> {
Some(PictureCompositeMode::Filter(Filter::Blur { .. })) => true,
Some(PictureCompositeMode::Filter(Filter::DropShadows { .. })) => true,
Some(PictureCompositeMode::SvgFilter( .. )) => true,
+ Some(PictureCompositeMode::SVGFEGraph( .. )) => true,
_ => false,
};
@@ -899,7 +904,11 @@ impl<'a> SceneBuilder<'a> {
let spatial_node_index = self.get_space(info.spatial_id);
let mut subtraversal = item.sub_iter();
// Avoid doing unnecessary work for empty stacking contexts.
- if subtraversal.current_stacking_context_empty() {
+ // We still have to process it if it has filters; they
+ // may be things like SVGFEFlood, or specific uses of
+ // ComponentTransfer, ColorMatrix or Composite, which
+ // are still visible on an empty stacking context.
+ if subtraversal.current_stacking_context_empty() && item.filters().is_empty() {
subtraversal.skip_current_stacking_context();
traversal = subtraversal;
continue;
@@ -982,8 +991,8 @@ impl<'a> SceneBuilder<'a> {
match bc.kind {
ContextKind::Root => {}
ContextKind::StackingContext { sc_info } => {
- self.rf_mapper.pop_offset();
self.pop_stacking_context(sc_info);
+ self.rf_mapper.pop_offset();
}
ContextKind::ReferenceFrame => {
self.rf_mapper.pop_scope();
@@ -1041,6 +1050,7 @@ impl<'a> SceneBuilder<'a> {
info.vertical_offset_bounds,
info.horizontal_offset_bounds,
info.previously_applied_offset,
+ info.transform,
);
let index = self.spatial_tree.add_sticky_frame(
@@ -2526,6 +2536,7 @@ impl<'a> SceneBuilder<'a> {
let has_filters = stacking_context.composite_ops.has_valid_filters();
+ let spatial_node_context_offset = self.current_offset(stacking_context.spatial_node_index);
source = self.wrap_prim_with_filters(
source,
stacking_context.clip_node_id,
@@ -2533,6 +2544,7 @@ impl<'a> SceneBuilder<'a> {
stacking_context.composite_ops.filter_primitives,
stacking_context.composite_ops.filter_datas,
None,
+ spatial_node_context_offset,
);
// Same for mix-blend-mode, except we can skip if this primitive is the first in the parent
@@ -3669,6 +3681,7 @@ impl<'a> SceneBuilder<'a> {
filter_primitives,
filter_datas,
Some(false),
+ LayoutVector2D::zero(),
);
// If all the filters were no-ops (e.g. opacity(0)) then we don't get a picture here
@@ -3767,6 +3780,7 @@ impl<'a> SceneBuilder<'a> {
mut filter_primitives: Vec<FilterPrimitive>,
filter_datas: Vec<FilterData>,
should_inflate_override: Option<bool>,
+ context_offset: LayoutVector2D,
) -> PictureChainBuilder {
// TODO(cbrewster): Currently CSS and SVG filters live side by side in WebRender, but unexpected results will
// happen if they are used simultaneously. Gecko only provides either filter ops or filter primitives.
@@ -3776,6 +3790,495 @@ impl<'a> SceneBuilder<'a> {
// For each filter, create a new image with that composite mode.
let mut current_filter_data_index = 0;
+ // Check if the filter chain is actually an SVGFE filter graph DAG
+ if let Some(Filter::SVGGraphNode(..)) = filter_ops.first() {
+ // The interesting parts of the handling of SVG filters are:
+ // * scene_building.rs : wrap_prim_with_filters (you are here)
+ // * picture.rs : get_coverage_svgfe
+ // * render_task.rs : new_svg_filter_graph
+ // * render_target.rs : add_svg_filter_node_instances
+
+ // The SVG spec allows us to drop the entire filter graph if it is
+ // unreasonable, so we limit the number of filters in a graph
+ const BUFFER_LIMIT: usize = 256;
+ // Easily tunable for debugging proper handling of inflated rects,
+ // this should normally be 1
+ const SVGFE_INFLATE: i16 = 1;
+ // Easily tunable for debugging proper handling of inflated rects,
+ // this should normally be 0
+ const SVGFE_INFLATE_OUTPUT: i16 = 0;
+
+ // Validate inputs to all filters.
+ //
+ // Several assumptions can be made about the DAG:
+ // * All filters take a specific number of inputs (feMerge is not
+ // supported, the code that built the display items had to convert
+ // any feMerge ops to SVGFECompositeOver already).
+ // * All input buffer ids are < the output buffer id of the node.
+ // * If SourceGraphic or SourceAlpha are used, they are standalone
+ // nodes with no inputs.
+ // * Whenever the subregion of a node is smaller than the subregion
+ // of its inputs, it is a deliberate clip of those inputs to the
+ // new rect; this can occur before/after blur and dropshadow for
+ // example, so we must explicitly handle subregion correctly, but
+ // we do not have to allocate the unused pixels as the transparent
+ // black has no effect on any of the filters - only certain filters
+ // like feFlood can generate something from nothing.
+ // * Coordinate basis of the graph has to be adjusted by
+ // context_offset to put the subregions in the same space that the
+ // primitives are in, as they do that offset as well.
+ let mut reference_for_buffer_id: [FilterGraphPictureReference; BUFFER_LIMIT] = [
+ FilterGraphPictureReference{
+ // This value is deliberately invalid, but not a magic
+ // number; it's just this way to guarantee an assertion
+ // failure if something goes wrong.
+ buffer_id: FilterOpGraphPictureBufferId::BufferId(-1),
+ subregion: LayoutRect::zero(), // Always overridden
+ offset: LayoutVector2D::zero(),
+ inflate: 0,
+ source_padding: LayoutRect::zero(),
+ target_padding: LayoutRect::zero(),
+ }; BUFFER_LIMIT];
+ let mut filters: Vec<(FilterGraphNode, FilterGraphOp)> = Vec::new();
+ filters.reserve(BUFFER_LIMIT);
+ for (original_id, parsefilter) in filter_ops.iter().enumerate() {
+ match parsefilter {
+ Filter::SVGGraphNode(parsenode, op) => {
+ if filters.len() >= BUFFER_LIMIT {
+ // If the DAG is too large we drop it entirely, the spec
+ // allows this.
+ return source;
+ }
+
+ // We need to offset the subregion by the stacking context
+ // offset or we'd be in the wrong coordinate system, prims
+ // are already offset by this same amount.
+ let clip_region = parsenode.subregion
+ .translate(context_offset);
+
+ let mut newnode = FilterGraphNode {
+ kept_by_optimizer: false,
+ linear: parsenode.linear,
+ inflate: SVGFE_INFLATE,
+ inputs: Vec::new(),
+ subregion: clip_region,
+ };
+
+ // Initialize remapped versions of the inputs, this is
+ // done here to share code between the enum variants.
+ let mut remapped_inputs: Vec<FilterGraphPictureReference> = Vec::new();
+ remapped_inputs.reserve_exact(parsenode.inputs.len());
+ for input in &parsenode.inputs {
+ match input.buffer_id {
+ FilterOpGraphPictureBufferId::BufferId(buffer_id) => {
+ // Reference to an earlier node's output;
+ // if this is None, it's a bug
+ let pic = *reference_for_buffer_id
+ .get(buffer_id as usize)
+ .expect("BufferId not valid?");
+ // We have to adjust the subregion and
+ // padding based on the input offset for
+ // feOffset ops; the padding may be inflated
+ // further by other ops such as blurs below.
+ let offset = input.offset;
+ let subregion = pic.subregion
+ .translate(offset);
+ let source_padding = LayoutRect::zero()
+ .translate(-offset);
+ let target_padding = LayoutRect::zero()
+ .translate(offset);
+ remapped_inputs.push(
+ FilterGraphPictureReference {
+ buffer_id: pic.buffer_id,
+ subregion,
+ offset,
+ inflate: pic.inflate,
+ source_padding,
+ target_padding,
+ });
+ }
+ FilterOpGraphPictureBufferId::None => panic!("Unsupported FilterOpGraphPictureBufferId"),
+ }
+ }
+
+ fn union_unchecked(a: LayoutRect, b: LayoutRect) -> LayoutRect {
+ let mut r = a;
+ if r.min.x > b.min.x {r.min.x = b.min.x}
+ if r.min.y > b.min.y {r.min.y = b.min.y}
+ if r.max.x < b.max.x {r.max.x = b.max.x}
+ if r.max.y < b.max.y {r.max.y = b.max.y}
+ r
+ }
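
A note on why a raw min/max union is used here rather than LayoutRect::union (this is an assumption, not something stated in the patch): the padding rects below are built from LayoutRect::zero() and are often zero-area, and a union that special-cases empty rects could drop their corners, losing the "no padding on this edge" contribution. With the raw version the zero rect still participates:

    // e.g. for a feOffset/drop shadow offset of (4, 4):
    let a = LayoutRect::zero();
    let b = LayoutRect::zero().translate(LayoutVector2D::new(-4.0, -4.0));
    // The raw min/max keeps both corners: min = (-4, -4), max = (0, 0),
    // i.e. source pixels up to 4px above/left of the target are needed.
    assert_eq!(union_unchecked(a, b),
        LayoutRect::new(LayoutPoint::new(-4.0, -4.0), LayoutPoint::new(0.0, 0.0)));
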
+
+ match op {
+ FilterGraphOp::SVGFEFlood{..} |
+ FilterGraphOp::SVGFESourceAlpha |
+ FilterGraphOp::SVGFESourceGraphic |
+ FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithNoStitching{..} |
+ FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithStitching{..} |
+ FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithNoStitching{..} |
+ FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithStitching{..} => {
+ assert!(remapped_inputs.len() == 0);
+ filters.push((newnode.clone(), op.clone()));
+ }
+ FilterGraphOp::SVGFEColorMatrix{..} |
+ FilterGraphOp::SVGFEIdentity |
+ FilterGraphOp::SVGFEImage{..} |
+ FilterGraphOp::SVGFEOpacity{..} |
+ FilterGraphOp::SVGFEToAlpha => {
+ assert!(remapped_inputs.len() == 1);
+ newnode.inputs = remapped_inputs;
+ filters.push((newnode.clone(), op.clone()));
+ }
+ FilterGraphOp::SVGFEComponentTransfer => {
+ assert!(remapped_inputs.len() == 1);
+ // Convert to SVGFEComponentTransferInterned
+ let filter_data =
+ &filter_datas[current_filter_data_index];
+ let filter_data = filter_data.sanitize();
+ current_filter_data_index = current_filter_data_index + 1;
+
+ // filter data is 4KiB of gamma ramps used
+ // only by SVGFEComponentTransferWithHandle.
+ //
+ // The gamma ramps are interleaved as RGBA32F
+ // pixels (unlike in regular ComponentTransfer,
+ // where the values are not interleaved), so
+ // r_values[3] is the alpha of the first color,
+ // not the 4th red value. This layout makes the
+ // shader more compatible with buggy compilers that
+ // do not like indexing components on a vec4.
+ let creates_pixels =
+ if let Some(a) = filter_data.r_values.get(3) {
+ *a != 0.0
+ } else {
+ false
+ };
+ let filter_data_key = SFilterDataKey {
+ data:
+ SFilterData {
+ r_func: SFilterDataComponent::from_functype_values(
+ filter_data.func_r_type, &filter_data.r_values),
+ g_func: SFilterDataComponent::from_functype_values(
+ filter_data.func_g_type, &filter_data.g_values),
+ b_func: SFilterDataComponent::from_functype_values(
+ filter_data.func_b_type, &filter_data.b_values),
+ a_func: SFilterDataComponent::from_functype_values(
+ filter_data.func_a_type, &filter_data.a_values),
+ },
+ };
+
+ let handle = self.interners
+ .filter_data
+ .intern(&filter_data_key, || ());
+
+ newnode.inputs = remapped_inputs;
+ filters.push((newnode.clone(), FilterGraphOp::SVGFEComponentTransferInterned{handle, creates_pixels}));
+ }
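
To make the interleaving comment above concrete: with the RGBA32F layout the table is stored as [R0, G0, B0, A0, R1, G1, B1, A1, ...], so index 3 is the alpha of table entry 0. For table-style transfer functions, a fully transparent input pixel looks up entry 0 of the alpha ramp, so a non-zero value there means the transfer creates pixels where there were none, which is what creates_pixels records.
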
+ FilterGraphOp::SVGFEComponentTransferInterned{..} => unreachable!(),
+ FilterGraphOp::SVGFETile => {
+ assert!(remapped_inputs.len() == 1);
+ // feTile usually uses every pixel of input
+ remapped_inputs[0].source_padding =
+ LayoutRect::max_rect();
+ remapped_inputs[0].target_padding =
+ LayoutRect::max_rect();
+ newnode.inputs = remapped_inputs;
+ filters.push((newnode.clone(), op.clone()));
+ }
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeDuplicate{kernel_unit_length_x, kernel_unit_length_y, ..} |
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeNone{kernel_unit_length_x, kernel_unit_length_y, ..} |
+ FilterGraphOp::SVGFEConvolveMatrixEdgeModeWrap{kernel_unit_length_x, kernel_unit_length_y, ..} |
+ FilterGraphOp::SVGFEMorphologyDilate{radius_x: kernel_unit_length_x, radius_y: kernel_unit_length_y} => {
+ assert!(remapped_inputs.len() == 1);
+ let padding = LayoutSize::new(
+ kernel_unit_length_x.ceil(),
+ kernel_unit_length_y.ceil(),
+ );
+ // Add source padding to represent the kernel pixels
+ // needed relative to target pixels
+ remapped_inputs[0].source_padding =
+ remapped_inputs[0].source_padding
+ .inflate(padding.width, padding.height);
+ // Add target padding to represent the area affected
+ // by a source pixel
+ remapped_inputs[0].target_padding =
+ remapped_inputs[0].target_padding
+ .inflate(padding.width, padding.height);
+ newnode.inputs = remapped_inputs;
+ filters.push((newnode.clone(), op.clone()));
+ },
+ FilterGraphOp::SVGFEDiffuseLightingDistant{kernel_unit_length_x, kernel_unit_length_y, ..} |
+ FilterGraphOp::SVGFEDiffuseLightingPoint{kernel_unit_length_x, kernel_unit_length_y, ..} |
+ FilterGraphOp::SVGFEDiffuseLightingSpot{kernel_unit_length_x, kernel_unit_length_y, ..} |
+ FilterGraphOp::SVGFESpecularLightingDistant{kernel_unit_length_x, kernel_unit_length_y, ..} |
+ FilterGraphOp::SVGFESpecularLightingPoint{kernel_unit_length_x, kernel_unit_length_y, ..} |
+ FilterGraphOp::SVGFESpecularLightingSpot{kernel_unit_length_x, kernel_unit_length_y, ..} |
+ FilterGraphOp::SVGFEMorphologyErode{radius_x: kernel_unit_length_x, radius_y: kernel_unit_length_y} => {
+ assert!(remapped_inputs.len() == 1);
+ let padding = LayoutSize::new(
+ kernel_unit_length_x.ceil(),
+ kernel_unit_length_y.ceil(),
+ );
+ // Add source padding to represent the kernel pixels
+ // needed relative to target pixels
+ remapped_inputs[0].source_padding =
+ remapped_inputs[0].source_padding
+ .inflate(padding.width, padding.height);
+ // Add target padding to represent the area affected
+ // by a source pixel
+ remapped_inputs[0].target_padding =
+ remapped_inputs[0].target_padding
+ .inflate(padding.width, padding.height);
+ newnode.inputs = remapped_inputs;
+ filters.push((newnode.clone(), op.clone()));
+ },
+ FilterGraphOp::SVGFEDisplacementMap { scale, .. } => {
+ assert!(remapped_inputs.len() == 2);
+ let padding = LayoutSize::new(
+ scale.ceil(),
+ scale.ceil(),
+ );
+ // Add padding to both inputs for source and target
+ // rects. We might be able to skip some of these,
+ // but it's not that important to optimize here; a
+ // loose fit is fine.
+ remapped_inputs[0].source_padding =
+ remapped_inputs[0].source_padding
+ .inflate(padding.width, padding.height);
+ remapped_inputs[1].source_padding =
+ remapped_inputs[1].source_padding
+ .inflate(padding.width, padding.height);
+ remapped_inputs[0].target_padding =
+ remapped_inputs[0].target_padding
+ .inflate(padding.width, padding.height);
+ remapped_inputs[1].target_padding =
+ remapped_inputs[1].target_padding
+ .inflate(padding.width, padding.height);
+ newnode.inputs = remapped_inputs;
+ filters.push((newnode.clone(), op.clone()));
+ },
+ FilterGraphOp::SVGFEDropShadow{ dx, dy, std_deviation_x, std_deviation_y, .. } => {
+ assert!(remapped_inputs.len() == 1);
+ let padding = LayoutSize::new(
+ std_deviation_x.ceil() * BLUR_SAMPLE_SCALE,
+ std_deviation_y.ceil() * BLUR_SAMPLE_SCALE,
+ );
+ // Add source padding to represent the shadow
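+                        // The source rect has to cover both the unshifted
+                        // input (for the base image) and the blurred shadow,
+                        // which samples pixels offset by (-dx, -dy), so we
+                        // union the existing padding with an inflated copy
+                        // translated by the negated offset.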
+ remapped_inputs[0].source_padding =
+ union_unchecked(
+ remapped_inputs[0].source_padding,
+ remapped_inputs[0].source_padding
+ .inflate(padding.width, padding.height)
+ .translate(
+ LayoutVector2D::new(-dx, -dy)
+ )
+ );
+ // Add target padding to represent the area needed
+ // to calculate pixels of the shadow
+ remapped_inputs[0].target_padding =
+ union_unchecked(
+ remapped_inputs[0].target_padding,
+ remapped_inputs[0].target_padding
+ .inflate(padding.width, padding.height)
+ .translate(
+ LayoutVector2D::new(*dx, *dy)
+ )
+ );
+ newnode.inputs = remapped_inputs;
+ filters.push((newnode.clone(), op.clone()));
+ },
+ FilterGraphOp::SVGFEGaussianBlur{std_deviation_x, std_deviation_y} => {
+ assert!(remapped_inputs.len() == 1);
+ let padding = LayoutSize::new(
+ std_deviation_x.ceil() * BLUR_SAMPLE_SCALE,
+ std_deviation_y.ceil() * BLUR_SAMPLE_SCALE,
+ );
+ // Add source padding to represent the blur
+ remapped_inputs[0].source_padding =
+ remapped_inputs[0].source_padding
+ .inflate(padding.width, padding.height);
+ // Add target padding to represent the blur
+ remapped_inputs[0].target_padding =
+ remapped_inputs[0].target_padding
+ .inflate(padding.width, padding.height);
+ newnode.inputs = remapped_inputs;
+ filters.push((newnode.clone(), op.clone()));
+ }
+ FilterGraphOp::SVGFEBlendColor |
+ FilterGraphOp::SVGFEBlendColorBurn |
+ FilterGraphOp::SVGFEBlendColorDodge |
+ FilterGraphOp::SVGFEBlendDarken |
+ FilterGraphOp::SVGFEBlendDifference |
+ FilterGraphOp::SVGFEBlendExclusion |
+ FilterGraphOp::SVGFEBlendHardLight |
+ FilterGraphOp::SVGFEBlendHue |
+ FilterGraphOp::SVGFEBlendLighten |
+                    FilterGraphOp::SVGFEBlendLuminosity |
+ FilterGraphOp::SVGFEBlendMultiply |
+ FilterGraphOp::SVGFEBlendNormal |
+ FilterGraphOp::SVGFEBlendOverlay |
+ FilterGraphOp::SVGFEBlendSaturation |
+ FilterGraphOp::SVGFEBlendScreen |
+ FilterGraphOp::SVGFEBlendSoftLight |
+ FilterGraphOp::SVGFECompositeArithmetic{..} |
+ FilterGraphOp::SVGFECompositeATop |
+ FilterGraphOp::SVGFECompositeIn |
+ FilterGraphOp::SVGFECompositeLighter |
+ FilterGraphOp::SVGFECompositeOut |
+ FilterGraphOp::SVGFECompositeOver |
+ FilterGraphOp::SVGFECompositeXOR => {
+ assert!(remapped_inputs.len() == 2);
+ newnode.inputs = remapped_inputs;
+ filters.push((newnode.clone(), op.clone()));
+ }
+ }
+
+ // Set the reference remapping for the last (or only) node
+ // that we just pushed
+ let id = (filters.len() - 1) as i16;
+ if let Some(pic) = reference_for_buffer_id.get_mut(original_id as usize) {
+ *pic = FilterGraphPictureReference {
+ buffer_id: FilterOpGraphPictureBufferId::BufferId(id),
+ subregion: newnode.subregion,
+ offset: LayoutVector2D::zero(),
+ inflate: newnode.inflate,
+ source_padding: LayoutRect::zero(),
+ target_padding: LayoutRect::zero(),
+ };
+ }
+ }
+ _ => {
+ panic!("wrap_prim_with_filters: Mixed SVG and CSS filters?")
+ }
+ }
+ }
+
+        // Push a special output node at the end. This will correctly handle
+        // the final subregion, which may not have the same bounds as the
+        // surface it is being blitted into, so it needs to properly handle
+        // the cropping and UvRectKind; it also has no inflate.
+ if filters.len() >= BUFFER_LIMIT {
+ // If the DAG is too large we drop it entirely
+ return source;
+ }
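+        // The output node is an SVGFEIdentity whose only input is the buffer
+        // produced for the last user-specified filter.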
+ let mut outputnode = FilterGraphNode {
+ kept_by_optimizer: true,
+ linear: false,
+ inflate: SVGFE_INFLATE_OUTPUT,
+ inputs: Vec::new(),
+ subregion: LayoutRect::max_rect(),
+ };
+ outputnode.inputs.push(reference_for_buffer_id[filter_ops.len() - 1]);
+ filters.push((
+ outputnode,
+ FilterGraphOp::SVGFEIdentity,
+ ));
+
+        // We want to optimize the filter DAG and then wrap it in a single
+        // picture; we will use a custom RenderTask method to process the DAG
+        // later, as there's not really an easy way to keep it as a series of
+        // pictures like CSS filters use.
+        //
+        // The main optimization we can do here is looking for feOffset
+        // filters we can merge away, because all of the node inputs support
+        // offset capability implicitly. We can also remove no-op (identity)
+        // filters if Gecko produced any.
+        //
+        // TODO: optimize the graph here
+
+        // Mark used graph nodes, starting at the last graph node. Since this
+        // is a DAG in sorted order, we can just iterate backwards and know we
+        // will visit every node before any of its inputs.
+        //
+        // Per the SVG spec, the last node (which is the first we encounter
+        // this way) is the final output, so its dependencies are what we want
+        // to mark as kept_by_optimizer.
+ let mut kept_node_by_buffer_id = [false; BUFFER_LIMIT];
+ kept_node_by_buffer_id[filters.len() - 1] = true;
+ for (index, (node, _op)) in filters.iter_mut().enumerate().rev() {
+ let mut keep = false;
+ // Check if this node's output was marked to be kept
+ if let Some(k) = kept_node_by_buffer_id.get(index) {
+ if *k {
+ keep = true;
+ }
+ }
+ if keep {
+ // If this node contributes to the final output we need
+ // to mark its inputs as also contributing when they are
+ // encountered later
+ node.kept_by_optimizer = true;
+ for input in &node.inputs {
+ if let FilterOpGraphPictureBufferId::BufferId(id) = input.buffer_id {
+ if let Some(k) = kept_node_by_buffer_id.get_mut(id as usize) {
+ *k = true;
+ }
+ }
+ }
+ }
+ }
+
+ // Validate the DAG nature of the graph again - if we find anything
+ // wrong here it means the above code is bugged.
+ let mut invalid_dag = false;
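+        // A node may only reference buffers with a strictly smaller index
+        // than its own, which is what guarantees the graph is acyclic and
+        // already in dependency order.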
+ for (id, (node, _op)) in filters.iter().enumerate() {
+ for input in &node.inputs {
+ if let FilterOpGraphPictureBufferId::BufferId(buffer_id) = input.buffer_id {
+ if buffer_id < 0 || buffer_id as usize >= id {
+ invalid_dag = true;
+ }
+ }
+ }
+ }
+
+ if invalid_dag {
+ log!(Level::Warn, "List of FilterOp::SVGGraphNode filter primitives appears to be invalid!");
+ for (id, (node, op)) in filters.iter().enumerate() {
+ log!(Level::Warn, " node: buffer=BufferId({}) op={} inflate={} subregion {:?} linear={} kept={}",
+ id, op.kind(), node.inflate,
+ node.subregion,
+ node.linear,
+ node.kept_by_optimizer,
+ );
+ for input in &node.inputs {
+ log!(Level::Warn, "input: buffer={} inflate={} subregion {:?} offset {:?} target_padding={:?} source_padding={:?}",
+ match input.buffer_id {
+ FilterOpGraphPictureBufferId::BufferId(id) => format!("BufferId({})", id),
+ FilterOpGraphPictureBufferId::None => "None".into(),
+ },
+ input.inflate,
+ input.subregion,
+ input.offset,
+ input.target_padding,
+ input.source_padding,
+ );
+ }
+ }
+ }
+ if invalid_dag {
+ // if the DAG is invalid, we can't render it
+ return source;
+ }
+
+ let composite_mode = PictureCompositeMode::SVGFEGraph(
+ filters,
+ );
+
+ source = source.add_picture(
+ composite_mode,
+ clip_node_id,
+ Picture3DContext::Out,
+ &mut self.interners,
+ &mut self.prim_store,
+ &mut self.prim_instances,
+ &mut self.clip_tree_builder,
+ );
+
+ return source;
+ }
+
+ // Handle regular CSS filter chains
for filter in &mut filter_ops {
let composite_mode = match filter {
Filter::ComponentTransfer => {
@@ -3806,6 +4309,10 @@ impl<'a> SceneBuilder<'a> {
PictureCompositeMode::ComponentTransferFilter(handle)
}
}
+ Filter::SVGGraphNode(_, _) => {
+ // SVG filter graphs were handled above
+ panic!("SVGGraphNode encountered in regular CSS filter chain?");
+ }
_ => {
if filter.is_noop() {
continue;
diff --git a/gfx/wr/webrender/src/spatial_node.rs b/gfx/wr/webrender/src/spatial_node.rs
index 6bf1313e0d..727ad405ed 100644
--- a/gfx/wr/webrender/src/spatial_node.rs
+++ b/gfx/wr/webrender/src/spatial_node.rs
@@ -518,17 +518,46 @@ impl SpatialNode {
self.viewport_transform = cs_scale_offset;
self.content_transform = cs_scale_offset;
}
- _ => {
- // We calculate this here to avoid a double-borrow later.
- let sticky_offset = self.calculate_sticky_offset(
+ SpatialNodeType::StickyFrame(ref mut info) => {
+ let animated_offset = if let Some(transform_binding) = info.transform {
+ let transform = scene_properties.resolve_layout_transform(&transform_binding);
+ match ScaleOffset::from_transform(&transform) {
+ Some(ref scale_offset) => {
+ debug_assert!(scale_offset.scale == Vector2D::new(1.0, 1.0),
+ "Can only animate a translation on sticky elements");
+ LayoutVector2D::from_untyped(scale_offset.offset)
+ }
+ None => {
+ debug_assert!(false, "Can only animate a translation on sticky elements");
+ LayoutVector2D::zero()
+ }
+ }
+ } else {
+ LayoutVector2D::zero()
+ };
+
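+                // The animated translation, if any, is added on top of the
+                // sticky offset when accumulating the viewport offset below.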
+ let sticky_offset = Self::calculate_sticky_offset(
&state.nearest_scrolling_ancestor_offset,
&state.nearest_scrolling_ancestor_viewport,
+ info,
);
// The transformation for the bounds of our viewport is the parent reference frame
// transform, plus any accumulated scroll offset from our parents, plus any offset
// provided by our own sticky positioning.
- let accumulated_offset = state.parent_accumulated_scroll_offset + sticky_offset;
+ let accumulated_offset = state.parent_accumulated_scroll_offset + sticky_offset + animated_offset;
+ self.viewport_transform = state.coordinate_system_relative_scale_offset
+ .offset(snap_offset(accumulated_offset, state.coordinate_system_relative_scale_offset.scale).to_untyped());
+ self.content_transform = self.viewport_transform;
+
+ info.current_offset = sticky_offset + animated_offset;
+
+ self.coordinate_system_id = state.current_coordinate_system_id;
+ }
+ SpatialNodeType::ScrollFrame(_) => {
+ // The transformation for the bounds of our viewport is the parent reference frame
+ // transform, plus any accumulated scroll offset from our parents.
+ let accumulated_offset = state.parent_accumulated_scroll_offset;
self.viewport_transform = state.coordinate_system_relative_scale_offset
.offset(snap_offset(accumulated_offset, state.coordinate_system_relative_scale_offset.scale).to_untyped());
@@ -538,12 +567,8 @@ impl SpatialNode {
self.content_transform = state.coordinate_system_relative_scale_offset
.offset(snap_offset(added_offset, state.coordinate_system_relative_scale_offset.scale).to_untyped());
- if let SpatialNodeType::StickyFrame(ref mut info) = self.node_type {
- info.current_offset = sticky_offset;
- }
-
self.coordinate_system_id = state.current_coordinate_system_id;
- }
+ }
}
//TODO: remove the field entirely?
@@ -555,15 +580,10 @@ impl SpatialNode {
}
fn calculate_sticky_offset(
- &self,
viewport_scroll_offset: &LayoutVector2D,
viewport_rect: &LayoutRect,
+ info: &StickyFrameInfo
) -> LayoutVector2D {
- let info = match self.node_type {
- SpatialNodeType::StickyFrame(ref info) => info,
- _ => return LayoutVector2D::zero(),
- };
-
if info.margins.top.is_none() && info.margins.bottom.is_none() &&
info.margins.left.is_none() && info.margins.right.is_none() {
return LayoutVector2D::zero();
@@ -885,12 +905,13 @@ pub struct ReferenceFrameInfo {
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct StickyFrameInfo {
- pub frame_rect: LayoutRect,
- pub margins: SideOffsets2D<Option<f32>, LayoutPixel>,
+ pub margins: SideOffsets2D<Option<f32>, LayoutPixel>,
+ pub frame_rect: LayoutRect,
pub vertical_offset_bounds: StickyOffsetBounds,
pub horizontal_offset_bounds: StickyOffsetBounds,
pub previously_applied_offset: LayoutVector2D,
pub current_offset: LayoutVector2D,
+ pub transform: Option<PropertyBinding<LayoutTransform>>,
}
impl StickyFrameInfo {
@@ -899,7 +920,8 @@ impl StickyFrameInfo {
margins: SideOffsets2D<Option<f32>, LayoutPixel>,
vertical_offset_bounds: StickyOffsetBounds,
horizontal_offset_bounds: StickyOffsetBounds,
- previously_applied_offset: LayoutVector2D
+ previously_applied_offset: LayoutVector2D,
+ transform: Option<PropertyBinding<LayoutTransform>>,
) -> StickyFrameInfo {
StickyFrameInfo {
frame_rect,
@@ -908,6 +930,7 @@ impl StickyFrameInfo {
horizontal_offset_bounds,
previously_applied_offset,
current_offset: LayoutVector2D::zero(),
+ transform,
}
}
}
diff --git a/gfx/wr/webrender/src/spatial_tree.rs b/gfx/wr/webrender/src/spatial_tree.rs
index 0aa6bb5296..94e934ca53 100644
--- a/gfx/wr/webrender/src/spatial_tree.rs
+++ b/gfx/wr/webrender/src/spatial_tree.rs
@@ -335,9 +335,11 @@ impl SceneSpatialTree {
pub fn find_scroll_root(
&self,
spatial_node_index: SpatialNodeIndex,
+ allow_sticky_frames: bool,
) -> SpatialNodeIndex {
let mut real_scroll_root = self.root_reference_frame_index;
let mut outermost_scroll_root = self.root_reference_frame_index;
+ let mut current_scroll_root_is_sticky = false;
let mut node_index = spatial_node_index;
while node_index != self.root_reference_frame_index {
@@ -354,10 +356,21 @@ impl SceneSpatialTree {
// we have encountered, as they may end up with a non-axis-aligned transform.
real_scroll_root = self.root_reference_frame_index;
outermost_scroll_root = self.root_reference_frame_index;
+ current_scroll_root_is_sticky = false;
}
}
}
- SpatialNodeType::StickyFrame(..) => {}
+ SpatialNodeType::StickyFrame(..) => {
+ // Though not a scroll frame, we optionally treat sticky frames as scroll roots
+ // to ensure they are given a separate picture cache slice.
+ if allow_sticky_frames {
+ outermost_scroll_root = node_index;
+ real_scroll_root = node_index;
+                    // Set this to true so that we don't select an ancestor scroll frame as the
+                    // scroll root on a subsequent iteration.
+ current_scroll_root_is_sticky = true;
+ }
+ }
SpatialNodeType::ScrollFrame(ref info) => {
match info.frame_kind {
ScrollFrameKind::PipelineRoot { is_root_pipeline } => {
@@ -371,24 +384,29 @@ impl SceneSpatialTree {
// later on, even if it's not actually scrollable.
outermost_scroll_root = node_index;
- // If the scroll root has no scrollable area, we don't want to
- // consider it. This helps pages that have a nested scroll root
- // within a redundant scroll root to avoid selecting the wrong
- // reference spatial node for a picture cache.
- if info.scrollable_size.width > MIN_SCROLLABLE_AMOUNT ||
- info.scrollable_size.height > MIN_SCROLLABLE_AMOUNT {
- // Since we are skipping redundant scroll roots, we may end up
- // selecting inner scroll roots that are very small. There is
- // no performance benefit to creating a slice for these roots,
- // as they are cheap to rasterize. The size comparison is in
- // local-space, but makes for a reasonable estimate. The value
- // is arbitrary, but is generally small enough to ignore things
- // like scroll roots around text input elements.
- if info.viewport_rect.width() > MIN_SCROLL_ROOT_SIZE &&
- info.viewport_rect.height() > MIN_SCROLL_ROOT_SIZE {
- // If we've found a root that is scrollable, and a reasonable
- // size, select that as the current root for this node
- real_scroll_root = node_index;
+ // If the previously identified scroll root is sticky then we don't
+ // want to choose an ancestor scroll root, as we want the sticky item
+ // to have its own picture cache slice.
+ if !current_scroll_root_is_sticky {
+ // If the scroll root has no scrollable area, we don't want to
+ // consider it. This helps pages that have a nested scroll root
+ // within a redundant scroll root to avoid selecting the wrong
+ // reference spatial node for a picture cache.
+ if info.scrollable_size.width > MIN_SCROLLABLE_AMOUNT ||
+ info.scrollable_size.height > MIN_SCROLLABLE_AMOUNT {
+ // Since we are skipping redundant scroll roots, we may end up
+ // selecting inner scroll roots that are very small. There is
+ // no performance benefit to creating a slice for these roots,
+ // as they are cheap to rasterize. The size comparison is in
+ // local-space, but makes for a reasonable estimate. The value
+ // is arbitrary, but is generally small enough to ignore things
+ // like scroll roots around text input elements.
+ if info.viewport_rect.width() > MIN_SCROLL_ROOT_SIZE &&
+ info.viewport_rect.height() > MIN_SCROLL_ROOT_SIZE {
+ // If we've found a root that is scrollable, and a reasonable
+ // size, select that as the current root for this node
+ real_scroll_root = node_index;
+ }
}
}
}
@@ -1732,7 +1750,7 @@ fn test_find_scroll_root_simple() {
SpatialNodeUid::external(SpatialTreeItemKey::new(0, 1), PipelineId::dummy(), pid),
);
- assert_eq!(st.find_scroll_root(scroll), scroll);
+ assert_eq!(st.find_scroll_root(scroll, true), scroll);
}
/// Tests that we select the root scroll frame rather than the subframe if both are scrollable.
@@ -1781,7 +1799,7 @@ fn test_find_scroll_root_sub_scroll_frame() {
SpatialNodeUid::external(SpatialTreeItemKey::new(0, 2), PipelineId::dummy(), pid),
);
- assert_eq!(st.find_scroll_root(sub_scroll), root_scroll);
+ assert_eq!(st.find_scroll_root(sub_scroll, true), root_scroll);
}
/// Tests that we select the sub scroll frame when the root scroll frame is not scrollable.
@@ -1830,7 +1848,7 @@ fn test_find_scroll_root_not_scrollable() {
SpatialNodeUid::external(SpatialTreeItemKey::new(0, 2), PipelineId::dummy(), pid),
);
- assert_eq!(st.find_scroll_root(sub_scroll), sub_scroll);
+ assert_eq!(st.find_scroll_root(sub_scroll, true), sub_scroll);
}
/// Tests that we select the sub scroll frame when the root scroll frame is too small.
@@ -1879,7 +1897,7 @@ fn test_find_scroll_root_too_small() {
SpatialNodeUid::external(SpatialTreeItemKey::new(0, 2), PipelineId::dummy(), pid),
);
- assert_eq!(st.find_scroll_root(sub_scroll), sub_scroll);
+ assert_eq!(st.find_scroll_root(sub_scroll, true), sub_scroll);
}
/// Tests that we select the root scroll node, even if it is not scrollable,
@@ -1941,7 +1959,7 @@ fn test_find_scroll_root_perspective() {
SpatialNodeUid::external(SpatialTreeItemKey::new(0, 3), PipelineId::dummy(), pid),
);
- assert_eq!(st.find_scroll_root(sub_scroll), root_scroll);
+ assert_eq!(st.find_scroll_root(sub_scroll, true), root_scroll);
}
/// Tests that encountering a 2D scale or translation transform does not prevent
@@ -2005,7 +2023,61 @@ fn test_find_scroll_root_2d_scale() {
SpatialNodeUid::external(SpatialTreeItemKey::new(0, 3), PipelineId::dummy(), pid),
);
- assert_eq!(st.find_scroll_root(sub_scroll), sub_scroll);
+ assert_eq!(st.find_scroll_root(sub_scroll, true), sub_scroll);
+}
+
+/// Tests that a sticky spatial node is chosen as the scroll root rather than
+/// its parent scroll frame.
+#[test]
+fn test_find_scroll_root_sticky() {
+ let mut st = SceneSpatialTree::new();
+ let pid = PipelineInstanceId::new(0);
+
+ let root = st.add_reference_frame(
+ st.root_reference_frame_index(),
+ TransformStyle::Flat,
+ PropertyBinding::Value(LayoutTransform::identity()),
+ ReferenceFrameKind::Transform {
+ is_2d_scale_translation: true,
+ should_snap: true,
+ paired_with_perspective: false,
+ },
+ LayoutVector2D::new(0.0, 0.0),
+ PipelineId::dummy(),
+ SpatialNodeUid::external(SpatialTreeItemKey::new(0, 0), PipelineId::dummy(), pid),
+ );
+
+ let scroll = st.add_scroll_frame(
+ root,
+ ExternalScrollId(1, PipelineId::dummy()),
+ PipelineId::dummy(),
+ &LayoutRect::from_size(LayoutSize::new(400.0, 400.0)),
+ &LayoutSize::new(400.0, 800.0),
+ ScrollFrameKind::Explicit,
+ LayoutVector2D::new(0.0, 0.0),
+ APZScrollGeneration::default(),
+ HasScrollLinkedEffect::No,
+ SpatialNodeUid::external(SpatialTreeItemKey::new(0, 1), PipelineId::dummy(), pid),
+ );
+
+ let sticky = st.add_sticky_frame(
+ scroll,
+ StickyFrameInfo {
+ frame_rect: LayoutRect::from_size(LayoutSize::new(400.0, 100.0)),
+ margins: euclid::SideOffsets2D::new(Some(0.0), None, None, None),
+ vertical_offset_bounds: api::StickyOffsetBounds::new(0.0, 0.0),
+ horizontal_offset_bounds: api::StickyOffsetBounds::new(0.0, 0.0),
+ previously_applied_offset: LayoutVector2D::zero(),
+ current_offset: LayoutVector2D::zero(),
+ transform: None
+ },
+ PipelineId::dummy(),
+ SpatialTreeItemKey::new(0, 2),
+ pid,
+ );
+
+ assert_eq!(st.find_scroll_root(sticky, true), sticky);
+ assert_eq!(st.find_scroll_root(sticky, false), scroll);
}
#[test]
diff --git a/gfx/wr/webrender/src/tile_cache.rs b/gfx/wr/webrender/src/tile_cache.rs
index 89f42cfe21..a3c1ad233a 100644
--- a/gfx/wr/webrender/src/tile_cache.rs
+++ b/gfx/wr/webrender/src/tile_cache.rs
@@ -226,6 +226,7 @@ impl TileCacheBuilder {
cluster.spatial_node_index,
&mut self.prev_scroll_root_cache,
spatial_tree,
+ true,
);
*scroll_root_occurrences.entry(scroll_root).or_insert(0) += 1;
@@ -324,6 +325,9 @@ impl TileCacheBuilder {
spatial_node_index,
&mut self.prev_scroll_root_cache,
spatial_tree,
+ // Allow sticky frames as scroll roots, unless our quality settings prefer
+ // subpixel AA over performance.
+ !quality_settings.force_subpixel_aa_where_possible,
);
let current_scroll_root = secondary_slices
@@ -369,6 +373,7 @@ impl TileCacheBuilder {
clip_node_data.key.spatial_node_index,
&mut self.prev_scroll_root_cache,
spatial_tree,
+ true,
);
if spatial_root != self.root_spatial_node_index {
@@ -509,12 +514,13 @@ fn find_scroll_root(
spatial_node_index: SpatialNodeIndex,
prev_scroll_root_cache: &mut (SpatialNodeIndex, SpatialNodeIndex),
spatial_tree: &SceneSpatialTree,
+ allow_sticky_frames: bool,
) -> SpatialNodeIndex {
if prev_scroll_root_cache.0 == spatial_node_index {
return prev_scroll_root_cache.1;
}
- let scroll_root = spatial_tree.find_scroll_root(spatial_node_index);
+ let scroll_root = spatial_tree.find_scroll_root(spatial_node_index, allow_sticky_frames);
*prev_scroll_root_cache = (spatial_node_index, scroll_root);
scroll_root