diff options
Diffstat (limited to 'gfx/wr/webrender/src/render_task.rs')
-rw-r--r-- | gfx/wr/webrender/src/render_task.rs | 1295 |
1 files changed, 1266 insertions, 29 deletions
diff --git a/gfx/wr/webrender/src/render_task.rs b/gfx/wr/webrender/src/render_task.rs index bf9050712c..5106971591 100644 --- a/gfx/wr/webrender/src/render_task.rs +++ b/gfx/wr/webrender/src/render_task.rs @@ -3,19 +3,20 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use api::{CompositeOperator, FilterPrimitive, FilterPrimitiveInput, FilterPrimitiveKind}; -use api::{LineStyle, LineOrientation, ClipMode, MixBlendMode, ColorF, ColorSpace}; +use api::{LineStyle, LineOrientation, ClipMode, MixBlendMode, ColorF, ColorSpace, FilterOpGraphPictureBufferId}; use api::MAX_RENDER_TASK_SIZE; use api::units::*; +use crate::box_shadow::BLUR_SAMPLE_SCALE; use crate::clip::{ClipDataStore, ClipItemKind, ClipStore, ClipNodeRange}; use crate::command_buffer::{CommandBufferIndex, QuadFlags}; use crate::pattern::{PatternKind, PatternShaderInput}; use crate::spatial_tree::SpatialNodeIndex; use crate::filterdata::SFilterData; -use crate::frame_builder::FrameBuilderConfig; +use crate::frame_builder::{FrameBuilderConfig, FrameBuildingState}; use crate::gpu_cache::{GpuCache, GpuCacheAddress, GpuCacheHandle}; use crate::gpu_types::{BorderInstance, ImageSource, UvRectKind, TransformPaletteId}; -use crate::internal_types::{CacheTextureId, FastHashMap, TextureSource, Swizzle}; -use crate::picture::ResolvedSurfaceTexture; +use crate::internal_types::{CacheTextureId, FastHashMap, FilterGraphNode, FilterGraphOp, FilterGraphPictureReference, SVGFE_CONVOLVE_VALUES_LIMIT, TextureSource, Swizzle}; +use crate::picture::{ResolvedSurfaceTexture, MAX_SURFACE_SIZE}; use crate::prim_store::ClipData; use crate::prim_store::gradient::{ FastLinearGradientTask, RadialGradientTask, @@ -24,6 +25,7 @@ use crate::prim_store::gradient::{ use crate::resource_cache::{ResourceCache, ImageRequest}; use std::{usize, f32, i32, u32}; use crate::renderer::{GpuBufferAddress, GpuBufferBuilderF}; +use crate::render_backend::DataStores; use crate::render_target::{ResolveOp, RenderTargetKind}; use 
crate::render_task_graph::{PassId, RenderTaskId, RenderTaskGraphBuilder}; use crate::render_task_cache::{RenderTaskCacheEntryHandle, RenderTaskCacheKey, RenderTaskCacheKeyKind, RenderTaskParent}; @@ -190,7 +192,6 @@ pub struct PrimTask { pub device_pixel_scale: DevicePixelScale, pub content_origin: DevicePoint, pub prim_address_f: GpuBufferAddress, - pub prim_spatial_node_index: SpatialNodeIndex, pub raster_spatial_node_index: SpatialNodeIndex, pub transform_id: TransformPaletteId, pub edge_flags: EdgeAaSegmentMask, @@ -335,6 +336,16 @@ pub struct SvgFilterTask { pub extra_gpu_cache_handle: Option<GpuCacheHandle>, } +#[derive(Debug)] +#[cfg_attr(feature = "capture", derive(Serialize))] +#[cfg_attr(feature = "replay", derive(Deserialize))] +pub struct SVGFEFilterTask { + pub node: FilterGraphNode, + pub op: FilterGraphOp, + pub content_origin: DevicePoint, + pub extra_gpu_cache_handle: Option<GpuCacheHandle>, +} + #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct ReadbackTask { @@ -372,6 +383,7 @@ pub enum RenderTaskKind { RadialGradient(RadialGradientTask), ConicGradient(ConicGradientTask), SvgFilter(SvgFilterTask), + SVGFENode(SVGFEFilterTask), TileComposite(TileCompositeTask), Prim(PrimTask), Empty(EmptyTask), @@ -423,6 +435,7 @@ impl RenderTaskKind { RenderTaskKind::RadialGradient(..) => "RadialGradient", RenderTaskKind::ConicGradient(..) => "ConicGradient", RenderTaskKind::SvgFilter(..) => "SvgFilter", + RenderTaskKind::SVGFENode(..) => "SVGFENode", RenderTaskKind::TileComposite(..) => "TileComposite", RenderTaskKind::Prim(..) => "Prim", RenderTaskKind::Empty(..) => "Empty", @@ -448,6 +461,9 @@ impl RenderTaskKind { RenderTaskKind::SvgFilter(..) => { RenderTargetKind::Color } + RenderTaskKind::SVGFENode(..) => { + RenderTargetKind::Color + } RenderTaskKind::ClipRegion(..) | RenderTaskKind::CacheMask(..) 
| @@ -521,7 +537,6 @@ impl RenderTaskKind { pub fn new_prim( pattern: PatternKind, pattern_input: PatternShaderInput, - prim_spatial_node_index: SpatialNodeIndex, raster_spatial_node_index: SpatialNodeIndex, device_pixel_scale: DevicePixelScale, content_origin: DevicePoint, @@ -535,7 +550,6 @@ impl RenderTaskKind { RenderTaskKind::Prim(PrimTask { pattern, pattern_input, - prim_spatial_node_index, raster_spatial_node_index, device_pixel_scale, content_origin, @@ -791,6 +805,11 @@ impl RenderTaskKind { _ => [0.0; 4] } } + RenderTaskKind::SVGFENode(_task) => { + // we don't currently use this for SVGFE filters. + // see SVGFEFilterInstance instead + [0.0; 4] + } #[cfg(test)] RenderTaskKind::Test(..) => { @@ -816,39 +835,138 @@ impl RenderTaskKind { &mut self, gpu_cache: &mut GpuCache, ) { - if let RenderTaskKind::SvgFilter(ref mut filter_task) = self { - match filter_task.info { - SvgFilterInfo::ColorMatrix(ref matrix) => { - let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new); - if let Some(mut request) = gpu_cache.request(handle) { - for i in 0..5 { - request.push([matrix[i*4], matrix[i*4+1], matrix[i*4+2], matrix[i*4+3]]); + match self { + RenderTaskKind::SvgFilter(ref mut filter_task) => { + match filter_task.info { + SvgFilterInfo::ColorMatrix(ref matrix) => { + let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new); + if let Some(mut request) = gpu_cache.request(handle) { + for i in 0..5 { + request.push([matrix[i*4], matrix[i*4+1], matrix[i*4+2], matrix[i*4+3]]); + } } } - } - SvgFilterInfo::DropShadow(color) | - SvgFilterInfo::Flood(color) => { - let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new); - if let Some(mut request) = gpu_cache.request(handle) { - request.push(color.to_array()); + SvgFilterInfo::DropShadow(color) | + SvgFilterInfo::Flood(color) => { + let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new); + if let 
Some(mut request) = gpu_cache.request(handle) { + request.push(color.to_array()); + } } - } - SvgFilterInfo::ComponentTransfer(ref data) => { - let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new); - if let Some(request) = gpu_cache.request(handle) { - data.update(request); + SvgFilterInfo::ComponentTransfer(ref data) => { + let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new); + if let Some(request) = gpu_cache.request(handle) { + data.update(request); + } } + SvgFilterInfo::Composite(ref operator) => { + if let CompositeOperator::Arithmetic(k_vals) = operator { + let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new); + if let Some(mut request) = gpu_cache.request(handle) { + request.push(*k_vals); + } + } + } + _ => {}, } - SvgFilterInfo::Composite(ref operator) => { - if let CompositeOperator::Arithmetic(k_vals) = operator { + } + RenderTaskKind::SVGFENode(ref mut filter_task) => { + match filter_task.op { + FilterGraphOp::SVGFEBlendDarken => {} + FilterGraphOp::SVGFEBlendLighten => {} + FilterGraphOp::SVGFEBlendMultiply => {} + FilterGraphOp::SVGFEBlendNormal => {} + FilterGraphOp::SVGFEBlendScreen => {} + FilterGraphOp::SVGFEBlendOverlay => {} + FilterGraphOp::SVGFEBlendColorDodge => {} + FilterGraphOp::SVGFEBlendColorBurn => {} + FilterGraphOp::SVGFEBlendHardLight => {} + FilterGraphOp::SVGFEBlendSoftLight => {} + FilterGraphOp::SVGFEBlendDifference => {} + FilterGraphOp::SVGFEBlendExclusion => {} + FilterGraphOp::SVGFEBlendHue => {} + FilterGraphOp::SVGFEBlendSaturation => {} + FilterGraphOp::SVGFEBlendColor => {} + FilterGraphOp::SVGFEBlendLuminosity => {} + FilterGraphOp::SVGFEColorMatrix{values: matrix} => { + let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new); + if let Some(mut request) = gpu_cache.request(handle) { + for i in 0..5 { + request.push([matrix[i*4], matrix[i*4+1], matrix[i*4+2], matrix[i*4+3]]); + } + } 
+ } + FilterGraphOp::SVGFEComponentTransfer => unreachable!(), + FilterGraphOp::SVGFEComponentTransferInterned{..} => {} + FilterGraphOp::SVGFECompositeArithmetic{k1, k2, k3, k4} => { + let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new); + if let Some(mut request) = gpu_cache.request(handle) { + request.push([k1, k2, k3, k4]); + } + } + FilterGraphOp::SVGFECompositeATop => {} + FilterGraphOp::SVGFECompositeIn => {} + FilterGraphOp::SVGFECompositeLighter => {} + FilterGraphOp::SVGFECompositeOut => {} + FilterGraphOp::SVGFECompositeOver => {} + FilterGraphOp::SVGFECompositeXOR => {} + FilterGraphOp::SVGFEConvolveMatrixEdgeModeDuplicate{order_x, order_y, kernel, divisor, bias, target_x, target_y, kernel_unit_length_x, kernel_unit_length_y, preserve_alpha} | + FilterGraphOp::SVGFEConvolveMatrixEdgeModeNone{order_x, order_y, kernel, divisor, bias, target_x, target_y, kernel_unit_length_x, kernel_unit_length_y, preserve_alpha} | + FilterGraphOp::SVGFEConvolveMatrixEdgeModeWrap{order_x, order_y, kernel, divisor, bias, target_x, target_y, kernel_unit_length_x, kernel_unit_length_y, preserve_alpha} => { + let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new); + if let Some(mut request) = gpu_cache.request(handle) { + request.push([-target_x as f32, -target_y as f32, order_x as f32, order_y as f32]); + request.push([kernel_unit_length_x as f32, kernel_unit_length_y as f32, 1.0 / divisor, bias]); + assert!(SVGFE_CONVOLVE_VALUES_LIMIT == 25); + request.push([kernel[0], kernel[1], kernel[2], kernel[3]]); + request.push([kernel[4], kernel[5], kernel[6], kernel[7]]); + request.push([kernel[8], kernel[9], kernel[10], kernel[11]]); + request.push([kernel[12], kernel[13], kernel[14], kernel[15]]); + request.push([kernel[16], kernel[17], kernel[18], kernel[19]]); + request.push([kernel[20], 0.0, 0.0, preserve_alpha as f32]); + } + } + FilterGraphOp::SVGFEDiffuseLightingDistant{..} => {} + 
FilterGraphOp::SVGFEDiffuseLightingPoint{..} => {} + FilterGraphOp::SVGFEDiffuseLightingSpot{..} => {} + FilterGraphOp::SVGFEDisplacementMap{scale, x_channel_selector, y_channel_selector} => { + let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new); + if let Some(mut request) = gpu_cache.request(handle) { + request.push([x_channel_selector as f32, y_channel_selector as f32, scale, 0.0]); + } + } + FilterGraphOp::SVGFEDropShadow{color, ..} | + FilterGraphOp::SVGFEFlood{color} => { + let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new); + if let Some(mut request) = gpu_cache.request(handle) { + request.push(color.to_array()); + } + } + FilterGraphOp::SVGFEGaussianBlur{..} => {} + FilterGraphOp::SVGFEIdentity => {} + FilterGraphOp::SVGFEImage{..} => {} + FilterGraphOp::SVGFEMorphologyDilate{radius_x, radius_y} | + FilterGraphOp::SVGFEMorphologyErode{radius_x, radius_y} => { let handle = filter_task.extra_gpu_cache_handle.get_or_insert_with(GpuCacheHandle::new); if let Some(mut request) = gpu_cache.request(handle) { - request.push(*k_vals); + request.push([radius_x, radius_y, 0.0, 0.0]); } } + FilterGraphOp::SVGFEOpacity{..} => {} + FilterGraphOp::SVGFESourceAlpha => {} + FilterGraphOp::SVGFESourceGraphic => {} + FilterGraphOp::SVGFESpecularLightingDistant{..} => {} + FilterGraphOp::SVGFESpecularLightingPoint{..} => {} + FilterGraphOp::SVGFESpecularLightingSpot{..} => {} + FilterGraphOp::SVGFETile => {} + FilterGraphOp::SVGFEToAlpha{..} => {} + FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithNoStitching{..} => {} + FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithStitching{..} => {} + FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithNoStitching{..} => {} + FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithStitching{..} => {} } - _ => {}, } + _ => {} } } } @@ -1510,6 +1628,1115 @@ impl RenderTask { self.sub_pass = Some(sub_pass); } + /// Creates render tasks from 
PictureCompositeMode::SVGFEGraph. + /// + /// The interesting parts of the handling of SVG filters are: + /// * scene_building.rs : wrap_prim_with_filters + /// * picture.rs : get_coverage_svgfe + /// * render_task.rs : new_svg_filter_graph (you are here) + /// * render_target.rs : add_svg_filter_node_instances + pub fn new_svg_filter_graph( + filter_nodes: &[(FilterGraphNode, FilterGraphOp)], + frame_state: &mut FrameBuildingState, + data_stores: &mut DataStores, + uv_rect_kind: UvRectKind, + original_task_id: RenderTaskId, + _surface_rects_task_size: DeviceIntSize, + surface_rects_clipped: DeviceRect, + surface_rects_clipped_local: PictureRect, + ) -> RenderTaskId { + const BUFFER_LIMIT: usize = 256; + let mut task_by_buffer_id: [RenderTaskId; BUFFER_LIMIT] = [RenderTaskId::INVALID; BUFFER_LIMIT]; + let mut subregion_by_buffer_id: [LayoutRect; BUFFER_LIMIT] = [LayoutRect::zero(); BUFFER_LIMIT]; + // If nothing replaces this value (all node subregions are empty), we + // can just return the original picture + let mut output_task_id = original_task_id; + + // By this point we assume the following about the graph: + // * BUFFER_LIMIT here should be >= BUFFER_LIMIT in the scene_building.rs code. + // * input buffer id < output buffer id + // * output buffer id between 0 and BUFFER_LIMIT + // * the number of filter_datas matches the number of kept nodes with op + // SVGFEComponentTransfer. + // + // These assumptions are verified with asserts in this function as + // appropriate. + + // Converts a UvRectKind::Quad to a subregion, we need this for + // SourceGraphic because it could source from a larger image when doing + // a dirty rect update. In theory this can be used for blur output as + // well but it doesn't seem to be necessary from early testing. + // + // See calculate_uv_rect_kind in picture.rs for how these were generated. 
+ fn subregion_for_uvrectkind(kind: &UvRectKind, rect: LayoutRect) -> LayoutRect { + let used = + match kind { + UvRectKind::Quad{top_left: tl, top_right: _tr, bottom_left: _bl, bottom_right: br} => { + LayoutRect::new( + LayoutPoint::new( + rect.min.x + rect.width() * tl.x / tl.w, + rect.min.y + rect.height() * tl.y / tl.w, + ), + LayoutPoint::new( + rect.min.x + rect.width() * br.x / br.w, + rect.min.y + rect.height() * br.y / br.w, + ), + ) + } + UvRectKind::Rect => { + rect + } + }; + // For some reason, the following test passes a uv_rect_kind that + // resolves to [-.2, -.2, -.2, -.2] + // reftest layout/reftests/svg/filters/dynamic-filter-invalidation-01.svg + match used.is_empty() { + true => rect, + false => used, + } + } + + // Make a UvRectKind::Quad that represents a task for a node, which may + // have an inflate border, must be a Quad because the surface_rects + // compositing shader expects it to be one, we don't actually use this + // internally as we use subregions, see calculate_uv_rect_kind for how + // this works, it projects from clipped rect to unclipped rect, where + // our clipped rect is simply task_size minus the inflate, and unclipped + // is our full task_size + fn uv_rect_kind_for_task_size(task_size: DeviceIntSize, inflate: i16) -> UvRectKind { + let unclipped = DeviceRect::new( + DevicePoint::new( + inflate as f32, + inflate as f32, + ), + DevicePoint::new( + task_size.width as f32 - inflate as f32, + task_size.height as f32 - inflate as f32, + ), + ); + let clipped = DeviceRect::new( + DevicePoint::zero(), + DevicePoint::new( + task_size.width as f32, + task_size.height as f32, + ), + ); + let scale_x = 1.0 / clipped.width(); + let scale_y = 1.0 / clipped.height(); + UvRectKind::Quad{ + top_left: DeviceHomogeneousVector::new( + (unclipped.min.x - clipped.min.x) * scale_x, + (unclipped.min.y - clipped.min.y) * scale_y, + 0.0, 1.0), + top_right: DeviceHomogeneousVector::new( + (unclipped.max.x - clipped.min.x) * scale_x, + 
(unclipped.min.y - clipped.min.y) * scale_y, + 0.0, 1.0), + bottom_left: DeviceHomogeneousVector::new( + (unclipped.min.x - clipped.min.x) * scale_x, + (unclipped.max.y - clipped.min.y) * scale_y, + 0.0, 1.0), + bottom_right: DeviceHomogeneousVector::new( + (unclipped.max.x - clipped.min.x) * scale_x, + (unclipped.max.y - clipped.min.y) * scale_y, + 0.0, 1.0), + } + } + + // Determine the local space to device pixel scaling in the most robust + // way, this accounts for local to device transform and + // device_pixel_scale (if the task is shrunk in get_surface_rects). + // + // This has some precision issues because surface_rects_clipped was + // rounded already, so it's not exactly the same transform that + // get_surface_rects performed, but it is very close, since it is not + // quite the same we have to round the offset a certain way to avoid + // introducing subpixel offsets caused by the slight deviation. + let subregion_to_device_scale_x = surface_rects_clipped.width() / surface_rects_clipped_local.width(); + let subregion_to_device_scale_y = surface_rects_clipped.height() / surface_rects_clipped_local.height(); + let subregion_to_device_offset_x = surface_rects_clipped.min.x - (surface_rects_clipped_local.min.x * subregion_to_device_scale_x).floor(); + let subregion_to_device_offset_y = surface_rects_clipped.min.y - (surface_rects_clipped_local.min.y * subregion_to_device_scale_y).floor(); + + // We will treat the entire SourceGraphic coordinate space as being this + // subregion, which is how large the source picture task is. 
+ let filter_subregion: LayoutRect = surface_rects_clipped.cast_unit(); + + // Calculate the used subregion (invalidation rect) for SourceGraphic + // that we are painting for, the intermediate task sizes are based on + // this portion of SourceGraphic, this also serves as a clip on the + // SourceGraphic, which is necessary for this reftest: + // layout/reftests/svg/filters/svg-filter-chains/clip-original-SourceGraphic.svg + let source_subregion = + subregion_for_uvrectkind( + &uv_rect_kind, + surface_rects_clipped.cast_unit(), + ) + .intersection(&filter_subregion) + .unwrap_or(LayoutRect::zero()) + .round_out(); + + // This is the rect for the output picture we are producing + let output_rect = filter_subregion.to_i32(); + // Output to the same subregion we were provided + let output_subregion = filter_subregion; + + // Iterate the filter nodes and create tasks + let mut made_dependency_on_source = false; + for (filter_index, (filter_node, op)) in filter_nodes.iter().enumerate() { + let node = &filter_node; + let is_output = filter_index == filter_nodes.len() - 1; + + // Note that this is never set on the final output by design. + if !node.kept_by_optimizer { + continue; + } + + // Certain ops have parameters that need to be scaled to device + // space. 
+ let op = match op { + FilterGraphOp::SVGFEBlendColor => op.clone(), + FilterGraphOp::SVGFEBlendColorBurn => op.clone(), + FilterGraphOp::SVGFEBlendColorDodge => op.clone(), + FilterGraphOp::SVGFEBlendDarken => op.clone(), + FilterGraphOp::SVGFEBlendDifference => op.clone(), + FilterGraphOp::SVGFEBlendExclusion => op.clone(), + FilterGraphOp::SVGFEBlendHardLight => op.clone(), + FilterGraphOp::SVGFEBlendHue => op.clone(), + FilterGraphOp::SVGFEBlendLighten => op.clone(), + FilterGraphOp::SVGFEBlendLuminosity => op.clone(), + FilterGraphOp::SVGFEBlendMultiply => op.clone(), + FilterGraphOp::SVGFEBlendNormal => op.clone(), + FilterGraphOp::SVGFEBlendOverlay => op.clone(), + FilterGraphOp::SVGFEBlendSaturation => op.clone(), + FilterGraphOp::SVGFEBlendScreen => op.clone(), + FilterGraphOp::SVGFEBlendSoftLight => op.clone(), + FilterGraphOp::SVGFEColorMatrix{..} => op.clone(), + FilterGraphOp::SVGFEComponentTransfer => unreachable!(), + FilterGraphOp::SVGFEComponentTransferInterned{..} => op.clone(), + FilterGraphOp::SVGFECompositeArithmetic{..} => op.clone(), + FilterGraphOp::SVGFECompositeATop => op.clone(), + FilterGraphOp::SVGFECompositeIn => op.clone(), + FilterGraphOp::SVGFECompositeLighter => op.clone(), + FilterGraphOp::SVGFECompositeOut => op.clone(), + FilterGraphOp::SVGFECompositeOver => op.clone(), + FilterGraphOp::SVGFECompositeXOR => op.clone(), + FilterGraphOp::SVGFEConvolveMatrixEdgeModeDuplicate{ + kernel_unit_length_x, kernel_unit_length_y, order_x, + order_y, kernel, divisor, bias, target_x, target_y, + preserve_alpha} => { + FilterGraphOp::SVGFEConvolveMatrixEdgeModeDuplicate{ + kernel_unit_length_x: + (kernel_unit_length_x * subregion_to_device_scale_x).round(), + kernel_unit_length_y: + (kernel_unit_length_y * subregion_to_device_scale_y).round(), + order_x: *order_x, order_y: *order_y, kernel: *kernel, + divisor: *divisor, bias: *bias, target_x: *target_x, + target_y: *target_y, preserve_alpha: *preserve_alpha} + }, + 
FilterGraphOp::SVGFEConvolveMatrixEdgeModeNone{ + kernel_unit_length_x, kernel_unit_length_y, order_x, + order_y, kernel, divisor, bias, target_x, target_y, + preserve_alpha} => { + FilterGraphOp::SVGFEConvolveMatrixEdgeModeNone{ + kernel_unit_length_x: + (kernel_unit_length_x * subregion_to_device_scale_x).round(), + kernel_unit_length_y: + (kernel_unit_length_y * subregion_to_device_scale_y).round(), + order_x: *order_x, order_y: *order_y, kernel: *kernel, + divisor: *divisor, bias: *bias, target_x: *target_x, + target_y: *target_y, preserve_alpha: *preserve_alpha} + }, + FilterGraphOp::SVGFEConvolveMatrixEdgeModeWrap{ + kernel_unit_length_x, kernel_unit_length_y, order_x, + order_y, kernel, divisor, bias, target_x, target_y, + preserve_alpha} => { + FilterGraphOp::SVGFEConvolveMatrixEdgeModeWrap{ + kernel_unit_length_x: + (kernel_unit_length_x * subregion_to_device_scale_x).round(), + kernel_unit_length_y: + (kernel_unit_length_y * subregion_to_device_scale_y).round(), + order_x: *order_x, order_y: *order_y, kernel: *kernel, + divisor: *divisor, bias: *bias, target_x: *target_x, + target_y: *target_y, preserve_alpha: *preserve_alpha} + }, + FilterGraphOp::SVGFEDiffuseLightingDistant{ + surface_scale, diffuse_constant, kernel_unit_length_x, + kernel_unit_length_y, azimuth, elevation} => { + FilterGraphOp::SVGFEDiffuseLightingDistant{ + surface_scale: *surface_scale, + diffuse_constant: *diffuse_constant, + kernel_unit_length_x: + (kernel_unit_length_x * subregion_to_device_scale_x).round(), + kernel_unit_length_y: + (kernel_unit_length_y * subregion_to_device_scale_y).round(), + azimuth: *azimuth, elevation: *elevation} + }, + FilterGraphOp::SVGFEDiffuseLightingPoint{ + surface_scale, diffuse_constant, kernel_unit_length_x, + kernel_unit_length_y, x, y, z} => { + FilterGraphOp::SVGFEDiffuseLightingPoint{ + surface_scale: *surface_scale, + diffuse_constant: *diffuse_constant, + kernel_unit_length_x: + (kernel_unit_length_x * subregion_to_device_scale_x).round(), + 
kernel_unit_length_y: + (kernel_unit_length_y * subregion_to_device_scale_y).round(), + x: x * subregion_to_device_scale_x + subregion_to_device_offset_x, + y: y * subregion_to_device_scale_y + subregion_to_device_offset_y, + z: *z} + }, + FilterGraphOp::SVGFEDiffuseLightingSpot{ + surface_scale, diffuse_constant, kernel_unit_length_x, + kernel_unit_length_y, x, y, z, points_at_x, points_at_y, + points_at_z, cone_exponent, limiting_cone_angle} => { + FilterGraphOp::SVGFEDiffuseLightingSpot{ + surface_scale: *surface_scale, + diffuse_constant: *diffuse_constant, + kernel_unit_length_x: + (kernel_unit_length_x * subregion_to_device_scale_x).round(), + kernel_unit_length_y: + (kernel_unit_length_y * subregion_to_device_scale_y).round(), + x: x * subregion_to_device_scale_x + subregion_to_device_offset_x, + y: y * subregion_to_device_scale_y + subregion_to_device_offset_y, + z: *z, + points_at_x: points_at_x * subregion_to_device_scale_x + subregion_to_device_offset_x, + points_at_y: points_at_y * subregion_to_device_scale_y + subregion_to_device_offset_y, + points_at_z: *points_at_z, + cone_exponent: *cone_exponent, + limiting_cone_angle: *limiting_cone_angle} + }, + FilterGraphOp::SVGFEFlood{..} => op.clone(), + FilterGraphOp::SVGFEDisplacementMap{ + scale, x_channel_selector, y_channel_selector} => { + FilterGraphOp::SVGFEDisplacementMap{ + scale: scale * subregion_to_device_scale_x, + x_channel_selector: *x_channel_selector, + y_channel_selector: *y_channel_selector} + }, + FilterGraphOp::SVGFEDropShadow{ + color, dx, dy, std_deviation_x, std_deviation_y} => { + FilterGraphOp::SVGFEDropShadow{ + color: *color, + dx: dx * subregion_to_device_scale_x, + dy: dy * subregion_to_device_scale_y, + std_deviation_x: std_deviation_x * subregion_to_device_scale_x, + std_deviation_y: std_deviation_y * subregion_to_device_scale_y} + }, + FilterGraphOp::SVGFEGaussianBlur{std_deviation_x, std_deviation_y} => { + let std_deviation_x = std_deviation_x * subregion_to_device_scale_x; 
+ let std_deviation_y = std_deviation_y * subregion_to_device_scale_y; + // For blurs that effectively have no radius in display + // space, we can convert to identity. + if std_deviation_x + std_deviation_y >= 0.125 { + FilterGraphOp::SVGFEGaussianBlur{ + std_deviation_x, + std_deviation_y} + } else { + FilterGraphOp::SVGFEIdentity + } + }, + FilterGraphOp::SVGFEIdentity => op.clone(), + FilterGraphOp::SVGFEImage{..} => op.clone(), + FilterGraphOp::SVGFEMorphologyDilate{radius_x, radius_y} => { + FilterGraphOp::SVGFEMorphologyDilate{ + radius_x: (radius_x * subregion_to_device_scale_x).round(), + radius_y: (radius_y * subregion_to_device_scale_y).round()} + }, + FilterGraphOp::SVGFEMorphologyErode{radius_x, radius_y} => { + FilterGraphOp::SVGFEMorphologyErode{ + radius_x: (radius_x * subregion_to_device_scale_x).round(), + radius_y: (radius_y * subregion_to_device_scale_y).round()} + }, + FilterGraphOp::SVGFEOpacity{..} => op.clone(), + FilterGraphOp::SVGFESourceAlpha => op.clone(), + FilterGraphOp::SVGFESourceGraphic => op.clone(), + FilterGraphOp::SVGFESpecularLightingDistant{ + surface_scale, specular_constant, specular_exponent, + kernel_unit_length_x, kernel_unit_length_y, azimuth, + elevation} => { + FilterGraphOp::SVGFESpecularLightingDistant{ + surface_scale: *surface_scale, + specular_constant: *specular_constant, + specular_exponent: *specular_exponent, + kernel_unit_length_x: + (kernel_unit_length_x * subregion_to_device_scale_x).round(), + kernel_unit_length_y: + (kernel_unit_length_y * subregion_to_device_scale_y).round(), + azimuth: *azimuth, elevation: *elevation} + }, + FilterGraphOp::SVGFESpecularLightingPoint{ + surface_scale, specular_constant, specular_exponent, + kernel_unit_length_x, kernel_unit_length_y, x, y, z } => { + FilterGraphOp::SVGFESpecularLightingPoint{ + surface_scale: *surface_scale, + specular_constant: *specular_constant, + specular_exponent: *specular_exponent, + kernel_unit_length_x: + (kernel_unit_length_x * 
subregion_to_device_scale_x).round(), + kernel_unit_length_y: + (kernel_unit_length_y * subregion_to_device_scale_y).round(), + x: x * subregion_to_device_scale_x + subregion_to_device_offset_x, + y: y * subregion_to_device_scale_y + subregion_to_device_offset_y, + z: *z } + }, + FilterGraphOp::SVGFESpecularLightingSpot{ + surface_scale, specular_constant, specular_exponent, + kernel_unit_length_x, kernel_unit_length_y, x, y, z, + points_at_x, points_at_y, points_at_z, cone_exponent, + limiting_cone_angle} => { + FilterGraphOp::SVGFESpecularLightingSpot{ + surface_scale: *surface_scale, + specular_constant: *specular_constant, + specular_exponent: *specular_exponent, + kernel_unit_length_x: + (kernel_unit_length_x * subregion_to_device_scale_x).round(), + kernel_unit_length_y: + (kernel_unit_length_y * subregion_to_device_scale_y).round(), + x: x * subregion_to_device_scale_x + subregion_to_device_offset_x, + y: y * subregion_to_device_scale_y + subregion_to_device_offset_y, + z: *z, + points_at_x: points_at_x * subregion_to_device_scale_x + subregion_to_device_offset_x, + points_at_y: points_at_y * subregion_to_device_scale_y + subregion_to_device_offset_y, + points_at_z: *points_at_z, + cone_exponent: *cone_exponent, + limiting_cone_angle: *limiting_cone_angle} + }, + FilterGraphOp::SVGFETile => op.clone(), + FilterGraphOp::SVGFEToAlpha => op.clone(), + FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithNoStitching{ + base_frequency_x, base_frequency_y, num_octaves, seed} => { + FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithNoStitching{ + base_frequency_x: + base_frequency_x * subregion_to_device_scale_x, + base_frequency_y: + base_frequency_y * subregion_to_device_scale_y, + num_octaves: *num_octaves, seed: *seed} + }, + FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithStitching{ + base_frequency_x, base_frequency_y, num_octaves, seed} => { + FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithNoStitching{ + base_frequency_x: + base_frequency_x * 
subregion_to_device_scale_x, + base_frequency_y: + base_frequency_y * subregion_to_device_scale_y, + num_octaves: *num_octaves, seed: *seed} + }, + FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithNoStitching{ + base_frequency_x, base_frequency_y, num_octaves, seed} => { + FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithNoStitching{ + base_frequency_x: + base_frequency_x * subregion_to_device_scale_x, + base_frequency_y: + base_frequency_y * subregion_to_device_scale_y, + num_octaves: *num_octaves, seed: *seed} + }, + FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithStitching{ + base_frequency_x, base_frequency_y, num_octaves, seed} => { + FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithNoStitching{ + base_frequency_x: + base_frequency_x * subregion_to_device_scale_x, + base_frequency_y: + base_frequency_y * subregion_to_device_scale_y, + num_octaves: *num_octaves, seed: *seed} + }, + }; + + // Process the inputs and figure out their new subregion, because + // the SourceGraphic subregion is smaller than it was in scene build + // now that it reflects the invalidation rect + // + // Also look up the child tasks while we are here. + let mut used_subregion = LayoutRect::zero(); + let node_inputs: Vec<(FilterGraphPictureReference, RenderTaskId)> = node.inputs.iter().map(|input| { + let (subregion, task) = + match input.buffer_id { + FilterOpGraphPictureBufferId::BufferId(id) => { + (subregion_by_buffer_id[id as usize], task_by_buffer_id[id as usize]) + } + FilterOpGraphPictureBufferId::None => { + // Task must resolve so we use the SourceGraphic as + // a placeholder for these, they don't actually + // contribute anything to the output + (LayoutRect::zero(), original_task_id) + } + }; + // Convert offset to device coordinates. 
+ let offset = LayoutVector2D::new( + (input.offset.x * subregion_to_device_scale_x).round(), + (input.offset.y * subregion_to_device_scale_y).round(), + ); + // To figure out the portion of the node subregion used by this + // source image we need to apply the target padding. Note that + // this does not affect the subregion of the input, as that + // can't be modified as it is used for placement (offset). + let target_padding = input.target_padding + .scale(subregion_to_device_scale_x, subregion_to_device_scale_y) + .round(); + let target_subregion = + LayoutRect::new( + LayoutPoint::new( + subregion.min.x + target_padding.min.x, + subregion.min.y + target_padding.min.y, + ), + LayoutPoint::new( + subregion.max.x + target_padding.max.x, + subregion.max.y + target_padding.max.y, + ), + ); + used_subregion = used_subregion.union(&target_subregion); + (FilterGraphPictureReference{ + buffer_id: input.buffer_id, + // Apply offset to the placement of the input subregion. + subregion: subregion.translate(offset), + offset: LayoutVector2D::zero(), + inflate: input.inflate, + // Nothing past this point uses the padding. + source_padding: LayoutRect::zero(), + target_padding: LayoutRect::zero(), + }, task) + }).collect(); + + // Convert subregion from PicturePixels to DevicePixels and round. + let full_subregion = node.subregion + .scale(subregion_to_device_scale_x, subregion_to_device_scale_y) + .translate(LayoutVector2D::new(subregion_to_device_offset_x, subregion_to_device_offset_y)) + .round(); + + // Clip the used subregion we calculated from the inputs to fit + // within the node's specified subregion. + used_subregion = used_subregion + .intersection(&full_subregion) + .unwrap_or(LayoutRect::zero()) + .round(); + + // Certain filters need to override the used_subregion directly. 
+ match op { + FilterGraphOp::SVGFEBlendColor => {}, + FilterGraphOp::SVGFEBlendColorBurn => {}, + FilterGraphOp::SVGFEBlendColorDodge => {}, + FilterGraphOp::SVGFEBlendDarken => {}, + FilterGraphOp::SVGFEBlendDifference => {}, + FilterGraphOp::SVGFEBlendExclusion => {}, + FilterGraphOp::SVGFEBlendHardLight => {}, + FilterGraphOp::SVGFEBlendHue => {}, + FilterGraphOp::SVGFEBlendLighten => {}, + FilterGraphOp::SVGFEBlendLuminosity => {}, + FilterGraphOp::SVGFEBlendMultiply => {}, + FilterGraphOp::SVGFEBlendNormal => {}, + FilterGraphOp::SVGFEBlendOverlay => {}, + FilterGraphOp::SVGFEBlendSaturation => {}, + FilterGraphOp::SVGFEBlendScreen => {}, + FilterGraphOp::SVGFEBlendSoftLight => {}, + FilterGraphOp::SVGFEColorMatrix{values} => { + if values[3] != 0.0 || + values[7] != 0.0 || + values[11] != 0.0 || + values[15] != 1.0 || + values[19] != 0.0 { + // Manipulating alpha can easily create new + // pixels outside of input subregions + used_subregion = full_subregion; + } + }, + FilterGraphOp::SVGFEComponentTransfer => unreachable!(), + FilterGraphOp::SVGFEComponentTransferInterned{handle: _, creates_pixels} => { + // Check if the value of alpha[0] is modified, if so + // the whole subregion is used because it will be + // creating new pixels outside of input subregions + if creates_pixels { + used_subregion = full_subregion; + } + }, + FilterGraphOp::SVGFECompositeArithmetic { k1, k2, k3, k4 } => { + // Optimize certain cases of Arithmetic operator + // + // See logic for SVG_FECOMPOSITE_OPERATOR_ARITHMETIC + // in FilterSupport.cpp for more information. + // + // Any other case uses the union of input subregions + if k4 != 0.0 { + // Can produce pixels anywhere in the subregion. + used_subregion = full_subregion; + } else if k1 != 0.0 && k2 == 0.0 && k3 == 0.0 && k4 == 0.0 { + // Can produce pixels where both exist. 
+ used_subregion = full_subregion + .intersection(&node_inputs[0].0.subregion) + .unwrap_or(LayoutRect::zero()) + .intersection(&node_inputs[1].0.subregion) + .unwrap_or(LayoutRect::zero()); + } + else if k2 != 0.0 && k3 == 0.0 && k4 == 0.0 { + // Can produce pixels where source exists. + used_subregion = full_subregion + .intersection(&node_inputs[0].0.subregion) + .unwrap_or(LayoutRect::zero()); + } + else if k2 == 0.0 && k3 != 0.0 && k4 == 0.0 { + // Can produce pixels where background exists. + used_subregion = full_subregion + .intersection(&node_inputs[1].0.subregion) + .unwrap_or(LayoutRect::zero()); + } + }, + FilterGraphOp::SVGFECompositeATop => { + // Can only produce pixels where background exists. + used_subregion = full_subregion + .intersection(&node_inputs[1].0.subregion) + .unwrap_or(LayoutRect::zero()); + }, + FilterGraphOp::SVGFECompositeIn => { + // Can only produce pixels where both exist. + used_subregion = used_subregion + .intersection(&node_inputs[0].0.subregion) + .unwrap_or(LayoutRect::zero()) + .intersection(&node_inputs[1].0.subregion) + .unwrap_or(LayoutRect::zero()); + }, + FilterGraphOp::SVGFECompositeLighter => {}, + FilterGraphOp::SVGFECompositeOut => { + // Can only produce pixels where source exists. 
+ used_subregion = full_subregion + .intersection(&node_inputs[0].0.subregion) + .unwrap_or(LayoutRect::zero()); + }, + FilterGraphOp::SVGFECompositeOver => {}, + FilterGraphOp::SVGFECompositeXOR => {}, + FilterGraphOp::SVGFEConvolveMatrixEdgeModeDuplicate{..} => {}, + FilterGraphOp::SVGFEConvolveMatrixEdgeModeNone{..} => {}, + FilterGraphOp::SVGFEConvolveMatrixEdgeModeWrap{..} => {}, + FilterGraphOp::SVGFEDiffuseLightingDistant{..} => {}, + FilterGraphOp::SVGFEDiffuseLightingPoint{..} => {}, + FilterGraphOp::SVGFEDiffuseLightingSpot{..} => {}, + FilterGraphOp::SVGFEDisplacementMap{..} => {}, + FilterGraphOp::SVGFEDropShadow{..} => {}, + FilterGraphOp::SVGFEFlood { color } => { + // Subregion needs to be set to the full node + // subregion for fills (unless the fill is a no-op), + // we know at this point that it has no inputs, so the + // used_region is empty unless we set it here. + if color.a > 0.0 { + used_subregion = full_subregion; + } + }, + FilterGraphOp::SVGFEIdentity => {}, + FilterGraphOp::SVGFEImage { sampling_filter: _sampling_filter, matrix: _matrix } => { + // TODO: calculate the actual subregion + used_subregion = full_subregion; + }, + FilterGraphOp::SVGFEGaussianBlur{..} => {}, + FilterGraphOp::SVGFEMorphologyDilate{..} => {}, + FilterGraphOp::SVGFEMorphologyErode{..} => {}, + FilterGraphOp::SVGFEOpacity{valuebinding: _valuebinding, value} => { + // If fully transparent, we can ignore this node + if value <= 0.0 { + used_subregion = LayoutRect::zero(); + } + }, + FilterGraphOp::SVGFESourceAlpha | + FilterGraphOp::SVGFESourceGraphic => { + used_subregion = source_subregion; + }, + FilterGraphOp::SVGFESpecularLightingDistant{..} => {}, + FilterGraphOp::SVGFESpecularLightingPoint{..} => {}, + FilterGraphOp::SVGFESpecularLightingSpot{..} => {}, + FilterGraphOp::SVGFETile => { + if !used_subregion.is_empty() { + // This fills the entire target, at least if there are + // any input pixels to work with. 
+ used_subregion = full_subregion; + } + }, + FilterGraphOp::SVGFEToAlpha => {}, + FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithNoStitching{..} | + FilterGraphOp::SVGFETurbulenceWithFractalNoiseWithStitching{..} | + FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithNoStitching{..} | + FilterGraphOp::SVGFETurbulenceWithTurbulenceNoiseWithStitching{..} => { + // Turbulence produces pixel values throughout the + // node subregion. + used_subregion = full_subregion; + }, + } + + // If this is the output node, we have to match the provided filter + // subregion as the primitive it is applied to is already placed (it + // was calculated in get_surface_rects using get_coverage_svgfe). + let node_subregion = match is_output { + true => output_subregion, + false => used_subregion, + }; + + // Convert subregion from layout pixels to integer device pixels and + // then calculate size afterwards so it reflects the used pixel area + // + // In case of the output node we preserve the exact filter_subregion + // task size. + // + // This can be an empty rect if the source_subregion invalidation + // rect didn't request any pixels of this node, but we can't skip + // creating tasks that have no size because they would leak in the + // render task graph with no consumers + let node_task_rect = + match is_output { + true => output_rect, + false => node_subregion.to_i32(), + }; + + // SVG spec requires that a later node sampling pixels outside + // this node's subregion will receive a transparent black color + // for those samples, we achieve this by adding a 1 pixel border + // around the target rect, which works fine with the clamping of the + // texture fetch in the shader, and to account for the offset we + // have to make a UvRectKind::Quad mapping for later nodes to use + // when sampling this output, if they use feOffset or have a + // larger target rect those samples will be clamped to the + // transparent black border and thus meet spec. 
+ let mut node_task_size = node_task_rect.size().cast_unit(); + + // We have to limit the render target sizes we're asking for on the + // intermediate nodes; it's not feasible to allocate extremely large + // surfaces. Note that the SVGFEFilterTask code can adapt to any + // scaling that we use here, input subregions simply have to be in + // the same space as the target subregion, which we're not changing, + // and operator parameters like kernel_unit_length are also in that + // space. Blurs will do this same logic if their intermediate is + // too large. We use a simple halving calculation here so that + // pixel alignment is still vaguely sensible. + while node_task_size.width as usize + node.inflate as usize * 2 > MAX_SURFACE_SIZE || + node_task_size.height as usize + node.inflate as usize * 2 > MAX_SURFACE_SIZE { + node_task_size.width >>= 1; + node_task_size.height >>= 1; + } + // Add the inflate border + node_task_size.width += node.inflate as i32 * 2; + node_task_size.height += node.inflate as i32 * 2; + + // Make the uv_rect_kind for this node's task to use, this matters + // only on the final node because we don't use it internally + let node_uv_rect_kind = + uv_rect_kind_for_task_size(node_task_size, node.inflate); + + // Create task for this node + let task_id; + match op { + FilterGraphOp::SVGFEGaussianBlur { std_deviation_x, std_deviation_y } => { + // Note: wrap_prim_with_filters copies the SourceGraphic to + // a node to apply the transparent border around the image, + // we rely on that behavior here as the Blur filter is a + // different shader without awareness of the subregion + // rules in the SVG spec. 
+ + // Find the input task id + assert!(node_inputs.len() == 1); + let blur_input = &node_inputs[0].0; + let source_task_id = node_inputs[0].1; + + // We have to make a copy of the input that is padded with + // transparent black for the area outside the subregion, so + // that the blur task does not duplicate at the edges, and + // this is also where we have to adjust size to account for + // for downscaling of the image in the blur task to avoid + // introducing sampling artifacts on the downscale + let mut adjusted_blur_std_deviation = DeviceSize::new( + std_deviation_x, + std_deviation_y, + ); + let blur_subregion = blur_input.subregion + .inflate( + std_deviation_x.ceil() * BLUR_SAMPLE_SCALE, + std_deviation_y.ceil() * BLUR_SAMPLE_SCALE); + let blur_task_size = blur_subregion.size().cast_unit(); + // Adjust task size to prevent potential sampling errors + let mut adjusted_blur_task_size = + BlurTask::adjusted_blur_source_size( + blur_task_size, + adjusted_blur_std_deviation, + ); + // Now change the subregion to match the revised task size, + // keeping it centered should keep animated radius smooth. + let corner = LayoutPoint::new( + blur_subregion.min.x + (( + blur_task_size.width as i32 - + adjusted_blur_task_size.width) / 2) as f32, + blur_subregion.min.y + (( + blur_task_size.height as i32 - + adjusted_blur_task_size.height) / 2) as f32, + ) + .floor(); + // Recalculate the blur_subregion to match, note that if the + // task was downsized it doesn't affect the size of this + // rect, so we don't have to scale blur_input.subregion for + // input purposes as they are the same scale. + let blur_subregion = LayoutRect::new( + corner, + LayoutPoint::new( + corner.x + adjusted_blur_task_size.width as f32, + corner.y + adjusted_blur_task_size.height as f32, + ), + ); + // For extremely large blur radius we have to limit size, + // see comments on node_task_size above for more details. 
+ while adjusted_blur_task_size.to_i32().width as usize > MAX_SURFACE_SIZE || + adjusted_blur_task_size.to_i32().height as usize > MAX_SURFACE_SIZE { + adjusted_blur_task_size.width >>= 1; + adjusted_blur_task_size.height >>= 1; + adjusted_blur_std_deviation.width *= 0.5; + adjusted_blur_std_deviation.height *= 0.5; + if adjusted_blur_task_size.width < 2 { + adjusted_blur_task_size.width = 2; + } + if adjusted_blur_task_size.height < 2 { + adjusted_blur_task_size.height = 2; + } + } + + let input_subregion_task_id = frame_state.rg_builder.add().init(RenderTask::new_dynamic( + adjusted_blur_task_size, + RenderTaskKind::SVGFENode( + SVGFEFilterTask{ + node: FilterGraphNode{ + kept_by_optimizer: true, + linear: false, + inflate: 0, + inputs: [blur_input.clone()].to_vec(), + subregion: blur_subregion, + }, + op: FilterGraphOp::SVGFEIdentity, + content_origin: DevicePoint::zero(), + extra_gpu_cache_handle: None, + } + ), + ).with_uv_rect_kind(UvRectKind::Rect)); + // Adding the dependencies sets the inputs for this task + frame_state.rg_builder.add_dependency(input_subregion_task_id, source_task_id); + + // TODO: We should do this blur in the correct + // colorspace, linear=true is the default in SVG and + // new_blur does not currently support it. If the nodes + // that consume the result only use the alpha channel, it + // does not matter, but when they use the RGB it matters. 
+ let blur_task_id = + RenderTask::new_blur( + adjusted_blur_std_deviation, + input_subregion_task_id, + frame_state.rg_builder, + RenderTargetKind::Color, + None, + adjusted_blur_task_size, + ); + + task_id = frame_state.rg_builder.add().init(RenderTask::new_dynamic( + node_task_size, + RenderTaskKind::SVGFENode( + SVGFEFilterTask{ + node: FilterGraphNode{ + kept_by_optimizer: true, + linear: node.linear, + inflate: node.inflate, + inputs: [ + FilterGraphPictureReference{ + buffer_id: blur_input.buffer_id, + subregion: blur_subregion, + inflate: 0, + offset: LayoutVector2D::zero(), + source_padding: LayoutRect::zero(), + target_padding: LayoutRect::zero(), + }].to_vec(), + subregion: node_subregion, + }, + op: FilterGraphOp::SVGFEIdentity, + content_origin: DevicePoint::zero(), + extra_gpu_cache_handle: None, + } + ), + ).with_uv_rect_kind(node_uv_rect_kind)); + // Adding the dependencies sets the inputs for this task + frame_state.rg_builder.add_dependency(task_id, blur_task_id); + } + FilterGraphOp::SVGFEDropShadow { color, dx, dy, std_deviation_x, std_deviation_y } => { + // Note: wrap_prim_with_filters copies the SourceGraphic to + // a node to apply the transparent border around the image, + // we rely on that behavior here as the Blur filter is a + // different shader without awareness of the subregion + // rules in the SVG spec. 
+ + // Find the input task id + assert!(node_inputs.len() == 1); + let blur_input = &node_inputs[0].0; + let source_task_id = node_inputs[0].1; + + // We have to make a copy of the input that is padded with + // transparent black for the area outside the subregion, so + // that the blur task does not duplicate at the edges, and + // this is also where we have to adjust size to account for + // for downscaling of the image in the blur task to avoid + // introducing sampling artifacts on the downscale + let mut adjusted_blur_std_deviation = DeviceSize::new( + std_deviation_x, + std_deviation_y, + ); + let blur_subregion = blur_input.subregion + .inflate( + std_deviation_x.ceil() * BLUR_SAMPLE_SCALE, + std_deviation_y.ceil() * BLUR_SAMPLE_SCALE); + let blur_task_size = blur_subregion.size().cast_unit(); + // Adjust task size to prevent potential sampling errors + let mut adjusted_blur_task_size = + BlurTask::adjusted_blur_source_size( + blur_task_size, + adjusted_blur_std_deviation, + ); + // Now change the subregion to match the revised task size, + // keeping it centered should keep animated radius smooth. + let corner = LayoutPoint::new( + blur_subregion.min.x + (( + blur_task_size.width as i32 - + adjusted_blur_task_size.width) / 2) as f32, + blur_subregion.min.y + (( + blur_task_size.height as i32 - + adjusted_blur_task_size.height) / 2) as f32, + ) + .floor(); + // Recalculate the blur_subregion to match, note that if the + // task was downsized it doesn't affect the size of this + // rect, so we don't have to scale blur_input.subregion for + // input purposes as they are the same scale. + let blur_subregion = LayoutRect::new( + corner, + LayoutPoint::new( + corner.x + adjusted_blur_task_size.width as f32, + corner.y + adjusted_blur_task_size.height as f32, + ), + ); + // For extremely large blur radius we have to limit size, + // see comments on node_task_size above for more details. 
+ while adjusted_blur_task_size.to_i32().width as usize > MAX_SURFACE_SIZE || + adjusted_blur_task_size.to_i32().height as usize > MAX_SURFACE_SIZE { + adjusted_blur_task_size.width >>= 1; + adjusted_blur_task_size.height >>= 1; + adjusted_blur_std_deviation.width *= 0.5; + adjusted_blur_std_deviation.height *= 0.5; + if adjusted_blur_task_size.width < 2 { + adjusted_blur_task_size.width = 2; + } + if adjusted_blur_task_size.height < 2 { + adjusted_blur_task_size.height = 2; + } + } + + let input_subregion_task_id = frame_state.rg_builder.add().init(RenderTask::new_dynamic( + adjusted_blur_task_size, + RenderTaskKind::SVGFENode( + SVGFEFilterTask{ + node: FilterGraphNode{ + kept_by_optimizer: true, + linear: false, + inputs: [ + FilterGraphPictureReference{ + buffer_id: blur_input.buffer_id, + subregion: blur_input.subregion, + offset: LayoutVector2D::zero(), + inflate: blur_input.inflate, + source_padding: LayoutRect::zero(), + target_padding: LayoutRect::zero(), + }].to_vec(), + subregion: blur_subregion, + inflate: 0, + }, + op: FilterGraphOp::SVGFEIdentity, + content_origin: DevicePoint::zero(), + extra_gpu_cache_handle: None, + } + ), + ).with_uv_rect_kind(UvRectKind::Rect)); + // Adding the dependencies sets the inputs for this task + frame_state.rg_builder.add_dependency(input_subregion_task_id, source_task_id); + + // The shadow compositing only cares about alpha channel + // which is always linear, so we can blur this in sRGB or + // linear color space and the result is the same as we will + // be replacing the rgb completely. 
+ let blur_task_id = + RenderTask::new_blur( + adjusted_blur_std_deviation, + input_subregion_task_id, + frame_state.rg_builder, + RenderTargetKind::Color, + None, + adjusted_blur_task_size, + ); + + // Now we make the compositing task, for this we need to put + // the blurred shadow image at the correct subregion offset + let blur_subregion = blur_subregion + .translate(LayoutVector2D::new(dx, dy)); + task_id = frame_state.rg_builder.add().init(RenderTask::new_dynamic( + node_task_size, + RenderTaskKind::SVGFENode( + SVGFEFilterTask{ + node: FilterGraphNode{ + kept_by_optimizer: true, + linear: node.linear, + inflate: node.inflate, + inputs: [ + // Original picture + *blur_input, + // Shadow picture + FilterGraphPictureReference{ + buffer_id: blur_input.buffer_id, + subregion: blur_subregion, + inflate: 0, + offset: LayoutVector2D::zero(), + source_padding: LayoutRect::zero(), + target_padding: LayoutRect::zero(), + }].to_vec(), + subregion: node_subregion, + }, + op: FilterGraphOp::SVGFEDropShadow{ + color, + // These parameters don't matter here + dx: 0.0, dy: 0.0, + std_deviation_x: 0.0, std_deviation_y: 0.0, + }, + content_origin: DevicePoint::zero(), + extra_gpu_cache_handle: None, + } + ), + ).with_uv_rect_kind(node_uv_rect_kind)); + // Adding the dependencies sets the inputs for this task + frame_state.rg_builder.add_dependency(task_id, source_task_id); + frame_state.rg_builder.add_dependency(task_id, blur_task_id); + } + FilterGraphOp::SVGFESourceAlpha | + FilterGraphOp::SVGFESourceGraphic => { + // These copy from the original task, we have to synthesize + // a fake input binding to make the shader do the copy. In + // the case of SourceAlpha the shader will zero the RGB but + // we don't have to care about that distinction here. 
+ task_id = frame_state.rg_builder.add().init(RenderTask::new_dynamic( + node_task_size, + RenderTaskKind::SVGFENode( + SVGFEFilterTask{ + node: FilterGraphNode{ + kept_by_optimizer: true, + linear: node.linear, + inflate: node.inflate, + inputs: [ + FilterGraphPictureReference{ + buffer_id: FilterOpGraphPictureBufferId::None, + // This is what makes the mapping + // actually work - this has to be + // the subregion of the whole filter + // because that is the size of the + // input task, it will be cropped to + // the used area (source_subregion). + subregion: filter_subregion, + offset: LayoutVector2D::zero(), + inflate: 0, + source_padding: LayoutRect::zero(), + target_padding: LayoutRect::zero(), + } + ].to_vec(), + subregion: node_subregion, + }, + op: op.clone(), + content_origin: DevicePoint::zero(), + extra_gpu_cache_handle: None, + } + ), + ).with_uv_rect_kind(node_uv_rect_kind)); + frame_state.rg_builder.add_dependency(task_id, original_task_id); + made_dependency_on_source = true; + } + FilterGraphOp::SVGFEComponentTransferInterned { handle, creates_pixels: _ } => { + // FIXME: Doing this in prepare_interned_prim_for_render + // doesn't seem to be enough, where should it be done? 
+ let filter_data = &mut data_stores.filter_data[handle]; + filter_data.update(frame_state); + // ComponentTransfer has a gpu_cache_handle that we need to + // pass along + task_id = frame_state.rg_builder.add().init(RenderTask::new_dynamic( + node_task_size, + RenderTaskKind::SVGFENode( + SVGFEFilterTask{ + node: FilterGraphNode{ + kept_by_optimizer: true, + linear: node.linear, + inputs: node_inputs.iter().map(|input| {input.0}).collect(), + subregion: node_subregion, + inflate: node.inflate, + }, + op: op.clone(), + content_origin: DevicePoint::zero(), + extra_gpu_cache_handle: Some(filter_data.gpu_cache_handle), + } + ), + ).with_uv_rect_kind(node_uv_rect_kind)); + + // Add the dependencies for inputs of this node, which will + // be used by add_svg_filter_node_instances later + for (_input, input_task) in &node_inputs { + if *input_task == original_task_id { + made_dependency_on_source = true; + } + if *input_task != RenderTaskId::INVALID { + frame_state.rg_builder.add_dependency(task_id, *input_task); + } + } + } + _ => { + // This is the usual case - zero, one or two inputs that + // reference earlier node results. 
+ task_id = frame_state.rg_builder.add().init(RenderTask::new_dynamic( + node_task_size, + RenderTaskKind::SVGFENode( + SVGFEFilterTask{ + node: FilterGraphNode{ + kept_by_optimizer: true, + linear: node.linear, + inputs: node_inputs.iter().map(|input| {input.0}).collect(), + subregion: node_subregion, + inflate: node.inflate, + }, + op: op.clone(), + content_origin: DevicePoint::zero(), + extra_gpu_cache_handle: None, + } + ), + ).with_uv_rect_kind(node_uv_rect_kind)); + + // Add the dependencies for inputs of this node, which will + // be used by add_svg_filter_node_instances later + for (_input, input_task) in &node_inputs { + if *input_task == original_task_id { + made_dependency_on_source = true; + } + if *input_task != RenderTaskId::INVALID { + frame_state.rg_builder.add_dependency(task_id, *input_task); + } + } + } + } + + // We track the tasks we created by output buffer id to make it easy + // to look them up quickly, since nodes can only depend on previous + // nodes in the same list + task_by_buffer_id[filter_index] = task_id; + subregion_by_buffer_id[filter_index] = node_subregion; + + if is_output { + output_task_id = task_id; + } + } + + // If no tasks referenced the SourceGraphic, we actually have to create + // a fake dependency so that it does not leak. + if !made_dependency_on_source && output_task_id != original_task_id { + frame_state.rg_builder.add_dependency(output_task_id, original_task_id); + } + + output_task_id + } + pub fn uv_rect_kind(&self) -> UvRectKind { self.uv_rect_kind } @@ -1580,6 +2807,16 @@ impl RenderTask { } } + pub fn get_target_size(&self) -> DeviceIntSize { + match self.location { + RenderTaskLocation::Dynamic { rect, .. } => rect.size(), + RenderTaskLocation::Static { rect, .. } => rect.size(), + RenderTaskLocation::Existing { size, .. 
} => size, + RenderTaskLocation::CacheRequest { size } => size, + RenderTaskLocation::Unallocated { size } => size, /* NOTE(review): unlike the Dynamic/Static arms above, which take rect.size() from an allocated rect, the Existing/CacheRequest/Unallocated location variants carry only an explicit `size` field — presumably because no target rect has been allocated for them yet; confirm against RenderTaskLocation's definition. */ + } + } + /* Returns the kind of render target this task renders into, delegating to RenderTaskKind::target_kind() on self.kind. */ + pub fn target_kind(&self) -> RenderTargetKind { self.kind.target_kind() } |