path: root/gfx/wr/webrender/src
Diffstat (limited to 'gfx/wr/webrender/src')
-rw-r--r--  gfx/wr/webrender/src/batch.rs | 215
-rw-r--r--  gfx/wr/webrender/src/device/gl.rs | 10
-rw-r--r--  gfx/wr/webrender/src/frame_builder.rs | 44
-rw-r--r--  gfx/wr/webrender/src/gpu_types.rs | 56
-rw-r--r--  gfx/wr/webrender/src/picture.rs | 57
-rw-r--r--  gfx/wr/webrender/src/prepare.rs | 313
-rw-r--r--  gfx/wr/webrender/src/prim_store/gradient/conic.rs | 2
-rw-r--r--  gfx/wr/webrender/src/prim_store/gradient/linear.rs | 4
-rw-r--r--  gfx/wr/webrender/src/prim_store/gradient/mod.rs | 4
-rw-r--r--  gfx/wr/webrender/src/prim_store/gradient/radial.rs | 2
-rw-r--r--  gfx/wr/webrender/src/prim_store/image.rs | 2
-rw-r--r--  gfx/wr/webrender/src/prim_store/mod.rs | 31
-rw-r--r--  gfx/wr/webrender/src/render_target.rs | 27
-rw-r--r--  gfx/wr/webrender/src/render_task.rs | 27
-rw-r--r--  gfx/wr/webrender/src/render_task_cache.rs | 6
-rw-r--r--  gfx/wr/webrender/src/render_task_graph.rs | 7
-rw-r--r--  gfx/wr/webrender/src/renderer/gpu_buffer.rs | 173
-rw-r--r--  gfx/wr/webrender/src/renderer/mod.rs | 121
-rw-r--r--  gfx/wr/webrender/src/renderer/shade.rs | 20
-rw-r--r--  gfx/wr/webrender/src/renderer/upload.rs | 12
-rw-r--r--  gfx/wr/webrender/src/renderer/vertex.rs | 67
-rw-r--r--  gfx/wr/webrender/src/resource_cache.rs | 10
-rw-r--r--  gfx/wr/webrender/src/spatial_node.rs | 2
-rw-r--r--  gfx/wr/webrender/src/spatial_tree.rs | 43
-rw-r--r--  gfx/wr/webrender/src/telemetry.rs | 8
25 files changed, 728 insertions, 535 deletions
diff --git a/gfx/wr/webrender/src/batch.rs b/gfx/wr/webrender/src/batch.rs
index f39b60acc6..605283c58d 100644
--- a/gfx/wr/webrender/src/batch.rs
+++ b/gfx/wr/webrender/src/batch.rs
@@ -16,7 +16,7 @@ use crate::gpu_types::{SplitCompositeInstance, QuadInstance};
use crate::gpu_types::{PrimitiveInstanceData, RasterizationSpace, GlyphInstance};
use crate::gpu_types::{PrimitiveHeader, PrimitiveHeaderIndex, TransformPaletteId, TransformPalette};
use crate::gpu_types::{ImageBrushData, get_shader_opacity, BoxShadowData, MaskInstance};
-use crate::gpu_types::{ClipMaskInstanceCommon, ClipMaskInstanceImage, ClipMaskInstanceRect, ClipMaskInstanceBoxShadow};
+use crate::gpu_types::{ClipMaskInstanceCommon, ClipMaskInstanceRect, ClipMaskInstanceBoxShadow};
use crate::internal_types::{FastHashMap, Swizzle, TextureSource, Filter};
use crate::picture::{Picture3DContext, PictureCompositeMode, calculate_screen_uv};
use crate::prim_store::{PrimitiveInstanceKind, ClipData};
@@ -26,8 +26,8 @@ use crate::prim_store::{VECS_PER_SEGMENT, PrimitiveInstanceIndex};
use crate::render_target::RenderTargetContext;
use crate::render_task_graph::{RenderTaskId, RenderTaskGraph};
use crate::render_task::{RenderTaskAddress, RenderTaskKind, SubPass};
-use crate::renderer::{BlendMode, ShaderColorMode};
-use crate::renderer::{MAX_VERTEX_TEXTURE_WIDTH, GpuBufferBuilder, GpuBufferAddress};
+use crate::renderer::{BlendMode, GpuBufferBuilder, ShaderColorMode};
+use crate::renderer::{MAX_VERTEX_TEXTURE_WIDTH, GpuBufferAddress};
use crate::resource_cache::{GlyphFetchResult, ImageProperties};
use crate::space::SpaceMapper;
use crate::visibility::{PrimitiveVisibilityFlags, VisibilityState};
@@ -38,7 +38,7 @@ use crate::segment::EdgeAaSegmentMask;
// Special sentinel value recognized by the shader. It is considered to be
// a dummy task that doesn't mask out anything.
-const OPAQUE_TASK_ADDRESS: RenderTaskAddress = RenderTaskAddress(0x7fff);
+const OPAQUE_TASK_ADDRESS: RenderTaskAddress = RenderTaskAddress(0x7fffffff);
/// Used to signal there are no segments provided with this primitive.
pub const INVALID_SEGMENT_INDEX: i32 = 0xffff;
@@ -745,13 +745,10 @@ impl BatchBuilder {
prim_header_index: PrimitiveHeaderIndex,
resource_address: i32,
) {
- let render_task_address = self.batcher.render_task_address;
-
let instance = BrushInstance {
segment_index,
edge_flags,
clip_task_address,
- render_task_address,
brush_flags,
prim_header_index,
resource_address,
@@ -803,7 +800,7 @@ impl BatchBuilder {
&mut self,
prim_instance_index: PrimitiveInstanceIndex,
transform_id: TransformPaletteId,
- gpu_buffer_address: GpuBufferAddress,
+ prim_address_f: GpuBufferAddress,
quad_flags: QuadFlags,
edge_flags: EdgeAaSegmentMask,
segment_index: u8,
@@ -811,6 +808,7 @@ impl BatchBuilder {
z_generator: &mut ZBufferIdGenerator,
prim_instances: &[PrimitiveInstance],
render_tasks: &RenderTaskGraph,
+ gpu_buffer_builder: &mut GpuBufferBuilder,
) {
let prim_instance = &prim_instances[prim_instance_index.0 as usize];
let prim_info = &prim_instance.vis;
@@ -820,13 +818,14 @@ impl BatchBuilder {
add_quad_to_batch(
self.batcher.render_task_address,
transform_id,
- gpu_buffer_address,
+ prim_address_f,
quad_flags,
edge_flags,
segment_index,
task_id,
z_id,
render_tasks,
+ gpu_buffer_builder,
|key, instance| {
let batch = self.batcher.set_params_and_get_batch(
key,
@@ -857,7 +856,7 @@ impl BatchBuilder {
surface_spatial_node_index: SpatialNodeIndex,
z_generator: &mut ZBufferIdGenerator,
prim_instances: &[PrimitiveInstance],
- _gpu_buffer_builder: &mut GpuBufferBuilder,
+ gpu_buffer_builder: &mut GpuBufferBuilder,
segments: &[RenderTaskId],
) {
let (prim_instance_index, extra_prim_gpu_address) = match cmd {
@@ -883,6 +882,7 @@ impl BatchBuilder {
z_generator,
prim_instances,
render_tasks,
+ gpu_buffer_builder,
);
} else {
for (i, task_id) in segments.iter().enumerate() {
@@ -900,6 +900,7 @@ impl BatchBuilder {
z_generator,
prim_instances,
render_tasks,
+ gpu_buffer_builder,
);
}
}
@@ -1005,6 +1006,7 @@ impl BatchBuilder {
let prim_header_index = prim_headers.push(
&prim_header,
z_id,
+ self.batcher.render_task_address,
[get_shader_opacity(1.0), 0, 0, 0],
);
@@ -1083,6 +1085,7 @@ impl BatchBuilder {
let prim_header_index = prim_headers.push(
&prim_header,
z_id,
+ self.batcher.render_task_address,
batch_params.prim_user_data,
);
@@ -1137,6 +1140,7 @@ impl BatchBuilder {
let prim_header_index = prim_headers.push(
&prim_header,
z_id,
+ self.batcher.render_task_address,
[
(run.raster_scale * 65535.0).round() as i32,
0,
@@ -1312,7 +1316,6 @@ impl BatchBuilder {
let key = BatchKey::new(kind, blend_mode, textures);
- let render_task_address = batcher.render_task_address;
let batch = batcher.alpha_batch_list.set_params_and_get_batch(
key,
batch_features,
@@ -1323,7 +1326,6 @@ impl BatchBuilder {
batch.reserve(glyphs.len());
for glyph in glyphs {
batch.push(base_instance.build(
- render_task_address,
clip_task_address,
subpx_dir,
glyph.index_in_text_run,
@@ -1397,6 +1399,7 @@ impl BatchBuilder {
let prim_header_index = prim_headers.push(
&prim_header,
z_id,
+ self.batcher.render_task_address,
prim_user_data,
);
@@ -1531,6 +1534,7 @@ impl BatchBuilder {
let prim_header_index = prim_headers.push(
&prim_header,
z_id,
+ self.batcher.render_task_address,
ImageBrushData {
color_mode: ShaderColorMode::Image,
alpha_type: AlphaType::PremultipliedAlpha,
@@ -1616,6 +1620,7 @@ impl BatchBuilder {
let shadow_prim_header_index = prim_headers.push(
&shadow_prim_header,
z_id,
+ self.batcher.render_task_address,
ImageBrushData {
color_mode: ShaderColorMode::Alpha,
alpha_type: AlphaType::PremultipliedAlpha,
@@ -1642,6 +1647,7 @@ impl BatchBuilder {
let content_prim_header_index = prim_headers.push(
&prim_header,
z_id_content,
+ self.batcher.render_task_address,
ImageBrushData {
color_mode: ShaderColorMode::Image,
alpha_type: AlphaType::PremultipliedAlpha,
@@ -1687,12 +1693,17 @@ impl BatchBuilder {
textures,
);
- let prim_header_index = prim_headers.push(&prim_header, z_id, [
- uv_rect_address.as_int(),
- amount,
- 0,
- 0,
- ]);
+ let prim_header_index = prim_headers.push(
+ &prim_header,
+ z_id,
+ self.batcher.render_task_address,
+ [
+ uv_rect_address.as_int(),
+ amount,
+ 0,
+ 0,
+ ]
+ );
self.add_brush_instance_to_batches(
key,
@@ -1771,12 +1782,17 @@ impl BatchBuilder {
textures,
);
- let prim_header_index = prim_headers.push(&prim_header, z_id, [
- uv_rect_address.as_int(),
- filter_mode,
- user_data,
- 0,
- ]);
+ let prim_header_index = prim_headers.push(
+ &prim_header,
+ z_id,
+ self.batcher.render_task_address,
+ [
+ uv_rect_address.as_int(),
+ filter_mode,
+ user_data,
+ 0,
+ ]
+ );
self.add_brush_instance_to_batches(
key,
@@ -1826,12 +1842,17 @@ impl BatchBuilder {
textures,
);
- let prim_header_index = prim_headers.push(&prim_header, z_id, [
- uv_rect_address.as_int(),
- filter_mode,
- user_data,
- 0,
- ]);
+ let prim_header_index = prim_headers.push(
+ &prim_header,
+ z_id,
+ self.batcher.render_task_address,
+ [
+ uv_rect_address.as_int(),
+ filter_mode,
+ user_data,
+ 0,
+ ]
+ );
self.add_brush_instance_to_batches(
key,
@@ -1882,6 +1903,7 @@ impl BatchBuilder {
let prim_header_index = prim_headers.push(
&prim_header,
z_id,
+ self.batcher.render_task_address,
ImageBrushData {
color_mode: match key.blend_mode {
BlendMode::MultiplyDualSource => ShaderColorMode::MultiplyDualSource,
@@ -1924,8 +1946,6 @@ impl BatchBuilder {
// and allow mix-blends to operate on picture cache surfaces without
// a separate isolated intermediate surface.
- let render_task_address = self.batcher.render_task_address;
-
let batch_key = BatchKey::new(
BatchKind::Brush(
BrushBatchKind::MixBlend {
@@ -1953,18 +1973,22 @@ impl BatchBuilder {
);
let src_uv_address = render_tasks[pic_task_id].get_texture_address(gpu_cache);
let readback_uv_address = render_tasks[backdrop_id].get_texture_address(gpu_cache);
- let prim_header_index = prim_headers.push(&prim_header, z_id, [
- mode as u32 as i32,
- readback_uv_address.as_int(),
- src_uv_address.as_int(),
- 0,
- ]);
+ let prim_header_index = prim_headers.push(
+ &prim_header,
+ z_id,
+ self.batcher.render_task_address,
+ [
+ mode as u32 as i32,
+ readback_uv_address.as_int(),
+ src_uv_address.as_int(),
+ 0,
+ ]
+ );
let instance = BrushInstance {
segment_index: INVALID_SEGMENT_INDEX,
edge_flags: EdgeAaSegmentMask::all(),
clip_task_address,
- render_task_address,
brush_flags,
prim_header_index,
resource_address: 0,
@@ -2030,12 +2054,17 @@ impl BatchBuilder {
// by this inner loop.
let z_id = z_generator.next();
- let prim_header_index = prim_headers.push(&prim_header, z_id, [
- uv_rect_address.as_int(),
- BrushFlags::PERSPECTIVE_INTERPOLATION.bits() as i32,
- 0,
- child_clip_task_address.0 as i32,
- ]);
+ let prim_header_index = prim_headers.push(
+ &prim_header,
+ z_id,
+ self.batcher.render_task_address,
+ [
+ uv_rect_address.as_int(),
+ BrushFlags::PERSPECTIVE_INTERPOLATION.bits() as i32,
+ 0,
+ child_clip_task_address.0 as i32,
+ ]
+ );
let key = BatchKey::new(
BatchKind::SplitComposite,
@@ -2088,6 +2117,7 @@ impl BatchBuilder {
let prim_header_index = prim_headers.push(
&prim_header,
z_id,
+ self.batcher.render_task_address,
batch_params.prim_user_data,
);
@@ -2141,6 +2171,7 @@ impl BatchBuilder {
let prim_header_index = prim_headers.push(
&prim_header,
z_id,
+ self.batcher.render_task_address,
ImageBrushData {
color_mode: ShaderColorMode::Image,
alpha_type: AlphaType::PremultipliedAlpha,
@@ -2215,6 +2246,7 @@ impl BatchBuilder {
let prim_header_index = prim_headers.push(
&prim_header,
z_id,
+ self.batcher.render_task_address,
batch_params.prim_user_data,
);
@@ -2274,6 +2306,7 @@ impl BatchBuilder {
let prim_header_index = prim_headers.push(
&prim_header,
z_id,
+ self.batcher.render_task_address,
batch_params.prim_user_data,
);
@@ -2394,6 +2427,7 @@ impl BatchBuilder {
let prim_header_index = prim_headers.push(
&prim_header,
z_id,
+ self.batcher.render_task_address,
batch_params.prim_user_data,
);
@@ -2498,6 +2532,7 @@ impl BatchBuilder {
let prim_header_index = prim_headers.push(
&prim_header,
z_id,
+ self.batcher.render_task_address,
batch_params.prim_user_data,
);
@@ -2547,7 +2582,12 @@ impl BatchBuilder {
specific_prim_address: gpu_cache.get_address(&gpu_handle),
transform_id,
};
- let prim_header_index = prim_headers.push(&prim_header, z_id, prim_user_data);
+ let prim_header_index = prim_headers.push(
+ &prim_header,
+ z_id,
+ self.batcher.render_task_address,
+ prim_user_data,
+ );
for (i, tile) in chunk.iter().enumerate() {
let (uv_rect_address, texture) = match render_tasks.resolve_location(tile.src_color, gpu_cache) {
@@ -2616,7 +2656,12 @@ impl BatchBuilder {
prim_header.specific_prim_address = gpu_cache.get_address(&prim_data.gpu_cache_handle);
- let prim_header_index = prim_headers.push(&prim_header, z_id, user_data);
+ let prim_header_index = prim_headers.push(
+ &prim_header,
+ z_id,
+ self.batcher.render_task_address,
+ user_data,
+ );
let segments = if prim_data.brush_segments.is_empty() {
None
@@ -2660,7 +2705,12 @@ impl BatchBuilder {
local_clip_rect: tile.local_clip_rect,
..prim_header
};
- let prim_header_index = prim_headers.push(&tile_prim_header, z_id, user_data);
+ let prim_header_index = prim_headers.push(
+ &tile_prim_header,
+ z_id,
+ self.batcher.render_task_address,
+ user_data,
+ );
self.add_brush_instance_to_batches(
key,
@@ -2735,6 +2785,7 @@ impl BatchBuilder {
let prim_header_index = prim_headers.push(
&prim_header,
z_id,
+ self.batcher.render_task_address,
batch_params.prim_user_data,
);
@@ -2777,7 +2828,12 @@ impl BatchBuilder {
local_clip_rect: tile.local_clip_rect,
..prim_header
};
- let prim_header_index = prim_headers.push(&tile_prim_header, z_id, prim_user_data);
+ let prim_header_index = prim_headers.push(
+ &tile_prim_header,
+ z_id,
+ self.batcher.render_task_address,
+ prim_user_data,
+ );
self.add_brush_instance_to_batches(
batch_key,
@@ -2853,6 +2909,7 @@ impl BatchBuilder {
let prim_header_index = prim_headers.push(
&prim_header,
z_id,
+ self.batcher.render_task_address,
batch_params.prim_user_data,
);
@@ -2895,7 +2952,12 @@ impl BatchBuilder {
local_clip_rect: tile.local_clip_rect,
..prim_header
};
- let prim_header_index = prim_headers.push(&tile_prim_header, z_id, prim_user_data);
+ let prim_header_index = prim_headers.push(
+ &tile_prim_header,
+ z_id,
+ self.batcher.render_task_address,
+ prim_user_data,
+ );
self.add_brush_instance_to_batches(
batch_key,
@@ -2972,6 +3034,7 @@ impl BatchBuilder {
let prim_header_index = prim_headers.push(
&prim_header,
z_id,
+ self.batcher.render_task_address,
batch_params.prim_user_data,
);
@@ -3014,7 +3077,12 @@ impl BatchBuilder {
local_clip_rect: tile.local_clip_rect,
..prim_header
};
- let prim_header_index = prim_headers.push(&tile_prim_header, z_id, prim_user_data);
+ let prim_header_index = prim_headers.push(
+ &tile_prim_header,
+ z_id,
+ self.batcher.render_task_address,
+ prim_user_data,
+ );
self.add_brush_instance_to_batches(
batch_key,
@@ -3068,6 +3136,7 @@ impl BatchBuilder {
let prim_header_index = prim_headers.push(
&prim_header,
z_id,
+ self.batcher.render_task_address,
ImageBrushData {
color_mode: ShaderColorMode::Image,
alpha_type: AlphaType::PremultipliedAlpha,
@@ -3179,6 +3248,7 @@ impl BatchBuilder {
let prim_header_index = prim_headers.push(
&prim_header,
z_id,
+ self.batcher.render_task_address,
[get_shader_opacity(1.0), 0, 0, 0],
);
@@ -3477,8 +3547,6 @@ pub struct ClipBatchList {
/// Rectangle draws fill up the rectangles with rounded corners.
pub slow_rectangles: Vec<ClipMaskInstanceRect>,
pub fast_rectangles: Vec<ClipMaskInstanceRect>,
- /// Image draws apply the image masking.
- pub images: FastHashMap<(TextureSource, Option<DeviceIntRect>), Vec<ClipMaskInstanceImage>>,
pub box_shadows: FastHashMap<TextureSource, Vec<ClipMaskInstanceBoxShadow>>,
}
@@ -3487,7 +3555,6 @@ impl ClipBatchList {
ClipBatchList {
slow_rectangles: Vec::new(),
fast_rectangles: Vec::new(),
- images: FastHashMap::default(),
box_shadows: FastHashMap::default(),
}
}
@@ -3674,20 +3741,11 @@ impl ClipBatcher {
ctx.spatial_tree,
);
- // For clip mask images, we need to map from the primitive's layout space to
- // the target space, as the cs_clip_image shader needs to forward transform
- // the local image bounds, rather than backwards transform the target bounds
- // as in done in write_clip_tile_vertex.
- let prim_transform_id = match clip_node.item.kind {
- ClipItemKind::Image { .. } => { panic!("bug: old path not supported") }
- _ => {
- transforms.get_id(
- root_spatial_node_index,
- ctx.root_spatial_node_index,
- ctx.spatial_tree,
- )
- }
- };
+ let prim_transform_id = transforms.get_id(
+ root_spatial_node_index,
+ ctx.root_spatial_node_index,
+ ctx.spatial_tree,
+ );
let common = ClipMaskInstanceCommon {
sub_rect: DeviceRect::from_size(actual_rect.size()),
@@ -3837,13 +3895,14 @@ impl<'a, 'rc> RenderTargetContext<'a, 'rc> {
pub fn add_quad_to_batch<F>(
render_task_address: RenderTaskAddress,
transform_id: TransformPaletteId,
- gpu_buffer_address: GpuBufferAddress,
+ prim_address_f: GpuBufferAddress,
quad_flags: QuadFlags,
edge_flags: EdgeAaSegmentMask,
segment_index: u8,
task_id: RenderTaskId,
z_id: ZBufferId,
render_tasks: &RenderTaskGraph,
+ gpu_buffer_builder: &mut GpuBufferBuilder,
mut f: F,
) where F: FnMut(BatchKey, PrimitiveInstanceData) {
@@ -3857,6 +3916,15 @@ pub fn add_quad_to_batch<F>(
All = 5,
}
+ let mut writer = gpu_buffer_builder.i32.write_blocks(1);
+ writer.push_one([
+ transform_id.0 as i32,
+ z_id.0,
+ 0,
+ 0,
+ ]);
+ let prim_address_i = writer.finish();
+
let texture = match task_id {
RenderTaskId::INVALID => {
TextureSource::Invalid
@@ -3898,7 +3966,8 @@ pub fn add_quad_to_batch<F>(
if edge_flags.is_empty() {
let instance = QuadInstance {
render_task_address,
- prim_address: gpu_buffer_address,
+ prim_address_i,
+ prim_address_f,
z_id,
transform_id,
edge_flags: edge_flags_bits,
@@ -3911,7 +3980,8 @@ pub fn add_quad_to_batch<F>(
} else if quad_flags.contains(QuadFlags::USE_AA_SEGMENTS) {
let main_instance = QuadInstance {
render_task_address,
- prim_address: gpu_buffer_address,
+ prim_address_i,
+ prim_address_f,
z_id,
transform_id,
edge_flags: edge_flags_bits,
@@ -3956,7 +4026,8 @@ pub fn add_quad_to_batch<F>(
} else {
let instance = QuadInstance {
render_task_address,
- prim_address: gpu_buffer_address,
+ prim_address_i,
+ prim_address_f,
z_id,
transform_id,
edge_flags: edge_flags_bits,
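
Note on the add_quad_to_batch changes above: each quad now carries a second GPU buffer address. A single [i32; 4] block holding the transform palette id and the z id is written through gpu_buffer_builder.i32 and returned as prim_address_i, alongside the existing float block (prim_address_f). Below is a minimal, self-contained sketch of that write_blocks / push_one / finish pattern; the types are illustrative stand-ins, not the real GpuBufferBuilderI and GpuBufferAddress from renderer/gpu_buffer.rs.

// Illustrative stand-ins for GpuBufferBuilderI / GpuBufferAddress.
#[derive(Copy, Clone, Debug, PartialEq)]
struct BufferAddress(u32);

#[derive(Default)]
struct IntBlockBuilder {
    blocks: Vec<[i32; 4]>,
}

struct BlockWriter<'a> {
    builder: &'a mut IntBlockBuilder,
    base: u32,
}

impl IntBlockBuilder {
    // Reserve `count` blocks and return a writer that pushes into them.
    fn write_blocks(&mut self, count: usize) -> BlockWriter<'_> {
        let base = self.blocks.len() as u32;
        self.blocks.reserve(count);
        BlockWriter { builder: self, base }
    }
}

impl<'a> BlockWriter<'a> {
    fn push_one(&mut self, block: [i32; 4]) {
        self.builder.blocks.push(block);
    }

    // Finish writing and return the address of the first block written.
    fn finish(self) -> BufferAddress {
        BufferAddress(self.base)
    }
}

fn main() {
    let mut i32_builder = IntBlockBuilder::default();

    // Mirrors the hunk in add_quad_to_batch: one block holding the
    // transform palette id and the z id, padded to four lanes.
    let (transform_id, z_id) = (7i32, 42i32);
    let mut writer = i32_builder.write_blocks(1);
    writer.push_one([transform_id, z_id, 0, 0]);
    let prim_address_i = writer.finish();

    assert_eq!(prim_address_i, BufferAddress(0));
    assert_eq!(i32_builder.blocks[0], [7, 42, 0, 0]);
}
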
diff --git a/gfx/wr/webrender/src/device/gl.rs b/gfx/wr/webrender/src/device/gl.rs
index 5ad9469185..04a7e13023 100644
--- a/gfx/wr/webrender/src/device/gl.rs
+++ b/gfx/wr/webrender/src/device/gl.rs
@@ -138,9 +138,13 @@ pub enum UploadMethod {
}
/// Plain old data that can be used to initialize a texture.
-pub unsafe trait Texel: Copy {}
-unsafe impl Texel for u8 {}
-unsafe impl Texel for f32 {}
+pub unsafe trait Texel: Copy + Default {
+ fn image_format() -> ImageFormat;
+}
+
+unsafe impl Texel for u8 {
+ fn image_format() -> ImageFormat { ImageFormat::R8 }
+}
/// Returns the size in bytes of a depth target with the given dimensions.
fn depth_target_size_in_bytes(dimensions: &DeviceIntSize) -> usize {
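
Note on the Texel change above: the trait goes from a plain marker to one that also requires Default and reports the texture format it maps to (the f32 impl disappears in this hunk). A self-contained sketch of how such bounds can be used generically follows; ImageFormat and make_staging_buffer here are stand-ins for illustration, not the device code's actual helpers.

// Stand-in for webrender's ImageFormat enum (only the variant used here).
#[derive(Copy, Clone, Debug, PartialEq)]
enum ImageFormat {
    R8,
}

// Mirrors the updated trait: a texel must be Copy + Default and know
// which texture format it corresponds to.
unsafe trait Texel: Copy + Default {
    fn image_format() -> ImageFormat;
}

unsafe impl Texel for u8 {
    fn image_format() -> ImageFormat { ImageFormat::R8 }
}

// Hypothetical helper showing why the Default bound is useful: a staging
// buffer can be zero-initialized without naming the concrete texel type.
fn make_staging_buffer<T: Texel>(width: usize, height: usize) -> (Vec<T>, ImageFormat) {
    (vec![T::default(); width * height], T::image_format())
}

fn main() {
    let (buf, format) = make_staging_buffer::<u8>(4, 4);
    assert_eq!(buf.len(), 16);
    assert_eq!(format, ImageFormat::R8);
}
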
diff --git a/gfx/wr/webrender/src/frame_builder.rs b/gfx/wr/webrender/src/frame_builder.rs
index 4ff798c103..b975c960eb 100644
--- a/gfx/wr/webrender/src/frame_builder.rs
+++ b/gfx/wr/webrender/src/frame_builder.rs
@@ -25,7 +25,7 @@ use crate::prim_store::{PictureIndex, PrimitiveScratchBuffer};
use crate::prim_store::{DeferredResolve, PrimitiveInstance};
use crate::profiler::{self, TransactionProfile};
use crate::render_backend::{DataStores, ScratchBuffer};
-use crate::renderer::{GpuBuffer, GpuBufferBuilder};
+use crate::renderer::{GpuBufferF, GpuBufferBuilderF, GpuBufferI, GpuBufferBuilderI, GpuBufferBuilder};
use crate::render_target::{RenderTarget, PictureCacheTarget, TextureCacheRenderTarget, PictureCacheTargetKind};
use crate::render_target::{RenderTargetContext, RenderTargetKind, AlphaRenderTarget, ColorRenderTarget};
use crate::render_task_graph::{RenderTaskGraph, Pass, SubPassSurface};
@@ -558,7 +558,10 @@ impl FrameBuilder {
let mut cmd_buffers = CommandBufferList::new();
// TODO(gw): Recycle backing vec buffers for gpu buffer builder between frames
- let mut gpu_buffer_builder = GpuBufferBuilder::new();
+ let mut gpu_buffer_builder = GpuBufferBuilder {
+ f32: GpuBufferBuilderF::new(),
+ i32: GpuBufferBuilderI::new(),
+ };
self.build_layer_screen_rects_and_cull_layers(
scene,
@@ -690,7 +693,8 @@ impl FrameBuilder {
scene.clip_store.end_frame(&mut scratch.clip_store);
scratch.end_frame();
- let gpu_buffer = gpu_buffer_builder.finalize(&render_tasks);
+ let gpu_buffer_f = gpu_buffer_builder.f32.finalize(&render_tasks);
+ let gpu_buffer_i = gpu_buffer_builder.i32.finalize(&render_tasks);
Frame {
device_rect: DeviceIntRect::from_origin_and_size(
@@ -707,7 +711,8 @@ impl FrameBuilder {
prim_headers,
debug_items: mem::replace(&mut scratch.primitive.debug_items, Vec::new()),
composite_state,
- gpu_buffer,
+ gpu_buffer_f,
+ gpu_buffer_i,
}
}
@@ -759,7 +764,6 @@ impl FrameBuilder {
const LAYOUT_PORT_COLOR: ColorF = debug_colors::RED;
const VISUAL_PORT_COLOR: ColorF = debug_colors::BLUE;
const DISPLAYPORT_COLOR: ColorF = debug_colors::LIME;
- const NOTHING: ColorF = ColorF { r: 0.0, g: 0.0, b: 0.0, a: 0.0 };
let viewport = scroll_frame_info.viewport_rect;
@@ -805,9 +809,10 @@ impl FrameBuilder {
}
let mut add_rect = |rect, border, fill| -> Option<()> {
+ const STROKE_WIDTH: f32 = 2.0;
// Place rect in scroll frame's local coordinate space
let transformed_rect = transform.outer_transformed_box2d(&rect)?;
-
+
// Transform to world coordinates, using root-content coords as an intermediate step.
let mut root_content_rect = local_to_root_content.outer_transformed_box2d(&transformed_rect)?;
// In root-content coords, apply the root content node's viewport clip.
@@ -820,21 +825,28 @@ impl FrameBuilder {
}
let world_rect = root_content_to_world.outer_transformed_box2d(&root_content_rect)?;
+ scratch.push_debug_rect_with_stroke_width(world_rect, border, STROKE_WIDTH);
+
// Add world coordinate rects to scratch.debug_items
- // TODO: Add a parameter to control the border thickness of the rects, and make them a bit thicker.
- scratch.push_debug_rect(world_rect * DevicePixelScale::new(1.0), border, fill);
+ if let Some(fill_color) = fill {
+ let interior_world_rect = WorldRect::new(
+ world_rect.min + WorldVector2D::new(STROKE_WIDTH, STROKE_WIDTH),
+ world_rect.max - WorldVector2D::new(STROKE_WIDTH, STROKE_WIDTH)
+ );
+ scratch.push_debug_rect(interior_world_rect * DevicePixelScale::new(1.0), border, fill_color);
+ }
Some(())
};
- add_rect(minimap_data.scrollable_rect, PAGE_BORDER_COLOR, BACKGROUND_COLOR);
- add_rect(minimap_data.visual_viewport, VISUAL_PORT_COLOR, NOTHING);
- add_rect(minimap_data.displayport, DISPLAYPORT_COLOR, DISPLAYPORT_BACKGROUND_COLOR);
+ add_rect(minimap_data.scrollable_rect, PAGE_BORDER_COLOR, Some(BACKGROUND_COLOR));
+ add_rect(minimap_data.displayport, DISPLAYPORT_COLOR, Some(DISPLAYPORT_BACKGROUND_COLOR));
// Only render a distinct layout viewport for the root content.
// For other scroll frames, the visual and layout viewports coincide.
if minimap_data.is_root_content {
- add_rect(minimap_data.layout_viewport, LAYOUT_PORT_COLOR, NOTHING);
+ add_rect(minimap_data.layout_viewport, LAYOUT_PORT_COLOR, None);
}
+ add_rect(minimap_data.visual_viewport, VISUAL_PORT_COLOR, None);
}
}
});
@@ -967,7 +979,6 @@ pub fn build_render_pass(
let task_id = sub_pass.task_ids[0];
let task = &render_tasks[task_id];
let target_rect = task.get_target_rect();
- let mut gpu_buffer_builder = GpuBufferBuilder::new();
match task.kind {
RenderTaskKind::Picture(ref pic_task) => {
@@ -998,7 +1009,7 @@ pub fn build_render_pass(
pic_task.surface_spatial_node_index,
z_generator,
prim_instances,
- &mut gpu_buffer_builder,
+ gpu_buffer_builder,
segments,
);
});
@@ -1072,6 +1083,7 @@ pub fn build_render_pass(
z_generator,
prim_instances,
cmd_buffers,
+ gpu_buffer_builder,
);
pass.alpha.build(
ctx,
@@ -1082,6 +1094,7 @@ pub fn build_render_pass(
z_generator,
prim_instances,
cmd_buffers,
+ gpu_buffer_builder,
);
pass
@@ -1127,7 +1140,8 @@ pub struct Frame {
/// Main GPU data buffer constructed (primarily) during the prepare
/// pass for primitives that were visible and dirty.
- pub gpu_buffer: GpuBuffer,
+ pub gpu_buffer_f: GpuBufferF,
+ pub gpu_buffer_i: GpuBufferI,
}
impl Frame {
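
Note on the frame_builder.rs changes above: the single GpuBuffer/GpuBufferBuilder is split into a float half and an integer half, and the finished Frame now carries gpu_buffer_f and gpu_buffer_i. A minimal sketch of that shape, assuming (as in the hunks above) one builder per block type; the real GpuBufferBuilderF/I live in renderer/gpu_buffer.rs, and the types below are stand-ins.

#[derive(Default)]
struct GpuBufferBuilderF { blocks: Vec<[f32; 4]> }
#[derive(Default)]
struct GpuBufferBuilderI { blocks: Vec<[i32; 4]> }

// Matches the field names used in the diff: builder.f32 / builder.i32.
struct GpuBufferBuilder {
    f32: GpuBufferBuilderF,
    i32: GpuBufferBuilderI,
}

// Finalized per-frame data: two buffers instead of one.
struct FrameGpuData {
    gpu_buffer_f: Vec<[f32; 4]>,
    gpu_buffer_i: Vec<[i32; 4]>,
}

impl GpuBufferBuilder {
    fn finalize(self) -> FrameGpuData {
        FrameGpuData {
            gpu_buffer_f: self.f32.blocks,
            gpu_buffer_i: self.i32.blocks,
        }
    }
}

fn main() {
    let mut builder = GpuBufferBuilder {
        f32: GpuBufferBuilderF::default(),
        i32: GpuBufferBuilderI::default(),
    };
    builder.f32.blocks.push([1.0, 0.0, 0.0, 1.0]); // e.g. a premultiplied color
    builder.i32.blocks.push([7, 42, 0, 0]);        // e.g. transform id + z id
    let frame_data = builder.finalize();
    assert_eq!(frame_data.gpu_buffer_f.len(), 1);
    assert_eq!(frame_data.gpu_buffer_i.len(), 1);
}
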
diff --git a/gfx/wr/webrender/src/gpu_types.rs b/gfx/wr/webrender/src/gpu_types.rs
index f6d24b2e39..e222ebed04 100644
--- a/gfx/wr/webrender/src/gpu_types.rs
+++ b/gfx/wr/webrender/src/gpu_types.rs
@@ -125,6 +125,7 @@ pub struct SvgFilterInstance {
pub kind: u16,
pub input_count: u16,
pub generic_int: u16,
+ pub padding: u16,
pub extra_data_address: GpuCacheAddress,
}
@@ -175,17 +176,6 @@ pub struct ClipMaskInstanceCommon {
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[repr(C)]
-pub struct ClipMaskInstanceImage {
- pub common: ClipMaskInstanceCommon,
- pub tile_rect: LayoutRect,
- pub resource_address: GpuCacheAddress,
- pub local_rect: LayoutRect,
-}
-
-#[derive(Clone, Debug)]
-#[cfg_attr(feature = "capture", derive(Serialize))]
-#[cfg_attr(feature = "replay", derive(Deserialize))]
-#[repr(C)]
pub struct ClipMaskInstanceRect {
pub common: ClipMaskInstanceCommon,
pub local_pos: LayoutPoint,
@@ -428,6 +418,7 @@ impl PrimitiveHeaders {
&mut self,
prim_header: &PrimitiveHeader,
z: ZBufferId,
+ render_task_address: RenderTaskAddress,
user_data: [i32; 4],
) -> PrimitiveHeaderIndex {
debug_assert_eq!(self.headers_int.len(), self.headers_float.len());
@@ -440,7 +431,7 @@ impl PrimitiveHeaders {
self.headers_int.push(PrimitiveHeaderI {
z,
- unused: 0,
+ render_task_address,
specific_prim_address: prim_header.specific_prim_address.as_int(),
transform_id: prim_header.transform_id,
user_data,
@@ -480,7 +471,7 @@ pub struct PrimitiveHeaderI {
pub z: ZBufferId,
pub specific_prim_address: i32,
pub transform_id: TransformPaletteId,
- pub unused: i32, // To ensure required 16 byte alignment of vertex textures
+ pub render_task_address: RenderTaskAddress,
pub user_data: [i32; 4],
}
@@ -501,7 +492,6 @@ impl GlyphInstance {
// header since they are constant, and some can be
// compressed to a smaller size.
pub fn build(&self,
- render_task: RenderTaskAddress,
clip_task: RenderTaskAddress,
subpx_dir: SubpixelDirection,
glyph_index_in_text_run: i32,
@@ -511,8 +501,7 @@ impl GlyphInstance {
PrimitiveInstanceData {
data: [
self.prim_header_index.0 as i32,
- ((render_task.0 as i32) << 16)
- | clip_task.0 as i32,
+ clip_task.0 as i32,
(subpx_dir as u32 as i32) << 24
| (color_mode as u32 as i32) << 16
| glyph_index_in_text_run,
@@ -536,7 +525,7 @@ impl From<SplitCompositeInstance> for PrimitiveInstanceData {
instance.prim_header_index.0,
instance.polygons_address,
instance.z.0,
- instance.render_task_address.0 as i32,
+ instance.render_task_address.0,
],
}
}
@@ -547,7 +536,8 @@ impl From<SplitCompositeInstance> for PrimitiveInstanceData {
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct QuadInstance {
pub render_task_address: RenderTaskAddress,
- pub prim_address: GpuBufferAddress,
+ pub prim_address_i: GpuBufferAddress,
+ pub prim_address_f: GpuBufferAddress,
pub z_id: ZBufferId,
pub transform_id: TransformPaletteId,
pub quad_flags: u8,
@@ -559,19 +549,23 @@ pub struct QuadInstance {
impl From<QuadInstance> for PrimitiveInstanceData {
fn from(instance: QuadInstance) -> Self {
/*
- [32 bits prim address]
- [8 bits quad flags] [8 bits edge flags] [16 bits render task address]
- [8 bits segment flags] [24 bits z_id]
- [8 bits segment index] [24 bits xf_id]
- */
+ [32 prim address_i]
+ [32 prim address_f]
+ [8888 qf ef pi si]
+ [32 render task address]
+ */
+
PrimitiveInstanceData {
data: [
- instance.prim_address.as_int(),
- ((instance.quad_flags as i32) << 24) |
- ((instance.edge_flags as i32) << 16) |
- instance.render_task_address.0 as i32,
- ((instance.part_index as i32) << 24) | instance.z_id.0,
- ((instance.segment_index as i32) << 24) | instance.transform_id.0 as i32,
+ instance.prim_address_i.as_int(),
+ instance.prim_address_f.as_int(),
+
+ ((instance.quad_flags as i32) << 24) |
+ ((instance.edge_flags as i32) << 16) |
+ ((instance.part_index as i32) << 8) |
+ ((instance.segment_index as i32) << 0),
+
+ instance.render_task_address.0,
],
}
}
@@ -659,7 +653,6 @@ impl core::fmt::Debug for BrushFlags {
/// Convenience structure to encode into PrimitiveInstanceData.
pub struct BrushInstance {
pub prim_header_index: PrimitiveHeaderIndex,
- pub render_task_address: RenderTaskAddress,
pub clip_task_address: RenderTaskAddress,
pub segment_index: i32,
pub edge_flags: EdgeAaSegmentMask,
@@ -672,8 +665,7 @@ impl From<BrushInstance> for PrimitiveInstanceData {
PrimitiveInstanceData {
data: [
instance.prim_header_index.0,
- ((instance.render_task_address.0 as i32) << 16)
- | instance.clip_task_address.0 as i32,
+ instance.clip_task_address.0,
instance.segment_index
| ((instance.brush_flags.bits() as i32) << 16)
| ((instance.edge_flags.bits() as i32) << 28),
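
Note on the QuadInstance packing above: with the render task address promoted to a full 32-bit instance word (and to a field of PrimitiveHeaderI), the flags word now packs quad flags, edge flags, part index and segment index into four 8-bit lanes, per the [8888 qf ef pi si] comment. A small worked round-trip of that packing in plain Rust; the unpack side is a sketch of what a shader would do, not the actual GLSL.

// Matches ((qf << 24) | (ef << 16) | (pi << 8) | si) from the hunk above.
fn pack_flags(quad_flags: u8, edge_flags: u8, part_index: u8, segment_index: u8) -> i32 {
    ((quad_flags as i32) << 24)
        | ((edge_flags as i32) << 16)
        | ((part_index as i32) << 8)
        | (segment_index as i32)
}

// The matching unpack into four 8-bit lanes.
fn unpack_flags(word: i32) -> (u8, u8, u8, u8) {
    let w = word as u32;
    (
        ((w >> 24) & 0xff) as u8,
        ((w >> 16) & 0xff) as u8,
        ((w >> 8) & 0xff) as u8,
        (w & 0xff) as u8,
    )
}

fn main() {
    let word = pack_flags(0x03, 0x0f, 2, 1);
    assert_eq!(word as u32, 0x030f_0201);
    assert_eq!(unpack_flags(word), (0x03, 0x0f, 2, 1));
}
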
diff --git a/gfx/wr/webrender/src/picture.rs b/gfx/wr/webrender/src/picture.rs
index e8746ad161..1f1fd5e4f6 100644
--- a/gfx/wr/webrender/src/picture.rs
+++ b/gfx/wr/webrender/src/picture.rs
@@ -139,11 +139,11 @@ use std::collections::hash_map::Entry;
use std::ops::Range;
use crate::picture_textures::PictureCacheTextureHandle;
use crate::util::{MaxRect, VecHelper, MatrixHelpers, Recycler, ScaleOffset};
-use crate::filterdata::{FilterDataHandle};
+use crate::filterdata::FilterDataHandle;
use crate::tile_cache::{SliceDebugInfo, TileDebugInfo, DirtyTileDebugInfo};
use crate::visibility::{PrimitiveVisibilityFlags, FrameVisibilityContext};
use crate::visibility::{VisibilityState, FrameVisibilityState};
-use crate::scene_building::{SliceFlags};
+use crate::scene_building::SliceFlags;
// Maximum blur radius for blur filter (different than box-shadow blur).
// Taken from FilterNodeSoftware.cpp in Gecko.
@@ -3951,7 +3951,7 @@ impl SurfaceInfo {
&self,
local_rect: &PictureRect,
spatial_tree: &SpatialTree,
- ) -> Option<DeviceRect> {
+ ) -> Option<DeviceIntRect> {
let local_rect = match local_rect.intersection(&self.clipping_rect) {
Some(rect) => rect,
None => return None,
@@ -3969,10 +3969,21 @@ impl SurfaceInfo {
local_to_world.map(&local_rect).unwrap()
} else {
+ // The content should have been culled out earlier.
+ assert!(self.device_pixel_scale.0 > 0.0);
+
local_rect.cast_unit()
};
- Some((raster_rect * self.device_pixel_scale).round_out())
+ let surface_rect = (raster_rect * self.device_pixel_scale).round_out().to_i32();
+ if surface_rect.is_empty() {
+ // The local_rect computed above may have non-empty size that is very
+ // close to zero. Due to limited arithmetic precision, the SpaceMapper
+ // might transform the near-zero-sized rect into a zero-sized one.
+ return None;
+ }
+
+ Some(surface_rect)
}
}
@@ -5014,7 +5025,7 @@ impl PicturePrimitive {
let content_device_rect = content_device_rect
.intersection(&max_content_rect)
- .expect("bug: no intersection with tile dirty rect");
+ .expect("bug: no intersection with tile dirty rect: {content_device_rect:?} / {max_content_rect:?}");
let content_task_size = content_device_rect.size();
let normalized_content_rect = content_task_size.into();
@@ -6128,22 +6139,28 @@ impl PicturePrimitive {
PictureCompositeMode::TileCache { slice_id } => {
let tile_cache = tile_caches.get_mut(&slice_id).unwrap();
- // We only update the raster scale if we're in high quality zoom mode, or there is no
- // pinch-zoom active. This means that in low quality pinch-zoom, we retain the initial
- // scale factor until the zoom ends, then select a high quality zoom factor for the next
- // frame to be drawn.
- let update_raster_scale =
- !frame_context.fb_config.low_quality_pinch_zoom ||
- !frame_context.spatial_tree.get_spatial_node(tile_cache.spatial_node_index).is_ancestor_or_self_zooming;
-
- if update_raster_scale {
- // Get the complete scale-offset from local space to device space
- let local_to_device = get_relative_scale_offset(
- tile_cache.spatial_node_index,
- frame_context.root_spatial_node_index,
- frame_context.spatial_tree,
- );
+ // Get the complete scale-offset from local space to device space
+ let local_to_device = get_relative_scale_offset(
+ tile_cache.spatial_node_index,
+ frame_context.root_spatial_node_index,
+ frame_context.spatial_tree,
+ );
+ let local_to_cur_raster_scale = local_to_device.scale.x / tile_cache.current_raster_scale;
+ // We only update the raster scale if we're in high quality zoom mode, or there is no
+ // pinch-zoom active, or the zoom has doubled or halved since the raster scale was
+ // last updated. During a low-quality zoom we therefore typically retain the previous
+            // scale factor, which avoids expensive re-rasterizations, except when the zoom has
+            // become too large or too small, in which case we re-rasterize to avoid blurriness or
+            // a proliferation of picture cache tiles. When the zoom ends we select a high quality
+ // scale factor for the next frame to be drawn.
+ if !frame_context.fb_config.low_quality_pinch_zoom
+ || !frame_context
+ .spatial_tree.get_spatial_node(tile_cache.spatial_node_index)
+ .is_ancestor_or_self_zooming
+ || local_to_cur_raster_scale <= 0.5
+ || local_to_cur_raster_scale >= 2.0
+ {
tile_cache.current_raster_scale = local_to_device.scale.x;
}
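
Note on the picture.rs hunk above: it changes when tile_cache.current_raster_scale is refreshed. In low-quality pinch-zoom the stale scale is kept until the zoom ratio leaves the [0.5, 2.0] band. A small sketch of that predicate with the inputs pulled out as plain parameters (names are illustrative, not WebRender's API), plus a few worked cases.

// current_raster_scale is the scale the tile cache was last rasterized at;
// local_to_device_scale is the scale for this frame.
fn should_update_raster_scale(
    low_quality_pinch_zoom: bool,
    is_ancestor_or_self_zooming: bool,
    local_to_device_scale: f32,
    current_raster_scale: f32,
) -> bool {
    let ratio = local_to_device_scale / current_raster_scale;
    // High-quality mode or no active zoom: always track the real scale.
    // Otherwise only re-rasterize once the zoom has halved or doubled.
    !low_quality_pinch_zoom || !is_ancestor_or_self_zooming || ratio <= 0.5 || ratio >= 2.0
}

fn main() {
    // Mid-zoom in low-quality mode: keep the stale scale, avoid re-rasterizing.
    assert!(!should_update_raster_scale(true, true, 1.5, 1.0));
    // Zoomed past 2x: re-rasterize to avoid blurriness.
    assert!(should_update_raster_scale(true, true, 2.5, 1.0));
    // Zoom finished (node no longer zooming): snap to a high-quality scale.
    assert!(should_update_raster_scale(true, false, 1.5, 1.0));
}
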
diff --git a/gfx/wr/webrender/src/prepare.rs b/gfx/wr/webrender/src/prepare.rs
index f32c94073e..a59eca0670 100644
--- a/gfx/wr/webrender/src/prepare.rs
+++ b/gfx/wr/webrender/src/prepare.rs
@@ -28,18 +28,18 @@ use crate::prim_store::line_dec::MAX_LINE_DECORATION_RESOLUTION;
use crate::prim_store::*;
use crate::prim_store::gradient::GradientGpuBlockBuilder;
use crate::render_backend::DataStores;
-use crate::render_task_graph::{RenderTaskId};
+use crate::render_task_graph::RenderTaskId;
use crate::render_task_cache::RenderTaskCacheKeyKind;
use crate::render_task_cache::{RenderTaskCacheKey, to_cache_size, RenderTaskParent};
use crate::render_task::{RenderTaskKind, RenderTask, SubPass, MaskSubPass, EmptyTask};
-use crate::renderer::{GpuBufferBuilder, GpuBufferAddress};
+use crate::renderer::{GpuBufferBuilderF, GpuBufferAddress};
use crate::segment::{EdgeAaSegmentMask, SegmentBuilder};
use crate::space::SpaceMapper;
use crate::util::{clamp_to_scale_factor, pack_as_float, MaxRect};
use crate::visibility::{compute_conservative_visible_rect, PrimitiveVisibility, VisibilityState};
-const MAX_MASK_SIZE: f32 = 4096.0;
+const MAX_MASK_SIZE: i32 = 4096;
const MIN_BRUSH_SPLIT_SIZE: f32 = 256.0;
const MIN_BRUSH_SPLIT_AREA: f32 = 128.0 * 128.0;
@@ -141,14 +141,32 @@ fn can_use_clip_chain_for_quad_path(
true
}
+/// Describes how clipping affects the rendering of a quad primitive.
+///
+/// As a general rule, parts of the quad that require masking are prerendered in an
+/// intermediate target and the mask is applied using multiplicative blending to
+/// the intermediate result before compositing it into the destination target.
+///
+/// Each segment can opt in or out of masking independently.
#[derive(Debug, Copy, Clone)]
pub enum QuadRenderStrategy {
+ /// The quad is not affected by any mask and is drawn directly in the destination
+ /// target.
Direct,
+ /// The quad is drawn entirely in an intermediate target and a mask is applied
+ /// before compositing in the destination target.
Indirect,
+ /// A rounded rectangle clip is applied to the quad primitive via a nine-patch.
+ /// The segments of the nine-patch that require a mask are rendered and masked in
+ /// an intermediate target, while other segments are drawn directly in the destination
+ /// target.
NinePatch {
radius: LayoutVector2D,
clip_rect: LayoutRect,
},
+ /// Split the primitive into coarse tiles so that each tile independently
+ /// has the opportunity to be drawn directly in the destination target or
+ /// via an intermediate target if it is affected by a mask.
Tiled {
x_tiles: u16,
y_tiles: u16,
@@ -163,69 +181,67 @@ fn get_prim_render_strategy(
can_use_nine_patch: bool,
spatial_tree: &SpatialTree,
) -> QuadRenderStrategy {
- if clip_chain.needs_mask {
- fn tile_count_for_size(size: f32) -> u16 {
- (size / MIN_BRUSH_SPLIT_SIZE).min(4.0).max(1.0).ceil() as u16
- }
+ if !clip_chain.needs_mask {
+ return QuadRenderStrategy::Direct
+ }
- let prim_coverage_size = clip_chain.pic_coverage_rect.size();
- let x_tiles = tile_count_for_size(prim_coverage_size.width);
- let y_tiles = tile_count_for_size(prim_coverage_size.height);
- let try_split_prim = x_tiles > 1 || y_tiles > 1;
-
- if try_split_prim {
- if can_use_nine_patch {
- if clip_chain.clips_range.count == 1 {
- let clip_instance = clip_store.get_instance_from_range(&clip_chain.clips_range, 0);
- let clip_node = &data_stores.clip[clip_instance.handle];
-
- if let ClipItemKind::RoundedRectangle { ref radius, mode: ClipMode::Clip, rect, .. } = clip_node.item.kind {
- let max_corner_width = radius.top_left.width
- .max(radius.bottom_left.width)
- .max(radius.top_right.width)
- .max(radius.bottom_right.width);
- let max_corner_height = radius.top_left.height
- .max(radius.bottom_left.height)
- .max(radius.top_right.height)
- .max(radius.bottom_right.height);
-
- if max_corner_width <= 0.5 * rect.size().width &&
- max_corner_height <= 0.5 * rect.size().height {
-
- let clip_prim_coords_match = spatial_tree.is_matching_coord_system(
- prim_spatial_node_index,
- clip_node.item.spatial_node_index,
- );
+ fn tile_count_for_size(size: f32) -> u16 {
+ (size / MIN_BRUSH_SPLIT_SIZE).min(4.0).max(1.0).ceil() as u16
+ }
- if clip_prim_coords_match {
- let map_clip_to_prim = SpaceMapper::new_with_target(
- prim_spatial_node_index,
- clip_node.item.spatial_node_index,
- LayoutRect::max_rect(),
- spatial_tree,
- );
+ let prim_coverage_size = clip_chain.pic_coverage_rect.size();
+ let x_tiles = tile_count_for_size(prim_coverage_size.width);
+ let y_tiles = tile_count_for_size(prim_coverage_size.height);
+ let try_split_prim = x_tiles > 1 || y_tiles > 1;
- if let Some(rect) = map_clip_to_prim.map(&rect) {
- return QuadRenderStrategy::NinePatch {
- radius: LayoutVector2D::new(max_corner_width, max_corner_height),
- clip_rect: rect,
- };
- }
- }
- }
+ if !try_split_prim {
+ return QuadRenderStrategy::Indirect;
+ }
+
+ if can_use_nine_patch && clip_chain.clips_range.count == 1 {
+ let clip_instance = clip_store.get_instance_from_range(&clip_chain.clips_range, 0);
+ let clip_node = &data_stores.clip[clip_instance.handle];
+
+ if let ClipItemKind::RoundedRectangle { ref radius, mode: ClipMode::Clip, rect, .. } = clip_node.item.kind {
+ let max_corner_width = radius.top_left.width
+ .max(radius.bottom_left.width)
+ .max(radius.top_right.width)
+ .max(radius.bottom_right.width);
+ let max_corner_height = radius.top_left.height
+ .max(radius.bottom_left.height)
+ .max(radius.top_right.height)
+ .max(radius.bottom_right.height);
+
+ if max_corner_width <= 0.5 * rect.size().width &&
+ max_corner_height <= 0.5 * rect.size().height {
+
+ let clip_prim_coords_match = spatial_tree.is_matching_coord_system(
+ prim_spatial_node_index,
+ clip_node.item.spatial_node_index,
+ );
+
+ if clip_prim_coords_match {
+ let map_clip_to_prim = SpaceMapper::new_with_target(
+ prim_spatial_node_index,
+ clip_node.item.spatial_node_index,
+ LayoutRect::max_rect(),
+ spatial_tree,
+ );
+
+ if let Some(rect) = map_clip_to_prim.map(&rect) {
+ return QuadRenderStrategy::NinePatch {
+ radius: LayoutVector2D::new(max_corner_width, max_corner_height),
+ clip_rect: rect,
+ };
}
}
}
-
- QuadRenderStrategy::Tiled {
- x_tiles,
- y_tiles,
- }
- } else {
- QuadRenderStrategy::Indirect
}
- } else {
- QuadRenderStrategy::Direct
+ }
+
+ QuadRenderStrategy::Tiled {
+ x_tiles,
+ y_tiles,
}
}
@@ -452,7 +468,7 @@ fn prepare_interned_prim_for_render(
kind: RenderTaskCacheKeyKind::LineDecoration(cache_key.clone()),
},
frame_state.gpu_cache,
- frame_state.frame_gpu_data,
+ &mut frame_state.frame_gpu_data.f32,
frame_state.rg_builder,
None,
false,
@@ -607,7 +623,7 @@ fn prepare_interned_prim_for_render(
handles.push(frame_state.resource_cache.request_render_task(
cache_key,
frame_state.gpu_cache,
- frame_state.frame_gpu_data,
+ &mut frame_state.frame_gpu_data.f32,
frame_state.rg_builder,
None,
false, // TODO(gw): We don't calculate opacity for borders yet!
@@ -764,7 +780,7 @@ fn prepare_interned_prim_for_render(
// the written block count) to gpu-buffer, we could add a trait for
// writing typed data?
let main_prim_address = write_prim_blocks(
- frame_state.frame_gpu_data,
+ &mut frame_state.frame_gpu_data.f32,
prim_data.common.prim_rect,
prim_instance.vis.clip_chain.local_clip_rect,
premul_color,
@@ -787,24 +803,21 @@ fn prepare_interned_prim_for_render(
}
QuadRenderStrategy::Indirect => {
let surface = &frame_state.surfaces[pic_context.surface_index.0];
- let clipped_surface_rect = surface.get_surface_rect(
+ let Some(clipped_surface_rect) = surface.get_surface_rect(
&prim_instance.vis.clip_chain.pic_coverage_rect,
frame_context.spatial_tree,
- ).expect("bug: what can cause this?");
-
- let p0 = clipped_surface_rect.min.floor();
- let p1 = clipped_surface_rect.max.ceil();
+ ) else {
+ return;
+ };
- let x0 = p0.x;
- let y0 = p0.y;
- let x1 = p1.x;
- let y1 = p1.y;
+ let p0 = clipped_surface_rect.min.to_f32();
+ let p1 = clipped_surface_rect.max.to_f32();
let segment = add_segment(
- x0,
- y0,
- x1,
- y1,
+ p0.x,
+ p0.y,
+ p1.x,
+ p1.y,
true,
prim_instance,
prim_spatial_node_index,
@@ -820,7 +833,7 @@ fn prepare_interned_prim_for_render(
add_composite_prim(
prim_instance_index,
- LayoutRect::new(LayoutPoint::new(x0, y0), LayoutPoint::new(x1, y1)),
+ LayoutRect::new(p0.cast_unit(), p1.cast_unit()),
premul_color,
quad_flags,
frame_state,
@@ -831,10 +844,12 @@ fn prepare_interned_prim_for_render(
QuadRenderStrategy::Tiled { x_tiles, y_tiles } => {
let surface = &frame_state.surfaces[pic_context.surface_index.0];
- let clipped_surface_rect = surface.get_surface_rect(
+ let Some(clipped_surface_rect) = surface.get_surface_rect(
&prim_instance.vis.clip_chain.pic_coverage_rect,
frame_context.spatial_tree,
- ).expect("bug: what can cause this?");
+ ) else {
+ return;
+ };
let unclipped_surface_rect = surface.map_to_device_rect(
&prim_instance.vis.clip_chain.pic_coverage_rect,
@@ -843,21 +858,21 @@ fn prepare_interned_prim_for_render(
scratch.quad_segments.clear();
- let mut x_coords = vec![clipped_surface_rect.min.x.round()];
- let mut y_coords = vec![clipped_surface_rect.min.y.round()];
+ let mut x_coords = vec![clipped_surface_rect.min.x];
+ let mut y_coords = vec![clipped_surface_rect.min.y];
- let dx = (clipped_surface_rect.max.x - clipped_surface_rect.min.x) / x_tiles as f32;
- let dy = (clipped_surface_rect.max.y - clipped_surface_rect.min.y) / y_tiles as f32;
+ let dx = (clipped_surface_rect.max.x - clipped_surface_rect.min.x) as f32 / x_tiles as f32;
+ let dy = (clipped_surface_rect.max.y - clipped_surface_rect.min.y) as f32 / y_tiles as f32;
- for x in 1 .. x_tiles {
- x_coords.push((clipped_surface_rect.min.x + x as f32 * dx).round());
+ for x in 1 .. (x_tiles as i32) {
+ x_coords.push((clipped_surface_rect.min.x as f32 + x as f32 * dx).round() as i32);
}
- for y in 1 .. y_tiles {
- y_coords.push((clipped_surface_rect.min.y + y as f32 * dy).round());
+ for y in 1 .. (y_tiles as i32) {
+ y_coords.push((clipped_surface_rect.min.y as f32 + y as f32 * dy).round() as i32);
}
- x_coords.push(clipped_surface_rect.max.x.round());
- y_coords.push(clipped_surface_rect.max.y.round());
+ x_coords.push(clipped_surface_rect.max.x);
+ y_coords.push(clipped_surface_rect.max.y);
for y in 0 .. y_coords.len()-1 {
let y0 = y_coords[y];
@@ -877,18 +892,11 @@ fn prepare_interned_prim_for_render(
let create_task = true;
- let r = DeviceRect::new(DevicePoint::new(x0, y0), DevicePoint::new(x1, y1));
-
- let x0 = r.min.x;
- let y0 = r.min.y;
- let x1 = r.max.x;
- let y1 = r.max.y;
-
let segment = add_segment(
- x0,
- y0,
- x1,
- y1,
+ x0 as f32,
+ y0 as f32,
+ x1 as f32,
+ y1 as f32,
create_task,
prim_instance,
prim_spatial_node_index,
@@ -917,10 +925,12 @@ fn prepare_interned_prim_for_render(
}
QuadRenderStrategy::NinePatch { clip_rect, radius } => {
let surface = &frame_state.surfaces[pic_context.surface_index.0];
- let clipped_surface_rect = surface.get_surface_rect(
+ let Some(clipped_surface_rect) = surface.get_surface_rect(
&prim_instance.vis.clip_chain.pic_coverage_rect,
frame_context.spatial_tree,
- ).expect("bug: what can cause this?");
+ ) else {
+ return;
+ };
let unclipped_surface_rect = surface.map_to_device_rect(
&prim_instance.vis.clip_chain.pic_coverage_rect,
@@ -943,17 +953,17 @@ fn prepare_interned_prim_for_render(
let surface_rect_0 = surface.map_to_device_rect(
&pic_corner_0,
frame_context.spatial_tree,
- );
+ ).round_out().to_i32();
let surface_rect_1 = surface.map_to_device_rect(
&pic_corner_1,
frame_context.spatial_tree,
- );
+ ).round_out().to_i32();
- let p0 = surface_rect_0.min.floor();
- let p1 = surface_rect_0.max.ceil();
- let p2 = surface_rect_1.min.floor();
- let p3 = surface_rect_1.max.ceil();
+ let p0 = surface_rect_0.min;
+ let p1 = surface_rect_0.max;
+ let p2 = surface_rect_1.min;
+ let p3 = surface_rect_1.max;
let mut x_coords = [p0.x, p1.x, p2.x, p3.x];
let mut y_coords = [p0.y, p1.y, p2.y, p3.y];
@@ -985,7 +995,10 @@ fn prepare_interned_prim_for_render(
true
};
- let r = DeviceRect::new(DevicePoint::new(x0, y0), DevicePoint::new(x1, y1));
+ let r = DeviceIntRect::new(
+ DeviceIntPoint::new(x0, y0),
+ DeviceIntPoint::new(x1, y1),
+ );
let r = match r.intersection(&clipped_surface_rect) {
Some(r) => r,
@@ -994,16 +1007,11 @@ fn prepare_interned_prim_for_render(
}
};
- let x0 = r.min.x;
- let y0 = r.min.y;
- let x1 = r.max.x;
- let y1 = r.max.y;
-
let segment = add_segment(
- x0,
- y0,
- x1,
- y1,
+ r.min.x as f32,
+ r.min.y as f32,
+ r.max.x as f32,
+ r.max.y as f32,
create_task,
prim_instance,
prim_spatial_node_index,
@@ -1138,7 +1146,7 @@ fn prepare_interned_prim_for_render(
let stops_address = GradientGpuBlockBuilder::build(
prim_data.reverse_stops,
- frame_state.frame_gpu_data,
+ &mut frame_state.frame_gpu_data.f32,
&prim_data.stops,
);
@@ -1296,8 +1304,8 @@ fn prepare_interned_prim_for_render(
.clipped_local_rect
.cast_unit();
- let main_prim_address = write_prim_blocks(
- frame_state.frame_gpu_data,
+ let prim_address_f = write_prim_blocks(
+ &mut frame_state.frame_gpu_data.f32,
prim_local_rect,
prim_instance.vis.clip_chain.local_clip_rect,
PremultipliedColorF::WHITE,
@@ -1333,7 +1341,7 @@ fn prepare_interned_prim_for_render(
let masks = MaskSubPass {
clip_node_range,
prim_spatial_node_index,
- main_prim_address,
+ prim_address_f,
};
// Add the mask as a sub-pass of the picture
@@ -1353,30 +1361,22 @@ fn prepare_interned_prim_for_render(
let device_pixel_scale = surface.device_pixel_scale;
let raster_spatial_node_index = surface.raster_spatial_node_index;
- let clipped_surface_rect = surface.get_surface_rect(
+ let Some(clipped_surface_rect) = surface.get_surface_rect(
&coverage_rect,
frame_context.spatial_tree,
- ).expect("bug: what can cause this?");
-
- let p0 = clipped_surface_rect.min.floor();
- let x0 = p0.x;
- let y0 = p0.y;
-
- let content_origin = DevicePoint::new(x0, y0);
+ ) else {
+ return;
+ };
// Draw a normal screens-space mask to an alpha target that
// can be sampled when compositing this picture.
let empty_task = EmptyTask {
- content_origin,
+ content_origin: clipped_surface_rect.min.to_f32(),
device_pixel_scale,
raster_spatial_node_index,
};
- let p1 = clipped_surface_rect.max.ceil();
- let x1 = p1.x;
- let y1 = p1.y;
-
- let task_size = DeviceSize::new(x1 - x0, y1 - y0).round().to_i32();
+ let task_size = clipped_surface_rect.size();
let clip_task_id = frame_state.rg_builder.add().init(RenderTask::new_dynamic(
task_size,
@@ -1406,7 +1406,7 @@ fn prepare_interned_prim_for_render(
let masks = MaskSubPass {
clip_node_range,
prim_spatial_node_index,
- main_prim_address,
+ prim_address_f,
};
let clip_task = frame_state.rg_builder.get_task_mut(clip_task_id);
@@ -1814,13 +1814,19 @@ pub fn update_clip_task(
unadjusted_device_rect,
device_pixel_scale,
);
+
+ if device_rect.size().to_i32().is_empty() {
+ log::warn!("Bad adjusted clip task size {:?} (was {:?})", device_rect.size(), unadjusted_device_rect.size());
+ return false;
+ }
+
let clip_task_id = RenderTaskKind::new_mask(
device_rect,
instance.vis.clip_chain.clips_range,
root_spatial_node_index,
frame_state.clip_store,
frame_state.gpu_cache,
- frame_state.frame_gpu_data,
+ &mut frame_state.frame_gpu_data.f32,
frame_state.resource_cache,
frame_state.rg_builder,
&mut data_stores.clip,
@@ -1865,7 +1871,7 @@ pub fn update_brush_segment_clip_task(
return ClipMaskKind::None;
}
- let device_rect = match frame_state.surfaces[surface_index.0].get_surface_rect(
+ let unadjusted_device_rect = match frame_state.surfaces[surface_index.0].get_surface_rect(
&clip_chain.pic_coverage_rect,
frame_context.spatial_tree,
) {
@@ -1873,7 +1879,12 @@ pub fn update_brush_segment_clip_task(
None => return ClipMaskKind::Clipped,
};
- let (device_rect, device_pixel_scale) = adjust_mask_scale_for_max_size(device_rect, device_pixel_scale);
+ let (device_rect, device_pixel_scale) = adjust_mask_scale_for_max_size(unadjusted_device_rect, device_pixel_scale);
+
+ if device_rect.size().to_i32().is_empty() {
+ log::warn!("Bad adjusted mask size {:?} (was {:?})", device_rect.size(), unadjusted_device_rect.size());
+ return ClipMaskKind::Clipped;
+ }
let clip_task_id = RenderTaskKind::new_mask(
device_rect,
@@ -1881,7 +1892,7 @@ pub fn update_brush_segment_clip_task(
root_spatial_node_index,
frame_state.clip_store,
frame_state.gpu_cache,
- frame_state.frame_gpu_data,
+ &mut frame_state.frame_gpu_data.f32,
frame_state.resource_cache,
frame_state.rg_builder,
clip_data_store,
@@ -2101,15 +2112,17 @@ fn build_segments_if_needed(
}
// Ensures that the size of mask render tasks are within MAX_MASK_SIZE.
-fn adjust_mask_scale_for_max_size(device_rect: DeviceRect, device_pixel_scale: DevicePixelScale) -> (DeviceRect, DevicePixelScale) {
+fn adjust_mask_scale_for_max_size(device_rect: DeviceIntRect, device_pixel_scale: DevicePixelScale) -> (DeviceIntRect, DevicePixelScale) {
if device_rect.width() > MAX_MASK_SIZE || device_rect.height() > MAX_MASK_SIZE {
// round_out will grow by 1 integer pixel if origin is on a
// fractional position, so keep that margin for error with -1:
- let scale = (MAX_MASK_SIZE - 1.0) /
- f32::max(device_rect.width(), device_rect.height());
+ let device_rect_f = device_rect.to_f32();
+ let scale = (MAX_MASK_SIZE - 1) as f32 /
+ f32::max(device_rect_f.width(), device_rect_f.height());
let new_device_pixel_scale = device_pixel_scale * Scale::new(scale);
- let new_device_rect = (device_rect.to_f32() * Scale::new(scale))
- .round_out();
+ let new_device_rect = (device_rect_f * Scale::new(scale))
+ .round_out()
+ .to_i32();
(new_device_rect, new_device_pixel_scale)
} else {
(device_rect, device_pixel_scale)
@@ -2117,7 +2130,7 @@ fn adjust_mask_scale_for_max_size(device_rect: DeviceRect, device_pixel_scale: D
}
pub fn write_prim_blocks(
- builder: &mut GpuBufferBuilder,
+ builder: &mut GpuBufferBuilderF,
prim_rect: LayoutRect,
clip_rect: LayoutRect,
color: PremultipliedColorF,
@@ -2153,7 +2166,7 @@ fn add_segment(
prim_instance: &PrimitiveInstance,
prim_spatial_node_index: SpatialNodeIndex,
raster_spatial_node_index: SpatialNodeIndex,
- main_prim_address: GpuBufferAddress,
+ prim_address_f: GpuBufferAddress,
transform_id: TransformPaletteId,
aa_flags: EdgeAaSegmentMask,
quad_flags: QuadFlags,
@@ -2177,7 +2190,7 @@ fn add_segment(
raster_spatial_node_index,
device_pixel_scale,
content_origin,
- main_prim_address,
+ prim_address_f,
transform_id,
aa_flags,
quad_flags,
@@ -2189,7 +2202,7 @@ fn add_segment(
let masks = MaskSubPass {
clip_node_range: prim_instance.vis.clip_chain.clips_range,
prim_spatial_node_index,
- main_prim_address,
+ prim_address_f,
};
let task = frame_state.rg_builder.get_task_mut(task_id);
@@ -2223,7 +2236,7 @@ fn add_composite_prim(
segments: &[QuadSegment],
) {
let composite_prim_address = write_prim_blocks(
- frame_state.frame_gpu_data,
+ &mut frame_state.frame_gpu_data.f32,
rect,
rect,
color,
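
Note on the restructured get_prim_render_strategy above: it returns Direct when no mask is needed, then chooses between Indirect, NinePatch and Tiled depending on how many coarse tiles the picture-space coverage rect splits into. The tile-count helper from that hunk is reproduced below as a runnable snippet (MIN_BRUSH_SPLIT_SIZE is the constant defined near the top of prepare.rs) with a few worked values.

const MIN_BRUSH_SPLIT_SIZE: f32 = 256.0;

// Same shape as the helper in get_prim_render_strategy: split the coverage
// into at most 4 tiles per axis, at least 1.
fn tile_count_for_size(size: f32) -> u16 {
    (size / MIN_BRUSH_SPLIT_SIZE).min(4.0).max(1.0).ceil() as u16
}

fn main() {
    // Small coverage: one tile per axis, so the strategy stays Indirect.
    assert_eq!(tile_count_for_size(200.0), 1);
    // 700px coverage: ceil(700 / 256) = 3 tiles on that axis.
    assert_eq!(tile_count_for_size(700.0), 3);
    // Very large coverage is clamped to 4 tiles per axis.
    assert_eq!(tile_count_for_size(5000.0), 4);
}
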
diff --git a/gfx/wr/webrender/src/prim_store/gradient/conic.rs b/gfx/wr/webrender/src/prim_store/gradient/conic.rs
index d9c3f5d350..2c4818095e 100644
--- a/gfx/wr/webrender/src/prim_store/gradient/conic.rs
+++ b/gfx/wr/webrender/src/prim_store/gradient/conic.rs
@@ -254,7 +254,7 @@ impl ConicGradientTemplate {
kind: RenderTaskCacheKeyKind::ConicGradient(cache_key),
},
frame_state.gpu_cache,
- frame_state.frame_gpu_data,
+ &mut frame_state.frame_gpu_data.f32,
frame_state.rg_builder,
None,
false,
diff --git a/gfx/wr/webrender/src/prim_store/gradient/linear.rs b/gfx/wr/webrender/src/prim_store/gradient/linear.rs
index 85da4b670a..7075daac0d 100644
--- a/gfx/wr/webrender/src/prim_store/gradient/linear.rs
+++ b/gfx/wr/webrender/src/prim_store/gradient/linear.rs
@@ -522,7 +522,7 @@ impl LinearGradientTemplate {
kind: RenderTaskCacheKeyKind::FastLinearGradient(gradient),
},
frame_state.gpu_cache,
- frame_state.frame_gpu_data,
+ &mut frame_state.frame_gpu_data.f32,
frame_state.rg_builder,
None,
false,
@@ -552,7 +552,7 @@ impl LinearGradientTemplate {
kind: RenderTaskCacheKeyKind::LinearGradient(cache_key),
},
frame_state.gpu_cache,
- frame_state.frame_gpu_data,
+ &mut frame_state.frame_gpu_data.f32,
frame_state.rg_builder,
None,
false,
diff --git a/gfx/wr/webrender/src/prim_store/gradient/mod.rs b/gfx/wr/webrender/src/prim_store/gradient/mod.rs
index d0b922c579..a0410549b0 100644
--- a/gfx/wr/webrender/src/prim_store/gradient/mod.rs
+++ b/gfx/wr/webrender/src/prim_store/gradient/mod.rs
@@ -4,7 +4,7 @@
use api::{ColorF, ColorU, GradientStop, PremultipliedColorF};
use api::units::{LayoutRect, LayoutSize, LayoutVector2D};
-use crate::renderer::{GpuBufferAddress, GpuBufferBuilder};
+use crate::renderer::{GpuBufferAddress, GpuBufferBuilderF};
use std::hash;
mod linear;
@@ -167,7 +167,7 @@ impl GradientGpuBlockBuilder {
// Build the gradient data from the supplied stops, reversing them if necessary.
pub fn build(
reverse_stops: bool,
- gpu_buffer_builder: &mut GpuBufferBuilder,
+ gpu_buffer_builder: &mut GpuBufferBuilderF,
src_stops: &[GradientStop],
) -> GpuBufferAddress {
// Preconditions (should be ensured by DisplayListBuilder):
diff --git a/gfx/wr/webrender/src/prim_store/gradient/radial.rs b/gfx/wr/webrender/src/prim_store/gradient/radial.rs
index f3f20f9a55..4d91b28633 100644
--- a/gfx/wr/webrender/src/prim_store/gradient/radial.rs
+++ b/gfx/wr/webrender/src/prim_store/gradient/radial.rs
@@ -220,7 +220,7 @@ impl RadialGradientTemplate {
kind: RenderTaskCacheKeyKind::RadialGradient(cache_key),
},
frame_state.gpu_cache,
- frame_state.frame_gpu_data,
+ &mut frame_state.frame_gpu_data.f32,
frame_state.rg_builder,
None,
false,
diff --git a/gfx/wr/webrender/src/prim_store/image.rs b/gfx/wr/webrender/src/prim_store/image.rs
index 6007b4ba9a..8a05965536 100644
--- a/gfx/wr/webrender/src/prim_store/image.rs
+++ b/gfx/wr/webrender/src/prim_store/image.rs
@@ -257,7 +257,7 @@ impl ImageData {
kind: RenderTaskCacheKeyKind::Image(image_cache_key),
},
frame_state.gpu_cache,
- frame_state.frame_gpu_data,
+ &mut frame_state.frame_gpu_data.f32,
frame_state.rg_builder,
None,
descriptor.is_opaque(),
diff --git a/gfx/wr/webrender/src/prim_store/mod.rs b/gfx/wr/webrender/src/prim_store/mod.rs
index db5480f597..cc09eab6b1 100644
--- a/gfx/wr/webrender/src/prim_store/mod.rs
+++ b/gfx/wr/webrender/src/prim_store/mod.rs
@@ -1312,6 +1312,37 @@ impl PrimitiveScratchBuffer {
}
}
+ pub fn push_debug_rect_with_stroke_width(
+ &mut self,
+ rect: WorldRect,
+ border: ColorF,
+ stroke_width: f32
+ ) {
+ let top_edge = WorldRect::new(
+ WorldPoint::new(rect.min.x + stroke_width, rect.min.y),
+ WorldPoint::new(rect.max.x - stroke_width, rect.min.y + stroke_width)
+ );
+ self.push_debug_rect(top_edge * DevicePixelScale::new(1.0), border, border);
+
+ let bottom_edge = WorldRect::new(
+ WorldPoint::new(rect.min.x + stroke_width, rect.max.y - stroke_width),
+ WorldPoint::new(rect.max.x - stroke_width, rect.max.y)
+ );
+ self.push_debug_rect(bottom_edge * DevicePixelScale::new(1.0), border, border);
+
+ let right_edge = WorldRect::new(
+ WorldPoint::new(rect.max.x - stroke_width, rect.min.y),
+ rect.max
+ );
+ self.push_debug_rect(right_edge * DevicePixelScale::new(1.0), border, border);
+
+ let left_edge = WorldRect::new(
+ rect.min,
+ WorldPoint::new(rect.min.x + stroke_width, rect.max.y)
+ );
+ self.push_debug_rect(left_edge * DevicePixelScale::new(1.0), border, border);
+ }
+
#[allow(dead_code)]
pub fn push_debug_rect(
&mut self,
diff --git a/gfx/wr/webrender/src/render_target.rs b/gfx/wr/webrender/src/render_target.rs
index 0db77d5ce0..f2d1c24c10 100644
--- a/gfx/wr/webrender/src/render_target.rs
+++ b/gfx/wr/webrender/src/render_target.rs
@@ -24,7 +24,7 @@ use crate::prim_store::gradient::{
FastLinearGradientInstance, LinearGradientInstance, RadialGradientInstance,
ConicGradientInstance,
};
-use crate::renderer::{GpuBufferBuilder, GpuBufferAddress};
+use crate::renderer::{GpuBufferAddress, GpuBufferBuilder};
use crate::render_backend::DataStores;
use crate::render_task::{RenderTaskKind, RenderTaskAddress, SubPass};
use crate::render_task::{RenderTask, ScalingTask, SvgFilterInfo, MaskSubPass};
@@ -104,6 +104,7 @@ pub trait RenderTarget {
_z_generator: &mut ZBufferIdGenerator,
_prim_instances: &[PrimitiveInstance],
_cmd_buffers: &CommandBufferList,
+ _gpu_buffer_builder: &mut GpuBufferBuilder,
) {
}
@@ -183,6 +184,7 @@ impl<T: RenderTarget> RenderTargetList<T> {
z_generator: &mut ZBufferIdGenerator,
prim_instances: &[PrimitiveInstance],
cmd_buffers: &CommandBufferList,
+ gpu_buffer_builder: &mut GpuBufferBuilder,
) {
if self.targets.is_empty() {
return;
@@ -198,6 +200,7 @@ impl<T: RenderTarget> RenderTargetList<T> {
z_generator,
prim_instances,
cmd_buffers,
+ gpu_buffer_builder,
);
}
}
@@ -274,10 +277,10 @@ impl RenderTarget for ColorRenderTarget {
z_generator: &mut ZBufferIdGenerator,
prim_instances: &[PrimitiveInstance],
cmd_buffers: &CommandBufferList,
+ gpu_buffer_builder: &mut GpuBufferBuilder,
) {
profile_scope!("build");
let mut merged_batches = AlphaBatchContainer::new(None);
- let mut gpu_buffer_builder = GpuBufferBuilder::new();
for task_id in &self.alpha_tasks {
profile_scope!("alpha_task");
@@ -326,7 +329,7 @@ impl RenderTarget for ColorRenderTarget {
pic_task.surface_spatial_node_index,
z_generator,
prim_instances,
- &mut gpu_buffer_builder,
+ gpu_buffer_builder,
segments,
);
});
@@ -376,13 +379,14 @@ impl RenderTarget for ColorRenderTarget {
add_quad_to_batch(
render_task_address,
info.transform_id,
- info.prim_address,
+ info.prim_address_f,
info.quad_flags,
info.edge_flags,
INVALID_SEGMENT_INDEX as u8,
RenderTaskId::INVALID,
ZBufferId(0),
render_tasks,
+ gpu_buffer_builder,
|_, instance| {
if info.prim_needs_scissor_rect {
self.prim_instances_with_scissor
@@ -921,6 +925,7 @@ fn add_svg_filter_instances(
kind,
input_count,
generic_int,
+ padding: 0,
extra_data_address: extra_data_address.unwrap_or(GpuCacheAddress::INVALID),
};
@@ -978,7 +983,7 @@ fn build_mask_tasks(
let (clip_address, fast_path) = match clip_node.item.kind {
ClipItemKind::RoundedRectangle { rect, radius, mode } => {
let (fast_path, clip_address) = if radius.is_uniform().is_some() {
- let mut writer = gpu_buffer_builder.write_blocks(3);
+ let mut writer = gpu_buffer_builder.f32.write_blocks(3);
writer.push_one(rect);
writer.push_one([radius.top_left.width, 0.0, 0.0, 0.0]);
writer.push_one([mode as i32 as f32, 0.0, 0.0, 0.0]);
@@ -986,7 +991,7 @@ fn build_mask_tasks(
(true, clip_address)
} else {
- let mut writer = gpu_buffer_builder.write_blocks(4);
+ let mut writer = gpu_buffer_builder.f32.write_blocks(4);
writer.push_one(rect);
writer.push_one([
radius.top_left.width,
@@ -1011,7 +1016,7 @@ fn build_mask_tasks(
ClipItemKind::Rectangle { rect, mode, .. } => {
assert_eq!(mode, ClipMode::Clip);
- let mut writer = gpu_buffer_builder.write_blocks(3);
+ let mut writer = gpu_buffer_builder.f32.write_blocks(3);
writer.push_one(rect);
writer.push_one([0.0, 0.0, 0.0, 0.0]);
writer.push_one([mode as i32 as f32, 0.0, 0.0, 0.0]);
@@ -1043,7 +1048,7 @@ fn build_mask_tasks(
for tile in clip_store.visible_mask_tiles(&clip_instance) {
let clip_prim_address = write_prim_blocks(
- gpu_buffer_builder,
+ &mut gpu_buffer_builder.f32,
rect,
rect,
PremultipliedColorF::WHITE,
@@ -1067,6 +1072,7 @@ fn build_mask_tasks(
tile.task_id,
ZBufferId(0),
render_tasks,
+ gpu_buffer_builder,
|_, prim| {
if clip_needs_scissor_rect {
results
@@ -1107,7 +1113,7 @@ fn build_mask_tasks(
);
let main_prim_address = write_prim_blocks(
- gpu_buffer_builder,
+ &mut gpu_buffer_builder.f32,
task_world_rect.cast_unit(),
task_world_rect.cast_unit(),
PremultipliedColorF::WHITE,
@@ -1162,6 +1168,7 @@ fn build_mask_tasks(
RenderTaskId::INVALID,
ZBufferId(0),
render_tasks,
+ gpu_buffer_builder,
|_, prim| {
let instance = MaskInstance {
prim,
@@ -1235,7 +1242,7 @@ fn build_sub_pass(
render_task_address,
content_rect / device_pixel_scale,
target_rect,
- masks.main_prim_address,
+ masks.prim_address_f,
masks.prim_spatial_node_index,
raster_spatial_node_index,
ctx.clip_store,
diff --git a/gfx/wr/webrender/src/render_task.rs b/gfx/wr/webrender/src/render_task.rs
index 4e920cb356..8889ae1ea6 100644
--- a/gfx/wr/webrender/src/render_task.rs
+++ b/gfx/wr/webrender/src/render_task.rs
@@ -10,7 +10,7 @@ use crate::clip::{ClipDataStore, ClipItemKind, ClipStore, ClipNodeRange};
use crate::command_buffer::{CommandBufferIndex, QuadFlags};
use crate::spatial_tree::SpatialNodeIndex;
use crate::filterdata::SFilterData;
-use crate::frame_builder::{FrameBuilderConfig};
+use crate::frame_builder::FrameBuilderConfig;
use crate::gpu_cache::{GpuCache, GpuCacheAddress, GpuCacheHandle};
use crate::gpu_types::{BorderInstance, ImageSource, UvRectKind, TransformPaletteId};
use crate::internal_types::{CacheTextureId, FastHashMap, TextureSource, Swizzle};
@@ -22,7 +22,7 @@ use crate::prim_store::gradient::{
};
use crate::resource_cache::{ResourceCache, ImageRequest};
use std::{usize, f32, i32, u32};
-use crate::renderer::{GpuBufferAddress, GpuBufferBuilder};
+use crate::renderer::{GpuBufferAddress, GpuBufferBuilderF};
use crate::render_target::{ResolveOp, RenderTargetKind};
use crate::render_task_graph::{PassId, RenderTaskId, RenderTaskGraphBuilder};
use crate::render_task_cache::{RenderTaskCacheEntryHandle, RenderTaskCacheKey, RenderTaskCacheKeyKind, RenderTaskParent};
@@ -46,11 +46,11 @@ fn render_task_sanity_check(size: &DeviceIntSize) {
#[repr(C)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
-pub struct RenderTaskAddress(pub u16);
+pub struct RenderTaskAddress(pub i32);
impl Into<RenderTaskAddress> for RenderTaskId {
fn into(self) -> RenderTaskAddress {
- RenderTaskAddress(self.index as u16)
+ RenderTaskAddress(self.index as i32)
}
}
@@ -186,7 +186,7 @@ pub struct EmptyTask {
pub struct PrimTask {
pub device_pixel_scale: DevicePixelScale,
pub content_origin: DevicePoint,
- pub prim_address: GpuBufferAddress,
+ pub prim_address_f: GpuBufferAddress,
pub prim_spatial_node_index: SpatialNodeIndex,
pub raster_spatial_node_index: SpatialNodeIndex,
pub transform_id: TransformPaletteId,
@@ -520,7 +520,7 @@ impl RenderTaskKind {
raster_spatial_node_index: SpatialNodeIndex,
device_pixel_scale: DevicePixelScale,
content_origin: DevicePoint,
- prim_address: GpuBufferAddress,
+ prim_address_f: GpuBufferAddress,
transform_id: TransformPaletteId,
edge_flags: EdgeAaSegmentMask,
quad_flags: QuadFlags,
@@ -532,7 +532,7 @@ impl RenderTaskKind {
raster_spatial_node_index,
device_pixel_scale,
content_origin,
- prim_address,
+ prim_address_f,
transform_id,
edge_flags,
quad_flags,
@@ -588,12 +588,12 @@ impl RenderTaskKind {
}
pub fn new_mask(
- outer_rect: DeviceRect,
+ outer_rect: DeviceIntRect,
clip_node_range: ClipNodeRange,
root_spatial_node_index: SpatialNodeIndex,
clip_store: &mut ClipStore,
gpu_cache: &mut GpuCache,
- gpu_buffer_builder: &mut GpuBufferBuilder,
+ gpu_buffer_builder: &mut GpuBufferBuilderF,
resource_cache: &mut ResourceCache,
rg_builder: &mut RenderTaskGraphBuilder,
clip_data_store: &mut ClipDataStore,
@@ -610,7 +610,7 @@ impl RenderTaskKind {
// TODO(gw): If this ever shows up in a profile, we could pre-calculate
// whether a ClipSources contains any box-shadows and skip
// this iteration for the majority of cases.
- let task_size = outer_rect.size().to_i32();
+ let task_size = outer_rect.size();
// If we have a potentially tiled clip mask, clear the mask area first. Otherwise,
// the first (primary) clip mask will overwrite all the clip mask pixels with
@@ -620,7 +620,7 @@ impl RenderTaskKind {
RenderTask::new_dynamic(
task_size,
RenderTaskKind::CacheMask(CacheMaskTask {
- actual_rect: outer_rect,
+ actual_rect: outer_rect.to_f32(),
clip_node_range,
root_spatial_node_index,
device_pixel_scale,
@@ -883,7 +883,7 @@ pub type TaskDependencies = SmallVec<[RenderTaskId;2]>;
pub struct MaskSubPass {
pub clip_node_range: ClipNodeRange,
pub prim_spatial_node_index: SpatialNodeIndex,
- pub main_prim_address: GpuBufferAddress,
+ pub prim_address_f: GpuBufferAddress,
}
#[cfg_attr(feature = "capture", derive(Serialize))]
@@ -940,6 +940,9 @@ impl RenderTask {
size: DeviceIntSize,
kind: RenderTaskKind,
) -> Self {
+ if size.is_empty() {
+ log::warn!("Bad {} render task size: {:?}", kind.as_str(), size);
+ }
RenderTask::new(
RenderTaskLocation::Unallocated { size },
kind,
diff --git a/gfx/wr/webrender/src/render_task_cache.rs b/gfx/wr/webrender/src/render_task_cache.rs
index 0454c1214f..2c81a9824f 100644
--- a/gfx/wr/webrender/src/render_task_cache.rs
+++ b/gfx/wr/webrender/src/render_task_cache.rs
@@ -22,7 +22,7 @@ use crate::resource_cache::CacheItem;
use std::{mem, usize, f32, i32};
use crate::surface::SurfaceBuilder;
use crate::texture_cache::{TextureCache, TextureCacheHandle, Eviction, TargetShader};
-use crate::renderer::GpuBufferBuilder;
+use crate::renderer::GpuBufferBuilderF;
use crate::render_target::RenderTargetKind;
use crate::render_task::{RenderTask, StaticRenderTaskSurface, RenderTaskLocation, RenderTaskKind, CachedTask};
use crate::render_task_graph::{RenderTaskGraphBuilder, RenderTaskId};
@@ -228,7 +228,7 @@ impl RenderTaskCache {
key: RenderTaskCacheKey,
texture_cache: &mut TextureCache,
gpu_cache: &mut GpuCache,
- gpu_buffer_builder: &mut GpuBufferBuilder,
+ gpu_buffer_builder: &mut GpuBufferBuilderF,
rg_builder: &mut RenderTaskGraphBuilder,
user_data: Option<[f32; 4]>,
is_opaque: bool,
@@ -237,7 +237,7 @@ impl RenderTaskCache {
f: F,
) -> Result<RenderTaskId, ()>
where
- F: FnOnce(&mut RenderTaskGraphBuilder, &mut GpuBufferBuilder) -> Result<RenderTaskId, ()>,
+ F: FnOnce(&mut RenderTaskGraphBuilder, &mut GpuBufferBuilderF) -> Result<RenderTaskId, ()>,
{
let frame_id = self.frame_id;
let size = key.size;
diff --git a/gfx/wr/webrender/src/render_task_graph.rs b/gfx/wr/webrender/src/render_task_graph.rs
index 29ecf66a2e..6c02de8b65 100644
--- a/gfx/wr/webrender/src/render_task_graph.rs
+++ b/gfx/wr/webrender/src/render_task_graph.rs
@@ -445,6 +445,13 @@ impl RenderTaskGraphBuilder {
)
};
+ if surface_size.is_empty() {
+ // We would panic in the guillotine allocator. Instead, panic here
+ // with some context.
+ let task_name = graph.tasks[task_id.index as usize].kind.as_str();
+ panic!("{} render task has invalid size {:?}", task_name, surface_size);
+ }
+
let format = match kind {
RenderTargetKind::Color => ImageFormat::RGBA8,
RenderTargetKind::Alpha => ImageFormat::R8,
diff --git a/gfx/wr/webrender/src/renderer/gpu_buffer.rs b/gfx/wr/webrender/src/renderer/gpu_buffer.rs
index 58a8aa6cbd..c8a2b87b0c 100644
--- a/gfx/wr/webrender/src/renderer/gpu_buffer.rs
+++ b/gfx/wr/webrender/src/renderer/gpu_buffer.rs
@@ -13,21 +13,57 @@
use crate::renderer::MAX_VERTEX_TEXTURE_WIDTH;
use api::units::{DeviceIntRect, DeviceIntSize, LayoutRect, PictureRect, DeviceRect};
-use api::{PremultipliedColorF};
+use api::{PremultipliedColorF, ImageFormat};
use crate::device::Texel;
use crate::render_task_graph::{RenderTaskGraph, RenderTaskId};
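+/// Pair of per-texel-type builders: f32 blocks and i32 blocks are collected
+/// separately and uploaded as separate textures (RGBAF32 and RGBAI32).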
+pub struct GpuBufferBuilder {
+ pub i32: GpuBufferBuilderI,
+ pub f32: GpuBufferBuilderF,
+}
+
+pub type GpuBufferF = GpuBuffer<GpuBufferBlockF>;
+pub type GpuBufferBuilderF = GpuBufferBuilderImpl<GpuBufferBlockF>;
+
+pub type GpuBufferI = GpuBuffer<GpuBufferBlockI>;
+pub type GpuBufferBuilderI = GpuBufferBuilderImpl<GpuBufferBlockI>;
+
+unsafe impl Texel for GpuBufferBlockF {
+ fn image_format() -> ImageFormat { ImageFormat::RGBAF32 }
+}
+
+unsafe impl Texel for GpuBufferBlockI {
+ fn image_format() -> ImageFormat { ImageFormat::RGBAI32 }
+}
-unsafe impl Texel for GpuBufferBlock {}
+impl Default for GpuBufferBlockF {
+ fn default() -> Self {
+ GpuBufferBlockF::EMPTY
+ }
+}
+
+impl Default for GpuBufferBlockI {
+ fn default() -> Self {
+ GpuBufferBlockI::EMPTY
+ }
+}
/// A single texel in RGBAF32 texture - 16 bytes.
#[derive(Copy, Clone, Debug, MallocSizeOf)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
-pub struct GpuBufferBlock {
+pub struct GpuBufferBlockF {
data: [f32; 4],
}
+/// A single texel in RGBAI32 texture - 16 bytes.
+#[derive(Copy, Clone, Debug, MallocSizeOf)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct GpuBufferBlockI {
+ data: [i32; 4],
+}
+
#[derive(Copy, Debug, Clone, MallocSizeOf, Eq, PartialEq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
@@ -48,13 +84,17 @@ impl GpuBufferAddress {
pub const INVALID: GpuBufferAddress = GpuBufferAddress { u: !0, v: !0 };
}
-impl GpuBufferBlock {
- pub const EMPTY: Self = GpuBufferBlock { data: [0.0; 4] };
+impl GpuBufferBlockF {
+ pub const EMPTY: Self = GpuBufferBlockF { data: [0.0; 4] };
+}
+
+impl GpuBufferBlockI {
+ pub const EMPTY: Self = GpuBufferBlockI { data: [0; 4] };
}
-impl Into<GpuBufferBlock> for LayoutRect {
- fn into(self) -> GpuBufferBlock {
- GpuBufferBlock {
+impl Into<GpuBufferBlockF> for LayoutRect {
+ fn into(self) -> GpuBufferBlockF {
+ GpuBufferBlockF {
data: [
self.min.x,
self.min.y,
@@ -65,9 +105,9 @@ impl Into<GpuBufferBlock> for LayoutRect {
}
}
-impl Into<GpuBufferBlock> for PictureRect {
- fn into(self) -> GpuBufferBlock {
- GpuBufferBlock {
+impl Into<GpuBufferBlockF> for PictureRect {
+ fn into(self) -> GpuBufferBlockF {
+ GpuBufferBlockF {
data: [
self.min.x,
self.min.y,
@@ -78,9 +118,9 @@ impl Into<GpuBufferBlock> for PictureRect {
}
}
-impl Into<GpuBufferBlock> for DeviceRect {
- fn into(self) -> GpuBufferBlock {
- GpuBufferBlock {
+impl Into<GpuBufferBlockF> for DeviceRect {
+ fn into(self) -> GpuBufferBlockF {
+ GpuBufferBlockF {
data: [
self.min.x,
self.min.y,
@@ -91,9 +131,9 @@ impl Into<GpuBufferBlock> for DeviceRect {
}
}
-impl Into<GpuBufferBlock> for PremultipliedColorF {
- fn into(self) -> GpuBufferBlock {
- GpuBufferBlock {
+impl Into<GpuBufferBlockF> for PremultipliedColorF {
+ fn into(self) -> GpuBufferBlockF {
+ GpuBufferBlockF {
data: [
self.r,
self.g,
@@ -104,22 +144,43 @@ impl Into<GpuBufferBlock> for PremultipliedColorF {
}
}
-impl Into<GpuBufferBlock> for DeviceIntRect {
- fn into(self) -> GpuBufferBlock {
- GpuBufferBlock {
+impl From<DeviceIntRect> for GpuBufferBlockF {
+ fn from(rect: DeviceIntRect) -> Self {
+ GpuBufferBlockF {
+ data: [
+ rect.min.x as f32,
+ rect.min.y as f32,
+ rect.max.x as f32,
+ rect.max.y as f32,
+ ],
+ }
+ }
+}
+
+impl From<DeviceIntRect> for GpuBufferBlockI {
+ fn from(rect: DeviceIntRect) -> Self {
+ GpuBufferBlockI {
data: [
- self.min.x as f32,
- self.min.y as f32,
- self.max.x as f32,
- self.max.y as f32,
+ rect.min.x,
+ rect.min.y,
+ rect.max.x,
+ rect.max.y,
],
}
}
}
-impl Into<GpuBufferBlock> for [f32; 4] {
- fn into(self) -> GpuBufferBlock {
- GpuBufferBlock {
+impl Into<GpuBufferBlockF> for [f32; 4] {
+ fn into(self) -> GpuBufferBlockF {
+ GpuBufferBlockF {
+ data: self,
+ }
+ }
+}
+
+impl Into<GpuBufferBlockI> for [i32; 4] {
+ fn into(self) -> GpuBufferBlockI {
+ GpuBufferBlockI {
data: self,
}
}
@@ -132,16 +193,16 @@ struct DeferredBlock {
}
/// Interface to allow writing multiple GPU blocks, possibly of different types
-pub struct GpuBufferWriter<'a> {
- buffer: &'a mut Vec<GpuBufferBlock>,
+pub struct GpuBufferWriter<'a, T> {
+ buffer: &'a mut Vec<T>,
deferred: &'a mut Vec<DeferredBlock>,
index: usize,
block_count: usize,
}
-impl<'a> GpuBufferWriter<'a> {
+impl<'a, T> GpuBufferWriter<'a, T> where T: Texel {
fn new(
- buffer: &'a mut Vec<GpuBufferBlock>,
+ buffer: &'a mut Vec<T>,
deferred: &'a mut Vec<DeferredBlock>,
index: usize,
block_count: usize,
@@ -155,7 +216,7 @@ impl<'a> GpuBufferWriter<'a> {
}
    /// Push one (16 byte) block of data into the writer
- pub fn push_one<B>(&mut self, block: B) where B: Into<GpuBufferBlock> {
+ pub fn push_one<B>(&mut self, block: B) where B: Into<T> {
self.buffer.push(block.into());
}
@@ -166,7 +227,7 @@ impl<'a> GpuBufferWriter<'a> {
task_id,
index: self.buffer.len(),
});
- self.buffer.push(GpuBufferBlock::EMPTY);
+ self.buffer.push(T::default());
}
/// Close this writer, returning the GPU address of this set of block(s).
@@ -180,20 +241,20 @@ impl<'a> GpuBufferWriter<'a> {
}
}
-impl<'a> Drop for GpuBufferWriter<'a> {
+impl<'a, T> Drop for GpuBufferWriter<'a, T> {
fn drop(&mut self) {
assert_eq!(self.buffer.len(), self.index + self.block_count, "Claimed block_count was not written");
}
}
-pub struct GpuBufferBuilder {
- data: Vec<GpuBufferBlock>,
+pub struct GpuBufferBuilderImpl<T> {
+ data: Vec<T>,
deferred: Vec<DeferredBlock>,
}
-impl GpuBufferBuilder {
+impl<T> GpuBufferBuilderImpl<T> where T: Texel + std::convert::From<DeviceIntRect> {
pub fn new() -> Self {
- GpuBufferBuilder {
+ GpuBufferBuilderImpl {
data: Vec::new(),
deferred: Vec::new(),
}
@@ -202,13 +263,13 @@ impl GpuBufferBuilder {
#[allow(dead_code)]
pub fn push(
&mut self,
- blocks: &[GpuBufferBlock],
+ blocks: &[T],
) -> GpuBufferAddress {
assert!(blocks.len() <= MAX_VERTEX_TEXTURE_WIDTH);
if (self.data.len() % MAX_VERTEX_TEXTURE_WIDTH) + blocks.len() > MAX_VERTEX_TEXTURE_WIDTH {
while self.data.len() % MAX_VERTEX_TEXTURE_WIDTH != 0 {
- self.data.push(GpuBufferBlock::EMPTY);
+ self.data.push(T::default());
}
}
@@ -226,12 +287,12 @@ impl GpuBufferBuilder {
pub fn write_blocks(
&mut self,
block_count: usize,
- ) -> GpuBufferWriter {
+ ) -> GpuBufferWriter<T> {
assert!(block_count <= MAX_VERTEX_TEXTURE_WIDTH);
if (self.data.len() % MAX_VERTEX_TEXTURE_WIDTH) + block_count > MAX_VERTEX_TEXTURE_WIDTH {
while self.data.len() % MAX_VERTEX_TEXTURE_WIDTH != 0 {
- self.data.push(GpuBufferBlock::EMPTY);
+ self.data.push(T::default());
}
}
@@ -248,11 +309,11 @@ impl GpuBufferBuilder {
pub fn finalize(
mut self,
render_tasks: &RenderTaskGraph,
- ) -> GpuBuffer {
+ ) -> GpuBuffer<T> {
let required_len = (self.data.len() + MAX_VERTEX_TEXTURE_WIDTH-1) & !(MAX_VERTEX_TEXTURE_WIDTH-1);
for _ in 0 .. required_len - self.data.len() {
- self.data.push(GpuBufferBlock::EMPTY);
+ self.data.push(T::default());
}
let len = self.data.len();
@@ -271,18 +332,20 @@ impl GpuBufferBuilder {
GpuBuffer {
data: self.data,
size: DeviceIntSize::new(MAX_VERTEX_TEXTURE_WIDTH as i32, (len / MAX_VERTEX_TEXTURE_WIDTH) as i32),
+ format: T::image_format(),
}
}
}
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
-pub struct GpuBuffer {
- pub data: Vec<GpuBufferBlock>,
+pub struct GpuBuffer<T> {
+ pub data: Vec<T>,
pub size: DeviceIntSize,
+ pub format: ImageFormat,
}
-impl GpuBuffer {
+impl<T> GpuBuffer<T> {
pub fn is_empty(&self) -> bool {
self.data.is_empty()
}
@@ -292,13 +355,13 @@ impl GpuBuffer {
#[test]
fn test_gpu_buffer_sizing_push() {
let render_task_graph = RenderTaskGraph::new_for_testing();
- let mut builder = GpuBufferBuilder::new();
+ let mut builder = GpuBufferBuilderF::new();
- let row = vec![GpuBufferBlock::EMPTY; MAX_VERTEX_TEXTURE_WIDTH];
+ let row = vec![GpuBufferBlockF::EMPTY; MAX_VERTEX_TEXTURE_WIDTH];
builder.push(&row);
- builder.push(&[GpuBufferBlock::EMPTY]);
- builder.push(&[GpuBufferBlock::EMPTY]);
+ builder.push(&[GpuBufferBlockF::EMPTY]);
+ builder.push(&[GpuBufferBlockF::EMPTY]);
let buffer = builder.finalize(&render_task_graph);
assert_eq!(buffer.data.len(), MAX_VERTEX_TEXTURE_WIDTH * 2);
@@ -307,20 +370,20 @@ fn test_gpu_buffer_sizing_push() {
#[test]
fn test_gpu_buffer_sizing_writer() {
let render_task_graph = RenderTaskGraph::new_for_testing();
- let mut builder = GpuBufferBuilder::new();
+ let mut builder = GpuBufferBuilderF::new();
let mut writer = builder.write_blocks(MAX_VERTEX_TEXTURE_WIDTH);
for _ in 0 .. MAX_VERTEX_TEXTURE_WIDTH {
- writer.push_one(GpuBufferBlock::EMPTY);
+ writer.push_one(GpuBufferBlockF::EMPTY);
}
writer.finish();
let mut writer = builder.write_blocks(1);
- writer.push_one(GpuBufferBlock::EMPTY);
+ writer.push_one(GpuBufferBlockF::EMPTY);
writer.finish();
let mut writer = builder.write_blocks(1);
- writer.push_one(GpuBufferBlock::EMPTY);
+ writer.push_one(GpuBufferBlockF::EMPTY);
writer.finish();
let buffer = builder.finalize(&render_task_graph);
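A minimal usage sketch of the split builder (assembled by hand here; `render_task_graph` stands for whatever graph is available at finalize time, as in the tests above):

    let mut builder = GpuBufferBuilder {
        f32: GpuBufferBuilderF::new(),
        i32: GpuBufferBuilderI::new(),
    };

    // Float data (rects, colors) is written to the f32 half...
    let mut w = builder.f32.write_blocks(2);
    w.push_one(PremultipliedColorF::WHITE);
    w.push_one([0.0f32, 0.0, 64.0, 64.0]);
    let _addr_f = w.finish();

    // ...and integer data to the i32 half.
    let mut w = builder.i32.write_blocks(1);
    w.push_one([1i32, 2, 3, 4]);
    let _addr_i = w.finish();

    // Each half finalizes into its own GpuBuffer<T>, carrying the ImageFormat
    // (RGBAF32 / RGBAI32) that the renderer uses to create a matching texture.
    let buffer_f: GpuBufferF = builder.f32.finalize(&render_task_graph);
    let buffer_i: GpuBufferI = builder.i32.finalize(&render_task_graph);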
diff --git a/gfx/wr/webrender/src/renderer/mod.rs b/gfx/wr/webrender/src/renderer/mod.rs
index 0eded7ffb4..3a058ae8f4 100644
--- a/gfx/wr/webrender/src/renderer/mod.rs
+++ b/gfx/wr/webrender/src/renderer/mod.rs
@@ -59,7 +59,7 @@ use crate::composite::{CompositorConfig, NativeSurfaceOperationDetails, NativeSu
use crate::composite::{TileKind};
use crate::debug_colors;
use crate::device::{DepthFunction, Device, DrawTarget, ExternalTexture, GpuFrameId, UploadPBOPool};
-use crate::device::{ReadTarget, ShaderError, Texture, TextureFilter, TextureFlags, TextureSlot};
+use crate::device::{ReadTarget, ShaderError, Texture, TextureFilter, TextureFlags, TextureSlot, Texel};
use crate::device::query::{GpuSampler, GpuTimer};
#[cfg(feature = "capture")]
use crate::device::FBOId;
@@ -127,7 +127,7 @@ pub(crate) mod init;
pub use debug::DebugRenderer;
pub use shade::{Shaders, SharedShaders};
pub use vertex::{desc, VertexArrayKind, MAX_VERTEX_TEXTURE_WIDTH};
-pub use gpu_buffer::{GpuBuffer, GpuBufferBuilder, GpuBufferAddress};
+pub use gpu_buffer::{GpuBuffer, GpuBufferF, GpuBufferBuilderF, GpuBufferI, GpuBufferBuilderI, GpuBufferAddress, GpuBufferBuilder};
/// The size of the array of each type of vertex data texture that
/// is round-robin-ed each frame during bind_frame_data. Doing this
@@ -341,7 +341,8 @@ pub(crate) enum TextureSampler {
PrimitiveHeadersF,
PrimitiveHeadersI,
ClipMask,
- GpuBuffer,
+ GpuBufferF,
+ GpuBufferI,
}
impl TextureSampler {
@@ -370,7 +371,8 @@ impl Into<TextureSlot> for TextureSampler {
TextureSampler::PrimitiveHeadersF => TextureSlot(7),
TextureSampler::PrimitiveHeadersI => TextureSlot(8),
TextureSampler::ClipMask => TextureSlot(9),
- TextureSampler::GpuBuffer => TextureSlot(10),
+ TextureSampler::GpuBufferF => TextureSlot(10),
+ TextureSampler::GpuBufferI => TextureSlot(11),
}
}
}
@@ -3616,7 +3618,6 @@ impl Renderer {
fn draw_clip_batch_list(
&mut self,
list: &ClipBatchList,
- draw_target: &DrawTarget,
projection: &default::Transform3D<f32>,
stats: &mut RendererStats,
) {
@@ -3671,42 +3672,6 @@ impl Renderer {
stats,
);
}
-
- // draw image masks
- let mut using_scissor = false;
- for ((mask_texture_id, clip_rect), items) in list.images.iter() {
- let _gm2 = self.gpu_profiler.start_marker("clip images");
- // Some image masks may require scissoring to ensure they don't draw
- // outside their task's target bounds. Axis-aligned primitives will
- // be clamped inside the shader and should not require scissoring.
- // TODO: We currently assume scissor state is off by default for
- // alpha targets here, but in the future we may want to track the
- // current scissor state so that this can be properly saved and
- // restored here.
- if let Some(clip_rect) = clip_rect {
- if !using_scissor {
- self.device.enable_scissor();
- using_scissor = true;
- }
- let scissor_rect = draw_target.build_scissor_rect(Some(*clip_rect));
- self.device.set_scissor_rect(scissor_rect);
- } else if using_scissor {
- self.device.disable_scissor();
- using_scissor = false;
- }
- let textures = BatchTextures::composite_rgb(*mask_texture_id);
- self.shaders.borrow_mut().cs_clip_image
- .bind(&mut self.device, projection, None, &mut self.renderer_errors, &mut self.profile);
- self.draw_instanced_batch(
- items,
- VertexArrayKind::ClipImage,
- &textures,
- stats,
- );
- }
- if using_scissor {
- self.device.disable_scissor();
- }
}
fn draw_alpha_target(
@@ -3861,7 +3826,6 @@ impl Renderer {
self.set_blend(false, FramebufferKind::Other);
self.draw_clip_batch_list(
&target.clip_batcher.primary_clips,
- &draw_target,
projection,
stats,
);
@@ -3872,7 +3836,6 @@ impl Renderer {
self.set_blend_mode_multiply(FramebufferKind::Other);
self.draw_clip_batch_list(
&target.clip_batcher.secondary_clips,
- &draw_target,
projection,
stats,
);
@@ -4452,6 +4415,38 @@ impl Renderer {
}
}
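+    /// If the buffer is non-empty, create a texture in the buffer's format,
+    /// bind it to `sampler` and upload the data; otherwise return None.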
+ fn create_gpu_buffer_texture<T: Texel>(
+ &mut self,
+ buffer: &GpuBuffer<T>,
+ sampler: TextureSampler,
+ ) -> Option<Texture> {
+ if buffer.is_empty() {
+ None
+ } else {
+ let gpu_buffer_texture = self.device.create_texture(
+ ImageBufferKind::Texture2D,
+ buffer.format,
+ buffer.size.width,
+ buffer.size.height,
+ TextureFilter::Nearest,
+ None,
+ );
+
+ self.device.bind_texture(
+ sampler,
+ &gpu_buffer_texture,
+ Swizzle::default(),
+ );
+
+ self.device.upload_texture_immediate(
+ &gpu_buffer_texture,
+ &buffer.data,
+ );
+
+ Some(gpu_buffer_texture)
+ }
+ }
+
fn draw_frame(
&mut self,
frame: &mut Frame,
@@ -4478,31 +4473,14 @@ impl Renderer {
        // Upload experimental GPU buffer textures if there is any data present
// TODO: Recycle these textures, upload via PBO or best approach for platform
- let gpu_buffer_texture = if frame.gpu_buffer.is_empty() {
- None
- } else {
- let gpu_buffer_texture = self.device.create_texture(
- ImageBufferKind::Texture2D,
- ImageFormat::RGBAF32,
- frame.gpu_buffer.size.width,
- frame.gpu_buffer.size.height,
- TextureFilter::Nearest,
- None,
- );
-
- self.device.bind_texture(
- TextureSampler::GpuBuffer,
- &gpu_buffer_texture,
- Swizzle::default(),
- );
-
- self.device.upload_texture_immediate(
- &gpu_buffer_texture,
- &frame.gpu_buffer.data,
- );
-
- Some(gpu_buffer_texture)
- };
+ let gpu_buffer_texture_f = self.create_gpu_buffer_texture(
+ &frame.gpu_buffer_f,
+ TextureSampler::GpuBufferF,
+ );
+ let gpu_buffer_texture_i = self.create_gpu_buffer_texture(
+ &frame.gpu_buffer_i,
+ TextureSampler::GpuBufferI,
+ );
// Determine the present mode and dirty rects, if device_size
// is Some(..). If it's None, no composite will occur and only
@@ -4761,8 +4739,11 @@ impl Renderer {
present_mode,
);
- if let Some(gpu_buffer_texture) = gpu_buffer_texture {
- self.device.delete_texture(gpu_buffer_texture);
+ if let Some(gpu_buffer_texture_f) = gpu_buffer_texture_f {
+ self.device.delete_texture(gpu_buffer_texture_f);
+ }
+ if let Some(gpu_buffer_texture_i) = gpu_buffer_texture_i {
+ self.device.delete_texture(gpu_buffer_texture_i);
}
frame.has_been_rendered = true;
diff --git a/gfx/wr/webrender/src/renderer/shade.rs b/gfx/wr/webrender/src/renderer/shade.rs
index 30baee0a19..777bfab44a 100644
--- a/gfx/wr/webrender/src/renderer/shade.rs
+++ b/gfx/wr/webrender/src/renderer/shade.rs
@@ -254,7 +254,6 @@ impl LazilyCompiledShader {
VertexArrayKind::RadialGradient => &desc::RADIAL_GRADIENT,
VertexArrayKind::ConicGradient => &desc::CONIC_GRADIENT,
VertexArrayKind::Blur => &desc::BLUR,
- VertexArrayKind::ClipImage => &desc::CLIP_IMAGE,
VertexArrayKind::ClipRect => &desc::CLIP_RECT,
VertexArrayKind::ClipBoxShadow => &desc::CLIP_BOX_SHADOW,
VertexArrayKind::VectorStencil => &desc::VECTOR_STENCIL,
@@ -282,7 +281,8 @@ impl LazilyCompiledShader {
("sGpuCache", TextureSampler::GpuCache),
("sPrimitiveHeadersF", TextureSampler::PrimitiveHeadersF),
("sPrimitiveHeadersI", TextureSampler::PrimitiveHeadersI),
- ("sGpuBuffer", TextureSampler::GpuBuffer),
+ ("sGpuBufferF", TextureSampler::GpuBufferF),
+ ("sGpuBufferI", TextureSampler::GpuBufferI),
],
);
}
@@ -300,7 +300,8 @@ impl LazilyCompiledShader {
("sPrimitiveHeadersF", TextureSampler::PrimitiveHeadersF),
("sPrimitiveHeadersI", TextureSampler::PrimitiveHeadersI),
("sClipMask", TextureSampler::ClipMask),
- ("sGpuBuffer", TextureSampler::GpuBuffer),
+ ("sGpuBufferF", TextureSampler::GpuBufferF),
+ ("sGpuBufferI", TextureSampler::GpuBufferI),
],
);
}
@@ -617,7 +618,6 @@ pub struct Shaders {
pub cs_clip_rectangle_slow: LazilyCompiledShader,
pub cs_clip_rectangle_fast: LazilyCompiledShader,
pub cs_clip_box_shadow: LazilyCompiledShader,
- pub cs_clip_image: LazilyCompiledShader,
    // These are "primitive shaders". These shaders draw and blend
// final results on screen. They are aware of tile boundaries.
@@ -817,16 +817,6 @@ impl Shaders {
profile,
)?;
- let cs_clip_image = LazilyCompiledShader::new(
- ShaderKind::ClipCache(VertexArrayKind::ClipImage),
- "cs_clip_image",
- &["TEXTURE_2D"],
- device,
- options.precache_flags,
- &shader_list,
- profile,
- )?;
-
let mut cs_scale = Vec::new();
let scale_shader_num = IMAGE_BUFFER_KINDS.len();
// PrimitiveShader is not clonable. Use push() to initialize the vec.
@@ -1128,7 +1118,6 @@ impl Shaders {
cs_clip_rectangle_slow,
cs_clip_rectangle_fast,
cs_clip_box_shadow,
- cs_clip_image,
ps_text_run,
ps_text_run_dual_source,
ps_quad_textured,
@@ -1274,7 +1263,6 @@ impl Shaders {
self.cs_clip_rectangle_slow.deinit(device);
self.cs_clip_rectangle_fast.deinit(device);
self.cs_clip_box_shadow.deinit(device);
- self.cs_clip_image.deinit(device);
self.ps_text_run.deinit(device);
if let Some(shader) = self.ps_text_run_dual_source {
shader.deinit(device);
diff --git a/gfx/wr/webrender/src/renderer/upload.rs b/gfx/wr/webrender/src/renderer/upload.rs
index 0ba053cd76..c987038651 100644
--- a/gfx/wr/webrender/src/renderer/upload.rs
+++ b/gfx/wr/webrender/src/renderer/upload.rs
@@ -43,6 +43,7 @@ use crate::profiler;
use crate::render_api::MemoryReport;
pub const BATCH_UPLOAD_TEXTURE_SIZE: DeviceIntSize = DeviceIntSize::new(512, 512);
+const BATCH_UPLOAD_FORMAT_COUNT: usize = 4;
/// Upload a number of items to texture cache textures.
///
@@ -627,10 +628,10 @@ pub struct UploadTexturePool {
/// The textures in the pool associated with a last used frame index.
///
    /// The outer array corresponds to each of the supported texture formats.
- textures: [VecDeque<(Texture, u64)>; 3],
+ textures: [VecDeque<(Texture, u64)>; BATCH_UPLOAD_FORMAT_COUNT],
// Frame at which to deallocate some textures if there are too many in the pool,
// for each format.
- delay_texture_deallocation: [u64; 3],
+ delay_texture_deallocation: [u64; BATCH_UPLOAD_FORMAT_COUNT],
current_frame: u64,
    /// Temporary buffers used for staging uploads with glTexImage2D.
@@ -646,8 +647,8 @@ pub struct UploadTexturePool {
impl UploadTexturePool {
pub fn new() -> Self {
UploadTexturePool {
- textures: [VecDeque::new(), VecDeque::new(), VecDeque::new()],
- delay_texture_deallocation: [0; 3],
+ textures: [VecDeque::new(), VecDeque::new(), VecDeque::new(), VecDeque::new()],
+ delay_texture_deallocation: [0; BATCH_UPLOAD_FORMAT_COUNT],
current_frame: 0,
temporary_buffers: Vec::new(),
min_temporary_buffers: 0,
@@ -660,7 +661,8 @@ impl UploadTexturePool {
ImageFormat::RGBA8 => 0,
ImageFormat::BGRA8 => 1,
ImageFormat::R8 => 2,
- _ => { panic!("unexpected format"); }
+ ImageFormat::R16 => 3,
+ _ => { panic!("unexpected format {:?}", format); }
}
}
diff --git a/gfx/wr/webrender/src/renderer/vertex.rs b/gfx/wr/webrender/src/renderer/vertex.rs
index ff555363d8..cd73975ddd 100644
--- a/gfx/wr/webrender/src/renderer/vertex.rs
+++ b/gfx/wr/webrender/src/renderer/vertex.rs
@@ -49,12 +49,12 @@ pub mod desc {
VertexAttribute {
name: "aBlurRenderTaskAddress",
count: 1,
- kind: VertexAttributeKind::U16,
+ kind: VertexAttributeKind::I32,
},
VertexAttribute {
name: "aBlurSourceTaskAddress",
count: 1,
- kind: VertexAttributeKind::U16,
+ kind: VertexAttributeKind::I32,
},
VertexAttribute {
name: "aBlurDirection",
@@ -488,53 +488,6 @@ pub mod desc {
],
};
- pub const CLIP_IMAGE: VertexDescriptor = VertexDescriptor {
- vertex_attributes: &[VertexAttribute {
- name: "aPosition",
- count: 2,
- kind: VertexAttributeKind::U8Norm,
- }],
- instance_attributes: &[
- // common clip attributes
- VertexAttribute {
- name: "aClipDeviceArea",
- count: 4,
- kind: VertexAttributeKind::F32,
- },
- VertexAttribute {
- name: "aClipOrigins",
- count: 4,
- kind: VertexAttributeKind::F32,
- },
- VertexAttribute {
- name: "aDevicePixelScale",
- count: 1,
- kind: VertexAttributeKind::F32,
- },
- VertexAttribute {
- name: "aTransformIds",
- count: 2,
- kind: VertexAttributeKind::I32,
- },
- // specific clip attributes
- VertexAttribute {
- name: "aClipTileRect",
- count: 4,
- kind: VertexAttributeKind::F32,
- },
- VertexAttribute {
- name: "aClipDataResourceAddress",
- count: 2,
- kind: VertexAttributeKind::U16,
- },
- VertexAttribute {
- name: "aClipLocalRect",
- count: 4,
- kind: VertexAttributeKind::F32,
- },
- ],
- };
-
pub const GPU_CACHE_UPDATE: VertexDescriptor = VertexDescriptor {
vertex_attributes: &[
VertexAttribute {
@@ -574,17 +527,17 @@ pub mod desc {
VertexAttribute {
name: "aFilterRenderTaskAddress",
count: 1,
- kind: VertexAttributeKind::U16,
+ kind: VertexAttributeKind::I32,
},
VertexAttribute {
name: "aFilterInput1TaskAddress",
count: 1,
- kind: VertexAttributeKind::U16,
+ kind: VertexAttributeKind::I32,
},
VertexAttribute {
name: "aFilterInput2TaskAddress",
count: 1,
- kind: VertexAttributeKind::U16,
+ kind: VertexAttributeKind::I32,
},
VertexAttribute {
name: "aFilterKind",
@@ -602,6 +555,11 @@ pub mod desc {
kind: VertexAttributeKind::U16,
},
VertexAttribute {
+ name: "aUnused",
+ count: 1,
+ kind: VertexAttributeKind::U16,
+ },
+ VertexAttribute {
name: "aFilterExtraDataAddress",
count: 2,
kind: VertexAttributeKind::U16,
@@ -809,7 +767,6 @@ pub mod desc {
pub enum VertexArrayKind {
Primitive,
Blur,
- ClipImage,
ClipRect,
ClipBoxShadow,
VectorStencil,
@@ -1038,7 +995,6 @@ pub struct RendererVAOs {
blur_vao: VAO,
clip_rect_vao: VAO,
clip_box_shadow_vao: VAO,
- clip_image_vao: VAO,
border_vao: VAO,
line_vao: VAO,
scale_vao: VAO,
@@ -1086,7 +1042,6 @@ impl RendererVAOs {
clip_rect_vao: device.create_vao_with_new_instances(&desc::CLIP_RECT, &prim_vao),
clip_box_shadow_vao: device
.create_vao_with_new_instances(&desc::CLIP_BOX_SHADOW, &prim_vao),
- clip_image_vao: device.create_vao_with_new_instances(&desc::CLIP_IMAGE, &prim_vao),
border_vao: device.create_vao_with_new_instances(&desc::BORDER, &prim_vao),
scale_vao: device.create_vao_with_new_instances(&desc::SCALE, &prim_vao),
line_vao: device.create_vao_with_new_instances(&desc::LINE, &prim_vao),
@@ -1109,7 +1064,6 @@ impl RendererVAOs {
device.delete_vao(self.resolve_vao);
device.delete_vao(self.clip_rect_vao);
device.delete_vao(self.clip_box_shadow_vao);
- device.delete_vao(self.clip_image_vao);
device.delete_vao(self.fast_linear_gradient_vao);
device.delete_vao(self.linear_gradient_vao);
device.delete_vao(self.radial_gradient_vao);
@@ -1131,7 +1085,6 @@ impl ops::Index<VertexArrayKind> for RendererVAOs {
fn index(&self, kind: VertexArrayKind) -> &VAO {
match kind {
VertexArrayKind::Primitive => &self.prim_vao,
- VertexArrayKind::ClipImage => &self.clip_image_vao,
VertexArrayKind::ClipRect => &self.clip_rect_vao,
VertexArrayKind::ClipBoxShadow => &self.clip_box_shadow_vao,
VertexArrayKind::Blur => &self.blur_vao,
diff --git a/gfx/wr/webrender/src/resource_cache.rs b/gfx/wr/webrender/src/resource_cache.rs
index 9459bf86da..349be25cb8 100644
--- a/gfx/wr/webrender/src/resource_cache.rs
+++ b/gfx/wr/webrender/src/resource_cache.rs
@@ -34,7 +34,7 @@ use crate::profiler::{self, TransactionProfile, bytes_to_mb};
use crate::render_task_graph::{RenderTaskId, RenderTaskGraphBuilder};
use crate::render_task_cache::{RenderTaskCache, RenderTaskCacheKey, RenderTaskParent};
use crate::render_task_cache::{RenderTaskCacheEntry, RenderTaskCacheEntryHandle};
-use crate::renderer::GpuBufferBuilder;
+use crate::renderer::GpuBufferBuilderF;
use crate::surface::SurfaceBuilder;
use euclid::point2;
use smallvec::SmallVec;
@@ -594,7 +594,7 @@ impl ResourceCache {
&mut self,
key: RenderTaskCacheKey,
gpu_cache: &mut GpuCache,
- gpu_buffer_builder: &mut GpuBufferBuilder,
+ gpu_buffer_builder: &mut GpuBufferBuilderF,
rg_builder: &mut RenderTaskGraphBuilder,
user_data: Option<[f32; 4]>,
is_opaque: bool,
@@ -603,7 +603,7 @@ impl ResourceCache {
f: F,
) -> RenderTaskId
where
- F: FnOnce(&mut RenderTaskGraphBuilder, &mut GpuBufferBuilder) -> RenderTaskId,
+ F: FnOnce(&mut RenderTaskGraphBuilder, &mut GpuBufferBuilderF) -> RenderTaskId,
{
self.cached_render_tasks.request_render_task(
key,
@@ -2087,14 +2087,14 @@ impl ResourceCache {
index,
}
}
- #[cfg(not(target_os = "macos"))]
+ #[cfg(not(any(target_os = "macos", target_os = "ios")))]
FontTemplate::Native(native) => {
PlainFontTemplate {
data: native.path.to_string_lossy().to_string(),
index: native.index,
}
}
- #[cfg(target_os = "macos")]
+ #[cfg(any(target_os = "macos", target_os = "ios"))]
FontTemplate::Native(native) => {
PlainFontTemplate {
data: native.name,
diff --git a/gfx/wr/webrender/src/spatial_node.rs b/gfx/wr/webrender/src/spatial_node.rs
index 9a2039e37b..6bf1313e0d 100644
--- a/gfx/wr/webrender/src/spatial_node.rs
+++ b/gfx/wr/webrender/src/spatial_node.rs
@@ -673,7 +673,7 @@ impl SpatialNode {
pub fn prepare_state_for_children(&self, state: &mut TransformUpdateState) {
state.current_coordinate_system_id = self.coordinate_system_id;
- state.is_ancestor_or_self_zooming = self.is_async_zooming;
+ state.is_ancestor_or_self_zooming = self.is_ancestor_or_self_zooming;
state.invertible &= self.invertible;
// The transformation we are passing is the transformation of the parent
diff --git a/gfx/wr/webrender/src/spatial_tree.rs b/gfx/wr/webrender/src/spatial_tree.rs
index c2cd422076..0aa6bb5296 100644
--- a/gfx/wr/webrender/src/spatial_tree.rs
+++ b/gfx/wr/webrender/src/spatial_tree.rs
@@ -2042,3 +2042,46 @@ fn test_world_transforms() {
st.get_world_viewport_transform(scroll).into_transform(),
LayoutToWorldTransform::identity());
}
+
+/// Tests that a spatial node that is async zooming, and all of its descendants,
+/// are correctly marked as having themselves or an ancestor that is zooming.
+#[test]
+fn test_is_ancestor_or_self_zooming() {
+ let mut cst = SceneSpatialTree::new();
+ let root_reference_frame_index = cst.root_reference_frame_index();
+
+ let root = add_reference_frame(
+ &mut cst,
+ root_reference_frame_index,
+ LayoutTransform::identity(),
+ LayoutVector2D::zero(),
+ SpatialTreeItemKey::new(0, 0),
+ );
+ let child1 = add_reference_frame(
+ &mut cst,
+ root,
+ LayoutTransform::identity(),
+ LayoutVector2D::zero(),
+ SpatialTreeItemKey::new(0, 1),
+ );
+ let child2 = add_reference_frame(
+ &mut cst,
+ child1,
+ LayoutTransform::identity(),
+ LayoutVector2D::zero(),
+ SpatialTreeItemKey::new(0, 2),
+ );
+
+ let mut st = SpatialTree::new();
+ st.apply_updates(cst.end_frame_and_get_pending_updates());
+
+ // Mark the root node as async zooming
+ st.get_spatial_node_mut(root).is_async_zooming = true;
+ st.update_tree(&SceneProperties::new());
+
+ // Ensure that the root node and all descendants are marked as having
+ // themselves or an ancestor zooming
+ assert!(st.get_spatial_node(root).is_ancestor_or_self_zooming);
+ assert!(st.get_spatial_node(child1).is_ancestor_or_self_zooming);
+ assert!(st.get_spatial_node(child2).is_ancestor_or_self_zooming);
+}
diff --git a/gfx/wr/webrender/src/telemetry.rs b/gfx/wr/webrender/src/telemetry.rs
index 9b4c02d68e..5fd67bb80c 100644
--- a/gfx/wr/webrender/src/telemetry.rs
+++ b/gfx/wr/webrender/src/telemetry.rs
@@ -2,6 +2,7 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#[cfg(feature = "gecko")]
use glean::TimerId;
#[cfg(feature = "gecko")]
use firefox_on_glean::metrics::wr;
@@ -9,16 +10,19 @@ use std::time::Duration;
pub struct Telemetry;
+#[cfg(not(feature = "gecko"))]
+pub struct TimerId;
+
/// Defines the interface for hooking up an external telemetry reporter to WR.
#[cfg(not(feature = "gecko"))]
impl Telemetry {
pub fn record_rasterize_blobs_time(_duration: Duration) { }
- pub fn start_framebuild_time() -> TimerId { TimerId { id: 0 } }
+ pub fn start_framebuild_time() -> TimerId { TimerId }
pub fn stop_and_accumulate_framebuild_time(_id: TimerId) { }
pub fn record_renderer_time(_duration: Duration) { }
pub fn record_renderer_time_no_sc(_duration: Duration) { }
pub fn record_scenebuild_time(_duration: Duration) { }
- pub fn start_sceneswap_time() -> TimerId { TimerId { id: 0 } }
+ pub fn start_sceneswap_time() -> TimerId { TimerId }
pub fn stop_and_accumulate_sceneswap_time(_id: TimerId) { }
pub fn cancel_sceneswap_time(_id: TimerId) { }
pub fn record_texture_cache_update_time(_duration: Duration) { }