path: root/gfx/wr/webrender/src/prim_store
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
commit    43a97878ce14b72f0981164f87f2e35e14151312 (patch)
tree      620249daf56c0258faa40cbdcf9cfba06de2a846 /gfx/wr/webrender/src/prim_store
parent    Initial commit. (diff)
Adding upstream version 110.0.1. (upstream/110.0.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'gfx/wr/webrender/src/prim_store')
-rw-r--r--  gfx/wr/webrender/src/prim_store/backdrop.rs           175
-rw-r--r--  gfx/wr/webrender/src/prim_store/borders.rs            387
-rw-r--r--  gfx/wr/webrender/src/prim_store/gradient/conic.rs     399
-rw-r--r--  gfx/wr/webrender/src/prim_store/gradient/linear.rs    750
-rw-r--r--  gfx/wr/webrender/src/prim_store/gradient/mod.rs       392
-rw-r--r--  gfx/wr/webrender/src/prim_store/gradient/radial.rs    531
-rw-r--r--  gfx/wr/webrender/src/prim_store/image.rs              682
-rw-r--r--  gfx/wr/webrender/src/prim_store/interned.rs            14
-rw-r--r--  gfx/wr/webrender/src/prim_store/line_dec.rs           257
-rw-r--r--  gfx/wr/webrender/src/prim_store/mod.rs               1448
-rw-r--r--  gfx/wr/webrender/src/prim_store/picture.rs            328
-rw-r--r--  gfx/wr/webrender/src/prim_store/storage.rs            156
-rw-r--r--  gfx/wr/webrender/src/prim_store/text_run.rs           505
13 files changed, 6024 insertions, 0 deletions
diff --git a/gfx/wr/webrender/src/prim_store/backdrop.rs b/gfx/wr/webrender/src/prim_store/backdrop.rs
new file mode 100644
index 0000000000..7c106e47bc
--- /dev/null
+++ b/gfx/wr/webrender/src/prim_store/backdrop.rs
@@ -0,0 +1,175 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use api::units::*;
+use crate::intern::{Internable, InternDebug, Handle as InternHandle};
+use crate::internal_types::LayoutPrimitiveInfo;
+use crate::prim_store::{
+ InternablePrimitive, PrimitiveInstanceKind, PrimKey, PrimTemplate,
+ PrimTemplateCommonData, PrimitiveStore, PictureIndex,
+};
+use crate::scene_building::IsVisible;
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, Eq, PartialEq, MallocSizeOf, Hash)]
+pub struct BackdropCapture {
+}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, Eq, PartialEq, MallocSizeOf, Hash)]
+pub struct BackdropRender {
+}
+
+impl From<BackdropCapture> for BackdropCaptureData {
+ fn from(_backdrop: BackdropCapture) -> Self {
+ BackdropCaptureData {
+ }
+ }
+}
+
+impl From<BackdropRender> for BackdropRenderData {
+ fn from(_backdrop: BackdropRender) -> Self {
+ BackdropRenderData {
+ }
+ }
+}
+
+pub type BackdropCaptureKey = PrimKey<BackdropCapture>;
+pub type BackdropRenderKey = PrimKey<BackdropRender>;
+
+impl BackdropCaptureKey {
+ pub fn new(
+ info: &LayoutPrimitiveInfo,
+ backdrop_capture: BackdropCapture,
+ ) -> Self {
+ BackdropCaptureKey {
+ common: info.into(),
+ kind: backdrop_capture,
+ }
+ }
+}
+
+impl BackdropRenderKey {
+ pub fn new(
+ info: &LayoutPrimitiveInfo,
+ backdrop_render: BackdropRender,
+ ) -> Self {
+ BackdropRenderKey {
+ common: info.into(),
+ kind: backdrop_render,
+ }
+ }
+}
+
+impl InternDebug for BackdropCaptureKey {}
+impl InternDebug for BackdropRenderKey {}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, MallocSizeOf)]
+pub struct BackdropCaptureData {
+}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, MallocSizeOf)]
+pub struct BackdropRenderData {
+}
+
+pub type BackdropCaptureTemplate = PrimTemplate<BackdropCaptureData>;
+pub type BackdropRenderTemplate = PrimTemplate<BackdropRenderData>;
+
+impl From<BackdropCaptureKey> for BackdropCaptureTemplate {
+ fn from(backdrop: BackdropCaptureKey) -> Self {
+ let common = PrimTemplateCommonData::with_key_common(backdrop.common);
+
+ BackdropCaptureTemplate {
+ common,
+ kind: backdrop.kind.into(),
+ }
+ }
+}
+
+impl From<BackdropRenderKey> for BackdropRenderTemplate {
+ fn from(backdrop: BackdropRenderKey) -> Self {
+ let common = PrimTemplateCommonData::with_key_common(backdrop.common);
+
+ BackdropRenderTemplate {
+ common,
+ kind: backdrop.kind.into(),
+ }
+ }
+}
+
+pub type BackdropCaptureDataHandle = InternHandle<BackdropCapture>;
+pub type BackdropRenderDataHandle = InternHandle<BackdropRender>;
+
+impl Internable for BackdropCapture {
+ type Key = BackdropCaptureKey;
+ type StoreData = BackdropCaptureTemplate;
+ type InternData = ();
+ const PROFILE_COUNTER: usize = crate::profiler::INTERNED_BACKDROP_CAPTURES;
+}
+
+impl Internable for BackdropRender {
+ type Key = BackdropRenderKey;
+ type StoreData = BackdropRenderTemplate;
+ type InternData = ();
+ const PROFILE_COUNTER: usize = crate::profiler::INTERNED_BACKDROP_RENDERS;
+}
+
+impl InternablePrimitive for BackdropCapture {
+ fn into_key(
+ self,
+ info: &LayoutPrimitiveInfo,
+ ) -> BackdropCaptureKey {
+ BackdropCaptureKey::new(info, self)
+ }
+
+ fn make_instance_kind(
+ _key: BackdropCaptureKey,
+ data_handle: BackdropCaptureDataHandle,
+ _prim_store: &mut PrimitiveStore,
+ _reference_frame_relative_offset: LayoutVector2D,
+ ) -> PrimitiveInstanceKind {
+ PrimitiveInstanceKind::BackdropCapture {
+ data_handle,
+ }
+ }
+}
+
+impl InternablePrimitive for BackdropRender {
+ fn into_key(
+ self,
+ info: &LayoutPrimitiveInfo,
+ ) -> BackdropRenderKey {
+ BackdropRenderKey::new(info, self)
+ }
+
+ fn make_instance_kind(
+ _key: BackdropRenderKey,
+ data_handle: BackdropRenderDataHandle,
+ _prim_store: &mut PrimitiveStore,
+ _reference_frame_relative_offset: LayoutVector2D,
+ ) -> PrimitiveInstanceKind {
+ PrimitiveInstanceKind::BackdropRender {
+ data_handle,
+ pic_index: PictureIndex::INVALID,
+ }
+ }
+}
+
+impl IsVisible for BackdropCapture {
+ fn is_visible(&self) -> bool {
+ true
+ }
+}
+
+impl IsVisible for BackdropRender {
+ fn is_visible(&self) -> bool {
+ true
+ }
+}
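
The backdrop file above is the smallest instance of the interning pattern that every primitive in prim_store follows: a hashable key (layout common data plus the primitive-specific payload), a template the key converts into via From, and a handle that primitive instances store. Below is a minimal, self-contained sketch of that shape; the types (CommonKey, SketchKey, SketchTemplate, Interner) are simplified stand-ins for illustration, not the real Internable/InternablePrimitive machinery.

use std::collections::HashMap;

/// Stand-in for PrimKeyCommonData: the part of every key derived from LayoutPrimitiveInfo.
#[derive(Clone, PartialEq, Eq, Hash)]
struct CommonKey {
    prim_rect: [i32; 4],
}

/// Stand-in for a primitive key such as BackdropCaptureKey.
#[derive(Clone, PartialEq, Eq, Hash)]
struct SketchKey {
    common: CommonKey,
    kind: u32, // primitive-specific payload
}

/// Stand-in for a template such as BackdropCaptureTemplate.
struct SketchTemplate {
    prim_rect: [i32; 4],
    kind: u32,
}

impl From<SketchKey> for SketchTemplate {
    fn from(key: SketchKey) -> Self {
        SketchTemplate { prim_rect: key.common.prim_rect, kind: key.kind }
    }
}

/// Toy interner: identical keys resolve to the same handle and template.
#[derive(Default)]
struct Interner {
    map: HashMap<SketchKey, usize>,
    templates: Vec<SketchTemplate>,
}

impl Interner {
    fn intern(&mut self, key: SketchKey) -> usize {
        if let Some(&handle) = self.map.get(&key) {
            return handle;
        }
        let handle = self.templates.len();
        self.templates.push(key.clone().into());
        self.map.insert(key, handle);
        handle
    }
}

fn main() {
    let mut interner = Interner::default();
    let key = SketchKey { common: CommonKey { prim_rect: [0, 0, 100, 50] }, kind: 7 };
    let a = interner.intern(key.clone());
    let b = interner.intern(key);
    assert_eq!(a, b); // identical keys share one interned template
}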
diff --git a/gfx/wr/webrender/src/prim_store/borders.rs b/gfx/wr/webrender/src/prim_store/borders.rs
new file mode 100644
index 0000000000..7459dd75e1
--- /dev/null
+++ b/gfx/wr/webrender/src/prim_store/borders.rs
@@ -0,0 +1,387 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use api::{NormalBorder, PremultipliedColorF, Shadow, RasterSpace};
+use api::units::*;
+use crate::border::create_border_segments;
+use crate::border::NormalBorderAu;
+use crate::scene_building::{CreateShadow, IsVisible};
+use crate::frame_builder::{FrameBuildingState};
+use crate::gpu_cache::GpuDataRequest;
+use crate::intern;
+use crate::internal_types::{LayoutPrimitiveInfo, FrameId};
+use crate::prim_store::{
+ BorderSegmentInfo, BrushSegment, NinePatchDescriptor, PrimKey,
+ PrimTemplate, PrimTemplateCommonData,
+ PrimitiveInstanceKind, PrimitiveOpacity,
+ PrimitiveStore, InternablePrimitive,
+};
+use crate::resource_cache::ImageRequest;
+use crate::render_task::RenderTask;
+use crate::render_task_graph::RenderTaskId;
+
+use super::storage;
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, Eq, MallocSizeOf, PartialEq, Hash)]
+pub struct NormalBorderPrim {
+ pub border: NormalBorderAu,
+ pub widths: LayoutSideOffsetsAu,
+}
+
+pub type NormalBorderKey = PrimKey<NormalBorderPrim>;
+
+impl NormalBorderKey {
+ pub fn new(
+ info: &LayoutPrimitiveInfo,
+ normal_border: NormalBorderPrim,
+ ) -> Self {
+ NormalBorderKey {
+ common: info.into(),
+ kind: normal_border,
+ }
+ }
+}
+
+impl intern::InternDebug for NormalBorderKey {}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(MallocSizeOf)]
+pub struct NormalBorderData {
+ pub brush_segments: Vec<BrushSegment>,
+ pub border_segments: Vec<BorderSegmentInfo>,
+ pub border: NormalBorder,
+ pub widths: LayoutSideOffsets,
+}
+
+impl NormalBorderData {
+ /// Update the GPU cache for a given primitive template. This may be called multiple
+ /// times per frame, by each primitive reference that refers to this interned
+ /// template. The initial request call to the GPU cache ensures that work is only
+ /// done if the cache entry is invalid (due to first use or eviction).
+ pub fn update(
+ &mut self,
+ common: &mut PrimTemplateCommonData,
+ frame_state: &mut FrameBuildingState,
+ ) {
+ if let Some(ref mut request) = frame_state.gpu_cache.request(&mut common.gpu_cache_handle) {
+ self.write_prim_gpu_blocks(request, common.prim_rect.size());
+ self.write_segment_gpu_blocks(request);
+ }
+
+ common.opacity = PrimitiveOpacity::translucent();
+ }
+
+ fn write_prim_gpu_blocks(
+ &self,
+ request: &mut GpuDataRequest,
+ prim_size: LayoutSize
+ ) {
+ // Border primitives are currently used for
+ // image borders, and run through the
+ // normal brush_image shader.
+ request.push(PremultipliedColorF::WHITE);
+ request.push(PremultipliedColorF::WHITE);
+ request.push([
+ prim_size.width,
+ prim_size.height,
+ 0.0,
+ 0.0,
+ ]);
+ }
+
+ fn write_segment_gpu_blocks(
+ &self,
+ request: &mut GpuDataRequest,
+ ) {
+ for segment in &self.brush_segments {
+ // has to match VECS_PER_SEGMENT
+ request.write_segment(
+ segment.local_rect,
+ segment.extra_data,
+ );
+ }
+ }
+}
+
+pub type NormalBorderTemplate = PrimTemplate<NormalBorderData>;
+
+impl From<NormalBorderKey> for NormalBorderTemplate {
+ fn from(key: NormalBorderKey) -> Self {
+ let common = PrimTemplateCommonData::with_key_common(key.common);
+
+ let mut border: NormalBorder = key.kind.border.into();
+ let widths = LayoutSideOffsets::from_au(key.kind.widths);
+
+ // FIXME(emilio): Is this the best place to do this?
+ border.normalize(&widths);
+
+ let mut brush_segments = Vec::new();
+ let mut border_segments = Vec::new();
+
+ create_border_segments(
+ common.prim_rect.size(),
+ &border,
+ &widths,
+ &mut border_segments,
+ &mut brush_segments,
+ );
+
+ NormalBorderTemplate {
+ common,
+ kind: NormalBorderData {
+ brush_segments,
+ border_segments,
+ border,
+ widths,
+ }
+ }
+ }
+}
+
+pub type NormalBorderDataHandle = intern::Handle<NormalBorderPrim>;
+
+impl intern::Internable for NormalBorderPrim {
+ type Key = NormalBorderKey;
+ type StoreData = NormalBorderTemplate;
+ type InternData = ();
+ const PROFILE_COUNTER: usize = crate::profiler::INTERNED_NORMAL_BORDERS;
+}
+
+impl InternablePrimitive for NormalBorderPrim {
+ fn into_key(
+ self,
+ info: &LayoutPrimitiveInfo,
+ ) -> NormalBorderKey {
+ NormalBorderKey::new(
+ info,
+ self,
+ )
+ }
+
+ fn make_instance_kind(
+ _key: NormalBorderKey,
+ data_handle: NormalBorderDataHandle,
+ _: &mut PrimitiveStore,
+ _reference_frame_relative_offset: LayoutVector2D,
+ ) -> PrimitiveInstanceKind {
+ PrimitiveInstanceKind::NormalBorder {
+ data_handle,
+ render_task_ids: storage::Range::empty(),
+ }
+ }
+}
+
+impl CreateShadow for NormalBorderPrim {
+ fn create_shadow(
+ &self,
+ shadow: &Shadow,
+ _: bool,
+ _: RasterSpace,
+ ) -> Self {
+ let border = self.border.with_color(shadow.color.into());
+ NormalBorderPrim {
+ border,
+ widths: self.widths,
+ }
+ }
+}
+
+impl IsVisible for NormalBorderPrim {
+ fn is_visible(&self) -> bool {
+ true
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, Eq, MallocSizeOf, PartialEq, Hash)]
+pub struct ImageBorder {
+ #[ignore_malloc_size_of = "Arc"]
+ pub request: ImageRequest,
+ pub nine_patch: NinePatchDescriptor,
+}
+
+pub type ImageBorderKey = PrimKey<ImageBorder>;
+
+impl ImageBorderKey {
+ pub fn new(
+ info: &LayoutPrimitiveInfo,
+ image_border: ImageBorder,
+ ) -> Self {
+ ImageBorderKey {
+ common: info.into(),
+ kind: image_border,
+ }
+ }
+}
+
+impl intern::InternDebug for ImageBorderKey {}
+
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(MallocSizeOf)]
+pub struct ImageBorderData {
+ #[ignore_malloc_size_of = "Arc"]
+ pub request: ImageRequest,
+ pub brush_segments: Vec<BrushSegment>,
+ pub src_color: Option<RenderTaskId>,
+ pub frame_id: FrameId,
+ pub is_opaque: bool,
+}
+
+impl ImageBorderData {
+ /// Update the GPU cache for a given primitive template. This may be called multiple
+ /// times per frame, by each primitive reference that refers to this interned
+ /// template. The initial request call to the GPU cache ensures that work is only
+ /// done if the cache entry is invalid (due to first use or eviction).
+ pub fn update(
+ &mut self,
+ common: &mut PrimTemplateCommonData,
+ frame_state: &mut FrameBuildingState,
+ ) {
+ if let Some(ref mut request) = frame_state.gpu_cache.request(&mut common.gpu_cache_handle) {
+ self.write_prim_gpu_blocks(request, &common.prim_rect.size());
+ self.write_segment_gpu_blocks(request);
+ }
+
+ let frame_id = frame_state.rg_builder.frame_id();
+ if self.frame_id != frame_id {
+ self.frame_id = frame_id;
+
+ let size = frame_state.resource_cache.request_image(
+ self.request,
+ frame_state.gpu_cache,
+ );
+
+ let task_id = frame_state.rg_builder.add().init(
+ RenderTask::new_image(size, self.request)
+ );
+
+ self.src_color = Some(task_id);
+
+ let image_properties = frame_state
+ .resource_cache
+ .get_image_properties(self.request.key);
+
+ self.is_opaque = image_properties
+ .map(|properties| properties.descriptor.is_opaque())
+ .unwrap_or(true);
+ }
+
+ common.opacity = PrimitiveOpacity { is_opaque: self.is_opaque };
+ }
+
+ fn write_prim_gpu_blocks(
+ &self,
+ request: &mut GpuDataRequest,
+ prim_size: &LayoutSize,
+ ) {
+ // Border primitives are currently used for
+ // image borders, and run through the
+ // normal brush_image shader.
+ request.push(PremultipliedColorF::WHITE);
+ request.push(PremultipliedColorF::WHITE);
+ request.push([
+ prim_size.width,
+ prim_size.height,
+ 0.0,
+ 0.0,
+ ]);
+ }
+
+ fn write_segment_gpu_blocks(
+ &self,
+ request: &mut GpuDataRequest,
+ ) {
+ for segment in &self.brush_segments {
+ // has to match VECS_PER_SEGMENT
+ request.write_segment(
+ segment.local_rect,
+ segment.extra_data,
+ );
+ }
+ }
+}
+
+pub type ImageBorderTemplate = PrimTemplate<ImageBorderData>;
+
+impl From<ImageBorderKey> for ImageBorderTemplate {
+ fn from(key: ImageBorderKey) -> Self {
+ let common = PrimTemplateCommonData::with_key_common(key.common);
+
+ let brush_segments = key.kind.nine_patch.create_segments(common.prim_rect.size());
+ ImageBorderTemplate {
+ common,
+ kind: ImageBorderData {
+ request: key.kind.request,
+ brush_segments,
+ src_color: None,
+ frame_id: FrameId::INVALID,
+ is_opaque: false,
+ }
+ }
+ }
+}
+
+pub type ImageBorderDataHandle = intern::Handle<ImageBorder>;
+
+impl intern::Internable for ImageBorder {
+ type Key = ImageBorderKey;
+ type StoreData = ImageBorderTemplate;
+ type InternData = ();
+ const PROFILE_COUNTER: usize = crate::profiler::INTERNED_IMAGE_BORDERS;
+}
+
+impl InternablePrimitive for ImageBorder {
+ fn into_key(
+ self,
+ info: &LayoutPrimitiveInfo,
+ ) -> ImageBorderKey {
+ ImageBorderKey::new(
+ info,
+ self,
+ )
+ }
+
+ fn make_instance_kind(
+ _key: ImageBorderKey,
+ data_handle: ImageBorderDataHandle,
+ _: &mut PrimitiveStore,
+ _reference_frame_relative_offset: LayoutVector2D,
+ ) -> PrimitiveInstanceKind {
+ PrimitiveInstanceKind::ImageBorder {
+ data_handle
+ }
+ }
+}
+
+impl IsVisible for ImageBorder {
+ fn is_visible(&self) -> bool {
+ true
+ }
+}
+
+#[test]
+#[cfg(target_pointer_width = "64")]
+fn test_struct_sizes() {
+ use std::mem;
+ // The sizes of these structures are critical for performance on a number of
+ // talos stress tests. If you get a failure here on CI, there are two possibilities:
+ // (a) You made a structure smaller than it currently is. Great work! Update the
+ // test expectations and move on.
+ // (b) You made a structure larger. This is not necessarily a problem, but should only
+ // be done with care, and after checking if talos performance regresses badly.
+ assert_eq!(mem::size_of::<NormalBorderPrim>(), 84, "NormalBorderPrim size changed");
+ assert_eq!(mem::size_of::<NormalBorderTemplate>(), 216, "NormalBorderTemplate size changed");
+ assert_eq!(mem::size_of::<NormalBorderKey>(), 104, "NormalBorderKey size changed");
+ assert_eq!(mem::size_of::<ImageBorder>(), 84, "ImageBorder size changed");
+ assert_eq!(mem::size_of::<ImageBorderTemplate>(), 104, "ImageBorderTemplate size changed");
+ assert_eq!(mem::size_of::<ImageBorderKey>(), 104, "ImageBorderKey size changed");
+}
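
Both border templates above pack their per-primitive data into 16-byte GPU blocks: two placeholder white colors followed by the primitive size, then one block per brush segment (which has to match VECS_PER_SEGMENT). Below is a rough standalone illustration of that layout; MockGpuDataRequest is a hypothetical stand-in, not the real GpuDataRequest API.

/// Hypothetical stand-in for GpuDataRequest: collects 4-float GPU blocks.
#[derive(Default)]
struct MockGpuDataRequest {
    blocks: Vec<[f32; 4]>,
}

impl MockGpuDataRequest {
    fn push(&mut self, block: [f32; 4]) {
        self.blocks.push(block);
    }
}

const WHITE: [f32; 4] = [1.0, 1.0, 1.0, 1.0];

/// Sketch of write_prim_gpu_blocks: two color blocks, then the primitive size.
fn write_prim_blocks(request: &mut MockGpuDataRequest, width: f32, height: f32) {
    request.push(WHITE);
    request.push(WHITE);
    request.push([width, height, 0.0, 0.0]);
}

/// Sketch of write_segment_gpu_blocks: one rect block per brush segment.
fn write_segment_blocks(request: &mut MockGpuDataRequest, segment_rects: &[[f32; 4]]) {
    for rect in segment_rects {
        request.push(*rect);
    }
}

fn main() {
    let mut request = MockGpuDataRequest::default();
    write_prim_blocks(&mut request, 200.0, 40.0);
    write_segment_blocks(&mut request, &[[0.0, 0.0, 100.0, 40.0], [100.0, 0.0, 200.0, 40.0]]);
    assert_eq!(request.blocks.len(), 5); // 3 prim blocks + 2 segment blocks
}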
diff --git a/gfx/wr/webrender/src/prim_store/gradient/conic.rs b/gfx/wr/webrender/src/prim_store/gradient/conic.rs
new file mode 100644
index 0000000000..d9c3f5d350
--- /dev/null
+++ b/gfx/wr/webrender/src/prim_store/gradient/conic.rs
@@ -0,0 +1,399 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+//! Conic gradients
+//!
+//! Specification: https://drafts.csswg.org/css-images-4/#conic-gradients
+//!
+//! Conic gradients are rendered via cached render tasks and composited with the image brush.
+
+use euclid::vec2;
+use api::{ExtendMode, GradientStop, PremultipliedColorF};
+use api::units::*;
+use crate::scene_building::IsVisible;
+use crate::frame_builder::FrameBuildingState;
+use crate::intern::{Internable, InternDebug, Handle as InternHandle};
+use crate::internal_types::LayoutPrimitiveInfo;
+use crate::prim_store::{BrushSegment, GradientTileRange};
+use crate::prim_store::{PrimitiveInstanceKind, PrimitiveOpacity, FloatKey};
+use crate::prim_store::{PrimKeyCommonData, PrimTemplateCommonData, PrimitiveStore};
+use crate::prim_store::{NinePatchDescriptor, PointKey, SizeKey, InternablePrimitive};
+use crate::render_task::{RenderTask, RenderTaskKind};
+use crate::render_task_graph::RenderTaskId;
+use crate::render_task_cache::{RenderTaskCacheKeyKind, RenderTaskCacheKey, RenderTaskParent};
+use crate::renderer::GpuBufferAddress;
+use crate::picture::{SurfaceIndex};
+
+use std::{hash, ops::{Deref, DerefMut}};
+use super::{stops_and_min_alpha, GradientStopKey, GradientGpuBlockBuilder};
+
+/// Hashable conic gradient parameters, for use during prim interning.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, MallocSizeOf, PartialEq)]
+pub struct ConicGradientParams {
+ pub angle: f32, // in radians
+ pub start_offset: f32,
+ pub end_offset: f32,
+}
+
+impl Eq for ConicGradientParams {}
+
+impl hash::Hash for ConicGradientParams {
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ self.angle.to_bits().hash(state);
+ self.start_offset.to_bits().hash(state);
+ self.end_offset.to_bits().hash(state);
+ }
+}
+
+/// Identifying key for a line decoration.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, Eq, PartialEq, Hash, MallocSizeOf)]
+pub struct ConicGradientKey {
+ pub common: PrimKeyCommonData,
+ pub extend_mode: ExtendMode,
+ pub center: PointKey,
+ pub params: ConicGradientParams,
+ pub stretch_size: SizeKey,
+ pub stops: Vec<GradientStopKey>,
+ pub tile_spacing: SizeKey,
+ pub nine_patch: Option<Box<NinePatchDescriptor>>,
+}
+
+impl ConicGradientKey {
+ pub fn new(
+ info: &LayoutPrimitiveInfo,
+ conic_grad: ConicGradient,
+ ) -> Self {
+ ConicGradientKey {
+ common: info.into(),
+ extend_mode: conic_grad.extend_mode,
+ center: conic_grad.center,
+ params: conic_grad.params,
+ stretch_size: conic_grad.stretch_size,
+ stops: conic_grad.stops,
+ tile_spacing: conic_grad.tile_spacing,
+ nine_patch: conic_grad.nine_patch,
+ }
+ }
+}
+
+impl InternDebug for ConicGradientKey {}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(MallocSizeOf)]
+pub struct ConicGradientTemplate {
+ pub common: PrimTemplateCommonData,
+ pub extend_mode: ExtendMode,
+ pub center: DevicePoint,
+ pub params: ConicGradientParams,
+ pub task_size: DeviceIntSize,
+ pub scale: DeviceVector2D,
+ pub stretch_size: LayoutSize,
+ pub tile_spacing: LayoutSize,
+ pub brush_segments: Vec<BrushSegment>,
+ pub stops_opacity: PrimitiveOpacity,
+ pub stops: Vec<GradientStop>,
+ pub src_color: Option<RenderTaskId>,
+}
+
+impl Deref for ConicGradientTemplate {
+ type Target = PrimTemplateCommonData;
+ fn deref(&self) -> &Self::Target {
+ &self.common
+ }
+}
+
+impl DerefMut for ConicGradientTemplate {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.common
+ }
+}
+
+impl From<ConicGradientKey> for ConicGradientTemplate {
+ fn from(item: ConicGradientKey) -> Self {
+ let common = PrimTemplateCommonData::with_key_common(item.common);
+ let mut brush_segments = Vec::new();
+
+ if let Some(ref nine_patch) = item.nine_patch {
+ brush_segments = nine_patch.create_segments(common.prim_rect.size());
+ }
+
+ let (stops, min_alpha) = stops_and_min_alpha(&item.stops);
+
+ // Save opacity of the stops for use in
+ // selecting which pass this gradient
+ // should be drawn in.
+ let stops_opacity = PrimitiveOpacity::from_alpha(min_alpha);
+
+ let mut stretch_size: LayoutSize = item.stretch_size.into();
+ stretch_size.width = stretch_size.width.min(common.prim_rect.width());
+ stretch_size.height = stretch_size.height.min(common.prim_rect.height());
+
+ fn approx_eq(a: f32, b: f32) -> bool { (a - b).abs() < 0.01 }
+
+ // Attempt to detect some of the common configurations with hard gradient stops. Allow
+ // those a higher maximum resolution to avoid the worst cases of aliasing artifacts with
+ // large conic gradients. A better solution would be to go back to rendering very large
+ // conic gradients via a brush shader instead of caching all of them (unclear whether
+ // it is important enough to warrant the better solution).
+ let mut has_hard_stops = false;
+ let mut prev_stop = None;
+ let offset_range = item.params.end_offset - item.params.start_offset;
+ for stop in &stops {
+ if offset_range <= 0.0 {
+ break;
+ }
+ if let Some(prev_offset) = prev_stop {
+ // Check whether two consecutive stops are very close (hard stops).
+ if stop.offset < prev_offset + 0.005 / offset_range {
+ // a is the angle of the stop normalized into 0-1 space and repeating in the 0-0.25 range.
+ // If close to 0.0 or 0.25 it means the stop is vertical or horizontal. For those, the lower
+ // resolution isn't a big issue.
+ let a = item.params.angle / (2.0 * std::f32::consts::PI)
+ + item.params.start_offset
+ + stop.offset / offset_range;
+ let a = a.rem_euclid(0.25);
+
+ if !approx_eq(a, 0.0) && !approx_eq(a, 0.25) {
+ has_hard_stops = true;
+ break;
+ }
+ }
+ }
+ prev_stop = Some(stop.offset);
+ }
+
+ let max_size = if has_hard_stops {
+ 2048.0
+ } else {
+ 1024.0
+ };
+
+ // Avoid rendering enormous gradients. Conic gradients are mostly made of soft transitions,
+ // so it is unlikely that rendering at a higher resolution than 1024 would produce noticeable
+ // differences, especially with 8 bits per channel.
+ let mut task_size: DeviceSize = stretch_size.cast_unit();
+ let mut scale = vec2(1.0, 1.0);
+ if task_size.width > max_size {
+ scale.x = task_size.width / max_size;
+ task_size.width = max_size;
+ }
+ if task_size.height > max_size {
+ scale.y = task_size.height / max_size;
+ task_size.height = max_size;
+ }
+
+ ConicGradientTemplate {
+ common,
+ center: DevicePoint::new(item.center.x, item.center.y),
+ extend_mode: item.extend_mode,
+ params: item.params,
+ stretch_size,
+ task_size: task_size.ceil().to_i32(),
+ scale,
+ tile_spacing: item.tile_spacing.into(),
+ brush_segments,
+ stops_opacity,
+ stops,
+ src_color: None,
+ }
+ }
+}
+
+impl ConicGradientTemplate {
+ /// Update the GPU cache for a given primitive template. This may be called multiple
+ /// times per frame, by each primitive reference that refers to this interned
+ /// template. The initial request call to the GPU cache ensures that work is only
+ /// done if the cache entry is invalid (due to first use or eviction).
+ pub fn update(
+ &mut self,
+ frame_state: &mut FrameBuildingState,
+ parent_surface: SurfaceIndex,
+ ) {
+ if let Some(mut request) =
+ frame_state.gpu_cache.request(&mut self.common.gpu_cache_handle) {
+ // write_prim_gpu_blocks
+ request.push(PremultipliedColorF::WHITE);
+ request.push(PremultipliedColorF::WHITE);
+ request.push([
+ self.stretch_size.width,
+ self.stretch_size.height,
+ 0.0,
+ 0.0,
+ ]);
+
+ // write_segment_gpu_blocks
+ for segment in &self.brush_segments {
+ // has to match VECS_PER_SEGMENT
+ request.write_segment(
+ segment.local_rect,
+ segment.extra_data,
+ );
+ }
+ }
+
+ let cache_key = ConicGradientCacheKey {
+ size: self.task_size,
+ center: PointKey { x: self.center.x, y: self.center.y },
+ scale: PointKey { x: self.scale.x, y: self.scale.y },
+ start_offset: FloatKey(self.params.start_offset),
+ end_offset: FloatKey(self.params.end_offset),
+ angle: FloatKey(self.params.angle),
+ extend_mode: self.extend_mode,
+ stops: self.stops.iter().map(|stop| (*stop).into()).collect(),
+ };
+
+ let task_id = frame_state.resource_cache.request_render_task(
+ RenderTaskCacheKey {
+ size: self.task_size,
+ kind: RenderTaskCacheKeyKind::ConicGradient(cache_key),
+ },
+ frame_state.gpu_cache,
+ frame_state.frame_gpu_data,
+ frame_state.rg_builder,
+ None,
+ false,
+ RenderTaskParent::Surface(parent_surface),
+ &mut frame_state.surface_builder,
+ |rg_builder, gpu_buffer_builder| {
+ let stops = GradientGpuBlockBuilder::build(
+ false,
+ gpu_buffer_builder,
+ &self.stops,
+ );
+
+ rg_builder.add().init(RenderTask::new_dynamic(
+ self.task_size,
+ RenderTaskKind::ConicGradient(ConicGradientTask {
+ extend_mode: self.extend_mode,
+ scale: self.scale,
+ center: self.center,
+ params: self.params.clone(),
+ stops,
+ }),
+ ))
+ }
+ );
+
+ self.src_color = Some(task_id);
+
+ // Tile spacing is always handled by decomposing into separate draw calls so the
+ // primitive opacity is equivalent to stops opacity. This might change to being
+ // set to non-opaque in the presence of tile spacing if/when tile spacing is handled
+ // in the same way as with the image primitive.
+ self.opacity = self.stops_opacity;
+ }
+}
+
+pub type ConicGradientDataHandle = InternHandle<ConicGradient>;
+
+#[derive(Debug, MallocSizeOf)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct ConicGradient {
+ pub extend_mode: ExtendMode,
+ pub center: PointKey,
+ pub params: ConicGradientParams,
+ pub stretch_size: SizeKey,
+ pub stops: Vec<GradientStopKey>,
+ pub tile_spacing: SizeKey,
+ pub nine_patch: Option<Box<NinePatchDescriptor>>,
+}
+
+impl Internable for ConicGradient {
+ type Key = ConicGradientKey;
+ type StoreData = ConicGradientTemplate;
+ type InternData = ();
+ const PROFILE_COUNTER: usize = crate::profiler::INTERNED_CONIC_GRADIENTS;
+}
+
+impl InternablePrimitive for ConicGradient {
+ fn into_key(
+ self,
+ info: &LayoutPrimitiveInfo,
+ ) -> ConicGradientKey {
+ ConicGradientKey::new(info, self)
+ }
+
+ fn make_instance_kind(
+ _key: ConicGradientKey,
+ data_handle: ConicGradientDataHandle,
+ _prim_store: &mut PrimitiveStore,
+ _reference_frame_relative_offset: LayoutVector2D,
+ ) -> PrimitiveInstanceKind {
+ PrimitiveInstanceKind::ConicGradient {
+ data_handle,
+ visible_tiles_range: GradientTileRange::empty(),
+ }
+ }
+}
+
+impl IsVisible for ConicGradient {
+ fn is_visible(&self) -> bool {
+ true
+ }
+}
+
+#[derive(Debug)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct ConicGradientTask {
+ pub extend_mode: ExtendMode,
+ pub center: DevicePoint,
+ pub scale: DeviceVector2D,
+ pub params: ConicGradientParams,
+ pub stops: GpuBufferAddress,
+}
+
+impl ConicGradientTask {
+ pub fn to_instance(&self, target_rect: &DeviceIntRect) -> ConicGradientInstance {
+ ConicGradientInstance {
+ task_rect: target_rect.to_f32(),
+ center: self.center,
+ scale: self.scale,
+ start_offset: self.params.start_offset,
+ end_offset: self.params.end_offset,
+ angle: self.params.angle,
+ extend_mode: self.extend_mode as i32,
+ gradient_stops_address: self.stops.as_int(),
+ }
+ }
+}
+
+/// The per-instance shader input of a conic gradient render task.
+///
+/// Must match the CONIC_GRADIENT instance description in renderer/vertex.rs.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[repr(C)]
+#[derive(Clone, Debug)]
+pub struct ConicGradientInstance {
+ pub task_rect: DeviceRect,
+ pub center: DevicePoint,
+ pub scale: DeviceVector2D,
+ pub start_offset: f32,
+ pub end_offset: f32,
+ pub angle: f32,
+ pub extend_mode: i32,
+ pub gradient_stops_address: i32,
+}
+
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct ConicGradientCacheKey {
+ pub size: DeviceIntSize,
+ pub center: PointKey,
+ pub scale: PointKey,
+ pub start_offset: FloatKey,
+ pub end_offset: FloatKey,
+ pub angle: FloatKey,
+ pub extend_mode: ExtendMode,
+ pub stops: Vec<GradientStopKey>,
+}
+
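
The hard-stop heuristic in ConicGradientTemplate above only bumps the cached task size to 2048 when two consecutive stops nearly coincide and the resulting edge is not axis-aligned: the stop's position is normalized into quarter-turn space and compared against 0.0 and 0.25. Below is a small standalone sketch of that angle test, with illustrative helper names and the same 0.01 tolerance.

fn approx_eq(a: f32, b: f32) -> bool {
    (a - b).abs() < 0.01
}

/// Normalize a stop position on a conic gradient into quarter-turn space and
/// report whether the resulting hard edge would be axis-aligned.
fn hard_stop_is_axis_aligned(angle_rad: f32, start_offset: f32, offset_range: f32, stop_offset: f32) -> bool {
    let a = angle_rad / (2.0 * std::f32::consts::PI) // gradient rotation in turns
        + start_offset
        + stop_offset / offset_range;
    let a = a.rem_euclid(0.25); // fold into one quarter turn
    approx_eq(a, 0.0) || approx_eq(a, 0.25)
}

fn main() {
    // A hard stop a quarter of the way around an unrotated gradient lands on an axis,
    // so the lower 1024px task resolution is acceptable.
    assert!(hard_stop_is_axis_aligned(0.0, 0.0, 1.0, 0.25));
    // The same stop on a gradient rotated by 0.5 radians is diagonal,
    // so the template would allow the larger 2048px task size.
    assert!(!hard_stop_is_axis_aligned(0.5, 0.0, 1.0, 0.25));
}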
diff --git a/gfx/wr/webrender/src/prim_store/gradient/linear.rs b/gfx/wr/webrender/src/prim_store/gradient/linear.rs
new file mode 100644
index 0000000000..85da4b670a
--- /dev/null
+++ b/gfx/wr/webrender/src/prim_store/gradient/linear.rs
@@ -0,0 +1,750 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+//! Linear gradients
+//!
+//! Specification: https://drafts.csswg.org/css-images-4/#linear-gradients
+//!
+//! Linear gradients are rendered via cached render tasks and composited with the image brush.
+
+use euclid::approxeq::ApproxEq;
+use euclid::{point2, vec2, size2};
+use api::{ExtendMode, GradientStop, LineOrientation, PremultipliedColorF, ColorF, ColorU};
+use api::units::*;
+use crate::scene_building::IsVisible;
+use crate::frame_builder::FrameBuildingState;
+use crate::intern::{Internable, InternDebug, Handle as InternHandle};
+use crate::internal_types::LayoutPrimitiveInfo;
+use crate::image_tiling::simplify_repeated_primitive;
+use crate::prim_store::{BrushSegment, GradientTileRange};
+use crate::prim_store::{PrimitiveInstanceKind, PrimitiveOpacity};
+use crate::prim_store::{PrimKeyCommonData, PrimTemplateCommonData, PrimitiveStore};
+use crate::prim_store::{NinePatchDescriptor, PointKey, SizeKey, InternablePrimitive};
+use crate::render_task::{RenderTask, RenderTaskKind};
+use crate::render_task_graph::RenderTaskId;
+use crate::render_task_cache::{RenderTaskCacheKeyKind, RenderTaskCacheKey, RenderTaskParent};
+use crate::renderer::GpuBufferAddress;
+use crate::segment::EdgeAaSegmentMask;
+use crate::picture::{SurfaceIndex};
+use crate::util::pack_as_float;
+use super::{stops_and_min_alpha, GradientStopKey, GradientGpuBlockBuilder, apply_gradient_local_clip};
+use std::ops::{Deref, DerefMut};
+use std::mem::swap;
+
+pub const MAX_CACHED_SIZE: f32 = 1024.0;
+
+/// Identifying key for a linear gradient.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, Eq, PartialEq, Hash, MallocSizeOf)]
+pub struct LinearGradientKey {
+ pub common: PrimKeyCommonData,
+ pub extend_mode: ExtendMode,
+ pub start_point: PointKey,
+ pub end_point: PointKey,
+ pub stretch_size: SizeKey,
+ pub tile_spacing: SizeKey,
+ pub stops: Vec<GradientStopKey>,
+ pub reverse_stops: bool,
+ pub cached: bool,
+ pub nine_patch: Option<Box<NinePatchDescriptor>>,
+ pub edge_aa_mask: EdgeAaSegmentMask,
+}
+
+impl LinearGradientKey {
+ pub fn new(
+ info: &LayoutPrimitiveInfo,
+ linear_grad: LinearGradient,
+ ) -> Self {
+ LinearGradientKey {
+ common: info.into(),
+ extend_mode: linear_grad.extend_mode,
+ start_point: linear_grad.start_point,
+ end_point: linear_grad.end_point,
+ stretch_size: linear_grad.stretch_size,
+ tile_spacing: linear_grad.tile_spacing,
+ stops: linear_grad.stops,
+ reverse_stops: linear_grad.reverse_stops,
+ cached: linear_grad.cached,
+ nine_patch: linear_grad.nine_patch,
+ edge_aa_mask: linear_grad.edge_aa_mask,
+ }
+ }
+}
+
+impl InternDebug for LinearGradientKey {}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, MallocSizeOf)]
+pub struct LinearGradientTemplate {
+ pub common: PrimTemplateCommonData,
+ pub extend_mode: ExtendMode,
+ pub start_point: DevicePoint,
+ pub end_point: DevicePoint,
+ pub task_size: DeviceIntSize,
+ pub scale: DeviceVector2D,
+ pub stretch_size: LayoutSize,
+ pub tile_spacing: LayoutSize,
+ pub stops_opacity: PrimitiveOpacity,
+ pub stops: Vec<GradientStop>,
+ pub brush_segments: Vec<BrushSegment>,
+ pub reverse_stops: bool,
+ pub is_fast_path: bool,
+ pub cached: bool,
+ pub src_color: Option<RenderTaskId>,
+}
+
+impl Deref for LinearGradientTemplate {
+ type Target = PrimTemplateCommonData;
+ fn deref(&self) -> &Self::Target {
+ &self.common
+ }
+}
+
+impl DerefMut for LinearGradientTemplate {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.common
+ }
+}
+
+/// Perform a few optimizations to the gradient that are relevant to scene building.
+///
+/// Returns true if the gradient was decomposed into fast-path primitives, indicating
+/// that we shouldn't emit a regular gradient primitive after this returns.
+pub fn optimize_linear_gradient(
+ prim_rect: &mut LayoutRect,
+ tile_size: &mut LayoutSize,
+ mut tile_spacing: LayoutSize,
+ clip_rect: &LayoutRect,
+ start: &mut LayoutPoint,
+ end: &mut LayoutPoint,
+ extend_mode: ExtendMode,
+ stops: &mut [GradientStopKey],
+ // Callback called for each fast-path segment (rect, start end, stops).
+ callback: &mut dyn FnMut(&LayoutRect, LayoutPoint, LayoutPoint, &[GradientStopKey], EdgeAaSegmentMask)
+) -> bool {
+ // First sanitize the gradient parameters. See if we can remove repetitions,
+ // tighten the primitive bounds, etc.
+
+ simplify_repeated_primitive(&tile_size, &mut tile_spacing, prim_rect);
+
+ let vertical = start.x.approx_eq(&end.x);
+ let horizontal = start.y.approx_eq(&end.y);
+
+ let mut horizontally_tiled = prim_rect.width() > tile_size.width;
+ let mut vertically_tiled = prim_rect.height() > tile_size.height;
+
+ // Check whether the tiling is equivalent to stretching on either axis.
+ // Stretching the gradient is more efficient than repeating it.
+ if vertically_tiled && horizontal && tile_spacing.height == 0.0 {
+ tile_size.height = prim_rect.height();
+ vertically_tiled = false;
+ }
+
+ if horizontally_tiled && vertical && tile_spacing.width == 0.0 {
+ tile_size.width = prim_rect.width();
+ horizontally_tiled = false;
+ }
+
+ let offset = apply_gradient_local_clip(
+ prim_rect,
+ &tile_size,
+ &tile_spacing,
+ &clip_rect
+ );
+
+ // The size of gradient render tasks depends on the tile_size. No need to generate
+ // large stretch sizes that will be clipped to the bounds of the primitive.
+ tile_size.width = tile_size.width.min(prim_rect.width());
+ tile_size.height = tile_size.height.min(prim_rect.height());
+
+ *start += offset;
+ *end += offset;
+
+ // Next, in the case of axis-aligned gradients, see if it is worth
+ // decomposing the gradient into multiple gradients with only two
+ // gradient stops per segment to get a faster shader.
+
+ if extend_mode != ExtendMode::Clamp || stops.is_empty() {
+ return false;
+ }
+
+ if !vertical && !horizontal {
+ return false;
+ }
+
+ if vertical && horizontal {
+ return false;
+ }
+
+ if !tile_spacing.is_empty() || vertically_tiled || horizontally_tiled {
+ return false;
+ }
+
+ // If the gradient is small, no need to bother with decomposing it.
+ if (horizontal && tile_size.width < 256.0)
+ || (vertical && tile_size.height < 256.0) {
+
+ return false;
+ }
+
+ // Flip x and y if need be so that we only deal with the horizontal case.
+
+ // From now on, don't return false: we are going to modify the caller's
+ // variables and won't bother to restore them. If the control flow changes,
+ // make sure to restore the &mut parameters to sensible values before
+ // returning false.
+
+ let adjust_rect = &mut |rect: &mut LayoutRect| {
+ if vertical {
+ swap(&mut rect.min.x, &mut rect.min.y);
+ swap(&mut rect.max.x, &mut rect.max.y);
+ }
+ };
+
+ let adjust_size = &mut |size: &mut LayoutSize| {
+ if vertical { swap(&mut size.width, &mut size.height); }
+ };
+
+ let adjust_point = &mut |p: &mut LayoutPoint| {
+ if vertical { swap(&mut p.x, &mut p.y); }
+ };
+
+ let clip_rect = match clip_rect.intersection(prim_rect) {
+ Some(clip) => clip,
+ None => {
+ return false;
+ }
+ };
+
+ adjust_rect(prim_rect);
+ adjust_point(start);
+ adjust_point(end);
+ adjust_size(tile_size);
+
+ let length = (end.x - start.x).abs();
+
+ // Decompose the gradient into simple segments. This lets us:
+ // - separate opaque from semi-transparent segments,
+ // - compress long segments into small render tasks,
+ // - make sure hard stops stay so even if the primitive is large.
+
+ let reverse_stops = start.x > end.x;
+
+ // Handle reverse stops so we can assume stops are arranged in increasing x.
+ if reverse_stops {
+ stops.reverse();
+ swap(start, end);
+ }
+
+ // Use fake gradient stops to emulate the potential constant color sections
+ // before and after the gradient endpoints.
+ let mut prev = *stops.first().unwrap();
+ let mut last = *stops.last().unwrap();
+
+ // Set the offsets of the fake stops to position them at the edges of the primitive.
+ prev.offset = -start.x / length;
+ last.offset = (tile_size.width - start.x) / length;
+ if reverse_stops {
+ prev.offset = 1.0 - prev.offset;
+ last.offset = 1.0 - last.offset;
+ }
+
+ let (side_edges, first_edge, last_edge) = if vertical {
+ (
+ EdgeAaSegmentMask::LEFT | EdgeAaSegmentMask::RIGHT,
+ EdgeAaSegmentMask::TOP,
+ EdgeAaSegmentMask::BOTTOM
+ )
+ } else {
+ (
+ EdgeAaSegmentMask::TOP | EdgeAaSegmentMask::BOTTOM,
+ EdgeAaSegmentMask::LEFT,
+ EdgeAaSegmentMask::RIGHT
+ )
+ };
+
+ let mut is_first = true;
+ let last_offset = last.offset;
+ for stop in stops.iter().chain((&[last]).iter()) {
+ let prev_stop = prev;
+ prev = *stop;
+
+ if prev_stop.color.a == 0 && stop.color.a == 0 {
+ continue;
+ }
+
+
+ let prev_offset = if reverse_stops { 1.0 - prev_stop.offset } else { prev_stop.offset };
+ let offset = if reverse_stops { 1.0 - stop.offset } else { stop.offset };
+
+ // In layout space, relative to the primitive.
+ let segment_start = start.x + prev_offset * length;
+ let segment_end = start.x + offset * length;
+ let segment_length = segment_end - segment_start;
+
+ if segment_length <= 0.0 {
+ continue;
+ }
+
+ let mut segment_rect = *prim_rect;
+ segment_rect.min.x += segment_start;
+ segment_rect.max.x = segment_rect.min.x + segment_length;
+
+ let mut start = point2(0.0, 0.0);
+ let mut end = point2(segment_length, 0.0);
+
+ adjust_point(&mut start);
+ adjust_point(&mut end);
+ adjust_rect(&mut segment_rect);
+
+ let origin_before_clip = segment_rect.min;
+ segment_rect = match segment_rect.intersection(&clip_rect) {
+ Some(rect) => rect,
+ None => {
+ continue;
+ }
+ };
+ let offset = segment_rect.min - origin_before_clip;
+
+ // Account for the clipping since start and end are relative to the origin.
+ start -= offset;
+ end -= offset;
+
+ let mut edge_flags = side_edges;
+ if is_first {
+ edge_flags |= first_edge;
+ is_first = false;
+ }
+ if stop.offset == last_offset {
+ edge_flags |= last_edge;
+ }
+
+ callback(
+ &segment_rect,
+ start,
+ end,
+ &[
+ GradientStopKey { offset: 0.0, .. prev_stop },
+ GradientStopKey { offset: 1.0, .. *stop },
+ ],
+ edge_flags,
+ );
+ }
+
+ true
+}
+
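The decomposition above brackets the real stops with two synthetic stops positioned on the primitive edges: offsets are fractions of the start-to-end distance, so a point x pixels from the gradient start maps to offset x / length. Below is a small numeric sketch of that mapping, using plain f32s in place of the layout types.

/// Offsets of the synthetic edge stops for a horizontal gradient segment,
/// expressed in gradient-offset space (0.0 at start_x, 1.0 at start_x + length).
fn edge_stop_offsets(start_x: f32, length: f32, tile_width: f32) -> (f32, f32) {
    let first = -start_x / length;                // primitive's left edge
    let last = (tile_width - start_x) / length;   // primitive's right edge
    (first, last)
}

fn main() {
    // A 100px gradient starting 50px inside a 400px wide tile: the left edge sits
    // half a gradient-length before the first stop, the right edge 3.5 lengths after it.
    let (first, last) = edge_stop_offsets(50.0, 100.0, 400.0);
    assert_eq!(first, -0.5);
    assert_eq!(last, 3.5);
}
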
+impl From<LinearGradientKey> for LinearGradientTemplate {
+ fn from(item: LinearGradientKey) -> Self {
+
+ let mut common = PrimTemplateCommonData::with_key_common(item.common);
+ common.edge_aa_mask = item.edge_aa_mask;
+
+ let (mut stops, min_alpha) = stops_and_min_alpha(&item.stops);
+
+ let mut brush_segments = Vec::new();
+
+ if let Some(ref nine_patch) = item.nine_patch {
+ brush_segments = nine_patch.create_segments(common.prim_rect.size());
+ }
+
+ // Save opacity of the stops for use in
+ // selecting which pass this gradient
+ // should be drawn in.
+ let stops_opacity = PrimitiveOpacity::from_alpha(min_alpha);
+
+ let start_point = DevicePoint::new(item.start_point.x, item.start_point.y);
+ let end_point = DevicePoint::new(item.end_point.x, item.end_point.y);
+ let tile_spacing: LayoutSize = item.tile_spacing.into();
+ let stretch_size: LayoutSize = item.stretch_size.into();
+ let mut task_size: DeviceSize = stretch_size.cast_unit();
+
+ let horizontal = start_point.y.approx_eq(&end_point.y);
+ let vertical = start_point.x.approx_eq(&end_point.x);
+
+ if horizontal {
+ // Completely horizontal, we can stretch the gradient vertically.
+ task_size.height = 1.0;
+ }
+
+ if vertical {
+ // Completely vertical, we can stretch the gradient horizontally.
+ task_size.width = 1.0;
+ }
+
+ // See if we can render the gradient using a special fast-path shader.
+ // The fast path only works with two gradient stops.
+ let mut is_fast_path = false;
+ if item.cached && stops.len() == 2 && brush_segments.is_empty() {
+ if horizontal
+ && stretch_size.width >= common.prim_rect.width()
+ && start_point.x.approx_eq(&0.0)
+ && end_point.x.approx_eq(&stretch_size.width) {
+ is_fast_path = true;
+ task_size.width = task_size.width.min(256.0);
+ }
+ if vertical
+ && stretch_size.height >= common.prim_rect.height()
+ && start_point.y.approx_eq(&0.0)
+ && end_point.y.approx_eq(&stretch_size.height) {
+ is_fast_path = true;
+ task_size.height = task_size.height.min(256.0);
+ }
+
+ if stops[0].color == stops[1].color {
+ is_fast_path = true;
+ task_size = size2(1.0, 1.0);
+ }
+
+ if is_fast_path && item.reverse_stops {
+ // The fast path doesn't use the gradient gpu blocks builder so handle
+ // reversed stops here.
+ stops.swap(0, 1);
+ }
+ }
+
+ // Avoid rendering enormous gradients. Linear gradients are mostly made of soft transitions,
+ // so it is unlikely that rendering at a higher resolution than 1024 would produce noticeable
+ // differences, especially with 8 bits per channel.
+
+ let mut scale = vec2(1.0, 1.0);
+
+ if task_size.width > MAX_CACHED_SIZE {
+ scale.x = task_size.width / MAX_CACHED_SIZE;
+ task_size.width = MAX_CACHED_SIZE;
+ }
+
+ if task_size.height > MAX_CACHED_SIZE {
+ scale.y = task_size.height / MAX_CACHED_SIZE;
+ task_size.height = MAX_CACHED_SIZE;
+ }
+
+ LinearGradientTemplate {
+ common,
+ extend_mode: item.extend_mode,
+ start_point,
+ end_point,
+ task_size: task_size.ceil().to_i32(),
+ scale,
+ stretch_size,
+ tile_spacing,
+ stops_opacity,
+ stops,
+ brush_segments,
+ reverse_stops: item.reverse_stops,
+ is_fast_path,
+ cached: item.cached,
+ src_color: None,
+ }
+ }
+}
+
+impl LinearGradientTemplate {
+ /// Update the GPU cache for a given primitive template. This may be called multiple
+ /// times per frame, by each primitive reference that refers to this interned
+ /// template. The initial request call to the GPU cache ensures that work is only
+ /// done if the cache entry is invalid (due to first use or eviction).
+ pub fn update(
+ &mut self,
+ frame_state: &mut FrameBuildingState,
+ parent_surface: SurfaceIndex,
+ ) {
+ if let Some(mut request) = frame_state.gpu_cache.request(
+ &mut self.common.gpu_cache_handle
+ ) {
+
+ // write_prim_gpu_blocks
+ if self.cached {
+ // We are using the image brush.
+ request.push(PremultipliedColorF::WHITE);
+ request.push(PremultipliedColorF::WHITE);
+ request.push([
+ self.stretch_size.width,
+ self.stretch_size.height,
+ 0.0,
+ 0.0,
+ ]);
+ } else {
+ // We are using the gradient brush.
+ request.push([
+ self.start_point.x,
+ self.start_point.y,
+ self.end_point.x,
+ self.end_point.y,
+ ]);
+ request.push([
+ pack_as_float(self.extend_mode as u32),
+ self.stretch_size.width,
+ self.stretch_size.height,
+ 0.0,
+ ]);
+ }
+
+ // write_segment_gpu_blocks
+ for segment in &self.brush_segments {
+ // has to match VECS_PER_SEGMENT
+ request.write_segment(
+ segment.local_rect,
+ segment.extra_data,
+ );
+ }
+ }
+
+ // Tile spacing is always handled by decomposing into separate draw calls so the
+ // primitive opacity is equivalent to stops opacity. This might change to being
+ // set to non-opaque in the presence of tile spacing if/when tile spacing is handled
+ // in the same way as with the image primitive.
+ self.opacity = self.stops_opacity;
+
+ if !self.cached {
+ return;
+ }
+
+ let task_id = if self.is_fast_path {
+ let orientation = if self.task_size.width > self.task_size.height {
+ LineOrientation::Horizontal
+ } else {
+ LineOrientation::Vertical
+ };
+
+ let gradient = FastLinearGradientTask {
+ color0: self.stops[0].color.into(),
+ color1: self.stops[1].color.into(),
+ orientation,
+ };
+
+ frame_state.resource_cache.request_render_task(
+ RenderTaskCacheKey {
+ size: self.task_size,
+ kind: RenderTaskCacheKeyKind::FastLinearGradient(gradient),
+ },
+ frame_state.gpu_cache,
+ frame_state.frame_gpu_data,
+ frame_state.rg_builder,
+ None,
+ false,
+ RenderTaskParent::Surface(parent_surface),
+ &mut frame_state.surface_builder,
+ |rg_builder, _| {
+ rg_builder.add().init(RenderTask::new_dynamic(
+ self.task_size,
+ RenderTaskKind::FastLinearGradient(gradient),
+ ))
+ }
+ )
+ } else {
+ let cache_key = LinearGradientCacheKey {
+ size: self.task_size,
+ start: PointKey { x: self.start_point.x, y: self.start_point.y },
+ end: PointKey { x: self.end_point.x, y: self.end_point.y },
+ scale: PointKey { x: self.scale.x, y: self.scale.y },
+ extend_mode: self.extend_mode,
+ stops: self.stops.iter().map(|stop| (*stop).into()).collect(),
+ reversed_stops: self.reverse_stops,
+ };
+
+ frame_state.resource_cache.request_render_task(
+ RenderTaskCacheKey {
+ size: self.task_size,
+ kind: RenderTaskCacheKeyKind::LinearGradient(cache_key),
+ },
+ frame_state.gpu_cache,
+ frame_state.frame_gpu_data,
+ frame_state.rg_builder,
+ None,
+ false,
+ RenderTaskParent::Surface(parent_surface),
+ &mut frame_state.surface_builder,
+ |rg_builder, gpu_buffer_builder| {
+ let stops = Some(GradientGpuBlockBuilder::build(
+ self.reverse_stops,
+ gpu_buffer_builder,
+ &self.stops,
+ ));
+
+ rg_builder.add().init(RenderTask::new_dynamic(
+ self.task_size,
+ RenderTaskKind::LinearGradient(LinearGradientTask {
+ start: self.start_point,
+ end: self.end_point,
+ scale: self.scale,
+ extend_mode: self.extend_mode,
+ stops: stops.unwrap(),
+ }),
+ ))
+ }
+ )
+ };
+
+ self.src_color = Some(task_id);
+ }
+}
+
+pub type LinearGradientDataHandle = InternHandle<LinearGradient>;
+
+#[derive(Debug, MallocSizeOf)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct LinearGradient {
+ pub extend_mode: ExtendMode,
+ pub start_point: PointKey,
+ pub end_point: PointKey,
+ pub stretch_size: SizeKey,
+ pub tile_spacing: SizeKey,
+ pub stops: Vec<GradientStopKey>,
+ pub reverse_stops: bool,
+ pub nine_patch: Option<Box<NinePatchDescriptor>>,
+ pub cached: bool,
+ pub edge_aa_mask: EdgeAaSegmentMask,
+}
+
+impl Internable for LinearGradient {
+ type Key = LinearGradientKey;
+ type StoreData = LinearGradientTemplate;
+ type InternData = ();
+ const PROFILE_COUNTER: usize = crate::profiler::INTERNED_LINEAR_GRADIENTS;
+}
+
+impl InternablePrimitive for LinearGradient {
+ fn into_key(
+ self,
+ info: &LayoutPrimitiveInfo,
+ ) -> LinearGradientKey {
+ LinearGradientKey::new(info, self)
+ }
+
+ fn make_instance_kind(
+ key: LinearGradientKey,
+ data_handle: LinearGradientDataHandle,
+ _prim_store: &mut PrimitiveStore,
+ _reference_frame_relative_offset: LayoutVector2D,
+ ) -> PrimitiveInstanceKind {
+ if key.cached {
+ PrimitiveInstanceKind::CachedLinearGradient {
+ data_handle,
+ visible_tiles_range: GradientTileRange::empty(),
+ }
+ } else {
+ PrimitiveInstanceKind::LinearGradient {
+ data_handle,
+ visible_tiles_range: GradientTileRange::empty(),
+ }
+ }
+ }
+}
+
+impl IsVisible for LinearGradient {
+ fn is_visible(&self) -> bool {
+ true
+ }
+}
+
+#[derive(Debug)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+pub struct LinearGradientPrimitive {
+ pub cache_segments: Vec<CachedGradientSegment>,
+ pub visible_tiles_range: GradientTileRange,
+}
+
+#[derive(Debug)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+pub struct CachedGradientSegment {
+ pub render_task: RenderTaskId,
+ pub local_rect: LayoutRect,
+}
+
+
+#[derive(Copy, Clone, Debug, Hash, MallocSizeOf, PartialEq, Eq)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct FastLinearGradientTask {
+ pub color0: ColorU,
+ pub color1: ColorU,
+ pub orientation: LineOrientation,
+}
+
+impl FastLinearGradientTask {
+ pub fn to_instance(&self, target_rect: &DeviceIntRect) -> FastLinearGradientInstance {
+ FastLinearGradientInstance {
+ task_rect: target_rect.to_f32(),
+ color0: ColorF::from(self.color0).premultiplied(),
+ color1: ColorF::from(self.color1).premultiplied(),
+ axis_select: match self.orientation {
+ LineOrientation::Horizontal => 0.0,
+ LineOrientation::Vertical => 1.0,
+ },
+ }
+ }
+}
+
+pub type FastLinearGradientCacheKey = FastLinearGradientTask;
+
+/// The per-instance shader input of a fast-path linear gradient render task.
+///
+/// Must match the FAST_LINEAR_GRADIENT instance description in renderer/vertex.rs.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[repr(C)]
+#[derive(Clone, Debug)]
+pub struct FastLinearGradientInstance {
+ pub task_rect: DeviceRect,
+ pub color0: PremultipliedColorF,
+ pub color1: PremultipliedColorF,
+ pub axis_select: f32,
+}
+
+#[derive(Debug)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct LinearGradientTask {
+ pub start: DevicePoint,
+ pub end: DevicePoint,
+ pub scale: DeviceVector2D,
+ pub extend_mode: ExtendMode,
+ pub stops: GpuBufferAddress,
+}
+
+impl LinearGradientTask {
+ pub fn to_instance(&self, target_rect: &DeviceIntRect) -> LinearGradientInstance {
+ LinearGradientInstance {
+ task_rect: target_rect.to_f32(),
+ start: self.start,
+ end: self.end,
+ scale: self.scale,
+ extend_mode: self.extend_mode as i32,
+ gradient_stops_address: self.stops.as_int(),
+ }
+ }
+}
+
+/// The per-instance shader input of a linear gradient render task.
+///
+/// Must match the LINEAR_GRADIENT instance description in renderer/vertex.rs.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[repr(C)]
+#[derive(Clone, Debug)]
+pub struct LinearGradientInstance {
+ pub task_rect: DeviceRect,
+ pub start: DevicePoint,
+ pub end: DevicePoint,
+ pub scale: DeviceVector2D,
+ pub extend_mode: i32,
+ pub gradient_stops_address: i32,
+}
+
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct LinearGradientCacheKey {
+ pub size: DeviceIntSize,
+ pub start: PointKey,
+ pub end: PointKey,
+ pub scale: PointKey,
+ pub extend_mode: ExtendMode,
+ pub stops: Vec<GradientStopKey>,
+ pub reversed_stops: bool,
+}
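
Both the cached linear path above and the conic path clamp the render-task size to MAX_CACHED_SIZE and remember the clamping factor as a scale, so the task can be stretched back over the full gradient. Below is a minimal standalone sketch of that size/scale computation, with plain f32 tuples standing in for the device-space types.

const MAX_CACHED_SIZE: f32 = 1024.0;

/// Clamp a requested gradient task size and return (task_size, scale),
/// mirroring the clamping done when building the gradient templates.
fn clamp_task_size(width: f32, height: f32) -> ((f32, f32), (f32, f32)) {
    let mut task = (width, height);
    let mut scale = (1.0, 1.0);
    if task.0 > MAX_CACHED_SIZE {
        scale.0 = task.0 / MAX_CACHED_SIZE;
        task.0 = MAX_CACHED_SIZE;
    }
    if task.1 > MAX_CACHED_SIZE {
        scale.1 = task.1 / MAX_CACHED_SIZE;
        task.1 = MAX_CACHED_SIZE;
    }
    (task, scale)
}

fn main() {
    // A 3000x200 gradient renders into a 1024x200 task stretched back horizontally.
    let (task, scale) = clamp_task_size(3000.0, 200.0);
    assert_eq!(task, (1024.0, 200.0));
    assert!((scale.0 - 3000.0 / 1024.0).abs() < 1e-6);
    assert_eq!(scale.1, 1.0);
}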
diff --git a/gfx/wr/webrender/src/prim_store/gradient/mod.rs b/gfx/wr/webrender/src/prim_store/gradient/mod.rs
new file mode 100644
index 0000000000..d0b922c579
--- /dev/null
+++ b/gfx/wr/webrender/src/prim_store/gradient/mod.rs
@@ -0,0 +1,392 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use api::{ColorF, ColorU, GradientStop, PremultipliedColorF};
+use api::units::{LayoutRect, LayoutSize, LayoutVector2D};
+use crate::renderer::{GpuBufferAddress, GpuBufferBuilder};
+use std::hash;
+
+mod linear;
+mod radial;
+mod conic;
+
+pub use linear::MAX_CACHED_SIZE as LINEAR_MAX_CACHED_SIZE;
+
+pub use linear::*;
+pub use radial::*;
+pub use conic::*;
+
+/// A hashable gradient stop that can be used in primitive keys.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Copy, Clone, MallocSizeOf, PartialEq)]
+pub struct GradientStopKey {
+ pub offset: f32,
+ pub color: ColorU,
+}
+
+impl GradientStopKey {
+ pub fn empty() -> Self {
+ GradientStopKey {
+ offset: 0.0,
+ color: ColorU::new(0, 0, 0, 0),
+ }
+ }
+}
+
+impl Into<GradientStopKey> for GradientStop {
+ fn into(self) -> GradientStopKey {
+ GradientStopKey {
+ offset: self.offset,
+ color: self.color.into(),
+ }
+ }
+}
+
+// Convert `stop_keys` into a vector of `GradientStop`s, which is a more
+// convenient representation for the current gradient builder. Compute the
+// minimum stop alpha along the way.
+fn stops_and_min_alpha(stop_keys: &[GradientStopKey]) -> (Vec<GradientStop>, f32) {
+ let mut min_alpha: f32 = 1.0;
+ let stops = stop_keys.iter().map(|stop_key| {
+ let color: ColorF = stop_key.color.into();
+ min_alpha = min_alpha.min(color.a);
+
+ GradientStop {
+ offset: stop_key.offset,
+ color,
+ }
+ }).collect();
+
+ (stops, min_alpha)
+}
+
+impl Eq for GradientStopKey {}
+
+impl hash::Hash for GradientStopKey {
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ self.offset.to_bits().hash(state);
+ self.color.hash(state);
+ }
+}
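
GradientStopKey here, like ConicGradientParams in conic.rs, contains f32 fields and therefore cannot derive Eq/Hash; it hashes the float bit patterns via to_bits instead. Below is a short self-contained sketch of the same trick on a hypothetical OffsetKey type.

use std::collections::HashSet;
use std::hash::{Hash, Hasher};

/// Hypothetical key with a float field, hashed by its bit pattern like GradientStopKey.
#[derive(Debug, Clone, Copy, PartialEq)]
struct OffsetKey {
    offset: f32,
}

impl Eq for OffsetKey {}

impl Hash for OffsetKey {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // f32 does not implement Hash, so hash the raw bit pattern,
        // as GradientStopKey does above.
        self.offset.to_bits().hash(state);
    }
}

fn main() {
    let mut set = HashSet::new();
    set.insert(OffsetKey { offset: 0.5 });
    set.insert(OffsetKey { offset: 0.5 });
    assert_eq!(set.len(), 1); // identical offsets collapse to one entry
}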
+
+// The gradient entry index for the first color stop
+pub const GRADIENT_DATA_FIRST_STOP: usize = 0;
+// The gradient entry index for the last color stop
+pub const GRADIENT_DATA_LAST_STOP: usize = GRADIENT_DATA_SIZE - 1;
+
+// The start of the gradient data table
+pub const GRADIENT_DATA_TABLE_BEGIN: usize = GRADIENT_DATA_FIRST_STOP + 1;
+// The exclusive bound of the gradient data table
+pub const GRADIENT_DATA_TABLE_END: usize = GRADIENT_DATA_LAST_STOP;
+// The number of entries in the gradient data table.
+pub const GRADIENT_DATA_TABLE_SIZE: usize = 128;
+
+// The number of entries in a gradient data: GRADIENT_DATA_TABLE_SIZE + first stop entry + last stop entry
+pub const GRADIENT_DATA_SIZE: usize = GRADIENT_DATA_TABLE_SIZE + 2;
+
+/// An entry in a gradient data table representing a segment of the gradient
+/// color space.
+#[derive(Debug, Copy, Clone)]
+#[repr(C)]
+struct GradientDataEntry {
+ start_color: PremultipliedColorF,
+ end_step: PremultipliedColorF,
+}
+
+impl GradientDataEntry {
+ fn white() -> Self {
+ Self {
+ start_color: PremultipliedColorF::WHITE,
+ end_step: PremultipliedColorF::TRANSPARENT,
+ }
+ }
+}
+
+// TODO(gw): Tidy this up to be a free function / module?
+pub struct GradientGpuBlockBuilder {}
+
+impl GradientGpuBlockBuilder {
+ /// Generate a color ramp filling the indices in [start_idx, end_idx) and interpolating
+ /// from start_color to end_color.
+ fn fill_colors(
+ start_idx: usize,
+ end_idx: usize,
+ start_color: &PremultipliedColorF,
+ end_color: &PremultipliedColorF,
+ entries: &mut [GradientDataEntry; GRADIENT_DATA_SIZE],
+ prev_step: &PremultipliedColorF,
+ ) -> PremultipliedColorF {
+ // Calculate the color difference for individual steps in the ramp.
+ let inv_steps = 1.0 / (end_idx - start_idx) as f32;
+ let mut step = PremultipliedColorF {
+ r: (end_color.r - start_color.r) * inv_steps,
+ g: (end_color.g - start_color.g) * inv_steps,
+ b: (end_color.b - start_color.b) * inv_steps,
+ a: (end_color.a - start_color.a) * inv_steps,
+ };
+ // As a subtle form of compression, we ensure that the step values within
+ // a stop range are identical, and that steps from two different stop
+ // ranges never compare equal. If two different stop ranges would produce
+ // the same step, we modify one of the steps so they compare unequally
+ // between ranges. This allows us to quickly check whether two adjacent
+ // entries belong to the same range by comparing their steps.
+ if step == *prev_step {
+ // Modify the step alpha value as if by nextafter(). The difference
+ // here should be so small as to be unnoticeable, but yet allow it
+ // to compare differently.
+ step.a = f32::from_bits(if step.a == 0.0 { 1 } else { step.a.to_bits() + 1 });
+ }
+
+ let mut cur_color = *start_color;
+
+ // Walk the ramp writing start and end colors for each entry.
+ for index in start_idx .. end_idx {
+ let entry = &mut entries[index];
+ entry.start_color = cur_color;
+ cur_color.r += step.r;
+ cur_color.g += step.g;
+ cur_color.b += step.b;
+ cur_color.a += step.a;
+ entry.end_step = step;
+ }
+
+ step
+ }
+
+ /// Compute an index into the gradient entry table based on a gradient stop offset. This
+ /// function maps offsets from [0, 1] to indices in [GRADIENT_DATA_TABLE_BEGIN, GRADIENT_DATA_TABLE_END].
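+ ///
+ /// For example, with the current constants (a 128-entry table beginning at
+ /// index 1), an offset of 0.0 maps to index 1, 0.5 maps to index 65 and 1.0
+ /// maps to index 129 (GRADIENT_DATA_TABLE_END).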
+ #[inline]
+ fn get_index(offset: f32) -> usize {
+ (offset.max(0.0).min(1.0) * GRADIENT_DATA_TABLE_SIZE as f32 +
+ GRADIENT_DATA_TABLE_BEGIN as f32)
+ .round() as usize
+ }
+
+ // Build the gradient data from the supplied stops, reversing them if necessary.
+ pub fn build(
+ reverse_stops: bool,
+ gpu_buffer_builder: &mut GpuBufferBuilder,
+ src_stops: &[GradientStop],
+ ) -> GpuBufferAddress {
+ // Preconditions (should be ensured by DisplayListBuilder):
+ // * we have at least two stops
+ // * first stop has offset 0.0
+ // * last stop has offset 1.0
+ let mut src_stops = src_stops.iter();
+ let mut cur_color = match src_stops.next() {
+ Some(stop) => {
+ debug_assert_eq!(stop.offset, 0.0);
+ stop.color.premultiplied()
+ }
+ None => {
+ error!("Zero gradient stops found!");
+ PremultipliedColorF::BLACK
+ }
+ };
+
+ // A table of gradient entries, with two colors per entry, that specify the start and end color
+ // within the segment of the gradient space represented by that entry. To lookup a gradient result,
+ // first the entry index is calculated to determine which two colors to interpolate between, then
+ // the offset within that entry bucket is used to interpolate between the two colors in that entry.
+ // This layout is motivated by the fact that if one naively tries to store a single color per entry
+ // and interpolate directly between entries, then hard stops will become softened because the end
+ // color of an entry actually differs from the start color of the next entry, even though they fall
+ // at the same edge offset in the gradient space. Instead, the two-color-per-entry layout preserves
+ // hard stops, as the end color for a given entry can differ from the start color for the following
+ // entry.
+ // Colors are stored in RGBA32F format (in the GPU cache). This table requires the gradient color
+ // stops to be normalized to the range [0, 1]. The first and last entries hold the first and last
+ // color stop colors respectively, while the entries in between hold the interpolated color stop
+ // values for the range [0, 1].
+ // As a further optimization, rather than directly storing the end color, the difference of the end
+ // color from the start color is stored instead, so that an entry can be evaluated more cheaply
+ // with start+diff*offset instead of mix(start,end,offset). Further, the color difference in two
+ // adjacent entries will always be the same if they were generated from the same set of stops/run.
+ // To allow fast searching of the table, if two adjacent entries generated from different sets of
+ // stops (a boundary) have the same difference, the floating-point bits of the difference will be nudged
+ // so that they compare differently without perceptibly altering the interpolation result. This way,
+ // one can quickly scan the table and recover runs just by comparing the color differences of the
+ // current and next entry.
+ // For example, a table with 2 inside entries (startR,startG,startB):(diffR,diffG,diffB) might look
+ // like so:
+ // first | 0.0 | 0.5 | last
+ // (0,0,0):(0,0,0) | (1,0,0):(-1,1,0) | (0,0,1):(0,1,-1) | (1,1,1):(0,0,0)
+ // ^ solid black ^ red to green ^ blue to green ^ solid white
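+ // Conceptually, a lookup over this table then scales the offset into table
+ // space, uses the integer part to select an entry, and evaluates
+ // start_color + end_step * fract, where fract is the fractional part of the
+ // scaled offset (ignoring the clamping behaviour of the first/last entries).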
+ let mut entries = [GradientDataEntry::white(); GRADIENT_DATA_SIZE];
+ let mut prev_step = cur_color;
+ if reverse_stops {
+ // Fill in the first entry (for reversed stops) with the first color stop
+ prev_step = GradientGpuBlockBuilder::fill_colors(
+ GRADIENT_DATA_LAST_STOP,
+ GRADIENT_DATA_LAST_STOP + 1,
+ &cur_color,
+ &cur_color,
+ &mut entries,
+ &prev_step,
+ );
+
+ // Fill in the center of the gradient table, generating a color ramp between each consecutive pair
+ // of gradient stops. Each iteration of the loop fills the indices in [next_idx, cur_idx); taken
+ // together, the iterations fill the indices in [GRADIENT_DATA_TABLE_BEGIN, GRADIENT_DATA_TABLE_END).
+ let mut cur_idx = GRADIENT_DATA_TABLE_END;
+ for next in src_stops {
+ let next_color = next.color.premultiplied();
+ let next_idx = Self::get_index(1.0 - next.offset);
+
+ if next_idx < cur_idx {
+ prev_step = GradientGpuBlockBuilder::fill_colors(
+ next_idx,
+ cur_idx,
+ &next_color,
+ &cur_color,
+ &mut entries,
+ &prev_step,
+ );
+ cur_idx = next_idx;
+ }
+
+ cur_color = next_color;
+ }
+ if cur_idx != GRADIENT_DATA_TABLE_BEGIN {
+ error!("Gradient stops abruptly at {}, auto-completing to white", cur_idx);
+ }
+
+ // Fill in the last entry (for reversed stops) with the last color stop
+ GradientGpuBlockBuilder::fill_colors(
+ GRADIENT_DATA_FIRST_STOP,
+ GRADIENT_DATA_FIRST_STOP + 1,
+ &cur_color,
+ &cur_color,
+ &mut entries,
+ &prev_step,
+ );
+ } else {
+ // Fill in the first entry with the first color stop
+ prev_step = GradientGpuBlockBuilder::fill_colors(
+ GRADIENT_DATA_FIRST_STOP,
+ GRADIENT_DATA_FIRST_STOP + 1,
+ &cur_color,
+ &cur_color,
+ &mut entries,
+ &prev_step,
+ );
+
+ // Fill in the center of the gradient table, generating a color ramp between each consecutive pair
+ // of gradient stops. Each iteration of the loop fills the indices in [cur_idx, next_idx); taken
+ // together, the iterations fill the indices in [GRADIENT_DATA_TABLE_BEGIN, GRADIENT_DATA_TABLE_END).
+ let mut cur_idx = GRADIENT_DATA_TABLE_BEGIN;
+ for next in src_stops {
+ let next_color = next.color.premultiplied();
+ let next_idx = Self::get_index(next.offset);
+
+ if next_idx > cur_idx {
+ prev_step = GradientGpuBlockBuilder::fill_colors(
+ cur_idx,
+ next_idx,
+ &cur_color,
+ &next_color,
+ &mut entries,
+ &prev_step,
+ );
+ cur_idx = next_idx;
+ }
+
+ cur_color = next_color;
+ }
+ if cur_idx != GRADIENT_DATA_TABLE_END {
+ error!("Gradient stops abruptly at {}, auto-completing to white", cur_idx);
+ }
+
+ // Fill in the last entry with the last color stop
+ GradientGpuBlockBuilder::fill_colors(
+ GRADIENT_DATA_LAST_STOP,
+ GRADIENT_DATA_LAST_STOP + 1,
+ &cur_color,
+ &cur_color,
+ &mut entries,
+ &prev_step,
+ );
+ }
+
+ let mut writer = gpu_buffer_builder.write_blocks(2 * entries.len());
+
+ for entry in entries {
+ writer.push_one(entry.start_color);
+ writer.push_one(entry.end_step);
+ }
+
+ writer.finish()
+ }
+}
+
+// If the gradient is not tiled we know that any content outside of the clip will not
+// be shown. Applying the clip early reduces how much of the gradient we
+// render and cache. We do this optimization separately on each axis.
+// Returns the offset between the new and old primitive rect origin, to apply to the
+// gradient parameters that are relative to the primitive origin.
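+// For example, if a non-tiled gradient's prim_rect spans x in [0, 100] but the
+// clip_rect only covers x in [20, 80], the rect is shrunk to [20, 80] and the
+// returned offset has x = -20, so gradient endpoints expressed relative to the
+// primitive origin still land in the same place.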
+pub fn apply_gradient_local_clip(
+ prim_rect: &mut LayoutRect,
+ stretch_size: &LayoutSize,
+ tile_spacing: &LayoutSize,
+ clip_rect: &LayoutRect,
+) -> LayoutVector2D {
+ let w = prim_rect.max.x.min(clip_rect.max.x) - prim_rect.min.x;
+ let h = prim_rect.max.y.min(clip_rect.max.y) - prim_rect.min.y;
+ let is_tiled_x = w > stretch_size.width + tile_spacing.width;
+ let is_tiled_y = h > stretch_size.height + tile_spacing.height;
+
+ let mut offset = LayoutVector2D::new(0.0, 0.0);
+
+ if !is_tiled_x {
+ let diff = (clip_rect.min.x - prim_rect.min.x).min(prim_rect.width());
+ if diff > 0.0 {
+ prim_rect.min.x += diff;
+ offset.x = -diff;
+ }
+
+ let diff = prim_rect.max.x - clip_rect.max.x;
+ if diff > 0.0 {
+ prim_rect.max.x -= diff;
+ }
+ }
+
+ if !is_tiled_y {
+ let diff = (clip_rect.min.y - prim_rect.min.y).min(prim_rect.height());
+ if diff > 0.0 {
+ prim_rect.min.y += diff;
+ offset.y = -diff;
+ }
+
+ let diff = prim_rect.max.y - clip_rect.max.y;
+ if diff > 0.0 {
+ prim_rect.max.y -= diff;
+ }
+ }
+
+ offset
+}
+
+#[test]
+#[cfg(target_pointer_width = "64")]
+fn test_struct_sizes() {
+ use std::mem;
+ // The sizes of these structures are critical for performance on a number of
+ // talos stress tests. If you get a failure here on CI, there are two possibilities:
+ // (a) You made a structure smaller than it currently is. Great work! Update the
+ // test expectations and move on.
+ // (b) You made a structure larger. This is not necessarily a problem, but should only
+ // be done with care, and after checking if talos performance regresses badly.
+ assert_eq!(mem::size_of::<LinearGradient>(), 72, "LinearGradient size changed");
+ assert_eq!(mem::size_of::<LinearGradientTemplate>(), 144, "LinearGradientTemplate size changed");
+ assert_eq!(mem::size_of::<LinearGradientKey>(), 88, "LinearGradientKey size changed");
+
+ assert_eq!(mem::size_of::<RadialGradient>(), 72, "RadialGradient size changed");
+ assert_eq!(mem::size_of::<RadialGradientTemplate>(), 144, "RadialGradientTemplate size changed");
+ assert_eq!(mem::size_of::<RadialGradientKey>(), 96, "RadialGradientKey size changed");
+
+ assert_eq!(mem::size_of::<ConicGradient>(), 72, "ConicGradient size changed");
+ assert_eq!(mem::size_of::<ConicGradientTemplate>(), 144, "ConicGradientTemplate size changed");
+ assert_eq!(mem::size_of::<ConicGradientKey>(), 96, "ConicGradientKey size changed");
+}
diff --git a/gfx/wr/webrender/src/prim_store/gradient/radial.rs b/gfx/wr/webrender/src/prim_store/gradient/radial.rs
new file mode 100644
index 0000000000..f3f20f9a55
--- /dev/null
+++ b/gfx/wr/webrender/src/prim_store/gradient/radial.rs
@@ -0,0 +1,531 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+//! Radial gradients
+//!
+//! Specification: https://drafts.csswg.org/css-images-4/#radial-gradients
+//!
+//! Radial gradients are rendered via cached render tasks and composited with the image brush.
+
+use euclid::{vec2, size2};
+use api::{ExtendMode, GradientStop, PremultipliedColorF, ColorU};
+use api::units::*;
+use crate::scene_building::IsVisible;
+use crate::frame_builder::FrameBuildingState;
+use crate::intern::{Internable, InternDebug, Handle as InternHandle};
+use crate::internal_types::LayoutPrimitiveInfo;
+use crate::prim_store::{BrushSegment, GradientTileRange, InternablePrimitive};
+use crate::prim_store::{PrimitiveInstanceKind, PrimitiveOpacity};
+use crate::prim_store::{PrimKeyCommonData, PrimTemplateCommonData, PrimitiveStore};
+use crate::prim_store::{NinePatchDescriptor, PointKey, SizeKey, FloatKey};
+use crate::render_task::{RenderTask, RenderTaskKind};
+use crate::render_task_graph::RenderTaskId;
+use crate::render_task_cache::{RenderTaskCacheKeyKind, RenderTaskCacheKey, RenderTaskParent};
+use crate::renderer::GpuBufferAddress;
+use crate::picture::{SurfaceIndex};
+
+use std::{hash, ops::{Deref, DerefMut}};
+use super::{
+ stops_and_min_alpha, GradientStopKey, GradientGpuBlockBuilder,
+ apply_gradient_local_clip,
+};
+
+/// Hashable radial gradient parameters, for use during prim interning.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, MallocSizeOf, PartialEq)]
+pub struct RadialGradientParams {
+ pub start_radius: f32,
+ pub end_radius: f32,
+ pub ratio_xy: f32,
+}
+
+impl Eq for RadialGradientParams {}
+
+impl hash::Hash for RadialGradientParams {
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ self.start_radius.to_bits().hash(state);
+ self.end_radius.to_bits().hash(state);
+ self.ratio_xy.to_bits().hash(state);
+ }
+}
+
+/// Identifying key for a radial gradient.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, Eq, PartialEq, Hash, MallocSizeOf)]
+pub struct RadialGradientKey {
+ pub common: PrimKeyCommonData,
+ pub extend_mode: ExtendMode,
+ pub center: PointKey,
+ pub params: RadialGradientParams,
+ pub stretch_size: SizeKey,
+ pub stops: Vec<GradientStopKey>,
+ pub tile_spacing: SizeKey,
+ pub nine_patch: Option<Box<NinePatchDescriptor>>,
+}
+
+impl RadialGradientKey {
+ pub fn new(
+ info: &LayoutPrimitiveInfo,
+ radial_grad: RadialGradient,
+ ) -> Self {
+ RadialGradientKey {
+ common: info.into(),
+ extend_mode: radial_grad.extend_mode,
+ center: radial_grad.center,
+ params: radial_grad.params,
+ stretch_size: radial_grad.stretch_size,
+ stops: radial_grad.stops,
+ tile_spacing: radial_grad.tile_spacing,
+ nine_patch: radial_grad.nine_patch,
+ }
+ }
+}
+
+impl InternDebug for RadialGradientKey {}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(MallocSizeOf)]
+#[derive(Debug)]
+pub struct RadialGradientTemplate {
+ pub common: PrimTemplateCommonData,
+ pub extend_mode: ExtendMode,
+ pub params: RadialGradientParams,
+ pub center: DevicePoint,
+ pub task_size: DeviceIntSize,
+ pub scale: DeviceVector2D,
+ pub stretch_size: LayoutSize,
+ pub tile_spacing: LayoutSize,
+ pub brush_segments: Vec<BrushSegment>,
+ pub stops_opacity: PrimitiveOpacity,
+ pub stops: Vec<GradientStop>,
+ pub src_color: Option<RenderTaskId>,
+}
+
+impl Deref for RadialGradientTemplate {
+ type Target = PrimTemplateCommonData;
+ fn deref(&self) -> &Self::Target {
+ &self.common
+ }
+}
+
+impl DerefMut for RadialGradientTemplate {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.common
+ }
+}
+
+impl From<RadialGradientKey> for RadialGradientTemplate {
+ fn from(item: RadialGradientKey) -> Self {
+ let common = PrimTemplateCommonData::with_key_common(item.common);
+ let mut brush_segments = Vec::new();
+
+ if let Some(ref nine_patch) = item.nine_patch {
+ brush_segments = nine_patch.create_segments(common.prim_rect.size());
+ }
+
+ let (stops, min_alpha) = stops_and_min_alpha(&item.stops);
+
+ // Save opacity of the stops for use in
+ // selecting which pass this gradient
+ // should be drawn in.
+ let stops_opacity = PrimitiveOpacity::from_alpha(min_alpha);
+
+ let mut stretch_size: LayoutSize = item.stretch_size.into();
+ stretch_size.width = stretch_size.width.min(common.prim_rect.width());
+ stretch_size.height = stretch_size.height.min(common.prim_rect.height());
+
+ // Avoid rendering enormous gradients. Radial gradients are mostly made of soft transitions,
+ // so it is unlikely that rendering at a higher resolution than 1024 would produce noticeable
+ // differences, especially with 8 bits per channel.
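+ // For example, a 2048x512 stretch size is rendered as a 1024x512 task with a
+ // scale of (2.0, 1.0); the image brush later composites the task over the
+ // full stretch size.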
+ const MAX_SIZE: f32 = 1024.0;
+ let mut task_size: DeviceSize = stretch_size.cast_unit();
+ let mut scale = vec2(1.0, 1.0);
+ if task_size.width > MAX_SIZE {
+ scale.x = task_size.width / MAX_SIZE;
+ task_size.width = MAX_SIZE;
+ }
+ if task_size.height > MAX_SIZE {
+ scale.y = task_size.height / MAX_SIZE;
+ task_size.height = MAX_SIZE;
+ }
+
+ RadialGradientTemplate {
+ common,
+ center: DevicePoint::new(item.center.x, item.center.y),
+ extend_mode: item.extend_mode,
+ params: item.params,
+ stretch_size,
+ task_size: task_size.ceil().to_i32(),
+ scale,
+ tile_spacing: item.tile_spacing.into(),
+ brush_segments,
+ stops_opacity,
+ stops,
+ src_color: None,
+ }
+ }
+}
+
+impl RadialGradientTemplate {
+ /// Update the GPU cache for a given primitive template. This may be called multiple
+ /// times per frame, by each primitive reference that refers to this interned
+ /// template. The initial request call to the GPU cache ensures that work is only
+ /// done if the cache entry is invalid (due to first use or eviction).
+ pub fn update(
+ &mut self,
+ frame_state: &mut FrameBuildingState,
+ parent_surface: SurfaceIndex,
+ ) {
+ if let Some(mut request) =
+ frame_state.gpu_cache.request(&mut self.common.gpu_cache_handle) {
+ // write_prim_gpu_blocks
+ request.push(PremultipliedColorF::WHITE);
+ request.push(PremultipliedColorF::WHITE);
+ request.push([
+ self.stretch_size.width,
+ self.stretch_size.height,
+ 0.0,
+ 0.0,
+ ]);
+
+ // write_segment_gpu_blocks
+ for segment in &self.brush_segments {
+ // has to match VECS_PER_SEGMENT
+ request.write_segment(
+ segment.local_rect,
+ segment.extra_data,
+ );
+ }
+ }
+
+ let task_size = self.task_size;
+ let cache_key = RadialGradientCacheKey {
+ size: task_size,
+ center: PointKey { x: self.center.x, y: self.center.y },
+ scale: PointKey { x: self.scale.x, y: self.scale.y },
+ start_radius: FloatKey(self.params.start_radius),
+ end_radius: FloatKey(self.params.end_radius),
+ ratio_xy: FloatKey(self.params.ratio_xy),
+ extend_mode: self.extend_mode,
+ stops: self.stops.iter().map(|stop| (*stop).into()).collect(),
+ };
+
+ let task_id = frame_state.resource_cache.request_render_task(
+ RenderTaskCacheKey {
+ size: task_size,
+ kind: RenderTaskCacheKeyKind::RadialGradient(cache_key),
+ },
+ frame_state.gpu_cache,
+ frame_state.frame_gpu_data,
+ frame_state.rg_builder,
+ None,
+ false,
+ RenderTaskParent::Surface(parent_surface),
+ &mut frame_state.surface_builder,
+ |rg_builder, gpu_buffer_builder| {
+ let stops = GradientGpuBlockBuilder::build(
+ false,
+ gpu_buffer_builder,
+ &self.stops,
+ );
+
+ rg_builder.add().init(RenderTask::new_dynamic(
+ task_size,
+ RenderTaskKind::RadialGradient(RadialGradientTask {
+ extend_mode: self.extend_mode,
+ center: self.center,
+ scale: self.scale,
+ params: self.params.clone(),
+ stops,
+ }),
+ ))
+ }
+ );
+
+ self.src_color = Some(task_id);
+
+ // Tile spacing is always handled by decomposing into separate draw calls so the
+ // primitive opacity is equivalent to stops opacity. This might change to being
+ // set to non-opaque in the presence of tile spacing if/when tile spacing is handled
+ // in the same way as with the image primitive.
+ self.opacity = self.stops_opacity;
+ }
+}
+
+pub type RadialGradientDataHandle = InternHandle<RadialGradient>;
+
+#[derive(Debug, MallocSizeOf)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct RadialGradient {
+ pub extend_mode: ExtendMode,
+ pub center: PointKey,
+ pub params: RadialGradientParams,
+ pub stretch_size: SizeKey,
+ pub stops: Vec<GradientStopKey>,
+ pub tile_spacing: SizeKey,
+ pub nine_patch: Option<Box<NinePatchDescriptor>>,
+}
+
+impl Internable for RadialGradient {
+ type Key = RadialGradientKey;
+ type StoreData = RadialGradientTemplate;
+ type InternData = ();
+ const PROFILE_COUNTER: usize = crate::profiler::INTERNED_RADIAL_GRADIENTS;
+}
+
+impl InternablePrimitive for RadialGradient {
+ fn into_key(
+ self,
+ info: &LayoutPrimitiveInfo,
+ ) -> RadialGradientKey {
+ RadialGradientKey::new(info, self)
+ }
+
+ fn make_instance_kind(
+ _key: RadialGradientKey,
+ data_handle: RadialGradientDataHandle,
+ _prim_store: &mut PrimitiveStore,
+ _reference_frame_relative_offset: LayoutVector2D,
+ ) -> PrimitiveInstanceKind {
+ PrimitiveInstanceKind::RadialGradient {
+ data_handle,
+ visible_tiles_range: GradientTileRange::empty(),
+ }
+ }
+}
+
+impl IsVisible for RadialGradient {
+ fn is_visible(&self) -> bool {
+ true
+ }
+}
+
+#[derive(Debug)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct RadialGradientTask {
+ pub extend_mode: ExtendMode,
+ pub center: DevicePoint,
+ pub scale: DeviceVector2D,
+ pub params: RadialGradientParams,
+ pub stops: GpuBufferAddress,
+}
+
+impl RadialGradientTask {
+ pub fn to_instance(&self, target_rect: &DeviceIntRect) -> RadialGradientInstance {
+ RadialGradientInstance {
+ task_rect: target_rect.to_f32(),
+ center: self.center,
+ scale: self.scale,
+ start_radius: self.params.start_radius,
+ end_radius: self.params.end_radius,
+ ratio_xy: self.params.ratio_xy,
+ extend_mode: self.extend_mode as i32,
+ gradient_stops_address: self.stops.as_int(),
+ }
+ }
+}
+
+/// The per-instance shader input of a radial gradient render task.
+///
+/// Must match the RADIAL_GRADIENT instance description in renderer/vertex.rs.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[repr(C)]
+#[derive(Clone, Debug)]
+pub struct RadialGradientInstance {
+ pub task_rect: DeviceRect,
+ pub center: DevicePoint,
+ pub scale: DeviceVector2D,
+ pub start_radius: f32,
+ pub end_radius: f32,
+ pub ratio_xy: f32,
+ pub extend_mode: i32,
+ pub gradient_stops_address: i32,
+}
+
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct RadialGradientCacheKey {
+ pub size: DeviceIntSize,
+ pub center: PointKey,
+ pub scale: PointKey,
+ pub start_radius: FloatKey,
+ pub end_radius: FloatKey,
+ pub ratio_xy: FloatKey,
+ pub extend_mode: ExtendMode,
+ pub stops: Vec<GradientStopKey>,
+}
+
+/// Avoid invoking the radial gradient shader on large areas where the color is
+/// constant.
+///
+/// If the extend mode is set to clamp, the "interesting" part
+/// of the gradient is only in the bounds of the gradient's ellipse, and the rest
+/// is the color of the last gradient stop.
+///
+/// Sometimes we run into radial gradients with a small radius compared to the
+/// primitive bounds, which means a large area of the primitive is a constant color.
+/// This function tries to detect that, potentially shrinks the gradient primitive to
+/// only the useful part and, if needed, inserts solid color primitives around the
+/// gradient where parts of it have been removed.
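+///
+/// For example, for a clamped gradient whose ellipse only covers the middle of a
+/// large primitive and whose last stop is opaque, the primitive is shrunk to
+/// roughly the ellipse's bounding box and the trimmed margins are re-emitted via
+/// `solid_parts` as rectangles of the last stop's color.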
+pub fn optimize_radial_gradient(
+ prim_rect: &mut LayoutRect,
+ stretch_size: &mut LayoutSize,
+ center: &mut LayoutPoint,
+ tile_spacing: &mut LayoutSize,
+ clip_rect: &LayoutRect,
+ radius: LayoutSize,
+ end_offset: f32,
+ extend_mode: ExtendMode,
+ stops: &[GradientStopKey],
+ solid_parts: &mut dyn FnMut(&LayoutRect, ColorU),
+) {
+ let offset = apply_gradient_local_clip(
+ prim_rect,
+ stretch_size,
+ tile_spacing,
+ clip_rect
+ );
+
+ *center += offset;
+
+ if extend_mode != ExtendMode::Clamp || stops.is_empty() {
+ return;
+ }
+
+ // Bounding box of the "interesting" part of the gradient.
+ let min = prim_rect.min + center.to_vector() - radius.to_vector() * end_offset;
+ let max = prim_rect.min + center.to_vector() + radius.to_vector() * end_offset;
+
+ // The (non-repeated) gradient primitive rect.
+ let gradient_rect = LayoutRect::from_origin_and_size(
+ prim_rect.min,
+ *stretch_size,
+ );
+
+ // Compute how much internal margin there is between the primitive bounds and the
+ // gradient's bounding rect (these margin areas are a constant color).
+ let mut l = (min.x - gradient_rect.min.x).max(0.0).floor();
+ let mut t = (min.y - gradient_rect.min.y).max(0.0).floor();
+ let mut r = (gradient_rect.max.x - max.x).max(0.0).floor();
+ let mut b = (gradient_rect.max.y - max.y).max(0.0).floor();
+
+ let is_tiled = prim_rect.width() > stretch_size.width + tile_spacing.width
+ || prim_rect.height() > stretch_size.height + tile_spacing.height;
+
+ let bg_color = stops.last().unwrap().color;
+
+ if bg_color.a != 0 && is_tiled {
+ // If the primitive has repetitions, it's not enough to insert solid rects around it,
+ // so bail out.
+ return;
+ }
+
+ // If the background is fully transparent, shrinking the primitive bounds as much as possible
+ // is always a win. If the background is not transparent, we have to insert solid rectangles
+ // around the shrunk parts.
+ // If the background is transparent and the primitive is tiled, the optimization may introduce
+ // tile spacing which forces the tiling to be manually decomposed.
+ // Either way, don't bother optimizing unless it saves a significant number of pixels.
+ if bg_color.a != 0 || (is_tiled && tile_spacing.is_empty()) {
+ let threshold = 128.0;
+ if l < threshold { l = 0.0 }
+ if t < threshold { t = 0.0 }
+ if r < threshold { r = 0.0 }
+ if b < threshold { b = 0.0 }
+ }
+
+ if l + t + r + b == 0.0 {
+ // No adjustment to make.
+ return;
+ }
+
+ // Insert solid rectangles around the gradient, in the places where the primitive will be
+ // shrunk.
+ if bg_color.a != 0 {
+ if l != 0.0 && t != 0.0 {
+ let solid_rect = LayoutRect::from_origin_and_size(
+ gradient_rect.min,
+ size2(l, t),
+ );
+ solid_parts(&solid_rect, bg_color);
+ }
+
+ if l != 0.0 && b != 0.0 {
+ let solid_rect = LayoutRect::from_origin_and_size(
+ gradient_rect.bottom_left() - vec2(0.0, b),
+ size2(l, b),
+ );
+ solid_parts(&solid_rect, bg_color);
+ }
+
+ if t != 0.0 && r != 0.0 {
+ let solid_rect = LayoutRect::from_origin_and_size(
+ gradient_rect.top_right() - vec2(r, 0.0),
+ size2(r, t),
+ );
+ solid_parts(&solid_rect, bg_color);
+ }
+
+ if r != 0.0 && b != 0.0 {
+ let solid_rect = LayoutRect::from_origin_and_size(
+ gradient_rect.bottom_right() - vec2(r, b),
+ size2(r, b),
+ );
+ solid_parts(&solid_rect, bg_color);
+ }
+
+ if l != 0.0 {
+ let solid_rect = LayoutRect::from_origin_and_size(
+ gradient_rect.min + vec2(0.0, t),
+ size2(l, gradient_rect.height() - t - b),
+ );
+ solid_parts(&solid_rect, bg_color);
+ }
+
+ if r != 0.0 {
+ let solid_rect = LayoutRect::from_origin_and_size(
+ gradient_rect.top_right() + vec2(-r, t),
+ size2(r, gradient_rect.height() - t - b),
+ );
+ solid_parts(&solid_rect, bg_color);
+ }
+
+ if t != 0.0 {
+ let solid_rect = LayoutRect::from_origin_and_size(
+ gradient_rect.min + vec2(l, 0.0),
+ size2(gradient_rect.width() - l - r, t),
+ );
+ solid_parts(&solid_rect, bg_color);
+ }
+
+ if b != 0.0 {
+ let solid_rect = LayoutRect::from_origin_and_size(
+ gradient_rect.bottom_left() + vec2(l, -b),
+ size2(gradient_rect.width() - l - r, b),
+ );
+ solid_parts(&solid_rect, bg_color);
+ }
+ }
+
+ // Shrink the gradient primitive.
+
+ prim_rect.min.x += l;
+ prim_rect.min.y += t;
+
+ stretch_size.width -= l + r;
+ stretch_size.height -= b + t;
+
+ center.x -= l;
+ center.y -= t;
+
+ tile_spacing.width += l + r;
+ tile_spacing.height += t + b;
+}
diff --git a/gfx/wr/webrender/src/prim_store/image.rs b/gfx/wr/webrender/src/prim_store/image.rs
new file mode 100644
index 0000000000..9e1edc7f41
--- /dev/null
+++ b/gfx/wr/webrender/src/prim_store/image.rs
@@ -0,0 +1,682 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use api::{
+ AlphaType, ColorDepth, ColorF, ColorU, ExternalImageData, ExternalImageType,
+ ImageKey as ApiImageKey, ImageBufferKind, ImageRendering, PremultipliedColorF,
+ RasterSpace, Shadow, YuvColorSpace, ColorRange, YuvFormat,
+};
+use api::units::*;
+use crate::scene_building::{CreateShadow, IsVisible};
+use crate::frame_builder::{FrameBuildingContext, FrameBuildingState};
+use crate::gpu_cache::{GpuCache, GpuDataRequest};
+use crate::intern::{Internable, InternDebug, Handle as InternHandle};
+use crate::internal_types::{LayoutPrimitiveInfo};
+use crate::picture::SurfaceIndex;
+use crate::prim_store::{
+ EdgeAaSegmentMask, PrimitiveInstanceKind,
+ PrimitiveOpacity, PrimKey,
+ PrimTemplate, PrimTemplateCommonData, PrimitiveStore, SegmentInstanceIndex,
+ SizeKey, InternablePrimitive,
+};
+use crate::render_target::RenderTargetKind;
+use crate::render_task_graph::RenderTaskId;
+use crate::render_task::RenderTask;
+use crate::render_task_cache::{
+ RenderTaskCacheKey, RenderTaskCacheKeyKind, RenderTaskParent
+};
+use crate::resource_cache::{ImageRequest, ImageProperties, ResourceCache};
+use crate::util::pack_as_float;
+use crate::visibility::{PrimitiveVisibility, compute_conservative_visible_rect};
+use crate::spatial_tree::SpatialNodeIndex;
+use crate::image_tiling;
+
+#[derive(Debug)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct VisibleImageTile {
+ pub src_color: RenderTaskId,
+ pub edge_flags: EdgeAaSegmentMask,
+ pub local_rect: LayoutRect,
+ pub local_clip_rect: LayoutRect,
+}
+
+// Key that identifies a unique (partial) image that is being
+// stored in the render task cache.
+#[derive(Debug, Copy, Clone, Eq, Hash, PartialEq)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct ImageCacheKey {
+ pub request: ImageRequest,
+ pub texel_rect: Option<DeviceIntRect>,
+}
+
+/// Instance specific fields for an image primitive. These are
+/// currently stored in a separate array to avoid bloating the
+/// size of PrimitiveInstance. In the future, we should be able
+/// to remove this and store the information inline, by:
+/// (a) Removing opacity collapse / binding support completely.
+/// Once we have general picture caching, we don't need this.
+/// (b) Change visible_tiles to use Storage in the primitive
+/// scratch buffer. This will reduce the size of the
+/// visible_tiles field here, and save memory allocation
+/// when image tiling is used. I've left it as a Vec for
+/// now to reduce the number of changes, and because image
+/// tiling is very rare on real pages.
+#[derive(Debug)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+pub struct ImageInstance {
+ pub segment_instance_index: SegmentInstanceIndex,
+ pub tight_local_clip_rect: LayoutRect,
+ pub visible_tiles: Vec<VisibleImageTile>,
+ pub src_color: Option<RenderTaskId>,
+}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, Eq, PartialEq, MallocSizeOf, Hash)]
+pub struct Image {
+ pub key: ApiImageKey,
+ pub stretch_size: SizeKey,
+ pub tile_spacing: SizeKey,
+ pub color: ColorU,
+ pub image_rendering: ImageRendering,
+ pub alpha_type: AlphaType,
+}
+
+pub type ImageKey = PrimKey<Image>;
+
+impl ImageKey {
+ pub fn new(
+ info: &LayoutPrimitiveInfo,
+ image: Image,
+ ) -> Self {
+ ImageKey {
+ common: info.into(),
+ kind: image,
+ }
+ }
+}
+
+impl InternDebug for ImageKey {}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, MallocSizeOf)]
+pub struct ImageData {
+ pub key: ApiImageKey,
+ pub stretch_size: LayoutSize,
+ pub tile_spacing: LayoutSize,
+ pub color: ColorF,
+ pub image_rendering: ImageRendering,
+ pub alpha_type: AlphaType,
+}
+
+impl From<Image> for ImageData {
+ fn from(image: Image) -> Self {
+ ImageData {
+ key: image.key,
+ color: image.color.into(),
+ stretch_size: image.stretch_size.into(),
+ tile_spacing: image.tile_spacing.into(),
+ image_rendering: image.image_rendering,
+ alpha_type: image.alpha_type,
+ }
+ }
+}
+
+impl ImageData {
+ /// Update the GPU cache for a given primitive template. This may be called multiple
+ /// times per frame, by each primitive reference that refers to this interned
+ /// template. The initial request call to the GPU cache ensures that work is only
+ /// done if the cache entry is invalid (due to first use or eviction).
+ pub fn update(
+ &mut self,
+ common: &mut PrimTemplateCommonData,
+ image_instance: &mut ImageInstance,
+ parent_surface: SurfaceIndex,
+ prim_spatial_node_index: SpatialNodeIndex,
+ frame_state: &mut FrameBuildingState,
+ frame_context: &FrameBuildingContext,
+ visibility: &mut PrimitiveVisibility,
+ ) {
+
+ let image_properties = frame_state
+ .resource_cache
+ .get_image_properties(self.key);
+
+ common.opacity = match &image_properties {
+ Some(properties) => {
+ if properties.descriptor.is_opaque() {
+ PrimitiveOpacity::from_alpha(self.color.a)
+ } else {
+ PrimitiveOpacity::translucent()
+ }
+ }
+ None => PrimitiveOpacity::opaque(),
+ };
+
+ if self.stretch_size.width >= common.prim_rect.width() &&
+ self.stretch_size.height >= common.prim_rect.height() {
+
+ common.may_need_repetition = false;
+ }
+
+ let request = ImageRequest {
+ key: self.key,
+ rendering: self.image_rendering,
+ tile: None,
+ };
+
+ match image_properties {
+ // Non-tiled (most common) path.
+ Some(ImageProperties { tiling: None, ref descriptor, ref external_image, .. }) => {
+ let mut size = frame_state.resource_cache.request_image(
+ request,
+ frame_state.gpu_cache,
+ );
+
+ let orig_task_id = frame_state.rg_builder.add().init(
+ RenderTask::new_image(size, request)
+ );
+
+ // On some devices we cannot render from an ImageBufferKind::TextureExternal
+ // source using most shaders, so we must perform a copy to a regular texture first.
+ let task_id = if frame_context.fb_config.external_images_require_copy
+ && matches!(
+ external_image,
+ Some(ExternalImageData {
+ image_type: ExternalImageType::TextureHandle(
+ ImageBufferKind::TextureExternal
+ ),
+ ..
+ })
+ )
+ {
+ let target_kind = if descriptor.format.bytes_per_pixel() == 1 {
+ RenderTargetKind::Alpha
+ } else {
+ RenderTargetKind::Color
+ };
+
+ let task_id = RenderTask::new_scaling(
+ orig_task_id,
+ frame_state.rg_builder,
+ target_kind,
+ size
+ );
+
+ frame_state.surface_builder.add_child_render_task(
+ task_id,
+ frame_state.rg_builder,
+ );
+
+ task_id
+ } else {
+ orig_task_id
+ };
+
+ // Every frame, for cached items, we need to request the render
+ // task cache item. The closure will be invoked the first
+ // time through, and any time the render task output has been
+ // evicted from the texture cache.
+ if self.tile_spacing == LayoutSize::zero() {
+ // Most common case.
+ image_instance.src_color = Some(task_id);
+ } else {
+ let padding = DeviceIntSideOffsets::new(
+ 0,
+ (self.tile_spacing.width * size.width as f32 / self.stretch_size.width) as i32,
+ (self.tile_spacing.height * size.height as f32 / self.stretch_size.height) as i32,
+ 0,
+ );
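+ // The tile spacing is expressed in layout space; multiplying by
+ // size / stretch_size converts it to the image's device pixels. For example,
+ // a 10px layout spacing on a 100px-wide stretch of a 50px-wide image becomes
+ // 5 device pixels of padding on the right (and likewise for the bottom).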
+
+ size.width += padding.horizontal();
+ size.height += padding.vertical();
+
+ if padding != DeviceIntSideOffsets::zero() {
+ common.opacity = PrimitiveOpacity::translucent();
+ }
+
+ let image_cache_key = ImageCacheKey {
+ request,
+ texel_rect: None,
+ };
+ let target_kind = if descriptor.format.bytes_per_pixel() == 1 {
+ RenderTargetKind::Alpha
+ } else {
+ RenderTargetKind::Color
+ };
+
+ // Request a pre-rendered image task.
+ let cached_task_handle = frame_state.resource_cache.request_render_task(
+ RenderTaskCacheKey {
+ size,
+ kind: RenderTaskCacheKeyKind::Image(image_cache_key),
+ },
+ frame_state.gpu_cache,
+ frame_state.frame_gpu_data,
+ frame_state.rg_builder,
+ None,
+ descriptor.is_opaque(),
+ RenderTaskParent::Surface(parent_surface),
+ &mut frame_state.surface_builder,
+ |rg_builder, _| {
+ // Create a task to blit from the texture cache to
+ // a normal transient render task surface.
+ // TODO: figure out if/when we can do a blit instead.
+ let cache_to_target_task_id = RenderTask::new_scaling_with_padding(
+ task_id,
+ rg_builder,
+ target_kind,
+ size,
+ padding,
+ );
+
+ // Create a task to blit the rect from the child render
+ // task above back into the right spot in the persistent
+ // render target cache.
+ RenderTask::new_blit(
+ size,
+ cache_to_target_task_id,
+ rg_builder,
+ )
+ }
+ );
+
+ image_instance.src_color = Some(cached_task_handle);
+ }
+ }
+ // Tiled image path.
+ Some(ImageProperties { tiling: Some(tile_size), visible_rect, .. }) => {
+ // we'll have a source handle per visible tile instead.
+ image_instance.src_color = None;
+
+ image_instance.visible_tiles.clear();
+ // TODO: rename the blob's visible_rect into something that doesn't conflict
+ // with the terminology we use during culling since it's not really the same
+ // thing.
+ let active_rect = visible_rect;
+
+ // Tighten the clip rect because decomposing the repeated image can
+ // produce primitives that are partially covering the original image
+ // rect and we want to clip these extra parts out.
+ let tight_clip_rect = visibility
+ .clip_chain
+ .local_clip_rect
+ .intersection(&common.prim_rect).unwrap();
+ image_instance.tight_local_clip_rect = tight_clip_rect;
+
+ let visible_rect = compute_conservative_visible_rect(
+ &visibility.clip_chain,
+ frame_state.current_dirty_region().combined,
+ prim_spatial_node_index,
+ frame_context.spatial_tree,
+ );
+
+ let base_edge_flags = edge_flags_for_tile_spacing(&self.tile_spacing);
+
+ let stride = self.stretch_size + self.tile_spacing;
+
+ // We are performing the decomposition on the CPU here, no need to
+ // have it in the shader.
+ common.may_need_repetition = false;
+
+ let repetitions = image_tiling::repetitions(
+ &common.prim_rect,
+ &visible_rect,
+ stride,
+ );
+
+ for image_tiling::Repetition { origin, edge_flags } in repetitions {
+ let edge_flags = base_edge_flags | edge_flags;
+
+ let layout_image_rect = LayoutRect::from_origin_and_size(
+ origin,
+ self.stretch_size,
+ );
+
+ let tiles = image_tiling::tiles(
+ &layout_image_rect,
+ &visible_rect,
+ &active_rect,
+ tile_size as i32,
+ );
+
+ for tile in tiles {
+ let request = request.with_tile(tile.offset);
+ let size = frame_state.resource_cache.request_image(
+ request,
+ frame_state.gpu_cache,
+ );
+
+ let task_id = frame_state.rg_builder.add().init(
+ RenderTask::new_image(size, request)
+ );
+
+ image_instance.visible_tiles.push(VisibleImageTile {
+ src_color: task_id,
+ edge_flags: tile.edge_flags & edge_flags,
+ local_rect: tile.rect,
+ local_clip_rect: tight_clip_rect,
+ });
+ }
+ }
+
+ if image_instance.visible_tiles.is_empty() {
+ // Mark as invisible
+ visibility.reset();
+ }
+ }
+ None => {
+ image_instance.src_color = None;
+ }
+ }
+
+ if let Some(mut request) = frame_state.gpu_cache.request(&mut common.gpu_cache_handle) {
+ self.write_prim_gpu_blocks(&mut request);
+ }
+ }
+
+ pub fn write_prim_gpu_blocks(&self, request: &mut GpuDataRequest) {
+ // Images are drawn as a white color, modulated by the total
+ // opacity coming from any collapsed property bindings.
+ // Size has to match `VECS_PER_SPECIFIC_BRUSH` from `brush_image.glsl` exactly.
+ request.push(self.color.premultiplied());
+ request.push(PremultipliedColorF::WHITE);
+ request.push([
+ self.stretch_size.width + self.tile_spacing.width,
+ self.stretch_size.height + self.tile_spacing.height,
+ 0.0,
+ 0.0,
+ ]);
+ }
+}
+
+fn edge_flags_for_tile_spacing(tile_spacing: &LayoutSize) -> EdgeAaSegmentMask {
+ let mut flags = EdgeAaSegmentMask::empty();
+
+ if tile_spacing.width > 0.0 {
+ flags |= EdgeAaSegmentMask::LEFT | EdgeAaSegmentMask::RIGHT;
+ }
+ if tile_spacing.height > 0.0 {
+ flags |= EdgeAaSegmentMask::TOP | EdgeAaSegmentMask::BOTTOM;
+ }
+
+ flags
+}
+
+pub type ImageTemplate = PrimTemplate<ImageData>;
+
+impl From<ImageKey> for ImageTemplate {
+ fn from(image: ImageKey) -> Self {
+ let common = PrimTemplateCommonData::with_key_common(image.common);
+
+ ImageTemplate {
+ common,
+ kind: image.kind.into(),
+ }
+ }
+}
+
+pub type ImageDataHandle = InternHandle<Image>;
+
+impl Internable for Image {
+ type Key = ImageKey;
+ type StoreData = ImageTemplate;
+ type InternData = ();
+ const PROFILE_COUNTER: usize = crate::profiler::INTERNED_IMAGES;
+}
+
+impl InternablePrimitive for Image {
+ fn into_key(
+ self,
+ info: &LayoutPrimitiveInfo,
+ ) -> ImageKey {
+ ImageKey::new(info, self)
+ }
+
+ fn make_instance_kind(
+ _key: ImageKey,
+ data_handle: ImageDataHandle,
+ prim_store: &mut PrimitiveStore,
+ _reference_frame_relative_offset: LayoutVector2D,
+ ) -> PrimitiveInstanceKind {
+ // TODO(gw): Refactor this to not need a separate image
+ // instance (see ImageInstance struct).
+ let image_instance_index = prim_store.images.push(ImageInstance {
+ segment_instance_index: SegmentInstanceIndex::INVALID,
+ tight_local_clip_rect: LayoutRect::zero(),
+ visible_tiles: Vec::new(),
+ src_color: None,
+ });
+
+ PrimitiveInstanceKind::Image {
+ data_handle,
+ image_instance_index,
+ is_compositor_surface: false,
+ }
+ }
+}
+
+impl CreateShadow for Image {
+ fn create_shadow(
+ &self,
+ shadow: &Shadow,
+ _: bool,
+ _: RasterSpace,
+ ) -> Self {
+ Image {
+ tile_spacing: self.tile_spacing,
+ stretch_size: self.stretch_size,
+ key: self.key,
+ image_rendering: self.image_rendering,
+ alpha_type: self.alpha_type,
+ color: shadow.color.into(),
+ }
+ }
+}
+
+impl IsVisible for Image {
+ fn is_visible(&self) -> bool {
+ true
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, Eq, MallocSizeOf, PartialEq, Hash)]
+pub struct YuvImage {
+ pub color_depth: ColorDepth,
+ pub yuv_key: [ApiImageKey; 3],
+ pub format: YuvFormat,
+ pub color_space: YuvColorSpace,
+ pub color_range: ColorRange,
+ pub image_rendering: ImageRendering,
+}
+
+pub type YuvImageKey = PrimKey<YuvImage>;
+
+impl YuvImageKey {
+ pub fn new(
+ info: &LayoutPrimitiveInfo,
+ yuv_image: YuvImage,
+ ) -> Self {
+ YuvImageKey {
+ common: info.into(),
+ kind: yuv_image,
+ }
+ }
+}
+
+impl InternDebug for YuvImageKey {}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(MallocSizeOf)]
+pub struct YuvImageData {
+ pub color_depth: ColorDepth,
+ pub yuv_key: [ApiImageKey; 3],
+ pub src_yuv: [Option<RenderTaskId>; 3],
+ pub format: YuvFormat,
+ pub color_space: YuvColorSpace,
+ pub color_range: ColorRange,
+ pub image_rendering: ImageRendering,
+}
+
+impl From<YuvImage> for YuvImageData {
+ fn from(image: YuvImage) -> Self {
+ YuvImageData {
+ color_depth: image.color_depth,
+ yuv_key: image.yuv_key,
+ src_yuv: [None, None, None],
+ format: image.format,
+ color_space: image.color_space,
+ color_range: image.color_range,
+ image_rendering: image.image_rendering,
+ }
+ }
+}
+
+impl YuvImageData {
+ /// Update the GPU cache for a given primitive template. This may be called multiple
+ /// times per frame, by each primitive reference that refers to this interned
+ /// template. The initial request call to the GPU cache ensures that work is only
+ /// done if the cache entry is invalid (due to first use or eviction).
+ pub fn update(
+ &mut self,
+ common: &mut PrimTemplateCommonData,
+ frame_state: &mut FrameBuildingState,
+ ) {
+
+ self.src_yuv = [ None, None, None ];
+
+ let channel_num = self.format.get_plane_num();
+ debug_assert!(channel_num <= 3);
+ for channel in 0 .. channel_num {
+ let request = ImageRequest {
+ key: self.yuv_key[channel],
+ rendering: self.image_rendering,
+ tile: None,
+ };
+
+ let size = frame_state.resource_cache.request_image(
+ request,
+ frame_state.gpu_cache,
+ );
+
+ let task_id = frame_state.rg_builder.add().init(
+ RenderTask::new_image(size, request)
+ );
+
+ self.src_yuv[channel] = Some(task_id);
+ }
+
+ if let Some(mut request) = frame_state.gpu_cache.request(&mut common.gpu_cache_handle) {
+ self.write_prim_gpu_blocks(&mut request);
+ };
+
+ // YUV images never have transparency
+ common.opacity = PrimitiveOpacity::opaque();
+ }
+
+ pub fn request_resources(
+ &mut self,
+ resource_cache: &mut ResourceCache,
+ gpu_cache: &mut GpuCache,
+ ) {
+ let channel_num = self.format.get_plane_num();
+ debug_assert!(channel_num <= 3);
+ for channel in 0 .. channel_num {
+ resource_cache.request_image(
+ ImageRequest {
+ key: self.yuv_key[channel],
+ rendering: self.image_rendering,
+ tile: None,
+ },
+ gpu_cache,
+ );
+ }
+ }
+
+ pub fn write_prim_gpu_blocks(&self, request: &mut GpuDataRequest) {
+ let ranged_color_space = self.color_space.with_range(self.color_range);
+ request.push([
+ pack_as_float(self.color_depth.bit_depth()),
+ pack_as_float(ranged_color_space as u32),
+ pack_as_float(self.format as u32),
+ 0.0
+ ]);
+ }
+}
+
+pub type YuvImageTemplate = PrimTemplate<YuvImageData>;
+
+impl From<YuvImageKey> for YuvImageTemplate {
+ fn from(image: YuvImageKey) -> Self {
+ let common = PrimTemplateCommonData::with_key_common(image.common);
+
+ YuvImageTemplate {
+ common,
+ kind: image.kind.into(),
+ }
+ }
+}
+
+pub type YuvImageDataHandle = InternHandle<YuvImage>;
+
+impl Internable for YuvImage {
+ type Key = YuvImageKey;
+ type StoreData = YuvImageTemplate;
+ type InternData = ();
+ const PROFILE_COUNTER: usize = crate::profiler::INTERNED_YUV_IMAGES;
+}
+
+impl InternablePrimitive for YuvImage {
+ fn into_key(
+ self,
+ info: &LayoutPrimitiveInfo,
+ ) -> YuvImageKey {
+ YuvImageKey::new(info, self)
+ }
+
+ fn make_instance_kind(
+ _key: YuvImageKey,
+ data_handle: YuvImageDataHandle,
+ _prim_store: &mut PrimitiveStore,
+ _reference_frame_relative_offset: LayoutVector2D,
+ ) -> PrimitiveInstanceKind {
+ PrimitiveInstanceKind::YuvImage {
+ data_handle,
+ segment_instance_index: SegmentInstanceIndex::INVALID,
+ is_compositor_surface: false,
+ }
+ }
+}
+
+impl IsVisible for YuvImage {
+ fn is_visible(&self) -> bool {
+ true
+ }
+}
+
+#[test]
+#[cfg(target_pointer_width = "64")]
+fn test_struct_sizes() {
+ use std::mem;
+ // The sizes of these structures are critical for performance on a number of
+ // talos stress tests. If you get a failure here on CI, there are two possibilities:
+ // (a) You made a structure smaller than it currently is. Great work! Update the
+ // test expectations and move on.
+ // (b) You made a structure larger. This is not necessarily a problem, but should only
+ // be done with care, and after checking if talos performance regresses badly.
+ assert_eq!(mem::size_of::<Image>(), 32, "Image size changed");
+ assert_eq!(mem::size_of::<ImageTemplate>(), 72, "ImageTemplate size changed");
+ assert_eq!(mem::size_of::<ImageKey>(), 52, "ImageKey size changed");
+ assert_eq!(mem::size_of::<YuvImage>(), 32, "YuvImage size changed");
+ assert_eq!(mem::size_of::<YuvImageTemplate>(), 84, "YuvImageTemplate size changed");
+ assert_eq!(mem::size_of::<YuvImageKey>(), 52, "YuvImageKey size changed");
+}
diff --git a/gfx/wr/webrender/src/prim_store/interned.rs b/gfx/wr/webrender/src/prim_store/interned.rs
new file mode 100644
index 0000000000..f536b50b09
--- /dev/null
+++ b/gfx/wr/webrender/src/prim_store/interned.rs
@@ -0,0 +1,14 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// list of all interned primitives to match enumerate_interners!
+
+pub use crate::prim_store::backdrop::{BackdropCapture, BackdropRender};
+pub use crate::prim_store::borders::{ImageBorder, NormalBorderPrim};
+pub use crate::prim_store::image::{Image, YuvImage};
+pub use crate::prim_store::line_dec::{LineDecoration};
+pub use crate::prim_store::gradient::{LinearGradient, RadialGradient, ConicGradient};
+pub use crate::prim_store::picture::Picture;
+pub use crate::prim_store::text_run::TextRun;
+
diff --git a/gfx/wr/webrender/src/prim_store/line_dec.rs b/gfx/wr/webrender/src/prim_store/line_dec.rs
new file mode 100644
index 0000000000..496bab7569
--- /dev/null
+++ b/gfx/wr/webrender/src/prim_store/line_dec.rs
@@ -0,0 +1,257 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use api::{
+ ColorF, ColorU, RasterSpace,
+ LineOrientation, LineStyle, PremultipliedColorF, Shadow,
+};
+use api::units::*;
+use crate::scene_building::{CreateShadow, IsVisible};
+use crate::frame_builder::{FrameBuildingState};
+use crate::gpu_cache::GpuDataRequest;
+use crate::intern;
+use crate::internal_types::LayoutPrimitiveInfo;
+use crate::prim_store::{
+ PrimKey, PrimTemplate, PrimTemplateCommonData,
+ InternablePrimitive, PrimitiveStore,
+};
+use crate::prim_store::PrimitiveInstanceKind;
+
+/// Maximum resolution in device pixels at which line decorations are rasterized.
+pub const MAX_LINE_DECORATION_RESOLUTION: u32 = 4096;
+
+#[derive(Clone, Debug, Hash, MallocSizeOf, PartialEq, Eq)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct LineDecorationCacheKey {
+ pub style: LineStyle,
+ pub orientation: LineOrientation,
+ pub wavy_line_thickness: Au,
+ pub size: LayoutSizeAu,
+}
+
+/// Identifying key for a line decoration.
+#[derive(Clone, Debug, Hash, MallocSizeOf, PartialEq, Eq)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct LineDecoration {
+ // If the cache_key is Some(..) it is a line decoration
+ // that relies on a render task (e.g. wavy). If the
+ // cache key is None, it uses a fast path to draw the
+ // line decoration as a solid rect.
+ pub cache_key: Option<LineDecorationCacheKey>,
+ pub color: ColorU,
+}
+
+pub type LineDecorationKey = PrimKey<LineDecoration>;
+
+impl LineDecorationKey {
+ pub fn new(
+ info: &LayoutPrimitiveInfo,
+ line_dec: LineDecoration,
+ ) -> Self {
+ LineDecorationKey {
+ common: info.into(),
+ kind: line_dec,
+ }
+ }
+}
+
+impl intern::InternDebug for LineDecorationKey {}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(MallocSizeOf)]
+pub struct LineDecorationData {
+ pub cache_key: Option<LineDecorationCacheKey>,
+ pub color: ColorF,
+}
+
+impl LineDecorationData {
+ /// Update the GPU cache for a given primitive template. This may be called multiple
+ /// times per frame, by each primitive reference that refers to this interned
+ /// template. The initial request call to the GPU cache ensures that work is only
+ /// done if the cache entry is invalid (due to first use or eviction).
+ pub fn update(
+ &mut self,
+ common: &mut PrimTemplateCommonData,
+ frame_state: &mut FrameBuildingState,
+ ) {
+ if let Some(ref mut request) = frame_state.gpu_cache.request(&mut common.gpu_cache_handle) {
+ self.write_prim_gpu_blocks(request);
+ }
+ }
+
+ fn write_prim_gpu_blocks(
+ &self,
+ request: &mut GpuDataRequest
+ ) {
+ match self.cache_key.as_ref() {
+ Some(cache_key) => {
+ request.push(self.color.premultiplied());
+ request.push(PremultipliedColorF::WHITE);
+ request.push([
+ cache_key.size.width.to_f32_px(),
+ cache_key.size.height.to_f32_px(),
+ 0.0,
+ 0.0,
+ ]);
+ }
+ None => {
+ request.push(self.color.premultiplied());
+ }
+ }
+ }
+}
+
+pub type LineDecorationTemplate = PrimTemplate<LineDecorationData>;
+
+impl From<LineDecorationKey> for LineDecorationTemplate {
+ fn from(line_dec: LineDecorationKey) -> Self {
+ let common = PrimTemplateCommonData::with_key_common(line_dec.common);
+ LineDecorationTemplate {
+ common,
+ kind: LineDecorationData {
+ cache_key: line_dec.kind.cache_key,
+ color: line_dec.kind.color.into(),
+ }
+ }
+ }
+}
+
+pub type LineDecorationDataHandle = intern::Handle<LineDecoration>;
+
+impl intern::Internable for LineDecoration {
+ type Key = LineDecorationKey;
+ type StoreData = LineDecorationTemplate;
+ type InternData = ();
+ const PROFILE_COUNTER: usize = crate::profiler::INTERNED_LINE_DECORATIONS;
+}
+
+impl InternablePrimitive for LineDecoration {
+ fn into_key(
+ self,
+ info: &LayoutPrimitiveInfo,
+ ) -> LineDecorationKey {
+ LineDecorationKey::new(
+ info,
+ self,
+ )
+ }
+
+ fn make_instance_kind(
+ _key: LineDecorationKey,
+ data_handle: LineDecorationDataHandle,
+ _: &mut PrimitiveStore,
+ _reference_frame_relative_offset: LayoutVector2D,
+ ) -> PrimitiveInstanceKind {
+ PrimitiveInstanceKind::LineDecoration {
+ data_handle,
+ render_task: None,
+ }
+ }
+}
+
+impl CreateShadow for LineDecoration {
+ fn create_shadow(
+ &self,
+ shadow: &Shadow,
+ _: bool,
+ _: RasterSpace,
+ ) -> Self {
+ LineDecoration {
+ color: shadow.color.into(),
+ cache_key: self.cache_key.clone(),
+ }
+ }
+}
+
+impl IsVisible for LineDecoration {
+ fn is_visible(&self) -> bool {
+ self.color.a > 0
+ }
+}
+
+/// Choose the decoration mask tile size for a given line.
+///
+/// Given a line with overall size `rect_size` and the given `orientation`,
+/// return the dimensions of a single mask tile for the decoration pattern
+/// described by `style` and `wavy_line_thickness`.
+///
+/// If `style` is `Solid`, no mask tile is necessary; return `None`. The other
+/// styles each have their own characteristic periods of repetition, so for each
+/// one, this function returns a `LayoutSize` with the right aspect ratio and
+/// whose specific size is convenient for the `cs_line_decoration.glsl` fragment
+/// shader to work with. The shader uses a local coordinate space in which the
+/// tile fills a rectangle with one corner at the origin, and with the size this
+/// function returns.
+///
+/// The returned size is not necessarily in pixels; device scaling and other
+/// concerns can still affect the actual task size.
+///
+/// Regardless of whether `orientation` is `Vertical` or `Horizontal`, the
+/// `width` and `height` of the returned size are always horizontal and
+/// vertical, respectively.
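+///
+/// For example, a horizontal dotted line 4px tall yields an 8x4 tile (one dot
+/// diameter plus one diameter of gap), which is then repeated along the line.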
+pub fn get_line_decoration_size(
+ rect_size: &LayoutSize,
+ orientation: LineOrientation,
+ style: LineStyle,
+ wavy_line_thickness: f32,
+) -> Option<LayoutSize> {
+ let h = match orientation {
+ LineOrientation::Horizontal => rect_size.height,
+ LineOrientation::Vertical => rect_size.width,
+ };
+
+ // TODO(gw): The formulae below are based on the existing gecko and line
+ // shader code. They give reasonable results for most inputs,
+ // but could definitely do with a detailed pass to get better
+ // quality on a wider range of inputs!
+ // See nsCSSRendering::PaintDecorationLine in Gecko.
+
+ let (parallel, perpendicular) = match style {
+ LineStyle::Solid => {
+ return None;
+ }
+ LineStyle::Dashed => {
+ let dash_length = (3.0 * h).min(64.0).max(1.0);
+
+ (2.0 * dash_length, 4.0)
+ }
+ LineStyle::Dotted => {
+ let diameter = h.min(64.0).max(1.0);
+ let period = 2.0 * diameter;
+
+ (period, diameter)
+ }
+ LineStyle::Wavy => {
+ let line_thickness = wavy_line_thickness.max(1.0);
+ let slope_length = h - line_thickness;
+ let flat_length = ((line_thickness - 1.0) * 2.0).max(1.0);
+ let approx_period = 2.0 * (slope_length + flat_length);
+
+ (approx_period, h)
+ }
+ };
+
+ Some(match orientation {
+ LineOrientation::Horizontal => LayoutSize::new(parallel, perpendicular),
+ LineOrientation::Vertical => LayoutSize::new(perpendicular, parallel),
+ })
+}
+
+#[test]
+#[cfg(target_pointer_width = "64")]
+fn test_struct_sizes() {
+ use std::mem;
+ // The sizes of these structures are critical for performance on a number of
+ // talos stress tests. If you get a failure here on CI, there are two possibilities:
+ // (a) You made a structure smaller than it currently is. Great work! Update the
+ // test expectations and move on.
+ // (b) You made a structure larger. This is not necessarily a problem, but should only
+ // be done with care, and after checking if talos performance regresses badly.
+ assert_eq!(mem::size_of::<LineDecoration>(), 20, "LineDecoration size changed");
+ assert_eq!(mem::size_of::<LineDecorationTemplate>(), 60, "LineDecorationTemplate size changed");
+ assert_eq!(mem::size_of::<LineDecorationKey>(), 40, "LineDecorationKey size changed");
+}
diff --git a/gfx/wr/webrender/src/prim_store/mod.rs b/gfx/wr/webrender/src/prim_store/mod.rs
new file mode 100644
index 0000000000..7deecac93c
--- /dev/null
+++ b/gfx/wr/webrender/src/prim_store/mod.rs
@@ -0,0 +1,1448 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use api::{BorderRadius, ClipMode, ColorF, ColorU, RasterSpace};
+use api::{ImageRendering, RepeatMode, PrimitiveFlags};
+use api::{PremultipliedColorF, PropertyBinding, Shadow};
+use api::{PrimitiveKeyKind, FillRule, POLYGON_CLIP_VERTEX_MAX};
+use api::units::*;
+use euclid::{SideOffsets2D, Size2D};
+use malloc_size_of::MallocSizeOf;
+use crate::clip::ClipLeafId;
+use crate::segment::EdgeAaSegmentMask;
+use crate::border::BorderSegmentCacheKey;
+use crate::debug_item::{DebugItem, DebugMessage};
+use crate::debug_colors;
+use crate::scene_building::{CreateShadow, IsVisible};
+use crate::frame_builder::FrameBuildingState;
+use glyph_rasterizer::GlyphKey;
+use crate::gpu_cache::{GpuCacheAddress, GpuCacheHandle, GpuDataRequest};
+use crate::gpu_types::{BrushFlags};
+use crate::intern;
+use crate::picture::PicturePrimitive;
+use crate::render_task_graph::RenderTaskId;
+use crate::resource_cache::ImageProperties;
+use crate::scene::SceneProperties;
+use std::{hash, ops, u32, usize};
+use crate::util::Recycler;
+use crate::internal_types::{FastHashSet, LayoutPrimitiveInfo};
+use crate::visibility::PrimitiveVisibility;
+
+pub mod backdrop;
+pub mod borders;
+pub mod gradient;
+pub mod image;
+pub mod line_dec;
+pub mod picture;
+pub mod text_run;
+pub mod interned;
+
+mod storage;
+
+use backdrop::{BackdropCaptureDataHandle, BackdropRenderDataHandle};
+use borders::{ImageBorderDataHandle, NormalBorderDataHandle};
+use gradient::{LinearGradientPrimitive, LinearGradientDataHandle, RadialGradientDataHandle, ConicGradientDataHandle};
+use image::{ImageDataHandle, ImageInstance, YuvImageDataHandle};
+use line_dec::LineDecorationDataHandle;
+use picture::PictureDataHandle;
+use text_run::{TextRunDataHandle, TextRunPrimitive};
+
+pub const VECS_PER_SEGMENT: usize = 2;
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Copy, Clone, MallocSizeOf)]
+pub struct PrimitiveOpacity {
+ pub is_opaque: bool,
+}
+
+impl PrimitiveOpacity {
+ pub fn opaque() -> PrimitiveOpacity {
+ PrimitiveOpacity { is_opaque: true }
+ }
+
+ pub fn translucent() -> PrimitiveOpacity {
+ PrimitiveOpacity { is_opaque: false }
+ }
+
+ pub fn from_alpha(alpha: f32) -> PrimitiveOpacity {
+ PrimitiveOpacity {
+ is_opaque: alpha >= 1.0,
+ }
+ }
+}
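+
+// Illustrative sketch (editor's note, not part of the upstream source): only a
+// fully opaque alpha yields an opaque primitive.
+//
+//     assert!(PrimitiveOpacity::from_alpha(1.0).is_opaque);
+//     assert!(!PrimitiveOpacity::from_alpha(0.99).is_opaque);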
+
+/// For external images, it's not possible to know the
+/// UV coords of the image (or the image data itself)
+/// until the render thread receives the frame and issues
+/// callbacks to the client application. For external
+/// images that are visible, a DeferredResolve is created
+/// that is stored in the frame. This allows the render
+/// thread to iterate this list and update any changed
+/// texture data and update the UV rect. Any filtering
+/// is handled externally for NativeTexture external
+/// images.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct DeferredResolve {
+ pub address: GpuCacheAddress,
+ pub image_properties: ImageProperties,
+ pub rendering: ImageRendering,
+}
+
+#[derive(Debug, Copy, Clone, PartialEq)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+pub struct ClipTaskIndex(pub u32);
+
+impl ClipTaskIndex {
+ pub const INVALID: ClipTaskIndex = ClipTaskIndex(0);
+}
+
+#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, MallocSizeOf, Ord, PartialOrd)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct PictureIndex(pub usize);
+
+impl PictureIndex {
+ pub const INVALID: PictureIndex = PictureIndex(!0);
+}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Copy, Debug, Clone, MallocSizeOf, PartialEq)]
+pub struct RectangleKey {
+ pub x0: f32,
+ pub y0: f32,
+ pub x1: f32,
+ pub y1: f32,
+}
+
+impl RectangleKey {
+ pub fn intersects(&self, other: &Self) -> bool {
+ self.x0 < other.x1
+ && other.x0 < self.x1
+ && self.y0 < other.y1
+ && other.y0 < self.y1
+ }
+}
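+
+// Illustrative sketch (editor's note, not part of the upstream source): the
+// strict comparisons mean rectangles that only touch along an edge do not
+// count as intersecting.
+//
+//     let a = RectangleKey { x0: 0.0, y0: 0.0, x1: 10.0, y1: 10.0 };
+//     let b = RectangleKey { x0: 10.0, y0: 0.0, x1: 20.0, y1: 10.0 };
+//     let c = RectangleKey { x0: 5.0, y0: 5.0, x1: 15.0, y1: 15.0 };
+//     assert!(!a.intersects(&b)); // edge-adjacent only
+//     assert!(a.intersects(&c));  // genuine overlap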
+
+impl Eq for RectangleKey {}
+
+impl hash::Hash for RectangleKey {
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ self.x0.to_bits().hash(state);
+ self.y0.to_bits().hash(state);
+ self.x1.to_bits().hash(state);
+ self.y1.to_bits().hash(state);
+ }
+}
+
+impl From<RectangleKey> for LayoutRect {
+ fn from(key: RectangleKey) -> LayoutRect {
+ LayoutRect {
+ min: LayoutPoint::new(key.x0, key.y0),
+ max: LayoutPoint::new(key.x1, key.y1),
+ }
+ }
+}
+
+impl From<RectangleKey> for WorldRect {
+ fn from(key: RectangleKey) -> WorldRect {
+ WorldRect {
+ min: WorldPoint::new(key.x0, key.y0),
+ max: WorldPoint::new(key.x1, key.y1),
+ }
+ }
+}
+
+impl From<LayoutRect> for RectangleKey {
+ fn from(rect: LayoutRect) -> RectangleKey {
+ RectangleKey {
+ x0: rect.min.x,
+ y0: rect.min.y,
+ x1: rect.max.x,
+ y1: rect.max.y,
+ }
+ }
+}
+
+impl From<PictureRect> for RectangleKey {
+ fn from(rect: PictureRect) -> RectangleKey {
+ RectangleKey {
+ x0: rect.min.x,
+ y0: rect.min.y,
+ x1: rect.max.x,
+ y1: rect.max.y,
+ }
+ }
+}
+
+impl From<WorldRect> for RectangleKey {
+ fn from(rect: WorldRect) -> RectangleKey {
+ RectangleKey {
+ x0: rect.min.x,
+ y0: rect.min.y,
+ x1: rect.max.x,
+ y1: rect.max.y,
+ }
+ }
+}
+
+/// To create a fixed-size representation of a polygon, we use a fixed
+/// number of points. Our initialization method restricts us to values
+/// <= 32. If our constant POLYGON_CLIP_VERTEX_MAX is > 32, the Rust
+/// compiler will complain.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Copy, Debug, Clone, Hash, MallocSizeOf, PartialEq)]
+pub struct PolygonKey {
+ pub point_count: u8,
+ pub points: [PointKey; POLYGON_CLIP_VERTEX_MAX],
+ pub fill_rule: FillRule,
+}
+
+impl PolygonKey {
+ pub fn new(
+ points_layout: &Vec<LayoutPoint>,
+ fill_rule: FillRule,
+ ) -> Self {
+ // We have to fill fixed-size arrays with data from a Vec.
+ // We'll do this by initializing the arrays to known-good
+ // values then overwriting those values as long as our
+ // iterator provides values.
+ let mut points: [PointKey; POLYGON_CLIP_VERTEX_MAX] = [PointKey { x: 0.0, y: 0.0}; POLYGON_CLIP_VERTEX_MAX];
+
+ let mut point_count: u8 = 0;
+ for (src, dest) in points_layout.iter().zip(points.iter_mut()) {
+ *dest = (*src as LayoutPoint).into();
+ point_count = point_count + 1;
+ }
+
+ PolygonKey {
+ point_count,
+ points,
+ fill_rule,
+ }
+ }
+}
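+
+// Illustrative sketch (editor's note, not part of the upstream source): unused
+// slots keep the (0.0, 0.0) fill value and are ignored via point_count.
+//
+//     let pts = vec![
+//         LayoutPoint::new(0.0, 0.0),
+//         LayoutPoint::new(10.0, 0.0),
+//         LayoutPoint::new(5.0, 8.0),
+//     ];
+//     let key = PolygonKey::new(&pts, FillRule::Nonzero);
+//     assert_eq!(key.point_count, 3);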
+
+impl Eq for PolygonKey {}
+
+/// A hashable SideOffset2D that can be used in primitive keys.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, MallocSizeOf, PartialEq)]
+pub struct SideOffsetsKey {
+ pub top: f32,
+ pub right: f32,
+ pub bottom: f32,
+ pub left: f32,
+}
+
+impl Eq for SideOffsetsKey {}
+
+impl hash::Hash for SideOffsetsKey {
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ self.top.to_bits().hash(state);
+ self.right.to_bits().hash(state);
+ self.bottom.to_bits().hash(state);
+ self.left.to_bits().hash(state);
+ }
+}
+
+impl From<SideOffsetsKey> for LayoutSideOffsets {
+ fn from(key: SideOffsetsKey) -> LayoutSideOffsets {
+ LayoutSideOffsets::new(
+ key.top,
+ key.right,
+ key.bottom,
+ key.left,
+ )
+ }
+}
+
+impl<U> From<SideOffsets2D<f32, U>> for SideOffsetsKey {
+ fn from(offsets: SideOffsets2D<f32, U>) -> SideOffsetsKey {
+ SideOffsetsKey {
+ top: offsets.top,
+ right: offsets.right,
+ bottom: offsets.bottom,
+ left: offsets.left,
+ }
+ }
+}
+
+/// A hashable size for using as a key during primitive interning.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Copy, Debug, Clone, MallocSizeOf, PartialEq)]
+pub struct SizeKey {
+ w: f32,
+ h: f32,
+}
+
+impl Eq for SizeKey {}
+
+impl hash::Hash for SizeKey {
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ self.w.to_bits().hash(state);
+ self.h.to_bits().hash(state);
+ }
+}
+
+impl From<SizeKey> for LayoutSize {
+ fn from(key: SizeKey) -> LayoutSize {
+ LayoutSize::new(key.w, key.h)
+ }
+}
+
+impl<U> From<Size2D<f32, U>> for SizeKey {
+ fn from(size: Size2D<f32, U>) -> SizeKey {
+ SizeKey {
+ w: size.width,
+ h: size.height,
+ }
+ }
+}
+
+/// A hashable vec for using as a key during primitive interning.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Copy, Debug, Clone, MallocSizeOf, PartialEq)]
+pub struct VectorKey {
+ pub x: f32,
+ pub y: f32,
+}
+
+impl Eq for VectorKey {}
+
+impl hash::Hash for VectorKey {
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ self.x.to_bits().hash(state);
+ self.y.to_bits().hash(state);
+ }
+}
+
+impl From<VectorKey> for LayoutVector2D {
+ fn from(key: VectorKey) -> LayoutVector2D {
+ LayoutVector2D::new(key.x, key.y)
+ }
+}
+
+impl From<VectorKey> for WorldVector2D {
+ fn from(key: VectorKey) -> WorldVector2D {
+ WorldVector2D::new(key.x, key.y)
+ }
+}
+
+impl From<LayoutVector2D> for VectorKey {
+ fn from(vec: LayoutVector2D) -> VectorKey {
+ VectorKey {
+ x: vec.x,
+ y: vec.y,
+ }
+ }
+}
+
+impl From<WorldVector2D> for VectorKey {
+ fn from(vec: WorldVector2D) -> VectorKey {
+ VectorKey {
+ x: vec.x,
+ y: vec.y,
+ }
+ }
+}
+
+/// A hashable point for using as a key during primitive interning.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Copy, Clone, MallocSizeOf, PartialEq)]
+pub struct PointKey {
+ pub x: f32,
+ pub y: f32,
+}
+
+impl Eq for PointKey {}
+
+impl hash::Hash for PointKey {
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ self.x.to_bits().hash(state);
+ self.y.to_bits().hash(state);
+ }
+}
+
+impl From<PointKey> for LayoutPoint {
+ fn from(key: PointKey) -> LayoutPoint {
+ LayoutPoint::new(key.x, key.y)
+ }
+}
+
+impl From<LayoutPoint> for PointKey {
+ fn from(p: LayoutPoint) -> PointKey {
+ PointKey {
+ x: p.x,
+ y: p.y,
+ }
+ }
+}
+
+impl From<PicturePoint> for PointKey {
+ fn from(p: PicturePoint) -> PointKey {
+ PointKey {
+ x: p.x,
+ y: p.y,
+ }
+ }
+}
+
+impl From<WorldPoint> for PointKey {
+ fn from(p: WorldPoint) -> PointKey {
+ PointKey {
+ x: p.x,
+ y: p.y,
+ }
+ }
+}
+
+/// A hashable float for using as a key during primitive interning.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Copy, Clone, MallocSizeOf, PartialEq)]
+pub struct FloatKey(f32);
+
+impl Eq for FloatKey {}
+
+impl hash::Hash for FloatKey {
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ self.0.to_bits().hash(state);
+ }
+}
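+
+// Note (editor's sketch, not part of the upstream source): these key types all
+// hash the raw bit pattern of their floats, so 0.0 and -0.0 hash differently
+// even though they compare equal. For interning this can at most cost a
+// duplicate interned entry; it does not affect rendering.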
+
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, Eq, MallocSizeOf, PartialEq, Hash)]
+pub struct PrimKeyCommonData {
+ pub flags: PrimitiveFlags,
+ pub prim_rect: RectangleKey,
+}
+
+impl From<&LayoutPrimitiveInfo> for PrimKeyCommonData {
+ fn from(info: &LayoutPrimitiveInfo) -> Self {
+ PrimKeyCommonData {
+ flags: info.flags,
+ prim_rect: info.rect.into(),
+ }
+ }
+}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, Eq, MallocSizeOf, PartialEq, Hash)]
+pub struct PrimKey<T: MallocSizeOf> {
+ pub common: PrimKeyCommonData,
+ pub kind: T,
+}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, Eq, MallocSizeOf, PartialEq, Hash)]
+pub struct PrimitiveKey {
+ pub common: PrimKeyCommonData,
+ pub kind: PrimitiveKeyKind,
+}
+
+impl PrimitiveKey {
+ pub fn new(
+ info: &LayoutPrimitiveInfo,
+ kind: PrimitiveKeyKind,
+ ) -> Self {
+ PrimitiveKey {
+ common: info.into(),
+ kind,
+ }
+ }
+}
+
+impl intern::InternDebug for PrimitiveKey {}
+
+/// The shared information for a given primitive. This is interned and retained
+/// both across frames and display lists, by comparing the matching PrimitiveKey.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(MallocSizeOf)]
+pub enum PrimitiveTemplateKind {
+ Rectangle {
+ color: PropertyBinding<ColorF>,
+ },
+ Clear,
+}
+
+impl PrimitiveTemplateKind {
+ /// Write any GPU blocks for the primitive template to the given request object.
+ pub fn write_prim_gpu_blocks(
+ &self,
+ request: &mut GpuDataRequest,
+ scene_properties: &SceneProperties,
+ ) {
+ match *self {
+ PrimitiveTemplateKind::Clear => {
+ // Opaque black with operator dest out
+ request.push(PremultipliedColorF::BLACK);
+ }
+ PrimitiveTemplateKind::Rectangle { ref color, .. } => {
+ request.push(scene_properties.resolve_color(color).premultiplied())
+ }
+ }
+ }
+}
+
+/// Construct the primitive template data from a primitive key. This
+/// is invoked when a primitive key is created and the interner
+/// doesn't currently contain a primitive with this key.
+impl From<PrimitiveKeyKind> for PrimitiveTemplateKind {
+ fn from(kind: PrimitiveKeyKind) -> Self {
+ match kind {
+ PrimitiveKeyKind::Clear => {
+ PrimitiveTemplateKind::Clear
+ }
+ PrimitiveKeyKind::Rectangle { color, .. } => {
+ PrimitiveTemplateKind::Rectangle {
+ color: color.into(),
+ }
+ }
+ }
+ }
+}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(MallocSizeOf)]
+#[derive(Debug)]
+pub struct PrimTemplateCommonData {
+ pub flags: PrimitiveFlags,
+ pub may_need_repetition: bool,
+ pub prim_rect: LayoutRect,
+ pub opacity: PrimitiveOpacity,
+ /// The GPU cache handle for a primitive template. Since this structure
+ /// is retained across display lists by interning, this GPU cache handle
+ /// also remains valid, which reduces the number of updates to the GPU
+ /// cache when a new display list is processed.
+ pub gpu_cache_handle: GpuCacheHandle,
+ /// Specifies the edges that are *allowed* to have anti-aliasing.
+ /// In other words, EdgeAaSegmentMask::all() does not necessarily mean all edges will
+ /// be anti-aliased, only that they could be.
+ ///
+ /// Use this to force-disable anti-aliasing on the edges of the primitive.
+ pub edge_aa_mask: EdgeAaSegmentMask,
+}
+
+impl PrimTemplateCommonData {
+ pub fn with_key_common(common: PrimKeyCommonData) -> Self {
+ PrimTemplateCommonData {
+ flags: common.flags,
+ may_need_repetition: true,
+ prim_rect: common.prim_rect.into(),
+ gpu_cache_handle: GpuCacheHandle::new(),
+ opacity: PrimitiveOpacity::translucent(),
+ edge_aa_mask: EdgeAaSegmentMask::all(),
+ }
+ }
+}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(MallocSizeOf)]
+pub struct PrimTemplate<T> {
+ pub common: PrimTemplateCommonData,
+ pub kind: T,
+}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(MallocSizeOf)]
+pub struct PrimitiveTemplate {
+ pub common: PrimTemplateCommonData,
+ pub kind: PrimitiveTemplateKind,
+}
+
+impl ops::Deref for PrimitiveTemplate {
+ type Target = PrimTemplateCommonData;
+ fn deref(&self) -> &Self::Target {
+ &self.common
+ }
+}
+
+impl ops::DerefMut for PrimitiveTemplate {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.common
+ }
+}
+
+impl From<PrimitiveKey> for PrimitiveTemplate {
+ fn from(item: PrimitiveKey) -> Self {
+ PrimitiveTemplate {
+ common: PrimTemplateCommonData::with_key_common(item.common),
+ kind: item.kind.into(),
+ }
+ }
+}
+
+impl PrimitiveTemplate {
+ /// Update the GPU cache for a given primitive template. This may be called multiple
+ /// times per frame, by each primitive reference that refers to this interned
+ /// template. The initial request call to the GPU cache ensures that work is only
+ /// done if the cache entry is invalid (due to first use or eviction).
+ pub fn update(
+ &mut self,
+ frame_state: &mut FrameBuildingState,
+ scene_properties: &SceneProperties,
+ ) {
+ if let Some(mut request) = frame_state.gpu_cache.request(&mut self.common.gpu_cache_handle) {
+ self.kind.write_prim_gpu_blocks(&mut request, scene_properties);
+ }
+
+ self.opacity = match self.kind {
+ PrimitiveTemplateKind::Clear => {
+ PrimitiveOpacity::translucent()
+ }
+ PrimitiveTemplateKind::Rectangle { ref color, .. } => {
+ PrimitiveOpacity::from_alpha(scene_properties.resolve_color(color).a)
+ }
+ };
+ }
+}
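+
+// Illustrative sketch (editor's note, not part of the upstream source): each
+// primitive instance that references this interned template can call update()
+// during frame building; the gpu_cache.request() guard only writes blocks when
+// the cached handle is invalid (first use or eviction).
+//
+//     template.update(frame_state, scene_properties);
+//     if template.opacity.is_opaque { /* candidate for opaque batching */ }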
+
+type PrimitiveDataHandle = intern::Handle<PrimitiveKeyKind>;
+
+impl intern::Internable for PrimitiveKeyKind {
+ type Key = PrimitiveKey;
+ type StoreData = PrimitiveTemplate;
+ type InternData = ();
+ const PROFILE_COUNTER: usize = crate::profiler::INTERNED_PRIMITIVES;
+}
+
+impl InternablePrimitive for PrimitiveKeyKind {
+ fn into_key(
+ self,
+ info: &LayoutPrimitiveInfo,
+ ) -> PrimitiveKey {
+ PrimitiveKey::new(info, self)
+ }
+
+ fn make_instance_kind(
+ key: PrimitiveKey,
+ data_handle: PrimitiveDataHandle,
+ prim_store: &mut PrimitiveStore,
+ _reference_frame_relative_offset: LayoutVector2D,
+ ) -> PrimitiveInstanceKind {
+ match key.kind {
+ PrimitiveKeyKind::Clear => {
+ PrimitiveInstanceKind::Clear {
+ data_handle
+ }
+ }
+ PrimitiveKeyKind::Rectangle { color, .. } => {
+ let color_binding_index = match color {
+ PropertyBinding::Binding(..) => {
+ prim_store.color_bindings.push(color)
+ }
+ PropertyBinding::Value(..) => ColorBindingIndex::INVALID,
+ };
+ PrimitiveInstanceKind::Rectangle {
+ data_handle,
+ segment_instance_index: SegmentInstanceIndex::INVALID,
+ color_binding_index,
+ }
+ }
+ }
+ }
+}
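+
+// Illustrative sketch (editor's note, not part of the upstream source) of the
+// interning flow driven by SceneBuilder::add_primitive for this kind:
+//
+//     let key = kind.into_key(&info);                  // PrimitiveKey
+//     // let data_handle = interner.intern(&key, ...); // dedupes across display lists
+//     // let instance_kind = PrimitiveKeyKind::make_instance_kind(
+//     //     key, data_handle, &mut prim_store, offset);
+//
+// A Rectangle whose color is an animated binding additionally pushes that
+// binding into prim_store.color_bindings so it can be resolved each frame.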
+
+#[derive(Debug, MallocSizeOf)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct VisibleMaskImageTile {
+ pub tile_offset: TileOffset,
+ pub tile_rect: LayoutRect,
+}
+
+#[derive(Debug)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+pub struct VisibleGradientTile {
+ pub handle: GpuCacheHandle,
+ pub local_rect: LayoutRect,
+ pub local_clip_rect: LayoutRect,
+}
+
+/// Information about how to cache a border segment,
+/// along with the current render task cache entry.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, MallocSizeOf)]
+pub struct BorderSegmentInfo {
+ pub local_task_size: LayoutSize,
+ pub cache_key: BorderSegmentCacheKey,
+}
+
+/// Represents the visibility state of a segment (wrt clip masks).
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[derive(Debug, Clone)]
+pub enum ClipMaskKind {
+ /// The segment has a clip mask, specified by the render task.
+ Mask(RenderTaskId),
+ /// The segment has no clip mask.
+ None,
+ /// The segment is made invisible / clipped completely.
+ Clipped,
+}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, MallocSizeOf)]
+pub struct BrushSegment {
+ pub local_rect: LayoutRect,
+ pub may_need_clip_mask: bool,
+ pub edge_flags: EdgeAaSegmentMask,
+ pub extra_data: [f32; 4],
+ pub brush_flags: BrushFlags,
+}
+
+impl BrushSegment {
+ pub fn new(
+ local_rect: LayoutRect,
+ may_need_clip_mask: bool,
+ edge_flags: EdgeAaSegmentMask,
+ extra_data: [f32; 4],
+ brush_flags: BrushFlags,
+ ) -> Self {
+ Self {
+ local_rect,
+ may_need_clip_mask,
+ edge_flags,
+ extra_data,
+ brush_flags,
+ }
+ }
+}
+
+#[derive(Debug, Clone)]
+#[repr(C)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+struct ClipRect {
+ rect: LayoutRect,
+ mode: f32,
+}
+
+#[derive(Debug, Clone)]
+#[repr(C)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+struct ClipCorner {
+ rect: LayoutRect,
+ outer_radius_x: f32,
+ outer_radius_y: f32,
+ inner_radius_x: f32,
+ inner_radius_y: f32,
+}
+
+impl ClipCorner {
+ fn uniform(rect: LayoutRect, outer_radius: f32, inner_radius: f32) -> ClipCorner {
+ ClipCorner {
+ rect,
+ outer_radius_x: outer_radius,
+ outer_radius_y: outer_radius,
+ inner_radius_x: inner_radius,
+ inner_radius_y: inner_radius,
+ }
+ }
+}
+
+#[derive(Debug, Clone)]
+#[repr(C)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct ClipData {
+ rect: ClipRect,
+ top_left: ClipCorner,
+ top_right: ClipCorner,
+ bottom_left: ClipCorner,
+ bottom_right: ClipCorner,
+}
+
+impl ClipData {
+ pub fn rounded_rect(size: LayoutSize, radii: &BorderRadius, mode: ClipMode) -> ClipData {
+ // TODO(gw): For simplicity, keep most of the clip GPU structs the
+ // same as they were, even though the origin is now always
+ // zero, since they are in the clip's local space. In future,
+ // we could reduce the GPU cache size of ClipData.
+ let rect = LayoutRect::from_size(size);
+
+ ClipData {
+ rect: ClipRect {
+ rect,
+ mode: mode as u32 as f32,
+ },
+ top_left: ClipCorner {
+ rect: LayoutRect::from_origin_and_size(
+ LayoutPoint::new(rect.min.x, rect.min.y),
+ LayoutSize::new(radii.top_left.width, radii.top_left.height),
+ ),
+ outer_radius_x: radii.top_left.width,
+ outer_radius_y: radii.top_left.height,
+ inner_radius_x: 0.0,
+ inner_radius_y: 0.0,
+ },
+ top_right: ClipCorner {
+ rect: LayoutRect::from_origin_and_size(
+ LayoutPoint::new(
+ rect.max.x - radii.top_right.width,
+ rect.min.y,
+ ),
+ LayoutSize::new(radii.top_right.width, radii.top_right.height),
+ ),
+ outer_radius_x: radii.top_right.width,
+ outer_radius_y: radii.top_right.height,
+ inner_radius_x: 0.0,
+ inner_radius_y: 0.0,
+ },
+ bottom_left: ClipCorner {
+ rect: LayoutRect::from_origin_and_size(
+ LayoutPoint::new(
+ rect.min.x,
+ rect.max.y - radii.bottom_left.height,
+ ),
+ LayoutSize::new(radii.bottom_left.width, radii.bottom_left.height),
+ ),
+ outer_radius_x: radii.bottom_left.width,
+ outer_radius_y: radii.bottom_left.height,
+ inner_radius_x: 0.0,
+ inner_radius_y: 0.0,
+ },
+ bottom_right: ClipCorner {
+ rect: LayoutRect::from_origin_and_size(
+ LayoutPoint::new(
+ rect.max.x - radii.bottom_right.width,
+ rect.max.y - radii.bottom_right.height,
+ ),
+ LayoutSize::new(radii.bottom_right.width, radii.bottom_right.height),
+ ),
+ outer_radius_x: radii.bottom_right.width,
+ outer_radius_y: radii.bottom_right.height,
+ inner_radius_x: 0.0,
+ inner_radius_y: 0.0,
+ },
+ }
+ }
+
+ pub fn uniform(size: LayoutSize, radius: f32, mode: ClipMode) -> ClipData {
+ // TODO(gw): For simplicity, keep most of the clip GPU structs the
+ // same as they were, even though the origin is now always
+ // zero, since they are in the clip's local space. In future,
+ // we could reduce the GPU cache size of ClipData.
+ let rect = LayoutRect::from_size(size);
+
+ ClipData {
+ rect: ClipRect {
+ rect,
+ mode: mode as u32 as f32,
+ },
+ top_left: ClipCorner::uniform(
+ LayoutRect::from_origin_and_size(
+ LayoutPoint::new(rect.min.x, rect.min.y),
+ LayoutSize::new(radius, radius),
+ ),
+ radius,
+ 0.0,
+ ),
+ top_right: ClipCorner::uniform(
+ LayoutRect::from_origin_and_size(
+ LayoutPoint::new(rect.max.x - radius, rect.min.y),
+ LayoutSize::new(radius, radius),
+ ),
+ radius,
+ 0.0,
+ ),
+ bottom_left: ClipCorner::uniform(
+ LayoutRect::from_origin_and_size(
+ LayoutPoint::new(rect.min.x, rect.max.y - radius),
+ LayoutSize::new(radius, radius),
+ ),
+ radius,
+ 0.0,
+ ),
+ bottom_right: ClipCorner::uniform(
+ LayoutRect::from_origin_and_size(
+ LayoutPoint::new(
+ rect.max.x - radius,
+ rect.max.y - radius,
+ ),
+ LayoutSize::new(radius, radius),
+ ),
+ radius,
+ 0.0,
+ ),
+ }
+ }
+}
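+
+// Illustrative sketch (editor's note, not part of the upstream source): a
+// uniform 4px rounded-rect clip in the clip's local space (origin is always
+// zero here).
+//
+//     let clip = ClipData::uniform(LayoutSize::new(100.0, 50.0), 4.0, ClipMode::Clip);
+//     // Each corner covers a 4x4 rect with outer radius 4.0 and inner radius 0.0.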
+
+/// A hashable descriptor for nine-patches, used by image and
+/// gradient borders.
+#[derive(Debug, Clone, PartialEq, Eq, Hash, MallocSizeOf)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct NinePatchDescriptor {
+ pub width: i32,
+ pub height: i32,
+ pub slice: DeviceIntSideOffsets,
+ pub fill: bool,
+ pub repeat_horizontal: RepeatMode,
+ pub repeat_vertical: RepeatMode,
+ pub outset: SideOffsetsKey,
+ pub widths: SideOffsetsKey,
+}
+
+impl IsVisible for PrimitiveKeyKind {
+ // Return true if the primary primitive is visible.
+ // Used to trivially reject non-visible primitives.
+ // TODO(gw): Currently, primitives other than those
+ // listed here are handled before the
+ // add_primitive() call. In the future
+ // we should move the logic for all other
+ // primitive types to use this.
+ fn is_visible(&self) -> bool {
+ match *self {
+ PrimitiveKeyKind::Clear => {
+ true
+ }
+ PrimitiveKeyKind::Rectangle { ref color, .. } => {
+ match *color {
+ PropertyBinding::Value(value) => value.a > 0,
+ PropertyBinding::Binding(..) => true,
+ }
+ }
+ }
+ }
+}
+
+impl CreateShadow for PrimitiveKeyKind {
+ // Create a clone of this primitive kind, applying whatever
+ // changes are necessary to the primitive to support rendering
+ // it as part of the supplied shadow.
+ fn create_shadow(
+ &self,
+ shadow: &Shadow,
+ _: bool,
+ _: RasterSpace,
+ ) -> PrimitiveKeyKind {
+ match *self {
+ PrimitiveKeyKind::Rectangle { .. } => {
+ PrimitiveKeyKind::Rectangle {
+ color: PropertyBinding::Value(shadow.color.into()),
+ }
+ }
+ PrimitiveKeyKind::Clear => {
+ panic!("bug: this prim is not supported in shadow contexts");
+ }
+ }
+ }
+}
+
+#[derive(Debug)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+pub enum PrimitiveInstanceKind {
+ /// Direct reference to a Picture
+ Picture {
+ /// Handle to the common interned data for this primitive.
+ data_handle: PictureDataHandle,
+ pic_index: PictureIndex,
+ segment_instance_index: SegmentInstanceIndex,
+ },
+ /// A run of glyphs, with associated font parameters.
+ TextRun {
+ /// Handle to the common interned data for this primitive.
+ data_handle: TextRunDataHandle,
+ /// Index to the per instance scratch data for this primitive.
+ run_index: TextRunIndex,
+ },
+ /// A line decoration. render_task refers to a cached render
+ /// task, if this line decoration is not a simple solid.
+ LineDecoration {
+ /// Handle to the common interned data for this primitive.
+ data_handle: LineDecorationDataHandle,
+ // TODO(gw): For now, we need to store some information in
+ // the primitive instance that is created during
+ // prepare_prims and read during the batching pass.
+ // Once we unify the prepare_prims and batching to
+ // occur at the same time, we can remove most of
+ // the things we store here in the instance, and
+ // use them directly. This will remove cache_handle,
+ // but also the opacity, clip_task_id etc below.
+ render_task: Option<RenderTaskId>,
+ },
+ NormalBorder {
+ /// Handle to the common interned data for this primitive.
+ data_handle: NormalBorderDataHandle,
+ render_task_ids: storage::Range<RenderTaskId>,
+ },
+ ImageBorder {
+ /// Handle to the common interned data for this primitive.
+ data_handle: ImageBorderDataHandle,
+ },
+ Rectangle {
+ /// Handle to the common interned data for this primitive.
+ data_handle: PrimitiveDataHandle,
+ segment_instance_index: SegmentInstanceIndex,
+ color_binding_index: ColorBindingIndex,
+ },
+ YuvImage {
+ /// Handle to the common interned data for this primitive.
+ data_handle: YuvImageDataHandle,
+ segment_instance_index: SegmentInstanceIndex,
+ is_compositor_surface: bool,
+ },
+ Image {
+ /// Handle to the common interned data for this primitive.
+ data_handle: ImageDataHandle,
+ image_instance_index: ImageInstanceIndex,
+ is_compositor_surface: bool,
+ },
+ /// Always rendered directly into the picture. This tends to be
+ /// faster with SWGL.
+ LinearGradient {
+ /// Handle to the common interned data for this primitive.
+ data_handle: LinearGradientDataHandle,
+ visible_tiles_range: GradientTileRange,
+ },
+ /// Always rendered via a cached render task. Usually faster with
+ /// a GPU.
+ CachedLinearGradient {
+ /// Handle to the common interned data for this primitive.
+ data_handle: LinearGradientDataHandle,
+ visible_tiles_range: GradientTileRange,
+ },
+ RadialGradient {
+ /// Handle to the common interned data for this primitive.
+ data_handle: RadialGradientDataHandle,
+ visible_tiles_range: GradientTileRange,
+ },
+ ConicGradient {
+ /// Handle to the common interned data for this primitive.
+ data_handle: ConicGradientDataHandle,
+ visible_tiles_range: GradientTileRange,
+ },
+ /// Clear out a rect, used for special effects.
+ Clear {
+ /// Handle to the common interned data for this primitive.
+ data_handle: PrimitiveDataHandle,
+ },
+ /// Render a portion of a specified backdrop.
+ BackdropCapture {
+ data_handle: BackdropCaptureDataHandle,
+ },
+ BackdropRender {
+ data_handle: BackdropRenderDataHandle,
+ pic_index: PictureIndex,
+ },
+}
+
+impl PrimitiveInstanceKind {
+ pub fn as_pic(&self) -> PictureIndex {
+ match self {
+ PrimitiveInstanceKind::Picture { pic_index, .. } => *pic_index,
+ _ => panic!("bug: as_pic called on a prim that is not a picture"),
+ }
+ }
+}
+
+#[derive(Debug, Copy, Clone)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct PrimitiveInstanceIndex(pub u32);
+
+#[derive(Debug)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+pub struct PrimitiveInstance {
+ /// Identifies the kind of primitive this
+ /// instance is, and references to where
+ /// the relevant information for the primitive
+ /// can be found.
+ pub kind: PrimitiveInstanceKind,
+
+ /// All information and state related to clip(s) for this primitive
+ pub clip_leaf_id: ClipLeafId,
+
+ /// Information related to the current visibility state of this
+ /// primitive.
+ // TODO(gw): Currently built each frame, but can be retained.
+ pub vis: PrimitiveVisibility,
+}
+
+impl PrimitiveInstance {
+ pub fn new(
+ kind: PrimitiveInstanceKind,
+ clip_leaf_id: ClipLeafId,
+ ) -> Self {
+ PrimitiveInstance {
+ kind,
+ vis: PrimitiveVisibility::new(),
+ clip_leaf_id,
+ }
+ }
+
+ // Reset any pre-frame state for this primitive.
+ pub fn reset(&mut self) {
+ self.vis.reset();
+ }
+
+ pub fn clear_visibility(&mut self) {
+ self.vis.reset();
+ }
+
+ pub fn uid(&self) -> intern::ItemUid {
+ match &self.kind {
+ PrimitiveInstanceKind::Clear { data_handle, .. } |
+ PrimitiveInstanceKind::Rectangle { data_handle, .. } => {
+ data_handle.uid()
+ }
+ PrimitiveInstanceKind::Image { data_handle, .. } => {
+ data_handle.uid()
+ }
+ PrimitiveInstanceKind::ImageBorder { data_handle, .. } => {
+ data_handle.uid()
+ }
+ PrimitiveInstanceKind::LineDecoration { data_handle, .. } => {
+ data_handle.uid()
+ }
+ PrimitiveInstanceKind::LinearGradient { data_handle, .. } => {
+ data_handle.uid()
+ }
+ PrimitiveInstanceKind::CachedLinearGradient { data_handle, .. } => {
+ data_handle.uid()
+ }
+ PrimitiveInstanceKind::NormalBorder { data_handle, .. } => {
+ data_handle.uid()
+ }
+ PrimitiveInstanceKind::Picture { data_handle, .. } => {
+ data_handle.uid()
+ }
+ PrimitiveInstanceKind::RadialGradient { data_handle, .. } => {
+ data_handle.uid()
+ }
+ PrimitiveInstanceKind::ConicGradient { data_handle, .. } => {
+ data_handle.uid()
+ }
+ PrimitiveInstanceKind::TextRun { data_handle, .. } => {
+ data_handle.uid()
+ }
+ PrimitiveInstanceKind::YuvImage { data_handle, .. } => {
+ data_handle.uid()
+ }
+ PrimitiveInstanceKind::BackdropCapture { data_handle, .. } => {
+ data_handle.uid()
+ }
+ PrimitiveInstanceKind::BackdropRender { data_handle, .. } => {
+ data_handle.uid()
+ }
+ }
+ }
+}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[derive(Debug)]
+pub struct SegmentedInstance {
+ pub gpu_cache_handle: GpuCacheHandle,
+ pub segments_range: SegmentsRange,
+}
+
+pub type GlyphKeyStorage = storage::Storage<GlyphKey>;
+pub type TextRunIndex = storage::Index<TextRunPrimitive>;
+pub type TextRunStorage = storage::Storage<TextRunPrimitive>;
+pub type ColorBindingIndex = storage::Index<PropertyBinding<ColorU>>;
+pub type ColorBindingStorage = storage::Storage<PropertyBinding<ColorU>>;
+pub type BorderHandleStorage = storage::Storage<RenderTaskId>;
+pub type SegmentStorage = storage::Storage<BrushSegment>;
+pub type SegmentsRange = storage::Range<BrushSegment>;
+pub type SegmentInstanceStorage = storage::Storage<SegmentedInstance>;
+pub type SegmentInstanceIndex = storage::Index<SegmentedInstance>;
+pub type ImageInstanceStorage = storage::Storage<ImageInstance>;
+pub type ImageInstanceIndex = storage::Index<ImageInstance>;
+pub type GradientTileStorage = storage::Storage<VisibleGradientTile>;
+pub type GradientTileRange = storage::Range<VisibleGradientTile>;
+pub type LinearGradientStorage = storage::Storage<LinearGradientPrimitive>;
+
+/// Contains various vecs of data that are used only during frame building,
+/// where we want to recycle the memory for each new display list, to avoid constantly
+/// re-allocating and moving memory around. Written during primitive preparation,
+/// and read during batching.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+pub struct PrimitiveScratchBuffer {
+ /// Contains a list of clip mask instance parameters
+ /// per segment generated.
+ pub clip_mask_instances: Vec<ClipMaskKind>,
+
+ /// List of glyphs keys that are allocated by each
+ /// text run instance.
+ pub glyph_keys: GlyphKeyStorage,
+
+ /// List of render task handles for border segment instances
+ /// that have been added this frame.
+ pub border_cache_handles: BorderHandleStorage,
+
+ /// A list of brush segments that have been built for this scene.
+ pub segments: SegmentStorage,
+
+ /// A list of segment ranges and GPU cache handles for prim instances
+ /// that have opted into segment building. In future, this should be
+ /// removed in favor of segment building during primitive interning.
+ pub segment_instances: SegmentInstanceStorage,
+
+ /// A list of visible tiles that tiled gradients use to store
+ /// per-tile information.
+ pub gradient_tiles: GradientTileStorage,
+
+ /// List of debug display items for rendering.
+ pub debug_items: Vec<DebugItem>,
+
+ /// List of current debug messages to log on screen
+ messages: Vec<DebugMessage>,
+
+ /// Set of sub-graphs that are required, determined during visibility pass
+ pub required_sub_graphs: FastHashSet<PictureIndex>,
+}
+
+impl Default for PrimitiveScratchBuffer {
+ fn default() -> Self {
+ PrimitiveScratchBuffer {
+ clip_mask_instances: Vec::new(),
+ glyph_keys: GlyphKeyStorage::new(0),
+ border_cache_handles: BorderHandleStorage::new(0),
+ segments: SegmentStorage::new(0),
+ segment_instances: SegmentInstanceStorage::new(0),
+ gradient_tiles: GradientTileStorage::new(0),
+ debug_items: Vec::new(),
+ messages: Vec::new(),
+ required_sub_graphs: FastHashSet::default(),
+ }
+ }
+}
+
+impl PrimitiveScratchBuffer {
+ pub fn recycle(&mut self, recycler: &mut Recycler) {
+ recycler.recycle_vec(&mut self.clip_mask_instances);
+ self.glyph_keys.recycle(recycler);
+ self.border_cache_handles.recycle(recycler);
+ self.segments.recycle(recycler);
+ self.segment_instances.recycle(recycler);
+ self.gradient_tiles.recycle(recycler);
+ recycler.recycle_vec(&mut self.debug_items);
+ }
+
+ pub fn begin_frame(&mut self) {
+ // Clear the clip mask tasks for the beginning of the frame. Append
+ // a single kind representing no clip mask, at the ClipTaskIndex::INVALID
+ // location.
+ self.clip_mask_instances.clear();
+ self.clip_mask_instances.push(ClipMaskKind::None);
+
+ self.border_cache_handles.clear();
+
+ // TODO(gw): As in the previous code, the gradient tiles store GPU cache
+ // handles that are cleared (and thus invalidated + re-uploaded)
+ // every frame. This maintains the existing behavior, but we
+ // should fix this in the future to retain handles.
+ self.gradient_tiles.clear();
+
+ self.required_sub_graphs.clear();
+
+ self.debug_items.clear();
+ }
+
+ pub fn end_frame(&mut self) {
+ const MSGS_TO_RETAIN: usize = 32;
+ const TIME_TO_RETAIN: u64 = 2000000000;
+ const LINE_HEIGHT: f32 = 20.0;
+ const X0: f32 = 32.0;
+ const Y0: f32 = 32.0;
+ let now = time::precise_time_ns();
+
+ let msgs_to_remove = self.messages.len().max(MSGS_TO_RETAIN) - MSGS_TO_RETAIN;
+ let mut msgs_removed = 0;
+
+ self.messages.retain(|msg| {
+ if msgs_removed < msgs_to_remove {
+ msgs_removed += 1;
+ return false;
+ }
+
+ if msg.timestamp + TIME_TO_RETAIN < now {
+ return false;
+ }
+
+ true
+ });
+
+ let mut y = Y0 + self.messages.len() as f32 * LINE_HEIGHT;
+ let shadow_offset = 1.0;
+
+ for msg in &self.messages {
+ self.debug_items.push(DebugItem::Text {
+ position: DevicePoint::new(X0 + shadow_offset, y + shadow_offset),
+ color: debug_colors::BLACK,
+ msg: msg.msg.clone(),
+ });
+
+ self.debug_items.push(DebugItem::Text {
+ position: DevicePoint::new(X0, y),
+ color: debug_colors::RED,
+ msg: msg.msg.clone(),
+ });
+
+ y -= LINE_HEIGHT;
+ }
+ }
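+
+ // Worked example (editor's note, not part of the upstream source): with
+ // MSGS_TO_RETAIN = 32 and 40 queued messages, msgs_to_remove is
+ // 40.max(32) - 32 = 8, so the 8 oldest messages are dropped first and any
+ // survivor older than TIME_TO_RETAIN (2 seconds) is also discarded.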
+
+ #[allow(dead_code)]
+ pub fn push_debug_rect(
+ &mut self,
+ rect: DeviceRect,
+ outer_color: ColorF,
+ inner_color: ColorF,
+ ) {
+ self.debug_items.push(DebugItem::Rect {
+ rect,
+ outer_color,
+ inner_color,
+ });
+ }
+
+ #[allow(dead_code)]
+ pub fn push_debug_string(
+ &mut self,
+ position: DevicePoint,
+ color: ColorF,
+ msg: String,
+ ) {
+ self.debug_items.push(DebugItem::Text {
+ position,
+ color,
+ msg,
+ });
+ }
+
+ #[allow(dead_code)]
+ pub fn log(
+ &mut self,
+ msg: String,
+ ) {
+ self.messages.push(DebugMessage {
+ msg,
+ timestamp: time::precise_time_ns(),
+ })
+ }
+}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Clone, Debug)]
+pub struct PrimitiveStoreStats {
+ picture_count: usize,
+ text_run_count: usize,
+ image_count: usize,
+ linear_gradient_count: usize,
+ color_binding_count: usize,
+}
+
+impl PrimitiveStoreStats {
+ pub fn empty() -> Self {
+ PrimitiveStoreStats {
+ picture_count: 0,
+ text_run_count: 0,
+ image_count: 0,
+ linear_gradient_count: 0,
+ color_binding_count: 0,
+ }
+ }
+}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+pub struct PrimitiveStore {
+ pub pictures: Vec<PicturePrimitive>,
+ pub text_runs: TextRunStorage,
+ pub linear_gradients: LinearGradientStorage,
+
+ /// A list of image instances. These are stored separately as
+ /// storing them inline in the instance makes the structure bigger
+ /// for other types.
+ pub images: ImageInstanceStorage,
+
+ /// Animated color bindings for this primitive.
+ pub color_bindings: ColorBindingStorage,
+}
+
+impl PrimitiveStore {
+ pub fn new(stats: &PrimitiveStoreStats) -> PrimitiveStore {
+ PrimitiveStore {
+ pictures: Vec::with_capacity(stats.picture_count),
+ text_runs: TextRunStorage::new(stats.text_run_count),
+ images: ImageInstanceStorage::new(stats.image_count),
+ color_bindings: ColorBindingStorage::new(stats.color_binding_count),
+ linear_gradients: LinearGradientStorage::new(stats.linear_gradient_count),
+ }
+ }
+
+ pub fn get_stats(&self) -> PrimitiveStoreStats {
+ PrimitiveStoreStats {
+ picture_count: self.pictures.len(),
+ text_run_count: self.text_runs.len(),
+ image_count: self.images.len(),
+ linear_gradient_count: self.linear_gradients.len(),
+ color_binding_count: self.color_bindings.len(),
+ }
+ }
+
+ #[allow(unused)]
+ pub fn print_picture_tree(&self, root: PictureIndex) {
+ use crate::print_tree::PrintTree;
+ let mut pt = PrintTree::new("picture tree");
+ self.pictures[root.0].print(&self.pictures, root, &mut pt);
+ }
+}
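+
+// Illustrative sketch (editor's note, not part of the upstream source): stats
+// from the previous scene seed the capacities of the next store.
+//
+//     let stats = old_store.get_stats();
+//     let fresh = PrimitiveStore::new(&stats); // pre-sized to last scene's counts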
+
+/// Trait for primitives that are directly internable.
+/// see SceneBuilder::add_primitive<P>
+pub trait InternablePrimitive: intern::Internable<InternData = ()> + Sized {
+ /// Build a new key from self with `info`.
+ fn into_key(
+ self,
+ info: &LayoutPrimitiveInfo,
+ ) -> Self::Key;
+
+ fn make_instance_kind(
+ key: Self::Key,
+ data_handle: intern::Handle<Self>,
+ prim_store: &mut PrimitiveStore,
+ reference_frame_relative_offset: LayoutVector2D,
+ ) -> PrimitiveInstanceKind;
+}
+
+
+#[test]
+#[cfg(target_pointer_width = "64")]
+fn test_struct_sizes() {
+ use std::mem;
+ // The sizes of these structures are critical for performance on a number of
+ // talos stress tests. If you get a failure here on CI, there are two possibilities:
+ // (a) You made a structure smaller than it currently is. Great work! Update the
+ // test expectations and move on.
+ // (b) You made a structure larger. This is not necessarily a problem, but should only
+ // be done with care, and after checking if talos performance regresses badly.
+ assert_eq!(mem::size_of::<PrimitiveInstance>(), 104, "PrimitiveInstance size changed");
+ assert_eq!(mem::size_of::<PrimitiveInstanceKind>(), 24, "PrimitiveInstanceKind size changed");
+ assert_eq!(mem::size_of::<PrimitiveTemplate>(), 56, "PrimitiveTemplate size changed");
+ assert_eq!(mem::size_of::<PrimitiveTemplateKind>(), 28, "PrimitiveTemplateKind size changed");
+ assert_eq!(mem::size_of::<PrimitiveKey>(), 36, "PrimitiveKey size changed");
+ assert_eq!(mem::size_of::<PrimitiveKeyKind>(), 16, "PrimitiveKeyKind size changed");
+}
diff --git a/gfx/wr/webrender/src/prim_store/picture.rs b/gfx/wr/webrender/src/prim_store/picture.rs
new file mode 100644
index 0000000000..c3ec88783a
--- /dev/null
+++ b/gfx/wr/webrender/src/prim_store/picture.rs
@@ -0,0 +1,328 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use api::{
+ ColorU, MixBlendMode, FilterPrimitiveInput, FilterPrimitiveKind, ColorSpace,
+ PropertyBinding, PropertyBindingId, CompositeOperator, RasterSpace,
+};
+use api::units::{Au, LayoutVector2D};
+use crate::scene_building::IsVisible;
+use crate::filterdata::SFilterData;
+use crate::intern::ItemUid;
+use crate::intern::{Internable, InternDebug, Handle as InternHandle};
+use crate::internal_types::{LayoutPrimitiveInfo, Filter};
+use crate::picture::PictureCompositeMode;
+use crate::prim_store::{
+ PrimitiveInstanceKind, PrimitiveStore, VectorKey,
+ InternablePrimitive,
+};
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, MallocSizeOf, PartialEq, Hash, Eq)]
+pub enum CompositeOperatorKey {
+ Over,
+ In,
+ Out,
+ Atop,
+ Xor,
+ Lighter,
+ Arithmetic([Au; 4]),
+}
+
+impl From<CompositeOperator> for CompositeOperatorKey {
+ fn from(operator: CompositeOperator) -> Self {
+ match operator {
+ CompositeOperator::Over => CompositeOperatorKey::Over,
+ CompositeOperator::In => CompositeOperatorKey::In,
+ CompositeOperator::Out => CompositeOperatorKey::Out,
+ CompositeOperator::Atop => CompositeOperatorKey::Atop,
+ CompositeOperator::Xor => CompositeOperatorKey::Xor,
+ CompositeOperator::Lighter => CompositeOperatorKey::Lighter,
+ CompositeOperator::Arithmetic(k_vals) => {
+ let k_vals = [
+ Au::from_f32_px(k_vals[0]),
+ Au::from_f32_px(k_vals[1]),
+ Au::from_f32_px(k_vals[2]),
+ Au::from_f32_px(k_vals[3]),
+ ];
+ CompositeOperatorKey::Arithmetic(k_vals)
+ }
+ }
+ }
+}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, MallocSizeOf, PartialEq, Hash, Eq)]
+pub enum FilterPrimitiveKey {
+ Identity(ColorSpace, FilterPrimitiveInput),
+ Flood(ColorSpace, ColorU),
+ Blend(ColorSpace, MixBlendMode, FilterPrimitiveInput, FilterPrimitiveInput),
+ Blur(ColorSpace, Au, Au, FilterPrimitiveInput),
+ Opacity(ColorSpace, Au, FilterPrimitiveInput),
+ ColorMatrix(ColorSpace, [Au; 20], FilterPrimitiveInput),
+ DropShadow(ColorSpace, (VectorKey, Au, ColorU), FilterPrimitiveInput),
+ ComponentTransfer(ColorSpace, FilterPrimitiveInput, Vec<SFilterData>),
+ Offset(ColorSpace, FilterPrimitiveInput, VectorKey),
+ Composite(ColorSpace, FilterPrimitiveInput, FilterPrimitiveInput, CompositeOperatorKey),
+}
+
+/// Represents a hashable description of how a picture primitive
+/// will be composited into its parent.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, MallocSizeOf, PartialEq, Hash, Eq)]
+pub enum PictureCompositeKey {
+ // No visual compositing effect
+ Identity,
+
+ // FilterOp
+ Blur(Au, Au, bool),
+ Brightness(Au),
+ Contrast(Au),
+ Grayscale(Au),
+ HueRotate(Au),
+ Invert(Au),
+ Opacity(Au),
+ OpacityBinding(PropertyBindingId, Au),
+ Saturate(Au),
+ Sepia(Au),
+ DropShadows(Vec<(VectorKey, Au, ColorU)>),
+ ColorMatrix([Au; 20]),
+ SrgbToLinear,
+ LinearToSrgb,
+ ComponentTransfer(ItemUid),
+ Flood(ColorU),
+ SvgFilter(Vec<FilterPrimitiveKey>),
+
+ // MixBlendMode
+ Multiply,
+ Screen,
+ Overlay,
+ Darken,
+ Lighten,
+ ColorDodge,
+ ColorBurn,
+ HardLight,
+ SoftLight,
+ Difference,
+ Exclusion,
+ Hue,
+ Saturation,
+ Color,
+ Luminosity,
+ PlusLighter,
+}
+
+impl From<Option<PictureCompositeMode>> for PictureCompositeKey {
+ fn from(mode: Option<PictureCompositeMode>) -> Self {
+ match mode {
+ Some(PictureCompositeMode::MixBlend(mode)) => {
+ match mode {
+ MixBlendMode::Normal => PictureCompositeKey::Identity,
+ MixBlendMode::Multiply => PictureCompositeKey::Multiply,
+ MixBlendMode::Screen => PictureCompositeKey::Screen,
+ MixBlendMode::Overlay => PictureCompositeKey::Overlay,
+ MixBlendMode::Darken => PictureCompositeKey::Darken,
+ MixBlendMode::Lighten => PictureCompositeKey::Lighten,
+ MixBlendMode::ColorDodge => PictureCompositeKey::ColorDodge,
+ MixBlendMode::ColorBurn => PictureCompositeKey::ColorBurn,
+ MixBlendMode::HardLight => PictureCompositeKey::HardLight,
+ MixBlendMode::SoftLight => PictureCompositeKey::SoftLight,
+ MixBlendMode::Difference => PictureCompositeKey::Difference,
+ MixBlendMode::Exclusion => PictureCompositeKey::Exclusion,
+ MixBlendMode::Hue => PictureCompositeKey::Hue,
+ MixBlendMode::Saturation => PictureCompositeKey::Saturation,
+ MixBlendMode::Color => PictureCompositeKey::Color,
+ MixBlendMode::Luminosity => PictureCompositeKey::Luminosity,
+ MixBlendMode::PlusLighter => PictureCompositeKey::PlusLighter,
+ }
+ }
+ Some(PictureCompositeMode::Filter(op)) => {
+ match op {
+ Filter::Blur { width, height, should_inflate } =>
+ PictureCompositeKey::Blur(Au::from_f32_px(width), Au::from_f32_px(height), should_inflate),
+ Filter::Brightness(value) => PictureCompositeKey::Brightness(Au::from_f32_px(value)),
+ Filter::Contrast(value) => PictureCompositeKey::Contrast(Au::from_f32_px(value)),
+ Filter::Grayscale(value) => PictureCompositeKey::Grayscale(Au::from_f32_px(value)),
+ Filter::HueRotate(value) => PictureCompositeKey::HueRotate(Au::from_f32_px(value)),
+ Filter::Invert(value) => PictureCompositeKey::Invert(Au::from_f32_px(value)),
+ Filter::Saturate(value) => PictureCompositeKey::Saturate(Au::from_f32_px(value)),
+ Filter::Sepia(value) => PictureCompositeKey::Sepia(Au::from_f32_px(value)),
+ Filter::SrgbToLinear => PictureCompositeKey::SrgbToLinear,
+ Filter::LinearToSrgb => PictureCompositeKey::LinearToSrgb,
+ Filter::Identity => PictureCompositeKey::Identity,
+ Filter::DropShadows(ref shadows) => {
+ PictureCompositeKey::DropShadows(
+ shadows.iter().map(|shadow| {
+ (shadow.offset.into(), Au::from_f32_px(shadow.blur_radius), shadow.color.into())
+ }).collect()
+ )
+ }
+ Filter::Opacity(binding, _) => {
+ match binding {
+ PropertyBinding::Value(value) => {
+ PictureCompositeKey::Opacity(Au::from_f32_px(value))
+ }
+ PropertyBinding::Binding(key, default) => {
+ PictureCompositeKey::OpacityBinding(key.id, Au::from_f32_px(default))
+ }
+ }
+ }
+ Filter::ColorMatrix(values) => {
+ let mut quantized_values: [Au; 20] = [Au(0); 20];
+ for (value, result) in values.iter().zip(quantized_values.iter_mut()) {
+ *result = Au::from_f32_px(*value);
+ }
+ PictureCompositeKey::ColorMatrix(quantized_values)
+ }
+ Filter::ComponentTransfer => unreachable!(),
+ Filter::Flood(color) => PictureCompositeKey::Flood(color.into()),
+ }
+ }
+ Some(PictureCompositeMode::ComponentTransferFilter(handle)) => {
+ PictureCompositeKey::ComponentTransfer(handle.uid())
+ }
+ Some(PictureCompositeMode::SvgFilter(filter_primitives, filter_data)) => {
+ PictureCompositeKey::SvgFilter(filter_primitives.into_iter().map(|primitive| {
+ match primitive.kind {
+ FilterPrimitiveKind::Identity(identity) => FilterPrimitiveKey::Identity(primitive.color_space, identity.input),
+ FilterPrimitiveKind::Blend(blend) => FilterPrimitiveKey::Blend(primitive.color_space, blend.mode, blend.input1, blend.input2),
+ FilterPrimitiveKind::Flood(flood) => FilterPrimitiveKey::Flood(primitive.color_space, flood.color.into()),
+ FilterPrimitiveKind::Blur(blur) =>
+ FilterPrimitiveKey::Blur(primitive.color_space, Au::from_f32_px(blur.width), Au::from_f32_px(blur.height), blur.input),
+ FilterPrimitiveKind::Opacity(opacity) =>
+ FilterPrimitiveKey::Opacity(primitive.color_space, Au::from_f32_px(opacity.opacity), opacity.input),
+ FilterPrimitiveKind::ColorMatrix(color_matrix) => {
+ let mut quantized_values: [Au; 20] = [Au(0); 20];
+ for (value, result) in color_matrix.matrix.iter().zip(quantized_values.iter_mut()) {
+ *result = Au::from_f32_px(*value);
+ }
+ FilterPrimitiveKey::ColorMatrix(primitive.color_space, quantized_values, color_matrix.input)
+ }
+ FilterPrimitiveKind::DropShadow(drop_shadow) => {
+ FilterPrimitiveKey::DropShadow(
+ primitive.color_space,
+ (
+ drop_shadow.shadow.offset.into(),
+ Au::from_f32_px(drop_shadow.shadow.blur_radius),
+ drop_shadow.shadow.color.into(),
+ ),
+ drop_shadow.input,
+ )
+ }
+ FilterPrimitiveKind::ComponentTransfer(component_transfer) =>
+ FilterPrimitiveKey::ComponentTransfer(primitive.color_space, component_transfer.input, filter_data.clone()),
+ FilterPrimitiveKind::Offset(info) =>
+ FilterPrimitiveKey::Offset(primitive.color_space, info.input, info.offset.into()),
+ FilterPrimitiveKind::Composite(info) =>
+ FilterPrimitiveKey::Composite(primitive.color_space, info.input1, info.input2, info.operator.into()),
+ }
+ }).collect())
+ }
+ Some(PictureCompositeMode::Blit(_)) |
+ Some(PictureCompositeMode::TileCache { .. }) |
+ Some(PictureCompositeMode::IntermediateSurface) |
+ None => {
+ PictureCompositeKey::Identity
+ }
+ }
+ }
+}
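+
+// Illustrative sketch (editor's note, not part of the upstream source): float
+// filter parameters are quantized to Au so the key is hashable and stable
+// across tiny float differences.
+//
+//     let key: PictureCompositeKey =
+//         Some(PictureCompositeMode::Filter(
+//             Filter::Opacity(PropertyBinding::Value(0.5), 0.5))).into();
+//     assert_eq!(key, PictureCompositeKey::Opacity(Au::from_f32_px(0.5)));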
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, Eq, MallocSizeOf, PartialEq, Hash)]
+pub struct Picture {
+ pub composite_mode_key: PictureCompositeKey,
+ pub raster_space: RasterSpace,
+}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, Eq, MallocSizeOf, PartialEq, Hash)]
+pub struct PictureKey {
+ pub composite_mode_key: PictureCompositeKey,
+ pub raster_space: RasterSpace,
+}
+
+impl PictureKey {
+ pub fn new(
+ pic: Picture,
+ ) -> Self {
+ PictureKey {
+ composite_mode_key: pic.composite_mode_key,
+ raster_space: pic.raster_space,
+ }
+ }
+}
+
+impl InternDebug for PictureKey {}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(MallocSizeOf)]
+pub struct PictureData;
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(MallocSizeOf)]
+pub struct PictureTemplate;
+
+impl From<PictureKey> for PictureTemplate {
+ fn from(_: PictureKey) -> Self {
+ PictureTemplate
+ }
+}
+
+pub type PictureDataHandle = InternHandle<Picture>;
+
+impl Internable for Picture {
+ type Key = PictureKey;
+ type StoreData = PictureTemplate;
+ type InternData = ();
+ const PROFILE_COUNTER: usize = crate::profiler::INTERNED_PICTURES;
+}
+
+impl InternablePrimitive for Picture {
+ fn into_key(
+ self,
+ _: &LayoutPrimitiveInfo,
+ ) -> PictureKey {
+ PictureKey::new(self)
+ }
+
+ fn make_instance_kind(
+ _key: PictureKey,
+ _: PictureDataHandle,
+ _: &mut PrimitiveStore,
+ _reference_frame_relative_offset: LayoutVector2D,
+ ) -> PrimitiveInstanceKind {
+ // Should never be hit as this method should not be
+ // called for pictures.
+ unreachable!();
+ }
+}
+
+impl IsVisible for Picture {
+ fn is_visible(&self) -> bool {
+ true
+ }
+}
+
+#[test]
+#[cfg(target_pointer_width = "64")]
+fn test_struct_sizes() {
+ use std::mem;
+ // The sizes of these structures are critical for performance on a number of
+ // talos stress tests. If you get a failure here on CI, there are two possibilities:
+ // (a) You made a structure smaller than it currently is. Great work! Update the
+ // test expectations and move on.
+ // (b) You made a structure larger. This is not necessarily a problem, but should only
+ // be done with care, and after checking if talos performance regresses badly.
+ assert_eq!(mem::size_of::<Picture>(), 96, "Picture size changed");
+ assert_eq!(mem::size_of::<PictureTemplate>(), 0, "PictureTemplate size changed");
+ assert_eq!(mem::size_of::<PictureKey>(), 96, "PictureKey size changed");
+}
diff --git a/gfx/wr/webrender/src/prim_store/storage.rs b/gfx/wr/webrender/src/prim_store/storage.rs
new file mode 100644
index 0000000000..4b99d87556
--- /dev/null
+++ b/gfx/wr/webrender/src/prim_store/storage.rs
@@ -0,0 +1,156 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use std::{iter::Extend, ops, marker::PhantomData, u32};
+use crate::util::Recycler;
+
+#[derive(Debug, Hash)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct Index<T>(u32, PhantomData<T>);
+
+// We explicitly implement Copy + Clone instead of using #[derive(Copy, Clone)]
+// because we don't want to require that T implements Clone + Copy.
+impl<T> Clone for Index<T> {
+ fn clone(&self) -> Self { *self }
+}
+
+impl<T> Copy for Index<T> {}
+
+impl<T> PartialEq for Index<T> {
+ fn eq(&self, other: &Self) -> bool {
+ self.0 == other.0
+ }
+}
+
+impl<T> Index<T> {
+ fn new(idx: usize) -> Self {
+ debug_assert!(idx < u32::max_value() as usize);
+ Index(idx as u32, PhantomData)
+ }
+
+ pub const INVALID: Index<T> = Index(u32::MAX, PhantomData);
+ pub const UNUSED: Index<T> = Index(u32::MAX-1, PhantomData);
+}
+
+#[derive(Debug)]
+pub struct OpenRange<T> {
+ start: Index<T>,
+}
+
+#[derive(Debug)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+pub struct Range<T> {
+ pub start: Index<T>,
+ pub end: Index<T>,
+}
+
+// We explicitly implement Copy + Clone instead of using #[derive(Copy, Clone)]
+// because we don't want to require that T implements Clone + Copy.
+impl<T> Clone for Range<T> {
+ fn clone(&self) -> Self {
+ Range { start: self.start, end: self.end }
+ }
+}
+impl<T> Copy for Range<T> {}
+
+impl<T> Range<T> {
+ /// Create an empty `Range`
+ pub fn empty() -> Self {
+ Range {
+ start: Index::new(0),
+ end: Index::new(0),
+ }
+ }
+
+ /// Check for an empty `Range`
+ pub fn is_empty(self) -> bool {
+ self.start.0 >= self.end.0
+ }
+}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+pub struct Storage<T> {
+ data: Vec<T>,
+}
+
+impl<T> Storage<T> {
+ pub fn new(initial_capacity: usize) -> Self {
+ Storage {
+ data: Vec::with_capacity(initial_capacity),
+ }
+ }
+
+ pub fn len(&self) -> usize {
+ self.data.len()
+ }
+
+ pub fn clear(&mut self) {
+ self.data.clear();
+ }
+
+ pub fn push(&mut self, t: T) -> Index<T> {
+ let index = self.data.len();
+ self.data.push(t);
+ Index(index as u32, PhantomData)
+ }
+
+ pub fn reserve(&mut self, count: usize) {
+ self.data.reserve(count);
+ }
+
+ pub fn recycle(&mut self, recycler: &mut Recycler) {
+ recycler.recycle_vec(&mut self.data);
+ }
+
+ pub fn extend<II: IntoIterator<Item=T>>(&mut self, iter: II) -> Range<T> {
+ let range = self.open_range();
+ self.data.extend(iter);
+
+ self.close_range(range)
+ }
+
+ pub fn open_range(&self) -> OpenRange<T> {
+ OpenRange {
+ start: Index::new(self.data.len())
+ }
+ }
+
+ pub fn close_range(&self, range: OpenRange<T>) -> Range<T> {
+ Range {
+ start: range.start,
+ end: Index::new(self.data.len()),
+ }
+ }
+}
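+
+// Illustrative sketch (editor's note, not part of the upstream source): typed
+// indices and ranges returned by one Storage<T> can only index back into a
+// storage of the same T.
+//
+//     let mut store: Storage<u32> = Storage::new(4);
+//     let idx = store.push(7);
+//     let range = store.extend(vec![1, 2, 3]);
+//     assert_eq!(store[idx], 7);
+//     assert_eq!(&store[range], &[1, 2, 3][..]);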
+
+impl<T> ops::Index<Index<T>> for Storage<T> {
+ type Output = T;
+ fn index(&self, index: Index<T>) -> &Self::Output {
+ &self.data[index.0 as usize]
+ }
+}
+
+impl<T> ops::IndexMut<Index<T>> for Storage<T> {
+ fn index_mut(&mut self, index: Index<T>) -> &mut Self::Output {
+ &mut self.data[index.0 as usize]
+ }
+}
+
+impl<T> ops::Index<Range<T>> for Storage<T> {
+ type Output = [T];
+ fn index(&self, index: Range<T>) -> &Self::Output {
+ let start = index.start.0 as _;
+ let end = index.end.0 as _;
+ &self.data[start..end]
+ }
+}
+
+impl<T> ops::IndexMut<Range<T>> for Storage<T> {
+ fn index_mut(&mut self, index: Range<T>) -> &mut Self::Output {
+ let start = index.start.0 as _;
+ let end = index.end.0 as _;
+ &mut self.data[start..end]
+ }
+}
diff --git a/gfx/wr/webrender/src/prim_store/text_run.rs b/gfx/wr/webrender/src/prim_store/text_run.rs
new file mode 100644
index 0000000000..61562306be
--- /dev/null
+++ b/gfx/wr/webrender/src/prim_store/text_run.rs
@@ -0,0 +1,505 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use api::{ColorF, FontInstanceFlags, GlyphInstance, RasterSpace, Shadow};
+use api::units::{LayoutToWorldTransform, LayoutVector2D, RasterPixelScale, DevicePixelScale};
+use crate::scene_building::{CreateShadow, IsVisible};
+use crate::frame_builder::FrameBuildingState;
+use glyph_rasterizer::{FontInstance, FontTransform, GlyphKey, FONT_SIZE_LIMIT};
+use crate::gpu_cache::GpuCache;
+use crate::intern;
+use crate::internal_types::LayoutPrimitiveInfo;
+use crate::picture::SurfaceInfo;
+use crate::prim_store::{PrimitiveOpacity, PrimitiveScratchBuffer};
+use crate::prim_store::{PrimitiveStore, PrimKeyCommonData, PrimTemplateCommonData};
+use crate::renderer::{MAX_VERTEX_TEXTURE_WIDTH};
+use crate::resource_cache::{ResourceCache};
+use crate::util::{MatrixHelpers};
+use crate::prim_store::{InternablePrimitive, PrimitiveInstanceKind};
+use crate::spatial_tree::{SpatialTree, SpatialNodeIndex};
+use crate::space::SpaceSnapper;
+use crate::util::PrimaryArc;
+
+use std::ops;
+use std::sync::Arc;
+
+use super::storage;
+
+/// A run of glyphs, with associated font information.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Debug, Clone, Eq, MallocSizeOf, PartialEq, Hash)]
+pub struct TextRunKey {
+ pub common: PrimKeyCommonData,
+ pub font: FontInstance,
+ pub glyphs: PrimaryArc<Vec<GlyphInstance>>,
+ pub shadow: bool,
+ pub requested_raster_space: RasterSpace,
+}
+
+impl TextRunKey {
+ pub fn new(
+ info: &LayoutPrimitiveInfo,
+ text_run: TextRun,
+ ) -> Self {
+ TextRunKey {
+ common: info.into(),
+ font: text_run.font,
+ glyphs: PrimaryArc(text_run.glyphs),
+ shadow: text_run.shadow,
+ requested_raster_space: text_run.requested_raster_space,
+ }
+ }
+}
+
+impl intern::InternDebug for TextRunKey {}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(MallocSizeOf)]
+pub struct TextRunTemplate {
+ pub common: PrimTemplateCommonData,
+ pub font: FontInstance,
+ #[ignore_malloc_size_of = "Measured via PrimaryArc"]
+ pub glyphs: Arc<Vec<GlyphInstance>>,
+}
+
+impl ops::Deref for TextRunTemplate {
+ type Target = PrimTemplateCommonData;
+ fn deref(&self) -> &Self::Target {
+ &self.common
+ }
+}
+
+impl ops::DerefMut for TextRunTemplate {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.common
+ }
+}
+
+impl From<TextRunKey> for TextRunTemplate {
+ fn from(item: TextRunKey) -> Self {
+ let common = PrimTemplateCommonData::with_key_common(item.common);
+ TextRunTemplate {
+ common,
+ font: item.font,
+ glyphs: item.glyphs.0,
+ }
+ }
+}
+
+impl TextRunTemplate {
+ /// Update the GPU cache for a given primitive template. This may be called multiple
+ /// times per frame, by each primitive reference that refers to this interned
+ /// template. The initial request call to the GPU cache ensures that work is only
+ /// done if the cache entry is invalid (due to first use or eviction).
+ pub fn update(
+ &mut self,
+ frame_state: &mut FrameBuildingState,
+ ) {
+ self.write_prim_gpu_blocks(frame_state);
+ self.opacity = PrimitiveOpacity::translucent();
+ }
+
+ fn write_prim_gpu_blocks(
+ &mut self,
+ frame_state: &mut FrameBuildingState,
+ ) {
+ // corresponds to `fetch_glyph` in the shaders
+ if let Some(mut request) = frame_state.gpu_cache.request(&mut self.common.gpu_cache_handle) {
+ request.push(ColorF::from(self.font.color).premultiplied());
+ // this is the only case where we need to provide a plain (non-premultiplied) color to the GPU
+ let bg_color = ColorF::from(self.font.bg_color);
+ request.push([bg_color.r, bg_color.g, bg_color.b, 1.0]);
+
+ let mut gpu_block = [0.0; 4];
+ for (i, src) in self.glyphs.iter().enumerate() {
+ // Two glyphs are packed per GPU block.
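+ // (e.g. points p0..p3 produce the blocks [p0.x, p0.y, p1.x, p1.y]
+ // and [p2.x, p2.y, p3.x, p3.y])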
+
+ if (i & 1) == 0 {
+ gpu_block[0] = src.point.x;
+ gpu_block[1] = src.point.y;
+ } else {
+ gpu_block[2] = src.point.x;
+ gpu_block[3] = src.point.y;
+ request.push(gpu_block);
+ }
+ }
+
+ // Ensure the last block is added in the case
+ // of an odd number of glyphs.
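+ // Only the first two lanes of that block carry the trailing glyph;
+ // the remaining lanes are just padding.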
+ if (self.glyphs.len() & 1) != 0 {
+ request.push(gpu_block);
+ }
+
+ assert!(request.current_used_block_num() <= MAX_VERTEX_TEXTURE_WIDTH);
+ }
+ }
+}
+
+pub type TextRunDataHandle = intern::Handle<TextRun>;
+
+#[derive(Debug, MallocSizeOf)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct TextRun {
+ pub font: FontInstance,
+ #[ignore_malloc_size_of = "Measured via PrimaryArc"]
+ pub glyphs: Arc<Vec<GlyphInstance>>,
+ pub shadow: bool,
+ pub requested_raster_space: RasterSpace,
+}
+
+impl intern::Internable for TextRun {
+ type Key = TextRunKey;
+ type StoreData = TextRunTemplate;
+ type InternData = ();
+ const PROFILE_COUNTER: usize = crate::profiler::INTERNED_TEXT_RUNS;
+}
+
+impl InternablePrimitive for TextRun {
+ fn into_key(
+ self,
+ info: &LayoutPrimitiveInfo,
+ ) -> TextRunKey {
+ TextRunKey::new(
+ info,
+ self,
+ )
+ }
+
+ fn make_instance_kind(
+ key: TextRunKey,
+ data_handle: TextRunDataHandle,
+ prim_store: &mut PrimitiveStore,
+ reference_frame_relative_offset: LayoutVector2D,
+ ) -> PrimitiveInstanceKind {
+ let run_index = prim_store.text_runs.push(TextRunPrimitive {
+ used_font: key.font.clone(),
+ glyph_keys_range: storage::Range::empty(),
+ reference_frame_relative_offset,
+ snapped_reference_frame_relative_offset: reference_frame_relative_offset,
+ shadow: key.shadow,
+ raster_scale: 1.0,
+ requested_raster_space: key.requested_raster_space,
+ });
+
+ PrimitiveInstanceKind::TextRun{ data_handle, run_index }
+ }
+}
+
+impl CreateShadow for TextRun {
+ fn create_shadow(
+ &self,
+ shadow: &Shadow,
+ blur_is_noop: bool,
+ current_raster_space: RasterSpace,
+ ) -> Self {
+ let mut font = FontInstance {
+ color: shadow.color.into(),
+ ..self.font.clone()
+ };
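+ // Blurred shadows are drawn via an intermediate blur surface, where
+ // subpixel AA can't blend correctly against the backdrop, so it is
+ // disabled below.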
+ if shadow.blur_radius > 0.0 {
+ font.disable_subpixel_aa();
+ }
+
+ let requested_raster_space = if blur_is_noop {
+ current_raster_space
+ } else {
+ RasterSpace::Local(1.0)
+ };
+
+ TextRun {
+ font,
+ glyphs: self.glyphs.clone(),
+ shadow: true,
+ requested_raster_space,
+ }
+ }
+}
+
+impl IsVisible for TextRun {
+ fn is_visible(&self) -> bool {
+ self.font.color.a > 0
+ }
+}
+
+#[derive(Debug)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+pub struct TextRunPrimitive {
+ pub used_font: FontInstance,
+ pub glyph_keys_range: storage::Range<GlyphKey>,
+ pub reference_frame_relative_offset: LayoutVector2D,
+ pub snapped_reference_frame_relative_offset: LayoutVector2D,
+ pub shadow: bool,
+ pub raster_scale: f32,
+ pub requested_raster_space: RasterSpace,
+}
+
+impl TextRunPrimitive {
+ pub fn update_font_instance(
+ &mut self,
+ specified_font: &FontInstance,
+ surface: &SurfaceInfo,
+ spatial_node_index: SpatialNodeIndex,
+ transform: &LayoutToWorldTransform,
+ mut allow_subpixel: bool,
+ raster_space: RasterSpace,
+ spatial_tree: &SpatialTree,
+ ) -> bool {
+ // If local raster space is specified, include that in the scale
+ // of the glyphs that get rasterized.
+ // TODO(gw): Once we support proper local space raster modes, this
+ // will implicitly be part of the device pixel ratio for
+ // the (cached) local space surface, and so this code
+ // will no longer be required.
+ let raster_scale = raster_space.local_scale().unwrap_or(1.0).max(0.001);
+
+ let dps = surface.device_pixel_scale.0;
+ let font_size = specified_font.size.to_f32_px();
+
+ // Small floating point error can accumulate in the raster * device_pixel scale.
+ // Round that to the nearest 100th of a scale factor to remove this error while
+ // still allowing reasonably accurate scale factors when a pinch-zoom is stopped
+ // at a fractional amount.
+ let quantized_scale = (dps * raster_scale * 100.0).round() / 100.0;
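+ // e.g. dps = 1.333333 and raster_scale = 1.5 give 1.9999995,
+ // which quantizes to 2.0.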
+ let mut device_font_size = font_size * quantized_scale;
+
+ // Check there is a valid transform that doesn't exceed the font size limit.
+ // Ensure the font is supposed to be rasterized in screen-space.
+ // Only support transforms that can be coerced to simple 2D transforms.
+ // Add texture padding to the rasterized glyph buffer when one anticipates
+ // the glyph will need to be scaled when rendered.
+ let (use_subpixel_aa, transform_glyphs, texture_padding, oversized) = if raster_space != RasterSpace::Screen ||
+ transform.has_perspective_component() || !transform.has_2d_inverse()
+ {
+ (false, false, true, device_font_size > FONT_SIZE_LIMIT)
+ } else if transform.exceeds_2d_scale((FONT_SIZE_LIMIT / device_font_size) as f64) {
+ (false, false, true, true)
+ } else {
+ (true, !transform.is_simple_2d_translation(), false, false)
+ };
+
+ let font_transform = if transform_glyphs {
+ // Get the font transform matrix (skew / scale) from the complete transform.
+ // Fold in the device pixel scale.
+ self.raster_scale = 1.0;
+ FontTransform::from(transform)
+ } else {
+ if oversized {
+ // Font sizes larger than the limit need to be scaled, thus can't use subpixels.
+ // In this case we adjust the font size and raster space to ensure
+ // we rasterize at the limit, to minimize the amount of scaling.
+ let limited_raster_scale = FONT_SIZE_LIMIT / (font_size * dps);
+ device_font_size = FONT_SIZE_LIMIT;
+
+ // Record the raster space the text needs to be snapped in. The original raster
+ // scale would have been too big.
+ self.raster_scale = limited_raster_scale;
+ } else {
+ // Record the raster space the text needs to be snapped in. We may have changed
+ // from RasterSpace::Screen due to a transform with perspective or without a 2d
+ // inverse, or it may have been RasterSpace::Local all along.
+ self.raster_scale = raster_scale;
+ }
+
+ // Rasterize the glyph without any transform
+ FontTransform::identity()
+ };
+
+ // TODO(aosmond): Snapping really ought to happen during scene building
+ // as much as possible. This will allow clips to be already adjusted
+ // based on the snapping requirements of the primitive. This may affect
+ // complex clips that create a different task, and when we rasterize
+ // glyphs without the transform (because the shader doesn't have the
+ // snap offsets to adjust its clip). These rects are fairly conservative
+ // to begin with and do not appear to be causing significant issues at
+ // this time.
+ self.snapped_reference_frame_relative_offset = if transform_glyphs {
+ // Don't touch the reference frame relative offset. We'll let the
+ // shader do the snapping in device pixels.
+ self.reference_frame_relative_offset
+ } else {
+ // TODO(dp): The SurfaceInfo struct needs to be updated to use RasterPixelScale
+ // rather than DevicePixelScale, however this is a large chunk of
+ // work that will be done as a follow up patch.
+ let raster_pixel_scale = RasterPixelScale::new(surface.device_pixel_scale.0);
+
+ // There may be an animation, so snap the reference frame relative
+ // offset such that it excludes the impact, if any.
+ let snap_to_device = SpaceSnapper::new_with_target(
+ surface.raster_spatial_node_index,
+ spatial_node_index,
+ raster_pixel_scale,
+ spatial_tree,
+ );
+ snap_to_device.snap_point(&self.reference_frame_relative_offset.to_point()).to_vector()
+ };
+
+ let mut flags = specified_font.flags;
+ if transform_glyphs {
+ flags |= FontInstanceFlags::TRANSFORM_GLYPHS;
+ }
+ if texture_padding {
+ flags |= FontInstanceFlags::TEXTURE_PADDING;
+ }
+
+ // If the transform, device font size or flags differ from what was used
+ // previously, then the caller of this method needs to know to rebuild the glyphs.
+ let cache_dirty =
+ self.used_font.transform != font_transform ||
+ self.used_font.size != device_font_size.into() ||
+ self.used_font.flags != flags;
+
+ // Construct used font instance from the specified font instance
+ self.used_font = FontInstance {
+ transform: font_transform,
+ size: device_font_size.into(),
+ flags,
+ ..specified_font.clone()
+ };
+
+ // If we are using special estimated background subpixel blending, then
+ // we can allow it regardless of what the surface says.
+ allow_subpixel |= self.used_font.bg_color.a != 0;
+
+ // Disable subpixel AA if the surface disallows it, or if the glyphs are
+ // not being rasterized directly in screen space.
+ if !allow_subpixel || !use_subpixel_aa {
+ self.used_font.disable_subpixel_aa();
+
+ // Disable subpixel positioning for oversized glyphs to avoid
+ // thrashing the glyph cache with many subpixel variations of
+ // big glyph textures. A possible subpixel positioning error
+ // is small relative to the maximum font size and thus should
+ // not be very noticeable.
+ if oversized {
+ self.used_font.disable_subpixel_position();
+ }
+ }
+
+ cache_dirty
+ }
+
+ /// Gets the raster space to use when rendering this primitive.
+ /// Usually this would be the requested raster space. However, if
+ /// the primitive's spatial node or one of its ancestors is being pinch-zoomed,
+ /// then we round the scale. This prevents us from rasterizing glyphs for every
+ /// minor change in zoom level, as that would be too expensive.
+ fn get_raster_space_for_prim(
+ &self,
+ prim_spatial_node_index: SpatialNodeIndex,
+ low_quality_pinch_zoom: bool,
+ device_pixel_scale: DevicePixelScale,
+ spatial_tree: &SpatialTree,
+ ) -> RasterSpace {
+ let prim_spatial_node = spatial_tree.get_spatial_node(prim_spatial_node_index);
+ if prim_spatial_node.is_ancestor_or_self_zooming {
+ if low_quality_pinch_zoom {
+ // In low-quality mode, we set the scale to be 1.0. However, the device-pixel
+ // scale selected for the zoom will be taken into account by the caller of this
+ // function when converting from local -> device pixels. Since in this mode
+ // the device-pixel scale is constant during the zoom, this gives the desired
+ // performance while also allowing the scale to be adjusted to a new factor at
+ // the end of a pinch-zoom.
+ RasterSpace::Local(1.0)
+ } else {
+ let root_spatial_node_index = spatial_tree.root_reference_frame_index();
+
+ // For high-quality mode, we quantize the exact scale factor as before. However,
+ // we want to _undo_ the effect of the device-pixel scale on the picture cache
+ // tiles (which changes now that they are raster roots). Divide the rounded value
+ // by the device-pixel scale so that the local -> device conversion has no effect.
+ let scale_factors = spatial_tree
+ .get_relative_transform(prim_spatial_node_index, root_spatial_node_index)
+ .scale_factors();
+
+ // Round the scale up to the nearest power of 2, but don't exceed 8.
+ let scale = scale_factors.0.max(scale_factors.1).min(8.0).max(1.0);
+ let rounded_up = 2.0f32.powf(scale.log2().ceil());
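+ // e.g. a relative scale of 3.2 is clamped to [1.0, 8.0] and then
+ // rounded up to 4.0.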
+
+ RasterSpace::Local(rounded_up / device_pixel_scale.0)
+ }
+ } else {
+ // Assume that if we have a RasterSpace::Local, it is frequently changing, in which
+ // case we want to undo the device-pixel scale, as we do above.
+ match self.requested_raster_space {
+ RasterSpace::Local(scale) => RasterSpace::Local(scale / device_pixel_scale.0),
+ RasterSpace::Screen => RasterSpace::Screen,
+ }
+ }
+ }
+
+ pub fn request_resources(
+ &mut self,
+ prim_offset: LayoutVector2D,
+ specified_font: &FontInstance,
+ glyphs: &[GlyphInstance],
+ transform: &LayoutToWorldTransform,
+ surface: &SurfaceInfo,
+ spatial_node_index: SpatialNodeIndex,
+ allow_subpixel: bool,
+ low_quality_pinch_zoom: bool,
+ resource_cache: &mut ResourceCache,
+ gpu_cache: &mut GpuCache,
+ spatial_tree: &SpatialTree,
+ scratch: &mut PrimitiveScratchBuffer,
+ ) {
+ let raster_space = self.get_raster_space_for_prim(
+ spatial_node_index,
+ low_quality_pinch_zoom,
+ surface.device_pixel_scale,
+ spatial_tree,
+ );
+
+ let cache_dirty = self.update_font_instance(
+ specified_font,
+ surface,
+ spatial_node_index,
+ transform,
+ allow_subpixel,
+ raster_space,
+ spatial_tree,
+ );
+
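+ // Rebuild the glyph keys if they have not been built yet for this
+ // primitive, or if the font instance changed.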
+ if self.glyph_keys_range.is_empty() || cache_dirty {
+ let subpx_dir = self.used_font.get_subpx_dir();
+
+ let dps = surface.device_pixel_scale.0;
+ let transform = match raster_space {
+ RasterSpace::Local(scale) => FontTransform::new(scale * dps, 0.0, 0.0, scale * dps),
+ RasterSpace::Screen => self.used_font.transform.scale(dps),
+ };
+
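+ // Build one key per glyph, with its offset expressed in device pixels
+ // so the correct subpixel variant can be selected when rasterizing.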
+ self.glyph_keys_range = scratch.glyph_keys.extend(
+ glyphs.iter().map(|src| {
+ let src_point = src.point + prim_offset;
+ let device_offset = transform.transform(&src_point);
+ GlyphKey::new(src.index, device_offset, subpx_dir)
+ }));
+ }
+
+ resource_cache.request_glyphs(
+ self.used_font.clone(),
+ &scratch.glyph_keys[self.glyph_keys_range],
+ gpu_cache,
+ );
+ }
+}
+
+/// This test is linux-only because FontInstancePlatformOptions varies in size by platform.
+#[test]
+#[cfg(target_os = "linux")]
+fn test_struct_sizes() {
+ use std::mem;
+ // The sizes of these structures are critical for performance on a number of
+ // talos stress tests. If you get a failure here on CI, there are two possibilities:
+ // (a) You made a structure smaller than it currently is. Great work! Update the
+ // test expectations and move on.
+ // (b) You made a structure larger. This is not necessarily a problem, but should only
+ // be done with care, and after checking if talos performance regresses badly.
+ assert_eq!(mem::size_of::<TextRun>(), 64, "TextRun size changed");
+ assert_eq!(mem::size_of::<TextRunTemplate>(), 80, "TextRunTemplate size changed");
+ assert_eq!(mem::size_of::<TextRunKey>(), 80, "TextRunKey size changed");
+ assert_eq!(mem::size_of::<TextRunPrimitive>(), 80, "TextRunPrimitive size changed");
+}