author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
commit    2aa4a82499d4becd2284cdb482213d541b8804dd
tree      b80bf8bf13c3766139fbacc530efd0dd9d54394c /gfx/wr/webrender/src/texture_pack
parent    Initial commit.
Adding upstream version 86.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'gfx/wr/webrender/src/texture_pack')
-rw-r--r--  gfx/wr/webrender/src/texture_pack/guillotine.rs | 279
-rw-r--r--  gfx/wr/webrender/src/texture_pack/mod.rs        | 329
-rw-r--r--  gfx/wr/webrender/src/texture_pack/slab.rs       | 356
3 files changed, 964 insertions(+), 0 deletions(-)
diff --git a/gfx/wr/webrender/src/texture_pack/guillotine.rs b/gfx/wr/webrender/src/texture_pack/guillotine.rs
new file mode 100644
index 0000000000..c71cf974e5
--- /dev/null
+++ b/gfx/wr/webrender/src/texture_pack/guillotine.rs
@@ -0,0 +1,279 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+use api::units::{DeviceIntPoint, DeviceIntRect, DeviceIntSize};
+
+//TODO: gather real-world statistics on the bin usage in order to assist the decision
+// on where to place the size thresholds.
+
+/// This is an optimization tweak to enable looking through all the free rectangles in a bin
+/// and choosing the smallest, as opposed to picking the first match.
+const FIND_SMALLEST_AREA: bool = false;
+
+const NUM_BINS: usize = 3;
+/// The minimum size, on each axis, that a rect must have in order to be
+/// classified into a particular bin of free lists.
+const MIN_RECT_AXIS_SIZES: [i32; NUM_BINS] = [1, 16, 32];
+
+#[derive(Debug, Clone, Copy, PartialEq, PartialOrd)]
+struct FreeListBin(u8);
+
+#[derive(Debug, Clone, Copy)]
+struct FreeListIndex(usize);
+
+impl FreeListBin {
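+    // A rect is classified into the highest bin whose minimum axis size both
+    // of its sides meet: e.g. a 20x40 rect lands in bin 1 (both sides >= 16),
+    // while an 8x500 rect lands in bin 0 because its short side is below 16.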
+ fn for_size(size: &DeviceIntSize) -> Self {
+ MIN_RECT_AXIS_SIZES
+ .iter()
+ .enumerate()
+ .rev()
+ .find(|(_, &min_size)| min_size <= size.width && min_size <= size.height)
+ .map(|(id, _)| FreeListBin(id as u8))
+ .expect("Unable to find a bin!")
+ }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct FreeRectSlice(pub u32);
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+struct FreeRect {
+ slice: FreeRectSlice,
+ rect: DeviceIntRect,
+}
+
+/// A texture allocator using the guillotine algorithm.
+///
+/// See sections 2.2 and 2.2.5 in "A Thousand Ways to Pack the Bin - A Practical Approach to Two-
+/// Dimensional Rectangle Bin Packing":
+///
+/// http://clb.demon.fi/files/RectangleBinPack.pdf
+///
+/// This approach was chosen because of its simplicity and good performance.
+///
+/// Note: the allocations are spread across multiple textures, and are also
+/// binned by size (orthogonally to the texture split) in order to speed up the search.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct GuillotineAllocator {
+ bins: [Vec<FreeRect>; NUM_BINS],
+}
+
+impl GuillotineAllocator {
+ pub fn new(initial_size: Option<DeviceIntSize>) -> Self {
+ let mut allocator = GuillotineAllocator {
+ bins: [
+ Vec::new(),
+ Vec::new(),
+ Vec::new(),
+ ],
+ };
+
+ if let Some(initial_size) = initial_size {
+ allocator.push(
+ FreeRectSlice(0),
+ initial_size.into(),
+ );
+ }
+
+ allocator
+ }
+
+ fn push(&mut self, slice: FreeRectSlice, rect: DeviceIntRect) {
+ let id = FreeListBin::for_size(&rect.size).0 as usize;
+ self.bins[id].push(FreeRect {
+ slice,
+ rect,
+ })
+ }
+
+ /// Find a suitable rect in the free list. We choose the smallest such rect
+ /// in terms of area (Best-Area-Fit, BAF).
+ fn find_index_of_best_rect(
+ &self,
+ requested_dimensions: &DeviceIntSize,
+ ) -> Option<(FreeListBin, FreeListIndex)> {
+ let start_bin = FreeListBin::for_size(requested_dimensions);
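+ // Search from the best-fitting bin upwards: a larger bin can always
+ // satisfy a smaller request, at the cost of more wasted area.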
+ (start_bin.0 .. NUM_BINS as u8)
+ .find_map(|id| if FIND_SMALLEST_AREA {
+ let mut smallest_index_and_area = None;
+ for (candidate_index, candidate) in self.bins[id as usize].iter().enumerate() {
+ if requested_dimensions.width > candidate.rect.size.width ||
+ requested_dimensions.height > candidate.rect.size.height
+ {
+ continue;
+ }
+
+ let candidate_area = candidate.rect.size.area();
+ match smallest_index_and_area {
+ Some((_, area)) if candidate_area >= area => continue,
+ _ => smallest_index_and_area = Some((candidate_index, candidate_area)),
+ }
+ }
+
+ smallest_index_and_area
+ .map(|(index, _)| (FreeListBin(id), FreeListIndex(index)))
+ } else {
+ self.bins[id as usize]
+ .iter()
+ .position(|candidate| {
+ requested_dimensions.width <= candidate.rect.size.width &&
+ requested_dimensions.height <= candidate.rect.size.height
+ })
+ .map(|index| (FreeListBin(id), FreeListIndex(index)))
+ })
+ }
+
+ // Choose the split that leaves the single largest free rect
+ // (Min Area Split Rule, MINAS).
+ fn split_guillotine(&mut self, chosen: &FreeRect, requested_dimensions: &DeviceIntSize) {
+ let candidate_free_rect_to_right = DeviceIntRect::new(
+ DeviceIntPoint::new(
+ chosen.rect.origin.x + requested_dimensions.width,
+ chosen.rect.origin.y,
+ ),
+ DeviceIntSize::new(
+ chosen.rect.size.width - requested_dimensions.width,
+ requested_dimensions.height,
+ ),
+ );
+ let candidate_free_rect_to_bottom = DeviceIntRect::new(
+ DeviceIntPoint::new(
+ chosen.rect.origin.x,
+ chosen.rect.origin.y + requested_dimensions.height,
+ ),
+ DeviceIntSize::new(
+ requested_dimensions.width,
+ chosen.rect.size.height - requested_dimensions.height,
+ ),
+ );
+
+ // Guillotine the rectangle.
+ let new_free_rect_to_right;
+ let new_free_rect_to_bottom;
+ if candidate_free_rect_to_right.size.area() > candidate_free_rect_to_bottom.size.area() {
+ new_free_rect_to_right = DeviceIntRect::new(
+ candidate_free_rect_to_right.origin,
+ DeviceIntSize::new(
+ candidate_free_rect_to_right.size.width,
+ chosen.rect.size.height,
+ ),
+ );
+ new_free_rect_to_bottom = candidate_free_rect_to_bottom
+ } else {
+ new_free_rect_to_right = candidate_free_rect_to_right;
+ new_free_rect_to_bottom = DeviceIntRect::new(
+ candidate_free_rect_to_bottom.origin,
+ DeviceIntSize::new(
+ chosen.rect.size.width,
+ candidate_free_rect_to_bottom.size.height,
+ ),
+ )
+ }
+
+ // Add the guillotined rects back to the free list.
+ if !new_free_rect_to_right.is_empty() {
+ self.push(chosen.slice, new_free_rect_to_right);
+ }
+ if !new_free_rect_to_bottom.is_empty() {
+ self.push(chosen.slice, new_free_rect_to_bottom);
+ }
+ }
+
+ pub fn allocate(
+ &mut self, requested_dimensions: &DeviceIntSize
+ ) -> Option<(FreeRectSlice, DeviceIntPoint)> {
+ if requested_dimensions.width == 0 || requested_dimensions.height == 0 {
+ return Some((FreeRectSlice(0), DeviceIntPoint::new(0, 0)));
+ }
+ let (bin, index) = self.find_index_of_best_rect(requested_dimensions)?;
+
+ // Remove the rect from the free list and decide how to guillotine it.
+ let chosen = self.bins[bin.0 as usize].swap_remove(index.0);
+ self.split_guillotine(&chosen, requested_dimensions);
+
+ // Return the result.
+ Some((chosen.slice, chosen.rect.origin))
+ }
+
+ /// Add a new slice to the allocator, and immediately allocate a rect from it.
+ pub fn extend(
+ &mut self,
+ slice: FreeRectSlice,
+ total_size: DeviceIntSize,
+ requested_dimensions: DeviceIntSize,
+ ) {
+ self.split_guillotine(
+ &FreeRect { slice, rect: total_size.into() },
+ &requested_dimensions
+ );
+ }
+}
+
+#[cfg(test)]
+fn random_fill(count: usize, texture_size: i32) -> f32 {
+ use rand::{thread_rng, Rng};
+
+ let total_rect = DeviceIntRect::new(
+ DeviceIntPoint::zero(),
+ DeviceIntSize::new(texture_size, texture_size),
+ );
+ let mut rng = thread_rng();
+ let mut allocator = GuillotineAllocator::new(None);
+
+ // check for empty allocation
+ assert_eq!(
+ allocator.allocate(&DeviceIntSize::new(0, 12)),
+ Some((FreeRectSlice(0), DeviceIntPoint::zero())),
+ );
+
+ let mut slices: Vec<Vec<DeviceIntRect>> = Vec::new();
+ let mut requested_area = 0f32;
+ // fill up the allocator
+ for _ in 0 .. count {
+ let size = DeviceIntSize::new(
+ rng.gen_range(1, texture_size),
+ rng.gen_range(1, texture_size),
+ );
+ requested_area += size.area() as f32;
+
+ match allocator.allocate(&size) {
+ Some((slice, origin)) => {
+ let rect = DeviceIntRect::new(origin, size);
+ assert_eq!(None, slices[slice.0 as usize].iter().find(|r| r.intersects(&rect)));
+ assert!(total_rect.contains_rect(&rect));
+ slices[slice.0 as usize].push(rect);
+ }
+ None => {
+ allocator.extend(FreeRectSlice(slices.len() as u32), total_rect.size, size);
+ let rect = DeviceIntRect::new(DeviceIntPoint::zero(), size);
+ slices.push(vec![rect]);
+ }
+ }
+ }
+ // validate the free rects
+ for (i, free_vecs) in allocator.bins.iter().enumerate() {
+ for fr in free_vecs {
+ assert_eq!(FreeListBin(i as u8), FreeListBin::for_size(&fr.rect.size));
+ assert_eq!(None, slices[fr.slice.0 as usize].iter().find(|r| r.intersects(&fr.rect)));
+ assert!(total_rect.contains_rect(&fr.rect));
+ slices[fr.slice.0 as usize].push(fr.rect);
+ }
+ }
+
+ let allocated_area = slices.len() as f32 * (texture_size * texture_size) as f32;
+ requested_area / allocated_area
+}
+
+#[test]
+fn test_small() {
+ random_fill(100, 100);
+}
+
+#[test]
+fn test_large() {
+ random_fill(1000, 10000);
+}
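
A minimal usage sketch of the fallback path above, mirroring the None branch in
random_fill (alloc_or_grow and its caller-managed slice counter are hypothetical
helpers, not part of this patch):

    fn alloc_or_grow(
        allocator: &mut GuillotineAllocator,
        next_slice: &mut u32,
        texture_size: DeviceIntSize,
        size: DeviceIntSize,
    ) -> (FreeRectSlice, DeviceIntPoint) {
        match allocator.allocate(&size) {
            Some(result) => result,
            None => {
                // No existing free rect fits: open a fresh slice, carve the
                // request out of its top-left corner, and let extend() push
                // the two guillotined remainders onto the free lists.
                let slice = FreeRectSlice(*next_slice);
                *next_slice += 1;
                allocator.extend(slice, texture_size, size);
                (slice, DeviceIntPoint::zero())
            }
        }
    }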
diff --git a/gfx/wr/webrender/src/texture_pack/mod.rs b/gfx/wr/webrender/src/texture_pack/mod.rs
new file mode 100644
index 0000000000..21707a2e96
--- /dev/null
+++ b/gfx/wr/webrender/src/texture_pack/mod.rs
@@ -0,0 +1,329 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+mod guillotine;
+mod slab;
+
+pub use guillotine::*;
+pub use slab::*;
+
+use api::units::*;
+use crate::internal_types::CacheTextureId;
+use euclid::{point2, size2, default::Box2D};
+use smallvec::SmallVec;
+
+pub use etagere::AllocatorOptions as ShelfAllocatorOptions;
+pub use etagere::BucketedAtlasAllocator as BucketedShelfAllocator;
+pub use etagere::AtlasAllocator as ShelfAllocator;
+
+/// ID of an allocation within a given allocator.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct AllocId(pub u32);
+
+pub trait AtlasAllocator {
+ /// Specific parameters of the allocator.
+ type Parameters;
+ /// Create an allocator with the given square size and parameters.
+ fn new(size: i32, parameters: &Self::Parameters) -> Self;
+ /// Allocate a rectangle.
+ fn allocate(&mut self, size: DeviceIntSize) -> Option<(AllocId, DeviceIntRect)>;
+ /// Deallocate a rectangle.
+ fn deallocate(&mut self, id: AllocId);
+ /// Return true if there are no live allocations.
+ fn is_empty(&self) -> bool;
+ /// Allocated area in pixels.
+ fn allocated_space(&self) -> i32;
+ /// Write a debug visualization of the atlas fitting in the provided rectangle.
+ ///
+ /// This is inserted in a larger dump, so it shouldn't contain the XML start/end tags.
+ fn dump_into_svg(&self, rect: &Box2D<f32>, output: &mut dyn std::io::Write) -> std::io::Result<()>;
+}
+
+pub trait AtlasAllocatorList<TextureParameters> {
+ /// Allocate a rectangle.
+ ///
+ /// If allocation fails, the provided callback is invoked to create a new
+ /// texture, a new allocator is added to the list, and the allocation is retried.
+ fn allocate(
+ &mut self,
+ size: DeviceIntSize,
+ texture_alloc_cb: &mut dyn FnMut(DeviceIntSize, &TextureParameters) -> CacheTextureId,
+ ) -> (CacheTextureId, AllocId, DeviceIntRect);
+
+ /// Deallocate a rectangle.
+ fn deallocate(&mut self, texture_id: CacheTextureId, alloc_id: AllocId);
+
+ fn texture_parameters(&self) -> &TextureParameters;
+}
+
+/// A single texture in an allocator list, pairing an atlas allocator with the
+/// ID of the cache texture whose space it manages.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+struct TextureUnit<Allocator> {
+ allocator: Allocator,
+ texture_id: CacheTextureId,
+}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct AllocatorList<Allocator: AtlasAllocator, TextureParameters> {
+ units: SmallVec<[TextureUnit<Allocator>; 1]>,
+ size: i32,
+ atlas_parameters: Allocator::Parameters,
+ texture_parameters: TextureParameters,
+}
+
+impl<Allocator: AtlasAllocator, TextureParameters> AllocatorList<Allocator, TextureParameters> {
+ pub fn new(
+ size: i32,
+ atlas_parameters: Allocator::Parameters,
+ texture_parameters: TextureParameters,
+ ) -> Self {
+ AllocatorList {
+ units: SmallVec::new(),
+ size,
+ atlas_parameters,
+ texture_parameters,
+ }
+ }
+
+ pub fn allocate(
+ &mut self,
+ requested_size: DeviceIntSize,
+ texture_alloc_cb: &mut dyn FnMut(DeviceIntSize, &TextureParameters) -> CacheTextureId,
+ ) -> (CacheTextureId, AllocId, DeviceIntRect) {
+ // Try to allocate from one of the existing textures.
+ for unit in &mut self.units {
+ if let Some((alloc_id, rect)) = unit.allocator.allocate(requested_size) {
+ return (unit.texture_id, alloc_id, rect);
+ }
+ }
+
+ // Need to create a new texture to hold the allocation.
+ let texture_id = texture_alloc_cb(size2(self.size, self.size), &self.texture_parameters);
+ let unit_index = self.units.len();
+
+ self.units.push(TextureUnit {
+ allocator: Allocator::new(self.size, &self.atlas_parameters),
+ texture_id,
+ });
+
+ let (alloc_id, rect) = self.units[unit_index]
+ .allocator
+ .allocate(requested_size)
+ .unwrap();
+
+ (texture_id, alloc_id, rect)
+ }
+
+ pub fn deallocate(&mut self, texture_id: CacheTextureId, alloc_id: AllocId) {
+ let unit = self.units
+ .iter_mut()
+ .find(|unit| unit.texture_id == texture_id)
+ .expect("Unable to find the associated texture array unit");
+
+ unit.allocator.deallocate(alloc_id);
+ }
+
+ pub fn release_empty_textures<'l>(&mut self, texture_dealloc_cb: &'l mut dyn FnMut(CacheTextureId)) {
+ self.units.retain(|unit| {
+ if unit.allocator.is_empty() {
+ texture_dealloc_cb(unit.texture_id);
+
+ false
+ } else {
+ true
+ }
+ });
+ }
+
+ pub fn clear(&mut self, texture_dealloc_cb: &mut dyn FnMut(CacheTextureId)) {
+ for unit in self.units.drain(..) {
+ texture_dealloc_cb(unit.texture_id);
+ }
+ }
+
+ #[allow(dead_code)]
+ pub fn dump_as_svg(&self, output: &mut dyn std::io::Write) -> std::io::Result<()> {
+ use svg_fmt::*;
+
+ let num_arrays = self.units.len() as f32;
+
+ let text_spacing = 15.0;
+ let unit_spacing = 30.0;
+ let texture_size = self.size as f32 / 2.0;
+
+ let svg_w = unit_spacing * 2.0 + texture_size;
+ let svg_h = unit_spacing + num_arrays * (texture_size + text_spacing + unit_spacing);
+
+ writeln!(output, "{}", BeginSvg { w: svg_w, h: svg_h })?;
+
+ // Background.
+ writeln!(output,
+ " {}",
+ rectangle(0.0, 0.0, svg_w, svg_h)
+ .inflate(1.0, 1.0)
+ .fill(rgb(50, 50, 50))
+ )?;
+
+ let mut y = unit_spacing;
+ for unit in &self.units {
+ writeln!(output, " {}", text(unit_spacing, y, format!("{:?}", unit.texture_id)).color(rgb(230, 230, 230)))?;
+
+ let rect = Box2D {
+ min: point2(unit_spacing, y),
+ max: point2(unit_spacing + texture_size, y + texture_size),
+ };
+
+ unit.allocator.dump_into_svg(&rect, output)?;
+
+ y += unit_spacing + texture_size + text_spacing;
+ }
+
+ writeln!(output, "{}", EndSvg)
+ }
+
+ pub fn allocated_space(&self) -> i32 {
+ let mut accum = 0;
+ for unit in &self.units {
+ accum += unit.allocator.allocated_space();
+ }
+
+ accum
+ }
+
+ pub fn allocated_textures(&self) -> usize {
+ self.units.len()
+ }
+}
+
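+// Forward the inherent methods through the trait, so that callers can hold a
+// `dyn AtlasAllocatorList<P>` without naming the concrete allocator type.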
+impl<Allocator: AtlasAllocator, TextureParameters> AtlasAllocatorList<TextureParameters>
+for AllocatorList<Allocator, TextureParameters> {
+ fn allocate(
+ &mut self,
+ requested_size: DeviceIntSize,
+ texture_alloc_cb: &mut dyn FnMut(DeviceIntSize, &TextureParameters) -> CacheTextureId,
+ ) -> (CacheTextureId, AllocId, DeviceIntRect) {
+ self.allocate(requested_size, texture_alloc_cb)
+ }
+
+ fn deallocate(&mut self, texture_id: CacheTextureId, alloc_id: AllocId) {
+ self.deallocate(texture_id, alloc_id);
+ }
+
+ fn texture_parameters(&self) -> &TextureParameters {
+ &self.texture_parameters
+ }
+}
+
+impl AtlasAllocator for BucketedShelfAllocator {
+ type Parameters = ShelfAllocatorOptions;
+
+ fn new(size: i32, options: &Self::Parameters) -> Self {
+ BucketedShelfAllocator::with_options(size2(size, size), options)
+ }
+
+ fn allocate(&mut self, size: DeviceIntSize) -> Option<(AllocId, DeviceIntRect)> {
+ self.allocate(size.to_untyped()).map(|alloc| {
+ (AllocId(alloc.id.serialize()), alloc.rectangle.to_rect().cast_unit())
+ })
+ }
+
+ fn deallocate(&mut self, id: AllocId) {
+ self.deallocate(etagere::AllocId::deserialize(id.0));
+ }
+
+ fn is_empty(&self) -> bool {
+ self.is_empty()
+ }
+
+ fn allocated_space(&self) -> i32 {
+ self.allocated_space()
+ }
+
+ fn dump_into_svg(&self, rect: &Box2D<f32>, output: &mut dyn std::io::Write) -> std::io::Result<()> {
+ self.dump_into_svg(Some(&rect.to_i32().cast_unit()), output)
+ }
+}
+
+impl AtlasAllocator for ShelfAllocator {
+ type Parameters = ShelfAllocatorOptions;
+
+ fn new(size: i32, options: &Self::Parameters) -> Self {
+ ShelfAllocator::with_options(size2(size, size), options)
+ }
+
+ fn allocate(&mut self, size: DeviceIntSize) -> Option<(AllocId, DeviceIntRect)> {
+ self.allocate(size.to_untyped()).map(|alloc| {
+ (AllocId(alloc.id.serialize()), alloc.rectangle.to_rect().cast_unit())
+ })
+ }
+
+ fn deallocate(&mut self, id: AllocId) {
+ self.deallocate(etagere::AllocId::deserialize(id.0));
+ }
+
+ fn is_empty(&self) -> bool {
+ self.is_empty()
+ }
+
+ fn allocated_space(&self) -> i32 {
+ self.allocated_space()
+ }
+
+ fn dump_into_svg(&self, rect: &Box2D<f32>, output: &mut dyn std::io::Write) -> std::io::Result<()> {
+ self.dump_into_svg(Some(&rect.to_i32().cast_unit()), output)
+ }
+}
+
+#[test]
+fn bug_1680769() {
+ let mut allocators: AllocatorList<ShelfAllocator, ()> = AllocatorList::new(
+ 1024,
+ ShelfAllocatorOptions::default(),
+ (),
+ );
+
+ let mut allocations = Vec::new();
+ let mut next_id = CacheTextureId(0);
+ let alloc_cb = &mut |_: DeviceIntSize, _: &()| {
+ let texture_id = next_id;
+ next_id.0 += 1;
+
+ texture_id
+ };
+
+ // Make some allocations, forcing the creation of multiple textures.
+ for _ in 0..50 {
+ allocations.push(allocators.allocate(size2(256, 256), alloc_cb));
+ }
+
+ // Deallocate everything.
+ // This should empty all atlases, but the textures remain allocated because
+ // release_empty_textures hasn't been called yet.
+ for alloc in allocations.drain(..) {
+ allocators.deallocate(alloc.0, alloc.1);
+ }
+
+ // Allocate something else.
+ // Bug 1680769 was causing this allocation to be duplicated and leaked in
+ // all textures.
+ allocations.push(allocators.allocate(size2(8, 8), alloc_cb));
+
+ // Deallocate all known allocations.
+ for alloc in allocations.drain(..) {
+ allocators.deallocate(alloc.0, alloc.1);
+ }
+
+ // If we have leaked items, this won't manage to remove all textures.
+ allocators.release_empty_textures(&mut |_| {});
+
+ assert_eq!(allocators.allocated_textures(), 0);
+}
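
As a sketch of how the generic list composes with the slab allocator from the
next file (assuming it runs inside the webrender crate, where CacheTextureId is
constructible; the 2048/512 sizes are illustrative):

    fn slab_list_roundtrip() {
        let mut list: AllocatorList<SlabAllocator, ()> = AllocatorList::new(
            2048,
            SlabAllocatorParameters { region_size: 512 },
            (),
        );

        let mut next_id = CacheTextureId(0);
        let alloc_cb = &mut |_: DeviceIntSize, _: &()| {
            let id = next_id;
            next_id.0 += 1;
            id
        };

        // The first allocation finds no existing texture, so the callback
        // runs once and texture 0 is created to back the request.
        let (texture_id, alloc_id, rect) = list.allocate(size2(32, 32), alloc_cb);
        assert_eq!(rect.size, size2(32, 32));

        list.deallocate(texture_id, alloc_id);
        list.release_empty_textures(&mut |_| {});
        assert_eq!(list.allocated_textures(), 0);
    }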
diff --git a/gfx/wr/webrender/src/texture_pack/slab.rs b/gfx/wr/webrender/src/texture_pack/slab.rs
new file mode 100644
index 0000000000..6e38383397
--- /dev/null
+++ b/gfx/wr/webrender/src/texture_pack/slab.rs
@@ -0,0 +1,356 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#![deny(unconditional_recursion)]
+
+use super::{AtlasAllocator, AllocId};
+use api::units::{DeviceIntPoint, DeviceIntRect, DeviceIntSize};
+use euclid::{point2, size2, default::Box2D};
+use std::cmp;
+
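+// AllocId bit layout: bits 0..16 hold the region index, bits 16..24 the slot
+// x coordinate, and bits 24..32 the slot y coordinate. For example, region 3
+// at location (2, 1) packs to 0x0102_0003.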
+fn pack_alloc_id(region_index: usize, location: TextureLocation) -> AllocId {
+ AllocId(
+ region_index as u32 & 0xFFFF
+ | (location.0 as u32) << 16
+ | (location.1 as u32) << 24
+ )
+}
+
+fn unpack_alloc_id(id: AllocId) -> (usize, TextureLocation) {
+ (
+ (id.0 & 0xFFFF) as usize,
+ TextureLocation(
+ ((id.0 >> 16) & 0xFF) as u8,
+ ((id.0 >> 24) & 0xFF) as u8,
+ ),
+ )
+}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Copy, Clone, PartialEq)]
+struct SlabSize {
+ width: i32,
+ height: i32,
+}
+
+impl SlabSize {
+ fn invalid() -> SlabSize {
+ SlabSize {
+ width: 0,
+ height: 0,
+ }
+ }
+
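+    // Quantize each dimension up to the next slab size, then map the pair to
+    // a page shape: e.g. a 40x30 request quantizes to 64x32 and, matching no
+    // special case, ends up in a square 64x64 slab.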
+ fn get(size: DeviceIntSize) -> SlabSize {
+ fn quantize_dimension(size: i32) -> i32 {
+ match size {
+ 0 => unreachable!(),
+ 1..=16 => 16,
+ 17..=32 => 32,
+ 33..=64 => 64,
+ 65..=128 => 128,
+ 129..=256 => 256,
+ 257..=512 => 512,
+ _ => panic!("Invalid dimensions for cache!"),
+ }
+ }
+
+ let x_size = quantize_dimension(size.width);
+ let y_size = quantize_dimension(size.height);
+
+ let (width, height) = match (x_size, y_size) {
+ // Special cased rectangular slab pages.
+ (512, 0..=64) => (512, 64),
+ (512, 128) => (512, 128),
+ (512, 256) => (512, 256),
+ (0..=64, 512) => (64, 512),
+ (128, 512) => (128, 512),
+ (256, 512) => (256, 512),
+
+ // If none of those fit, use a square slab size.
+ (x_size, y_size) => {
+ let square_size = cmp::max(x_size, y_size);
+ (square_size, square_size)
+ }
+ };
+
+ SlabSize {
+ width,
+ height,
+ }
+ }
+}
+
+// The x/y location of an allocation within a texture region.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+struct TextureLocation(pub u8, pub u8);
+
+impl TextureLocation {
+ fn new(x: i32, y: i32) -> Self {
+ debug_assert!(x >= 0 && y >= 0 && x < 0x100 && y < 0x100);
+ TextureLocation(x as u8, y as u8)
+ }
+}
+
+/// A region is a rectangular part of a texture cache texture, split into fixed-size slabs.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+struct TextureRegion {
+ index: usize,
+ slab_size: SlabSize,
+ offset: DeviceIntPoint,
+ free_slots: Vec<TextureLocation>,
+ total_slot_count: usize,
+}
+
+impl TextureRegion {
+ fn new(index: usize, offset: DeviceIntPoint) -> Self {
+ TextureRegion {
+ index,
+ slab_size: SlabSize::invalid(),
+ offset,
+ free_slots: Vec::new(),
+ total_slot_count: 0,
+ }
+ }
+
+ // Initialize a region to be an allocator for a specific slab size.
+ fn init(&mut self, slab_size: SlabSize, region_size: i32, empty_regions: &mut usize) {
+ debug_assert!(self.slab_size == SlabSize::invalid());
+ debug_assert!(self.free_slots.is_empty());
+
+ self.slab_size = slab_size;
+ let slots_per_x_axis = region_size / self.slab_size.width;
+ let slots_per_y_axis = region_size / self.slab_size.height;
+
+ // Add each block to a freelist.
+ for y in 0 .. slots_per_y_axis {
+ for x in 0 .. slots_per_x_axis {
+ self.free_slots.push(TextureLocation::new(x, y));
+ }
+ }
+
+ self.total_slot_count = self.free_slots.len();
+ *empty_regions -= 1;
+ }
+
+ // Deinit a region, allowing it to be reused later with
+ // a different slab size.
+ fn deinit(&mut self, empty_regions: &mut usize) {
+ self.slab_size = SlabSize::invalid();
+ self.free_slots.clear();
+ self.total_slot_count = 0;
+ *empty_regions += 1;
+ }
+
+ fn is_empty(&self) -> bool {
+ self.slab_size == SlabSize::invalid()
+ }
+
+ // Attempt to allocate a fixed size block from this region.
+ fn alloc(&mut self) -> Option<(DeviceIntPoint, TextureLocation)> {
+ debug_assert!(self.slab_size != SlabSize::invalid());
+
+ self.free_slots.pop().map(|location| {(
+ point2(
+ self.offset.x + self.slab_size.width * location.0 as i32,
+ self.offset.y + self.slab_size.height * location.1 as i32,
+ ),
+ location,
+ )})
+ }
+
+ // Free a block in this region.
+ fn free(&mut self, location: TextureLocation, empty_regions: &mut usize) {
+ self.free_slots.push(location);
+
+ // If this region is completely unused, deinit it
+ // so that it can become a different slab size
+ // as required.
+ if self.free_slots.len() == self.total_slot_count {
+ self.deinit(empty_regions);
+ }
+ }
+}
+
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct SlabAllocatorParameters {
+ pub region_size: i32,
+}
+
+/// A 2D texture divided into regions.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct SlabAllocator {
+ regions: Vec<TextureRegion>,
+ size: i32,
+ region_size: i32,
+ empty_regions: usize,
+ allocated_space: i32,
+}
+
+impl SlabAllocator {
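+    // The texture is carved into a grid of square regions: e.g. a 2048x2048
+    // texture with a region size of 512 yields a 4x4 grid of 16 regions.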
+ pub fn new(size: i32, options: &SlabAllocatorParameters) -> Self {
+ let regions_per_row = size / options.region_size;
+ let num_regions = (regions_per_row * regions_per_row) as usize;
+
+ let mut regions = Vec::with_capacity(num_regions);
+
+ for index in 0..num_regions {
+ let offset = point2(
+ (index as i32 % regions_per_row) * options.region_size,
+ (index as i32 / regions_per_row) * options.region_size,
+ );
+
+ regions.push(TextureRegion::new(index, offset));
+ }
+
+ SlabAllocator {
+ regions,
+ size,
+ region_size: options.region_size,
+ empty_regions: num_regions,
+ allocated_space: 0,
+ }
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.empty_regions == self.regions.len()
+ }
+
+ pub fn allocated_space(&self) -> i32 {
+ self.allocated_space
+ }
+
+ // Returns the packed allocation id and the allocated rect.
+ pub fn allocate(&mut self, size: DeviceIntSize) -> Option<(AllocId, DeviceIntRect)> {
+ let slab_size = SlabSize::get(size);
+
+ // Keep track of the location of an empty region,
+ // in case we need to select a new empty region
+ // after the loop.
+ let mut empty_region_index = None;
+
+ let allocated_size = size2(slab_size.width, slab_size.height);
+
+ // Run through the existing regions of this size, and see if
+ // we can find a free block in any of them.
+ for (i, region) in self.regions.iter_mut().enumerate() {
+ if region.is_empty() {
+ empty_region_index = Some(i);
+ } else if region.slab_size == slab_size {
+ if let Some((origin, location)) = region.alloc() {
+ // Track the allocated area so that deallocate() can balance it.
+ self.allocated_space += slab_size.width * slab_size.height;
+ return Some((
+ pack_alloc_id(region.index, location),
+ DeviceIntRect {
+ origin,
+ size: allocated_size,
+ }
+ ));
+ }
+ }
+ }
+
+ if let Some(empty_region_index) = empty_region_index {
+ let region = &mut self.regions[empty_region_index];
+ region.init(slab_size, self.region_size, &mut self.empty_regions);
+ let (origin, location) = region.alloc().unwrap();
+
+ self.allocated_space += slab_size.width * slab_size.height;
+ return Some((
+ pack_alloc_id(region.index, location),
+ DeviceIntRect {
+ origin,
+ size: allocated_size,
+ },
+ ))
+ }
+
+ None
+ }
+
+ pub fn deallocate(&mut self, id: AllocId) {
+ let (region_index, location) = unpack_alloc_id(id);
+
+ let region = &mut self.regions[region_index];
+ region.free(location, &mut self.empty_regions);
+
+ self.allocated_space -= region.slab_size.width * region.slab_size.height;
+ }
+
+ pub fn dump_into_svg(&self, rect: &Box2D<f32>, output: &mut dyn std::io::Write) -> std::io::Result<()> {
+ use svg_fmt::*;
+
+ let region_spacing = 5.0;
+ let text_spacing = 15.0;
+ let regions_per_row = (self.size / self.region_size) as usize;
+ let wh = rect.size().width.min(rect.size().height);
+ let region_wh = (wh - region_spacing) / regions_per_row as f32 - region_spacing;
+
+ let x0 = rect.min.x;
+ let y0 = rect.min.y;
+
+ for (idx, region) in self.regions.iter().enumerate() {
+ let slab_size = region.slab_size;
+ let x = x0 + (idx % regions_per_row) as f32 * (region_wh + region_spacing);
+
+ let y = y0 + text_spacing + (idx / regions_per_row) as f32 * (region_wh + region_spacing);
+
+ let texture_background = if region.is_empty() { rgb(30, 30, 30) } else { rgb(40, 40, 130) };
+ writeln!(output, " {}", rectangle(x, y, region_wh, region_wh).inflate(1.0, 1.0).fill(rgb(10, 10, 10)))?;
+ writeln!(output, " {}", rectangle(x, y, region_wh, region_wh).fill(texture_background))?;
+
+ let sw = (slab_size.width as f32 / self.region_size as f32) * region_wh;
+ let sh = (slab_size.height as f32 / self.region_size as f32) * region_wh;
+
+ for slot in &region.free_slots {
+ let sx = x + slot.0 as f32 * sw;
+ let sy = y + slot.1 as f32 * sh;
+
+ // Allocation slot.
+ writeln!(output, " {}", rectangle(sx, sy, sw, sh).inflate(-0.5, -0.5).fill(rgb(30, 30, 30)))?;
+ }
+
+ if slab_size.width != 0 {
+ let region_text = format!("{}x{}", slab_size.width, slab_size.height);
+ let tx = x + 1.0;
+ let ty = y + region_wh - 1.0;
+ writeln!(output, " {}", text(tx, ty, region_text).color(rgb(230, 230, 230)))?;
+ }
+ }
+
+ Ok(())
+ }
+}
+
+impl AtlasAllocator for SlabAllocator {
+ type Parameters = SlabAllocatorParameters;
+
+ fn new(size: i32, options: &Self::Parameters) -> Self {
+ SlabAllocator::new(size, options)
+ }
+
+ fn allocate(&mut self, size: DeviceIntSize) -> Option<(AllocId, DeviceIntRect)> {
+ self.allocate(size)
+ }
+
+ fn deallocate(&mut self, id: AllocId) {
+ self.deallocate(id);
+ }
+
+ fn is_empty(&self) -> bool {
+ self.is_empty()
+ }
+
+ fn allocated_space(&self) -> i32 {
+ self.allocated_space()
+ }
+
+ fn dump_into_svg(&self, rect: &Box2D<f32>, output: &mut dyn std::io::Write) -> std::io::Result<()> {
+ self.dump_into_svg(rect, output)
+ }
+}
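
A short sketch of the quantization behaviour in practice (illustrative sizes,
under the same crate-internal assumptions as the code above):

    fn slab_quantization() {
        let mut allocator = SlabAllocator::new(
            2048,
            &SlabAllocatorParameters { region_size: 512 },
        );

        // A 100x20 request quantizes to 128x32 and then to a square 128x128
        // slab, so the returned rect is larger than what was asked for.
        let (id, rect) = allocator.allocate(size2(100, 20)).unwrap();
        assert_eq!(rect.size, size2(128, 128));

        // Freeing the only live slot deinits its region, so the allocator
        // reports itself empty again.
        allocator.deallocate(id);
        assert!(allocator.is_empty());
    }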