Diffstat
-rw-r--r--third_party/rust/wpf-gpu-raster/.cargo-checksum.json1
-rw-r--r--third_party/rust/wpf-gpu-raster/.github/workflows/coverage.yml23
-rw-r--r--third_party/rust/wpf-gpu-raster/.github/workflows/rust.yml39
-rw-r--r--third_party/rust/wpf-gpu-raster/CHANGES.md22
-rw-r--r--third_party/rust/wpf-gpu-raster/Cargo.toml21
-rw-r--r--third_party/rust/wpf-gpu-raster/LICENSE23
-rw-r--r--third_party/rust/wpf-gpu-raster/README.md22
-rw-r--r--third_party/rust/wpf-gpu-raster/examples/draw.rs354
-rw-r--r--third_party/rust/wpf-gpu-raster/examples/obj-output.rs26
-rw-r--r--third_party/rust/wpf-gpu-raster/examples/simple.rs11
-rw-r--r--third_party/rust/wpf-gpu-raster/notes8
-rw-r--r--third_party/rust/wpf-gpu-raster/src/aacoverage.rs647
-rw-r--r--third_party/rust/wpf-gpu-raster/src/aarasterizer.rs1768
-rw-r--r--third_party/rust/wpf-gpu-raster/src/bezier.rs990
-rw-r--r--third_party/rust/wpf-gpu-raster/src/c_bindings.rs158
-rw-r--r--third_party/rust/wpf-gpu-raster/src/fix.rs9
-rw-r--r--third_party/rust/wpf-gpu-raster/src/geometry_sink.rs92
-rw-r--r--third_party/rust/wpf-gpu-raster/src/helpers.rs55
-rw-r--r--third_party/rust/wpf-gpu-raster/src/hwrasterizer.rs1455
-rw-r--r--third_party/rust/wpf-gpu-raster/src/hwvertexbuffer.rs3075
-rw-r--r--third_party/rust/wpf-gpu-raster/src/lib.rs669
-rw-r--r--third_party/rust/wpf-gpu-raster/src/matrix.rs37
-rw-r--r--third_party/rust/wpf-gpu-raster/src/notes12
-rw-r--r--third_party/rust/wpf-gpu-raster/src/nullable_ref.rs53
-rw-r--r--third_party/rust/wpf-gpu-raster/src/real.rs163
-rw-r--r--third_party/rust/wpf-gpu-raster/src/tri_rasterize.rs190
-rw-r--r--third_party/rust/wpf-gpu-raster/src/types.rs181
27 files changed, 10104 insertions, 0 deletions
diff --git a/third_party/rust/wpf-gpu-raster/.cargo-checksum.json b/third_party/rust/wpf-gpu-raster/.cargo-checksum.json
new file mode 100644
index 0000000000..b5e98f7bb0
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{".github/workflows/coverage.yml":"90aaa068c16cb778b24badaff78baf2a313637780a723be09596abde0f4c827a",".github/workflows/rust.yml":"905954be896d052ced621eedb9d5b9d35795490f27071ac1147e75ac3b3711ec","CHANGES.md":"5f54e553a1c4ef21c5be6109b25df9d1d63c4547627723fe044c73dbddf0db2f","Cargo.toml":"c4f220ebc481f7b1db1909f32c5e95a94f665b40943713f084547d9df2f8c29c","LICENSE":"ae48df11a335dc1a615f4f938b69cba73bcf4485c4f97af49b38efb0f216353b","README.md":"e14b7ddbd29b6f87d956921999da1cf7bc3add0166cacf21e8b1ac1d9092a90d","examples/draw.rs":"52fee9e2f2c11e1c891b30cb460be2a0ec65974f38dc0c08fd48391caf1e4247","examples/obj-output.rs":"6fc549022aa715eee74ea1cafb89ca33189e9dbe914ea6b2c46160049bda68f3","examples/simple.rs":"99fb566414cbd4a0eb69a2774c9780d7cd17e5cdaa14837b280fba319c053f22","notes":"48e636c646d697e213b3a79e31063e11b6ffc7493592d31f3929b1db495870b8","src/aacoverage.rs":"fdadadd208caa986cc386797f937a976b5a315174c7c0782b87c0334d6474a97","src/aarasterizer.rs":"283bed1e22917118f332b24731cb6bd11334a4f0ba0d88821cfeb6b607de12da","src/bezier.rs":"f089ab04e30077ce4e0fe59dfa602948b989aa53d51ad207fbc30c1edd24086b","src/c_bindings.rs":"9c5ab638cf0a14220d93528e37cdc0f6d83277eaa10acf9ce36f32a28e30c02b","src/fix.rs":"7ccf63db5bab4ab0135d92691f7c2272a27866b9792dd55ec98b2d1c1b7c0358","src/geometry_sink.rs":"9025569f77f475a1e47fd470e8f53dcdf88ef57e3a5b8a51268fff892da8b1a7","src/helpers.rs":"220294dac335943518f249c4a27ad803f8226ed62cd780f517e95be6343a1f2f","src/hwrasterizer.rs":"82b2d6d35488a6ad7de4d82f3ee38c6f09f4b6de06b4f98eea61b3abdd72eb62","src/hwvertexbuffer.rs":"f3dd54f17570eb530c9c827b24a53b755a2dfa6028e9b83f9d7a4ba9945c2ecf","src/lib.rs":"6b3ec96d3efeed723af68d663465c04cebdb54764c137f698195880c9dd8c5fd","src/matrix.rs":"1ac44bc5d073f96ab64b1b5c6077fd0d47fe61db8243bd9a55fc91d8eae1dd92","src/notes":"d50d49e0b5660bc6350d8055f25f26700c937558de0af690e1fc4f50ed7e05c9","src/nullable_ref.rs":"789fe0e59b7d4a925faecbf2362be93643ea8382b4424ca0e60866f9bf83c3cd","src/real.rs":"73a2d1a77613364e9514fd7ead4d708a554d2b7343645cdb4cb8a2b3b640e057","src/tri_rasterize.rs":"30821a3465cea3c5ac578590013b530c03ea3010225f580d6cf609e39910c412","src/types.rs":"b840212a99a212ef38211aaf1bd801ec83416569541941d15fd95285d1342b99"},"package":null} \ No newline at end of file
diff --git a/third_party/rust/wpf-gpu-raster/.github/workflows/coverage.yml b/third_party/rust/wpf-gpu-raster/.github/workflows/coverage.yml
new file mode 100644
index 0000000000..3f2e6f2523
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/.github/workflows/coverage.yml
@@ -0,0 +1,23 @@
+name: Coverage
+
+on: [pull_request, push]
+
+jobs:
+ coverage:
+ runs-on: ubuntu-latest
+ env:
+ CARGO_TERM_COLOR: always
+ steps:
+ - uses: actions/checkout@v3
+ - name: Install Rust
+ run: rustup toolchain install stable --component llvm-tools-preview
+ - name: Install cargo-llvm-cov
+ uses: taiki-e/install-action@cargo-llvm-cov
+ - name: Generate code coverage
+ run: cargo llvm-cov --all-features --workspace --lcov --output-path lcov.info
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v3
+ with:
+ token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos
+ files: lcov.info
+ fail_ci_if_error: true
diff --git a/third_party/rust/wpf-gpu-raster/.github/workflows/rust.yml b/third_party/rust/wpf-gpu-raster/.github/workflows/rust.yml
new file mode 100644
index 0000000000..1f38bfc1f7
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/.github/workflows/rust.yml
@@ -0,0 +1,39 @@
+name: Rust
+
+on:
+ push:
+ pull_request:
+
+env:
+ CARGO_TERM_COLOR: always
+
+jobs:
+ build:
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v3
+ - name: Build
+ run: cargo build --verbose
+ - name: Run tests
+ run: cargo test --verbose
+
+ aarch64:
+ runs-on: ubuntu-20.04
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+
+ - name: Install toolchain
+ uses: actions-rs/toolchain@v1
+ with:
+ toolchain: stable
+ override: true
+ target: aarch64-unknown-linux-gnu
+
+ - name: Install cross
+ run: cargo install cross
+
+ - name: Run tests with Neon
+ run: cross test --target aarch64-unknown-linux-gnu
diff --git a/third_party/rust/wpf-gpu-raster/CHANGES.md b/third_party/rust/wpf-gpu-raster/CHANGES.md
new file mode 100644
index 0000000000..8de9816947
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/CHANGES.md
@@ -0,0 +1,22 @@
+Changes for safety
+------------------
+
+`CEdgeStore` is replaced by `typed_arena_nomut::Arena<CEdge>`.
+
+`CEdgeStore` was an arena with built-in stack storage for its first
+allocation. It exposed the allocated buffers to support very fast allocation,
+and supported fast enumeration by returning pointers to each allocation.
+
+`CCoverageBuffer` now also uses a `typed_arena_nomut::Arena`, in this case to
+allocate `CCoverageIntervalBuffer`s. This use does not yet take advantage of
+the built-in stack storage. Storing these in an `Arena` is not ideal; we would
+rather heap-allocate them individually.
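+
+A minimal sketch of the allocation pattern, assuming only the `Arena::new`
+and `Arena::alloc` calls that already appear in this crate:
+
+```rust
+use typed_arena_nomut::Arena;
+
+let arena: Arena<CEdge> = Arena::new();
+// `alloc` returns a shared reference; edges rely on interior mutability
+// (`Cell`) to be linked and updated after allocation.
+let edge = arena.alloc(CEdge::default());
+```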
+
+
+Changes for performance
+-----------------------
+
+Switched from using triangle strips to triangle lists. This lets
+us use a single triangle to draw each line segment, which reduces
+the amount of geometry per line segment from 6 vertices to 3.
+Direct2D also made this switch in later versions.
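+
+A sketch of the per-segment vertex counts, assuming the six strip vertices
+were a four-vertex quad plus two degenerate stitching vertices:
+
+```rust
+let n = 100; // number of line segments
+let strip_vertices = 6 * n; // quad per segment + 2 stitch vertices
+let list_vertices = 3 * n;  // one triangle per segment
+assert_eq!(strip_vertices, 2 * list_vertices);
+```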
diff --git a/third_party/rust/wpf-gpu-raster/Cargo.toml b/third_party/rust/wpf-gpu-raster/Cargo.toml
new file mode 100644
index 0000000000..62e67c78ac
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/Cargo.toml
@@ -0,0 +1,21 @@
+[package]
+name = "wpf-gpu-raster"
+version = "0.1.0"
+edition = "2021"
+license = "MIT"
+
+[profile.release]
+debug = true
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+typed-arena-nomut = "0.1.0"
+
+[dev-dependencies]
+usvg = "0.4"
+euclid = "0.22.6"
+png = "0.17.2"
+
+[features]
+default = ["c_bindings"]
+c_bindings = []
diff --git a/third_party/rust/wpf-gpu-raster/LICENSE b/third_party/rust/wpf-gpu-raster/LICENSE
new file mode 100644
index 0000000000..a616ed188d
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/LICENSE
@@ -0,0 +1,23 @@
+The MIT License (MIT)
+
+Copyright (c) .NET Foundation and Contributors
+
+All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE. \ No newline at end of file
diff --git a/third_party/rust/wpf-gpu-raster/README.md b/third_party/rust/wpf-gpu-raster/README.md
new file mode 100644
index 0000000000..1d4756b13f
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/README.md
@@ -0,0 +1,22 @@
+This is a port of the WPF hardware rasterizer code to Rust. That
+rasterizer is the predecessor to the Direct2D rasterizer. Direct2D
+still uses a similar technique when running on hardware that does not
+support Target Independent Rasterization.
+
+Design
+======
+
+The general algorithm used for rasterization is a vertical sweep of
+the shape that maintains an active edge list. The sweep is done
+at a sub-scanline resolution and results in either:
+ 1. Sub-scanlines being combined in the coverage buffer and output
+ as "complex scans". These are emitted as lines constructed out
+ of triangle lists.
+ 2. Simple trapezoids being recognized in the active edge list
+ and output using a faster simple trapezoid path.
+
+Bezier flattening is done using forward differencing of the error
+metric to compute a flattened curve that matches what a traditional
+adaptive recursive flattening would produce.
+
+
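+Usage
+=====
+
+A minimal sketch, mirroring `examples/simple.rs`:
+
+```rust
+use wpf_gpu_raster::PathBuilder;
+
+fn main() {
+    let mut p = PathBuilder::new();
+    p.move_to(10., 10.);
+    p.line_to(10., 30.);
+    p.line_to(30., 30.);
+    p.close();
+    // Produces a triangle list of OutputVertex { x, y, coverage }.
+    let _vertices = p.rasterize_to_tri_list(0, 0, 100, 100);
+}
+```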
diff --git a/third_party/rust/wpf-gpu-raster/examples/draw.rs b/third_party/rust/wpf-gpu-raster/examples/draw.rs
new file mode 100644
index 0000000000..828449ee72
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/examples/draw.rs
@@ -0,0 +1,354 @@
+/* The rasterization code here is based off of piglit/tests/general/triangle-rasterization.cpp:
+
+ /**************************************************************************
+ *
+ * Copyright 2012 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+*/
+
+#![allow(non_camel_case_types)]
+#![allow(non_snake_case)]
+use euclid::{default::Transform2D, point2};
+use wpf_gpu_raster::{PathBuilder};
+
+
+use std::ops::Index;
+
+
+const WIDTH: u32 = 800;
+const HEIGHT: u32 = 800;
+
+
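+// "Source over destination" for premultiplied-ARGB pixels packed as
+// 0xAARRGGBB: computes src + dst * (255 - src.a) / 255 per channel,
+// two 8-bit channels at a time; the 0x800080 bias plus the (t >> 8)
+// term give a rounded division by 255.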
+fn over(src: u32, dst: u32) -> u32 {
+ let a = src >> 24;
+ let a = 255 - a;
+ let mask = 0xff00ff;
+ let t = (dst & mask) * a + 0x800080;
+ let mut rb = (t + ((t >> 8) & mask)) >> 8;
+ rb &= mask;
+
+ rb += src & mask;
+
+ // saturate
+ rb |= 0x1000100 - ((rb >> 8) & mask);
+ rb &= mask;
+
+ let t = ((dst >> 8) & mask) * a + 0x800080;
+ let mut ag = (t + ((t >> 8) & mask)) >> 8;
+ ag &= mask;
+ ag += (src >> 8) & mask;
+
+ // saturate
+ ag |= 0x1000100 - ((ag >> 8) & mask);
+ ag &= mask;
+
+ (ag << 8) + rb
+}
+
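+// Scales all four channels of a packed ARGB value by `a` (0..=256),
+// again processing two channel pairs per multiply.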
+pub fn alpha_mul(x: u32, a: u32) -> u32 {
+ let mask = 0xFF00FF;
+
+ let src_rb = ((x & mask) * a) >> 8;
+ let src_ag = ((x >> 8) & mask) * a;
+
+ (src_rb & mask) | (src_ag & !mask)
+}
+
+fn write_image(data: &[u32], path: &str) {
+ use std::path::Path;
+ use std::fs::File;
+ use std::io::BufWriter;
+
+ let mut png_data: Vec<u8> = vec![0; (WIDTH * HEIGHT * 3) as usize];
+ let mut i = 0;
+ for pixel in data {
+ png_data[i] = ((pixel >> 16) & 0xff) as u8;
+ png_data[i + 1] = ((pixel >> 8) & 0xff) as u8;
+ png_data[i + 2] = ((pixel >> 0) & 0xff) as u8;
+ i += 3;
+ }
+
+
+ let path = Path::new(path);
+ let file = File::create(path).unwrap();
+ let w = &mut BufWriter::new(file);
+
+ let mut encoder = png::Encoder::new(w, WIDTH, HEIGHT);
+ encoder.set_color(png::ColorType::Rgb);
+ encoder.set_depth(png::BitDepth::Eight);
+ let mut writer = encoder.write_header().unwrap();
+
+ writer.write_image_data(&png_data).unwrap(); // Save
+}
+
+#[derive(Debug)]
+struct Vertex {
+ x: f32,
+ y: f32,
+ coverage: f32
+}
+#[derive(Debug)]
+struct Triangle {
+ v: [Vertex; 3],
+}
+
+impl Index<usize> for Triangle {
+ type Output = Vertex;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self.v[index]
+ }
+}
+
+// D3D11 mandates 8 bit subpixel precision:
+// https://microsoft.github.io/DirectX-Specs/d3d/archive/D3D11_3_FunctionalSpec.htm#CoordinateSnapping
+const FIXED_SHIFT: i32 = 8;
+const FIXED_ONE: f32 = (1 << FIXED_SHIFT) as f32;
+
+/* Proper rounding of float to integer */
+fn iround(mut v: f32) -> i64 {
+ if v > 0.0 {
+ v += 0.5;
+ }
+ if v < 0.0 {
+ v -= 0.5;
+ }
+ return v as i64
+}
+
+/* Based on http://devmaster.net/forums/topic/1145-advanced-rasterization */
+fn rast_triangle(buffer: &mut [u32], stride: usize, tri: &Triangle, color: u32) {
+ let center_offset = -0.5;
+
+ let mut coverage1 = tri[0].coverage;
+ let mut coverage2 = tri[1].coverage;
+ let mut coverage3 = tri[2].coverage;
+
+ /* fixed point coordinates */
+ let mut x1 = iround(FIXED_ONE * (tri[0].x + center_offset));
+ let x2 = iround(FIXED_ONE * (tri[1].x + center_offset));
+ let mut x3 = iround(FIXED_ONE * (tri[2].x + center_offset));
+
+ let mut y1 = iround(FIXED_ONE * (tri[0].y + center_offset));
+ let y2 = iround(FIXED_ONE * (tri[1].y + center_offset));
+ let mut y3 = iround(FIXED_ONE * (tri[2].y + center_offset));
+
+
+ /* Force correct vertex order */
+ let cross = (x2 - x1) * (y3 - y2) - (y2 - y1) * (x3 - x2);
+ if cross > 0 {
+ std::mem::swap(&mut x1, &mut x3);
+ std::mem::swap(&mut y1, &mut y3);
+ // I don't understand why coverage 2 and 3 are swapped instead of 1 and 3
+ std::mem::swap(&mut coverage2, &mut coverage3);
+ } else {
+ std::mem::swap(&mut coverage1, &mut coverage3);
+ }
+
+ /* Deltas */
+ let dx12 = x1 - x2;
+ let dx23 = x2 - x3;
+ let dx31 = x3 - x1;
+
+ let dy12 = y1 - y2;
+ let dy23 = y2 - y3;
+ let dy31 = y3 - y1;
+
+ /* Fixed-point deltas */
+ let fdx12 = dx12 << FIXED_SHIFT;
+ let fdx23 = dx23 << FIXED_SHIFT;
+ let fdx31 = dx31 << FIXED_SHIFT;
+
+ let fdy12 = dy12 << FIXED_SHIFT;
+ let fdy23 = dy23 << FIXED_SHIFT;
+ let fdy31 = dy31 << FIXED_SHIFT;
+
+ /* Bounding rectangle */
+ let mut minx = x1.min(x2).min(x3) >> FIXED_SHIFT;
+ let mut maxx = x1.max(x2).max(x3) >> FIXED_SHIFT;
+
+ let mut miny = y1.min(y2).min(y3) >> FIXED_SHIFT;
+ let mut maxy = y1.max(y2).max(y3) >> FIXED_SHIFT;
+
+ minx = minx.max(0);
+ maxx = maxx.min(WIDTH as i64 - 1);
+
+ miny = miny.max(0);
+ maxy = maxy.min(HEIGHT as i64 - 1);
+
+ /* Half-edge constants */
+ let mut c1 = dy12 * x1 - dx12 * y1;
+ let mut c2 = dy23 * x2 - dx23 * y2;
+ let mut c3 = dy31 * x3 - dx31 * y3;
+
+ /* Correct for top-left filling convention */
+ if dy12 < 0 || (dy12 == 0 && dx12 < 0) { c1 += 1 }
+ if dy23 < 0 || (dy23 == 0 && dx23 < 0) { c2 += 1 }
+ if dy31 < 0 || (dy31 == 0 && dx31 < 0) { c3 += 1 }
+
+ let mut cy1 = c1 + dx12 * (miny << FIXED_SHIFT) - dy12 * (minx << FIXED_SHIFT);
+ let mut cy2 = c2 + dx23 * (miny << FIXED_SHIFT) - dy23 * (minx << FIXED_SHIFT);
+ let mut cy3 = c3 + dx31 * (miny << FIXED_SHIFT) - dy31 * (minx << FIXED_SHIFT);
+
+ /* Perform rasterization */
+ let mut buffer = &mut buffer[miny as usize * stride..];
+ for _y in miny..=maxy {
+ let mut cx1 = cy1;
+ let mut cx2 = cy2;
+ let mut cx3 = cy3;
+
+ for x in minx..=maxx {
+ if cx1 > 0 && cx2 > 0 && cx3 > 0 {
+ // cross is equal to 2*area of the triangle.
+ // we can normalize cx by 2*area to get barycentric coords.
+ let area = cross.abs() as f32;
+ let bary = (cx1 as f32 / area, cx2 as f32/ area, cx3 as f32 / area);
+
+ let coverages = coverage1 * bary.0 + coverage2 * bary.1 + coverage3 * bary.2;
+
+ let color = alpha_mul(color, (coverages * 256. + 0.5) as u32);
+ buffer[x as usize] = over(color, buffer[x as usize]);
+ }
+
+ cx1 -= fdy12;
+ cx2 -= fdy23;
+ cx3 -= fdy31;
+ }
+
+ cy1 += fdx12;
+ cy2 += fdx23;
+ cy3 += fdx31;
+
+ buffer = &mut buffer[stride..];
+ }
+}
+
+
+fn main() {
+ let opt = usvg::Options::default();
+
+ let rtree = usvg::Tree::from_file("tiger.svg", &opt).unwrap();
+
+ let mut image = vec![0; (WIDTH * HEIGHT) as usize];
+ for _ in 0..1 {
+ let mut total_vertex_count = 0;
+ let mut total_time = std::time::Duration::default();
+ for node in rtree.root().descendants() {
+ use usvg::NodeExt;
+ let t = node.transform();
+ let transform = Transform2D::new(
+ t.a as f32, t.b as f32,
+ t.c as f32, t.d as f32,
+ t.e as f32, t.f as f32,
+ );
+
+
+ let s = 1.;
+ if let usvg::NodeKind::Path(ref usvg_path) = *node.borrow() {
+ let color = match usvg_path.fill {
+ Some(ref fill) => {
+ match fill.paint {
+ usvg::Paint::Color(c) => 0xff000000 | (c.red as u32) << 16 | (c.green as u32) << 8 | c.blue as u32,
+ _ => 0xff00ff00,
+ }
+ }
+ None => {
+ continue;
+ }
+ };
+ let mut builder = PathBuilder::new();
+ //dbg!(&usvg_path.segments);
+ for segment in &usvg_path.segments {
+ match *segment {
+ usvg::PathSegment::MoveTo { x, y } => {
+ let p = transform.transform_point(point2(x as f32, y as f32)) * s;
+ builder.move_to(p.x, p.y);
+ }
+ usvg::PathSegment::LineTo { x, y } => {
+ let p = transform.transform_point(point2(x as f32, y as f32)) * s;
+ builder.line_to(p.x, p.y);
+ }
+ usvg::PathSegment::CurveTo { x1, y1, x2, y2, x, y, } => {
+ let c1 = transform.transform_point(point2(x1 as f32, y1 as f32)) * s;
+ let c2 = transform.transform_point(point2(x2 as f32, y2 as f32)) * s;
+ let p = transform.transform_point(point2(x as f32, y as f32)) * s;
+ builder.curve_to(
+ c1.x, c1.y,
+ c2.x, c2.y,
+ p.x, p.y,
+ );
+ }
+ usvg::PathSegment::ClosePath => {
+ builder.close();
+ }
+ }
+ }
+ let start = std::time::Instant::now();
+ let result = builder.rasterize_to_tri_list(0, 0, WIDTH as i32, HEIGHT as i32);
+ let end = std::time::Instant::now();
+ total_time += end - start;
+
+ println!("vertices {}", result.len());
+ total_vertex_count += result.len();
+ if result.len() == 0 {
+ continue;
+ }
+
+ for n in (0..result.len()).step_by(3) {
+ let vertices = {
+ [&result[n], &result[n+1], &result[n+2]]
+ };
+
+ let src = color;
+ let tri = Triangle { v: [
+ Vertex { x: vertices[0].x, y: vertices[0].y, coverage: vertices[0].coverage},
+ Vertex { x: vertices[1].x, y: vertices[1].y, coverage: vertices[1].coverage},
+ Vertex { x: vertices[2].x, y: vertices[2].y, coverage: vertices[2].coverage}
+ ]
+ };
+ rast_triangle(&mut image, WIDTH as usize, &tri, src);
+ }
+ }
+ }
+
+ println!("total vertex count {}, took {}ms", total_vertex_count, total_time.as_secs_f32()*1000.);
+ }
+
+
+ write_image(&image, "out.png");
+ use std::{hash::{Hash, Hasher}, collections::hash_map::DefaultHasher};
+ use crate::*;
+ fn calculate_hash<T: Hash>(t: &T) -> u64 {
+ let mut s = DefaultHasher::new();
+ t.hash(&mut s);
+ s.finish()
+ }
+
+ assert_eq!(calculate_hash(&image),
+ if cfg!(debug_assertions) { 0x5973c52a1c0232f3 } else { 0xf15821a5bebc5ecf});
+
+
+}
diff --git a/third_party/rust/wpf-gpu-raster/examples/obj-output.rs b/third_party/rust/wpf-gpu-raster/examples/obj-output.rs
new file mode 100644
index 0000000000..c7ec8d7686
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/examples/obj-output.rs
@@ -0,0 +1,26 @@
+// Output an .obj file of the generated mesh. Viewable at https://3dviewer.net/
+
+fn output_obj_file(data: &[OutputVertex]) {
+ for v in data {
+ let color = v.coverage;
+ println!("v {} {} {} {} {} {}", v.x, v.y, 0., color, color, color);
+ }
+
+ // output a triangle list face list (OBJ vertex indices are 1-based)
+ for n in (1..data.len()-1).step_by(3) {
+ println!("f {} {} {}", n, n+1, n+2);
+ }
+}
+
+use wpf_gpu_raster::{PathBuilder, OutputVertex};
+fn main() {
+ let mut p = PathBuilder::new();
+ p.move_to(10., 10.0);
+ p.line_to(30., 10.);
+ p.line_to(50., 20.);
+ p.line_to(30., 30.);
+ p.line_to(10., 30.);
+ p.close();
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ output_obj_file(&result)
+}
diff --git a/third_party/rust/wpf-gpu-raster/examples/simple.rs b/third_party/rust/wpf-gpu-raster/examples/simple.rs
new file mode 100644
index 0000000000..5b82cdd941
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/examples/simple.rs
@@ -0,0 +1,11 @@
+use wpf_gpu_raster::PathBuilder;
+fn main() {
+ let mut p = PathBuilder::new();
+ p.move_to(10., 10.);
+ p.line_to(10., 30.);
+ p.line_to(30., 30.);
+ p.line_to(30., 10.);
+ p.close();
+ let _result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ //dbg!(result);
+}
diff --git a/third_party/rust/wpf-gpu-raster/notes b/third_party/rust/wpf-gpu-raster/notes
new file mode 100644
index 0000000000..8550376eac
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/notes
@@ -0,0 +1,8 @@
+bezier flattening
+-----------------
+If we make sure we flatten beziers to integer y values, we can avoid hitting
+the slow complex-coverage path.
+
+We can probably do this by using a Skia-style flattener.
+Normally we compute a series of line segments using partial differencing.
+I think we can adjust each line toward an integer y value by taking small
+partial-difference steps that we can move by.
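+
+sketch of plain forward differencing for one coordinate of a cubic
+a*t^3 + b*t^2 + c*t + d over n uniform steps (hypothetical helper; the idea
+above is to bias the step so each segment lands on an integer y instead):
+
+    fn flatten_fd(a: f32, b: f32, c: f32, d: f32, n: usize) -> Vec<f32> {
+        let h = 1.0 / n as f32;
+        let mut f = d;                          // value at t = 0
+        let mut df = a*h*h*h + b*h*h + c*h;     // first forward difference
+        let mut ddf = 6.*a*h*h*h + 2.*b*h*h;    // second difference
+        let dddf = 6.*a*h*h*h;                  // third difference (constant)
+        let mut out = vec![f];
+        for _ in 0..n {
+            f += df; df += ddf; ddf += dddf;
+            out.push(f);
+        }
+        out
+    }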
diff --git a/third_party/rust/wpf-gpu-raster/src/aacoverage.rs b/third_party/rust/wpf-gpu-raster/src/aacoverage.rs
new file mode 100644
index 0000000000..7b165a9da0
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/src/aacoverage.rs
@@ -0,0 +1,647 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+
+//------------------------------------------------------------------------------
+//
+
+use std::cell::Cell;
+
+use typed_arena_nomut::Arena;
+
+//
+// Description:
+// Coverage buffer implementation
+#[cfg(debug_assertions)]
+use crate::aarasterizer::AssertActiveList;
+use crate::aarasterizer::CEdge;
+use crate::nullable_ref::Ref;
+use crate::types::*;
+//struct CEdge;
+//struct CInactiveEdge;
+
+//-------------------------------------------------------------------------
+//
+// TrapezoidalAA only supports 8x8 mode, so the shifts/masks are all
+// constants. Also, since we must be symmetrical, x and y shifts are
+// merged into one shift unlike the implementation in aarasterizer.
+//
+//-------------------------------------------------------------------------
+
+pub const c_nShift: INT = 3;
+pub const c_nShiftSize: INT = 8;
+pub const c_nShiftSizeSquared: INT = c_nShiftSize * c_nShiftSize;
+pub const c_nHalfShiftSize: INT = 4;
+pub const c_nShiftMask: INT = 7;
+//pub const c_rShiftSize: f32 = 8.0;
+//pub const c_rHalfShiftSize: f32 = 4.0;
+pub const c_rInvShiftSize: f32 = 1.0/8.0;
+pub const c_antiAliasMode: MilAntiAliasMode = MilAntiAliasMode::EightByEight;
+
+//
+// Interval coverage descriptor for our antialiased filler
+//
+
+pub struct CCoverageInterval<'a>
+{
+ pub m_pNext: Cell<Ref<'a, CCoverageInterval<'a>>>, // Next interval (look for sentinel, not NULL)
+ pub m_nPixelX: Cell<INT>, // Interval's left edge (m_pNext->X is the right edge)
+ pub m_nCoverage: Cell<INT>, // Pixel coverage for interval
+}
+
+impl<'a> Default for CCoverageInterval<'a> {
+ fn default() -> Self {
+ Self { m_pNext: Cell::new(unsafe { Ref::null() } ), m_nPixelX: Default::default(), m_nCoverage: Default::default() }
+ }
+}
+
+// Define our on-stack storage use. The 'free' versions are nicely tuned
+// to avoid allocations in most common scenarios, while at the same time
+// not chewing up too much stack space.
+//
+// We make the debug versions small so that we hit the 'grow' cases more
+// frequently, for better testing:
+
+#[cfg(debug_assertions)]
+ // Must be at least 6 now: 4 for the "minus4" logic in hwrasterizer.*, and then
+ // 1 each for the head and tail sentinels (since their allocation doesn't use Grow).
+ const INTERVAL_BUFFER_NUMBER: usize = 8;
+#[cfg(not(debug_assertions))]
+ const INTERVAL_BUFFER_NUMBER: usize = 32;
+
+
+//
+// Allocator structure for the antialiased fill interval data
+//
+
+struct CCoverageIntervalBuffer<'a>
+{
+ m_pNext: Cell<Option<& 'a CCoverageIntervalBuffer<'a>>>,
+ m_interval: [CCoverageInterval<'a>; INTERVAL_BUFFER_NUMBER],
+}
+
+impl<'a> Default for CCoverageIntervalBuffer<'a> {
+ fn default() -> Self {
+ Self { m_pNext: Cell::new(None), m_interval: Default::default() }
+ }
+}
+
+//------------------------------------------------------------------------------
+//
+// Class: CCoverageBuffer
+//
+// Description:
+// Coverage buffer implementation that maintains coverage information
+// for one scanline.
+//
+// This implementation will maintain a linked list of intervals consisting
+// of x value in pixel space and a coverage value that applies for all pixels
+// between pInterval->X and pInterval->Next->X.
+//
+// For example, if we add the following interval (assuming 8x8 anti-aliasing)
+// to the coverage buffer:
+// _____ _____ _____ _____
+// | | | | |
+// | ------------------- |
+// |_____|_____|_____|_____|
+// (0,0) (1,0) (2,0) (3,0) (4,0)
+//
+// Then we will get the following coverage buffer:
+//
+// m_nPixelX: INT_MIN | 0 | 1 | 3 | 4 | INT_MAX
+// m_nCoverage: 0 | 4 | 8 | 4 | 0 | 0xdeadbeef
+// m_pNext: -------->|---->|---->|---->|---->| NULL
+//
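+// As a sketch (not part of the original code), resolving the coverage of a
+// single pixel is then a forward walk of this list:
+//
+//     fn coverage_at<'a>(start: Ref<'a, CCoverageInterval<'a>>, x: INT) -> INT {
+//         let mut interval = start;
+//         // Each interval's coverage applies over [m_nPixelX, next.m_nPixelX).
+//         while interval.m_pNext.get().m_nPixelX.get() <= x {
+//             interval = interval.m_pNext.get();
+//         }
+//         interval.m_nCoverage.get()
+//     }
+//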
+//------------------------------------------------------------------------------
+pub struct CCoverageBuffer<'a>
+{
+ /*
+public:
+ //
+ // Init/Destroy methods
+ //
+
+ VOID Initialize();
+ VOID Destroy();
+
+ //
+ // Setup the buffer so that it can accept another scanline
+ //
+
+ VOID Reset();
+
+ //
+ // Add a subpixel interval to the coverage buffer
+ //
+
+ HRESULT FillEdgesAlternating(
+ __in_ecount(1) const CEdge *pEdgeActiveList,
+ INT nSubpixelYCurrent
+ );
+
+ HRESULT FillEdgesWinding(
+ __in_ecount(1) const CEdge *pEdgeActiveList,
+ INT nSubpixelYCurrent
+ );
+
+ HRESULT AddInterval(INT nSubpixelXLeft, INT nSubpixelXRight);
+
+private:
+
+ HRESULT Grow(
+ __deref_out_ecount(1) CCoverageInterval **ppIntervalNew,
+ __deref_out_ecount(1) CCoverageInterval **ppIntervalEndMinus4
+ );
+
+public:*/
+ pub m_pIntervalStart: Cell<Ref<'a, CCoverageInterval<'a>>>, // Points to list head entry
+
+//private:
+ m_pIntervalNew: Cell<Ref<'a, CCoverageInterval<'a>>>,
+ interval_new_index: Cell<usize>,
+
+ // The Minus4 in the below variable refers to the position at which
+ // we need to Grow the buffer. The buffer is grown once before an
+ // AddInterval, so the Grow has to ensure that there are enough
+ // intervals for the AddInterval worst case which is the following:
+ //
+ // 1 2 3 4
+ // *_____*_____ _____*_____*
+ // | | | | |
+ // | ---|-----------|--- |
+ // |_____|_____|_____|_____|
+ //
+ // Note that the *'s above mark potential insert points in the list,
+ // so we need to ensure that at least 4 intervals can be allocated.
+ //
+
+ m_pIntervalEndMinus4: Cell<Ref<'a, CCoverageInterval<'a>>>,
+
+ // Cache the next-to-last added interval to accelerate insertion.
+ m_pIntervalLast: Cell<Ref<'a, CCoverageInterval<'a>>>,
+
+ m_pIntervalBufferBuiltin: CCoverageIntervalBuffer<'a>,
+ m_pIntervalBufferCurrent: Cell<Ref<'a, CCoverageIntervalBuffer<'a>>>,
+
+ arena: Arena<CCoverageIntervalBuffer<'a>>
+
+ // Disable instrumentation checks within all methods of this class
+ //SET_MILINSTRUMENTATION_FLAGS(MILINSTRUMENTATIONFLAGS_DONOTHING);
+}
+
+impl<'a> Default for CCoverageBuffer<'a> {
+ fn default() -> Self {
+ Self {
+ m_pIntervalStart: Cell::new(unsafe { Ref::null() }),
+ m_pIntervalNew: Cell::new(unsafe { Ref::null() }),
+ m_pIntervalEndMinus4: Cell::new(unsafe { Ref::null() }),
+ m_pIntervalLast: Cell::new(unsafe { Ref::null() }),
+ m_pIntervalBufferBuiltin: Default::default(),
+ m_pIntervalBufferCurrent: unsafe { Cell::new(Ref::null()) },
+ arena: Arena::new(),
+ interval_new_index: Cell::new(0),
+ }
+ }
+}
+
+
+//
+// Inlines
+//
+impl<'a> CCoverageBuffer<'a> {
+//-------------------------------------------------------------------------
+//
+// Function: CCoverageBuffer::AddInterval
+//
+// Synopsis: Add a subpixel resolution interval to the coverage buffer
+//
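+// For example, in 8x8 mode, AddInterval(2, 26) touches pixels 0 through 3,
+// adding coverages 6, 8, 8, and 2 (6 + 8 + 8 + 2 == 26 - 2).
+//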
+//-------------------------------------------------------------------------
+pub fn AddInterval(&'a self, nSubpixelXLeft: INT, nSubpixelXRight: INT) -> HRESULT
+{
+ let hr: HRESULT = S_OK;
+ let mut nPixelXNext: INT;
+ let nPixelXLeft: INT;
+ let nPixelXRight: INT;
+ let nCoverageLeft: INT; // coverage from right edge of pixel for interval start
+ let nCoverageRight: INT; // coverage from left edge of pixel for interval end
+
+ let mut pInterval = self.m_pIntervalStart.get();
+ let mut pIntervalNew = self.m_pIntervalNew.get();
+ let mut interval_new_index = self.interval_new_index.get();
+ let mut pIntervalEndMinus4 = self.m_pIntervalEndMinus4.get();
+
+ // Make sure we have enough room to add two intervals if
+ // necessary:
+
+ if (pIntervalNew >= pIntervalEndMinus4)
+ {
+ IFC!(self.Grow(&mut pIntervalNew, &mut pIntervalEndMinus4, &mut interval_new_index));
+ }
+
+ // Convert interval to pixel space so that we can insert it
+ // into the coverage buffer
+
+ debug_assert!(nSubpixelXLeft < nSubpixelXRight);
+ nPixelXLeft = nSubpixelXLeft >> c_nShift;
+ nPixelXRight = nSubpixelXRight >> c_nShift;
+
+ // Try to resume searching from the last searched interval.
+ if self.m_pIntervalLast.get().m_nPixelX.get() < nPixelXLeft {
+ pInterval = self.m_pIntervalLast.get();
+ }
+
+ // Skip any intervals less than 'nPixelLeft':
+
+ loop {
+ let nextInterval = pInterval.m_pNext.get();
+ nPixelXNext = nextInterval.m_nPixelX.get();
+ if !(nPixelXNext < nPixelXLeft) { break }
+
+ pInterval = nextInterval;
+ }
+
+ // Remember the found interval.
+ self.m_pIntervalLast.set(pInterval);
+
+ // Insert a new interval if necessary:
+
+ if (nPixelXNext != nPixelXLeft)
+ {
+ pIntervalNew.m_nPixelX.set(nPixelXLeft);
+ pIntervalNew.m_nCoverage.set(pInterval.m_nCoverage.get());
+
+ pIntervalNew.m_pNext.set(pInterval.m_pNext.get());
+ pInterval.m_pNext.set(pIntervalNew);
+
+ pInterval = pIntervalNew;
+
+ interval_new_index += 1;
+ pIntervalNew = Ref::new(&Ref::get_ref(self.m_pIntervalBufferCurrent.get()).m_interval[interval_new_index])
+
+ }
+ else
+ {
+ pInterval = (*pInterval).m_pNext.get();
+ }
+
+ //
+ // Compute coverage for left segment as shown by the *'s below
+ //
+ // |_____|_____|_____|_
+ // | | | |
+ // | ***---------- |
+ // |_____|_____|_____|
+ //
+
+ nCoverageLeft = c_nShiftSize - (nSubpixelXLeft & c_nShiftMask);
+
+ // If nCoverageLeft == 0, then the value of nPixelXLeft is wrong
+ // and should have been equal to nPixelXLeft+1.
+ debug_assert!(nCoverageLeft > 0);
+
+ // If we have partial coverage, then ensure that we have a position
+ // for the end of the pixel
+
+ if ((nCoverageLeft < c_nShiftSize || (nPixelXLeft == nPixelXRight))
+ && nPixelXLeft + 1 != pInterval.m_pNext.get().m_nPixelX.get())
+ {
+ pIntervalNew.m_nPixelX.set(nPixelXLeft + 1);
+ pIntervalNew.m_nCoverage.set(pInterval.m_nCoverage.get());
+
+ pIntervalNew.m_pNext.set(pInterval.m_pNext.get());
+ pInterval.m_pNext.set(pIntervalNew);
+
+ interval_new_index += 1;
+ pIntervalNew = Ref::new(&Ref::get_ref(self.m_pIntervalBufferCurrent.get()).m_interval[interval_new_index])
+ }
+
+ //
+ // If the interval only includes one pixel, then the coverage is
+ // nSubpixelXRight - nSubpixelXLeft
+ //
+
+ if (nPixelXLeft == nPixelXRight)
+ {
+ pInterval.m_nCoverage.set(pInterval.m_nCoverage.get() + nSubpixelXRight - nSubpixelXLeft);
+ debug_assert!(pInterval.m_nCoverage.get() <= c_nShiftSize*c_nShiftSize);
+ //goto Cleanup;
+
+ //Cleanup:
+ // Update the coverage buffer new interval
+ self.interval_new_index.set(interval_new_index);
+ self.m_pIntervalNew.set(pIntervalNew);
+ return hr;
+ }
+
+ // Update coverage of current interval
+ pInterval.m_nCoverage.set(pInterval.m_nCoverage.get() + nCoverageLeft);
+ debug_assert!(pInterval.m_nCoverage.get() <= c_nShiftSize*c_nShiftSize);
+
+ // Increase the coverage for any intervals between 'nPixelXLeft'
+ // and 'nPixelXRight':
+
+ loop {
+ let nextInterval = pInterval.m_pNext.get();
+ nPixelXNext = nextInterval.m_nPixelX.get();
+
+ if !(nPixelXNext < nPixelXRight) {
+ break;
+ }
+ pInterval = nextInterval;
+ pInterval.m_nCoverage.set(pInterval.m_nCoverage.get() + c_nShiftSize);
+ debug_assert!(pInterval.m_nCoverage.get() <= c_nShiftSize*c_nShiftSize);
+ }
+
+ // Remember the found interval.
+ self.m_pIntervalLast.set(pInterval);
+
+ // Insert another new interval if necessary:
+
+ if (nPixelXNext != nPixelXRight)
+ {
+ pIntervalNew.m_nPixelX.set(nPixelXRight);
+ pIntervalNew.m_nCoverage.set(pInterval.m_nCoverage.get() - c_nShiftSize);
+
+ pIntervalNew.m_pNext.set(pInterval.m_pNext.get());
+ pInterval.m_pNext.set(pIntervalNew);
+
+ pInterval = pIntervalNew;
+
+ interval_new_index += 1;
+ pIntervalNew = Ref::new(&Ref::get_ref(self.m_pIntervalBufferCurrent.get()).m_interval[interval_new_index])
+ }
+ else
+ {
+ pInterval = pInterval.m_pNext.get();
+ }
+
+ //
+ // Compute coverage for right segment as shown by the *'s below
+ //
+ // |_____|_____|_____|_
+ // | | | |
+ // | ---------**** |
+ // |_____|_____|_____|
+ //
+
+ nCoverageRight = nSubpixelXRight & c_nShiftMask;
+ if (nCoverageRight > 0)
+ {
+ if (nPixelXRight + 1 != (*(*pInterval).m_pNext.get()).m_nPixelX.get())
+ {
+ pIntervalNew.m_nPixelX.set(nPixelXRight + 1);
+ pIntervalNew.m_nCoverage.set(pInterval.m_nCoverage.get());
+
+ pIntervalNew.m_pNext.set(pInterval.m_pNext.get());
+ pInterval.m_pNext.set(pIntervalNew);
+
+ interval_new_index += 1;
+ pIntervalNew = Ref::new(&Ref::get_ref(self.m_pIntervalBufferCurrent.get()).m_interval[interval_new_index])
+ }
+
+ pInterval.m_nCoverage.set((*pInterval).m_nCoverage.get() + nCoverageRight);
+ debug_assert!(pInterval.m_nCoverage.get() <= c_nShiftSize*c_nShiftSize);
+ }
+
+//Cleanup:
+ // Update the coverage buffer new interval
+ self.interval_new_index.set(interval_new_index);
+ self.m_pIntervalNew.set(pIntervalNew);
+
+ return hr;
+}
+
+
+//-------------------------------------------------------------------------
+//
+// Function: CCoverageBuffer::FillEdgesAlternating
+//
+// Synopsis:
+// Given the active edge list for the current scan, do an alternate-mode
+// antialiased fill.
+//
+//-------------------------------------------------------------------------
+pub fn FillEdgesAlternating(&'a self,
+ pEdgeActiveList: Ref<CEdge>,
+ nSubpixelYCurrent: INT
+ ) -> HRESULT
+{
+
+ let hr: HRESULT = S_OK;
+ let mut pEdgeStart: Ref<CEdge> = (*pEdgeActiveList).Next.get();
+ let mut pEdgeEnd: Ref<CEdge>;
+ let mut nSubpixelXLeft: INT;
+ let mut nSubpixelXRight: INT;
+
+ ASSERTACTIVELIST!(pEdgeActiveList, nSubpixelYCurrent);
+
+ while (pEdgeStart.X.get() != INT::MAX)
+ {
+ pEdgeEnd = pEdgeStart.Next.get();
+
+ // We skip empty pairs:
+ nSubpixelXLeft = pEdgeStart.X.get();
+ if (nSubpixelXLeft != pEdgeEnd.X.get())
+ {
+ // We now know we have a non-empty interval. Skip any
+ // empty interior pairs:
+
+ while ({(nSubpixelXRight = pEdgeEnd.X.get()); pEdgeEnd.X == pEdgeEnd.Next.get().X})
+ {
+ pEdgeEnd = pEdgeEnd.Next.get().Next.get();
+ }
+
+ debug_assert!((nSubpixelXLeft < nSubpixelXRight) && (nSubpixelXRight < INT::MAX));
+
+ IFC!(self.AddInterval(nSubpixelXLeft, nSubpixelXRight));
+ }
+
+ // Prepare for the next iteration:
+ pEdgeStart = pEdgeEnd.Next.get();
+ }
+
+//Cleanup:
+ return hr
+
+}
+
+//-------------------------------------------------------------------------
+//
+// Function: CCoverageBuffer::FillEdgesWinding
+//
+// Synopsis:
+// Given the active edge list for the current scan, do a winding-mode
+// antialiased fill.
+//
+//-------------------------------------------------------------------------
+pub fn FillEdgesWinding(&'a self,
+ pEdgeActiveList: Ref<CEdge>,
+ nSubpixelYCurrent: INT
+ ) -> HRESULT
+{
+
+ let hr: HRESULT = S_OK;
+ let mut pEdgeStart: Ref<CEdge> = pEdgeActiveList.Next.get();
+ let mut pEdgeEnd: Ref<CEdge>;
+ let mut nSubpixelXLeft: INT;
+ let mut nSubpixelXRight: INT;
+ let mut nWindingValue: INT;
+
+ ASSERTACTIVELIST!(pEdgeActiveList, nSubpixelYCurrent);
+
+ while (pEdgeStart.X.get() != INT::MAX)
+ {
+ pEdgeEnd = pEdgeStart.Next.get();
+
+ nWindingValue = pEdgeStart.WindingDirection;
+ while ({nWindingValue += pEdgeEnd.WindingDirection; nWindingValue != 0})
+ {
+ pEdgeEnd = pEdgeEnd.Next.get();
+ }
+
+ debug_assert!(pEdgeEnd.X.get() != INT::MAX);
+
+ // We skip empty pairs:
+
+ if ({nSubpixelXLeft = pEdgeStart.X.get(); nSubpixelXLeft != pEdgeEnd.X.get()})
+ {
+ // We now know we have a non-empty interval. Skip any
+ // empty interior pairs:
+
+ while ({nSubpixelXRight = pEdgeEnd.X.get(); nSubpixelXRight == pEdgeEnd.Next.get().X.get()})
+ {
+ pEdgeStart = pEdgeEnd.Next.get();
+ pEdgeEnd = pEdgeStart.Next.get();
+
+ nWindingValue = pEdgeStart.WindingDirection;
+ while ({nWindingValue += pEdgeEnd.WindingDirection; nWindingValue != 0})
+ {
+ pEdgeEnd = pEdgeEnd.Next.get();
+ }
+ }
+
+ debug_assert!((nSubpixelXLeft < nSubpixelXRight) && (nSubpixelXRight < INT::MAX));
+
+ IFC!(self.AddInterval(nSubpixelXLeft, nSubpixelXRight));
+ }
+
+ // Prepare for the next iteration:
+
+ pEdgeStart = pEdgeEnd.Next.get();
+ }
+
+//Cleanup:
+ return hr;//RRETURN(hr);
+}
+
+//-------------------------------------------------------------------------
+//
+// Function: CCoverageBuffer::Initialize
+//
+// Synopsis: Set the coverage buffer to a valid initial state
+//
+//-------------------------------------------------------------------------
+pub fn Initialize(&'a self)
+{
+ self.m_pIntervalBufferBuiltin.m_interval[0].m_nPixelX.set(INT::MIN);
+ self.m_pIntervalBufferBuiltin.m_interval[0].m_nCoverage.set(0);
+ self.m_pIntervalBufferBuiltin.m_interval[0].m_pNext.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[1]));
+
+ self.m_pIntervalBufferBuiltin.m_interval[1].m_nPixelX.set(INT::MAX);
+ self.m_pIntervalBufferBuiltin.m_interval[1].m_nCoverage.set(0xdeadbeef);
+ self.m_pIntervalBufferBuiltin.m_interval[1].m_pNext.set(unsafe { Ref::null() });
+
+ self.m_pIntervalBufferBuiltin.m_pNext.set(None);
+ self.m_pIntervalBufferCurrent.set(Ref::new(&self.m_pIntervalBufferBuiltin));
+
+ self.m_pIntervalStart.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[0]));
+ self.m_pIntervalNew.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[2]));
+ self.interval_new_index.set(2);
+ self.m_pIntervalEndMinus4.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[INTERVAL_BUFFER_NUMBER - 4]));
+ self.m_pIntervalLast.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[1]));
+}
+
+//-------------------------------------------------------------------------
+//
+// Function: CCoverageBuffer::Destroy
+//
+// Synopsis: Free all allocated buffers
+//
+//-------------------------------------------------------------------------
+pub fn Destroy(&mut self)
+{
+ // Free the linked-list of allocations (skipping 'm_pIntervalBufferBuiltin',
+ // which is built into the class):
+
+
+}
+
+
+//-------------------------------------------------------------------------
+//
+// Function: CCoverageBuffer::Reset
+//
+// Synopsis: Reset the coverage buffer
+//
+//-------------------------------------------------------------------------
+pub fn Reset(&'a self)
+{
+ // Reset our coverage structure. Point the head back to the tail,
+ // and reset where the next new entry will be placed:
+
+ self.m_pIntervalBufferBuiltin.m_interval[0].m_pNext.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[1]));
+
+ self.m_pIntervalBufferCurrent.set(Ref::new(&self.m_pIntervalBufferBuiltin));
+ self.m_pIntervalNew.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[2]));
+ self.interval_new_index.set(2);
+ self.m_pIntervalEndMinus4.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[INTERVAL_BUFFER_NUMBER - 4]));
+ self.m_pIntervalLast.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[1]));
+}
+
+//-------------------------------------------------------------------------
+//
+// Function: CCoverageBuffer::Grow
+//
+// Synopsis:
+// Grow our interval buffer.
+//
+//-------------------------------------------------------------------------
+fn Grow(&'a self,
+ ppIntervalNew: &mut Ref<'a, CCoverageInterval<'a>>,
+ ppIntervalEndMinus4: &mut Ref<'a, CCoverageInterval<'a>>,
+ interval_new_index: &mut usize
+ ) -> HRESULT
+{
+ let hr: HRESULT = S_OK;
+ let pIntervalBufferNew = (*self.m_pIntervalBufferCurrent.get()).m_pNext.get();
+
+ let pIntervalBufferNew = pIntervalBufferNew.unwrap_or_else(||
+ {
+ let pIntervalBufferNew = self.arena.alloc(Default::default());
+
+ (*pIntervalBufferNew).m_pNext.set(None);
+ (*self.m_pIntervalBufferCurrent.get()).m_pNext.set(Some(pIntervalBufferNew));
+ pIntervalBufferNew
+ });
+
+ self.m_pIntervalBufferCurrent.set(Ref::new(pIntervalBufferNew));
+
+ self.m_pIntervalNew.set(Ref::new(&(*pIntervalBufferNew).m_interval[2]));
+ self.interval_new_index.set(2);
+ self.m_pIntervalEndMinus4.set(Ref::new(&(*pIntervalBufferNew).m_interval[INTERVAL_BUFFER_NUMBER - 4]));
+
+ *ppIntervalNew = self.m_pIntervalNew.get();
+ *ppIntervalEndMinus4 = self.m_pIntervalEndMinus4.get();
+ *interval_new_index = 2;
+
+ return hr;
+}
+
+}
+/*
+impl<'a> Drop for CCoverageBuffer<'a> {
+ fn drop(&mut self) {
+ self.Destroy();
+ }
+}*/
diff --git a/third_party/rust/wpf-gpu-raster/src/aarasterizer.rs b/third_party/rust/wpf-gpu-raster/src/aarasterizer.rs
new file mode 100644
index 0000000000..ad9617a42d
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/src/aarasterizer.rs
@@ -0,0 +1,1768 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#![allow(unused_parens)]
+
+use std::cell::Cell;
+
+use crate::aacoverage::c_nShift;
+use crate::bezier::CMILBezier;
+use crate::helpers::Int32x32To64;
+use crate::matrix::CMILMatrix;
+use crate::nullable_ref::Ref;
+use crate::real::CFloatFPU;
+//use crate::types::PathPointType::*;
+use crate::types::*;
+use typed_arena_nomut::Arena;
+
+const S_OK: HRESULT = 0;
+
+#[cfg(debug_assertions)]
+macro_rules! EDGE_STORE_STACK_NUMBER {
+ () => {
+ 10
+ };
+}
+#[cfg(debug_assertions)]
+macro_rules! EDGE_STORE_ALLOCATION_NUMBER { () => { 11 }; }
+#[cfg(debug_assertions)]
+macro_rules! INACTIVE_LIST_NUMBER { () => { 12 }; }
+#[cfg(debug_assertions)]
+macro_rules! ENUMERATE_BUFFER_NUMBER { () => { 15 }; }
+
+#[cfg(not(debug_assertions))]
+macro_rules! EDGE_STORE_STACK_NUMBER { () => { (1600 / std::mem::size_of::<CEdge>()) }; }
+#[cfg(not(debug_assertions))]
+macro_rules! EDGE_STORE_ALLOCATION_NUMBER { () => { (4032 / std::mem::size_of::<CEdge>()) as u32 }; }
+#[cfg(not(debug_assertions))]
+macro_rules! INACTIVE_LIST_NUMBER { () => { EDGE_STORE_STACK_NUMBER!() }; }
+#[cfg(not(debug_assertions))]
+macro_rules! ENUMERATE_BUFFER_NUMBER { () => { 32 }; }
+
+macro_rules! ASSERTACTIVELIST {
+ ($list: expr, $y: expr) => {
+ // make sure we use y even in non debug builds
+ _ = $y;
+ #[cfg(debug_assertions)]
+ AssertActiveList($list, $y);
+ };
+}
+pub struct CEdge<'a> {
+ pub Next: Cell<Ref<'a, CEdge<'a>>>, // Next active edge (don't check for NULL,
+ // look for tail sentinel instead)
+ pub X: Cell<INT>, // Current X location
+ pub Dx: INT, // X increment
+ pub Error: Cell<INT>, // Current DDA error
+ pub ErrorUp: INT, // Error increment
+ pub ErrorDown: INT, // Error decrement when the error rolls over
+ pub StartY: INT, // Y-row start
+ pub EndY: INT, // Y-row end
+ pub WindingDirection: INT, // -1 or 1
+}
+
+impl<'a> std::default::Default for CEdge<'a> {
+ fn default() -> Self {
+ Self {
+ Next: Cell::new(unsafe { Ref::null() }),
+ X: Default::default(),
+ Dx: Default::default(),
+ Error: Default::default(),
+ ErrorUp: Default::default(),
+ ErrorDown: Default::default(),
+ StartY: Default::default(),
+ EndY: Default::default(),
+ WindingDirection: Default::default(),
+ }
+ }
+}
+
+// We keep the inactive-array separate from the edge allocations so that
+// we can more easily do in-place sorts on it:
+#[derive(Clone)]
+pub struct CInactiveEdge<'a> {
+ Edge: Ref<'a, CEdge<'a>>, // Associated edge
+ Yx: LONGLONG, // Sorting key, StartY and X packed into an lword
+}
+
+impl<'a> Default for CInactiveEdge<'a> {
+ fn default() -> Self {
+ Self {
+ Edge: unsafe { Ref::null() },
+ Yx: Default::default(),
+ }
+ }
+}
+macro_rules! ASSERTACTIVELISTORDER {
+ ($list: expr) => {
+ #[cfg(debug_assertions)]
+ AssertActiveListOrder($list)
+ };
+}
+
+/**************************************************************************\
+*
+* Function Description:
+*
+* Advance DDA and update active edge list
+*
+* Created:
+*
+* 06/20/2003 ashrafm
+*
+\**************************************************************************/
+pub fn AdvanceDDAAndUpdateActiveEdgeList(nSubpixelYCurrent: INT, pEdgeActiveList: Ref<CEdge>) {
+ let mut outOfOrder = false;
+ let mut pEdgePrevious: Ref<CEdge> = pEdgeActiveList;
+ let mut pEdgeCurrent: Ref<CEdge> = pEdgeActiveList.Next.get();
+ let mut prevX = pEdgePrevious.X.get();
+
+ // Advance DDA and update edge list
+
+ loop {
+ if (pEdgeCurrent.EndY <= nSubpixelYCurrent) {
+ // If we've hit the sentinel, our work here is done:
+
+ if (pEdgeCurrent.EndY == INT::MIN) {
+ break; // ============>
+ }
+ // This edge is stale, remove it from the list:
+
+ pEdgeCurrent = pEdgeCurrent.Next.get();
+ pEdgePrevious.Next.set(pEdgeCurrent);
+ continue; // ============>
+ }
+
+ // Advance the DDA:
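+ // x steps by the integer slope Dx once per subpixel scan; the
+ // fractional part accumulates in Error (kept negative), and when it
+ // crosses zero we carry one pixel and renormalize by ErrorDown.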
+
+ let mut x = pEdgeCurrent.X.get() + pEdgeCurrent.Dx;
+ let mut error = pEdgeCurrent.Error.get() + pEdgeCurrent.ErrorUp;
+ if (error >= 0) {
+ error -= pEdgeCurrent.ErrorDown;
+ x += 1;
+ }
+ pEdgeCurrent.X.set(x);
+ pEdgeCurrent.Error.set(error);
+
+ // Is this entry out-of-order with respect to the previous one?
+ outOfOrder |= (prevX > x);
+
+ // Advance:
+
+ pEdgePrevious = pEdgeCurrent;
+ pEdgeCurrent = pEdgeCurrent.Next.get();
+ prevX = x;
+ }
+
+ // It turns out that having any out-of-order edges at this point
+ // is extremely rare in practice, so only call the bubble-sort
+ // if it's truly needed.
+ //
+ // NOTE: If you're looking at this code trying to fix a bug where
+ // the edges are out of order when the filler is called, do
+ // NOT simply change the code to always do the bubble-sort!
+ // Instead, figure out what caused our 'outOfOrder' logic
+ // above to get messed up.
+
+ if (outOfOrder) {
+ SortActiveEdges(pEdgeActiveList);
+ }
+ ASSERTACTIVELISTORDER!(pEdgeActiveList);
+
+}
+
+//+----------------------------------------------------------------------------
+//
+
+//
+// Description: Code for rasterizing the fill of a path.
+//
+// >>>> Note that some of this code is duplicated in hw\hwrasterizer.cpp,
+// >>>> so changes to this file may need to propagate.
+//
+// pursue reduced code duplication
+//
+
+// This option may potentially increase performance for many
+// paths that have edges adjacent at their top point and cover
+// more than one span. The code has been tested, but performance
+// has not been thoroughly investigated.
+const SORT_EDGES_INCLUDING_SLOPE: bool = false;
+
+/////////////////////////////////////////////////////////////////////////
+// The x86 C compiler insists on making a divide and modulus operation
+// into two DIVs, when it can in fact be done in one. So we use this
+// macro.
+//
+// Note: QUOTIENT_REMAINDER implicitly takes unsigned arguments.
+//
+// QUOTIENT_REMAINDER_64_32 takes a 64-bit numerator and produces 32-bit
+// results.
+
+macro_rules! QUOTIENT_REMAINDER {
+ ($ulNumerator: ident, $ulDenominator: ident, $ulQuotient: ident, $ulRemainder: ident) => {
+ $ulQuotient = (($ulNumerator as ULONG) / ($ulDenominator as ULONG)) as _;
+ $ulRemainder = (($ulNumerator as ULONG) % ($ulDenominator as ULONG)) as _;
+ };
+}
+
+macro_rules! QUOTIENT_REMAINDER_64_32 {
+ ($ulNumerator: ident, $ulDenominator: ident, $ulQuotient: ident, $ulRemainder: ident) => {
+ $ulQuotient = (($ulNumerator as ULONGLONG) / (($ulDenominator as ULONG) as ULONGLONG)) as _;
+ $ulRemainder =
+ (($ulNumerator as ULONGLONG) % (($ulDenominator as ULONG) as ULONGLONG)) as _;
+ };
+}
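+
+// For example, QUOTIENT_REMAINDER!(n, d, q, r) with n = 29 and d = 8 leaves
+// q = 3 and r = 5, computed with a single unsigned divide.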
+
+// SWAP macro:
+macro_rules! SWAP {
+ ($temp: ident, $a: expr, $b: expr) => {
+ $temp = $a;
+ $a = $b;
+ $b = $temp;
+ };
+}
+
+struct CEdgeAllocation {
+ Next: *mut CEdgeAllocation, // Next allocation batch (may be NULL)
+ /*__field_range(<=, EDGE_STORE_ALLOCATION_NUMBER)*/ Count: UINT,
+ EdgeArray: [CEdge<'static>; EDGE_STORE_STACK_NUMBER!()],
+}
+
+impl Default for CEdgeAllocation {
+ fn default() -> Self {
+ Self { Next: NULL(), Count: Default::default(), EdgeArray: [(); EDGE_STORE_STACK_NUMBER!()].map(|_| Default::default()) }
+ }
+}
+/*
+pub struct CEdgeStore {
+ /* __field_range(<=, UINT_MAX - 2) */ TotalCount: UINT, // Total edge count in store
+ /* __field_range(<=, CurrentBuffer->Count) */
+ CurrentRemaining: UINT, // How much room remains in current buffer
+ CurrentBuffer: *mut CEdgeAllocation, // Current buffer
+ CurrentEdge: *mut CEdge<'static>, // Current edge in current buffer
+ Enumerator: *mut CEdgeAllocation, // For enumerating all the edges
+ EdgeHead: CEdgeAllocation, // Our built-in allocation
+}
+
+impl Default for CEdgeStore {
+ fn default() -> Self {
+ Self { TotalCount: Default::default(), CurrentRemaining: Default::default(), CurrentBuffer: NULL(), CurrentEdge: NULL(), Enumerator: NULL(), EdgeHead: Default::default() }
+ }
+}
+
+impl CEdgeStore {
+ pub fn init(&mut self) {
+ self.TotalCount = 0;
+ self.CurrentBuffer = NULL();
+ self.CurrentEdge = NULL();
+ self.Enumerator = NULL();
+ self.CurrentRemaining = EDGE_STORE_STACK_NUMBER!() as u32;
+
+ self.EdgeHead = CEdgeAllocation {
+ Count: EDGE_STORE_STACK_NUMBER!() as u32,
+ // hack to work around limited Default implementation for arrays
+ EdgeArray: [(); EDGE_STORE_STACK_NUMBER!()].map(|_| Default::default()),
+ Next: NULL(),
+ };
+ self.CurrentBuffer = &mut self.EdgeHead;
+ self.CurrentEdge = &mut self.EdgeHead.EdgeArray[0];
+ }
+}
+
+impl Drop for CEdgeStore {
+ fn drop(&mut self) {
+ // Free our allocation list, skipping the head, which is not
+ // dynamically allocated:
+
+ let mut allocation: *mut CEdgeAllocation = self.EdgeHead.Next;
+ while (allocation != NULL()) {
+ let next = unsafe { (*allocation).Next };
+ drop(unsafe { Box::from_raw(allocation) });
+ allocation = next;
+ }
+ }
+}
+
+impl CEdgeStore {
+ pub fn StartEnumeration(&mut self) -> UINT {
+ unsafe {
+ self.Enumerator = &mut self.EdgeHead;
+
+ // Update the count and make sure nothing more gets added (in
+ // part because this Count would have to be re-computed):
+
+ (*self.CurrentBuffer).Count -= self.CurrentRemaining;
+
+ // This will never overflow because NextAddBuffer always ensures that TotalCount has
+ // space remaining to describe the capacity of all new buffers added to the edge list.
+ self.TotalCount += (*self.CurrentBuffer).Count;
+
+ // Prevent this from being called again, because bad things would
+ // happen:
+
+ self.CurrentBuffer = NULL();
+
+ return self.TotalCount;
+ }
+ }
+
+ fn Enumerate(
+ &mut self,
+ /*__deref_out_ecount(*ppEndEdge - *ppStartEdge)*/ ppStartEdge: &mut *mut CEdge,
+ /* __deref_out_ecount(0) */ ppEndEdge: &mut *mut CEdge,
+ ) -> bool {
+ /*
+ unsafe {
+ let enumerator: *mut CEdgeAllocation = self.Enumerator;
+
+ // Might return startEdge == endEdge:
+
+ *ppStartEdge = &mut (*enumerator).EdgeArray[0];
+ *ppEndEdge = (*ppStartEdge).offset((*enumerator).Count as isize);
+
+ self.Enumerator = (*enumerator).Next;
+ return (self.Enumerator != NULL());
+ }*/
+ return true;
+ }
+
+ fn StartAddBuffer(
+ &self,
+ /*__deref_out_ecount(*puRemaining)*/ ppCurrentEdge: &mut *mut CEdge,
+ /* __deref_out_range(==, (this->CurrentRemaining)) */ puRemaining: &mut UINT,
+ ) {
+ panic!()
+ // *ppCurrentEdge = self.CurrentEdge;
+ // *puRemaining = self.CurrentRemaining;
+ }
+
+ fn EndAddBuffer(
+ &mut self,
+ /*__in_ecount(remaining) */ pCurrentEdge: *mut CEdge,
+ /* __range(0, (this->CurrentBuffer->Count)) */ remaining: UINT,
+ ) {
+ panic!();
+ //self.CurrentEdge = pCurrentEdge;
+ //self.CurrentRemaining = remaining;
+ }
+
+ // Disable instrumentation checks within all methods of this class
+ //SET_MILINSTRUMENTATION_FLAGS(MILINSTRUMENTATIONFLAGS_DONOTHING);
+}
+
+/**************************************************************************\
+*
+* Function Description:
+*
+* The edge initializer is out of room in its current 'store' buffer;
+* get it a new one.
+*
+* Created:
+*
+* 03/25/2000 andrewgo
+*
+\**************************************************************************/
+
+impl CEdgeStore {
+ fn NextAddBuffer(
+ &mut self,
+ /*__deref_out_ecount(*puRemaining)*/ ppCurrentEdge: &mut *mut CEdge,
+ puRemaining: &mut UINT,
+ ) -> HRESULT {
+ panic!()
+ /*
+ unsafe {
+ let hr = S_OK;
+
+ let mut cNewTotalCount: u32 = 0;
+
+ // The caller has completely filled up this chunk:
+
+ assert!(*puRemaining == 0);
+
+ // Check to make sure that "TotalCount" will be able to represent the current capacity
+ cNewTotalCount = self.TotalCount + (*self.CurrentBuffer).Count;
+
+ if (cNewTotalCount < self.TotalCount) {
+ return WINCODEC_ERR_VALUEOVERFLOW;
+ }
+
+ // And that it can represent the new capacity as well, with at least 2 to spare.
+ // This "magic" 2 comes from the fact that the usage pattern of this class has callers
+ // needing to allocate space for TotalCount + 2 edges.
+ if (cNewTotalCount + ((EDGE_STORE_ALLOCATION_NUMBER!() + 2) as UINT) < cNewTotalCount) {
+ return WINCODEC_ERR_VALUEOVERFLOW;
+ }
+
+ // We have to grow our data structure by adding a new buffer
+ // and adding it to the list:
+
+ let newBuffer: *mut CEdgeAllocation = Box::into_raw(Box::<CEdgeAllocation>::new(Default::default()));/*static_cast<CEdgeAllocation*>
+ (GpMalloc(Mt(MAARasterizerEdge),
+ sizeof(CEdgeAllocation) +
+ sizeof(CEdge) * (EDGE_STORE_ALLOCATION_NUMBER
+ - EDGE_STORE_STACK_NUMBER)));*/
+ IFCOOM!(newBuffer);
+
+ (*newBuffer).Next = NULL();
+ (*newBuffer).Count = EDGE_STORE_STACK_NUMBER!() as u32;//EDGE_STORE_ALLOCATION_NUMBER!() as u32;
+
+ self.TotalCount = cNewTotalCount;
+
+ (*self.CurrentBuffer).Next = newBuffer;
+ self.CurrentBuffer = newBuffer;
+
+ self.CurrentEdge = &mut (*newBuffer).EdgeArray[0];
+ *ppCurrentEdge = panic!();//self.CurrentEdge;
+ self.CurrentRemaining = EDGE_STORE_STACK_NUMBER!() as u32;//EDGE_STORE_ALLOCATION_NUMBER!();
+ *puRemaining = EDGE_STORE_STACK_NUMBER!() as u32; //EDGE_STORE_ALLOCATION_NUMBER!();
+
+ return hr;
+ }*/
+ }
+}
+*/
+/**************************************************************************\
+*
+* Function Description:
+*
+* Some debug code for verifying the state of the active edge list.
+*
+* Created:
+*
+* 03/25/2000 andrewgo
+*
+\**************************************************************************/
+
+pub fn AssertActiveList(mut list: Ref<CEdge>, yCurrent: INT) -> bool {
+
+ let mut b = true;
+ let mut activeCount = 0;
+
+ assert!((*list).X.get() == INT::MIN);
+ b &= ((*list).X.get() == INT::MIN);
+
+ // Skip the head sentinel:
+
+ list = (*list).Next.get();
+
+ while ((*list).X.get() != INT::MAX) {
+ assert!((*list).X.get() != INT::MIN);
+ b &= ((*list).X.get() != INT::MIN);
+
+ assert!((*list).X <= (*(*list).Next.get()).X);
+ b &= ((*list).X <= (*(*list).Next.get()).X);
+
+ assert!(((*list).StartY <= yCurrent) && (yCurrent < (*list).EndY));
+ b &= (((*list).StartY <= yCurrent) && (yCurrent < (*list).EndY));
+
+ activeCount += 1;
+ list = (*list).Next.get();
+ }
+
+ assert!((*list).X.get() == INT::MAX);
+ b &= ((*list).X.get() == INT::MAX);
+
+ // There should always be a multiple of 2 edges in the active list.
+ //
+ // NOTE: If you hit this assert, do NOT simply comment it out!
+ // It usually means that all the edges didn't get initialized
+ // properly. For every scan-line, there has to be a left edge
+ // and a right edge (or a multiple thereof). So if you give
+ // even a single bad edge to the edge initializer (or you miss
+ // one), you'll probably hit this assert.
+
+ assert!((activeCount & 1) == 0);
+ b &= ((activeCount & 1) == 0);
+
+ return (b);
+
+}
+
+/**************************************************************************\
+*
+* Function Description:
+*
+* Some debug code for verifying the state of the active edge list.
+*
+* Created:
+*
+* 03/25/2000 andrewgo
+*
+\**************************************************************************/
+
+fn AssertActiveListOrder(mut list: Ref<CEdge>) {
+
+ assert!((*list).X.get() == INT::MIN);
+
+ // Skip the head sentinel:
+
+ list = (*list).Next.get();
+
+ while ((*list).X.get() != INT::MAX) {
+ assert!((*list).X.get() != INT::MIN);
+ assert!((*list).X <= (*(*list).Next.get()).X);
+
+ list = (*list).Next.get();
+ }
+
+ assert!((*list).X.get() == INT::MAX);
+}
+
+/**************************************************************************\
+*
+* Function Description:
+*
+* Clip the edge vertically.
+*
+* We've pulled this routine out-of-line from InitializeEdges mainly
+* because it needs to call inline Asm, and when there is in-line
+* Asm in a routine the compiler generally does a much less efficient
+* job optimizing the whole routine. InitializeEdges is rather
+* performance critical, so we avoid polluting the whole routine
+* by having this functionality out-of-line.
+*
+* Created:
+*
+* 03/25/2000 andrewgo
+*
+\**************************************************************************/
+fn ClipEdge(edgeBuffer: &mut CEdge, yClipTopInteger: INT, dMOriginal: INT) {
+ let mut xDelta: INT;
+ let mut error: INT;
+
+ // Cases where bigNumerator will exceed 32-bits in precision
+ // will be rare, but could happen, and we can't fall over
+ // in those cases.
+
+ let dN: INT = edgeBuffer.ErrorDown;
+ let mut bigNumerator: LONGLONG = Int32x32To64(dMOriginal, yClipTopInteger - edgeBuffer.StartY)
+ + (edgeBuffer.Error.get() + dN) as LONGLONG;
+ if (bigNumerator >= 0) {
+ QUOTIENT_REMAINDER_64_32!(bigNumerator, dN, xDelta, error);
+ } else {
+ bigNumerator = -bigNumerator;
+ QUOTIENT_REMAINDER_64_32!(bigNumerator, dN, xDelta, error);
+
+ xDelta = -xDelta;
+ if (error != 0) {
+ xDelta -= 1;
+ error = dN - error;
+ }
+ }
+
+ // Update the edge data structure with the results:
+
+ edgeBuffer.StartY = yClipTopInteger;
+ edgeBuffer.X.set(edgeBuffer.X.get() + xDelta);
+ edgeBuffer.Error.set(error - dN); // Renormalize error
+}
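+
+// For reference, a minimal standalone sketch (not called anywhere) of the
+// work ClipEdge replaces: advancing the DDA one scanline at a time. ClipEdge
+// arrives at the same (x, error) pair with a single 64-bit divide instead of
+// a loop. Plain i32 parameters stand in for the CEdge fields of the same
+// names.
+fn clip_edge_naive_sketch(
+    mut x: i32,
+    mut error: i32, // biased by -errorDown, so it stays negative
+    dx: i32,
+    error_up: i32,
+    error_down: i32,
+    scanlines: i32,
+) -> (i32, i32) {
+    for _ in 0..scanlines {
+        x += dx; // whole-pixel part of the slope
+        error += error_up; // fractional part
+        if (error >= 0) {
+            // The fraction crossed a pixel boundary:
+            x += 1;
+            error -= error_down; // renormalize back below zero
+        }
+    }
+    (x, error)
+}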
+
+pub fn CheckValidRange28_4(x: f32, y: f32) -> bool {
+ //
+ // We want coordinates in the 28.4 range in the end. The matrix we get
+ // as input includes the scale by 16 to get to 28.4, so we want to
+ // ensure that we are in integer range. Assuming a sign bit and
+ // five bits for the rasterizer working range, we want coordinates in the
+    // -2^26 to 2^26 range.
+ //
+ // Note that the 5-bit requirement comes from the
+ // implementation of InitializeEdges.
+ // (See line with "error -= dN * (16 - (xStart & 15))")
+ //
+ // Anti-aliasing uses another c_nShift bits, so we get a
+ // desired range of -2^(26-c_nShift) to 2^(26-c_nShift)
+ //
+ let rPixelCoordinateMax = (1 << (26 - c_nShift)) as f32;
+ let rPixelCoordinateMin = -rPixelCoordinateMax;
+ return x <= rPixelCoordinateMax && x >= rPixelCoordinateMin
+ && y <= rPixelCoordinateMax && y >= rPixelCoordinateMin;
+}
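+
+// A worked instance of the bound above. Assuming the crate's antialiasing
+// shift c_nShift = 3 (the assumed value in this sketch; the real constant
+// lives elsewhere in the crate), the limit is 2^(26-3) = 8_388_608.0, which
+// is exactly representable in an f32:
+fn valid_range_example() {
+    let assumed_shift = 3; // stands in for c_nShift in this sketch
+    let max = (1i32 << (26 - assumed_shift)) as f32;
+    assert!(max == 8_388_608.0);
+    assert!(CheckValidRange28_4(8_000_000.0, 0.0));
+    assert!(!CheckValidRange28_4(9_000_000.0, 0.0));
+}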
+
+//+-----------------------------------------------------------------------------
+//
+// Function: TransformRasterizerPointsTo28_4
+//
+// Synopsis:
+// Transform rasterizer points to 28.4. If overflow occurs, return that
+// information.
+//
+//------------------------------------------------------------------------------
+fn TransformRasterizerPointsTo28_4(
+ pmat: &CMILMatrix,
+ // Transform to take us to 28.4
+ mut pPtsSource: &[MilPoint2F],
+ // Source points
+ mut cPoints: UINT,
+ // Count of points
+ mut pPtsDest: &mut [POINT], // Destination points
+) -> HRESULT {
+ let hr = S_OK;
+
+ debug_assert!(cPoints > 0);
+
+ while {
+ //
+ // Transform coordinates
+ //
+
+ let rPixelX =
+ (pmat.GetM11() * pPtsSource[0].X) + (pmat.GetM21() * pPtsSource[0].Y) + pmat.GetDx();
+ let rPixelY =
+ (pmat.GetM12() * pPtsSource[0].X) + (pmat.GetM22() * pPtsSource[0].Y) + pmat.GetDy();
+
+ //
+ // Check for NaNs or overflow
+ //
+
+ if !CheckValidRange28_4(rPixelX, rPixelY) {
+ return WGXERR_BADNUMBER;
+ }
+
+ //
+ // Assign coordinates
+ //
+
+ pPtsDest[0].x = CFloatFPU::Round(rPixelX);
+ pPtsDest[0].y = CFloatFPU::Round(rPixelY);
+
+ pPtsDest = &mut pPtsDest[1..];
+ pPtsSource = &pPtsSource[1..];
+ cPoints -= 1;
+ cPoints != 0
+ } {}
+
+ return hr;
+}
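+
+// A small worked example of the conversion above: when the combined
+// transform is just the scale by 16 that takes us to 28.4 (an identity user
+// transform), the point (1.5, 2.25) becomes (24, 36). f32::round stands in
+// for CFloatFPU::Round in this sketch; the two may differ on exact .5 ties.
+fn to_28_4_example() {
+    let (x, y) = (1.5f32, 2.25f32);
+    let px = (x * 16.0).round() as i32;
+    let py = (y * 16.0).round() as i32;
+    assert!((px, py) == (24, 36));
+}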
+
+pub fn AppendScaleToMatrix(pmat: &mut CMILMatrix, scaleX: REAL, scaleY: REAL) {
+ pmat.SetM11(pmat.GetM11() * scaleX);
+ pmat.SetM21(pmat.GetM21() * scaleX);
+ pmat.SetM12(pmat.GetM12() * scaleY);
+ pmat.SetM22(pmat.GetM22() * scaleY);
+ pmat.SetDx(pmat.GetDx() * scaleX);
+ pmat.SetDy(pmat.GetDy() * scaleY);
+}
+
+/**************************************************************************\
+*
+* Function Description:
+*
+* Add edges to the edge list.
+*
+* Created:
+*
+* 03/25/2000 andrewgo
+*
+\**************************************************************************/
+
+pub struct CInitializeEdgesContext<'a> {
+ pub MaxY: INT, // Maximum 'y' found, should be INT_MIN on
+ // first call to 'InitializeEdges'
+ pub ClipRect: Option<&'a RECT>, // Bounding clip rectangle in 28.4 format
+ pub Store: &'a Arena<CEdge<'a>>, // Where to stick the edges
+ pub AntiAliasMode: MilAntiAliasMode,
+}
+
+impl<'a> CInitializeEdgesContext<'a> {
+ pub fn new(store: &'a Arena<CEdge<'a>>) -> Self {
+ CInitializeEdgesContext { MaxY: Default::default(), ClipRect: Default::default(), Store: store, AntiAliasMode: MilAntiAliasMode::None }
+ }
+}
+
+fn InitializeEdges(
+ pEdgeContext: &mut CInitializeEdgesContext,
+ /*__inout_ecount(vertexCount)*/
+ mut pointArray: &mut [POINT], // Points to a 28.4 array of size 'vertexCount'
+ // Note that we may modify the contents!
+ /*__in_range(>=, 2)*/ vertexCount: UINT,
+) -> HRESULT {
+ // Disable instrumentation checks for this function
+ //SET_MILINSTRUMENTATION_FLAGS(MILINSTRUMENTATIONFLAGS_DONOTHING);
+
+ let hr = S_OK;
+
+ let mut xStart;
+ let mut yStart;
+ let mut yStartInteger;
+ let mut yEndInteger;
+ let mut dMOriginal;
+ let mut dM: i32;
+ let mut dN: i32;
+ let mut dX: i32;
+ let mut errorUp: i32;
+ let mut quotient: i32;
+ let mut remainder: i32;
+ let mut error: i32;
+ let mut windingDirection;
+ //let mut edgeBuffer: *mut CEdge = NULL();
+ let bufferCount: UINT = 0;
+ let mut yClipTopInteger;
+ let mut yClipTop;
+ let mut yClipBottom;
+ let mut xClipLeft;
+ let mut xClipRight;
+
+ let mut yMax = pEdgeContext.MaxY;
+ let _store = &mut pEdgeContext.Store;
+ let clipRect = pEdgeContext.ClipRect;
+
+ let mut edgeCount = vertexCount - 1;
+ assert!(edgeCount >= 1);
+
+ if let Some(clipRect) = clipRect {
+ yClipTopInteger = clipRect.top >> 4;
+ yClipTop = clipRect.top;
+ yClipBottom = clipRect.bottom;
+ xClipLeft = clipRect.left;
+ xClipRight = clipRect.right;
+
+ assert!(yClipBottom > 0);
+ assert!(yClipTop <= yClipBottom);
+ } else {
+ yClipBottom = 0;
+ yClipTopInteger = INT::MIN >> c_nShift;
+
+ // These 3 values are only used when clipRect is non-NULL
+ yClipTop = 0;
+ xClipLeft = 0;
+ xClipRight = 0;
+ }
+
+ if (pEdgeContext.AntiAliasMode != MilAntiAliasMode::None) {
+ // If antialiasing, apply the supersampling scaling here before we
+ // calculate the DDAs. We do this here and not in the Matrix
+ // transform we give to FixedPointPathEnumerate mainly so that the
+ // Bezier flattener can continue to operate in its optimal 28.4
+ // format.
+ //
+ // PS#856364-2003/07/01-JasonHa Remove pixel center fixup
+ //
+ // We also apply a half-pixel offset here so that the antialiasing
+ // code can assume that the pixel centers are at half-pixel
+ // coordinates, not on the integer coordinates.
+
+ let mut point = &mut *pointArray;
+ let mut i = vertexCount;
+
+ while {
+ point[0].x = (point[0].x + 8) << c_nShift;
+ point[0].y = (point[0].y + 8) << c_nShift;
+ point = &mut point[1..];
+ i -= 1;
+ i != 0
+ } {}
+
+ yClipTopInteger <<= c_nShift;
+ yClipTop <<= c_nShift;
+ yClipBottom <<= c_nShift;
+ xClipLeft <<= c_nShift;
+ xClipRight <<= c_nShift;
+ }
+
+ // Make 'yClipBottom' inclusive by subtracting off one pixel
+ // (keeping in mind that we're in 28.4 device space):
+
+ yClipBottom -= 16;
+
+ // Warm up the store where we keep the edge data:
+
+ //store.StartAddBuffer(&mut edgeBuffer, &mut bufferCount);
+
+ 'outer: loop { loop {
+ // Handle trivial rejection:
+
+ if (yClipBottom >= 0) {
+ // Throw out any edges that are above or below the clipping.
+ // This has to be a precise check, because we assume later
+ // on that every edge intersects in the vertical dimension
+            // with the clip rectangle. That assumption is made in two
+ // places:
+ //
+ // 1. When we sort the edges, we assume either zero edges,
+ // or two or more.
+ // 2. When we start the DDAs, we assume either zero edges,
+ // or that there's at least one scan of DDAs to output.
+ //
+ // Plus, of course, it's less efficient if we let things
+ // through.
+ //
+ // Note that 'yClipBottom' is inclusive:
+
+ let clipHigh = ((pointArray[0]).y <= yClipTop) && ((pointArray[1]).y <= yClipTop);
+
+ let clipLow = ((pointArray[0]).y > yClipBottom) && ((pointArray[1]).y > yClipBottom);
+
+ #[cfg(debug_assertions)]
+ {
+ let (mut yRectTop, mut yRectBottom, y0, y1, yTop, yBottom);
+
+ // Getting the trivial rejection code right is tricky.
+ // So on checked builds let's verify that we're doing it
+ // correctly, using a different approach:
+
+ let mut clipped = false;
+ if let Some(clipRect) = clipRect {
+ yRectTop = clipRect.top >> 4;
+ yRectBottom = clipRect.bottom >> 4;
+ if (pEdgeContext.AntiAliasMode != MilAntiAliasMode::None) {
+ yRectTop <<= c_nShift;
+ yRectBottom <<= c_nShift;
+ }
+ y0 = ((pointArray[0]).y + 15) >> 4;
+ y1 = ((pointArray[1]).y + 15) >> 4;
+ yTop = y0.min(y1);
+ yBottom = y0.max(y1);
+
+ clipped = ((yTop >= yRectBottom) || (yBottom <= yRectTop));
+ }
+
+ assert!(clipped == (clipHigh || clipLow));
+ }
+
+ if (clipHigh || clipLow) {
+ break; // ======================>
+ }
+
+ if (edgeCount > 1) {
+ // Here we'll collapse two edges down to one if both are
+ // to the left or to the right of the clipping rectangle.
+
+ if (((pointArray[0]).x < xClipLeft)
+ && ((pointArray[1]).x < xClipLeft)
+ && ((pointArray[2]).x < xClipLeft))
+ {
+ // Note this is one reason why 'pointArray' can't be 'const':
+
+ pointArray[1] = pointArray[0];
+
+ break; // ======================>
+ }
+
+ if (((pointArray[0]).x > xClipRight)
+ && ((pointArray[1]).x > xClipRight)
+ && ((pointArray[2]).x > xClipRight))
+ {
+ // Note this is one reason why 'pointArray' can't be 'const':
+
+ pointArray[1] = pointArray[0];
+
+ break; // ======================>
+ }
+ }
+ }
+
+ dM = (pointArray[1]).x - (pointArray[0]).x;
+ dN = (pointArray[1]).y - (pointArray[0]).y;
+
+ if (dN >= 0) {
+ // The vector points downward:
+
+ xStart = (pointArray[0]).x;
+ yStart = (pointArray[0]).y;
+
+ yStartInteger = (yStart + 15) >> 4;
+ yEndInteger = ((pointArray[1]).y + 15) >> 4;
+
+ windingDirection = 1;
+ } else {
+ // The vector points upward, so we have to essentially
+ // 'swap' the end points:
+
+ dN = -dN;
+ dM = -dM;
+
+ xStart = (pointArray[1]).x;
+ yStart = (pointArray[1]).y;
+
+ yStartInteger = (yStart + 15) >> 4;
+ yEndInteger = ((pointArray[0]).y + 15) >> 4;
+
+ windingDirection = -1;
+ }
+
+ // The edgeBuffer must span an integer y-value in order to be
+ // added to the edgeBuffer list. This serves to get rid of
+ // horizontal edges, which cause trouble for our divides.
+
+ if (yEndInteger > yStartInteger) {
+ yMax = yMax.max(yEndInteger);
+
+ dMOriginal = dM;
+ if (dM < 0) {
+ dM = -dM;
+ if (dM < dN)
+ // Can't be '<='
+ {
+ dX = -1;
+ errorUp = dN - dM;
+ } else {
+ QUOTIENT_REMAINDER!(dM, dN, quotient, remainder);
+
+ dX = -quotient;
+ errorUp = remainder;
+ if (remainder > 0) {
+ dX = -quotient - 1;
+ errorUp = dN - remainder;
+ }
+ }
+ } else {
+ if (dM < dN) {
+ dX = 0;
+ errorUp = dM;
+ } else {
+ QUOTIENT_REMAINDER!(dM, dN, quotient, remainder);
+
+ dX = quotient;
+ errorUp = remainder;
+ }
+ }
+
+ error = -1; // Error is initially zero (add dN - 1 for
+ // the ceiling, but subtract off dN so that
+ // we can check the sign instead of comparing
+ // to dN)
+
+ if ((yStart & 15) != 0) {
+ // Advance to the next integer y coordinate
+
+ let mut i = 16 - (yStart & 15);
+ while i != 0 {
+ xStart += dX;
+ error += errorUp;
+ if (error >= 0)
+ {
+ error -= dN;
+ xStart += 1;
+ }
+ i -= 1;
+ }
+ }
+
+ if ((xStart & 15) != 0) {
+ error -= dN * (16 - (xStart & 15));
+ xStart += 15; // We'll want the ceiling in just a bit...
+ }
+
+ xStart >>= 4;
+ error >>= 4;
+
+ if (bufferCount == 0) {
+ //IFC!(store.NextAddBuffer(&mut edgeBuffer, &mut bufferCount));
+ }
+
+ let mut edge = CEdge {
+ Next: Cell::new(unsafe { Ref::null() } ),
+ X: Cell::new(xStart),
+ Dx: dX,
+ Error: Cell::new(error),
+ ErrorUp: errorUp,
+ ErrorDown: dN,
+ WindingDirection: windingDirection,
+ StartY: yStartInteger,
+ EndY: yEndInteger,// Exclusive of end
+ };
+
+ assert!(error < 0);
+
+ // Here we handle the case where the edge starts above the
+ // clipping rectangle, and we need to jump down in the 'y'
+ // direction to the first unclipped scan-line.
+ //
+ // Consequently, we advance the DDA here:
+
+ if (yClipTopInteger > yStartInteger) {
+ assert!(edge.EndY > yClipTopInteger);
+
+ ClipEdge(&mut edge, yClipTopInteger, dMOriginal);
+ }
+
+ // Advance to handle the next edge:
+
+ //edgeBuffer = unsafe { edgeBuffer.offset(1) };
+ pEdgeContext.Store.alloc(edge);
+ //bufferCount -= 1;
+ }
+ break;
+ }
+ pointArray = &mut pointArray[1..];
+ edgeCount -= 1;
+ if edgeCount == 0 {
+ break 'outer;
+ }
+ }
+
+ // We're done with this batch. Let the store know how many edges
+ // we ended up with:
+
+ //store.EndAddBuffer(edgeBuffer, bufferCount);
+
+ pEdgeContext.MaxY = yMax;
+
+ return hr;
+}
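+
+// Worked numbers for the quotient/remainder setup above (a sketch, not
+// called by the rasterizer): an edge advancing dM = 7 units of x per dN = 3
+// units of y steps dX = 2 whole pixels per scanline with errorUp = 1. For
+// dM = -7 the code folds the remainder so that errorUp stays non-negative,
+// giving dX = -3 and errorUp = 2, since -7 = (-3 * 3) + 2.
+fn dda_setup_example() {
+    // Positive-slope case:
+    assert!((7i32 / 3, 7i32 % 3) == (2, 1));
+
+    // Negative case, folded the same way InitializeEdges does:
+    let (q, r) = (-7i32 / 3, -7i32 % 3); // Rust truncates: (-2, -1)
+    let (dx, error_up) = if (r != 0) { (q - 1, 3 + r) } else { (q, 0) };
+    assert!((dx, error_up) == (-3, 2));
+}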
+
+/**************************************************************************\
+*
+* Function Description:
+*
+* Does complete parameter checking on the 'types' array of a path.
+*
+* Created:
+*
+* 03/25/2000 andrewgo
+*
+\**************************************************************************/
+fn ValidatePathTypes(typesArray: &[BYTE], mut count: INT) -> bool {
+ let mut types = typesArray;
+
+ if (count == 0) {
+ return (true);
+ }
+
+ loop {
+ // The first point in every subpath has to be an unadorned
+ // 'start' point:
+
+ if ((types[0] & PathPointTypePathTypeMask) != PathPointTypeStart) {
+ TraceTag!((tagMILWarning, "Bad subpath start"));
+ return (false);
+ }
+
+ // Advance to the first point after the 'start' point:
+ count -= 1;
+ if (count == 0) {
+ TraceTag!((tagMILWarning, "Path ended after start-path"));
+ return (false);
+ }
+
+ if ((types[1] & PathPointTypePathTypeMask) == PathPointTypeStart) {
+ TraceTag!((tagMILWarning, "Can't have a start followed by a start!"));
+ return (false);
+ }
+
+ // Process runs of lines and Bezier curves:
+
+ loop {
+ match (types[1] & PathPointTypePathTypeMask) {
+ PathPointTypeLine => {
+ types = &types[1..];
+ count -= 1;
+ if (count == 0) {
+ return (true);
+ }
+ }
+
+ PathPointTypeBezier => {
+ if (count < 3) {
+ TraceTag!((
+ tagMILWarning,
+ "Path ended before multiple of 3 Bezier points"
+ ));
+ return (false);
+ }
+
+ if ((types[1] & PathPointTypePathTypeMask) != PathPointTypeBezier) {
+ TraceTag!((tagMILWarning, "Bad subpath start"));
+ return (false);
+ }
+
+ types = &types[1..];
+ count -= 3;
+ if (count == 0) {
+ return (true);
+ }
+ }
+
+ _ => {
+ TraceTag!((tagMILWarning, "Illegal type"));
+ return (false);
+ }
+ }
+
+ // A close-subpath marker or a start-subpath marker marks the
+ // end of a subpath:
+ if !(!((types[0] & PathPointTypeCloseSubpath) != 0)
+ && ((types[1] & PathPointTypePathTypeMask) != PathPointTypeStart)) {
+ types = &types[1..];
+ break;
+ }
+ }
+ }
+}
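+
+// An illustrative input that the validator above accepts (sketch only): one
+// subpath that starts, draws a line, then a Bezier (which accounts for three
+// points, matching the 'count -= 3' above), and closes.
+fn validate_path_types_example() {
+    let types: [BYTE; 5] = [
+        PathPointTypeStart,
+        PathPointTypeLine,
+        PathPointTypeBezier,
+        PathPointTypeBezier,
+        PathPointTypeBezier | PathPointTypeCloseSubpath,
+    ];
+    assert!(ValidatePathTypes(&types, 5));
+}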
+
+/**************************************************************************\
+*
+* Function Description:
+*
+* Some debug code for verifying the path.
+*
+* Created:
+*
+* 03/25/2000 andrewgo
+*
+\**************************************************************************/
+macro_rules! ASSERTPATH {
+ ($types: expr, $points: expr) => {
+ #[cfg(debug_assertions)]
+ AssertPath($types, $points)
+ };
+}
+fn AssertPath(rgTypes: &[BYTE], cPoints: UINT) {
+ // Make sure that the 'types' array is well-formed, otherwise we
+ // may fall over in the FixedPointPathEnumerate function.
+ //
+ // NOTE: If you hit this assert, DO NOT SIMPLY COMMENT THIS Assert OUT!
+ //
+ // Instead, fix the ValidatePathTypes code if it's letting through
+ // valid paths, or (more likely) fix the code that's letting bogus
+ // paths through. The FixedPointPathEnumerate routine has some
+ // subtle assumptions that require the path to be perfectly valid!
+ //
+ // No internal code should be producing invalid paths, and all
+ // paths created by the application must be parameter checked!
+ assert!(ValidatePathTypes(rgTypes, cPoints as INT));
+}
+
+//+----------------------------------------------------------------------------
+//
+// Member:
+// FixedPointPathEnumerate
+//
+// Synopsis:
+//
+// Enumerate the path.
+//
+// NOTE: The 'enumerateFunction' function is allowed to modify the
+// contents of our call-back buffer! (This is mainly done to allow
+// 'InitializeEdges' to be simpler for some clipping trivial
+// rejection cases.)
+//
+// NOTICE-2006/03/22-milesc This function was initially built to be a
+// general path enumeration function. However, we were only using it for
+// one specific purpose... for Initializing edges of a path to be filled.
+// In doing security work, I simplified this function to just do edge
+// initialization. The name is therefore now overly general. I have kept
+// the name to be a reminder that this function has been written to be
+// more general than would otherwise be evident.
+//
+
+pub fn FixedPointPathEnumerate(
+ rgpt: &[POINT],
+ rgTypes: &[BYTE],
+ cPoints: UINT,
+ _matrix: &CMILMatrix,
+ clipRect: Option<&RECT>, // In scaled 28.4 format
+ enumerateContext: &mut CInitializeEdgesContext,
+) -> HRESULT {
+ let hr = S_OK;
+ let mut bufferStart: [POINT; ENUMERATE_BUFFER_NUMBER!()] = [(); ENUMERATE_BUFFER_NUMBER!()].map(|_| Default::default());
+ let mut bezierBuffer: [POINT; 4] = Default::default();
+ let mut buffer: &mut [POINT];
+ let mut bufferSize: usize;
+ let mut startFigure: [POINT; 1] = Default::default();
+ // The current point offset in rgpt
+ let mut iPoint: usize;
+ // The current type offset in rgTypes
+ let mut iType: usize;
+ let mut runSize: usize;
+ let mut thisCount: usize;
+ let mut isMore: bool = false;
+ let mut xLast: INT;
+ let mut yLast: INT;
+
+ ASSERTPATH!(rgTypes, cPoints);
+
+ // Every valid subpath has at least two vertices in it, hence the
+ // check of 'cPoints - 1':
+
+ iPoint = 0;
+ iType = 0;
+
+ assert!(cPoints > 1);
+ while (iPoint < cPoints as usize - 1) {
+ assert!((rgTypes[iType] & PathPointTypePathTypeMask) == PathPointTypeStart);
+ assert!((rgTypes[iType + 1] & PathPointTypePathTypeMask) != PathPointTypeStart);
+
+ // Add the start point to the beginning of the batch, and
+ // remember it for handling the close figure:
+
+ startFigure[0] = rgpt[iPoint];
+
+ bufferStart[0].x = startFigure[0].x;
+ bufferStart[0].y = startFigure[0].y;
+ let bufferStartPtr = bufferStart.as_ptr();
+ buffer = &mut bufferStart[1..];
+ bufferSize = ENUMERATE_BUFFER_NUMBER!() - 1;
+
+ // We need to enter our loop with 'iType' pointing one past
+ // the start figure:
+
+ iPoint += 1;
+ iType += 1;
+
+ while {
+ // Try finding a run of lines:
+
+ if ((rgTypes[iType] & PathPointTypePathTypeMask) == PathPointTypeLine) {
+ runSize = 1;
+
+ while ((iPoint + runSize < cPoints as usize)
+ && ((rgTypes[iType + runSize] & PathPointTypePathTypeMask) == PathPointTypeLine))
+ {
+ runSize += 1;
+ }
+
+ // Okay, we've found a run of lines. Break it up into our
+ // buffer size:
+
+ loop {
+ thisCount = bufferSize.min(runSize);
+
+ buffer[0 .. thisCount].copy_from_slice(&rgpt[iPoint .. iPoint + thisCount]);
+
+                    __analysis_assume!(
+                        buffer + bufferSize == bufferStart + ENUMERATE_BUFFER_NUMBER!()
+                    );
+ assert!(buffer.as_ptr().wrapping_offset(bufferSize as isize) == bufferStartPtr.wrapping_offset(ENUMERATE_BUFFER_NUMBER!()) );
+
+ iPoint += thisCount;
+ iType += thisCount;
+ buffer = &mut buffer[thisCount..];
+ runSize -= thisCount;
+ bufferSize -= thisCount;
+
+ if (bufferSize > 0) {
+ break;
+ }
+
+ xLast = bufferStart[ENUMERATE_BUFFER_NUMBER!() - 1].x;
+ yLast = bufferStart[ENUMERATE_BUFFER_NUMBER!() - 1].y;
+ IFR!(InitializeEdges(
+ enumerateContext,
+ &mut bufferStart,
+ ENUMERATE_BUFFER_NUMBER!()
+ ));
+
+ // Continue the last vertex as the first in the new batch:
+
+ bufferStart[0].x = xLast;
+ bufferStart[0].y = yLast;
+ buffer = &mut bufferStart[1..];
+ bufferSize = ENUMERATE_BUFFER_NUMBER!() - 1;
+                    if runSize == 0 {
+ break;
+ }
+ }
+ } else {
+ assert!(iPoint + 3 <= cPoints as usize);
+ assert!((rgTypes[iType] & PathPointTypePathTypeMask) == PathPointTypeBezier);
+
+ bezierBuffer.copy_from_slice(&rgpt[(iPoint - 1) .. iPoint + 3]);
+
+ // Prepare for the next iteration:
+
+ iPoint += 3;
+ iType += 1;
+
+ // Process the Bezier:
+
+ let mut bezier = CMILBezier::new(&bezierBuffer, clipRect);
+ loop {
+ thisCount = bezier.Flatten(buffer, &mut isMore) as usize;
+
+ __analysis_assume!(
+ buffer + bufferSize == bufferStart + ENUMERATE_BUFFER_NUMBER!()
+ );
+ assert!(buffer.as_ptr().wrapping_offset(bufferSize as isize) == bufferStartPtr.wrapping_offset(ENUMERATE_BUFFER_NUMBER!()));
+
+ buffer = &mut buffer[thisCount..];
+ bufferSize -= thisCount;
+
+ if (bufferSize > 0) {
+ break;
+ }
+
+ xLast = bufferStart[ENUMERATE_BUFFER_NUMBER!() - 1].x;
+ yLast = bufferStart[ENUMERATE_BUFFER_NUMBER!() - 1].y;
+ IFR!(InitializeEdges(
+ enumerateContext,
+ &mut bufferStart,
+ ENUMERATE_BUFFER_NUMBER!()
+ ));
+
+ // Continue the last vertex as the first in the new batch:
+
+ bufferStart[0].x = xLast;
+ bufferStart[0].y = yLast;
+ buffer = &mut bufferStart[1..];
+ bufferSize = ENUMERATE_BUFFER_NUMBER!() - 1;
+ if !isMore {
+ break;
+ }
+ }
+ }
+
+ ((iPoint < cPoints as usize)
+ && ((rgTypes[iType] & PathPointTypePathTypeMask) != PathPointTypeStart))
+ } {}
+
+ // Okay, the subpath is done. But we still have to handle the
+ // 'close figure' (which is implicit for a fill):
+ // Add the close-figure point:
+
+ buffer[0].x = startFigure[0].x;
+ buffer[0].y = startFigure[0].y;
+ bufferSize -= 1;
+
+ // We have to flush anything we might have in the batch, unless
+ // there's only one vertex in there! (The latter case may happen
+ // for the stroke case with no close figure if we just flushed a
+ // batch.)
+ // If we're flattening, we must call the one additional time to
+ // correctly handle closing the subpath, even if there is only
+ // one entry in the batch. The flattening callback handles the
+ // one point case and closes the subpath properly without adding
+ // extraneous points.
+
+ let verticesInBatch = ENUMERATE_BUFFER_NUMBER!() - bufferSize;
+ if (verticesInBatch > 1) {
+ IFR!(InitializeEdges(
+ enumerateContext,
+ &mut bufferStart,
+ (verticesInBatch) as UINT
+ ));
+ }
+ }
+
+ return hr;
+}
+
+/**************************************************************************\
+*
+* Function Description:
+*
+* We want to sort in the inactive list; the primary key is 'y', and
+* the secondary key is 'x'. This routine creates a single LONGLONG
+* key that represents both.
+*
+* Created:
+*
+* 03/25/2000 andrewgo
+*
+\**************************************************************************/
+
+fn YX(x: INT, y: INT, p: &mut LONGLONG) {
+ // Bias 'x' by INT_MAX so that it's effectively unsigned:
+ /*
+ reinterpret_cast<LARGE_INTEGER*>(p)->HighPart = y;
+ reinterpret_cast<LARGE_INTEGER*>(p)->LowPart = x + INT_MAX;
+ */
+ *p = (((y as u64) << 32) | (((x as i64 + i32::MAX as i64) as u64) & 0xffffffff)) as i64;
+}
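+
+// Quick illustration of the packed key (a sketch): y occupies the high 32
+// bits, so any difference in y dominates, and the biased x breaks ties
+// correctly even across sign changes.
+fn yx_key_example() {
+    let (mut a, mut b, mut c) = (0i64, 0i64, 0i64);
+    YX(100, 1, &mut a); // higher edge, large x
+    YX(-5, 2, &mut b); // lower edge, negative x
+    YX(5, 2, &mut c); // lower edge, positive x
+    assert!(a < b); // smaller y sorts first regardless of x
+    assert!(b < c); // equal y: x = -5 sorts before x = 5
+}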
+
+/**************************************************************************\
+*
+* Function Description:
+*
+* Recursive function to quick-sort our inactive edge list. Note that
+* for performance, the results are not completely sorted; an insertion
+* sort has to be run after the quicksort in order to do a lighter-weight
+* sort of the subtables.
+*
+* Created:
+*
+* 03/25/2000 andrewgo
+*
+\**************************************************************************/
+
+const QUICKSORT_THRESHOLD: isize = 8;
+
+fn QuickSortEdges(inactive: &mut [CInactiveEdge],
+ /*__inout_xcount(f - l + 1 elements)*/ f: usize,
+ /*__inout_xcount(array starts at f)*/ l: usize,
+) {
+ let mut e: Ref<CEdge>;
+ let mut y: LONGLONG;
+ let mut first: LONGLONG;
+ let mut second: LONGLONG;
+ let mut last: LONGLONG;
+
+ // Find the median of the first, middle, and last elements:
+
+ let m = f + ((l - f) >> 1);
+
+ SWAP!(y, inactive[f + 1].Yx, inactive[m].Yx);
+ SWAP!(e, inactive[f + 1].Edge, inactive[m].Edge);
+
+    if {second = inactive[f + 1].Yx; second} > {last = inactive[l].Yx; last} {
+ inactive[f + 1].Yx = last;
+ inactive[l].Yx = second;
+
+ SWAP!(e, inactive[f + 1].Edge, inactive[l].Edge);
+ }
+ if {first = inactive[f].Yx; first} > {last = inactive[l].Yx; last} {
+ inactive[f].Yx = last;
+ inactive[l].Yx = first;
+
+ SWAP!(e, inactive[f].Edge, inactive[l].Edge);
+ }
+ if {second = inactive[f + 1].Yx; second} > {first = inactive[f].Yx; first} {
+ inactive[f + 1].Yx = first;
+ inactive[f].Yx = second;
+
+ SWAP!(e, inactive[f + 1].Edge, inactive[f].Edge);
+ }
+
+ // f->Yx is now the desired median, and (f + 1)->Yx <= f->Yx <= l->Yx
+
+ debug_assert!((inactive[f + 1].Yx <= inactive[f].Yx) && (inactive[f].Yx <= inactive[l].Yx));
+
+ let median = inactive[f].Yx;
+
+ let mut i = f + 2;
+ while (inactive[i].Yx < median) {
+ i += 1;
+ }
+
+ let mut j = l - 1;
+ while (inactive[j].Yx > median) {
+ j -= 1;
+ }
+
+ while (i < j) {
+ SWAP!(y, inactive[i].Yx, inactive[j].Yx);
+ SWAP!(e, inactive[i].Edge, inactive[j].Edge);
+
+ while {
+ i = i + 1;
+ inactive[i].Yx < median
+ } {}
+
+ while {
+ j = j - 1 ;
+ inactive[j].Yx > median
+ } {}
+ }
+
+ SWAP!(y, inactive[f].Yx, inactive[j].Yx);
+ SWAP!(e, inactive[f].Edge, inactive[j].Edge);
+
+ let a = j - f;
+ let b = l - j;
+
+ // Use less stack space by recursing on the shorter subtable. Also,
+ // have the less-overhead insertion-sort handle small subtables.
+
+ if (a <= b) {
+ if (a > QUICKSORT_THRESHOLD as usize) {
+ // 'a' is the smallest, so do it first:
+
+ QuickSortEdges(inactive, f, j - 1);
+ QuickSortEdges(inactive, j + 1, l);
+ } else if (b > QUICKSORT_THRESHOLD as usize) {
+ QuickSortEdges(inactive, j + 1, l);
+ }
+ } else {
+ if (b > QUICKSORT_THRESHOLD as usize) {
+ // 'b' is the smallest, so do it first:
+
+            QuickSortEdges(inactive, j + 1, l);
+            QuickSortEdges(inactive, f, j - 1);
+        } else if (a > QUICKSORT_THRESHOLD as usize) {
+            QuickSortEdges(inactive, f, j - 1);
+ }
+ }
+}
+
+/**************************************************************************\
+*
+* Function Description:
+*
+* Do a sort of the inactive table using an insertion-sort. Expects
+* large tables to have already been sorted via quick-sort.
+*
+* Created:
+*
+* 03/25/2000 andrewgo
+*
+\**************************************************************************/
+
+fn InsertionSortEdges(
+ /* __inout_xcount(count forward & -1 back)*/ mut inactive: &mut [CInactiveEdge],
+ mut count: INT,
+) {
+ let mut e: Ref<CEdge>;
+ let mut y: LONGLONG;
+ let mut yPrevious: LONGLONG;
+
+ assert!(inactive[0].Yx == i64::MIN);
+ assert!(count >= 2);
+ //inactive = &mut inactive[1..];
+
+ let mut indx = 2; // Skip first entry (by definition it's already in order!)
+ count -= 1;
+
+ while {
+ let mut p = indx;
+
+ // Copy the current stuff to temporary variables to make a hole:
+
+ e = (inactive[indx]).Edge;
+ y = (inactive[indx]).Yx;
+
+ // Shift everything one slot to the right (effectively moving
+ // the hole one position to the left):
+
+ while (y < {yPrevious = inactive[p-1].Yx; yPrevious}) {
+ inactive[p].Yx = yPrevious;
+ inactive[p].Edge = inactive[p-1].Edge;
+ p -= 1;
+ }
+
+ // Drop the temporary stuff into the final hole:
+
+ inactive[p].Yx = y;
+ inactive[p].Edge = e;
+
+ // The quicksort should have ensured that we don't have to move
+ // any entry terribly far:
+
+ assert!((indx - p) <= QUICKSORT_THRESHOLD as usize);
+
+ indx += 1;
+ count -= 1;
+ count != 0
+ } {}
+}
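+
+// The two routines above form one hybrid sort: QuickSortEdges leaves every
+// entry within QUICKSORT_THRESHOLD slots of its final position, and the
+// insertion sort then finishes in near-linear time, with the head sentinel
+// (i64::MIN) bounding the inner scan instead of an index check. The same
+// pattern on bare keys, as a standalone sketch:
+fn hybrid_sort_sketch(keys: &mut Vec<i64>) {
+    keys.insert(0, i64::MIN); // head sentinel, as in the real arrays
+
+    // (rough quicksort pass over keys[1..] elided)
+
+    for i in 2..keys.len() {
+        let key = keys[i];
+        let mut p = i;
+        while (key < keys[p - 1]) {
+            // The sentinel guarantees this terminates:
+            keys[p] = keys[p - 1];
+            p -= 1;
+        }
+        keys[p] = key;
+    }
+    keys.remove(0); // drop the sentinel
+}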
+
+/**************************************************************************\
+*
+* Function Description:
+*
+* Assert the state of the inactive array.
+*
+* Created:
+*
+* 03/25/2000 andrewgo
+*
+\**************************************************************************/
+macro_rules! ASSERTINACTIVEARRAY {
+ ($inactive: expr, $count: expr) => {
+ #[cfg(debug_assertions)]
+ AssertInactiveArray($inactive, $count);
+ };
+}
+fn AssertInactiveArray(
+ /*__in_ecount(count)*/
+ mut inactive: &[CInactiveEdge], // Annotation should allow the -1 element
+ mut count: INT,
+) {
+ // Verify the head:
+
+ /*#if !ANALYSIS*/
+    // #if needed because prefast doesn't know that the -1 element is available
+ assert!(inactive[0].Yx == i64::MIN);
+ /*#endif*/
+ assert!(inactive[1].Yx != i64::MIN);
+
+ while {
+ let mut yx: LONGLONG = 0;
+ YX((*inactive[1].Edge).X.get(), (*inactive[1].Edge).StartY, &mut yx);
+
+ assert!(inactive[1].Yx == yx);
+ /*#if !ANALYSIS*/
+        // #if needed because tools don't know that the -1 element is available
+ assert!(inactive[1].Yx >= inactive[0].Yx);
+ /*#endif*/
+ inactive = &inactive[1..];
+ count -= 1;
+ count != 0
+ } {}
+
+ // Verify that the tail is setup appropriately:
+
+ assert!((*inactive[1].Edge).StartY == INT::MAX);
+}
+
+/**************************************************************************\
+*
+* Function Description:
+*
+* Initialize and sort the inactive array.
+*
+* Returns:
+*
+* 'y' value of topmost edge.
+*
+* Created:
+*
+* 03/25/2000 andrewgo
+*
+\**************************************************************************/
+
+pub fn InitializeInactiveArray<'a>(
+ pEdgeStore: &'a Arena<CEdge<'a>>,
+ /*__in_ecount(count+2)*/ rgInactiveArray: &mut [CInactiveEdge<'a>],
+ count: UINT,
+ tailEdge: Ref<'a, CEdge<'a>> // Tail sentinel for inactive list
+) -> INT {
+ let rgInactiveArrayPtr = rgInactiveArray.as_mut_ptr();
+
+ // First initialize the inactive array. Skip the first entry,
+ // which we reserve as a head sentinel for the insertion sort:
+
+ let mut pInactiveEdge = &mut rgInactiveArray[1..];
+
+ for e in pEdgeStore.iter() {
+
+ pInactiveEdge[0].Edge = Ref::new(e);
+ YX(e.X.get(), e.StartY, &mut pInactiveEdge[0].Yx);
+ pInactiveEdge = &mut pInactiveEdge[1..];
+ }
+
+ assert!(unsafe { pInactiveEdge.as_mut_ptr().offset_from(rgInactiveArrayPtr) } as UINT == count + 1);
+
+ // Add the tail, which is used when reading back the array. This
+ // is why we had to allocate the array as 'count + 1':
+
+ pInactiveEdge[0].Edge = tailEdge;
+
+ // Add the head, which is used for the insertion sort. This is why
+ // we had to allocate the array as 'count + 2':
+
+ rgInactiveArray[0].Yx = i64::MIN;
+
+ // Only invoke the quicksort routine if it's worth the overhead:
+
+ if (count as isize > QUICKSORT_THRESHOLD) {
+ // Quick-sort this, skipping the first and last elements,
+ // which are sentinels.
+ //
+ // We do 'inactiveArray + count' to be inclusive of the last
+ // element:
+
+ QuickSortEdges(rgInactiveArray, 1, count as usize);
+ }
+
+    // Follow with an insertion sort to handle the mostly sorted result:
+
+ InsertionSortEdges(rgInactiveArray, count as i32);
+
+ ASSERTINACTIVEARRAY!(rgInactiveArray, count as i32);
+
+ // Return the 'y' value of the topmost edge:
+
+ return (*rgInactiveArray[1].Edge).StartY;
+
+}
+
+/**************************************************************************\
+*
+* Function Description:
+*
+* Insert edges into the active edge list.
+*
+* Created:
+*
+* 03/25/2000 andrewgo
+*
+\**************************************************************************/
+
+pub fn InsertNewEdges<'a>(
+ mut pActiveList: Ref<'a, CEdge<'a>>,
+ iCurrentY: INT,
+ /*__deref_inout_xcount(array terminated by an edge with StartY != iCurrentY)*/
+ ppInactiveEdge: &'a mut [CInactiveEdge<'a>],
+ pYNextInactive: &mut INT, // will be INT_MAX when no more
+) -> &'a mut [CInactiveEdge<'a>] {
+
+ let mut inactive: &mut [CInactiveEdge] = ppInactiveEdge;
+
+ assert!((*inactive[0].Edge).StartY == iCurrentY);
+
+ while {
+ let newActive: Ref<CEdge> = inactive[0].Edge;
+
+ // The activeList edge list sentinel has X = INT_MAX, so this always
+ // terminates:
+
+ while ((*(*pActiveList).Next.get()).X < (*newActive).X) {
+ pActiveList = (*pActiveList).Next.get();
+ }
+
+ if SORT_EDGES_INCLUDING_SLOPE {
+ // The activeList edge list sentinel has Dx = INT_MAX, so this always
+ // terminates:
+
+ while (((*(*pActiveList).Next.get()).X == (*newActive).X) && ((*(*pActiveList).Next.get()).Dx < (*newActive).Dx)) {
+ pActiveList = (*pActiveList).Next.get();
+ }
+ }
+
+ (*newActive).Next.set((*pActiveList).Next.get());
+ (*pActiveList).Next.set(newActive);
+
+ inactive = &mut inactive[1..];
+ (*(inactive[0]).Edge).StartY == iCurrentY
+ } {}
+
+ *pYNextInactive = (*(inactive[0]).Edge).StartY;
+ return inactive;
+
+}
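+
+// Why the scans above need no bounds checks (a sketch with bare values):
+// the active list is terminated by a sentinel edge whose X is INT_MAX, so
+// the comparison must fail before the walk runs off the list.
+fn sentinel_scan_example() {
+    let xs = [3, 7, i32::MAX]; // active-list x values, sentinel last
+    let new_x = 9;
+    let mut i = 0;
+    while (xs[i + 1] < new_x) {
+        i += 1; // always stops at the sentinel
+    }
+    assert!(i == 1); // the new edge belongs after x == 7
+}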
+
+/**************************************************************************\
+*
+* Function Description:
+*
+* Sort the edges so that they're in ascending 'x' order.
+*
+* We use a bubble-sort for this stage, because edges maintain good
+* locality and don't often switch ordering positions.
+*
+* Created:
+*
+* 03/25/2000 andrewgo
+*
+\**************************************************************************/
+
+fn SortActiveEdges(list: Ref<CEdge>) {
+
+ let mut swapOccurred: bool;
+ let mut tmp: Ref<CEdge>;
+
+ // We should never be called with an empty active edge list:
+
+ assert!((*(*list).Next.get()).X.get() != INT::MAX);
+
+ while {
+ swapOccurred = false;
+
+ let mut previous = list;
+ let mut current = (*list).Next.get();
+ let mut next = (*current).Next.get();
+ let mut nextX = (*next).X.get();
+
+ while {
+ if (nextX < (*current).X.get()) {
+ swapOccurred = true;
+
+ (*previous).Next.set(next);
+ (*current).Next.set((*next).Next.get());
+ (*next).Next.set(current);
+
+ SWAP!(tmp, next, current);
+ }
+
+ previous = current;
+ current = next;
+ next = (*next).Next.get();
+ nextX = (*next).X.get();
+ nextX != INT::MAX
+ } {}
+ swapOccurred
+ } {}
+
+}
diff --git a/third_party/rust/wpf-gpu-raster/src/bezier.rs b/third_party/rust/wpf-gpu-raster/src/bezier.rs
new file mode 100644
index 0000000000..fe54628a40
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/src/bezier.rs
@@ -0,0 +1,990 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+//+-----------------------------------------------------------------------------
+//
+// class Bezier32
+//
+// Bezier cracker.
+//
+// A hybrid cubic Bezier curve flattener based on KirkO's error factor.
+// Generates line segments fast without using the stack. Used to flatten a
+// path.
+//
+// For an understanding of the methods used, see:
+//
+// Kirk Olynyk, "..."
+// Goossen and Olynyk, "System and Method of Hybrid Forward
+// Differencing to Render Bezier Splines"
+// Lien, Shantz and Vaughan Pratt, "Adaptive Forward Differencing for
+// Rendering Curves and Surfaces", Computer Graphics, July 1987
+// Chang and Shantz, "Rendering Trimmed NURBS with Adaptive Forward
+// Differencing", Computer Graphics, August 1988
+// Foley and Van Dam, "Fundamentals of Interactive Computer Graphics"
+//
+// Public Interface:
+// bInit(pptfx) - pptfx points to 4 control points of
+// Bezier. Current point is set to the first
+// point after the start-point.
+// Bezier32(pptfx) - Constructor with initialization.
+// vGetCurrent(pptfx) - Returns current polyline point.
+// bCurrentIsEndPoint() - TRUE if current point is end-point.
+// vNext() - Moves to next polyline point.
+//
+
+
+#![allow(unused_parens)]
+#![allow(non_upper_case_globals)]
+//+-----------------------------------------------------------------------------
+//
+
+//
+// $TAG ENGR
+
+// $Module: win_mil_graphics_geometry
+// $Keywords:
+//
+// $Description:
+// Class for flattening a bezier.
+//
+// $ENDTAG
+//
+//------------------------------------------------------------------------------
+
+// First conversion from original 28.4 to 18.14 format
+const HFD32_INITIAL_SHIFT: i32 = 10;
+
+// Second conversion to 15.17 format
+const HFD32_ADDITIONAL_SHIFT: i32 = 3;
+
+
+// BEZIER_FLATTEN_GDI_COMPATIBLE:
+//
+// Don't turn on this switch without testing carefully. It's more for
+// documentation's sake - to show the values that GDI used - for an error
+// tolerance of 2/3.
+
+// It turns out that 2/3 produces very noticeable artifacts on antialiased lines,
+// so we want to use 1/4 instead.
+/*
+#ifdef BEZIER_FLATTEN_GDI_COMPATIBLE
+
+// Flatten to an error of 2/3. During initial phase, use 18.14 format.
+
+#define TEST_MAGNITUDE_INITIAL (6 * 0x00002aa0L)
+
+// Error of 2/3. During normal phase, use 15.17 format.
+
+#define TEST_MAGNITUDE_NORMAL (TEST_MAGNITUDE_INITIAL << 3)
+
+#else
+*/
+use crate::types::*;
+/*
+// Flatten to an error of 1/4. During initial phase, use 18.14 format.
+
+const TEST_MAGNITUDE_INITIAL: i32 = (6 * 0x00001000);
+
+// Error of 1/4. During normal phase, use 15.17 format.
+
+const TEST_MAGNITUDE_NORMAL: i32 = (TEST_MAGNITUDE_INITIAL << 3);
+*/
+
+// I have modified the constants for HFD32 as part of fixing accuracy errors
+// (Bug 816015). Something similar could be done for the 64 bit hfd, but it ain't
+// broke so I'd rather not fix it.
+
+// The shift to the steady state 15.17 format
+const HFD32_SHIFT: LONG = HFD32_INITIAL_SHIFT + HFD32_ADDITIONAL_SHIFT;
+
+// Added to output numbers before rounding back to original representation
+const HFD32_ROUND: LONG = 1 << (HFD32_SHIFT - 1);
+
+// The error is tested on max(|e2|, |e3|), which represent 6 times the actual error.
+// The flattening tolerance is hard coded to 1/4 in the original geometry space,
+// which translates to 4 in 28.4 format. So 6 times that is:
+
+const HFD32_TOLERANCE: LONGLONG = 24;
+
+// During the initial phase, while working in 18.14 format
+const HFD32_INITIAL_TEST_MAGNITUDE: LONGLONG = HFD32_TOLERANCE << HFD32_INITIAL_SHIFT;
+
+// During the steady state, while working in 15.17 format
+const HFD32_TEST_MAGNITUDE: LONGLONG = HFD32_INITIAL_TEST_MAGNITUDE << HFD32_ADDITIONAL_SHIFT;
+
+// We will stop halving the segment with basis e1, e2, e3, e4 when max(|e2|, |e3|)
+// is less than HFD32_TOLERANCE. The operation e2 = (e2 + e3) >> 3 in vHalveStepSize() may
+// eat up 3 bits of accuracy. HfdBasis32 starts off with a pad of HFD32_SHIFT zeros, so
+// we can stay exact up to HFD32_SHIFT/3 subdivisions. Since every subdivision is guaranteed
+// to shift max(|e2|, |e3|) at least by 2, we will subdivide no more than n times if the
+// initial max(|e2|, |e3|) is less than HFD32_TOLERANCE << 2n. But if the initial
+// max(|e2|, |e3|) is greater than HFD32_TOLERANCE << (2 * HFD32_INITIAL_SHIFT / 3) then we may not be
+// able to flatten with the 32 bit hfd, so we need to resort to the 64 bit hfd.
+
+const HFD32_MAX_ERROR: INT = (HFD32_TOLERANCE as i32) << ((2 * HFD32_INITIAL_SHIFT) / 3);
+
+// The maximum size of coefficients that can be handled by HfdBasis32.
+const HFD32_MAX_SIZE: LONGLONG = 0xffffc000;
+
+// Michka 9/12/03: I found this number in the body of the code without any explanation.
+// My analysis suggests that we could get away with larger numbers, but if I'm wrong we
+// could be in big trouble, so let us stay conservative.
+//
+// In bInit() we subtract Min(Bezier coeffients) from the original coefficients, so after
+// that 0 <= coefficients <= Bound, and the test will be Bound < HFD32_MAX_SIZE. When
+// switching to the HFD basis in bInit():
+// * e0 is the first Bezier coeffient, so abs(e0) <= Bound.
+// * e1 is a difference of non-negative coefficients so abs(e1) <= Bound.
+// * e2 and e3 can be written as 12*(p - (q + r)/2) where p,q and r are coefficients.
+// 0 <=(q + r)/2 <= Bound, so abs(p - (q + r)/2) <= 2*Bound, hence
+// abs(e2), abs(e3) <= 12*Bound.
+//
+// During vLazyHalveStepSize we add e2 + e3, resulting in absolute value <= 24*Bound.
+// Initially HfdBasis32 shifts the numbers by HFD32_INITIAL_SHIFT, so we need to handle
+// 24*bounds*(2^HFD32_SHIFT), and that needs to be less than 2^31. So the bounds need to
+// be less than 2^(31-HFD32_INITIAL_SHIFT)/24).
+//
+// For speed, the algorithm uses & rather than < for comparison. To facilitate that we
+// replace 24 by 32=2^5, and then the binary representation of the number is of the form
+// 0...010...0 with HFD32_SHIFT+5 trailing zeros. By subtracting that from 2^32 = 0xffffffff+1
+// we get a number that is 1..110...0 with the same number of trailing zeros, and that can be
+// used with an & for comparison. So the number should be:
+//
+// 0xffffffffL - (1L << (31 - HFD32_INITIAL_SHIFT - 5)) + 1 = 0xffffffffL - (1L << 16) + 1 = 0xffff0000
+//
+// For the current values of HFD32_INITIAL_SHIFT=10 and HFD32_ADDITIONAL_SHIFT=3, the steady
+// state doesn't pose additional requirements, as shown below.
+//
+// For some reason the current code uses 0xffffc000 = 0xffffffffL - (1L << 14) + 1.
+//
+// Here is why the steady state doesn't pose additional requirements:
+//
+// In vSteadyState we multiply e0 and e1 by 8, so the requirement is Bounds*2^13 < 2^31,
+// or Bounds < 2^18, less stringent than the above.
+//
+// In vLazyHalveStepSize we cut the error down by subdivision, making abs(e2) and abs(e3)
+// less than HFD32_TEST_MAGNITUDE = 24*2^13, well below 2^31.
+//
+// During all the steady-state operations - vTakeStep, vHalveStepSize and vDoubleStepSize,
+// e0 is on the curve and e1 is a difference of 2 points on the curve, so
+// abs(e0), abs(e1) < Bounds * 2^13, which requires Bound < 2^(31-13) = 2^18. e2 and e3
+// are errors, kept below 6*HFD32_TEST_MAGNITUDE = 216*2^13. Details:
+//
+// In vTakeStep e2 = 2e2 - e3 keeps abs(e2) < 3*HFD32_TEST_MAGNITUDE = 72*2^13,
+// well below 2^31
+//
+// In vHalveStepSize we add e2 + e3 when their absolute is < 3*HFD32_TEST_MAGNITUDE (because
+// this comes after a step), so that keeps the result below 6*HFD32_TEST_MAGNITUDE = 216*2^13.
+//
+// In vDoubleStepSize we know that abs(e2), abs(e3) < HFD32_TEST_MAGNITUDE/4, otherwise we
+// would not have doubled the step.
+
+#[derive(Default)]
+struct HfdBasis32
+{
+ e0: LONG,
+ e1: LONG,
+ e2: LONG,
+ e3: LONG,
+}
+
+impl HfdBasis32 {
+ fn lParentErrorDividedBy4(&self) -> LONG {
+ self.e3.abs().max((self.e2 + self.e2 - self.e3).abs())
+ }
+
+ fn lError(&self) -> LONG
+ {
+ self.e2.abs().max(self.e3.abs())
+ }
+
+ fn fxValue(&self) -> INT
+ {
+ return((self.e0 + HFD32_ROUND) >> HFD32_SHIFT);
+ }
+
+ fn bInit(&mut self, p1: INT, p2: INT, p3: INT, p4: INT) -> bool
+ {
+ // Change basis and convert from 28.4 to 18.14 format:
+
+ self.e0 = (p1 ) << HFD32_INITIAL_SHIFT;
+ self.e1 = (p4 - p1 ) << HFD32_INITIAL_SHIFT;
+
+ self.e2 = 6 * (p2 - p3 - p3 + p4);
+ self.e3 = 6 * (p1 - p2 - p2 + p3);
+
+ if (self.lError() >= HFD32_MAX_ERROR)
+ {
+ // Large error, will require too many subdivision for this 32 bit hfd
+ return false;
+ }
+
+ self.e2 <<= HFD32_INITIAL_SHIFT;
+ self.e3 <<= HFD32_INITIAL_SHIFT;
+
+ return true;
+ }
+
+ fn vLazyHalveStepSize(&mut self, cShift: LONG)
+ {
+ self.e2 = self.ExactShiftRight(self.e2 + self.e3, 1);
+ self.e1 = self.ExactShiftRight(self.e1 - self.ExactShiftRight(self.e2, cShift), 1);
+ }
+
+ fn vSteadyState(&mut self, cShift: LONG)
+ {
+ // We now convert from 18.14 fixed format to 15.17:
+
+ self.e0 <<= HFD32_ADDITIONAL_SHIFT;
+ self.e1 <<= HFD32_ADDITIONAL_SHIFT;
+
+ let mut lShift = cShift - HFD32_ADDITIONAL_SHIFT;
+
+ if (lShift < 0)
+ {
+ lShift = -lShift;
+ self.e2 <<= lShift;
+ self.e3 <<= lShift;
+ }
+ else
+ {
+ self.e2 >>= lShift;
+ self.e3 >>= lShift;
+ }
+ }
+
+ fn vHalveStepSize(&mut self)
+ {
+ self.e2 = self.ExactShiftRight(self.e2 + self.e3, 3);
+ self.e1 = self.ExactShiftRight(self.e1 - self.e2, 1);
+ self.e3 = self.ExactShiftRight(self.e3, 2);
+ }
+
+ fn vDoubleStepSize(&mut self)
+ {
+ self.e1 += self.e1 + self.e2;
+ self.e3 <<= 2;
+ self.e2 = (self.e2 << 3) - self.e3;
+ }
+
+ fn vTakeStep(&mut self)
+ {
+ self.e0 += self.e1;
+ let lTemp = self.e2;
+ self.e1 += lTemp;
+ self.e2 += lTemp - self.e3;
+ self.e3 = lTemp;
+ }
+
+ fn ExactShiftRight(&self, num: i32, shift: i32) -> i32
+ {
+ // Performs a shift to the right while asserting that we're not
+ // losing significant bits
+
+ assert!(num == (num >> shift) << shift);
+ return num >> shift;
+ }
+}
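+
+// For contrast with the hybrid basis above, plain third-order forward
+// differencing of an integer cubic: after priming four differences, each
+// sample costs three additions. The hybrid scheme keeps that cheap step but
+// carries the error terms e2 and e3 so the step size can be halved or
+// doubled mid-curve. A standalone sketch:
+fn forward_difference_demo() {
+    let f = |t: i64| 2 * t * t * t - 3 * t * t + 5 * t + 7;
+    let (mut d0, mut d1, mut d2, d3) = (
+        f(0),
+        f(1) - f(0), // first difference
+        f(2) - 2 * f(1) + f(0), // second difference
+        f(3) - 3 * f(2) + 3 * f(1) - f(0), // third difference, constant
+    );
+    for t in 0..10 {
+        assert!(d0 == f(t)); // d0 walks the exact cubic
+        d0 += d1;
+        d1 += d2;
+        d2 += d3;
+    }
+}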
+
+fn vBoundBox(
+ aptfx: &[POINT; 4]) -> RECT
+{
+ let mut left = aptfx[0].x;
+ let mut right = aptfx[0].x;
+ let mut top = aptfx[0].y;
+ let mut bottom = aptfx[0].y;
+
+ for i in 1..4
+ {
+ left = left.min(aptfx[i].x);
+ top = top.min(aptfx[i].y);
+ right = right.max(aptfx[i].x);
+ bottom = bottom.max(aptfx[i].y);
+ }
+
+ // We make the bounds one pixel loose for the nominal width
+ // stroke case, which increases the bounds by half a pixel
+ // in every dimension:
+
+ RECT { left: left - 16, top: top - 16, right: right + 16, bottom: bottom + 16}
+}
+
+
+
+fn bIntersect(
+ a: &RECT,
+ b: &RECT) -> bool
+{
+ return((a.left < b.right) &&
+ (a.top < b.bottom) &&
+ (a.right > b.left) &&
+ (a.bottom > b.top));
+}
+
+#[derive(Default)]
+pub struct Bezier32
+{
+ cSteps: LONG,
+ x: HfdBasis32,
+ y: HfdBasis32,
+ rcfxBound: RECT
+}
+impl Bezier32 {
+
+fn bInit(&mut self,
+ aptfxBez: &[POINT; 4],
+ // Pointer to 4 control points
+ prcfxClip: Option<&RECT>) -> bool
+ // Bound box of visible region (optional)
+{
+ let mut aptfx;
+ let mut cShift = 0; // Keeps track of 'lazy' shifts
+
+ self.cSteps = 1; // Number of steps to do before reach end of curve
+
+ self.rcfxBound = vBoundBox(aptfxBez);
+
+ aptfx = aptfxBez.clone();
+
+ {
+ let mut fxOr;
+ let mut fxOffset;
+
+ // find out if the coordinates minus the bounding box
+ // exceed 10 bits
+ fxOffset = self.rcfxBound.left;
+ fxOr = {aptfx[0].x -= fxOffset; aptfx[0].x};
+ fxOr |= {aptfx[1].x -= fxOffset; aptfx[1].x};
+ fxOr |= {aptfx[2].x -= fxOffset; aptfx[2].x};
+ fxOr |= {aptfx[3].x -= fxOffset; aptfx[3].x};
+
+ fxOffset = self.rcfxBound.top;
+ fxOr |= {aptfx[0].y -= fxOffset; aptfx[0].y};
+ fxOr |= {aptfx[1].y -= fxOffset; aptfx[1].y};
+ fxOr |= {aptfx[2].y -= fxOffset; aptfx[2].y};
+ fxOr |= {aptfx[3].y -= fxOffset; aptfx[3].y};
+
+ // This 32 bit cracker can only handle points in a 10 bit space:
+
+ if ((fxOr as i64 & HFD32_MAX_SIZE) != 0) {
+ return false;
+ }
+ }
+
+ if (!self.x.bInit(aptfx[0].x, aptfx[1].x, aptfx[2].x, aptfx[3].x))
+ {
+ return false;
+ }
+ if (!self.y.bInit(aptfx[0].y, aptfx[1].y, aptfx[2].y, aptfx[3].y))
+ {
+ return false;
+ }
+
+
+ if (match prcfxClip { None => true, Some(clip) => bIntersect(&self.rcfxBound, clip)})
+ {
+
+ loop {
+ let lTestMagnitude = (HFD32_INITIAL_TEST_MAGNITUDE << cShift) as LONG;
+
+ if (self.x.lError() <= lTestMagnitude && self.y.lError() <= lTestMagnitude) {
+ break;
+ }
+
+ cShift += 2;
+ self.x.vLazyHalveStepSize(cShift);
+ self.y.vLazyHalveStepSize(cShift);
+ self.cSteps <<= 1;
+ }
+ }
+
+ self.x.vSteadyState(cShift);
+ self.y.vSteadyState(cShift);
+
+// Note that this handles the case where the initial error for
+// the Bezier is already less than HFD32_TEST_MAGNITUDE:
+
+ self.x.vTakeStep();
+ self.y.vTakeStep();
+ self.cSteps-=1;
+
+ return true;
+}
+
+
+fn cFlatten(&mut self,
+ mut pptfx: &mut [POINT],
+ pbMore: &mut bool) -> i32
+{
+ let mut cptfx = pptfx.len();
+ assert!(cptfx > 0);
+
+ let cptfxOriginal = cptfx;
+
+ while {
+ // Return current point:
+
+ pptfx[0].x = self.x.fxValue() + self.rcfxBound.left;
+ pptfx[0].y = self.y.fxValue() + self.rcfxBound.top;
+ pptfx = &mut pptfx[1..];
+
+ // If cSteps == 0, that was the end point in the curve!
+
+ if (self.cSteps == 0)
+ {
+ *pbMore = false;
+
+ // '+1' because we haven't decremented 'cptfx' yet:
+
+ return(cptfxOriginal - cptfx + 1) as i32;
+ }
+
+ // Okay, we have to step:
+
+ if (self.x.lError().max(self.y.lError()) > HFD32_TEST_MAGNITUDE as LONG)
+ {
+ self.x.vHalveStepSize();
+ self.y.vHalveStepSize();
+ self.cSteps <<= 1;
+ }
+
+ // We are here after vTakeStep. Before that the error max(|e2|,|e3|) was less
+ // than HFD32_TEST_MAGNITUDE. vTakeStep changed e2 to 2e2-e3. Since
+ // |2e2-e3| < max(|e2|,|e3|) << 2 and vHalveStepSize is guaranteed to reduce
+ // max(|e2|,|e3|) by >> 2, no more than one subdivision should be required to
+ // bring the new max(|e2|,|e3|) back to within HFD32_TEST_MAGNITUDE, so:
+ assert!(self.x.lError().max(self.y.lError()) <= HFD32_TEST_MAGNITUDE as LONG);
+
+ while (!(self.cSteps & 1 != 0) &&
+ self.x.lParentErrorDividedBy4() <= (HFD32_TEST_MAGNITUDE as LONG >> 2) &&
+ self.y.lParentErrorDividedBy4() <= (HFD32_TEST_MAGNITUDE as LONG >> 2))
+ {
+ self.x.vDoubleStepSize();
+ self.y.vDoubleStepSize();
+ self.cSteps >>= 1;
+ }
+
+ self.cSteps -=1 ;
+ self.x.vTakeStep();
+ self.y.vTakeStep();
+ cptfx -= 1;
+ cptfx != 0
+ } {}
+
+ *pbMore = true;
+ return cptfxOriginal as i32;
+}
+}
+
+
+///////////////////////////////////////////////////////////////////////////
+// Bezier64
+//
+// All math is done using 64 bit fixed numbers in a 36.28 format.
+//
+// All drawing is done in a 31 bit space, then a 31 bit window offset
+// is applied. In the initial transform where we change to the HFD
+// basis, e2 and e3 require the most bits precision: e2 = 6(p2 - 2p3 + p4).
+// This requires an additional 4 bits precision -- hence we require 36 bits
+// for the integer part, and the remaining 28 bits is given to the fraction.
+//
+// In rendering a Bezier, every 'subdivide' requires an extra 3 bits of
+// fractional precision. In order to be reversible, we can allow no
+// error to creep in. Since a INT coordinate is 32 bits, and we
+// require an additional 4 bits as mentioned above, that leaves us
+// 28 bits fractional precision -- meaning we can do a maximum of
+// 9 subdivides. Now, the maximum absolute error of a Bezier curve in 27
+// bit integer space is 2^29 - 1. But 9 subdivides reduces the error by a
+// guaranteed factor of 2^18, meaning we can subdivide down only to an error
+// of 2^11 before we overflow, when in fact we want to reduce error to less
+// than 1.
+//
+// So what we do is HFD until we hit an error less than 2^11, reverse our
+// basis transform to get the four control points of this smaller curve
+// (rounding in the process to 32 bits), then invoke another copy of HFD
+// on the reduced Bezier curve. We again have enough precision, but since
+// its starting error is less than 2^11, we can reduce error to 2^-7 before
+// overflowing! We'll start a low HFD after every step of the high HFD.
+////////////////////////////////////////////////////////////////////////////
+#[derive(Default)]
+struct HfdBasis64
+{
+ e0: LONGLONG,
+ e1: LONGLONG,
+ e2: LONGLONG,
+ e3: LONGLONG,
+}
+
+impl HfdBasis64 {
+fn vParentError(&self) -> LONGLONG
+{
+ (self.e3 << 2).abs().max(((self.e2 << 3) - (self.e3 << 2)).abs())
+}
+
+fn vError(&self) -> LONGLONG
+{
+ self.e2.abs().max(self.e3.abs())
+}
+
+fn fxValue(&self) -> INT
+{
+// Convert from 36.28 and round:
+
+ let mut eq = self.e0;
+ eq += (1 << (BEZIER64_FRACTION - 1));
+ eq >>= BEZIER64_FRACTION;
+ return eq as LONG as INT;
+}
+
+fn vInit(&mut self, p1: INT, p2: INT, p3: INT, p4: INT)
+{
+ let mut eqTmp;
+ let eqP2 = p2 as LONGLONG;
+ let eqP3 = p3 as LONGLONG;
+
+// e0 = p1
+// e1 = p4 - p1
+// e2 = 6(p2 - 2p3 + p4)
+// e3 = 6(p1 - 2p2 + p3)
+
+// Change basis:
+
+ self.e0 = p1 as LONGLONG; // e0 = p1
+ self.e1 = p4 as LONGLONG;
+ self.e2 = eqP2; self.e2 -= eqP3; self.e2 -= eqP3; self.e2 += self.e1; // e2 = p2 - 2*p3 + p4
+ self.e3 = self.e0; self.e3 -= eqP2; self.e3 -= eqP2; self.e3 += eqP3; // e3 = p1 - 2*p2 + p3
+ self.e1 -= self.e0; // e1 = p4 - p1
+
+// Convert to 36.28 format and multiply e2 and e3 by six:
+
+ self.e0 <<= BEZIER64_FRACTION;
+ self.e1 <<= BEZIER64_FRACTION;
+ eqTmp = self.e2; self.e2 += eqTmp; self.e2 += eqTmp; self.e2 <<= (BEZIER64_FRACTION + 1);
+ eqTmp = self.e3; self.e3 += eqTmp; self.e3 += eqTmp; self.e3 <<= (BEZIER64_FRACTION + 1);
+}
+
+fn vUntransform<F: Fn(&mut POINT) -> &mut LONG>(&self,
+ afx: &mut [POINT; 4], field: F)
+{
+// Declare some temps to hold our operations, since we can't modify e0..e3.
+
+ let mut eqP0;
+ let mut eqP1;
+ let mut eqP2;
+ let mut eqP3;
+
+// p0 = e0
+// p1 = e0 + (6e1 - e2 - 2e3)/18
+// p2 = e0 + (12e1 - 2e2 - e3)/18
+// p3 = e0 + e1
+
+ eqP0 = self.e0;
+
+// NOTE PERF: Convert this to a multiply by 6: [andrewgo]
+
+ eqP2 = self.e1;
+ eqP2 += self.e1;
+ eqP2 += self.e1;
+ eqP1 = eqP2;
+ eqP1 += eqP2; // 6e1
+ eqP1 -= self.e2; // 6e1 - e2
+ eqP2 = eqP1;
+ eqP2 += eqP1; // 12e1 - 2e2
+ eqP2 -= self.e3; // 12e1 - 2e2 - e3
+ eqP1 -= self.e3;
+ eqP1 -= self.e3; // 6e1 - e2 - 2e3
+
+// NOTE: May just want to approximate these divides! [andrewgo]
+// Or can do a 64 bit divide by 32 bit to get 32 bits right here.
+
+ eqP1 /= 18;
+ eqP2 /= 18;
+ eqP1 += self.e0;
+ eqP2 += self.e0;
+
+ eqP3 = self.e0;
+ eqP3 += self.e1;
+
+// Convert from 36.28 format with rounding:
+
+ eqP0 += (1 << (BEZIER64_FRACTION - 1)); eqP0 >>= BEZIER64_FRACTION; *field(&mut afx[0]) = eqP0 as LONG;
+ eqP1 += (1 << (BEZIER64_FRACTION - 1)); eqP1 >>= BEZIER64_FRACTION; *field(&mut afx[1]) = eqP1 as LONG;
+ eqP2 += (1 << (BEZIER64_FRACTION - 1)); eqP2 >>= BEZIER64_FRACTION; *field(&mut afx[2]) = eqP2 as LONG;
+ eqP3 += (1 << (BEZIER64_FRACTION - 1)); eqP3 >>= BEZIER64_FRACTION; *field(&mut afx[3]) = eqP3 as LONG;
+}
+
+fn vHalveStepSize(&mut self)
+{
+// e2 = (e2 + e3) >> 3
+// e1 = (e1 - e2) >> 1
+// e3 >>= 2
+
+ self.e2 += self.e3; self.e2 >>= 3;
+ self.e1 -= self.e2; self.e1 >>= 1;
+ self.e3 >>= 2;
+}
+
+fn vDoubleStepSize(&mut self)
+{
+// e1 = 2e1 + e2
+// e3 = 4e3;
+// e2 = 8e2 - e3
+
+ self.e1 <<= 1; self.e1 += self.e2;
+ self.e3 <<= 2;
+ self.e2 <<= 3; self.e2 -= self.e3;
+}
+
+fn vTakeStep(&mut self)
+{
+ self.e0 += self.e1;
+ let eqTmp = self.e2;
+ self.e1 += self.e2;
+ self.e2 += eqTmp; self.e2 -= self.e3;
+ self.e3 = eqTmp;
+}
+}
+
+const BEZIER64_FRACTION: LONG = 28;
+
+// The following is our 2^11 target error encoded as a 36.28 number
+// (don't forget the additional 4 bits of fractional precision!) and
+// the 6 times error multiplier:
+
+const geqErrorHigh: LONGLONG = (6 * (1 << 15) >> (32 - BEZIER64_FRACTION)) << 32;
+
+/*#ifdef BEZIER_FLATTEN_GDI_COMPATIBLE
+
+// The following is the default 2/3 error encoded as a 36.28 number,
+// multiplied by 6, and leaving 4 bits for fraction:
+
+const LONGLONG geqErrorLow = (LONGLONG)(4) << 32;
+
+#else*/
+
+// The following is the default 1/4 error encoded as a 36.28 number,
+// multiplied by 6, and leaving 4 bits for fraction:
+
+use crate::types::POINT;
+
+const geqErrorLow: LONGLONG = (3) << 31;
+
+//#endif
+#[derive(Default)]
+pub struct Bezier64
+{
+ xLow: HfdBasis64,
+ yLow: HfdBasis64,
+ xHigh: HfdBasis64,
+ yHigh: HfdBasis64,
+
+ eqErrorLow: LONGLONG,
+ rcfxClip: Option<RECT>,
+
+ cStepsHigh: LONG,
+ cStepsLow: LONG
+}
+
+impl Bezier64 {
+
+fn vInit(&mut self,
+ aptfx: &[POINT; 4],
+ // Pointer to 4 control points
+ prcfxVis: Option<&RECT>,
+ // Pointer to bound box of visible area (may be NULL)
+ eqError: LONGLONG)
+ // Fractional maximum error (32.32 format)
+{
+ self.cStepsHigh = 1;
+ self.cStepsLow = 0;
+
+ self.xHigh.vInit(aptfx[0].x, aptfx[1].x, aptfx[2].x, aptfx[3].x);
+ self.yHigh.vInit(aptfx[0].y, aptfx[1].y, aptfx[2].y, aptfx[3].y);
+
+// Initialize error:
+
+ self.eqErrorLow = eqError;
+
+ self.rcfxClip = prcfxVis.cloned();
+
+ while (((self.xHigh.vError()) > geqErrorHigh) ||
+ ((self.yHigh.vError()) > geqErrorHigh))
+ {
+ self.cStepsHigh <<= 1;
+ self.xHigh.vHalveStepSize();
+ self.yHigh.vHalveStepSize();
+ }
+}
+
+fn cFlatten(
+ &mut self,
+ mut pptfx: &mut [POINT],
+ pbMore: &mut bool) -> INT
+{
+ let mut aptfx: [POINT; 4] = Default::default();
+ let mut cptfx = pptfx.len();
+ let mut rcfxBound: RECT;
+ let cptfxOriginal = cptfx;
+
+ assert!(cptfx > 0);
+
+ while {
+ if (self.cStepsLow == 0)
+ {
+ // Optimization that if the bound box of the control points doesn't
+ // intersect with the bound box of the visible area, render entire
+ // curve as a single line:
+
+ self.xHigh.vUntransform(&mut aptfx, |p| &mut p.x);
+ self.yHigh.vUntransform(&mut aptfx, |p| &mut p.y);
+
+ self.xLow.vInit(aptfx[0].x, aptfx[1].x, aptfx[2].x, aptfx[3].x);
+ self.yLow.vInit(aptfx[0].y, aptfx[1].y, aptfx[2].y, aptfx[3].y);
+ self.cStepsLow = 1;
+
+        if (match &self.rcfxClip {
+            None => true,
+            Some(clip) => {
+                rcfxBound = vBoundBox(&aptfx);
+                bIntersect(&rcfxBound, &clip)
+            }
+        })
+ {
+ while (((self.xLow.vError()) > self.eqErrorLow) ||
+ ((self.yLow.vError()) > self.eqErrorLow))
+ {
+ self.cStepsLow <<= 1;
+ self.xLow.vHalveStepSize();
+ self.yLow.vHalveStepSize();
+ }
+ }
+
+ // This 'if' handles the case where the initial error for the Bezier
+ // is already less than the target error:
+
+ if ({self.cStepsHigh -= 1; self.cStepsHigh} != 0)
+ {
+ self.xHigh.vTakeStep();
+ self.yHigh.vTakeStep();
+
+ if (((self.xHigh.vError()) > geqErrorHigh) ||
+ ((self.yHigh.vError()) > geqErrorHigh))
+ {
+ self.cStepsHigh <<= 1;
+ self.xHigh.vHalveStepSize();
+ self.yHigh.vHalveStepSize();
+ }
+
+ while (!(self.cStepsHigh & 1 != 0) &&
+ ((self.xHigh.vParentError()) <= geqErrorHigh) &&
+ ((self.yHigh.vParentError()) <= geqErrorHigh))
+ {
+ self.xHigh.vDoubleStepSize();
+ self.yHigh.vDoubleStepSize();
+ self.cStepsHigh >>= 1;
+ }
+ }
+ }
+
+ self.xLow.vTakeStep();
+ self.yLow.vTakeStep();
+
+ pptfx[0].x = self.xLow.fxValue();
+ pptfx[0].y = self.yLow.fxValue();
+ pptfx = &mut pptfx[1..];
+
+ self.cStepsLow-=1;
+ if (self.cStepsLow == 0 && self.cStepsHigh == 0)
+ {
+ *pbMore = false;
+
+ // '+1' because we haven't decremented 'cptfx' yet:
+
+            return (cptfxOriginal - cptfx + 1) as INT;
+ }
+
+ if ((self.xLow.vError() > self.eqErrorLow) ||
+ (self.yLow.vError() > self.eqErrorLow))
+ {
+ self.cStepsLow <<= 1;
+ self.xLow.vHalveStepSize();
+ self.yLow.vHalveStepSize();
+ }
+
+ while (!(self.cStepsLow & 1 != 0) &&
+ ((self.xLow.vParentError()) <= self.eqErrorLow) &&
+ ((self.yLow.vParentError()) <= self.eqErrorLow))
+ {
+ self.xLow.vDoubleStepSize();
+ self.yLow.vDoubleStepSize();
+ self.cStepsLow >>= 1;
+ }
+ cptfx -= 1;
+ cptfx != 0
+ } {};
+
+ *pbMore = true;
+    return cptfxOriginal as INT;
+}
+}
+
+//+-----------------------------------------------------------------------------
+//
+// class CMILBezier
+//
+// Bezier cracker. Flattens any Bezier in our 28.4 device space down to a
+// smallest 'error' of 2^-7 = 0.0078. Will use fast 32 bit cracker for small
+// curves and slower 64 bit cracker for big curves.
+//
+// Public Interface:
+// vInit(aptfx, prcfxClip, peqError)
+// - pptfx points to 4 control points of Bezier. The first point
+//          retrieved by bNext() is the first point in the approximation
+// after the start-point.
+//
+// - prcfxClip is an optional pointer to the bound box of the visible
+// region. This is used to optimize clipping of Bezier curves that
+// won't be seen. Note that this value should account for the pen's
+// width!
+//
+// - optional maximum error in 32.32 format, corresponding to Kirko's
+// error factor.
+//
+// bNext(pptfx)
+// - pptfx points to where next point in approximation will be
+// returned. Returns FALSE if the point is the end-point of the
+// curve.
+//
+pub(crate) enum CMILBezier
+{
+ Bezier64(Bezier64),
+ Bezier32(Bezier32)
+}
+
+impl CMILBezier {
+ // All coordinates must be in 28.4 format:
+ pub fn new(aptfxBez: &[POINT; 4], prcfxClip: Option<&RECT>) -> Self {
+ let mut bez32 = Bezier32::default();
+ let bBez32 = bez32.bInit(aptfxBez, prcfxClip);
+ if bBez32 {
+ CMILBezier::Bezier32(bez32)
+ } else {
+ let mut bez64 = Bezier64::default();
+ bez64.vInit(aptfxBez, prcfxClip, geqErrorLow);
+ CMILBezier::Bezier64(bez64)
+ }
+ }
+
+ // Returns the number of points filled in. This will never be zero.
+ //
+ // The last point returned may not be exactly the last control
+ // point. The workaround is for calling code to add an extra
+ // point if this is the case.
+ pub fn Flatten( &mut self,
+ pptfx: &mut [POINT],
+ pbMore: &mut bool) -> INT {
+ match self {
+ CMILBezier::Bezier32(bez) => bez.cFlatten(pptfx, pbMore),
+ CMILBezier::Bezier64(bez) => bez.cFlatten(pptfx, pbMore)
+ }
+ }
+}
+
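+// Usage sketch (illustrative coordinates, in 28.4 fixed point): callers
+// typically drain points until `more` comes back false, appending the last
+// control point themselves if the final flattened point differs from it:
+//
+// let curve = [POINT { x: 0, y: 0 }, POINT { x: 160, y: 0 },
+//              POINT { x: 160, y: 160 }, POINT { x: 320, y: 160 }];
+// let mut bez = CMILBezier::new(&curve, None);
+// let mut buf: [POINT; 16] = Default::default();
+// let mut more = true;
+// while more {
+//     let count = bez.Flatten(&mut buf, &mut more);
+//     // consume buf[..count as usize] here
+// }
+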
+#[test]
+fn flatten() {
+ let curve: [POINT; 4] = [
+ POINT{x: 1715, y: 6506},
+ POINT{x: 1692, y: 6506},
+ POINT{x: 1227, y: 5148},
+ POINT{x: 647, y: 5211}];
+ let mut bez = CMILBezier::new(&curve, None);
+ let mut result: [POINT; 32] = Default::default();
+ let mut more: bool = false;
+ let count = bez.Flatten(&mut result, &mut more);
+ assert_eq!(count, 21);
+ assert_eq!(more, false);
+}
+
+#[test]
+fn split_flatten32() {
+ // make sure that flattening a curve into two small buffers matches
+ // doing it into a large buffer
+ let curve: [POINT; 4] = [
+ POINT{x: 1795, y: 8445},
+ POINT{x: 1795, y: 8445},
+ POINT{x: 1908, y: 8683},
+ POINT{x: 2043, y: 8705}];
+
+ let mut bez = CMILBezier::new(&curve, None);
+ let mut result: [POINT; 8] = Default::default();
+ let mut more: bool = false;
+ let count = bez.Flatten(&mut result[..5], &mut more);
+ assert_eq!(count, 5);
+ assert_eq!(more, true);
+ let count = bez.Flatten(&mut result[5..], &mut more);
+ assert_eq!(count, 3);
+ assert_eq!(more, false);
+
+ let mut bez = CMILBezier::new(&curve, None);
+ let mut full_result: [POINT; 8] = Default::default();
+ let mut more: bool = false;
+ let count = bez.Flatten(&mut full_result, &mut more);
+ assert_eq!(count, 8);
+ assert_eq!(more, false);
+ assert!(result == full_result);
+}
+
+#[test]
+fn flatten32() {
+ let curve: [POINT; 4] = [
+ POINT{x: 100, y: 100},
+ POINT{x: 110, y: 100},
+ POINT{x: 110, y: 110},
+ POINT{x: 110, y: 100}];
+ let mut bez = CMILBezier::new(&curve, None);
+ let mut result: [POINT; 32] = Default::default();
+ let mut more: bool = false;
+ let count = bez.Flatten(&mut result, &mut more);
+ assert_eq!(count, 3);
+ assert_eq!(more, false);
+}
+
+#[test]
+fn flatten32_double_step_size() {
+ let curve: [POINT; 4] = [
+ POINT{x: 1761, y: 8152},
+ POINT{x: 1761, y: 8152},
+ POINT{x: 1750, y: 8355},
+ POINT{x: 1795, y: 8445}];
+ let mut bez = CMILBezier::new(&curve, None);
+ let mut result: [POINT; 32] = Default::default();
+ let mut more: bool = false;
+ let count = bez.Flatten(&mut result, &mut more);
+ assert_eq!(count, 7);
+ assert_eq!(more, false);
+}
+
+#[test]
+fn bezier64_init_high_num_steps() {
+ let curve: [POINT; 4] = [
+ POINT{x: 33, y: -1},
+ POINT{x: -1, y: -1},
+ POINT{x: -1, y: -16385},
+ POINT{x: -226, y: 10}];
+ let mut bez = CMILBezier::new(&curve, None);
+ let mut result: [POINT; 32] = Default::default();
+ let mut more: bool = false;
+ let count = bez.Flatten(&mut result, &mut more);
+ assert_eq!(count, 32);
+ assert_eq!(more, true);
+}
+
+#[test]
+fn bezier64_high_error() {
+ let curve: [POINT; 4] = [
+ POINT{x: -1, y: -1},
+ POINT{x: -4097, y: -1},
+ POINT{x: 65471, y: -256},
+ POINT{x: -1, y: 0}];
+ let mut bez = CMILBezier::new(&curve, None);
+ let mut result: [POINT; 32] = Default::default();
+ let mut more: bool = false;
+ let count = bez.Flatten(&mut result, &mut more);
+ assert_eq!(count, 32);
+ assert_eq!(more, true);
+} \ No newline at end of file
diff --git a/third_party/rust/wpf-gpu-raster/src/c_bindings.rs b/third_party/rust/wpf-gpu-raster/src/c_bindings.rs
new file mode 100644
index 0000000000..1016287200
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/src/c_bindings.rs
@@ -0,0 +1,158 @@
+use crate::{PathBuilder, OutputPath, OutputVertex, FillMode, rasterize_to_tri_list};
+use crate::types::{BYTE, POINT};
+
+#[no_mangle]
+pub extern "C" fn wgr_new_builder() -> *mut PathBuilder {
+ let pb = PathBuilder::new();
+ Box::into_raw(Box::new(pb))
+}
+
+#[no_mangle]
+pub extern "C" fn wgr_builder_move_to(pb: &mut PathBuilder, x: f32, y: f32) {
+ pb.move_to(x, y);
+}
+
+#[no_mangle]
+pub extern "C" fn wgr_builder_line_to(pb: &mut PathBuilder, x: f32, y: f32) {
+ pb.line_to(x, y);
+}
+
+#[no_mangle]
+pub extern "C" fn wgr_builder_curve_to(pb: &mut PathBuilder, c1x: f32, c1y: f32, c2x: f32, c2y: f32, x: f32, y: f32) {
+ pb.curve_to(c1x, c1y, c2x, c2y, x, y);
+}
+
+#[no_mangle]
+pub extern "C" fn wgr_builder_quad_to(pb: &mut PathBuilder, cx: f32, cy: f32, x: f32, y: f32) {
+ pb.quad_to(cx, cy, x, y);
+}
+
+#[no_mangle]
+pub extern "C" fn wgr_builder_close(pb: &mut PathBuilder) {
+ pb.close();
+}
+
+#[no_mangle]
+pub extern "C" fn wgr_builder_set_fill_mode(pb: &mut PathBuilder, fill_mode: FillMode) {
+ pb.set_fill_mode(fill_mode)
+}
+
+#[repr(C)]
+pub struct Path {
+ fill_mode: FillMode,
+ points: *const POINT,
+ num_points: usize,
+ types: *const BYTE,
+ num_types: usize,
+}
+
+impl From<OutputPath> for Path {
+ fn from(output_path: OutputPath) -> Self {
+ let path = Self {
+ fill_mode: output_path.fill_mode,
+ points: output_path.points.as_ptr(),
+ num_points: output_path.points.len(),
+ types: output_path.types.as_ptr(),
+ num_types: output_path.types.len(),
+ };
+ std::mem::forget(output_path);
+ path
+ }
+}
+
+impl Into<OutputPath> for Path {
+ fn into(self) -> OutputPath {
+ OutputPath {
+ fill_mode: self.fill_mode,
+ points: unsafe {
+ if self.points == std::ptr::null() {
+ Default::default()
+ } else {
+ Box::from_raw(std::slice::from_raw_parts_mut(self.points as *mut POINT, self.num_points))
+ }
+ },
+ types: unsafe {
+ if self.types == std::ptr::null() {
+ Default::default()
+ } else {
+ Box::from_raw(std::slice::from_raw_parts_mut(self.types as *mut BYTE, self.num_types))
+ }
+ },
+ }
+ }
+}
+
+#[no_mangle]
+pub extern "C" fn wgr_builder_get_path(pb: &mut PathBuilder) -> Path {
+ Path::from(pb.get_path().unwrap_or_default())
+}
+
+#[repr(C)]
+pub struct VertexBuffer {
+ data: *const OutputVertex,
+ len: usize
+}
+
+#[no_mangle]
+pub extern "C" fn wgr_path_rasterize_to_tri_list(
+ path: &Path,
+ clip_x: i32,
+ clip_y: i32,
+ clip_width: i32,
+ clip_height: i32,
+ need_inside: bool,
+ need_outside: bool,
+ rasterization_truncates: bool,
+ output_ptr: *mut OutputVertex,
+ output_capacity: usize,
+) -> VertexBuffer {
+ let output_buffer = if output_ptr != std::ptr::null_mut() {
+ unsafe { Some(std::slice::from_raw_parts_mut(output_ptr, output_capacity)) }
+ } else {
+ None
+ };
+ let mut result = rasterize_to_tri_list(
+ path.fill_mode,
+ unsafe { std::slice::from_raw_parts(path.types, path.num_types) },
+ unsafe { std::slice::from_raw_parts(path.points, path.num_points) },
+ clip_x, clip_y, clip_width, clip_height,
+ need_inside, need_outside,
+ rasterization_truncates,
+ output_buffer
+ );
+ if let Some(output_buffer_size) = result.get_output_buffer_size() {
+ VertexBuffer {
+ data: std::ptr::null(),
+ len: output_buffer_size,
+ }
+ } else {
+ let slice = result.flush_output();
+ let vb = VertexBuffer {
+ data: slice.as_ptr(),
+ len: slice.len(),
+ };
+ std::mem::forget(slice);
+ vb
+ }
+}
+
+#[no_mangle]
+pub extern "C" fn wgr_path_release(path: Path) {
+ let output_path: OutputPath = path.into();
+ drop(output_path);
+}
+
+#[no_mangle]
+pub extern "C" fn wgr_vertex_buffer_release(vb: VertexBuffer)
+{
+ if vb.data != std::ptr::null() {
+ unsafe {
+ drop(Box::from_raw(std::slice::from_raw_parts_mut(vb.data as *mut OutputVertex, vb.len)));
+ }
+ }
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn wgr_builder_release(pb: *mut PathBuilder) {
+ drop(Box::from_raw(pb));
+}
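+
+// Usage sketch (hypothetical caller, illustrative values): the extern "C"
+// entry points above are ordinary Rust functions too, so the sequence a C
+// client would follow can be written directly in Rust:
+//
+// let pb = wgr_new_builder();
+// unsafe {
+//     wgr_builder_move_to(&mut *pb, 10.0, 10.0);
+//     wgr_builder_line_to(&mut *pb, 40.0, 10.0);
+//     wgr_builder_line_to(&mut *pb, 40.0, 40.0);
+//     wgr_builder_close(&mut *pb);
+//     let path = wgr_builder_get_path(&mut *pb);
+//     // Passing a null output pointer appears (from the branches above) to
+//     // make the rasterizer allocate and return its own vertex storage.
+//     let vb = wgr_path_rasterize_to_tri_list(&path, 0, 0, 64, 64,
+//                                             true, false, false,
+//                                             std::ptr::null_mut(), 0);
+//     wgr_vertex_buffer_release(vb);
+//     wgr_path_release(path);
+//     wgr_builder_release(pb);
+// }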
diff --git a/third_party/rust/wpf-gpu-raster/src/fix.rs b/third_party/rust/wpf-gpu-raster/src/fix.rs
new file mode 100644
index 0000000000..bf4741e260
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/src/fix.rs
@@ -0,0 +1,9 @@
+use crate::types::*;
+type FIX4 = INT; // 28.4 fixed point value
+
+// constants for working with 28.4 fixed point values
+macro_rules! FIX4_SHIFT { () => { 4 } }
+macro_rules! FIX4_PRECISION { () => { 4 } }
+macro_rules! FIX4_ONE { () => { (1 << FIX4_PRECISION!()) } }
+macro_rules! FIX4_HALF { () => { (1 << (FIX4_PRECISION!()-1)) } }
+macro_rules! FIX4_MASK { () => { (FIX4_ONE!() - 1) } } \ No newline at end of file
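+
+// Worked example (illustrative): 2.5 in 28.4 fixed point is
+// 2.5 * FIX4_ONE!() = 40. Rounding back to the nearest integer is
+// (x + FIX4_HALF!()) >> FIX4_SHIFT!(), so (40 + 8) >> 4 == 3, and the
+// fractional part is x & FIX4_MASK!(), so 40 & 15 == 8 (i.e. one half).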
diff --git a/third_party/rust/wpf-gpu-raster/src/geometry_sink.rs b/third_party/rust/wpf-gpu-raster/src/geometry_sink.rs
new file mode 100644
index 0000000000..3282f10a15
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/src/geometry_sink.rs
@@ -0,0 +1,92 @@
+use crate::aacoverage::CCoverageInterval;
+use crate::nullable_ref::Ref;
+use crate::types::*;
+
+pub trait IGeometrySink
+{
+ //
+ // Aliased geometry output
+ //
+/*
+ virtual HRESULT AddVertex(
+ __in_ecount(1) const MilPoint2F &ptPosition,
+ // In: Vertex coordinates
+ __out_ecount(1) WORD *pidxOut
+ // Out: Index of vertex
+ ) PURE;
+
+ virtual HRESULT AddIndexedVertices(
+ UINT cVertices,
+ // In: number of vertices
+ __in_bcount(cVertices*uVertexStride) const void *pVertexBuffer,
+ // In: vertex buffer containing the vertices
+ UINT uVertexStride,
+ // In: size of each vertex
+ MilVertexFormat mvfFormat,
+ // In: format of each vertex
+ UINT cIndices,
+ // In: Number of indices
+ __in_ecount(cIndices) const UINT *puIndexBuffer
+ // In: index buffer
+ ) PURE;
+
+ virtual void SetTransformMapping(
+ __in_ecount(1) const MILMatrix3x2 &mat2DTransform
+ ) PURE;
+
+ virtual HRESULT AddTriangle(
+ DWORD idx1,
+ // In: Index of triangle's first vertex
+ DWORD idx2,
+ // In: Index of triangle's second vertex
+ DWORD idx3
+ // In: Index of triangle's third vertex
+ ) PURE;
+
+ //
+ // Trapezoidal AA geometry output
+ //
+*/
+ fn AddComplexScan(&mut self,
+ nPixelY: INT,
+ // In: y coordinate in pixel space
+ pIntervalSpanStart: Ref<CCoverageInterval>
+ // In: coverage segments
+ ) -> HRESULT;
+
+ fn AddTrapezoid(
+ &mut self,
+ rYMin: f32,
+ // In: y coordinate of top of trapezoid
+ rXLeftYMin: f32,
+ // In: x coordinate for top left
+ rXRightYMin: f32,
+ // In: x coordinate for top right
+ rYMax: f32,
+ // In: y coordinate of bottom of trapezoid
+ rXLeftYMax: f32,
+ // In: x coordinate for bottom left
+ rXRightYMax: f32,
+ // In: x coordinate for bottom right
+ rXDeltaLeft: f32,
+ // In: trapezoid expand radius
+ rXDeltaRight: f32
+ // In: trapezoid expand radius
+ ) -> HRESULT;
+
+ fn IsEmpty(&self) -> bool;
+ /*
+ virtual HRESULT AddParallelogram(
+ __in_ecount(4) const MilPoint2F *rgPosition
+ ) PURE;
+
+ //
+ // Query sink status
+ //
+
+ // Some geometry generators don't actually know if they have output
+ // any triangles, so they need to get this information from the geometry sink.
+
+ virtual BOOL IsEmpty() PURE;
+*/
+}
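+
+// A minimal implementor sketch (hypothetical, for illustration only): a sink
+// that just counts what it receives, which is enough to answer IsEmpty:
+//
+// struct CountingSink { scans: usize, trapezoids: usize }
+//
+// impl IGeometrySink for CountingSink {
+//     fn AddComplexScan(&mut self, _nPixelY: INT,
+//                       _pIntervalSpanStart: Ref<CCoverageInterval>) -> HRESULT {
+//         self.scans += 1; S_OK
+//     }
+//     fn AddTrapezoid(&mut self, _rYMin: f32, _rXLeftYMin: f32, _rXRightYMin: f32,
+//                     _rYMax: f32, _rXLeftYMax: f32, _rXRightYMax: f32,
+//                     _rXDeltaLeft: f32, _rXDeltaRight: f32) -> HRESULT {
+//         self.trapezoids += 1; S_OK
+//     }
+//     fn IsEmpty(&self) -> bool { self.scans == 0 && self.trapezoids == 0 }
+// }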
diff --git a/third_party/rust/wpf-gpu-raster/src/helpers.rs b/third_party/rust/wpf-gpu-raster/src/helpers.rs
new file mode 100644
index 0000000000..27a594831e
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/src/helpers.rs
@@ -0,0 +1,55 @@
+pub fn Int32x32To64(a: i32, b: i32) -> i64 { a as i64 * b as i64 }
+
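+// The macros below are lightweight Rust stand-ins for the tracing and
+// HRESULT-handling macros in the original MIL/WPF C++ sources: IsTagEnabled
+// and TraceTag stub out debug tracing; IFC, IFR and IFCOOM check a result
+// (asserting, early-returning, or null-checking respectively); RRETURN and
+// RRETURN1 validate a result before returning it, RRETURN1 tolerating one
+// expected non-S_OK value.
+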
+macro_rules! IsTagEnabled {
+ ($e: expr) => {
+ false
+ }
+}
+
+macro_rules! TraceTag {
+ (($e: expr, $s: expr)) => {
+ dbg!($s)
+ }
+}
+
+macro_rules! IFC {
+ ($e: expr) => {
+ assert_eq!($e, S_OK);
+ }
+}
+
+macro_rules! IFR {
+ ($e: expr) => {
+ let hresult = $e;
+ if (hresult != S_OK) { return hresult }
+ }
+}
+
+macro_rules! __analysis_assume {
+ ($e: expr) => {
+ }
+}
+
+macro_rules! IFCOOM {
+ ($e: expr) => {
+ assert_ne!($e, NULL());
+ }
+}
+
+macro_rules! RRETURN1 {
+ ($e: expr, $s1: expr) => {
+ if $e == $s1 {
+ } else {
+ assert_eq!($e, S_OK);
+ }
+ return $e;
+ }
+}
+
+macro_rules! RRETURN {
+ ($e: expr) => {
+ assert_eq!($e, S_OK);
+ return $e;
+ }
+}
+
diff --git a/third_party/rust/wpf-gpu-raster/src/hwrasterizer.rs b/third_party/rust/wpf-gpu-raster/src/hwrasterizer.rs
new file mode 100644
index 0000000000..49fed1a1bf
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/src/hwrasterizer.rs
@@ -0,0 +1,1455 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#![allow(unused_parens)]
+
+use crate::aacoverage::{CCoverageBuffer, c_rInvShiftSize, c_antiAliasMode, c_nShift, CCoverageInterval, c_nShiftMask, c_nShiftSize, c_nHalfShiftSize};
+use crate::hwvertexbuffer::CHwVertexBufferBuilder;
+use crate::matrix::{CMILMatrix, CMatrix};
+use crate::nullable_ref::Ref;
+use crate::aarasterizer::*;
+use crate::geometry_sink::IGeometrySink;
+use crate::helpers::Int32x32To64;
+use crate::types::*;
+use typed_arena_nomut::Arena;
+
+//-----------------------------------------------------------------------------
+//
+
+//
+// Description:
+// Trapezoidal anti-aliasing implementation
+//
+// >>>> Note that some of this code is duplicated in sw\aarasterizer.cpp,
+// >>>> so changes to this file may need to propagate.
+//
+// pursue reduced code duplication
+//
+
+macro_rules! MIL_THR {
+ ($e: expr) => {
+ $e//assert_eq!($e, S_OK);
+ }
+}
+
+
+//
+// Optimize for speed instead of size for these critical methods
+//
+
+
+//-------------------------------------------------------------------------
+//
+// Coordinate system encoding
+//
+// All points/coordinates are named as follows:
+//
+// <HungarianType><CoordinateSystem>[X|Y][Left|Right|Top|Bottom]VariableName
+//
+// Common hungarian types:
+// n - INT
+// u - UINT
+// r - FLOAT
+//
+// Coordinate systems:
+// Pixel - Device pixel space assuming integer coordinates in the pixel top left corner.
+// Subpixel - Overscaled space.
+//
+// To convert between Pixel to Subpixel, we have:
+// nSubpixelCoordinate = nPixelCoordinate << c_nShift;
+// nPixelCoordinate = nSubpixelCoordinate >> c_nShift;
+//
+// Note that the conversion to nPixelCoordinate needs to also track
+// (nSubpixelCoordinate & c_nShiftMask) to maintain the full value.
+//
+// Note that since trapezoidal only supports 8x8, c_nShiftSize is always equal to 8. So,
+// (1, 2) in pixel space would become (8, 16) in subpixel space.
+//
+// [X|Y]
+// Indicates which coordinate is being referred to.
+//
+// [Left|Right|Top|Bottom]
+// When referring to trapezoids or rectangular regions, this
+// component indicates which edge is being referred to.
+//
+// VariableName
+// Descriptive portion of the variable name
+//
+//-------------------------------------------------------------------------
+
+
+//-------------------------------------------------------------------------
+//
+// Function: IsFractionGreaterThan
+//
+// Synopsis:
+// Determine if nNumeratorA/nDenominatorA > nNumeratorB/nDenominatorB
+//
+// Note that we assume all denominators are strictly greater than zero.
+//
+//-------------------------------------------------------------------------
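+// Worked example: 3/7 > 2/5 iff 3*5 > 2*7 iff 15 > 14, which holds.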
+fn IsFractionGreaterThan(
+ nNumeratorA: INT, // Left hand side numerator
+ /* __in_range(>=, 1) */ nDenominatorA: INT, // Left hand side denominator
+ nNumeratorB: INT, // Right hand side numerator
+ /* __in_range(>=, 1) */ nDenominatorB: INT, // Right hand side denominator
+ ) -> bool
+{
+ //
+ // nNumeratorA/nDenominatorA > nNumeratorB/nDenominatorB
+ // iff nNumeratorA*nDenominatorB/nDenominatorA > nNumeratorB, since nDenominatorB > 0
+ // iff nNumeratorA*nDenominatorB > nNumeratorB*nDenominatorA, since nDenominatorA > 0
+ //
+ // Now, all input parameters are 32-bit integers, so we need to use
+ // a 64-bit result to compute the product.
+ //
+
+ let lNumeratorAxDenominatorB = Int32x32To64(nNumeratorA, nDenominatorB);
+ let lNumeratorBxDenominatorA = Int32x32To64(nNumeratorB, nDenominatorA);
+
+ return (lNumeratorAxDenominatorB > lNumeratorBxDenominatorA);
+}
+
+//-------------------------------------------------------------------------
+//
+// Function: IsFractionLessThan
+//
+// Synopsis:
+// Determine if nNumeratorA/nDenominatorA < nNumeratorB/nDenominatorB
+//
+// Note that we assume all denominators are strictly greater than zero.
+//
+//-------------------------------------------------------------------------
+fn
+IsFractionLessThan(
+ nNumeratorA: INT, // Left hand side numerator
+ /* __in_range(>=, 1) */ nDenominatorA: INT, // Left hand side denominator
+ nNumeratorB: INT, // Right hand side numerator
+ /* __in_range(>=, 1) */ nDenominatorB: INT, // Right hand side denominator
+) -> bool
+{
+ //
+    // Same check as previous function with a less than comparison instead of
+ // a greater than comparison.
+ //
+
+ let lNumeratorAxDenominatorB = Int32x32To64(nNumeratorA, nDenominatorB);
+ let lNumeratorBxDenominatorA = Int32x32To64(nNumeratorB, nDenominatorA);
+
+ return (lNumeratorAxDenominatorB < lNumeratorBxDenominatorA);
+}
+
+
+//-------------------------------------------------------------------------
+//
+// Function: AdvanceDDAMultipleSteps
+//
+// Synopsis:
+// Advance the DDA by multiple steps
+//
+//-------------------------------------------------------------------------
+fn
+AdvanceDDAMultipleSteps(
+ pEdgeLeft: &CEdge, // Left edge from active edge list
+ pEdgeRight: &CEdge, // Right edge from active edge list
+ nSubpixelYAdvance: INT, // Number of steps to advance the DDA
+ nSubpixelXLeftBottom: &mut INT, // Resulting left x position
+ nSubpixelErrorLeftBottom: &mut INT, // Resulting left x position error
+ nSubpixelXRightBottom: &mut INT, // Resulting right x position
+ nSubpixelErrorRightBottom: &mut INT // Resulting right x position error
+ )
+{
+ //
+ // In this method, we need to be careful of overflow. Expected input ranges for values are:
+ //
+ // edge points: x and y subpixel space coordinates are between [-2^26, 2^26]
+ // since we start with 28.4 space (and are now in subpixel space,
+ // i.e., no 16x scale) and assume 2 bits of working space.
+ //
+ // This assumption is ensured by TransformRasterizerPointsTo28_4.
+ //
+ #[cfg(debug_assertions)]
+ {
+ let nDbgPixelCoordinateMax = (1 << 26);
+ let nDbgPixelCoordinateMin = -nDbgPixelCoordinateMax;
+
+ assert!(pEdgeLeft.X.get() >= nDbgPixelCoordinateMin && pEdgeLeft.X.get() <= nDbgPixelCoordinateMax);
+ assert!(pEdgeLeft.EndY >= nDbgPixelCoordinateMin && pEdgeLeft.EndY <= nDbgPixelCoordinateMax);
+ assert!(pEdgeRight.X.get() >= nDbgPixelCoordinateMin && pEdgeRight.X.get() <= nDbgPixelCoordinateMax);
+ assert!(pEdgeRight.EndY >= nDbgPixelCoordinateMin && pEdgeRight.EndY <= nDbgPixelCoordinateMax);
+
+ //
+ // errorDown: (0, 2^30)
+ // Since errorDown is the edge delta y in 28.4 space (not subpixel space
+ // like the end points), we have a larger range of (0, 2^32) for the positive
+ // error down. With 2 bits of work space (which TransformRasterizerPointsTo28_4
+ // ensures), we know we are between (0, 2^30)
+ //
+
+ let nDbgErrorDownMax: INT = (1 << 30);
+ assert!(pEdgeLeft.ErrorDown > 0 && pEdgeLeft.ErrorDown < nDbgErrorDownMax);
+ assert!(pEdgeRight.ErrorDown > 0 && pEdgeRight.ErrorDown < nDbgErrorDownMax);
+
+ //
+ // errorUp: [0, errorDown)
+ //
+ assert!(pEdgeLeft.ErrorUp >= 0 && pEdgeLeft.ErrorUp < pEdgeLeft.ErrorDown);
+ assert!(pEdgeRight.ErrorUp >= 0 && pEdgeRight.ErrorUp < pEdgeRight.ErrorDown);
+ }
+
+ //
+ // Advance the left edge
+ //
+
+    // Since each point on the edge is within 28.4 space, the following computation can't overflow.
+ *nSubpixelXLeftBottom = pEdgeLeft.X.get() + nSubpixelYAdvance*pEdgeLeft.Dx;
+
+ // Since the error values can be close to 2^30, we can get an overflow by multiplying with yAdvance.
+ // So, we need to use a 64-bit temporary in this case.
+ let mut llSubpixelErrorBottom: LONGLONG = pEdgeLeft.Error.get() as LONGLONG + Int32x32To64(nSubpixelYAdvance, pEdgeLeft.ErrorUp);
+ if (llSubpixelErrorBottom >= 0)
+ {
+ let llSubpixelXLeftDelta = llSubpixelErrorBottom / (pEdgeLeft.ErrorDown as LONGLONG);
+
+ // The delta should remain in range since it still represents a delta along the edge which
+ // we know fits entirely in 28.4. Note that we add one here since the error must end up
+ // less than 0.
+ assert!(llSubpixelXLeftDelta < INT::MAX as LONGLONG);
+ let nSubpixelXLeftDelta: INT = (llSubpixelXLeftDelta as INT) + 1;
+
+ *nSubpixelXLeftBottom += nSubpixelXLeftDelta;
+ llSubpixelErrorBottom -= Int32x32To64(pEdgeLeft.ErrorDown, nSubpixelXLeftDelta);
+ }
+
+ // At this point, the subtraction above should have generated an error that is within
+ // (-pLeft->ErrorDown, 0)
+
+ assert!((llSubpixelErrorBottom > -pEdgeLeft.ErrorDown as LONGLONG) && (llSubpixelErrorBottom < 0));
+ *nSubpixelErrorLeftBottom = (llSubpixelErrorBottom as INT);
+
+ //
+ // Advance the right edge
+ //
+
+    // Since each point on the edge is within 28.4 space, the following computation can't overflow.
+ *nSubpixelXRightBottom = pEdgeRight.X.get() + nSubpixelYAdvance*pEdgeRight.Dx;
+
+ // Since the error values can be close to 2^30, we can get an overflow by multiplying with yAdvance.
+ // So, we need to use a 64-bit temporary in this case.
+ llSubpixelErrorBottom = pEdgeRight.Error.get() as LONGLONG + Int32x32To64(nSubpixelYAdvance, pEdgeRight.ErrorUp);
+ if (llSubpixelErrorBottom >= 0)
+ {
+ let llSubpixelXRightDelta: LONGLONG = llSubpixelErrorBottom / (pEdgeRight.ErrorDown as LONGLONG);
+
+ // The delta should remain in range since it still represents a delta along the edge which
+ // we know fits entirely in 28.4. Note that we add one here since the error must end up
+ // less than 0.
+ assert!(llSubpixelXRightDelta < INT::MAX as LONGLONG);
+ let nSubpixelXRightDelta: INT = (llSubpixelXRightDelta as INT) + 1;
+
+ *nSubpixelXRightBottom += nSubpixelXRightDelta;
+ llSubpixelErrorBottom -= Int32x32To64(pEdgeRight.ErrorDown, nSubpixelXRightDelta);
+ }
+
+ // At this point, the subtraction above should have generated an error that is within
+ // (-pRight->ErrorDown, 0)
+
+ assert!((llSubpixelErrorBottom > -pEdgeRight.ErrorDown as LONGLONG) && (llSubpixelErrorBottom < 0));
+ *nSubpixelErrorRightBottom = (llSubpixelErrorBottom as INT);
+}
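+
+// Worked example (values chosen for illustration): an edge with Dx = 0,
+// ErrorUp = 1, ErrorDown = 3 (a slope of 1/3 subpixel in x per subpixel in y)
+// and a biased Error of -2, advanced by nSubpixelYAdvance = 6:
+// llSubpixelErrorBottom = -2 + 6*1 = 4 >= 0, so the delta is 4/3 + 1 = 2;
+// x advances by 2 and the error becomes 4 - 3*2 = -2, back inside
+// (-ErrorDown, 0). Six rows along a 1/3 slope indeed advance x by 2.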
+
+//-------------------------------------------------------------------------
+//
+// Function: ComputeDeltaUpperBound
+//
+// Synopsis:
+// Compute some value that is >= nSubpixelAdvanceY*|1/m| where m is the
+// slope defined by the edge below.
+//
+//-------------------------------------------------------------------------
+fn
+ComputeDeltaUpperBound(
+ pEdge: &CEdge, // Edge containing 1/m value used for computation
+ nSubpixelYAdvance: INT // Multiplier in synopsis expression
+ ) -> INT
+{
+ let nSubpixelDeltaUpperBound: INT;
+
+ //
+ // Compute the delta bound
+ //
+
+ if (pEdge.ErrorUp == 0)
+ {
+ //
+ // No errorUp, so simply compute bound based on dx value
+ //
+
+ nSubpixelDeltaUpperBound = nSubpixelYAdvance*(pEdge.Dx).abs();
+ }
+ else
+ {
+ let nAbsDx: INT;
+ let nAbsErrorUp: INT;
+
+ //
+ // Compute abs of (dx, error)
+ //
+ // Here, we can assume errorUp > 0
+ //
+
+ assert!(pEdge.ErrorUp > 0);
+
+ if (pEdge.Dx >= 0)
+ {
+ nAbsDx = pEdge.Dx;
+ nAbsErrorUp = pEdge.ErrorUp;
+ }
+ else
+ {
+ //
+ // Dx < 0, so negate (dx, errorUp)
+ //
+ // Note that since errorUp > 0, we know -errorUp < 0 and that
+ // we need to add errorDown to get an errorUp >= 0 which
+            // also means subtracting one from dx.
+ //
+
+ nAbsDx = -pEdge.Dx - 1;
+ nAbsErrorUp = -pEdge.ErrorUp + pEdge.ErrorDown;
+ }
+
+ //
+ // Compute the bound of nSubpixelAdvanceY*|1/m|
+ //
+ // Note that the +1 below is included to bound any left over errorUp that we are dropping here.
+ //
+
+ nSubpixelDeltaUpperBound = nSubpixelYAdvance*nAbsDx + (nSubpixelYAdvance*nAbsErrorUp)/pEdge.ErrorDown + 1;
+ }
+
+ return nSubpixelDeltaUpperBound;
+}
+
+//-------------------------------------------------------------------------
+//
+// Function: ComputeDistanceLowerBound
+//
+// Synopsis:
+// Compute some value that is <= distance between
+// (pEdgeLeft->X, pEdgeLeft->Error) and (pEdgeRight->X, pEdgeRight->Error)
+//
+//-------------------------------------------------------------------------
+fn
+ComputeDistanceLowerBound(
+ pEdgeLeft: &CEdge, // Left edge containing the position for the distance computation
+ pEdgeRight: &CEdge // Right edge containing the position for the distance computation
+ ) -> INT
+{
+ //
+ // Note: In these comments, error1 and error2 are theoretical. The actual Error members
+ // are biased by -1.
+ //
+ // distance = (x2 + error2/errorDown2) - (x1 + error1/errorDown1)
+ // = x2 - x1 + error2/errorDown2 - error1/errorDown1
+ // >= x2 - x1 + error2/errorDown2 , since error1 < 0
+ // >= x2 - x1 - 1 , since error2 < 0
+ // = pEdgeRight->X - pEdgeLeft->X - 1
+ //
+ // In the special case where error2/errorDown2 >= error1/errorDown1, we
+    // can get a tighter bound of:
+ //
+ // pEdgeRight->X - pEdgeLeft->X
+ //
+ // This case occurs often in thin strokes, so we check for it here.
+ //
+
+ assert!(pEdgeLeft.Error.get() < 0);
+ assert!(pEdgeRight.Error.get() < 0);
+ assert!(pEdgeLeft.X <= pEdgeRight.X);
+
+ let mut nSubpixelXDistanceLowerBound: INT = pEdgeRight.X.get() - pEdgeLeft.X.get();
+
+ //
+ // If error2/errorDown2 < error1/errorDown1, we need to subtract one from the bound.
+    // Note that the errors are actually biased by -1, so we have to add one before
+ // we do the comparison.
+ //
+
+ if (IsFractionLessThan(
+ pEdgeRight.Error.get()+1,
+ pEdgeRight.ErrorDown,
+ pEdgeLeft.Error.get()+1,
+ pEdgeLeft.ErrorDown
+ ))
+ {
+ // We can't use the tighter lower bound described above, so we need to subtract one to
+ // ensure we have a lower bound.
+
+ nSubpixelXDistanceLowerBound -= 1;
+ }
+
+ return nSubpixelXDistanceLowerBound;
+}
+pub struct CHwRasterizer<'x, 'y, 'z> {
+ m_rcClipBounds: MilPointAndSizeL,
+ m_matWorldToDevice: CMILMatrix,
+ m_pIGeometrySink: &'x mut CHwVertexBufferBuilder<'y, 'z>,
+ m_fillMode: MilFillMode,
+ /*
+DynArray<MilPoint2F> *m_prgPoints;
+DynArray<BYTE> *m_prgTypes;
+MilPointAndSizeL m_rcClipBounds;
+CMILMatrix m_matWorldToDevice;
+IGeometrySink *m_pIGeometrySink;
+MilFillMode::Enum m_fillMode;
+
+//
+// Complex scan coverage buffer
+//
+
+CCoverageBuffer m_coverageBuffer;
+
+CD3DDeviceLevel1 * m_pDeviceNoRef;*/
+ //m_coverageBuffer: CCoverageBuffer,
+}
+
+//-------------------------------------------------------------------------
+//
+// Function: CHwRasterizer::ConvertSubpixelXToPixel
+//
+// Synopsis:
+// Convert from our subpixel coordinate (x + error/errorDown)
+// to a floating point value.
+//
+//-------------------------------------------------------------------------
+fn ConvertSubpixelXToPixel(
+ x: INT,
+ error: INT,
+ rErrorDown: f32
+ ) -> f32
+{
+ assert!(rErrorDown > f32::EPSILON);
+ return ((x as f32) + (error as f32)/rErrorDown)*c_rInvShiftSize;
+}
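+
+// Worked example (illustrative values): x = 9, error = -1, rErrorDown = 4.0
+// gives a subpixel position of 9 - 1/4 = 8.75; multiplying by
+// c_rInvShiftSize (1/8 for the 8x8 sampling grid) yields 1.09375 pixels.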
+
+//-------------------------------------------------------------------------
+//
+// Function: CHwRasterizer::ConvertSubpixelYToPixel
+//
+// Synopsis:
+// Convert from our subpixel space to pixel space assuming no
+// error.
+//
+//-------------------------------------------------------------------------
+fn ConvertSubpixelYToPixel(
+ nSubpixel: i32
+ ) -> f32
+{
+ return (nSubpixel as f32)*c_rInvShiftSize;
+}
+
+impl<'x, 'y, 'z> CHwRasterizer<'x, 'y, 'z> {
+//-------------------------------------------------------------------------
+//
+// Function: CHwRasterizer::RasterizePath
+//
+// Synopsis:
+// Internal rasterizer fill path. Note that this method follows the
+// same basic structure as the software rasterizer in aarasterizer.cpp.
+//
+// The general algorithm used for rasterization is a vertical sweep of
+// the shape that maintains an active edge list. The sweep is done
+// at a sub-scanline resolution and results in either:
+// 1. Sub-scanlines being combined in the coverage buffer and output
+// as "complex scans".
+// 2. Simple trapezoids being recognized in the active edge list
+// and output using a faster simple trapezoid path.
+//
+// This method consists of the setup to the main rasterization loop
+// which includes:
+//
+// 1. Setup of the clip rectangle
+// 2. Calling FixedPointPathEnumerate to populate our inactive
+// edge list.
+// 3. Delegating to RasterizePath to execute the main loop.
+//
+//-------------------------------------------------------------------------
+pub fn RasterizePath(
+ &mut self,
+ rgpt: &[POINT],
+ rgTypes: &[BYTE],
+ cPoints: UINT,
+ pmatWorldTransform: &CMILMatrix
+ ) -> HRESULT
+{
+ let mut hr;
+ // Default is not implemented for arrays of size 40 so we need to use map
+ let mut inactiveArrayStack: [CInactiveEdge; INACTIVE_LIST_NUMBER!()] = [(); INACTIVE_LIST_NUMBER!()].map(|_| Default::default());
+ let mut pInactiveArray: &mut [CInactiveEdge];
+ let mut pInactiveArrayAllocation: Vec<CInactiveEdge>;
+ let mut edgeHead: CEdge = Default::default();
+ let mut edgeTail: CEdge = Default::default();
+ let pEdgeActiveList: Ref<CEdge>;
+ let mut edgeStore = Arena::new();
+ //edgeStore.init();
+ let mut edgeContext: CInitializeEdgesContext = CInitializeEdgesContext::new(&mut edgeStore);
+
+ edgeContext.ClipRect = None;
+
+ edgeTail.X.set(i32::MAX); // Terminator to active list
+ edgeTail.StartY = i32::MAX; // Terminator to inactive list
+
+ edgeTail.EndY = i32::MIN;
+ edgeHead.X.set(i32::MIN); // Beginning of active list
+ edgeContext.MaxY = i32::MIN;
+
+ edgeHead.Next.set(Ref::new(&edgeTail));
+ pEdgeActiveList = Ref::new(&mut edgeHead);
+ //edgeContext.Store = &mut edgeStore;
+
+ edgeContext.AntiAliasMode = c_antiAliasMode;
+ assert!(edgeContext.AntiAliasMode != MilAntiAliasMode::None);
+
+ // If the path contains 0 or 1 points, we can ignore it.
+ if (cPoints < 2)
+ {
+ return S_OK;
+ }
+
+ let nPixelYClipBottom: INT = self.m_rcClipBounds.Y + self.m_rcClipBounds.Height;
+
+ // Scale the clip bounds rectangle by 16 to account for our
+ // scaling to 28.4 coordinates:
+
+ let mut clipBounds : RECT = Default::default();
+ clipBounds.left = self.m_rcClipBounds.X * FIX4_ONE!();
+ clipBounds.top = self.m_rcClipBounds.Y * FIX4_ONE!();
+ clipBounds.right = (self.m_rcClipBounds.X + self.m_rcClipBounds.Width) * FIX4_ONE!();
+ clipBounds.bottom = (self.m_rcClipBounds.Y + self.m_rcClipBounds.Height) * FIX4_ONE!();
+
+ edgeContext.ClipRect = Some(&clipBounds);
+
+ //////////////////////////////////////////////////////////////////////////
+ // Convert all our points to 28.4 fixed point:
+
+ let mut matrix: CMILMatrix = (*pmatWorldTransform).clone();
+ AppendScaleToMatrix(&mut matrix, TOREAL!(16), TOREAL!(16));
+
+ let coverageBuffer: CCoverageBuffer = Default::default();
+ // Initialize the coverage buffer
+ coverageBuffer.Initialize();
+
+ // Enumerate the path and construct the edge table:
+
+ hr = MIL_THR!(FixedPointPathEnumerate(
+ rgpt,
+ rgTypes,
+ cPoints,
+ &matrix,
+ edgeContext.ClipRect,
+ &mut edgeContext
+ ));
+
+ if (FAILED(hr))
+ {
+ if (hr == WGXERR_VALUEOVERFLOW)
+ {
+ // Draw nothing on value overflow and return
+ hr = S_OK;
+ }
+ return hr;
+ }
+
+    let nTotalCount: UINT = edgeContext.Store.len() as u32;
+ if (nTotalCount == 0)
+ {
+ hr = S_OK; // We're outta here (empty path or entirely clipped)
+ return hr;
+ }
+
+    // At this point, there have to be at least two edges. If there's only
+    // one, it means that we didn't do the trivial rejection properly.
+
+ assert!((nTotalCount >= 2) && (nTotalCount <= (UINT::MAX - 2)));
+
+ pInactiveArray = &mut inactiveArrayStack[..];
+ if (nTotalCount > (INACTIVE_LIST_NUMBER!() as u32 - 2))
+ {
+ pInactiveArrayAllocation = vec![Default::default(); nTotalCount as usize + 2];
+
+ pInactiveArray = &mut pInactiveArrayAllocation;
+ }
+
+ // Initialize and sort the inactive array:
+
+ let nSubpixelYCurrent = InitializeInactiveArray(
+ edgeContext.Store,
+ pInactiveArray,
+ nTotalCount,
+ Ref::new(&edgeTail)
+ );
+
+ let mut nSubpixelYBottom = edgeContext.MaxY;
+
+ assert!(nSubpixelYBottom > 0);
+
+ // Skip the head sentinel on the inactive array:
+
+ pInactiveArray = &mut pInactiveArray[1..];
+
+ //
+ // Rasterize the path
+ //
+
+ // 'nPixelYClipBottom' is in screen space and needs to be converted to the
+ // format we use for antialiasing.
+
+ nSubpixelYBottom = nSubpixelYBottom.min(nPixelYClipBottom << c_nShift);
+
+ // 'nTotalCount' should have been zero if all the edges were
+ // clipped out (RasterizeEdges assumes there's at least one edge
+ // to be drawn):
+
+ assert!(nSubpixelYBottom > nSubpixelYCurrent);
+
+ IFC!(self.RasterizeEdges(
+ pEdgeActiveList,
+ pInactiveArray,
+ &coverageBuffer,
+ nSubpixelYCurrent,
+ nSubpixelYBottom
+ ));
+
+ return hr;
+}
+
+//-------------------------------------------------------------------------
+//
+// Function: CHwRasterizer::new
+//
+// Synopsis:
+// 1. Ensure clean state
+// 2. Convert path to internal format
+//
+//-------------------------------------------------------------------------
+pub fn new(
+ pIGeometrySink: &'x mut CHwVertexBufferBuilder<'y, 'z>,
+ fillMode: MilFillMode,
+ pmatWorldToDevice: Option<CMatrix<CoordinateSpace::Shape,CoordinateSpace::Device>>,
+ clipRect: MilPointAndSizeL,
+ ) -> Self
+{
+ //
+ // PS#856364-2003/07/01-ashrafm Remove pixel center fixup
+ //
+ // Incoming coordinate space uses integers at upper-left of pixel (pixel
+ // center are half integers) at device level.
+ //
+ // Rasterizer uses the coordinate space with integers at pixel center.
+ //
+ // To convert from center (1/2, 1/2) to center (0, 0) we need to subtract
+ // 1/2 from each coordinate in device space.
+ //
+    // See InitializeEdges in aarasterizer.cpp to see how we unconvert for
+ // antialiased rendering.
+ //
+
+ let mut matWorldHPCToDeviceIPC = pmatWorldToDevice.unwrap_or(CMatrix::Identity());
+ matWorldHPCToDeviceIPC.SetDx(matWorldHPCToDeviceIPC.GetDx() - 0.5);
+ matWorldHPCToDeviceIPC.SetDy(matWorldHPCToDeviceIPC.GetDy() - 0.5);
+
+ //
+ // Set local state.
+ //
+
+ // There's an opportunity for early clipping here
+ //
+ // However, since the rasterizer itself does a reasonable job of clipping some
+ // cases, we don't early clip yet.
+
+ Self {
+ m_fillMode: fillMode,
+ m_rcClipBounds: clipRect,
+ m_pIGeometrySink: pIGeometrySink,
+ m_matWorldToDevice: matWorldHPCToDeviceIPC,
+ }
+}
+
+//-------------------------------------------------------------------------
+//
+// Function: CHwRasterizer::SendGeometry
+//
+// Synopsis:
+// Tessellate and send geometry to the pipeline
+//
+//-------------------------------------------------------------------------
+pub fn SendGeometry(&mut self,
+ points: &[POINT],
+ types: &[BYTE],
+ ) -> HRESULT
+{
+ let mut hr = S_OK;
+
+ //
+ // Rasterize the path
+ //
+ let count = points.len() as u32;
+ IFR!(self.RasterizePath(
+ points,
+ types,
+ count,
+ &self.m_matWorldToDevice.clone(),
+ ));
+ /*
+ IFC!(self.RasterizePath(
+ self.m_prgPoints.as_ref().unwrap().GetDataBuffer(),
+ self.m_prgTypes.as_ref().unwrap().GetDataBuffer(),
+ self.m_prgPoints.as_ref().unwrap().GetCount() as u32,
+ &self.m_matWorldToDevice,
+ self.m_fillMode
+ ));*/
+
+ //
+ // It's possible that we output no triangles. For example, if we tried to fill a
+ // line instead of stroke it. Since we have no efficient way to detect all these cases
+ // up front, we simply rasterize and see if we generated anything.
+ //
+
+ if (self.m_pIGeometrySink.IsEmpty())
+ {
+ hr = WGXHR_EMPTYFILL;
+ }
+
+ RRETURN1!(hr, WGXHR_EMPTYFILL);
+}
+/*
+//-------------------------------------------------------------------------
+//
+// Function: CHwRasterizer::SendGeometryModifiers
+//
+// Synopsis: Send an AA color source to the pipeline.
+//
+//-------------------------------------------------------------------------
+fn SendGeometryModifiers(&self,
+ pPipelineBuilder: &mut CHwPipelineBuilder
+ ) -> HRESULT
+{
+ let hr = S_OK;
+
+ let pAntiAliasColorSource = None;
+
+ self.m_pDeviceNoRef.GetColorComponentSource(
+ CHwColorComponentSource::Diffuse,
+ &pAntiAliasColorSource
+ );
+
+ IFC!(pPipelineBuilder.Set_AAColorSource(
+ pAntiAliasColorSource
+ ));
+
+ return hr;
+}*/
+
+//-------------------------------------------------------------------------
+//
+// Function: CHwRasterizer::GenerateOutputAndClearCoverage
+//
+// Synopsis:
+// Collapse output and generate span data
+//
+//-------------------------------------------------------------------------
+fn
+GenerateOutputAndClearCoverage<'a>(&mut self, coverageBuffer: &'a CCoverageBuffer<'a>,
+ nSubpixelY: INT
+ ) -> HRESULT
+{
+ let hr = S_OK;
+ let nPixelY = nSubpixelY >> c_nShift;
+
+ let pIntervalSpanStart: Ref<CCoverageInterval> = coverageBuffer.m_pIntervalStart.get();
+
+ IFC!(self.m_pIGeometrySink.AddComplexScan(nPixelY, pIntervalSpanStart));
+
+ coverageBuffer.Reset();
+
+ return hr;
+}
+
+//-------------------------------------------------------------------------
+//
+// Function: CHwRasterizer::ComputeTrapezoidsEndScan
+//
+// Synopsis:
+//      This method takes the current active edge list (and ycurrent)
+// and will determine:
+//
+// 1. Can we output some list of simple trapezoids for this active
+// edge list? If the answer is no, then we simply return
+// nSubpixelYCurrent indicating this condition.
+//
+// 2. If we can output some set of trapezoids, then what is the
+// next ycurrent, i.e., how tall are our trapezoids.
+//
+// Note that all trapezoids output for a particular active edge list
+// are all the same height.
+//
+// To further understand the conditions for making this decision, it
+// is important to consider the simple trapezoid tessellation:
+//
+// ___+_________________+___
+// / + / \ + \ '+' marks active edges
+// / + / \ + \
+// / + / \ + \
+// /__+__/___________________\__+__\
+// 1+1/m +
+//
+// Note that 1+1/edge_slope is the required expand distance to ensure
+// that we cover all pixels required.
+//
+// Now, we can fail to output any trapezoids under the following conditions:
+// 1. The expand regions along the top edge of the trapezoid overlap.
+// 2. The expand regions along the bottom edge of the trapezoid overlap
+// within the current scanline. Note that if the bottom edges overlap
+// at some later point, we can shorten our trapezoid to remove the
+// overlapping.
+//
+// The key to the algorithm at this point is to detect the above condition
+// in our active edge list and either update the returned end y position
+// or reject all together based on overlapping.
+//
+//-------------------------------------------------------------------------
+
+fn ComputeTrapezoidsEndScan(&mut self,
+ pEdgeCurrent: Ref<CEdge>,
+ nSubpixelYCurrent: INT,
+ nSubpixelYNextInactive: INT
+ ) -> INT
+{
+
+ let mut nSubpixelYBottomTrapezoids;
+ let mut pEdgeLeft: Ref<CEdge>;
+ let mut pEdgeRight: Ref<CEdge>;
+
+ //
+ // Trapezoids should always start at scanline boundaries
+ //
+
+ assert!((nSubpixelYCurrent & c_nShiftMask) == 0);
+
+ //
+ // If we are doing a winding mode fill, check that we can ignore mode and do an
+ // alternating fill in OutputTrapezoids. This condition occurs when winding is
+ // equivalent to alternating which happens if the pairwise edges have different
+ // winding directions.
+ //
+
+ if (self.m_fillMode == MilFillMode::Winding)
+ {
+ let mut pEdge = pEdgeCurrent;
+ while pEdge.EndY != INT::MIN {
+ // The active edge list always has an even number of edges which we actually
+ // assert in ASSERTACTIVELIST.
+
+ assert!(pEdge.Next.get().EndY != INT::MIN);
+
+ // If not alternating winding direction, we can't fill with alternate mode
+
+ if (pEdge.WindingDirection == pEdge.Next.get().WindingDirection)
+ {
+ // Give up until we handle winding mode
+ nSubpixelYBottomTrapezoids = nSubpixelYCurrent;
+ return nSubpixelYBottomTrapezoids;
+ }
+
+ pEdge = pEdge.Next.get().Next.get();
+ }
+ }
+
+ //
+ // For each edge, we:
+ //
+ // 1. Set the new trapezoid bottom to the min of the current
+ // one and the edge EndY
+ //
+ // 2. Check if edges will intersect during trapezoidal shrink/expand
+ //
+
+ nSubpixelYBottomTrapezoids = nSubpixelYNextInactive;
+
+ let mut pEdge = pEdgeCurrent;
+ while pEdge.EndY != INT::MIN {
+ //
+ // Step 1
+ //
+ // Updated nSubpixelYBottomTrapezoids based on edge EndY.
+ //
+ // Since edges are clipped to the current clip rect y bounds, we also know
+ // that pEdge->EndY <= nSubpixelYBottom so there is no need to check for that here.
+ //
+
+ nSubpixelYBottomTrapezoids = nSubpixelYBottomTrapezoids.min(pEdge.EndY);
+
+ //
+ // Step 2
+ //
+ // Check that edges will not overlap during trapezoid shrink/expand.
+ //
+
+ pEdgeLeft = pEdge;
+ pEdgeRight = pEdge.Next.get();
+
+ if (pEdgeRight.EndY != INT::MIN)
+ {
+ //
+ // __A__A'___________________B'_B__
+ // \ + \ / + / '+' marks active edges
+ // \ + \ / + /
+ // \ + \ / + /
+ // \__+__\____________/__+__/
+ // 1+1/m C C' D' D
+ //
+            // We need to determine that position A' <= position B' and that position C' <= position D'
+ // in the above diagram. So, we need to ensure that both the distance between
+ // A and B and the distance between C and D is greater than or equal to:
+ //
+ // 0.5 + |0.5/m1| + 0.5 + |0.5/m2| (pixel space)
+ // = shiftsize + halfshiftsize*(|1/m1| + |1/m2|) (subpixel space)
+ //
+ // So, we'll start by computing this distance. Note that we can compute a distance
+ // that is too large here since the self-intersection detection is simply used to
+ // recognize trapezoid opportunities and isn't required for visual correctness.
+ //
+
+ let nSubpixelExpandDistanceUpperBound: INT =
+ c_nShiftSize
+ + ComputeDeltaUpperBound(&*pEdgeLeft, c_nHalfShiftSize)
+ + ComputeDeltaUpperBound(&*pEdgeRight, c_nHalfShiftSize);
+
+ //
+ // Compute a top edge distance that is <= to the distance between A' and B' as follows:
+ // lowerbound(distance(A, B)) - nSubpixelExpandDistanceUpperBound
+ //
+
+ let nSubpixelXTopDistanceLowerBound: INT =
+ ComputeDistanceLowerBound(&*pEdgeLeft, &*pEdgeRight) - nSubpixelExpandDistanceUpperBound;
+
+ //
+ // Check if the top edges cross
+ //
+
+ if (nSubpixelXTopDistanceLowerBound < 0)
+ {
+ // The top edges have crossed, so we are out of luck. We can't
+ // start a trapezoid on this scanline
+
+ nSubpixelYBottomTrapezoids = nSubpixelYCurrent;
+ return nSubpixelYBottomTrapezoids;
+ }
+
+ //
+ // If the edges are converging, we need to check if they cross at
+ // nSubpixelYBottomTrapezoids
+ //
+ //
+ // 1) \ / 2) \ \ 3) / /
+ // \ / \ \ / /
+ // \ / \ \ / /
+ //
+ // The edges converge iff (dx1 > dx2 || (dx1 == dx2 && errorUp1/errorDown1 > errorUp2/errorDown2).
+ //
+ // Note that in the case where the edges do not converge, the code below will end up computing
+ // the DDA at the end points and checking for intersection again. This code doesn't rely on
+ // the fact that the edges don't converge, so we can be too conservative here.
+ //
+
+ if (pEdgeLeft.Dx > pEdgeRight.Dx
+ || ((pEdgeLeft.Dx == pEdgeRight.Dx)
+ && IsFractionGreaterThan(pEdgeLeft.ErrorUp, pEdgeLeft.ErrorDown, pEdgeRight.ErrorUp, pEdgeRight.ErrorDown)))
+ {
+
+ let nSubpixelYAdvance: INT = nSubpixelYBottomTrapezoids - nSubpixelYCurrent;
+ assert!(nSubpixelYAdvance > 0);
+
+ //
+ // Compute the edge position at nSubpixelYBottomTrapezoids
+ //
+
+ let mut nSubpixelXLeftAdjustedBottom = 0;
+ let mut nSubpixelErrorLeftBottom = 0;
+ let mut nSubpixelXRightBottom = 0;
+ let mut nSubpixelErrorRightBottom = 0;
+
+ AdvanceDDAMultipleSteps(
+ &*pEdgeLeft,
+ &*pEdgeRight,
+ nSubpixelYAdvance,
+ &mut nSubpixelXLeftAdjustedBottom,
+ &mut nSubpixelErrorLeftBottom,
+ &mut nSubpixelXRightBottom,
+ &mut nSubpixelErrorRightBottom
+ );
+
+ //
+ // Adjust the bottom left position by the expand distance for all the math
+ // that follows. Note that since we adjusted the top distance by that
+ // same expand distance, this adjustment is equivalent to moving the edges
+ // nSubpixelExpandDistanceUpperBound closer together.
+ //
+
+ nSubpixelXLeftAdjustedBottom += nSubpixelExpandDistanceUpperBound;
+
+ //
+ // Check if the bottom edge crosses.
+ //
+                    // To avoid checking error1/errorDown1 and error2/errorDown2, we assume the
+ // edges cross if nSubpixelXLeftAdjustedBottom == nSubpixelXRightBottom
+ // and thus produce a result that is too conservative.
+ //
+
+ if (nSubpixelXLeftAdjustedBottom >= nSubpixelXRightBottom)
+ {
+
+ //
+ // At this point, we have the following scenario
+ //
+ // ____d1____
+ // \ / | |
+ // \ / h1 |
+ // \/ | | nSubpixelYAdvance
+ // / \ |
+ // /__d2__\ |
+ //
+ // We want to compute h1. We know that:
+ //
+ // h1 / nSubpixelYAdvance = d1 / (d1 + d2)
+ // h1 = nSubpixelYAdvance * d1 / (d1 + d2)
+ //
+ // Now, if we approximate d1 with some d1' <= d1, we get
+ //
+ // h1 = nSubpixelYAdvance * d1 / (d1 + d2)
+ // h1 >= nSubpixelYAdvance * d1' / (d1' + d2)
+ //
+ // Similarly, if we approximate d2 with some d2' >= d2, we get
+ //
+ // h1 >= nSubpixelYAdvance * d1' / (d1' + d2)
+ // >= nSubpixelYAdvance * d1' / (d1' + d2')
+ //
+ // Since we are allowed to be too conservative with h1 (it can be
+ // less than the actual value), we'll construct such approximations
+ // for simplicity.
+ //
+ // Note that d1' = nSubpixelXTopDistanceLowerBound which we have already
+ // computed.
+ //
+ // d2 = (x1 + error1/errorDown1) - (x2 + error2/errorDown2)
+ // = x1 - x2 + error1/errorDown1 - error2/errorDown2
+ // <= x1 - x2 - error2/errorDown2 , since error1 < 0
+ // <= x1 - x2 + 1 , since error2 < 0
+ // = nSubpixelXLeftAdjustedBottom - nSubpixelXRightBottom + 1
+ //
+
+ let nSubpixelXBottomDistanceUpperBound: INT = nSubpixelXLeftAdjustedBottom - nSubpixelXRightBottom + 1;
+
+ assert!(nSubpixelXTopDistanceLowerBound >= 0);
+ assert!(nSubpixelXBottomDistanceUpperBound > 0);
+
+ #[cfg(debug_assertions)]
+                    let nDbgPreviousSubpixelYBottomTrapezoids: INT = nSubpixelYBottomTrapezoids;
+
+
+ nSubpixelYBottomTrapezoids =
+ nSubpixelYCurrent +
+ (nSubpixelYAdvance * nSubpixelXTopDistanceLowerBound) /
+ (nSubpixelXTopDistanceLowerBound + nSubpixelXBottomDistanceUpperBound);
+
+ #[cfg(debug_assertions)]
+                    assert!(nDbgPreviousSubpixelYBottomTrapezoids >= nSubpixelYBottomTrapezoids);
+
+ if (nSubpixelYBottomTrapezoids < nSubpixelYCurrent + c_nShiftSize)
+ {
+ // We no longer have a trapezoid that is at least one scanline high, so
+ // abort
+
+ nSubpixelYBottomTrapezoids = nSubpixelYCurrent;
+ return nSubpixelYBottomTrapezoids;
+ }
+ }
+ }
+ }
+
+ pEdge = pEdge.Next.get();
+ }
+
+ //
+ // Snap to pixel boundary
+ //
+
+ nSubpixelYBottomTrapezoids = nSubpixelYBottomTrapezoids & (!c_nShiftMask);
+
+ //
+ // Ensure that we are never less than nSubpixelYCurrent
+ //
+
+ assert!(nSubpixelYBottomTrapezoids >= nSubpixelYCurrent);
+
+ //
+ // Return trapezoid end scan
+ //
+
+//Cleanup:
+ return nSubpixelYBottomTrapezoids;
+}
+
+
+//-------------------------------------------------------------------------
+//
+// Function: CHwRasterizer::OutputTrapezoids
+//
+// Synopsis:
+// Given the current active edge list, output a list of
+// trapezoids.
+//
+// _________________________
+// / / \ \
+// / / \ \
+// / / \ \
+// /_____/___________________\_____\
+// 1+1/m
+//
+// We output a trapezoid where the distance in X is 1+1/m slope on either edge.
+// Note that we actually do a linear interpolation for coverage along the
+// entire falloff region which comes within 12.5% error when compared to our
+// 8x8 coverage output for complex scans. What is happening here is
+// that we are applying a linear approximation to the coverage function
+// based on slope. It is possible to get better linear interpolations
+// by varying the expanded region, but it hasn't been necessary to apply
+// these quality improvements yet.
+//
+//-------------------------------------------------------------------------
+fn
+OutputTrapezoids(&mut self,
+ pEdgeCurrent: Ref<CEdge>,
+ nSubpixelYCurrent: INT, // inclusive
+ nSubpixelYNext: INT // exclusive
+ ) -> HRESULT
+{
+
+ let hr = S_OK;
+ let nSubpixelYAdvance: INT;
+ let mut rSubpixelLeftErrorDown: f32;
+ let mut rSubpixelRightErrorDown: f32;
+ let mut rPixelXLeft: f32;
+ let mut rPixelXRight: f32;
+ let mut rSubpixelLeftInvSlope: f32;
+ let mut rSubpixelLeftAbsInvSlope: f32;
+ let mut rSubpixelRightInvSlope: f32;
+ let mut rSubpixelRightAbsInvSlope: f32;
+ let mut rPixelXLeftDelta: f32;
+ let mut rPixelXRightDelta: f32;
+
+ let mut pEdgeLeft = pEdgeCurrent;
+ let mut pEdgeRight = (*pEdgeCurrent).Next.get();
+
+ assert!((nSubpixelYCurrent & c_nShiftMask) == 0);
+ assert!(pEdgeLeft.EndY != INT::MIN);
+ assert!(pEdgeRight.EndY != INT::MIN);
+
+ //
+    // Compute the height of our trapezoids
+ //
+
+ nSubpixelYAdvance = nSubpixelYNext - nSubpixelYCurrent;
+
+ //
+ // Output each trapezoid
+ //
+
+ loop
+ {
+ //
+ // Compute x/error for end of trapezoid
+ //
+
+ let mut nSubpixelXLeftBottom: INT = 0;
+ let mut nSubpixelErrorLeftBottom: INT = 0;
+ let mut nSubpixelXRightBottom: INT = 0;
+ let mut nSubpixelErrorRightBottom: INT = 0;
+
+ AdvanceDDAMultipleSteps(
+ &*pEdgeLeft,
+ &*pEdgeRight,
+ nSubpixelYAdvance,
+ &mut nSubpixelXLeftBottom,
+ &mut nSubpixelErrorLeftBottom,
+ &mut nSubpixelXRightBottom,
+ &mut nSubpixelErrorRightBottom
+ );
+
+        // The above computation should ensure that we have a simple
+        // trapezoid at this point
+
+ assert!(nSubpixelXLeftBottom <= nSubpixelXRightBottom);
+
+ // We know we have a simple trapezoid now. Now, compute the end of our current trapezoid
+
+ assert!(nSubpixelYAdvance > 0);
+
+ //
+ // Computation of edge data
+ //
+
+ rSubpixelLeftErrorDown = pEdgeLeft.ErrorDown as f32;
+ rSubpixelRightErrorDown = pEdgeRight.ErrorDown as f32;
+ rPixelXLeft = ConvertSubpixelXToPixel(pEdgeLeft.X.get(), pEdgeLeft.Error.get(), rSubpixelLeftErrorDown);
+ rPixelXRight = ConvertSubpixelXToPixel(pEdgeRight.X.get(), pEdgeRight.Error.get(), rSubpixelRightErrorDown);
+
+ rSubpixelLeftInvSlope = pEdgeLeft.Dx as f32 + pEdgeLeft.ErrorUp as f32/rSubpixelLeftErrorDown;
+ rSubpixelLeftAbsInvSlope = rSubpixelLeftInvSlope.abs();
+ rSubpixelRightInvSlope = pEdgeRight.Dx as f32 + pEdgeRight.ErrorUp as f32/rSubpixelRightErrorDown;
+ rSubpixelRightAbsInvSlope = rSubpixelRightInvSlope.abs();
+
+ rPixelXLeftDelta = 0.5 + 0.5 * rSubpixelLeftAbsInvSlope;
+ rPixelXRightDelta = 0.5 + 0.5 * rSubpixelRightAbsInvSlope;
+
+ let rPixelYTop = ConvertSubpixelYToPixel(nSubpixelYCurrent);
+ let rPixelYBottom = ConvertSubpixelYToPixel(nSubpixelYNext);
+
+ let rPixelXBottomLeft = ConvertSubpixelXToPixel(
+ nSubpixelXLeftBottom,
+ nSubpixelErrorLeftBottom,
+ pEdgeLeft.ErrorDown as f32
+ );
+
+ let rPixelXBottomRight = ConvertSubpixelXToPixel(
+ nSubpixelXRightBottom,
+ nSubpixelErrorRightBottom,
+ pEdgeRight.ErrorDown as f32
+ );
+
+ //
+ // Output the trapezoid
+ //
+
+ IFC!(self.m_pIGeometrySink.AddTrapezoid(
+ rPixelYTop, // In: y coordinate of top of trapezoid
+ rPixelXLeft, // In: x coordinate for top left
+ rPixelXRight, // In: x coordinate for top right
+ rPixelYBottom, // In: y coordinate of bottom of trapezoid
+ rPixelXBottomLeft, // In: x coordinate for bottom left
+ rPixelXBottomRight, // In: x coordinate for bottom right
+ rPixelXLeftDelta, // In: trapezoid expand radius for left edge
+ rPixelXRightDelta // In: trapezoid expand radius for right edge
+ ));
+
+ //
+ // Update the edge data
+ //
+
+ // no need to do this if edges are stale
+
+ pEdgeLeft.X.set(nSubpixelXLeftBottom);
+ pEdgeLeft.Error.set(nSubpixelErrorLeftBottom);
+ pEdgeRight.X.set(nSubpixelXRightBottom);
+ pEdgeRight.Error.set(nSubpixelErrorRightBottom);
+
+ //
+ // Check for termination
+ //
+
+ if (pEdgeRight.Next.get().EndY == INT::MIN)
+ {
+ break;
+ }
+
+ //
+ // Advance edge data
+ //
+
+ pEdgeLeft = pEdgeRight.Next.get();
+ pEdgeRight = pEdgeLeft.Next.get();
+
+ }
+
+ return hr;
+
+}
+
+//-------------------------------------------------------------------------
+//
+// Function: CHwRasterizer::RasterizeEdges
+//
+// Synopsis:
+// Rasterize using trapezoidal AA
+//
+//-------------------------------------------------------------------------
+fn
+RasterizeEdges<'a, 'b>(&mut self,
+ pEdgeActiveList: Ref<'a, CEdge<'a>>,
+ mut pInactiveEdgeArray: &'a mut [CInactiveEdge<'a>],
+ coverageBuffer: &'b CCoverageBuffer<'b>,
+ mut nSubpixelYCurrent: INT,
+ nSubpixelYBottom: INT
+ ) -> HRESULT
+{
+ let hr: HRESULT = S_OK;
+ let mut pEdgePrevious: Ref<CEdge>;
+ let mut pEdgeCurrent: Ref<CEdge>;
+ let mut nSubpixelYNextInactive: INT = 0;
+ let mut nSubpixelYNext: INT;
+
+ pInactiveEdgeArray = InsertNewEdges(
+ pEdgeActiveList,
+ nSubpixelYCurrent,
+ pInactiveEdgeArray,
+ &mut nSubpixelYNextInactive
+ );
+
+ while (nSubpixelYCurrent < nSubpixelYBottom)
+ {
+ ASSERTACTIVELIST!(pEdgeActiveList, nSubpixelYCurrent);
+
+ //
+ // Detect trapezoidal case
+ //
+
+ pEdgePrevious = pEdgeActiveList;
+ pEdgeCurrent = pEdgeActiveList.Next.get();
+
+ nSubpixelYNext = nSubpixelYCurrent;
+
+ if (!IsTagEnabled!(tagDisableTrapezoids)
+ && (nSubpixelYCurrent & c_nShiftMask) == 0
+ && pEdgeCurrent.EndY != INT::MIN
+ && nSubpixelYNextInactive >= nSubpixelYCurrent + c_nShiftSize
+ )
+ {
+ // Edges are paired, so we can assert we have another one
+ assert!(pEdgeCurrent.Next.get().EndY != INT::MIN);
+
+ //
+ // Given an active edge list, we compute the furthest we can go in the y direction
+ // without creating self-intersection or going past the edge EndY. Note that if we
+ // can't even go one scanline, then nSubpixelYNext == nSubpixelYCurrent
+ //
+
+ nSubpixelYNext = self.ComputeTrapezoidsEndScan(Ref::new(&*pEdgeCurrent), nSubpixelYCurrent, nSubpixelYNextInactive);
+ assert!(nSubpixelYNext >= nSubpixelYCurrent);
+
+ //
+ // Attempt to output a trapezoid. If it turns out we don't have any
+            // potential trapezoids, then nSubpixelYNext == nSubpixelYCurrent
+ // indicating that we need to fall back to complex scans.
+ //
+
+ if (nSubpixelYNext >= nSubpixelYCurrent + c_nShiftSize)
+ {
+ IFC!(self.OutputTrapezoids(
+ pEdgeCurrent,
+ nSubpixelYCurrent,
+ nSubpixelYNext
+ ));
+ }
+ }
+
+ //
+ // Rasterize simple trapezoid or a complex scanline
+ //
+
+ if (nSubpixelYNext > nSubpixelYCurrent)
+ {
+ // If we advance, it must be by at least one scan line
+
+ assert!(nSubpixelYNext - nSubpixelYCurrent >= c_nShiftSize);
+
+ // Advance nSubpixelYCurrent
+
+ nSubpixelYCurrent = nSubpixelYNext;
+
+ // Remove stale edges. Note that the DDA is incremented in OutputTrapezoids.
+
+ while (pEdgeCurrent.EndY != INT::MIN)
+ {
+ if (pEdgeCurrent.EndY <= nSubpixelYCurrent)
+ {
+ // Unlink and advance
+
+ pEdgeCurrent = pEdgeCurrent.Next.get();
+ pEdgePrevious.Next.set(pEdgeCurrent);
+ }
+ else
+ {
+ // Advance
+
+ pEdgePrevious = pEdgeCurrent;
+ pEdgeCurrent = pEdgeCurrent.Next.get();
+ }
+ }
+ }
+ else
+ {
+ //
+ // Trapezoid rasterization failed, so
+ // 1) Handle case with no active edges, or
+ // 2) fall back to scan rasterization
+ //
+
+ if (pEdgeCurrent.EndY == INT::MIN)
+ {
+ nSubpixelYNext = nSubpixelYNextInactive;
+ }
+ else
+ {
+ nSubpixelYNext = nSubpixelYCurrent + 1;
+ if (self.m_fillMode == MilFillMode::Alternate)
+ {
+ IFC!(coverageBuffer.FillEdgesAlternating(pEdgeActiveList, nSubpixelYCurrent));
+ }
+ else
+ {
+ IFC!(coverageBuffer.FillEdgesWinding(pEdgeActiveList, nSubpixelYCurrent));
+ }
+ }
+
+ // If the next scan is done, output what's there:
+ if (nSubpixelYNext > (nSubpixelYCurrent | c_nShiftMask))
+ {
+ IFC!(self.GenerateOutputAndClearCoverage(coverageBuffer, nSubpixelYCurrent));
+ }
+
+ // Advance nSubpixelYCurrent
+ nSubpixelYCurrent = nSubpixelYNext;
+
+ // Advance DDA and update edge list
+ AdvanceDDAAndUpdateActiveEdgeList(nSubpixelYCurrent, pEdgeActiveList);
+ }
+
+ //
+ // Update edge list
+ //
+
+ if (nSubpixelYCurrent == nSubpixelYNextInactive)
+ {
+ pInactiveEdgeArray = InsertNewEdges(
+ pEdgeActiveList,
+ nSubpixelYCurrent,
+ pInactiveEdgeArray,
+ &mut nSubpixelYNextInactive
+ );
+ }
+ }
+
+ //
+ // Output the last scanline that has partial coverage
+ //
+
+ if ((nSubpixelYCurrent & c_nShiftMask) != 0)
+ {
+ IFC!(self.GenerateOutputAndClearCoverage(coverageBuffer, nSubpixelYCurrent));
+ }
+
+ RRETURN!(hr);
+}
+
+}
diff --git a/third_party/rust/wpf-gpu-raster/src/hwvertexbuffer.rs b/third_party/rust/wpf-gpu-raster/src/hwvertexbuffer.rs
new file mode 100644
index 0000000000..6b01b5fdb2
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/src/hwvertexbuffer.rs
@@ -0,0 +1,3075 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+
+//-----------------------------------------------------------------------------
+//
+
+//
+// Description:
+// Contains HW Vertex Buffer and Builder class implementations
+//
+//
+// Notes:
+//
+// +--------------------------------------+
+// | |
+// | Start Stratum |
+// 1 | |
+// | |
+// +--------------------------------------+
+// 2 |======================================|
+// +--------------------------------------+
+// | / \ / \ |
+// | / \ / \ |
+// | A / B \ C / D \ E |
+// 3 | / \ / \ |
+// | / \ / \ |
+// | / \ / \ |
+// | / \ / \ |
+// +--------------------------------------+
+// | \ / \ / |
+// | \ / \ / |
+// 4 | F \ G / H \ I / J |
+// | \ / \ / |
+// +--------------------------------------+
+// 5 |======================================|
+// +--------------------------------------+
+// 6 |======================================|
+// +--------------------------------------+
+// | |
+// | |
+// 7 | Stop Stratum |
+// | |
+// | |
+// +--------------------------------------+
+//
+//
+// Strata & complement mode.
+//
+// The anti-aliased HW rasterizer produces a series of "strata" where
+// each stratum can be a complex span rendered using lines (#'s 2,5,6) or
+// a series of trapezoids (#'s 3 & 4). In normal mode the trapezoid
+// regions B,D,G,I are filled in.
+//
+// Complement mode complicates things. Complex spans are relatively easy
+// because we get the whole line's worth of data at once. Trapezoids are
+// more complex because we get B,D,G and I separately. We handle this by
+// tracking the current stratum and finishing the last incomplete
+// trapezoid stratum when a new stratum begins. Regions E & J finish
+// trapezoid strata. We also need to add rectangles at the beginning and
+// end of the geometry (start and stop) to fill out the complement
+// region.
+//
+// This is implemented like so:
+//
+// 1. Strata are generated from top to bottom without gaps.
+// 2. Before drawing any lines or trapezoids call
+// PrepareStratum(a, b, fTrapezoid) where a & b are the extent of
+// the current stratum and fTrapezoid is true if you are drawing
+// a trapezoid. This will take care of creating the start
+// stratum and/or finishing a trapezoid stratum if necessary.
+// 3. When completely done call EndBuildingOutside() which will
+// close a pending trapezoid and/or produce the stop stratum.
+//
+//-----------------------------------------------------------------------------
+
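+// A minimal sketch (not from the original source) of the protocol described
+// above, assuming hypothetical `strata` data and the builder methods named
+// in the notes:
+//
+//     for s in strata_top_to_bottom {             // 1. no gaps, top to bottom
+//         builder.PrepareStratum(s.top, s.bottom, // 2. before drawing
+//                                s.is_trapezoid);
+//         /* ... emit this stratum's lines or trapezoids ... */
+//     }
+//     builder.EndBuildingOutside();               // 3. close pending stratum
+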
+const FORCE_TRIANGLES: bool = true;
+
+//+----------------------------------------------------------------------------
+//
+// Constants to control when we stop waffling because the tiles are too
+// small to make a difference.
+//
+// Future Consideration: waffling can produce an excessive number of
+// triangles. How we mitigate or handle this could be improved. Right now
+// we stop waffling if the waffle size is less than a quarter-pixel.
+// Two big improvements that could be made are:
+// - multipacking very small textures (but note that we cannot rely
+// on prefiltering to ensure that small screen space means small texture
+// source)
+// - clipping primitives to approximately the screen size
+//
+//-----------------------------------------------------------------------------
+//const c_rMinWaffleWidthPixels: f32 = 0.25;
+
+
+const FLOAT_ZERO: f32 = 0.;
+const FLOAT_ONE: f32 = 1.;
+
+//+----------------------------------------------------------------------------
+//
+// Class: CHwVertexBuffer and CHwTVertexBuffer<class TVertex>
+//
+// Synopsis: This class accumulates geometry data for a primitive
+//
+//-----------------------------------------------------------------------------
+
+use crate::{types::*, geometry_sink::IGeometrySink, aacoverage::c_nShiftSizeSquared, OutputVertex, nullable_ref::Ref};
+
+
+//+----------------------------------------------------------------------------
+//
+// Class: CHwVertexBuffer::Builder
+//
+// Synopsis: Base vertex builder class
+//
+// Responsibilities:
+// - Given ordered basic vertex information expand/convert/pass-thru
+// to vertex buffer (Basic vertex information is minimal vertex
+// information sent from the caller that may or may not have been
+// passed thru a tessellator.)
+// - Choosing vertex format from a minimal required vertex format
+//
+// Not responsible for:
+// - Allocating space in vertex buffer
+//
+// Inputs required:
+// - Key and data to translate input basic vertex info to full vertex data
+// - Vertex info from tessellation (or other Geometry Generator)
+// - Vertex buffer to send output to
+//
+
+/*pub struct CHwVertexBufferBuilder /* : public IGeometrySink */
+{
+ /*
+public:
+
+ static HRESULT Create(
+ MilVertexFormat vfIn,
+ MilVertexFormat vfOut,
+ MilVertexFormatAttribute vfaAntiAliasScaleLocation,
+ __in_ecount_opt(1) CHwPipeline *pPipeline,
+ __in_ecount_opt(1) CD3DDeviceLevel1 *pDevice,
+ __in_ecount(1) CBufferDispenser *pBufferDispenser,
+ __deref_out_ecount(1) CHwVertexBuffer::Builder **ppVertexBufferBuilder
+ );
+
+ virtual ~Builder()
+ {
+#if DBG
+ Assert(!m_fDbgDestroyed);
+ m_fDbgDestroyed = true;
+#endif DBG
+ }
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: SetConstantMapping
+ //
+ // Synopsis: Use this method to specify that the given color source for
+ // the given vertex destination is constant (won't differ per
+ // vertex)
+ //
+ //-------------------------------------------------------------------------
+
+ virtual HRESULT SetConstantMapping(
+ MilVertexFormatAttribute mvfaDestination,
+ __in_ecount(1) const CHwConstantColorSource *pConstCS
+ ) PURE;
+
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: FinalizeMappings
+ //
+ // Synopsis: Use this method to let builder know that all mappings have
+ // been sent
+ //
+ //-------------------------------------------------------------------------
+
+ virtual HRESULT FinalizeMappings(
+ ) PURE;
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: SetOutsideBounds
+ //
+ // Synopsis: Enables rendering zero-alpha geometry outside of the input
+    //             shape but within the given bounding rectangle; if fNeedInside
+    //             isn't true then the full-alpha geometry inside the shape is
+    //             not rendered.
+ //
+ //-------------------------------------------------------------------------
+ virtual void SetOutsideBounds(
+ __in_ecount_opt(1) const CMILSurfaceRect *prcBounds,
+ bool fNeedInside
+ ) PURE;
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: HasOutsideBounds
+ //
+ // Synopsis: Returns true if outside bounds have been set.
+ //
+ //-------------------------------------------------------------------------
+ virtual bool HasOutsideBounds() const PURE;
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: BeginBuilding
+ //
+ // Synopsis: This method lets the builder know it should start from a
+ // clean slate
+ //
+ //-------------------------------------------------------------------------
+
+ virtual HRESULT BeginBuilding(
+ ) PURE;
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: EndBuilding
+ //
+ // Synopsis: Use this method to let the builder know that all of the
+ // vertex data has been sent
+ //
+ //-------------------------------------------------------------------------
+
+ virtual HRESULT EndBuilding(
+ __deref_opt_out_ecount(1) CHwVertexBuffer **ppVertexBuffer
+ ) PURE;
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: FlushReset
+ //
+ // Synopsis: Send pending state and geometry to the device and reset
+ // the vertex buffer.
+ //
+ //-------------------------------------------------------------------------
+
+ MIL_FORCEINLINE HRESULT FlushReset()
+ {
+ return FlushInternal(NULL);
+ }
+
+ //
+ // Currently all CHwVertexBuffer::Builder are supposed to be allocated via
+ // a CBufferDispenser.
+ //
+
+ DECLARE_BUFFERDISPENSER_DELETE
+
+protected:
+
+ Builder()
+ {
+ m_mvfIn = MILVFAttrNone;
+
+#if DBG
+ m_mvfDbgOut = MILVFAttrNone;
+#endif
+
+ m_mvfaAntiAliasScaleLocation = MILVFAttrNone;
+
+ m_pPipelineNoRef = NULL;
+ m_pDeviceNoRef = NULL;
+
+#if DBG
+ m_fDbgDestroyed = false;
+#endif DBG
+ }
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: FlushInternal
+ //
+ // Synopsis: Send any pending state and geometry to the device.
+ // If the optional argument is NULL then reset the
+ // vertex buffer.
+ // If the optional argument is non-NULL AND we have
+ // not yet flushed the vertex buffer return the vertex
+ // buffer.
+ //
+ //-------------------------------------------------------------------------
+
+ virtual HRESULT FlushInternal(
+ __deref_opt_out_ecount_opt(1) CHwVertexBuffer **ppVertexBuffer
+ ) PURE;
+
+
+ CHwPipeline *m_pPipelineNoRef;
+ CD3DDeviceLevel1 *m_pDeviceNoRef;
+
+ MilVertexFormat m_mvfIn; // Vertex fields that are pre-generated
+
+#if DBG
+ MilVertexFormat m_mvfDbgOut; // Output format of the vertex
+#endif
+
+ MilVertexFormat m_mvfGenerated; // Vertex fields that are dynamically
+ // generated by this builder
+
+ MilVertexFormatAttribute m_mvfaAntiAliasScaleLocation; // Vertex field that
+ // contains PPAA
+ // falloff factor
+
+#if DBG
+private:
+
+ bool m_fDbgDestroyed; // Used to check single Release pattern
+
+#endif DBG
+*/
+}*/
+#[derive(Default)]
+pub struct CD3DVertexXYZDUV2 {
+ x: f32,
+ y: f32,
+ //Z: f32,
+ coverage: f32,
+ /*U0: f32, V0: f32,
+ U1: f32, V1: f32,*/
+}
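+// In this port the D3D XYZDUV2 vertex is trimmed to the fields this path
+// actually consumes: a 2D position plus a `coverage` float standing in for
+// the packed Diffuse channel; Z and the texture coordinates stay commented
+// out.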
+pub type CHwVertexBuffer<'z> = CHwTVertexBuffer<'z, OutputVertex>;
+#[derive(Default)]
+pub struct CHwTVertexBuffer<'z, TVertex>
+{
+ //m_rgIndices: DynArray<WORD>, // Dynamic array of indices
+
+
+ //m_pBuilder: Rc<CHwTVertexBufferBuilder<TVertex>>,
+
+ /*
+#if DBG
+public:
+
+ CHwTVertexBuffer()
+ {
+ m_fDbgNonLineSegmentTriangleStrip = false;
+ }
+#endif
+
+protected:
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: Reset
+ //
+ // Synopsis: Mark the beginning of a new list of vertices; the existing
+ // list is discarded
+ //
+ //-------------------------------------------------------------------------
+
+ MIL_FORCEINLINE void Reset(
+ __in_ecount(1) Builder *pVBB
+ )
+ {
+#if DBG
+ m_fDbgNonLineSegmentTriangleStrip = false;
+#endif
+ m_rgIndices.SetCount(0);
+ m_rgVerticesTriList.SetCount(0);
+ m_rgVerticesTriStrip.SetCount(0);
+ m_rgVerticesLineList.SetCount(0);
+ m_rgVerticesNonIndexedTriList.SetCount(0);
+
+ m_pBuilder = pVBB;
+ }
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: AddNonIndexedTriListVertices
+ //
+ // Synopsis: Reserve space for consecutive vertices and return start
+ // index
+ //
+ //-------------------------------------------------------------------------
+
+ MIL_FORCEINLINE HRESULT AddNonIndexedTriListVertices(
+ UINT uCount,
+ __deref_ecount(uCount) TVertex **ppVertices
+ );
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: AddTriListVertices
+ //
+ // Synopsis: Reserve space for consecutive vertices and return start
+ // index
+ //
+ //-------------------------------------------------------------------------
+
+ MIL_FORCEINLINE HRESULT AddTriListVertices(
+ UINT uDelta,
+ __deref_ecount(uDelta) TVertex **ppVertices,
+ __out_ecount(1) WORD *pwIndexStart
+ );
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: AddTriStripVertices
+ //
+ // Synopsis: Reserve space for consecutive vertices and return start
+ // index
+ //
+ //-------------------------------------------------------------------------
+
+ MIL_FORCEINLINE HRESULT AddTriStripVertices(
+ UINT uCount,
+ __deref_ecount(uCount) TVertex **ppVertices
+ );
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: AddLineListVertices
+ //
+ // Synopsis: Reserve space for consecutive vertices and return start
+ // index
+ //
+ //-------------------------------------------------------------------------
+
+ MIL_FORCEINLINE HRESULT AddLineListVertices(
+ UINT uCount,
+ __deref_ecount(uCount) TVertex **ppVertices
+ );
+
+public:
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: AddLine implements ILineSink<PointXYA>
+ //
+ // Synopsis: Add a line given two points with x, y, & alpha.
+ //
+ //-------------------------------------------------------------------------
+ HRESULT AddLine(
+ __in_ecount(1) const PointXYA &v0,
+ __in_ecount(1) const PointXYA &v1
+ );
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: AddTriangle implements ITriangleSink<PointXYA>
+ //
+ // Synopsis: Add a triangle given three points with x, y, & alpha.
+ //
+ //-------------------------------------------------------------------------
+
+ HRESULT AddTriangle(
+ __in_ecount(1) const PointXYA &v0,
+ __in_ecount(1) const PointXYA &v1,
+ __in_ecount(1) const PointXYA &v2
+ );
+
+ // Re-introduce parent AddTriangle(WORD,WORD,WORD) into this scope.
+ using CHwVertexBuffer::AddTriangle;
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: AddLineAsTriangleStrip
+ //
+    //  Synopsis:  Add a horizontal line using a triangle strip
+ //
+ //-------------------------------------------------------------------------
+ HRESULT AddLineAsTriangleStrip(
+ __in_ecount(1) const TVertex *pBegin, // Begin
+ __in_ecount(1) const TVertex *pEnd // End
+ );
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: SendVertexFormat
+ //
+ // Synopsis: Send contained vertex format to device
+ //
+ //-------------------------------------------------------------------------
+
+ HRESULT SendVertexFormat(
+ __inout_ecount(1) CD3DDeviceLevel1 *pDevice
+ ) const;
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: DrawPrimitive
+ //
+ // Synopsis: Send the geometry data to the device and execute rendering
+ //
+ //-------------------------------------------------------------------------
+
+ HRESULT DrawPrimitive(
+ __inout_ecount(1) CD3DDeviceLevel1 *pDevice
+ ) const;
+
+protected:
+ //+------------------------------------------------------------------------
+ //
+ // Member: GetNumTriListVertices
+ //
+ // Synopsis: Return current number of vertices
+ //
+ //-------------------------------------------------------------------------
+
+ MIL_FORCEINLINE DWORD GetNumTriListVertices() const
+ {
+ return m_rgVerticesTriList.GetCount();
+ }
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: GetTriListVertices
+ //
+ // Synopsis: Return pointer to beginning of vertex list and their count
+ //
+ //-------------------------------------------------------------------------
+
+ MIL_FORCEINLINE void GetTriListVertices(
+ __deref_out_ecount_full(*puNumVertices) TVertex **ppVertices,
+ __out_ecount(1) UINT * puNumVertices
+ )
+ {
+ *ppVertices = m_rgVerticesTriList.GetDataBuffer();
+ *puNumVertices = m_rgVerticesTriList.GetCount();
+ }
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: GetNumNonIndexedTriListVertices
+ //
+ // Synopsis: Return current number of vertices
+ //
+ //-------------------------------------------------------------------------
+
+ MIL_FORCEINLINE DWORD GetNumNonIndexedTriListVertices() const
+ {
+ return m_rgVerticesNonIndexedTriList.GetCount();
+ }
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: GetNonIndexedTriListVertices
+ //
+ // Synopsis: Return pointer to beginning of vertex list and their count
+ //
+ //-------------------------------------------------------------------------
+
+ MIL_FORCEINLINE void GetNonIndexedTriListVertices(
+ __deref_out_ecount_full(*puNumVertices) TVertex **ppVertices,
+ __out_ecount(1) UINT * puNumVertices
+ )
+ {
+ *ppVertices = m_rgVerticesNonIndexedTriList.GetDataBuffer();
+ *puNumVertices = m_rgVerticesNonIndexedTriList.GetCount();
+ }
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: GetNumTriStripVertices
+ //
+ // Synopsis: Return current number of vertices
+ //
+ //-------------------------------------------------------------------------
+
+ MIL_FORCEINLINE DWORD GetNumTriStripVertices() const
+ {
+ return m_rgVerticesTriStrip.GetCount();
+ }
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: GetTriStripVertices
+ //
+ // Synopsis: Return pointer to beginning of vertex list and their count
+ //
+ //-------------------------------------------------------------------------
+
+ MIL_FORCEINLINE void GetTriStripVertices(
+ __deref_out_ecount_full(*puNumVertices) TVertex **ppVertices,
+ __out_ecount(1) UINT *puNumVertices
+ )
+ {
+ *ppVertices = m_rgVerticesTriStrip.GetDataBuffer();
+ *puNumVertices = m_rgVerticesTriStrip.GetCount();
+ }
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: GetNumLineListVertices
+ //
+ // Synopsis: Return current number of vertices
+ //
+ //-------------------------------------------------------------------------
+
+ MIL_FORCEINLINE DWORD GetNumLineListVertices() const
+ {
+ return m_rgVerticesLineList.GetCount();
+ }
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: GetLineListVertices
+ //
+ // Synopsis: Return pointer to beginning of vertex list and their count
+ //
+ //-------------------------------------------------------------------------
+
+ MIL_FORCEINLINE void GetLineListVertices(
+ __deref_out_ecount_full(*puNumVertices) TVertex **ppVertices,
+ __out_ecount(1) UINT * puNumVertices
+ )
+ {
+ *ppVertices = m_rgVerticesLineList.GetDataBuffer();
+ *puNumVertices = m_rgVerticesLineList.GetCount();
+ }
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: GetLineListVertices
+ //
+ // Synopsis: Return pointer to beginning of vertex list
+ //
+ //-------------------------------------------------------------------------
+
+
+
+*/
+
+ // Dynamic array of vertices for which all allocations are zeroed.
+ // XXX: the zero has been removed
+ //m_rgVerticesTriList: DynArray<TVertex>, // Indexed triangle list vertices
+ //m_rgVerticesNonIndexedTriList: DynArray<TVertex>, // Non-indexed triangle list vertices
+    m_rgVerticesTriList: DynArray<TVertex>,           // Triangle list vertices
+ //m_rgVerticesLineList: DynArray<TVertex>, // Linelist vertices
+
+ m_rgVerticesBuffer: Option<&'z mut [TVertex]>,
+ m_rgVerticesBufferOffset: usize,
+
+ #[cfg(debug_assertions)]
+ // In debug make a note if we add a triangle strip that doesn't have 6 vertices
+ // so that we can ensure that we only waffle 6-vertex tri strips.
+ m_fDbgNonLineSegmentTriangleStrip: bool,
+ subpixel_bias: f32,
+}
+
+impl<'z, TVertex: Default> CHwTVertexBuffer<'z, TVertex> {
+ pub fn new(rasterization_truncates: bool, output_buffer: Option<&'z mut [TVertex]>) -> Self {
+ CHwTVertexBuffer::<TVertex> {
+ subpixel_bias: if rasterization_truncates {
+ // 1/512 is 0.5 of a subpixel when using 8 bits of subpixel precision.
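+                // (A pixel spans 2^8 = 256 subpixel steps, so half a step
+                // is 0.5 / 256 = 1 / 512 of a pixel.)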
+ 1./512.
+ } else {
+ 0.
+ },
+ m_rgVerticesBuffer: output_buffer,
+ m_rgVerticesBufferOffset: 0,
+ ..Default::default()
+ }
+ }
+
+ pub fn flush_output(&mut self) -> Box<[TVertex]> {
+ std::mem::take(&mut self.m_rgVerticesTriList).into_boxed_slice()
+ }
+
+ pub fn get_output_buffer_size(&self) -> Option<usize> {
+ if self.m_rgVerticesBuffer.is_some() {
+ Some(self.m_rgVerticesBufferOffset)
+ } else {
+ None
+ }
+ }
+}
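+
+// Illustrative use of the two output modes above (a sketch, not from the
+// original source): with `None` the vertices accumulate internally and are
+// drained by `flush_output`; with `Some(slice)` they are written in place
+// and `get_output_buffer_size` reports how many slots were needed.
+//
+//     let mut vb = CHwVertexBuffer::new(false, None);
+//     /* ... rasterize into vb ... */
+//     let tris = vb.flush_output(); // Box<[OutputVertex]>, 3 per triangle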
+
+//+----------------------------------------------------------------------------
+//
+// Class: CHwTVertexMappings<class TVertex>
+//
+// Synopsis: Helper class that knows how to populate a vertex from the
+// incoming basic per vertex data, like just X and Y
+//
+//-----------------------------------------------------------------------------
+#[derive(Default)]
+struct CHwTVertexMappings<TVertex>
+{/*
+public:
+
+ CHwTVertexMappings();
+
+ void SetPositionTransform(
+ __in_ecount(1) const MILMatrix3x2 &matPositionTransform
+ );
+
+ HRESULT SetConstantMapping(
+ MilVertexFormatAttribute mvfaDestination,
+ __in_ecount(1) const CHwConstantColorSource *pConstCS
+ );
+
+ void PointToUV(
+ __in_ecount(1) const MilPoint2F &ptIn,
+ __bound UINT uIndex,
+ __out_ecount(1) TVertex *pvOut
+ );
+
+ MIL_FORCEINLINE bool AreWaffling() const
+ {
+ return false;
+ }
+
+private:
+ static const size_t s_numOfVertexTextureCoords
+ = NUM_OF_VERTEX_TEXTURE_COORDS(TVertex);
+public:
+
+ MilVertexFormat m_mvfMapped;
+
+ MilColorF m_colorStatic;
+
+ MILMatrix3x2 m_matPos2DTransform;
+
+ MILMatrix3x2 m_rgmatPointToUV[s_numOfVertexTextureCoords];
+ CMilPointAndSizeF m_rgSubrect[s_numOfVertexTextureCoords];
+ WaffleModeFlags m_rgWaffleMode[s_numOfVertexTextureCoords];
+
+*/
+ m_vStatic: TVertex,
+ subpixel_bias: f32,
+}
+
+impl<TVertex> CHwTVertexBuffer<'_, TVertex> {
+ pub fn Reset(&mut self,
+ /*pVBB: &mut CHwTVertexBufferBuilder<TVertex>*/
+ )
+ {
+ #[cfg(debug_assertions)]
+ {
+ self.m_fDbgNonLineSegmentTriangleStrip = false;
+ }
+
+ //self.m_rgIndices.SetCount(0);
+ //self.m_rgVerticesTriList.SetCount(0);
+ self.m_rgVerticesTriList.SetCount(0);
+ self.m_rgVerticesBufferOffset = 0;
+ //self.m_rgVerticesLineList.SetCount(0);
+ //self.m_rgVerticesNonIndexedTriList.SetCount(0);
+
+ //self.m_pBuilder = pVBB;
+ }
+
+ fn IsEmpty(&self) -> bool
+ {
+ return true
+ // && (self.m_rgIndices.GetCount() == 0)
+ //&& (self.m_rgVerticesLineList.GetCount() == 0)
+ && (self.m_rgVerticesTriList.GetCount() == 0)
+ && self.m_rgVerticesBufferOffset == 0
+ //&& (self.m_rgVerticesNonIndexedTriList.GetCount() == 0);
+ }
+
+}
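+
+// Reset above clears both output modes (the growable triangle list and the
+// fixed slice offset), so one vertex buffer can be reused across primitives.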
+
+//+----------------------------------------------------------------------------
+//
+// Class: CHwTVertexBuffer<class TVertex>::Builder
+//
+// Synopsis: Implements CHwVertexBuffer::Builder for a particular vertex
+// format
+//
+//-----------------------------------------------------------------------------
+
+pub struct CHwTVertexBufferBuilder<'y, 'z, TVertex>
+{
+ m_mvfIn: MilVertexFormat, // Vertex fields that are pre-generated
+
+ #[cfg(debug_assertions)]
+ m_mvfDbgOut: MilVertexFormat, // Output format of the vertex
+
+    m_mvfGenerated: MilVertexFormat, // Vertex fields that are dynamically
+                                     // generated by this builder
+
+ m_mvfaAntiAliasScaleLocation: MilVertexFormatAttribute, // Vertex field that
+ // contains PPAA
+ // falloff factor
+
+ /*
+public:
+
+ static MilVertexFormat GetOutVertexFormat();
+
+ static HRESULT Create(
+ __in_ecount(1) CHwTVertexBuffer<TVertex> *pVertexBuffer,
+ MilVertexFormat mvfIn,
+ MilVertexFormat mvfOut,
+ MilVertexFormatAttribute mvfaAntiAliasScaleLocation,
+ __inout_ecount(1) CBufferDispenser *pBufferDispenser,
+ __deref_out_ecount(1) typename CHwTVertexBuffer<TVertex>::Builder **ppVertexBufferBuilder
+ );
+
+ HRESULT SetConstantMapping(
+ MilVertexFormatAttribute mvfaDestination,
+ __in_ecount(1) const CHwConstantColorSource *pConstCS
+ );
+
+ void SetTransformMapping(
+ __in_ecount(1) const MILMatrix3x2 &mat2DTransform
+ );
+
+ HRESULT FinalizeMappings(
+ );
+
+ void SetOutsideBounds(
+ __in_ecount_opt(1) const CMILSurfaceRect *prcBounds,
+ bool fNeedInside
+ );
+
+ bool HasOutsideBounds() const
+ {
+ return NeedOutsideGeometry();
+ }
+
+ HRESULT BeginBuilding(
+ );
+
+ HRESULT AddVertex(
+ __in_ecount(1) const MilPoint2F &ptPosition,
+ // In: Vertex coordinates
+ __out_ecount(1) WORD *pIndex
+ // Out: The index of the new vertex
+ );
+
+ HRESULT AddIndexedVertices(
+ UINT cVertices, // In: number of vertices
+ __in_bcount(cVertices*uVertexStride) const void *pVertexBuffer, // In: vertex buffer containing the vertices
+ UINT uVertexStride, // In: size of each vertex
+ MilVertexFormat mvfFormat, // In: format of each vertex
+ UINT cIndices, // In: Number of indices
+ __in_ecount(cIndices) const UINT *puIndexBuffer // In: index buffer
+ );
+
+ HRESULT AddTriangle(
+ DWORD i1, // In: Index of triangle's first vertex
+ DWORD i2, // In: Index of triangle's second vertex
+ DWORD i3 // In: Index of triangle's third vertex
+ );
+
+ HRESULT AddComplexScan(
+ INT nPixelY,
+ // In: y coordinate in pixel space
+ __in_ecount(1) const CCoverageInterval *pIntervalSpanStart
+ // In: coverage segments
+ );
+
+ HRESULT AddParallelogram(
+ __in_ecount(4) const MilPoint2F *rgPosition
+ );
+
+ HRESULT AddTrapezoid(
+ float rPixelYTop, // In: y coordinate of top of trapezoid
+ float rPixelXTopLeft, // In: x coordinate for top left
+ float rPixelXTopRight, // In: x coordinate for top right
+ float rPixelYBottom, // In: y coordinate of bottom of trapezoid
+ float rPixelXBottomLeft, // In: x coordinate for bottom left
+ float rPixelXBottomRight, // In: x coordinate for bottom right
+ float rPixelXLeftDelta, // In: trapezoid expand radius for left edge
+ float rPixelXRightDelta // In: trapezoid expand radius for right edge
+ );
+
+ BOOL IsEmpty();
+
+ HRESULT EndBuilding(
+ __deref_opt_out_ecount(1) CHwVertexBuffer **ppVertexBuffer
+ );
+
+ HRESULT FlushInternal(
+ __deref_opt_out_ecount_opt(1) CHwVertexBuffer **ppVertexBuffer
+ );
+
+private:
+
+ // Helpers that do AddTrapezoid. Same parameters
+ HRESULT AddTrapezoidStandard( float, float, float, float, float, float, float, float );
+ HRESULT AddTrapezoidWaffle( float, float, float, float, float, float, float, float );
+
+
+
+ HRESULT PrepareStratumSlow(
+ float rStratumTop,
+ float rStratumBottom,
+ bool fTrapezoid,
+ float rTrapezoidLeft,
+ float rTrapezoidRight
+ );
+
+ // Wrap up building of outside geometry.
+ HRESULT EndBuildingOutside();
+
+ DECLARE_BUFFERDISPENSER_NEW(CHwTVertexBuffer<TVertex>::Builder,
+ Mt(CHwTVertexBuffer_Builder));
+
+ Builder(
+ __in_ecount(1) CHwTVertexBuffer<TVertex> *pVertexBuffer
+ );
+
+ HRESULT SetupConverter(
+ MilVertexFormat mvfIn,
+ MilVertexFormat mvfOut,
+ MilVertexFormatAttribute mvfaAntiAliasScaleLocation
+ );
+
+ HRESULT RenderPrecomputedIndexedTriangles(
+ __range(1, SHORT_MAX) UINT cVertices,
+ __in_ecount(cVertices) const TVertex *rgoVertices,
+ __range(1, UINT_MAX) UINT cIndices,
+ __in_ecount(cIndices) const UINT *rguIndices
+ );
+
+
+ // Expands all vertices in the buffer.
+ void ExpandVertices();
+
+ // Has never been successfully used to declare a method or derived type...
+/* typedef void (CHwTVertexBuffer<TVertex>::Builder::FN_ExpandVertices)(
+ UINT uCount,
+ TVertex *pVertex
+ );*/
+
+ // error C2143: syntax error : missing ';' before '*'
+// typedef FN_ExpandVertices *PFN_ExpandVertices;
+
+ typedef void (CHwTVertexBuffer<TVertex>::Builder::* PFN_ExpandVertices)(
+ __range(1,UINT_MAX) UINT uCount,
+ __inout_ecount_full(uCount) TVertex *rgVertices
+ );
+
+ //
+ // Table of vertex expansion routines for common expansion cases:
+ // - There are entries for Z, Diffuse, and one set texture coordinates for
+ // a total of eight combinations.
+ // - Additionally there is a second set of entries for anti-aliasing
+ // falloff applied thru diffuse.
+ //
+
+ static const PFN_ExpandVertices sc_pfnExpandVerticesTable[8*2];
+
+ MIL_FORCEINLINE
+ void TransferAndOrExpandVerticesInline(
+ __range(1,UINT_MAX) UINT uCount,
+ __in_ecount(uCount) TVertex const * rgInputVertices,
+ __out_ecount(uCount) TVertex *rgOutputVertices,
+ MilVertexFormat mvfOut,
+ MilVertexFormatAttribute mvfaScaleByFalloff,
+ bool fInputOutputAreSameBuffer,
+ bool fTransformPosition
+ );
+
+ // FN_ExpandVertices ExpandVerticesFast
+ template <MilVertexFormat mvfOut, MilVertexFormatAttribute mvfaScaleByFalloff>
+ void ExpandVerticesFast(
+ __range(1,UINT_MAX) UINT uCount,
+ __inout_ecount_full(uCount) TVertex *rgVertices
+ )
+ {
+ TransferAndOrExpandVerticesInline(
+ uCount,
+ rgVertices,
+ rgVertices,
+ mvfOut,
+ mvfaScaleByFalloff,
+ true, // => fInputOutputAreSameBuffer
+ false // => fTransformPosition
+ );
+ }
+
+ // error C2146: syntax error : missing ';' before identifier 'ExpandVerticesGeneral'
+ // error C2501: 'CHwTVertexBufferBuilder<TVertex>::FN_ExpandVertices' : missing storage-class or type specifiers
+// FN_ExpandVertices ExpandVerticesGeneral
+// typename FN_ExpandVertices ExpandVerticesGeneral
+ // error C4346: 'CHwTVertexBufferBuilder<TVertex>::FN_ExpandVertices' : dependent name is not a type
+// CHwTVertexBufferBuilder<TVertex>::FN_ExpandVertices ExpandVerticesGeneral
+    // Can't define methods here (unless no parameters are used).
+// typename CHwTVertexBufferBuilder<TVertex>::FN_ExpandVertices ExpandVerticesGeneral
+ // FN_ExpandVertices ExpandVerticesGeneral
+ void ExpandVerticesGeneral(
+ __range(1,UINT_MAX) UINT uCount,
+ __inout_ecount_full(uCount) TVertex *rgVertices
+ )
+ {
+ TransferAndOrExpandVerticesInline(
+ uCount,
+ rgVertices,
+ rgVertices,
+ m_mvfGenerated,
+ m_mvfaAntiAliasScaleLocation,
+ true, // => fInputOutputAreSameBuffer
+ false // => fTransformPosition
+ );
+ }
+
+ void TransferAndExpandVerticesGeneral(
+ __range(1,UINT_MAX) UINT uCount,
+ __in_ecount(uCount) TVertex const *rgInputVertices,
+ __out_ecount_full(uCount) TVertex *rgOutputVertices,
+ bool fTransformPosition
+ )
+ {
+ TransferAndOrExpandVerticesInline(
+ uCount,
+ rgInputVertices,
+ rgOutputVertices,
+ m_mvfGenerated,
+ m_mvfaAntiAliasScaleLocation,
+ false, // => fInputOutputAreSameBuffer
+ fTransformPosition // => fTransformPosition
+ );
+ }
+
+ // FN_ExpandVertices ExpandVerticesInvalid
+ void ExpandVerticesInvalid(
+ __range(1,UINT_MAX) UINT uCount,
+ __inout_ecount_full(uCount) TVertex *rgVertices
+ )
+ {
+ RIP("Invalid ExpandVertices routine.");
+ }
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: NeedCoverageGeometry
+ //
+ // Synopsis: True if we should create geometry for a particular
+ // coverage value.
+ //
+ //-------------------------------------------------------------------------
+ bool NeedCoverageGeometry(INT nCoverage) const;
+
+
+
+
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: ReinterpretFloatAsDWORD
+ //
+ // Synopsis: Quicky helper to convert a float to a DWORD bitwise.
+ //
+ //-------------------------------------------------------------------------
+ static MIL_FORCEINLINE DWORD ReinterpretFloatAsDWORD(float c)
+ {
+ return reinterpret_cast<DWORD &>(c);
+ }
+
+private:
+ MIL_FORCEINLINE bool AreWaffling() const
+ {
+ return m_map.AreWaffling();
+ }
+
+ void ViewportToPackedCoordinates(
+ __range(1,UINT_MAX / uGroupSize) UINT uGroupCount,
+ __inout_ecount(uGroupCount * uGroupSize) TVertex *pVertex,
+ __range(2,6) UINT uGroupSize,
+ /*__range(0,NUM_OF_VERTEX_TEXTURE_COORDS(TVertex)-1)*/ __bound UINT uIndex
+ );
+
+ void ViewportToPackedCoordinates(
+ __range(1,UINT_MAX / uGroupSize) UINT uGroupCount,
+ __inout_ecount(uGroupCount * uGroupSize) TVertex *pVertex,
+ __range(2,6) UINT uGroupSize
+ );
+
+ template<class TWaffler>
+ __out_ecount(1) typename TWaffler::ISink *
+ BuildWafflePipeline(
+ __out_xcount(NUM_OF_VERTEX_TEXTURE_COORDS(TVertex) * 2) TWaffler *wafflers,
+ __out_ecount(1) bool &fWafflersUsed
+ ) const;
+
+
+ template<class TWaffler>
+ typename TWaffler::ISink *
+ BuildWafflePipeline(
+ __out_xcount(NUM_OF_VERTEX_TEXTURE_COORDS(TVertex) * 2) TWaffler *wafflers
+ ) const
+ {
+ bool fNotUsed;
+ return BuildWafflePipeline(wafflers, fNotUsed);
+ }*/
+
+ m_pVB: &'y mut CHwTVertexBuffer<'z, TVertex>,
+
+ //m_pfnExpandVertices: PFN_ExpandVertices, // Method for expanding vertices
+
+ //m_rgoPrecomputedTriListVertices: *const TVertex,
+ //m_cPrecomputedTriListVertices: UINT,
+
+ //m_rguPrecomputedTriListIndices: *const UINT,
+ //m_cPrecomputedTriListIndices: UINT,
+
+ //m_map: CHwTVertexMappings<TVertex>,
+
+ // This is true if we had to flush the pipeline as we were getting
+ // geometry rather than just filling up a single vertex buffer.
+ m_fHasFlushed: bool,
+
+ // The next two members control the generation of the zero-alpha geometry
+ // outside the input geometry.
+ m_fNeedOutsideGeometry: bool,
+ m_fNeedInsideGeometry: bool,
+ m_rcOutsideBounds: CMILSurfaceRect, // Bounds for creation of outside geometry
+
+ /*
+ // Helpful m_rcOutsideBounds casts.
+ float OutsideLeft() const { return static_cast<float>(m_rcOutsideBounds.left); }
+ float OutsideRight() const { return static_cast<float>(m_rcOutsideBounds.right); }
+ float OutsideTop() const { return static_cast<float>(m_rcOutsideBounds.top); }
+ float OutsideBottom() const { return static_cast<float>(m_rcOutsideBounds.bottom); }
+ */
+ // This interval (if we are doing outside) shows the location
+ // of the current stratum. It is initialized to [FLT_MAX, -FLT_MAX].
+ //
+ // If the current stratum is a complex span then
+ // m_rCurStratumBottom is set to the bottom of the stratum and
+ // m_rCurStratumTop is set to FLT_MAX.
+ //
+ // If the current stratum is a trapezoidal one, then
+ // m_rCurStratumBottom is its bottom and m_rCurStratumTop is its
+ // top.
+ m_rCurStratumTop: f32,
+ m_rCurStratumBottom: f32,
+
+    // If the current stratum is a trapezoidal one, the following var stores
+    // the right boundary of the last trapezoid handled by PrepareStratum.
+    // We need it to close the stratum properly.
+ m_rLastTrapezoidRight: f32,
+
+ // These are needed to implement outside geometry using triangle lists
+ m_rLastTrapezoidTopRight: f32,
+ m_rLastTrapezoidBottomRight: f32,
+}
+
+/*
+//+----------------------------------------------------------------------------
+//
+// Member: CHwVertexBuffer::AddTriangle
+//
+// Synopsis: Add a triangle using the three indices given to the list
+//
+impl CHwVertexBuffer {
+
+fn AddTriangle(
+ i1: WORD, // In: Index of triangle's first vertex
+ i2: WORD, // In: Index of triangle's second vertex
+ i3: WORD // In: Index of triangle's third vertex
+ ) -> HRESULT
+{
+ let hr: HRESULT = S_OK;
+
+ // Asserting indices < max vertex requires a debug only pure virtual method
+ // which is too much of a functionality change between retail and debug.
+ //
+ //
+ // Assert(i1 < GetNumTriListVertices());
+ // Assert(i2 < GetNumTriListVertices());
+ // Assert(i3 < GetNumTriListVertices());
+
+ WORD *pIndices;
+
+ IFC(m_rgIndices.AddMultiple(3, &pIndices));
+
+ pIndices[0] = i1;
+ pIndices[1] = i2;
+ pIndices[2] = i3;
+
+Cleanup:
+ RRETURN(hr);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::AddTriangle
+//
+// Synopsis: Add a triangle using given three points to the list
+//
+//-----------------------------------------------------------------------------
+template <class TVertex>
+HRESULT
+CHwTVertexBuffer<TVertex>::AddTriangle(
+ __in_ecount(1) const PointXYA &v0,
+ __in_ecount(1) const PointXYA &v1,
+ __in_ecount(1) const PointXYA &v2)
+{
+ let hr: HRESULT = S_OK;
+
+ TVertex *pVertices;
+ hr = AddNonIndexedTriListVertices(3,&pVertices);
+
+ if (hr == E_OUTOFMEMORY)
+ {
+ DebugBreak ();
+ }
+ IFC(hr);
+
+ pVertices[0].ptPt.X = v0.x;
+ pVertices[0].ptPt.Y = v0.y;
+ pVertices[0].Diffuse = reinterpret_cast<const DWORD &>(v0.a);
+ pVertices[1].ptPt.X = v1.x;
+ pVertices[1].ptPt.Y = v1.y;
+ pVertices[1].Diffuse = reinterpret_cast<const DWORD &>(v1.a);
+ pVertices[2].ptPt.X = v2.x;
+ pVertices[2].ptPt.Y = v2.y;
+ pVertices[2].Diffuse = reinterpret_cast<const DWORD &>(v2.a);
+
+Cleanup:
+ RRETURN(hr);
+}
+*/
+
+impl CHwVertexBuffer<'_> {
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::AddLine
+//
+// Synopsis: Add a nominal width line using given two points to the list
+//
+//-----------------------------------------------------------------------------
+fn AddLine(&mut self,
+ v0: &PointXYA,
+ v1: &PointXYA
+ ) -> HRESULT
+{
+ type TVertex = CD3DVertexXYZDUV2;
+ let hr = S_OK;
+
+ let pVertices: &mut [TVertex];
+ let mut rgScratchVertices: [TVertex; 2] = Default::default();
+
+    assert!(v0.y == v1.y); // lines handed to AddLine are horizontal
+
+ let fUseTriangles = /*(v0.y < m_pBuilder->GetViewportTop() + 1) ||*/ FORCE_TRIANGLES;
+
+ //if (fUseTriangles)
+ //{
+ pVertices = &mut rgScratchVertices;
+ //}
+ //else
+ //{
+ //IFC!(AddLineListVertices(2, &pVertices));
+ //}
+
+ pVertices[0].x = v0.x;
+ pVertices[0].y = v0.y;
+ pVertices[0].coverage = v0.a;
+ pVertices[1].x = v1.x;
+ pVertices[1].y = v1.y;
+ pVertices[1].coverage = v1.a;
+
+ if (fUseTriangles)
+ {
+ IFC!(self.AddLineAsTriangleList(&pVertices[0],&pVertices[1]));
+ }
+
+ RRETURN!(hr);
+}
+}
+/*
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::AddTriListVertices
+//
+// Synopsis: Reserve space for consecutive vertices and return start index
+//
+
+template <class TVertex>
+MIL_FORCEINLINE
+HRESULT
+CHwTVertexBuffer<TVertex>::AddTriListVertices(
+ UINT uDelta,
+ __deref_ecount(uDelta) TVertex **ppVertices,
+ __out_ecount(1) WORD *pwIndexStart
+ )
+{
+ HRESULT hr = S_OK;
+
+ Assert(ppVertices);
+
+ UINT uCount = static_cast<UINT>(m_rgVerticesTriList.GetCount());
+ if (uCount > SHRT_MAX)
+ {
+ IFC(WGXERR_INVALIDPARAMETER);
+ }
+ UINT newCount;
+ newCount = uDelta + uCount;
+
+ if (newCount > SHRT_MAX)
+ {
+ IFC(m_pBuilder->FlushReset());
+ uCount = 0;
+ newCount = uDelta;
+ }
+
+ if (newCount > m_rgVerticesTriList.GetCapacity())
+ {
+ IFC(m_rgVerticesTriList.ReserveSpace(uDelta));
+ }
+
+ m_rgVerticesTriList.SetCount(newCount);
+ *pwIndexStart = static_cast<WORD>(uCount);
+ *ppVertices = &m_rgVerticesTriList[uCount];
+
+ Cleanup:
+ RRETURN(hr);
+}
+*/
+
+impl<TVertex: Clone + Default> CHwTVertexBuffer<'_, TVertex> {
+
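+// Note: when a fixed output slice is in use, writes past its end are dropped
+// but m_rgVerticesBufferOffset still advances, so get_output_buffer_size()
+// reports the total number of slots a caller should allocate and retry with.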
+fn AddTriVertices(&mut self, v0: TVertex, v1: TVertex, v2: TVertex) {
+ if let Some(output_buffer) = &mut self.m_rgVerticesBuffer {
+ let offset = self.m_rgVerticesBufferOffset;
+ if offset + 3 <= output_buffer.len() {
+ output_buffer[offset] = v0;
+ output_buffer[offset + 1] = v1;
+ output_buffer[offset + 2] = v2;
+ }
+ self.m_rgVerticesBufferOffset = offset + 3;
+ } else {
+ self.m_rgVerticesTriList.reserve(3);
+ self.m_rgVerticesTriList.push(v0);
+ self.m_rgVerticesTriList.push(v1);
+ self.m_rgVerticesTriList.push(v2);
+ }
+}
+
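+// The trapezoid quad (v0, v1, v2, v3) is emitted as two triangles sharing
+// the v1-v2 edge: (v0, v1, v2) and (v1, v2, v3), i.e. a two-triangle strip
+// unrolled into list form.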
+fn AddTrapezoidVertices(&mut self, v0: TVertex, v1: TVertex, v2: TVertex, v3: TVertex) {
+ if let Some(output_buffer) = &mut self.m_rgVerticesBuffer {
+ let offset = self.m_rgVerticesBufferOffset;
+ if offset + 6 <= output_buffer.len() {
+ output_buffer[offset] = v0;
+ output_buffer[offset + 1] = v1.clone();
+ output_buffer[offset + 2] = v2.clone();
+
+ output_buffer[offset + 3] = v1;
+ output_buffer[offset + 4] = v2;
+ output_buffer[offset + 5] = v3;
+ }
+ self.m_rgVerticesBufferOffset = offset + 6;
+ } else {
+ self.m_rgVerticesTriList.reserve(6);
+
+ self.m_rgVerticesTriList.push(v0);
+ self.m_rgVerticesTriList.push(v1.clone());
+ self.m_rgVerticesTriList.push(v2.clone());
+
+ self.m_rgVerticesTriList.push(v1);
+ self.m_rgVerticesTriList.push(v2);
+ self.m_rgVerticesTriList.push(v3);
+ }
+}
+
+fn AddedNonLineSegment(&mut self) {
+ #[cfg(debug_assertions)]
+ {
+ self.m_fDbgNonLineSegmentTriangleStrip = true;
+ }
+}
+
+}
+
+/*
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::AddNonIndexedTriListVertices
+//
+// Synopsis: Reserve space for triangle list vertices.
+//
+
+template <class TVertex>
+MIL_FORCEINLINE
+HRESULT
+CHwTVertexBuffer<TVertex>::AddNonIndexedTriListVertices(
+ UINT uCount,
+ __deref_ecount(uCount) TVertex **ppVertices
+ )
+{
+ HRESULT hr = S_OK;
+
+ UINT Count = static_cast<UINT>(m_rgVerticesNonIndexedTriList.GetCount());
+ UINT newCount = Count + uCount;
+
+ if (newCount > m_rgVerticesNonIndexedTriList.GetCapacity())
+ {
+ IFC(m_rgVerticesNonIndexedTriList.ReserveSpace(uCount));
+ }
+
+ m_rgVerticesNonIndexedTriList.SetCount(newCount);
+ *ppVertices = &m_rgVerticesNonIndexedTriList[Count];
+
+Cleanup:
+ RRETURN(hr);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::AddLineListVertices
+//
+// Synopsis: Reserve space for consecutive vertices
+//
+
+template <class TVertex>
+MIL_FORCEINLINE
+HRESULT
+CHwTVertexBuffer<TVertex>::AddLineListVertices(
+ UINT uCount,
+ __deref_ecount(uCount) TVertex **ppVertices
+ )
+{
+ HRESULT hr = S_OK;
+
+ Assert(ppVertices);
+
+ UINT Count = static_cast<UINT>(m_rgVerticesLineList.GetCount());
+ UINT newCount = Count + uCount;
+
+ if (newCount > m_rgVerticesLineList.GetCapacity())
+ {
+ IFC(m_rgVerticesLineList.ReserveSpace(uCount));
+ }
+
+ m_rgVerticesLineList.SetCount(newCount);
+ *ppVertices = &m_rgVerticesLineList[Count];
+
+Cleanup:
+ RRETURN(hr);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Class: CHwVertexBuffer::Builder
+//
+//-----------------------------------------------------------------------------
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::GetOutVertexFormat
+//
+// Synopsis: Return MIL vertex format covered by specific builders
+//
+//-----------------------------------------------------------------------------
+
+template <>
+MilVertexFormat
+CHwTVertexBuffer<CD3DVertexXYZDUV2>::Builder::GetOutVertexFormat()
+{
+ return (MILVFAttrXYZ | MILVFAttrDiffuse | MILVFAttrUV2);
+}
+
+template <>
+MilVertexFormat
+CHwTVertexBuffer<CD3DVertexXYZDUV8>::Builder::GetOutVertexFormat()
+{
+ return (MILVFAttrXYZ | MILVFAttrDiffuse | MILVFAttrUV8);
+}
+
+template <>
+MilVertexFormat
+CHwTVertexBuffer<CD3DVertexXYZDUV6>::Builder::GetOutVertexFormat()
+{
+ return (MILVFAttrXYZ | MILVFAttrDiffuse | MILVFAttrUV6);
+}
+
+template <>
+MilVertexFormat
+CHwTVertexBuffer<CD3DVertexXYZNDSUV4>::Builder::GetOutVertexFormat()
+{
+ return (MILVFAttrXYZ |
+ MILVFAttrNormal |
+ MILVFAttrDiffuse |
+ MILVFAttrSpecular |
+ MILVFAttrUV4);
+}
+
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwVertexBuffer::Builder::Create
+//
+// Synopsis: Choose the appropriate final vertex format and instantiate the
+// matching vertex builder
+//
+*/
+pub type CHwVertexBufferBuilder<'y, 'z> = CHwTVertexBufferBuilder<'y, 'z, OutputVertex>;
+impl<'y, 'z> CHwVertexBufferBuilder<'y, 'z> {
+pub fn Create(
+ vfIn: MilVertexFormat,
+ vfOut: MilVertexFormat,
+ mvfaAntiAliasScaleLocation: MilVertexFormatAttribute,
+ pVertexBuffer: &'y mut CHwVertexBuffer<'z>,
+ /*pBufferDispenser: &CBufferDispenser*/
+ ) -> CHwVertexBufferBuilder<'y, 'z>
+{
+ CHwVertexBufferBuilder::CreateTemplate(pVertexBuffer, vfIn, vfOut, mvfaAntiAliasScaleLocation)
+ //let hr: HRESULT = S_OK;
+
+ //assert!(ppVertexBufferBuilder);
+
+ //*ppVertexBufferBuilder = None;
+/*
+ if (!(vfOut & ~CHwTVertexBuffer<CD3DVertexXYZDUV2>::Builder::GetOutVertexFormat()))
+ {
+ CHwTVertexBuffer<CD3DVertexXYZDUV2> *pVB = pDevice->GetVB_XYZDUV2();
+ CHwTVertexBuffer<CD3DVertexXYZDUV2>::Builder *pVBB = NULL;
+
+ IFC(CHwTVertexBuffer<CD3DVertexXYZDUV2>::Builder::Create(
+ pVB,
+ vfIn,
+ vfOut,
+ mvfaAntiAliasScaleLocation,
+ pBufferDispenser,
+ &pVBB
+ ));
+
+ *ppVertexBufferBuilder = pVBB;
+ }
+ else if (!(vfOut & ~CHwTVertexBuffer<CD3DVertexXYZDUV8>::Builder::GetOutVertexFormat()))
+ {
+ CHwTVertexBuffer<CD3DVertexXYZDUV8> *pVB = pDevice->GetVB_XYZRHWDUV8();
+ CHwTVertexBuffer<CD3DVertexXYZDUV8>::Builder *pVBB = NULL;
+
+ IFC(CHwTVertexBuffer<CD3DVertexXYZDUV8>::Builder::Create(
+ pVB,
+ vfIn,
+ vfOut,
+ mvfaAntiAliasScaleLocation,
+ pBufferDispenser,
+ &pVBB
+ ));
+
+ *ppVertexBufferBuilder = pVBB;
+ }
+ else
+ {
+ // NOTE-2004/03/22-chrisra Adding another vertexbuffer type requires updating enum
+ //
+ // If we add another buffer builder type kMaxVertexBuilderSize enum in hwvertexbuffer.h file
+ // needs to be updated to reflect possible changes to the maximum size of buffer builders.
+ //
+ IFC(E_NOTIMPL);
+ }
+
+ // Store the pipeline, if any, which this VBB can use to spill the vertex buffer to if it
+ // overflows.
+ (**ppVertexBufferBuilder).m_pPipelineNoRef = pPipeline;
+ (**ppVertexBufferBuilder).m_pDeviceNoRef = pDevice;
+
+
+Cleanup:
+ RRETURN(hr);*/
+ //hr
+}
+ /*fn AreWafffling(&self) -> bool {
+ false
+ }*/
+
+ // Helpful m_rcOutsideBounds casts.
+ fn OutsideLeft(&self) -> f32 { return self.m_rcOutsideBounds.left as f32; }
+ fn OutsideRight(&self) -> f32 { return self.m_rcOutsideBounds.right as f32; }
+ fn OutsideTop(&self) -> f32 { return self.m_rcOutsideBounds.top as f32; }
+ fn OutsideBottom(&self) -> f32 { return self.m_rcOutsideBounds.bottom as f32; }
+}
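+
+// A sketch (not from the original source) of how the rasterizer is expected
+// to drive this builder; `mvf_in`/`mvf_out` are placeholder formats:
+//
+//     let mut vb = CHwVertexBuffer::new(false, None);
+//     let mut builder = CHwVertexBufferBuilder::Create(
+//         mvf_in, mvf_out, MilVertexFormatAttribute::MILVFAttrNone, &mut vb);
+//     builder.SetOutsideBounds(None, true);
+//     builder.BeginBuilding();
+//     /* ... IGeometrySink::AddTrapezoid / AddComplexScan calls follow ... */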
+
+//+----------------------------------------------------------------------------
+//
+// Class: THwTVertexMappings<class TVertex>
+//
+//-----------------------------------------------------------------------------
+
+//+----------------------------------------------------------------------------
+//
+// Member: THwTVertexMappings<TVertex>::THwTVertexMappings
+//
+// Synopsis: ctor
+//
+//-----------------------------------------------------------------------------
+/*
+template <class TVertex>
+CHwTVertexMappings<TVertex>::CHwTVertexMappings()
+ :
+ m_mvfMapped(MILVFAttrNone)
+{
+ for (int i = 0; i < ARRAY_SIZE(m_rgWaffleMode); ++i)
+ {
+ m_rgWaffleMode[i] = WaffleModeNone;
+ }
+
+ m_matPos2DTransform.SetIdentity();
+}
+
+
+//+----------------------------------------------------------------------------
+//
+// Member: THwTVertexMappings<TVertex>::SetPositionTransform
+//
+// Synopsis: Sets the position transform that needs to be applied.
+//
+//-----------------------------------------------------------------------------
+template <class TVertex>
+void
+CHwTVertexMappings<TVertex>::SetPositionTransform(
+ __in_ecount(1) const MILMatrix3x2 &matPositionTransform
+ )
+{
+ m_matPos2DTransform = matPositionTransform;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexMappings<TVertex>::SetConstantMapping
+//
+// Synopsis: Remember the static color for the given vertex field
+//
+
+template <class TVertex>
+HRESULT
+CHwTVertexMappings<TVertex>::SetConstantMapping(
+ MilVertexFormatAttribute mvfaLocation,
+ __in_ecount(1) const CHwConstantColorSource *pConstCS
+ )
+{
+ HRESULT hr = S_OK;
+
+ Assert(!(m_mvfMapped & mvfaLocation));
+ pConstCS->GetColor(m_colorStatic);
+ m_mvfMapped |= mvfaLocation; // Remember this field has been mapped
+
+ RRETURN(hr);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Function: GetMILVFAttributeOfTextureCoord
+//
+// Synopsis: Compute MilVertexFormatAttribute for a texture coordinate index
+//
+
+MIL_FORCEINLINE
+MilVertexFormat
+GetMILVFAttributeOfTextureCoord(
+ DWORD dwCoordIndex
+ )
+{
+ return MILVFAttrUV1 << dwCoordIndex;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexMappings<TVertex>::PointToUV
+//
+// Synopsis: Helper function to populate the texture coordinates at the given
+// index using the given point
+//
+
+template <class TVertex>
+MIL_FORCEINLINE void
+CHwTVertexMappings<TVertex>::PointToUV(
+ __in_ecount(1) const MilPoint2F &ptIn,
+ __bound UINT uIndex,
+ __out_ecount(1) TVertex *pvOut
+ )
+{
+ m_rgmatPointToUV[uIndex].TransformPoint(
+ &pvOut->ptTx[uIndex],
+ ptIn.X,
+ ptIn.Y
+ );
+}
+
+
+
+
+
+//+----------------------------------------------------------------------------
+//
+// Class: CHwTVertexBuffer<TVertex>::Builder
+//
+//-----------------------------------------------------------------------------
+
+
+*/
+
+impl<'y, 'z, TVertex: Default> CHwTVertexBufferBuilder<'y, 'z, TVertex> {
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::Create
+//
+// Synopsis: Instantiate a specific type of vertex builder
+//
+
+fn CreateTemplate(
+ pVertexBuffer: &'y mut CHwTVertexBuffer<'z, TVertex>,
+ mvfIn: MilVertexFormat,
+ mvfOut: MilVertexFormat,
+ mvfaAntiAliasScaleLocation: MilVertexFormatAttribute,
+ /*pBufferDispenser: __inout_ecount(1) CBufferDispenser *,*/
+ ) -> Self
+{
+
+
+
+ let mut pVertexBufferBuilder = CHwTVertexBufferBuilder::<TVertex>::new(pVertexBuffer);
+
+ IFC!(pVertexBufferBuilder.SetupConverter(
+ mvfIn,
+ mvfOut,
+ mvfaAntiAliasScaleLocation
+ ));
+
+ return pVertexBufferBuilder;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::Builder
+//
+// Synopsis: ctor
+//
+//-----------------------------------------------------------------------------
+
+fn new(pVertexBuffer: &'y mut CHwTVertexBuffer<'z, TVertex>) -> Self
+{
+ Self {
+ m_pVB: pVertexBuffer,
+
+
+ //m_rgoPrecomputedTriListVertices: NULL(),
+ //m_cPrecomputedTriListVertices: 0,
+
+ //m_rguPrecomputedTriListIndices: NULL(),
+ //m_cPrecomputedTriListIndices: 0,
+
+ // These two track the Y extent of the shape this builder is producing.
+ m_rCurStratumTop: f32::MAX,
+ m_rCurStratumBottom: -f32::MAX,
+ m_fNeedOutsideGeometry: false,
+ m_fNeedInsideGeometry: true,
+
+ m_rLastTrapezoidRight: -f32::MAX,
+ m_rLastTrapezoidTopRight: -f32::MAX,
+ m_rLastTrapezoidBottomRight: -f32::MAX,
+
+ m_fHasFlushed: false,
+ //m_map: Default::default(),
+ m_rcOutsideBounds: Default::default(),
+ #[cfg(debug_assertions)]
+ m_mvfDbgOut: MilVertexFormatAttribute::MILVFAttrNone as MilVertexFormat,
+ m_mvfIn: MilVertexFormatAttribute::MILVFAttrNone as MilVertexFormat,
+ m_mvfGenerated: MilVertexFormatAttribute::MILVFAttrNone as MilVertexFormat,
+ m_mvfaAntiAliasScaleLocation: MilVertexFormatAttribute::MILVFAttrNone,
+ }
+}
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::SetupConverter
+//
+// Synopsis: Choose the appropriate conversion method
+//
+
+fn SetupConverter(&mut self,
+ mvfIn: MilVertexFormat,
+ mvfOut: MilVertexFormat,
+ mvfaAntiAliasScaleLocation: MilVertexFormatAttribute,
+ ) -> HRESULT
+{
+ let hr = S_OK;
+
+ self.m_mvfIn = mvfIn;
+
+ #[cfg(debug_assertions)]
+ {
+ self.m_mvfDbgOut = mvfOut;
+ }
+
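+    // Attributes requested in the output format but not supplied by the
+    // input must be generated by this builder.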
+ self.m_mvfGenerated = mvfOut & !self.m_mvfIn;
+ self.m_mvfaAntiAliasScaleLocation = mvfaAntiAliasScaleLocation;
+
+ assert!((self.m_mvfGenerated & MilVertexFormatAttribute::MILVFAttrXY as MilVertexFormat) == 0);
+
+ RRETURN!(hr);
+}
+}
+/*
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::SetTransformMapping
+//
+// Synopsis: Delegate mapping sets to CHwTVertexMappings
+//
+//-----------------------------------------------------------------------------
+
+template <class TVertex>
+void
+CHwTVertexBuffer<TVertex>::Builder::SetTransformMapping(
+ __in_ecount(1) const MILMatrix3x2 &mat2DPositionTransform
+ )
+{
+ m_map.SetPositionTransform(mat2DPositionTransform);
+}
+
+template <class TVertex>
+HRESULT
+CHwTVertexBuffer<TVertex>::Builder::SetConstantMapping(
+ MilVertexFormatAttribute mvfaLocation,
+ __in_ecount(1) const CHwConstantColorSource *pConstCS
+ )
+{
+ HRESULT hr = S_OK;
+
+ IFC(m_map.SetConstantMapping(mvfaLocation, pConstCS));
+
+Cleanup:
+ RRETURN(hr);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::FinalizeMappings
+//
+// Synopsis: Complete setup of vertex mappings
+//
+
+template <class TVertex>
+HRESULT
+CHwTVertexBuffer<TVertex>::Builder::FinalizeMappings(
+ )
+{
+ HRESULT hr = S_OK;
+
+ //
+ // Set default Z if required.
+ //
+
+ if (m_mvfGenerated & MILVFAttrZ)
+ {
+ if (!(m_map.m_mvfMapped & MILVFAttrZ))
+ {
+ m_map.m_vStatic.Z = 0.5f;
+ }
+ }
+
+ //
+ // If AA falloff is not going to scale the diffuse color and it is
+ // generated then see if the color is constant such that we can do any
+ // complex conversions just once here instead of in every iteration of the
+ // expansion loop. If AA falloff is going to scale the diffuse color then
+ // we can still optimize for the falloff = 1.0 case by precomputing that
+ // color now and checking for 1.0 during generation. Such a precomputation
+ // has shown significant to performance.
+ //
+
+ if (m_mvfGenerated & MILVFAttrDiffuse)
+ {
+ if (m_map.m_mvfMapped & MILVFAttrDiffuse)
+ {
+
+ // Assumes diffuse color is constant
+ m_map.m_vStatic.Diffuse =
+ Convert_MilColorF_scRGB_To_Premultiplied_MilColorB_sRGB(&m_map.m_colorStatic);
+ }
+ else
+ {
+ // Set default Diffuse value: White
+ m_map.m_vStatic.Diffuse = MIL_COLOR(0xFF,0xFF,0xFF,0xFF);
+ }
+ }
+
+ RRETURN(hr);
+}*/
+impl<TVertex> CHwTVertexBufferBuilder<'_, '_, TVertex> {
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::SetOutsideBounds
+//
+//
+// Synopsis: Enables rendering geometry for areas outside the shape but
+// within the bounds. These areas will be created with
+// zero alpha.
+//
+
+pub fn SetOutsideBounds(&mut self,
+ prcOutsideBounds: Option<&CMILSurfaceRect>,
+ fNeedInside: bool,
+ )
+{
+    // Waffling combined with outside bounds is not currently implemented.
+    // It's not difficult to do, but currently there is no need.
+    //assert!(!(self.AreWaffling() && prcOutsideBounds.is_some()));
+
+ if let Some(prcOutsideBounds) = prcOutsideBounds
+ {
+ self.m_rcOutsideBounds = prcOutsideBounds.clone();
+ self.m_fNeedOutsideGeometry = true;
+ self.m_fNeedInsideGeometry = fNeedInside;
+ }
+ else
+ {
+ self.m_fNeedOutsideGeometry = false;
+ self.m_fNeedInsideGeometry = true;
+ }
+}
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::BeginBuilding
+//
+// Synopsis: Prepare for a new primitive by resetting the vertex buffer
+//
+pub fn BeginBuilding(&mut self,
+ ) -> HRESULT
+{
+
+ let hr: HRESULT = S_OK;
+
+ self.m_fHasFlushed = false;
+ self.m_pVB.Reset(/*self*/);
+
+ RRETURN!(hr);
+}
+}
+impl IGeometrySink for CHwVertexBufferBuilder<'_, '_> {
+
+ fn AddTrapezoid(&mut self,
+ rPixelYTop: f32, // In: y coordinate of top of trapezoid
+ rPixelXTopLeft: f32, // In: x coordinate for top left
+ rPixelXTopRight: f32, // In: x coordinate for top right
+ rPixelYBottom: f32, // In: y coordinate of bottom of trapezoid
+ rPixelXBottomLeft: f32, // In: x coordinate for bottom left
+ rPixelXBottomRight: f32, // In: x coordinate for bottom right
+ rPixelXLeftDelta: f32, // In: trapezoid expand radius for left edge
+ rPixelXRightDelta: f32 // In: trapezoid expand radius for right edge
+ ) -> HRESULT
+ {
+ let hr = S_OK;
+
+ if (/*self.AreWaffling()*/ false)
+ {
+ /*IFC(AddTrapezoidWaffle(
+ rPixelYTop,
+ rPixelXTopLeft,
+ rPixelXTopRight,
+ rPixelYBottom,
+ rPixelXBottomLeft,
+ rPixelXBottomRight,
+ rPixelXLeftDelta,
+ rPixelXRightDelta));*/
+ }
+ else
+ {
+ IFC!(self.AddTrapezoidStandard(
+ rPixelYTop,
+ rPixelXTopLeft,
+ rPixelXTopRight,
+ rPixelYBottom,
+ rPixelXBottomLeft,
+ rPixelXBottomRight,
+ rPixelXLeftDelta,
+ rPixelXRightDelta));
+ }
+
+ //Cleanup:
+ RRETURN!(hr);
+ }
+
+
+ fn IsEmpty(&self) -> bool {
+ self.m_pVB.IsEmpty()
+ }
+
+/*
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::AddVertex
+//
+// Synopsis: Add a vertex to the vertex buffer
+//
+// Remember just the given vertex information now and convert later
+// in a single, more optimal pass.
+//
+
+template <class TVertex>
+HRESULT
+CHwTVertexBuffer<TVertex>::Builder::AddVertex(
+ __in_ecount(1) const MilPoint2F &ptPosition,
+ // Vertex coordinates
+ __out_ecount(1) WORD *pIndex
+ // The index of the new vertex
+ )
+{
+ HRESULT hr = S_OK;
+
+ Assert(!NeedOutsideGeometry());
+ Assert(m_mvfIn == MILVFAttrXY);
+
+ TVertex *pVertex;
+
+ IFC(m_pVB->AddTriListVertices(1, &pVertex, pIndex));
+
+ pVertex->ptPt = ptPosition;
+
+ // store coverage as a DWORD instead of float
+
+ pVertex->Diffuse = FLOAT_ONE;
+
+Cleanup:
+ RRETURN(hr);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::AddIndexedVertices, IGeometrySink
+//
+// Synopsis: Add a fully computed, indexed vertex to the vertex buffer
+//
+
+template <class TVertex>
+HRESULT
+CHwTVertexBuffer<TVertex>::Builder::AddIndexedVertices(
+ UINT cVertices,
+ // In: number of vertices
+ __in_bcount(cVertices*uVertexStride) const void *pVertexBufferNoRef,
+ // In: vertex buffer containing the vertices
+ UINT uVertexStride,
+ // In: size of each vertex
+ MilVertexFormat mvfFormat,
+ // In: format of each vertex
+ UINT cIndices,
+ // In: Number of indices
+ __in_ecount(cIndices) const UINT *puIndexBuffer
+ // In: index buffer
+ )
+{
+ Assert(m_mvfIn & (MILVFAttrXYZ | MILVFAttrDiffuse | MILVFAttrUV2));
+ Assert(mvfFormat == (MILVFAttrXYZ | MILVFAttrDiffuse | MILVFAttrUV2));
+
+ Assert(uVertexStride == sizeof(TVertex));
+
+ m_rgoPrecomputedTriListVertices = reinterpret_cast<const TVertex *>(pVertexBufferNoRef);
+ m_cPrecomputedTriListVertices = cVertices;
+
+ m_rguPrecomputedTriListIndices = puIndexBuffer;
+ m_cPrecomputedTriListIndices = cIndices;
+
+ return S_OK;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::AddTriangle
+//
+// Synopsis: Add a triangle to the vertex buffer
+//
+
+template <class TVertex>
+HRESULT
+CHwTVertexBuffer<TVertex>::Builder::AddTriangle(
+ DWORD i1, // In: Index of triangle's first vertex
+ DWORD i2, // In: Index of triangle's second vertex
+ DWORD i3 // In: Index of triangle's third vertex
+ )
+{
+ HRESULT hr = S_OK;
+
+ Assert(!NeedOutsideGeometry());
+
+ if (AreWaffling())
+ {
+ TVertex *pVertex;
+ UINT uNumVertices;
+ m_pVB->GetTriListVertices(&pVertex, &uNumVertices);
+
+ Assert(i1 < uNumVertices);
+ Assert(i2 < uNumVertices);
+ Assert(i3 < uNumVertices);
+
+ PointXYA rgPoints[3];
+ rgPoints[0].x = pVertex[i1].ptPt.X;
+ rgPoints[0].y = pVertex[i1].ptPt.Y;
+ rgPoints[0].a = 1;
+ rgPoints[1].x = pVertex[i2].ptPt.X;
+ rgPoints[1].y = pVertex[i2].ptPt.Y;
+ rgPoints[1].a = 1;
+ rgPoints[2].x = pVertex[i3].ptPt.X;
+ rgPoints[2].y = pVertex[i3].ptPt.Y;
+ rgPoints[2].a = 1;
+
+ TriangleWaffler<PointXYA> wafflers[NUM_OF_VERTEX_TEXTURE_COORDS(TVertex) * 2];
+ TriangleWaffler<PointXYA>::ISink *pWaffleSinkNoRef = BuildWafflePipeline(wafflers);
+ IFC(pWaffleSinkNoRef->AddTriangle(rgPoints[0], rgPoints[1], rgPoints[2]));
+ }
+ else
+ {
+ IFC(m_pVB->AddTriangle(
+ static_cast<WORD>(i1),
+ static_cast<WORD>(i2),
+ static_cast<WORD>(i3)
+ ));
+ }
+
+Cleanup:
+ RRETURN(hr);
+}
+*/
+
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::AddComplexScan
+//
+// Synopsis: Add a coverage span to the vertex buffer
+//
+//-----------------------------------------------------------------------------
+ fn AddComplexScan(&mut self,
+ nPixelY: INT,
+ // In: y coordinate in pixel space
+ mut pIntervalSpanStart: Ref<crate::aacoverage::CCoverageInterval>
+ // In: coverage segments
+ ) -> HRESULT {
+
+ let hr: HRESULT = S_OK;
+ //let pVertex: *mut CD3DVertexXYZDUV2 = NULL();
+
+ IFC!(self.PrepareStratum((nPixelY) as f32,
+ (nPixelY+1) as f32,
+ false, /* Not a trapezoid. */
+ 0., 0.,
+ 0., 0., 0., 0.));
+
+    let rPixelY: f32 = (nPixelY as f32) + 0.5;
+
+ //LineWaffler<PointXYA> wafflers[NUM_OF_VERTEX_TEXTURE_COORDS(TVertex) * 2];
+
+ // Use sink for waffling & the first line fix up (aka the complicated cases.)
+ //ILineSink<PointXYA> *pLineSink = NULL;
+ let mut pLineSink = None;
+
+ /*if (self.AreWaffling())
+ {
+ bool fWafflersUsed;
+ pLineSink = BuildWafflePipeline(wafflers, OUT fWafflersUsed);
+ if (!fWafflersUsed)
+ {
+ pLineSink = NULL;
+ }
+ }*/
+
+    // Use triangles instead of lines for lines too close to the top of the viewport,
+    // because lines are clipped (before rasterization) against a viewport that only
+    // includes half of the top pixel row. Waffling will take care of this separately.
+ if (/*pLineSink.is_none() && rPixelY < self.GetViewportTop() + 1 ||*/ FORCE_TRIANGLES)
+ {
+ pLineSink = Some(&mut self.m_pVB);
+ }
+
+ //
+ // Output all segments if creating outside geometry, otherwise only output segments
+ // with non-zero coverage.
+ //
+
+ if (pLineSink.is_none())
+ {
+ /*
+ UINT nSegmentCount = 0;
+
+ for (const CCoverageInterval *pIntervalSpanTemp = pIntervalSpanStart;
+ pIntervalSpanTemp->m_nPixelX != INT_MAX;
+ pIntervalSpanTemp = pIntervalSpanTemp->m_pNext
+ )
+ {
+ if (NeedCoverageGeometry(pIntervalSpanTemp->m_nCoverage))
+ {
+ ++nSegmentCount;
+ }
+ }
+
+ //
+ // Add vertices
+ //
+ if (nSegmentCount)
+ {
+ IFC(m_pVB->AddLineListVertices(nSegmentCount*2, &pVertex));
+ }*/
+ }
+
+ //
+ // Having allocated space (if not using sink), now let's actually output the vertices.
+ //
+
+ while ((*pIntervalSpanStart).m_nPixelX.get() != INT::MAX)
+ {
+ assert!(!(*pIntervalSpanStart).m_pNext.get().is_null());
+
+ //
+ // Output line list segments
+ //
+        // Note that line segments light pixels by going through the
+ // "diamond" interior of a pixel. While we could accomplish this
+ // by going from left edge to right edge of pixel, D3D10 uses the
+ // convention that the LASTPIXEL is never lit. We respect that now
+ // by setting D3DRS_LASTPIXEL to FALSE and use line segments that
+ // start in center of first pixel and end in center of one pixel
+ // beyond last.
+ //
+ // Since our top left corner is integer, we add 0.5 to get to the
+ // pixel center.
+ //
+ if (self.NeedCoverageGeometry((*pIntervalSpanStart).m_nCoverage.get()))
+ {
+ let rCoverage: f32 = ((*pIntervalSpanStart).m_nCoverage.get() as f32)/(c_nShiftSizeSquared as f32);
+
+ let mut iBegin: LONG = (*pIntervalSpanStart).m_nPixelX.get();
+ let mut iEnd: LONG = (*(*pIntervalSpanStart).m_pNext.get()).m_nPixelX.get();
+ if (self.NeedOutsideGeometry())
+ {
+ // Intersect the interval with the outside bounds to create
+ // start and stop lines. The scan begins (ends) with an
+ // interval starting (ending) at -inf (+inf).
+
+ // The given geometry is not guaranteed to be within m_rcOutsideBounds but
+ // the additional inner min and max (in that order) produce empty spans
+ // for intervals not intersecting m_rcOutsideBounds.
+ //
+ // We could cull here but that should really be done by the geometry
+ // generator.
+
+ iBegin = iBegin.max(iEnd.min(self.m_rcOutsideBounds.left));
+ iEnd = iEnd.min(iBegin.max(self.m_rcOutsideBounds.right));
+ }
+            let rPixelXBegin: f32 = (iBegin as f32) + 0.5;
+ let rPixelXEnd: f32 = (iEnd as f32) + 0.5;
+
+ //
+ // Output line (linelist or tristrip) for a pixel
+ //
+
+ //if let Some(pLineSink) = pLineSink
+ {
+ let mut v0: PointXYA = Default::default(); let mut v1: PointXYA = Default::default();
+ v0.x = rPixelXBegin;
+ v0.y = rPixelY;
+ v0.a = rCoverage;
+
+ v1.x = rPixelXEnd;
+ v1.y = rPixelY;
+ v1.a = rCoverage;
+
+ IFC!(self.m_pVB.AddLine(&v0,&v1));
+ }
+ //else
+ {
+ /*
+ let dwDiffuse = ReinterpretFloatAsDWORD(rCoverage);
+
+ pVertex[0].ptPt.X = rPixelXBegin;
+ pVertex[0].ptPt.Y = rPixelY;
+ pVertex[0].Diffuse = dwDiffuse;
+
+ pVertex[1].ptPt.X = rPixelXEnd;
+ pVertex[1].ptPt.Y = rPixelY;
+ pVertex[1].Diffuse = dwDiffuse;
+
+ // Advance output vertex pointer
+ pVertex += 2;*/
+ }
+ }
+
+ //
+ // Advance coverage buffer
+ //
+
+ pIntervalSpanStart = (*pIntervalSpanStart).m_pNext.get();
+ }
+
+
+//Cleanup:
+ RRETURN!(hr);
+
+}
+}
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::AddLineAsTriangleList
+//
+// Synopsis:  Adds a horizontal line as a triangle list to work around an
+//            issue in D3D9, caused by line clipping, where horizontal lines
+//            with y = 0 may not render.
+//
+//            This behavior will change in D3D10 and this work-around will no
+//            longer be needed. (Pixel center conventions will also change.)
+//
+//-----------------------------------------------------------------------------
+impl CHwVertexBuffer<'_> {
+ fn AddLineAsTriangleList(&mut self,
+ pBegin: &CD3DVertexXYZDUV2, // Begin
+ pEnd: &CD3DVertexXYZDUV2 // End
+ ) -> HRESULT
+{
+ let hr = S_OK;
+
+ // Collect pertinent data from vertices.
+ debug_assert!(pBegin.y == pEnd.y);
+ debug_assert!(pBegin.coverage == pEnd.coverage);
+
+ // Offset begin and end X left by 0.5 because the line starts on the first
+ // pixel center and ends on the center of the pixel after the line segment.
+ let x0 = pBegin.x - 0.5;
+ let x1 = pEnd.x - 0.5;
+ let y = pBegin.y;
+ let dwDiffuse = pBegin.coverage;
+
+ //
+ // Add the vertices
+ //
+
+    // OpenGL doesn't specify how vertex positions are converted to fixed point prior to rasterization. On macOS, with AMD GPUs,
+    // the GPU appears to truncate to fixed point instead of rounding. This behaviour is controlled by the PA_SU_VTX_CNTL
+    // register. To handle this we add a 1./512. subpixel bias to the center vertex to cause the coordinates to round instead
+    // of truncating. (A small sketch of this rounding fix follows this impl block.)
+    //
+    // D3D11 requires the fixed point integer result to be within 0.6 ULP, which implicitly disallows the truncate behaviour above.
+    // This means that D2D doesn't need to deal with this problem.
+ let subpixel_bias = self.subpixel_bias;
+
+
+ // Use a single triangle to cover the entire line
+ self.AddTriVertices(
+ OutputVertex{ x: x0, y: y - 0.5, coverage: dwDiffuse },
+ OutputVertex{ x: x0, y: y + 0.5, coverage: dwDiffuse },
+ OutputVertex{ x: x1, y: y + subpixel_bias, coverage: dwDiffuse },
+ );
+
+ self.AddedNonLineSegment();
+
+ //Cleanup:
+ RRETURN!(hr);
+}
+}
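+
+// A minimal sketch (not part of the original source) of why the subpixel
+// bias above matters: if a driver truncates post-transform coordinates to
+// 1/256 fixed point instead of rounding, adding half an increment (1/512)
+// before conversion restores round-to-nearest behaviour. The fixed-point
+// granularity here is an assumption for illustration.
+#[cfg(test)]
+mod subpixel_bias_sketch {
+    // Convert to 1/256 subpixel fixed point by truncation, mimicking the
+    // driver behaviour described in AddLineAsTriangleList.
+    fn to_fixed_trunc(v: f32) -> i32 {
+        (v * 256.0) as i32
+    }
+
+    #[test]
+    fn bias_restores_rounding() {
+        let x = 10.999999_f32; // rounds to subpixel 2816, truncates to 2815
+        assert_eq!(to_fixed_trunc(x), 2815);
+        assert_eq!(to_fixed_trunc(x + 1.0 / 512.0), 2816);
+    }
+}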
+
+/*
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::AddParallelogram
+//
+// Synopsis: This function adds the coordinates of a parallelogram to the vertex strip buffer.
+//
+// Parameter: rgPosition contains four coordinates of the parallelogram. Coordinates should have
+// a winding order
+//
+//-----------------------------------------------------------------------------
+template <class TVertex>
+HRESULT
+CHwTVertexBuffer<TVertex>::Builder::AddParallelogram(
+ __in_ecount(4) const MilPoint2F *rgPosition
+ )
+{
+ HRESULT hr = S_OK;
+
+ if (AreWaffling())
+ {
+ PointXYA rgPoints[4];
+ for (int i = 0; i < 4; ++i)
+ {
+ rgPoints[i].x = rgPosition[i].X;
+ rgPoints[i].y = rgPosition[i].Y;
+ rgPoints[i].a = 1;
+ }
+ TriangleWaffler<PointXYA> wafflers[NUM_OF_VERTEX_TEXTURE_COORDS(TVertex) * 2];
+ TriangleWaffler<PointXYA>::ISink *pWaffleSinkNoRef = BuildWafflePipeline(wafflers);
+ IFC(pWaffleSinkNoRef->AddTriangle(rgPoints[0], rgPoints[1], rgPoints[3]));
+ IFC(pWaffleSinkNoRef->AddTriangle(rgPoints[3], rgPoints[1], rgPoints[2]));
+ }
+ else
+ {
+ TVertex *pVertex;
+
+ //
+ // Add the vertices
+ //
+
+ IFC(m_pVB->AddTriStripVertices(6, &pVertex));
+
+ //
+ // Duplicate the first vertex. This creates 2 degenerate triangles: one connecting
+ // the previous rect to this one and another between vertices 0 and 1.
+ //
+
+ pVertex[0].ptPt = rgPosition[0];
+ pVertex[0].Diffuse = FLOAT_ONE;
+
+ pVertex[1].ptPt = rgPosition[0];
+ pVertex[1].Diffuse = FLOAT_ONE;
+
+ pVertex[2].ptPt = rgPosition[1];
+ pVertex[2].Diffuse = FLOAT_ONE;
+
+ pVertex[3].ptPt = rgPosition[3];
+ pVertex[3].Diffuse = FLOAT_ONE;
+
+ pVertex[4].ptPt = rgPosition[2];
+ pVertex[4].Diffuse = FLOAT_ONE;
+
+ //
+ // Duplicate the last vertex. This creates 2 degenerate triangles: one
+ // between vertices 4 and 5 and one connecting this Rect to the
+ // next one.
+ //
+
+ pVertex[5].ptPt = rgPosition[2];
+ pVertex[5].Diffuse = FLOAT_ONE;
+ }
+
+ Cleanup:
+ RRETURN(hr);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::BuildWafflePipeline<TWaffler>
+//
+// Synopsis: Builds a pipeline of wafflers into the provided array of wafflers.
+// And returns a pointer (not to be deleted) to the input sink
+// of the waffle pipeline.
+//              The final result is sunk into m_pVB.
+//
+//-----------------------------------------------------------------------------
+
+template<class TVertex>
+template<class TWaffler>
+__out_ecount(1) typename TWaffler::ISink *
+CHwTVertexBuffer<TVertex>::Builder::BuildWafflePipeline(
+ __out_xcount(NUM_OF_VERTEX_TEXTURE_COORDS(TVertex) * 2) TWaffler *wafflers,
+ __out_ecount(1) bool &fWafflersUsed
+ ) const
+{
+ UINT count = 0;
+
+ for (int i = 0; i < NUM_OF_VERTEX_TEXTURE_COORDS(TVertex); ++i)
+ {
+ if (m_map.m_rgWaffleMode[i] != 0)
+ {
+ const MILMatrix3x2 &pMatWaffle = m_map.m_rgmatPointToUV[i];
+
+ // Each column ([a,b,c] transpose) of this matrix specifies a waffler that
+ // partitions the plane into regions between the lines:
+ // ax + by + c = k
+ // for every integer k.
+ //
+ // If this partition width is substantially less than a pixel we have
+ // serious problems with waffling generating too many triangles for
+ // doubtful visual effect so we don't perform a waffling with width less
+ // than c_rMinWaffleWidthPixels. So we need to know the width of the partition
+ // regions:
+ //
+ // Changing c just translates the partition so let's assume c = 0.
+ // The line ax + by = 0 goes through the origin and the line ax + by
+ // = 1 is adjacent to it in the partition. The distance between
+ // these lines is also the distance from ax + by = 1 to the origin.
+ // Using Lagrange multipliers we can determine that this distance
+ // is
+ // 1/sqrt(a*a+b*b).
+ // We want to avoid waffling if this is less than c_rMinWaffleWidthPixels
+ // or equivalently:
+ // 1/sqrt(a*a+b*b) < c_rMinWaffleWidthPixels
+ // sqrt(a*a+b*b) > 1/c_rMinWaffleWidthPixels
+ // a*a+b*b > 1/(c_rMinWaffleWidthPixels*c_rMinWaffleWidthPixels)
+ //
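+            // For example, with a = 3, b = 4 the partition width is
+            // 1/sqrt(3*3 + 4*4) = 0.2 pixels, so waffling along that
+            // direction is skipped whenever c_rMinWaffleWidthPixels
+            // exceeds 0.2.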
+
+ const float c_rMaxWaffleMagnitude = 1/(c_rMinWaffleWidthPixels*c_rMinWaffleWidthPixels);
+
+ float mag0 = pMatWaffle.m_00*pMatWaffle.m_00+pMatWaffle.m_10*pMatWaffle.m_10;
+ if (mag0 < c_rMaxWaffleMagnitude)
+ {
+ wafflers[count].Set(pMatWaffle.m_00, pMatWaffle.m_10, pMatWaffle.m_20, wafflers+count+1);
+ ++count;
+ }
+
+ float mag1 = pMatWaffle.m_01*pMatWaffle.m_01+pMatWaffle.m_11*pMatWaffle.m_11;
+ if (mag1 < c_rMaxWaffleMagnitude)
+ {
+ wafflers[count].Set(pMatWaffle.m_01, pMatWaffle.m_11, pMatWaffle.m_21, wafflers+count+1);
+ ++count;
+ }
+ }
+ }
+
+ if (count)
+ {
+ fWafflersUsed = true;
+ // As the last step in the chain we send the triangles to our vertex buffer.
+ wafflers[count-1].SetSink(m_pVB);
+ return &wafflers[0];
+ }
+ else
+ {
+ fWafflersUsed = false;
+ // If we built no wafflers then sink straight into the vertex buffer.
+ return m_pVB;
+ }
+}
+
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::IsEmpty
+//
+// Synopsis: Does our VB have any triangles/lines?
+//
+//-----------------------------------------------------------------------------
+template <class TVertex>
+BOOL
+CHwTVertexBuffer<TVertex>::Builder::IsEmpty()
+{
+ return m_pVB->IsEmpty();
+}
+*/
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::AddTrapezoid
+//
+// Synopsis: Add a trapezoid to the vertex buffer
+//
+//
+// left edge right edge
+// ___+_________________+___ <<< top edge
+// / + / \ + \
+// / + / \ + \
+// / + / \ + \
+// /__+__/___________________\__+__\ <<< bottom edge
+// + ^^ +
+// delta
+//
+impl CHwVertexBufferBuilder<'_, '_> {
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::AddTrapezoidStandard
+//
+// Synopsis: See AddTrapezoid. This doesn't do waffling & uses tri strips.
+//
+
+fn AddTrapezoidStandard(&mut self,
+ rPixelYTop: f32, // In: y coordinate of top of trapezoid
+ rPixelXTopLeft: f32, // In: x coordinate for top left
+ rPixelXTopRight: f32, // In: x coordinate for top right
+ rPixelYBottom: f32, // In: y coordinate of bottom of trapezoid
+ rPixelXBottomLeft: f32, // In: x coordinate for bottom left
+ rPixelXBottomRight: f32, // In: x coordinate for bottom right
+ rPixelXLeftDelta: f32, // In: trapezoid expand radius for left edge
+ rPixelXRightDelta: f32 // In: trapezoid expand radius for right edge
+ ) -> HRESULT
+{
+ type TVertex = CD3DVertexXYZDUV2;
+ let hr = S_OK;
+ //TVertex *pVertex;
+
+ IFC!(self.PrepareStratum(
+ rPixelYTop,
+ rPixelYBottom,
+ true, /* Trapezoid */
+ rPixelXTopLeft.min(rPixelXBottomLeft),
+ rPixelXTopRight.max(rPixelXBottomRight),
+ rPixelXTopLeft - rPixelXLeftDelta, rPixelXBottomLeft - rPixelXLeftDelta,
+ rPixelXTopRight + rPixelXRightDelta, rPixelXBottomRight + rPixelXRightDelta
+ ));
+
+ //
+ // Add the vertices
+ //
+
+    let fNeedOutsideGeometry: bool = self.NeedOutsideGeometry();
+    let fNeedInsideGeometry: bool = self.NeedInsideGeometry();
+
+ //
+ // Fill in the vertices
+ //
+
+ self.m_pVB.AddTrapezoidVertices(
+ OutputVertex{
+ x: rPixelXTopLeft - rPixelXLeftDelta,
+ y: rPixelYTop,
+ coverage: FLOAT_ZERO,
+ },
+ OutputVertex{
+ x: rPixelXBottomLeft - rPixelXLeftDelta,
+ y: rPixelYBottom,
+ coverage: FLOAT_ZERO,
+ },
+ OutputVertex{
+ x: rPixelXTopLeft + rPixelXLeftDelta,
+ y: rPixelYTop,
+ coverage: FLOAT_ONE,
+ },
+ OutputVertex{
+ x: rPixelXBottomLeft + rPixelXLeftDelta,
+ y: rPixelYBottom,
+ coverage: FLOAT_ONE,
+ }
+ );
+
+
+ if (fNeedInsideGeometry)
+ {
+ self.m_pVB.AddTrapezoidVertices(
+ OutputVertex{
+ x: rPixelXTopLeft + rPixelXLeftDelta,
+ y: rPixelYTop,
+ coverage: FLOAT_ONE,
+ },
+ OutputVertex{
+ x: rPixelXBottomLeft + rPixelXLeftDelta,
+ y: rPixelYBottom,
+ coverage: FLOAT_ONE,
+ },
+ OutputVertex{
+ x: rPixelXTopRight - rPixelXRightDelta,
+ y: rPixelYTop,
+ coverage: FLOAT_ONE,
+ },
+ OutputVertex{
+ x: rPixelXBottomRight - rPixelXRightDelta,
+ y: rPixelYBottom,
+ coverage: FLOAT_ONE,
+ }
+ );
+ }
+
+ self.m_pVB.AddTrapezoidVertices(
+ OutputVertex{
+ x: rPixelXTopRight - rPixelXRightDelta,
+ y: rPixelYTop,
+ coverage: FLOAT_ONE,
+ },
+ OutputVertex{
+ x: rPixelXBottomRight - rPixelXRightDelta,
+ y: rPixelYBottom,
+ coverage: FLOAT_ONE,
+ },
+ OutputVertex{
+ x: rPixelXTopRight + rPixelXRightDelta,
+ y: rPixelYTop,
+ coverage: FLOAT_ZERO,
+ },
+ OutputVertex{
+ x: rPixelXBottomRight + rPixelXRightDelta,
+ y: rPixelYBottom,
+ coverage: FLOAT_ZERO,
+ }
+ );
+
+ if (!fNeedOutsideGeometry)
+ {
+ //
+ // Duplicate the last vertex. This creates 2 degenerate triangles: one
+ // between vertices 8 and 9 and one connecting this trapezoid to the
+ // next one.
+ //
+
+ //pVertex.push(OutputVertex{
+ // x: rPixelXBottomRight + rPixelXRightDelta,
+ // y: rPixelYBottom,
+ // coverage: FLOAT_ZERO,
+ //});
+ }
+
+ self.m_pVB.AddedNonLineSegment();
+
+//Cleanup:
+ RRETURN!(hr);
+}
+}
+/*
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::AddTrapezoidWaffle
+//
+// Synopsis: See AddTrapezoid. This adds a waffled trapezoid.
+//
+//-----------------------------------------------------------------------------
+template <class TVertex>
+HRESULT
+CHwTVertexBuffer<TVertex>::Builder::AddTrapezoidWaffle(
+ float rPixelYTop, // In: y coordinate of top of trapezoid
+ float rPixelXTopLeft, // In: x coordinate for top left
+ float rPixelXTopRight, // In: x coordinate for top right
+ float rPixelYBottom, // In: y coordinate of bottom of trapezoid
+ float rPixelXBottomLeft, // In: x coordinate for bottom left
+ float rPixelXBottomRight, // In: x coordinate for bottom right
+ float rPixelXLeftDelta, // In: trapezoid expand radius for left edge
+ float rPixelXRightDelta // In: trapezoid expand radius for right edge
+ )
+{
+ HRESULT hr = S_OK;
+
+ // We have 2 (u & v) wafflers per texture coordinate that need waffling.
+ TriangleWaffler<PointXYA> wafflers[NUM_OF_VERTEX_TEXTURE_COORDS(TVertex) * 2];
+ bool fWafflersUsed = false;
+
+ TriangleWaffler<PointXYA>::ISink *pWaffleSinkNoRef = BuildWafflePipeline(wafflers, OUT fWafflersUsed);
+
+ PointXYA vertices[8];
+
+ //
+ // Fill in the strip vertices
+ //
+
+ // Nonstandard coverage mapping and waffling are not supported at the same time.
+ Assert(!NeedOutsideGeometry());
+
+ vertices[0].x = rPixelXTopLeft - rPixelXLeftDelta;
+ vertices[0].y = rPixelYTop;
+ vertices[0].a = 0;
+
+ vertices[1].x = rPixelXBottomLeft - rPixelXLeftDelta;
+ vertices[1].y = rPixelYBottom;
+ vertices[1].a = 0;
+
+ vertices[2].x = rPixelXTopLeft + rPixelXLeftDelta;
+ vertices[2].y = rPixelYTop;
+ vertices[2].a = 1;
+
+ vertices[3].x = rPixelXBottomLeft + rPixelXLeftDelta;
+ vertices[3].y = rPixelYBottom;
+ vertices[3].a = 1;
+
+ vertices[4].x = rPixelXTopRight - rPixelXRightDelta;
+ vertices[4].y = rPixelYTop;
+ vertices[4].a = 1;
+
+ vertices[5].x = rPixelXBottomRight - rPixelXRightDelta;
+ vertices[5].y = rPixelYBottom;
+ vertices[5].a = 1;
+
+ vertices[6].x = rPixelXTopRight + rPixelXRightDelta;
+ vertices[6].y = rPixelYTop;
+ vertices[6].a = 0;
+
+ vertices[7].x = rPixelXBottomRight + rPixelXRightDelta;
+ vertices[7].y = rPixelYBottom;
+ vertices[7].a = 0;
+
+ // Send the triangles in the strip through the waffle pipeline.
+ for (int i = 0; i < 6; ++i)
+ {
+ IFC(pWaffleSinkNoRef->AddTriangle(vertices[i+1], vertices[i], vertices[i+2]));
+ }
+
+Cleanup:
+ RRETURN(hr);
+}
+*/
+impl CHwVertexBufferBuilder<'_, '_> {
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::NeedCoverageGeometry
+//
+// Synopsis: Returns true if the coverage value needs to be rendered
+// based on NeedInsideGeometry() and NeedOutsideGeometry()
+//
+// Two cases where we don't need to generate geometry:
+// 1. NeedInsideGeometry is false, and coverage is c_nShiftSizeSquared.
+// 2. NeedOutsideGeometry is false and coverage is 0
+//
+//-----------------------------------------------------------------------------
+fn NeedCoverageGeometry(&self,
+ nCoverage: INT
+ ) -> bool
+{
+ return (self.NeedInsideGeometry() || nCoverage != c_nShiftSizeSquared)
+ && (self.NeedOutsideGeometry() || nCoverage != 0);
+}
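+
+// Example: in the default configuration (inside geometry only) this
+// reduces to (nCoverage != 0): fully covered spans still produce geometry
+// and fully transparent spans are skipped. With outside geometry enabled
+// and inside disabled, it is instead the fully covered spans that are
+// skipped.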
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: NeedOutsideGeometry
+ //
+ // Synopsis: True if we should create geometry with zero alpha for
+ // areas outside the input geometry but within a given
+ // bounding box.
+ //
+ //-------------------------------------------------------------------------
+ fn NeedOutsideGeometry(&self) -> bool
+ {
+ return self.m_fNeedOutsideGeometry;
+ }
+
+ //+------------------------------------------------------------------------
+ //
+ // Member: NeedInsideGeometry
+ //
+ // Synopsis: True if we should create geometry for areas completely
+    //             within the input geometry (i.e. alpha 1.) Should only
+ // be false if NeedOutsideGeometry is true.
+ //
+ //-------------------------------------------------------------------------
+ fn NeedInsideGeometry(&self) -> bool
+ {
+ assert!(self.m_fNeedOutsideGeometry || self.m_fNeedInsideGeometry);
+ return self.m_fNeedInsideGeometry;
+ }
+
+
+
+ // Helpers that handle extra shapes in trapezoid mode.
+ fn PrepareStratum(&mut self,
+ rStratumTop: f32,
+ rStratumBottom: f32,
+ fTrapezoid: bool,
+ rTrapezoidLeft: f32,
+ rTrapezoidRight: f32,
+ rTrapezoidTopLeft: f32, // = 0
+ rTrapezoidBottomLeft: f32, // = 0
+ rTrapezoidTopRight: f32, // = 0
+ rTrapezoidBottomRight: f32, // = 0
+
+ ) -> HRESULT
+ {
+ return if self.NeedOutsideGeometry() {
+ self.PrepareStratumSlow(
+ rStratumTop,
+ rStratumBottom,
+ fTrapezoid,
+ rTrapezoidLeft,
+ rTrapezoidRight,
+ rTrapezoidTopLeft,
+ rTrapezoidBottomLeft,
+ rTrapezoidTopRight,
+ rTrapezoidBottomRight
+ )
+ } else { S_OK };
+ }
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::PrepareStratumSlow
+//
+// Synopsis: Call before producing a new stratum (complex span or trapezoid.)
+// Handles several tasks:
+//              1. Producing geometry between the top of the complement
+//                 geometry & the 1st stratum, or when a gap between strata
+//                 occurs (because the geometry is not closed and has
+//                 horizontal gaps.) Passing in FLT_MAX for rStratumTop and
+//                 rStratumBottom fills the gap between the last stratum
+//                 and the bottom of the outside.
+// 2. Begins and/or ends the triangle strip corresponding to
+// a trapezoid row.
+// 3. Updates status vars m_rCurStratumTop & m_rCurStratumBottom
+//
+// Note: Call PrepareStratum which inlines the check for NeedOutsideGeometry()
+// If NeedOutsideGeometry is false PrepareStratum() does nothing.
+// This (slow) version asserts NeedOutsideGeometry()
+//
+//-----------------------------------------------------------------------------
+fn PrepareStratumSlow(&mut self,
+ rStratumTop: f32,
+ rStratumBottom: f32,
+ fTrapezoid: bool,
+ rTrapezoidLeft: f32,
+ rTrapezoidRight: f32,
+ rTrapezoidTopLeft: f32,
+ rTrapezoidBottomLeft: f32,
+ rTrapezoidTopRight: f32,
+ rTrapezoidBottomRight: f32,
+ ) -> HRESULT
+{
+ type TVertex = OutputVertex;
+ let hr: HRESULT = S_OK;
+
+ assert!(!(rStratumTop > rStratumBottom));
+ assert!(self.NeedOutsideGeometry());
+
+    // There's only one case where a stratum can go "backwards"
+ // and that's when we're done building & calling from
+ // EndBuildingOutside
+
+    let fEndBuildingOutside: bool = rStratumBottom == self.OutsideBottom() &&
+        rStratumTop == self.OutsideBottom();
+
+    if (fEndBuildingOutside)
+    {
+        assert!(!fTrapezoid);
+    }
+    else
+    {
+        assert!(!(rStratumBottom < self.m_rCurStratumBottom));
+    }
+
+    if (fEndBuildingOutside
+        || rStratumBottom != self.m_rCurStratumBottom)
+ {
+
+ // New stratum starting now. Two things to do
+ // 1. Close out current trapezoid stratum if necessary.
+ // 2. Begin new trapezoid stratum if necessary.
+
+ if (self.m_rCurStratumTop != f32::MAX)
+ {
+            // We do not clip trapezoids, so the RIGHT boundary
+            // of the stratum can be outside of m_rcOutsideBounds.
+
+ let rOutsideRight: f32 = self.OutsideRight().max(self.m_rLastTrapezoidRight);
+
+ // End current trapezoid stratum.
+
+ self.m_pVB.AddTrapezoidVertices(
+ OutputVertex{
+ x: self.m_rLastTrapezoidTopRight,
+ y: self.m_rCurStratumTop,
+ coverage: FLOAT_ZERO,
+ },
+ OutputVertex{
+ x: self.m_rLastTrapezoidBottomRight,
+ y: self.m_rCurStratumBottom,
+ coverage: FLOAT_ZERO,
+ },
+ OutputVertex{
+ x: rOutsideRight,
+ y: self.m_rCurStratumTop,
+ coverage: FLOAT_ZERO,
+ },
+ OutputVertex{
+ x: rOutsideRight,
+ y: self.m_rCurStratumBottom,
+ coverage: FLOAT_ZERO,
+ }
+ );
+ }
+ // Compute the gap between where the last stratum ended and where
+ // this one begins.
+ let flGap: f32 = rStratumTop - self.m_rCurStratumBottom;
+
+ if (flGap > 0.)
+ {
+ // The "special" case of a gap at the beginning is caught here
+ // using the sentinel initial value of m_rCurStratumBottom.
+
+            let flRectTop: f32 = if self.m_rCurStratumBottom == -f32::MAX {
+                self.OutsideTop()
+            } else {
+                self.m_rCurStratumBottom
+            };
+            let flRectBot: f32 = rStratumTop;
+
+            // Produce a rectangle for any horizontal intervals in the
+            // outside bounds that have no generated geometry.
+ assert!(self.m_rCurStratumBottom != -f32::MAX || self.m_rCurStratumTop == f32::MAX);
+
+ let outside_left = self.OutsideLeft();
+ let outside_right = self.OutsideRight();
+
+            // Fill the gap with a zero-coverage rectangle.
+ self.m_pVB.AddTrapezoidVertices(
+ OutputVertex{
+ x: outside_left,
+ y: flRectTop,
+ coverage: FLOAT_ZERO,
+ },
+ OutputVertex{
+ x: outside_left,
+ y: flRectBot,
+ coverage: FLOAT_ZERO,
+ },
+ OutputVertex{
+ x: outside_right,
+ y: flRectTop,
+ coverage: FLOAT_ZERO,
+ },
+ OutputVertex{
+ x: outside_right,
+ y: flRectBot,
+ coverage: FLOAT_ZERO,
+ }
+ );
+ }
+
+ if (fTrapezoid)
+ {
+
+            // We do not clip trapezoids, so the left boundary
+            // of the stratum can be outside of m_rcOutsideBounds.
+
+ let rOutsideLeft: f32 = self.OutsideLeft().min(rTrapezoidLeft);
+
+ // Begin new trapezoid stratum.
+
+ self.m_pVB.AddTrapezoidVertices(
+ OutputVertex{
+ x: rOutsideLeft,
+ y: rStratumTop,
+ coverage: FLOAT_ZERO,
+ },
+ OutputVertex{
+ x: rOutsideLeft,
+ y: rStratumBottom,
+ coverage: FLOAT_ZERO,
+ },
+ OutputVertex{
+ x: rTrapezoidTopLeft,
+ y: rStratumTop,
+ coverage: FLOAT_ZERO,
+ },
+ OutputVertex{
+ x: rTrapezoidBottomLeft,
+ y: rStratumBottom,
+ coverage: FLOAT_ZERO,
+ }
+ );
+ }
+ }
+
+ if (fTrapezoid)
+ {
+ self.m_rLastTrapezoidTopRight = rTrapezoidTopRight;
+ self.m_rLastTrapezoidBottomRight = rTrapezoidBottomRight;
+ self.m_rLastTrapezoidRight = rTrapezoidRight;
+ }
+
+ self.m_rCurStratumTop = if fTrapezoid { rStratumTop } else { f32::MAX };
+ self.m_rCurStratumBottom = rStratumBottom;
+
+ RRETURN!(hr);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::EndBuildingOutside
+//
+// Synopsis: Finish creating outside geometry.
+// 1. If no geometry was created then just fill bounds.
+// 2. Otherwise:
+// A. End last trapezoid row
+// B. Produce stop stratum
+//
+//-----------------------------------------------------------------------------
+fn EndBuildingOutside(&mut self) -> HRESULT
+{
+ return self.PrepareStratum(
+ self.OutsideBottom(),
+ self.OutsideBottom(),
+ false, /* Not a trapezoid. */
+ 0., 0.,
+ 0., 0.,
+ 0., 0.,
+ );
+}
+
+//+----------------------------------------------------------------------------
+//
+// Member: CHwTVertexBuffer<TVertex>::Builder::EndBuilding
+//
+// Synopsis: Expand all vertices to the full required format and return
+// vertex buffer.
+//
+//-----------------------------------------------------------------------------
+pub fn EndBuilding(&mut self) -> HRESULT
+{
+ let hr = S_OK;
+
+ IFC!(self.EndBuildingOutside());
+
+//Cleanup:
+ RRETURN!(hr);
+}
+
+}
diff --git a/third_party/rust/wpf-gpu-raster/src/lib.rs b/third_party/rust/wpf-gpu-raster/src/lib.rs
new file mode 100644
index 0000000000..ba730e5105
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/src/lib.rs
@@ -0,0 +1,669 @@
+/*!
+Converts a 2D path into a set of vertices of a triangle strip mesh that represents the antialiased fill of that path.
+
+```rust
+ use wpf_gpu_raster::PathBuilder;
+ let mut p = PathBuilder::new();
+ p.move_to(10., 10.);
+ p.line_to(40., 10.);
+ p.line_to(40., 40.);
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+```
+
+*/
+#![allow(unused_parens)]
+#![allow(overflowing_literals)]
+#![allow(non_snake_case)]
+#![allow(non_camel_case_types)]
+#![allow(non_upper_case_globals)]
+#![allow(dead_code)]
+#![allow(unused_macros)]
+
+#[macro_use]
+mod fix;
+#[macro_use]
+mod helpers;
+#[macro_use]
+mod real;
+mod bezier;
+#[macro_use]
+mod aarasterizer;
+mod hwrasterizer;
+mod aacoverage;
+mod hwvertexbuffer;
+
+mod types;
+mod geometry_sink;
+mod matrix;
+
+mod nullable_ref;
+
+#[cfg(feature = "c_bindings")]
+pub mod c_bindings;
+
+#[cfg(test)]
+mod tri_rasterize;
+
+use aarasterizer::CheckValidRange28_4;
+use hwrasterizer::CHwRasterizer;
+use hwvertexbuffer::{CHwVertexBuffer, CHwVertexBufferBuilder};
+use real::CFloatFPU;
+use types::{MilFillMode, PathPointTypeStart, MilPoint2F, MilPointAndSizeL, PathPointTypeLine, MilVertexFormat, MilVertexFormatAttribute, DynArray, BYTE, PathPointTypeBezier, PathPointTypeCloseSubpath, CMILSurfaceRect, POINT};
+
+#[repr(C)]
+#[derive(Clone, Debug, Default)]
+pub struct OutputVertex {
+ pub x: f32,
+ pub y: f32,
+ pub coverage: f32
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub enum FillMode {
+ EvenOdd = 0,
+ Winding = 1,
+}
+
+impl Default for FillMode {
+ fn default() -> Self {
+ FillMode::EvenOdd
+ }
+}
+
+#[derive(Clone, Default)]
+pub struct OutputPath {
+ fill_mode: FillMode,
+ points: Box<[POINT]>,
+ types: Box<[BYTE]>,
+}
+
+impl std::hash::Hash for OutputVertex {
+ fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+ self.x.to_bits().hash(state);
+ self.y.to_bits().hash(state);
+ self.coverage.to_bits().hash(state);
+ }
+}
+
+pub struct PathBuilder {
+ points: DynArray<POINT>,
+ types: DynArray<BYTE>,
+ initial_point: Option<MilPoint2F>,
+ current_point: Option<MilPoint2F>,
+ in_shape: bool,
+ fill_mode: FillMode,
+ outside_bounds: Option<CMILSurfaceRect>,
+ need_inside: bool,
+ valid_range: bool,
+ rasterization_truncates: bool,
+}
+
+impl PathBuilder {
+ pub fn new() -> Self {
+ Self {
+ points: Vec::new(),
+ types: Vec::new(),
+ initial_point: None,
+ current_point: None,
+ in_shape: false,
+ fill_mode: FillMode::EvenOdd,
+ outside_bounds: None,
+ need_inside: true,
+ valid_range: true,
+ rasterization_truncates: false,
+ }
+ }
+ fn add_point(&mut self, x: f32, y: f32) {
+ self.current_point = Some(MilPoint2F{X: x, Y: y});
+ // Transform from pixel corner at 0.0 to pixel center at 0.0. Scale into 28.4 range.
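+        // Example: an input x of 10.0 (a pixel corner) becomes
+        // (10.0 - 0.5) * 16 = 152, i.e. 9.5 in the rasterizer's
+        // pixel-center space, stored as 28.4 fixed point.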
+ // Validate that the point before rounding is within expected bounds for the rasterizer.
+ let (x, y) = ((x - 0.5) * 16.0, (y - 0.5) * 16.0);
+ self.valid_range = self.valid_range && CheckValidRange28_4(x, y);
+ self.points.push(POINT {
+ x: CFloatFPU::Round(x),
+ y: CFloatFPU::Round(y),
+ });
+ }
+ pub fn line_to(&mut self, x: f32, y: f32) {
+ if let Some(initial_point) = self.initial_point {
+ if !self.in_shape {
+ self.types.push(PathPointTypeStart);
+ self.add_point(initial_point.X, initial_point.Y);
+ self.in_shape = true;
+ }
+ self.types.push(PathPointTypeLine);
+ self.add_point(x, y);
+ } else {
+ self.initial_point = Some(MilPoint2F{X: x, Y: y})
+ }
+ }
+ pub fn move_to(&mut self, x: f32, y: f32) {
+ self.in_shape = false;
+ self.initial_point = Some(MilPoint2F{X: x, Y: y});
+ self.current_point = self.initial_point;
+ }
+ pub fn curve_to(&mut self, c1x: f32, c1y: f32, c2x: f32, c2y: f32, x: f32, y: f32) {
+ let initial_point = match self.initial_point {
+ Some(initial_point) => initial_point,
+ None => MilPoint2F{X:c1x, Y:c1y}
+ };
+ if !self.in_shape {
+ self.types.push(PathPointTypeStart);
+ self.add_point(initial_point.X, initial_point.Y);
+ self.initial_point = Some(initial_point);
+ self.in_shape = true;
+ }
+ self.types.push(PathPointTypeBezier);
+ self.add_point(c1x, c1y);
+ self.add_point(c2x, c2y);
+ self.add_point(x, y);
+ }
+ pub fn quad_to(&mut self, cx: f32, cy: f32, x: f32, y: f32) {
+ // For now we just implement quad_to on top of curve_to.
+ // Long term we probably want to support quad curves
+ // directly.
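+        // Degree elevation (the standard identity used here): the quadratic
+        // Bézier (P0, C, P1) equals the cubic with control points
+        //   P0,  P0 + (2/3)(C - P0),  P1 + (2/3)(C - P1),  P1,
+        // which is what c1 and c2 below compute.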
+ let c0 = match self.current_point {
+ Some(current_point) => current_point,
+ None => MilPoint2F{X:cx, Y:cy}
+ };
+
+ let c1x = c0.X + (2./3.) * (cx - c0.X);
+ let c1y = c0.Y + (2./3.) * (cy - c0.Y);
+
+ let c2x = x + (2./3.) * (cx - x);
+ let c2y = y + (2./3.) * (cy - y);
+
+ self.curve_to(c1x, c1y, c2x, c2y, x, y);
+ }
+ pub fn close(&mut self) {
+ if let Some(last) = self.types.last_mut() {
+ *last |= PathPointTypeCloseSubpath;
+ }
+ self.in_shape = false;
+ self.initial_point = None;
+ }
+ pub fn set_fill_mode(&mut self, fill_mode: FillMode) {
+ self.fill_mode = fill_mode;
+ }
+ /// Enables rendering geometry for areas outside the shape but
+ /// within the bounds. These areas will be created with
+ /// zero alpha.
+ ///
+ /// This is useful for creating geometry for other blend modes.
+ /// For example:
+ /// - `IN(dest, geometry)` can be done with `outside_bounds` and `need_inside = false`
+ /// - `IN(dest, geometry, alpha)` can be done with `outside_bounds` and `need_inside = true`
+ ///
+ /// Note: trapezoidal areas won't be clipped to outside_bounds
+ pub fn set_outside_bounds(&mut self, outside_bounds: Option<(i32, i32, i32, i32)>, need_inside: bool) {
+ self.outside_bounds = outside_bounds.map(|r| CMILSurfaceRect { left: r.0, top: r.1, right: r.2, bottom: r.3 });
+ self.need_inside = need_inside;
+ }
+
+ /// Set this to true if post vertex shader coordinates are converted to fixed point
+ /// via truncation. This has been observed with OpenGL on AMD GPUs on macOS.
+ pub fn set_rasterization_truncates(&mut self, rasterization_truncates: bool) {
+ self.rasterization_truncates = rasterization_truncates;
+ }
+
+ /// Note: trapezoidal areas won't necessarily be clipped to the clip rect
+ pub fn rasterize_to_tri_list(&self, clip_x: i32, clip_y: i32, clip_width: i32, clip_height: i32) -> Box<[OutputVertex]> {
+ if !self.valid_range {
+            // If any of the points are outside of the valid 28.4 range, then just return an empty triangle list.
+ return Box::new([]);
+ }
+ let (x, y, width, height, need_outside) = if let Some(CMILSurfaceRect { left, top, right, bottom }) = self.outside_bounds {
+ let x0 = clip_x.max(left);
+ let y0 = clip_y.max(top);
+ let x1 = (clip_x + clip_width).min(right);
+ let y1 = (clip_y + clip_height).min(bottom);
+ (x0, y0, x1 - x0, y1 - y0, true)
+ } else {
+ (clip_x, clip_y, clip_width, clip_height, false)
+ };
+ rasterize_to_tri_list(self.fill_mode, &self.types, &self.points, x, y, width, height, self.need_inside, need_outside, self.rasterization_truncates, None)
+ .flush_output()
+ }
+
+ pub fn get_path(&mut self) -> Option<OutputPath> {
+ if self.valid_range && !self.points.is_empty() && !self.types.is_empty() {
+ Some(OutputPath {
+ fill_mode: self.fill_mode,
+ points: std::mem::take(&mut self.points).into_boxed_slice(),
+ types: std::mem::take(&mut self.types).into_boxed_slice(),
+ })
+ } else {
+ None
+ }
+ }
+}
+
+// Converts a path, specified as an array of edge types each associated with a fixed number of
+// points serialized to the points array, into a triangle list. Edge types are specified via
+// PathPointType masks, and points must be supplied in 28.4 signed fixed-point format. By default
+// only the inside of the path is filled. It may alternatively be desirable to fill the outside
+// of the path out to the clip boundary, optionally keeping the inside. PathBuilder may be used
+// instead as a simpler interface to this function that handles building the path arrays.
+pub fn rasterize_to_tri_list<'a>(
+ fill_mode: FillMode,
+ types: &[BYTE],
+ points: &[POINT],
+ clip_x: i32,
+ clip_y: i32,
+ clip_width: i32,
+ clip_height: i32,
+ need_inside: bool,
+ need_outside: bool,
+ rasterization_truncates: bool,
+ output_buffer: Option<&'a mut [OutputVertex]>,
+) -> CHwVertexBuffer<'a> {
+ let clipRect = MilPointAndSizeL {
+ X: clip_x,
+ Y: clip_y,
+ Width: clip_width,
+ Height: clip_height,
+ };
+
+ let mil_fill_mode = match fill_mode {
+ FillMode::EvenOdd => MilFillMode::Alternate,
+ FillMode::Winding => MilFillMode::Winding,
+ };
+
+ let m_mvfIn: MilVertexFormat = MilVertexFormatAttribute::MILVFAttrXY as MilVertexFormat;
+ let m_mvfGenerated: MilVertexFormat = MilVertexFormatAttribute::MILVFAttrNone as MilVertexFormat;
+ //let mvfaAALocation = MILVFAttrNone;
+ const HWPIPELINE_ANTIALIAS_LOCATION: MilVertexFormatAttribute = MilVertexFormatAttribute::MILVFAttrDiffuse;
+ let mvfaAALocation = HWPIPELINE_ANTIALIAS_LOCATION;
+
+ let outside_bounds = if need_outside {
+ Some(CMILSurfaceRect {
+ left: clip_x,
+ top: clip_y,
+ right: clip_x + clip_width,
+ bottom: clip_y + clip_height,
+ })
+ } else {
+ None
+ };
+
+ let mut vertexBuffer = CHwVertexBuffer::new(rasterization_truncates, output_buffer);
+ {
+ let mut vertexBuilder = CHwVertexBufferBuilder::Create(
+ m_mvfIn, m_mvfIn | m_mvfGenerated, mvfaAALocation, &mut vertexBuffer);
+ vertexBuilder.SetOutsideBounds(outside_bounds.as_ref(), need_inside);
+ vertexBuilder.BeginBuilding();
+ {
+ let mut rasterizer = CHwRasterizer::new(
+ &mut vertexBuilder, mil_fill_mode, None, clipRect);
+ rasterizer.SendGeometry(points, types);
+ }
+ vertexBuilder.EndBuilding();
+ }
+
+ vertexBuffer
+}
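+
+// A usage sketch (not part of the original source) of calling the entry
+// point above directly with pre-built 28.4 fixed-point arrays: a square
+// filled with the winding rule. Note that PathBuilder additionally shifts
+// coordinates by half a pixel before converting; this sketch feeds raw
+// 28.4 values as-is.
+#[cfg(test)]
+mod raw_api_sketch {
+    use crate::{rasterize_to_tri_list, FillMode};
+    use crate::types::{PathPointTypeStart, PathPointTypeLine, PathPointTypeCloseSubpath, POINT};
+
+    #[test]
+    fn square() {
+        // A 4x4-pixel square with corners at (1, 1) and (5, 5), in 28.4
+        // fixed point (multiply pixel coordinates by 16).
+        let points = [
+            POINT { x: 1 * 16, y: 1 * 16 },
+            POINT { x: 5 * 16, y: 1 * 16 },
+            POINT { x: 5 * 16, y: 5 * 16 },
+            POINT { x: 1 * 16, y: 5 * 16 },
+        ];
+        // One type entry per point: a start followed by three lines, with
+        // the last edge closing the subpath.
+        let types = [
+            PathPointTypeStart,
+            PathPointTypeLine,
+            PathPointTypeLine,
+            PathPointTypeLine | PathPointTypeCloseSubpath,
+        ];
+        let vb = rasterize_to_tri_list(
+            FillMode::Winding, &types, &points,
+            0, 0, 100, 100,
+            /* need_inside */ true, /* need_outside */ false,
+            /* rasterization_truncates */ false, None);
+        assert!(!vb.flush_output().is_empty());
+    }
+}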
+
+#[cfg(test)]
+mod tests {
+ use std::{hash::{Hash, Hasher}, collections::hash_map::DefaultHasher};
+ use crate::{*, tri_rasterize::rasterize_to_mask};
+ fn calculate_hash<T: Hash>(t: &T) -> u64 {
+ let mut s = DefaultHasher::new();
+ t.hash(&mut s);
+ s.finish()
+ }
+ #[test]
+ fn basic() {
+ let mut p = PathBuilder::new();
+ p.move_to(10., 10.);
+ p.line_to(10., 30.);
+ p.line_to(30., 30.);
+ p.line_to(30., 10.);
+ p.close();
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ assert_eq!(result.len(), 18);
+ //assert_eq!(dbg!(calculate_hash(&result)), 0x5851570566450135);
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0xfbb7c3932059e240);
+ }
+
+ #[test]
+ fn simple() {
+ let mut p = PathBuilder::new();
+ p.move_to(10., 10.);
+ p.line_to(40., 10.);
+ p.line_to(40., 40.);
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ //assert_eq!(dbg!(calculate_hash(&result)), 0x81a9af7769f88e68);
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x6d1595533d40ef92);
+ }
+
+ #[test]
+ fn rust() {
+ let mut p = PathBuilder::new();
+ p.move_to(10., 10.);
+ p.line_to(40., 10.);
+ p.line_to(40., 40.);
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ //assert_eq!(dbg!(calculate_hash(&result)), 0x81a9af7769f88e68);
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x6d1595533d40ef92);
+ }
+
+ #[test]
+ fn fill_mode() {
+ let mut p = PathBuilder::new();
+ p.move_to(10., 10.);
+ p.line_to(40., 10.);
+ p.line_to(40., 40.);
+ p.line_to(10., 40.);
+ p.close();
+ p.move_to(15., 15.);
+ p.line_to(35., 15.);
+ p.line_to(35., 35.);
+ p.line_to(15., 35.);
+ p.close();
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ //assert_eq!(dbg!(calculate_hash(&result)), 0xb34344234f2f75a8);
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0xc7bf999c56ccfc34);
+
+ let mut p = PathBuilder::new();
+ p.move_to(10., 10.);
+ p.line_to(40., 10.);
+ p.line_to(40., 40.);
+ p.line_to(10., 40.);
+ p.close();
+ p.move_to(15., 15.);
+ p.line_to(35., 15.);
+ p.line_to(35., 35.);
+ p.line_to(15., 35.);
+ p.close();
+ p.set_fill_mode(FillMode::Winding);
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ //assert_eq!(dbg!(calculate_hash(&result)), 0xee4ecd8a738fc42c);
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0xfafad659db9a2efd);
+
+ }
+
+ #[test]
+ fn range() {
+ // test for a start point out of range
+ let mut p = PathBuilder::new();
+ p.curve_to(8.872974e16, 0., 0., 0., 0., 0.);
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ assert_eq!(result.len(), 0);
+
+ // test for a subsequent point out of range
+ let mut p = PathBuilder::new();
+ p.curve_to(0., 0., 8.872974e16, 0., 0., 0.);
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ assert_eq!(result.len(), 0);
+ }
+
+ #[test]
+ fn multiple_starts() {
+ let mut p = PathBuilder::new();
+ p.line_to(10., 10.);
+ p.move_to(0., 0.);
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ assert_eq!(result.len(), 0);
+ }
+
+ #[test]
+ fn path_closing() {
+ let mut p = PathBuilder::new();
+ p.curve_to(0., 0., 0., 0., 0., 32.0);
+ p.close();
+ p.curve_to(0., 0., 0., 0., 0., 32.0);
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ assert_eq!(result.len(), 0);
+ }
+
+ #[test]
+ fn curve() {
+ let mut p = PathBuilder::new();
+ p.move_to(10., 10.);
+ p.curve_to(40., 10., 40., 10., 40., 40.);
+ p.close();
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0xa92aae8dba7b8cd4);
+ assert_eq!(dbg!(calculate_hash(&result)), 0x8dbc4d23f9bba38d);
+ }
+
+ #[test]
+ fn partial_coverage_last_line() {
+ let mut p = PathBuilder::new();
+
+ p.move_to(10., 10.);
+ p.line_to(40., 10.);
+ p.line_to(40., 39.6);
+ p.line_to(10., 39.6);
+
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ assert_eq!(result.len(), 21);
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0xfa200c3bae144952);
+ assert_eq!(dbg!(calculate_hash(&result)), 0xf90cb6afaadfb559);
+ }
+
+ #[test]
+ fn delta_upper_bound() {
+ let mut p = PathBuilder::new();
+ p.move_to(-122.3 + 200.,84.285);
+ p.curve_to(-122.3 + 200., 84.285, -122.2 + 200.,86.179, -123.03 + 200., 86.16);
+ p.curve_to(-123.85 + 200., 86.141, -140.3 + 200., 38.066, -160.83 + 200., 40.309);
+ p.curve_to(-160.83 + 200., 40.309, -143.05 + 200., 32.956, -122.3 + 200., 84.285);
+ p.close();
+
+ let result = p.rasterize_to_tri_list(0, 0, 400, 400);
+ assert_eq!(result.len(), 429);
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x5e82d98fdb47a796);
+ assert_eq!(dbg!(calculate_hash(&result)), 0x52d52992e249587a);
+ }
+
+
+ #[test]
+ fn self_intersect() {
+ let mut p = PathBuilder::new();
+ p.move_to(10., 10.);
+ p.line_to(40., 10.);
+ p.line_to(10., 40.);
+ p.line_to(40., 40.);
+ p.close();
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x49ecc769e1d4ec01);
+ assert_eq!(dbg!(calculate_hash(&result)), 0xf10babef5c619d19);
+ }
+
+ #[test]
+ fn grid() {
+ let mut p = PathBuilder::new();
+
+ for i in 0..200 {
+ let offset = i as f32 * 1.3;
+ p.move_to(0. + offset, -8.);
+ p.line_to(0.5 + offset, -8.);
+ p.line_to(0.5 + offset, 40.);
+ p.line_to(0. + offset, 40.);
+ p.close();
+ }
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ assert_eq!(result.len(), 12000);
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x5a7df39d9e9292f0);
+ }
+
+ #[test]
+ fn outside() {
+ let mut p = PathBuilder::new();
+ p.move_to(10., 10.);
+ p.line_to(40., 10.);
+ p.line_to(10., 40.);
+ p.line_to(40., 40.);
+ p.close();
+ p.set_outside_bounds(Some((0, 0, 50, 50)), false);
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x59403ddbb7e1d09a);
+ assert_eq!(dbg!(calculate_hash(&result)), 0x805fd385e47e6f2);
+
+ // ensure that adjusting the outside bounds changes the results
+ p.set_outside_bounds(Some((5, 5, 50, 50)), false);
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x59403ddbb7e1d09a);
+ assert_eq!(dbg!(calculate_hash(&result)), 0xcec2ed688999c966);
+ }
+
+ #[test]
+ fn outside_inside() {
+ let mut p = PathBuilder::new();
+ p.move_to(10., 10.);
+ p.line_to(40., 10.);
+ p.line_to(10., 40.);
+ p.line_to(40., 40.);
+ p.close();
+ p.set_outside_bounds(Some((0, 0, 50, 50)), true);
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x49ecc769e1d4ec01);
+ assert_eq!(dbg!(calculate_hash(&result)), 0xaf76b42a5244d1ec);
+ }
+
+ #[test]
+ fn outside_clipped() {
+ let mut p = PathBuilder::new();
+ p.move_to(10., 10.);
+ p.line_to(10., 40.);
+ p.line_to(90., 40.);
+ p.line_to(40., 10.);
+ p.close();
+ p.set_outside_bounds(Some((0, 0, 50, 50)), false);
+ let result = p.rasterize_to_tri_list(0, 0, 50, 50);
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x3d2a08f5d0bac999);
+ assert_eq!(dbg!(calculate_hash(&result)), 0xbd42b934ab52be39);
+ }
+
+ #[test]
+ fn clip_edge() {
+ let mut p = PathBuilder::new();
+ // tests the bigNumerator < 0 case of aarasterizer::ClipEdge
+ p.curve_to(-24., -10., -300., 119., 0.0, 0.0);
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+        // The edge merging only happens between points inside the enumerate buffer. This means
+        // that the vertex output can depend on the size of the enumerate buffer, because the
+        // number of edges and the positions of vertices will change depending on edge merging.
+ if ENUMERATE_BUFFER_NUMBER!() == 32 {
+ assert_eq!(result.len(), 111);
+ } else {
+ assert_eq!(result.len(), 171);
+ }
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x50b887b09a4c16e);
+ }
+
+ #[test]
+ fn enum_buffer_num() {
+ let mut p = PathBuilder::new();
+ p.curve_to(0.0, 0.0, 0.0, 12.0, 0.0, 44.919434);
+        p.line_to(64.0, 36.0);
+        p.line_to(0.0, 80.0);
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ assert_eq!(result.len(), 300);
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x659cc742f16b42f2);
+ }
+
+ #[test]
+ fn fill_alternating_empty_interior_pairs() {
+ let mut p = PathBuilder::new();
+        p.line_to(0., 2.);
+        p.curve_to(0.0, 0.0, 1., 6., 0.0, 0.0);
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ assert_eq!(result.len(), 9);
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x726606a662fe46a0);
+ }
+
+ #[test]
+ fn fill_winding_empty_interior_pairs() {
+ let mut p = PathBuilder::new();
+ p.curve_to(45., 61., 0.09, 0., 0., 0.);
+ p.curve_to(45., 61., 0.09, 0., 0., 0.);
+ p.curve_to(0., 0., 0., 38., 0.09, 15.);
+ p.set_fill_mode(FillMode::Winding);
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ assert_eq!(result.len(), 462);
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x651ea4ade5543087);
+ }
+
+ #[test]
+ fn empty_fill() {
+ let mut p = PathBuilder::new();
+ p.move_to(0., 0.);
+ p.line_to(10., 100.);
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ assert_eq!(result.len(), 0);
+ }
+
+ #[test]
+ fn rasterize_line() {
+ let mut p = PathBuilder::new();
+ p.move_to(1., 1.);
+ p.line_to(2., 1.);
+ p.line_to(2., 2.);
+ p.line_to(1., 2.);
+ p.close();
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ let mask = rasterize_to_mask(&result, 3, 3);
+ assert_eq!(&mask[..], &[0, 0, 0,
+ 0, 255, 0,
+ 0, 0, 0][..]);
+ }
+
+ #[test]
+ fn triangle() {
+ let mut p = PathBuilder::new();
+ p.move_to(1., 10.);
+ p.line_to(100., 13.);
+ p.line_to(1., 16.);
+ p.close();
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x4757b0c5a19b02f0);
+ }
+
+ #[test]
+ fn single_pixel() {
+ let mut p = PathBuilder::new();
+ p.move_to(1.5, 1.5);
+ p.line_to(2., 1.5);
+ p.line_to(2., 2.);
+ p.line_to(1.5, 2.);
+ p.close();
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ assert_eq!(result.len(), 3);
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 4, 4)), 0x9f481fe5588e341c);
+ }
+
+ #[test]
+ fn traps_outside_bounds() {
+ let mut p = PathBuilder::new();
+ p.move_to(10., 10.0);
+ p.line_to(30., 10.);
+ p.line_to(50., 20.);
+ p.line_to(30., 30.);
+ p.line_to(10., 30.);
+ p.close();
+        // The generated trapezoids are not necessarily clipped to the outside bounds rect,
+        // and in this case the outside bounds geometry ends up drawing on top of the
+        // edge geometry, which could be considered a bug.
+ p.set_outside_bounds(Some((0, 0, 50, 30)), true);
+ let result = p.rasterize_to_tri_list(0, 0, 100, 100);
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x6514e3d79d641f09);
+
+ }
+
+ #[test]
+ fn quad_to() {
+ let mut p = PathBuilder::new();
+ p.move_to(10., 10.0);
+ p.quad_to(30., 10., 30., 30.);
+ p.quad_to(10., 30., 30., 30.);
+ p.quad_to(60., 30., 60., 10.);
+ p.close();
+ let result = p.rasterize_to_tri_list(0, 0, 70, 40);
+ assert_eq!(result.len(), 279);
+ assert_eq!(calculate_hash(&rasterize_to_mask(&result, 70, 40)), 0xbd2eec3cfe9bd30b);
+ }
+}
diff --git a/third_party/rust/wpf-gpu-raster/src/matrix.rs b/third_party/rust/wpf-gpu-raster/src/matrix.rs
new file mode 100644
index 0000000000..ed873410f8
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/src/matrix.rs
@@ -0,0 +1,37 @@
+use std::marker::PhantomData;
+
+use crate::types::CoordinateSpace;
+
+pub type CMILMatrix = CMatrix<CoordinateSpace::Shape,CoordinateSpace::Device>;
+#[derive(Default, Clone)]
+pub struct CMatrix<InCoordSpace, OutCoordSpace> {
+ _11: f32, _12: f32, _13: f32, _14: f32,
+ _21: f32, _22: f32, _23: f32 , _24: f32,
+ _31: f32, _32: f32, _33: f32, _34: f32,
+ _41: f32, _42: f32, _43: f32, _44: f32,
+ in_coord: PhantomData<InCoordSpace>,
+ out_coord: PhantomData<OutCoordSpace>
+}
+
+impl<InCoordSpace: Default, OutCoordSpace: Default> CMatrix<InCoordSpace, OutCoordSpace> {
+ pub fn Identity() -> Self { let mut ret: Self = Default::default();
+ ret._11 = 1.;
+ ret._22 = 1.;
+ ret._33 = 1.;
+ ret._44 = 1.;
+ ret
+ }
+ pub fn GetM11(&self) -> f32 { self._11 }
+ pub fn GetM12(&self) -> f32 { self._12 }
+ pub fn GetM21(&self) -> f32 { self._21 }
+ pub fn GetM22(&self) -> f32 { self._22 }
+ pub fn GetDx(&self) -> f32 { self._41 }
+ pub fn GetDy(&self) -> f32 { self._42 }
+
+ pub fn SetM11(&mut self, r: f32) { self._11 = r}
+ pub fn SetM12(&mut self, r: f32) { self._12 = r}
+ pub fn SetM21(&mut self, r: f32) { self._21 = r}
+ pub fn SetM22(&mut self, r: f32) { self._22 = r}
+ pub fn SetDx(&mut self, dx: f32) { self._41 = dx }
+ pub fn SetDy(&mut self, dy: f32) { self._42 = dy }
+} \ No newline at end of file
diff --git a/third_party/rust/wpf-gpu-raster/src/notes b/third_party/rust/wpf-gpu-raster/src/notes
new file mode 100644
index 0000000000..2737f1903e
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/src/notes
@@ -0,0 +1,12 @@
+crossing goto
+./MultiSpaceRectF.inl:70:5: error: call to implicitly-deleted default constructor of 'union (anonymous union at ./MultiSpaceRectF.inl:138:5)'
+
+
+Rust conversion
+---------------
+CEdge is a singly linked list
+
+Future
+------
+When flattening curves, if we try to flatten at integer values
+we can avoid the ComplexSpan code path.
diff --git a/third_party/rust/wpf-gpu-raster/src/nullable_ref.rs b/third_party/rust/wpf-gpu-raster/src/nullable_ref.rs
new file mode 100644
index 0000000000..1e8389e5b7
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/src/nullable_ref.rs
@@ -0,0 +1,53 @@
+use std::{marker::PhantomData, ops::Deref};
+
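+/// A copyable, possibly-null shared reference with a lifetime attached,
+/// used when porting the C++ code's nullable pointers. `null()` is unsafe
+/// because the resulting `Ref` must not be dereferenced.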
+pub struct Ref<'a, T> {
+ ptr: *const T,
+ _phantom: PhantomData<&'a T>
+}
+
+impl<'a, T> Copy for Ref<'a, T> { }
+
+impl<'a, T> Clone for Ref<'a, T> {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+impl<'a, T> Ref<'a, T> {
+ pub fn new(p: &'a T) -> Self {
+ Ref { ptr: p as *const T, _phantom: PhantomData}
+ }
+ pub unsafe fn null() -> Self {
+ Ref { ptr: std::ptr::null(), _phantom: PhantomData}
+ }
+ pub fn is_null(&self) -> bool {
+ self.ptr.is_null()
+ }
+ pub fn get_ref(self) -> &'a T {
+ unsafe { &*self.ptr }
+ }
+}
+
+impl<'a, T> PartialEq for Ref<'a, T> {
+ fn eq(&self, other: &Self) -> bool {
+ self.ptr == other.ptr && self._phantom == other._phantom
+ }
+}
+
+impl<'a, T> PartialOrd for Ref<'a, T> {
+ fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+ match self.ptr.partial_cmp(&other.ptr) {
+ Some(core::cmp::Ordering::Equal) => {}
+ ord => return ord,
+ }
+ self._phantom.partial_cmp(&other._phantom)
+ }
+}
+
+impl<'a, T> Deref for Ref<'a, T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*self.ptr }
+ }
+} \ No newline at end of file
diff --git a/third_party/rust/wpf-gpu-raster/src/real.rs b/third_party/rust/wpf-gpu-raster/src/real.rs
new file mode 100644
index 0000000000..a9144ec149
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/src/real.rs
@@ -0,0 +1,163 @@
+pub mod CFloatFPU {
+ // Maximum allowed argument for SmallRound
+ // const sc_uSmallMax: u32 = 0xFFFFF;
+
+ // Binary representation of static_cast<float>(sc_uSmallMax)
+ const sc_uBinaryFloatSmallMax: u32 = 0x497ffff0;
+
+ fn LargeRound(x: f32) -> i32 {
+ //XXX: the SSE2 version is probably slower than a naive SSE4 implementation that can use roundss
+ #[cfg(target_feature = "sse2")]
+ unsafe {
+ #[cfg(target_arch = "x86")]
+ use std::arch::x86::{__m128, _mm_set_ss, _mm_cvtss_si32, _mm_cvtsi32_ss, _mm_sub_ss, _mm_cmple_ss, _mm_store_ss, _mm_setzero_ps};
+ #[cfg(target_arch = "x86_64")]
+ use std::arch::x86_64::{__m128, _mm_set_ss, _mm_cvtss_si32, _mm_cvtsi32_ss, _mm_sub_ss, _mm_cmple_ss, _mm_store_ss, _mm_setzero_ps};
+
+        let given: __m128 = _mm_set_ss(x); // load given value
+        let result = _mm_cvtss_si32(given); // convert it to integer (rounding mode doesn't matter)
+        let rounded: __m128 = _mm_setzero_ps();
+        let rounded = _mm_cvtsi32_ss(rounded, result); // convert back to float
+ let diff = _mm_sub_ss(rounded, given); // diff = (rounded - given)
+ let negHalf = _mm_set_ss(-0.5); // load -0.5f
+ let mask = _mm_cmple_ss(diff, negHalf); // get all-ones if (rounded - given) < -0.5f
+ let mut correction: i32 = 0;
+ _mm_store_ss((&mut correction) as *mut _ as *mut _, mask); // get comparison result as integer
+ return result - correction; // correct the result of rounding
+ }
+ #[cfg(not(target_feature = "sse2"))]
+ return (x + 0.5).floor() as i32;
+ }
+
+
+ //+------------------------------------------------------------------------
+//
+// Function: CFloatFPU::SmallRound
+//
+// Synopsis: Convert given floating point value to nearest integer.
+// Half-integers are rounded up.
+//
+// Important: this routine is fast but restricted:
+// given x should be within (-(0x100000-.5) < x < (0x100000-.5))
+//
+// Details: The implementation looks abnormal, which tends to confuse
+// people. However, it does work, and has been tested
+// thoroughly on x86 and ia64 platforms for literally
+// every possible argument value in the given range.
+//
+// More details:
+// Implementation is based on the knowledge of floating point
+// value representation. This 32-bits value consists of three parts:
+// v & 0x80000000 = sign
+// v & 0x7F800000 = exponent
+// v & 0x007FFFFF = mantissa
+//
+// Let N be a floating point number within -0x400000 <= N <= 0x3FFFFF.
+// The sum (S = 0xC00000 + N) thus will satisfy 0x800000 <= S <= 0xFFFFFF.
+// All the numbers within this range (sometimes referred to as a "binade")
+// have the same position of the most significant bit, i.e. 0x800000.
+// Therefore they are all normalized the same way, so
+// the weights of the mantissa's bits are the same
+// as those of integer numbers. In other words, to get the
+// integer value of a floating point S, when 0x800000 <= S <= 0xFFFFFF,
+// we can just throw away the exponent and sign, and add assumed
+// most significant bit (that is always 1 and therefore is not stored
+// in floating point value):
+// (int)S = (<float S as int> & 0x7FFFFF | 0x800000);
+// To get the given N back as an integer, we need to subtract
+// the value 0xC00000 that was added in order to obtain
+// proper normalization:
+// N = (<float S as int> & 0x7FFFFF | 0x800000) - 0xC00000.
+// or
+// N = (<float S as int> & 0x7FFFFF ) - 0x400000.
+//
+// Hopefully, the text above explains how the
+// following routine works:
+// int SmallRound1(float x)
+// {
+// union
+// {
+// __int32 i;
+// float f;
+// } u;
+//
+// u.f = x + float(0x00C00000);
+// return ((u.i - (int)0x00400000) << 9) >> 9;
+// }
+// Unfortunately it is imperfect, due to the way the FPU
+// rounds intermediate calculation results.
+// By default, rounding mode is set to "nearest".
+// This means that when it calculates N+float(0x00C00000),
+// the 80-bit precise result will not fit in 32-bit float,
+// so some least significant bits will be thrown away.
+// Rounding to nearest means that S consisting of intS + fraction,
+// where 0 <= fraction < 1, will be converted to intS
+// when fraction < 0.5 and to intS+1 if fraction > 0.5.
+// What would happen with fraction exactly equal to 0.5?
+// Smart thing: S will go to intS if intS is even and
+// to intS+1 if intS is odd. In other words, half-integers
+// are rounded to nearest even number.
+// This FPU feature is apparently useful for minimizing
+// the average rounding error when somebody is, say,
+// digitally simulating electrons' behavior in plasma.
+// However for graphics this is not desired.
+//
+// We want to move half-integers up, therefore
+// define SmallRound(x) as {return SmallRound1(x*2+.5) >> 1;}.
+// This may require more comments.
+// Let given x = i+f, where i is integer and f is fraction, 0 <= f < 1.
+// Let's see what y = x*2+.5 is:
+// y = i*2 + (f*2 + .5) = i*2 + g, where g = f*2 + .5;
+// If "f" is in the range 0 <= f < .5 (so correct rounding result should be "i"),
+// then range for "g" is .5 <= g < 1.5. The very first value, .5 will force
+// SmallRound1 result to be "i*2", due to round-to-even rule; the remaining
+// will lead to "i*2+1". The subsequent shift will throw away the extra "1" and give
+// us the desired "i".
+// When "f" is in the range .5 <= f < 1, then 1.5 <= g < 2.5.
+// All these values will round to 2, so SmallRound1 will return (2*i+2),
+// and the final shift will give the desired i+1.
+//
+// To get the final form of the routine we need to transform the combined
+// expression for u.f:
+// (x*2) + .5 + float(0x00C00000) ==
+// (x + (.25 + double(0x00600000)) )*2
+// Note that the factor of "2" means nothing for the following operations,
+// since it affects only exponent bits that are ignored anyway.
+// So we can save some processor cycles avoiding this multiplication.
+//
+// And, the very final beautification:
+// to avoid subtracting 0x00400000 let's ignore this bit.
+// This means that we effectively decrease the available range by 1 bit,
+// but we're chasing performance and find it acceptable.
+// So
+// return ((u.i - (int)0x00400000) << 9) >> 9;
+// is converted to
+// return ((u.i ) << 10) >> 10;
+// Finally, the shift by 10 bits may be combined
+// with the shift by 1 in the definition {return SmallRound1(x*2+.5) >> 1;}:
+// we just shift by 11 bits. That's it.
+//
+//-------------------------------------------------------------------------
+fn SmallRound(x: f32) -> i32
+{
+ //AssertPrecisionAndRoundingMode();
+ debug_assert!(-(0x100000 as f64 -0.5) < x as f64 && (x as f64) < (0x100000 as f64 -0.5));
+
+
+ let fi = (x as f64 + (0x00600000 as f64 + 0.25)) as f32;
+ let result = ((fi.to_bits() as i32) << 10) >> 11;
+
+ debug_assert!(x < (result as f32) + 0.5 && x >= (result as f32) - 0.5);
+ return result;
+}
+
+pub fn Round(x: f32) -> i32
+{
+ // cut off sign
+ let xAbs: u32 = x.to_bits() & 0x7FFFFFFF;
+
+ return if xAbs <= sc_uBinaryFloatSmallMax {SmallRound(x)} else {LargeRound(x)};
+}
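+
+// Illustrative sketch (not part of the original port): sanity checks for the
+// round-half-up contract described in the comments above.
+#[cfg(test)]
+mod tests {
+ #[test]
+ fn half_integers_round_up() {
+ assert_eq!(super::Round(0.5), 1);
+ assert_eq!(super::Round(1.5), 2);
+ assert_eq!(super::Round(-0.5), 0);
+ assert_eq!(super::Round(2.4), 2);
+ }
+}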
+}
+
+macro_rules! TOREAL { ($e: expr) => { $e as REAL } }
diff --git a/third_party/rust/wpf-gpu-raster/src/tri_rasterize.rs b/third_party/rust/wpf-gpu-raster/src/tri_rasterize.rs
new file mode 100644
index 0000000000..28fbf6a73f
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/src/tri_rasterize.rs
@@ -0,0 +1,190 @@
+/* The rasterization code here is based off of piglit/tests/general/triangle-rasterization.cpp:
+
+ /**************************************************************************
+ *
+ * Copyright 2012 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+*/
+
+use std::ops::Index;
+use crate::OutputVertex;
+#[derive(Debug)]
+struct Vertex {
+ x: f32,
+ y: f32,
+ coverage: f32
+}
+#[derive(Debug)]
+struct Triangle {
+ v: [Vertex; 3],
+}
+
+impl Index<usize> for Triangle {
+ type Output = Vertex;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self.v[index]
+ }
+}
+
+// D3D11 mandates 8 bit subpixel precision:
+// https://microsoft.github.io/DirectX-Specs/d3d/archive/D3D11_3_FunctionalSpec.htm#CoordinateSnapping
+const FIXED_SHIFT: i32 = 8;
+const FIXED_ONE: f32 = (1 << FIXED_SHIFT) as f32;
+
+/* Proper rounding of float to integer */
+fn iround(mut v: f32) -> i64 {
+ if v > 0.0 {
+ v += 0.5;
+ }
+ if v < 0.0 {
+ v -= 0.5;
+ }
+ return v as i64
+}
+
+/* Based on http://devmaster.net/forums/topic/1145-advanced-rasterization */
+fn rast_triangle(buffer: &mut [u8], width: usize, height: usize, tri: &Triangle) {
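+ // Sample at pixel centers: shifting the coordinates by -0.5 makes integer
+ // pixel (x, y) correspond to the sample point (x + 0.5, y + 0.5).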
+ let center_offset = -0.5;
+
+ let mut coverage1 = tri[0].coverage;
+ let mut coverage2 = tri[1].coverage;
+ let mut coverage3 = tri[2].coverage;
+
+ /* fixed point coordinates */
+ let mut x1 = iround(FIXED_ONE * (tri[0].x + center_offset));
+ let x2 = iround(FIXED_ONE * (tri[1].x + center_offset));
+ let mut x3 = iround(FIXED_ONE * (tri[2].x + center_offset));
+
+ let mut y1 = iround(FIXED_ONE * (tri[0].y + center_offset));
+ let y2 = iround(FIXED_ONE * (tri[1].y + center_offset));
+ let mut y3 = iround(FIXED_ONE * (tri[2].y + center_offset));
+
+
+ /* Force correct vertex order */
+ let cross = (x2 - x1) * (y3 - y2) - (y2 - y1) * (x3 - x2);
+ if cross > 0 {
+ std::mem::swap(&mut x1, &mut x3);
+ std::mem::swap(&mut y1, &mut y3);
+ // I don't understand why coverage 2 and 3 are swapped instead of 1 and 3
+ std::mem::swap(&mut coverage2, &mut coverage3);
+ } else {
+ std::mem::swap(&mut coverage1, &mut coverage3);
+ }
+
+ /* Deltas */
+ let dx12 = x1 - x2;
+ let dx23 = x2 - x3;
+ let dx31 = x3 - x1;
+
+ let dy12 = y1 - y2;
+ let dy23 = y2 - y3;
+ let dy31 = y3 - y1;
+
+ /* Fixed-point deltas */
+ let fdx12 = dx12 << FIXED_SHIFT;
+ let fdx23 = dx23 << FIXED_SHIFT;
+ let fdx31 = dx31 << FIXED_SHIFT;
+
+ let fdy12 = dy12 << FIXED_SHIFT;
+ let fdy23 = dy23 << FIXED_SHIFT;
+ let fdy31 = dy31 << FIXED_SHIFT;
+
+ /* Bounding rectangle */
+ let mut minx = x1.min(x2).min(x3) >> FIXED_SHIFT;
+ let mut maxx = x1.max(x2).max(x3) >> FIXED_SHIFT;
+
+ let mut miny = y1.min(y2).min(y3) >> FIXED_SHIFT;
+ let mut maxy = y1.max(y2).max(y3) >> FIXED_SHIFT;
+
+ minx = minx.max(0);
+ maxx = maxx.min(width as i64 - 1);
+
+ miny = miny.max(0);
+ maxy = maxy.min(height as i64 - 1);
+
+ /* Fully clipped? Bail out before the buffer slicing below can go out of bounds. */
+ if minx > maxx || miny > maxy {
+ return;
+ }
+
+ /* Half-edge constants */
+ let mut c1 = dy12 * x1 - dx12 * y1;
+ let mut c2 = dy23 * x2 - dx23 * y2;
+ let mut c3 = dy31 * x3 - dx31 * y3;
+
+ /* Correct for top-left filling convention */
+ if dy12 < 0 || (dy12 == 0 && dx12 < 0) { c1 += 1 }
+ if dy23 < 0 || (dy23 == 0 && dx23 < 0) { c2 += 1 }
+ if dy31 < 0 || (dy31 == 0 && dx31 < 0) { c3 += 1 }
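+ // (biasing c by 1 turns the strict cx > 0 test below into cx >= 0 for
+ // top and left edges, so boundary pixels are owned by exactly one triangle)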
+
+ let mut cy1 = c1 + dx12 * (miny << FIXED_SHIFT) - dy12 * (minx << FIXED_SHIFT);
+ let mut cy2 = c2 + dx23 * (miny << FIXED_SHIFT) - dy23 * (minx << FIXED_SHIFT);
+ let mut cy3 = c3 + dx31 * (miny << FIXED_SHIFT) - dy31 * (minx << FIXED_SHIFT);
+ //dbg!(minx, maxx, tri, cross);
+ /* Perform rasterization */
+ let mut buffer = &mut buffer[miny as usize * width..];
+ for _y in miny..=maxy {
+ let mut cx1 = cy1;
+ let mut cx2 = cy2;
+ let mut cx3 = cy3;
+
+ for x in minx..=maxx {
+ if cx1 > 0 && cx2 > 0 && cx3 > 0 {
+ // cross is equal to 2*area of the triangle.
+ // we can normalize cx by 2*area to get barycentric coords.
+ let area = cross.abs() as f32;
+ let bary = (cx1 as f32 / area, cx2 as f32 / area, cx3 as f32 / area);
+ let coverages = coverage1 * bary.0 + coverage2 * bary.1 + coverage3 * bary.2;
+ let color = (coverages * 255. + 0.5) as u8;
+
+ buffer[x as usize] = color;
+ }
+
+ cx1 -= fdy12;
+ cx2 -= fdy23;
+ cx3 -= fdy31;
+ }
+
+ cy1 += fdx12;
+ cy2 += fdx23;
+ cy3 += fdx31;
+
+ buffer = &mut buffer[width..];
+ }
+}
+
+pub fn rasterize_to_mask(vertices: &[OutputVertex], width: u32, height: u32) -> Box<[u8]> {
+ let mut mask = vec![0; (width * height) as usize];
+ for n in (0..vertices.len()).step_by(3) {
+ let tri =
+ [&vertices[n], &vertices[n+1], &vertices[n+2]];
+
+ let tri = Triangle { v: [
+ Vertex { x: tri[0].x, y: tri[0].y, coverage: tri[0].coverage},
+ Vertex { x: tri[1].x, y: tri[1].y, coverage: tri[1].coverage},
+ Vertex { x: tri[2].x, y: tri[2].y, coverage: tri[2].coverage}
+ ]
+ };
+ rast_triangle(&mut mask, width as usize, height as usize, &tri);
+ }
+ mask.into_boxed_slice()
+}
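+
+// Illustrative sketch (not part of the original file): rasterizing one
+// triangle into a small mask. OutputVertex is assumed to expose x, y and
+// coverage as in this crate's lib.rs.
+#[cfg(test)]
+mod tests {
+ use crate::OutputVertex;
+ #[test]
+ fn single_triangle() {
+ let tri = [
+ OutputVertex { x: 1.0, y: 1.0, coverage: 1.0 },
+ OutputVertex { x: 7.0, y: 1.0, coverage: 1.0 },
+ OutputVertex { x: 1.0, y: 7.0, coverage: 1.0 },
+ ];
+ let mask = super::rasterize_to_mask(&tri, 8, 8);
+ assert_eq!(mask.len(), 64);
+ assert!(mask.iter().any(|&c| c > 0)); // interior pixels were filled
+ }
+}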
diff --git a/third_party/rust/wpf-gpu-raster/src/types.rs b/third_party/rust/wpf-gpu-raster/src/types.rs
new file mode 100644
index 0000000000..696976f185
--- /dev/null
+++ b/third_party/rust/wpf-gpu-raster/src/types.rs
@@ -0,0 +1,181 @@
+pub(crate) type LONG = i32;
+pub(crate) type INT = i32;
+pub(crate) type UINT = u32;
+pub(crate) type ULONG = u32;
+pub(crate) type DWORD = ULONG;
+pub(crate) type WORD = u16;
+pub(crate) type LONGLONG = i64;
+pub(crate) type ULONGLONG = u64;
+pub(crate) type BYTE = u8;
+pub(crate) type FLOAT = f32;
+pub(crate) type REAL = FLOAT;
+pub(crate) type HRESULT = LONG;
+
+pub(crate) const S_OK: HRESULT = 0;
+pub(crate) const INTSAFE_E_ARITHMETIC_OVERFLOW: HRESULT = 0x80070216u32 as HRESULT;
+pub(crate) const WGXERR_VALUEOVERFLOW: HRESULT = INTSAFE_E_ARITHMETIC_OVERFLOW;
+pub(crate) const WINCODEC_ERR_VALUEOVERFLOW: HRESULT = INTSAFE_E_ARITHMETIC_OVERFLOW;
+const fn MAKE_HRESULT(sev: LONG, fac: LONG, code: LONG) -> HRESULT {
+ (sev << 31) | (fac << 16) | code
+}
+
+const FACILITY_WGX: LONG = 0x898;
+
+
+const fn MAKE_WGXHR(sev: LONG, code: LONG) -> HRESULT {
+ MAKE_HRESULT(sev, FACILITY_WGX, code)
+}
+
+const fn MAKE_WGXHR_ERR(code: LONG) -> HRESULT {
+ MAKE_WGXHR(1, code)
+}
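+
+// For example (illustrative): MAKE_WGXHR_ERR(0x00A) sets the severity bit and
+// the WGX facility, yielding 0x8898_000A when the HRESULT is viewed as a u32.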
+
+pub const WGXHR_CLIPPEDTOEMPTY: HRESULT = MAKE_WGXHR(0, 1);
+pub const WGXHR_EMPTYFILL: HRESULT = MAKE_WGXHR(0, 2);
+pub const WGXHR_INTERNALTEMPORARYSUCCESS: HRESULT = MAKE_WGXHR(0, 3);
+pub const WGXHR_RESETSHAREDHANDLEMANAGER: HRESULT = MAKE_WGXHR(0, 4);
+
+pub const WGXERR_BADNUMBER: HRESULT = MAKE_WGXHR_ERR(0x00A); // 4438
+
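+// Note: unlike the Win32 FAILED macro (which tests hr < 0), this port treats
+// any value other than S_OK, including success HRESULTs, as failure.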
+pub fn FAILED(hr: HRESULT) -> bool {
+ hr != S_OK
+}
+pub trait NullPtr {
+ fn make() -> Self;
+}
+
+impl<T> NullPtr for *mut T {
+ fn make() -> Self {
+ std::ptr::null_mut()
+ }
+}
+
+impl<T> NullPtr for *const T {
+ fn make() -> Self {
+ std::ptr::null()
+ }
+}
+
+pub fn NULL<T: NullPtr>() -> T {
+ T::make()
+}
+#[derive(Default, Clone)]
+pub struct RECT {
+ pub left: LONG,
+ pub top: LONG,
+ pub right: LONG,
+ pub bottom: LONG,
+}
+#[derive(Default, Clone, Copy, PartialEq, Eq)]
+pub struct POINT {
+ pub x: LONG,
+ pub y: LONG
+}
+#[derive(Clone, Copy)]
+pub struct MilPoint2F
+{
+ pub X: FLOAT,
+ pub Y: FLOAT,
+}
+
+#[derive(Default, Clone)]
+pub struct MilPointAndSizeL
+{
+ pub X: INT,
+ pub Y: INT,
+ pub Width: INT,
+ pub Height: INT,
+}
+
+pub type CMILSurfaceRect = RECT;
+
+#[derive(PartialEq)]
+pub enum MilAntiAliasMode {
+ None = 0,
+ EightByEight = 1,
+}
+#[derive(PartialEq, Clone, Copy)]
+pub enum MilFillMode {
+ Alternate = 0,
+ Winding = 1,
+}
+
+pub const PathPointTypeStart: u8 = 0; // move, 1 point
+pub const PathPointTypeLine: u8 = 1; // line, 1 point
+pub const PathPointTypeBezier: u8 = 3; // default Bezier (= cubic Bezier), 3 points
+pub const PathPointTypePathTypeMask: u8 = 0x07; // type mask (lowest 3 bits).
+pub const PathPointTypeCloseSubpath: u8 = 0x80; // closed flag
+
+
+pub type DynArray<T> = Vec<T>;
+
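+// Shims mapping the C++ DynArray API onto Vec. Note that SetCount can only
+// shrink the array; the assert below rejects growth.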
+pub trait DynArrayExts<T> {
+ fn Reset(&mut self, shrink: bool);
+ fn GetCount(&self) -> usize;
+ fn SetCount(&mut self, count: usize);
+ fn GetDataBuffer(&self) -> &[T];
+}
+
+impl<T> DynArrayExts<T> for DynArray<T> {
+ fn Reset(&mut self, shrink: bool) {
+ self.clear();
+ if shrink {
+ self.shrink_to_fit();
+ }
+ }
+ fn GetCount(&self) -> usize {
+ self.len()
+ }
+ fn SetCount(&mut self, count: usize) {
+ assert!(count <= self.len());
+ self.truncate(count);
+ }
+
+ fn GetDataBuffer(&self) -> &[T] {
+ self
+ }
+}
+
+pub struct CHwPipelineBuilder;
+
+pub mod CoordinateSpace {
+ #[derive(Default, Clone)]
+ pub struct Shape;
+ #[derive(Default, Clone)]
+ pub struct Device;
+}
+
+pub trait IShapeData {
+ fn GetFillMode(&self) -> MilFillMode;
+}
+
+pub type MilVertexFormat = DWORD;
+
+pub enum MilVertexFormatAttribute {
+ MILVFAttrNone = 0x0,
+ MILVFAttrXY = 0x1,
+ MILVFAttrZ = 0x2,
+ MILVFAttrXYZ = 0x3,
+ MILVFAttrNormal = 0x4,
+ MILVFAttrDiffuse = 0x8,
+ MILVFAttrSpecular = 0x10,
+ MILVFAttrUV1 = 0x100,
+ MILVFAttrUV2 = 0x300,
+ MILVFAttrUV3 = 0x700,
+ MILVFAttrUV4 = 0xf00,
+ MILVFAttrUV5 = 0x1f00,
+ MILVFAttrUV6 = 0x3f00,
+ MILVFAttrUV7 = 0x7f00,
+ MILVFAttrUV8 = 0xff00,
+}
+
+pub struct CHwPipeline;
+
+pub struct CBufferDispenser;
+#[derive(Default)]
+pub struct PointXYA
+{
+ pub x: f32,
+ pub y: f32,
+ pub a: f32,
+}