path: root/third_party/rust/aa-stroke
author     Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 19:33:14 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 19:33:14 +0000
commit     36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree       105e8c98ddea1c1e4784a60a5a6410fa416be2de /third_party/rust/aa-stroke
parent     Initial commit. (diff)
download   firefox-esr-36d22d82aa202bb199967e9512281e9a53db42c9.tar.xz
           firefox-esr-36d22d82aa202bb199967e9512281e9a53db42c9.zip
Adding upstream version 115.7.0esr. (upstream/115.7.0esr, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/aa-stroke')
-rw-r--r--  third_party/rust/aa-stroke/.cargo-checksum.json        1
-rw-r--r--  third_party/rust/aa-stroke/.github/workflows/rust.yml  20
-rw-r--r--  third_party/rust/aa-stroke/Cargo.toml                  27
-rw-r--r--  third_party/rust/aa-stroke/README.md                   12
-rw-r--r--  third_party/rust/aa-stroke/examples/simple.rs          97
-rw-r--r--  third_party/rust/aa-stroke/src/bezierflattener.rs      828
-rw-r--r--  third_party/rust/aa-stroke/src/c_bindings.rs           101
-rw-r--r--  third_party/rust/aa-stroke/src/lib.rs                  910
-rw-r--r--  third_party/rust/aa-stroke/src/tri_rasterize.rs        190
9 files changed, 2186 insertions, 0 deletions
diff --git a/third_party/rust/aa-stroke/.cargo-checksum.json b/third_party/rust/aa-stroke/.cargo-checksum.json
new file mode 100644
index 0000000000..c00e12abd6
--- /dev/null
+++ b/third_party/rust/aa-stroke/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{".github/workflows/rust.yml":"e859b12cfed145b66e4fbf1571dde15880359717ca5b0a9720341229183f2c6f","Cargo.toml":"f507cac11c3c26af28420d68ec3748a5453322d51ef1379a340fdd3b1c9b187a","README.md":"60b34cfa653114d5054009696df2ed2ea1d4926a6bc312d0cac4b84845c2beff","examples/simple.rs":"c196e79568fe4be31a08374aa451c70c9377db5428aef924a985e069c12ed91e","src/bezierflattener.rs":"4268d18ef80899d4b5d1fdbb0ed23fb4a4f9a6197b0796cb7a5e55194437a67b","src/c_bindings.rs":"23890f7bb5d40d87d72b28842821ada7b2e1509a3b37c9af8ed0055036ceadcc","src/lib.rs":"fb46f09d8ed2a53d4a6770a6f1bb2ff1a9df736c5c6d390ba159fefafbeec308","src/tri_rasterize.rs":"fb6f595ab9340d8ea6429b41638c378bbd772c8e4d8f7793e225624c12cd3a21"},"package":null} \ No newline at end of file
diff --git a/third_party/rust/aa-stroke/.github/workflows/rust.yml b/third_party/rust/aa-stroke/.github/workflows/rust.yml
new file mode 100644
index 0000000000..e78f9416c9
--- /dev/null
+++ b/third_party/rust/aa-stroke/.github/workflows/rust.yml
@@ -0,0 +1,20 @@
+name: Rust
+
+on:
+ push:
+ pull_request:
+
+env:
+ CARGO_TERM_COLOR: always
+
+jobs:
+ build:
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v3
+ - name: Build
+ run: cargo build --verbose --features=c_bindings
+ - name: Run tests
+ run: cargo test --verbose --features=c_bindings
diff --git a/third_party/rust/aa-stroke/Cargo.toml b/third_party/rust/aa-stroke/Cargo.toml
new file mode 100644
index 0000000000..55928c67de
--- /dev/null
+++ b/third_party/rust/aa-stroke/Cargo.toml
@@ -0,0 +1,27 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+name = "aa-stroke"
+version = "0.1.0"
+readme = "README.md"
+license = "MIT"
+
+[dependencies]
+euclid = "0.22.7"
+
+[dev-dependencies]
+png = "0.17.7"
+
+[features]
+c_bindings = []
+default = ["c_bindings"]
diff --git a/third_party/rust/aa-stroke/README.md b/third_party/rust/aa-stroke/README.md
new file mode 100644
index 0000000000..d52cc1765a
--- /dev/null
+++ b/third_party/rust/aa-stroke/README.md
@@ -0,0 +1,12 @@
+Takes a path and produces a triangle mesh that corresponds to the antialiased stroked path.
+
+The approach here is naive and only works for opaquely filled paths. Overlapping areas can
+end up with seams or otherwise incorrect coverage values.
+
+Transforms with uniform scale can be supported by scaling the input points and the stroke width
+before passing them to the stroker. Other transforms are not currently (or ever?) supported.
+
+### TODO
+- using triangle strips instead of triangle lists
+- handle curves more efficiently than just flattening to lines
+- handle cusps of curves more correctly
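
The README's note about uniform-scale transforms amounts to scaling both the input geometry and the stroke width before handing them to the stroker. A minimal sketch of that idea, assuming only the public Stroker/StrokeStyle/Point API shown in examples/simple.rs below; the scale factor and coordinates are made up for illustration:

use aa_stroke::{Point, StrokeStyle, Stroker, Vertex};

// Stroke a nominally 4px-wide segment under a uniform 2x transform by
// pre-scaling both the points and the stroke width, then stroking in
// device space.
fn stroke_scaled(scale: f32) -> Vec<Vertex> {
    let mut stroker = Stroker::new(&StrokeStyle {
        width: 4.0 * scale,
        ..Default::default()
    });
    stroker.move_to(Point::new(10.0 * scale, 10.0 * scale), false);
    stroker.line_to_capped(Point::new(90.0 * scale, 40.0 * scale));
    stroker.finish()
}

// Usage: let mesh = stroke_scaled(2.0);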
diff --git a/third_party/rust/aa-stroke/examples/simple.rs b/third_party/rust/aa-stroke/examples/simple.rs
new file mode 100644
index 0000000000..84cade4a16
--- /dev/null
+++ b/third_party/rust/aa-stroke/examples/simple.rs
@@ -0,0 +1,97 @@
+use aa_stroke::{StrokeStyle, LineCap, LineJoin, Point, Stroker, tri_rasterize::rasterize_to_mask};
+
+
+
+fn write_image(data: &[u8], path: &str, width: u32, height: u32) {
+ use std::path::Path;
+ use std::fs::File;
+ use std::io::BufWriter;
+
+ /*let mut png_data: Vec<u8> = vec![0; (width * height * 3) as usize];
+ let mut i = 0;
+ for pixel in data {
+ png_data[i] = ((pixel >> 16) & 0xff) as u8;
+ png_data[i + 1] = ((pixel >> 8) & 0xff) as u8;
+ png_data[i + 2] = ((pixel >> 0) & 0xff) as u8;
+ i += 3;
+ }*/
+
+
+ let path = Path::new(path);
+ let file = File::create(path).unwrap();
+ let w = &mut BufWriter::new(file);
+
+ let mut encoder = png::Encoder::new(w, width, height);
+ encoder.set_color(png::ColorType::Grayscale);
+ encoder.set_depth(png::BitDepth::Eight);
+ let mut writer = encoder.write_header().unwrap();
+
+ writer.write_image_data(&data).unwrap(); // Save
+}
+
+
+// WpfGfx uses CShapeBase which has a set of Figures
+// Figures have FigureFlags which include FigureFlagClosed
+// so that we can know ahead of time whether the figure/subpath
+// is closed
+
+// How do we handle transformed paths? D2D seems to only support transforms that
+// can be applied before stroking. (ones with uniform scale?)
+fn main() {
+ let mut stroker = Stroker::new(&StrokeStyle{
+ cap: LineCap::Round,
+ join: LineJoin::Bevel,
+ width: 20.,
+ ..Default::default()});
+ stroker.move_to(Point::new(20., 20.), false);
+ stroker.line_to(Point::new(100., 100.));
+ stroker.line_to_capped(Point::new(110., 20.));
+
+
+ stroker.move_to(Point::new(120., 20.), true);
+ stroker.line_to(Point::new(120., 50.));
+ stroker.line_to(Point::new(140., 50.));
+ stroker.close();
+
+ stroker.move_to(Point::new(20., 160.), true);
+ stroker.curve_to(Point::new(100., 160.), Point::new(100., 180.), Point::new(20., 180.));
+ stroker.close();
+
+ let stroked = stroker.finish();
+ dbg!(&stroked);
+
+ let mask = rasterize_to_mask(&stroked, 200, 200);
+ write_image(&mask,"out.png", 200, 200);
+/*
+ struct Target;
+ impl CFlatteningSink for Target {
+ fn AcceptPointAndTangent(&mut self,
+ pt: &GpPointR,
+ // The point
+ vec: &GpPointR,
+ // The tangent there
+ fLast: bool
+ // Is this the last point on the curve?
+ ) -> HRESULT {
+ todo!()
+ }
+
+ fn AcceptPoint(&mut self,
+ pt: &GpPointR,
+ // The point
+ t: f64,
+ // Parameter we're at
+ fAborted: &mut bool) -> HRESULT {
+ println!("{} {}", pt.x, pt.y);
+ return S_OK;
+ }
+ }
+ let bezier = CBezier::new([GpPointR { x: 0., y: 0. },
+ GpPointR { x: 0., y: 0. },
+ GpPointR { x: 0., y: 0. },
+ GpPointR { x: 100., y: 100. }]);
+ let mut t = Target{};
+ let mut f = CBezierFlattener::new(&bezier, &mut t, 0.1);
+ f.Flatten(false);*/
+
+} \ No newline at end of file
diff --git a/third_party/rust/aa-stroke/src/bezierflattener.rs b/third_party/rust/aa-stroke/src/bezierflattener.rs
new file mode 100644
index 0000000000..fd1ab21839
--- /dev/null
+++ b/third_party/rust/aa-stroke/src/bezierflattener.rs
@@ -0,0 +1,828 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+#![allow(non_snake_case)]
+
+use std::ops::{Sub, Mul, Add, AddAssign, SubAssign, MulAssign, Div};
+
+macro_rules! IFC {
+ ($e: expr) => {
+ assert_eq!($e, S_OK);
+ }
+}
+
+pub type HRESULT = i32;
+
+pub const S_OK: i32 = 0;
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub struct GpPointR {
+ pub x: f64,
+ pub y: f64
+}
+
+impl Sub for GpPointR {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self::Output {
+ GpPointR { x: self.x - rhs.x, y: self.y - rhs.y }
+ }
+}
+
+impl Add for GpPointR {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self::Output {
+ GpPointR { x: self.x + rhs.x, y: self.y + rhs.y }
+ }
+}
+
+impl AddAssign for GpPointR {
+ fn add_assign(&mut self, rhs: Self) {
+ *self = *self + rhs;
+ }
+}
+
+impl SubAssign for GpPointR {
+ fn sub_assign(&mut self, rhs: Self) {
+ *self = *self - rhs;
+ }
+}
+
+impl MulAssign<f64> for GpPointR {
+ fn mul_assign(&mut self, rhs: f64) {
+ *self = *self * rhs;
+ }
+}
+
+
+impl Mul<f64> for GpPointR {
+ type Output = Self;
+
+ fn mul(self, rhs: f64) -> Self::Output {
+ GpPointR { x: self.x * rhs, y: self.y * rhs }
+ }
+}
+
+impl Div<f64> for GpPointR {
+ type Output = Self;
+
+ fn div(self, rhs: f64) -> Self::Output {
+ GpPointR { x: self.x / rhs, y: self.y / rhs }
+ }
+}
+
+
+impl Mul for GpPointR {
+ type Output = f64;
+
+ fn mul(self, rhs: Self) -> Self::Output {
+ self.x * rhs.x + self.y * rhs.y
+ }
+}
+
+impl GpPointR {
+ pub fn ApproxNorm(&self) -> f64 {
+ self.x.abs().max(self.y.abs())
+ }
+ pub fn Norm(&self) -> f64 {
+ self.x.hypot(self.y)
+ }
+}
+
+// This fuzz is relative to the tolerance squared. In other words, a vector
+// whose length is less than .01*tolerance will be considered 0
+const SQ_LENGTH_FUZZ: f64 = 1.0e-4;
+
+// Some of these constants need further thinking
+
+//const FUZZ: f64 = 1.0e-6; // Relative 0
+// Minimum allowed tolerance - should probably be adjusted to the size of the
+// geometry we are rendering, but for now ---
+
+/*
+const FUZZ_DOUBLE: f64 = 1.0e-12; // Double-precision relative 0
+const MIN_TOLERANCE: f64 = 1.0e-6;
+const DEFAULT_FLATTENING_TOLERANCE: f64 = 0.25;*/
+const TWICE_MIN_BEZIER_STEP_SIZE: f64 = 1.0e-3; // The step size in the Bezier flattener should
+ // never go below half this amount.
+//+-----------------------------------------------------------------------------
+//
+
+//
+// $TAG ENGR
+
+// $Module: win_mil_graphics_geometry
+// $Keywords:
+//
+// $Description:
+// Definition of CBezierFlattener.
+//
+// $ENDTAG
+//
+//------------------------------------------------------------------------------
+
+//+-----------------------------------------------------------------------------
+//
+// Class:
+// CFlatteningSink
+//
+// Synopsis:
+// Callback interface for the results of curve flattening
+//
+// Notes:
+// Methods are implemented rather than pure, for callers who do not use all
+// of them.
+//
+//------------------------------------------------------------------------------
+//
+// Definition of CFlatteningSink
+//
+//------------------------------------------------------------------------------
+/*
+struct CFlatteningSink
+{
+public:
+ CFlatteningSink() {}
+
+ virtual ~CFlatteningSink() {}
+
+ virtual HRESULT Begin(
+ __in_ecount(1) const GpPointR &)
+ // First point (transformed)
+ {
+ // Do nothing stub, should not be called
+ RIP("Base class Begin called");
+ return E_NOTIMPL;
+ }
+
+ virtual HRESULT AcceptPoint(
+ __in_ecount(1) const GpPointR &pt,
+ // The point
+ IN GpReal t,
+ // Parameter we're at
+ __out_ecount(1) bool &fAborted)
+ // Set to true to signal aborting
+ {
+ UNREFERENCED_PARAMETER(pt);
+ UNREFERENCED_PARAMETER(t);
+ UNREFERENCED_PARAMETER(fAborted);
+
+ // Do nothing stub, should not be called
+ RIP("Base class AcceptPoint called");
+ return E_NOTIMPL;
+ }
+
+ virtual HRESULT AcceptPointAndTangent(
+ __in_ecount(1) const GpPointR &,
+ //The point
+ __in_ecount(1) const GpPointR &,
+ //The tangent there
+ IN bool fLast) // Is this the last point on the curve?
+ {
+ // Do nothing stub, should not be called
+ RIP("Base class AcceptPointAndTangent called");
+ return E_NOTIMPL;
+ }
+};
+
+
+
+*/
+#[derive(Clone, Debug)]
+
+pub struct CBezier
+{
+ /*
+public:
+ CBezier()
+ {
+ }
+
+ CBezier(
+ __in_ecount(4) const GpPointR *pPt)
+ // The defining Bezier points
+ {
+ Assert(pPt);
+ memcpy(&m_ptB, pPt, 4 * sizeof(GpPointR));
+ }
+
+ CBezier(
+ __in_ecount(1) const CBezier &other)
+ // Another Bezier to copy
+ {
+ Copy(other);
+ }
+
+ void Copy(
+ __in_ecount(1) const CBezier &other)
+ // Another Bezier to copy
+ {
+ memcpy(&m_ptB, other.m_ptB, 4 * sizeof(GpPointR));
+ }
+
+ void Initialize(
+ __in_ecount(1) const GpPointR &ptFirst,
+ // The first Bezier point
+ __in_ecount(3) const GpPointR *pPt)
+ // The remaining 3 Bezier points
+ {
+ m_ptB[0] = ptFirst;
+ memcpy(m_ptB + 1, pPt, 3 * sizeof(GpPointR));
+ }
+
+ __outro_ecount(1) const GpPointR &GetControlPoint(__range(0, 3) UINT i) const
+ {
+ Assert(i < 4);
+ return m_ptB[i];
+ }
+
+ __outro_ecount(1) const GpPointR &GetFirstPoint() const
+ {
+ return m_ptB[0];
+ }
+
+ __outro_ecount(1) const GpPointR &GetLastPoint() const
+ {
+ return m_ptB[3];
+ }
+
+ void GetPoint(
+ _In_ double t,
+ // Parameter value
+ __out_ecount(1) GpPointR &pt) const;
+ // Point there
+
+ void GetPointAndDerivatives(
+ __in double t,
+ // Parameter value
+ __out_ecount(3) GpPointR *pValues) const;
+ // Point, first derivative and second derivative there
+
+ void TrimToStartAt(
+ IN double t); // Parameter value
+
+ void TrimToEndAt(
+ IN double t); // Parameter value
+
+ bool TrimBetween(
+ __in double rStart,
+ // Parameter value for the new start, must be between 0 and 1
+ __in double rEnd);
+ // Parameter value for the new end, must be between 0 and 1
+
+ bool operator ==(__in_ecount(1) const CBezier &other) const
+ {
+ return (m_ptB[0] == other.m_ptB[0]) &&
+ (m_ptB[1] == other.m_ptB[1]) &&
+ (m_ptB[2] == other.m_ptB[2]) &&
+ (m_ptB[3] == other.m_ptB[3]);
+ }
+
+ void AssertEqualOrNaN(__in_ecount(1) const CBezier &other) const
+ {
+ m_ptB[0].AssertEqualOrNaN(other.m_ptB[0]);
+ m_ptB[1].AssertEqualOrNaN(other.m_ptB[1]);
+ m_ptB[2].AssertEqualOrNaN(other.m_ptB[2]);
+ m_ptB[3].AssertEqualOrNaN(other.m_ptB[3]);
+ }
+
+protected:
+ */
+ // Data
+ m_ptB: [GpPointR; 4],
+ // The defining Bezier points
+}
+
+impl CBezier {
+ pub fn new(curve: [GpPointR; 4]) -> Self {
+ Self { m_ptB: curve }
+ }
+
+ pub fn is_degenerate(&self) -> bool {
+ self.m_ptB[0] == self.m_ptB[1] &&
+ self.m_ptB[0] == self.m_ptB[2] &&
+ self.m_ptB[0] == self.m_ptB[3]
+ }
+}
+
+pub trait CFlatteningSink {
+ fn AcceptPointAndTangent(&mut self,
+ pt: &GpPointR,
+ // The point
+ vec: &GpPointR,
+ // The tangent there
+ fLast: bool
+ // Is this the last point on the curve?
+ ) -> HRESULT;
+
+ fn AcceptPoint(&mut self,
+ pt: &GpPointR,
+ // The point
+ t: f64,
+ // Parameter we're at
+ fAborted: &mut bool,
+ lastPoint: bool
+ ) -> HRESULT;
+}
+
+//+-----------------------------------------------------------------------------
+//
+// Class:
+// CBezierFlattener
+//
+// Synopsis:
+// Generates a polygonal approximation to a given Bezier curve
+//
+//------------------------------------------------------------------------------
+pub struct CBezierFlattener<'a>
+{
+ bezier: CBezier,
+ // Flattening defining data
+ m_pSink: &'a mut dyn CFlatteningSink, // The recipient of the flattening data
+ m_rTolerance: f64, // Prescribed tolerance
+ m_fWithTangents: bool, // Generate tangent vectors if true
+ m_rQuarterTolerance: f64,// Prescribed tolerance/4 (for doubling the step)
+ m_rFuzz: f64, // Computational zero
+
+ // Flattening working data
+ m_ptE: [GpPointR; 4], // The moving basis of the curve definition
+ m_cSteps: i32, // The number of steps left to the end of the curve
+ m_rParameter: f64, // Parameter value
+ m_rStepSize: f64, // Step size in parameter domain
+}
+impl<'a> CBezierFlattener<'a> {
+ /*fn new(
+ __in_ecount_opt(1) CFlatteningSink *pSink,
+ // The recipient of the flattened data
+ IN GpReal rTolerance)
+ // Flattening tolerance
+ {
+ Initialize(pSink, rTolerance);
+ }*/
+/*
+ void SetTarget(__in_ecount_opt(1) CFlatteningSink *pSink)
+ {
+ m_pSink = pSink;
+ }
+
+ void Initialize(
+ __in_ecount_opt(1) CFlatteningSink *pSink,
+ // The recipient of the flattened data
+ IN GpReal rTolerance);
+ // Flattening tolerance
+
+ void SetPoint(
+ __in UINT i,
+ // index of the point (must be between 0 and 3)
+ __in_ecount(1) const GpPointR &pt)
+ // point value
+ {
+ Assert(i < 4);
+ m_ptB[i] = pt;
+ }
+
+ HRESULT GetFirstTangent(
+ __out_ecount(1) GpPointR &vecTangent) const;
+ // Tangent vector there
+
+ GpPointR GetLastTangent() const;
+
+ HRESULT Flatten(
+ IN bool fWithTangents); // Return tangents with the points if true
+
+private:
+ // Disallow copy constructor
+ CBezierFlattener(__in_ecount(1) const CBezierFlattener &)
+ {
+ RIP("CBezierFlattener copy constructor reached.");
+ }
+
+protected:
+*/
+/* fn Step(
+ __out_ecount(1) bool &fAbort); // Set to true if flattening should be aborted
+
+ fn HalveTheStep();
+
+ fn TryDoubleTheStep();*/
+
+}
+
+
+
+
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+
+//+-----------------------------------------------------------------------------
+//
+
+//
+// $TAG ENGR
+
+// $Module: win_mil_graphics_geometry
+// $Keywords:
+//
+// $Description:
+// Implementation of CBezierFlattener.
+//
+// $ENDTAG
+//
+//------------------------------------------------------------------------------
+
+impl<'a> CBezierFlattener<'a> {
+/////////////////////////////////////////////////////////////////////////////////
+//
+// Implementation of CBezierFlattener
+
+//+-----------------------------------------------------------------------------
+//
+// Member:
+// CBezierFlattener::Initialize
+//
+// Synopsis:
+// Initialize the sink and tolerance
+//
+//------------------------------------------------------------------------------
+pub fn new(bezier: &CBezier,
+ pSink: &'a mut dyn CFlatteningSink,
+ // The recipient of the flattened data
+ rTolerance: f64) // Flattening tolerance
+ -> Self
+{
+ let mut result = CBezierFlattener {
+ bezier: bezier.clone(),
+ // Flattening defining data
+ m_pSink: pSink, // The recipient of the flattening data
+ m_rTolerance: 0., // Prescribed tolerance
+ m_fWithTangents: false, // Generate tangent vectors if true
+ m_rQuarterTolerance: 0.,// Prescribed tolerance/4 (for doubling the step)
+ m_rFuzz: 0., // Computational zero
+
+ // Flattening working data
+ m_ptE: [GpPointR { x: 0., y: 0.}; 4], // The moving basis of the curve definition
+ m_cSteps: 0, // The number of steps left to the end of the curve
+ m_rParameter: 0., // Parameter value
+ m_rStepSize: 0., // Step size in parameter domain
+ };
+
+ // If rTolerance == NaN or less than 0, we'll treat it as 0.
+ result.m_rTolerance = if rTolerance >= 0.0 { rTolerance } else { 0.0 };
+ result.m_rFuzz = rTolerance * rTolerance * SQ_LENGTH_FUZZ;
+
+ // The error is tested on max(|e2|, |e3|), which represents 6 times the actual error, so:
+ result.m_rTolerance *= 6.;
+ result.m_rQuarterTolerance = result.m_rTolerance * 0.25;
+ result
+}
+
+//+-----------------------------------------------------------------------------
+//
+// Member:
+// CBezierFlattener::Flatten
+//
+// Synopsis:
+// Flatten this curve
+//
+// Notes:
+
+// The algorithm is described in detail in the 1995 patent # 5367617 "System and
+// method of hybrid forward differencing to render Bezier splines" to be found
+// on the Microsoft legal dept. web site (LCAWEB). Additional references are:
+// Lien, Shantz and Vaughan Pratt, "Adaptive Forward Differencing for
+// Rendering Curves and Surfaces", Computer Graphics, July 1987
+// Chang and Shantz, "Rendering Trimmed NURBS with Adaptive Forward
+// Differencing", Computer Graphics, August 1988
+// Foley and Van Dam, "Fundamentals of Interactive Computer Graphics"
+//
+// The basic idea is to replace the Bernstein basis (underlying Bezier curves)
+// with the Hybrid Forward Differencing (HFD) basis, which is more efficient
+// for flattening. For each of the 3 actions - Step, Halve and Double (the step
+// size) - this basis affords very efficient formulas for computing coefficients
+// for the new interval.
+//
+// The coefficients of the HFD basis are defined in terms of the Bezier
+// coefficients as follows:
+//
+// e0 = p0, e1 = p3 - p0, e2 = 6(p1 - 2p2 + p3), e3 = 6(p0 - 2p1 + p2),
+//
+// but formulas may be easier to understand by going through the power basis
+// representation: f(t) = a + b*t + c*t^2 + d*t^3.
+//
+// The conversion is then:
+// e0 = a
+// e1 = f(1) - f(0) = b + c + d
+// e2 = f"(1) = 2c + 6d
+// e3 = f"(0) = 2c
+//
+// This is inverted to:
+// a = e0
+// c = e3 / 2
+// d = (e2 - 2c) / 6 = (e2 - e3) / 6
+// b = e1 - c - d = e1 - e2 / 6 - e3 / 3
+//
+// a, b, c, d for the new (halved, doubled or forwarded) interval are derived
+// and then converted to e0, e1, e2, e3 using these relationships.
+//
+// An exact integer version is implemented in Bezier.h and Bezier.cpp.
+//
+//------------------------------------------------------------------------------
+
+
+pub fn Flatten(&mut self,
+ fWithTangents: bool) // Return tangents with the points if true
+ -> HRESULT
+{
+
+ let hr = S_OK;
+ let mut fAbort = false;
+
+ /*if (!self.m_pSink)
+ {
+ return E_UNEXPECTED;
+ }*/
+
+ self.m_fWithTangents = fWithTangents;
+
+ self.m_cSteps = 1;
+
+ self.m_rParameter = 0.;
+ self.m_rStepSize = 1.;
+
+ // Compute the HFD basis
+ self.m_ptE[0] = self.bezier.m_ptB[0];
+ self.m_ptE[1] = self.bezier.m_ptB[3] - self.bezier.m_ptB[0];
+ self.m_ptE[2] = (self.bezier.m_ptB[1] - self.bezier.m_ptB[2] * 2. + self.bezier.m_ptB[3]) * 6.; // The second derivative at curve end
+ self.m_ptE[3] = (self.bezier.m_ptB[0] - self.bezier.m_ptB[1] * 2. + self.bezier.m_ptB[2]) * 6.; // The second derivative at curve start
+
+ // Determine the initial step size
+ self.m_cSteps = 1;
+ while ((self.m_ptE[2].ApproxNorm() > self.m_rTolerance) || (self.m_ptE[3].ApproxNorm() > self.m_rTolerance)) &&
+ (self.m_rStepSize > TWICE_MIN_BEZIER_STEP_SIZE)
+
+ {
+ self.HalveTheStep();
+ }
+
+ while self.m_cSteps > 1
+ {
+ IFC!(self.Step(&mut fAbort));
+ if fAbort {
+ return hr;
+ }
+
+ // E[3] was already tested as E[2] in the previous step
+ if self.m_ptE[2].ApproxNorm() > self.m_rTolerance &&
+ self.m_rStepSize > TWICE_MIN_BEZIER_STEP_SIZE
+ {
+ // Halving the step once is provably sufficient (see Notes above), so ---
+ self.HalveTheStep();
+ }
+ else
+ {
+ // --- but the step can possibly be more than doubled, hence the while loop
+ while self.TryDoubleTheStep() {
+ continue;
+ }
+ }
+ }
+
+ // Last point
+ if self.m_fWithTangents
+ {
+ IFC!(self.m_pSink.AcceptPointAndTangent(&self.bezier.m_ptB[3], &self.GetLastTangent(), true /* last point */));
+ }
+ else
+ {
+ IFC!(self.m_pSink.AcceptPoint(&self.bezier.m_ptB[3], 1., &mut fAbort, true));
+ }
+
+ return hr;
+}
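
The Notes block above gives the Bezier-to-HFD conversion (e0 = p0, e1 = p3 - p0, e2 = 6(p1 - 2p2 + p3), e3 = 6(p0 - 2p1 + p2)) together with its power-basis justification. A hypothetical standalone test, not part of the upstream file (it would live at module scope rather than inside this impl), that checks those relations numerically using the GpPointR operators defined above:

#[test]
fn hfd_basis_matches_power_basis() {
    let p = [GpPointR { x: 1., y: 2. }, GpPointR { x: 4., y: 0. },
             GpPointR { x: 6., y: 5. }, GpPointR { x: 9., y: 3. }];

    // HFD coefficients straight from the Bezier points, as in Flatten():
    let e1 = p[3] - p[0];
    let e2 = (p[1] - p[2] * 2. + p[3]) * 6.; // second derivative at the end
    let e3 = (p[0] - p[1] * 2. + p[2]) * 6.; // second derivative at the start

    // Power basis f(t) = a + b*t + c*t^2 + d*t^3 of the same cubic:
    let b = (p[1] - p[0]) * 3.;
    let c = (p[0] - p[1] * 2. + p[2]) * 3.;
    let d = p[3] - p[0] + (p[1] - p[2]) * 3.;

    // The stated relations: e1 = b + c + d, e2 = 2c + 6d, e3 = 2c.
    assert_eq!(e1, b + c + d);
    assert_eq!(e2, c * 2. + d * 6.);
    assert_eq!(e3, c * 2.);
}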
+//+-----------------------------------------------------------------------------
+//
+// Member:
+// CBezierFlattener::Step
+//
+// Synopsis:
+// Step forward on the polygonal approximation of the curve
+//
+// Notes:
+// Taking a step means replacing a,b,c,d by coefficients of g(t) = f(t+1).
+// Express those in terms of a,b,c,d and convert to e0, e1, e2, e3 to get:
+//
+// New e0 = e0 + e1
+// New e1 = e1 + e2
+// New e2 = 2e2 - e3
+// New e3 = e2
+//
+// The patent application (see above) explains why.
+//
+// Getting a tangent vector is a minor enhancement along the same lines:
+// f'(0) = b = 6e1 - e2 - 2e3.
+//
+//------------------------------------------------------------------------------
+
+fn Step(&mut self,
+ fAbort: &mut bool) -> HRESULT // Set to true if flattening should be aborted, untouched otherwise
+{
+ let hr = S_OK;
+
+ // Compute the basis for the same curve on the next interval
+ let mut pt;
+
+ self.m_ptE[0] += self.m_ptE[1];
+ pt = self.m_ptE[2];
+ self.m_ptE[1] += pt;
+ self.m_ptE[2] += pt; self.m_ptE[2] -= self.m_ptE[3];
+ self.m_ptE[3] = pt;
+
+ // Increment the parameter
+ self.m_rParameter += self.m_rStepSize;
+
+ // Generate the start point of the new interval
+ if self.m_fWithTangents
+ {
+ // Compute the tangent there
+ pt = self.m_ptE[1] * 6. - self.m_ptE[2] - self.m_ptE[3] * 2.; // = twice the derivative at E[0]
+ IFC!(self.m_pSink.AcceptPointAndTangent(&self.m_ptE[0], &pt, false /* not the last point */));
+ }
+ else
+ {
+ IFC!(self.m_pSink.AcceptPoint(&self.m_ptE[0], self.m_rParameter, fAbort, false));
+ }
+
+ self.m_cSteps-=1;
+ return hr;
+}
+//+-----------------------------------------------------------------------------
+//
+// Member:
+// CBezierFlattener::HalveTheStep
+//
+// Synopsis:
+// Halve the size of the step
+//
+// Notes:
+// Halving the step means replacing a,b,c,d by coefficients of g(t) =
+// f(t/2). Express those in terms of a,b,c,d and convert to e0, e1, e2, e3
+// to get:
+//
+// New e0 = e0
+// New e1 = (e1 - e2) / 2
+// New e2 = (e2 + e3) / 8
+// New e3 = e3 / 4
+//
+// The patent application (see above) explains why.
+//
+//------------------------------------------------------------------------------
+fn HalveTheStep(&mut self)
+{
+ self.m_ptE[2] += self.m_ptE[3]; self.m_ptE[2] *= 0.125;
+ self.m_ptE[1] -= self.m_ptE[2]; self.m_ptE[1] *= 0.5;
+ self.m_ptE[3] *= 0.25;
+
+ self.m_cSteps *= 2; // Double the number of steps left
+ self.m_rStepSize *= 0.5;
+}
+//+-----------------------------------------------------------------------------
+//
+// Member:
+// CBezierFlattener::TryDoubleTheStep
+//
+// Synopsis:
+// Double the step size if possible within tolerance.
+//
+// Notes:
+// Doubling the step means replacing a,b,c,d by coefficients of g(t) =
+// f(2t). Express those in terms of a,b,c,d and convert to e0, e1, e2, e3
+// to get:
+//
+// New e0 = e0
+// New e1 = 2e1 + e2
+// New e2 = 8e2 - 4e3
+// New e3 = 4e3
+//
+// The patent application (see above) explains why. Note also that these
+// formulas are the inverse of those for halving the step.
+//
+//------------------------------------------------------------------------------
+fn
+TryDoubleTheStep(&mut self) -> bool
+{
+ let mut fDoubled = 0 == (self.m_cSteps & 1);
+ if fDoubled
+ {
+ let ptTemp = self.m_ptE[2] * 2. - self.m_ptE[3];
+
+ fDoubled = (self.m_ptE[3].ApproxNorm() <= self.m_rQuarterTolerance) &&
+ (ptTemp.ApproxNorm() <= self.m_rQuarterTolerance);
+
+ if fDoubled
+ {
+ self.m_ptE[1] *= 2.; self.m_ptE[1] += self.m_ptE[2];
+ self.m_ptE[3] *= 4.;
+ self.m_ptE[2] = ptTemp * 4.;
+
+ self.m_cSteps /= 2; // Halve the number of steps left
+ self.m_rStepSize *= 2.;
+ }
+ }
+
+ return fDoubled;
+}
+//+-----------------------------------------------------------------------------
+//
+// Member:
+// CBezierFlattener::GetFirstTangent
+//
+// Synopsis:
+// Get the tangent at curve start
+//
+// Return:
+// None if the tangent vector has practically 0 length
+//
+// Notes:
+// This method can return an error if all the points are bunched together.
+// The idea is that the caller will detect that, abandon this curve, and
+// never call GetLastTangent, which can therefore be presumed to succeed.
+// The failure here is benign.
+//
+//------------------------------------------------------------------------------
+#[allow(dead_code)]
+fn GetFirstTangent(&self) -> Option<GpPointR> // Tangent vector there
+
+{
+
+ let mut vecTangent = self.bezier.m_ptB[1] - self.bezier.m_ptB[0];
+ if vecTangent * vecTangent > self.m_rFuzz
+ {
+ return Some(vecTangent); // - we're done
+ }
+ // Zero first derivative, go for the second
+ vecTangent = self.bezier.m_ptB[2] - self.bezier.m_ptB[0];
+ if vecTangent * vecTangent > self.m_rFuzz
+ {
+ return Some(vecTangent); // - we're done
+ }
+ // Zero second derivative, go for the third
+ vecTangent = self.bezier.m_ptB[3] - self.bezier.m_ptB[0];
+
+ if vecTangent * vecTangent <= self.m_rFuzz
+ {
+ return None;
+ }
+
+ return Some(vecTangent); // no RRETURN, error is expected
+}
+//+-----------------------------------------------------------------------------
+//
+// Member:
+// CBezierFlattener::GetLastTangent
+//
+// Synopsis:
+// Get the tangent at curve end
+//
+// Return:
+// The tangent
+//
+// Notes:
+// This method has no error return, while GetFirstTangent returns None
+// if the tangent is zero. The idea is that we should
+// only fail if all the control points coincide, that should have been
+// detected at GetFirstTangent, and then we should not have been called.
+//
+//------------------------------------------------------------------------------
+fn GetLastTangent(&self) -> GpPointR
+{
+ let mut vecTangent = self.bezier.m_ptB[3] - self.bezier.m_ptB[2];
+
+ // If the curve is degenerate, we should have detected it at curve-start, skipped this curve
+ // altogether and not be here. But the test in GetFirstTangent is for the point-differences
+ // 1-0, 2-0 and 3-0, while here it is for points 3-2, 3-1 and 3-0, which is not quite the same.
+// Still, in a disk of radius r no 2 points are more than 2r apart. The tests are done with
+// squared distance, and m_rFuzz is the minimal accepted squared distance. GetFirstTangent()
+// succeeded, so there is a pair of points whose squared distance is greater than m_rFuzz.
+ // So the squared radius of a disk about point 3 that contains the remaining points must be
+ // at least m_rFuzz/4. Allowing some margin for arithmetic error:
+
+ let rLastTangentFuzz = self.m_rFuzz/8.;
+
+ if vecTangent * vecTangent <= rLastTangentFuzz
+ {
+ // Zero first derivative, go for the second
+ vecTangent = self.bezier.m_ptB[3] - self.bezier.m_ptB[1];
+ if vecTangent * vecTangent <= rLastTangentFuzz
+ {
+ // Zero second derivative, go for the third
+ vecTangent = self.bezier.m_ptB[3] - self.bezier.m_ptB[0];
+ }
+ }
+
+ debug_assert! (!(vecTangent * vecTangent < rLastTangentFuzz)); // Ignore NaNs
+
+ return vecTangent;
+}
+}
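
For reference, a minimal sketch (not in the upstream crate) of how this flattener is driven: implement CFlatteningSink, hand it to CBezierFlattener::new together with a CBezier and a tolerance, and call Flatten. With Flatten(false) the sink receives each step through AcceptPoint; Flatten(true) delivers points with tangents through AcceptPointAndTangent instead. It assumes only the items defined in this file.

// Hypothetical sink that just records the flattened polyline.
struct CollectPoints {
    points: Vec<GpPointR>,
}

impl CFlatteningSink for CollectPoints {
    fn AcceptPointAndTangent(&mut self, pt: &GpPointR, _vec: &GpPointR, _fLast: bool) -> HRESULT {
        // Only called when Flatten(true) is requested; the tangent is ignored here.
        self.points.push(*pt);
        S_OK
    }

    fn AcceptPoint(&mut self, pt: &GpPointR, _t: f64, _fAborted: &mut bool, _lastPoint: bool) -> HRESULT {
        self.points.push(*pt);
        S_OK
    }
}

fn flatten_to_polyline() -> Vec<GpPointR> {
    let bezier = CBezier::new([
        GpPointR { x: 0.0, y: 0.0 },
        GpPointR { x: 30.0, y: 0.0 },
        GpPointR { x: 70.0, y: 100.0 },
        GpPointR { x: 100.0, y: 100.0 },
    ]);
    // The flattener only reports points after the first one, so seed the
    // polyline with the curve's start point.
    let mut sink = CollectPoints { points: vec![GpPointR { x: 0.0, y: 0.0 }] };
    let mut flattener = CBezierFlattener::new(&bezier, &mut sink, 0.25);
    assert_eq!(flattener.Flatten(false), S_OK);
    sink.points
}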
diff --git a/third_party/rust/aa-stroke/src/c_bindings.rs b/third_party/rust/aa-stroke/src/c_bindings.rs
new file mode 100644
index 0000000000..8880e49293
--- /dev/null
+++ b/third_party/rust/aa-stroke/src/c_bindings.rs
@@ -0,0 +1,101 @@
+use crate::{Stroker, StrokeStyle, Point};
+
+type OutputVertex = crate::Vertex;
+
+#[repr(C)]
+pub struct VertexBuffer {
+ data: *const OutputVertex,
+ len: usize
+}
+
+#[no_mangle]
+pub extern "C" fn aa_stroke_new(
+ style: &StrokeStyle,
+ output_ptr: *mut OutputVertex,
+ output_capacity: usize,
+) -> *mut Stroker {
+ let mut s = Stroker::new(style);
+ if output_ptr != std::ptr::null_mut() {
+ let slice = unsafe { std::slice::from_raw_parts_mut(output_ptr, output_capacity) };
+ s.set_output_buffer(slice);
+ }
+ Box::into_raw(Box::new(s))
+}
+
+#[no_mangle]
+pub extern "C" fn aa_stroke_move_to(s: &mut Stroker, x: f32, y: f32, closed: bool) {
+ s.move_to(Point::new(x, y), closed);
+}
+
+#[no_mangle]
+pub extern "C" fn aa_stroke_line_to(s: &mut Stroker, x: f32, y: f32, end: bool) {
+ if end {
+ s.line_to_capped(Point::new(x, y))
+ } else {
+ s.line_to(Point::new(x, y));
+ }
+}
+
+#[no_mangle]
+pub extern "C" fn aa_stroke_curve_to(s: &mut Stroker, c1x: f32, c1y: f32, c2x: f32, c2y: f32, x: f32, y: f32, end: bool) {
+ if end {
+ s.curve_to_capped(Point::new(c1x, c1y), Point::new(c2x, c2y), Point::new(x, y));
+ } else {
+ s.curve_to(Point::new(c1x, c1y), Point::new(c2x, c2y), Point::new(x, y));
+ }
+}
+
+/*
+#[no_mangle]
+pub extern "C" fn aa_stroke_quad_to(s: &mut Stroker, cx: f32, cy: f32, x: f32, y: f32) {
+ s.quad_to(cx, cy, x, y);
+}*/
+
+#[no_mangle]
+pub extern "C" fn aa_stroke_close(s: &mut Stroker) {
+ s.close();
+}
+
+#[no_mangle]
+pub extern "C" fn aa_stroke_finish(s: &mut Stroker) -> VertexBuffer {
+ let stroked_path = s.get_stroked_path();
+ if let Some(output_buffer_size) = stroked_path.get_output_buffer_size() {
+ VertexBuffer {
+ data: std::ptr::null(),
+ len: output_buffer_size,
+ }
+ } else {
+ let result = stroked_path.finish();
+ let vb = VertexBuffer { data: result.as_ptr(), len: result.len() };
+ std::mem::forget(result);
+ vb
+ }
+}
+
+#[no_mangle]
+pub extern "C" fn aa_stroke_vertex_buffer_release(vb: VertexBuffer)
+{
+ if vb.data != std::ptr::null() {
+ unsafe {
+ drop(Box::from_raw(std::slice::from_raw_parts_mut(vb.data as *mut OutputVertex, vb.len)));
+ }
+ }
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn aa_stroke_release(s: *mut Stroker) {
+ drop(Box::from_raw(s));
+}
+
+
+#[test]
+fn simple() {
+ let style = StrokeStyle::default();
+ let s = unsafe { &mut *aa_stroke_new(&style, std::ptr::null_mut(), 0) } ;
+ aa_stroke_move_to(s, 10., 10., false);
+ aa_stroke_line_to(s, 100., 100., false);
+ aa_stroke_line_to(s, 100., 10., true);
+ let vb = aa_stroke_finish(s);
+ aa_stroke_vertex_buffer_release(vb);
+ unsafe { aa_stroke_release(s) } ;
+}
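
A hedged sketch of the caller-provided output buffer path, written as a hypothetical companion to the `simple` test above: when aa_stroke_new is given a non-null buffer, vertices are written into it directly, and aa_stroke_finish returns a null data pointer with len set to the number of vertices the stroke produced (or would have produced, if the capacity was too small).

#[test]
fn output_buffer() {
    let style = StrokeStyle::default();
    // OutputVertex has private fields, but this module lives in the same
    // crate, so it can construct them directly; 256 is an arbitrary capacity.
    let mut buf: Vec<OutputVertex> =
        (0..256).map(|_| OutputVertex { x: 0., y: 0., coverage: 0. }).collect();
    let s = unsafe { &mut *aa_stroke_new(&style, buf.as_mut_ptr(), buf.len()) };
    aa_stroke_move_to(s, 10., 10., false);
    aa_stroke_line_to(s, 100., 100., false);
    aa_stroke_line_to(s, 100., 10., true);
    let vb = aa_stroke_finish(s);
    // With an output buffer, the data pointer is null and `len` reports how
    // many vertices were needed; a larger buffer is required if that exceeds
    // the capacity passed to aa_stroke_new.
    assert!(vb.data.is_null());
    assert!(vb.len > 0);
    aa_stroke_vertex_buffer_release(vb); // no-op when data is null
    unsafe { aa_stroke_release(s) };
}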
diff --git a/third_party/rust/aa-stroke/src/lib.rs b/third_party/rust/aa-stroke/src/lib.rs
new file mode 100644
index 0000000000..a359194c47
--- /dev/null
+++ b/third_party/rust/aa-stroke/src/lib.rs
@@ -0,0 +1,910 @@
+
+use std::default::Default;
+
+use bezierflattener::CBezierFlattener;
+
+use crate::{bezierflattener::{CFlatteningSink, GpPointR, HRESULT, S_OK, CBezier}};
+
+mod bezierflattener;
+pub mod tri_rasterize;
+#[cfg(feature = "c_bindings")]
+pub mod c_bindings;
+
+#[derive(Clone, Copy, PartialEq, Debug)]
+pub enum Winding {
+ EvenOdd,
+ NonZero,
+}
+
+#[derive(Clone, Copy, Debug)]
+pub enum PathOp {
+ MoveTo(Point),
+ LineTo(Point),
+ QuadTo(Point, Point),
+ CubicTo(Point, Point, Point),
+ Close,
+}
+
+
+/// Represents a complete path usable for filling or stroking.
+#[derive(Clone, Debug)]
+pub struct Path {
+ pub ops: Vec<PathOp>,
+ pub winding: Winding,
+}
+
+pub type Point = euclid::default::Point2D<f32>;
+pub type Transform = euclid::default::Transform2D<f32>;
+pub type Vector = euclid::default::Vector2D<f32>;
+
+
+#[derive(Clone, Copy, PartialEq, Debug)]
+#[repr(C)]
+pub enum LineCap {
+ Round,
+ Square,
+ Butt,
+}
+
+#[derive(Clone, Copy, PartialEq, Debug)]
+#[repr(C)]
+pub enum LineJoin {
+ Round,
+ Miter,
+ Bevel,
+}
+
+#[derive(Clone, PartialEq, Debug)]
+#[repr(C)]
+pub struct StrokeStyle {
+ pub width: f32,
+ pub cap: LineCap,
+ pub join: LineJoin,
+ pub miter_limit: f32,
+}
+
+impl Default for StrokeStyle {
+ fn default() -> Self {
+ StrokeStyle {
+ width: 1.,
+ cap: LineCap::Butt,
+ join: LineJoin::Miter,
+ miter_limit: 10.,
+ }
+ }
+}
+#[derive(Debug)]
+pub struct Vertex {
+ x: f32,
+ y: f32,
+ coverage: f32
+}
+
+/// A helper struct used for accumulating the triangle-mesh vertices of a stroked path.
+pub struct PathBuilder<'z> {
+ output_buffer: Option<&'z mut [Vertex]>,
+ output_buffer_offset: usize,
+ vertices: Vec<Vertex>,
+ coverage: f32,
+ aa: bool
+}
+
+
+
+impl<'z> PathBuilder<'z> {
+ pub fn new(coverage: f32) -> PathBuilder<'z> {
+ PathBuilder {
+ output_buffer: None,
+ output_buffer_offset: 0,
+ vertices: Vec::new(),
+ coverage,
+ aa: true
+ }
+ }
+
+ pub fn set_output_buffer(&mut self, output_buffer: &'z mut [Vertex]) {
+ assert!(self.output_buffer.is_none());
+ self.output_buffer = Some(output_buffer);
+ }
+
+ pub fn push_tri_with_coverage(&mut self, x1: f32, y1: f32, c1: f32, x2: f32, y2: f32, c2: f32, x3: f32, y3: f32, c3: f32) {
+ let v1 = Vertex { x: x1, y: y1, coverage: c1 };
+ let v2 = Vertex { x: x2, y: y2, coverage: c2 };
+ let v3 = Vertex { x: x3, y: y3, coverage: c3 };
+ if let Some(output_buffer) = &mut self.output_buffer {
+ let offset = self.output_buffer_offset;
+ if offset + 3 <= output_buffer.len() {
+ output_buffer[offset] = v1;
+ output_buffer[offset + 1] = v2;
+ output_buffer[offset + 2] = v3;
+ }
+ self.output_buffer_offset = offset + 3;
+ } else {
+ self.vertices.push(v1);
+ self.vertices.push(v2);
+ self.vertices.push(v3);
+ }
+ }
+
+ pub fn push_tri(&mut self, x1: f32, y1: f32, x2: f32, y2: f32, x3: f32, y3: f32) {
+ self.push_tri_with_coverage(x1, y1, self.coverage, x2, y2, self.coverage, x3, y3, self.coverage);
+ }
+
+
+ // x3, y3 is the full coverage vert
+ pub fn tri_ramp(&mut self, x1: f32, y1: f32, x2: f32, y2: f32, x3: f32, y3: f32) {
+ self.push_tri_with_coverage(x1, y1, 0., x2, y2, 0., x3, y3, self.coverage);
+ }
+
+ pub fn quad(&mut self, x1: f32, y1: f32, x2: f32, y2: f32, x3: f32, y3: f32, x4: f32, y4: f32) {
+ self.push_tri(x1, y1, x2, y2, x3, y3);
+ self.push_tri(x3, y3, x4, y4, x1, y1);
+ }
+
+ pub fn ramp(&mut self, x1: f32, y1: f32, x2: f32, y2: f32, x3: f32, y3: f32, x4: f32, y4: f32) {
+ self.push_tri_with_coverage(x1, y1, self.coverage, x2, y2, 0., x3, y3, 0.);
+ self.push_tri_with_coverage(x3, y3, 0., x4, y4, self.coverage, x1, y1, self.coverage);
+ }
+
+ // first edge is outside
+ pub fn tri(&mut self, x1: f32, y1: f32, x2: f32, y2: f32, x3: f32, y3: f32) {
+ self.push_tri(x1, y1, x2, y2, x3, y3);
+ }
+
+ pub fn arc_wedge(&mut self, c: Point, radius: f32, a: Vector, b: Vector) {
+ arc(self, c.x, c.y, radius, a, b);
+ }
+
+ /// Completes the current path
+ pub fn finish(self) -> Vec<Vertex> {
+ self.vertices
+ }
+
+ pub fn get_output_buffer_size(&self) -> Option<usize> {
+ if self.output_buffer.is_some() {
+ Some(self.output_buffer_offset)
+ } else {
+ None
+ }
+ }
+}
+
+
+
+fn compute_normal(p0: Point, p1: Point) -> Option<Vector> {
+ let ux = p1.x - p0.x;
+ let uy = p1.y - p0.y;
+
+ // this could overflow f32. Skia checks for this and
+ // uses a double in that situation
+ let ulen = ux.hypot(uy);
+ if ulen == 0. {
+ return None;
+ }
+ // the normal is perpendicular to the *unit* vector
+ Some(Vector::new(-uy / ulen, ux / ulen))
+}
+
+fn flip(v: Vector) -> Vector {
+ Vector::new(-v.x, -v.y)
+}
+
+/* Compute a spline approximation of the arc
+centered at xc, yc from the angle a to the angle b
+
+The angle between a and b should not be more than a
+quarter circle (pi/2)
+
+The approximation is similar to an approximation given in:
+"Approximation of a cubic bezier curve by circular arcs and vice versa"
+by Aleksas Riškus. However that approximation becomes unstable when the
+angle of the arc approaches 0.
+
+This approximation is inspired by a discussion with Boris Zbarsky
+and essentially just computes:
+
+ h = 4.0/3.0 * tan ((angle_B - angle_A) / 4.0);
+
+without converting to polar coordinates.
+
+A different way to do this is covered in "Approximation of a cubic bezier
+curve by circular arcs and vice versa" by Alekas Riškus. However, the method
+presented there doesn't handle arcs with angles close to 0 because it
+divides by the perp dot product of the two angle vectors.
+*/
+
+fn arc_segment_tri(path: &mut PathBuilder, xc: f32, yc: f32, radius: f32, a: Vector, b: Vector) {
+ let r_sin_a = radius * a.y;
+ let r_cos_a = radius * a.x;
+ let r_sin_b = radius * b.y;
+ let r_cos_b = radius * b.x;
+
+
+ /* bisect the angle between 'a' and 'b' with 'mid' */
+ let mut mid = a + b;
+ mid /= mid.length();
+
+ /* bisect the angle between 'a' and 'mid' with 'mid2' this is parallel to a
+ * line with angle (B - A)/4 */
+ let mid2 = a + mid;
+
+ let h = (4. / 3.) * dot(perp(a), mid2) / dot(a, mid2);
+
+ let last_point = GpPointR { x: (xc + r_cos_a) as f64, y: (yc + r_sin_a) as f64 };
+ let initial_normal = GpPointR { x: a.x as f64, y: a.y as f64 };
+
+
+ struct Target<'a, 'b> { last_point: GpPointR, last_normal: GpPointR, xc: f32, yc: f32, path: &'a mut PathBuilder<'b> }
+ impl<'a, 'b> CFlatteningSink for Target<'a, 'b> {
+ fn AcceptPointAndTangent(&mut self,
+ pt: &GpPointR,
+ // The point
+ vec: &GpPointR,
+ // The tangent there
+ _last: bool
+ // Is this the last point on the curve?
+ ) -> HRESULT {
+ if self.path.aa {
+ let len = vec.Norm();
+ let normal = *vec/len;
+ let normal = GpPointR { x: -normal.y, y: normal.x };
+ // FIXME: we probably need more width here because
+ // the normals are not perpendicular with the edge
+ let width = 0.5;
+
+ self.path.ramp(
+ (pt.x - normal.x * width) as f32,
+ (pt.y - normal.y * width) as f32,
+ (pt.x + normal.x * width) as f32,
+ (pt.y + normal.y * width) as f32,
+ (self.last_point.x + self.last_normal.x * width) as f32,
+ (self.last_point.y + self.last_normal.y * width) as f32,
+ (self.last_point.x - self.last_normal.x * width) as f32,
+ (self.last_point.y - self.last_normal.y * width) as f32, );
+ self.path.push_tri(
+ (self.last_point.x - self.last_normal.x * 0.5) as f32,
+ (self.last_point.y - self.last_normal.y * 0.5) as f32,
+ (pt.x - normal.x * 0.5) as f32,
+ (pt.y - normal.y * 0.5) as f32,
+ self.xc, self.yc);
+ self.last_normal = normal;
+
+ } else {
+ self.path.push_tri(self.last_point.x as f32, self.last_point.y as f32, pt.x as f32, pt.y as f32, self.xc, self.yc);
+ }
+ self.last_point = pt.clone();
+ return S_OK;
+ }
+
+ fn AcceptPoint(&mut self,
+ pt: &GpPointR,
+ // The point
+ _t: f64,
+ // Parameter we're at
+ _aborted: &mut bool,
+ _last_point: bool) -> HRESULT {
+ self.path.push_tri(self.last_point.x as f32, self.last_point.y as f32, pt.x as f32, pt.y as f32, self.xc, self.yc);
+ self.last_point = pt.clone();
+ return S_OK;
+ }
+ }
+ let bezier = CBezier::new([GpPointR { x: (xc + r_cos_a) as f64, y: (yc + r_sin_a) as f64, },
+ GpPointR { x: (xc + r_cos_a - h * r_sin_a) as f64, y: (yc + r_sin_a + h * r_cos_a) as f64, },
+ GpPointR { x: (xc + r_cos_b + h * r_sin_b) as f64, y: (yc + r_sin_b - h * r_cos_b) as f64, },
+ GpPointR { x: (xc + r_cos_b) as f64, y: (yc + r_sin_b) as f64, }]);
+ if bezier.is_degenerate() {
+ return;
+ }
+ let mut t = Target{ last_point, last_normal: initial_normal, xc, yc, path };
+ let mut f = CBezierFlattener::new(&bezier, &mut t, 0.25);
+ f.Flatten(true);
+
+}
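
The comment above arc_segment_tri derives h = 4/3 * tan((angle_B - angle_A) / 4) without converting to polar coordinates. A hypothetical test, not part of the upstream file, that checks the vector construction used above against that closed form; it assumes the Vector type and the dot/perp helpers defined elsewhere in this file:

#[test]
fn arc_segment_h_matches_closed_form() {
    let (angle_a, angle_b) = (0.3f32, 1.1f32); // less than a quarter circle apart
    let a = Vector::new(angle_a.cos(), angle_a.sin());
    let b = Vector::new(angle_b.cos(), angle_b.sin());

    // Same construction as arc_segment_tri: bisect twice so that mid2 points
    // (angle_b - angle_a) / 4 away from 'a', then read off the tangent of that
    // angle as a perp-dot / dot quotient.
    let mut mid = a + b;
    mid /= mid.length();
    let mid2 = a + mid;
    let h = (4. / 3.) * dot(perp(a), mid2) / dot(a, mid2);

    let h_closed_form = (4. / 3.) * ((angle_b - angle_a) / 4.).tan();
    assert!((h - h_closed_form).abs() < 1e-5);
}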
+
+/* The angle between the vectors must be <= pi */
+fn bisect(a: Vector, b: Vector) -> Vector {
+ let mut mid;
+ if dot(a, b) >= 0. {
+ /* if the angle between a and b is acute, then we can
+ * just add the vectors and normalize */
+ mid = a + b;
+ } else {
+ /* otherwise, we can flip a, add it
+ * and then use the perpendicular of the result */
+ mid = flip(a) + b;
+ mid = perp(mid);
+ }
+
+ /* normalize */
+ /* because we assume that 'a' and 'b' are normalized, we can use
+ * sqrt instead of hypot because the range of mid is limited */
+ let mid_len = mid.x * mid.x + mid.y * mid.y;
+ let len = mid_len.sqrt();
+ return mid / len;
+}
+
+fn arc(path: &mut PathBuilder, xc: f32, yc: f32, radius: f32, a: Vector, b: Vector) {
+ /* find a vector that bisects the angle between a and b */
+ let mid_v = bisect(a, b);
+
+ /* construct the arc using two curve segments */
+ arc_segment_tri(path, xc, yc, radius, a, mid_v);
+ arc_segment_tri(path, xc, yc, radius, mid_v, b);
+}
+
+/*
+fn join_round(path: &mut PathBuilder, center: Point, a: Vector, b: Vector, radius: f32) {
+ /*
+ int ccw = dot (perp (b), a) >= 0; // XXX: is this always true?
+ yes, otherwise we have an interior angle.
+ assert (ccw);
+ */
+ arc(path, center.x, center.y, radius, a, b);
+}*/
+
+fn cap_line(dest: &mut PathBuilder, style: &StrokeStyle, pt: Point, normal: Vector) {
+ let offset = style.width / 2.;
+ match style.cap {
+ LineCap::Butt => {
+ if dest.aa {
+ let half_width = offset;
+ let end = pt;
+ let v = Vector::new(normal.y, -normal.x);
+ // end
+ dest.ramp(
+ end.x - normal.x * (half_width - 0.5),
+ end.y - normal.y * (half_width - 0.5),
+ end.x + v.x - normal.x * (half_width - 0.5),
+ end.y + v.y - normal.y * (half_width - 0.5),
+ end.x + v.x + normal.x * (half_width - 0.5),
+ end.y + v.y + normal.y * (half_width - 0.5),
+ end.x + normal.x * (half_width - 0.5),
+ end.y + normal.y * (half_width - 0.5),
+ );
+ dest.tri_ramp(
+ end.x + v.x - normal.x * (half_width - 0.5),
+ end.y + v.y - normal.y * (half_width - 0.5),
+ end.x - normal.x * (half_width + 0.5),
+ end.y - normal.y * (half_width + 0.5),
+ end.x - normal.x * (half_width - 0.5),
+ end.y - normal.y * (half_width - 0.5));
+ dest.tri_ramp(
+ end.x + v.x + normal.x * (half_width - 0.5),
+ end.y + v.y + normal.y * (half_width - 0.5),
+ end.x + normal.x * (half_width + 0.5),
+ end.y + normal.y * (half_width + 0.5),
+ end.x + normal.x * (half_width - 0.5),
+ end.y + normal.y * (half_width - 0.5));
+ }
+ }
+ LineCap::Round => {
+ dest.arc_wedge(pt, offset, normal, flip(normal));
+ }
+ LineCap::Square => {
+ // parallel vector
+ let v = Vector::new(normal.y, -normal.x);
+ let end = pt + v * offset;
+ if dest.aa {
+ let half_width = offset;
+ let offset = offset - 0.5;
+ dest.ramp(
+ end.x + normal.x * (half_width - 0.5),
+ end.y + normal.y * (half_width - 0.5),
+ end.x + normal.x * (half_width + 0.5),
+ end.y + normal.y * (half_width + 0.5),
+ pt.x + normal.x * (half_width + 0.5),
+ pt.y + normal.y * (half_width + 0.5),
+ pt.x + normal.x * (half_width - 0.5),
+ pt.y + normal.y * (half_width - 0.5),
+ );
+ dest.quad(pt.x + normal.x * offset, pt.y + normal.y * offset,
+ end.x + normal.x * offset, end.y + normal.y * offset,
+ end.x + -normal.x * offset, end.y + -normal.y * offset,
+ pt.x - normal.x * offset, pt.y - normal.y * offset);
+
+ dest.ramp(
+ pt.x - normal.x * (half_width - 0.5),
+ pt.y - normal.y * (half_width - 0.5),
+ pt.x - normal.x * (half_width + 0.5),
+ pt.y - normal.y * (half_width + 0.5),
+ end.x - normal.x * (half_width + 0.5),
+ end.y - normal.y * (half_width + 0.5),
+ end.x - normal.x * (half_width - 0.5),
+ end.y - normal.y * (half_width - 0.5));
+
+ // end
+ dest.ramp(
+ end.x - normal.x * (half_width - 0.5),
+ end.y - normal.y * (half_width - 0.5),
+ end.x + v.x - normal.x * (half_width - 0.5),
+ end.y + v.y - normal.y * (half_width - 0.5),
+ end.x + v.x + normal.x * (half_width - 0.5),
+ end.y + v.y + normal.y * (half_width - 0.5),
+ end.x + normal.x * (half_width - 0.5),
+ end.y + normal.y * (half_width - 0.5),
+ );
+ dest.tri_ramp(
+ end.x + v.x - normal.x * (half_width - 0.5),
+ end.y + v.y - normal.y * (half_width - 0.5),
+ end.x - normal.x * (half_width + 0.5),
+ end.y - normal.y * (half_width + 0.5),
+ end.x - normal.x * (half_width - 0.5),
+ end.y - normal.y * (half_width - 0.5));
+ dest.tri_ramp(
+ end.x + v.x + normal.x * (half_width - 0.5),
+ end.y + v.y + normal.y * (half_width - 0.5),
+ end.x + normal.x * (half_width + 0.5),
+ end.y + normal.y * (half_width + 0.5),
+ end.x + normal.x * (half_width - 0.5),
+ end.y + normal.y * (half_width - 0.5));
+ } else {
+ dest.quad(pt.x + normal.x * offset, pt.y + normal.y * offset,
+ end.x + normal.x * offset, end.y + normal.y * offset,
+ end.x + -normal.x * offset, end.y + -normal.y * offset,
+ pt.x - normal.x * offset, pt.y - normal.y * offset);
+ }
+ }
+ }
+}
+
+fn bevel(
+ dest: &mut PathBuilder,
+ style: &StrokeStyle,
+ pt: Point,
+ s1_normal: Vector,
+ s2_normal: Vector,
+) {
+ let offset = style.width / 2.;
+ if dest.aa {
+ let width = 1.;
+ let offset = offset - width / 2.;
+ //XXX: we should be able to just bisect the two norms to get this
+ let diff = match (s2_normal - s1_normal).try_normalize() {
+ Some(diff) => diff,
+ None => return,
+ };
+ let edge_normal = perp(diff);
+
+ dest.tri(pt.x + s1_normal.x * offset, pt.y + s1_normal.y * offset,
+ pt.x + s2_normal.x * offset, pt.y + s2_normal.y * offset,
+ pt.x, pt.y);
+
+ dest.tri_ramp(pt.x + s1_normal.x * (offset + width), pt.y + s1_normal.y * (offset + width),
+ pt.x + s1_normal.x * offset + edge_normal.x, pt.y + s1_normal.y * offset + edge_normal.y,
+ pt.x + s1_normal.x * offset, pt.y + s1_normal.y * offset);
+ dest.ramp(
+ pt.x + s2_normal.x * offset, pt.y + s2_normal.y * offset,
+ pt.x + s2_normal.x * offset + edge_normal.x, pt.y + s2_normal.y * offset + edge_normal.y,
+ pt.x + s1_normal.x * offset + edge_normal.x, pt.y + s1_normal.y * offset + edge_normal.y,
+ pt.x + s1_normal.x * offset, pt.y + s1_normal.y * offset,
+ );
+ dest.tri_ramp(pt.x + s2_normal.x * (offset + width), pt.y + s2_normal.y * (offset + width),
+ pt.x + s2_normal.x * offset + edge_normal.x, pt.y + s2_normal.y * offset + edge_normal.y,
+ pt.x + s2_normal.x * offset, pt.y + s2_normal.y * offset);
+ } else {
+ dest.tri(pt.x + s1_normal.x * offset, pt.y + s1_normal.y * offset,
+ pt.x + s2_normal.x * offset, pt.y + s2_normal.y * offset,
+ pt.x, pt.y);
+ }
+}
+
+/* given a vector, rotate it 90 degrees to the right (clockwise).
+ * This function has a period of 4, e.g. swap(swap(swap(swap(x)))) == x */
+fn swap(a: Vector) -> Vector {
+ /* one of these needs to be negative. We choose a.x so that we rotate to the right instead of negating */
+ Vector::new(a.y, -a.x)
+}
+
+fn unperp(a: Vector) -> Vector {
+ swap(a)
+}
+
+/* rotate a vector 90 degrees to the left */
+fn perp(v: Vector) -> Vector {
+ Vector::new(-v.y, v.x)
+}
+
+fn dot(a: Vector, b: Vector) -> f32 {
+ a.x * b.x + a.y * b.y
+}
+
+/* Finds the intersection of two lines each defined by a point and a normal.
+From "Example 2: Find the intersection of two lines" of
+"The Pleasures of "Perp Dot" Products"
+F. S. Hill, Jr. */
+fn line_intersection(a: Point, a_perp: Vector, b: Point, b_perp: Vector) -> Option<Point> {
+ let a_parallel = unperp(a_perp);
+ let c = b - a;
+ let denom = dot(b_perp, a_parallel);
+ if denom == 0.0 {
+ return None;
+ }
+
+ let t = dot(b_perp, c) / denom;
+
+ let intersection = Point::new(a.x + t * (a_parallel.x), a.y + t * (a_parallel.y));
+
+ Some(intersection)
+}
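
A tiny worked example of line_intersection (hypothetical test, not in the upstream file): the horizontal line through the origin with normal (0, 1) and the vertical line through (2, 5) with normal (1, 0) meet at (2, 0).

#[test]
fn line_intersection_example() {
    let p = line_intersection(
        Point::new(0., 0.), Vector::new(0., 1.),
        Point::new(2., 5.), Vector::new(1., 0.),
    );
    assert_eq!(p, Some(Point::new(2., 0.)));
}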
+
+fn is_interior_angle(a: Vector, b: Vector) -> bool {
+ /* angles of 180 and 0 degrees will evaluate to 0, however
+ * we want to treat 0 as an interior angle and 180 as an exterior angle */
+ dot(perp(a), b) > 0. || a == b /* 0 degrees is interior */
+}
+
+fn join_line(
+ dest: &mut PathBuilder,
+ style: &StrokeStyle,
+ pt: Point,
+ mut s1_normal: Vector,
+ mut s2_normal: Vector,
+) {
+ if is_interior_angle(s1_normal, s2_normal) {
+ s2_normal = flip(s2_normal);
+ s1_normal = flip(s1_normal);
+ std::mem::swap(&mut s1_normal, &mut s2_normal);
+ }
+
+ // XXX: joining uses `pt` which can cause seams because it lies halfway on a line and the
+ // rasterizer may not find exactly the same spot
+ let mut offset = style.width / 2.;
+
+ match style.join {
+ LineJoin::Round => {
+ dest.arc_wedge(pt, offset, s1_normal, s2_normal);
+ }
+ LineJoin::Miter => {
+ if dest.aa {
+ offset -= 0.5;
+ }
+ let in_dot_out = -s1_normal.x * s2_normal.x + -s1_normal.y * s2_normal.y;
+ if 2. <= style.miter_limit * style.miter_limit * (1. - in_dot_out) {
+ let start = pt + s1_normal * offset;
+ let end = pt + s2_normal * offset;
+ if let Some(intersection) = line_intersection(start, s1_normal, end, s2_normal) {
+ // We won't have an intersection if the segments are parallel
+ if dest.aa {
+ let ramp_start = pt + s1_normal * (offset + 1.);
+ let ramp_end = pt + s2_normal * (offset + 1.);
+ let mid = bisect(s1_normal, s2_normal);
+ let ramp_intersection = intersection + mid;
+
+ let ramp_s1 = line_intersection(ramp_start, s1_normal, ramp_intersection, flip(mid));
+ let ramp_s2 = line_intersection(ramp_end, s2_normal, ramp_intersection, flip(mid));
+
+ if let Some(ramp_s1) = ramp_s1 {
+ dest.ramp(intersection.x, intersection.y,
+ ramp_s1.x, ramp_s1.y,
+ ramp_start.x, ramp_start.y,
+ pt.x + s1_normal.x * offset, pt.y + s1_normal.y * offset,
+ );
+ }
+ if let Some(ramp_s2) = ramp_s2 {
+ dest.ramp(pt.x + s2_normal.x * offset, pt.y + s2_normal.y * offset,
+ ramp_end.x, ramp_end.y,
+ ramp_s2.x, ramp_s2.y,
+ intersection.x, intersection.y);
+ if let Some(ramp_s1) = ramp_s1 {
+ dest.tri_ramp(ramp_s1.x, ramp_s1.y, ramp_s2.x, ramp_s2.y, intersection.x, intersection.y);
+ }
+ }
+
+ // we'll want to intersect the ramps and put a flat cap on the end
+ dest.quad(pt.x + s1_normal.x * offset, pt.y + s1_normal.y * offset,
+ intersection.x, intersection.y,
+ pt.x + s2_normal.x * offset, pt.y + s2_normal.y * offset,
+ pt.x, pt.y);
+ } else {
+ dest.quad(pt.x + s1_normal.x * offset, pt.y + s1_normal.y * offset,
+ intersection.x, intersection.y,
+ pt.x + s2_normal.x * offset, pt.y + s2_normal.y * offset,
+ pt.x, pt.y);
+ }
+ }
+ } else {
+ bevel(dest, style, pt, s1_normal, s2_normal);
+ }
+ }
+ LineJoin::Bevel => {
+ bevel(dest, style, pt, s1_normal, s2_normal);
+ }
+ }
+}
+
+pub struct Stroker<'z> {
+ stroked_path: PathBuilder<'z>,
+ cur_pt: Option<Point>,
+ last_normal: Vector,
+ half_width: f32,
+ start_point: Option<(Point, Vector)>,
+ style: StrokeStyle,
+ closed_subpath: bool
+}
+
+impl<'z> Stroker<'z> {
+ pub fn new(style: &StrokeStyle) -> Self {
+ let mut style = style.clone();
+ let mut coverage = 1.;
+ if style.width < 1. {
+ coverage = style.width;
+ style.width = 1.;
+ }
+ Stroker {
+ stroked_path: PathBuilder::new(coverage),
+ cur_pt: None,
+ last_normal: Vector::zero(),
+ half_width: style.width / 2.,
+ start_point: None,
+ style,
+ closed_subpath: false,
+ }
+ }
+
+ pub fn set_output_buffer(&mut self, output_buffer: &'z mut [Vertex]) {
+ self.stroked_path.set_output_buffer(output_buffer);
+ }
+
+ pub fn line_to_capped(&mut self, pt: Point) {
+ if let Some(cur_pt) = self.cur_pt {
+ let normal = compute_normal(cur_pt, pt).unwrap_or(self.last_normal);
+ self.line_to(if self.stroked_path.aa && self.style.cap == LineCap::Butt { pt - flip(normal) * 0.5} else { pt });
+ if let (Some(cur_pt), Some((_point, _normal))) = (self.cur_pt, self.start_point) {
+ // cap end
+ cap_line(&mut self.stroked_path, &self.style, cur_pt, self.last_normal);
+ }
+ }
+ self.start_point = None;
+ }
+
+ pub fn move_to(&mut self, pt: Point, closed_subpath: bool) {
+ self.start_point = None;
+ self.cur_pt = Some(pt);
+ self.closed_subpath = closed_subpath;
+ }
+
+ pub fn line_to(&mut self, pt: Point) {
+ let cur_pt = self.cur_pt;
+ let stroked_path = &mut self.stroked_path;
+ let half_width = self.half_width;
+
+ if cur_pt.is_none() {
+ self.start_point = None;
+ } else if let Some(cur_pt) = cur_pt {
+ if let Some(normal) = compute_normal(cur_pt, pt) {
+ if self.start_point.is_none() {
+ if !self.closed_subpath {
+ // cap beginning
+ cap_line(stroked_path, &self.style, cur_pt, flip(normal));
+ if stroked_path.aa && self.style.cap == LineCap::Butt {
+
+ }
+ }
+ self.start_point = Some((cur_pt, normal));
+ } else {
+ join_line(stroked_path, &self.style, cur_pt, self.last_normal, normal);
+ }
+ if stroked_path.aa {
+ stroked_path.ramp(
+ pt.x + normal.x * (half_width - 0.5),
+ pt.y + normal.y * (half_width - 0.5),
+ pt.x + normal.x * (half_width + 0.5),
+ pt.y + normal.y * (half_width + 0.5),
+ cur_pt.x + normal.x * (half_width + 0.5),
+ cur_pt.y + normal.y * (half_width + 0.5),
+ cur_pt.x + normal.x * (half_width - 0.5),
+ cur_pt.y + normal.y * (half_width - 0.5),
+ );
+ stroked_path.quad(
+ cur_pt.x + normal.x * (half_width - 0.5),
+ cur_pt.y + normal.y * (half_width - 0.5),
+ pt.x + normal.x * (half_width - 0.5), pt.y + normal.y * (half_width - 0.5),
+ pt.x + -normal.x * (half_width - 0.5), pt.y + -normal.y * (half_width - 0.5),
+ cur_pt.x - normal.x * (half_width - 0.5),
+ cur_pt.y - normal.y * (half_width - 0.5),
+ );
+ stroked_path.ramp(
+ cur_pt.x - normal.x * (half_width - 0.5),
+ cur_pt.y - normal.y * (half_width - 0.5),
+ cur_pt.x - normal.x * (half_width + 0.5),
+ cur_pt.y - normal.y * (half_width + 0.5),
+ pt.x - normal.x * (half_width + 0.5),
+ pt.y - normal.y * (half_width + 0.5),
+ pt.x - normal.x * (half_width - 0.5),
+ pt.y - normal.y * (half_width - 0.5),
+ );
+ } else {
+ stroked_path.quad(
+ cur_pt.x + normal.x * half_width,
+ cur_pt.y + normal.y * half_width,
+ pt.x + normal.x * half_width, pt.y + normal.y * half_width,
+ pt.x + -normal.x * half_width, pt.y + -normal.y * half_width,
+ cur_pt.x - normal.x * half_width,
+ cur_pt.y - normal.y * half_width,
+ );
+ }
+
+ self.last_normal = normal;
+
+ }
+ }
+ self.cur_pt = Some(pt);
+ }
+
+ pub fn curve_to(&mut self, cx1: Point, cx2: Point, pt: Point) {
+ self.curve_to_internal(cx1, cx2, pt, false);
+ }
+
+ pub fn curve_to_capped(&mut self, cx1: Point, cx2: Point, pt: Point) {
+ self.curve_to_internal(cx1, cx2, pt, true);
+ }
+
+ pub fn curve_to_internal(&mut self, cx1: Point, cx2: Point, pt: Point, end: bool) {
+ struct Target<'a, 'b> { stroker: &'a mut Stroker<'b>, end: bool }
+ impl<'a, 'b> CFlatteningSink for Target<'a, 'b> {
+ fn AcceptPointAndTangent(&mut self, _: &GpPointR, _: &GpPointR, _: bool ) -> HRESULT {
+ panic!()
+ }
+
+ fn AcceptPoint(&mut self,
+ pt: &GpPointR,
+ // The point
+ _t: f64,
+ // Parameter we're at
+ _aborted: &mut bool,
+ last_point: bool) -> HRESULT {
+ if last_point && self.end {
+ self.stroker.line_to_capped(Point::new(pt.x as f32, pt.y as f32));
+ } else {
+ self.stroker.line_to(Point::new(pt.x as f32, pt.y as f32));
+ }
+ return S_OK;
+ }
+ }
+ let cur_pt = self.cur_pt.unwrap_or(cx1);
+ let bezier = CBezier::new([GpPointR { x: cur_pt.x as f64, y: cur_pt.y as f64, },
+ GpPointR { x: cx1.x as f64, y: cx1.y as f64, },
+ GpPointR { x: cx2.x as f64, y: cx2.y as f64, },
+ GpPointR { x: pt.x as f64, y: pt.y as f64, }]);
+ let mut t = Target{ stroker: self, end };
+ let mut f = CBezierFlattener::new(&bezier, &mut t, 0.25);
+ f.Flatten(false);
+ }
+
+ pub fn close(&mut self) {
+ let stroked_path = &mut self.stroked_path;
+ let half_width = self.half_width;
+ if let (Some(cur_pt), Some((end_point, start_normal))) = (self.cur_pt, self.start_point) {
+ if let Some(normal) = compute_normal(cur_pt, end_point) {
+ join_line(stroked_path, &self.style, cur_pt, self.last_normal, normal);
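+ // The closing segment uses the same ramp/quad/ramp construction as line_to.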
+ if stroked_path.aa {
+ stroked_path.ramp(
+ end_point.x + normal.x * (half_width - 0.5),
+ end_point.y + normal.y * (half_width - 0.5),
+ end_point.x + normal.x * (half_width + 0.5),
+ end_point.y + normal.y * (half_width + 0.5),
+ cur_pt.x + normal.x * (half_width + 0.5),
+ cur_pt.y + normal.y * (half_width + 0.5),
+ cur_pt.x + normal.x * (half_width - 0.5),
+ cur_pt.y + normal.y * (half_width - 0.5),
+ );
+ stroked_path.quad(
+ cur_pt.x + normal.x * (half_width - 0.5),
+ cur_pt.y + normal.y * (half_width - 0.5),
+ end_point.x + normal.x * (half_width - 0.5),
+ end_point.y + normal.y * (half_width - 0.5),
+ end_point.x - normal.x * (half_width - 0.5),
+ end_point.y - normal.y * (half_width - 0.5),
+ cur_pt.x - normal.x * (half_width - 0.5),
+ cur_pt.y - normal.y * (half_width - 0.5),
+ );
+ stroked_path.ramp(
+ cur_pt.x - normal.x * (half_width - 0.5),
+ cur_pt.y - normal.y * (half_width - 0.5),
+ cur_pt.x - normal.x * (half_width + 0.5),
+ cur_pt.y - normal.y * (half_width + 0.5),
+ end_point.x - normal.x * (half_width + 0.5),
+ end_point.y - normal.y * (half_width + 0.5),
+ end_point.x - normal.x * (half_width - 0.5),
+ end_point.y - normal.y * (half_width - 0.5),
+ );
+ } else {
+ stroked_path.quad(
+ cur_pt.x + normal.x * half_width,
+ cur_pt.y + normal.y * half_width,
+ end_point.x + normal.x * half_width,
+ end_point.y + normal.y * half_width,
+ end_point.x - normal.x * half_width,
+ end_point.y - normal.y * half_width,
+ cur_pt.x - normal.x * half_width,
+ cur_pt.y - normal.y * half_width,
+ );
+ }
+ join_line(stroked_path, &self.style, end_point, normal, start_normal);
+ } else {
+ join_line(stroked_path, &self.style, end_point, self.last_normal, start_normal);
+ }
+ }
+ self.cur_pt = self.start_point.map(|x| x.0);
+ self.start_point = None;
+ }
+
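+ /// Caps the ends of any open subpath and returns the accumulated stroke geometry,
+ /// leaving this stroker with a fresh, empty path builder.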
+ pub fn get_stroked_path(&mut self) -> PathBuilder<'z> {
+ let mut stroked_path = std::mem::replace(&mut self.stroked_path, PathBuilder::new(1.));
+
+ if let (Some(cur_pt), Some((point, normal))) = (self.cur_pt, self.start_point) {
+ // cap end
+ cap_line(&mut stroked_path, &self.style, cur_pt, self.last_normal);
+ // cap beginning
+ cap_line(&mut stroked_path, &self.style, point, flip(normal));
+ }
+
+ stroked_path
+ }
+
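+ /// Convenience wrapper around `get_stroked_path` that returns the stroked geometry
+ /// as a flat list of triangle vertices.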
+ pub fn finish(&mut self) -> Vec<Vertex> {
+ self.get_stroked_path().finish()
+ }
+}
+
+#[test]
+fn simple() {
+ let mut stroker = Stroker::new(&StrokeStyle{
+ cap: LineCap::Round,
+ join: LineJoin::Bevel,
+ width: 20.,
+ ..Default::default()});
+ stroker.move_to(Point::new(20., 20.), false);
+ stroker.line_to(Point::new(100., 100.));
+ stroker.line_to_capped(Point::new(110., 20.));
+
+ stroker.move_to(Point::new(120., 20.), true);
+ stroker.line_to(Point::new(120., 50.));
+ stroker.line_to(Point::new(140., 50.));
+ stroker.close();
+
+ let stroked = stroker.finish();
+ assert_eq!(stroked.len(), 330);
+}
+
+#[test]
+fn curve() {
+ let mut stroker = Stroker::new(&StrokeStyle{
+ cap: LineCap::Round,
+ join: LineJoin::Bevel,
+ width: 20.,
+ ..Default::default()});
+ stroker.move_to(Point::new(20., 160.), true);
+ stroker.curve_to(Point::new(100., 160.), Point::new(100., 180.), Point::new(20., 180.));
+ stroker.close();
+ let stroked = stroker.finish();
+ assert_eq!(stroked.len(), 1089);
+}
+
+#[test]
+fn width_one_radius_arc() {
+ // previously this caused us to try to flatten an arc with radius 0
+ let mut stroker = Stroker::new(&StrokeStyle{
+ cap: LineCap::Round,
+ join: LineJoin::Round,
+ width: 1.,
+ ..Default::default()});
+ stroker.move_to(Point::new(20., 20.), false);
+ stroker.line_to(Point::new(30., 160.));
+ stroker.line_to_capped(Point::new(40., 20.));
+ stroker.finish();
+}
+
+#[test]
+fn parallel_line_join() {
+ // ensure line joins of almost parallel lines don't cause math to fail
+ for join in [LineJoin::Bevel, LineJoin::Round, LineJoin::Miter] {
+ let mut stroker = Stroker::new(&StrokeStyle{
+ cap: LineCap::Butt,
+ join,
+ width: 1.0,
+ ..Default::default()});
+ stroker.move_to(Point::new(19.812500, 71.625000), true);
+ stroker.line_to(Point::new(19.250000, 72.000000));
+ stroker.line_to(Point::new(19.062500, 72.125000));
+ stroker.close();
+ stroker.finish();
+ }
+}
+
diff --git a/third_party/rust/aa-stroke/src/tri_rasterize.rs b/third_party/rust/aa-stroke/src/tri_rasterize.rs
new file mode 100644
index 0000000000..7a80ce437b
--- /dev/null
+++ b/third_party/rust/aa-stroke/src/tri_rasterize.rs
@@ -0,0 +1,190 @@
+/* The rasterization code here is based on piglit/tests/general/triangle-rasterization.cpp:
+
+ /**************************************************************************
+ *
+ * Copyright 2012 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+*/
+
+use std::ops::Index;
+use crate::Vertex as OutputVertex;
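+// A vertex in device space with a coverage value (0.0 = transparent, 1.0 = fully covered).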
+#[derive(Debug)]
+struct Vertex {
+ x: f32,
+ y: f32,
+ coverage: f32
+}
+#[derive(Debug)]
+struct Triangle {
+ v: [Vertex; 3],
+}
+
+impl Index<usize> for Triangle {
+ type Output = Vertex;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self.v[index]
+ }
+}
+
+// D3D11 mandates 8 bit subpixel precision:
+// https://microsoft.github.io/DirectX-Specs/d3d/archive/D3D11_3_FunctionalSpec.htm#CoordinateSnapping
+const FIXED_SHIFT: i32 = 8;
+const FIXED_ONE: f32 = (1 << FIXED_SHIFT) as f32;
+
+/* Proper rounding of float to integer */
+fn iround(mut v: f32) -> i64 {
+ if v > 0.0 {
+ v += 0.5;
+ }
+ if v < 0.0 {
+ v -= 0.5;
+ }
+ return v as i64
+}
+
+/* Based on http://devmaster.net/forums/topic/1145-advanced-rasterization */
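+/* Rasterizes `tri` into `buffer`, a `width` x `height` single-channel coverage mask,
+   using fixed-point half-edge functions. Per-pixel coverage is interpolated from the
+   vertex coverage values with barycentric weights and written as an 8-bit value. */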
+fn rast_triangle(buffer: &mut [u8], width: usize, height: usize, tri: &Triangle) {
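+ // Offset coordinates by -0.5 so integer pixel indices sample at pixel centers (x + 0.5, y + 0.5).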
+ let center_offset = -0.5;
+
+ let mut coverage1 = tri[0].coverage;
+ let mut coverage2 = tri[1].coverage;
+ let mut coverage3 = tri[2].coverage;
+
+ /* fixed point coordinates */
+ let mut x1 = iround(FIXED_ONE * (tri[0].x + center_offset));
+ let x2 = iround(FIXED_ONE * (tri[1].x + center_offset));
+ let mut x3 = iround(FIXED_ONE * (tri[2].x + center_offset));
+
+ let mut y1 = iround(FIXED_ONE * (tri[0].y + center_offset));
+ let y2 = iround(FIXED_ONE * (tri[1].y + center_offset));
+ let mut y3 = iround(FIXED_ONE * (tri[2].y + center_offset));
+
+
+ /* Force correct vertex order */
+ let cross = (x2 - x1) * (y3 - y2) - (y2 - y1) * (x3 - x2);
+ if cross > 0 {
+ std::mem::swap(&mut x1, &mut x3);
+ std::mem::swap(&mut y1, &mut y3);
+ // I don't understand why coverage 2 and 3 are swapped instead of 1 and 3
+ std::mem::swap(&mut coverage2, &mut coverage3);
+ } else {
+ std::mem::swap(&mut coverage1, &mut coverage3);
+ }
+
+ /* Deltas */
+ let dx12 = x1 - x2;
+ let dx23 = x2 - x3;
+ let dx31 = x3 - x1;
+
+ let dy12 = y1 - y2;
+ let dy23 = y2 - y3;
+ let dy31 = y3 - y1;
+
+ /* Fixed-point deltas */
+ let fdx12 = dx12 << FIXED_SHIFT;
+ let fdx23 = dx23 << FIXED_SHIFT;
+ let fdx31 = dx31 << FIXED_SHIFT;
+
+ let fdy12 = dy12 << FIXED_SHIFT;
+ let fdy23 = dy23 << FIXED_SHIFT;
+ let fdy31 = dy31 << FIXED_SHIFT;
+
+ /* Bounding rectangle */
+ let mut minx = x1.min(x2).min(x3) >> FIXED_SHIFT;
+ let mut maxx = x1.max(x2).max(x3) >> FIXED_SHIFT;
+
+ let mut miny = y1.min(y2).min(y3) >> FIXED_SHIFT;
+ let mut maxy = y1.max(y2).max(y3) >> FIXED_SHIFT;
+
+ minx = minx.max(0);
+ maxx = maxx.min(width as i64 - 1);
+
+ miny = miny.max(0);
+ maxy = maxy.min(height as i64 - 1);
+
+ /* Half-edge constants */
+ let mut c1 = dy12 * x1 - dx12 * y1;
+ let mut c2 = dy23 * x2 - dx23 * y2;
+ let mut c3 = dy31 * x3 - dx31 * y3;
+
+ /* Correct for top-left filling convention */
+ if dy12 < 0 || (dy12 == 0 && dx12 < 0) { c1 += 1 }
+ if dy23 < 0 || (dy23 == 0 && dx23 < 0) { c2 += 1 }
+ if dy31 < 0 || (dy31 == 0 && dx31 < 0) { c3 += 1 }
+
+ let mut cy1 = c1 + dx12 * (miny << FIXED_SHIFT) - dy12 * (minx << FIXED_SHIFT);
+ let mut cy2 = c2 + dx23 * (miny << FIXED_SHIFT) - dy23 * (minx << FIXED_SHIFT);
+ let mut cy3 = c3 + dx31 * (miny << FIXED_SHIFT) - dy31 * (minx << FIXED_SHIFT);
+ //dbg!(minx, maxx, tri, cross);
+ /* Perform rasterization */
+ let mut buffer = &mut buffer[miny as usize * width..];
+ for _y in miny..=maxy {
+ let mut cx1 = cy1;
+ let mut cx2 = cy2;
+ let mut cx3 = cy3;
+
+ for x in minx..=maxx {
+ if cx1 > 0 && cx2 > 0 && cx3 > 0 {
+ // cross is equal to 2*area of the triangle.
+ // we can normalize cx by 2*area to get barycentric coords.
+ let area = cross.abs() as f32;
+ let bary = (cx1 as f32 / area, cx2 as f32 / area, cx3 as f32 / area);
+ let coverages = coverage1 * bary.0 + coverage2 * bary.1 + coverage3 * bary.2;
+ let color = (coverages * 255. + 0.5) as u8;
+
+ buffer[x as usize] = color;
+ }
+
+ cx1 -= fdy12;
+ cx2 -= fdy23;
+ cx3 -= fdy31;
+ }
+
+ cy1 += fdx12;
+ cy2 += fdx23;
+ cy3 += fdx31;
+
+ buffer = &mut buffer[width..];
+ }
+}
+
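+/// Rasterizes a list of coverage-weighted triangles (three vertices per triangle,
+/// as produced by the stroker) into a `width` x `height` 8-bit coverage mask.
+///
+/// A minimal usage sketch, assuming this function and the stroker types are
+/// reachable from the crate root; the coordinates and mask size are illustrative:
+///
+/// ```ignore
+/// let mut stroker = Stroker::new(&StrokeStyle { width: 4., ..Default::default() });
+/// stroker.move_to(Point::new(10., 10.), false);
+/// stroker.line_to_capped(Point::new(50., 50.));
+/// let mask = rasterize_to_mask(&stroker.finish(), 64, 64);
+/// assert_eq!(mask.len(), 64 * 64);
+/// ```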
+pub fn rasterize_to_mask(vertices: &[OutputVertex], width: u32, height: u32) -> Box<[u8]> {
+ let mut mask = vec![0; (width * height) as usize];
+ for n in (0..vertices.len()).step_by(3) {
+ let tri = [&vertices[n], &vertices[n + 1], &vertices[n + 2]];
+ let tri = Triangle { v: [
+ Vertex { x: tri[0].x, y: tri[0].y, coverage: tri[0].coverage },
+ Vertex { x: tri[1].x, y: tri[1].y, coverage: tri[1].coverage },
+ Vertex { x: tri[2].x, y: tri[2].y, coverage: tri[2].coverage },
+ ] };
+ rast_triangle(&mut mask, width as usize, height as usize, &tri);
+ }
+ mask.into_boxed_slice()
+}