Diffstat
-rwxr-xr-x  src/2geom/CMakeLists.txt  206
-rw-r--r--  src/2geom/affine.cpp  522
-rw-r--r--  src/2geom/basic-intersection.cpp  493
-rw-r--r--  src/2geom/bezier-clipping.cpp  1174
-rw-r--r--  src/2geom/bezier-curve.cpp  695
-rw-r--r--  src/2geom/bezier-utils.cpp  997
-rw-r--r--  src/2geom/bezier.cpp  415
-rw-r--r--  src/2geom/cairo-path-sink.cpp  127
-rw-r--r--  src/2geom/circle.cpp  337
-rw-r--r--  src/2geom/concepts.cpp  69
-rw-r--r--  src/2geom/conic_section_clipper_impl.cpp  574
-rw-r--r--  src/2geom/conicsec.cpp  1640
-rw-r--r--  src/2geom/convex-hull.cpp  746
-rw-r--r--  src/2geom/coord.cpp  123
-rw-r--r--  src/2geom/crossing.cpp  233
-rw-r--r--  src/2geom/curve.cpp  235
-rw-r--r--  src/2geom/d2-sbasis.cpp  364
-rw-r--r--  src/2geom/doxygen.cpp  301
-rw-r--r--  src/2geom/ellipse.cpp  790
-rw-r--r--  src/2geom/elliptical-arc-from-sbasis.cpp  341
-rw-r--r--  src/2geom/elliptical-arc.cpp  1045
-rw-r--r--  src/2geom/geom.cpp  396
-rw-r--r--  src/2geom/intersection-graph.cpp  535
-rw-r--r--  src/2geom/intervaltree/interval_tree.cc  799
-rw-r--r--  src/2geom/intervaltree/test2.cc  74
-rw-r--r--  src/2geom/line.cpp  610
-rw-r--r--  src/2geom/nearest-time.cpp  322
-rw-r--r--  src/2geom/numeric/matrix.cpp  154
-rw-r--r--  src/2geom/orphan-code/arc-length.cpp  292
-rw-r--r--  src/2geom/orphan-code/chebyshev.cpp  126
-rw-r--r--  src/2geom/orphan-code/intersection-by-bezier-clipping.cpp  560
-rw-r--r--  src/2geom/orphan-code/intersection-by-smashing.cpp  349
-rw-r--r--  src/2geom/orphan-code/nearestpoint.cpp  405
-rw-r--r--  src/2geom/orphan-code/redblack-toy.cpp  327
-rw-r--r--  src/2geom/orphan-code/redblacktree.cpp  575
-rw-r--r--  src/2geom/orphan-code/rtree.cpp  1350
-rw-r--r--  src/2geom/parallelogram.cpp  136
-rw-r--r--  src/2geom/parting-point.cpp  280
-rw-r--r--  src/2geom/path-extrema.cpp  156
-rw-r--r--  src/2geom/path-intersection.cpp  728
-rw-r--r--  src/2geom/path-sink.cpp  104
-rw-r--r--  src/2geom/path.cpp  1161
-rw-r--r--  src/2geom/pathvector.cpp  336
-rw-r--r--  src/2geom/piecewise.cpp  266
-rw-r--r--  src/2geom/planar-graph.h  1252
-rw-r--r--  src/2geom/point.cpp  274
-rw-r--r--  src/2geom/polynomial.cpp  337
-rw-r--r--  src/2geom/rect.cpp  187
-rw-r--r--  src/2geom/recursive-bezier-intersection.cpp  476
-rw-r--r--  src/2geom/sbasis-2d.cpp  202
-rw-r--r--  src/2geom/sbasis-geometric.cpp  790
-rw-r--r--  src/2geom/sbasis-math.cpp  379
-rw-r--r--  src/2geom/sbasis-poly.cpp  59
-rw-r--r--  src/2geom/sbasis-roots.cpp  656
-rw-r--r--  src/2geom/sbasis-to-bezier.cpp  584
-rw-r--r--  src/2geom/sbasis.cpp  681
-rw-r--r--  src/2geom/self-intersect.cpp  313
-rw-r--r--  src/2geom/solve-bezier-one-d.cpp  243
-rw-r--r--  src/2geom/solve-bezier-parametric.cpp  189
-rw-r--r--  src/2geom/solve-bezier.cpp  304
-rw-r--r--  src/2geom/svg-path-parser.cpp  1615
-rw-r--r--  src/2geom/svg-path-parser.rl  487
-rw-r--r--  src/2geom/svg-path-writer.cpp  296
-rw-r--r--  src/2geom/sweep-bounds.cpp  154
-rw-r--r--  src/2geom/transforms.cpp  205
-rw-r--r--  src/2geom/utils.cpp  86
66 files changed, 31237 insertions, 0 deletions
diff --git a/src/2geom/CMakeLists.txt b/src/2geom/CMakeLists.txt
new file mode 100755
index 0000000..80b9e8e
--- /dev/null
+++ b/src/2geom/CMakeLists.txt
@@ -0,0 +1,206 @@
+# (re-)generate parser file with ragel if it's available
+SET(SVG_PARSER_CPP "svg-path-parser.cpp")
+SET(SVG_PARSER_RL "svg-path-parser.rl")
+find_program(RAGEL_PROGRAM
+ NAMES ragel
+ HINTS /usr/bin
+ /usr/local/bin
+)
+if(RAGEL_PROGRAM)
+ message(STATUS "Found Ragel in ${RAGEL_PROGRAM}. ${SVG_PARSER_CPP} will be recreated from ${SVG_PARSER_RL}.")
+ add_custom_command(OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/${SVG_PARSER_CPP}"
+ COMMAND ${RAGEL_PROGRAM} -o "${SVG_PARSER_CPP}" "${SVG_PARSER_RL}"
+ DEPENDS "${SVG_PARSER_RL}"
+ WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
+ COMMENT "Generating ${SVG_PARSER_CPP} with ragel")
+else()
+ message(STATUS "Ragel NOT found. Using stale ${SVG_PARSER_CPP}.")
+endif()
+
+
+add_library(2geom ${LIB_TYPE}
+ # sources
+ affine.cpp
+
+ basic-intersection.cpp
+ bezier.cpp
+ bezier-clipping.cpp
+ bezier-curve.cpp
+ bezier-utils.cpp
+
+ cairo-path-sink.cpp
+ circle.cpp
+ concepts.cpp
+ conicsec.cpp
+ conic_section_clipper_impl.cpp
+ convex-hull.cpp
+ coord.cpp
+ crossing.cpp
+ curve.cpp
+
+ d2-sbasis.cpp
+
+ ellipse.cpp
+ elliptical-arc.cpp
+ elliptical-arc-from-sbasis.cpp
+
+ geom.cpp
+
+ intersection-graph.cpp
+
+ line.cpp
+
+ nearest-time.cpp
+
+ numeric/matrix.cpp
+
+ parallelogram.cpp
+ parting-point.cpp
+ path-extrema.cpp
+ path-intersection.cpp
+ path-sink.cpp
+ path.cpp
+ pathvector.cpp
+ piecewise.cpp
+ point.cpp
+ polynomial.cpp
+
+ rect.cpp
+ recursive-bezier-intersection.cpp
+
+ sbasis-2d.cpp
+ sbasis-geometric.cpp
+ sbasis-math.cpp
+ sbasis-poly.cpp
+ sbasis-roots.cpp
+ sbasis-to-bezier.cpp
+ sbasis.cpp
+ self-intersect.cpp
+ solve-bezier.cpp
+ solve-bezier-one-d.cpp
+ solve-bezier-parametric.cpp
+ svg-path-parser.cpp
+ svg-path-writer.cpp
+ sweep-bounds.cpp
+
+ transforms.cpp
+
+ utils.cpp
+
+ # headers (for IDE support only)
+ # private:
+ planar-graph.h
+
+ # public:
+ ${2GEOM_INCLUDE_DIR}/2geom/affine.h
+ ${2GEOM_INCLUDE_DIR}/2geom/angle.h
+
+ ${2GEOM_INCLUDE_DIR}/2geom/basic-intersection.h
+ ${2GEOM_INCLUDE_DIR}/2geom/bezier.h
+ ${2GEOM_INCLUDE_DIR}/2geom/bezier-curve.h
+ ${2GEOM_INCLUDE_DIR}/2geom/bezier-to-sbasis.h
+ ${2GEOM_INCLUDE_DIR}/2geom/bezier-utils.h
+
+ ${2GEOM_INCLUDE_DIR}/2geom/cairo-path-sink.h
+ ${2GEOM_INCLUDE_DIR}/2geom/choose.h
+ ${2GEOM_INCLUDE_DIR}/2geom/circle.h
+ ${2GEOM_INCLUDE_DIR}/2geom/concepts.h
+ ${2GEOM_INCLUDE_DIR}/2geom/conicsec.h
+ ${2GEOM_INCLUDE_DIR}/2geom/conic_section_clipper.h
+ ${2GEOM_INCLUDE_DIR}/2geom/conic_section_clipper_cr.h
+ ${2GEOM_INCLUDE_DIR}/2geom/conic_section_clipper_impl.h
+ ${2GEOM_INCLUDE_DIR}/2geom/convex-hull.h
+ ${2GEOM_INCLUDE_DIR}/2geom/coord.h
+ ${2GEOM_INCLUDE_DIR}/2geom/crossing.h
+ ${2GEOM_INCLUDE_DIR}/2geom/curve.h
+ ${2GEOM_INCLUDE_DIR}/2geom/curves.h
+
+ ${2GEOM_INCLUDE_DIR}/2geom/d2.h
+
+ ${2GEOM_INCLUDE_DIR}/2geom/ellipse.h
+ ${2GEOM_INCLUDE_DIR}/2geom/elliptical-arc.h
+ ${2GEOM_INCLUDE_DIR}/2geom/exception.h
+
+ ${2GEOM_INCLUDE_DIR}/2geom/forward.h
+
+ ${2GEOM_INCLUDE_DIR}/2geom/geom.h
+
+ ${2GEOM_INCLUDE_DIR}/2geom/intersection.h
+ ${2GEOM_INCLUDE_DIR}/2geom/intersection-graph.h
+
+ ${2GEOM_INCLUDE_DIR}/2geom/line.h
+ ${2GEOM_INCLUDE_DIR}/2geom/linear.h
+
+ ${2GEOM_INCLUDE_DIR}/2geom/math-utils.h
+
+ ${2GEOM_INCLUDE_DIR}/2geom/nearest-time.h
+
+ ${2GEOM_INCLUDE_DIR}/2geom/ord.h
+
+ ${2GEOM_INCLUDE_DIR}/2geom/parallelogram.h
+ ${2GEOM_INCLUDE_DIR}/2geom/path-intersection.h
+ ${2GEOM_INCLUDE_DIR}/2geom/path-sink.h
+ ${2GEOM_INCLUDE_DIR}/2geom/path.h
+ ${2GEOM_INCLUDE_DIR}/2geom/pathvector.h
+ ${2GEOM_INCLUDE_DIR}/2geom/piecewise.h
+ ${2GEOM_INCLUDE_DIR}/2geom/point.h
+ ${2GEOM_INCLUDE_DIR}/2geom/polynomial.h
+
+ ${2GEOM_INCLUDE_DIR}/2geom/ray.h
+ ${2GEOM_INCLUDE_DIR}/2geom/rect.h
+
+ ${2GEOM_INCLUDE_DIR}/2geom/sbasis-2d.h
+ ${2GEOM_INCLUDE_DIR}/2geom/sbasis-curve.h
+ ${2GEOM_INCLUDE_DIR}/2geom/sbasis-geometric.h
+ ${2GEOM_INCLUDE_DIR}/2geom/sbasis-math.h
+ ${2GEOM_INCLUDE_DIR}/2geom/sbasis-poly.h
+ ${2GEOM_INCLUDE_DIR}/2geom/sbasis-to-bezier.h
+ ${2GEOM_INCLUDE_DIR}/2geom/sbasis.h
+ ${2GEOM_INCLUDE_DIR}/2geom/solver.h
+ ${2GEOM_INCLUDE_DIR}/2geom/svg-path-parser.h
+ ${2GEOM_INCLUDE_DIR}/2geom/svg-path-writer.h
+ ${2GEOM_INCLUDE_DIR}/2geom/sweeper.h
+ ${2GEOM_INCLUDE_DIR}/2geom/sweep-bounds.h
+
+ ${2GEOM_INCLUDE_DIR}/2geom/transforms.h
+
+ ${2GEOM_INCLUDE_DIR}/2geom/utils.h
+)
+
+# make lib for 2geom
+target_include_directories(2geom
+ PUBLIC
+ ${GLIB_INCLUDE_DIRS}
+ ${GSL_INCLUDE_DIRS}
+ ${CAIRO_INCLUDE_DIRS}
+ ${DoubleConversion_INCLUDE_DIRS}
+ $<BUILD_INTERFACE:${2GEOM_INCLUDE_DIR}>
+ $<BUILD_INTERFACE:${2GEOM_INCLUDE_DIR}/2geom>
+ $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}/2geom-${2GEOM_VERSION}>
+ )
+
+target_link_libraries(2geom
+ PUBLIC
+ ${GLIB_LIBRARIES}
+ ${GSL_LIBRARIES}
+ ${CAIRO_LIBRARIES}
+ ${DoubleConversion_LIBRARIES}
+ )
+
+set_target_properties(2geom PROPERTIES SOVERSION "${2GEOM_ABI_VERSION}")
+
+install(TARGETS 2geom
+ EXPORT 2geom_targets
+ RUNTIME
+ DESTINATION ${CMAKE_INSTALL_BINDIR}
+ COMPONENT "lib2geom${2GEOM_VERSION}"
+ LIBRARY
+ DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ COMPONENT "lib2geom${2GEOM_VERSION}"
+ NAMELINK_COMPONENT "lib2geom_dev"
+ ARCHIVE
+ DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ COMPONENT "lib2geom${2GEOM_VERSION}"
+)
+
+add_library(2Geom::2geom ALIAS 2geom)
diff --git a/src/2geom/affine.cpp b/src/2geom/affine.cpp
new file mode 100644
index 0000000..48179e8
--- /dev/null
+++ b/src/2geom/affine.cpp
@@ -0,0 +1,522 @@
+/*
+ * Authors:
+ * Lauris Kaplinski <lauris@kaplinski.com>
+ * Michael G. Sloan <mgsloan@gmail.com>
+ *
+ * This code is in public domain
+ */
+
+#include <2geom/affine.h>
+#include <2geom/point.h>
+#include <2geom/polynomial.h>
+#include <2geom/utils.h>
+
+namespace Geom {
+
+/** Creates an Affine given the axes and the origin point.
+ * The axes are represented as two vectors, which together encode the skew, rotation, and scaling of the two dimensions.
+ * from_basis(Point(1, 0), Point(0, 1), Point(0, 0)) would return the identity matrix.
+
+ \param x_basis the vector for the x-axis.
+ \param y_basis the vector for the y-axis.
+ \param offset the translation applied by the matrix.
+ \return The new Affine.
+ */
+//NOTE: Inkscape's version is broken, so when including this version you'll have to search for code that uses this function.
+Affine from_basis(Point const &x_basis, Point const &y_basis, Point const &offset) {
+ return Affine(x_basis[X], x_basis[Y],
+ y_basis[X], y_basis[Y],
+ offset [X], offset [Y]);
+}
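For illustration, a minimal usage sketch (the basis vectors and offset below are made up, and it assumes 2geom's usual row-vector convention where a point is transformed as p * m):

    #include <2geom/affine.h>
    #include <2geom/point.h>

    int main() {
        using namespace Geom;
        // x-axis scaled by 2, y-axis unchanged, translation (5, 7)
        Affine m = from_basis(Point(2, 0), Point(0, 1), Point(5, 7));
        Point p = Point(1, 1) * m;   // (1*2 + 1*0 + 5, 1*0 + 1*1 + 7) == (7, 8)
        (void)p;                     // keep the compiler quiet in this sketch
        return 0;
    }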
+
+Point Affine::xAxis() const {
+ return Point(_c[0], _c[1]);
+}
+
+Point Affine::yAxis() const {
+ return Point(_c[2], _c[3]);
+}
+
+/// Gets the translation imparted by the Affine.
+Point Affine::translation() const {
+ return Point(_c[4], _c[5]);
+}
+
+void Affine::setXAxis(Point const &vec) {
+ for(int i = 0; i < 2; i++)
+ _c[i] = vec[i];
+}
+
+void Affine::setYAxis(Point const &vec) {
+ for(int i = 0; i < 2; i++)
+ _c[i + 2] = vec[i];
+}
+
+/// Sets the translation imparted by the Affine.
+void Affine::setTranslation(Point const &loc) {
+ for(int i = 0; i < 2; i++)
+ _c[i + 4] = loc[i];
+}
+
+/** Calculates the amount of x-scaling imparted by the Affine. This is the scaling applied to
+ * the original x-axis region. It is \emph{not} the overall x-scaling of the transformation.
+ * Equivalent to L2(m.xAxis()). */
+double Affine::expansionX() const {
+ return sqrt(_c[0] * _c[0] + _c[1] * _c[1]);
+}
+
+/** Calculates the amount of y-scaling imparted by the Affine. This is the scaling applied before
+ * the other transformations. It is \emph{not} the overall y-scaling of the transformation.
+ * Equivalent to L2(m.yAxis()). */
+double Affine::expansionY() const {
+ return sqrt(_c[2] * _c[2] + _c[3] * _c[3]);
+}
+
+void Affine::setExpansionX(double val) {
+ double exp_x = expansionX();
+ if (exp_x != 0.0) { //TODO: best way to deal with it is to skip op?
+ double coef = val / expansionX();
+ for (unsigned i = 0; i < 2; ++i) {
+ _c[i] *= coef;
+ }
+ }
+}
+
+void Affine::setExpansionY(double val) {
+ double exp_y = expansionY();
+ if (exp_y != 0.0) { //TODO: best way to deal with it is to skip op?
+ double coef = val / expansionY();
+ for (unsigned i = 2; i < 4; ++i) {
+ _c[i] *= coef;
+ }
+ }
+}
+
+/** Sets this matrix to be the Identity Affine. */
+void Affine::setIdentity() {
+ _c[0] = 1.0; _c[1] = 0.0;
+ _c[2] = 0.0; _c[3] = 1.0;
+ _c[4] = 0.0; _c[5] = 0.0;
+}
+
+/** @brief Check whether this matrix is an identity matrix.
+ * @param eps Numerical tolerance
+ * @return True iff the matrix is of the form
+ * \f$\left[\begin{array}{ccc}
+ 1 & 0 & 0 \\
+ 0 & 1 & 0 \\
+ 0 & 0 & 1 \end{array}\right]\f$ */
+bool Affine::isIdentity(Coord eps) const {
+ return are_near(_c[0], 1.0, eps) && are_near(_c[1], 0.0, eps) &&
+ are_near(_c[2], 0.0, eps) && are_near(_c[3], 1.0, eps) &&
+ are_near(_c[4], 0.0, eps) && are_near(_c[5], 0.0, eps);
+}
+
+/** @brief Check whether this matrix represents a pure translation.
+ * Will return true for the identity matrix, which represents a zero translation.
+ * @param eps Numerical tolerance
+ * @return True iff the matrix is of the form
+ * \f$\left[\begin{array}{ccc}
+ 1 & 0 & 0 \\
+ 0 & 1 & 0 \\
+ a & b & 1 \end{array}\right]\f$ */
+bool Affine::isTranslation(Coord eps) const {
+ return are_near(_c[0], 1.0, eps) && are_near(_c[1], 0.0, eps) &&
+ are_near(_c[2], 0.0, eps) && are_near(_c[3], 1.0, eps);
+}
+/** @brief Check whether this matrix represents a pure nonzero translation.
+ * @param eps Numerical tolerance
+ * @return True iff the matrix is of the form
+ * \f$\left[\begin{array}{ccc}
+ 1 & 0 & 0 \\
+ 0 & 1 & 0 \\
+ a & b & 1 \end{array}\right]\f$ with \f$(a, b) \neq (0, 0)\f$ */
+bool Affine::isNonzeroTranslation(Coord eps) const {
+ return are_near(_c[0], 1.0, eps) && are_near(_c[1], 0.0, eps) &&
+ are_near(_c[2], 0.0, eps) && are_near(_c[3], 1.0, eps) &&
+ (!are_near(_c[4], 0.0, eps) || !are_near(_c[5], 0.0, eps));
+}
+
+/** @brief Check whether this matrix represents pure scaling.
+ * @param eps Numerical tolerance
+ * @return True iff the matrix is of the form
+ * \f$\left[\begin{array}{ccc}
+ a & 0 & 0 \\
+ 0 & b & 0 \\
+ 0 & 0 & 1 \end{array}\right]\f$. */
+bool Affine::isScale(Coord eps) const {
+ if (isSingular(eps)) return false;
+ return are_near(_c[1], 0.0, eps) && are_near(_c[2], 0.0, eps) &&
+ are_near(_c[4], 0.0, eps) && are_near(_c[5], 0.0, eps);
+}
+
+/** @brief Check whether this matrix represents pure, nonzero scaling.
+ * @param eps Numerical tolerance
+ * @return True iff the matrix is of the form
+ * \f$\left[\begin{array}{ccc}
+ a & 0 & 0 \\
+ 0 & b & 0 \\
+ 0 & 0 & 1 \end{array}\right]\f$ with \f$(a, b) \neq (1, 1)\f$. */
+bool Affine::isNonzeroScale(Coord eps) const {
+ if (isSingular(eps)) return false;
+ return (!are_near(_c[0], 1.0, eps) || !are_near(_c[3], 1.0, eps)) && //NOTE: these are the diags, and the next line opposite diags
+ are_near(_c[1], 0.0, eps) && are_near(_c[2], 0.0, eps) &&
+ are_near(_c[4], 0.0, eps) && are_near(_c[5], 0.0, eps);
+}
+
+/** @brief Check whether this matrix represents pure uniform scaling.
+ * @param eps Numerical tolerance
+ * @return True iff the matrix is of the form
+ * \f$\left[\begin{array}{ccc}
+ a_1 & 0 & 0 \\
+ 0 & a_2 & 0 \\
+ 0 & 0 & 1 \end{array}\right]\f$ where \f$|a_1| = |a_2|\f$. */
+bool Affine::isUniformScale(Coord eps) const {
+ if (isSingular(eps)) return false;
+ return are_near(fabs(_c[0]), fabs(_c[3]), eps) &&
+ are_near(_c[1], 0.0, eps) && are_near(_c[2], 0.0, eps) &&
+ are_near(_c[4], 0.0, eps) && are_near(_c[5], 0.0, eps);
+}
+
+/** @brief Check whether this matrix represents pure, nonzero uniform scaling.
+ * @param eps Numerical tolerance
+ * @return True iff the matrix is of the form
+ * \f$\left[\begin{array}{ccc}
+ a_1 & 0 & 0 \\
+ 0 & a_2 & 0 \\
+ 0 & 0 & 1 \end{array}\right]\f$ where \f$|a_1| = |a_2|\f$
+ * and \f$(a_1, a_2) \neq (1, 1)\f$. */
+bool Affine::isNonzeroUniformScale(Coord eps) const {
+ if (isSingular(eps)) return false;
+ // we need to test both c0 and c3 to handle the case of flips,
+ // which should be treated as nonzero uniform scales
+ return !(are_near(_c[0], 1.0, eps) && are_near(_c[3], 1.0, eps)) &&
+ are_near(fabs(_c[0]), fabs(_c[3]), eps) &&
+ are_near(_c[1], 0.0, eps) && are_near(_c[2], 0.0, eps) &&
+ are_near(_c[4], 0.0, eps) && are_near(_c[5], 0.0, eps);
+}
+
+/** @brief Check whether this matrix represents a pure rotation.
+ * @param eps Numerical tolerance
+ * @return True iff the matrix is of the form
+ * \f$\left[\begin{array}{ccc}
+ a & b & 0 \\
+ -b & a & 0 \\
+ 0 & 0 & 1 \end{array}\right]\f$ and \f$a^2 + b^2 = 1\f$. */
+bool Affine::isRotation(Coord eps) const {
+ return are_near(_c[0], _c[3], eps) && are_near(_c[1], -_c[2], eps) &&
+ are_near(_c[4], 0.0, eps) && are_near(_c[5], 0.0, eps) &&
+ are_near(_c[0]*_c[0] + _c[1]*_c[1], 1.0, eps);
+}
+
+/** @brief Check whether this matrix represents a pure, nonzero rotation.
+ * @param eps Numerical tolerance
+ * @return True iff the matrix is of the form
+ * \f$\left[\begin{array}{ccc}
+ a & b & 0 \\
+ -b & a & 0 \\
+ 0 & 0 & 1 \end{array}\right]\f$, \f$a^2 + b^2 = 1\f$ and \f$a \neq 1\f$. */
+bool Affine::isNonzeroRotation(Coord eps) const {
+ return !are_near(_c[0], 1.0, eps) &&
+ are_near(_c[0], _c[3], eps) && are_near(_c[1], -_c[2], eps) &&
+ are_near(_c[4], 0.0, eps) && are_near(_c[5], 0.0, eps) &&
+ are_near(_c[0]*_c[0] + _c[1]*_c[1], 1.0, eps);
+}
+
+/** @brief Check whether this matrix represents a non-zero rotation about any point.
+ * @param eps Numerical tolerance
+ * @return True iff the matrix is of the form
+ * \f$\left[\begin{array}{ccc}
+ a & b & 0 \\
+ -b & a & 0 \\
+ c & d & 1 \end{array}\right]\f$, \f$a^2 + b^2 = 1\f$ and \f$a \neq 1\f$. */
+bool Affine::isNonzeroNonpureRotation(Coord eps) const {
+ return !are_near(_c[0], 1.0, eps) &&
+ are_near(_c[0], _c[3], eps) && are_near(_c[1], -_c[2], eps) &&
+ are_near(_c[0]*_c[0] + _c[1]*_c[1], 1.0, eps);
+}
+
+/** @brief For a (possibly non-pure) non-zero-rotation matrix, calculate the rotation center.
+ * @pre The matrix must be a non-zero-rotation matrix to prevent division by zero, see isNonzeroNonpureRotation().
+ * @return The rotation center x, the solution to the equation
+ * \f$A x = x\f$. */
+Point Affine::rotationCenter() const {
+ Coord x = (_c[2]*_c[5]+_c[4]-_c[4]*_c[3]) / (1-_c[3]-_c[0]+_c[0]*_c[3]-_c[2]*_c[1]);
+ Coord y = (_c[1]*x + _c[5]) / (1 - _c[3]);
+ return Point(x,y);
+};
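For reference, the closed form above follows from solving the fixed-point equation in the row-vector convention used throughout this file, where a point maps as (x, y) -> (c_0 x + c_2 y + c_4, c_1 x + c_3 y + c_5):

    x = c_0 x + c_2 y + c_4, \qquad y = c_1 x + c_3 y + c_5
    \implies y = \frac{c_1 x + c_5}{1 - c_3}, \qquad
             x = \frac{c_2 c_5 + c_4 - c_4 c_3}{1 - c_0 - c_3 + c_0 c_3 - c_1 c_2}

which matches the expressions computed in the code (here c_0 ... c_5 are the entries of _c[]).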
+
+/** @brief Check whether this matrix represents pure horizontal shearing.
+ * @param eps Numerical tolerance
+ * @return True iff the matrix is of the form
+ * \f$\left[\begin{array}{ccc}
+ 1 & 0 & 0 \\
+ k & 1 & 0 \\
+ 0 & 0 & 1 \end{array}\right]\f$. */
+bool Affine::isHShear(Coord eps) const {
+ return are_near(_c[0], 1.0, eps) && are_near(_c[1], 0.0, eps) &&
+ are_near(_c[3], 1.0, eps) && are_near(_c[4], 0.0, eps) &&
+ are_near(_c[5], 0.0, eps);
+}
+/** @brief Check whether this matrix represents pure, nonzero horizontal shearing.
+ * @param eps Numerical tolerance
+ * @return True iff the matrix is of the form
+ * \f$\left[\begin{array}{ccc}
+ 1 & 0 & 0 \\
+ k & 1 & 0 \\
+ 0 & 0 & 1 \end{array}\right]\f$ and \f$k \neq 0\f$. */
+bool Affine::isNonzeroHShear(Coord eps) const {
+ return are_near(_c[0], 1.0, eps) && are_near(_c[1], 0.0, eps) &&
+ !are_near(_c[2], 0.0, eps) && are_near(_c[3], 1.0, eps) &&
+ are_near(_c[4], 0.0, eps) && are_near(_c[5], 0.0, eps);
+}
+
+/** @brief Check whether this matrix represents pure vertical shearing.
+ * @param eps Numerical tolerance
+ * @return True iff the matrix is of the form
+ * \f$\left[\begin{array}{ccc}
+ 1 & k & 0 \\
+ 0 & 1 & 0 \\
+ 0 & 0 & 1 \end{array}\right]\f$. */
+bool Affine::isVShear(Coord eps) const {
+ return are_near(_c[0], 1.0, eps) && are_near(_c[2], 0.0, eps) &&
+ are_near(_c[3], 1.0, eps) && are_near(_c[4], 0.0, eps) &&
+ are_near(_c[5], 0.0, eps);
+}
+
+/** @brief Check whether this matrix represents pure, nonzero vertical shearing.
+ * @param eps Numerical tolerance
+ * @return True iff the matrix is of the form
+ * \f$\left[\begin{array}{ccc}
+ 1 & k & 0 \\
+ 0 & 1 & 0 \\
+ 0 & 0 & 1 \end{array}\right]\f$ and \f$k \neq 0\f$. */
+bool Affine::isNonzeroVShear(Coord eps) const {
+ return are_near(_c[0], 1.0, eps) && !are_near(_c[1], 0.0, eps) &&
+ are_near(_c[2], 0.0, eps) && are_near(_c[3], 1.0, eps) &&
+ are_near(_c[4], 0.0, eps) && are_near(_c[5], 0.0, eps);
+}
+
+/** @brief Check whether this matrix represents zooming.
+ * Zooming is any combination of translation and uniform non-flipping scaling.
+ * It preserves angles, ratios of distances between arbitrary points
+ * and unit vectors of line segments.
+ * @param eps Numerical tolerance
+ * @return True iff the matrix is invertible and of the form
+ * \f$\left[\begin{array}{ccc}
+ a & 0 & 0 \\
+ 0 & a & 0 \\
+ b & c & 1 \end{array}\right]\f$. */
+bool Affine::isZoom(Coord eps) const {
+ if (isSingular(eps)) return false;
+ return are_near(_c[0], _c[3], eps) && are_near(_c[1], 0, eps) && are_near(_c[2], 0, eps);
+}
+
+/** @brief Check whether the transformation preserves areas of polygons.
+ * This means that the transformation can be any combination of translation, rotation,
+ * shearing and squeezing (non-uniform scaling such that the absolute value of the product
+ * of Y-scale and X-scale is 1).
+ * @param eps Numerical tolerance
+ * @return True iff \f$|\det A| = 1\f$. */
+bool Affine::preservesArea(Coord eps) const
+{
+ return are_near(descrim2(), 1.0, eps);
+}
+
+/** @brief Check whether the transformation preserves angles between lines.
+ * This means that the transformation can be any combination of translation, uniform scaling,
+ * rotation and flipping.
+ * @param eps Numerical tolerance
+ * @return True iff the matrix is of the form
+ * \f$\left[\begin{array}{ccc}
+ a & b & 0 \\
+ -b & a & 0 \\
+ c & d & 1 \end{array}\right]\f$ or
+ \f$\left[\begin{array}{ccc}
+ -a & b & 0 \\
+ b & a & 0 \\
+ c & d & 1 \end{array}\right]\f$. */
+bool Affine::preservesAngles(Coord eps) const
+{
+ if (isSingular(eps)) return false;
+ return (are_near(_c[0], _c[3], eps) && are_near(_c[1], -_c[2], eps)) ||
+ (are_near(_c[0], -_c[3], eps) && are_near(_c[1], _c[2], eps));
+}
+
+/** @brief Check whether the transformation preserves distances between points.
+ * This means that the transformation can be any combination of translation,
+ * rotation and flipping.
+ * @param eps Numerical tolerance
+ * @return True iff the matrix is of the form
+ * \f$\left[\begin{array}{ccc}
+ a & b & 0 \\
+ -b & a & 0 \\
+ c & d & 1 \end{array}\right]\f$ or
+ \f$\left[\begin{array}{ccc}
+ -a & b & 0 \\
+ b & a & 0 \\
+ c & d & 1 \end{array}\right]\f$ and \f$a^2 + b^2 = 1\f$. */
+bool Affine::preservesDistances(Coord eps) const
+{
+ return ((are_near(_c[0], _c[3], eps) && are_near(_c[1], -_c[2], eps)) ||
+ (are_near(_c[0], -_c[3], eps) && are_near(_c[1], _c[2], eps))) &&
+ are_near(_c[0] * _c[0] + _c[1] * _c[1], 1.0, eps);
+}
+
+/** @brief Check whether this transformation flips objects.
+ * A transformation flips objects if it has a negative scaling component. */
+bool Affine::flips() const {
+ return det() < 0;
+}
+
+/** @brief Check whether this matrix is singular.
+ * Singular matrices have no inverse, which means that applying them to a set of points
+ * results in a loss of information.
+ * @param eps Numerical tolerance
+ * @return True iff the determinant is near zero. */
+bool Affine::isSingular(Coord eps) const {
+ return are_near(det(), 0.0, eps);
+}
+
+/** @brief Compute the inverse matrix.
+ * Inverse is a matrix (denoted \f$A^{-1}\f$) such that \f$AA^{-1} = A^{-1}A = I\f$.
+ * Singular matrices have no inverse (for example a matrix that has two of its columns equal).
+ * For such matrices, the identity matrix will be returned instead.
+ * @param eps Numerical tolerance
+ * @return Inverse of the matrix, or the identity matrix if the inverse is undefined.
+ * @post (m * m.inverse()).isIdentity() == true */
+Affine Affine::inverse() const {
+ Affine d;
+
+ double mx = std::max(fabs(_c[0]) + fabs(_c[1]),
+ fabs(_c[2]) + fabs(_c[3])); // a random matrix norm (either l1 or linfty
+ if(mx > 0) {
+ Geom::Coord const determ = det();
+ if (!rel_error_bound(std::sqrt(fabs(determ)), mx)) {
+ Geom::Coord const ideterm = 1.0 / (determ);
+
+ d._c[0] = _c[3] * ideterm;
+ d._c[1] = -_c[1] * ideterm;
+ d._c[2] = -_c[2] * ideterm;
+ d._c[3] = _c[0] * ideterm;
+ d._c[4] = (-_c[4] * d._c[0] - _c[5] * d._c[2]);
+ d._c[5] = (-_c[4] * d._c[1] - _c[5] * d._c[3]);
+ } else {
+ d.setIdentity();
+ }
+ } else {
+ d.setIdentity();
+ }
+
+ return d;
+}
+
+/** @brief Calculate the determinant.
+ * @return \f$\det A\f$. */
+Coord Affine::det() const {
+ // TODO this can overflow
+ return _c[0] * _c[3] - _c[1] * _c[2];
+}
+
+/** @brief Calculate the square of the discriminant.
+ * This is simply the absolute value of the determinant.
+ * @return \f$|\det A|\f$. */
+Coord Affine::descrim2() const {
+ return fabs(det());
+}
+
+/** @brief Calculate the discriminant.
+ * If the matrix doesn't contain a shearing or non-uniform scaling component, this value says
+ * how the length of any line segment will change after applying this transformation
+ * to arbitrary objects on a plane. The new length will be
+ * @code line_seg.length() * m.descrim() @endcode
+ * @return \f$\sqrt{|\det A|}\f$. */
+Coord Affine::descrim() const {
+ return sqrt(descrim2());
+}
+
+/** @brief Combine this transformation with another one.
+ * After this operation, the matrix will correspond to the transformation
+ * obtained by first applying the original version of this matrix, and then
+ * applying @a m. */
+Affine &Affine::operator*=(Affine const &o) {
+ Coord nc[6];
+ for(int a = 0; a < 5; a += 2) {
+ for(int b = 0; b < 2; b++) {
+ nc[a + b] = _c[a] * o._c[b] + _c[a + 1] * o._c[b + 2];
+ }
+ }
+ for(int a = 0; a < 6; ++a) {
+ _c[a] = nc[a];
+ }
+ _c[4] += o._c[4];
+ _c[5] += o._c[5];
+ return *this;
+}
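A small sketch of what this ordering means in practice (hypothetical matrices; assumes the row-vector p * m convention used in this file):

    #include <2geom/affine.h>
    #include <2geom/point.h>

    int main() {
        using namespace Geom;
        Affine S(2, 0, 0, 2, 0, 0);   // uniform scale by 2
        Affine T(1, 0, 0, 1, 5, 0);   // translate by (5, 0)
        Point p(1, 1);
        Point a = p * (S * T);        // scale first, then translate: (7, 2)
        Point b = p * (T * S);        // translate first, then scale: (12, 2)
        (void)a; (void)b;
        return 0;
    }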
+
+//TODO: What's this!?!
+/** Given a matrix m such that unit_circle = m*x, this returns the
+ * quadratic form x*A*x = 1.
+ * @relates Affine */
+Affine elliptic_quadratic_form(Affine const &m) {
+ double od = m[0] * m[1] + m[2] * m[3];
+ Affine ret (m[0]*m[0] + m[1]*m[1], od,
+ od, m[2]*m[2] + m[3]*m[3],
+ 0, 0);
+ return ret; // allow NRVO
+}
+
+Eigen::Eigen(Affine const &m) {
+ double const B = -m[0] - m[3];
+ double const C = m[0]*m[3] - m[1]*m[2];
+
+ std::vector<double> v = solve_quadratic(1, B, C);
+
+ for (unsigned i = 0; i < v.size(); ++i) {
+ values[i] = v[i];
+ vectors[i] = unit_vector(rot90(Point(m[0] - values[i], m[1])));
+ }
+ for (unsigned i = v.size(); i < 2; ++i) {
+ values[i] = 0;
+ vectors[i] = Point(0,0);
+ }
+}
+
+Eigen::Eigen(double m[2][2]) {
+ double const B = -m[0][0] - m[1][1];
+ double const C = m[0][0]*m[1][1] - m[1][0]*m[0][1];
+
+ std::vector<double> v = solve_quadratic(1, B, C);
+
+ for (unsigned i = 0; i < v.size(); ++i) {
+ values[i] = v[i];
+ vectors[i] = unit_vector(rot90(Point(m[0][0] - values[i], m[0][1])));
+ }
+ for (unsigned i = v.size(); i < 2; ++i) {
+ values[i] = 0;
+ vectors[i] = Point(0,0);
+ }
+}
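In both constructors the eigenvalues are the roots of the characteristic polynomial of the 2x2 linear part, which is why solve_quadratic is called with the negated trace and the determinant:

    \lambda^2 - (m_{00} + m_{11})\,\lambda + (m_{00} m_{11} - m_{01} m_{10}) = 0

Each eigenvector is then recovered as a unit vector orthogonal to the first row of (M - \lambda I), which is what the rot90 call computes.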
+
+/** @brief Nearness predicate for affine transforms.
+ * @returns True if all entries of matrices are within eps of each other.
+ * @relates Affine */
+bool are_near(Affine const &a, Affine const &b, Coord eps)
+{
+ return are_near(a[0], b[0], eps) && are_near(a[1], b[1], eps) &&
+ are_near(a[2], b[2], eps) && are_near(a[3], b[3], eps) &&
+ are_near(a[4], b[4], eps) && are_near(a[5], b[5], eps);
+}
+
+} //namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/basic-intersection.cpp b/src/2geom/basic-intersection.cpp
new file mode 100644
index 0000000..61d7a6d
--- /dev/null
+++ b/src/2geom/basic-intersection.cpp
@@ -0,0 +1,493 @@
+/** @file
+ * @brief Basic intersection routines
+ *//*
+ * Authors:
+ * Nathan Hurst <njh@njhurst.com>
+ * Marco Cecchetti <mrcekets at gmail.com>
+ * Jean-François Barraud <jf.barraud@gmail.com>
+ *
+ * Copyright 2008-2009 Authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ */
+
+#include <2geom/basic-intersection.h>
+#include <2geom/sbasis-to-bezier.h>
+#include <2geom/exception.h>
+
+#ifdef HAVE_GSL
+#include <gsl/gsl_vector.h>
+#include <gsl/gsl_multiroots.h>
+#endif
+
+using std::vector;
+namespace Geom {
+
+//#ifdef USE_RECURSIVE_INTERSECTOR
+
+// void find_intersections(std::vector<std::pair<double, double> > &xs,
+// D2<SBasis> const & A,
+// D2<SBasis> const & B) {
+// vector<Point> BezA, BezB;
+// sbasis_to_bezier(BezA, A);
+// sbasis_to_bezier(BezB, B);
+
+// xs.clear();
+
+// find_intersections_bezier_recursive(xs, BezA, BezB);
+// }
+// void find_intersections(std::vector< std::pair<double, double> > & xs,
+// std::vector<Point> const& A,
+// std::vector<Point> const& B,
+// double precision){
+// find_intersections_bezier_recursive(xs, A, B, precision);
+// }
+
+//#else
+
+namespace detail{ namespace bezier_clipping {
+void portion(std::vector<Point> &B, Interval const &I);
+void derivative(std::vector<Point> &D, std::vector<Point> const &B);
+}; };
+
+void find_intersections(std::vector<std::pair<double, double> > &xs,
+ D2<Bezier> const & A,
+ D2<Bezier> const & B,
+ double precision)
+{
+ find_intersections_bezier_clipping(xs, bezier_points(A), bezier_points(B), precision);
+}
+
+void find_intersections(std::vector<std::pair<double, double> > &xs,
+ D2<SBasis> const & A,
+ D2<SBasis> const & B,
+ double precision)
+{
+ vector<Point> BezA, BezB;
+ sbasis_to_bezier(BezA, A);
+ sbasis_to_bezier(BezB, B);
+
+ find_intersections_bezier_clipping(xs, BezA, BezB, precision);
+}
+
+void find_intersections(std::vector< std::pair<double, double> > & xs,
+ std::vector<Point> const& A,
+ std::vector<Point> const& B,
+ double precision)
+{
+ find_intersections_bezier_clipping(xs, A, B, precision);
+}
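A minimal usage sketch of the clipping-based overloads above (the control points are made up; it assumes Bezier's up-to-four-coefficient convenience constructor and passes the precision explicitly):

    #include <2geom/basic-intersection.h>
    #include <2geom/bezier.h>
    #include <2geom/d2.h>
    #include <iostream>
    #include <utility>
    #include <vector>

    int main() {
        using namespace Geom;
        D2<Bezier> A(Bezier(0, 1, 2, 3), Bezier(0, 3, -3, 0));   // cubic: x coeffs, then y coeffs
        D2<Bezier> B(Bezier(0, 1, 2, 3), Bezier(1, -2, 2, -1));
        std::vector<std::pair<double, double>> xs;
        find_intersections(xs, A, B, 1e-9);
        for (auto const &x : xs)
            std::cout << "A(" << x.first << ") ~ B(" << x.second << ")\n";
        return 0;
    }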
+
+//#endif
+
+/*
+ * Split the curve at parameter t using de Casteljau's algorithm,
+ * returning the two resulting pieces in "left" and "right".
+ */
+// TODO replace with Bezier method
+void split(vector<Point> const &p, double t,
+ vector<Point> &left, vector<Point> &right) {
+ const unsigned sz = p.size();
+ //Geom::Point Vtemp[sz][sz];
+ vector<vector<Point> > Vtemp(sz);
+ for ( size_t i = 0; i < sz; ++i )
+        Vtemp[i].resize(sz); // resize (not reserve), since the rows are indexed below
+
+ /* Copy control points */
+ std::copy(p.begin(), p.end(), Vtemp[0].begin());
+
+ /* Triangle computation */
+ for (unsigned i = 1; i < sz; i++) {
+ for (unsigned j = 0; j < sz - i; j++) {
+ Vtemp[i][j] = lerp(t, Vtemp[i-1][j], Vtemp[i-1][j+1]);
+ }
+ }
+
+ left.resize(sz);
+ right.resize(sz);
+ for (unsigned j = 0; j < sz; j++)
+ left[j] = Vtemp[j][0];
+ for (unsigned j = 0; j < sz; j++)
+ right[j] = Vtemp[sz-1-j][j];
+}
+
+
+
+void find_self_intersections(std::vector<std::pair<double, double> > &xs,
+ D2<Bezier> const &A,
+ double precision)
+{
+ std::vector<double> dr = derivative(A[X]).roots();
+ {
+ std::vector<double> dyr = derivative(A[Y]).roots();
+ dr.insert(dr.begin(), dyr.begin(), dyr.end());
+ }
+ dr.push_back(0);
+ dr.push_back(1);
+ // We want to be sure that we have no empty segments
+ std::sort(dr.begin(), dr.end());
+ std::vector<double>::iterator new_end = std::unique(dr.begin(), dr.end());
+ dr.resize( new_end - dr.begin() );
+
+ std::vector< D2<Bezier> > pieces;
+ for (unsigned i = 0; i < dr.size() - 1; ++i) {
+ pieces.push_back(portion(A, dr[i], dr[i+1]));
+ }
+ /*{
+ vector<Point> l, r, in = A;
+ for(unsigned i = 0; i < dr.size()-1; i++) {
+ split(in, (dr[i+1]-dr[i]) / (1 - dr[i]), l, r);
+ pieces.push_back(l);
+ in = r;
+ }
+ }*/
+
+ for(unsigned i = 0; i < dr.size()-1; i++) {
+ for(unsigned j = i+1; j < dr.size()-1; j++) {
+ std::vector<std::pair<double, double> > section;
+
+ find_intersections(section, pieces[i], pieces[j], precision);
+ for(auto & k : section) {
+ double l = k.first;
+ double r = k.second;
+// XXX: This condition will prune out false positives, but it might create some false negatives. Todo: Confirm it is correct.
+ if(j == i+1)
+ //if((l == 1) && (r == 0))
+ if( ( l > precision ) && (r < precision) )//FIXME: what precision should be used here???
+ continue;
+ xs.emplace_back((1-l)*dr[i] + l*dr[i+1],
+ (1-r)*dr[j] + r*dr[j+1]);
+ }
+ }
+ }
+
+ // Because i is in order, xs should be roughly already in order?
+ //sort(xs.begin(), xs.end());
+ //unique(xs.begin(), xs.end());
+}
+
+void find_self_intersections(std::vector<std::pair<double, double> > &xs,
+ D2<SBasis> const &A,
+ double precision)
+{
+ D2<Bezier> in;
+ sbasis_to_bezier(in, A);
+ find_self_intersections(xs, in, precision);
+}
+
+
+void subdivide(D2<Bezier> const &a,
+ D2<Bezier> const &b,
+ std::vector< std::pair<double, double> > const &xs,
+ std::vector< D2<Bezier> > &av,
+ std::vector< D2<Bezier> > &bv)
+{
+ if (xs.empty()) {
+ av.push_back(a);
+ bv.push_back(b);
+ return;
+ }
+
+ std::pair<double, double> prev = std::make_pair(0., 0.);
+ for (const auto & x : xs) {
+ av.push_back(portion(a, prev.first, x.first));
+ bv.push_back(portion(b, prev.second, x.second));
+ av.back()[X].at0() = bv.back()[X].at0() = lerp(0.5, av.back()[X].at0(), bv.back()[X].at0());
+ av.back()[X].at1() = bv.back()[X].at1() = lerp(0.5, av.back()[X].at1(), bv.back()[X].at1());
+ av.back()[Y].at0() = bv.back()[Y].at0() = lerp(0.5, av.back()[Y].at0(), bv.back()[Y].at0());
+ av.back()[Y].at1() = bv.back()[Y].at1() = lerp(0.5, av.back()[Y].at1(), bv.back()[Y].at1());
+ prev = x;
+ }
+ av.push_back(portion(a, prev.first, 1));
+ bv.push_back(portion(b, prev.second, 1));
+ av.back()[X].at0() = bv.back()[X].at0() = lerp(0.5, av.back()[X].at0(), bv.back()[X].at0());
+ av.back()[X].at1() = bv.back()[X].at1() = lerp(0.5, av.back()[X].at1(), bv.back()[X].at1());
+ av.back()[Y].at0() = bv.back()[Y].at0() = lerp(0.5, av.back()[Y].at0(), bv.back()[Y].at0());
+ av.back()[Y].at1() = bv.back()[Y].at1() = lerp(0.5, av.back()[Y].at1(), bv.back()[Y].at1());
+}
+
+#ifdef HAVE_GSL
+#include <gsl/gsl_multiroots.h>
+
+struct rparams
+{
+ D2<SBasis> const &A;
+ D2<SBasis> const &B;
+};
+
+static int
+intersect_polish_f (const gsl_vector * x, void *params,
+ gsl_vector * f)
+{
+ const double x0 = gsl_vector_get (x, 0);
+ const double x1 = gsl_vector_get (x, 1);
+
+ Geom::Point dx = ((struct rparams *) params)->A(x0) -
+ ((struct rparams *) params)->B(x1);
+
+ gsl_vector_set (f, 0, dx[0]);
+ gsl_vector_set (f, 1, dx[1]);
+
+ return GSL_SUCCESS;
+}
+#endif
+
+union dbl_64{
+ long long i64;
+ double d64;
+};
+
+static double EpsilonBy(double value, int eps)
+{
+ dbl_64 s;
+ s.d64 = value;
+ s.i64 += eps;
+ return s.d64;
+}
+
+
+static void intersect_polish_root (D2<SBasis> const &A, double &s,
+ D2<SBasis> const &B, double &t) {
+#ifdef HAVE_GSL
+ const gsl_multiroot_fsolver_type *T;
+ gsl_multiroot_fsolver *sol;
+
+ int status;
+ size_t iter = 0;
+#endif
+ std::vector<Point> as, bs;
+ as = A.valueAndDerivatives(s, 2);
+ bs = B.valueAndDerivatives(t, 2);
+ Point F = as[0] - bs[0];
+ double best = dot(F, F);
+
+ for(int i = 0; i < 4; i++) {
+
+ /**
+ we want to solve
+ J*(x1 - x0) = f(x0)
+
+ |dA(s)[0] -dB(t)[0]| (X1 - X0) = A(s) - B(t)
+ |dA(s)[1] -dB(t)[1]|
+ **/
+
+        // We're using the standard transformation matrices, which is numerically rather poor. Much better to solve the equation using elimination.
+
+ Affine jack(as[1][0], as[1][1],
+ -bs[1][0], -bs[1][1],
+ 0, 0);
+ Point soln = (F)*jack.inverse();
+ double ns = s - soln[0];
+ double nt = t - soln[1];
+
+ as = A.valueAndDerivatives(ns, 2);
+ bs = B.valueAndDerivatives(nt, 2);
+ F = as[0] - bs[0];
+ double trial = dot(F, F);
+ if (trial > best*0.1) {// we have standards, you know
+ // At this point we could do a line search
+ break;
+ }
+ best = trial;
+ s = ns;
+ t = nt;
+ }
+
+#ifdef HAVE_GSL
+ const size_t n = 2;
+ struct rparams p = {A, B};
+ gsl_multiroot_function f = {&intersect_polish_f, n, &p};
+
+ double x_init[2] = {s, t};
+ gsl_vector *x = gsl_vector_alloc (n);
+
+ gsl_vector_set (x, 0, x_init[0]);
+ gsl_vector_set (x, 1, x_init[1]);
+
+ T = gsl_multiroot_fsolver_hybrids;
+ sol = gsl_multiroot_fsolver_alloc (T, 2);
+ gsl_multiroot_fsolver_set (sol, &f, x);
+
+ do
+ {
+ iter++;
+ status = gsl_multiroot_fsolver_iterate (sol);
+
+ if (status) /* check if solver is stuck */
+ break;
+
+ status =
+ gsl_multiroot_test_residual (sol->f, 1e-12);
+ }
+ while (status == GSL_CONTINUE && iter < 1000);
+
+ s = gsl_vector_get (sol->x, 0);
+ t = gsl_vector_get (sol->x, 1);
+
+ gsl_multiroot_fsolver_free (sol);
+ gsl_vector_free (x);
+#endif
+
+ {
+ // This code does a neighbourhood search for minor improvements.
+ double best_v = L1(A(s) - B(t));
+ //std::cout << "------\n" << best_v << std::endl;
+ Point best(s,t);
+ while (true) {
+ Point trial = best;
+ double trial_v = best_v;
+ for(int nsi = -1; nsi < 2; nsi++) {
+ for(int nti = -1; nti < 2; nti++) {
+ Point n(EpsilonBy(best[0], nsi),
+ EpsilonBy(best[1], nti));
+ double c = L1(A(n[0]) - B(n[1]));
+ //std::cout << c << "; ";
+ if (c < trial_v) {
+ trial = n;
+ trial_v = c;
+ }
+ }
+ }
+ if(trial == best) {
+ //std::cout << "\n" << s << " -> " << s - best[0] << std::endl;
+ //std::cout << t << " -> " << t - best[1] << std::endl;
+ //std::cout << best_v << std::endl;
+ s = best[0];
+ t = best[1];
+ return;
+ } else {
+ best = trial;
+ best_v = trial_v;
+ }
+ }
+ }
+}
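For reference, the loop above performs a plain two-dimensional Newton iteration on g(s, t) = A(s) - B(t), keeping a step only if it shrinks the squared residual |A(s) - B(t)|^2 by at least a factor of ten:

    \begin{pmatrix} \Delta s \\ \Delta t \end{pmatrix}
      = \begin{pmatrix} A'_x(s) & -B'_x(t) \\ A'_y(s) & -B'_y(t) \end{pmatrix}^{-1}
        \begin{pmatrix} A_x(s) - B_x(t) \\ A_y(s) - B_y(t) \end{pmatrix},
    \qquad (s, t) \leftarrow (s - \Delta s,\; t - \Delta t)

The optional GSL solver and the final ULP-sized neighbourhood search then refine the estimate further.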
+
+
+void polish_intersections(std::vector<std::pair<double, double> > &xs,
+ D2<SBasis> const &A, D2<SBasis> const &B)
+{
+ for(auto & x : xs)
+ intersect_polish_root(A, x.first,
+ B, x.second);
+}
+
+/**
+ * Compute the Hausdorff distance from A to B only.
+ */
+double hausdorfl(D2<SBasis>& A, D2<SBasis> const& B,
+ double m_precision,
+ double *a_t, double* b_t) {
+ std::vector< std::pair<double, double> > xs;
+ std::vector<Point> Az, Bz;
+ sbasis_to_bezier (Az, A);
+ sbasis_to_bezier (Bz, B);
+ find_collinear_normal(xs, Az, Bz, m_precision);
+ double h_dist = 0, h_a_t = 0, h_b_t = 0;
+ double dist = 0;
+ Point Ax = A.at0();
+ double t = Geom::nearest_time(Ax, B);
+ dist = Geom::distance(Ax, B(t));
+ if (dist > h_dist) {
+ h_a_t = 0;
+ h_b_t = t;
+ h_dist = dist;
+ }
+ Ax = A.at1();
+ t = Geom::nearest_time(Ax, B);
+ dist = Geom::distance(Ax, B(t));
+ if (dist > h_dist) {
+ h_a_t = 1;
+ h_b_t = t;
+ h_dist = dist;
+ }
+ for (auto & x : xs)
+ {
+ Point At = A(x.first);
+ Point Bu = B(x.second);
+ double distAtBu = Geom::distance(At, Bu);
+ t = Geom::nearest_time(At, B);
+ dist = Geom::distance(At, B(t));
+ //FIXME: we might miss it due to floating point precision...
+ if (dist >= distAtBu-.1 && distAtBu > h_dist) {
+ h_a_t = x.first;
+ h_b_t = x.second;
+ h_dist = distAtBu;
+ }
+
+ }
+ if(a_t) *a_t = h_a_t;
+ if(b_t) *b_t = h_b_t;
+
+ return h_dist;
+}
+
+/**
+ * Compute the symmetric Hausdorff distance.
+ */
+double hausdorf(D2<SBasis>& A, D2<SBasis> const& B,
+ double m_precision,
+ double *a_t, double* b_t) {
+ double h_dist = hausdorfl(A, B, m_precision, a_t, b_t);
+
+ double dist = 0;
+ Point Bx = B.at0();
+ double t = Geom::nearest_time(Bx, A);
+ dist = Geom::distance(Bx, A(t));
+ if (dist > h_dist) {
+ if(a_t) *a_t = t;
+ if(b_t) *b_t = 0;
+ h_dist = dist;
+ }
+ Bx = B.at1();
+ t = Geom::nearest_time(Bx, A);
+ dist = Geom::distance(Bx, A(t));
+ if (dist > h_dist) {
+ if(a_t) *a_t = t;
+ if(b_t) *b_t = 1;
+ h_dist = dist;
+ }
+
+ return h_dist;
+}
+
+bool non_collinear_segments_intersect(const Point &A, const Point &B, const Point &C, const Point &D)
+{
+ return cross(D - C, A - C) * cross(D - C, B - C) < 0 && //
+ cross(B - A, C - A) * cross(B - A, D - A) < 0;
+}
+};
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/bezier-clipping.cpp b/src/2geom/bezier-clipping.cpp
new file mode 100644
index 0000000..27da3d2
--- /dev/null
+++ b/src/2geom/bezier-clipping.cpp
@@ -0,0 +1,1174 @@
+/*
+ * Implement the Bezier clipping algorithm for finding
+ * Bezier curve intersection points and collinear normals
+ *
+ * Authors:
+ * Marco Cecchetti <mrcekets at gmail.com>
+ *
+ * Copyright 2008 authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+
+
+
+#include <2geom/basic-intersection.h>
+#include <2geom/choose.h>
+#include <2geom/point.h>
+#include <2geom/interval.h>
+#include <2geom/bezier.h>
+#include <2geom/numeric/matrix.h>
+#include <2geom/convex-hull.h>
+#include <2geom/line.h>
+
+#include <cassert>
+#include <vector>
+#include <algorithm>
+#include <utility>
+//#include <iomanip>
+
+using std::swap;
+
+
+#define VERBOSE 0
+#define CHECK 0
+
+namespace Geom {
+
+namespace detail { namespace bezier_clipping {
+
+////////////////////////////////////////////////////////////////////////////////
+// for debugging
+//
+
+void print(std::vector<Point> const& cp, const char* msg = "")
+{
+ std::cerr << msg << std::endl;
+ for (size_t i = 0; i < cp.size(); ++i)
+ std::cerr << i << " : " << cp[i] << std::endl;
+}
+
+template< class charT >
+std::basic_ostream<charT> &
+operator<< (std::basic_ostream<charT> & os, const Interval & I)
+{
+ os << "[" << I.min() << ", " << I.max() << "]";
+ return os;
+}
+
+double angle (std::vector<Point> const& A)
+{
+ size_t n = A.size() -1;
+ double a = std::atan2(A[n][Y] - A[0][Y], A[n][X] - A[0][X]);
+ return (180 * a / M_PI);
+}
+
+size_t get_precision(Interval const& I)
+{
+ double d = I.extent();
+ double e = 0.1, p = 10;
+ int n = 0;
+ while (n < 16 && d < e)
+ {
+ p *= 10;
+ e = 1/p;
+ ++n;
+ }
+ return n;
+}
+
+void range_assertion(int k, int m, int n, const char* msg)
+{
+ if ( k < m || k > n)
+ {
+ std::cerr << "range assertion failed: \n"
+ << msg << std::endl
+ << "value: " << k
+ << " range: " << m << ", " << n << std::endl;
+ assert (k >= m && k <= n);
+ }
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// numerical routines
+
+/*
+ * Compute the determinant of the 2x2 matrix whose columns are the points P1, P2
+ */
+double det(Point const& P1, Point const& P2)
+{
+ return P1[X]*P2[Y] - P1[Y]*P2[X];
+}
+
+/*
+ * Solve the linear system [P1, P2] * P = Q.
+ * If there isn't exactly one solution, the routine returns false.
+ */
+bool solve(Point & P, Point const& P1, Point const& P2, Point const& Q)
+{
+ double d = det(P1, P2);
+ if (d == 0) return false;
+ d = 1 / d;
+ P[X] = det(Q, P2) * d;
+ P[Y] = det(P1, Q) * d;
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// interval routines
+
+/*
+ * Map the sub-interval I of [0,1] into the interval J and assign the result back to J
+ */
+void map_to(Interval & J, Interval const& I)
+{
+ J.setEnds(J.valueAt(I.min()), J.valueAt(I.max()));
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// bezier curve routines
+
+/*
+ * Return true if all the Bezier curve control points lie within "precision"
+ * of the first one, false otherwise
+ */
+// Bezier.isConstant(precision)
+bool is_constant(std::vector<Point> const& A, double precision)
+{
+ for (unsigned int i = 1; i < A.size(); ++i)
+ {
+ if(!are_near(A[i], A[0], precision))
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Compute the hodograph of the bezier curve B and return it in D
+ */
+// derivative(Bezier)
+void derivative(std::vector<Point> & D, std::vector<Point> const& B)
+{
+ D.clear();
+ size_t sz = B.size();
+ if (sz == 0) return;
+ if (sz == 1)
+ {
+ D.resize(1, Point(0,0));
+ return;
+ }
+ size_t n = sz-1;
+ D.reserve(n);
+ for (size_t i = 0; i < n; ++i)
+ {
+ D.push_back(n*(B[i+1] - B[i]));
+ }
+}
+
+/*
+ * Compute the hodograph of the Bezier curve B rotated by 90 degrees
+ * and return it in D; the resulting N(t) is orthogonal to the tangent B'(t) for any t
+ */
+// rot90(derivative(Bezier))
+void normal(std::vector<Point> & N, std::vector<Point> const& B)
+{
+ derivative(N,B);
+ for (auto & i : N)
+ {
+ i = rot90(i);
+ }
+}
+
+/*
+ * Compute the portion of the Bezier curve "B" wrt the interval [0,t]
+ */
+// portion(Bezier, 0, t)
+void left_portion(Coord t, std::vector<Point> & B)
+{
+ size_t n = B.size();
+ for (size_t i = 1; i < n; ++i)
+ {
+ for (size_t j = n-1; j > i-1 ; --j)
+ {
+ B[j] = lerp(t, B[j-1], B[j]);
+ }
+ }
+}
+
+/*
+ * Compute the portion of the Bezier curve "B" wrt the interval [t,1]
+ */
+// portion(Bezier, t, 1)
+void right_portion(Coord t, std::vector<Point> & B)
+{
+ size_t n = B.size();
+ for (size_t i = 1; i < n; ++i)
+ {
+ for (size_t j = 0; j < n-i; ++j)
+ {
+ B[j] = lerp(t, B[j], B[j+1]);
+ }
+ }
+}
+
+/*
+ * Compute the portion of the Bezier curve "B" wrt the interval "I"
+ */
+// portion(Bezier, I)
+void portion (std::vector<Point> & B , Interval const& I)
+{
+ if (I.min() == 0)
+ {
+ if (I.max() == 1) return;
+ left_portion(I.max(), B);
+ return;
+ }
+ right_portion(I.min(), B);
+ if (I.max() == 1) return;
+ double t = I.extent() / (1 - I.min());
+ left_portion(t, B);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// tags
+
+struct intersection_point_tag;
+struct collinear_normal_tag;
+template <typename Tag>
+OptInterval clip(std::vector<Point> const& A,
+ std::vector<Point> const& B,
+ double precision);
+template <typename Tag>
+void iterate(std::vector<Interval>& domsA,
+ std::vector<Interval>& domsB,
+ std::vector<Point> const& A,
+ std::vector<Point> const& B,
+ Interval const& domA,
+ Interval const& domB,
+ double precision );
+
+
+////////////////////////////////////////////////////////////////////////////////
+// intersection
+
+/*
+ * Make up an orientation line using the control points c[i] and c[j];
+ * the line is returned in the output parameter "l" as a 3-element
+ * vector such that l[0] * x + l[1] * y + l[2] == 0; the line is normalized.
+ */
+// Line(c[i], c[j])
+void orientation_line (std::vector<double> & l,
+ std::vector<Point> const& c,
+ size_t i, size_t j)
+{
+ l[0] = c[j][Y] - c[i][Y];
+ l[1] = c[i][X] - c[j][X];
+ l[2] = cross(c[j], c[i]);
+ double length = std::sqrt(l[0] * l[0] + l[1] * l[1]);
+ assert (length != 0);
+ l[0] /= length;
+ l[1] /= length;
+ l[2] /= length;
+}
+
+/*
+ * Pick an orientation line for the Bezier curve "c" and return it
+ */
+Line pick_orientation_line (std::vector<Point> const &c, double precision)
+{
+ size_t i = c.size();
+ while (--i > 0 && are_near(c[0], c[i], precision))
+ {}
+
+ // this should never happen because when a new curve portion is created
+ // we check that it is not constant;
+ // however this requires that the precision used in the is_constant
+ // routine has to be the same used here in the are_near test
+ assert(i != 0);
+
+ Line line(c[0], c[i]);
+ return line;
+ //std::cerr << "i = " << i << std::endl;
+}
+
+/*
+ * Make up an orientation line for a constant Bezier curve;
+ * the orientation line is made orthogonal to the base line of the other curve,
+ * and is returned as a Line object.
+ */
+Line orthogonal_orientation_line (std::vector<Point> const &c,
+ Point const &p,
+ double precision)
+{
+ // this should never happen
+ assert(!is_constant(c, precision));
+
+ Line line(p, (c.back() - c.front()).cw() + p);
+ return line;
+}
+
+/*
+ * Compute the signed distance of the point "P" from the normalized line l
+ */
+double signed_distance(Point const &p, Line const &l)
+{
+ Coord a, b, c;
+ l.coefficients(a, b, c);
+ return a * p[X] + b * p[Y] + c;
+}
+
+/*
+ * Compute the min and max distance of the control points of the Bezier
+ * curve "c" from the normalized orientation line "l".
+ * These bounds are returned as an Interval.
+ */
+Interval fat_line_bounds (std::vector<Point> const &c,
+ Line const &l)
+{
+ Interval bound(0, 0);
+ for (auto i : c) {
+ bound.expandTo(signed_distance(i, l));
+ }
+ return bound;
+}
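This is the standard fat-line construction: for the normalized orientation line a x + b y + c = 0, the returned interval describes the strip

    { (x, y) : bound.min() <= a x + b y + c <= bound.max() }

By the convex-hull property of Bezier curves, the curve whose control points were passed in lies entirely inside this strip, so clip_interval below can safely discard parameter ranges of the other curve that fall outside it.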
+
+/*
+ * return the x component of the intersection point between the line
+ * passing through points p1, p2 and the line Y = "y"
+ */
+double intersect (Point const& p1, Point const& p2, double y)
+{
+ // we are sure that p2[Y] != p1[Y] because this routine is called
+ // only when the lower or the upper bound is crossed
+ double dy = (p2[Y] - p1[Y]);
+ double s = (y - p1[Y]) / dy;
+ return (p2[X]-p1[X])*s + p1[X];
+}
+
+/*
+ * Clip the Bezier curve "B" wrt the fat line defined by the orientation
+ * line "l" and the interval range "bound", the new parameter interval for
+ * the clipped curve is returned through the output parameter "dom"
+ */
+OptInterval clip_interval (std::vector<Point> const& B,
+ Line const &l,
+ Interval const &bound)
+{
+ double n = B.size() - 1; // number of sub-intervals
+ std::vector<Point> D; // distance curve control points
+ D.reserve (B.size());
+ for (size_t i = 0; i < B.size(); ++i)
+ {
+ const double d = signed_distance(B[i], l);
+ D.emplace_back(i/n, d);
+ }
+ //print(D);
+
+ ConvexHull p;
+ p.swap(D);
+ //print(p);
+
+ bool plower, phigher;
+ bool clower, chigher;
+ double t, tmin = 1, tmax = 0;
+// std::cerr << "bound : " << bound << std::endl;
+
+ plower = (p[0][Y] < bound.min());
+ phigher = (p[0][Y] > bound.max());
+ if (!(plower || phigher)) // inside the fat line
+ {
+ if (tmin > p[0][X]) tmin = p[0][X];
+ if (tmax < p[0][X]) tmax = p[0][X];
+// std::cerr << "0 : inside " << p[0]
+// << " : tmin = " << tmin << ", tmax = " << tmax << std::endl;
+ }
+
+ for (size_t i = 1; i < p.size(); ++i)
+ {
+ clower = (p[i][Y] < bound.min());
+ chigher = (p[i][Y] > bound.max());
+ if (!(clower || chigher)) // inside the fat line
+ {
+ if (tmin > p[i][X]) tmin = p[i][X];
+ if (tmax < p[i][X]) tmax = p[i][X];
+// std::cerr << i << " : inside " << p[i]
+// << " : tmin = " << tmin << ", tmax = " << tmax
+// << std::endl;
+ }
+ if (clower != plower) // cross the lower bound
+ {
+ t = intersect(p[i-1], p[i], bound.min());
+ if (tmin > t) tmin = t;
+ if (tmax < t) tmax = t;
+ plower = clower;
+// std::cerr << i << " : lower " << p[i]
+// << " : tmin = " << tmin << ", tmax = " << tmax
+// << std::endl;
+ }
+ if (chigher != phigher) // cross the upper bound
+ {
+ t = intersect(p[i-1], p[i], bound.max());
+ if (tmin > t) tmin = t;
+ if (tmax < t) tmax = t;
+ phigher = chigher;
+// std::cerr << i << " : higher " << p[i]
+// << " : tmin = " << tmin << ", tmax = " << tmax
+// << std::endl;
+ }
+ }
+
+ // we have to test the closing segment for intersection
+ size_t last = p.size() - 1;
+ clower = (p[0][Y] < bound.min());
+ chigher = (p[0][Y] > bound.max());
+ if (clower != plower) // cross the lower bound
+ {
+ t = intersect(p[last], p[0], bound.min());
+ if (tmin > t) tmin = t;
+ if (tmax < t) tmax = t;
+// std::cerr << "0 : lower " << p[0]
+// << " : tmin = " << tmin << ", tmax = " << tmax << std::endl;
+ }
+ if (chigher != phigher) // cross the upper bound
+ {
+ t = intersect(p[last], p[0], bound.max());
+ if (tmin > t) tmin = t;
+ if (tmax < t) tmax = t;
+// std::cerr << "0 : higher " << p[0]
+// << " : tmin = " << tmin << ", tmax = " << tmax << std::endl;
+ }
+
+ if (tmin == 1 && tmax == 0) {
+ return OptInterval();
+ } else {
+ return Interval(tmin, tmax);
+ }
+}
+
+/*
+ * Clip the Bezier curve "B" wrt the Bezier curve "A" for individuating
+ * intersection points the new parameter interval for the clipped curve
+ * is returned through the output parameter "dom"
+ */
+template <>
+OptInterval clip<intersection_point_tag> (std::vector<Point> const& A,
+ std::vector<Point> const& B,
+ double precision)
+{
+ Line bl;
+ if (is_constant(A, precision)) {
+ Point M = middle_point(A.front(), A.back());
+ bl = orthogonal_orientation_line(B, M, precision);
+ } else {
+ bl = pick_orientation_line(A, precision);
+ }
+ bl.normalize();
+ Interval bound = fat_line_bounds(A, bl);
+ return clip_interval(B, bl, bound);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// collinear normal
+
+/*
+ * Compute a closed focus for the Bezier curve B and return it in F
+ * A focus is any curve through which all lines perpendicular to B(t) pass.
+ */
+void make_focus (std::vector<Point> & F, std::vector<Point> const& B)
+{
+ assert (B.size() > 2);
+ size_t n = B.size() - 1;
+ normal(F, B);
+ Point c(1, 1);
+#if VERBOSE
+ if (!solve(c, F[0], -F[n-1], B[n]-B[0]))
+ {
+ std::cerr << "make_focus: unable to make up a closed focus" << std::endl;
+ }
+#else
+ solve(c, F[0], -F[n-1], B[n]-B[0]);
+#endif
+// std::cerr << "c = " << c << std::endl;
+
+
+ // B(t) + c(t) * N(t)
+ double n_inv = 1 / (double)(n);
+ Point c0ni;
+ F.push_back(c[1] * F[n-1]);
+ F[n] += B[n];
+ for (size_t i = n-1; i > 0; --i)
+ {
+ F[i] *= -c[0];
+ c0ni = F[i];
+ F[i] += (c[1] * F[i-1]);
+ F[i] *= (i * n_inv);
+ F[i] -= c0ni;
+ F[i] += B[i];
+ }
+ F[0] *= c[0];
+ F[0] += B[0];
+}
+
+/*
+ * Compute the projection on the plane (t, d) of the control points
+ * (t, u, D(t,u)) where D(t,u) = <(B(t) - F(u)), B'(t)> with 0 <= t, u <= 1
+ * B is a Bezier curve and F is a focus of another Bezier curve.
+ * See Sederberg, Nishita, 1990 - Curve intersection using Bezier clipping.
+ */
+void distance_control_points (std::vector<Point> & D,
+ std::vector<Point> const& B,
+ std::vector<Point> const& F)
+{
+ assert (B.size() > 1);
+ assert (!F.empty());
+ const size_t n = B.size() - 1;
+ const size_t m = F.size() - 1;
+ const size_t r = 2 * n - 1;
+ const double r_inv = 1 / (double)(r);
+ D.clear();
+ D.reserve (B.size() * F.size());
+
+ std::vector<Point> dB;
+ dB.reserve(n);
+ for (size_t k = 0; k < n; ++k)
+ {
+ dB.push_back (B[k+1] - B[k]);
+ }
+ NL::Matrix dBB(n,B.size());
+ for (size_t i = 0; i < n; ++i)
+ for (size_t j = 0; j < B.size(); ++j)
+ dBB(i,j) = dot (dB[i], B[j]);
+ NL::Matrix dBF(n, F.size());
+ for (size_t i = 0; i < n; ++i)
+ for (size_t j = 0; j < F.size(); ++j)
+ dBF(i,j) = dot (dB[i], F[j]);
+
+ size_t l;
+ double bc;
+ Point dij;
+ std::vector<double> d(F.size());
+ int rci = 1;
+ int b1 = 1;
+ for (size_t i = 0; i <= r; ++i)
+ {
+ for (size_t j = 0; j <= m; ++j)
+ {
+ d[j] = 0;
+ }
+ const size_t k0 = std::max(i, n) - n;
+ const size_t kn = std::min(i, n-1);
+ const double bri = (double)n / rci;
+
+ // assert(rci == binomial(r, i));
+ binomial_increment_k(rci, r, i);
+
+ int b2 = b1;
+ for (size_t k = k0; k <= kn; ++k)
+ {
+ //if (k > i || (i-k) > n) continue;
+ l = i - k;
+#if CHECK
+ assert (l <= n);
+#endif
+ bc = bri * b2;
+
+ // assert(b2 == binomial(n, l) * binomial(n - 1, k));
+ binomial_decrement_k(b2, n, l);
+ binomial_increment_k(b2, n - 1, k);
+
+ for (size_t j = 0; j <= m; ++j)
+ {
+ //d[j] += bc * dot(dB[k], B[l] - F[j]);
+ d[j] += bc * (dBB(k,l) - dBF(k,j));
+ }
+ }
+
+ // assert(b1 == binomial(n, i - k0) * binomial(n - 1, k0));
+ if (i < n) {
+ binomial_increment_k(b1, n, i);
+ } else {
+ binomial_increment_k(b1, n - 1, k0);
+ }
+
+ double dmin, dmax;
+ dmin = dmax = d[m];
+ for (size_t j = 0; j < m; ++j)
+ {
+ if (dmin > d[j]) dmin = d[j];
+ if (dmax < d[j]) dmax = d[j];
+ }
+ dij[0] = i * r_inv;
+ dij[1] = dmin;
+ D.push_back (dij);
+ dij[1] = dmax;
+ D.push_back (dij);
+ }
+}
+
+/*
+ * Clip the Bezier curve "B" wrt the focus "F"; the new parameter interval for
+ * the clipped curve is returned (an empty OptInterval if nothing remains)
+ */
+OptInterval clip_interval (std::vector<Point> const& B,
+ std::vector<Point> const& F)
+{
+ std::vector<Point> D; // distance curve control points
+ distance_control_points(D, B, F);
+ //print(D, "D");
+// ConvexHull chD(D);
+// std::vector<Point>& p = chD.boundary; // convex hull vertices
+
+ ConvexHull p;
+ p.swap(D);
+ //print(p, "CH(D)");
+
+ bool plower, clower;
+ double t, tmin = 1, tmax = 0;
+
+ plower = (p[0][Y] < 0);
+ if (p[0][Y] == 0) // on the x axis
+ {
+ if (tmin > p[0][X]) tmin = p[0][X];
+ if (tmax < p[0][X]) tmax = p[0][X];
+// std::cerr << "0 : on x axis " << p[0]
+// << " : tmin = " << tmin << ", tmax = " << tmax << std::endl;
+ }
+
+ for (size_t i = 1; i < p.size(); ++i)
+ {
+ clower = (p[i][Y] < 0);
+ if (p[i][Y] == 0) // on x axis
+ {
+ if (tmin > p[i][X]) tmin = p[i][X];
+ if (tmax < p[i][X]) tmax = p[i][X];
+// std::cerr << i << " : on x axis " << p[i]
+// << " : tmin = " << tmin << ", tmax = " << tmax
+// << std::endl;
+ }
+ else if (clower != plower) // cross the x axis
+ {
+ t = intersect(p[i-1], p[i], 0);
+ if (tmin > t) tmin = t;
+ if (tmax < t) tmax = t;
+ plower = clower;
+// std::cerr << i << " : lower " << p[i]
+// << " : tmin = " << tmin << ", tmax = " << tmax
+// << std::endl;
+ }
+ }
+
+ // we have to test the closing segment for intersection
+ size_t last = p.size() - 1;
+ clower = (p[0][Y] < 0);
+ if (clower != plower) // cross the x axis
+ {
+ t = intersect(p[last], p[0], 0);
+ if (tmin > t) tmin = t;
+ if (tmax < t) tmax = t;
+// std::cerr << "0 : lower " << p[0]
+// << " : tmin = " << tmin << ", tmax = " << tmax << std::endl;
+ }
+ if (tmin == 1 && tmax == 0) {
+ return OptInterval();
+ } else {
+ return Interval(tmin, tmax);
+ }
+}
+
+/*
+ * Clip the Bezier curve "B" wrt the Bezier curve "A" for individuating
+ * points which have collinear normals; the new parameter interval
+ * for the clipped curve is returned through the output parameter "dom"
+ */
+template <>
+OptInterval clip<collinear_normal_tag> (std::vector<Point> const& A,
+ std::vector<Point> const& B,
+ double /*precision*/)
+{
+ std::vector<Point> F;
+ make_focus(F, A);
+ return clip_interval(B, F);
+}
+
+
+
+const double MAX_PRECISION = 1e-8;
+const double MIN_CLIPPED_SIZE_THRESHOLD = 0.8;
+const Interval UNIT_INTERVAL(0,1);
+const OptInterval EMPTY_INTERVAL;
+const Interval H1_INTERVAL(0, 0.5);
+const Interval H2_INTERVAL(nextafter(0.5, 1.0), 1.0);
+
+/*
+ * iterate
+ *
+ * input:
+ * A, B: control point sets of two bezier curves
+ * domA, domB: real parameter intervals of the two curves
+ * precision: required computational precision of the returned parameter ranges
+ * output:
+ * domsA, domsB: sets of parameter intervals
+ *
+ * The parameter intervals are computed with a Bezier clipping algorithm;
+ * if a clipping step doesn't shrink the current interval by more than 20%,
+ * a subdivision step is performed instead.
+ * If during the computation both curves collapse to a single point,
+ * the routine exits regardless of the precision reached in the computation
+ * of the curve intervals.
+ */
+template <>
+void iterate<intersection_point_tag> (std::vector<Interval>& domsA,
+ std::vector<Interval>& domsB,
+ std::vector<Point> const& A,
+ std::vector<Point> const& B,
+ Interval const& domA,
+ Interval const& domB,
+ double precision )
+{
+ // in order to limit recursion
+ static size_t counter = 0;
+ if (domA.extent() == 1 && domB.extent() == 1) counter = 0;
+ if (++counter > 100) return;
+#if VERBOSE
+ std::cerr << std::fixed << std::setprecision(16);
+ std::cerr << ">> curve subdision performed <<" << std::endl;
+ std::cerr << "dom(A) : " << domA << std::endl;
+ std::cerr << "dom(B) : " << domB << std::endl;
+// std::cerr << "angle(A) : " << angle(A) << std::endl;
+// std::cerr << "angle(B) : " << angle(B) << std::endl;
+#endif
+
+ if (precision < MAX_PRECISION)
+ precision = MAX_PRECISION;
+
+ std::vector<Point> pA = A;
+ std::vector<Point> pB = B;
+ std::vector<Point>* C1 = &pA;
+ std::vector<Point>* C2 = &pB;
+
+ Interval dompA = domA;
+ Interval dompB = domB;
+ Interval* dom1 = &dompA;
+ Interval* dom2 = &dompB;
+
+ OptInterval dom;
+
+ if ( is_constant(A, precision) && is_constant(B, precision) ){
+ Point M1 = middle_point(C1->front(), C1->back());
+ Point M2 = middle_point(C2->front(), C2->back());
+ if (are_near(M1,M2)){
+ domsA.push_back(domA);
+ domsB.push_back(domB);
+ }
+ return;
+ }
+
+ size_t iter = 0;
+ while (++iter < 100
+ && (dompA.extent() >= precision || dompB.extent() >= precision))
+ {
+#if VERBOSE
+ std::cerr << "iter: " << iter << std::endl;
+#endif
+ dom = clip<intersection_point_tag>(*C1, *C2, precision);
+
+ if (dom.empty())
+ {
+#if VERBOSE
+ std::cerr << "dom: empty" << std::endl;
+#endif
+ return;
+ }
+#if VERBOSE
+ std::cerr << "dom : " << dom << std::endl;
+#endif
+        // an interval with min() > max() would be invalid
+ assert(dom->min() <= dom->max());
+
+ map_to(*dom2, *dom);
+
+ portion(*C2, *dom);
+ if (is_constant(*C2, precision) && is_constant(*C1, precision))
+ {
+ Point M1 = middle_point(C1->front(), C1->back());
+ Point M2 = middle_point(C2->front(), C2->back());
+#if VERBOSE
+ std::cerr << "both curves are constant: \n"
+ << "M1: " << M1 << "\n"
+ << "M2: " << M2 << std::endl;
+ print(*C2, "C2");
+ print(*C1, "C1");
+#endif
+ if (are_near(M1,M2))
+ break; // append the new interval
+ else
+ return; // exit without appending any new interval
+ }
+
+
+        // if we have clipped away less than 20%, then we need to subdivide
+        // the curve with the largest domain into two sub-curves
+ if (dom->extent() > MIN_CLIPPED_SIZE_THRESHOLD)
+ {
+#if VERBOSE
+ std::cerr << "clipped less than 20% : " << dom->extent() << std::endl;
+ std::cerr << "angle(pA) : " << angle(pA) << std::endl;
+ std::cerr << "angle(pB) : " << angle(pB) << std::endl;
+#endif
+ std::vector<Point> pC1, pC2;
+ Interval dompC1, dompC2;
+ if (dompA.extent() > dompB.extent())
+ {
+ pC1 = pC2 = pA;
+ portion(pC1, H1_INTERVAL);
+ portion(pC2, H2_INTERVAL);
+ dompC1 = dompC2 = dompA;
+ map_to(dompC1, H1_INTERVAL);
+ map_to(dompC2, H2_INTERVAL);
+ iterate<intersection_point_tag>(domsA, domsB, pC1, pB,
+ dompC1, dompB, precision);
+ iterate<intersection_point_tag>(domsA, domsB, pC2, pB,
+ dompC2, dompB, precision);
+ }
+ else
+ {
+ pC1 = pC2 = pB;
+ portion(pC1, H1_INTERVAL);
+ portion(pC2, H2_INTERVAL);
+ dompC1 = dompC2 = dompB;
+ map_to(dompC1, H1_INTERVAL);
+ map_to(dompC2, H2_INTERVAL);
+ iterate<intersection_point_tag>(domsB, domsA, pC1, pA,
+ dompC1, dompA, precision);
+ iterate<intersection_point_tag>(domsB, domsA, pC2, pA,
+ dompC2, dompA, precision);
+ }
+ return;
+ }
+
+ swap(C1, C2);
+ swap(dom1, dom2);
+#if VERBOSE
+ std::cerr << "dom(pA) : " << dompA << std::endl;
+ std::cerr << "dom(pB) : " << dompB << std::endl;
+#endif
+ }
+ domsA.push_back(dompA);
+ domsB.push_back(dompB);
+}
+
+
+/*
+ * iterate
+ *
+ * input:
+ * A, B: control point sets of two bezier curves
+ * domA, domB: real parameter intervals of the two curves
+ * precision: required computational precision of the returned parameter ranges
+ * output:
+ * domsA, domsB: sets of parameter intervals
+ *
+ * The parameter intervals are computed with a Bezier clipping algorithm;
+ * if a clipping step doesn't shrink the current interval by more than 20%,
+ * a subdivision step is performed instead.
+ * If during the computation the length of one of the two curve intervals
+ * becomes less than MAX_PRECISION, the routine exits regardless of the
+ * precision reached in the computation of the other curve interval.
+ */
+template <>
+void iterate<collinear_normal_tag> (std::vector<Interval>& domsA,
+ std::vector<Interval>& domsB,
+ std::vector<Point> const& A,
+ std::vector<Point> const& B,
+ Interval const& domA,
+ Interval const& domB,
+ double precision)
+{
+ // in order to limit recursion
+ static size_t counter = 0;
+ if (domA.extent() == 1 && domB.extent() == 1) counter = 0;
+ if (++counter > 100) return;
+#if VERBOSE
+ std::cerr << std::fixed << std::setprecision(16);
+ std::cerr << ">> curve subdision performed <<" << std::endl;
+ std::cerr << "dom(A) : " << domA << std::endl;
+ std::cerr << "dom(B) : " << domB << std::endl;
+// std::cerr << "angle(A) : " << angle(A) << std::endl;
+// std::cerr << "angle(B) : " << angle(B) << std::endl;
+#endif
+
+ if (precision < MAX_PRECISION)
+ precision = MAX_PRECISION;
+
+ std::vector<Point> pA = A;
+ std::vector<Point> pB = B;
+ std::vector<Point>* C1 = &pA;
+ std::vector<Point>* C2 = &pB;
+
+ Interval dompA = domA;
+ Interval dompB = domB;
+ Interval* dom1 = &dompA;
+ Interval* dom2 = &dompB;
+
+ OptInterval dom;
+
+ size_t iter = 0;
+ while (++iter < 100
+ && (dompA.extent() >= precision || dompB.extent() >= precision))
+ {
+#if VERBOSE
+ std::cerr << "iter: " << iter << std::endl;
+#endif
+ dom = clip<collinear_normal_tag>(*C1, *C2, precision);
+
+ if (dom.empty()) {
+#if VERBOSE
+ std::cerr << "dom: empty" << std::endl;
+#endif
+ return;
+ }
+#if VERBOSE
+ std::cerr << "dom : " << dom << std::endl;
+#endif
+ assert(dom->min() <= dom->max());
+
+ map_to(*dom2, *dom);
+
+ // it's better to stop before losing computational precision
+ if (iter > 1 && (dom2->extent() <= MAX_PRECISION))
+ {
+#if VERBOSE
+ std::cerr << "beyond max precision limit" << std::endl;
+#endif
+ break;
+ }
+
+ portion(*C2, *dom);
+ if (iter > 1 && is_constant(*C2, precision))
+ {
+#if VERBOSE
+ std::cerr << "new curve portion pC1 is constant" << std::endl;
+#endif
+ break;
+ }
+
+
+        // if we have clipped away less than 20%, then we need to subdivide
+        // the curve with the largest domain into two sub-curves
+ if ( dom->extent() > MIN_CLIPPED_SIZE_THRESHOLD)
+ {
+#if VERBOSE
+ std::cerr << "clipped less than 20% : " << dom->extent() << std::endl;
+ std::cerr << "angle(pA) : " << angle(pA) << std::endl;
+ std::cerr << "angle(pB) : " << angle(pB) << std::endl;
+#endif
+ std::vector<Point> pC1, pC2;
+ Interval dompC1, dompC2;
+ if (dompA.extent() > dompB.extent())
+ {
+ if ((dompA.extent() / 2) < MAX_PRECISION)
+ {
+ break;
+ }
+ pC1 = pC2 = pA;
+ portion(pC1, H1_INTERVAL);
+ if (false && is_constant(pC1, precision))
+ {
+#if VERBOSE
+ std::cerr << "new curve portion pC1 is constant" << std::endl;
+#endif
+ break;
+ }
+ portion(pC2, H2_INTERVAL);
+ if (is_constant(pC2, precision))
+ {
+#if VERBOSE
+ std::cerr << "new curve portion pC2 is constant" << std::endl;
+#endif
+ break;
+ }
+ dompC1 = dompC2 = dompA;
+ map_to(dompC1, H1_INTERVAL);
+ map_to(dompC2, H2_INTERVAL);
+ iterate<collinear_normal_tag>(domsA, domsB, pC1, pB,
+ dompC1, dompB, precision);
+ iterate<collinear_normal_tag>(domsA, domsB, pC2, pB,
+ dompC2, dompB, precision);
+ }
+ else
+ {
+ if ((dompB.extent() / 2) < MAX_PRECISION)
+ {
+ break;
+ }
+ pC1 = pC2 = pB;
+ portion(pC1, H1_INTERVAL);
+ if (is_constant(pC1, precision))
+ {
+#if VERBOSE
+ std::cerr << "new curve portion pC1 is constant" << std::endl;
+#endif
+ break;
+ }
+ portion(pC2, H2_INTERVAL);
+ if (is_constant(pC2, precision))
+ {
+#if VERBOSE
+ std::cerr << "new curve portion pC2 is constant" << std::endl;
+#endif
+ break;
+ }
+ dompC1 = dompC2 = dompB;
+ map_to(dompC1, H1_INTERVAL);
+ map_to(dompC2, H2_INTERVAL);
+ iterate<collinear_normal_tag>(domsB, domsA, pC1, pA,
+ dompC1, dompA, precision);
+ iterate<collinear_normal_tag>(domsB, domsA, pC2, pA,
+ dompC2, dompA, precision);
+ }
+ return;
+ }
+
+ swap(C1, C2);
+ swap(dom1, dom2);
+#if VERBOSE
+ std::cerr << "dom(pA) : " << dompA << std::endl;
+ std::cerr << "dom(pB) : " << dompB << std::endl;
+#endif
+ }
+ domsA.push_back(dompA);
+ domsB.push_back(dompB);
+}
+
+
+/*
+ * get_solutions
+ *
+ * input: A, B - sets of control points of two Bezier curves
+ * input: precision - required precision of computation
+ * input: clip - the routine used for clipping
+ * output: xs - set of pairs of parameter values
+ * at which the clipping algorithm converges
+ *
+ * This routine is based on the Bezier Clipping Algorithm,
+ * see: Sederberg - Computer Aided Geometric Design
+ */
+template <typename Tag>
+void get_solutions (std::vector< std::pair<double, double> >& xs,
+ std::vector<Point> const& A,
+ std::vector<Point> const& B,
+ double precision)
+{
+ std::pair<double, double> ci;
+ std::vector<Interval> domsA, domsB;
+ iterate<Tag> (domsA, domsB, A, B, UNIT_INTERVAL, UNIT_INTERVAL, precision);
+ if (domsA.size() != domsB.size())
+ {
+ assert (domsA.size() == domsB.size());
+ }
+ xs.clear();
+ xs.reserve(domsA.size());
+ for (size_t i = 0; i < domsA.size(); ++i)
+ {
+#if VERBOSE
+ std::cerr << i << " : domA : " << domsA[i] << std::endl;
+ std::cerr << "extent A: " << domsA[i].extent() << " ";
+ std::cerr << "precision A: " << get_precision(domsA[i]) << std::endl;
+ std::cerr << i << " : domB : " << domsB[i] << std::endl;
+ std::cerr << "extent B: " << domsB[i].extent() << " ";
+ std::cerr << "precision B: " << get_precision(domsB[i]) << std::endl;
+#endif
+ ci.first = domsA[i].middle();
+ ci.second = domsB[i].middle();
+ xs.push_back(ci);
+ }
+}
+
+} /* end namespace bezier_clipping */ } /* end namespace detail */
+
+
+/*
+ * find_collinear_normal
+ *
+ * input: A, B - sets of control points of two Bezier curves
+ * input: precision - required precision of computation
+ * output: xs - set of pairs of parameter values
+ * at which there are collinear normals
+ *
+ * This routine is based on the Bezier Clipping Algorithm,
+ * see: Sederberg, Nishita, 1990 - Curve intersection using Bezier clipping
+ */
+void find_collinear_normal (std::vector< std::pair<double, double> >& xs,
+ std::vector<Point> const& A,
+ std::vector<Point> const& B,
+ double precision)
+{
+ using detail::bezier_clipping::get_solutions;
+ using detail::bezier_clipping::collinear_normal_tag;
+ get_solutions<collinear_normal_tag>(xs, A, B, precision);
+}
+
+
+/*
+ * find_intersections_bezier_clipping
+ *
+ * input: A, B - sets of control points of two Bezier curves
+ * input: precision - required precision of computation
+ * output: xs - set of pairs of parameter values
+ * at which crossing happens
+ *
+ * This routine is based on the Bezier Clipping Algorithm,
+ * see: Sederberg, Nishita, 1990 - Curve intersection using Bezier clipping
+ */
+void find_intersections_bezier_clipping (std::vector< std::pair<double, double> >& xs,
+ std::vector<Point> const& A,
+ std::vector<Point> const& B,
+ double precision)
+{
+ using detail::bezier_clipping::get_solutions;
+ using detail::bezier_clipping::intersection_point_tag;
+ get_solutions<intersection_point_tag>(xs, A, B, precision);
+}
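+
+/*
+ * Illustrative usage sketch (kept out of the build with #if 0): how the two
+ * entry points above are typically called. The function name and the control
+ * point values are made up for the example.
+ */
+#if 0
+static void bezier_clipping_usage_example()
+{
+    // control points of two cubic Bezier curves (arbitrary example values)
+    std::vector<Point> A = { Point(0, 0), Point(1, 2), Point(2, -1), Point(3, 1) };
+    std::vector<Point> B = { Point(0, 1), Point(1, -2), Point(2, 2), Point(3, 0) };
+
+    std::vector< std::pair<double, double> > xs;
+    find_intersections_bezier_clipping(xs, A, B, 1e-8);
+    // xs[i].first is the parameter on A and xs[i].second the parameter on B
+    // of the i-th crossing; find_collinear_normal() is called the same way.
+}
+#endif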
+
+} // end namespace Geom
+
+
+
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/bezier-curve.cpp b/src/2geom/bezier-curve.cpp
new file mode 100644
index 0000000..ca0f787
--- /dev/null
+++ b/src/2geom/bezier-curve.cpp
@@ -0,0 +1,695 @@
+/* Bezier curve implementation
+ *
+ * Authors:
+ * MenTaLguY <mental@rydia.net>
+ * Marco Cecchetti <mrcekets at gmail.com>
+ * Krzysztof Kosiński <tweenk.pl@gmail.com>
+ *
+ * Copyright 2007-2009 Authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+#include <2geom/bezier-curve.h>
+#include <2geom/path-sink.h>
+#include <2geom/basic-intersection.h>
+#include <2geom/nearest-time.h>
+#include <2geom/polynomial.h>
+
+namespace Geom
+{
+
+/**
+ * @class BezierCurve
+ * @brief Two-dimensional Bezier curve of arbitrary order.
+ *
+ * Bezier curves are an expansion of the concept of linear interpolation to n points.
+ * Linear segments in 2Geom are in fact Bezier curves of order 1.
+ *
+ * Let \f$\mathbf{B}_{\mathbf{p}_0\mathbf{p}_1\ldots\mathbf{p}_n}\f$ denote a Bezier curve
+ * of order \f$n\f$ defined by the points \f$\mathbf{p}_0, \mathbf{p}_1, \ldots, \mathbf{p}_n\f$.
+ * A Bezier curve of order 1 is a linear interpolation curve between two points, defined as
+ * \f[ \mathbf{B}_{\mathbf{p}_0\mathbf{p}_1}(t) = (1-t)\mathbf{p}_0 + t\mathbf{p}_1 \f]
+ * If we now substitute points \f$\mathbf{p_0}\f$ and \f$\mathbf{p_1}\f$ in this definition
+ * by linear interpolations, we get the definition of a Bezier curve of order 2, also called
+ * a quadratic Bezier curve.
+ * \f{align*}{ \mathbf{B}_{\mathbf{p}_0\mathbf{p}_1\mathbf{p}_2}(t)
+ &= (1-t) \mathbf{B}_{\mathbf{p}_0\mathbf{p}_1}(t) + t \mathbf{B}_{\mathbf{p}_1\mathbf{p}_2}(t) \\
+ \mathbf{B}_{\mathbf{p}_0\mathbf{p}_1\mathbf{p}_2}(t)
+ &= (1-t)^2\mathbf{p}_0 + 2(1-t)t\mathbf{p}_1 + t^2\mathbf{p}_2 \f}
+ * By substituting points for quadratic Bezier curves in the original definition,
+ * we get a Bezier curve of order 3, called a cubic Bezier curve.
+ * \f{align*}{ \mathbf{B}_{\mathbf{p}_0\mathbf{p}_1\mathbf{p}_2\mathbf{p}_3}(t)
+ &= (1-t) \mathbf{B}_{\mathbf{p}_0\mathbf{p}_1\mathbf{p}_2}(t)
+ + t \mathbf{B}_{\mathbf{p}_1\mathbf{p}_2\mathbf{p}_3}(t) \\
+ \mathbf{B}_{\mathbf{p}_0\mathbf{p}_1\mathbf{p}_2\mathbf{p}_3}(t)
+ &= (1-t)^3\mathbf{p}_0+3(1-t)^2t\mathbf{p}_1+3(1-t)t^2\mathbf{p}_2+t^3\mathbf{p}_3 \f}
+ * In general, a Bezier curve of order \f$n\f$ can be recursively defined as
+ * \f[ \mathbf{B}_{\mathbf{p}_0\mathbf{p}_1\ldots\mathbf{p}_n}(t)
+ = (1-t) \mathbf{B}_{\mathbf{p}_0\mathbf{p}_1\ldots\mathbf{p}_{n-1}}(t)
+ + t \mathbf{B}_{\mathbf{p}_1\mathbf{p}_2\ldots\mathbf{p}_n}(t) \f]
+ *
+ * This substitution can be repeated an arbitrary number of times. To picture this, imagine
+ * the evaluation of a point on the curve as follows: first, all control points are joined with
+ * straight lines, and a point corresponding to the selected time value is marked on them.
+ * Then, the marked points are joined with straight lines and the point corresponding to
+ * the time value is marked. This is repeated until only one marked point remains, which is the
+ * point at the selected time value.
+ *
+ * @image html bezier-curve-evaluation.png "Evaluation of the Bezier curve"
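+ *
+ * The sketch below (illustrative only, not part of the 2Geom API; the function
+ * name is made up) spells out this repeated-interpolation scheme, known as
+ * de Casteljau's algorithm, for an arbitrary vector of control points:
+ * @code
+ * // Evaluate a Bezier curve at time t by repeated linear interpolation.
+ * Geom::Point de_casteljau(std::vector<Geom::Point> pts, Geom::Coord t)
+ * {
+ *     while (pts.size() > 1) {
+ *         for (size_t i = 0; i + 1 < pts.size(); ++i) {
+ *             pts[i] = (1 - t) * pts[i] + t * pts[i + 1]; // mark the point at time t
+ *         }
+ *         pts.pop_back(); // one fewer marked point after each pass
+ *     }
+ *     return pts.front(); // the single remaining point lies on the curve
+ * }
+ * @endcode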
+ *
+ * An important property of the Bezier curves is that their parameters (control points)
+ * have an intuitive geometric interpretation. Because of this, they are frequently used
+ * in vector graphics editors.
+ *
+ * Every Bezier curve is contained in the convex hull of its control points.
+ * This fact is useful for sweepline algorithms and intersection tests.
+ *
+ * @par Implementation notes
+ * The order of a Bezier curve is immutable once it has been created. Normally, you should
+ * know the order at compile time and use the BezierCurveN template. If you need to determine
+ * the order at runtime, use the BezierCurve::create() function. It will create a BezierCurveN
+ * for orders 1, 2 and 3 (up to cubic Beziers), so you can later <tt>dynamic_cast</tt>
+ * to those types, and for higher orders it will create an instance of BezierCurve.
+ *
+ * @relates BezierCurveN
+ * @ingroup Curves
+ */
+
+/**
+ * @class BezierCurveN
+ * @brief Bezier curve with compile-time specified order.
+ *
+ * @tparam degree unsigned value indicating the order of the Bezier curve
+ *
+ * @relates BezierCurve
+ * @ingroup Curves
+ */
+
+
+BezierCurve::BezierCurve(std::vector<Point> const &pts)
+ : inner(pts)
+{
+ if (pts.size() < 2) {
+ THROW_RANGEERROR("Bezier curve must have at least 2 control points");
+ }
+}
+
+bool BezierCurve::isDegenerate() const
+{
+ for (unsigned d = 0; d < 2; ++d) {
+ Coord ic = inner[d][0];
+ for (unsigned i = 1; i < size(); ++i) {
+ if (inner[d][i] != ic) return false;
+ }
+ }
+ return true;
+}
+
+/** Return false if there are at least 3 distinct control points, true otherwise. */
+bool BezierCurve::isLineSegment() const
+{
+ auto const last_idx = size() - 1;
+ if (last_idx == 1) {
+ return true;
+ }
+ auto const start = controlPoint(0);
+ auto const end = controlPoint(last_idx);
+ for (unsigned i = 1; i < last_idx; ++i) {
+ auto const pi = controlPoint(i);
+ if (pi != start && pi != end) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void BezierCurve::expandToTransformed(Rect &bbox, Affine const &transform) const
+{
+ bbox |= bounds_exact(inner * transform);
+}
+
+Coord BezierCurve::length(Coord tolerance) const
+{
+ switch (order())
+ {
+ case 0:
+ return 0.0;
+ case 1:
+ return distance(initialPoint(), finalPoint());
+ case 2:
+ {
+ std::vector<Point> pts = controlPoints();
+ return bezier_length(pts[0], pts[1], pts[2], tolerance);
+ }
+ case 3:
+ {
+ std::vector<Point> pts = controlPoints();
+ return bezier_length(pts[0], pts[1], pts[2], pts[3], tolerance);
+ }
+ default:
+ return bezier_length(controlPoints(), tolerance);
+ }
+}
+
+std::vector<CurveIntersection>
+BezierCurve::intersect(Curve const &other, Coord eps) const
+{
+ std::vector<CurveIntersection> result;
+
+ // in case we encounter an order-1 curve created from a vector
+ // or a degenerate elliptical arc
+ if (isLineSegment()) {
+ LineSegment ls(initialPoint(), finalPoint());
+ result = ls.intersect(other);
+ return result;
+ }
+
+ // here we are sure that this curve is at least a quadratic Bezier
+ BezierCurve const *bez = dynamic_cast<BezierCurve const *>(&other);
+ if (bez) {
+ std::vector<std::pair<double, double> > xs;
+ find_intersections(xs, inner, bez->inner, eps);
+ for (auto & i : xs) {
+ CurveIntersection x(*this, other, i.first, i.second);
+ result.push_back(x);
+ }
+ return result;
+ }
+
+ // pass other intersection types to the other curve
+ result = other.intersect(*this, eps);
+ transpose_in_place(result);
+ return result;
+}
+
+bool BezierCurve::isNear(Curve const &c, Coord precision) const
+{
+ if (this == &c) return true;
+
+ BezierCurve const *other = dynamic_cast<BezierCurve const *>(&c);
+ if (!other) return false;
+
+ if (!are_near(inner.at0(), other->inner.at0(), precision)) return false;
+ if (!are_near(inner.at1(), other->inner.at1(), precision)) return false;
+
+ if (size() == other->size()) {
+ for (unsigned i = 1; i < order(); ++i) {
+ if (!are_near(inner.point(i), other->inner.point(i), precision)) {
+ return false;
+ }
+ }
+ return true;
+ } else {
+ // Must equalize the degrees before comparing
+ BezierCurve elevated_this, elevated_other;
+ for (size_t dim = 0; dim < 2; dim++) {
+ unsigned const our_degree = inner[dim].degree();
+ unsigned const other_degree = other->inner[dim].degree();
+
+ if (our_degree < other_degree) {
+ // Elevate our degree
+ elevated_this.inner[dim] = inner[dim].elevate_to_degree(other_degree);
+ elevated_other.inner[dim] = other->inner[dim];
+ } else if (our_degree > other_degree) {
+ // Elevate the other's degree
+ elevated_this.inner[dim] = inner[dim];
+ elevated_other.inner[dim] = other->inner[dim].elevate_to_degree(our_degree);
+ } else {
+ // Equal degrees: just copy
+ elevated_this.inner[dim] = inner[dim];
+ elevated_other.inner[dim] = other->inner[dim];
+ }
+ }
+ assert(elevated_other.size() == elevated_this.size());
+ return elevated_this.isNear(elevated_other, precision);
+ }
+}
+
+Curve *BezierCurve::portion(Coord f, Coord t) const
+{
+ if (f == 0.0 && t == 1.0) {
+ return duplicate();
+ }
+ if (f == 1.0 && t == 0.0) {
+ return reverse();
+ }
+ return new BezierCurve(Geom::portion(inner, f, t));
+}
+
+bool BezierCurve::operator==(Curve const &c) const
+{
+ if (this == &c) return true;
+
+ BezierCurve const *other = dynamic_cast<BezierCurve const *>(&c);
+ if (!other) return false;
+ if (size() != other->size()) return false;
+
+ for (unsigned i = 0; i < size(); ++i) {
+ if (controlPoint(i) != other->controlPoint(i)) return false;
+ }
+ return true;
+}
+
+Coord BezierCurve::nearestTime(Point const &p, Coord from, Coord to) const
+{
+ return nearest_time(p, inner, from, to);
+}
+
+void BezierCurve::feed(PathSink &sink, bool moveto_initial) const
+{
+ if (size() > 4) {
+ Curve::feed(sink, moveto_initial);
+ return;
+ }
+
+ Point ip = controlPoint(0);
+ if (moveto_initial) {
+ sink.moveTo(ip);
+ }
+ switch (size()) {
+ case 2:
+ sink.lineTo(controlPoint(1));
+ break;
+ case 3:
+ sink.quadTo(controlPoint(1), controlPoint(2));
+ break;
+ case 4:
+ sink.curveTo(controlPoint(1), controlPoint(2), controlPoint(3));
+ break;
+ default:
+ // TODO: add a path sink method that accepts a vector of control points
+ // and converts to cubic spline by default
+ assert(false);
+ break;
+ }
+}
+
+BezierCurve *BezierCurve::create(std::vector<Point> const &pts)
+{
+ switch (pts.size()) {
+ case 0:
+ case 1:
+ THROW_LOGICALERROR("BezierCurve::create: too few points in vector");
+ return NULL;
+ case 2:
+ return new LineSegment(pts[0], pts[1]);
+ case 3:
+ return new QuadraticBezier(pts[0], pts[1], pts[2]);
+ case 4:
+ return new CubicBezier(pts[0], pts[1], pts[2], pts[3]);
+ default:
+ return new BezierCurve(pts);
+ }
+}
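+
+/*
+ * Illustrative usage sketch (kept out of the build with #if 0): as described in
+ * the class documentation above, create() picks the concrete type from the
+ * number of control points, so the result can be downcast when the order is
+ * known. The function name and point values are made up for the example.
+ */
+#if 0
+static void bezier_create_usage_example()
+{
+    std::vector<Point> pts = { Point(0, 0), Point(1, 2), Point(3, 2), Point(4, 0) };
+    BezierCurve *curve = BezierCurve::create(pts);  // 4 points -> CubicBezier
+    if (auto cubic = dynamic_cast<CubicBezier *>(curve)) {
+        Point mid = cubic->pointAt(0.5);            // use the order-3 specialization
+        (void) mid;
+    }
+    delete curve;
+}
+#endif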
+
+// optimized specializations for LineSegment
+
+template <>
+Curve *BezierCurveN<1>::derivative() const {
+ double dx = inner[X][1] - inner[X][0], dy = inner[Y][1] - inner[Y][0];
+ return new BezierCurveN<1>(Point(dx,dy),Point(dx,dy));
+}
+
+template<>
+Coord BezierCurveN<1>::nearestTime(Point const& p, Coord from, Coord to) const
+{
+ using std::swap;
+
+ if ( from > to ) swap(from, to);
+ Point ip = pointAt(from);
+ Point fp = pointAt(to);
+ Point v = fp - ip;
+ Coord l2v = L2sq(v);
+ if (l2v == 0) return 0;
+ Coord t = dot( p - ip, v ) / l2v;
+ if ( t <= 0 ) return from;
+ else if ( t >= 1 ) return to;
+ else return from + t*(to-from);
+}
+
+/* Specialized intersection routine for line segments. */
+template <>
+std::vector<CurveIntersection> BezierCurveN<1>::intersect(Curve const &other, Coord eps) const
+{
+ std::vector<CurveIntersection> result;
+
+ // only handle intersections with other LineSegments here
+ if (other.isLineSegment()) {
+ Line this_line(initialPoint(), finalPoint());
+ Line other_line(other.initialPoint(), other.finalPoint());
+ result = this_line.intersect(other_line);
+ filter_line_segment_intersections(result, true, true);
+ return result;
+ }
+
+ // pass all other types to the other curve
+ result = other.intersect(*this, eps);
+ transpose_in_place(result);
+ return result;
+}
+
+/** @brief Find intersections of a low-degree Bézier curve with a line segment.
+ *
+ * Uses algebraic solutions to low-degree polynomial equations which may be faster
+ * and more precise than iterative methods.
+ *
+ * @tparam degree The degree of the Bézier curve; must be 2 or 3.
+ * @param curve A Bézier curve of the given degree.
+ * @param line A line (but really a segment).
+ * @return Intersections between the passed curve and the fundamental segment of the line
+ * (the segment where the time parameter lies in the unit interval).
+ */
+template <unsigned degree>
+static std::vector<CurveIntersection> bezier_line_intersections(BezierCurveN<degree> const &curve, Line const &line)
+{
+ static_assert(degree == 2 || degree == 3, "bezier_line_intersections<degree>() error: degree must be 2 or 3.");
+
+ auto const length = distance(line.initialPoint(), line.finalPoint());
+ if (length == 0) {
+ return {};
+ }
+ std::vector<CurveIntersection> result;
+
+ // Find the isometry mapping the line to the x-axis, taking the initial point to the origin
+ // and the final point to (length, 0). Apply this transformation to the Bézier curve and
+ // extract the y-coordinate polynomial.
+ auto const transform = line.rotationToZero(Y);
+ Bezier const y = (curve.fragment() * transform)[Y];
+ std::vector<double> roots;
+
+ // Find roots of the polynomial y.
+ {
+ double const c2 = y[0] + y[2] - 2.0 * y[1];
+ double const c1 = y[1] - y[0];
+ double const c0 = y[0];
+
+ if constexpr (degree == 2) {
+ roots = solve_quadratic(c2, 2.0 * c1, c0);
+ } else if constexpr (degree == 3) {
+ double const c3 = y[3] - y[0] + 3.0 * (y[1] - y[2]);
+ roots = solve_cubic(c3, 3.0 * c2, 3.0 * c1 , c0);
+ }
+ }
+
+ // Filter the roots and assemble intersections.
+ for (double root : roots) {
+ if (root < 0.0 || root > 1.0) {
+ continue;
+ }
+ Coord x = (curve.pointAt(root) * transform)[X];
+ if (x < 0.0 || x > length) {
+ continue;
+ }
+ result.emplace_back(curve, line, root, x / length);
+ }
+ return result;
+}
+
+/* Specialized intersection routine for quadratic Bézier curves. */
+template <>
+std::vector<CurveIntersection> BezierCurveN<2>::intersect(Curve const &other, Coord eps) const
+{
+ if (auto other_bezier = dynamic_cast<BezierCurve const *>(&other)) {
+ auto const other_degree = other_bezier->order();
+ if (other_degree == 1) {
+ // Use the exact method to intersect a quadratic Bézier with a line segment.
+ auto line = Line(other_bezier->initialPoint(), other_bezier->finalPoint());
+ return bezier_line_intersections<2>(*this, line);
+ }
+ // TODO: implement exact intersection of two quadratic Béziers using the method of resultants.
+ }
+ return BezierCurve::intersect(other, eps);
+}
+
+/* Specialized intersection routine for cubic Bézier curves. */
+template <>
+std::vector<CurveIntersection> BezierCurveN<3>::intersect(Curve const &other, Coord eps) const
+{
+ if (auto other_bezier = dynamic_cast<BezierCurve const *>(&other)) {
+ if (other_bezier->order() == 1) {
+ // Use the exact method to intersect a cubic Bézier with a line segment.
+ auto line = Line(other_bezier->initialPoint(), other_bezier->finalPoint());
+ return bezier_line_intersections<3>(*this, line);
+ }
+ }
+ return BezierCurve::intersect(other, eps);
+}
+
+template <>
+int BezierCurveN<1>::winding(Point const &p) const
+{
+ Point ip = inner.at0(), fp = inner.at1();
+ if (p[Y] == std::max(ip[Y], fp[Y])) return 0;
+
+ Point v = fp - ip;
+ assert(v[Y] != 0);
+ Coord t = (p[Y] - ip[Y]) / v[Y];
+ assert(t >= 0 && t <= 1);
+ Coord xcross = lerp(t, ip[X], fp[X]);
+ if (xcross > p[X]) {
+ return v[Y] > 0 ? 1 : -1;
+ }
+ return 0;
+}
+
+template <>
+void BezierCurveN<1>::feed(PathSink &sink, bool moveto_initial) const
+{
+ if (moveto_initial) {
+ sink.moveTo(controlPoint(0));
+ }
+ sink.lineTo(controlPoint(1));
+}
+
+template <>
+void BezierCurveN<2>::feed(PathSink &sink, bool moveto_initial) const
+{
+ if (moveto_initial) {
+ sink.moveTo(controlPoint(0));
+ }
+ sink.quadTo(controlPoint(1), controlPoint(2));
+}
+
+template <>
+void BezierCurveN<3>::feed(PathSink &sink, bool moveto_initial) const
+{
+ if (moveto_initial) {
+ sink.moveTo(controlPoint(0));
+ }
+ sink.curveTo(controlPoint(1), controlPoint(2), controlPoint(3));
+}
+
+static void bezier_expand_to_image(Rect &range, Point const &x0, Point const &x1, Point const &x2)
+{
+ for (auto i : { X, Y }) {
+ bezier_expand_to_image(range[i], x0[i], x1[i], x2[i]);
+ }
+}
+
+static void bezier_expand_to_image(Rect &range, Point const &x0, Point const &x1, Point const &x2, Point const &x3)
+{
+ for (auto i : { X, Y }) {
+ bezier_expand_to_image(range[i], x0[i], x1[i], x2[i], x3[i]);
+ }
+}
+
+template <>
+void BezierCurveN<1>::expandToTransformed(Rect &bbox, Affine const &transform) const
+{
+ bbox.expandTo(finalPoint() * transform);
+}
+
+template <>
+void BezierCurveN<2>::expandToTransformed(Rect &bbox, Affine const &transform) const
+{
+ bezier_expand_to_image(bbox, controlPoint(0) * transform,
+ controlPoint(1) * transform,
+ controlPoint(2) * transform);
+}
+
+template <>
+void BezierCurveN<3>::expandToTransformed(Rect &bbox, Affine const &transform) const
+{
+ bezier_expand_to_image(bbox, controlPoint(0) * transform,
+ controlPoint(1) * transform,
+ controlPoint(2) * transform,
+ controlPoint(3) * transform);
+}
+
+static Coord bezier_length_internal(std::vector<Point> &v1, Coord tolerance, int level)
+{
+ /* The Bezier length algorithm used in 2Geom utilizes a simple fact:
+ * the Bezier curve is longer than the distance between its endpoints
+ * but shorter than the length of the polyline formed by its control
+     * points. When the difference between the two values is at most twice the
+     * error tolerance, we can be sure that the true value is no further than
+     * the tolerance from their arithmetic mean. When it's larger, we recursively
+ * subdivide the Bezier curve into two parts and add their lengths.
+ *
+ * We cap the maximum number of subdivisions at 256, which corresponds to 8 levels.
+ */
+ Coord lower = distance(v1.front(), v1.back());
+ Coord upper = 0.0;
+ for (size_t i = 0; i < v1.size() - 1; ++i) {
+ upper += distance(v1[i], v1[i+1]);
+ }
+ if (upper - lower <= 2*tolerance || level >= 8) {
+ return (lower + upper) / 2;
+ }
+
+
+ std::vector<Point> v2 = v1;
+
+ /* Compute the right subdivision directly in v1 and the left one in v2.
+ * Explanation of the algorithm used:
+ * We have to compute the left and right edges of this triangle in which
+ * the top row are the control points of the Bezier curve, and each cell
+ * is equal to the arithmetic mean of the cells directly above it
+ * to the right and left. This corresponds to subdividing the Bezier curve
+ * at time value 0.5: the left edge has the control points of the first
+ * portion of the Bezier curve and the right edge - the second one.
+ * In the example we subdivide a curve with 5 control points (order 4).
+ *
+ * Start:
+ * 0 1 2 3 4
+ * ? ? ? ?
+ * ? ? ?
+ * ? ?
+ * ?
+ * # means we have overwritten the value, ? means we don't know
+ * the value yet. Numbers mean the value is at i-th position in the vector.
+ *
+ * After loop with i==1
+ * # 1 2 3 4
+ * 0 ? ? ? -> write 0 to v2[1]
+ * ? ? ?
+ * ? ?
+ * ?
+ *
+ * After loop with i==2
+ * # # 2 3 4
+ * # 1 ? ?
+ * 0 ? ? -> write 0 to v2[2]
+ * ? ?
+ * ?
+ *
+ * After loop with i==3
+ * # # # 3 4
+ * # # 2 ?
+ * # 1 ?
+ * 0 ? -> write 0 to v2[3]
+ * ?
+ *
+ * After loop with i==4, we have the right edge of the triangle in v1,
+ * and we write the last value needed for the left edge in v2[4].
+ */
+
+ for (size_t i = 1; i < v1.size(); ++i) {
+ for (size_t j = i; j > 0; --j) {
+ v1[j-1] = 0.5 * (v1[j-1] + v1[j]);
+ }
+ v2[i] = v1[0];
+ }
+
+ return bezier_length_internal(v1, 0.5 * tolerance, level + 1) +
+ bezier_length_internal(v2, 0.5 * tolerance, level + 1);
+}
+
+/** @brief Compute the length of a bezier curve given by a vector of its control points
+ * @relatesalso BezierCurve */
+Coord bezier_length(std::vector<Point> const &points, Coord tolerance)
+{
+ if (points.size() < 2) return 0.0;
+ std::vector<Point> v1 = points;
+ return bezier_length_internal(v1, tolerance, 0);
+}
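+
+/*
+ * Illustrative usage sketch (kept out of the build with #if 0): the result is
+ * accurate to roughly the given tolerance, as explained in
+ * bezier_length_internal(). The point values are made up for the example.
+ */
+#if 0
+static void bezier_length_usage_example()
+{
+    std::vector<Point> pts = { Point(0, 0), Point(1, 1), Point(2, 1), Point(3, 0) };
+    Coord len = bezier_length(pts, 0.001);  // length of this cubic, tolerance 1e-3
+    (void) len;
+}
+#endif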
+
+static Coord bezier_length_internal(Point a0, Point a1, Point a2, Coord tolerance, int level)
+{
+ Coord lower = distance(a0, a2);
+ Coord upper = distance(a0, a1) + distance(a1, a2);
+
+ if (upper - lower <= 2*tolerance || level >= 8) {
+ return (lower + upper) / 2;
+ }
+
+ Point // Casteljau subdivision
+ // b0 = a0,
+ // c0 = a2,
+ b1 = 0.5*(a0 + a1),
+ c1 = 0.5*(a1 + a2),
+ b2 = 0.5*(b1 + c1); // == c2
+ return bezier_length_internal(a0, b1, b2, 0.5 * tolerance, level + 1) +
+ bezier_length_internal(b2, c1, a2, 0.5 * tolerance, level + 1);
+}
+
+/** @brief Compute the length of a quadratic bezier curve given by its control points
+ * @relatesalso QuadraticBezier */
+Coord bezier_length(Point a0, Point a1, Point a2, Coord tolerance)
+{
+ return bezier_length_internal(a0, a1, a2, tolerance, 0);
+}
+
+static Coord bezier_length_internal(Point a0, Point a1, Point a2, Point a3, Coord tolerance, int level)
+{
+ Coord lower = distance(a0, a3);
+ Coord upper = distance(a0, a1) + distance(a1, a2) + distance(a2, a3);
+
+ if (upper - lower <= 2*tolerance || level >= 8) {
+ return (lower + upper) / 2;
+ }
+
+ Point // Casteljau subdivision
+ // b0 = a0,
+ // c0 = a3,
+ b1 = 0.5*(a0 + a1),
+ t0 = 0.5*(a1 + a2),
+ c1 = 0.5*(a2 + a3),
+ b2 = 0.5*(b1 + t0),
+ c2 = 0.5*(t0 + c1),
+ b3 = 0.5*(b2 + c2); // == c3
+ return bezier_length_internal(a0, b1, b2, b3, 0.5 * tolerance, level + 1) +
+ bezier_length_internal(b3, c2, c1, a3, 0.5 * tolerance, level + 1);
+}
+
+/** @brief Compute the length of a cubic bezier curve given by its control points
+ * @relatesalso CubicBezier */
+Coord bezier_length(Point a0, Point a1, Point a2, Point a3, Coord tolerance)
+{
+ return bezier_length_internal(a0, a1, a2, a3, tolerance, 0);
+}
+
+} // end namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/bezier-utils.cpp b/src/2geom/bezier-utils.cpp
new file mode 100644
index 0000000..181b5b3
--- /dev/null
+++ b/src/2geom/bezier-utils.cpp
@@ -0,0 +1,997 @@
+/* Bezier interpolation for inkscape drawing code.
+ *
+ * Original code published in:
+ * An Algorithm for Automatically Fitting Digitized Curves
+ * by Philip J. Schneider
+ * "Graphics Gems", Academic Press, 1990
+ *
+ * Authors:
+ * Philip J. Schneider
+ * Lauris Kaplinski <lauris@kaplinski.com>
+ * Peter Moulder <pmoulder@mail.csse.monash.edu.au>
+ *
+ * Copyright (C) 1990 Philip J. Schneider
+ * Copyright (C) 2001 Lauris Kaplinski
+ * Copyright (C) 2001 Ximian, Inc.
+ * Copyright (C) 2003,2004 Monash University
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ */
+
+#define SP_HUGE 1e5
+#define noBEZIER_DEBUG
+
+#ifdef HAVE_IEEEFP_H
+# include <ieeefp.h>
+#endif
+
+#include <2geom/bezier-utils.h>
+#include <2geom/math-utils.h>
+#include <assert.h>
+
+namespace Geom {
+
+/* Forward declarations */
+static void generate_bezier(Point b[], Point const d[], double const u[], unsigned len,
+ Point const &tHat1, Point const &tHat2, double tolerance_sq);
+static void estimate_lengths(Point bezier[],
+ Point const data[], double const u[], unsigned len,
+ Point const &tHat1, Point const &tHat2);
+static void estimate_bi(Point b[4], unsigned ei,
+ Point const data[], double const u[], unsigned len);
+static void reparameterize(Point const d[], unsigned len, double u[], Point const bezCurve[]);
+static double NewtonRaphsonRootFind(Point const Q[], Point const &P, double u);
+static Point darray_center_tangent(Point const d[], unsigned center, unsigned length);
+static Point darray_right_tangent(Point const d[], unsigned const len);
+static unsigned copy_without_nans_or_adjacent_duplicates(Point const src[], unsigned src_len, Point dest[]);
+static void chord_length_parameterize(Point const d[], double u[], unsigned len);
+static double compute_max_error_ratio(Point const d[], double const u[], unsigned len,
+ Point const bezCurve[], double tolerance,
+ unsigned *splitPoint);
+static double compute_hook(Point const &a, Point const &b, double const u, Point const bezCurve[],
+ double const tolerance);
+
+
+static Point const unconstrained_tangent(0, 0);
+
+
+/*
+ * B0, B1, B2, B3 : Bezier multipliers
+ */
+
+#define B0(u) ( ( 1.0 - u ) * ( 1.0 - u ) * ( 1.0 - u ) )
+#define B1(u) ( 3 * u * ( 1.0 - u ) * ( 1.0 - u ) )
+#define B2(u) ( 3 * u * u * ( 1.0 - u ) )
+#define B3(u) ( u * u * u )
+
+#ifdef BEZIER_DEBUG
+# define DOUBLE_ASSERT(x) assert( ( (x) > -SP_HUGE ) && ( (x) < SP_HUGE ) )
+# define BEZIER_ASSERT(b) do { \
+ DOUBLE_ASSERT((b)[0][X]); DOUBLE_ASSERT((b)[0][Y]); \
+ DOUBLE_ASSERT((b)[1][X]); DOUBLE_ASSERT((b)[1][Y]); \
+ DOUBLE_ASSERT((b)[2][X]); DOUBLE_ASSERT((b)[2][Y]); \
+ DOUBLE_ASSERT((b)[3][X]); DOUBLE_ASSERT((b)[3][Y]); \
+ } while(0)
+#else
+# define DOUBLE_ASSERT(x) do { } while(0)
+# define BEZIER_ASSERT(b) do { } while(0)
+#endif
+
+
+/**
+ * Fit a single-segment Bezier curve to a set of digitized points.
+ *
+ * \return Number of segments generated, or -1 on error.
+ */
+int
+bezier_fit_cubic(Point *bezier, Point const *data, int len, double error)
+{
+ return bezier_fit_cubic_r(bezier, data, len, error, 1);
+}
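+
+/*
+ * Illustrative usage sketch (kept out of the build with #if 0): fit a single
+ * cubic segment to a handful of digitized points. The function name, sample
+ * data and error threshold are made up; internally sqrt(error) is used as a
+ * distance tolerance (see bezier_fit_cubic_full below).
+ */
+#if 0
+static void bezier_fit_usage_example()
+{
+    Point data[5] = {
+        Point(0, 0), Point(1, 0.9), Point(2, 1.1), Point(3, 0.8), Point(4, 0)
+    };
+    Point bezier[4];  // one segment = 4 control points
+    int nsegs = bezier_fit_cubic(bezier, data, 5, 0.01);
+    // nsegs is 1 on success, 0 for degenerate input, and -1 if no single
+    // cubic fits the data within the threshold (or on invalid arguments).
+    (void) nsegs;
+}
+#endif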
+
+/**
+ * Fit a multi-segment Bezier curve to a set of digitized points, first
+ * weeding out NaNs and adjacent identical points.
+ *
+ * \param max_beziers Maximum number of generated segments
+ * \param bezier Result array; must be large enough for max_beziers * 4 elements.
+ *
+ * \return Number of segments generated, or -1 on error.
+ */
+int
+bezier_fit_cubic_r(Point bezier[], Point const data[], int const len, double const error, unsigned const max_beziers)
+{
+ if(bezier == NULL ||
+ data == NULL ||
+ len <= 0 ||
+ max_beziers >= (1ul << (31 - 2 - 1 - 3)))
+ return -1;
+
+ Point *uniqued_data = new Point[len];
+ unsigned uniqued_len = copy_without_nans_or_adjacent_duplicates(data, len, uniqued_data);
+
+ if ( uniqued_len < 2 ) {
+ delete[] uniqued_data;
+ return 0;
+ }
+
+ /* Call fit-cubic function with recursion. */
+ int const ret = bezier_fit_cubic_full(bezier, NULL, uniqued_data, uniqued_len,
+ unconstrained_tangent, unconstrained_tangent,
+ error, max_beziers);
+ delete[] uniqued_data;
+ return ret;
+}
+
+/**
+ * Copy points from src to dest, filter out points containing NaN and
+ * adjacent points with equal x and y.
+ * \return length of dest
+ */
+static unsigned
+copy_without_nans_or_adjacent_duplicates(Point const src[], unsigned src_len, Point dest[])
+{
+ unsigned si = 0;
+ for (;;) {
+ if ( si == src_len ) {
+ return 0;
+ }
+ if (!std::isnan(src[si][X]) &&
+ !std::isnan(src[si][Y])) {
+ dest[0] = Point(src[si]);
+ ++si;
+ break;
+ }
+ si++;
+ }
+ unsigned di = 0;
+ for (; si < src_len; ++si) {
+ Point const src_pt = Point(src[si]);
+ if ( src_pt != dest[di]
+ && !std::isnan(src_pt[X])
+ && !std::isnan(src_pt[Y])) {
+ dest[++di] = src_pt;
+ }
+ }
+ unsigned dest_len = di + 1;
+ assert( dest_len <= src_len );
+ return dest_len;
+}
+
+/**
+ * Fit a multi-segment Bezier curve to a set of digitized points, without
+ * weeding out identical points and NaNs.
+ *
+ * \pre data is uniqued, i.e. there is no i such that data[i] == data[i + 1].
+ * \param max_beziers Maximum number of generated segments
+ * \param bezier Result array; must be large enough for max_beziers * 4 elements.
+ */
+int
+bezier_fit_cubic_full(Point bezier[], int split_points[],
+ Point const data[], int const len,
+ Point const &tHat1, Point const &tHat2,
+ double const error, unsigned const max_beziers)
+{
+ if(!(bezier != NULL) ||
+ !(data != NULL) ||
+ !(len > 0) ||
+ !(max_beziers >= 1) ||
+ !(error >= 0.0))
+ return -1;
+
+ if ( len < 2 ) return 0;
+
+ if ( len == 2 ) {
+ /* We have 2 points, which can be fitted trivially. */
+ bezier[0] = data[0];
+ bezier[3] = data[len - 1];
+ double const dist = distance(bezier[0], bezier[3]) / 3.0;
+ if (std::isnan(dist)) {
+ /* Numerical problem, fall back to straight line segment. */
+ bezier[1] = bezier[0];
+ bezier[2] = bezier[3];
+ } else {
+ bezier[1] = ( is_zero(tHat1)
+ ? ( 2 * bezier[0] + bezier[3] ) / 3.
+ : bezier[0] + dist * tHat1 );
+ bezier[2] = ( is_zero(tHat2)
+ ? ( bezier[0] + 2 * bezier[3] ) / 3.
+ : bezier[3] + dist * tHat2 );
+ }
+ BEZIER_ASSERT(bezier);
+ return 1;
+ }
+
+ /* Parameterize points, and attempt to fit curve */
+ unsigned splitPoint; /* Point to split point set at. */
+ bool is_corner;
+ {
+ double *u = new double[len];
+ chord_length_parameterize(data, u, len);
+ if ( u[len - 1] == 0.0 ) {
+ /* Zero-length path: every point in data[] is the same.
+ *
+ * (Clients aren't allowed to pass such data; handling the case is defensive
+ * programming.)
+ */
+ delete[] u;
+ return 0;
+ }
+
+ generate_bezier(bezier, data, u, len, tHat1, tHat2, error);
+ reparameterize(data, len, u, bezier);
+
+ /* Find max deviation of points to fitted curve. */
+ double const tolerance = sqrt(error + 1e-9);
+ double maxErrorRatio = compute_max_error_ratio(data, u, len, bezier, tolerance, &splitPoint);
+
+ if ( fabs(maxErrorRatio) <= 1.0 ) {
+ BEZIER_ASSERT(bezier);
+ delete[] u;
+ return 1;
+ }
+
+ /* If error not too large, then try some reparameterization and iteration. */
+ if ( 0.0 <= maxErrorRatio && maxErrorRatio <= 3.0 ) {
+            int const maxIterations = 4;   /* Maximum number of fitting iterations to try */
+ for (int i = 0; i < maxIterations; i++) {
+ generate_bezier(bezier, data, u, len, tHat1, tHat2, error);
+ reparameterize(data, len, u, bezier);
+ maxErrorRatio = compute_max_error_ratio(data, u, len, bezier, tolerance, &splitPoint);
+ if ( fabs(maxErrorRatio) <= 1.0 ) {
+ BEZIER_ASSERT(bezier);
+ delete[] u;
+ return 1;
+ }
+ }
+ }
+ delete[] u;
+ is_corner = (maxErrorRatio < 0);
+ }
+
+ if (is_corner) {
+ assert(splitPoint < unsigned(len));
+ if (splitPoint == 0) {
+ if (is_zero(tHat1)) {
+ /* Got spike even with unconstrained initial tangent. */
+ ++splitPoint;
+ } else {
+ return bezier_fit_cubic_full(bezier, split_points, data, len, unconstrained_tangent, tHat2,
+ error, max_beziers);
+ }
+ } else if (splitPoint == unsigned(len - 1)) {
+ if (is_zero(tHat2)) {
+ /* Got spike even with unconstrained final tangent. */
+ --splitPoint;
+ } else {
+ return bezier_fit_cubic_full(bezier, split_points, data, len, tHat1, unconstrained_tangent,
+ error, max_beziers);
+ }
+ }
+ }
+
+ if ( 1 < max_beziers ) {
+ /*
+ * Fitting failed -- split at max error point and fit recursively
+ */
+ unsigned const rec_max_beziers1 = max_beziers - 1;
+
+ Point recTHat2, recTHat1;
+ if (is_corner) {
+ if(!(0 < splitPoint && splitPoint < unsigned(len - 1)))
+ return -1;
+ recTHat1 = recTHat2 = unconstrained_tangent;
+ } else {
+ /* Unit tangent vector at splitPoint. */
+ recTHat2 = darray_center_tangent(data, splitPoint, len);
+ recTHat1 = -recTHat2;
+ }
+ int const nsegs1 = bezier_fit_cubic_full(bezier, split_points, data, splitPoint + 1,
+ tHat1, recTHat2, error, rec_max_beziers1);
+ if ( nsegs1 < 0 ) {
+#ifdef BEZIER_DEBUG
+ g_print("fit_cubic[1]: recursive call failed\n");
+#endif
+ return -1;
+ }
+ assert( nsegs1 != 0 );
+ if (split_points != NULL) {
+ split_points[nsegs1 - 1] = splitPoint;
+ }
+ unsigned const rec_max_beziers2 = max_beziers - nsegs1;
+ int const nsegs2 = bezier_fit_cubic_full(bezier + nsegs1*4,
+ ( split_points == NULL
+ ? NULL
+ : split_points + nsegs1 ),
+ data + splitPoint, len - splitPoint,
+ recTHat1, tHat2, error, rec_max_beziers2);
+ if ( nsegs2 < 0 ) {
+#ifdef BEZIER_DEBUG
+ g_print("fit_cubic[2]: recursive call failed\n");
+#endif
+ return -1;
+ }
+
+#ifdef BEZIER_DEBUG
+ g_print("fit_cubic: success[nsegs: %d+%d=%d] on max_beziers:%u\n",
+ nsegs1, nsegs2, nsegs1 + nsegs2, max_beziers);
+#endif
+ return nsegs1 + nsegs2;
+ } else {
+ return -1;
+ }
+}
+
+
+/**
+ * Fill in \a bezier[] based on the given data and tangent requirements, using
+ * a least-squares fit.
+ *
+ * Each of tHat1 and tHat2 should be either a zero vector or a unit vector.
+ * If it is zero, then bezier[1 or 2] is estimated without constraint; otherwise,
+ * bezier[1 or 2] is placed in the specified direction from bezier[0 or 3].
+ *
+ * \param tolerance_sq Used only for an initial guess as to tangent directions
+ * when \a tHat1 or \a tHat2 is zero.
+ */
+static void
+generate_bezier(Point bezier[],
+ Point const data[], double const u[], unsigned const len,
+ Point const &tHat1, Point const &tHat2,
+ double const tolerance_sq)
+{
+ bool const est1 = is_zero(tHat1);
+ bool const est2 = is_zero(tHat2);
+ Point est_tHat1( est1
+ ? darray_left_tangent(data, len, tolerance_sq)
+ : tHat1 );
+ Point est_tHat2( est2
+ ? darray_right_tangent(data, len, tolerance_sq)
+ : tHat2 );
+ estimate_lengths(bezier, data, u, len, est_tHat1, est_tHat2);
+ /* We find that darray_right_tangent tends to produce better results
+ for our current freehand tool than full estimation. */
+ if (est1) {
+ estimate_bi(bezier, 1, data, u, len);
+ if (bezier[1] != bezier[0]) {
+ est_tHat1 = unit_vector(bezier[1] - bezier[0]);
+ }
+ estimate_lengths(bezier, data, u, len, est_tHat1, est_tHat2);
+ }
+}
+
+
+static void
+estimate_lengths(Point bezier[],
+ Point const data[], double const uPrime[], unsigned const len,
+ Point const &tHat1, Point const &tHat2)
+{
+ double C[2][2]; /* Matrix C. */
+ double X[2]; /* Matrix X. */
+
+ /* Create the C and X matrices. */
+ C[0][0] = 0.0;
+ C[0][1] = 0.0;
+ C[1][0] = 0.0;
+ C[1][1] = 0.0;
+ X[0] = 0.0;
+ X[1] = 0.0;
+
+ /* First and last control points of the Bezier curve are positioned exactly at the first and
+ last data points. */
+ bezier[0] = data[0];
+ bezier[3] = data[len - 1];
+
+ for (unsigned i = 0; i < len; i++) {
+ /* Bezier control point coefficients. */
+ double const b0 = B0(uPrime[i]);
+ double const b1 = B1(uPrime[i]);
+ double const b2 = B2(uPrime[i]);
+ double const b3 = B3(uPrime[i]);
+
+ /* rhs for eqn */
+ Point const a1 = b1 * tHat1;
+ Point const a2 = b2 * tHat2;
+
+ C[0][0] += dot(a1, a1);
+ C[0][1] += dot(a1, a2);
+ C[1][0] = C[0][1];
+ C[1][1] += dot(a2, a2);
+
+ /* Additional offset to the data point from the predicted point if we were to set bezier[1]
+ to bezier[0] and bezier[2] to bezier[3]. */
+ Point const shortfall
+ = ( data[i]
+ - ( ( b0 + b1 ) * bezier[0] )
+ - ( ( b2 + b3 ) * bezier[3] ) );
+ X[0] += dot(a1, shortfall);
+ X[1] += dot(a2, shortfall);
+ }
+
+ /* We've constructed a pair of equations in the form of a matrix product C * alpha = X.
+ Now solve for alpha. */
+ double alpha_l, alpha_r;
+
+ /* Compute the determinants of C and X. */
+ double const det_C0_C1 = C[0][0] * C[1][1] - C[1][0] * C[0][1];
+ if ( det_C0_C1 != 0 ) {
+        /* Cramer's rule. */
+ double const det_C0_X = C[0][0] * X[1] - C[0][1] * X[0];
+ double const det_X_C1 = X[0] * C[1][1] - X[1] * C[0][1];
+ alpha_l = det_X_C1 / det_C0_C1;
+ alpha_r = det_C0_X / det_C0_C1;
+ } else {
+ /* The matrix is under-determined. Try requiring alpha_l == alpha_r.
+ *
+ * One way of implementing the constraint alpha_l == alpha_r is to treat them as the same
+ * variable in the equations. We can do this by adding the columns of C to form a single
+ * column, to be multiplied by alpha to give the column vector X.
+ *
+ * We try each row in turn.
+ */
+ double const c0 = C[0][0] + C[0][1];
+ if (c0 != 0) {
+ alpha_l = alpha_r = X[0] / c0;
+ } else {
+ double const c1 = C[1][0] + C[1][1];
+ if (c1 != 0) {
+ alpha_l = alpha_r = X[1] / c1;
+ } else {
+ /* Let the below code handle this. */
+ alpha_l = alpha_r = 0.;
+ }
+ }
+ }
+
+ /* If alpha negative, use the Wu/Barsky heuristic (see text). (If alpha is 0, you get
+ coincident control points that lead to divide by zero in any subsequent
+ NewtonRaphsonRootFind() call.) */
+ /// \todo Check whether this special-casing is necessary now that
+ /// NewtonRaphsonRootFind handles non-positive denominator.
+ if ( alpha_l < 1.0e-6 ||
+ alpha_r < 1.0e-6 )
+ {
+ alpha_l = alpha_r = distance(data[0], data[len-1]) / 3.0;
+ }
+
+ /* Control points 1 and 2 are positioned an alpha distance out on the tangent vectors, left and
+ right, respectively. */
+ bezier[1] = alpha_l * tHat1 + bezier[0];
+ bezier[2] = alpha_r * tHat2 + bezier[3];
+
+ return;
+}
+
+static double lensq(Point const p) {
+ return dot(p, p);
+}
+
+static void
+estimate_bi(Point bezier[4], unsigned const ei,
+ Point const data[], double const u[], unsigned const len)
+{
+ if(!(1 <= ei && ei <= 2))
+ return;
+ unsigned const oi = 3 - ei;
+ double num[2] = {0., 0.};
+ double den = 0.;
+ for (unsigned i = 0; i < len; ++i) {
+ double const ui = u[i];
+ double const b[4] = {
+ B0(ui),
+ B1(ui),
+ B2(ui),
+ B3(ui)
+ };
+
+ for (unsigned d = 0; d < 2; ++d) {
+ num[d] += b[ei] * (b[0] * bezier[0][d] +
+ b[oi] * bezier[oi][d] +
+ b[3] * bezier[3][d] +
+ - data[i][d]);
+ }
+ den -= b[ei] * b[ei];
+ }
+
+ if (den != 0.) {
+ for (unsigned d = 0; d < 2; ++d) {
+ bezier[ei][d] = num[d] / den;
+ }
+ } else {
+ bezier[ei] = ( oi * bezier[0] + ei * bezier[3] ) / 3.;
+ }
+}
+
+/**
+ * Given set of points and their parameterization, try to find a better assignment of parameter
+ * values for the points.
+ *
+ * \param d Array of digitized points.
+ * \param u Current parameter values.
+ * \param bezCurve Current fitted curve.
+ * \param len Number of values in both d and u arrays.
+ * Also the size of the array that is allocated for return.
+ */
+static void
+reparameterize(Point const d[],
+ unsigned const len,
+ double u[],
+ Point const bezCurve[])
+{
+ assert( 2 <= len );
+
+ unsigned const last = len - 1;
+ assert( bezCurve[0] == d[0] );
+ assert( bezCurve[3] == d[last] );
+ assert( u[0] == 0.0 );
+ assert( u[last] == 1.0 );
+ /* Otherwise, consider including 0 and last in the below loop. */
+
+ for (unsigned i = 1; i < last; i++) {
+ u[i] = NewtonRaphsonRootFind(bezCurve, d[i], u[i]);
+ }
+}
+
+/**
+ * Use Newton-Raphson iteration to find better root.
+ *
+ * \param Q Current fitted curve
+ * \param P Digitized point
+ * \param u Parameter value for "P"
+ *
+ * \return Improved u
+ */
+static double
+NewtonRaphsonRootFind(Point const Q[], Point const &P, double const u)
+{
+ assert( 0.0 <= u );
+ assert( u <= 1.0 );
+
+ /* Generate control vertices for Q'. */
+ Point Q1[3];
+ for (unsigned i = 0; i < 3; i++) {
+ Q1[i] = 3.0 * ( Q[i+1] - Q[i] );
+ }
+
+ /* Generate control vertices for Q''. */
+ Point Q2[2];
+ for (unsigned i = 0; i < 2; i++) {
+ Q2[i] = 2.0 * ( Q1[i+1] - Q1[i] );
+ }
+
+ /* Compute Q(u), Q'(u) and Q''(u). */
+ Point const Q_u = bezier_pt(3, Q, u);
+ Point const Q1_u = bezier_pt(2, Q1, u);
+ Point const Q2_u = bezier_pt(1, Q2, u);
+
+ /* Compute f(u)/f'(u), where f is the derivative wrt u of distsq(u) = 0.5 * the square of the
+ distance from P to Q(u). Here we're using Newton-Raphson to find a stationary point in the
+ distsq(u), hopefully corresponding to a local minimum in distsq (and hence a local minimum
+ distance from P to Q(u)). */
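+    /* Explicitly (matching the code below):
+         distsq(u) = 0.5 * |Q(u) - P|^2
+         f(u)  = distsq'(u) = <Q(u) - P, Q'(u)>
+         f'(u) = <Q'(u), Q'(u)> + <Q(u) - P, Q''(u)>
+       so one Newton-Raphson step is u - f(u)/f'(u). */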
+ Point const diff = Q_u - P;
+ double numerator = dot(diff, Q1_u);
+ double denominator = dot(Q1_u, Q1_u) + dot(diff, Q2_u);
+
+ double improved_u;
+ if ( denominator > 0. ) {
+ /* One iteration of Newton-Raphson:
+ improved_u = u - f(u)/f'(u) */
+ improved_u = u - ( numerator / denominator );
+ } else {
+ /* Using Newton-Raphson would move in the wrong direction (towards a local maximum rather
+ than local minimum), so we move an arbitrary amount in the right direction. */
+ if ( numerator > 0. ) {
+ improved_u = u * .98 - .01;
+ } else if ( numerator < 0. ) {
+ /* Deliberately asymmetrical, to reduce the chance of cycling. */
+ improved_u = .031 + u * .98;
+ } else {
+ improved_u = u;
+ }
+ }
+
+ if (!std::isfinite(improved_u)) {
+ improved_u = u;
+ } else if ( improved_u < 0.0 ) {
+ improved_u = 0.0;
+ } else if ( improved_u > 1.0 ) {
+ improved_u = 1.0;
+ }
+
+ /* Ensure that improved_u isn't actually worse. */
+ {
+ double const diff_lensq = lensq(diff);
+ for (double proportion = .125; ; proportion += .125) {
+ if ( lensq( bezier_pt(3, Q, improved_u) - P ) > diff_lensq ) {
+ if ( proportion > 1.0 ) {
+ //g_warning("found proportion %g", proportion);
+ improved_u = u;
+ break;
+ }
+ improved_u = ( ( 1 - proportion ) * improved_u +
+ proportion * u );
+ } else {
+ break;
+ }
+ }
+ }
+
+ DOUBLE_ASSERT(improved_u);
+ return improved_u;
+}
+
+/**
+ * Evaluate a Bezier curve at parameter value \a t.
+ *
+ * \param degree The degree of the Bezier curve: 3 for cubic, 2 for quadratic etc. Must be less
+ * than 4.
+ * \param V The control points for the Bezier curve. Must have (\a degree+1)
+ * elements.
+ * \param t The "parameter" value, specifying whereabouts along the curve to
+ * evaluate. Typically in the range [0.0, 1.0].
+ *
+ * Let s = 1 - t.
+ * bezier_pt(1, V, t) gives (s, t) * V, i.e. t of the way
+ * from V[0] to V[1].
+ * bezier_pt(2, V, t) gives (s**2, 2*s*t, t**2) * V.
+ * bezier_pt(3, V, t) gives (s**3, 3 s**2 t, 3s t**2, t**3) * V.
+ *
+ * The derivative of bezier_pt(i, V, t) with respect to t
+ * is i * bezier_pt(i-1, V', t), where for all j, V'[j] =
+ * V[j + 1] - V[j].
+ */
+Point
+bezier_pt(unsigned const degree, Point const V[], double const t)
+{
+ /** Pascal's triangle. */
+ static int const pascal[4][4] = {{1, 0, 0, 0},
+ {1, 1, 0, 0},
+ {1, 2, 1, 0},
+ {1, 3, 3, 1}};
+ assert( degree < 4);
+ double const s = 1.0 - t;
+
+ /* Calculate powers of t and s. */
+ double spow[4];
+ double tpow[4];
+ spow[0] = 1.0; spow[1] = s;
+ tpow[0] = 1.0; tpow[1] = t;
+ for (unsigned i = 1; i < degree; ++i) {
+ spow[i + 1] = spow[i] * s;
+ tpow[i + 1] = tpow[i] * t;
+ }
+
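+    /* Accumulate the Bernstein form directly:
+       ret = sum_{i=0}^{degree} pascal[degree][i] * s^(degree - i) * t^i * V[i],
+       where pascal[degree][i] is the binomial coefficient C(degree, i). */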
+ Point ret = spow[degree] * V[0];
+ for (unsigned i = 1; i <= degree; ++i) {
+ ret += pascal[degree][i] * spow[degree - i] * tpow[i] * V[i];
+ }
+ return ret;
+}
+
+/*
+ * ComputeLeftTangent, ComputeRightTangent, ComputeCenterTangent :
+ * Approximate unit tangents at endpoints and "center" of digitized curve
+ */
+
+/**
+ * Estimate the (forward) tangent at point d[0.5], i.e. halfway between d[0] and d[1].
+ *
+ * Unlike the center and right versions, this calculates the tangent in
+ * the way one might expect, i.e., wrt increasing index into d.
+ * \pre (2 \<= len) and (d[0] != d[1]).
+ **/
+Point
+darray_left_tangent(Point const d[], unsigned const len)
+{
+ assert( len >= 2 );
+ assert( d[0] != d[1] );
+ return unit_vector( d[1] - d[0] );
+}
+
+/**
+ * Estimates the (backward) tangent at d[last - 0.5].
+ *
+ * \note The tangent is "backwards", i.e. it is with respect to
+ * decreasing index rather than increasing index.
+ *
+ * \pre 2 \<= len.
+ * \pre d[len - 1] != d[len - 2].
+ * \pre all[p in d] in_svg_plane(p).
+ */
+static Point
+darray_right_tangent(Point const d[], unsigned const len)
+{
+ assert( 2 <= len );
+ unsigned const last = len - 1;
+ unsigned const prev = last - 1;
+ assert( d[last] != d[prev] );
+ return unit_vector( d[prev] - d[last] );
+}
+
+/**
+ * Estimate the (forward) tangent at point d[0].
+ *
+ * Unlike the center and right versions, this calculates the tangent in
+ * the way one might expect, i.e., wrt increasing index into d.
+ *
+ * \pre 2 \<= len.
+ * \pre d[0] != d[1].
+ * \pre all[p in d] in_svg_plane(p).
+ * \post is_unit_vector(ret).
+ **/
+Point
+darray_left_tangent(Point const d[], unsigned const len, double const tolerance_sq)
+{
+ assert( 2 <= len );
+ assert( 0 <= tolerance_sq );
+ for (unsigned i = 1;;) {
+ Point const pi(d[i]);
+ Point const t(pi - d[0]);
+ double const distsq = dot(t, t);
+ if ( tolerance_sq < distsq ) {
+ return unit_vector(t);
+ }
+ ++i;
+ if (i == len) {
+ return ( distsq == 0
+ ? darray_left_tangent(d, len)
+ : unit_vector(t) );
+ }
+ }
+}
+
+/**
+ * Estimates the (backward) tangent at d[last].
+ *
+ * \note The tangent is "backwards", i.e. it is with respect to
+ * decreasing index rather than increasing index.
+ *
+ * \pre 2 \<= len.
+ * \pre d[len - 1] != d[len - 2].
+ * \pre all[p in d] in_svg_plane(p).
+ */
+Point
+darray_right_tangent(Point const d[], unsigned const len, double const tolerance_sq)
+{
+ assert( 2 <= len );
+ assert( 0 <= tolerance_sq );
+ unsigned const last = len - 1;
+ for (unsigned i = last - 1;; i--) {
+ Point const pi(d[i]);
+ Point const t(pi - d[last]);
+ double const distsq = dot(t, t);
+ if ( tolerance_sq < distsq ) {
+ return unit_vector(t);
+ }
+ if (i == 0) {
+ return ( distsq == 0
+ ? darray_right_tangent(d, len)
+ : unit_vector(t) );
+ }
+ }
+}
+
+/**
+ * Estimates the (backward) tangent at d[center], by averaging the two
+ * segments connected to d[center] (and then normalizing the result).
+ *
+ * \note The tangent is "backwards", i.e. it is with respect to
+ * decreasing index rather than increasing index.
+ *
+ * \pre (0 \< center \< len - 1) and d is uniqued (at least in
+ * the immediate vicinity of \a center).
+ */
+static Point
+darray_center_tangent(Point const d[],
+ unsigned const center,
+ unsigned const len)
+{
+ assert( center != 0 );
+ assert( center < len - 1 );
+
+ Point ret;
+ if ( d[center + 1] == d[center - 1] ) {
+ /* Rotate 90 degrees in an arbitrary direction. */
+ Point const diff = d[center] - d[center - 1];
+ ret = rot90(diff);
+ } else {
+ ret = d[center - 1] - d[center + 1];
+ }
+ ret.normalize();
+ return ret;
+}
+
+
+/**
+ * Assign parameter values to digitized points using relative distances between points.
+ *
+ * \pre Parameter array u must have space for \a len items.
+ */
+static void
+chord_length_parameterize(Point const d[], double u[], unsigned const len)
+{
+ if(!( 2 <= len ))
+ return;
+
+ /* First let u[i] equal the distance travelled along the path from d[0] to d[i]. */
+ u[0] = 0.0;
+ for (unsigned i = 1; i < len; i++) {
+ double const dist = distance(d[i], d[i-1]);
+ u[i] = u[i-1] + dist;
+ }
+
+ /* Then scale to [0.0 .. 1.0]. */
+ double tot_len = u[len - 1];
+ if(!( tot_len != 0 ))
+ return;
+ if (std::isfinite(tot_len)) {
+ for (unsigned i = 1; i < len; ++i) {
+ u[i] /= tot_len;
+ }
+ } else {
+ /* We could do better, but this probably never happens anyway. */
+ for (unsigned i = 1; i < len; ++i) {
+ u[i] = i / (double) ( len - 1 );
+ }
+ }
+
+ /** \todo
+ * It's been reported that u[len - 1] can differ from 1.0 on some
+ * systems (amd64), despite it having been calculated as x / x where x
+ * is isFinite and non-zero.
+ */
+ if (u[len - 1] != 1) {
+ double const diff = u[len - 1] - 1;
+ if (fabs(diff) > 1e-13) {
+ assert(0); // No warnings in 2geom
+ //g_warning("u[len - 1] = %19g (= 1 + %19g), expecting exactly 1",
+ // u[len - 1], diff);
+ }
+ u[len - 1] = 1;
+ }
+
+#ifdef BEZIER_DEBUG
+ assert( u[0] == 0.0 && u[len - 1] == 1.0 );
+ for (unsigned i = 1; i < len; i++) {
+ assert( u[i] >= u[i-1] );
+ }
+#endif
+}
+
+
+
+
+/**
+ * Find the maximum squared distance of digitized points to fitted curve, and (if this maximum
+ * error is non-zero) set \a *splitPoint to the corresponding index.
+ *
+ * \pre 2 \<= len.
+ * \pre u[0] == 0.
+ * \pre u[len - 1] == 1.0.
+ * \post ((ret == 0.0)
+ * || ((*splitPoint \< len - 1)
+ * \&\& (*splitPoint != 0 || ret \< 0.0))).
+ */
+static double
+compute_max_error_ratio(Point const d[], double const u[], unsigned const len,
+ Point const bezCurve[], double const tolerance,
+ unsigned *const splitPoint)
+{
+ assert( 2 <= len );
+ unsigned const last = len - 1;
+ assert( bezCurve[0] == d[0] );
+ assert( bezCurve[3] == d[last] );
+ assert( u[0] == 0.0 );
+ assert( u[last] == 1.0 );
+ /* I.e. assert that the error for the first & last points is zero.
+ * Otherwise we should include those points in the below loop.
+ * The assertion is also necessary to ensure 0 < splitPoint < last.
+ */
+
+ double maxDistsq = 0.0; /* Maximum error */
+ double max_hook_ratio = 0.0;
+ unsigned snap_end = 0;
+ Point prev = bezCurve[0];
+ for (unsigned i = 1; i <= last; i++) {
+ Point const curr = bezier_pt(3, bezCurve, u[i]);
+ double const distsq = lensq( curr - d[i] );
+ if ( distsq > maxDistsq ) {
+ maxDistsq = distsq;
+ *splitPoint = i;
+ }
+ double const hook_ratio = compute_hook(prev, curr, .5 * (u[i - 1] + u[i]), bezCurve, tolerance);
+ if (max_hook_ratio < hook_ratio) {
+ max_hook_ratio = hook_ratio;
+ snap_end = i;
+ }
+ prev = curr;
+ }
+
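+    /* Encode which error dominates in the sign of the result: positive when the ordinary
+     * point-to-curve distance dominates, negative when a "hook" (see compute_hook()) dominates;
+     * in the latter case *splitPoint is set to the index just before the hook. */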
+ double const dist_ratio = sqrt(maxDistsq) / tolerance;
+ double ret;
+ if (max_hook_ratio <= dist_ratio) {
+ ret = dist_ratio;
+ } else {
+ assert(0 < snap_end);
+ ret = -max_hook_ratio;
+ *splitPoint = snap_end - 1;
+ }
+ assert( ret == 0.0
+ || ( ( *splitPoint < last )
+ && ( *splitPoint != 0 || ret < 0. ) ) );
+ return ret;
+}
+
+/**
+ * Whereas compute_max_error_ratio() checks for itself that each data point
+ * is near some point on the curve, this function checks that each point on
+ * the curve is near some data point (or near some point on the polyline
+ * defined by the data points, or something like that: we allow for a
+ * "reasonable curviness" from such a polyline). "Reasonable curviness"
+ * means we draw a circle centred at the midpoint of a..b, of radius
+ * proportional to the length |a - b|, and require that each point on the
+ * segment of bezCurve between the parameters of a and b be within that circle.
+ * If any point P on the bezCurve segment is outside of that allowable
+ * region (circle), then we return some metric that increases with the
+ * distance from P to the circle.
+ *
+ * Given that this is a fairly arbitrary criterion for finding appropriate
+ * places for sharp corners, we test only one point on bezCurve, namely
+ * the point on bezCurve with parameter halfway between our estimated
+ * parameters for a and b. (Alternatives are taking the farthest of a
+ * few parameters between those of a and b, or even using a variant of
+ * NewtonRaphsonRootFind() for finding the maximum rather than minimum
+ * distance.)
+ */
+static double
+compute_hook(Point const &a, Point const &b, double const u, Point const bezCurve[],
+ double const tolerance)
+{
+ Point const P = bezier_pt(3, bezCurve, u);
+ double const dist = distance((a+b)*.5, P);
+ if (dist < tolerance) {
+ return 0;
+ }
+ double const allowed = distance(a, b) + tolerance;
+ return dist / allowed;
+ /** \todo
+ * effic: Hooks are very rare. We could start by comparing
+ * distsq, only resorting to the more expensive L2 in cases of
+ * uncertainty.
+ */
+}
+
+}
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/bezier.cpp b/src/2geom/bezier.cpp
new file mode 100644
index 0000000..264b3c2
--- /dev/null
+++ b/src/2geom/bezier.cpp
@@ -0,0 +1,415 @@
+/**
+ * @file
+ * @brief Bernstein-Bezier polynomial
+ *//*
+ * Authors:
+ * MenTaLguY <mental@rydia.net>
+ * Michael Sloan <mgsloan@gmail.com>
+ * Nathan Hurst <njh@njhurst.com>
+ * Krzysztof Kosiński <tweenk.pl@gmail.com>
+ *
+ * Copyright 2007-2015 Authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ */
+
+#include <2geom/bezier.h>
+#include <2geom/solver.h>
+#include <2geom/concepts.h>
+#include <2geom/choose.h>
+
+namespace Geom {
+
+std::vector<Coord> Bezier::valueAndDerivatives(Coord t, unsigned n_derivs) const {
+ /* This is inelegant, as it uses several extra stores. I think there might be a way to
+ * evaluate roughly in situ. */
+
+ // initialize return vector with zeroes, such that we only need to replace the non-zero derivs
+ std::vector<Coord> val_n_der(n_derivs + 1, Coord(0.0));
+
+ // initialize temp storage variables
+ std::valarray<Coord> d_(order()+1);
+ for(unsigned i = 0; i < size(); i++) {
+ d_[i] = c_[i];
+ }
+
+ unsigned nn = n_derivs + 1;
+ if(n_derivs > order()) {
+ nn = order()+1; // only calculate the non zero derivs
+ }
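+    // Each pass evaluates the current control polygon at t with bernstein_value_at(), then
+    // replaces it in place by the control polygon of its derivative: a degree-m Bezier's
+    // derivative has coefficients m * (d_[i+1] - d_[i]).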
+ for(unsigned di = 0; di < nn; di++) {
+ //val_n_der[di] = (casteljau_subdivision(t, &d_[0], NULL, NULL, order() - di));
+ val_n_der[di] = bernstein_value_at(t, &d_[0], order() - di);
+ for(unsigned i = 0; i < order() - di; i++) {
+ d_[i] = (order()-di)*(d_[i+1] - d_[i]);
+ }
+ }
+
+ return val_n_der;
+}
+
+void Bezier::subdivide(Coord t, Bezier *left, Bezier *right) const
+{
+ if (left) {
+ left->c_.resize(size());
+ if (right) {
+ right->c_.resize(size());
+ casteljau_subdivision<double>(t, &const_cast<std::valarray<Coord>&>(c_)[0],
+ &left->c_[0], &right->c_[0], order());
+ } else {
+ casteljau_subdivision<double>(t, &const_cast<std::valarray<Coord>&>(c_)[0],
+ &left->c_[0], NULL, order());
+ }
+ } else if (right) {
+ right->c_.resize(size());
+ casteljau_subdivision<double>(t, &const_cast<std::valarray<Coord>&>(c_)[0],
+ NULL, &right->c_[0], order());
+ }
+}
+
+std::pair<Bezier, Bezier> Bezier::subdivide(Coord t) const
+{
+ std::pair<Bezier, Bezier> ret;
+ subdivide(t, &ret.first, &ret.second);
+ return ret;
+}
+
+std::vector<Coord> Bezier::roots() const
+{
+ std::vector<Coord> solutions;
+ find_bezier_roots(solutions, 0, 1);
+ std::sort(solutions.begin(), solutions.end());
+ return solutions;
+}
+
+std::vector<Coord> Bezier::roots(Interval const &ivl) const
+{
+ std::vector<Coord> solutions;
+ find_bernstein_roots(&const_cast<std::valarray<Coord>&>(c_)[0], order(), solutions, 0, ivl.min(), ivl.max());
+ std::sort(solutions.begin(), solutions.end());
+ return solutions;
+}
+
+Bezier Bezier::forward_difference(unsigned k) const
+{
+ Bezier fd(Order(order() - k));
+ int n = fd.size();
+
+ for (int i = 0; i < n; i++) {
+ fd[i] = 0;
+ int b = (i & 1) ? -1 : 1; // b = (-1)^j binomial(n, j - i)
+ for (int j = i; j < n; j++) {
+ fd[i] += c_[j] * b;
+ binomial_increment_k(b, n, j - i);
+ b = -b;
+ }
+ }
+ return fd;
+}
+
+Bezier Bezier::elevate_degree() const
+{
+ Bezier ed(Order(order()+1));
+ unsigned n = size();
+ ed[0] = c_[0];
+ ed[n] = c_[n-1];
+ for(unsigned i = 1; i < n; i++) {
+ ed[i] = (i*c_[i-1] + (n - i)*c_[i])/(n);
+ }
+ return ed;
+}
+
+Bezier Bezier::reduce_degree() const
+{
+ if(order() == 0) return *this;
+ Bezier ed(Order(order()-1));
+ unsigned n = size();
+ ed[0] = c_[0];
+ ed[n-1] = c_[n]; // ensure exact endpoints
+ unsigned middle = n/2;
+ for(unsigned i = 1; i < middle; i++) {
+ ed[i] = (n*c_[i] - i*ed[i-1])/(n-i);
+ }
+ for(unsigned i = n-1; i >= middle; i--) {
+ ed[i] = (n*c_[i] - i*ed[n-i])/(i);
+ }
+ return ed;
+}
+
+Bezier Bezier::elevate_to_degree(unsigned newDegree) const
+{
+ Bezier ed = *this;
+ for(unsigned i = degree(); i < newDegree; i++) {
+ ed = ed.elevate_degree();
+ }
+ return ed;
+}
+
+Bezier Bezier::deflate() const
+{
+ if(order() == 0) return *this;
+ unsigned n = order();
+ Bezier b(Order(n-1));
+ for(unsigned i = 0; i < n; i++) {
+ b[i] = (n*c_[i+1])/(i+1);
+ }
+ return b;
+}
+
+SBasis Bezier::toSBasis() const
+{
+ SBasis sb;
+ bezier_to_sbasis(sb, (*this));
+ return sb;
+ //return bezier_to_sbasis(&c_[0], order());
+}
+
+Bezier &Bezier::operator+=(Bezier const &other)
+{
+ if (c_.size() > other.size()) {
+ c_ += other.elevate_to_degree(degree()).c_;
+ } else if (c_.size() < other.size()) {
+ *this = elevate_to_degree(other.degree());
+ c_ += other.c_;
+ } else {
+ c_ += other.c_;
+ }
+ return *this;
+}
+
+Bezier &Bezier::operator-=(Bezier const &other)
+{
+ if (c_.size() > other.size()) {
+ c_ -= other.elevate_to_degree(degree()).c_;
+ } else if (c_.size() < other.size()) {
+ *this = elevate_to_degree(other.degree());
+ c_ -= other.c_;
+ } else {
+ c_ -= other.c_;
+ }
+ return *this;
+}
+
+Bezier operator*(Bezier const &f, Bezier const &g)
+{
+ int m = f.order();
+ int n = g.order();
+ Bezier h(Bezier::Order(m+n));
+ // h_k = sum_(i+j=k) (m i)f_i (n j)g_j / (m+n k)
+
+ int mci = 1;
+ for (int i = 0; i <= m; i++) {
+ double const fi = mci * f[i];
+
+ int ncj = 1;
+ for (int j = 0; j <= n; j++) {
+ h[i + j] += fi * ncj * g[j];
+ binomial_increment_k(ncj, n, j);
+ }
+
+ binomial_increment_k(mci, m, i);
+ }
+
+ int mnck = 1;
+ for (int k = 0; k <= m + n; k++) {
+ h[k] /= mnck;
+ binomial_increment_k(mnck, m + n, k);
+ }
+
+ return h;
+}
+
+Bezier portion(Bezier const &a, double from, double to)
+{
+ Bezier ret(a);
+
+ bool reverse_result = false;
+ if (from > to) {
+ std::swap(from, to);
+ reverse_result = true;
+ }
+
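+    // Trim with de Casteljau subdivisions: cut away [0, from) keeping the right part, then cut
+    // away (to, 1] keeping the left part; the second cut parameter is expressed relative to the
+    // remaining interval [from, 1], hence (to - from) / (1 - from).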
+ do {
+ if (from == 0) {
+ if (to == 1) {
+ break;
+ }
+ casteljau_subdivision<double>(to, &ret.c_[0], &ret.c_[0], NULL, ret.order());
+ break;
+ }
+ casteljau_subdivision<double>(from, &ret.c_[0], NULL, &ret.c_[0], ret.order());
+ if (to == 1) break;
+ casteljau_subdivision<double>((to - from) / (1 - from), &ret.c_[0], &ret.c_[0], NULL, ret.order());
+ // to protect against numerical inaccuracy in the above expression, we manually set
+ // the last coefficient to a value evaluated directly from the original polynomial
+ ret.c_[ret.order()] = a.valueAt(to);
+ } while(0);
+
+ if (reverse_result) {
+ std::reverse(&ret.c_[0], &ret.c_[0] + ret.c_.size());
+ }
+ return ret;
+}
+
+Bezier derivative(Bezier const &a)
+{
+ //if(a.order() == 1) return Bezier(0.0);
+ if(a.order() == 1) return Bezier(a.c_[1]-a.c_[0]);
+ Bezier der(Bezier::Order(a.order()-1));
+
+ for(unsigned i = 0; i < a.order(); i++) {
+ der.c_[i] = a.order()*(a.c_[i+1] - a.c_[i]);
+ }
+ return der;
+}
+
+Bezier integral(Bezier const &a)
+{
+ Bezier inte(Bezier::Order(a.order()+1));
+
+ inte[0] = 0;
+ for(unsigned i = 0; i < inte.order(); i++) {
+ inte[i+1] = inte[i] + a[i]/(inte.order());
+ }
+ return inte;
+}
+
+OptInterval bounds_fast(Bezier const &b)
+{
+ OptInterval ret = Interval::from_array(&const_cast<Bezier&>(b).c_[0], b.size());
+ return ret;
+}
+
+OptInterval bounds_exact(Bezier const &b)
+{
+ OptInterval ret(b.at0(), b.at1());
+ std::vector<Coord> r = derivative(b).roots();
+ for (double i : r) {
+ ret->expandTo(b.valueAt(i));
+ }
+ return ret;
+}
+
+OptInterval bounds_local(Bezier const &b, OptInterval const &i)
+{
+ //return bounds_local(b.toSBasis(), i);
+ if (i) {
+ return bounds_fast(portion(b, i->min(), i->max()));
+ } else {
+ return OptInterval();
+ }
+}
+
+/*
+ * The general bézier of degree n is
+ *
+ * p(t) = sum_{i = 0...n} binomial(n, i) t^i (1 - t)^(n - i) x[i]
+ *
+ * It can be written explicitly as a polynomial in t as
+ *
+ * p(t) = sum_{i = 0...n} binomial(n, i) t^i [ sum_{j = 0...i} binomial(i, j) (-1)^(i - j) x[j] ]
+ *
+ * Its derivative is
+ *
+ * p'(t) = n sum_{i = 1...n} binomial(n - 1, i - 1) t^(i - 1) [ sum_{j = 0...i} binomial(i, j) (-1)^(i - j) x[j] ]
+ *
+ * This is used by the various specialisations below as an optimisation for low degree n <= 3.
+ * In the remaining cases, the generic implementation is used which resorts to iteration.
+ */
+
+void bezier_expand_to_image(Interval &range, Coord x0, Coord x1, Coord x2)
+{
+ range.expandTo(x2);
+
+ if (range.contains(x1)) {
+ // The interval contains all control points, and therefore the entire curve.
+ return;
+ }
+
+ // p'(t) / 2 = at + b
+ auto a = (x2 - x1) - (x1 - x0);
+ auto b = x1 - x0;
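+    // Derivation: p(t) = s^2 x0 + 2 s t x1 + t^2 x2 with s = 1 - t, so
+    // p'(t) = 2 [ (x1 - x0) + t ((x2 - x1) - (x1 - x0)) ] = 2 (a t + b).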
+
+ // t = -b / a
+ if (std::abs(a) > EPSILON) {
+ auto t = -b / a;
+ if (t > 0.0 && t < 1.0) {
+ auto s = 1.0 - t;
+ auto x = s * s * x0 + 2 * s * t * x1 + t * t * x2;
+ range.expandTo(x);
+ }
+ }
+}
+
+void bezier_expand_to_image(Interval &range, Coord x0, Coord x1, Coord x2, Coord x3)
+{
+ range.expandTo(x3);
+
+ if (range.contains(x1) && range.contains(x2)) {
+ // The interval contains all control points, and therefore the entire curve.
+ return;
+ }
+
+ // p'(t) / 3 = at^2 + 2bt + c
+ auto a = (x3 - x0) - 3 * (x2 - x1);
+ auto b = (x2 - x1) - (x1 - x0);
+ auto c = x1 - x0;
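+    // Derivation: p'(t) = 3 [ s^2 (x1 - x0) + 2 s t (x2 - x1) + t^2 (x3 - x2) ]
+    //                   = 3 (a t^2 + 2 b t + c), with s = 1 - t and a, b, c as above.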
+
+ auto expand = [&] (Coord t) {
+ if (t > 0.0 && t < 1.0) {
+ auto s = 1.0 - t;
+ auto x = s * s * s * x0 + 3 * s * s * t * x1 + 3 * t * t * s * x2 + t * t * t * x3;
+ range.expandTo(x);
+ }
+ };
+
+ // t = (-b ± sqrt(b^2 - ac)) / a
+ if (std::abs(a) < EPSILON) {
+ if (std::abs(b) > EPSILON) {
+ expand(-c / (2 * b));
+ }
+ } else {
+ auto d2 = b * b - a * c;
+ if (d2 >= 0.0) {
+ auto bsign = b >= 0.0 ? 1 : -1;
+ auto tmp = -(b + bsign * std::sqrt(d2));
+ expand(tmp / a);
+ expand(c / tmp); // Using Vieta's formula: product of roots == c/a
+ }
+ }
+}
+
+} // namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/cairo-path-sink.cpp b/src/2geom/cairo-path-sink.cpp
new file mode 100644
index 0000000..a04f715
--- /dev/null
+++ b/src/2geom/cairo-path-sink.cpp
@@ -0,0 +1,127 @@
+/**
+ * @file
+ * @brief Path sink for Cairo contexts
+ *//*
+ * Copyright 2014 Krzysztof Kosiński
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ */
+
+#ifdef HAVE_CAIRO
+
+#include <cairo.h>
+#include <2geom/cairo-path-sink.h>
+#include <2geom/elliptical-arc.h>
+
+namespace Geom {
+
+CairoPathSink::CairoPathSink(cairo_t *cr)
+ : _cr(cr)
+{}
+
+void CairoPathSink::moveTo(Point const &p)
+{
+ cairo_move_to(_cr, p[X], p[Y]);
+ _current_point = p;
+}
+
+void CairoPathSink::lineTo(Point const &p)
+{
+ cairo_line_to(_cr, p[X], p[Y]);
+ _current_point = p;
+}
+
+void CairoPathSink::curveTo(Point const &p1, Point const &p2, Point const &p3)
+{
+ cairo_curve_to(_cr, p1[X], p1[Y], p2[X], p2[Y], p3[X], p3[Y]);
+ _current_point = p3;
+}
+
+void CairoPathSink::quadTo(Point const &p1, Point const &p2)
+{
+    // Degree-elevate to a cubic Bezier, since Cairo has no quadratic segments:
+    // q1 = (1/3) p0 + (2/3) p1, q2 = (2/3) p1 + (1/3) p2 and q3 = p2 trace the same curve exactly.
+ Point q1 = (1./3.) * _current_point + (2./3.) * p1;
+ Point q2 = (2./3.) * p1 + (1./3.) * p2;
+ // q3 = p2
+ cairo_curve_to(_cr, q1[X], q1[Y], q2[X], q2[Y], p2[X], p2[Y]);
+ _current_point = p2;
+}
+
+void CairoPathSink::arcTo(double rx, double ry, double angle,
+ bool large_arc, bool sweep, Point const &p)
+{
+ EllipticalArc arc(_current_point, rx, ry, angle, large_arc, sweep, p);
+ // Cairo only does circular arcs.
+ // To do elliptical arcs, we must use a temporary transform.
+ Affine uct = arc.unitCircleTransform();
+
+ // TODO move Cairo-2Geom matrix conversion into a common location
+ cairo_matrix_t cm;
+ cm.xx = uct[0];
+ cm.xy = uct[2];
+ cm.x0 = uct[4];
+ cm.yx = uct[1];
+ cm.yy = uct[3];
+ cm.y0 = uct[5];
+
+ cairo_save(_cr);
+ cairo_transform(_cr, &cm);
+ if (sweep) {
+ cairo_arc(_cr, 0, 0, 1, arc.initialAngle(), arc.finalAngle());
+ } else {
+ cairo_arc_negative(_cr, 0, 0, 1, arc.initialAngle(), arc.finalAngle());
+ }
+ _current_point = p;
+ cairo_restore(_cr);
+
+ /* Note that an extra linear segment will be inserted before the arc
+ * if Cairo considers the current point distinct from the initial point
+ * of the arc; we could partially alleviate this by not emitting
+ * linear segments that are followed by arc segments, but this would require
+ * buffering the input curves. */
+}
+
+void CairoPathSink::closePath()
+{
+ cairo_close_path(_cr);
+}
+
+void CairoPathSink::flush() {}
+
+} // namespace Geom
+
+#endif
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/circle.cpp b/src/2geom/circle.cpp
new file mode 100644
index 0000000..d97487a
--- /dev/null
+++ b/src/2geom/circle.cpp
@@ -0,0 +1,337 @@
+/** @file
+ * @brief Circle shape
+ *//*
+ * Authors:
+ * Marco Cecchetti <mrcekets at gmail.com>
+ * Krzysztof Kosiński <tweenk.pl@gmail.com>
+ *
+ * Copyright 2008-2014 Authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+#include <2geom/circle.h>
+#include <2geom/ellipse.h>
+#include <2geom/elliptical-arc.h>
+#include <2geom/numeric/fitting-tool.h>
+#include <2geom/numeric/fitting-model.h>
+
+namespace Geom {
+
+Rect Circle::boundsFast() const
+{
+ Point rr(_radius, _radius);
+ Rect bbox(_center - rr, _center + rr);
+ return bbox;
+}
+
+void Circle::setCoefficients(Coord A, Coord B, Coord C, Coord D)
+{
+ if (A == 0) {
+ THROW_RANGEERROR("square term coefficient == 0");
+ }
+
+ //std::cerr << "B = " << B << " C = " << C << " D = " << D << std::endl;
+
+ Coord b = B / A;
+ Coord c = C / A;
+ Coord d = D / A;
+
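+    // Complete the square: x^2 + y^2 + b x + c y + d = 0 is equivalent to
+    // (x + b/2)^2 + (y + c/2)^2 = (b/2)^2 + (c/2)^2 - d, so the centre is (-b/2, -c/2)
+    // and the right-hand side is the squared radius.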
+ _center[X] = -b/2;
+ _center[Y] = -c/2;
+ Coord r2 = _center[X] * _center[X] + _center[Y] * _center[Y] - d;
+
+ if (r2 < 0) {
+ THROW_RANGEERROR("ray^2 < 0");
+ }
+
+ _radius = std::sqrt(r2);
+}
+
+void Circle::coefficients(Coord &A, Coord &B, Coord &C, Coord &D) const
+{
+ A = 1;
+ B = -2 * _center[X];
+ C = -2 * _center[Y];
+ D = _center[X] * _center[X] + _center[Y] * _center[Y] - _radius * _radius;
+}
+
+std::vector<Coord> Circle::coefficients() const
+{
+ std::vector<Coord> c(4);
+ coefficients(c[0], c[1], c[2], c[3]);
+ return c;
+}
+
+
+Zoom Circle::unitCircleTransform() const
+{
+ Zoom ret(_radius, _center / _radius);
+ return ret;
+}
+
+Zoom Circle::inverseUnitCircleTransform() const
+{
+ if (_radius == 0) {
+ THROW_RANGEERROR("degenerate circle does not have an inverse unit circle transform");
+ }
+
+ Zoom ret(1/_radius, Translate(-_center));
+ return ret;
+}
+
+Point Circle::initialPoint() const
+{
+ Point p(_center);
+ p[X] += _radius;
+ return p;
+}
+
+Point Circle::pointAt(Coord t) const {
+ return _center + Point::polar(t) * _radius;
+}
+
+Coord Circle::valueAt(Coord t, Dim2 d) const {
+ Coord delta = (d == X ? std::cos(t) : std::sin(t));
+ return _center[d] + delta * _radius;
+}
+
+Coord Circle::timeAt(Point const &p) const {
+ if (_center == p) return 0;
+ return atan2(p - _center);
+}
+
+Coord Circle::nearestTime(Point const &p) const {
+ return timeAt(p);
+}
+
+bool Circle::contains(Rect const &r) const
+{
+ for (unsigned i = 0; i < 4; ++i) {
+ if (!contains(r.corner(i))) return false;
+ }
+ return true;
+}
+
+bool Circle::contains(Circle const &other) const
+{
+ Coord cdist = distance(_center, other._center);
+ Coord rdist = fabs(_radius - other._radius);
+ return cdist <= rdist;
+}
+
+bool Circle::intersects(Line const &l) const
+{
+ // http://mathworld.wolfram.com/Circle-LineIntersection.html
+ Coord dr = l.vector().length();
+ Coord r = _radius;
+    // take the line endpoints relative to the centre, so the origin-centred formula applies
+    Coord D = cross(l.initialPoint() - _center, l.finalPoint() - _center);
+ Coord delta = r*r * dr*dr - D*D;
+ if (delta >= 0) return true;
+ return false;
+}
+
+bool Circle::intersects(Circle const &other) const
+{
+ Coord cdist = distance(_center, other._center);
+ Coord rsum = _radius + other._radius;
+ return cdist <= rsum;
+}
+
+
+std::vector<ShapeIntersection> Circle::intersect(Line const &l) const
+{
+ // http://mathworld.wolfram.com/Circle-LineIntersection.html
+ Coord dr = l.vector().length();
+ Coord dx = l.vector().x();
+ Coord dy = l.vector().y();
+ Coord D = cross(l.initialPoint() - _center, l.finalPoint() - _center);
+ Coord delta = _radius*_radius * dr*dr - D*D;
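+    // The line endpoints are taken relative to _center above, so the origin-centred formulas
+    // apply; the intersection points computed below are shifted back by adding _center.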
+
+ std::vector<ShapeIntersection> result;
+ if (delta < 0) return result;
+ if (delta == 0) {
+ Coord ix = (D*dy) / (dr*dr);
+ Coord iy = (-D*dx) / (dr*dr);
+ Point ip(ix, iy); ip += _center;
+ result.emplace_back(timeAt(ip), l.timeAt(ip), ip);
+ return result;
+ }
+
+ Coord sqrt_delta = std::sqrt(delta);
+ Coord signmod = dy < 0 ? -1 : 1;
+
+ Coord i1x = (D*dy + signmod * dx * sqrt_delta) / (dr*dr);
+ Coord i1y = (-D*dx + fabs(dy) * sqrt_delta) / (dr*dr);
+ Point i1p(i1x, i1y); i1p += _center;
+
+ Coord i2x = (D*dy - signmod * dx * sqrt_delta) / (dr*dr);
+ Coord i2y = (-D*dx - fabs(dy) * sqrt_delta) / (dr*dr);
+ Point i2p(i2x, i2y); i2p += _center;
+
+ result.emplace_back(timeAt(i1p), l.timeAt(i1p), i1p);
+ result.emplace_back(timeAt(i2p), l.timeAt(i2p), i2p);
+ return result;
+}
+
+std::vector<ShapeIntersection> Circle::intersect(LineSegment const &l) const
+{
+ std::vector<ShapeIntersection> result = intersect(Line(l));
+ filter_line_segment_intersections(result);
+ return result;
+}
+
+std::vector<ShapeIntersection> Circle::intersect(Circle const &other) const
+{
+ std::vector<ShapeIntersection> result;
+
+ if (*this == other) {
+ THROW_INFINITESOLUTIONS();
+ }
+ if (contains(other)) return result;
+ if (!intersects(other)) return result;
+
+ // See e.g. http://mathworld.wolfram.com/Circle-CircleIntersection.html
+ // Basically, we figure out where is the third point of a triangle
+ // with two points in the centers and with edge lengths equal to radii
+ Point cv = other._center - _center;
+ Coord d = cv.length();
+ Coord R = radius(), r = other.radius();
+
+ if (d == R + r) {
+ Point px = lerp(R / d, _center, other._center);
+ Coord T = timeAt(px), t = other.timeAt(px);
+ result.emplace_back(T, t, px);
+ return result;
+ }
+
+ // q is the distance along the line between centers to the perpendicular line
+ // that goes through both intersections.
+ Coord q = (d*d - r*r + R*R) / (2*d);
+ Point qp = lerp(q/d, _center, other._center);
+
+ // The triangle given by the points:
+ // _center, qp, intersection
+ // is a right triangle. Determine the distance between qp and intersection
+ // using the Pythagorean theorem.
+ Coord h = std::sqrt(R*R - q*q);
+ Point qd = (h/d) * cv.cw();
+
+ // now compute the intersection points
+ Point x1 = qp + qd;
+ Point x2 = qp - qd;
+
+ result.emplace_back(timeAt(x1), other.timeAt(x1), x1);
+ result.emplace_back(timeAt(x2), other.timeAt(x2), x2);
+ return result;
+}
+
+/**
+ @param inner a point whose angle with the circle center is inside the angle that the arc spans
+ */
+EllipticalArc *
+Circle::arc(Point const& initial, Point const& inner, Point const& final) const
+{
+ // TODO native implementation!
+ Ellipse e(_center[X], _center[Y], _radius, _radius, 0);
+ return e.arc(initial, inner, final);
+}
+
+bool Circle::operator==(Circle const &other) const
+{
+ if (_center != other._center) return false;
+ if (_radius != other._radius) return false;
+ return true;
+}
+
+D2<SBasis> Circle::toSBasis() const
+{
+ D2<SBasis> B;
+ Linear bo = Linear(0, 2 * M_PI);
+
+ B[0] = cos(bo,4);
+ B[1] = sin(bo,4);
+
+ B = B * _radius + _center;
+
+ return B;
+}
+
+
+void Circle::fit(std::vector<Point> const& points)
+{
+ size_t sz = points.size();
+ if (sz < 2) {
+ THROW_RANGEERROR("fitting error: too few points passed");
+ }
+ if (sz == 2) {
+ _center = points[0] * 0.5 + points[1] * 0.5;
+ _radius = distance(points[0], points[1]) / 2;
+ return;
+ }
+
+ NL::LFMCircle model;
+ NL::least_squeares_fitter<NL::LFMCircle> fitter(model, sz);
+
+ for (size_t i = 0; i < sz; ++i) {
+ fitter.append(points[i]);
+ }
+ fitter.update();
+
+ NL::Vector z(sz, 0.0);
+ model.instance(*this, fitter.result(z));
+}
+
+
+bool are_near(Circle const &a, Circle const &b, Coord eps)
+{
+ // to check whether no point on a is further than eps from b,
+ // we check two things:
+ // 1. if radii differ by more than eps, there is definitely a point that fails
+ // 2. if they differ by less, we check the centers. They have to be closer
+ // together if the radius differs, since the maximum distance will be
+ // equal to sum of radius difference and distance between centers.
+ if (!are_near(a.radius(), b.radius(), eps)) return false;
+ Coord adjusted_eps = eps - fabs(a.radius() - b.radius());
+ return are_near(a.center(), b.center(), adjusted_eps);
+}
+
+std::ostream &operator<<(std::ostream &out, Circle const &c)
+{
+ out << "Circle(" << c.center() << ", " << format_coord_nice(c.radius()) << ")";
+ return out;
+}
+
+} // end namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/concepts.cpp b/src/2geom/concepts.cpp
new file mode 100644
index 0000000..e8c8e5c
--- /dev/null
+++ b/src/2geom/concepts.cpp
@@ -0,0 +1,69 @@
+/**
+ * \file
+ * \brief Concept checking
+ *//*
+ * Copyright 2015 Krzysztof Kosiński <tweenk.pl@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+#include <boost/concept/assert.hpp>
+#include <2geom/concepts.h>
+
+#include <2geom/line.h>
+#include <2geom/circle.h>
+#include <2geom/ellipse.h>
+#include <2geom/curves.h>
+#include <2geom/convex-hull.h>
+#include <2geom/path.h>
+#include <2geom/pathvector.h>
+
+#include <2geom/bezier.h>
+#include <2geom/sbasis.h>
+#include <2geom/linear.h>
+#include <2geom/d2.h>
+
+namespace Geom {
+
+void concept_checks()
+{
+ BOOST_CONCEPT_ASSERT((ShapeConcept<Line>));
+ //BOOST_CONCEPT_ASSERT((ShapeConcept<Circle>));
+ //BOOST_CONCEPT_ASSERT((ShapeConcept<Ellipse>));
+ BOOST_CONCEPT_ASSERT((ShapeConcept<BezierCurve>));
+ BOOST_CONCEPT_ASSERT((ShapeConcept<EllipticalArc>));
+ //BOOST_CONCEPT_ASSERT((ShapeConcept<SBasisCurve>));
+ //BOOST_CONCEPT_ASSERT((ShapeConcept<ConvexHull>));
+ //BOOST_CONCEPT_ASSERT((ShapeConcept<Path>));
+ //BOOST_CONCEPT_ASSERT((ShapeConcept<PathVector>));
+
+ BOOST_CONCEPT_ASSERT((NearConcept<Coord>));
+ BOOST_CONCEPT_ASSERT((NearConcept<Point>));
+
+ BOOST_CONCEPT_ASSERT((FragmentConcept<Bezier>));
+ BOOST_CONCEPT_ASSERT((FragmentConcept<Linear>));
+ BOOST_CONCEPT_ASSERT((FragmentConcept<SBasis>));
+}
+
+} // end namespace Geom
diff --git a/src/2geom/conic_section_clipper_impl.cpp b/src/2geom/conic_section_clipper_impl.cpp
new file mode 100644
index 0000000..8b0445e
--- /dev/null
+++ b/src/2geom/conic_section_clipper_impl.cpp
@@ -0,0 +1,574 @@
+/* Conic section clipping with respect to a rectangle
+ *
+ * Authors:
+ * Marco Cecchetti <mrcekets at gmail>
+ *
+ * Copyright 2009 authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+#include <optional>
+
+#ifndef CLIP_WITH_CAIRO_SUPPORT
+ #include <2geom/conic_section_clipper.h>
+#endif
+
+namespace Geom
+{
+
+/*
+ * Find rectangle-conic crossing points. They are returned in the
+ * "crossing_points" parameter.
+ * The method returns true if the conic section does not intersect any of the
+ * four lines passing through the rectangle edges (i.e. there is no crossing),
+ * and false otherwise.
+ */
+bool CLIPPER_CLASS::intersect (std::vector<Point> & crossing_points) const
+{
+ crossing_points.clear();
+
+ std::vector<double> rts;
+ std::vector<Point> cpts;
+ // rectangle corners
+ enum {TOP_LEFT, TOP_RIGHT, BOTTOM_RIGHT, BOTTOM_LEFT};
+
+ bool no_crossing = true;
+
+ // right edge
+ cs.roots (rts, R.right(), X);
+ if (!rts.empty())
+ {
+ no_crossing = false;
+ DBGPRINT ("CLIP: right: rts[0] = ", rts[0])
+ DBGPRINTIF ((rts.size() == 2), "CLIP: right: rts[1] = ", rts[1])
+
+ Point corner1 = R.corner(TOP_RIGHT);
+ Point corner2 = R.corner(BOTTOM_RIGHT);
+
+ for (double rt : rts)
+ {
+ if (rt < R.top() || rt > R.bottom()) continue;
+ Point P (R.right(), rt);
+ if (are_near (P, corner1))
+ P = corner1;
+ else if (are_near (P, corner2))
+ P = corner2;
+
+ cpts.push_back (P);
+ }
+ if (cpts.size() == 2 && are_near (cpts[0], cpts[1]))
+ {
+ cpts[0] = middle_point (cpts[0], cpts[1]);
+ cpts.pop_back();
+ }
+ }
+
+ // top edge
+ cs.roots (rts, R.top(), Y);
+ if (!rts.empty())
+ {
+ no_crossing = false;
+ DBGPRINT ("CLIP: top: rts[0] = ", rts[0])
+ DBGPRINTIF ((rts.size() == 2), "CLIP: top: rts[1] = ", rts[1])
+
+ Point corner1 = R.corner(TOP_RIGHT);
+ Point corner2 = R.corner(TOP_LEFT);
+
+ for (double rt : rts)
+ {
+ if (rt < R.left() || rt > R.right()) continue;
+ Point P (rt, R.top());
+ if (are_near (P, corner1))
+ P = corner1;
+ else if (are_near (P, corner2))
+ P = corner2;
+
+ cpts.push_back (P);
+ }
+ if (cpts.size() == 2 && are_near (cpts[0], cpts[1]))
+ {
+ cpts[0] = middle_point (cpts[0], cpts[1]);
+ cpts.pop_back();
+ }
+ }
+
+ // left edge
+ cs.roots (rts, R.left(), X);
+ if (!rts.empty())
+ {
+ no_crossing = false;
+ DBGPRINT ("CLIP: left: rts[0] = ", rts[0])
+ DBGPRINTIF ((rts.size() == 2), "CLIP: left: rts[1] = ", rts[1])
+
+ Point corner1 = R.corner(TOP_LEFT);
+ Point corner2 = R.corner(BOTTOM_LEFT);
+
+ for (double rt : rts)
+ {
+ if (rt < R.top() || rt > R.bottom()) continue;
+ Point P (R.left(), rt);
+ if (are_near (P, corner1))
+ P = corner1;
+ else if (are_near (P, corner2))
+ P = corner2;
+
+ cpts.push_back (P);
+ }
+ if (cpts.size() == 2 && are_near (cpts[0], cpts[1]))
+ {
+ cpts[0] = middle_point (cpts[0], cpts[1]);
+ cpts.pop_back();
+ }
+ }
+
+ // bottom edge
+ cs.roots (rts, R.bottom(), Y);
+ if (!rts.empty())
+ {
+ no_crossing = false;
+ DBGPRINT ("CLIP: bottom: rts[0] = ", rts[0])
+ DBGPRINTIF ((rts.size() == 2), "CLIP: bottom: rts[1] = ", rts[1])
+
+ Point corner1 = R.corner(BOTTOM_RIGHT);
+ Point corner2 = R.corner(BOTTOM_LEFT);
+
+ for (double rt : rts)
+ {
+ if (rt < R.left() || rt > R.right()) continue;
+ Point P (rt, R.bottom());
+ if (are_near (P, corner1))
+ P = corner1;
+ else if (are_near (P, corner2))
+ P = corner2;
+
+ cpts.push_back (P);
+ }
+ if (cpts.size() == 2 && are_near (cpts[0], cpts[1]))
+ {
+ cpts[0] = middle_point (cpts[0], cpts[1]);
+ cpts.pop_back();
+ }
+ }
+
+ DBGPRINT ("CLIP: intersect: crossing_points.size (with duplicates) = ",
+ cpts.size())
+
+ // remove duplicates
+ std::sort (cpts.begin(), cpts.end(), Point::LexLess<X>());
+ cpts.erase (std::unique (cpts.begin(), cpts.end()), cpts.end());
+
+
+ // Order crossing points on the rectangle edge clockwise, so two consecutive
+ // crossing points would be the end points of a conic arc all inside or all
+ // outside the rectangle.
+ std::map<double, size_t> cp_angles;
+ for (size_t i = 0; i < cpts.size(); ++i)
+ {
+ cp_angles.insert (std::make_pair (cs.angle_at (cpts[i]), i));
+ }
+
+ std::map<double, size_t>::const_iterator pos;
+ for (pos = cp_angles.begin(); pos != cp_angles.end(); ++pos)
+ {
+ crossing_points.push_back (cpts[pos->second]);
+ }
+
+ DBGPRINT ("CLIP: intersect: crossing_points.size = ", crossing_points.size())
+ DBGPRINTCOLL ("CLIP: intersect: crossing_points:", crossing_points)
+
+ return no_crossing;
+} // end function intersect
+
+
+
+inline
+double signed_triangle_area (Point const& p1, Point const& p2, Point const& p3)
+{
+ return (cross(p2, p3) - cross(p1, p3) + cross(p1, p2));
+}
+
+
+/*
+ * Test whether two crossing points are the end points of a conic arc that lies
+ * inside the rectangle. In such a case the method returns true, else it returns
+ * false. Moreover, through the parameter "M" it returns a point interior to the
+ * conic arc with the given end-points.
+ *
+ */
+bool CLIPPER_CLASS::are_paired (Point& M, const Point & P1, const Point & P2) const
+{
+ using std::swap;
+
+ /*
+     * we look for the points on the conic whose tangent is parallel to the
+     * arc chord P1P2; they are the extrema of the conic arc P1P2 wrt the
+     * direction orthogonal to the chord
+ */
+ Point dir = P2 - P1;
+ DBGPRINT ("CLIP: are_paired: first point: ", P1)
+ DBGPRINT ("CLIP: are_paired: second point: ", P2)
+
+ double grad0 = 2 * cs.coeff(0) * dir[0] + cs.coeff(1) * dir[1];
+ double grad1 = cs.coeff(1) * dir[0] + 2 * cs.coeff(2) * dir[1];
+ double grad2 = cs.coeff(3) * dir[0] + cs.coeff(4) * dir[1];
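+    // grad0 * x + grad1 * y + grad2 is the derivative of the conic along "dir",
+    // written as a linear form in x and y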
+
+
+ /*
+     * such points are found by intersecting the conic section with the line
+     * grad0 * x + grad1 * y + grad2 = 0, the zero set of the derivative of the
+     * conic along the "dir" direction
+ */
+ Line gl (grad0, grad1, grad2);
+ std::vector<double> rts;
+ rts = cs.roots (gl);
+ DBGPRINT ("CLIP: are_paired: extrema: rts.size() = ", rts.size())
+
+
+
+ std::vector<Point> extrema;
+ for (double rt : rts)
+ {
+ extrema.push_back (gl.pointAt (rt));
+ }
+
+ if (extrema.size() == 2)
+ {
+        // in case we are dealing with a hyperbola we could have two extrema
+ // on the same side wrt the line passing through P1 and P2, but
+ // only the nearer extremum is on the arc P1P2
+ double side0 = signed_triangle_area (P1, extrema[0], P2);
+ double side1 = signed_triangle_area (P1, extrema[1], P2);
+
+ if (sgn(side0) == sgn(side1))
+ {
+ if (std::fabs(side0) > std::fabs(side1)) {
+ swap(extrema[0], extrema[1]);
+ }
+ extrema.pop_back();
+ }
+ }
+
+ std::vector<Point> inner_points;
+ for (auto & i : extrema)
+ {
+ if (!R.contains (i)) continue;
+ // in case we are dealing with an ellipse tangent to two orthogonal
+ // rectangle edges we could have two extrema on opposite sides wrt the
+        // line passing through P1 and P2, both inside the rectangle; however, since
+ // we order the crossing points clockwise we have only one extremum
+ // that follows such an ordering wrt P1 and P2;
+ // remark: the other arc will be selected when we test for the arc P2P1.
+ double P1angle = cs.angle_at (P1);
+ double P2angle = cs.angle_at (P2);
+ double Qangle = cs.angle_at (i);
+ if (P1angle < P2angle && !(P1angle <= Qangle && Qangle <= P2angle))
+ continue;
+ if (P1angle > P2angle && !(P1angle <= Qangle || Qangle <= P2angle))
+ continue;
+
+ inner_points.push_back (i);
+ }
+
+ if (inner_points.size() > 1)
+ {
+ THROW_LOGICALERROR ("conic section clipper: "
+ "more than one extremum found");
+ }
+ else if (inner_points.size() == 1)
+ {
+ M = inner_points.front();
+ return true;
+ }
+
+ return false;
+}
+
+
+/*
+ * Pair the points contained in the "crossing_points" vector; the paired points
+ * are put into the "paired_points" vector so that a point with an even index
+ * and the next one are the end points of a conic arc that lies inside the
+ * rectangle. The "inner_points" vector returns points interior to each such
+ * arc, where the inner point with index k belongs to the arc with end points
+ * of indices 2k and 2k+1. Any unpaired points are put into the
+ * "single_points" vector.
+ */
+void CLIPPER_CLASS::pairing (std::vector<Point> & paired_points,
+ std::vector<Point> & inner_points,
+ const std::vector<Point> & crossing_points)
+{
+ paired_points.clear();
+ paired_points.reserve (crossing_points.size());
+
+ inner_points.clear();
+ inner_points.reserve (crossing_points.size() / 2);
+
+ single_points.clear();
+
+ // to keep trace of which crossing points have been paired
+ std::vector<bool> paired (crossing_points.size(), false);
+
+ Point M;
+
+    // because of the way we have ordered the crossing points, each point needs
+    // to be tested for pairing only against the next one; moreover the last
+    // point needs to be tested against the first point. Note that one point can
+    // be paired both with the previous and the next one: this is not an error,
+    // think of crossing points that are tangent to a rectangle edge (and inner);
+ for (size_t i = 0; i < crossing_points.size(); ++i)
+ {
+ // we need to test the last point wrt the first one
+ size_t j = (i == 0) ? (crossing_points.size() - 1) : (i-1);
+ if (are_paired (M, crossing_points[j], crossing_points[i]))
+ {
+#ifdef CLIP_WITH_CAIRO_SUPPORT
+ cairo_set_source_rgba(cr, 0.1, 0.1, 0.8, 1.0);
+ draw_line_seg (cr, crossing_points[j], crossing_points[i]);
+ draw_handle (cr, crossing_points[j]);
+ draw_handle (cr, crossing_points[i]);
+ draw_handle (cr, M);
+ cairo_stroke (cr);
+#endif
+ paired[j] = paired[i] = true;
+ paired_points.push_back (crossing_points[j]);
+ paired_points.push_back (crossing_points[i]);
+ inner_points.push_back (M);
+ }
+ }
+
+    // some points are not paired with any other point, e.g. a crossing point
+    // tangent to a rectangle edge but with the conic arc outside the rectangle
+ for (size_t i = 0; i < paired.size(); ++i)
+ {
+ if (!paired[i])
+ single_points.push_back (crossing_points[i]);
+ }
+ DBGPRINTCOLL ("single_points", single_points)
+
+}
+
+
+/*
+ * This method clips the conic section against the rectangle and returns the
+ * inner conic arcs as a vector of RatQuad objects through the "arcs" parameter.
+ */
+bool CLIPPER_CLASS::clip (std::vector<RatQuad> & arcs)
+{
+ using std::swap;
+
+ arcs.clear();
+ std::vector<Point> crossing_points;
+ std::vector<Point> paired_points;
+ std::vector<Point> inner_points;
+
+ Line l1, l2;
+ if (cs.decompose (l1, l2))
+ {
+ bool inner_empty = true;
+
+ DBGINFO ("CLIP: degenerate section conic")
+
+ std::optional<LineSegment> ls1 = Geom::clip (l1, R);
+ if (ls1)
+ {
+ if (ls1->isDegenerate())
+ {
+ single_points.push_back (ls1->initialPoint());
+ }
+ else
+ {
+ Point M = middle_point (*ls1);
+ arcs.emplace_back(ls1->initialPoint(), M, ls1->finalPoint(), 1);
+ inner_empty = false;
+ }
+ }
+
+ std::optional<LineSegment> ls2 = Geom::clip (l2, R);
+ if (ls2)
+ {
+ if (ls2->isDegenerate())
+ {
+ single_points.push_back (ls2->initialPoint());
+ }
+ else
+ {
+ Point M = middle_point (*ls2);
+ arcs.emplace_back(ls2->initialPoint(), M, ls2->finalPoint(), 1);
+ inner_empty = false;
+ }
+ }
+
+ return !inner_empty;
+ }
+
+
+ bool no_crossing = intersect (crossing_points);
+
+    // if the only crossing point is a rectangle corner then the conic section
+    // is entirely outside the rectangle
+ if (crossing_points.size() == 1)
+ {
+ for (size_t i = 0; i < 4; ++i)
+ {
+ if (crossing_points[0] == R.corner(i))
+ {
+ single_points.push_back (R.corner(i));
+ return false;
+ }
+ }
+ }
+
+ // if the conic does not cross any line passing through a rectangle edge or
+ // it is tangent to only one edge then it is an ellipse
+ if (no_crossing
+ || (crossing_points.size() == 1 && single_points.empty()))
+ {
+ // if the ellipse centre is inside the rectangle
+        // then so is the ellipse
+ std::optional<Point> c = cs.centre();
+ if (c && R.contains (*c))
+ {
+ DBGPRINT ("CLIP: ellipse with centre", *c)
+ // we set paired and inner points by finding the ellipse
+            // intersection with its axes; this choice gives us a more
+            // accurate RatQuad parametric arc
+ paired_points.resize(4);
+ std::vector<double> rts;
+ double angle = cs.axis_angle();
+ Line axis1 (*c, angle);
+ rts = cs.roots (axis1);
+ if (rts[0] > rts[1]) swap (rts[0], rts[1]);
+ paired_points[0] = axis1.pointAt (rts[0]);
+ paired_points[1] = axis1.pointAt (rts[1]);
+ paired_points[2] = paired_points[1];
+ paired_points[3] = paired_points[0];
+ Line axis2 (*c, angle + M_PI/2);
+ rts = cs.roots (axis2);
+ if (rts[0] > rts[1]) swap (rts[0], rts[1]);
+ inner_points.push_back (axis2.pointAt (rts[0]));
+ inner_points.push_back (axis2.pointAt (rts[1]));
+ }
+ else if (crossing_points.size() == 1)
+ {
+ // so we have a tangent crossing point but the ellipse is outside
+ // the rectangle
+ single_points.push_back (crossing_points[0]);
+ }
+ }
+ else
+ {
+ // in case the conic section intersects any of the four lines passing
+ // through the rectangle edges but it does not cross any rectangle edge
+        // then the conic is entirely outside the rectangle
+ if (crossing_points.empty()) return false;
+ // else we need to pair crossing points, and to find an arc inner point
+ // in order to generate a RatQuad object
+ pairing (paired_points, inner_points, crossing_points);
+ }
+
+
+ // we split arcs until the end-point distance is less than a given value,
+    // in this way the RatQuad parametrization is accurate enough
+ std::list<Point> points;
+ std::list<Point>::iterator sp, ip, fp;
+ for (size_t i = 0, j = 0; i < paired_points.size(); i += 2, ++j)
+ {
+ //DBGPRINT ("CLIP: clip: P = ", paired_points[i])
+ //DBGPRINT ("CLIP: clip: M = ", inner_points[j])
+ //DBGPRINT ("CLIP: clip: Q = ", paired_points[i+1])
+
+        // in case the inner point and the end points are near, it is better not to
+        // split the conic arc further, or we could get a degenerate RatQuad object
+ if (are_near (paired_points[i], inner_points[j], 1e-4)
+ && are_near (paired_points[i+1], inner_points[j], 1e-4))
+ {
+ arcs.push_back (cs.toRatQuad (paired_points[i],
+ inner_points[j],
+ paired_points[i+1]));
+ continue;
+ }
+
+ // populate the list
+ points.push_back(paired_points[i]);
+ points.push_back(inner_points[j]);
+ points.push_back(paired_points[i+1]);
+
+ // an initial unconditioned splitting
+ sp = points.begin();
+ ip = sp; ++ip;
+ fp = ip; ++fp;
+ rsplit (points, sp, ip, size_t(1u));
+ rsplit (points, ip, fp, size_t(1u));
+
+ // length conditioned split
+ sp = points.begin();
+ fp = sp; ++fp;
+ while (fp != points.end())
+ {
+ rsplit (points, sp, fp, 100.0);
+ sp = fp;
+ ++fp;
+ }
+
+ sp = points.begin();
+ ip = sp; ++ip;
+ fp = ip; ++fp;
+ //DBGPRINT ("CLIP: points ", j)
+ //DBGPRINT ("CLIP: points.size = ", points.size())
+ while (ip != points.end())
+ {
+#ifdef CLIP_WITH_CAIRO_SUPPORT
+ cairo_set_source_rgba(cr, 0.1, 0.1, 0.8, 1.0);
+ draw_handle (cr, *sp);
+ draw_handle (cr, *ip);
+ cairo_stroke (cr);
+#endif
+ //std::cerr << "CLIP: arc: [" << *sp << ", " << *ip << ", "
+ // << *fp << "]" << std::endl;
+ arcs.push_back (cs.toRatQuad (*sp, *ip, *fp));
+ sp = fp;
+ ip = sp; ++ip;
+ fp = ip; ++fp;
+ }
+ points.clear();
+ }
+ DBGPRINT ("CLIP: arcs.size() = ", arcs.size())
+ return (arcs.size() != 0);
+} // end method clip
+
+
+} // end namespace Geom
+
+
+
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/conicsec.cpp b/src/2geom/conicsec.cpp
new file mode 100644
index 0000000..0865c0e
--- /dev/null
+++ b/src/2geom/conicsec.cpp
@@ -0,0 +1,1640 @@
+/*
+ * Authors:
+ * Nathan Hurst <njh@njhurst.com>
+ *
+ * Copyright 2009 authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+
+#include <2geom/conicsec.h>
+#include <2geom/conic_section_clipper.h>
+#include <2geom/numeric/fitting-tool.h>
+#include <2geom/numeric/fitting-model.h>
+
+
+// File: convert.h
+#include <utility>
+#include <sstream>
+#include <stdexcept>
+#include <optional>
+
+namespace Geom
+{
+
+LineSegment intersection(Line l, Rect r) {
+ std::optional<LineSegment> seg = l.clip(r);
+ if (seg) {
+ return *seg;
+ } else {
+ return LineSegment(Point(0,0), Point(0,0));
+ }
+}
+
+static double det(Point a, Point b) {
+ return a[0]*b[1] - a[1]*b[0];
+}
+
+template <typename T>
+static T det(T a, T b, T c, T d) {
+ return a*d - b*c;
+}
+
+template <typename T>
+static T det(T M[2][2]) {
+ return M[0][0]*M[1][1] - M[1][0]*M[0][1];
+}
+
+template <typename T>
+static T det3(T M[3][3]) {
+ return ( M[0][0] * det(M[1][1], M[1][2],
+ M[2][1], M[2][2])
+ -M[1][0] * det(M[0][1], M[0][2],
+ M[2][1], M[2][2])
+ +M[2][0] * det(M[0][1], M[0][2],
+ M[1][1], M[1][2]));
+}
+
+static double boxprod(Point a, Point b, Point c) {
+ return det(a,b) - det(a,c) + det(b,c);
+}
+
+class BadConversion : public std::runtime_error {
+public:
+ BadConversion(const std::string& s)
+ : std::runtime_error(s)
+ { }
+};
+
+template <typename T>
+inline std::string stringify(T x)
+{
+ std::ostringstream o;
+ if (!(o << x))
+ throw BadConversion("stringify(T)");
+ return o.str();
+}
+
+ /* A G4 continuous cubic parametric approximation for rational quadratics.
+ See
+ An analysis of cubic approximation schemes for conic sections
+ Michael Floater
+ SINTEF
+
+ This is less accurate overall than some of his other schemes, but
+ produces very smooth joins and is still optimally h^-6
+ convergent.
+ */
+
+double RatQuad::lambda() const {
+ return 2*(6*w*w +1 -std::sqrt(3*w*w+1))/(12*w*w+3);
+}
+
+RatQuad RatQuad::fromPointsTangents(Point P0, Point dP0,
+ Point P,
+ Point P2, Point dP2) {
+ Line Line0 = Line::from_origin_and_vector(P0, dP0);
+ Line Line2 = Line::from_origin_and_vector(P2, dP2);
+ try {
+ OptCrossing oc = intersection(Line0, Line2);
+ if(!oc) // what to do?
+ return RatQuad(Point(), Point(), Point(), 0); // need opt really
+ //assert(0);
+ Point P1 = Line0.pointAt((*oc).ta);
+ double triarea = boxprod(P0, P1, P2);
+// std::cout << "RatQuad::fromPointsTangents: triarea = " << triarea << std::endl;
+ if (triarea == 0)
+ {
+ return RatQuad(P0, 0.5*(P0+P2), P2, 1);
+ }
+ double tau0 = boxprod(P, P1, P2)/triarea;
+ double tau1 = boxprod(P0, P, P2)/triarea;
+ double tau2 = boxprod(P0, P1, P)/triarea;
+ if (tau0 == 0 || tau1 == 0 || tau2 == 0)
+ {
+ return RatQuad(P0, 0.5*(P0+P2), P2, 1);
+ }
+ double w = tau1/(2*std::sqrt(tau0*tau2));
+// std::cout << "RatQuad::fromPointsTangents: tau0 = " << tau0 << std::endl;
+// std::cout << "RatQuad::fromPointsTangents: tau1 = " << tau1 << std::endl;
+// std::cout << "RatQuad::fromPointsTangents: tau2 = " << tau2 << std::endl;
+// std::cout << "RatQuad::fromPointsTangents: w = " << w << std::endl;
+ return RatQuad(P0, P1, P2, w);
+ } catch(Geom::InfiniteSolutions const&) {
+ return RatQuad(P0, 0.5*(P0+P2), P2, 1);
+ }
+ return RatQuad(Point(), Point(), Point(), 0); // need opt really
+}
+
+RatQuad RatQuad::circularArc(Point P0, Point P1, Point P2) {
+ return RatQuad(P0, P1, P2, dot(unit_vector(P0 - P1), unit_vector(P0 - P2)));
+}
+
+
+CubicBezier RatQuad::toCubic() const {
+ return toCubic(lambda());
+}
+
+CubicBezier RatQuad::toCubic(double lamb) const {
+ return CubicBezier(P[0],
+ (1-lamb)*P[0] + lamb*P[1],
+ (1-lamb)*P[2] + lamb*P[1],
+ P[2]);
+}
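+
+// Illustrative sketch only (not part of the upstream sources): converting a
+// rational quadratic to its cubic approximation and sampling both at the
+// midpoint to gauge the approximation error. `rq` is a hypothetical arc
+// supplied by the caller.
+#if 0
+static double example_ratquad_to_cubic(RatQuad const &rq)
+{
+    CubicBezier cb = rq.toCubic();   // uses the G4 weight computed by lambda()
+    return distance(rq.pointAt(0.5), cb.pointAt(0.5));
+}
+#endif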
+
+Point RatQuad::pointAt(double t) const {
+ Bezier xt(P[0][0], P[1][0]*w, P[2][0]);
+ Bezier yt(P[0][1], P[1][1]*w, P[2][1]);
+ double wt = Bezier(1, w, 1).valueAt(t);
+ return Point(xt.valueAt(t)/wt,
+ yt.valueAt(t)/wt);
+}
+
+void RatQuad::split(RatQuad &a, RatQuad &b) const {
+ a.P[0] = P[0];
+ b.P[2] = P[2];
+ a.P[1] = (P[0]+w*P[1])/(1+w);
+ b.P[1] = (w*P[1]+P[2])/(1+w);
+ a.w = b.w = std::sqrt((1+w)/2);
+ a.P[2] = b.P[0] = (0.5*a.P[1]+0.5*b.P[1]);
+}
+
+
+D2<SBasis> RatQuad::hermite() const {
+ SBasis t = Linear(0, 1);
+ SBasis omt = Linear(1, 0);
+
+ D2<SBasis> out(omt*omt*P[0][0]+2*omt*t*P[1][0]*w+t*t*P[2][0],
+ omt*omt*P[0][1]+2*omt*t*P[1][1]*w+t*t*P[2][1]);
+ for(int dim = 0; dim < 2; dim++) {
+ out[dim] = divide(out[dim], (omt*omt+2*omt*t*w+t*t), 2);
+ }
+ return out;
+}
+
+ std::vector<SBasis> RatQuad::homogeneous() const {
+ std::vector<SBasis> res(3, SBasis());
+ Bezier xt(P[0][0], P[1][0]*w, P[2][0]);
+ bezier_to_sbasis(res[0],xt);
+ Bezier yt(P[0][1], P[1][1]*w, P[2][1]);
+ bezier_to_sbasis(res[1],yt);
+ Bezier wt(1, w, 1);
+ bezier_to_sbasis(res[2],wt);
+ return res;
+}
+
+#if 0
+ std::string xAx::categorise() const {
+ double M[3][3] = {{c[0], c[1], c[3]},
+ {c[1], c[2], c[4]},
+ {c[3], c[4], c[5]}};
+ double D = det3(M);
+ if (c[0] == 0 && c[1] == 0 && c[2] == 0)
+ return "line";
+ std::string res = stringify(D);
+ double descr = c[1]*c[1] - c[0]*c[2];
+ if (descr < 0) {
+ if (c[0] == c[2] && c[1] == 0)
+ return res + "circle";
+ return res + "ellipse";
+ } else if (descr == 0) {
+ return res + "parabola";
+ } else if (descr > 0) {
+ if (c[0] + c[2] == 0) {
+ if (D == 0)
+ return res + "two lines";
+ return res + "rectangular hyperbola";
+ }
+ return res + "hyperbola";
+
+ }
+ return "no idea!";
+}
+#endif
+
+
+std::vector<Point> decompose_degenerate(xAx const & C1, xAx const & C2, xAx const & xC0) {
+ std::vector<Point> res;
+ double A[2][2] = {{2*xC0.c[0], xC0.c[1]},
+ {xC0.c[1], 2*xC0.c[2]}};
+//Point B0 = xC0.bottom();
+ double const determ = det(A);
+ //std::cout << determ << "\n";
+ if (fabs(determ) >= 1e-20) { // hopeful, I know
+ Geom::Coord const ideterm = 1.0 / determ;
+
+ double b[2] = {-xC0.c[3], -xC0.c[4]};
+ Point B0((A[1][1]*b[0] -A[0][1]*b[1]),
+ (-A[1][0]*b[0] + A[0][0]*b[1]));
+ B0 *= ideterm;
+ Point n0, n1;
+ // Are these just the eigenvectors of A11?
+ if(xC0.c[0] == xC0.c[2]) {
+ double b = 0.5*xC0.c[1]/xC0.c[0];
+ double c = xC0.c[2]/xC0.c[0];
+ //assert(fabs(b*b-c) > 1e-10);
+ double d = std::sqrt(b*b-c);
+ //assert(fabs(b-d) > 1e-10);
+ n0 = Point(1, b+d);
+ n1 = Point(1, b-d);
+ } else if(fabs(xC0.c[0]) > fabs(xC0.c[2])) {
+ double b = 0.5*xC0.c[1]/xC0.c[0];
+ double c = xC0.c[2]/xC0.c[0];
+ //assert(fabs(b*b-c) > 1e-10);
+ double d = std::sqrt(b*b-c);
+ //assert(fabs(b-d) > 1e-10);
+ n0 = Point(1, b+d);
+ n1 = Point(1, b-d);
+ } else {
+ double b = 0.5*xC0.c[1]/xC0.c[2];
+ double c = xC0.c[0]/xC0.c[2];
+ //assert(fabs(b*b-c) > 1e-10);
+ double d = std::sqrt(b*b-c);
+ //assert(fabs(b-d) > 1e-10);
+ n0 = Point(b+d, 1);
+ n1 = Point(b-d, 1);
+ }
+
+ Line L0 = Line::from_origin_and_vector(B0, rot90(n0));
+ Line L1 = Line::from_origin_and_vector(B0, rot90(n1));
+
+ std::vector<double> rts = C1.roots(L0);
+ for(double rt : rts) {
+ Point P = L0.pointAt(rt);
+ res.push_back(P);
+ }
+ rts = C1.roots(L1);
+ for(double rt : rts) {
+ Point P = L1.pointAt(rt);
+ res.push_back(P);
+ }
+ } else {
+ // single or double line
+ // check for completely zero case (what to do?)
+ assert(xC0.c[0] || xC0.c[1] ||
+ xC0.c[2] || xC0.c[3] ||
+ xC0.c[4] || xC0.c[5]);
+ Point trial_pt(0,0);
+ Point g = xC0.gradient(trial_pt);
+ if(L2sq(g) == 0) {
+ trial_pt[0] += 1;
+ g = xC0.gradient(trial_pt);
+ if(L2sq(g) == 0) {
+ trial_pt[1] += 1;
+ g = xC0.gradient(trial_pt);
+ if(L2sq(g) == 0) {
+ trial_pt[0] += 1;
+ g = xC0.gradient(trial_pt);
+ if(L2sq(g) == 0) {
+ trial_pt = Point(1.5,0.5);
+ g = xC0.gradient(trial_pt);
+ }
+ }
+ }
+ }
+ //std::cout << trial_pt << ", " << g << "\n";
+ /**
+     * At this point we have tried up to 5 points: 0,0, 1,0, 1,1, 2,1, 1.5,0.5
+ *
+ * No degenerate conic can pass through these points, so we can assume
+ * that we've found a perpendicular to the double line.
+ * Proof:
+ * any degenerate must consist of at most 2 lines. 1.5,0.5 is not on any pair of lines
+ * passing through the previous 4 trials.
+ *
+ * alternatively, there may be a way to determine this directly from xC0
+ */
+ assert(L2sq(g) != 0);
+
+ Line Lx = Line::from_origin_and_vector(trial_pt, g); // a line along the gradient
+ std::vector<double> rts = xC0.roots(Lx);
+ for(double rt : rts) {
+ Point P0 = Lx.pointAt(rt);
+ //std::cout << P0 << "\n";
+ Line L = Line::from_origin_and_vector(P0, rot90(g));
+ std::vector<double> cnrts;
+        // It's very likely that at least one of the conics is degenerate; this will hopefully pick the more degenerate of the two.
+ if(fabs(C1.hessian().det()) > fabs(C2.hessian().det()))
+ cnrts = C1.roots(L);
+ else
+ cnrts = C2.roots(L);
+ for(double cnrt : cnrts) {
+ Point P = L.pointAt(cnrt);
+ res.push_back(P);
+ }
+ }
+ }
+ return res;
+}
+
+double xAx_descr(xAx const & C) {
+ double mC[3][3] = {{C.c[0], (C.c[1])/2, (C.c[3])/2},
+ {(C.c[1])/2, C.c[2], (C.c[4])/2},
+ {(C.c[3])/2, (C.c[4])/2, C.c[5]}};
+
+ return det3(mC);
+}
+
+
+std::vector<Point> intersect(xAx const & C1, xAx const & C2) {
+    // You know, if either of the inputs is degenerate we should use it first!
+ if(xAx_descr(C1) == 0) {
+ return decompose_degenerate(C1, C2, C1);
+ }
+ if(xAx_descr(C2) == 0) {
+ return decompose_degenerate(C1, C2, C2);
+ }
+ std::vector<Point> res;
+ SBasis T(Linear(-1,1));
+ SBasis S(Linear(1,1));
+ SBasis C[3][3] = {{T*C1.c[0]+S*C2.c[0], (T*C1.c[1]+S*C2.c[1])/2, (T*C1.c[3]+S*C2.c[3])/2},
+ {(T*C1.c[1]+S*C2.c[1])/2, T*C1.c[2]+S*C2.c[2], (T*C1.c[4]+S*C2.c[4])/2},
+ {(T*C1.c[3]+S*C2.c[3])/2, (T*C1.c[4]+S*C2.c[4])/2, T*C1.c[5]+S*C2.c[5]}};
+
+ SBasis D = det3(C);
+ std::vector<double> rts = Geom::roots(D);
+ if(rts.empty()) {
+ T = Linear(1,1);
+ S = Linear(-1,1);
+ SBasis C[3][3] = {{T*C1.c[0]+S*C2.c[0], (T*C1.c[1]+S*C2.c[1])/2, (T*C1.c[3]+S*C2.c[3])/2},
+ {(T*C1.c[1]+S*C2.c[1])/2, T*C1.c[2]+S*C2.c[2], (T*C1.c[4]+S*C2.c[4])/2},
+ {(T*C1.c[3]+S*C2.c[3])/2, (T*C1.c[4]+S*C2.c[4])/2, T*C1.c[5]+S*C2.c[5]}};
+
+ D = det3(C);
+ rts = Geom::roots(D);
+ }
+ // at this point we have a T and S and perhaps some roots that represent our degenerate conic
+ // Let's just pick one randomly (can we do better?)
+ //for(unsigned i = 0; i < rts.size(); i++) {
+ if(!rts.empty()) {
+ unsigned i = 0;
+ double t = T.valueAt(rts[i]);
+ double s = S.valueAt(rts[i]);
+ xAx xC0 = C1*t + C2*s;
+ //::draw(cr, xC0, screen_rect); // degen
+
+ return decompose_degenerate(C1, C2, xC0);
+
+
+ } else {
+ std::cout << "What?" << std::endl;
+ ;//std::cout << D << "\n";
+ }
+ return res;
+}
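+
+// Illustrative sketch only (not part of the upstream sources): the pencil-based
+// intersection above returns the common points of two general conics. `C1` and
+// `C2` are hypothetical caller-side conics.
+#if 0
+static void example_intersect_conics(xAx const &C1, xAx const &C2)
+{
+    std::vector<Point> pts = intersect(C1, C2);
+    for (Point const &p : pts) {
+        std::cout << p << "\n";
+    }
+}
+#endif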
+
+
+xAx xAx::fromPoint(Point p) {
+ return xAx(1., 0, 1., -2*p[0], -2*p[1], dot(p,p));
+}
+
+xAx xAx::fromDistPoint(Point /*p*/, double /*d*/) {
+ return xAx();//1., 0, 1., -2*(1+d)*p[0], -2*(1+d)*p[1], dot(p,p)+d*d);
+}
+
+xAx xAx::fromLine(Point n, double d) {
+ return xAx(n[0]*n[0], 2*n[0]*n[1], n[1]*n[1], 2*d*n[0], 2*d*n[1], d*d);
+}
+
+xAx xAx::fromLine(Line l) {
+ double dist;
+ Point norm = l.normalAndDist(dist);
+
+ return fromLine(norm, dist);
+}
+
+xAx xAx::fromPoints(std::vector<Geom::Point> const &pt) {
+ Geom::NL::Vector V(pt.size(), -1.0);
+ Geom::NL::Matrix M(pt.size(), 5);
+ for(unsigned i = 0; i < pt.size(); i++) {
+ Geom::Point P = pt[i];
+ Geom::NL::VectorView vv = M.row_view(i);
+ vv[0] = P[0]*P[0];
+ vv[1] = P[0]*P[1];
+ vv[2] = P[1]*P[1];
+ vv[3] = P[0];
+ vv[4] = P[1];
+ }
+
+ Geom::NL::LinearSystem ls(M, V);
+
+ Geom::NL::Vector x = ls.SV_solve();
+ return Geom::xAx(x[0], x[1], x[2], x[3], x[4], 1);
+
+}
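+
+// Illustrative sketch only (not part of the upstream sources): fitting a conic
+// through five points with the least-squares solver above. The coordinates are
+// made up for the example (they all lie on the unit circle).
+#if 0
+static xAx example_fit_unit_circle()
+{
+    std::vector<Point> pts;
+    pts.push_back(Point(0, 1));
+    pts.push_back(Point(1, 0));
+    pts.push_back(Point(0, -1));
+    pts.push_back(Point(-1, 0));
+    pts.push_back(Point(0.8, 0.6));
+    return xAx::fromPoints(pts); // solves M x = -1 in the least-squares sense
+}
+#endif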
+
+
+
+double xAx::valueAt(Point P) const {
+ return evaluate_at(P[0], P[1]);
+}
+
+xAx xAx::scale(double sx, double sy) const {
+ return xAx(c[0]*sx*sx, c[1]*sx*sy, c[2]*sy*sy,
+ c[3]*sx, c[4]*sy, c[5]);
+}
+
+Point xAx::gradient(Point p) const{
+ double x = p[0];
+ double y = p[1];
+ return Point(2*c[0]*x + c[1]*y + c[3],
+ c[1]*x + 2*c[2]*y + c[4]);
+}
+
+xAx xAx::operator-(xAx const &b) const {
+ xAx res;
+ for(int i = 0; i < 6; i++) {
+ res.c[i] = c[i] - b.c[i];
+ }
+ return res;
+}
+xAx xAx::operator+(xAx const &b) const {
+ xAx res;
+ for(int i = 0; i < 6; i++) {
+ res.c[i] = c[i] + b.c[i];
+ }
+ return res;
+}
+xAx xAx::operator+(double const &b) const {
+ xAx res;
+ for(int i = 0; i < 5; i++) {
+ res.c[i] = c[i];
+ }
+ res.c[5] = c[5] + b;
+ return res;
+}
+
+xAx xAx::operator*(double const &b) const {
+ xAx res;
+ for(int i = 0; i < 6; i++) {
+ res.c[i] = c[i] * b;
+ }
+ return res;
+}
+
+ std::vector<Point> xAx::crossings(Rect r) const {
+ std::vector<Point> res;
+ for(int ei = 0; ei < 4; ei++) {
+ Geom::LineSegment ls(r.corner(ei), r.corner(ei+1));
+ D2<SBasis> lssb = ls.toSBasis();
+ SBasis edge_curve = evaluate_at(lssb[0], lssb[1]);
+ std::vector<double> rts = Geom::roots(edge_curve);
+ for(double rt : rts) {
+ res.push_back(lssb.valueAt(rt));
+ }
+ }
+ return res;
+}
+
+ std::optional<RatQuad> xAx::toCurve(Rect const & bnd) const {
+ std::vector<Point> crs = crossings(bnd);
+ if(crs.size() == 1) {
+ Point A = crs[0];
+ Point dA = rot90(gradient(A));
+ if(L2sq(dA) <= 1e-10) { // perhaps a single point?
+ return std::optional<RatQuad> ();
+ }
+ LineSegment ls = intersection(Line::from_origin_and_vector(A, dA), bnd);
+ return RatQuad::fromPointsTangents(A, dA, ls.pointAt(0.5), ls[1], dA);
+ }
+ else if(crs.size() >= 2 && crs.size() < 4) {
+ Point A = crs[0];
+ Point C = crs[1];
+ if(crs.size() == 3) {
+ if(distance(A, crs[2]) > distance(A, C))
+ C = crs[2];
+ else if(distance(C, crs[2]) > distance(A, C))
+ A = crs[2];
+ }
+ Line bisector = make_bisector_line(LineSegment(A, C));
+ std::vector<double> bisect_rts = this->roots(bisector);
+ if(!bisect_rts.empty()) {
+ int besti = -1;
+ for(unsigned i =0; i < bisect_rts.size(); i++) {
+ Point p = bisector.pointAt(bisect_rts[i]);
+ if(bnd.contains(p)) {
+ besti = i;
+ }
+ }
+ if(besti >= 0) {
+ Point B = bisector.pointAt(bisect_rts[besti]);
+
+ Point dA = gradient(A);
+ Point dC = gradient(C);
+ if(L2sq(dA) <= 1e-10 || L2sq(dC) <= 1e-10) {
+ return RatQuad::fromPointsTangents(A, C-A, B, C, A-C);
+ }
+
+ RatQuad rq = RatQuad::fromPointsTangents(A, rot90(dA),
+ B, C, rot90(dC));
+ return rq;
+ //std::vector<SBasis> hrq = rq.homogeneous();
+ /*SBasis vertex_poly = evaluate_at(hrq[0], hrq[1], hrq[2]);
+ std::vector<double> rts = roots(vertex_poly);
+ for(unsigned i = 0; i < rts.size(); i++) {
+ //draw_circ(cr, Point(rq.pointAt(rts[i])));
+ }*/
+ }
+ }
+ }
+ return std::optional<RatQuad>();
+}
+
+ std::vector<double> xAx::roots(Point d, Point o) const {
+ // Find the roots on line l
+ // form the quadratic Q(t) = 0 by composing l with xAx
+ double q2 = c[0]*d[0]*d[0] + c[1]*d[0]*d[1] + c[2]*d[1]*d[1];
+ double q1 = (2*c[0]*d[0]*o[0] +
+ c[1]*(d[0]*o[1]+d[1]*o[0]) +
+ 2*c[2]*d[1]*o[1] +
+ c[3]*d[0] + c[4]*d[1]);
+ double q0 = c[0]*o[0]*o[0] + c[1]*o[0]*o[1] + c[2]*o[1]*o[1] + c[3]*o[0] + c[4]*o[1] + c[5];
+ std::vector<double> r;
+ if(q2 == 0) {
+ if(q1 == 0) {
+ return r;
+ }
+ r.push_back(-q0/q1);
+ } else {
+ double desc = q1*q1 - 4*q2*q0;
+ /*std::cout << q2 << ", "
+ << q1 << ", "
+ << q0 << "; "
+ << desc << "\n";*/
+ if (desc < 0)
+ return r;
+ else if (desc == 0)
+ r.push_back(-q1/(2*q2));
+ else {
+ desc = std::sqrt(desc);
+ double t;
+ if (q1 == 0)
+ {
+ t = -0.5 * desc;
+ }
+ else
+ {
+ t = -0.5 * (q1 + sgn(q1) * desc);
+ }
+ r.push_back(t/q2);
+ r.push_back(q0/t);
+ }
+ }
+ return r;
+}
+
+std::vector<double> xAx::roots(Line const &l) const {
+ return roots(l.versor(), l.origin());
+}
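+
+// Illustrative sketch only (not part of the upstream sources): the parameter
+// values returned by roots() live on the line, so they can be mapped back to
+// intersection points with Line::pointAt(). `C` and `l` are hypothetical.
+#if 0
+static std::vector<Point> example_conic_line_intersections(xAx const &C, Line const &l)
+{
+    std::vector<Point> pts;
+    for (double t : C.roots(l)) {
+        pts.push_back(l.pointAt(t));
+    }
+    return pts;
+}
+#endif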
+
+Interval xAx::quad_ex(double a, double b, double c, Interval ivl) {
+ double cx = -b*0.5/a;
+ Interval bnds((a*ivl.min()+b)*ivl.min()+c, (a*ivl.max()+b)*ivl.max()+c);
+ if(ivl.contains(cx))
+ bnds.expandTo((a*cx+b)*cx+c);
+ return bnds;
+}
+
+Geom::Affine xAx::hessian() const {
+ Geom::Affine m(2*c[0], c[1],
+ c[1], 2*c[2],
+ 0, 0);
+ return m;
+}
+
+
+std::optional<Point> solve(double A[2][2], double b[2]) {
+ double const determ = det(A);
+ if (determ != 0.0) { // hopeful, I know
+ Geom::Coord const ideterm = 1.0 / determ;
+
+ return Point ((A[1][1]*b[0] -A[0][1]*b[1]),
+ (-A[1][0]*b[0] + A[0][0]*b[1]))* ideterm;
+ } else {
+ return std::optional<Point>();
+ }
+}
+
+std::optional<Point> xAx::bottom() const {
+ double A[2][2] = {{2*c[0], c[1]},
+ {c[1], 2*c[2]}};
+ double b[2] = {-c[3], -c[4]};
+ return solve(A, b);
+ //return Point(-c[3], -c[4])*hessian().inverse();
+}
+
+Interval xAx::extrema(Rect r) const {
+ if (c[0] == 0 && c[1] == 0 && c[2] == 0) {
+ Interval ext(valueAt(r.corner(0)));
+ for(int i = 1; i < 4; i++)
+ ext |= Interval(valueAt(r.corner(i)));
+ return ext;
+ }
+ double k = r[X].min();
+ Interval ext = quad_ex(c[2], c[1]*k+c[4], (c[0]*k + c[3])*k + c[5], r[Y]);
+ k = r[X].max();
+ ext |= quad_ex(c[2], c[1]*k+c[4], (c[0]*k + c[3])*k + c[5], r[Y]);
+ k = r[Y].min();
+ ext |= quad_ex(c[0], c[1]*k+c[3], (c[2]*k + c[4])*k + c[5], r[X]);
+ k = r[Y].max();
+ ext |= quad_ex(c[0], c[1]*k+c[3], (c[2]*k + c[4])*k + c[5], r[X]);
+ std::optional<Point> B0 = bottom();
+ if (B0 && r.contains(*B0))
+ ext.expandTo(0);
+ return ext;
+}
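+
+// Illustrative sketch only (not part of the upstream sources): extrema() returns
+// the range of the conic's polynomial over a rectangle, so a range straddling
+// zero indicates that the zero set (the conic itself) may cross the rectangle.
+// `C` and `r` are hypothetical.
+#if 0
+static bool example_conic_may_cross_rect(xAx const &C, Rect const &r)
+{
+    return C.extrema(r).contains(0.0);
+}
+#endif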
+
+
+
+
+
+
+
+
+
+/*
+ * helper functions
+ */
+
+bool at_infinity (Point const& p)
+{
+ if (p[X] == infinity() || p[X] == -infinity()
+ || p[Y] == infinity() || p[Y] == -infinity())
+ {
+ return true;
+ }
+ return false;
+}
+
+inline
+double signed_triangle_area (Point const& p1, Point const& p2, Point const& p3)
+{
+ return (cross(p2, p3) - cross(p1, p3) + cross(p1, p2));
+}
+
+
+
+/*
+ * Define a conic section by computing the one that best fits
+ * N given points.
+ *
+ * points: points to fit
+ *
+ * precondition: there must be at least 5 non-overlapping points
+ */
+void xAx::set(std::vector<Point> const& points)
+{
+ size_t sz = points.size();
+ if (sz < 5)
+ {
+ THROW_RANGEERROR("fitting error: too few points passed");
+ }
+ NL::LFMConicSection model;
+ NL::least_squeares_fitter<NL::LFMConicSection> fitter(model, sz);
+
+ for (size_t i = 0; i < sz; ++i)
+ {
+ fitter.append(points[i]);
+ }
+ fitter.update();
+
+ NL::Vector z(sz, 0.0);
+ model.instance(*this, fitter.result(z));
+}
+
+/*
+ * Define a conic section by providing the coordinates of one of its vertices,
+ * the major axis inclination angle and the coordinates of its foci
+ * with respect to the unidimensional system defined by the major axis with
+ * origin set at the provided vertex.
+ *
+ * _vertex : conic section vertex V
+ * _angle : conic section major axis angle
+ * _dist1: +/- distance between V and the nearest focus
+ * _dist2: +/- distance between V and the farthest focus
+ *
+ * prerequisite: _dist1 <= _dist2
+ */
+void xAx::set (const Point& _vertex, double _angle, double _dist1, double _dist2)
+{
+ using std::swap;
+
+ if (_dist2 == infinity() || _dist2 == -infinity()) // parabola
+ {
+ if (_dist1 == infinity()) // degenerate to a line
+ {
+ Line l(_vertex, _angle);
+ std::vector<double> lcoeff = l.coefficients();
+ coeff(3) = lcoeff[0];
+ coeff(4) = lcoeff[1];
+ coeff(5) = lcoeff[2];
+ return;
+ }
+
+ // y^2 - 4px == 0
+ double cD = -4 * _dist1;
+
+ double cosa = std::cos (_angle);
+ double sina = std::sin (_angle);
+ double cca = cosa * cosa;
+ double ssa = sina * sina;
+ double csa = cosa * sina;
+
+ coeff(0) = ssa;
+ coeff(1) = -2 * csa;
+ coeff(2) = cca;
+ coeff(3) = cD * cosa;
+ coeff(4) = cD * sina;
+
+ double VxVx = _vertex[X] * _vertex[X];
+ double VxVy = _vertex[X] * _vertex[Y];
+ double VyVy = _vertex[Y] * _vertex[Y];
+
+ coeff(5) = coeff(0) * VxVx + coeff(1) * VxVy + coeff(2) * VyVy
+ - coeff(3) * _vertex[X] - coeff(4) * _vertex[Y];
+ coeff(3) -= (2 * coeff(0) * _vertex[X] + coeff(1) * _vertex[Y]);
+ coeff(4) -= (2 * coeff(2) * _vertex[Y] + coeff(1) * _vertex[X]);
+
+ return;
+ }
+
+ if (std::fabs(_dist1) > std::fabs(_dist2))
+ {
+ swap (_dist1, _dist2);
+ }
+ if (_dist1 < 0)
+ {
+ _angle -= M_PI;
+ _dist1 = -_dist1;
+ _dist2 = -_dist2;
+ }
+
+ // ellipse and hyperbola
+ double lin_ecc = (_dist2 - _dist1) / 2;
+ double rx = (_dist2 + _dist1) / 2;
+
+ double cA = rx * rx - lin_ecc * lin_ecc;
+ double cC = rx * rx;
+ double cF = - cA * cC;
+// std::cout << "cA: " << cA << std::endl;
+// std::cout << "cC: " << cC << std::endl;
+// std::cout << "cF: " << cF << std::endl;
+
+ double cosa = std::cos (_angle);
+ double sina = std::sin (_angle);
+ double cca = cosa * cosa;
+ double ssa = sina * sina;
+ double csa = cosa * sina;
+
+ coeff(0) = cca * cA + ssa * cC;
+ coeff(2) = ssa * cA + cca * cC;
+ coeff(1) = 2 * csa * (cA - cC);
+
+ Point C (rx * cosa + _vertex[X], rx * sina + _vertex[Y]);
+ double CxCx = C[X] * C[X];
+ double CxCy = C[X] * C[Y];
+ double CyCy = C[Y] * C[Y];
+
+ coeff(3) = -2 * coeff(0) * C[X] - coeff(1) * C[Y];
+ coeff(4) = -2 * coeff(2) * C[Y] - coeff(1) * C[X];
+ coeff(5) = cF + coeff(0) * CxCx + coeff(1) * CxCy + coeff(2) * CyCy;
+}
+
+/*
+ * Define a conic section by providing one of its vertices and its foci.
+ *
+ * _vertex: conic section vertex
+ * _focus1: conic section focus
+ * _focus2: conic section focus
+ */
+void xAx::set (const Point& _vertex, const Point& _focus1, const Point& _focus2)
+{
+ if (at_infinity(_vertex))
+ {
+ THROW_RANGEERROR("case not handled: vertex at infinity");
+ }
+ if (at_infinity(_focus2))
+ {
+ if (at_infinity(_focus1))
+ {
+ THROW_RANGEERROR("case not handled: both focus at infinity");
+ }
+ Point VF = _focus1 - _vertex;
+ double dist1 = L2(VF);
+ double angle = atan2(VF);
+ set(_vertex, angle, dist1, infinity());
+ return;
+ }
+ else if (at_infinity(_focus1))
+ {
+ Point VF = _focus2 - _vertex;
+ double dist1 = L2(VF);
+ double angle = atan2(VF);
+ set(_vertex, angle, dist1, infinity());
+ return;
+ }
+ assert (are_collinear (_vertex, _focus1, _focus2));
+ if (!are_near(_vertex, _focus1))
+ {
+ Point VF = _focus1 - _vertex;
+ Line axis(_vertex, _focus1);
+ double angle = atan2(VF);
+ double dist1 = L2(VF);
+ double dist2 = distance (_vertex, _focus2);
+ double t = axis.timeAt(_focus2);
+ if (t < 0) dist2 = -dist2;
+// std::cout << "t = " << t << std::endl;
+// std::cout << "dist2 = " << dist2 << std::endl;
+ set (_vertex, angle, dist1, dist2);
+ }
+ else if (!are_near(_vertex, _focus2))
+ {
+ Point VF = _focus2 - _vertex;
+ double angle = atan2(VF);
+ double dist1 = 0;
+ double dist2 = L2(VF);
+ set (_vertex, angle, dist1, dist2);
+ }
+ else
+ {
+ coeff(0) = coeff(2) = 1;
+ coeff(1) = coeff(3) = coeff(4) = coeff(5) = 0;
+ }
+}
+
+/*
+ * Define a conic section by passing a focus, the related directrix,
+ * and the eccentricity (e)
+ * (e < 1 -> ellipse; e = 1 -> parabola; e > 1 -> hyperbola)
+ *
+ * _focus: a focus of the conic section
+ * _directrix: the directrix related to the given focus
+ * _eccentricity: the eccentricity parameter of the conic section
+ */
+void xAx::set (const Point & _focus, const Line & _directrix, double _eccentricity)
+{
+ Point O = _directrix.pointAt (_directrix.timeAtProjection (_focus));
+ //std::cout << "O = " << O << std::endl;
+ Point OF = _focus - O;
+ double p = L2(OF);
+
+ coeff(0) = 1 - _eccentricity * _eccentricity;
+ coeff(1) = 0;
+ coeff(2) = 1;
+ coeff(3) = -2 * p;
+ coeff(4) = 0;
+ coeff(5) = p * p;
+
+ double angle = atan2 (OF);
+
+ (*this) = rotate (angle);
+ //std::cout << "O = " << O << std::endl;
+ (*this) = translate (O);
+}
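+
+// Illustrative sketch only (not part of the upstream sources): a parabola with
+// focus (1, 0), the vertical line x = -1 as its directrix, and eccentricity 1.
+// The point pair used to build the directrix is made up for the example.
+#if 0
+static xAx example_parabola()
+{
+    Line directrix(Point(-1, -1), Point(-1, 1));
+    xAx parabola;
+    parabola.set(Point(1, 0), directrix, 1.0);
+    return parabola;
+}
+#endif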
+
+/*
+ * Make up a degenerate conic section as a pair of lines
+ *
+ * l1, l2: the lines that make up the conic section
+ */
+void xAx::set (const Line& l1, const Line& l2)
+{
+ std::vector<double> cl1 = l1.coefficients();
+ std::vector<double> cl2 = l2.coefficients();
+
+ coeff(0) = cl1[0] * cl2[0];
+ coeff(2) = cl1[1] * cl2[1];
+ coeff(5) = cl1[2] * cl2[2];
+ coeff(1) = cl1[0] * cl2[1] + cl1[1] * cl2[0];
+ coeff(3) = cl1[0] * cl2[2] + cl1[2] * cl2[0];
+ coeff(4) = cl1[1] * cl2[2] + cl1[2] * cl2[1];
+}
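+
+// Illustrative sketch only (not part of the upstream sources): a degenerate conic
+// made of the two coordinate axes, i.e. the zero set of x * y.
+#if 0
+static xAx example_axes_conic()
+{
+    Line x_axis(Point(0, 0), Point(1, 0));
+    Line y_axis(Point(0, 0), Point(0, 1));
+    xAx degenerate;
+    degenerate.set(x_axis, y_axis);
+    return degenerate;
+}
+#endif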
+
+
+
+/*
+ * Return the conic section kind
+ */
+xAx::kind_t xAx::kind () const
+{
+
+ xAx conic(*this);
+ NL::SymmetricMatrix<3> C = conic.get_matrix();
+ NL::ConstSymmetricMatrixView<2> A = C.main_minor_const_view();
+
+ double t1 = trace(A);
+ double t2 = det(A);
+ //double T3 = det(C);
+ int st1 = trace_sgn(A);
+ int st2 = det_sgn(A);
+ int sT3 = det_sgn(C);
+
+ //std::cout << "T3 = " << T3 << std::endl;
+ //std::cout << "sT3 = " << sT3 << std::endl;
+ //std::cout << "t2 = " << t2 << std::endl;
+ //std::cout << "t1 = " << t1 << std::endl;
+ //std::cout << "st2 = " << st2 << std::endl;
+
+ if (sT3 != 0)
+ {
+ if (st2 == 0)
+ {
+ return PARABOLA;
+ }
+ else if (st2 == 1)
+ {
+
+ if (sT3 * st1 < 0)
+ {
+ NL::SymmetricMatrix<2> discr;
+ discr(0,0) = 4; discr(1,1) = t2; discr(1,0) = t1;
+ int discr_sgn = - det_sgn (discr);
+ //std::cout << "t1 * t1 - 4 * t2 = "
+ // << (t1 * t1 - 4 * t2) << std::endl;
+ //std::cout << "discr_sgn = " << discr_sgn << std::endl;
+ if (discr_sgn == 0)
+ {
+ return CIRCLE;
+ }
+ else
+ {
+ return REAL_ELLIPSE;
+ }
+ }
+ else // sT3 * st1 > 0
+ {
+ return IMAGINARY_ELLIPSE;
+ }
+ }
+ else // t2 < 0
+ {
+ if (st1 == 0)
+ {
+ return RECTANGULAR_HYPERBOLA;
+ }
+ else
+ {
+ return HYPERBOLA;
+ }
+ }
+ }
+ else // T3 == 0
+ {
+ if (st2 == 0)
+ {
+ //double T2 = NL::trace<2>(C);
+ int sT2 = NL::trace_sgn<2>(C);
+ //std::cout << "T2 = " << T2 << std::endl;
+ //std::cout << "sT2 = " << sT2 << std::endl;
+
+ if (sT2 == 0)
+ {
+ return DOUBLE_LINE;
+ }
+ if (sT2 == -1)
+ {
+ return TWO_REAL_PARALLEL_LINES;
+ }
+ else // T2 > 0
+ {
+ return TWO_IMAGINARY_PARALLEL_LINES;
+ }
+ }
+ else if (st2 == -1)
+ {
+ return TWO_REAL_CROSSING_LINES;
+ }
+ else // t2 > 0
+ {
+ return TWO_IMAGINARY_CROSSING_LINES;
+ }
+ }
+ return UNKNOWN;
+}
+
+/*
+ * Return a string representing the conic section kind
+ */
+std::string xAx::categorise() const
+{
+ kind_t KIND = kind();
+
+ switch (KIND)
+ {
+ case PARABOLA :
+ return "parabola";
+ case CIRCLE :
+ return "circle";
+ case REAL_ELLIPSE :
+      return "real ellipse";
+ case IMAGINARY_ELLIPSE :
+      return "imaginary ellipse";
+ case RECTANGULAR_HYPERBOLA :
+ return "rectangular hyperbola";
+ case HYPERBOLA :
+ return "hyperbola";
+ case DOUBLE_LINE :
+ return "double line";
+ case TWO_REAL_PARALLEL_LINES :
+ return "two real parallel lines";
+ case TWO_IMAGINARY_PARALLEL_LINES :
+ return "two imaginary parallel lines";
+ case TWO_REAL_CROSSING_LINES :
+ return "two real crossing lines";
+ case TWO_IMAGINARY_CROSSING_LINES :
+ return "two imaginary crossing lines";
+ default :
+ return "unknown";
+ }
+}
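+
+// Illustrative sketch only (not part of the upstream sources): classifying the
+// unit circle x^2 + y^2 - 1 = 0, which kind()/categorise() should report as a
+// circle.
+#if 0
+static void example_classify_unit_circle()
+{
+    xAx unit_circle(1, 0, 1, 0, 0, -1);
+    std::cout << unit_circle.categorise() << "\n";
+}
+#endif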
+
+/*
+ * Compute the solutions of the conic section's algebraic equation with respect
+ * to one coordinate, after substituting the passed value for the other coordinate
+ *
+ * sol: the computed solutions
+ * v: the provided value
+ * d: the index of the coordinate the passed value is substituted for
+ */
+void xAx::roots (std::vector<double>& sol, Coord v, Dim2 d) const
+{
+ sol.clear();
+ if (d < 0 || d > Y)
+ {
+ THROW_RANGEERROR("dimension parameter out of range");
+ }
+
+ // p*t^2 + q*t + r = 0;
+ double p, q, r;
+
+ if (d == X)
+ {
+ p = coeff(2);
+ q = coeff(4) + coeff(1) * v;
+ r = coeff(5) + (coeff(0) * v + coeff(3)) * v;
+ }
+ else
+ {
+ p = coeff(0);
+ q = coeff(3) + coeff(1) * v;
+ r = coeff(5) + (coeff(2) * v + coeff(4)) * v;
+ }
+
+ if (p == 0)
+ {
+ if (q == 0) return;
+ double t = -r/q;
+ sol.push_back(t);
+ return;
+ }
+
+ if (q == 0)
+ {
+ if ((p > 0 && r > 0) || (p < 0 && r < 0)) return;
+ double t = -r / p;
+ t = std::sqrt (t);
+ sol.push_back(-t);
+ sol.push_back(t);
+ return;
+ }
+
+ if (r == 0)
+ {
+ double t = -q/p;
+ sol.push_back(0);
+ sol.push_back(t);
+ return;
+ }
+
+
+ //std::cout << "p = " << p << ", q = " << q << ", r = " << r << std::endl;
+ double delta = q * q - 4 * p * r;
+ if (delta < 0) return;
+ if (delta == 0)
+ {
+ double t = -q / (2 * p);
+ sol.push_back(t);
+ return;
+ }
+ // else
+ double srd = std::sqrt(delta);
+ double t = - (q + sgn(q) * srd) / 2;
+ sol.push_back (t/p);
+ sol.push_back (r/t);
+
+}
+
+/*
+ * Return the inclination angle of the major axis of the conic section
+ */
+double xAx::axis_angle() const
+{
+ if (coeff(0) == 0 && coeff(1) == 0 && coeff(2) == 0)
+ {
+ Line l (coeff(3), coeff(4), coeff(5));
+ return l.angle();
+ }
+ if (coeff(1) == 0 && (coeff(0) == coeff(2))) return 0;
+
+ double angle;
+
+ int sgn_discr = det_sgn (get_matrix().main_minor_const_view());
+ if (sgn_discr == 0)
+ {
+ //std::cout << "rotation_angle: sgn_discr = "
+ // << sgn_discr << std::endl;
+ angle = std::atan2 (-coeff(1), 2 * coeff(2));
+ if (angle < 0) angle += 2*M_PI;
+ if (angle >= M_PI) angle -= M_PI;
+
+ }
+ else
+ {
+ angle = std::atan2 (coeff(1), coeff(0) - coeff(2));
+ if (angle < 0) angle += 2*M_PI;
+ angle -= M_PI;
+ if (angle < 0) angle += 2*M_PI;
+ angle /= 2;
+ if (angle >= M_PI) angle -= M_PI;
+ }
+ //std::cout << "rotation_angle : angle = " << angle << std::endl;
+ return angle;
+}
+
+/*
+ * Translate the conic section by the given vector offset
+ *
+ * _offset: the offset vector
+ */
+xAx xAx::translate (const Point & _offset) const
+{
+ double B = coeff(1) / 2;
+ double D = coeff(3) / 2;
+ double E = coeff(4) / 2;
+
+ Point T = - _offset;
+
+ xAx cs;
+ cs.coeff(0) = coeff(0);
+ cs.coeff(1) = coeff(1);
+ cs.coeff(2) = coeff(2);
+
+ Point DE;
+ DE[0] = coeff(0) * T[0] + B * T[1];
+ DE[1] = B * T[0] + coeff(2) * T[1];
+
+ cs.coeff(3) = (DE[0] + D) * 2;
+ cs.coeff(4) = (DE[1] + E) * 2;
+
+ cs.coeff(5) = dot (T, DE) + 2 * (T[0] * D + T[1] * E) + coeff(5);
+
+ return cs;
+}
+
+
+/*
+ * Rotate the conic section by the given angle about the origin (0,0)
+ *
+ * angle: the rotation angle
+ */
+xAx xAx::rotate (double angle) const
+{
+ double c = std::cos(-angle);
+ double s = std::sin(-angle);
+ double cc = c * c;
+ double ss = s * s;
+ double cs = c * s;
+
+ xAx result;
+ result.coeff(5) = coeff(5);
+
+ // quadratic terms
+ double Bcs = coeff(1) * cs;
+
+ result.coeff(0) = coeff(0) * cc + Bcs + coeff(2) * ss;
+ result.coeff(2) = coeff(0) * ss - Bcs + coeff(2) * cc;
+ result.coeff(1) = coeff(1) * (cc - ss) + 2 * (coeff(2) - coeff(0)) * cs;
+
+ // linear terms
+ result.coeff(3) = coeff(3) * c + coeff(4) * s;
+ result.coeff(4) = coeff(4) * c - coeff(3) * s;
+
+ return result;
+}
+
+
+/*
+ * Decompose a degenerate conic into the two lines it is made of.
+ * Return true if the decomposition is successful, false if it fails.
+ *
+ * l1, l2: out parameters where the decomposed conic section is returned
+ */
+bool xAx::decompose (Line& l1, Line& l2) const
+{
+ NL::SymmetricMatrix<3> C = get_matrix();
+ if (!is_quadratic() || !isDegenerate())
+ {
+ return false;
+ }
+ NL::Matrix M(C);
+ NL::SymmetricMatrix<3> D = -adj(C);
+
+ if (!D.is_zero()) // D == 0 <=> rank(C) < 2
+ {
+
+ //if (D.get<0,0>() < 0 || D.get<1,1>() < 0 || D.get<2,2>() < 0)
+ //{
+ //std::cout << "C: \n" << C << std::endl;
+ //std::cout << "D: \n" << D << std::endl;
+
+ /*
+ * This case should be impossible because any diagonal element
+             * of D is a square, but due to inexact arithmetic it can
+             * actually happen; however, the algorithm seems to work
+             * correctly even if some diagonal term is negative; the only
+             * difference is that we have to take the absolute value of
+             * the diagonal elements. So until we come up with a better
+             * degeneracy test it's better not to raise an exception
+             * when we encounter a negative diagonal element.
+ */
+ //}
+
+ NL::Vector d(3);
+ d[0] = std::fabs (D.get<0,0>());
+ d[1] = std::fabs (D.get<1,1>());
+ d[2] = std::fabs (D.get<2,2>());
+
+ size_t idx = d.max_index();
+ if (d[idx] == 0)
+ {
+ THROW_LOGICALERROR ("xAx::decompose: "
+ "rank 2 but adjoint with null diagonal");
+ }
+ d[0] = D(idx,0); d[1] = D(idx,1); d[2] = D(idx,2);
+ d.scale (1 / std::sqrt (std::fabs (D(idx,idx))));
+ M(1,2) += d[0]; M(2,1) -= d[0];
+ M(0,2) -= d[1]; M(2,0) += d[1];
+ M(0,1) += d[2]; M(1,0) -= d[2];
+
+ //std::cout << "C: \n" << C << std::endl;
+ //std::cout << "D: \n" << D << std::endl;
+ //std::cout << "d = " << d << std::endl;
+ //std::cout << "M = " << M << std::endl;
+ }
+
+ std::pair<size_t, size_t> max_ij = M.max_index();
+ std::pair<size_t, size_t> min_ij = M.min_index();
+ double abs_max = std::fabs (M(max_ij.first, max_ij.second));
+ double abs_min = std::fabs (M(min_ij.first, min_ij.second));
+ size_t i_max, j_max;
+ if (abs_max > abs_min)
+ {
+ i_max = max_ij.first;
+ j_max = max_ij.second;
+ }
+ else
+ {
+ i_max = min_ij.first;
+ j_max = min_ij.second;
+ }
+ l1.setCoefficients (M(i_max,0), M(i_max,1), M(i_max,2));
+ l2.setCoefficients (M(0, j_max), M(1,j_max), M(2,j_max));
+
+ return true;
+}
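+
+// Illustrative sketch only (not part of the upstream sources): recovering the two
+// lines of a degenerate conic, e.g. one built with set(Line, Line) above.
+// `degenerate` is a hypothetical caller-side conic.
+#if 0
+static bool example_decompose(xAx const &degenerate, Line &l1, Line &l2)
+{
+    return degenerate.decompose(l1, l2); // false if the conic is not degenerate
+}
+#endif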
+
+std::array<Line, 2> xAx::decompose_df(Coord epsilon) const
+{
+ // For the classification of degenerate conics, see https://mathworld.wolfram.com/QuadraticCurve.html
+ using std::sqrt, std::abs;
+
+ // Create 2 degenerate lines
+ auto const origin = Point(0, 0);
+ std::array<Line, 2> result = {Line(origin, origin), Line(origin, origin)};
+
+ double A = c[0];
+ double B = c[1];
+ double C = c[2];
+ double D = c[3];
+ double E = c[4];
+ double F = c[5];
+ Coord discriminant = sqr(B) - 4 * A * C;
+ if (discriminant < -epsilon) {
+ return result;
+ }
+
+ bool single_line = false; // In the generic case, there will be 2 lines.
+ bool parallel_lines = false;
+ if (discriminant < epsilon) {
+ discriminant = 0;
+ parallel_lines = true;
+ // Check the secondary discriminant
+ Coord const secondary = sqr(D) + sqr(E) - 4 * F * (A + C);
+ if (secondary < -epsilon) {
+ return result;
+ }
+ single_line = (secondary < epsilon);
+ }
+
+ if (abs(A) > epsilon || abs(C) > epsilon) {
+        // This is the typical case: either x² or y² comes with a nonzero coefficient.
+ // To guard against numerical errors, we check which of the coefficients A, C has larger absolute value.
+
+ bool const swap_xy = abs(C) > abs(A);
+ if (swap_xy) {
+ std::swap(A, C);
+ std::swap(D, E);
+ }
+
+ // From now on, we may assume that A is "reasonably large".
+ if (parallel_lines) {
+ if (single_line) {
+ // Special case: a single line.
+ std::array<double, 3> coeffs = {sqrt(abs(A)), sqrt(abs(C)), sqrt(abs(F))};
+ if (swap_xy) {
+ std::swap(coeffs[0], coeffs[1]);
+ }
+ rescale_homogenous(coeffs);
+ result[0].setCoefficients(coeffs[0], coeffs[1], coeffs[2]);
+ return result;
+ }
+
+ // Two parallel lines.
+ Coord const quotient_discriminant = sqr(D) - 4 * A * F;
+ if (quotient_discriminant < 0) {
+ return result;
+ }
+ Coord const sqrt_disc = sqrt(quotient_discriminant);
+ double const c1 = 0.5 * (D - sqrt_disc);
+ double const c2 = c1 + sqrt_disc;
+ std::array<double, 3> coeffs = {A, 0.5 * B, c1};
+ if (swap_xy) {
+ std::swap(coeffs[0], coeffs[1]);
+ }
+ rescale_homogenous(coeffs);
+ result[0].setCoefficients(coeffs[0], coeffs[1], coeffs[2]);
+
+ coeffs = {A, 0.5 * B, c2};
+ if (swap_xy) {
+ std::swap(coeffs[0], coeffs[1]);
+ }
+ rescale_homogenous(coeffs);
+ result[1].setCoefficients(coeffs[0], coeffs[1], coeffs[2]);
+ return result;
+ }
+
+ // Now for the typical case of 2 non-parallel lines.
+
+ // We know that A is further away from 0 than C is.
+ // The mathematical derivation of the solution is as follows:
+ // let Δ = B² - 4AC (the discriminant); we know Δ > 0.
+ // Write δ = sqrt(Δ); we know that this is also positive.
+ // Then the product AΔ is nonzero, so the equation
+ // Ax² + Bxy + Cy² + Dx + Ey + F = 0
+ // is equivalent to
+ // AΔ (Ax² + Bxy + Cy² + Dx + Ey + F) = 0.
+ // Consider the two factors
+ // L_1 = Aδx + 0.5 (Bδ-Δ)y + EA - 0.5 D(B-δ)
+ // L_2 = Aδx + 0.5 (Bδ+Δ)y - EA + 0.5 D(B+δ)
+ // With a bit of algebra, you can show that L_1 * L_2 expands
+ // to AΔ (Ax² + Bxy + Cy² + Dx + Ey + F) (in order to get the
+ // correct value of F, you have to use the fact that the conic
+ // is degenerate). Therefore, the factors L_1 and L_2 are in
+ // fact equations of the two lines to be found.
+ Coord const delta = sqrt(discriminant);
+ std::array<double, 3> coeffs1 = {A * delta, 0.5 * (B * delta - discriminant), E * A - 0.5 * D * (B - delta)};
+ std::array<double, 3> coeffs2 = {coeffs1[0], coeffs1[1] + discriminant, D * delta - coeffs1[2]};
+ if (swap_xy) { // We must unswap the coefficients of x and y
+ std::swap(coeffs1[0], coeffs1[1]);
+ std::swap(coeffs2[0], coeffs2[1]);
+ }
+
+ unsigned index = 0;
+ if (coeffs1[0] != 0 || coeffs1[1] != 0) {
+ rescale_homogenous(coeffs1);
+ result[index++].setCoefficients(coeffs1[0], coeffs1[1], coeffs1[2]);
+ }
+ if (coeffs2[0] != 0 || coeffs2[1] != 0) {
+ rescale_homogenous(coeffs2);
+ result[index].setCoefficients(coeffs2[0], coeffs2[1], coeffs2[2]);
+ }
+ return result;
+ }
+
+ // If we're here, then A==0 and C==0.
+ if (abs(B) < epsilon) { // A == B == C == 0, so the conic reduces to Dx + Ey + F.
+ if (D == 0 && E == 0) {
+ return result;
+ }
+ std::array<double, 3> coeffs = {D, E, F};
+ rescale_homogenous(coeffs);
+ result[0].setCoefficients(coeffs[0], coeffs[1], coeffs[2]);
+ return result;
+ }
+
+ // OK, so A == C == 0 but B != 0. In other words, the conic has the form
+ // Bxy + Dx + Ey + F. Since B != 0, the zero set stays the same if we multiply the
+ // equation by B, which gives us this equation:
+ // B²xy + BDx + BEy + BF = 0.
+ // The above factors as (Bx + E)(By + D) = 0.
+ std::array<double, 2> nonzero_coeffs = {B, E};
+ rescale_homogenous(nonzero_coeffs);
+ result[0].setCoefficients(nonzero_coeffs[0], 0, nonzero_coeffs[1]);
+
+ nonzero_coeffs = {B, D};
+ rescale_homogenous(nonzero_coeffs);
+ result[1].setCoefficients(0, nonzero_coeffs[0], nonzero_coeffs[1]);
+ return result;
+}
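+
+// Illustrative sketch only (not part of the upstream sources): the degenerate
+// conic x^2 - y^2 = 0 factors as the two crossing lines x - y = 0 and x + y = 0,
+// which decompose_df() should recover. The epsilon value is made up.
+#if 0
+static void example_decompose_df()
+{
+    xAx crossing(1, 0, -1, 0, 0, 0); // x^2 - y^2
+    std::array<Line, 2> lines = crossing.decompose_df(1e-9);
+    std::cout << lines[0].angle() << " " << lines[1].angle() << "\n";
+}
+#endif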
+
+/*
+ * Return the rectangle that bounds the conic section arc characterized by
+ * the passed points.
+ *
+ * P1: the initial point of the arc
+ * Q: the inner point of the arc
+ * P2: the final point of the arc
+ *
+ * prerequisite: the passed points must lie on the conic
+ */
+Rect xAx::arc_bound (const Point & P1, const Point & Q, const Point & P2) const
+{
+ using std::swap;
+ //std::cout << "BOUND: P1 = " << P1 << std::endl;
+ //std::cout << "BOUND: Q = " << Q << std::endl;
+ //std::cout << "BOUND: P2 = " << P2 << std::endl;
+
+ Rect B(P1, P2);
+ double Qside = signed_triangle_area (P1, Q, P2);
+ //std::cout << "BOUND: Qside = " << Qside << std::endl;
+
+ Line gl[2];
+ bool empty[2] = {false, false};
+
+ try // if the passed coefficients lead to an equation 0x + 0y + c == 0,
+    { // with c != 0, setCoefficients raises an exception
+ gl[0].setCoefficients (coeff(1), 2 * coeff(2), coeff(4));
+ }
+ catch(Geom::LogicalError const &e)
+ {
+ empty[0] = true;
+ }
+
+ try
+ {
+ gl[1].setCoefficients (2 * coeff(0), coeff(1), coeff(3));
+ }
+ catch(Geom::LogicalError const &e)
+ {
+ empty[1] = true;
+ }
+
+ std::vector<double> rts;
+ std::vector<Point> M;
+ for (size_t dim = 0; dim < 2; ++dim)
+ {
+ if (empty[dim]) continue;
+ rts = roots (gl[dim]);
+ M.clear();
+ for (double rt : rts)
+ M.push_back (gl[dim].pointAt (rt));
+ if (M.size() == 1)
+ {
+ double Mside = signed_triangle_area (P1, M[0], P2);
+ if (sgn(Mside) == sgn(Qside))
+ {
+ //std::cout << "BOUND: M.size() == 1" << std::endl;
+ B[dim].expandTo(M[0][dim]);
+ }
+ }
+ else if (M.size() == 2)
+ {
+ //std::cout << "BOUND: M.size() == 2" << std::endl;
+ if (M[0][dim] > M[1][dim])
+ swap (M[0], M[1]);
+
+ if (M[0][dim] > B[dim].max())
+ {
+ double Mside = signed_triangle_area (P1, M[0], P2);
+ if (sgn(Mside) == sgn(Qside))
+ B[dim].setMax(M[0][dim]);
+ }
+ else if (M[1][dim] < B[dim].min())
+ {
+ double Mside = signed_triangle_area (P1, M[1], P2);
+ if (sgn(Mside) == sgn(Qside))
+ B[dim].setMin(M[1][dim]);
+ }
+ else
+ {
+ double Mside = signed_triangle_area (P1, M[0], P2);
+ if (sgn(Mside) == sgn(Qside))
+ B[dim].setMin(M[0][dim]);
+ Mside = signed_triangle_area (P1, M[1], P2);
+ if (sgn(Mside) == sgn(Qside))
+ B[dim].setMax(M[1][dim]);
+ }
+ }
+ }
+
+ return B;
+}
+
+/*
+ * Return all points on the conic section nearest to the passed point "P".
+ *
+ * P: the point for which the nearest points are computed
+ */
+std::vector<Point> xAx::allNearestTimes (const Point &P) const
+{
+ // TODO: manage the circle - centre case
+ std::vector<Point> points;
+
+ // named C the conic we look for points (x,y) on C such that
+ // dot (grad (C(x,y)), rot90 (P -(x,y))) == 0; the set of points satisfying
+ // this equation is still a conic G, so the wanted points can be found by
+ // intersecting C with G
+ xAx G (-coeff(1),
+ 2 * (coeff(0) - coeff(2)),
+ coeff(1),
+ -coeff(4) + coeff(1) * P[X] - 2 * coeff(0) * P[Y],
+ coeff(3) - coeff(1) * P[Y] + 2 * coeff(2) * P[X],
+ -coeff(3) * P[Y] + coeff(4) * P[X]);
+
+ std::vector<Point> crs = intersect (*this, G);
+
+ //std::cout << "NEAREST POINT: crs.size = " << crs.size() << std::endl;
+ if (crs.empty()) return points;
+
+ size_t idx = 0;
+ double mindist = distanceSq (crs[0], P);
+ std::vector<double> dist;
+ dist.push_back (mindist);
+
+ for (size_t i = 1; i < crs.size(); ++i)
+ {
+ dist.push_back (distanceSq (crs[i], P));
+ if (mindist > dist.back())
+ {
+ idx = i;
+ mindist = dist.back();
+ }
+ }
+
+ points.push_back (crs[idx]);
+ for (size_t i = 0; i < crs.size(); ++i)
+ {
+ if (i == idx) continue;
+ if (dist[i] == mindist)
+ points.push_back (crs[i]);
+ }
+
+ return points;
+}
+
+
+
+bool clip (std::vector<RatQuad> & rq, const xAx & cs, const Rect & R)
+{
+ clipper aclipper (cs, R);
+ return aclipper.clip (rq);
+}
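+
+// Illustrative sketch only (not part of the upstream sources): clipping a conic
+// to a rectangle yields the visible portion as rational quadratic arcs, each of
+// which can then be approximated by a cubic Bezier. `C` and `R` are hypothetical.
+#if 0
+static std::vector<CubicBezier> example_clip_to_cubics(xAx const &C, Rect const &R)
+{
+    std::vector<CubicBezier> cubics;
+    std::vector<RatQuad> arcs;
+    if (clip(arcs, C, R)) {
+        for (RatQuad const &rq : arcs) {
+            cubics.push_back(rq.toCubic());
+        }
+    }
+    return cubics;
+}
+#endif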
+
+
+} // end namespace Geom
+
+
+
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/convex-hull.cpp b/src/2geom/convex-hull.cpp
new file mode 100644
index 0000000..f801fcc
--- /dev/null
+++ b/src/2geom/convex-hull.cpp
@@ -0,0 +1,746 @@
+/** @file
+ * @brief Convex hull of a set of points
+ *//*
+ * Authors:
+ * Nathan Hurst <njh@mail.csse.monash.edu.au>
+ * Michael G. Sloan <mgsloan@gmail.com>
+ * Krzysztof Kosiński <tweenk.pl@gmail.com>
+ * Copyright 2006-2015 Authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ */
+
+#include <2geom/convex-hull.h>
+#include <2geom/exception.h>
+#include <algorithm>
+#include <map>
+#include <iostream>
+#include <cassert>
+#include <boost/array.hpp>
+
+/** Todo:
+ + modify graham scan to work top to bottom, rather than around angles
+ + intersection
+ + minimum distance between convex hulls
+ + maximum distance between convex hulls
+ + Hausdorff metric?
+ + check all degenerate cases carefully
+ + check all algorithms meet all invariants
+ + generalise rotating caliper algorithm (iterator/circulator?)
+*/
+
+using std::vector;
+using std::map;
+using std::pair;
+using std::make_pair;
+using std::swap;
+
+namespace Geom {
+
+ConvexHull::ConvexHull(Point const &a, Point const &b)
+ : _boundary(2)
+ , _lower(0)
+{
+ _boundary[0] = a;
+ _boundary[1] = b;
+ std::sort(_boundary.begin(), _boundary.end(), Point::LexLess<X>());
+ _construct();
+}
+
+ConvexHull::ConvexHull(Point const &a, Point const &b, Point const &c)
+ : _boundary(3)
+ , _lower(0)
+{
+ _boundary[0] = a;
+ _boundary[1] = b;
+ _boundary[2] = c;
+ std::sort(_boundary.begin(), _boundary.end(), Point::LexLess<X>());
+ _construct();
+}
+
+ConvexHull::ConvexHull(Point const &a, Point const &b, Point const &c, Point const &d)
+ : _boundary(4)
+ , _lower(0)
+{
+ _boundary[0] = a;
+ _boundary[1] = b;
+ _boundary[2] = c;
+ _boundary[3] = d;
+ std::sort(_boundary.begin(), _boundary.end(), Point::LexLess<X>());
+ _construct();
+}
+
+ConvexHull::ConvexHull(std::vector<Point> const &pts)
+ : _lower(0)
+{
+ //if (pts.size() > 16) { // arbitrary threshold
+ // _prune(pts.begin(), pts.end(), _boundary);
+ //} else {
+ _boundary = pts;
+ std::sort(_boundary.begin(), _boundary.end(), Point::LexLess<X>());
+ //}
+ _construct();
+}
+
+bool ConvexHull::_is_clockwise_turn(Point const &a, Point const &b, Point const &c)
+{
+ if (b == c) return false;
+ return cross(b-a, c-a) > 0;
+}
+
+void ConvexHull::_construct()
+{
+ // _boundary must already be sorted in LexLess<X> order
+ if (_boundary.empty()) {
+ _lower = 0;
+ return;
+ }
+ if (_boundary.size() == 1 || (_boundary.size() == 2 && _boundary[0] == _boundary[1])) {
+ _boundary.resize(1);
+ _lower = 1;
+ return;
+ }
+ if (_boundary.size() == 2) {
+ _lower = 2;
+ return;
+ }
+
+ std::size_t k = 2;
+ for (std::size_t i = 2; i < _boundary.size(); ++i) {
+ while (k >= 2 && !_is_clockwise_turn(_boundary[k-2], _boundary[k-1], _boundary[i])) {
+ --k;
+ }
+ std::swap(_boundary[k++], _boundary[i]);
+ }
+
+ _lower = k;
+ std::sort(_boundary.begin() + k, _boundary.end(), Point::LexGreater<X>());
+ _boundary.push_back(_boundary.front());
+ for (std::size_t i = _lower; i < _boundary.size(); ++i) {
+ while (k > _lower && !_is_clockwise_turn(_boundary[k-2], _boundary[k-1], _boundary[i])) {
+ --k;
+ }
+ std::swap(_boundary[k++], _boundary[i]);
+ }
+
+ _boundary.resize(k-1);
+}
+
+double ConvexHull::area() const
+{
+ if (size() <= 2) return 0;
+
+ double a = 0;
+ for (std::size_t i = 0; i < size()-1; ++i) {
+ a += cross(_boundary[i], _boundary[i+1]);
+ }
+ a += cross(_boundary.back(), _boundary.front());
+ return fabs(a * 0.5);
+}
+
+OptRect ConvexHull::bounds() const
+{
+ OptRect ret;
+ if (empty()) return ret;
+ ret = Rect(left(), top(), right(), bottom());
+ return ret;
+}
+
+Point ConvexHull::topPoint() const
+{
+ Point ret;
+ ret[Y] = std::numeric_limits<Coord>::infinity();
+
+ for (auto i : upperHull()) {
+ if (ret[Y] >= i.y()) {
+ ret = i;
+ } else {
+ break;
+ }
+ }
+
+ return ret;
+}
+
+Point ConvexHull::bottomPoint() const
+{
+ Point ret;
+ ret[Y] = -std::numeric_limits<Coord>::infinity();
+
+ for (auto j : lowerHull()) {
+ if (ret[Y] <= j.y()) {
+ ret = j;
+ } else {
+ break;
+ }
+ }
+
+ return ret;
+}
+
+template <typename Iter, typename Lex>
+bool below_x_monotonic_polyline(Point const &p, Iter first, Iter last, Lex lex)
+{
+ typename Lex::Secondary above;
+ Iter f = std::lower_bound(first, last, p, lex);
+ if (f == last) return false;
+ if (f == first) {
+ if (p == *f) return true;
+ return false;
+ }
+
+ Point a = *(f-1), b = *f;
+ if (a[X] == b[X]) {
+ if (above(p[Y], a[Y]) || above(b[Y], p[Y])) return false;
+ } else {
+ // TODO: maybe there is a more numerically stable method
+ Coord y = lerp((p[X] - a[X]) / (b[X] - a[X]), a[Y], b[Y]);
+ if (above(p[Y], y)) return false;
+ }
+ return true;
+}
+
+bool ConvexHull::contains(Point const &p) const
+{
+ if (_boundary.empty()) return false;
+ if (_boundary.size() == 1) {
+ if (_boundary[0] == p) return true;
+ return false;
+ }
+
+ // 1. verify that the point is in the relevant X range
+ if (p[X] < _boundary[0][X] || p[X] > _boundary[_lower-1][X]) return false;
+
+ // 2. check whether it is below the upper hull
+ UpperIterator ub = upperHull().begin(), ue = upperHull().end();
+ if (!below_x_monotonic_polyline(p, ub, ue, Point::LexLess<X>())) return false;
+
+ // 3. check whether it is above the lower hull
+ LowerIterator lb = lowerHull().begin(), le = lowerHull().end();
+ if (!below_x_monotonic_polyline(p, lb, le, Point::LexGreater<X>())) return false;
+
+ return true;
+}
+
+bool ConvexHull::contains(Rect const &r) const
+{
+ for (unsigned i = 0; i < 4; ++i) {
+ if (!contains(r.corner(i))) return false;
+ }
+ return true;
+}
+
+bool ConvexHull::contains(ConvexHull const &ch) const
+{
+ // TODO: requires interiorContains.
+ // We have to check all points of ch, and each point takes logarithmic time.
+    // If there are more points in ch than here, it is faster to make the check
+ // the other way around.
+ /*if (ch.size() > size()) {
+ for (iterator i = begin(); i != end(); ++i) {
+ if (ch.interiorContains(*i)) return false;
+ }
+ return true;
+ }*/
+
+ for (auto i : ch) {
+ if (!contains(i)) return false;
+ }
+ return true;
+}
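+
+// Illustrative sketch only (not part of the upstream sources): building a hull
+// from a small point set and running the containment test above. The coordinates
+// are made up; (2, 1) is interior and gets dropped from the boundary.
+#if 0
+static void example_hull_contains()
+{
+    std::vector<Point> pts;
+    pts.push_back(Point(0, 0));
+    pts.push_back(Point(4, 0));
+    pts.push_back(Point(4, 3));
+    pts.push_back(Point(0, 3));
+    pts.push_back(Point(2, 1));
+    ConvexHull hull(pts);
+    std::cout << hull.contains(Point(1, 1)) << "\n"; // expected: 1 (true)
+    std::cout << hull.contains(Point(5, 5)) << "\n"; // expected: 0 (false)
+}
+#endif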
+
+void ConvexHull::swap(ConvexHull &other)
+{
+ _boundary.swap(other._boundary);
+ std::swap(_lower, other._lower);
+}
+
+void ConvexHull::swap(std::vector<Point> &pts)
+{
+ _boundary.swap(pts);
+ _lower = 0;
+ std::sort(_boundary.begin(), _boundary.end(), Point::LexLess<X>());
+ _construct();
+}
+
+#if 0
+/*** SignedTriangleArea
+ * returns the area of the triangle defined by p0, p1, p2. A clockwise triangle has positive area.
+ */
+double
+SignedTriangleArea(Point p0, Point p1, Point p2) {
+ return cross((p1 - p0), (p2 - p0));
+}
+
+class angle_cmp{
+public:
+ Point o;
+ angle_cmp(Point o) : o(o) {}
+
+#if 0
+ bool
+ operator()(Point a, Point b) {
+        // do not remove this check or std::sort could crash
+ if (a == b) return false;
+ Point da = a - o;
+ Point db = b - o;
+ if (da == -db) return false;
+
+#if 1
+ double aa = da[0];
+ double ab = db[0];
+ if((da[1] == 0) && (db[1] == 0))
+ return da[0] < db[0];
+ if(da[1] == 0)
+ return true; // infinite tangent
+ if(db[1] == 0)
+ return false; // infinite tangent
+ aa = da[0] / da[1];
+ ab = db[0] / db[1];
+ if(aa > ab)
+ return true;
+#else
+ //assert((ata > atb) == (aa < ab));
+ double aa = atan2(da);
+ double ab = atan2(db);
+ if(aa < ab)
+ return true;
+#endif
+ if(aa == ab)
+ return L2sq(da) < L2sq(db);
+ return false;
+ }
+#else
+ bool operator() (Point const& a, Point const& b)
+ {
+        // do not remove this check or std::sort could generate
+        // a segmentation fault because it needs a strict '<',
+        // but due to rounding errors a == b doesn't mean dxy == dyx
+ if (a == b) return false;
+ Point da = a - o;
+ Point db = b - o;
+ if (da == -db) return false;
+ double dxy = da[X] * db[Y];
+ double dyx = da[Y] * db[X];
+ if (dxy > dyx) return true;
+ else if (dxy < dyx) return false;
+ return L2sq(da) < L2sq(db);
+ }
+#endif
+};
+
+//Mathematically incorrect mod, but more useful.
+int mod(int i, int l) {
+ return i >= 0 ?
+ i % l : (i % l) + l;
+}
+//OPT: usages can often be replaced by conditions
+
+/*** ConvexHull::add_point
+ * to add a point we need to find whether the new point extends the boundary, and if so, what it
+ * obscures. Tarjan? Jarvis?*/
+void
+ConvexHull::merge(Point p) {
+ std::vector<Point> out;
+
+ int len = boundary.size();
+
+ if(len < 2) {
+ if(boundary.empty() || boundary[0] != p)
+ boundary.push_back(p);
+ return;
+ }
+
+ bool pushed = false;
+
+ bool pre = is_left(p, -1);
+ for(int i = 0; i < len; i++) {
+ bool cur = is_left(p, i);
+ if(pre) {
+ if(cur) {
+ if(!pushed) {
+ out.push_back(p);
+ pushed = true;
+ }
+ continue;
+ }
+ else if(!pushed) {
+ out.push_back(p);
+ pushed = true;
+ }
+ }
+ out.push_back(boundary[i]);
+ pre = cur;
+ }
+
+ boundary = out;
+}
+//OPT: quickly find an obscured point and find the bounds by extending from there. then push all points not within the bounds in order.
+ //OPT: use binary searches to find the actual starts/ends, use known rights as boundaries. may require cooperation of find_left algo.
+
+/*** ConvexHull::is_clockwise
+ * We require that successive pairs of edges always turn right.
+ * We return false on collinear points
+ * proposed algorithm: walk successive edges and require triangle area is positive.
+ */
+bool
+ConvexHull::is_clockwise() const {
+ if(is_degenerate())
+ return true;
+ Point first = boundary[0];
+ Point second = boundary[1];
+ for(std::vector<Point>::const_iterator it(boundary.begin()+2), e(boundary.end());
+ it != e;) {
+ if(SignedTriangleArea(first, second, *it) > 0)
+ return false;
+ first = second;
+ second = *it;
+ ++it;
+ }
+ return true;
+}
+
+/*** ConvexHull::top_point_first
+ * We require that the first point in the convex hull has the least y coord, and that of all such points on the hull, it has the least x coord.
+ * proposed algorithm: track lexicographic minimum while walking the list.
+ */
+bool
+ConvexHull::top_point_first() const {
+ if(size() <= 1) return true;
+ std::vector<Point>::const_iterator pivot = boundary.begin();
+ for(std::vector<Point>::const_iterator it(boundary.begin()+1),
+ e(boundary.end());
+ it != e; it++) {
+ if((*it)[1] < (*pivot)[1])
+ pivot = it;
+ else if(((*it)[1] == (*pivot)[1]) &&
+ ((*it)[0] < (*pivot)[0]))
+ pivot = it;
+ }
+ return pivot == boundary.begin();
+}
+//OPT: since the Y values are orderly there should be something like a binary search to do this.
+
+bool
+ConvexHull::meets_invariants() const {
+ return is_clockwise() && top_point_first();
+}
+
+/*** ConvexHull::is_degenerate
+ * We allow three degenerate cases: empty, 1 point and 2 points. In many cases these should be handled explicitly.
+ */
+bool
+ConvexHull::is_degenerate() const {
+ return boundary.size() < 3;
+}
+
+
+int sgn(double x) {
+ if(x == 0) return 0;
+ return (x<0)?-1:1;
+}
+
+bool same_side(Point L[2], Point xs[4]) {
+ int side = 0;
+ for(int i = 0; i < 4; i++) {
+ int sn = sgn(SignedTriangleArea(L[0], L[1], xs[i]));
+ if(sn && !side)
+ side = sn;
+ else if(sn != side) return false;
+ }
+ return true;
+}
+
+/** find bridging pairs between two convex hulls.
+ * This code is based on Hormoz Pirzadeh's master's thesis. There is room for optimisation:
+ * 1. reduce recomputation
+ * 2. use more efficient angle code
+ * 3. write as iterator
+ */
+std::vector<pair<int, int> > bridges(ConvexHull a, ConvexHull b) {
+ vector<pair<int, int> > ret;
+
+ // 1. find maximal points on a and b
+ int ai = 0, bi = 0;
+ // 2. find first copodal pair
+ double ap_angle = atan2(a[ai+1] - a[ai]);
+ double bp_angle = atan2(b[bi+1] - b[bi]);
+ Point L[2] = {a[ai], b[bi]};
+ while(ai < int(a.size()) || bi < int(b.size())) {
+ if(ap_angle == bp_angle) {
+ // In the case of parallel support lines, we must consider all four pairs of copodal points
+ {
+ assert(0); // untested
+ Point xs[4] = {a[ai-1], a[ai+1], b[bi-1], b[bi+1]};
+ if(same_side(L, xs)) ret.push_back(make_pair(ai, bi));
+ xs[2] = b[bi];
+ xs[3] = b[bi+2];
+ if(same_side(L, xs)) ret.push_back(make_pair(ai, bi));
+ xs[0] = a[ai];
+ xs[1] = a[ai+2];
+ if(same_side(L, xs)) ret.push_back(make_pair(ai, bi));
+ xs[2] = b[bi-1];
+ xs[3] = b[bi+1];
+ if(same_side(L, xs)) ret.push_back(make_pair(ai, bi));
+ }
+ ai++;
+ ap_angle += angle_between(a[ai] - a[ai-1], a[ai+1] - a[ai]);
+ L[0] = a[ai];
+ bi++;
+ bp_angle += angle_between(b[bi] - b[bi-1], b[bi+1] - b[bi]);
+ L[1] = b[bi];
+ std::cout << "parallel\n";
+ } else if(ap_angle < bp_angle) {
+ ai++;
+ ap_angle += angle_between(a[ai] - a[ai-1], a[ai+1] - a[ai]);
+ L[0] = a[ai];
+ Point xs[4] = {a[ai-1], a[ai+1], b[bi-1], b[bi+1]};
+ if(same_side(L, xs)) ret.push_back(make_pair(ai, bi));
+ } else {
+ bi++;
+ bp_angle += angle_between(b[bi] - b[bi-1], b[bi+1] - b[bi]);
+ L[1] = b[bi];
+ Point xs[4] = {a[ai-1], a[ai+1], b[bi-1], b[bi+1]};
+ if(same_side(L, xs)) ret.push_back(make_pair(ai, bi));
+ }
+ }
+ return ret;
+}
+
+unsigned find_bottom_right(ConvexHull const &a) {
+ unsigned it = 1;
+ while(it < a.boundary.size() &&
+ a.boundary[it][Y] > a.boundary[it-1][Y])
+ it++;
+ return it-1;
+}
+
+/*** ConvexHull sweepline_intersection(ConvexHull a, ConvexHull b);
+ * find the intersection between two convex hulls. The intersection is also a convex hull.
+ * (Proof: take any two points both in a and in b. Any point between them is in a by convexity,
+ * and in b by convexity, thus in both. Need to prove still finite bounds.)
+ * This algorithm works by sweeping a line down both convex hulls in parallel, working out the left and right edges of the new hull.
+ */
+ConvexHull sweepline_intersection(ConvexHull const &a, ConvexHull const &b) {
+ ConvexHull ret;
+
+ unsigned al = 0;
+ unsigned bl = 0;
+
+ while(al+1 < a.boundary.size() &&
+ (a.boundary[al+1][Y] > b.boundary[bl][Y])) {
+ al++;
+ }
+ while(bl+1 < b.boundary.size() &&
+ (b.boundary[bl+1][Y] > a.boundary[al][Y])) {
+ bl++;
+ }
+ // al and bl now point to the top of the first pair of edges that overlap in y value
+ //double sweep_y = std::min(a.boundary[al][Y],
+ // b.boundary[bl][Y]);
+ return ret;
+}
+
+/*** ConvexHull intersection(ConvexHull a, ConvexHull b);
+ * find the intersection between two convex hulls. The intersection is also a convex hull.
+ * (Proof: take any two points both in a and in b. Any point between them is in a by convexity,
+ * and in b by convexity, thus in both. Need to prove still finite bounds.)
+ */
+ConvexHull intersection(ConvexHull /*a*/, ConvexHull /*b*/) {
+ ConvexHull ret;
+ /*
+ int ai = 0, bi = 0;
+ int aj = a.boundary.size() - 1;
+ int bj = b.boundary.size() - 1;
+ */
+ /*while (true) {
+ if(a[ai]
+ }*/
+ return ret;
+}
+
+template <typename T>
+T idx_to_pair(pair<T, T> p, int idx) {
+ return idx?p.second:p.first;
+}
+
+/*** ConvexHull merge(ConvexHull a, ConvexHull b);
+ * find the smallest convex hull that surrounds a and b.
+ */
+ConvexHull merge(ConvexHull a, ConvexHull b) {
+ ConvexHull ret;
+
+ std::cout << "---\n";
+ std::vector<pair<int, int> > bpair = bridges(a, b);
+
+ // Given our list of bridges {(pb1, qb1), ..., (pbk, qbk)}
+ // we start with the highest point in p0, q0, say it is p0.
+ // then the merged hull is p0, ..., pb1, qb1, ..., qb2, pb2, ...
+    // In other words, the vertices of one of the two polygons are added in order until a vertex coincides with a bridge point, at which point we swap to the other polygon.
+
+ unsigned state = (a[0][Y] < b[0][Y])?0:1;
+ ret.boundary.reserve(a.size() + b.size());
+ ConvexHull chs[2] = {a, b};
+ unsigned idx = 0;
+
+ for(unsigned k = 0; k < bpair.size(); k++) {
+ unsigned limit = idx_to_pair(bpair[k], state);
+ std::cout << bpair[k].first << " , " << bpair[k].second << "; "
+ << idx << ", " << limit << ", s: "
+ << state
+ << " \n";
+ while(idx <= limit) {
+ ret.boundary.push_back(chs[state][idx++]);
+ }
+ state = 1-state;
+ idx = idx_to_pair(bpair[k], state);
+ }
+ while(idx < chs[state].size()) {
+ ret.boundary.push_back(chs[state][idx++]);
+ }
+ return ret;
+}
+
+ConvexHull graham_merge(ConvexHull a, ConvexHull b) {
+ ConvexHull result;
+
+ // we can avoid the find pivot step because of top_point_first
+ if(b.boundary[0] <= a.boundary[0])
+ swap(a, b);
+
+ result.boundary = a.boundary;
+ result.boundary.insert(result.boundary.end(),
+ b.boundary.begin(), b.boundary.end());
+
+/** if we modified graham scan to work top to bottom as proposed in lect754.pdf we could replace the
+ angle sort with a simple merge sort type algorithm. furthermore, we could do the graham scan
+ online, avoiding a bunch of memory copies. That would probably be linear. -- njh*/
+ result.angle_sort();
+ result.graham_scan();
+
+ return result;
+}
+
+ConvexHull andrew_merge(ConvexHull a, ConvexHull b) {
+ ConvexHull result;
+
+ // we can avoid the find pivot step because of top_point_first
+ if(b.boundary[0] <= a.boundary[0])
+ swap(a, b);
+
+ result.boundary = a.boundary;
+ result.boundary.insert(result.boundary.end(),
+ b.boundary.begin(), b.boundary.end());
+
+/** If we modified graham scan to work top to bottom, as proposed in lect754.pdf, we could replace
+    the angle sort with a simple merge-sort-type algorithm. Furthermore, we could do the graham scan
+    online, avoiding a bunch of memory copies. That would probably be linear. -- njh*/
+ result.andrew_scan();
+
+ return result;
+}
+
+//TODO: reinstate
+/*ConvexCover::ConvexCover(Path const &sp) : path(&sp) {
+ cc.reserve(sp.size());
+ for(Geom::Path::const_iterator it(sp.begin()), end(sp.end()); it != end; ++it) {
+ cc.push_back(ConvexHull((*it).begin(), (*it).end()));
+ }
+}*/
+
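+// Editorial note: the loop below is the standard shoelace-based computation of a polygon's
+// centroid and signed area (the sign depends on the boundary orientation); hulls with fewer
+// than three points are handled as degenerate special cases.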
+double ConvexHull::centroid_and_area(Geom::Point& centroid) const {
+ const unsigned n = boundary.size();
+ if (n < 2)
+ return 0;
+ if(n < 3) {
+ centroid = (boundary[0] + boundary[1])/2;
+ return 0;
+ }
+ Geom::Point centroid_tmp(0,0);
+ double atmp = 0;
+ for (unsigned i = n-1, j = 0; j < n; i = j, j++) {
+ const double ai = cross(boundary[j], boundary[i]);
+ atmp += ai;
+ centroid_tmp += (boundary[j] + boundary[i])*ai; // first moment.
+ }
+ if (atmp != 0) {
+ centroid = centroid_tmp / (3 * atmp);
+ }
+ return atmp / 2;
+}
+
+// TODO: This can be made O(lg n) using golden section / Fibonacci search: take three starting
+// points, say 0, n/2 and n-1; construct a new point, say (n/2 + n)/2; throw away the furthest
+// boundary point; iterate until the interval shrinks to a single value.
+Point const * ConvexHull::furthest(Point direction) const {
+ Point const * p = &boundary[0];
+ double d = dot(*p, direction);
+ for(unsigned i = 1; i < boundary.size(); i++) {
+ double dd = dot(boundary[i], direction);
+ if(d < dd) {
+ p = &boundary[i];
+ d = dd;
+ }
+ }
+ return p;
+}
+
+
+// Returns (a, (b,c)): three points which define the narrowest diameter of the hull as the pair of
+// lines through b,c and through a parallel to b,c.
+// TODO: This can be made linear time by moving point tc incrementally from the previous value
+// (it can only move in one direction). It is currently n*O(furthest).
+double ConvexHull::narrowest_diameter(Point &a, Point &b, Point &c) {
+ Point tb = boundary.back();
+ double d = std::numeric_limits<double>::max();
+ for(unsigned i = 0; i < boundary.size(); i++) {
+ Point tc = boundary[i];
+ Point n = -rot90(tb-tc);
+ Point ta = *furthest(n);
+ double td = dot(n, ta-tb)/dot(n,n);
+ if(td < d) {
+ a = ta;
+ b = tb;
+ c = tc;
+ d = td;
+ }
+ tb = tc;
+ }
+ return d;
+}
+#endif
+
+};
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/coord.cpp b/src/2geom/coord.cpp
new file mode 100644
index 0000000..205a82f
--- /dev/null
+++ b/src/2geom/coord.cpp
@@ -0,0 +1,123 @@
+/** @file
+ * @brief Conversion between Coord and strings
+ *//*
+ * Authors:
+ * Krzysztof Kosiński <tweenk.pl@gmail.com>
+ *
+ * Copyright 2014 Authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+// Most of the code in this file is derived from:
+// https://code.google.com/p/double-conversion/
+// The copyright notice for that code is attached below.
+//
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <2geom/coord.h>
+#include <cstdint>
+#include <cstdlib>
+#include <cassert>
+#include <cstring>
+#include <climits>
+#include <cstdarg>
+#include <cmath>
+
+#include <double-conversion/double-conversion.h>
+
+namespace Geom {
+
+std::string format_coord_shortest(Coord x)
+{
+ static const double_conversion::DoubleToStringConverter conv(
+ double_conversion::DoubleToStringConverter::UNIQUE_ZERO,
+ "inf", "NaN", 'e', -3, 6, 0, 0);
+    std::string ret(32, ' ');  // 32-character scratch buffer for the converter
+ double_conversion::StringBuilder builder(&ret[0], 32);
+ conv.ToShortest(x, &builder);
+ ret.resize(builder.position());
+ return ret;
+}
+
+std::string format_coord_nice(Coord x)
+{
+ static const double_conversion::DoubleToStringConverter conv(
+ double_conversion::DoubleToStringConverter::UNIQUE_ZERO,
+ "inf", "NaN", 'e', -6, 21, 0, 0);
+    std::string ret(32, ' ');  // 32-character scratch buffer for the converter
+ double_conversion::StringBuilder builder(&ret[0], 32);
+ conv.ToShortest(x, &builder);
+ ret.resize(builder.position());
+ return ret;
+}
+
+Coord parse_coord(std::string const &s)
+{
+ static const double_conversion::StringToDoubleConverter conv(
+ double_conversion::StringToDoubleConverter::ALLOW_LEADING_SPACES |
+ double_conversion::StringToDoubleConverter::ALLOW_TRAILING_SPACES |
+ double_conversion::StringToDoubleConverter::ALLOW_SPACES_AFTER_SIGN,
+ 0.0, nan(""), "inf", "NaN");
+ int dummy;
+ return conv.StringToDouble(s.c_str(), s.length(), &dummy);
+}
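+
+// Editorial usage sketch (not part of the library API): round-tripping a coordinate
+// through the helpers above. ToShortest emits the shortest decimal string that should
+// parse back to the same double, so the round trip should be exact.
+//
+//   Coord x = 0.1 + 0.2;
+//   std::string s = format_coord_shortest(x);   // shortest decimal form that round-trips
+//   Coord y = parse_coord(s);                   // y should equal x exactly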
+
+} // namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/crossing.cpp b/src/2geom/crossing.cpp
new file mode 100644
index 0000000..1159fb0
--- /dev/null
+++ b/src/2geom/crossing.cpp
@@ -0,0 +1,233 @@
+#include <2geom/crossing.h>
+#include <2geom/path.h>
+
+namespace Geom {
+
+//bool edge_involved_in(Edge const &e, Crossing const &c) {
+// if(e.path == c.a) {
+// if(e.time == c.ta) return true;
+// } else if(e.path == c.b) {
+// if(e.time == c.tb) return true;
+// }
+// return false;
+//}
+
+double wrap_dist(double from, double to, double size, bool rev) {
+ if(rev) {
+ if(to > from) {
+ return from + (size - to);
+ } else {
+ return from - to;
+ }
+ } else {
+ if(to < from) {
+ return to + (size - from);
+ } else {
+ return to - from;
+ }
+ }
+}
+/*
+CrossingGraph create_crossing_graph(PathVector const &p, Crossings const &crs) {
+ std::vector<Point> locs;
+ CrossingGraph ret;
+ for(unsigned i = 0; i < crs.size(); i++) {
+ Point pnt = p[crs[i].a].pointAt(crs[i].ta);
+ unsigned j = 0;
+ for(; j < locs.size(); j++) {
+ if(are_near(pnt, locs[j])) break;
+ }
+ if(j == locs.size()) {
+ ret.push_back(CrossingNode());
+ locs.push_back(pnt);
+ }
+ ret[j].add_edge(Edge(crs[i].a, crs[i].ta, false));
+ ret[j].add_edge(Edge(crs[i].a, crs[i].ta, true));
+ ret[j].add_edge(Edge(crs[i].b, crs[i].tb, false));
+ ret[j].add_edge(Edge(crs[i].b, crs[i].tb, true));
+ }
+
+ for(unsigned i = 0; i < ret.size(); i++) {
+ for(unsigned j = 0; j < ret[i].edges.size(); j++) {
+ unsigned pth = ret[i].edges[j].path;
+ double t = ret[i].edges[j].time;
+ bool rev = ret[i].edges[j].reverse;
+ double size = p[pth].size()+1;
+ double best = size;
+ unsigned bix = ret.size();
+ for(unsigned k = 0; k < ret.size(); k++) {
+ for(unsigned l = 0; l < ret[k].edges.size(); l++) {
+ if(ret[i].edges[j].path == ret[k].edges[l].path && (k != i || l != j)) {
+ double d = wrap_dist(t, ret[i].edges[j].time, size, rev);
+ if(d < best) {
+ best = d;
+ bix = k;
+ }
+ }
+ }
+ }
+ if(bix == ret.size()) {
+ std::cout << "couldn't find an adequate next-crossing node";
+ bix = i;
+ }
+ ret[i].edges[j].node = bix;
+ }
+ }
+
+ return ret;
+ */
+ /* Various incoherent code bits
+ // list of sets of edges, each set corresponding to those emanating from the path
+ CrossingGraph ret;
+ std::vector<Edge> edges(crs.size());
+
+ std::vector<std::vector<bool> > used;
+ unsigned i, j;
+ do {
+ first_false(used, i, j);
+ CrossingNode cn;
+ do {
+ unsigned di = i, dj = j;
+ crossing_dual(di, dj);
+ if(!used[di,dj]) {
+
+ }
+ }
+
+ } while(!used[i,j])
+
+
+ for(unsigned j = 0; j < crs[i].size(); j++) {
+
+ edges.push_back(Edge(i, crs[i][j].getOtherTime(i), false));
+ edges.push_back(Edge(i, crs[i][j].getOtherTime(i), true));
+ }
+ std::sort(edges.begin(), edges.end(), TimeOrder());
+ for(unsigned j = 0; j < edges.size(); ) {
+ CrossingNode cn;
+ double t = edges[j].time;
+ while(j < edges.size() && are_near(edges[j].time, t)) {
+ cn.edges.push_back(edges[j]);
+ }
+ }
+*/
+//}
+
+// Provide a specific method for Paths because paths can be closed or open (hence the use of
+// size_default() rather than size()).
+std::vector<Rect> bounds(Path const &a) {
+ std::vector<Rect> rs;
+ for (unsigned i = 0; i < a.size_default(); i++) {
+ OptRect bb = a[i].boundsFast();
+ if (bb) {
+ rs.push_back(*bb);
+ }
+ }
+ return rs;
+}
+
+void merge_crossings(Crossings &a, Crossings &b, unsigned i) {
+ Crossings n;
+ sort_crossings(b, i);
+ n.resize(a.size() + b.size());
+ std::merge(a.begin(), a.end(), b.begin(), b.end(), n.begin(), CrossingOrder(i));
+ a = n;
+}
+
+void offset_crossings(Crossings &cr, double a, double b) {
+ for(auto & i : cr) {
+ i.ta += a;
+ i.tb += b;
+ }
+}
+
+Crossings reverse_ta(Crossings const &cr, std::vector<double> max) {
+ Crossings ret;
+ for(const auto & i : cr) {
+ double mx = max[i.a];
+ ret.push_back(Crossing(i.ta > mx+0.01 ? (1 - (i.ta - mx) + mx) : mx - i.ta,
+ i.tb, !i.dir));
+ }
+ return ret;
+}
+
+Crossings reverse_tb(Crossings const &cr, unsigned split, std::vector<double> max) {
+ Crossings ret;
+ for(const auto & i : cr) {
+ double mx = max[i.b - split];
+ ret.push_back(Crossing(i.ta, i.tb > mx+0.01 ? (1 - (i.tb - mx) + mx) : mx - i.tb,
+ !i.dir));
+ }
+ return ret;
+}
+
+CrossingSet reverse_ta(CrossingSet const &cr, unsigned split, std::vector<double> max) {
+ CrossingSet ret;
+ for(unsigned i = 0; i < cr.size(); i++) {
+ Crossings res = reverse_ta(cr[i], max);
+ if(i < split) std::reverse(res.begin(), res.end());
+ ret.push_back(res);
+ }
+ return ret;
+}
+
+CrossingSet reverse_tb(CrossingSet const &cr, unsigned split, std::vector<double> max) {
+ CrossingSet ret;
+ for(unsigned i = 0; i < cr.size(); i++) {
+ Crossings res = reverse_tb(cr[i], split, max);
+ if(i >= split) std::reverse(res.begin(), res.end());
+ ret.push_back(res);
+ }
+ return ret;
+}
+
+// Delete any duplicates in a vector of crossings.
+// A crossing is considered a duplicate when both its t_a and t_b are near another crossing's t_a and t_b.
+// For example, duplicates will be found when calculating the intersections of a line segment with a polygon,
+// if the endpoint of that line coincides with a cusp node of the polygon. In that case, an intersection will
+// be found of the line segment with each of the polygon's line segments extending from the cusp node
+// (i.e. two intersections).
+void delete_duplicates(Crossings &crs) {
+ Crossings::reverse_iterator rit = crs.rbegin();
+
+ for (rit = crs.rbegin(); rit!= crs.rend(); ++rit) {
+ Crossings::reverse_iterator rit2 = rit;
+ while (++rit2 != crs.rend()) {
+ if (Geom::are_near((*rit).ta, (*rit2).ta) && Geom::are_near((*rit).tb, (*rit2).tb)) {
+ crs.erase((rit + 1).base()); // This +1 and .base() construction is needed to convert to a regular iterator
+ break; // out of while loop, and continue with next iteration of for loop
+ }
+ }
+ }
+}
+
+void clean(Crossings &/*cr_a*/, Crossings &/*cr_b*/) {
+/* if(cr_a.empty()) return;
+
+ //Remove anything with dupes
+
+ for(Eraser<Crossings> i(&cr_a); !i.ended(); i++) {
+ const Crossing cur = *i;
+ Eraser<Crossings> next(i);
+ next++;
+ if(are_near(cur, *next)) {
+ cr_b.erase(std::find(cr_b.begin(), cr_b.end(), cur));
+ for(i = next; near(*i, cur); i++) {
+ cr_b.erase(std::find(cr_b.begin(), cr_b.end(), *i));
+ }
+ continue;
+ }
+ }
+*/
+}
+
+}
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/curve.cpp b/src/2geom/curve.cpp
new file mode 100644
index 0000000..f79edb3
--- /dev/null
+++ b/src/2geom/curve.cpp
@@ -0,0 +1,235 @@
+/* Abstract curve type - implementation of default methods
+ *
+ * Authors:
+ * MenTaLguY <mental@rydia.net>
+ * Marco Cecchetti <mrcekets at gmail.com>
+ * Krzysztof Kosiński <tweenk.pl@gmail.com>
+ * Rafał Siejakowski <rs@rs-math.net>
+ *
+ * Copyright 2007-2009 Authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+#include <2geom/curve.h>
+#include <2geom/exception.h>
+#include <2geom/nearest-time.h>
+#include <2geom/sbasis-geometric.h>
+#include <2geom/sbasis-to-bezier.h>
+#include <2geom/ord.h>
+#include <2geom/path-sink.h>
+
+namespace Geom
+{
+
+Coord Curve::nearestTime(Point const& p, Coord a, Coord b) const
+{
+ return nearest_time(p, toSBasis(), a, b);
+}
+
+std::vector<Coord> Curve::allNearestTimes(Point const& p, Coord from, Coord to) const
+{
+ return all_nearest_times(p, toSBasis(), from, to);
+}
+
+Coord Curve::length(Coord tolerance) const
+{
+ return ::Geom::length(toSBasis(), tolerance);
+}
+
+int Curve::winding(Point const &p) const
+{
+ try {
+ std::vector<Coord> ts = roots(p[Y], Y);
+ if(ts.empty()) return 0;
+ std::sort(ts.begin(), ts.end());
+
+ // skip endpoint roots when they are local maxima on the Y axis
+ // this follows the convention used in other winding routines,
+ // i.e. that the bottommost coordinate is not part of the shape
+ bool ignore_0 = unitTangentAt(0)[Y] <= 0;
+ bool ignore_1 = unitTangentAt(1)[Y] >= 0;
+
+ int wind = 0;
+ for (double t : ts) {
+ //std::cout << t << std::endl;
+ if ((t == 0 && ignore_0) || (t == 1 && ignore_1)) continue;
+ if (valueAt(t, X) > p[X]) { // root is ray intersection
+ Point tangent = unitTangentAt(t);
+ if (tangent[Y] > 0) {
+ // at the point of intersection, curve goes in +Y direction,
+ // so it winds in the direction of positive angles
+ ++wind;
+ } else if (tangent[Y] < 0) {
+ --wind;
+ }
+ }
+ }
+ return wind;
+ } catch (InfiniteSolutions const &e) {
+ // this means we encountered a line segment exactly coincident with the point
+ // skip, since this will be taken care of by endpoint roots in other segments
+ return 0;
+ }
+}
+
+std::vector<CurveIntersection> Curve::intersect(Curve const &/*other*/, Coord /*eps*/) const
+{
+ // TODO: approximate as Bezier
+ THROW_NOTIMPLEMENTED();
+}
+
+std::vector<CurveIntersection> Curve::intersectSelf(Coord eps) const
+{
+ /// Represents a sub-arc of the curve.
+ struct Subcurve
+ {
+ std::unique_ptr<Curve> curve;
+ Interval parameter_range;
+
+ Subcurve(Curve *piece, Coord from, Coord to)
+ : curve{piece}
+ , parameter_range{from, to}
+ {}
+ };
+
+ /// A closure to split the curve into portions at the prescribed split points.
+ auto const split_into_subcurves = [=](std::vector<Coord> const &splits) {
+ std::vector<Subcurve> result;
+ result.reserve(splits.size() + 1);
+ Coord previous = 0;
+ for (Coord split : splits) {
+ // Use global EPSILON since we're operating on normalized curve times.
+ if (split < EPSILON || split > 1.0 - EPSILON) {
+ continue;
+ }
+ result.emplace_back(portion(previous, split), previous, split);
+ previous = split;
+ }
+ result.emplace_back(portion(previous, 1.0), previous, 1.0);
+ return result;
+ };
+
+ /// A closure to find pairwise intersections between the passed subcurves.
+ auto const pairwise_intersect = [=](std::vector<Subcurve> const &subcurves) {
+ std::vector<CurveIntersection> result;
+ for (unsigned i = 0; i < subcurves.size(); i++) {
+ for (unsigned j = i + 1; j < subcurves.size(); j++) {
+ auto const xings = subcurves[i].curve->intersect(*subcurves[j].curve, eps);
+ for (auto const &xing : xings) {
+ // To avoid duplicate intersections, skip values at exactly 1.
+ if (xing.first == 1. || xing.second == 1.) {
+ continue;
+ }
+ Coord const ti = subcurves[i].parameter_range.valueAt(xing.first);
+ Coord const tj = subcurves[j].parameter_range.valueAt(xing.second);
+ result.emplace_back(ti, tj, xing.point());
+ }
+ }
+ }
+ std::sort(result.begin(), result.end());
+ return result;
+ };
+
+ // Monotonic segments cannot have self-intersections. Thus, we can split
+ // the curve at critical points of the X or Y coordinate and intersect
+ // the portions. However, there's the risk that a juncture between two
+ // adjacent portions is mistaken for an intersection due to numerical errors.
+ // Hence, we run the algorithm for both the X and Y coordinates and only
+ // keep the intersections that show up in both intersection lists.
+
+ // Find the critical points of both coordinates.
+ std::unique_ptr<Curve> deriv{derivative()};
+ auto const crits_x = deriv->roots(0, X);
+ auto const crits_y = deriv->roots(0, Y);
+ if (crits_x.empty() || crits_y.empty()) {
+ return {};
+ }
+
+ // Split into pieces in two ways and find self-intersections.
+ auto const pieces_x = split_into_subcurves(crits_x);
+ auto const pieces_y = split_into_subcurves(crits_y);
+ auto const crossings_from_x = pairwise_intersect(pieces_x);
+ auto const crossings_from_y = pairwise_intersect(pieces_y);
+ if (crossings_from_x.empty() || crossings_from_y.empty()) {
+ return {};
+ }
+
+ // Filter the results, only keeping self-intersections found by both approaches.
+ std::vector<CurveIntersection> result;
+ unsigned index_y = 0;
+ for (auto &&candidate_x : crossings_from_x) {
+ // Find a crossing corresponding to this one in the y-method collection.
+ while (index_y != crossings_from_y.size()) {
+ auto const gap = crossings_from_y[index_y].first - candidate_x.first;
+ if (std::abs(gap) < EPSILON) {
+ // We found the matching intersection!
+ result.emplace_back(candidate_x);
+ index_y++;
+ break;
+ } else if (gap < 0.0) {
+ index_y++;
+ } else {
+ break;
+ }
+ }
+ }
+ return result;
+}
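+
+// Editorial usage sketch for the self-intersection routine above (assumes the CubicBezier
+// constructor from 2geom/bezier-curve.h; the control points are illustrative and chosen so
+// that the control polygon crosses itself):
+//
+//   Geom::CubicBezier loop(Geom::Point(0, 0),      Geom::Point(200, 100),
+//                          Geom::Point(-100, 100), Geom::Point(100, 0));
+//   auto xings = loop.intersectSelf();
+//   for (auto const &x : xings) {
+//       // x.first and x.second are the two curve times at which the curve
+//       // passes through the same point x.point().
+//   }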
+
+Point Curve::unitTangentAt(Coord t, unsigned n) const
+{
+ std::vector<Point> derivs = pointAndDerivatives(t, n);
+ for (unsigned deriv_n = 1; deriv_n < derivs.size(); deriv_n++) {
+ Coord length = derivs[deriv_n].length();
+ if ( ! are_near(length, 0) ) {
+ // length of derivative is non-zero, so return unit vector
+ return derivs[deriv_n] / length;
+ }
+ }
+ return Point (0,0);
+}
+
+void Curve::feed(PathSink &sink, bool moveto_initial) const
+{
+ std::vector<Point> pts;
+ sbasis_to_bezier(pts, toSBasis(), 2); //TODO: use something better!
+ if (moveto_initial) {
+ sink.moveTo(initialPoint());
+ }
+ sink.curveTo(pts[0], pts[1], pts[2]);
+}
+
+} // namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/d2-sbasis.cpp b/src/2geom/d2-sbasis.cpp
new file mode 100644
index 0000000..4e95f6f
--- /dev/null
+++ b/src/2geom/d2-sbasis.cpp
@@ -0,0 +1,364 @@
+/**
+ * \file
+ * \brief Some two-dimensional SBasis operations
+ *//*
+ * Authors:
+ * MenTaLguy <mental@rydia.net>
+ * Jean-François Barraud <jf.barraud@gmail.com>
+ * Johan Engelen <j.b.c.engelen@alumnus.utwente.nl>
+ *
+ * Copyright 2007-2012 Authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ */
+
+#include <2geom/d2.h>
+#include <2geom/piecewise.h>
+
+namespace Geom {
+
+SBasis L2(D2<SBasis> const & a, unsigned k) { return sqrt(dot(a, a), k); }
+
+D2<SBasis> multiply(Linear const & a, D2<SBasis> const & b) {
+ return D2<SBasis>(multiply(a, b[X]), multiply(a, b[Y]));
+}
+
+D2<SBasis> multiply(SBasis const & a, D2<SBasis> const & b) {
+ return D2<SBasis>(multiply(a, b[X]), multiply(a, b[Y]));
+}
+
+D2<SBasis> truncate(D2<SBasis> const & a, unsigned terms) {
+ return D2<SBasis>(truncate(a[X], terms), truncate(a[Y], terms));
+}
+
+unsigned sbasis_size(D2<SBasis> const & a) {
+ return std::max((unsigned) a[0].size(), (unsigned) a[1].size());
+}
+
+//TODO: Does this make sense? Shouldn't it be Pythagorean (L2) or something similar?
+double tail_error(D2<SBasis> const & a, unsigned tail) {
+ return std::max(a[0].tailError(tail), a[1].tailError(tail));
+}
+
+Piecewise<D2<SBasis> > sectionize(D2<Piecewise<SBasis> > const &a) {
+ Piecewise<SBasis> x = partition(a[0], a[1].cuts), y = partition(a[1], a[0].cuts);
+ assert(x.size() == y.size());
+ Piecewise<D2<SBasis> > ret;
+ for(unsigned i = 0; i < x.size(); i++)
+ ret.push_seg(D2<SBasis>(x[i], y[i]));
+ ret.cuts.insert(ret.cuts.end(), x.cuts.begin(), x.cuts.end());
+ return ret;
+}
+
+D2<Piecewise<SBasis> > make_cuts_independent(Piecewise<D2<SBasis> > const &a) {
+ D2<Piecewise<SBasis> > ret;
+ for(unsigned d = 0; d < 2; d++) {
+ for(unsigned i = 0; i < a.size(); i++)
+ ret[d].push_seg(a[i][d]);
+ ret[d].cuts.insert(ret[d].cuts.end(), a.cuts.begin(), a.cuts.end());
+ }
+ return ret;
+}
+
+Piecewise<D2<SBasis> > rot90(Piecewise<D2<SBasis> > const &M){
+ Piecewise<D2<SBasis> > result;
+ if (M.empty()) return M;
+ result.push_cut(M.cuts[0]);
+ for (unsigned i=0; i<M.size(); i++){
+ result.push(rot90(M[i]),M.cuts[i+1]);
+ }
+ return result;
+}
+
+/** @brief Calculates the 'dot product' or 'inner product' of \c a and \c b
+ * @return \f[
+ * f(t) \rightarrow \left\{
+ * \begin{array}{c}
+ * a_1 \bullet b_1 \\
+ * a_2 \bullet b_2 \\
+ * \ldots \\
+ * a_n \bullet b_n \\
+ * \end{array}\right.
+ * \f]
+ * @relates Piecewise */
+Piecewise<SBasis> dot(Piecewise<D2<SBasis> > const &a, Piecewise<D2<SBasis> > const &b)
+{
+ Piecewise<SBasis > result;
+ if (a.empty() || b.empty()) return result;
+ Piecewise<D2<SBasis> > aa = partition(a,b.cuts);
+ Piecewise<D2<SBasis> > bb = partition(b,a.cuts);
+
+ result.push_cut(aa.cuts.front());
+ for (unsigned i=0; i<aa.size(); i++){
+ result.push(dot(aa.segs[i],bb.segs[i]),aa.cuts[i+1]);
+ }
+ return result;
+}
+
+/** @brief Calculates the 'dot product' or 'inner product' of \c a and \c b
+ * @return \f[
+ * f(t) \rightarrow \left\{
+ * \begin{array}{c}
+ * a_1 \bullet b \\
+ * a_2 \bullet b \\
+ * \ldots \\
+ * a_n \bullet b \\
+ * \end{array}\right.
+ * \f]
+ * @relates Piecewise */
+Piecewise<SBasis> dot(Piecewise<D2<SBasis> > const &a, Point const &b)
+{
+ Piecewise<SBasis > result;
+ if (a.empty()) return result;
+
+ result.push_cut(a.cuts.front());
+ for (unsigned i = 0; i < a.size(); ++i){
+ result.push(dot(a.segs[i],b), a.cuts[i+1]);
+ }
+ return result;
+}
+
+
+Piecewise<SBasis> cross(Piecewise<D2<SBasis> > const &a,
+ Piecewise<D2<SBasis> > const &b){
+ Piecewise<SBasis > result;
+ if (a.empty() || b.empty()) return result;
+ Piecewise<D2<SBasis> > aa = partition(a,b.cuts);
+ Piecewise<D2<SBasis> > bb = partition(b,a.cuts);
+
+ result.push_cut(aa.cuts.front());
+ for (unsigned i=0; i<a.size(); i++){
+ result.push(cross(aa.segs[i],bb.segs[i]),aa.cuts[i+1]);
+ }
+ return result;
+}
+
+Piecewise<D2<SBasis> > operator*(Piecewise<D2<SBasis> > const &a, Affine const &m) {
+ Piecewise<D2<SBasis> > result;
+ if(a.empty()) return result;
+ result.push_cut(a.cuts[0]);
+ for (unsigned i = 0; i < a.size(); i++) {
+ result.push(a[i] * m, a.cuts[i+1]);
+ }
+ return result;
+}
+
+// If tol > 0, only force continuity where the jump is smaller than tol.
+Piecewise<D2<SBasis> > force_continuity(Piecewise<D2<SBasis> > const &f, double tol, bool closed)
+{
+ if (f.size()==0) return f;
+ Piecewise<D2<SBasis> > result=f;
+ unsigned cur = (closed)? 0:1;
+ unsigned prev = (closed)? f.size()-1:0;
+ while(cur<f.size()){
+ Point pt0 = f.segs[prev].at1();
+ Point pt1 = f.segs[cur ].at0();
+ if (tol<=0 || L2sq(pt0-pt1)<tol*tol){
+ pt0 = (pt0+pt1)/2;
+ for (unsigned dim=0; dim<2; dim++){
+ SBasis &prev_sb=result.segs[prev][dim];
+ SBasis &cur_sb =result.segs[cur][dim];
+ Coord const c=pt0[dim];
+ if (prev_sb.isZero(0)) {
+ prev_sb = SBasis(Linear(0.0, c));
+ } else {
+ prev_sb[0][1] = c;
+ }
+ if (cur_sb.isZero(0)) {
+ cur_sb = SBasis(Linear(c, 0.0));
+ } else {
+ cur_sb[0][0] = c;
+ }
+ }
+ }
+ prev = cur++;
+ }
+ return result;
+}
+
+std::vector<Geom::Piecewise<Geom::D2<Geom::SBasis> > >
+split_at_discontinuities (Geom::Piecewise<Geom::D2<Geom::SBasis> > const & pwsbin, double tol)
+{
+ using namespace Geom;
+ std::vector<Piecewise<D2<SBasis> > > ret;
+ unsigned piece_start = 0;
+ for (unsigned i=0; i<pwsbin.segs.size(); i++){
+ if (i==(pwsbin.segs.size()-1) || L2(pwsbin.segs[i].at1()- pwsbin.segs[i+1].at0()) > tol){
+ Piecewise<D2<SBasis> > piece;
+ piece.cuts.push_back(pwsbin.cuts[piece_start]);
+ for (unsigned j = piece_start; j<i+1; j++){
+ piece.segs.push_back(pwsbin.segs[j]);
+ piece.cuts.push_back(pwsbin.cuts[j+1]);
+ }
+ ret.push_back(piece);
+ piece_start = i+1;
+ }
+ }
+ return ret;
+}
+
+Point unitTangentAt(D2<SBasis> const & a, Coord t, unsigned n)
+{
+ std::vector<Point> derivs = a.valueAndDerivatives(t, n);
+ for (unsigned deriv_n = 1; deriv_n < derivs.size(); deriv_n++) {
+ Coord length = derivs[deriv_n].length();
+ if ( ! are_near(length, 0) ) {
+ // length of derivative is non-zero, so return unit vector
+ return derivs[deriv_n] / length;
+ }
+ }
+ return Point (0,0);
+}
+
+static void set_first_point(Piecewise<D2<SBasis> > &f, Point const &a){
+ if ( f.empty() ){
+ f.concat(Piecewise<D2<SBasis> >(D2<SBasis>(SBasis(Linear(a[X])), SBasis(Linear(a[Y])))));
+ return;
+ }
+ for (unsigned dim=0; dim<2; dim++){
+ f.segs.front()[dim][0][0] = a[dim];
+ }
+}
+static void set_last_point(Piecewise<D2<SBasis> > &f, Point const &a){
+ if ( f.empty() ){
+ f.concat(Piecewise<D2<SBasis> >(D2<SBasis>(SBasis(Linear(a[X])), SBasis(Linear(a[Y])))));
+ return;
+ }
+ for (unsigned dim=0; dim<2; dim++){
+ f.segs.back()[dim][0][1] = a[dim];
+ }
+}
+
+std::vector<Piecewise<D2<SBasis> > > fuse_nearby_ends(std::vector<Piecewise<D2<SBasis> > > const &f, double tol){
+
+ if ( f.empty()) return f;
+ std::vector<Piecewise<D2<SBasis> > > result;
+ std::vector<std::vector<unsigned> > pre_result;
+ for (unsigned i=0; i<f.size(); i++){
+ bool inserted = false;
+ Point a = f[i].firstValue();
+ Point b = f[i].lastValue();
+ for (auto & j : pre_result){
+ Point aj = f.at(j.back()).lastValue();
+ Point bj = f.at(j.front()).firstValue();
+ if ( L2(a-aj) < tol ) {
+ j.push_back(i);
+ inserted = true;
+ break;
+ }
+ if ( L2(b-bj) < tol ) {
+ j.insert(j.begin(),i);
+ inserted = true;
+ break;
+ }
+ }
+ if (!inserted) {
+ pre_result.emplace_back();
+ pre_result.back().push_back(i);
+ }
+ }
+ for (auto & i : pre_result){
+ Piecewise<D2<SBasis> > comp;
+ for (unsigned j=0; j<i.size(); j++){
+ Piecewise<D2<SBasis> > new_comp = f.at(i[j]);
+ if ( j>0 ){
+ set_first_point( new_comp, comp.segs.back().at1() );
+ }
+ comp.concat(new_comp);
+ }
+ if ( L2(comp.firstValue()-comp.lastValue()) < tol ){
+ //TODO: check sizes!!!
+ set_last_point( comp, comp.segs.front().at0() );
+ }
+ result.push_back(comp);
+ }
+ return result;
+}
+
+/*
+ * Computes the intersection of two sets, each given as an (ordered) union of intervals.
+ */
+static std::vector<Interval> intersect( std::vector<Interval> const &a, std::vector<Interval> const &b){
+ std::vector<Interval> result;
+ //TODO: use order!
+ for (auto i : a){
+ for (auto j : b){
+ OptInterval c( i );
+ c &= j;
+ if ( c ) {
+ result.push_back( *c );
+ }
+ }
+ }
+ return result;
+}
+
+std::vector<Interval> level_set( D2<SBasis> const &f, Rect region){
+ std::vector<Rect> regions( 1, region );
+ return level_sets( f, regions ).front();
+}
+std::vector<Interval> level_set( D2<SBasis> const &f, Point p, double tol){
+ Rect region(p, p);
+ region.expandBy( tol );
+ return level_set( f, region );
+}
+std::vector<std::vector<Interval> > level_sets( D2<SBasis> const &f, std::vector<Rect> regions){
+ std::vector<Interval> regsX (regions.size(), Interval() );
+ std::vector<Interval> regsY (regions.size(), Interval() );
+ for ( unsigned i=0; i < regions.size(); i++ ){
+ regsX[i] = regions[i][X];
+ regsY[i] = regions[i][Y];
+ }
+ std::vector<std::vector<Interval> > x_in_regs = level_sets( f[X], regsX );
+ std::vector<std::vector<Interval> > y_in_regs = level_sets( f[Y], regsY );
+ std::vector<std::vector<Interval> >result(regions.size(), std::vector<Interval>() );
+ for (unsigned i=0; i<regions.size(); i++){
+ result[i] = intersect ( x_in_regs[i], y_in_regs[i] );
+ }
+ return result;
+}
+std::vector<std::vector<Interval> > level_sets( D2<SBasis> const &f, std::vector<Point> pts, double tol){
+ std::vector<Rect> regions( pts.size(), Rect() );
+ for (unsigned i=0; i<pts.size(); i++){
+ regions[i] = Rect( pts[i], pts[i] );
+ regions[i].expandBy( tol );
+ }
+ return level_sets( f, regions );
+}
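+
+// Editorial usage sketch for the level-set helpers above (some_curve is a hypothetical
+// Curve; toSBasis() is used only to obtain a D2<SBasis>):
+//
+//   D2<SBasis> f = some_curve.toSBasis();
+//   std::vector<Interval> hits = level_set(f, Point(10, 20), 0.5);
+//   for (Interval const &iv : hits) {
+//       // for every t in iv, f(t) lies inside the 0.5-box around (10, 20)
+//   }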
+
+
+} // namespace Geom
+
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/doxygen.cpp b/src/2geom/doxygen.cpp
new file mode 100644
index 0000000..3c64eec
--- /dev/null
+++ b/src/2geom/doxygen.cpp
@@ -0,0 +1,301 @@
+/*
+ * Doxygen documentation for the lib2geom library
+ *
+ * Authors:
+ * Krzysztof Kosiński <tweenk.pl@gmail.com>
+ *
+ * Copyright 2009-2011 Authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+// Main page of the documentation - contains logo and introductory text
+/**
+ * @mainpage
+ *
+ * @image html 2geom-logo.png
+ *
+ * @section Introduction
+ *
+ * 2Geom is a computational geometry library intended for use with 2D vector graphics.
+ * It concentrates on high-level algorithms, such as computing the length of a curve
+ * or Boolean operations on paths. It evolved from the geometry code used
+ * in Inkscape, a free software, cross-platform vector graphics editor.
+ *
+ * @section UserGuide User guide
+ *
+ * - @subpage Overview "Overview of 2Geom"
+ * - @ref Primitives "Primitives" - points, angles, lines, axis-aligned rectangles...
+ * - @ref Transforms "Transformations" - mathematical representation for operations
+ * like translation, scaling and rotation.
+ * - @ref Fragments "Fragments" - one-dimensional functions and related utilities.
+ * - @ref Curves "Curves" - functions mapping the unit interval to points on a plane.
+ * - @ref Shapes "Shapes" - circles, ellipses, polygons and the like.
+ * - @ref Paths "Paths" - sequences of contiguous curves, aka splines, and their processing.
+ * - @ref ShapeOps "Shape operations" - boolean algebra, offsets and other advanced operations.
+ * - @ref Containers "Geometric containers" - efficient ways to store and retrieve
+ * geometric information.
+ * - @ref Utilities "Utilities" - other useful code that does not fit under the above categories.
+ * - @subpage ReleaseNotes "Release notes" - what's new in 2Geom
+ *
+ * @section DeveloperInfo Developer information
+ *
+ * - @subpage CodingStandards "Coding standards used in 2Geom"
+ */
+
+// Overview subpage
+/**
+ * @page Overview Overview of 2Geom
+ *
+ * 2Geom has two APIs: a high-level one, which uses virtual functions to allow handling
+ * objects in a generic way without knowing their actual type at compile time,
+ * and a lower-level one based on templates, which is designed with performance in mind.
+ * For performance-critical tasks it may be necessary to use the lower level API.
+ *
+ * @section CoordSys Standard coordinate system
+ *
+ * 2Geom's standard coordinate system is common for computer graphics: the X axis grows
+ * to the right and the Y axis grows downwards. Angles start from the +X axis
+ * and grow towards the +Y axis (clockwise).
+ *
+ * @image html coords.png Standard coordinate system in 2Geom
+ *
+ * Most functions can be used without taking the coordinate system into account,
+ * as their interpretation is the same regardless of the coordinate system. However,
+ * a few of them depend on this definition, for example Rect's top() and bottom() methods.
+ *
+ * @section OpNote Operator note
+ *
+ * Most operators are provided by Boost operator helpers. This means that not all operators
+ * are defined in the class. For example, Rect only implements the operators
+ * +=, -= for points and *= for affines. The corresponding +, - and * operators
+ * are generated automatically by Boost.
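+ *
+ * A minimal sketch of how this plays out (an editorial example relying only on the
+ * operators described above):
+ * @code
+ Rect r(Point(0, 0), Point(10, 10));
+ r += Point(5, 5);                 // defined directly by Rect
+ Rect moved = r + Point(1, 1);     // operator+ generated by Boost from operator+=
+ @endcode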
+ */
+
+// RELEASE NOTES
+// Update this to describe the most important API changes.
+/**
+ * @page ReleaseNotes 2Geom release notes
+ *
+ * @section Ver04 Version 0.4
+ * - API additions:
+ * - Integer versions of Point, Interval and OptInterval, called
+ * IntPoint, IntInterval and OptIntInterval.
+ * - New geometric primitives: Angle and AngleInterval.
+ * - Major changes:
+ * - Matrix has been renamed to Affine.
+ * - Classification methods of Affine, for example Affine::isRotation(), will now
+ * return true for transforms that are close to identity. This is to reflect the
+ * fact that an identity transform can be interpreted as a rotation by zero
+ * degrees. To get the old behavior of returning false for identity, use
+ * methods prefixed with "Nonzero", e.g. Affine::isNonzeroRotation().
+ * - EllipticalArc and SVGEllipticalArc have been merged. Now there is only the former.
+ * All arcs are SVG-compliant.
+ * - Minor changes:
+ * - Affine::without_translation() is now called Affine::withoutTranslation().
+ * - Interval::strict_contains() is now called Interval::interiorContains().
+ * The same change has been made for Rect.
+ * - Some unclear and unused operators of D2 were removed, for instance D2 * Point.
+ * - Interval is now a derived class of a GenericInterval template.
+ * - Rect is no longer a D2 specialization.
+ * - isnan.h merged with math-utils.h.
+ * @section Ver03 Version 0.3
+ * - release notes were started after this version.
+ */
+
+/**
+ * @page CodingStandards Coding standards and conventions used in 2Geom
+ *
+ * @section Filenames
+ *
+ * Files and directories should be all lowercase. Words should be separated with hyphens (-).
+ * Underscores, capital letters and non-ASCII characters should not be used.
+ *
+ * @section Indenting
+ *
+ * All files should use 4 spaces as indentation.
+ *
+ * @section Namespaces
+ *
+ * All classes intended for direct use by the end users should be in the Geom namespace.
+ * Contents of namespaces should not be indented. Closing brace of a namespace
+ * should have a comment indicating which namespace it is closing.
+ * @code
+ namespace Geom {
+ namespace FooInternal {
+
+ unsigned some_function()
+ {
+ // ...code...
+ }
+
+ } // namespace FooInternal
+ } // namespace Geom
+ @endcode
+ *
+ * @section Classes
+ *
+ * @code
+ // superclass list should use Boost notation,
+ // especially if there is more than one.
+ class Foo
+ : public Bar
+ , public Baz
+ {
+ // constructors should use Boost notation if the class has superclasses.
+ Foo(int a)
+ : Bar(a)
+ , Baz(b)
+ {
+ // constructor body
+ }
+ Foo(int a) {
+ // constructor with default initialization of superclasses
+ }
+
+ // methods use camelCaseNames.
+ // one-line methods can be collapsed.
+ bool isActive() { return _blurp; }
+ // multi-line methods have the opening brace on the same line.
+ void invert() {
+ // ... code ...
+ }
+
+ // static functions use lowercase_with_underscores.
+ // static factory functions should be called from_something.
+ static Foo from_point(Point const &p) {
+ // ...
+ }
+ }; // end of class Foo
+
+ // Closing brace of a class should have the above comment, unless it's very short.
+ @endcode
+ *
+ * @section FreeFuns Free functions
+ *
+ * Functions should use lowercase_with_underscores names. The opening brace of
+ * the definition should be on a separate line.
+ *
+ * @section InlineInClasses When to use inline
+ *
+ * The "inline" keyword is not required when the body of the function is given
+ * in the definition of the class. Do not mark such functions inline, because
+ * they are implicitly inline by language rules. It is only
+ * necessary to use the inline keyword when the body of the function is given
+ * after the class definition.
+ */
+
+// Documentation for groups
+/**
+ * @defgroup Transforms Affine transformations
+ * @brief Transformations of the plane such as rotation and scaling
+ *
+ * Each transformation class represents a set of affine transforms that is closed
+ * under multiplication. These are translation, scaling, rotation, horizontal shearing
+ * and vertical shearing. Any affine transform can be obtained by combining those
+ * basic operations.
+ *
+ * Each of the transforms can be applied to points and matrices (using multiplication).
+ * Each can also be converted into a matrix (which can represent any composition
+ * of transforms generically). All (except translation) use the origin (0,0) as the invariant
+ * point (i.e. the point that stays in the same place when the transform is applied to the plane).
+ * To obtain transforms with different invariant points, combine them with translation to
+ * and back from the origin. For example, to get a 60 degree rotation around the point @a p:
+ * @code Affine rot_around_p = Translate(-p) * Rotate::from_degrees(60) * Translate(p); @endcode
+ *
+ * Multiplication of transforms is associative: the result of an expression involving
+ * points and matrices is the same regardless of the order of evaluating multiplications.
+ *
+ * If you need to transform a complicated object
+ * by A, then B, and then C, you should first compute the total transform and apply it to the
+ * object in one go. This way instead of performing 3 expensive operations, you will only do
+ * two very fast matrix multiplications and one complex transformation. Here is an example:
+ * @code
+ transformed_path = long_path * A * B * C; // wrong! long_path will be transformed 3 times.
+ transformed_path = long_path * (A * B * C); // good! long_path will be transformed only once.
+ Affine total = A * B * C; // you can store the transform to apply it to several objects.
+ transformed_path = long_path * total; // good!
+ @endcode
+ * Ordering note: if you compose transformations via multiplication, they are applied
+ * from left to right. If you write <code> ptrans = p * A * B * C * D;</code>, then it means
+ * that @a ptrans is obtained from @a p by first transforming it by A, then by B, then by C,
+ * and finally by D. This is a consequence of interpreting points as row vectors, instead
+ * of the more common column vector interpretation; 2Geom's choice leads to more intuitive
+ * notation.
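+ *
+ * A short sketch of this ordering rule (an editorial example using the classes above;
+ * the concrete coordinates assume the clockwise angle convention described in the overview):
+ * @code
+ Point p(1, 0);
+ Point q = p * Rotate::from_degrees(90) * Translate(Point(10, 0));
+ // p is rotated by 90 degrees first (to (0, 1)), then translated,
+ // so q should end up at (10, 1).
+ @endcode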
+ */
+
+/**
+ * @defgroup Primitives Primitives
+ * @brief Basic mathematical objects such as intervals and points
+ *
+ * 2Geom has several basic geometrical objects: points, lines, intervals, angles,
+ * and others. Most of those objects can be treated as sets of points or numbers
+ * satisfying some equation or as functions.
+ */
+
+/**
+ * @defgroup Fragments Fragments and related classes
+ * @brief 1D functions on the unit interval
+ *
+ * Each type of fragments represents one of the various ways in which a function from
+ * the unit interval to the real line may be given. These are the most important mathematical
+ * primitives in 2Geom.
+ */
+
+/**
+ * @defgroup Curves Curves
+ * @brief Functions mapping the unit interval to a plane
+ *
+ * Curves are functions \f$\mathbf{C}: [0, 1] \to \mathbb{R}^2\f$. For details, see
+ * the documentation for the Curve class. All curves can be included in paths and path sequences.
+ */
+
+/**
+ * @defgroup Shapes Basic shapes
+ * @brief Circles, ellipses, polygons...
+ *
+ * Among the shapes supported by 2Geom are circles, ellipses and polygons.
+ * Polygons can also be represented by paths containing only linear segments.
+ */
+
+/**
+ * @defgroup Paths Paths and path sequences
+ * @brief Sequences of contiguous curves, aka splines, and their processing
+ */
+
+/**
+ * @defgroup Utilities Miscellaneous utilities
+ * @brief Useful code that does not fit under other categories.
+ */
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/ellipse.cpp b/src/2geom/ellipse.cpp
new file mode 100644
index 0000000..42cb36d
--- /dev/null
+++ b/src/2geom/ellipse.cpp
@@ -0,0 +1,790 @@
+/** @file
+ * @brief Ellipse shape
+ *//*
+ * Authors:
+ * Marco Cecchetti <mrcekets at gmail.com>
+ * Krzysztof Kosiński <tweenk.pl@gmail.com>
+ *
+ * Copyright 2008-2014 Authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+#include <2geom/conicsec.h>
+#include <2geom/ellipse.h>
+#include <2geom/elliptical-arc.h>
+#include <2geom/numeric/fitting-tool.h>
+#include <2geom/numeric/fitting-model.h>
+
+namespace Geom {
+
+Ellipse::Ellipse(Geom::Circle const &c)
+ : _center(c.center())
+ , _rays(c.radius(), c.radius())
+ , _angle(0)
+{}
+
+void Ellipse::setCoefficients(double A, double B, double C, double D, double E, double F)
+{
+ double den = 4*A*C - B*B;
+ if (den == 0) {
+ THROW_RANGEERROR("den == 0, while computing ellipse centre");
+ }
+ _center[X] = (B*E - 2*C*D) / den;
+ _center[Y] = (B*D - 2*A*E) / den;
+
+ // evaluate the a coefficient of the ellipse equation in normal form
+ // E(x,y) = a*(x-cx)^2 + b*(x-cx)*(y-cy) + c*(y-cy)^2 = 1
+ // where b = a*B , c = a*C, (cx,cy) == centre
+ double num = A * sqr(_center[X])
+ + B * _center[X] * _center[Y]
+ + C * sqr(_center[Y])
+ - F;
+
+
+ //evaluate ellipse rotation angle
+ _angle = std::atan2( -B, -(A - C) )/2;
+
+ // evaluate the length of the ellipse rays
+ double sinrot, cosrot;
+ sincos(_angle, sinrot, cosrot);
+ double cos2 = cosrot * cosrot;
+ double sin2 = sinrot * sinrot;
+ double cossin = cosrot * sinrot;
+
+ den = A * cos2 + B * cossin + C * sin2;
+ if (den == 0) {
+ THROW_RANGEERROR("den == 0, while computing 'rx' coefficient");
+ }
+ double rx2 = num / den;
+ if (rx2 < 0) {
+ THROW_RANGEERROR("rx2 < 0, while computing 'rx' coefficient");
+ }
+ _rays[X] = std::sqrt(rx2);
+
+ den = C * cos2 - B * cossin + A * sin2;
+ if (den == 0) {
+ THROW_RANGEERROR("den == 0, while computing 'ry' coefficient");
+ }
+ double ry2 = num / den;
+ if (ry2 < 0) {
+        THROW_RANGEERROR("ry2 < 0, while computing 'ry' coefficient");
+ }
+ _rays[Y] = std::sqrt(ry2);
+
+    // the solution is not unique, so we always choose the ellipse
+    // with a rotation angle between 0 and PI/2
+ makeCanonical();
+}
+
+Point Ellipse::initialPoint() const
+{
+ Coord sinrot, cosrot;
+ sincos(_angle, sinrot, cosrot);
+ Point p(ray(X) * cosrot + center(X), ray(X) * sinrot + center(Y));
+ return p;
+}
+
+
+Affine Ellipse::unitCircleTransform() const
+{
+ Affine ret = Scale(ray(X), ray(Y)) * Rotate(_angle);
+ ret.setTranslation(center());
+ return ret;
+}
+
+Affine Ellipse::inverseUnitCircleTransform() const
+{
+ if (ray(X) == 0 || ray(Y) == 0) {
+ THROW_RANGEERROR("a degenerate ellipse doesn't have an inverse unit circle transform");
+ }
+ Affine ret = Translate(-center()) * Rotate(-_angle) * Scale(1/ray(X), 1/ray(Y));
+ return ret;
+}
+
+
+LineSegment Ellipse::axis(Dim2 d) const
+{
+ Point a(0, 0), b(0, 0);
+ a[d] = -1;
+ b[d] = 1;
+ LineSegment ls(a, b);
+ ls.transform(unitCircleTransform());
+ return ls;
+}
+
+LineSegment Ellipse::semiaxis(Dim2 d, int sign) const
+{
+ Point a(0, 0), b(0, 0);
+ b[d] = sgn(sign);
+ LineSegment ls(a, b);
+ ls.transform(unitCircleTransform());
+ return ls;
+}
+
+Rect Ellipse::boundsExact() const
+{
+ auto const trans = unitCircleTransform();
+
+ auto proj_bounds = [&] (Dim2 d) {
+ // The dth coordinate function pulls back to trans[d] * x + trans[d + 2] * y + trans[d + 4]
+ // in the coordinate system where the ellipse is a unit circle. We compute its range of
+ // values on the unit circle.
+ auto const r = std::hypot(trans[d], trans[d + 2]);
+ auto const mid = trans[d + 4];
+ return Interval(mid - r, mid + r);
+ };
+
+ return { proj_bounds(X), proj_bounds(Y) };
+}
+
+Rect Ellipse::boundsFast() const
+{
+ // Every ellipse is contained in the circle with the same center and radius
+ // equal to the larger of the two rays. We return the bounding square
+ // of this circle (this is really fast but only exact for circles).
+ auto const larger_ray = (ray(X) > ray(Y) ? ray(X) : ray(Y));
+ assert(larger_ray >= 0.0);
+ auto const rr = Point(larger_ray, larger_ray);
+ return Rect(_center - rr, _center + rr);
+}
+
+std::vector<double> Ellipse::coefficients() const
+{
+ std::vector<double> c(6);
+ coefficients(c[0], c[1], c[2], c[3], c[4], c[5]);
+ return c;
+}
+
+void Ellipse::coefficients(Coord &A, Coord &B, Coord &C, Coord &D, Coord &E, Coord &F) const
+{
+ if (ray(X) == 0 || ray(Y) == 0) {
+ THROW_RANGEERROR("a degenerate ellipse doesn't have an implicit form");
+ }
+
+ double cosrot, sinrot;
+ sincos(_angle, sinrot, cosrot);
+ double cos2 = cosrot * cosrot;
+ double sin2 = sinrot * sinrot;
+ double cossin = cosrot * sinrot;
+ double invrx2 = 1 / (ray(X) * ray(X));
+ double invry2 = 1 / (ray(Y) * ray(Y));
+
+ A = invrx2 * cos2 + invry2 * sin2;
+ B = 2 * (invrx2 - invry2) * cossin;
+ C = invrx2 * sin2 + invry2 * cos2;
+ D = -2 * A * center(X) - B * center(Y);
+ E = -2 * C * center(Y) - B * center(X);
+ F = A * center(X) * center(X)
+ + B * center(X) * center(Y)
+ + C * center(Y) * center(Y)
+ - 1;
+}
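+
+// Editorial round-trip sketch: the implicit-form coefficients produced by coefficients()
+// can be fed back into setCoefficients() to reconstruct an equivalent ellipse. The
+// (center, rays, angle) constructor and the default constructor are assumed here.
+//
+//   Ellipse e(Point(10, 20), Point(5, 3), 0.5);
+//   Coord A, B, C, D, E, F;
+//   e.coefficients(A, B, C, D, E, F);
+//   Ellipse e2;
+//   e2.setCoefficients(A, B, C, D, E, F);   // e2 describes the same ellipse, up to canonical form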
+
+
+void Ellipse::fit(std::vector<Point> const &points)
+{
+ size_t sz = points.size();
+ if (sz < 5) {
+ THROW_RANGEERROR("fitting error: too few points passed");
+ }
+ NL::LFMEllipse model;
+ NL::least_squeares_fitter<NL::LFMEllipse> fitter(model, sz);
+
+ for (size_t i = 0; i < sz; ++i) {
+ fitter.append(points[i]);
+ }
+ fitter.update();
+
+ NL::Vector z(sz, 0.0);
+ model.instance(*this, fitter.result(z));
+}
+
+
+EllipticalArc *
+Ellipse::arc(Point const &ip, Point const &inner, Point const &fp)
+{
+ // This is resistant to degenerate ellipses:
+ // both flags evaluate to false in that case.
+
+ bool large_arc_flag = false;
+ bool sweep_flag = false;
+
+ // Determination of large arc flag:
+ // large_arc is false when the inner point is on the same side
+ // of the center---initial point line as the final point, AND
+ // is on the same side of the center---final point line as the
+ // initial point.
+ // Additionally, large_arc is always false when we have exactly
+ // 1/2 of an arc, i.e. the cross product of the center -> initial point
+ // and center -> final point vectors is zero.
+ // Negating the above leads to the condition for large_arc being true.
+ Point fv = fp - _center;
+ Point iv = ip - _center;
+ Point innerv = inner - _center;
+ double ifcp = cross(fv, iv);
+
+ if (ifcp != 0 && (sgn(cross(fv, innerv)) != sgn(ifcp) ||
+ sgn(cross(iv, innerv)) != sgn(-ifcp)))
+ {
+ large_arc_flag = true;
+ }
+
+ // Determination of sweep flag:
+ // For clarity, let's assume that Y grows up. Then the cross product
+ // is positive for points on the left side of a vector and negative
+ // on the right side of a vector.
+ //
+ // cross(?, v) > 0
+ // o------------------->
+ // cross(?, v) < 0
+ //
+ // If the arc is small (large_arc_flag is false) and the final point
+ // is on the right side of the vector initial point -> center,
+ // we have to go in the direction of increasing angles
+ // (counter-clockwise) and the sweep flag is true.
+ // If the arc is large, the opposite is true, since we have to reach
+ // the final point going the long way - in the other direction.
+ // We can express this observation as:
+ // cross(_center - ip, fp - _center) < 0 xor large_arc flag
+ // This is equal to:
+ // cross(-iv, fv) < 0 xor large_arc flag
+ // But cross(-iv, fv) is equal to cross(fv, iv) due to antisymmetry
+ // of the cross product, so we end up with the condition below.
+ if ((ifcp < 0) ^ large_arc_flag) {
+ sweep_flag = true;
+ }
+
+ EllipticalArc *ret_arc = new EllipticalArc(ip, ray(X), ray(Y), rotationAngle(),
+ large_arc_flag, sweep_flag, fp);
+ return ret_arc;
+}
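+
+// Editorial usage sketch: the caller owns the returned EllipticalArc. Points on the
+// ellipse can be obtained with pointAt(); the flags computed above automatically select
+// the arc passing through the given inner point.
+//
+//   // for some non-degenerate Ellipse e:
+//   std::unique_ptr<EllipticalArc> a(e.arc(e.pointAt(0.0), e.pointAt(1.0), e.pointAt(2.0)));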
+
+Ellipse &Ellipse::operator*=(Rotate const &r)
+{
+ _angle += r.angle();
+ _center *= r;
+ return *this;
+}
+
+Ellipse &Ellipse::operator*=(Affine const& m)
+{
+ Affine a = Scale(ray(X), ray(Y)) * Rotate(_angle);
+ Affine mwot = m.withoutTranslation();
+ Affine am = a * mwot;
+ Point new_center = _center * m;
+
+ if (are_near(am.descrim(), 0)) {
+ double angle;
+ if (am[0] != 0) {
+ angle = std::atan2(am[2], am[0]);
+ } else if (am[1] != 0) {
+ angle = std::atan2(am[3], am[1]);
+ } else {
+ angle = M_PI/2;
+ }
+ Point v = Point::polar(angle) * am;
+ _center = new_center;
+ _rays[X] = L2(v);
+ _rays[Y] = 0;
+ _angle = atan2(v);
+ return *this;
+ } else if (mwot.isScale(0) && _angle.radians() == 0) {
+ _rays[X] *= std::abs(mwot[0]);
+ _rays[Y] *= std::abs(mwot[3]);
+ _center = new_center;
+ return *this;
+ }
+
+ std::vector<double> coeff = coefficients();
+ Affine q( coeff[0], coeff[1]/2,
+ coeff[1]/2, coeff[2],
+ 0, 0 );
+
+ Affine invm = mwot.inverse();
+ q = invm * q ;
+ std::swap(invm[1], invm[2]);
+ q *= invm;
+ setCoefficients(q[0], 2*q[1], q[3], 0, 0, -1);
+ _center = new_center;
+
+ return *this;
+}
+
+Ellipse Ellipse::canonicalForm() const
+{
+ Ellipse result(*this);
+ result.makeCanonical();
+ return result;
+}
+
+void Ellipse::makeCanonical()
+{
+ if (_rays[X] == _rays[Y]) {
+ _angle = 0;
+ return;
+ }
+
+ if (_angle < 0) {
+ _angle += M_PI;
+ }
+ if (_angle >= M_PI/2) {
+ std::swap(_rays[X], _rays[Y]);
+ _angle -= M_PI/2;
+ }
+}
+
+Point Ellipse::pointAt(Coord t) const
+{
+ Point p = Point::polar(t);
+ p *= unitCircleTransform();
+ return p;
+}
+
+Coord Ellipse::valueAt(Coord t, Dim2 d) const
+{
+ Coord sinrot, cosrot, cost, sint;
+ sincos(rotationAngle(), sinrot, cosrot);
+ sincos(t, sint, cost);
+
+ if ( d == X ) {
+ return ray(X) * cosrot * cost
+ - ray(Y) * sinrot * sint
+ + center(X);
+ } else {
+ return ray(X) * sinrot * cost
+ + ray(Y) * cosrot * sint
+ + center(Y);
+ }
+}
+
+Coord Ellipse::timeAt(Point const &p) const
+{
+ // degenerate ellipse is basically a reparametrized line segment
+ if (ray(X) == 0 || ray(Y) == 0) {
+ if (ray(X) != 0) {
+ return asin(Line(axis(X)).timeAt(p));
+ } else if (ray(Y) != 0) {
+ return acos(Line(axis(Y)).timeAt(p));
+ } else {
+ return 0;
+ }
+ }
+ Affine iuct = inverseUnitCircleTransform();
+ return Angle(atan2(p * iuct)).radians0(); // return a value in [0, 2pi)
+}
+
+Point Ellipse::unitTangentAt(Coord t) const
+{
+ Point p = Point::polar(t + M_PI/2);
+ p *= unitCircleTransform().withoutTranslation();
+ p.normalize();
+ return p;
+}
+
+bool Ellipse::contains(Point const &p) const
+{
+ Point tp = p * inverseUnitCircleTransform();
+ return tp.length() <= 1;
+}
+
+/** @brief Convert curve time on the major axis to the corresponding angle
+ * parameters on a degenerate ellipse collapsed onto that axis.
+ * @param t The curve time on the major axis of an ellipse.
+ * @param vertical If true, the major axis goes from angle -π/2 to +π/2;
+ * otherwise, the major axis connects angles π and 0.
+ * @return The two angles at which the collapsed ellipse passes through the
+ * major axis point corresponding to the given time \f$t \in [0, 1]\f$.
+ */
+static std::array<Coord, 2> axis_time_to_angles(Coord t, bool vertical)
+{
+ Coord const to_unit = std::clamp(2.0 * t - 1.0, -1.0, 1.0);
+ if (vertical) {
+ double const arcsin = std::asin(to_unit);
+ return {arcsin, M_PI - arcsin};
+ } else {
+ double const arccos = std::acos(to_unit);
+ return {arccos, -arccos};
+ }
+}
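+// Worked example (illustrative numbers, not used by the library): for t = 0.25
+// the clamped coordinate is to_unit = 2*0.25 - 1 = -0.5. In the horizontal case,
+// acos(-0.5) = 2*pi/3, so the returned angles are {2*pi/3, -2*pi/3}; in the
+// vertical case, asin(-0.5) = -pi/6, so the returned angles are {-pi/6, 7*pi/6}.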
+
+/** @brief For each intersection of some shape with the major axis of an ellipse, produce one or two
+ * intersections of a degenerate ellipse (collapsed onto that axis) with the same shape.
+ *
+ * @param axis_intersections The intersections of some shape with the major axis.
+ * @param vertical Whether this is the vertical major axis (in the ellipse's natural coordinates).
+ * @return A vector with doubled intersections (corresponding to the two passages of the squashed
+ * ellipse through that point) and swapped order of the intersected shapes.
+*/
+static std::vector<ShapeIntersection> double_axis_intersections(std::vector<ShapeIntersection> &&axis_intersections,
+ bool vertical)
+{
+ if (axis_intersections.empty()) {
+ return {};
+ }
+ std::vector<ShapeIntersection> result;
+ result.reserve(2 * axis_intersections.size());
+
+ for (auto const &x : axis_intersections) {
+ for (auto a : axis_time_to_angles(x.second, vertical)) {
+ result.emplace_back(a, x.first, x.point()); // Swap first <-> converted second.
+ if (x.second == 0.0 || x.second == 1.0) {
+ break; // Do not double up endpoint intersections.
+ }
+ }
+ }
+ return result;
+}
+
+std::vector<ShapeIntersection> Ellipse::intersect(Line const &line) const
+{
+ std::vector<ShapeIntersection> result;
+
+ if (line.isDegenerate()) {
+ return result;
+ }
+ if (ray(X) == 0 || ray(Y) == 0) {
+ return double_axis_intersections(line.intersect(majorAxis()), ray(X) == 0);
+ }
+
+ // Ax^2 + Bxy + Cy^2 + Dx + Ey + F
+ std::array<Coord, 6> coeffs;
+ coefficients(coeffs[0], coeffs[1], coeffs[2], coeffs[3], coeffs[4], coeffs[5]);
+ rescale_homogenous(coeffs);
+ auto [A, B, C, D, E, F] = coeffs;
+ Affine iuct = inverseUnitCircleTransform();
+
+ // generic case
+ std::array<Coord, 3> line_coeffs;
+ line.coefficients(line_coeffs[0], line_coeffs[1], line_coeffs[2]);
+ rescale_homogenous(line_coeffs);
+ auto [a, b, c] = line_coeffs;
+ Point lv = line.vector();
+
+ if (fabs(lv[X]) > fabs(lv[Y])) {
+ // y = -a/b x - c/b
+ Coord q = -a/b;
+ Coord r = -c/b;
+
+ // substitute that into the ellipse equation, making it quadratic in x
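+ // A*x^2 + B*x*(q*x + r) + C*(q*x + r)^2 + D*x + E*(q*x + r) + F =
+ // (A + B*q + C*q^2)*x^2 + (B*r + 2*C*q*r + D + E*q)*x + (C*r^2 + E*r + F)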
+ Coord I = A + B*q + C*q*q; // x^2 terms
+ Coord J = B*r + C*2*q*r + D + E*q; // x^1 terms
+ Coord K = C*r*r + E*r + F; // x^0 terms
+ std::vector<Coord> xs = solve_quadratic(I, J, K);
+
+ for (double x : xs) {
+ Point p(x, q*x + r);
+ result.emplace_back(atan2(p * iuct), line.timeAt(p), p);
+ }
+ } else {
+ Coord q = -b/a;
+ Coord r = -c/a;
+
+ Coord I = A*q*q + B*q + C;
+ Coord J = A*2*q*r + B*r + D*q + E;
+ Coord K = A*r*r + D*r + F;
+ std::vector<Coord> xs = solve_quadratic(I, J, K);
+
+ for (double x : xs) {
+ Point p(q*x + r, x);
+ result.emplace_back(atan2(p * iuct), line.timeAt(p), p);
+ }
+ }
+ return result;
+}
+
+std::vector<ShapeIntersection> Ellipse::intersect(LineSegment const &seg) const
+{
+ if (!boundsFast().intersects(seg.boundsFast())) {
+ return {};
+ }
+
+ // We simply reuse the procedure for lines and filter out
+ // results where the line time value is outside of the unit interval,
+ // but we apply a small tolerance to account for numerical errors.
+ double const param_prec = EPSILON / seg.length(0.0);
+ // TODO: accept a precision setting instead of always using EPSILON
+ // (requires an ABI break).
+
+ auto xings = intersect(Line(seg));
+ if (xings.empty()) {
+ return xings;
+ }
+ decltype(xings) result;
+ result.reserve(xings.size());
+
+ for (auto const &x : xings) {
+ if (x.second < -param_prec || x.second > 1.0 + param_prec) {
+ continue;
+ }
+ result.emplace_back(x.first, std::clamp(x.second, 0.0, 1.0), x.point());
+ }
+ return result;
+}
+
+std::vector<ShapeIntersection> Ellipse::intersect(Ellipse const &other) const
+{
+ // Handle degenerate cases first.
+ if (ray(X) == 0 || ray(Y) == 0) { // Degenerate ellipse, collapsed to the major axis.
+ return double_axis_intersections(other.intersect(majorAxis()), ray(X) == 0);
+ }
+ if (*this == other) { // Two identical ellipses.
+ THROW_INFINITELY_MANY_SOLUTIONS("The two ellipses are identical.");
+ }
+ if (!boundsFast().intersects(other.boundsFast())) {
+ return {};
+ }
+
+ // Find coefficients of the implicit equations of the two ellipses and rescale
+ // them (losslessly) for better numerical conditioning.
+ std::array<double, 6> coeffs;
+ coefficients(coeffs[0], coeffs[1], coeffs[2], coeffs[3], coeffs[4], coeffs[5]);
+ rescale_homogenous(coeffs);
+ auto [A, B, C, D, E, F] = coeffs;
+
+ std::array<double, 6> otheffs;
+ other.coefficients(otheffs[0], otheffs[1], otheffs[2], otheffs[3], otheffs[4], otheffs[5]);
+ rescale_homogenous(otheffs);
+ auto [a, b, c, d, e, f] = otheffs;
+
+ // Assume that Q(x, y) = 0 is the ellipse equation given by uppercase letters
+ // and R(x, y) = 0 is the equation given by lowercase ones.
+ // In other words, Q is the quadratic function describing this ellipse and
+ // R is the quadratic function for the other ellipse.
+ //
+ // A point (x, y) is common to both ellipses if and only if it solves the system
+ // { Q(x, y) = 0,
+ // { R(x, y) = 0.
+ //
+ // If µ is any real number, we can multiply the first equation by µ and add that
+ // to the second equation, obtaining the new system of equations:
+ // { Q(x, y) = 0,
+ // { µQ(x, y) + R(x, y) = 0.
+ //
+ // The first equation still says that (x, y) is a point on this ellipse, but the
+ // second equation uses the new expression (µQ + R) instead of the original R.
+ //
+ // Why do we do this? The reason is that the set of functions {µQ + R : µ real}
+ // is a "real system of conics" and there's a theorem which guarantees that such a system
+ // always contains a "degenerate conic" [proof below].
+ // In practice, the degenerate conic will describe a line or a pair of lines, and intersecting
+ // a line with an ellipse is much easier than intersecting two ellipses directly.
+ //
+ // But in order to be able to do this, we must find a value of µ for which µQ + R is degenerate.
+ // We can write the expression (µQ + R)(x, y) in the following way:
+ //
+ // | aa bb/2 dd/2 | |x|
+ // (µQ + R)(x, y) = [x y 1] | bb/2 cc ee/2 | |y|
+ // | dd/2 ee/2 ff | |1|
+ //
+ // where aa = µA + a and so on. The determinant can be explicitly written out,
+ // giving an equation which is cubic in µ and can be solved analytically.
+ // The conic µQ + R is degenerate if and only if this determinant is 0.
+ //
+ // Proof that there's always a degenerate conic: a cubic real polynomial always has a root,
+ // and if the polynomial in µ isn't cubic (coefficient of µ^3 is zero), then the starting
+ // conic is already degenerate.
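+ //
+ // As a sketch of the expansion (matching the code below): writing out the
+ // determinant of the matrix with entries aa = µA + a, bb = µB + b, ..., ff = µF + f
+ // gives a cubic polynomial in µ,
+ // det = I*µ^3 + J*µ^2 + K*µ + L,
+ // where I is the determinant of the 3x3 matrix built from A..F alone, L is the
+ // determinant of the matrix built from a..f alone, and J, K mix the two sets of
+ // coefficients. These are exactly the values passed to solve_cubic() below.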
+
+ Coord I, J, K, L; // Coefficients of µ in the expression for the determinant.
+ I = (-B*B*F + 4*A*C*F + D*E*B - A*E*E - C*D*D) / 4;
+ J = -((B*B - 4*A*C) * f + (2*B*F - D*E) * b + (2*A*E - D*B) * e +
+ (2*C*D - E*B) * d + (D*D - 4*A*F) * c + (E*E - 4*C*F) * a) / 4;
+ K = -((b*b - 4*a*c) * F + (2*b*f - d*e) * B + (2*a*e - d*b) * E +
+ (2*c*d - e*b) * D + (d*d - 4*a*f) * C + (e*e - 4*c*f) * A) / 4;
+ L = (-b*b*f + 4*a*c*f + d*e*b - a*e*e - c*d*d) / 4;
+
+ std::vector<Coord> mus = solve_cubic(I, J, K, L);
+ Coord mu = infinity();
+
+ // Now that we have solved for µ, we need to check whether the conic
+ // determined by µQ + R is reducible to a product of two lines. If it's not,
+ // it means that there are no intersections. If it is, the intersections of these
+ // lines with the original ellipses (if there are any) give the coordinates
+ // of intersections.
+
+ // Prefer middle root if there are three.
+ // Out of three possible pairs of lines that go through four points of intersection
+ // of two ellipses, this corresponds to cross-lines. These intersect the ellipses
+ // at less shallow angles than the other two options.
+ if (mus.size() == 3) {
+ std::swap(mus[1], mus[0]);
+ }
+ /// Discriminant within this radius of 0 will be considered zero.
+ static Coord const discriminant_precision = 1e-10;
+
+ for (Coord candidate_mu : mus) {
+ Coord const aa = std::fma(candidate_mu, A, a);
+ Coord const bb = std::fma(candidate_mu, B, b);
+ Coord const cc = std::fma(candidate_mu, C, c);
+ Coord const delta = sqr(bb) - 4*aa*cc;
+ if (delta < -discriminant_precision) {
+ continue;
+ }
+ mu = candidate_mu;
+ break;
+ }
+
+ // if no suitable mu was found, there are no intersections
+ if (mu == infinity()) {
+ return {};
+ }
+
+ // Create the degenerate conic and decompose it into lines.
+ std::array<double, 6> degen = {std::fma(mu, A, a), std::fma(mu, B, b), std::fma(mu, C, c),
+ std::fma(mu, D, d), std::fma(mu, E, e), std::fma(mu, F, f)};
+ rescale_homogenous(degen);
+ auto const lines = xAx(degen[0], degen[1], degen[2],
+ degen[3], degen[4], degen[5]).decompose_df(discriminant_precision);
+
+ // intersect with the obtained lines and report intersections
+ std::vector<ShapeIntersection> result;
+ for (auto const &line : lines) {
+ if (line.isDegenerate()) {
+ continue;
+ }
+ auto as = intersect(line);
+ // NOTE: If we only cared about the intersection points, we could simply
+ // intersect this ellipse with the lines and ignore the other ellipse.
+ // But we need the time coordinates on the other ellipse as well.
+ auto bs = other.intersect(line);
+ if (as.empty() || bs.empty()) {
+ continue;
+ }
+ // Due to numerical errors, a tangency may sometimes be found as 1 intersection
+ // on one ellipse and 2 intersections on the other. If this happens, we average
+ // the points of the two intersections.
+ auto const intersection_average = [](ShapeIntersection const &i,
+ ShapeIntersection const &j) -> ShapeIntersection
+ {
+ return ShapeIntersection(i.first, j.first, middle_point(i.point(), j.point()));
+ };
+ auto const synthesize_intersection = [&](ShapeIntersection const &i,
+ ShapeIntersection const &j) -> void
+ {
+ result.emplace_back(i.first, j.first, middle_point(i.point(), j.point()));
+ };
+ if (as.size() == 2) {
+ if (bs.size() == 2) {
+ synthesize_intersection(as[0], bs[0]);
+ synthesize_intersection(as[1], bs[1]);
+ } else if (bs.size() == 1) {
+ synthesize_intersection(intersection_average(as[0], as[1]), bs[0]);
+ }
+ } else if (as.size() == 1) {
+ if (bs.size() == 2) {
+ synthesize_intersection(as[0], intersection_average(bs[0], bs[1]));
+ } else if (bs.size() == 1) {
+ synthesize_intersection(as[0], bs[0]);
+ }
+ }
+ }
+ return result;
+}
+
+std::vector<ShapeIntersection> Ellipse::intersect(D2<Bezier> const &b) const
+{
+ Coord A, B, C, D, E, F;
+ coefficients(A, B, C, D, E, F);
+
+ // We plug the X and Y curves into the implicit equation and solve for t.
+ Bezier x = A*b[X]*b[X] + B*b[X]*b[Y] + C*b[Y]*b[Y] + D*b[X] + E*b[Y] + F;
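+ // For an input curve of degree n this polynomial has degree 2n, so e.g. a cubic
+ // Bezier segment meets the ellipse in at most 6 points (consistent with Bezout's theorem).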
+ std::vector<Coord> r = x.roots();
+
+ std::vector<ShapeIntersection> result;
+ for (double & i : r) {
+ Point p = b.valueAt(i);
+ result.emplace_back(timeAt(p), i, p);
+ }
+ return result;
+}
+
+bool Ellipse::operator==(Ellipse const &other) const
+{
+ if (_center != other._center) return false;
+
+ Ellipse a = this->canonicalForm();
+ Ellipse b = other.canonicalForm();
+
+ if (a._rays != b._rays) return false;
+ if (a._angle != b._angle) return false;
+
+ return true;
+}
+
+
+bool are_near(Ellipse const &a, Ellipse const &b, Coord precision)
+{
+ // We want to know whether no point on ellipse a is further than precision
+ // from the corresponding point on ellipse b. To check this, we compute
+ // the four extreme points at the end of each ray for each ellipse
+ // and check whether they are sufficiently close.
+
+ // First, we need to correct the angles on the ellipses, so that they are
+ // no further than M_PI/4 apart. This can always be done by rotating
+ // and exchanging axes.
+ Ellipse ac = a, bc = b;
+ if (distance(ac.rotationAngle(), bc.rotationAngle()).radians0() >= M_PI/2) {
+ ac.setRotationAngle(ac.rotationAngle() + M_PI);
+ }
+ if (distance(ac.rotationAngle(), bc.rotationAngle()) >= M_PI/4) {
+ Angle d1 = distance(ac.rotationAngle() + M_PI/2, bc.rotationAngle());
+ Angle d2 = distance(ac.rotationAngle() - M_PI/2, bc.rotationAngle());
+ Coord adj = d1.radians0() < d2.radians0() ? M_PI/2 : -M_PI/2;
+ ac.setRotationAngle(ac.rotationAngle() + adj);
+ ac.setRays(ac.ray(Y), ac.ray(X));
+ }
+
+ // Do the actual comparison by computing four points on each ellipse.
+ Point tps[] = {Point(1,0), Point(0,1), Point(-1,0), Point(0,-1)};
+ for (auto & tp : tps) {
+ if (!are_near(tp * ac.unitCircleTransform(),
+ tp * bc.unitCircleTransform(),
+ precision))
+ return false;
+ }
+ return true;
+}
+
+std::ostream &operator<<(std::ostream &out, Ellipse const &e)
+{
+ out << "Ellipse(" << e.center() << ", " << e.rays()
+ << ", " << format_coord_nice(e.rotationAngle()) << ")";
+ return out;
+}
+
+} // end namespace Geom
+
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
+
+
diff --git a/src/2geom/elliptical-arc-from-sbasis.cpp b/src/2geom/elliptical-arc-from-sbasis.cpp
new file mode 100644
index 0000000..c536d89
--- /dev/null
+++ b/src/2geom/elliptical-arc-from-sbasis.cpp
@@ -0,0 +1,341 @@
+/** @file
+ * @brief Fitting elliptical arc to SBasis
+ *
+ * This file contains the implementation of the function arc_from_sbasis.
+ *//*
+ * Copyright 2008 Marco Cecchetti <mrcekets at gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+#include <2geom/curve.h>
+#include <2geom/angle.h>
+#include <2geom/utils.h>
+#include <2geom/bezier-curve.h>
+#include <2geom/elliptical-arc.h>
+#include <2geom/sbasis-curve.h> // for non-native methods
+#include <2geom/numeric/vector.h>
+#include <2geom/numeric/fitting-tool.h>
+#include <2geom/numeric/fitting-model.h>
+#include <algorithm>
+
+namespace Geom {
+
+// forward declaration
+namespace detail
+{
+ struct ellipse_equation;
+}
+
+/*
+ * make_elliptical_arc
+ *
+ * convert a parametric polynomial curve, given in symmetric power basis form,
+ * into an EllipticalArc type; for the conversion to succeed, the input curve
+ * has to look like an actual elliptical arc, up to a tolerance controlled
+ * by an ad-hoc parameter.
+ * The conversion is performed by interpolating a number of sample points
+ * computed on the input curve;
+ * the interpolation yields the coefficients of the general implicit equation
+ * of an ellipse (A*X^2 + B*XY + C*Y^2 + D*X + E*Y + F = 0), and from the
+ * implicit equation we then compute the parametric form.
+ *
+ */
+class make_elliptical_arc
+{
+ public:
+ typedef D2<SBasis> curve_type;
+
+ /*
+ * constructor
+ *
+ * it doesn't execute the conversion but sets the input and output parameters
+ *
+ * _ea: the output EllipticalArc that will be generated;
+ * _curve: the input curve to be converted;
+ * _total_samples: the number of sample points to be taken
+ * on the input curve for performing the conversion
+ * _tolerance: how closely the generated elliptical arc is required
+ * to match the input curve; the smaller the tolerance,
+ * the closer the required match.
+ */
+ make_elliptical_arc( EllipticalArc& _ea,
+ curve_type const& _curve,
+ unsigned int _total_samples,
+ double _tolerance );
+
+ private:
+ bool bound_exceeded( unsigned int k, detail::ellipse_equation const & ee,
+ double e1x, double e1y, double e2 );
+
+ bool check_bound(double A, double B, double C, double D, double E, double F);
+
+ void fit();
+
+ bool make_elliptiarc();
+
+ void print_bound_error(unsigned int k)
+ {
+ std::cerr
+ << "tolerance error" << std::endl
+ << "at point: " << k << std::endl
+ << "error value: "<< dist_err << std::endl
+ << "bound: " << dist_bound << std::endl
+ << "angle error: " << angle_err
+ << " (" << angle_tol << ")" << std::endl;
+ }
+
+ public:
+ /*
+ * perform the actual conversion
+ * return true if the conversion is successful, false otherwise
+ */
+ bool operator()()
+ {
+ // initialize the reference
+ const NL::Vector & coeff = fitter.result();
+ fit();
+ if ( !check_bound(1, coeff[0], coeff[1], coeff[2], coeff[3], coeff[4]) )
+ return false;
+ if ( !(make_elliptiarc()) ) return false;
+ return true;
+ }
+
+ private:
+ EllipticalArc& ea; // output elliptical arc
+ const curve_type & curve; // input curve
+ Piecewise<D2<SBasis> > dcurve; // derivative of the input curve
+ NL::LFMEllipse model; // model used for fitting
+ // perform the actual fitting task
+ NL::least_squeares_fitter<NL::LFMEllipse> fitter;
+ // tolerance: the user-defined tolerance parameter;
+ // tol_at_extr: the tolerance at end-points automatically computed
+ // on the value of "tolerance", and usually more strict;
+ // tol_at_center: tolerance at the center of the ellipse
+ // angle_tol: tolerance for the angle btw the input curve tangent
+ // versor and the ellipse normal versor at the sample points
+ double tolerance, tol_at_extr, tol_at_center, angle_tol;
+ Point initial_point, final_point; // initial and final end-points
+ unsigned int N; // total samples
+ unsigned int last; // N-1
+ double partitions; // N-1
+ std::vector<Point> p; // sample points
+ double dist_err, dist_bound, angle_err;
+};
+
+namespace detail
+{
+/*
+ * ellipse_equation
+ *
+ * this is a helper struct; it provides two routines:
+ * the first one evaluates the implicit form of an ellipse on a given point
+ * the second one computes the normal versor at a given point of an ellipse
+ * in implicit form
+ */
+struct ellipse_equation
+{
+ ellipse_equation(double a, double b, double c, double d, double e, double f)
+ : A(a), B(b), C(c), D(d), E(e), F(f)
+ {
+ }
+
+ double operator()(double x, double y) const
+ {
+ // A * x * x + B * x * y + C * y * y + D * x + E * y + F;
+ return (A * x + B * y + D) * x + (C * y + E) * y + F;
+ }
+
+ double operator()(Point const& p) const
+ {
+ return (*this)(p[X], p[Y]);
+ }
+
+ Point normal(double x, double y) const
+ {
+ Point n( 2 * A * x + B * y + D, 2 * C * y + B * x + E );
+ return unit_vector(n);
+ }
+
+ Point normal(Point const& p) const
+ {
+ return normal(p[X], p[Y]);
+ }
+
+ double A, B, C, D, E, F;
+};
+
+} // end namespace detail
+
+make_elliptical_arc::
+make_elliptical_arc( EllipticalArc& _ea,
+ curve_type const& _curve,
+ unsigned int _total_samples,
+ double _tolerance )
+ : ea(_ea), curve(_curve),
+ dcurve( unitVector(derivative(curve)) ),
+ model(), fitter(model, _total_samples),
+ tolerance(_tolerance), tol_at_extr(tolerance/2),
+ tol_at_center(0.1), angle_tol(0.1),
+ initial_point(curve.at0()), final_point(curve.at1()),
+ N(_total_samples), last(N-1), partitions(N-1), p(N)
+{
+}
+
+/*
+ * check that the coefficients computed by the fit method satisfy
+ * the tolerance parameters at the k-th sample point
+ */
+bool
+make_elliptical_arc::
+bound_exceeded( unsigned int k, detail::ellipse_equation const & ee,
+ double e1x, double e1y, double e2 )
+{
+ dist_err = std::fabs( ee(p[k]) );
+ dist_bound = std::fabs( e1x * p[k][X] + e1y * p[k][Y] + e2 );
+ // check that the tangent versor to the input curve and the normal
+ // versor of the elliptical arc, both evaluated at the k-th sample
+ // point, are really orthogonal
+ angle_err = std::fabs( dot( dcurve(k/partitions), ee.normal(p[k]) ) );
+ //angle_err *= angle_err;
+ return ( dist_err > dist_bound || angle_err > angle_tol );
+}
+
+/*
+ * check that the coefficients computed by the fit method satisfy
+ * the tolerance parameters at each sample point
+ */
+bool
+make_elliptical_arc::
+check_bound(double A, double B, double C, double D, double E, double F)
+{
+ detail::ellipse_equation ee(A, B, C, D, E, F);
+
+ // check error magnitude at the end-points
+ double e1x = (2*A + B) * tol_at_extr;
+ double e1y = (B + 2*C) * tol_at_extr;
+ double e2 = ((D + E) + (A + B + C) * tol_at_extr) * tol_at_extr;
+ if (bound_exceeded(0, ee, e1x, e1y, e2))
+ {
+ print_bound_error(0);
+ return false;
+ }
+ if (bound_exceeded(last, ee, e1x, e1y, e2))
+ {
+ print_bound_error(last);
+ return false;
+ }
+
+ // e1x = derivative((ee(x,y), x) | x->tolerance, y->tolerance
+ e1x = (2*A + B) * tolerance;
+ // e1y = derivative((ee(x,y), y) | x->tolerance, y->tolerance
+ e1y = (B + 2*C) * tolerance;
+ // e2 = ee(tolerance, tolerance) - F;
+ e2 = ((D + E) + (A + B + C) * tolerance) * tolerance;
+// std::cerr << "e1x = " << e1x << std::endl;
+// std::cerr << "e1y = " << e1y << std::endl;
+// std::cerr << "e2 = " << e2 << std::endl;
+
+ // check error magnitude at sample points
+ for ( unsigned int k = 1; k < last; ++k )
+ {
+ if ( bound_exceeded(k, ee, e1x, e1y, e2) )
+ {
+ print_bound_error(k);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/*
+ * fit
+ *
+ * supply the samples to the fitter and compute
+ * the ellipse implicit equation coefficients
+ */
+void make_elliptical_arc::fit()
+{
+ for (unsigned int k = 0; k < N; ++k)
+ {
+ p[k] = curve( k / partitions );
+ fitter.append(p[k]);
+ }
+ fitter.update();
+
+ NL::Vector z(N, 0.0);
+ fitter.result(z);
+}
+
+bool make_elliptical_arc::make_elliptiarc()
+{
+ const NL::Vector & coeff = fitter.result();
+ Ellipse e;
+ try
+ {
+ e.setCoefficients(1, coeff[0], coeff[1], coeff[2], coeff[3], coeff[4]);
+ }
+ catch(LogicalError const &exc)
+ {
+ return false;
+ }
+
+ Point inner_point = curve(0.5);
+
+ std::unique_ptr<EllipticalArc> arc( e.arc(initial_point, inner_point, final_point) );
+ ea = *arc;
+
+ if ( !are_near( e.center(),
+ ea.center(),
+ tol_at_center * std::min(e.ray(X),e.ray(Y))
+ )
+ )
+ {
+ return false;
+ }
+ return true;
+}
+
+
+
+bool arc_from_sbasis(EllipticalArc &ea, D2<SBasis> const &in,
+ double tolerance, unsigned num_samples)
+{
+ make_elliptical_arc convert(ea, in, num_samples, tolerance);
+ return convert();
+}
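+
+// A minimal usage sketch (illustrative only; `curve` and the tolerance/sample
+// values are assumptions, not taken from the library):
+//
+// EllipticalArc arc;
+// D2<SBasis> curve = ...; // some curve that already looks like an elliptical arc
+// if (arc_from_sbasis(arc, curve, 0.1, 8)) {
+// // `arc` now approximates `curve` within the requested tolerance.
+// }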
+
+} // end namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/elliptical-arc.cpp b/src/2geom/elliptical-arc.cpp
new file mode 100644
index 0000000..63e534c
--- /dev/null
+++ b/src/2geom/elliptical-arc.cpp
@@ -0,0 +1,1045 @@
+/*
+ * SVG Elliptical Arc Class
+ *
+ * Authors:
+ * Marco Cecchetti <mrcekets at gmail.com>
+ * Krzysztof Kosiński <tweenk.pl@gmail.com>
+ * Copyright 2008-2009 Authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+#include <cfloat>
+#include <limits>
+#include <memory>
+
+#include <2geom/bezier-curve.h>
+#include <2geom/ellipse.h>
+#include <2geom/elliptical-arc.h>
+#include <2geom/path-sink.h>
+#include <2geom/sbasis-geometric.h>
+#include <2geom/transforms.h>
+#include <2geom/utils.h>
+
+#include <2geom/numeric/vector.h>
+#include <2geom/numeric/fitting-tool.h>
+#include <2geom/numeric/fitting-model.h>
+
+namespace Geom
+{
+
+/**
+ * @class EllipticalArc
+ * @brief Elliptical arc curve
+ *
+ * Elliptical arc is a curve taking the shape of a section of an ellipse.
+ *
+ * The arc function has two forms: the regular one, mapping the unit interval to points
+ * on 2D plane (the linear domain), and a second form that maps some interval
+ * \f$A \subseteq [0,2\pi)\f$ to the same points (the angular domain). The interval \f$A\f$
+ * determines which part of the ellipse forms the arc. The arc is said to contain an angle
+ * if its angular domain includes that angle (and therefore it is defined for that angle).
+ *
+ * The angular domain considers each ellipse to be
+ * a rotated, scaled and translated unit circle: 0 corresponds to \f$(1,0)\f$ on the unit circle,
+ * \f$\pi/2\f$ corresponds to \f$(0,1)\f$, \f$\pi\f$ to \f$(-1,0)\f$ and \f$3\pi/2\f$
+ * to \f$(0,-1)\f$. After the angle is mapped to a point from a unit circle, the point is
+ * transformed using a matrix of this form
+ * \f[ M = \left[ \begin{array}{ccc}
+ r_X \cos(\theta) & -r_Y \sin(\theta) & 0 \\
+ r_X \sin(\theta) & r_Y \cos(\theta) & 0 \\
+ c_X & c_Y & 1 \end{array} \right] \f]
+ * where \f$r_X, r_Y\f$ are the X and Y rays of the ellipse, \f$\theta\f$ is its angle of rotation,
+ * and \f$c_X, c_Y\f$ the coordinates of the ellipse's center - thus mapping the angle
+ * to some point on the ellipse. Note that for example the point at angular coordinate 0,
+ * the center and the point at angular coordinate \f$\pi/4\f$ do not necessarily
+ * create an angle of \f$\pi/4\f$ radians; it is only the case if both axes of the ellipse
+ * are of the same length (i.e. it is a circle).
+ *
+ * @image html ellipse-angular-coordinates.png "An illustration of the angular domain"
+ *
+ * Each arc is defined by five variables: The initial and final point, the ellipse's rays,
+ * and the ellipse's rotation. Each set of those parameters corresponds to four different arcs,
+ * with two of them larger than half an ellipse and two of them turning clockwise while traveling
+ * from initial to final point. The two flags disambiguate between them: "large arc flag" selects
+ * the bigger arc, while the "sweep flag" selects the arc going in the direction of positive
+ * angles. Angles always increase when going from the +X axis in the direction of the +Y axis,
+ * so if Y grows downwards, this means clockwise.
+ *
+ * @image html elliptical-arc-flags.png "Meaning of arc flags (Y grows downwards)"
+ *
+ * @ingroup Curves
+ */
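+
+// An illustrative example of the angular parametrization (numbers chosen here,
+// not part of the original documentation): on an ellipse with rays r_X = 2,
+// r_Y = 1, rotation 0 and center (5, 5), the angle pi/2 maps to the point (5, 6)
+// and the angle pi maps to the point (3, 5).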
+
+
+/** @brief Compute bounds of an elliptical arc.
+ * The bounds computation works as follows. The extreme X and Y points
+ * are either the endpoints or local minima / maxima of the ellipse.
+ * We already have endpoints, and we compute the local extremes.
+ * The local extremes correspond to two angles separated by \f$\pi\f$.
+ * Once we compute these angles, we check whether they belong to the arc,
+ * and if they do, we evaluate the ellipse at these angles.
+ * The bounding box of the arc is equal to the bounding box of the endpoints
+ * and the local extrema that belong to the arc.
+ */
+Rect EllipticalArc::boundsExact() const
+{
+ if (isChord()) {
+ return { _initial_point, _final_point };
+ }
+
+ if (_angles.isFull()) {
+ return _ellipse.boundsExact();
+ }
+
+ auto const trans = unitCircleTransform();
+
+ auto proj_bounds = [&] (Dim2 d) {
+ // The dth coordinate function pulls back to trans[d] * x + trans[d + 2] * y + trans[d + 4]
+ // in the coordinate system where the ellipse is a unit circle. We compute its range of
+ // values on the unit circle arc.
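+ // On the unit circle, trans[d]*x + trans[d+2]*y ranges over [-r, r] where
+ // r = |(trans[d], trans[d+2])|; the maximum is attained at the angle of that
+ // vector and the minimum at the opposite angle, which is what the checks
+ // against _angles below rely on.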
+ auto result = Interval(_initial_point[d], _final_point[d]);
+
+ auto const v = Point(trans[d], trans[d + 2]);
+ auto const r = v.length();
+ auto const mid = trans[d + 4];
+ auto const angle = Angle(v);
+
+ if (_angles.contains(angle)) {
+ result.expandTo(mid + r);
+ }
+ if (_angles.contains(angle + M_PI)) {
+ result.expandTo(mid - r);
+ }
+
+ return result;
+ };
+
+ return { proj_bounds(X), proj_bounds(Y) };
+}
+
+void EllipticalArc::expandToTransformed(Rect &bbox, Affine const &transform) const
+{
+ bbox.expandTo(_final_point * transform);
+
+ if (isChord() || bbox.contains(_ellipse.boundsFast())) {
+ return;
+ }
+
+ auto const trans = unitCircleTransform() * transform;
+
+ for (auto d : { X, Y }) {
+ // See boundsExact() for explanation.
+ auto const v = Point(trans[d], trans[d + 2]);
+ auto const r = v.length();
+ auto const mid = trans[d + 4];
+
+ if (_angles.isFull()) {
+ bbox[d].unionWith(Interval(mid - r, mid + r));
+ } else {
+ auto const angle = Angle(v);
+ if (_angles.contains(angle)) {
+ bbox[d].expandTo(mid + r);
+ }
+ if (_angles.contains(angle + M_PI)) {
+ bbox[d].expandTo(mid - r);
+ }
+ }
+ }
+}
+
+Point EllipticalArc::pointAtAngle(Coord t) const
+{
+ Point ret = _ellipse.pointAt(t);
+ return ret;
+}
+
+Coord EllipticalArc::valueAtAngle(Coord t, Dim2 d) const
+{
+ return _ellipse.valueAt(t, d);
+}
+
+std::vector<Coord> EllipticalArc::roots(Coord v, Dim2 d) const
+{
+ std::vector<Coord> sol;
+
+ if (isChord()) {
+ sol = chord().roots(v, d);
+ return sol;
+ }
+
+ Interval unit_interval(0, 1);
+
+ double rotx, roty;
+ if (d == X) {
+ sincos(rotationAngle(), roty, rotx);
+ roty = -roty;
+ } else {
+ sincos(rotationAngle(), rotx, roty);
+ }
+
+ double rxrotx = ray(X) * rotx;
+ double c_v = center(d) - v;
+
+ double a = -rxrotx + c_v;
+ double b = ray(Y) * roty;
+ double c = rxrotx + c_v;
+ //std::cerr << "a = " << a << std::endl;
+ //std::cerr << "b = " << b << std::endl;
+ //std::cerr << "c = " << c << std::endl;
+
+ if (a == 0)
+ {
+ sol.push_back(M_PI);
+ if (b != 0)
+ {
+ double s = 2 * std::atan(-c/(2*b));
+ if ( s < 0 ) s += 2*M_PI;
+ sol.push_back(s);
+ }
+ }
+ else
+ {
+ double delta = b * b - a * c;
+ //std::cerr << "delta = " << delta << std::endl;
+ if (delta == 0) {
+ double s = 2 * std::atan(-b/a);
+ if ( s < 0 ) s += 2*M_PI;
+ sol.push_back(s);
+ }
+ else if ( delta > 0 )
+ {
+ double sq = std::sqrt(delta);
+ double s = 2 * std::atan( (-b - sq) / a );
+ if ( s < 0 ) s += 2*M_PI;
+ sol.push_back(s);
+ s = 2 * std::atan( (-b + sq) / a );
+ if ( s < 0 ) s += 2*M_PI;
+ sol.push_back(s);
+ }
+ }
+
+ std::vector<double> arc_sol;
+ for (double & i : sol) {
+ //std::cerr << "s = " << deg_from_rad(sol[i]);
+ i = timeAtAngle(i);
+ //std::cerr << " -> t: " << sol[i] << std::endl;
+ if (unit_interval.contains(i)) {
+ arc_sol.push_back(i);
+ }
+ }
+ return arc_sol;
+}
+
+
+// D(E(t,C),t) = E(t+PI/2,O), where C is the ellipse center and O is the origin:
+// the derivative doesn't rotate the ellipse, but the parameter t is translated
+// by an angle of PI/2, so the ellipse points are shifted by that angle
+// (clockwise when Y grows downwards).
+Curve *EllipticalArc::derivative() const
+{
+ if (isChord()) {
+ return chord().derivative();
+ }
+
+ EllipticalArc *result = static_cast<EllipticalArc*>(duplicate());
+ result->_ellipse.setCenter(0, 0);
+ result->_angles.setInitial(result->_angles.initialAngle() + M_PI/2);
+ result->_angles.setFinal(result->_angles.finalAngle() + M_PI/2);
+ result->_initial_point = result->pointAtAngle( result->initialAngle() );
+ result->_final_point = result->pointAtAngle( result->finalAngle() );
+ return result;
+}
+
+
+std::vector<Point>
+EllipticalArc::pointAndDerivatives(Coord t, unsigned int n) const
+{
+ if (isChord()) {
+ return chord().pointAndDerivatives(t, n);
+ }
+
+ unsigned int nn = n+1; // nn represents the size of the result vector.
+ std::vector<Point> result;
+ result.reserve(nn);
+ double angle = angleAt(t);
+ std::unique_ptr<EllipticalArc> ea( static_cast<EllipticalArc*>(duplicate()) );
+ ea->_ellipse.setCenter(0, 0);
+ unsigned int m = std::min(nn, 4u);
+ for ( unsigned int i = 0; i < m; ++i )
+ {
+ result.push_back( ea->pointAtAngle(angle) );
+ angle += (sweep() ? M_PI/2 : -M_PI/2);
+ if ( !(angle < 2*M_PI) ) angle -= 2*M_PI;
+ }
+ m = nn / 4;
+ for ( unsigned int i = 1; i < m; ++i )
+ {
+ for ( unsigned int j = 0; j < 4; ++j )
+ result.push_back( result[j] );
+ }
+ m = nn - 4 * m;
+ for ( unsigned int i = 0; i < m; ++i )
+ {
+ result.push_back( result[i] );
+ }
+ if ( !result.empty() ) // nn != 0
+ result[0] = pointAtAngle(angle);
+ return result;
+}
+
+Point EllipticalArc::pointAt(Coord t) const
+{
+ if (t == 0.0) {
+ return initialPoint();
+ }
+ if (t == 1.0) {
+ return finalPoint();
+ }
+ if (isChord()) {
+ return chord().pointAt(t);
+ }
+ return _ellipse.pointAt(angleAt(t));
+}
+
+Coord EllipticalArc::valueAt(Coord t, Dim2 d) const
+{
+ if (isChord()) return chord().valueAt(t, d);
+ return valueAtAngle(angleAt(t), d);
+}
+
+Curve* EllipticalArc::portion(double f, double t) const
+{
+ // fix input arguments
+ f = std::clamp(f, 0.0, 1.0);
+ t = std::clamp(t, 0.0, 1.0);
+
+ if (f == t) {
+ EllipticalArc *arc = new EllipticalArc();
+ arc->_initial_point = arc->_final_point = pointAt(f);
+ return arc;
+ }
+ if (f == 0.0 && t == 1.0) {
+ return duplicate();
+ }
+ if (f == 1.0 && t == 0.0) {
+ return reverse();
+ }
+
+ EllipticalArc *arc = static_cast<EllipticalArc*>(duplicate());
+ arc->_initial_point = pointAt(f);
+ arc->_final_point = pointAt(t);
+ arc->_angles.setAngles(angleAt(f), angleAt(t));
+ if (f > t) arc->_angles.setSweep(!sweep());
+ if ( _large_arc && fabs(angularExtent() * (t-f)) <= M_PI) {
+ arc->_large_arc = false;
+ }
+ return arc;
+}
+
+// the arc is the same but traversed in the opposite direction
+Curve *EllipticalArc::reverse() const
+{
+ using std::swap;
+ EllipticalArc *rarc = static_cast<EllipticalArc*>(duplicate());
+ rarc->_angles.reverse();
+ swap(rarc->_initial_point, rarc->_final_point);
+ return rarc;
+}
+
+#ifdef HAVE_GSL // GSL is required for function "solve_reals"
+std::vector<double> EllipticalArc::allNearestTimes( Point const& p, double from, double to ) const
+{
+ std::vector<double> result;
+
+ if ( from > to ) std::swap(from, to);
+ if ( from < 0 || to > 1 )
+ {
+ THROW_RANGEERROR("[from,to] interval out of range");
+ }
+
+ if ( ( are_near(ray(X), 0) && are_near(ray(Y), 0) ) || are_near(from, to) )
+ {
+ result.push_back(from);
+ return result;
+ }
+ else if ( are_near(ray(X), 0) || are_near(ray(Y), 0) )
+ {
+ LineSegment seg(pointAt(from), pointAt(to));
+ Point np = seg.pointAt( seg.nearestTime(p) );
+ if ( are_near(ray(Y), 0) )
+ {
+ if ( are_near(rotationAngle(), M_PI/2)
+ || are_near(rotationAngle(), 3*M_PI/2) )
+ {
+ result = roots(np[Y], Y);
+ }
+ else
+ {
+ result = roots(np[X], X);
+ }
+ }
+ else
+ {
+ if ( are_near(rotationAngle(), M_PI/2)
+ || are_near(rotationAngle(), 3*M_PI/2) )
+ {
+ result = roots(np[X], X);
+ }
+ else
+ {
+ result = roots(np[Y], Y);
+ }
+ }
+ return result;
+ }
+ else if ( are_near(ray(X), ray(Y)) )
+ {
+ Point r = p - center();
+ if ( are_near(r, Point(0,0)) )
+ {
+ THROW_INFINITESOLUTIONS(0);
+ }
+ // TODO: implement case r != 0
+// Point np = ray(X) * unit_vector(r);
+// std::vector<double> solX = roots(np[X],X);
+// std::vector<double> solY = roots(np[Y],Y);
+// double t;
+// if ( are_near(solX[0], solY[0]) || are_near(solX[0], solY[1]))
+// {
+// t = solX[0];
+// }
+// else
+// {
+// t = solX[1];
+// }
+// if ( !(t < from || t > to) )
+// {
+// result.push_back(t);
+// }
+// else
+// {
+//
+// }
+ }
+
+ // solve the equation <D(E(t),t)|E(t)-p> == 0
+ // that provides min and max distance points
+ // on the ellipse E wrt the point p
+ // after the substitutions:
+ // cos(t) = (1 - s^2) / (1 + s^2)
+ // sin(t) = 2s / (1 + s^2)
+ // where s = tan(t/2)
+ // we get a 4th degree equation in s
+ /*
+ * ry s^4 ((-cy + py) Cos[Phi] + (cx - px) Sin[Phi]) +
+ * ry ((cy - py) Cos[Phi] + (-cx + px) Sin[Phi]) +
+ * 2 s^3 (rx^2 - ry^2 + (-cx + px) rx Cos[Phi] + (-cy + py) rx Sin[Phi]) +
+ * 2 s (-rx^2 + ry^2 + (-cx + px) rx Cos[Phi] + (-cy + py) rx Sin[Phi])
+ */
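+ // Note that the s^0 and s^4 coefficients of this polynomial differ only in sign,
+ // which is why coeff[0] is set to -coeff[4] below.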
+
+ Point p_c = p - center();
+ double rx2_ry2 = (ray(X) - ray(Y)) * (ray(X) + ray(Y));
+ double sinrot, cosrot;
+ sincos(rotationAngle(), sinrot, cosrot);
+ double expr1 = ray(X) * (p_c[X] * cosrot + p_c[Y] * sinrot);
+ Poly coeff;
+ coeff.resize(5);
+ coeff[4] = ray(Y) * ( p_c[Y] * cosrot - p_c[X] * sinrot );
+ coeff[3] = 2 * ( rx2_ry2 + expr1 );
+ coeff[2] = 0;
+ coeff[1] = 2 * ( -rx2_ry2 + expr1 );
+ coeff[0] = -coeff[4];
+
+// for ( unsigned int i = 0; i < 5; ++i )
+// std::cerr << "c[" << i << "] = " << coeff[i] << std::endl;
+
+ std::vector<double> real_sol;
+ // gsl_poly_complex_solve raises an error
+ // if the leading coefficient is zero
+ if ( are_near(coeff[4], 0) )
+ {
+ real_sol.push_back(0);
+ if ( !are_near(coeff[3], 0) )
+ {
+ double sq = -coeff[1] / coeff[3];
+ if ( sq > 0 )
+ {
+ double s = std::sqrt(sq);
+ real_sol.push_back(s);
+ real_sol.push_back(-s);
+ }
+ }
+ }
+ else
+ {
+ real_sol = solve_reals(coeff);
+ }
+
+ for (double & i : real_sol)
+ {
+ i = 2 * std::atan(i);
+ if ( i < 0 ) i += 2*M_PI;
+ }
+ // when s -> Infinity then <D(E)|E-p> -> 0 iff coeff[4] == 0
+ // so we add M_PI to the solutions being lim arctan(s) = PI when s->Infinity
+ if ( (real_sol.size() % 2) != 0 )
+ {
+ real_sol.push_back(M_PI);
+ }
+
+ double mindistsq1 = std::numeric_limits<double>::max();
+ double mindistsq2 = std::numeric_limits<double>::max();
+ double dsq = 0;
+ unsigned int mi1 = 0, mi2 = 0;
+ for ( unsigned int i = 0; i < real_sol.size(); ++i )
+ {
+ dsq = distanceSq(p, pointAtAngle(real_sol[i]));
+ if ( mindistsq1 > dsq )
+ {
+ mindistsq2 = mindistsq1;
+ mi2 = mi1;
+ mindistsq1 = dsq;
+ mi1 = i;
+ }
+ else if ( mindistsq2 > dsq )
+ {
+ mindistsq2 = dsq;
+ mi2 = i;
+ }
+ }
+
+ double t = timeAtAngle(real_sol[mi1]);
+ if ( !(t < from || t > to) )
+ {
+ result.push_back(t);
+ }
+
+ bool second_sol = false;
+ t = timeAtAngle(real_sol[mi2]);
+ if ( real_sol.size() == 4 && !(t < from || t > to) )
+ {
+ if ( result.empty() || are_near(mindistsq1, mindistsq2) )
+ {
+ result.push_back(t);
+ second_sol = true;
+ }
+ }
+
+ // we need to test extreme points too
+ double dsq1 = distanceSq(p, pointAt(from));
+ double dsq2 = distanceSq(p, pointAt(to));
+ if ( second_sol )
+ {
+ if ( mindistsq2 > dsq1 )
+ {
+ result.clear();
+ result.push_back(from);
+ mindistsq2 = dsq1;
+ }
+ else if ( are_near(mindistsq2, dsq) )
+ {
+ result.push_back(from);
+ }
+ if ( mindistsq2 > dsq2 )
+ {
+ result.clear();
+ result.push_back(to);
+ }
+ else if ( are_near(mindistsq2, dsq2) )
+ {
+ result.push_back(to);
+ }
+
+ }
+ else
+ {
+ if ( result.empty() )
+ {
+ if ( are_near(dsq1, dsq2) )
+ {
+ result.push_back(from);
+ result.push_back(to);
+ }
+ else if ( dsq2 > dsq1 )
+ {
+ result.push_back(from);
+ }
+ else
+ {
+ result.push_back(to);
+ }
+ }
+ }
+
+ return result;
+}
+#endif
+
+/** @brief Convert the passed intersections to curve time parametrization
+ * and filter out any invalid intersections.
+ */
+std::vector<ShapeIntersection> EllipticalArc::_filterIntersections(std::vector<ShapeIntersection> &&xs,
+ bool is_first) const
+{
+ std::vector<ShapeIntersection> result;
+ result.reserve(xs.size());
+ for (auto &xing : xs) {
+ if (_validateIntersection(xing, is_first)) {
+ result.emplace_back(std::move(xing));
+ }
+ }
+ return result;
+}
+
+/** @brief Convert the passed intersection to curve time and check whether the intersection
+ * is numerically sane.
+ *
+ * @param xing The intersection to convert to curve time and to validate.
+ * @param is_first If true, this arc is the first of the intersected curves; if false, it's second.
+ * @return Whether the intersection is valid.
+ *
+ * Note that the intersection is guaranteed to be converted only if the return value is true.
+ */
+bool EllipticalArc::_validateIntersection(ShapeIntersection &xing, bool is_first) const
+{
+ static auto const UNIT_INTERVAL = Interval(0, 1);
+ constexpr auto EPS = 1e-4;
+
+ Coord &t = is_first ? xing.first : xing.second;
+ if (!are_near_rel(_ellipse.pointAt(t), xing.point(), EPS)) {
+ return false;
+ }
+
+ t = timeAtAngle(t);
+ if (!UNIT_INTERVAL.contains(t)) {
+ return false;
+ }
+ if (!are_near_rel(pointAt(t), xing.point(), EPS)) {
+ return false;
+ }
+ return true;
+}
+
+std::vector<CurveIntersection> EllipticalArc::intersect(Curve const &other, Coord eps) const
+{
+ if (isLineSegment()) {
+ LineSegment ls(_initial_point, _final_point);
+ return ls.intersect(other, eps);
+ }
+
+ if (other.isLineSegment()) {
+ LineSegment ls(other.initialPoint(), other.finalPoint());
+ return _filterIntersections(_ellipse.intersect(ls), true);
+ }
+
+ if (auto bez = dynamic_cast<BezierCurve const *>(&other)) {
+ return _filterIntersections(_ellipse.intersect(bez->fragment()), true);
+ }
+
+ if (auto arc = dynamic_cast<EllipticalArc const *>(&other)) {
+ std::vector<CurveIntersection> crossings;
+ try {
+ crossings = _ellipse.intersect(arc->_ellipse);
+ } catch (InfinitelyManySolutions &) {
+ // This could happen if the two arcs come from the same ellipse.
+ return _intersectSameEllipse(arc);
+ }
+ return arc->_filterIntersections(_filterIntersections(std::move(crossings), true), false);
+ }
+
+ // in case someone wants to make a custom curve type
+ auto result = other.intersect(*this, eps);
+ transpose_in_place(result);
+ return result;
+}
+
+/** @brief Check if two arcs on the same ellipse intersect/overlap.
+ *
+ * @param other Another elliptical arc on the same ellipse as this one.
+ * @return If the arcs overlap, the returned vector contains synthesized intersections
+ * at the start and end of the overlap.
+ * If the arcs do not overlap, an empty vector is returned.
+ */
+std::vector<ShapeIntersection> EllipticalArc::_intersectSameEllipse(EllipticalArc const *other) const
+{
+ assert(_ellipse == other->_ellipse);
+ auto const &other_angles = other->angularInterval();
+ std::vector<ShapeIntersection> result;
+
+ /// A closure to create an "intersection" at the prescribed angle.
+ auto const synthesize_intersection = [&](Angle angle) {
+ auto const time = timeAtAngle(angle);
+ if (result.end() == std::find_if(result.begin(), result.end(),
+ [=](ShapeIntersection const &xing) -> bool {
+ return xing.first == time;
+ }))
+ {
+ result.emplace_back(time, other->timeAtAngle(angle), _ellipse.pointAt(angle));
+ }
+ };
+
+ for (auto a : {_angles.initialAngle(), _angles.finalAngle()}) {
+ if (other_angles.contains(a)) {
+ synthesize_intersection(a);
+ }
+ }
+ for (auto a : {other_angles.initialAngle(), other_angles.finalAngle()}) {
+ if (_angles.contains(a)) {
+ synthesize_intersection(a);
+ }
+ }
+ return result;
+}
+
+void EllipticalArc::_updateCenterAndAngles()
+{
+ // See: http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes
+ Point d = initialPoint() - finalPoint();
+ Point mid = middle_point(initialPoint(), finalPoint());
+
+ auto degenerate_ellipse = [&] {
+ _ellipse = Ellipse();
+ _ellipse.setCenter(initialPoint());
+ _angles = AngleInterval();
+ _large_arc = false;
+ };
+
+ // if ip = fp, the arc contains no other points
+ if (initialPoint() == finalPoint()) {
+ degenerate_ellipse();
+ return;
+ }
+
+ // rays should be positive
+ _ellipse.setRays(std::fabs(ray(X)), std::fabs(ray(Y)));
+
+ if (isChord()) {
+ _ellipse.setRays(L2(d) / 2, 0);
+ _ellipse.setRotationAngle(atan2(d));
+ _ellipse.setCenter(mid);
+ _angles.setAngles(0, M_PI);
+ _angles.setSweep(false);
+ _large_arc = false;
+ return;
+ }
+
+ Rotate rot(rotationAngle()); // the matrix in F.6.5.3
+ Rotate invrot = rot.inverse(); // the matrix in F.6.5.1
+
+ Point r = rays();
+ Point p = d / 2 * invrot; // x', y' in F.6.5.1
+ Point c(0,0); // cx', cy' in F.6.5.2
+
+ // Correct out-of-range radii
+ Coord lambda = hypot(p[X]/r[X], p[Y]/r[Y]);
+ if (lambda > 1) {
+ r *= lambda;
+ _ellipse.setRays(r);
+ _ellipse.setCenter(mid);
+ } else {
+ // evaluate F.6.5.2
+ Coord rxry = r[X]*r[X] * r[Y]*r[Y];
+ Coord pxry = p[X]*p[X] * r[Y]*r[Y];
+ Coord rxpy = r[X]*r[X] * p[Y]*p[Y];
+ Coord const denominator = rxpy + pxry;
+ if (denominator == 0.0) {
+ degenerate_ellipse();
+ return;
+ }
+ Coord rad = (rxry - pxry - rxpy) / denominator;
+ // normally rad should never be negative, but numerical inaccuracy may cause this
+ if (rad > 0) {
+ rad = std::sqrt(rad);
+ if (sweep() == _large_arc) {
+ rad = -rad;
+ }
+ c = rad * Point(r[X]*p[Y]/r[Y], -r[Y]*p[X]/r[X]);
+ _ellipse.setCenter(c * rot + mid);
+ } else {
+ _ellipse.setCenter(mid);
+ }
+ }
+
+ // Compute start and end angles.
+ // If the ellipse was enlarged, c will be zero - this is correct.
+ Point sp((p[X] - c[X]) / r[X], (p[Y] - c[Y]) / r[Y]);
+ Point ep((-p[X] - c[X]) / r[X], (-p[Y] - c[Y]) / r[Y]);
+ Point v(1, 0);
+
+ _angles.setInitial(angle_between(v, sp));
+ _angles.setFinal(angle_between(v, ep));
+
+ /*double sweep_angle = angle_between(sp, ep);
+ if (!sweep() && sweep_angle > 0) sweep_angle -= 2*M_PI;
+ if (sweep() && sweep_angle < 0) sweep_angle += 2*M_PI;*/
+}
+
+D2<SBasis> EllipticalArc::toSBasis() const
+{
+ if (isChord()) {
+ return chord().toSBasis();
+ }
+
+ D2<SBasis> arc;
+ // the interval of parametrization has to be [0,1]
+ Coord et = initialAngle().radians() + sweepAngle();
+ Linear param(initialAngle().radians(), et);
+ Coord cosrot, sinrot;
+ sincos(rotationAngle(), sinrot, cosrot);
+
+ // order = 4 seems to be enough to get a perfect looking elliptical arc
+ SBasis arc_x = ray(X) * cos(param,4);
+ SBasis arc_y = ray(Y) * sin(param,4);
+ arc[0] = arc_x * cosrot - arc_y * sinrot + Linear(center(X), center(X));
+ arc[1] = arc_x * sinrot + arc_y * cosrot + Linear(center(Y), center(Y));
+
+ // ensure that endpoints remain exact
+ for ( int d = 0 ; d < 2 ; d++ ) {
+ arc[d][0][0] = initialPoint()[d];
+ arc[d][0][1] = finalPoint()[d];
+ }
+
+ return arc;
+}
+
+// All operations that do not contain skew can be evaluated
+// without passing through the implicit form of the ellipse,
+// which preserves precision.
+
+void EllipticalArc::operator*=(Translate const &tr)
+{
+ _initial_point *= tr;
+ _final_point *= tr;
+ _ellipse *= tr;
+}
+
+void EllipticalArc::operator*=(Scale const &s)
+{
+ _initial_point *= s;
+ _final_point *= s;
+ _ellipse *= s;
+}
+
+void EllipticalArc::operator*=(Rotate const &r)
+{
+ _initial_point *= r;
+ _final_point *= r;
+ _ellipse *= r;
+}
+
+void EllipticalArc::operator*=(Zoom const &z)
+{
+ _initial_point *= z;
+ _final_point *= z;
+ _ellipse *= z;
+}
+
+void EllipticalArc::operator*=(Affine const& m)
+{
+ if (isChord()) {
+ _initial_point *= m;
+ _final_point *= m;
+ _ellipse.setCenter(middle_point(_initial_point, _final_point));
+ _ellipse.setRays(0, 0);
+ _ellipse.setRotationAngle(0);
+ return;
+ }
+
+ _initial_point *= m;
+ _final_point *= m;
+ _ellipse *= m;
+ if (m.det() < 0) {
+ _angles.setSweep(!sweep());
+ }
+
+ // ellipse transformation does not preserve its functional form,
+ // i.e. e.pointAt(0.5)*m and (e*m).pointAt(0.5) can be different.
+ // We need to recompute start / end angles.
+ _angles.setInitial(_ellipse.timeAt(_initial_point));
+ _angles.setFinal(_ellipse.timeAt(_final_point));
+}
+
+bool EllipticalArc::operator==(Curve const &c) const
+{
+ EllipticalArc const *other = dynamic_cast<EllipticalArc const *>(&c);
+ if (!other) return false;
+ if (_initial_point != other->_initial_point) return false;
+ if (_final_point != other->_final_point) return false;
+ // TODO: all arcs with ellipse rays which are too small
+ // and fall back to a line should probably be equal
+ if (rays() != other->rays()) return false;
+ if (rotationAngle() != other->rotationAngle()) return false;
+ if (_large_arc != other->_large_arc) return false;
+ if (sweep() != other->sweep()) return false;
+ return true;
+}
+
+bool EllipticalArc::isNear(Curve const &c, Coord precision) const
+{
+ EllipticalArc const *other = dynamic_cast<EllipticalArc const *>(&c);
+ if (!other) {
+ if (isChord()) {
+ return c.isNear(chord(), precision);
+ }
+ return false;
+ }
+
+ if (!are_near(_initial_point, other->_initial_point, precision)) return false;
+ if (!are_near(_final_point, other->_final_point, precision)) return false;
+ if (isChord() && other->isChord()) return true;
+
+ if (sweep() != other->sweep()) return false;
+ if (!are_near(_ellipse, other->_ellipse, precision)) return false;
+ return true;
+}
+
+void EllipticalArc::feed(PathSink &sink, bool moveto_initial) const
+{
+ if (moveto_initial) {
+ sink.moveTo(_initial_point);
+ }
+ sink.arcTo(ray(X), ray(Y), rotationAngle(), _large_arc, sweep(), _final_point);
+}
+
+int EllipticalArc::winding(Point const &p) const
+{
+ using std::swap;
+
+ double sinrot, cosrot;
+ sincos(rotationAngle(), sinrot, cosrot);
+
+ Angle ymin_a = std::atan2( ray(Y) * cosrot, ray(X) * sinrot );
+ Angle ymax_a = ymin_a + M_PI;
+
+ Point ymin = pointAtAngle(ymin_a);
+ Point ymax = pointAtAngle(ymax_a);
+ if (ymin[Y] > ymax[Y]) {
+ swap(ymin, ymax);
+ swap(ymin_a, ymax_a);
+ }
+
+ if (!Interval(ymin[Y], ymax[Y]).lowerContains(p[Y])) {
+ return 0;
+ }
+
+ bool const left = cross(ymax - ymin, p - ymin) > 0;
+ bool const inside = _ellipse.contains(p);
+ if (_angles.isFull()) {
+ if (inside) {
+ return sweep() ? 1 : -1;
+ }
+ return 0;
+ }
+ bool const includes_ymin = _angles.contains(ymin_a);
+ bool const includes_ymax = _angles.contains(ymax_a);
+
+ AngleInterval rarc(ymin_a, ymax_a, true),
+ larc(ymax_a, ymin_a, true);
+
+ // we'll compute the result for an arc in the direction of increasing angles
+ // and then negate if necessary
+ Angle ia = initialAngle(), fa = finalAngle();
+ Point ip = _initial_point, fp = _final_point;
+ if (!sweep()) {
+ swap(ia, fa);
+ swap(ip, fp);
+ }
+
+ bool const initial_left = larc.contains(ia);
+ bool const final_left = larc.contains(fa);
+
+ bool intersects_left = false, intersects_right = false;
+ if (inside || left) {
+ // The point is inside the ellipse or to the left of it, so the rightwards horizontal ray
+ // may intersect the part of the arc contained in the right half of the ellipse.
+ // There are four ways in which this can happen.
+
+ intersects_right =
+ // Possibility 1: the arc extends into the right half through the min-Y point
+ // and the ray intersects this extension:
+ (includes_ymin && !final_left && Interval(ymin[Y], fp[Y]).lowerContains(p[Y]))
+ ||
+ // Possibility 2: the arc starts and ends within the right half (hence, it cannot be the
+ // "large arc") and the ray's Y-coordinate is within the Y-coordinate range of the arc:
+ (!initial_left && !final_left && !largeArc() && Interval(ip[Y], fp[Y]).lowerContains(p[Y]))
+ ||
+ // Possibility 3: the arc starts in the right half and continues through the max-Y
+ // point into the left half:
+ (!initial_left && includes_ymax && Interval(ip[Y], ymax[Y]).lowerContains(p[Y]))
+ ||
+ // Possibility 4: the entire right half of the ellipse is contained in the arc.
+ (initial_left && final_left && includes_ymin && includes_ymax);
+ }
+ if (left && !inside) {
+ // The point is to the left of the ellipse, so the rightwards horizontal ray
+ // may intersect the part of the arc contained in the left half of the ellipse.
+ // There are four ways in which this can happen.
+
+ intersects_left =
+ // Possibility 1: the arc starts in the left half and continues through the min-Y
+ // point into the right half:
+ (includes_ymin && initial_left && Interval(ymin[Y], ip[Y]).lowerContains(p[Y]))
+ ||
+ // Possibility 2: the arc starts and ends within the left half (hence, it cannot be the
+ // "large arc") and the ray's Y-coordinate is within the Y-coordinate range of the arc:
+ (initial_left && final_left && !largeArc() && Interval(ip[Y], fp[Y]).lowerContains(p[Y]))
+ ||
+ // Possibility 3: the arc extends into the left half through the max-Y point
+ // and the ray intersects this extension:
+ (final_left && includes_ymax && Interval(fp[Y], ymax[Y]).lowerContains(p[Y]))
+ ||
+ // Possibility 4: the entire left half of the ellipse is contained in the arc.
+ (!initial_left && !final_left && includes_ymin && includes_ymax);
+
+ }
+ int const winding_assuming_increasing_angles = (int)intersects_right - (int)intersects_left;
+ return sweep() ? winding_assuming_increasing_angles : -winding_assuming_increasing_angles;
+}
+
+std::ostream &operator<<(std::ostream &out, EllipticalArc const &ea)
+{
+ out << "EllipticalArc("
+ << ea.initialPoint() << ", "
+ << format_coord_nice(ea.ray(X)) << ", " << format_coord_nice(ea.ray(Y)) << ", "
+ << format_coord_nice(ea.rotationAngle()) << ", "
+ << "large_arc=" << (ea.largeArc() ? "true" : "false") << ", "
+ << "sweep=" << (ea.sweep() ? "true" : "false") << ", "
+ << ea.finalPoint() << ")";
+ return out;
+}
+
+} // end namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
+
diff --git a/src/2geom/geom.cpp b/src/2geom/geom.cpp
new file mode 100644
index 0000000..791e3a6
--- /dev/null
+++ b/src/2geom/geom.cpp
@@ -0,0 +1,396 @@
+/**
+ * \brief Various geometrical calculations.
+ */
+
+#include <2geom/geom.h>
+#include <2geom/point.h>
+#include <algorithm>
+#include <optional>
+#include <2geom/rect.h>
+
+using std::swap;
+
+namespace Geom {
+
+enum IntersectorKind {
+ intersects = 0,
+ parallel,
+ coincident,
+ no_intersection
+};
+
+/**
+ * Finds the intersection of the two (infinite) lines
+ * defined by the points p such that dot(n0, p) == d0 and dot(n1, p) == d1.
+ *
+ * If the two lines intersect, then \a result becomes their point of
+ * intersection; otherwise, \a result remains unchanged.
+ *
+ * This function finds the intersection of the two lines (infinite)
+ * defined by n0.X = d0 and n1.X = d1. The algorithm is as follows:
+ * To compute the intersection point, use Cramer's rule:
+ * \verbatim
+ * convert lines to form
+ * ax + by = c
+ * dx + ey = f
+ *
+ * (
+ * e.g. a = (x2 - x1), b = (y2 - y1), c = (x2 - x1)*x1 + (y2 - y1)*y1
+ * )
+ *
+ * In our case we use:
+ * a = n0.x d = n1.x
+ * b = n0.y e = n1.y
+ * c = d0 f = d1
+ *
+ * so:
+ *
+ * adx + bdy = cd
+ * adx + aey = af
+ *
+ * bdy - aey = cd - af
+ * (bd - ae)y = cd - af
+ *
+ * y = (cd - af)/(bd - ae)
+ *
+ * repeat for x and you get:
+ *
+ * x = (fb - ce)/(bd - ae) \endverbatim
+ *
+ * If the denominator (bd-ae) is 0 then the lines are parallel, if the
+ * numerators are 0 then the lines coincide.
+ *
+ * \todo Why not use existing but outcommented code below
+ * (HAVE_NEW_INTERSECTOR_CODE)?
+ */
+IntersectorKind
+line_intersection(Geom::Point const &n0, double const d0,
+ Geom::Point const &n1, double const d1,
+ Geom::Point &result)
+{
+ double denominator = dot(Geom::rot90(n0), n1);
+ double X = n1[Geom::Y] * d0 -
+ n0[Geom::Y] * d1;
+ /* X = (-d1, d0) dot (n0[Y], n1[Y]) */
+
+ if (denominator == 0) {
+ if ( X == 0 ) {
+ return coincident;
+ } else {
+ return parallel;
+ }
+ }
+
+ double Y = n0[Geom::X] * d1 -
+ n1[Geom::X] * d0;
+
+ result = Geom::Point(X, Y) / denominator;
+
+ return intersects;
+}
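+
+/* Illustrative usage sketch (editor's note, not part of the original sources): intersecting the
+ * lines x + y = 2 and x - y = 0, each given in the normal form dot(n, p) == d:
+ *
+ *     Geom::Point crossing;
+ *     IntersectorKind kind = line_intersection(Geom::Point(1, 1), 2,
+ *                                              Geom::Point(1, -1), 0,
+ *                                              crossing);
+ *     // kind == intersects and crossing should be Geom::Point(1, 1)
+ */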
+
+
+
+/* ccw exists as a building block */
+int
+intersector_ccw(const Geom::Point& p0, const Geom::Point& p1,
+ const Geom::Point& p2)
+/* Determine which way a set of three points winds. */
+{
+ Geom::Point d1 = p1 - p0;
+ Geom::Point d2 = p2 - p0;
+ /* compare slopes but avoid division operation */
+ double c = dot(Geom::rot90(d1), d2);
+ if(c > 0)
+ return +1; // ccw - do these match def'n in header?
+ if(c < 0)
+ return -1; // cw
+
+    /* Collinear [or NaN]. Decide the order. */
+ if ( ( d1[0] * d2[0] < 0 ) ||
+ ( d1[1] * d2[1] < 0 ) ) {
+ return -1; // p2 < p0 < p1
+ } else if ( dot(d1,d1) < dot(d2,d2) ) {
+ return +1; // p0 <= p1 < p2
+ } else {
+ return 0; // p0 <= p2 <= p1
+ }
+}
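+
+/* Quick illustration (editor's note): with the standard mathematical orientation, the triple
+ * (0,0), (1,0), (1,1) is a left turn, so intersector_ccw(Point(0,0), Point(1,0), Point(1,1))
+ * should return +1, while swapping the last two points should return -1.
+ */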
+
+/** Determine whether the infinite line passing through p00 and p01
+    intersects the line segment from p10 to p11. This doesn't find the
+    point of intersection; use the line_intersection function above,
+    or the line_segment_intersect interface below.
+
+    \pre the points of each pair are distinct; i.e. p00 != p01 and p10 != p11.
+*/
+bool
+line_segment_intersectp(Geom::Point const &p00, Geom::Point const &p01,
+ Geom::Point const &p10, Geom::Point const &p11)
+{
+ if(p00 == p01) return false;
+ if(p10 == p11) return false;
+
+ return ((intersector_ccw(p00, p01, p10) * intersector_ccw(p00, p01, p11)) <= 0 );
+}
+
+
+/** Determine whether two line segments intersect. This doesn't find
+    the point of intersection; use the line_intersection function above,
+    or the segment_intersect interface below.
+
+ \pre neither segment is zero-length; i.e. p00 != p01 and p10 != p11.
+*/
+bool
+segment_intersectp(Geom::Point const &p00, Geom::Point const &p01,
+ Geom::Point const &p10, Geom::Point const &p11)
+{
+ if(p00 == p01) return false;
+ if(p10 == p11) return false;
+
+ /* true iff ( (the p1 segment straddles the p0 infinite line)
+ * and (the p0 segment straddles the p1 infinite line) ). */
+ return (line_segment_intersectp(p00, p01, p10, p11) &&
+ line_segment_intersectp(p10, p11, p00, p01));
+}
+
+/** Determine whether \& where an (infinite) line through \a p00 and \a p01
+    intersects the line segment from \a p10 to \a p11.
+
+If there is no intersection, then \a result remains unchanged.
+
+\pre neither segment is zero-length; i.e. p00 != p01 and p10 != p11.
+**/
+IntersectorKind
+line_segment_intersect(Geom::Point const &p00, Geom::Point const &p01,
+ Geom::Point const &p10, Geom::Point const &p11,
+ Geom::Point &result)
+{
+ if(line_segment_intersectp(p00, p01, p10, p11)) {
+ Geom::Point n0 = (p01 - p00).ccw();
+ double d0 = dot(n0,p00);
+
+ Geom::Point n1 = (p11 - p10).ccw();
+ double d1 = dot(n1,p10);
+ return line_intersection(n0, d0, n1, d1, result);
+ } else {
+ return no_intersection;
+ }
+}
+
+
+/** Determine whether \& where two line segments intersect.
+
+If the two segments don't intersect, then \a result remains unchanged.
+
+\pre neither segment is zero-length; i.e. p00 != p01 and p10 != p11.
+**/
+IntersectorKind
+segment_intersect(Geom::Point const &p00, Geom::Point const &p01,
+ Geom::Point const &p10, Geom::Point const &p11,
+ Geom::Point &result)
+{
+ if(segment_intersectp(p00, p01, p10, p11)) {
+ Geom::Point n0 = (p01 - p00).ccw();
+ double d0 = dot(n0,p00);
+
+ Geom::Point n1 = (p11 - p10).ccw();
+ double d1 = dot(n1,p10);
+ return line_intersection(n0, d0, n1, d1, result);
+ } else {
+ return no_intersection;
+ }
+}
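+
+/* Illustrative usage sketch (editor's note): the diagonals of the square (0,0)-(2,2) cross at
+ * their midpoint, so
+ *
+ *     Geom::Point x;
+ *     IntersectorKind k = segment_intersect(Geom::Point(0, 0), Geom::Point(2, 2),
+ *                                           Geom::Point(0, 2), Geom::Point(2, 0), x);
+ *
+ * should yield k == intersects and x == Geom::Point(1, 1).
+ */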
+
+/** Determine whether \& where two (infinite) lines intersect, each line being
+given by two distinct points on it.
+
+If the two lines are parallel or coincident, then \a result remains unchanged.
+
+\pre the two points defining each line are distinct; i.e. p00 != p01 and p10 != p11.
+**/
+IntersectorKind
+line_twopoint_intersect(Geom::Point const &p00, Geom::Point const &p01,
+ Geom::Point const &p10, Geom::Point const &p11,
+ Geom::Point &result)
+{
+ Geom::Point n0 = (p01 - p00).ccw();
+ double d0 = dot(n0,p00);
+
+ Geom::Point n1 = (p11 - p10).ccw();
+ double d1 = dot(n1,p10);
+ return line_intersection(n0, d0, n1, d1, result);
+}
+
+// this is used to compare points for std::sort below
+static bool
+is_less(Point const &A, Point const &B)
+{
+ if (A[X] < B[X]) {
+ return true;
+ } else if (A[X] == B[X] && A[Y] < B[Y]) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+// TODO: this can doubtlessly be improved
+static void
+eliminate_duplicates_p(std::vector<Point> &pts)
+{
+ unsigned int size = pts.size();
+
+ if (size < 2)
+ return;
+
+ if (size == 2) {
+ if (pts[0] == pts[1]) {
+ pts.pop_back();
+ }
+ } else {
+ std::sort(pts.begin(), pts.end(), &is_less);
+ if (size == 3) {
+ if (pts[0] == pts[1]) {
+ pts.erase(pts.begin());
+ } else if (pts[1] == pts[2]) {
+ pts.pop_back();
+ }
+ } else {
+ // we have size == 4
+ if (pts[2] == pts[3]) {
+ pts.pop_back();
+ }
+ if (pts[0] == pts[1]) {
+ pts.erase(pts.begin());
+ }
+ }
+ }
+}
+
+/** Determine whether \& where an (infinite) line intersects a rectangle.
+ *
+ * \a c0, \a c1 are diagonal corners of the rectangle and
+ * \a p0, \a p1 are distinct points on the line
+ *
+ * \return A list (possibly empty) of points of intersection. If there are two such points (say
+ * \a r0 and \a r1), then it is guaranteed that the order of \a r0, \a r1 along the line is the
+ * same as that of \a p0, \a p1 (i.e., the vectors \a r1 - \a r0 and \a p1 - \a p0 point in the
+ * same direction).
+ */
+std::vector<Geom::Point>
+rect_line_intersect(Geom::Point const &c0, Geom::Point const &c1,
+ Geom::Point const &p0, Geom::Point const &p1)
+{
+ using namespace Geom;
+
+ std::vector<Point> results;
+
+ Point A(c0);
+ Point C(c1);
+
+ Point B(A[X], C[Y]);
+ Point D(C[X], A[Y]);
+
+ Point res;
+
+ if (line_segment_intersect(p0, p1, A, B, res) == intersects) {
+ results.push_back(res);
+ }
+ if (line_segment_intersect(p0, p1, B, C, res) == intersects) {
+ results.push_back(res);
+ }
+ if (line_segment_intersect(p0, p1, C, D, res) == intersects) {
+ results.push_back(res);
+ }
+ if (line_segment_intersect(p0, p1, D, A, res) == intersects) {
+ results.push_back(res);
+ }
+
+ eliminate_duplicates_p(results);
+
+ if (results.size() == 2) {
+ // sort the results so that the order is the same as that of p0 and p1
+ Point dir1 (results[1] - results[0]);
+ Point dir2 (p1 - p0);
+ if (dot(dir1, dir2) < 0) {
+ swap(results[0], results[1]);
+ }
+ }
+
+ return results;
+}
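+
+/* Illustrative usage sketch (editor's note): the horizontal line through (0,5) and (1,5) cuts
+ * the square with corners (0,0) and (10,10) on its left and right sides, so
+ *
+ *     std::vector<Geom::Point> cuts =
+ *         rect_line_intersect(Geom::Point(0, 0), Geom::Point(10, 10),
+ *                             Geom::Point(0, 5), Geom::Point(1, 5));
+ *
+ * should return the two points (0,5) and (10,5), in that order.
+ */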
+
+/** Determine whether \& where an (infinite) line intersects a rectangle.
+ *
+ * \a r is the rectangle and \a ls is a segment whose two distinct endpoints lie on the line.
+ *
+ * \return If the line crosses the rectangle in two points, the chord between them as a
+ * LineSegment oriented in the same direction as \a ls; otherwise an empty optional.
+ */
+std::optional<LineSegment>
+rect_line_intersect(Geom::Rect &r,
+ Geom::LineSegment ls)
+{
+ std::vector<Point> results;
+
+ results = rect_line_intersect(r.min(), r.max(), ls[0], ls[1]);
+ if(results.size() == 2) {
+ return LineSegment(results[0], results[1]);
+ }
+ return std::optional<LineSegment>();
+}
+
+std::optional<LineSegment>
+rect_line_intersect(Geom::Rect &r,
+ Geom::Line l)
+{
+ return rect_line_intersect(r, l.segment(0, 1));
+}
+
+/**
+ * centroid: Calculates the centroid (xCentroid, yCentroid) and area of a polygon, given its
+ * vertices (x[0], y[0]) ... (x[n-1], y[n-1]). It is assumed that the contour is closed, i.e., that
+ * the vertex following (x[n-1], y[n-1]) is (x[0], y[0]). The algebraic sign of the area is
+ * positive for counterclockwise ordering of vertices in x-y plane; otherwise negative.
+
+ * Returned values:
+ 0 for normal execution;
+ 1 if the polygon is degenerate (number of vertices < 3);
+ 2 if area = 0 (and the centroid is undefined).
+
+ * for now we require the path to be a polyline and assume it is closed.
+**/
+
+int centroid(std::vector<Geom::Point> const &p, Geom::Point& centroid, double &area) {
+ const unsigned n = p.size();
+ if (n < 3)
+ return 1;
+ Geom::Point centroid_tmp(0,0);
+ double atmp = 0;
+ for (unsigned i = n-1, j = 0; j < n; i = j, j++) {
+ const double ai = cross(p[j], p[i]);
+ atmp += ai;
+ centroid_tmp += (p[j] + p[i])*ai; // first moment.
+ }
+ area = atmp / 2;
+ if (atmp != 0) {
+ centroid = centroid_tmp / (3 * atmp);
+ return 0;
+ }
+ return 2;
+}
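+
+/* Illustrative check (editor's note): for the unit square traversed as (0,0), (1,0), (1,1),
+ * (0,1), the centroid should come out as (0.5, 0.5) and the area should have magnitude 1,
+ * with the sign encoding the winding direction:
+ *
+ *     std::vector<Geom::Point> square = { Geom::Point(0,0), Geom::Point(1,0),
+ *                                         Geom::Point(1,1), Geom::Point(0,1) };
+ *     Geom::Point c;
+ *     double area;
+ *     int status = Geom::centroid(square, c, area);   // status == 0
+ */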
+
+}
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/intersection-graph.cpp b/src/2geom/intersection-graph.cpp
new file mode 100644
index 0000000..524267e
--- /dev/null
+++ b/src/2geom/intersection-graph.cpp
@@ -0,0 +1,535 @@
+/**
+ * \file
+ * \brief Intersection graph for Boolean operations
+ *//*
+ * Authors:
+ * Krzysztof Kosiński <tweenk.pl@gmail.com>
+ *
+ * Copyright 2015 Authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+#include <2geom/intersection-graph.h>
+#include <2geom/path.h>
+#include <2geom/pathvector.h>
+#include <2geom/utils.h>
+#include <iostream>
+#include <iterator>
+
+namespace Geom {
+
+/// Function object for comparing intersection vertices based on the intersection time.
+struct PathIntersectionGraph::IntersectionVertexLess {
+ bool operator()(IntersectionVertex const &a, IntersectionVertex const &b) const {
+ return a.pos < b.pos;
+ }
+};
+
+PathIntersectionGraph::PathIntersectionGraph(PathVector const &a, PathVector const &b, Coord precision)
+ : _graph_valid(true)
+{
+ _pv[0] = a;
+ _pv[1] = b;
+
+ if (a.empty() || b.empty()) return;
+
+ _prepareArguments();
+ bool has_intersections = _prepareIntersectionLists(precision);
+ if (!has_intersections) return;
+
+ _assignEdgeWindingParities(precision);
+
+ // If a path has only degenerate intersections, assign its status now.
+ // This protects against later accidentally picking a point for winding
+ // determination that is exactly at a removed intersection.
+ _assignComponentStatusFromDegenerateIntersections();
+ _removeDegenerateIntersections();
+ if (_graph_valid) {
+ _verify();
+ }
+}
+
+/** Prepare the operands stored in PathIntersectionGraph::_pv by closing all of their constituent
+ * paths and removing degenerate segments from them.
+ */
+void PathIntersectionGraph::_prepareArguments()
+{
+ // all paths must be closed, otherwise we will miss some intersections
+ for (auto & w : _pv) {
+ for (auto & i : w) {
+ i.close();
+ }
+ }
+ // remove degenerate segments
+ for (auto & w : _pv) {
+ for (std::size_t i = w.size(); i > 0; --i) {
+ if (w[i-1].empty()) {
+ w.erase(w.begin() + (i-1));
+ continue;
+ }
+ for (std::size_t j = w[i-1].size(); j > 0; --j) {
+ if (w[i-1][j-1].isDegenerate()) {
+ w[i-1].erase(w[i-1].begin() + (j-1));
+ }
+ }
+ }
+ }
+}
+
+/** @brief Compute the lists of intersections between the constituent paths of both operands.
+ * @param precision – the precision setting for the sweepline algorithm.
+ * @return Whether any intersections were found.
+ */
+bool PathIntersectionGraph::_prepareIntersectionLists(Coord precision)
+{
+ std::vector<PVIntersection> pxs = _pv[0].intersect(_pv[1], precision);
+ // NOTE: this early return means that the path data structures will not be created
+ // if there are no intersections at all!
+ if (pxs.empty()) return false;
+
+ // prepare intersection lists for each path component
+ for (unsigned w = 0; w < 2; ++w) {
+ for (std::size_t i = 0; i < _pv[w].size(); ++i) {
+ _components[w].push_back(new PathData(w, i));
+ }
+ }
+
+ // create intersection vertices
+ for (auto & px : pxs) {
+ IntersectionVertex *xa, *xb;
+ xa = new IntersectionVertex();
+ xb = new IntersectionVertex();
+ //xa->processed = xb->processed = false;
+ xa->which = 0; xb->which = 1;
+ xa->pos = px.first;
+ xb->pos = px.second;
+ xa->p = xb->p = px.point();
+ xa->neighbor = xb;
+ xb->neighbor = xa;
+ xa->next_edge = xb->next_edge = OUTSIDE;
+ xa->defective = xb->defective = false;
+ _xs.push_back(xa);
+ _xs.push_back(xb);
+ _components[0][xa->pos.path_index].xlist.push_back(*xa);
+ _components[1][xb->pos.path_index].xlist.push_back(*xb);
+ }
+
+ // sort intersections in each component according to time value
+ for (auto & _component : _components) {
+ for (std::size_t i = 0; i < _component.size(); ++i) {
+ _component[i].xlist.sort(IntersectionVertexLess());
+ }
+ }
+
+ return true;
+}
+
+/** Determine whether path portions between consecutive intersections lie inside or outside
+ * of the other path-vector.
+ */
+void PathIntersectionGraph::_assignEdgeWindingParities(Coord precision)
+{
+ for (unsigned w = 0; w < 2; ++w) {
+ unsigned ow = (w+1) % 2; ///< The index of the other operand
+
+ for (unsigned li = 0; li < _components[w].size(); ++li) { // Traverse all paths in the component
+ IntersectionList &xl = _components[w][li].xlist;
+ for (ILIter i = xl.begin(); i != xl.end(); ++i) { // Traverse all intersections in the path
+ ILIter n = cyclic_next(i, xl);
+ std::size_t pi = i->pos.path_index;
+
+ /// Path time interval from the current crossing to the next one
+ PathInterval ival = forward_interval(i->pos, n->pos, _pv[w][pi].size());
+ PathTime mid = ival.inside(precision);
+
+ Point wpoint = _pv[w][pi].pointAt(mid);
+ _winding_points.push_back(wpoint);
+ int wdg = _pv[ow].winding(wpoint);
+ if (wdg % 2) {
+ i->next_edge = INSIDE;
+ } else {
+ i->next_edge = OUTSIDE;
+ }
+ }
+ }
+ }
+}
+
+/** Detect the situation where a path is either entirely inside or entirely outside of the other
+ * path-vector and set the status flag accordingly.
+ */
+void PathIntersectionGraph::_assignComponentStatusFromDegenerateIntersections()
+{
+ for (auto & _component : _components) {
+ for (unsigned li = 0; li < _component.size(); ++li) {
+ IntersectionList &xl = _component[li].xlist;
+ bool has_in = false;
+ bool has_out = false;
+ for (auto & i : xl) {
+ has_in |= (i.next_edge == INSIDE);
+ has_out |= (i.next_edge == OUTSIDE);
+ }
+ if (has_in && !has_out) {
+ _component[li].status = INSIDE;
+ }
+ if (!has_in && has_out) {
+ _component[li].status = OUTSIDE;
+ }
+ }
+ }
+}
+
+/** Remove intersections that don't change between in/out.
+ *
+ * In general, a degenerate intersection can happen at a point where
+ * two shapes "kiss" (are tangent) but do not cross into each other.
+ */
+void PathIntersectionGraph::_removeDegenerateIntersections()
+{
+ for (auto & _component : _components) {
+ for (unsigned li = 0; li < _component.size(); ++li) {
+ IntersectionList &xl = _component[li].xlist;
+ for (ILIter i = xl.begin(); i != xl.end();) {
+ ILIter n = cyclic_next(i, xl);
+ if (i->next_edge == n->next_edge) { // Both edges inside or both outside
+ bool last_node = (i == n); ///< Whether this is the last remaining crossing.
+ ILIter nn = _getNeighbor(n);
+ IntersectionList &oxl = _getPathData(nn).xlist;
+
+ // When exactly 3 out of 4 edges adjacent to an intersection
+ // have the same winding, we have a defective intersection,
+ // which is neither degenerate nor normal. Those can occur in paths
+ // that contain overlapping segments.
+ if (cyclic_prior(nn, oxl)->next_edge != nn->next_edge) {
+ // Not a backtrack - set the defective flag.
+ _graph_valid = false;
+ n->defective = true;
+ nn->defective = true;
+ ++i;
+ continue;
+ }
+                // Erase the degenerate crossings (the defective case was handled above).
+ oxl.erase(nn);
+ xl.erase(n);
+ if (last_node) break;
+ } else {
+ ++i;
+ }
+ }
+ }
+ }
+}
+
+/** Verify that all paths contain an even number of intersections and that
+ * the intersection graph does not contain leaves (degree one vertices).
+ */
+void PathIntersectionGraph::_verify()
+{
+#ifndef NDEBUG
+ for (auto & _component : _components) {
+ for (unsigned li = 0; li < _component.size(); ++li) {
+ IntersectionList &xl = _component[li].xlist;
+ assert(xl.size() % 2 == 0);
+ for (ILIter i = xl.begin(); i != xl.end(); ++i) {
+ ILIter j = cyclic_next(i, xl);
+ assert(i->next_edge != j->next_edge);
+ }
+ }
+ }
+#endif
+}
+
+PathVector PathIntersectionGraph::getUnion()
+{
+ PathVector result = _getResult(false, false);
+ _handleNonintersectingPaths(result, 0, false);
+ _handleNonintersectingPaths(result, 1, false);
+ return result;
+}
+
+PathVector PathIntersectionGraph::getIntersection()
+{
+ PathVector result = _getResult(true, true);
+ _handleNonintersectingPaths(result, 0, true);
+ _handleNonintersectingPaths(result, 1, true);
+ return result;
+}
+
+PathVector PathIntersectionGraph::getAminusB()
+{
+ PathVector result = _getResult(false, true);
+ _handleNonintersectingPaths(result, 0, false);
+ _handleNonintersectingPaths(result, 1, true);
+ return result;
+}
+
+PathVector PathIntersectionGraph::getBminusA()
+{
+ PathVector result = _getResult(true, false);
+ _handleNonintersectingPaths(result, 1, false);
+ _handleNonintersectingPaths(result, 0, true);
+ return result;
+}
+
+PathVector PathIntersectionGraph::getXOR()
+{
+ PathVector r1, r2;
+ r1 = getAminusB();
+ r2 = getBminusA();
+ std::copy(r2.begin(), r2.end(), std::back_inserter(r1));
+ return r1;
+}
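+
+/* Illustrative usage sketch (editor's note, not from the original sources): given two closed
+ * operands as PathVectors (e.g. parsed from SVG path data), the boolean operations above can be
+ * used as follows; an optional precision argument may also be passed to the constructor:
+ *
+ *     Geom::PathIntersectionGraph graph(a, b);   // a, b: closed Geom::PathVector operands
+ *     Geom::PathVector uni  = graph.getUnion();
+ *     Geom::PathVector sect = graph.getIntersection();
+ *     Geom::PathVector diff = graph.getAminusB();
+ */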
+
+std::size_t PathIntersectionGraph::size() const
+{
+ std::size_t result = 0;
+ for (std::size_t i = 0; i < _components[0].size(); ++i) {
+ result += _components[0][i].xlist.size();
+ }
+ return result;
+}
+
+std::vector<Point> PathIntersectionGraph::intersectionPoints(bool defective) const
+{
+ std::vector<Point> result;
+
+ for (std::size_t i = 0; i < _components[0].size(); ++i) {
+ for (const auto & j : _components[0][i].xlist) {
+ if (j.defective == defective) {
+ result.push_back(j.p);
+ }
+ }
+ }
+ return result;
+}
+
+void PathIntersectionGraph::fragments(PathVector &in, PathVector &out) const
+{
+ typedef boost::ptr_vector<PathData>::const_iterator PIter;
+ for (unsigned w = 0; w < 2; ++w) {
+ for (PIter li = _components[w].begin(); li != _components[w].end(); ++li) {
+ for (CILIter k = li->xlist.begin(); k != li->xlist.end(); ++k) {
+ CILIter n = cyclic_next(k, li->xlist);
+ // TODO: investigate why non-contiguous paths are sometimes generated here
+ Path frag(k->p);
+ frag.setStitching(true);
+ PathInterval ival = forward_interval(k->pos, n->pos, _pv[w][k->pos.path_index].size());
+ _pv[w][k->pos.path_index].appendPortionTo(frag, ival, k->p, n->p);
+ if (k->next_edge == INSIDE) {
+ in.push_back(frag);
+ } else {
+ out.push_back(frag);
+ }
+ }
+ }
+ }
+}
+
+/** @brief Compute the partial result of a boolean operation by looking at components containing
+ * intersections and stitching the correct path portions between them, depending on the truth
+ * table of the operation.
+ *
+ * @param enter_a – whether the path portions contained inside operand A should be part of the boundary
+ * of the boolean operation's result.
+ * @param enter_b – whether the path portions contained inside operand B should be part of the boundary
+ * of the boolean operation's result.
+ *
+ * These two flags completely determine how to resolve the crossings when building the result
+ * and therefore encode which boolean operation we are performing. For example, the boolean intersection
+ * corresponds to enter_a == true and enter_b == true, as can be seen by looking at a Venn diagram.
+ */
+PathVector PathIntersectionGraph::_getResult(bool enter_a, bool enter_b)
+{
+ PathVector result;
+ if (_xs.empty()) return result;
+
+ // Create the list of intersections to process
+ _ulist.clear();
+ for (auto & _component : _components) {
+ for (auto & li : _component) {
+ for (auto & k : li.xlist) {
+ _ulist.push_back(k);
+ }
+ }
+ }
+
+ unsigned n_processed = 0;
+
+ while (true) {
+ // get unprocessed intersection
+ if (_ulist.empty()) break;
+ IntersectionVertex &iv = _ulist.front();
+ unsigned w = iv.which;
+ ILIter i = _components[w][iv.pos.path_index].xlist.iterator_to(iv);
+
+ result.push_back(Path(i->p));
+ result.back().setStitching(true);
+ bool reverse = false; ///< Whether to traverse the current component in the backwards direction.
+ while (i->_proc_hook.is_linked()) {
+ ILIter prev = i;
+ std::size_t pi = i->pos.path_index; ///< Index of the path in its PathVector
+ // determine which direction to go
+ // union: always go outside
+ // intersection: always go inside
+ // a minus b: go inside in b, outside in a
+ // b minus a: go inside in a, outside in b
+ if (w == 0) { // The path we're on is a part of A
+ reverse = (i->next_edge == INSIDE) ^ enter_a;
+ } else { // The path we're on is a part of B
+ reverse = (i->next_edge == INSIDE) ^ enter_b;
+ }
+
+ // get next intersection
+ if (reverse) {
+ i = cyclic_prior(i, _components[w][pi].xlist);
+ } else {
+ i = cyclic_next(i, _components[w][pi].xlist);
+ }
+
+ // append portion of path to the result
+ PathInterval ival = PathInterval::from_direction(
+ prev->pos.asPathTime(), i->pos.asPathTime(),
+ reverse, _pv[i->which][pi].size());
+
+ _pv[i->which][pi].appendPortionTo(result.back(), ival, prev->p, i->p);
+
+ // count both vertices as processed
+ n_processed += 2;
+ if (prev->_proc_hook.is_linked()) {
+ _ulist.erase(_ulist.iterator_to(*prev));
+ }
+ if (i->_proc_hook.is_linked()) {
+ _ulist.erase(_ulist.iterator_to(*i));
+ }
+
+ // switch to the other path
+ i = _getNeighbor(i);
+ w = i->which;
+ }
+ result.back().close(true);
+ if (reverse){
+ result.back() = result.back().reversed();
+ }
+ if (result.back().empty()) {
+ // std::cerr << "Path is empty" << std::endl;
+ throw GEOM_ERR_INTERSECGRAPH;
+ }
+ }
+
+ if (n_processed != size() * 2) {
+ // std::cerr << "Processed " << n_processed << " intersections, expected " << (size() * 2) << std::endl;
+ throw GEOM_ERR_INTERSECGRAPH;
+ }
+
+ return result;
+}
+
+/** @brief Select intersection-free path components ahead of a boolean operation based on whether
+ * they should be a part of that operation's result.
+ *
+ * Every component that has intersections will be processed by _getResult().
+ * Here we take care of paths that don't have any intersections. They are either
+ * completely inside or completely outside the other path-vector.
+ *
+ * @param result – output parameter to store the selected components.
+ * @param which – which of the two operands to search for intersection-free paths.
+ * @param inside – If set to true, add paths entirely contained inside the other path-vector to
+ * the result. If set to false, add paths entirely outside of the other path-vector instead.
+ */
+void PathIntersectionGraph::_handleNonintersectingPaths(PathVector &result, unsigned which, bool inside)
+{
+ unsigned w = which;
+ unsigned ow = (w+1) % 2;
+
+ for (std::size_t i = 0; i < _pv[w].size(); ++i) {
+ // the path data vector might have been left empty if there were no intersections at all
+ bool has_path_data = !_components[w].empty();
+ // Skip if the path has intersections
+ if (has_path_data && !_components[w][i].xlist.empty()) continue;
+ bool path_inside = false;
+
+ // Use the status flag set in the constructor if available.
+ if (has_path_data && _components[w][i].status == INSIDE) {
+ path_inside = true;
+ } else if (has_path_data && _components[w][i].status == OUTSIDE) {
+ path_inside = false;
+ } else {
+ // The status flag is ambiguous: we evaluate the winding number of the initial point.
+ int wdg = _pv[ow].winding(_pv[w][i].initialPoint());
+ path_inside = wdg % 2 != 0;
+ }
+
+ if (path_inside == inside) {
+ result.push_back(_pv[w][i]);
+ }
+ }
+}
+
+/** @brief Get an iterator to the corresponding crossing on the other path-vector.
+ *
+ * @param iter – an iterator to an intersection in one of the path-vectors' intersection lists.
+ * @return An iterator to the corresponding intersection in the other path-vector.
+ */
+PathIntersectionGraph::ILIter PathIntersectionGraph::_getNeighbor(ILIter iter)
+{
+ unsigned ow = (iter->which + 1) % 2;
+ return _components[ow][iter->neighbor->pos.path_index].xlist.iterator_to(*iter->neighbor);
+}
+
+/** Get the path data for the path containing the intersection given an iterator to the intersection */
+PathIntersectionGraph::PathData &
+PathIntersectionGraph::_getPathData(ILIter iter)
+{
+ return _components[iter->which][iter->pos.path_index];
+}
+
+/** Format the PathIntersectionGraph for output. */
+std::ostream &operator<<(std::ostream &os, PathIntersectionGraph const &pig)
+{
+ os << "Intersection graph:\n"
+ << pig._xs.size()/2 << " total intersections\n"
+ << pig.size() << " considered intersections\n";
+ for (std::size_t i = 0; i < pig._components[0].size(); ++i) {
+ PathIntersectionGraph::IntersectionList const &xl = pig._components[0][i].xlist;
+ for (const auto & j : xl) {
+ os << j.pos << " - " << j.neighbor->pos << " @ " << j.p << "\n";
+ }
+ }
+ return os;
+}
+
+} // namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
+
diff --git a/src/2geom/intervaltree/interval_tree.cc b/src/2geom/intervaltree/interval_tree.cc
new file mode 100644
index 0000000..01a222a
--- /dev/null
+++ b/src/2geom/intervaltree/interval_tree.cc
@@ -0,0 +1,799 @@
+#include "interval_tree.h"
+#include <stdio.h>
+#include <math.h>
+#include <assert.h>
+
+// From Emin Martinian, licensed LGPL and MPL with permission
+
+
+using namespace std;
+
+// If the symbol CHECK_INTERVAL_TREE_ASSUMPTIONS is defined then the
+// code does a lot of extra checking to make sure certain assumptions
+// are satisfied. This only needs to be done if you suspect bugs are
+// present or if you make significant changes and want to make sure
+// your changes didn't mess anything up.
+//#define CHECK_INTERVAL_TREE_ASSUMPTIONS 1
+
+
+const int MIN_INT=-MAX_INT;
+
+IntervalTreeNode::IntervalTreeNode(){}
+
+IntervalTreeNode::IntervalTreeNode(Interval * newInterval)
+ : storedInterval (newInterval) ,
+ key(newInterval->GetLowPoint()),
+ high(newInterval->GetHighPoint()) ,
+ maxHigh(high) {
+}
+IntervalTreeNode::~IntervalTreeNode(){}
+Interval::Interval(){}
+Interval::~Interval(){}
+void Interval::Print() const {
+ cout << "No Print Method defined for instance of Interval" << endl;
+}
+
+IntervalTree::IntervalTree()
+{
+ nil = new IntervalTreeNode;
+ nil->left = nil->right = nil->parent = nil;
+ nil->red = 0;
+ nil->key = nil->high = nil->maxHigh = MIN_INT;
+ nil->storedInterval = NULL;
+
+ root = new IntervalTreeNode;
+ root->parent = root->left = root->right = nil;
+ root->key = root->high = root->maxHigh = MAX_INT;
+ root->red=0;
+ root->storedInterval = NULL;
+
+ /* the following are used for the Enumerate function */
+ recursionNodeStackSize = 8; // the tree depth is approximately lg(n), this is a 256 element tree. The code will adapt to deeper trees, but this saves considerable space for small trees.
+ recursionNodeStack = new it_recursion_node[recursionNodeStackSize];
+ recursionNodeStackTop = 1;
+ recursionNodeStack[0].start_node = NULL;
+
+}
+
+/***********************************************************************/
+/* FUNCTION: LeftRotate */
+/**/
+/* INPUTS: the node to rotate on */
+/**/
+/* OUTPUT: None */
+/**/
+/* Modifies Input: this, x */
+/**/
+/* EFFECTS: Rotates as described in _Introduction_To_Algorithms by */
+/* Cormen, Leiserson, Rivest (Chapter 14). Basically this */
+/* makes the parent of x be to the left of x, x the parent of */
+/* its parent before the rotation and fixes other pointers */
+/* accordingly. Also updates the maxHigh fields of x and y */
+/* after rotation. */
+/***********************************************************************/
+
+void IntervalTree::LeftRotate(IntervalTreeNode* x) {
+ IntervalTreeNode* y;
+
+ /* I originally wrote this function to use the sentinel for */
+ /* nil to avoid checking for nil. However this introduces a */
+ /* very subtle bug because sometimes this function modifies */
+ /* the parent pointer of nil. This can be a problem if a */
+ /* function which calls LeftRotate also uses the nil sentinel */
+ /* and expects the nil sentinel's parent pointer to be unchanged */
+ /* after calling this function. For example, when DeleteFixUP */
+ /* calls LeftRotate it expects the parent pointer of nil to be */
+ /* unchanged. */
+
+ y=x->right;
+ x->right=y->left;
+
+ if (y->left != nil) y->left->parent=x; /* used to use sentinel here */
+ /* and do an unconditional assignment instead of testing for nil */
+
+ y->parent=x->parent;
+
+ /* instead of checking if x->parent is the root as in the book, we */
+ /* count on the root sentinel to implicitly take care of this case */
+ if( x == x->parent->left) {
+ x->parent->left=y;
+ } else {
+ x->parent->right=y;
+ }
+ y->left=x;
+ x->parent=y;
+
+ x->maxHigh=std::max(x->left->maxHigh,std::max(x->right->maxHigh,x->high));
+ y->maxHigh=std::max(x->maxHigh,std::max(y->right->maxHigh,y->high));
+#ifdef CHECK_INTERVAL_TREE_ASSUMPTIONS
+ CheckAssumptions();
+#elif defined(DEBUG_ASSERT)
+ assert(!nil->red,"nil not red in ITLeftRotate");
+ assert((nil->maxHigh=MIN_INT),
+ "nil->maxHigh != MIN_INT in ITLeftRotate");
+#endif
+}
+
+
+/***********************************************************************/
+/*  FUNCTION:  RightRotate */
+/**/
+/* INPUTS: node to rotate on */
+/**/
+/* OUTPUT: None */
+/**/
+/*  Modifies Input: this, y */
+/**/
+/* EFFECTS: Rotates as described in _Introduction_To_Algorithms by */
+/* Cormen, Leiserson, Rivest (Chapter 14). Basically this */
+/* makes the parent of x be to the left of x, x the parent of */
+/* its parent before the rotation and fixes other pointers */
+/* accordingly. Also updates the maxHigh fields of x and y */
+/* after rotation. */
+/***********************************************************************/
+
+
+void IntervalTree::RightRotate(IntervalTreeNode* y) {
+ IntervalTreeNode* x;
+
+ /* I originally wrote this function to use the sentinel for */
+ /* nil to avoid checking for nil. However this introduces a */
+ /* very subtle bug because sometimes this function modifies */
+ /* the parent pointer of nil. This can be a problem if a */
+ /* function which calls LeftRotate also uses the nil sentinel */
+ /* and expects the nil sentinel's parent pointer to be unchanged */
+ /* after calling this function. For example, when DeleteFixUP */
+ /* calls LeftRotate it expects the parent pointer of nil to be */
+ /* unchanged. */
+
+ x=y->left;
+ y->left=x->right;
+
+ if (nil != x->right) x->right->parent=y; /*used to use sentinel here */
+ /* and do an unconditional assignment instead of testing for nil */
+
+ /* instead of checking if x->parent is the root as in the book, we */
+ /* count on the root sentinel to implicitly take care of this case */
+ x->parent=y->parent;
+ if( y == y->parent->left) {
+ y->parent->left=x;
+ } else {
+ y->parent->right=x;
+ }
+ x->right=y;
+ y->parent=x;
+
+ y->maxHigh=std::max(y->left->maxHigh,std::max(y->right->maxHigh,y->high));
+ x->maxHigh=std::max(x->left->maxHigh,std::max(y->maxHigh,x->high));
+#ifdef CHECK_INTERVAL_TREE_ASSUMPTIONS
+ CheckAssumptions();
+#elif defined(DEBUG_ASSERT)
+ assert(!nil->red,"nil not red in ITRightRotate");
+ assert((nil->maxHigh=MIN_INT),
+ "nil->maxHigh != MIN_INT in ITRightRotate");
+#endif
+}
+
+/***********************************************************************/
+/* FUNCTION: TreeInsertHelp */
+/**/
+/* INPUTS: z is the node to insert */
+/**/
+/* OUTPUT: none */
+/**/
+/* Modifies Input: this, z */
+/**/
+/* EFFECTS: Inserts z into the tree as if it were a regular binary tree */
+/* using the algorithm described in _Introduction_To_Algorithms_ */
+/* by Cormen et al. This function is only intended to be called */
+/* by the InsertTree function and not by the user */
+/***********************************************************************/
+
+void IntervalTree::TreeInsertHelp(IntervalTreeNode* z) {
+ /* This function should only be called by InsertITTree (see above) */
+ IntervalTreeNode* x;
+ IntervalTreeNode* y;
+
+ z->left=z->right=nil;
+ y=root;
+ x=root->left;
+ while( x != nil) {
+ y=x;
+ if ( x->key > z->key) {
+ x=x->left;
+ } else { /* x->key <= z->key */
+ x=x->right;
+ }
+ }
+ z->parent=y;
+ if ( (y == root) ||
+ (y->key > z->key) ) {
+ y->left=z;
+ } else {
+ y->right=z;
+ }
+
+
+#if defined(DEBUG_ASSERT)
+ assert(!nil->red,"nil not red in ITTreeInsertHelp");
+ assert((nil->maxHigh=MIN_INT),
+ "nil->maxHigh != MIN_INT in ITTreeInsertHelp");
+#endif
+}
+
+
+/***********************************************************************/
+/* FUNCTION: FixUpMaxHigh */
+/**/
+/* INPUTS: x is the node to start from*/
+/**/
+/* OUTPUT: none */
+/**/
+/* Modifies Input: this */
+/**/
+/* EFFECTS: Travels up to the root fixing the maxHigh fields after */
+/* an insertion or deletion */
+/***********************************************************************/
+
+void IntervalTree::FixUpMaxHigh(IntervalTreeNode * x) {
+ while(x != root) {
+ x->maxHigh=std::max(x->high,std::max(x->left->maxHigh,x->right->maxHigh));
+ x=x->parent;
+ }
+#ifdef CHECK_INTERVAL_TREE_ASSUMPTIONS
+ CheckAssumptions();
+#endif
+}
+
+/* Before calling InsertNode the node x should have its key set */
+
+/***********************************************************************/
+/* FUNCTION: InsertNode */
+/**/
+/* INPUTS: newInterval is the interval to insert*/
+/**/
+/* OUTPUT: This function returns a pointer to the newly inserted node */
+/* which is guaranteed to be valid until this node is deleted. */
+/* What this means is if another data structure stores this */
+/* pointer then the tree does not need to be searched when this */
+/* is to be deleted. */
+/**/
+/* Modifies Input: tree */
+/**/
+/*  EFFECTS:  Creates a new node which contains the appropriate key and */
+/* info pointers and inserts it into the tree. */
+/***********************************************************************/
+
+IntervalTreeNode * IntervalTree::Insert(Interval * newInterval)
+{
+ IntervalTreeNode * y;
+ IntervalTreeNode * x;
+ IntervalTreeNode * newNode;
+
+ x = new IntervalTreeNode(newInterval);
+ TreeInsertHelp(x);
+ FixUpMaxHigh(x->parent);
+ newNode = x;
+ x->red=1;
+ while(x->parent->red) { /* use sentinel instead of checking for root */
+ if (x->parent == x->parent->parent->left) {
+ y=x->parent->parent->right;
+ if (y->red) {
+ x->parent->red=0;
+ y->red=0;
+ x->parent->parent->red=1;
+ x=x->parent->parent;
+ } else {
+ if (x == x->parent->right) {
+ x=x->parent;
+ LeftRotate(x);
+ }
+ x->parent->red=0;
+ x->parent->parent->red=1;
+ RightRotate(x->parent->parent);
+ }
+ } else { /* case for x->parent == x->parent->parent->right */
+ /* this part is just like the section above with */
+ /* left and right interchanged */
+ y=x->parent->parent->left;
+ if (y->red) {
+ x->parent->red=0;
+ y->red=0;
+ x->parent->parent->red=1;
+ x=x->parent->parent;
+ } else {
+ if (x == x->parent->left) {
+ x=x->parent;
+ RightRotate(x);
+ }
+ x->parent->red=0;
+ x->parent->parent->red=1;
+ LeftRotate(x->parent->parent);
+ }
+ }
+ }
+ root->left->red=0;
+
+#ifdef CHECK_INTERVAL_TREE_ASSUMPTIONS
+  CheckAssumptions();
+#elif defined(DEBUG_ASSERT)
+  assert(!nil->red,"nil not red in ITTreeInsert");
+  assert(!root->red,"root not red in ITTreeInsert");
+  assert((nil->maxHigh=MIN_INT),
+	 "nil->maxHigh != MIN_INT in ITTreeInsert");
+#endif
+  return(newNode);
+}
+
+/***********************************************************************/
+/* FUNCTION: GetSuccessorOf */
+/**/
+/* INPUTS: x is the node we want the successor of */
+/**/
+/* OUTPUT: This function returns the successor of x or NULL if no */
+/* successor exists. */
+/**/
+/* Modifies Input: none */
+/**/
+/* Note: uses the algorithm in _Introduction_To_Algorithms_ */
+/***********************************************************************/
+
+IntervalTreeNode * IntervalTree::GetSuccessorOf(IntervalTreeNode * x) const
+{
+ IntervalTreeNode* y;
+
+ if (nil != (y = x->right)) { /* assignment to y is intentional */
+ while(y->left != nil) { /* returns the minimum of the right subtree of x */
+ y=y->left;
+ }
+ return(y);
+ } else {
+ y=x->parent;
+ while(x == y->right) { /* sentinel used instead of checking for nil */
+ x=y;
+ y=y->parent;
+ }
+ if (y == root) return(nil);
+ return(y);
+ }
+}
+
+/***********************************************************************/
+/* FUNCTION: GetPredecessorOf */
+/**/
+/* INPUTS: x is the node to get predecessor of */
+/**/
+/* OUTPUT: This function returns the predecessor of x or NULL if no */
+/* predecessor exists. */
+/**/
+/* Modifies Input: none */
+/**/
+/* Note: uses the algorithm in _Introduction_To_Algorithms_ */
+/***********************************************************************/
+
+IntervalTreeNode * IntervalTree::GetPredecessorOf(IntervalTreeNode * x) const {
+ IntervalTreeNode* y;
+
+ if (nil != (y = x->left)) { /* assignment to y is intentional */
+ while(y->right != nil) { /* returns the maximum of the left subtree of x */
+ y=y->right;
+ }
+ return(y);
+ } else {
+ y=x->parent;
+ while(x == y->left) {
+ if (y == root) return(nil);
+ x=y;
+ y=y->parent;
+ }
+ return(y);
+ }
+}
+
+/***********************************************************************/
+/* FUNCTION: Print */
+/**/
+/* INPUTS: none */
+/**/
+/* OUTPUT: none */
+/**/
+/* EFFECTS: This function recursively prints the nodes of the tree */
+/* inorder. */
+/**/
+/* Modifies Input: none */
+/**/
+/* Note: This function should only be called from ITTreePrint */
+/***********************************************************************/
+
+void IntervalTreeNode::Print(IntervalTreeNode * nil,
+ IntervalTreeNode * root) const {
+ storedInterval->Print();
+ printf(", k=%i, h=%i, mH=%i",key,high,maxHigh);
+ printf(" l->key=");
+ if( left == nil) printf("NULL"); else printf("%i",left->key);
+ printf(" r->key=");
+ if( right == nil) printf("NULL"); else printf("%i",right->key);
+ printf(" p->key=");
+ if( parent == root) printf("NULL"); else printf("%i",parent->key);
+ printf(" red=%i\n",red);
+}
+
+void IntervalTree::TreePrintHelper( IntervalTreeNode* x) const {
+
+ if (x != nil) {
+ TreePrintHelper(x->left);
+ x->Print(nil,root);
+ TreePrintHelper(x->right);
+ }
+}
+
+IntervalTree::~IntervalTree() {
+ IntervalTreeNode * x = root->left;
+ vector<IntervalTreeNode *> stuffToFree;
+
+ if (x != nil) {
+ if (x->left != nil) {
+ stuffToFree.push_back(x->left);
+ }
+ if (x->right != nil) {
+ stuffToFree.push_back(x->right);
+ }
+ // delete x->storedInterval;
+ delete x;
+ while(! stuffToFree.empty() ) {
+ x = stuffToFree.back();
+ stuffToFree.pop_back();
+ if (x->left != nil) {
+ stuffToFree.push_back(x->left);
+ }
+ if (x->right != nil) {
+ stuffToFree.push_back(x->right);
+ }
+ // delete x->storedInterval;
+ delete x;
+ }
+ }
+ delete nil;
+ delete root;
+ delete[] recursionNodeStack;
+}
+
+
+/***********************************************************************/
+/* FUNCTION: Print */
+/**/
+/* INPUTS: none */
+/**/
+/* OUTPUT: none */
+/**/
+/* EFFECT: This function recursively prints the nodes of the tree */
+/* inorder. */
+/**/
+/* Modifies Input: none */
+/**/
+/***********************************************************************/
+
+void IntervalTree::Print() const {
+ TreePrintHelper(root->left);
+}
+
+/***********************************************************************/
+/* FUNCTION: DeleteFixUp */
+/**/
+/* INPUTS: x is the child of the spliced */
+/* out node in DeleteNode. */
+/**/
+/* OUTPUT: none */
+/**/
+/* EFFECT: Performs rotations and changes colors to restore red-black */
+/* properties after a node is deleted */
+/**/
+/* Modifies Input: this, x */
+/**/
+/* The algorithm from this function is from _Introduction_To_Algorithms_ */
+/***********************************************************************/
+
+void IntervalTree::DeleteFixUp(IntervalTreeNode* x) {
+ IntervalTreeNode * w;
+ IntervalTreeNode * rootLeft = root->left;
+
+ while( (!x->red) && (rootLeft != x)) {
+ if (x == x->parent->left) {
+ w=x->parent->right;
+ if (w->red) {
+ w->red=0;
+ x->parent->red=1;
+ LeftRotate(x->parent);
+ w=x->parent->right;
+ }
+ if ( (!w->right->red) && (!w->left->red) ) {
+ w->red=1;
+ x=x->parent;
+ } else {
+ if (!w->right->red) {
+ w->left->red=0;
+ w->red=1;
+ RightRotate(w);
+ w=x->parent->right;
+ }
+ w->red=x->parent->red;
+ x->parent->red=0;
+ w->right->red=0;
+ LeftRotate(x->parent);
+ x=rootLeft; /* this is to exit while loop */
+ }
+    } else { /* the code below has left and right switched from above */
+ w=x->parent->left;
+ if (w->red) {
+ w->red=0;
+ x->parent->red=1;
+ RightRotate(x->parent);
+ w=x->parent->left;
+ }
+ if ( (!w->right->red) && (!w->left->red) ) {
+ w->red=1;
+ x=x->parent;
+ } else {
+ if (!w->left->red) {
+ w->right->red=0;
+ w->red=1;
+ LeftRotate(w);
+ w=x->parent->left;
+ }
+ w->red=x->parent->red;
+ x->parent->red=0;
+ w->left->red=0;
+ RightRotate(x->parent);
+ x=rootLeft; /* this is to exit while loop */
+ }
+ }
+ }
+ x->red=0;
+
+#ifdef CHECK_INTERVAL_TREE_ASSUMPTIONS
+ CheckAssumptions();
+#elif defined(DEBUG_ASSERT)
+ assert(!nil->red,"nil not black in ITDeleteFixUp");
+ assert((nil->maxHigh=MIN_INT),
+ "nil->maxHigh != MIN_INT in ITDeleteFixUp");
+#endif
+}
+
+
+/***********************************************************************/
+/* FUNCTION: DeleteNode */
+/**/
+/* INPUTS: tree is the tree to delete node z from */
+/**/
+/* OUTPUT: returns the Interval stored at deleted node */
+/**/
+/*    EFFECT:  Deletes z from the tree but doesn't call its destructor. */
+/* Then calls FixUpMaxHigh to fix maxHigh fields then calls */
+/* ITDeleteFixUp to restore red-black properties */
+/**/
+/* Modifies Input: z */
+/**/
+/* The algorithm from this function is from _Introduction_To_Algorithms_ */
+/***********************************************************************/
+
+Interval * IntervalTree::DeleteNode(IntervalTreeNode * z){
+ IntervalTreeNode* y;
+ IntervalTreeNode* x;
+ Interval * returnValue = z->storedInterval;
+
+ y= ((z->left == nil) || (z->right == nil)) ? z : GetSuccessorOf(z);
+ x= (y->left == nil) ? y->right : y->left;
+ if (root == (x->parent = y->parent)) { /* assignment of y->p to x->p is intentional */
+ root->left=x;
+ } else {
+ if (y == y->parent->left) {
+ y->parent->left=x;
+ } else {
+ y->parent->right=x;
+ }
+ }
+ if (y != z) { /* y should not be nil in this case */
+
+#ifdef DEBUG_ASSERT
+ assert( (y!=nil),"y is nil in DeleteNode \n");
+#endif
+ /* y is the node to splice out and x is its child */
+
+ y->maxHigh = MIN_INT;
+ y->left=z->left;
+ y->right=z->right;
+ y->parent=z->parent;
+ z->left->parent=z->right->parent=y;
+ if (z == z->parent->left) {
+ z->parent->left=y;
+ } else {
+ z->parent->right=y;
+ }
+ FixUpMaxHigh(x->parent);
+ if (!(y->red)) {
+ y->red = z->red;
+ DeleteFixUp(x);
+ } else
+ y->red = z->red;
+ delete z;
+#ifdef CHECK_INTERVAL_TREE_ASSUMPTIONS
+ CheckAssumptions();
+#elif defined(DEBUG_ASSERT)
+ assert(!nil->red,"nil not black in ITDelete");
+ assert((nil->maxHigh=MIN_INT),"nil->maxHigh != MIN_INT in ITDelete");
+#endif
+ } else {
+ FixUpMaxHigh(x->parent);
+ if (!(y->red)) DeleteFixUp(x);
+ delete y;
+#ifdef CHECK_INTERVAL_TREE_ASSUMPTIONS
+ CheckAssumptions();
+#elif defined(DEBUG_ASSERT)
+ assert(!nil->red,"nil not black in ITDelete");
+ assert((nil->maxHigh=MIN_INT),"nil->maxHigh != MIN_INT in ITDelete");
+#endif
+ }
+ return returnValue;
+}
+
+
+/***********************************************************************/
+/* FUNCTION: Overlap */
+/**/
+/* INPUTS: [a1,a2] and [b1,b2] are the low and high endpoints of two */
+/* closed intervals. */
+/**/
+/*  OUTPUT:  1 if the intervals overlap, and 0 otherwise */
+/**/
+/* Modifies Input: none */
+/**/
+/* EFFECT: returns 1 if the intervals overlap, and 0 otherwise */
+/***********************************************************************/
+
+int Overlap(int a1, int a2, int b1, int b2) {
+ if (a1 <= b1) {
+ return( (b1 <= a2) );
+ } else {
+ return( (a1 <= b2) );
+ }
+}
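+
+/* Quick illustration (editor's note): Overlap(1, 5, 4, 9) should return 1, because the closed
+ * intervals [1,5] and [4,9] share the points 4..5, while Overlap(1, 3, 4, 9) should return 0. */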
+
+
+/***********************************************************************/
+/* FUNCTION: Enumerate */
+/**/
+/* INPUTS: tree is the tree to look for intervals overlapping the */
+/* closed interval [low,high] */
+/**/
+/* OUTPUT: stack containing pointers to the nodes overlapping */
+/* [low,high] */
+/**/
+/* Modifies Input: none */
+/**/
+/* EFFECT: Returns a stack containing pointers to nodes containing */
+/* intervals which overlap [low,high] in O(max(N,k*log(N))) */
+/* where N is the number of intervals in the tree and k is */
+/* the number of overlapping intervals */
+/**/
+/* Note: This basic idea for this function comes from the */
+/* _Introduction_To_Algorithms_ book by Cormen et al, but */
+/* modifications were made to return all overlapping intervals */
+/* instead of just the first overlapping interval as in the */
+/* book. The natural way to do this would require recursive */
+/* calls of a basic search function. I translated the */
+/*    recursive version into an iterative version with a stack */
+/* as described below. */
+/***********************************************************************/
+
+
+
+/* The basic idea for the function below is to take the IntervalSearch */
+/* function from the book and modify to find all overlapping intervals */
+/* instead of just one. This means that any time we take the left */
+/* branch down the tree we must also check the right branch if and only if */
+/* we find an overlapping interval in that left branch. Note this is a */
+/* recursive condition because if we go left at the root then go left */
+/* again at the first left child and find an overlap in the left subtree */
+/* of the left child of root we must recursively check the right subtree */
+/* of the left child of root as well as the right child of root. */
+
+vector<void *> IntervalTree::Enumerate(int low, int high) {
+ vector<void *> enumResultStack;
+ IntervalTreeNode* x=root->left;
+ int stuffToDo = (x != nil);
+
+ // Possible speed up: add min field to prune right searches //
+
+#ifdef DEBUG_ASSERT
+ assert((recursionNodeStackTop == 1),
+ "recursionStack not empty when entering IntervalTree::Enumerate");
+#endif
+ currentParent = 0;
+
+ while(stuffToDo) {
+ if (Overlap(low,high,x->key,x->high) ) {
+ enumResultStack.push_back(x->storedInterval);
+ recursionNodeStack[currentParent].tryRightBranch=true;
+ }
+ if(x->left->maxHigh >= low) { // implies x != nil
+ if ( recursionNodeStackTop == recursionNodeStackSize ) {
+ recursionNodeStackSize *= 2;
+ recursionNodeStack = (it_recursion_node *)
+ realloc(recursionNodeStack,
+ recursionNodeStackSize * sizeof(it_recursion_node));
+      if (recursionNodeStack == NULL)
+	  assert(!"realloc failed in IntervalTree::Enumerate\n");
+ }
+ recursionNodeStack[recursionNodeStackTop].start_node = x;
+ recursionNodeStack[recursionNodeStackTop].tryRightBranch = 0;
+ recursionNodeStack[recursionNodeStackTop].parentIndex = currentParent;
+ currentParent = recursionNodeStackTop++;
+ x = x->left;
+ } else {
+ x = x->right;
+ }
+ stuffToDo = (x != nil);
+ while( (!stuffToDo) && (recursionNodeStackTop > 1) ) {
+ if(recursionNodeStack[--recursionNodeStackTop].tryRightBranch) {
+ x=recursionNodeStack[recursionNodeStackTop].start_node->right;
+ currentParent=recursionNodeStack[recursionNodeStackTop].parentIndex;
+ recursionNodeStack[currentParent].tryRightBranch=true;
+ stuffToDo = ( x != nil);
+ }
+ }
+ }
+#ifdef DEBUG_ASSERT
+ assert((recursionNodeStackTop == 1),
+ "recursionStack not empty when exiting IntervalTree::Enumerate");
+#endif
+ return enumResultStack;
+}
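+
+/* Illustrative usage sketch (editor's note): after wrapping the payload in a user-defined
+ * Interval subclass and inserting it (see test2.cc in this directory), all stored intervals
+ * overlapping a closed query interval can be retrieved as:
+ *
+ *     std::vector<void *> hits = tree.Enumerate(low, high);
+ *     // each element is the Interval* that was passed to Insert(), returned as void*
+ */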
+
+
+
+int IntervalTree::CheckMaxHighFieldsHelper(IntervalTreeNode * y,
+ const int currentHigh,
+ int match) const
+{
+ if (y != nil) {
+ match = CheckMaxHighFieldsHelper(y->left,currentHigh,match) ?
+ 1 : match;
+ assert(y->high <= currentHigh);
+ if (y->high == currentHigh)
+ match = 1;
+ match = CheckMaxHighFieldsHelper(y->right,currentHigh,match) ?
+ 1 : match;
+ }
+ return match;
+}
+
+
+
+/* Make sure the maxHigh fields for everything makes sense. *
+ * If something is wrong, print a warning and exit */
+void IntervalTree::CheckMaxHighFields(IntervalTreeNode * x) const {
+ if (x != nil) {
+ CheckMaxHighFields(x->left);
+    if(!(CheckMaxHighFieldsHelper(x,x->maxHigh,0) > 0)) {
+      assert(!"error found in CheckMaxHighFields.\n");
+    }
+ CheckMaxHighFields(x->right);
+ }
+}
+
+void IntervalTree::CheckAssumptions() const {
+ assert(nil->key == MIN_INT);
+ assert(nil->high == MIN_INT);
+ assert(nil->maxHigh == MIN_INT);
+ assert(root->key == MAX_INT);
+ assert(root->high == MAX_INT);
+ assert(root->maxHigh == MAX_INT);
+ assert(nil->storedInterval == NULL);
+ assert(root->storedInterval == NULL);
+ assert(nil->red == 0);
+ assert(root->red == 0);
+ CheckMaxHighFields(root->left);
+}
+
+
+
diff --git a/src/2geom/intervaltree/test2.cc b/src/2geom/intervaltree/test2.cc
new file mode 100644
index 0000000..bce448e
--- /dev/null
+++ b/src/2geom/intervaltree/test2.cc
@@ -0,0 +1,74 @@
+#include <iostream>
+
+// Nathan Hurst and Emin Martinian, licensed LGPL and MPL with permission
+
+
+#include "interval_tree.h"
+
+class SimpleInterval : public Interval {
+public:
+ SimpleInterval() :
+ _low(0),
+ _high(0),
+ _node(NULL)
+ {}
+ SimpleInterval(const int low,const int high)
+ :_low(low),
+ _high(high),
+ _node(NULL)
+ { }
+
+ int GetLowPoint() const { return _low;}
+ int GetHighPoint() const { return _high;}
+ IntervalTreeNode * GetNode() { return _node;}
+ void SetNode(IntervalTreeNode * node) {_node = node;}
+ virtual void Print() const {
+ printf("(%d, %d)", _low, _high);
+ }
+protected:
+ int _low;
+ int _high;
+ IntervalTreeNode * _node;
+
+};
+
+using namespace std;
+
+#include <stdlib.h>
+#include <time.h>
+
+int main() {
+ const int N = 1L<<24;
+ SimpleInterval *x = new SimpleInterval[N];
+ for(int i = 0; i < N; i++) {
+ x[i] = SimpleInterval(random(), random());
+ }
+
+ cout << "sizeof(SimpleInterval)" << sizeof(SimpleInterval) << endl;
+ cout << "sizeof(IntervalTreeNode)" << sizeof(IntervalTreeNode) << endl;
+ cout << "sizeof(it_recursion_node)" << sizeof(it_recursion_node) << endl;
+ cout << "sizeof(IntervalTree)" << sizeof(IntervalTree) << endl;
+
+ IntervalTree itree;
+ int onn = 0;
+ for(int nn = 1; nn < N; nn*=2) {
+ for(int i = onn; i < nn; i++) {
+ itree.Insert(&x[i]);
+ }
+ onn = nn;
+ clock_t s = clock();
+
+ int iters = 0;
+ int outputs = 0;
+ while(clock() - s < CLOCKS_PER_SEC/4) {
+ vector<void *> n = itree.Enumerate(random(), random()) ;
+ outputs += n.size();
+ //cout << n.size() << endl;
+ iters++;
+ }
+ clock_t e = clock();
+ double total = double(e - s)/(CLOCKS_PER_SEC);
+ cout << total << " " << outputs << " " << total/outputs << " " << nn << endl;
+ }
+ //itree.Print();
+}
diff --git a/src/2geom/line.cpp b/src/2geom/line.cpp
new file mode 100644
index 0000000..3db3039
--- /dev/null
+++ b/src/2geom/line.cpp
@@ -0,0 +1,610 @@
+/*
+ * Infinite Straight Line
+ *
+ * Copyright 2008 Marco Cecchetti <mrcekets at gmail.com>
+ * Nathan Hurst
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+#include <algorithm>
+#include <optional>
+#include <2geom/line.h>
+#include <2geom/math-utils.h>
+
+namespace Geom
+{
+
+/**
+ * @class Line
+ * @brief Infinite line on a plane.
+ *
+ * A line is specified as two points through which it passes. Lines can be interpreted as functions
+ * \f$ f: (-\infty, \infty) \to \mathbb{R}^2\f$. Zero corresponds to the first (origin) point,
+ * one corresponds to the second (final) point. All other points are computed as a linear
+ * interpolation between those two: \f$p = (1-t) a + t b\f$. Many such functions have the same
+ * image and therefore represent the same lines; for example, adding \f$b-a\f$ to both points
+ * yields the same line.
+ *
+ * 2Geom deliberately allows the same line to be represented in many ways, because forcing a
+ * single canonical representation would lead to precision loss. For example, a line from
+ * (1e30, 1e30) to (10,0) would not evaluate exactly to (10,0) at time 1 if it were stored as
+ * an origin and a normalized versor, or as an origin and an angle.
+ *
+ * @ingroup Primitives
+ */
+
+/** @brief Set the line by solving the line equation.
+ * A line is a set of points that satisfies the line equation
+ * \f$Ax + By + C = 0\f$. This function changes the line so that its points
+ * satisfy the line equation with the given coefficients. */
+void Line::setCoefficients (Coord a, Coord b, Coord c)
+{
+ // degenerate case
+ if (a == 0 && b == 0) {
+ if (c != 0) {
+ THROW_LOGICALERROR("the passed coefficients give the empty set");
+ }
+ _initial = Point(0,0);
+ _final = Point(0,0);
+ return;
+ }
+
+ // The way final / initial points are set based on coefficients is somewhat unusual.
+ // This is done to make sure that calling coefficients() will give back
+ // (almost) the same values.
+
+    // a == 0: the equation reduces to B*y + C = 0 (a horizontal line)
+ if (a == 0) {
+ // b must be nonzero
+ _initial = Point(-b/2, -c / b);
+ _final = _initial;
+ _final[X] = b/2;
+ return;
+ }
+
+    // b == 0: the equation reduces to A*x + C = 0 (a vertical line)
+ if (b == 0) {
+ _initial = Point(-c / a, a/2);
+ _final = _initial;
+ _final[Y] = -a/2;
+ return;
+ }
+
+ // This gives reasonable results regardless of the magnitudes of a, b and c.
+ _initial = Point(-b/2,a/2);
+ _final = Point(b/2,-a/2);
+
+ Point offset(-c/(2*a), -c/(2*b));
+
+ _initial += offset;
+ _final += offset;
+}
+
+void Line::coefficients(Coord &a, Coord &b, Coord &c) const
+{
+ Point v = vector().cw();
+ a = v[X];
+ b = v[Y];
+ c = cross(_initial, _final);
+}
+
+/** @brief Get the implicit line equation coefficients.
+ * Note that conversion to implicit form always causes loss of
+ * precision when dealing with lines that start far from the origin
+ * and end very close to it. It is recommended to normalize the line
+ * before converting it to implicit form.
+ * @return Vector with three values corresponding to the A, B and C
+ * coefficients of the line equation for this line. */
+std::vector<Coord> Line::coefficients() const
+{
+ std::vector<Coord> c(3);
+ coefficients(c[0], c[1], c[2]);
+ return c;
+}
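+
+/* Illustrative round-trip sketch (editor's note): setting the line x + 2y - 4 = 0 and reading
+ * the coefficients back should give values proportional to the originals:
+ *
+ *     Geom::Line l;
+ *     l.setCoefficients(1, 2, -4);
+ *     Geom::Coord a, b, c;
+ *     l.coefficients(a, b, c);   // a : b : c remains proportional to 1 : 2 : -4
+ */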
+
+/** @brief Find intersection with an axis-aligned line.
+ * @param v Coordinate of the axis-aligned line
+ * @param d Which axis the coordinate is on. X means a vertical line, Y means a horizontal line.
+ * @return Time values at which this line intersects the query line. */
+std::vector<Coord> Line::roots(Coord v, Dim2 d) const {
+ std::vector<Coord> result;
+ Coord r = root(v, d);
+ if (std::isfinite(r)) {
+ result.push_back(r);
+ }
+ return result;
+}
+
+Coord Line::root(Coord v, Dim2 d) const
+{
+ assert(d == X || d == Y);
+ Point vs = vector();
+ if (vs[d] != 0) {
+ return (v - _initial[d]) / vs[d];
+ } else {
+ return nan("");
+ }
+}
+
+std::optional<LineSegment> Line::clip(Rect const &r) const
+{
+ Point v = vector();
+ // handle horizontal and vertical lines first,
+ // since the root-based code below will break for them
+ for (unsigned i = 0; i < 2; ++i) {
+ Dim2 d = (Dim2) i;
+ Dim2 o = other_dimension(d);
+ if (v[d] != 0) continue;
+ if (r[d].contains(_initial[d])) {
+ Point a, b;
+ a[o] = r[o].min();
+ b[o] = r[o].max();
+ a[d] = b[d] = _initial[d];
+ if (v[o] > 0) {
+ return LineSegment(a, b);
+ } else {
+ return LineSegment(b, a);
+ }
+ } else {
+ return std::nullopt;
+ }
+ }
+
+ Interval xpart(root(r[X].min(), X), root(r[X].max(), X));
+ Interval ypart(root(r[Y].min(), Y), root(r[Y].max(), Y));
+ if (!xpart.isFinite() || !ypart.isFinite()) {
+ return std::nullopt;
+ }
+
+ OptInterval common = xpart & ypart;
+ if (common) {
+ Point p1 = pointAt(common->min()), p2 = pointAt(common->max());
+ LineSegment result(r.clamp(p1), r.clamp(p2));
+ return result;
+ } else {
+ return std::nullopt;
+ }
+
+ /* old implementation using coefficients:
+
+ if (fabs(b) > fabs(a)) {
+ p0 = Point(r[X].min(), (-c - a*r[X].min())/b);
+ if (p0[Y] < r[Y].min())
+ p0 = Point((-c - b*r[Y].min())/a, r[Y].min());
+ if (p0[Y] > r[Y].max())
+ p0 = Point((-c - b*r[Y].max())/a, r[Y].max());
+ p1 = Point(r[X].max(), (-c - a*r[X].max())/b);
+ if (p1[Y] < r[Y].min())
+ p1 = Point((-c - b*r[Y].min())/a, r[Y].min());
+ if (p1[Y] > r[Y].max())
+ p1 = Point((-c - b*r[Y].max())/a, r[Y].max());
+ } else {
+ p0 = Point((-c - b*r[Y].min())/a, r[Y].min());
+ if (p0[X] < r[X].min())
+ p0 = Point(r[X].min(), (-c - a*r[X].min())/b);
+ if (p0[X] > r[X].max())
+ p0 = Point(r[X].max(), (-c - a*r[X].max())/b);
+ p1 = Point((-c - b*r[Y].max())/a, r[Y].max());
+ if (p1[X] < r[X].min())
+ p1 = Point(r[X].min(), (-c - a*r[X].min())/b);
+ if (p1[X] > r[X].max())
+ p1 = Point(r[X].max(), (-c - a*r[X].max())/b);
+ }
+ return LineSegment(p0, p1); */
+}
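+
+/* Clipping sketch (illustrative; assumes the two-point Line and Rect
+ * constructors): a diagonal line clipped to the square [0,10]x[0,10]:
+ *
+ *     Line l(Point(-5, -5), Point(15, 15));
+ *     Rect r(Point(0, 0), Point(10, 10));
+ *     if (auto seg = l.clip(r)) {
+ *         // seg->initialPoint() ~ (0, 0), seg->finalPoint() ~ (10, 10)
+ *     }
+ */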
+
+/** @brief Get a time value corresponding to a point.
+ * @param p Point on the line. If the point is not on the line,
+ * the returned value will be meaningless.
+ * @return Time value t such that \f$f(t) = p\f$.
+ * @see timeAtProjection */
+Coord Line::timeAt(Point const &p) const
+{
+ Point v = vector();
+ // degenerate case
+ if (v[X] == 0 && v[Y] == 0) {
+ return 0;
+ }
+
+ // use the coordinate that will give better precision
+ if (fabs(v[X]) > fabs(v[Y])) {
+ return (p[X] - _initial[X]) / v[X];
+ } else {
+ return (p[Y] - _initial[Y]) / v[Y];
+ }
+}
+
+/** @brief Create a transformation that maps one line to another.
+ * This will return a transformation \f$A\f$ such that
+ * \f$L_1(t) \cdot A = L_2(t)\f$, where \f$L_1\f$ is this line
+ * and \f$L_2\f$ is the line passed as the parameter. The returned
+ * transformation will preserve angles. */
+Affine Line::transformTo(Line const &other) const
+{
+ Affine result = Translate(-_initial);
+ result *= Rotate(angle_between(vector(), other.vector()));
+ result *= Scale(other.vector().length() / vector().length());
+ result *= Translate(other._initial);
+ return result;
+}
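+
+/* Usage sketch for transformTo() (illustrative): the returned transform maps
+ * corresponding time values of the two lines onto each other, L1(t) * A = L2(t):
+ *
+ *     Line l1(Point(0, 0), Point(1, 0));
+ *     Line l2(Point(2, 3), Point(4, 7));
+ *     Affine A = l1.transformTo(l2);
+ *     Point p = l1.pointAt(0.5) * A;   // ~ l2.pointAt(0.5) == (3, 5)
+ */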
+
+std::vector<ShapeIntersection> Line::intersect(Line const &other) const
+{
+ std::vector<ShapeIntersection> result;
+
+ Point v1 = vector();
+ Point v2 = other.vector();
+ Coord cp = cross(v1, v2);
+ if (cp == 0) return result;
+
+ Point odiff = other.initialPoint() - initialPoint();
+ Coord t1 = cross(odiff, v2) / cp;
+ Coord t2 = cross(odiff, v1) / cp;
+ result.emplace_back(*this, other, t1, t2);
+ return result;
+}
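+
+/* Intersection sketch (illustrative): two transversal lines crossing at (1, 1);
+ * the time on this line is stored in .first and the time on the other line
+ * in .second, matching the filtering helpers below:
+ *
+ *     Line a(Point(0, 0), Point(2, 2));
+ *     Line b(Point(0, 2), Point(2, 0));
+ *     auto xs = a.intersect(b);
+ *     if (!xs.empty()) {
+ *         Point p = a.pointAt(xs[0].first);   // ~ (1, 1)
+ *     }
+ */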
+
+std::vector<ShapeIntersection> Line::intersect(Ray const &r) const
+{
+ Line other(r);
+ std::vector<ShapeIntersection> result = intersect(other);
+ filter_ray_intersections(result, false, true);
+ return result;
+}
+
+std::vector<ShapeIntersection> Line::intersect(LineSegment const &ls) const
+{
+ Line other(ls);
+ std::vector<ShapeIntersection> result = intersect(other);
+ filter_line_segment_intersections(result, false, true);
+ return result;
+}
+
+
+
+void filter_line_segment_intersections(std::vector<ShapeIntersection> &xs, bool a, bool b)
+{
+ Interval unit(0, 1);
+ std::vector<ShapeIntersection>::reverse_iterator i = xs.rbegin(), last = xs.rend();
+ while (i != last) {
+ if ((a && !unit.contains(i->first)) || (b && !unit.contains(i->second))) {
+ xs.erase((++i).base());
+ } else {
+ ++i;
+ }
+ }
+}
+
+void filter_ray_intersections(std::vector<ShapeIntersection> &xs, bool a, bool b)
+{
+ std::vector<ShapeIntersection>::reverse_iterator i = xs.rbegin(), last = xs.rend();
+ while (i != last) {
+ if ((a && i->first < 0) || (b && i->second < 0)) {
+ xs.erase((++i).base());
+ } else {
+ ++i;
+ }
+ }
+}
+
+namespace detail
+{
+
+inline
+OptCrossing intersection_impl(Point const &v1, Point const &o1,
+ Point const &v2, Point const &o2)
+{
+ Coord cp = cross(v1, v2);
+ if (cp == 0) return OptCrossing();
+
+ Point odiff = o2 - o1;
+
+ Crossing c;
+ c.ta = cross(odiff, v2) / cp;
+ c.tb = cross(odiff, v1) / cp;
+ return c;
+}
+
+
+OptCrossing intersection_impl(Ray const& r1, Line const& l2, unsigned int i)
+{
+ using std::swap;
+
+ OptCrossing crossing =
+ intersection_impl(r1.vector(), r1.origin(),
+ l2.vector(), l2.origin() );
+
+ if (crossing) {
+ if (crossing->ta < 0) {
+ return OptCrossing();
+ } else {
+ if (i != 0) {
+ swap(crossing->ta, crossing->tb);
+ }
+ return crossing;
+ }
+ }
+ if (are_near(r1.origin(), l2)) {
+ THROW_INFINITESOLUTIONS();
+ } else {
+ return OptCrossing();
+ }
+}
+
+
+OptCrossing intersection_impl( LineSegment const& ls1,
+ Line const& l2,
+ unsigned int i )
+{
+ using std::swap;
+
+ OptCrossing crossing =
+ intersection_impl(ls1.finalPoint() - ls1.initialPoint(),
+ ls1.initialPoint(),
+ l2.vector(),
+ l2.origin() );
+
+ if (crossing) {
+ if ( crossing->getTime(0) < 0
+ || crossing->getTime(0) > 1 )
+ {
+ return OptCrossing();
+ } else {
+ if (i != 0) {
+ swap((*crossing).ta, (*crossing).tb);
+ }
+ return crossing;
+ }
+ }
+ if (are_near(ls1.initialPoint(), l2)) {
+ THROW_INFINITESOLUTIONS();
+ } else {
+ return OptCrossing();
+ }
+}
+
+
+OptCrossing intersection_impl( LineSegment const& ls1,
+ Ray const& r2,
+ unsigned int i )
+{
+ using std::swap;
+
+ Point direction = ls1.finalPoint() - ls1.initialPoint();
+ OptCrossing crossing =
+ intersection_impl( direction,
+ ls1.initialPoint(),
+ r2.vector(),
+ r2.origin() );
+
+ if (crossing) {
+ if ( (crossing->getTime(0) < 0)
+ || (crossing->getTime(0) > 1)
+ || (crossing->getTime(1) < 0) )
+ {
+ return OptCrossing();
+ } else {
+ if (i != 0) {
+ swap(crossing->ta, crossing->tb);
+ }
+ return crossing;
+ }
+ }
+
+    if ( are_near(r2.origin(), ls1) ) {
+        // "crossing" is empty at this point (the parallel case), so the result
+        // has to be constructed explicitly instead of dereferencing it.
+        bool eqvs = (dot(direction, r2.vector()) > 0);
+        if ( are_near(ls1.initialPoint(), r2.origin()) && !eqvs) {
+            Crossing c;
+            c.ta = c.tb = 0;
+            return OptCrossing(c);
+        } else if ( are_near(ls1.finalPoint(), r2.origin()) && eqvs) {
+            Crossing c;
+            if (i == 0) {
+                c.ta = 1;
+                c.tb = 0;
+            } else {
+                c.ta = 0;
+                c.tb = 1;
+            }
+            return OptCrossing(c);
+ } else {
+ THROW_INFINITESOLUTIONS();
+ }
+ } else if ( are_near(ls1.initialPoint(), r2) ) {
+ THROW_INFINITESOLUTIONS();
+ } else {
+ OptCrossing no_crossing;
+ return no_crossing;
+ }
+}
+
+} // end namespace detail
+
+
+
+OptCrossing intersection(Line const& l1, Line const& l2)
+{
+ OptCrossing c = detail::intersection_impl(
+ l1.vector(), l1.origin(),
+ l2.vector(), l2.origin());
+
+ if (!c && distance(l1.origin(), l2) == 0) {
+ THROW_INFINITESOLUTIONS();
+ }
+ return c;
+}
+
+OptCrossing intersection(Ray const& r1, Ray const& r2)
+{
+ OptCrossing crossing =
+ detail::intersection_impl( r1.vector(), r1.origin(),
+ r2.vector(), r2.origin() );
+
+ if (crossing)
+ {
+ if ( crossing->ta < 0
+ || crossing->tb < 0 )
+ {
+ OptCrossing no_crossing;
+ return no_crossing;
+ }
+ else
+ {
+ return crossing;
+ }
+ }
+
+ if ( are_near(r1.origin(), r2) || are_near(r2.origin(), r1) )
+ {
+ if ( are_near(r1.origin(), r2.origin())
+ && !are_near(r1.vector(), r2.vector()) )
+ {
+            // "crossing" is empty here (parallel rays); build the result explicitly
+            // instead of dereferencing an empty OptCrossing.
+            Crossing c;
+            c.ta = c.tb = 0;
+            return OptCrossing(c);
+ }
+ else
+ {
+ THROW_INFINITESOLUTIONS();
+ }
+ }
+ else
+ {
+ OptCrossing no_crossing;
+ return no_crossing;
+ }
+}
+
+
+OptCrossing intersection( LineSegment const& ls1, LineSegment const& ls2 )
+{
+ Point direction1 = ls1.finalPoint() - ls1.initialPoint();
+ Point direction2 = ls2.finalPoint() - ls2.initialPoint();
+ OptCrossing crossing =
+ detail::intersection_impl( direction1,
+ ls1.initialPoint(),
+ direction2,
+ ls2.initialPoint() );
+
+ if (crossing)
+ {
+ if ( crossing->getTime(0) < 0
+ || crossing->getTime(0) > 1
+ || crossing->getTime(1) < 0
+ || crossing->getTime(1) > 1 )
+ {
+ OptCrossing no_crossing;
+ return no_crossing;
+ }
+ else
+ {
+ return crossing;
+ }
+ }
+
+ bool eqvs = (dot(direction1, direction2) > 0);
+ if ( are_near(ls2.initialPoint(), ls1) )
+ {
+ if ( are_near(ls1.initialPoint(), ls2.initialPoint()) && !eqvs )
+ {
+            // "crossing" is empty in these coincident-endpoint cases; construct it explicitly.
+            Crossing c;
+            c.ta = c.tb = 0;
+            return OptCrossing(c);
+ }
+ else if ( are_near(ls1.finalPoint(), ls2.initialPoint()) && eqvs )
+ {
+            Crossing c;
+            c.ta = 1;
+            c.tb = 0;
+            return OptCrossing(c);
+ }
+ else
+ {
+ THROW_INFINITESOLUTIONS();
+ }
+ }
+ else if ( are_near(ls2.finalPoint(), ls1) )
+ {
+ if ( are_near(ls1.finalPoint(), ls2.finalPoint()) && !eqvs )
+ {
+            Crossing c;
+            c.ta = c.tb = 1;
+            return OptCrossing(c);
+ }
+ else if ( are_near(ls1.initialPoint(), ls2.finalPoint()) && eqvs )
+ {
+            Crossing c;
+            c.ta = 0;
+            c.tb = 1;
+            return OptCrossing(c);
+ }
+ else
+ {
+ THROW_INFINITESOLUTIONS();
+ }
+ }
+ else
+ {
+ OptCrossing no_crossing;
+ return no_crossing;
+ }
+}
+
+Line make_angle_bisector_line(Line const& l1, Line const& l2)
+{
+ OptCrossing crossing;
+ try
+ {
+ crossing = intersection(l1, l2);
+ }
+ catch(InfiniteSolutions const &e)
+ {
+ return l1;
+ }
+ if (!crossing)
+ {
+ THROW_RANGEERROR("passed lines are parallel");
+ }
+ Point O = l1.pointAt(crossing->ta);
+ Point A = l1.pointAt(crossing->ta + 1);
+ double angle = angle_between(l1.vector(), l2.vector());
+ Point B = (angle > 0) ? l2.pointAt(crossing->tb + 1)
+ : l2.pointAt(crossing->tb - 1);
+
+ return make_angle_bisector_line(A, O, B);
+}
+
+
+
+
+} // end namespace Geom
+
+
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(substatement-open . 0))
+ indent-tabs-mode:nil
+ c-brace-offset:0
+ fill-column:99
+ End:
+ vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4 :
+*/
diff --git a/src/2geom/nearest-time.cpp b/src/2geom/nearest-time.cpp
new file mode 100644
index 0000000..e52251c
--- /dev/null
+++ b/src/2geom/nearest-time.cpp
@@ -0,0 +1,322 @@
+/** @file
+ * @brief Nearest time routines for D2<SBasis> and Piecewise<D2<SBasis>>
+ *//*
+ * Authors:
+ * Marco Cecchetti <mrcekets at gmail.com>
+ *
+ * Copyright 2007-2008 authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+
+#include <2geom/nearest-time.h>
+#include <algorithm>
+
+namespace Geom
+{
+
+Coord nearest_time(Point const &p, D2<Bezier> const &input, Coord from, Coord to)
+{
+ Interval domain(from, to);
+ bool partial = false;
+
+ if (domain.min() < 0 || domain.max() > 1) {
+ THROW_RANGEERROR("[from,to] interval out of bounds");
+ }
+
+ if (input.isConstant(0)) return from;
+
+ D2<Bezier> bez;
+ if (domain.min() != 0 || domain.max() != 1) {
+ bez = portion(input, domain) - p;
+ partial = true;
+ } else {
+ bez = input - p;
+ }
+
+ // find extrema of the function x(t)^2 + y(t)^2
+ // use the fact that (f^2)' = 2 f f'
+ // this reduces the order of the distance function by 1
+ D2<Bezier> deriv = derivative(bez);
+ std::vector<Coord> ts = (multiply(bez[X], deriv[X]) + multiply(bez[Y], deriv[Y])).roots();
+
+ Coord t = -1, mind = infinity();
+ for (double i : ts) {
+ Coord droot = L2sq(bez.valueAt(i));
+ if (droot < mind) {
+ mind = droot;
+ t = i;
+ }
+ }
+
+ // also check endpoints
+ Coord dinitial = L2sq(bez.at0());
+ Coord dfinal = L2sq(bez.at1());
+
+ if (dinitial < mind) {
+ mind = dinitial;
+ t = 0;
+ }
+ if (dfinal < mind) {
+ //mind = dfinal;
+ t = 1;
+ }
+
+ if (partial) {
+ t = domain.valueAt(t);
+ }
+ return t;
+}
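+
+/* Usage sketch (illustrative; assumes the two-coefficient Bezier constructor):
+ * the nearest time to (3, 4) on the straight segment from (0,0) to (10,0) is
+ * the projection onto it, i.e. t = 0.3:
+ *
+ *     D2<Bezier> seg(Bezier(0, 10), Bezier(0, 0));
+ *     Coord t = nearest_time(Point(3, 4), seg, 0, 1);   // ~ 0.3
+ */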
+
+////////////////////////////////////////////////////////////////////////////////
+// D2<SBasis> versions
+
+/*
+ * Return the parameter t of the point nearest to "p" on the portion of the
+ * curve "c" restricted to the interval [from, to].
+ * The curve derivative "dc" must be supplied by the caller.
+ * The function returns the first nearest time value to "p" that is found.
+ */
+
+double nearest_time(Point const& p,
+ D2<SBasis> const& c,
+ D2<SBasis> const& dc,
+ double from, double to )
+{
+ if ( from > to ) std::swap(from, to);
+ if ( from < 0 || to > 1 )
+ {
+ THROW_RANGEERROR("[from,to] interval out of bounds");
+ }
+ if (c.isConstant()) return from;
+ SBasis dd = dot(c - p, dc);
+ //std::cout << dd << std::endl;
+ std::vector<double> zeros = Geom::roots(dd);
+
+ double closest = from;
+ double min_dist_sq = L2sq(c(from) - p);
+ for (double zero : zeros)
+ {
+ double distsq = L2sq(c(zero) - p);
+        if ( min_dist_sq > distsq )
+ {
+ closest = zero;
+ min_dist_sq = distsq;
+ }
+ }
+ if ( min_dist_sq > L2sq( c(to) - p ) )
+ closest = to;
+ return closest;
+
+}
+
+/*
+ * Return the parameters t of all the points nearest to "p" on the portion of
+ * the curve "c" restricted to the interval [from, to].
+ * The curve derivative "dc" must be supplied by the caller.
+ */
+
+std::vector<double>
+all_nearest_times(Point const &p,
+ D2<SBasis> const &c,
+ D2<SBasis> const &dc,
+ double from, double to)
+{
+ if (from > to) {
+ std::swap(from, to);
+ }
+ if (from < 0 || to > 1) {
+ THROW_RANGEERROR("[from,to] interval out of bounds");
+ }
+
+ std::vector<double> result;
+ if (c.isConstant()) {
+ result.push_back(from);
+ return result;
+ }
+ SBasis dd = dot(c - p, dc);
+
+ std::vector<double> zeros = Geom::roots(dd);
+ std::vector<double> candidates;
+ candidates.push_back(from);
+ candidates.insert(candidates.end(), zeros.begin(), zeros.end());
+ candidates.push_back(to);
+ std::vector<double> distsq;
+ distsq.reserve(candidates.size());
+ for (double candidate : candidates) {
+ distsq.push_back(L2sq(c(candidate) - p));
+ }
+ unsigned closest = 0;
+ double dsq = distsq[0];
+ for (unsigned i = 1; i < candidates.size(); ++i) {
+ if (dsq > distsq[i]) {
+ closest = i;
+ dsq = distsq[i];
+ }
+ }
+ for (unsigned i = 0; i < candidates.size(); ++i) {
+ if (distsq[closest] == distsq[i]) {
+ result.push_back(candidates[i]);
+ }
+ }
+ return result;
+}
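+
+/* Usage sketch (illustrative): for a straight segment the projection is unique,
+ * so all_nearest_times() returns a single value, here t = 0.3 for the point
+ * (3, 4) and the segment from (0,0) to (10,0):
+ *
+ *     D2<SBasis> c(SBasis(Linear(0, 10)), SBasis(Linear(0, 0)));
+ *     D2<SBasis> dc = derivative(c);
+ *     std::vector<double> ts = all_nearest_times(Point(3, 4), c, dc, 0, 1);
+ */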
+
+
+////////////////////////////////////////////////////////////////////////////////
+// Piecewise< D2<SBasis> > versions
+
+
+double nearest_time(Point const &p,
+ Piecewise< D2<SBasis> > const &c,
+ double from, double to)
+{
+ if (from > to) std::swap(from, to);
+ if (from < c.cuts[0] || to > c.cuts[c.size()]) {
+ THROW_RANGEERROR("[from,to] interval out of bounds");
+ }
+
+ unsigned si = c.segN(from);
+ unsigned ei = c.segN(to);
+ if (si == ei) {
+ double nearest =
+ nearest_time(p, c[si], c.segT(from, si), c.segT(to, si));
+ return c.mapToDomain(nearest, si);
+ }
+
+ double t;
+ double nearest = nearest_time(p, c[si], c.segT(from, si));
+ unsigned int ni = si;
+ double dsq;
+ double mindistsq = distanceSq(p, c[si](nearest));
+ Rect bb;
+ for (unsigned i = si + 1; i < ei; ++i) {
+ bb = *bounds_fast(c[i]);
+ dsq = distanceSq(p, bb);
+ if ( mindistsq <= dsq ) continue;
+
+ t = nearest_time(p, c[i]);
+ dsq = distanceSq(p, c[i](t));
+ if (mindistsq > dsq) {
+ nearest = t;
+ ni = i;
+ mindistsq = dsq;
+ }
+ }
+ bb = *bounds_fast(c[ei]);
+ dsq = distanceSq(p, bb);
+ if (mindistsq > dsq) {
+ t = nearest_time(p, c[ei], 0, c.segT(to, ei));
+ dsq = distanceSq(p, c[ei](t));
+ if (mindistsq > dsq) {
+ nearest = t;
+ ni = ei;
+ }
+ }
+ return c.mapToDomain(nearest, ni);
+}
+
+std::vector<double>
+all_nearest_times(Point const &p,
+ Piecewise< D2<SBasis> > const &c,
+ double from, double to)
+{
+ if (from > to) {
+ std::swap(from, to);
+ }
+ if (from < c.cuts[0] || to > c.cuts[c.size()]) {
+ THROW_RANGEERROR("[from,to] interval out of bounds");
+ }
+
+ unsigned si = c.segN(from);
+ unsigned ei = c.segN(to);
+ if ( si == ei )
+ {
+ std::vector<double> all_nearest =
+ all_nearest_times(p, c[si], c.segT(from, si), c.segT(to, si));
+ for (double & i : all_nearest)
+ {
+ i = c.mapToDomain(i, si);
+ }
+ return all_nearest;
+ }
+ std::vector<double> all_t;
+ std::vector< std::vector<double> > all_np;
+ all_np.push_back( all_nearest_times(p, c[si], c.segT(from, si)) );
+ std::vector<unsigned> ni;
+ ni.push_back(si);
+ double dsq;
+ double mindistsq = distanceSq( p, c[si](all_np.front().front()) );
+ Rect bb;
+
+ for (unsigned i = si + 1; i < ei; ++i) {
+ bb = *bounds_fast(c[i]);
+ dsq = distanceSq(p, bb);
+ if ( mindistsq < dsq ) continue;
+ all_t = all_nearest_times(p, c[i]);
+ dsq = distanceSq( p, c[i](all_t.front()) );
+ if ( mindistsq > dsq )
+ {
+ all_np.clear();
+ all_np.push_back(all_t);
+ ni.clear();
+ ni.push_back(i);
+ mindistsq = dsq;
+ }
+ else if ( mindistsq == dsq )
+ {
+ all_np.push_back(all_t);
+ ni.push_back(i);
+ }
+ }
+ bb = *bounds_fast(c[ei]);
+ dsq = distanceSq(p, bb);
+ if (mindistsq >= dsq) {
+ all_t = all_nearest_times(p, c[ei], 0, c.segT(to, ei));
+ dsq = distanceSq( p, c[ei](all_t.front()) );
+ if (mindistsq > dsq) {
+ for (double & i : all_t) {
+ i = c.mapToDomain(i, ei);
+ }
+ return all_t;
+ } else if (mindistsq == dsq) {
+ all_np.push_back(all_t);
+ ni.push_back(ei);
+ }
+ }
+ std::vector<double> all_nearest;
+ for (unsigned i = 0; i < all_np.size(); ++i) {
+ for (unsigned int j = 0; j < all_np[i].size(); ++j) {
+ all_nearest.push_back( c.mapToDomain(all_np[i][j], ni[i]) );
+ }
+ }
+ all_nearest.erase(std::unique(all_nearest.begin(), all_nearest.end()),
+ all_nearest.end());
+ return all_nearest;
+}
+
+} // end namespace Geom
+
+
diff --git a/src/2geom/numeric/matrix.cpp b/src/2geom/numeric/matrix.cpp
new file mode 100644
index 0000000..98ff3b6
--- /dev/null
+++ b/src/2geom/numeric/matrix.cpp
@@ -0,0 +1,154 @@
+/*
+ * Matrix, MatrixView, ConstMatrixView classes wrap the gsl matrix routines;
+ * "views" mimic the semantic of C++ references: any operation performed
+ * on a "view" is actually performed on the "viewed object"
+ *
+ * Authors:
+ * Marco Cecchetti <mrcekets at gmail.com>
+ *
+ * Copyright 2008 authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+
+#include <2geom/numeric/matrix.h>
+#include <2geom/numeric/vector.h>
+
+
+namespace Geom { namespace NL {
+
+Vector operator*( detail::BaseMatrixImpl const& A,
+ detail::BaseVectorImpl const& v )
+{
+ assert(A.columns() == v.size());
+
+ Vector result(A.rows(), 0.0);
+ for (size_t i = 0; i < A.rows(); ++i)
+ for (size_t j = 0; j < A.columns(); ++j)
+ result[i] += A(i,j) * v[j];
+
+ return result;
+}
+
+Matrix operator*( detail::BaseMatrixImpl const& A,
+ detail::BaseMatrixImpl const& B )
+{
+ assert(A.columns() == B.rows());
+
+ Matrix C(A.rows(), B.columns(), 0.0);
+ for (size_t i = 0; i < C.rows(); ++i)
+ for (size_t j = 0; j < C.columns(); ++j)
+ for (size_t k = 0; k < A.columns(); ++k)
+ C(i,j) += A(i,k) * B(k, j);
+
+ return C;
+}
+
+Matrix pseudo_inverse(detail::BaseMatrixImpl const& A)
+{
+
+ Matrix U(A);
+ Matrix V(A.columns(), A.columns());
+ Vector s(A.columns());
+ gsl_vector* work = gsl_vector_alloc(A.columns());
+
+ gsl_linalg_SV_decomp( U.get_gsl_matrix(),
+ V.get_gsl_matrix(),
+ s.get_gsl_vector(),
+ work );
+
+ Matrix P(A.columns(), A.rows(), 0.0);
+
+ int sz = s.size();
+ while ( sz-- > 0 && s[sz] == 0 ) {}
+ ++sz;
+ if (sz == 0) return P;
+ VectorView sv(s, sz);
+
+ for (size_t i = 0; i < sv.size(); ++i)
+ {
+ VectorView v = V.column_view(i);
+ v.scale(1/sv[i]);
+ for (size_t h = 0; h < P.rows(); ++h)
+ for (size_t k = 0; k < P.columns(); ++k)
+ P(h,k) += V(h,i) * U(k,i);
+ }
+
+ return P;
+}
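+
+/* Usage sketch (illustrative): for an invertible matrix the pseudo-inverse
+ * coincides with the ordinary inverse, so diag(2, 4) gives diag(0.5, 0.25):
+ *
+ *     Matrix M(2, 2, 0.0);
+ *     M(0,0) = 2; M(1,1) = 4;
+ *     Matrix P = pseudo_inverse(M);   // P(0,0) ~ 0.5, P(1,1) ~ 0.25
+ */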
+
+
+double trace (detail::BaseMatrixImpl const& A)
+{
+ if (A.rows() != A.columns())
+ {
+ THROW_RANGEERROR ("NL::Matrix: computing trace: "
+ "rows() != columns()");
+ }
+ double t = 0;
+ for (size_t i = 0; i < A.rows(); ++i)
+ {
+ t += A(i,i);
+ }
+ return t;
+}
+
+
+double det (detail::BaseMatrixImpl const& A)
+{
+ if (A.rows() != A.columns())
+ {
+ THROW_RANGEERROR ("NL::Matrix: computing determinant: "
+ "rows() != columns()");
+ }
+
+ Matrix LU(A);
+ int s;
+ gsl_permutation * p = gsl_permutation_alloc(LU.rows());
+ gsl_linalg_LU_decomp (LU.get_gsl_matrix(), p, &s);
+
+ double t = 1;
+ for (size_t i = 0; i < LU.rows(); ++i)
+ {
+ t *= LU(i,i);
+ }
+
+ gsl_permutation_free(p);
+ return t;
+}
+
+
+} } // end namespaces
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/orphan-code/arc-length.cpp b/src/2geom/orphan-code/arc-length.cpp
new file mode 100644
index 0000000..3f72862
--- /dev/null
+++ b/src/2geom/orphan-code/arc-length.cpp
@@ -0,0 +1,292 @@
+/*
+ * arc-length.cpp
+ *
+ * Copyright 2006 Nathan Hurst <njh@mail.csse.monash.edu.au>
+ * Copyright 2006 Michael G. Sloan <mgsloan@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ */
+
+#include <2geom/arc-length.h>
+#include <2geom/bezier-utils.h>
+#include <2geom/polynomial.h>
+using namespace Geom;
+
+/** Calculates the length of a cubic element through subdivision.
+ * The 'tol' parameter is the maximum error allowed. This is used to subdivide the curve where necessary.
+ */
+double cubic_length_subdividing(Path::Elem const & e, double tol) {
+ Point v[3];
+ for(int i = 0; i < 3; i++)
+ v[i] = e[i+1] - e[0];
+ Point orth = v[2]; // unit normal to path line
+ rot90(orth);
+ orth.normalize();
+ double err = fabs(dot(orth, v[1])) + fabs(dot(orth, v[0]));
+ if(err < tol) {
+ return distance(e.first(), e.last()); // approximately a line
+ } else {
+ Point mid[3];
+ double result;
+ for(int i = 0; i < 3; i++)
+ mid[i] = lerp(0.5, e[i], e[i+1]);
+ Point midmid[2];
+ for(int i = 0; i < 2; i++)
+ midmid[i] = lerp(0.5, mid[i], mid[i+1]);
+ Point midmidmid = lerp(0.5, midmid[0], midmid[1]);
+ {
+ Point curve[4] = {e[0], mid[0], midmid[0], midmidmid};
+ Path::Elem e0(cubicto, std::vector<Point>::const_iterator(curve), std::vector<Point>::const_iterator(curve) + 4);
+ result = cubic_length_subdividing(e0, tol);
+ } {
+ Point curve[4] = {midmidmid, midmid[1], mid[2], e[3]};
+ Path::Elem e1(cubicto, std::vector<Point>::const_iterator(curve), std::vector<Point>::const_iterator(curve) + 4);
+ return result + cubic_length_subdividing(e1, tol);
+ }
+ }
+}
+
+/** Calculates the length of a path through iteration and subsequent subdivision.
+ * Currently handles cubic curves and lines.
+ * The 'tol' parameter is the maximum error allowed. This is used to subdivide the curve where necessary.
+ */
+double arc_length_subdividing(Path const & p, double tol) {
+ double result = 0;
+
+ for(Path::const_iterator iter(p.begin()), end(p.end()); iter != end; ++iter) {
+ if(dynamic_cast<LineTo *>(iter.cmd()))
+ result += distance((*iter).first(), (*iter).last());
+ else if(dynamic_cast<CubicTo *>(iter.cmd()))
+ result += cubic_length_subdividing(*iter, tol);
+ else
+ ;
+ }
+
+ return result;
+}
+
+
+#ifdef HAVE_GSL
+#include <gsl/gsl_integration.h>
+static double poly_length_integrating(double t, void* param) {
+ Poly* pc = (Poly*)param;
+ return hypot(pc[0].eval(t), pc[1].eval(t));
+}
+
+/** Calculates the length of a path Element through gsl integration.
+ \param pe the Element.
+ \param t the parametric input 0 to 1 which specifies the amount of the curve to use.
+ \param tol the maximum error allowed.
+ \param result variable to be incremented with the length of the path
+ \param abs_error variable to be incremented with the estimated error
+*/
+void arc_length_integrating(Path::Elem pe, double t, double tol, double &result, double &abs_error) {
+ if(dynamic_cast<LineTo *>(iter.cmd()))
+ result += distance(pe.first(), pe.last()) * t;
+ else if(dynamic_cast<QuadTo *>(iter.cmd()) ||
+ dynamic_cast<CubicTo *>(iter.cmd())) {
+ Poly B[2] = {get_parametric_poly(pe, X), get_parametric_poly(pe, Y)};
+ for(int i = 0; i < 2; i++)
+ B[i] = derivative(B[i]);
+
+ gsl_function F;
+ gsl_integration_workspace * w
+ = gsl_integration_workspace_alloc (20);
+ F.function = &poly_length_integrating;
+ F.params = (void*)B;
+ double quad_result, err;
+ /* We could probably use the non adaptive code here if we removed any cusps first. */
+ int returncode =
+ gsl_integration_qag (&F, 0, t, 0, tol, 20,
+ GSL_INTEG_GAUSS21, w, &quad_result, &err);
+
+ abs_error += err;
+ result += quad_result;
+ } else
+ return;
+}
+
+/** Calculates the length of a Path through gsl integration. The parameter 'tol' is the maximum error allowed. */
+double arc_length_integrating(Path const & p, double tol) {
+ double result = 0, abserr = 0;
+
+ for(Path::const_iterator iter(p.begin()), end(p.end()); iter != end; ++iter) {
+ arc_length_integrating(*iter, 1.0, tol, result, abserr);
+ }
+ //printf("got %g with err %g\n", result, abserr);
+
+ return result;
+}
+
+/** Calculates the arc length to a specific location on the path. The parameter 'tol' is the maximum error allowed. */
+double arc_length_integrating(Path const & p, Path::Location const & pl, double tol) {
+ double result = 0, abserr = 0;
+ ptrdiff_t offset = pl.it - p.begin();
+
+ assert(offset >= 0);
+ assert(offset < p.size());
+
+ for(Path::const_iterator iter(p.begin()), end(p.end());
+ (iter != pl.it); ++iter) {
+ arc_length_integrating(*iter, 1.0, tol, result, abserr);
+ }
+ arc_length_integrating(*pl.it, pl.t, tol, result, abserr);
+
+ return result;
+}
+
+/* We use a somewhat surprising result for this that s'(t) = |p'(t)|
+ Thus, we can use a derivative based root finder.
+*/
+
+#include <stdio.h>
+#include <gsl/gsl_errno.h>
+#include <gsl/gsl_math.h>
+#include <gsl/gsl_roots.h>
+
+struct arc_length_params
+{
+ Path::Elem pe;
+ double s,tol, result, abs_error;
+ double left, right;
+};
+
+double
+arc_length (double t, void *params)
+{
+ struct arc_length_params *p
+ = (struct arc_length_params *) params;
+
+ double result = 0, abs_error = 0;
+ if(t < 0) t = 0;
+ if(t > 1) t = 1;
+ if(!((t >= 0) && (t <= 1))) {
+ printf("t = %g\n", t);
+ }
+ assert((t >= 0) && (t <= 1));
+ arc_length_integrating(p->pe, t, p->tol, result, abs_error);
+ return result - p->s ;
+}
+
+double
+arc_length_deriv (double t, void *params)
+{
+ struct arc_length_params *p
+ = (struct arc_length_params *) params;
+
+ Point pos, tgt, acc;
+ p->pe.point_tangent_acc_at(t, pos, tgt, acc);
+ return L2(tgt);
+}
+
+void
+arc_length_fdf (double t, void *params,
+ double *y, double *dy)
+{
+ *y = arc_length(t, params);
+ *dy = arc_length_deriv(t, params);
+}
+
+double polish_brent(double t, arc_length_params &alp) {
+ int status;
+ int iter = 0, max_iter = 10;
+ const gsl_root_fsolver_type *T;
+ gsl_root_fsolver *solver;
+ double x_lo = 0.0, x_hi = 1.0;
+ gsl_function F;
+
+ F.function = &arc_length;
+ F.params = &alp;
+
+ T = gsl_root_fsolver_brent;
+ solver = gsl_root_fsolver_alloc (T);
+ gsl_root_fsolver_set (solver, &F, x_lo, x_hi);
+
+ do
+ {
+ iter++;
+ status = gsl_root_fsolver_iterate (solver);
+ t = gsl_root_fsolver_root (solver);
+ x_lo = gsl_root_fsolver_x_lower (solver);
+ x_hi = gsl_root_fsolver_x_upper (solver);
+ status = gsl_root_test_interval (x_lo, x_hi,
+ 0, alp.tol);
+
+ //if (status == GSL_SUCCESS)
+ // printf ("Converged:\n");
+
+ }
+ while (status == GSL_CONTINUE && iter < max_iter);
+ return t;
+}
+
+double polish (double t, arc_length_params &alp) {
+ int status;
+ int iter = 0, max_iter = 5;
+ const gsl_root_fdfsolver_type *T;
+ gsl_root_fdfsolver *solver;
+ double t0;
+ gsl_function_fdf FDF;
+
+ FDF.f = &arc_length;
+ FDF.df = &arc_length_deriv;
+ FDF.fdf = &arc_length_fdf;
+ FDF.params = &alp;
+
+ T = gsl_root_fdfsolver_newton;
+ solver = gsl_root_fdfsolver_alloc (T);
+ gsl_root_fdfsolver_set (solver, &FDF, t);
+
+ do
+ {
+ iter++;
+ status = gsl_root_fdfsolver_iterate (solver);
+ t0 = t;
+ t = gsl_root_fdfsolver_root (solver);
+ status = gsl_root_test_delta (t, t0, 0, alp.tol);
+
+ if (status == GSL_SUCCESS)
+ ;//printf ("Converged:\n");
+
+ printf ("%5d %10.7f %+10.7f\n",
+ iter, t, t - t0);
+ }
+ while (status == GSL_CONTINUE && iter < max_iter);
+ return t;
+}
+
+
+#endif
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/orphan-code/chebyshev.cpp b/src/2geom/orphan-code/chebyshev.cpp
new file mode 100644
index 0000000..c886daf
--- /dev/null
+++ b/src/2geom/orphan-code/chebyshev.cpp
@@ -0,0 +1,126 @@
+#include <2geom/chebyshev.h>
+
+#include <2geom/sbasis.h>
+#include <2geom/sbasis-poly.h>
+
+#include <vector>
+using std::vector;
+
+#include <gsl/gsl_math.h>
+#include <gsl/gsl_chebyshev.h>
+
+namespace Geom{
+
+SBasis cheb(unsigned n) {
+ static std::vector<SBasis> basis;
+ if(basis.empty()) {
+ basis.push_back(Linear(1,1));
+ basis.push_back(Linear(0,1));
+ }
+ for(unsigned i = basis.size(); i <= n; i++) {
+ basis.push_back(Linear(0,2)*basis[i-1] - basis[i-2]);
+ }
+
+ return basis[n];
+}
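+
+/* Worked example (illustrative): with T0 = 1 and T1 = t, the recurrence above
+ * yields cheb(2) = 2t*t - 1 = 2t^2 - 1 and cheb(3) = 4t^3 - 3t, i.e. the
+ * ordinary Chebyshev polynomials evaluated at the curve parameter t.
+ */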
+
+SBasis cheb_series(unsigned n, double* cheb_coeff) {
+ SBasis r;
+ for(unsigned i = 0; i < n; i++) {
+ double cof = cheb_coeff[i];
+ //if(i == 0)
+ //cof /= 2;
+ r += cheb(i)*cof;
+ }
+
+ return r;
+}
+
+SBasis clenshaw_series(unsigned m, double* cheb_coeff) {
+ /** b_n = a_n
+ b_n-1 = 2*x*b_n + a_n-1
+ b_n-k = 2*x*b_{n-k+1} + a_{n-k} - b_{n - k + 2}
+ b_0 = x*b_1 + a_0 - b_2
+ */
+
+ double a = -1, b = 1;
+ SBasis d, dd;
+ SBasis y = (Linear(0, 2) - (a+b)) / (b-a);
+ SBasis y2 = 2*y;
+ for(int j = m - 1; j >= 1; j--) {
+ SBasis sv = d;
+ d = y2*d - dd + cheb_coeff[j];
+ dd = sv;
+ }
+
+ return y*d - dd + 0.5*cheb_coeff[0];
+}
+
+SBasis chebyshev_approximant (double (*f)(double,void*), int order, Interval in, void* p) {
+ gsl_cheb_series *cs = gsl_cheb_alloc (order+2);
+
+ gsl_function F;
+
+ F.function = f;
+ F.params = p;
+
+ gsl_cheb_init (cs, &F, in[0], in[1]);
+
+ SBasis r = compose(clenshaw_series(order, cs->c), Linear(-1,1));
+
+ gsl_cheb_free (cs);
+ return r;
+}
+
+struct wrap {
+ double (*f)(double,void*);
+ void* pp;
+ double fa, fb;
+ Interval in;
+};
+
+double f_interp(double x, void* p) {
+ struct wrap *wr = (struct wrap *)p;
+ double z = (x - wr->in[0]) / (wr->in[1] - wr->in[0]);
+ return (wr->f)(x, wr->pp) - ((1 - z)*wr->fa + z*wr->fb);
+}
+
+SBasis chebyshev_approximant_interpolating (double (*f)(double,void*),
+ int order, Interval in, void* p) {
+ double fa = f(in[0], p);
+ double fb = f(in[1], p);
+ struct wrap wr;
+ wr.fa = fa;
+ wr.fb = fb;
+ wr.in = in;
+ printf("%f %f\n", fa, fb);
+ wr.f = f;
+ wr.pp = p;
+ return compose(Linear(in[0], in[1]), Linear(fa, fb)) + chebyshev_approximant(f_interp, order, in, &wr) + Linear(fa, fb);
+}
+
+SBasis chebyshev(unsigned n) {
+ static std::vector<SBasis> basis;
+ if(basis.empty()) {
+ basis.push_back(Linear(1,1));
+ basis.push_back(Linear(0,1));
+ }
+ for(unsigned i = basis.size(); i <= n; i++) {
+ basis.push_back(Linear(0,2)*basis[i-1] - basis[i-2]);
+ }
+
+ return basis[n];
+}
+
+} // end namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/orphan-code/intersection-by-bezier-clipping.cpp b/src/2geom/orphan-code/intersection-by-bezier-clipping.cpp
new file mode 100644
index 0000000..c55f623
--- /dev/null
+++ b/src/2geom/orphan-code/intersection-by-bezier-clipping.cpp
@@ -0,0 +1,560 @@
+
+/*
+ * Find intersections between two Bezier curves.
+ * The intersection points are found by using Bezier clipping.
+ *
+ * Authors:
+ * Marco Cecchetti <mrcekets at gmail.com>
+ *
+ * Copyright 2008 authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+
+
+
+
+#include <2geom/basic-intersection.h>
+#include <2geom/bezier.h>
+#include <2geom/interval.h>
+#include <2geom/convex-hull.h>
+
+
+#include <vector>
+#include <utility>
+#include <iomanip>
+
+
+namespace Geom {
+
+namespace detail { namespace bezier_clipping {
+
+
+////////////////////////////////////////////////////////////////////////////////
+// for debugging
+//
+
+inline
+void print(std::vector<Point> const& cp)
+{
+ for (size_t i = 0; i < cp.size(); ++i)
+ std::cerr << i << " : " << cp[i] << std::endl;
+}
+
+template< class charT >
+inline
+std::basic_ostream<charT> &
+operator<< (std::basic_ostream<charT> & os, const Interval & I)
+{
+ os << "[" << I.min() << ", " << I.max() << "]";
+ return os;
+}
+
+inline
+double angle (std::vector<Point> const& A)
+{
+ size_t n = A.size() -1;
+ double a = std::atan2(A[n][Y] - A[0][Y], A[n][X] - A[0][X]);
+ return (180 * a / M_PI);
+}
+
+inline
+size_t get_precision(Interval const& I)
+{
+ double d = I.extent();
+ double e = 1, p = 1;
+ size_t n = 0;
+ while (n < 16 && (are_near(d, 0, e)))
+ {
+ p *= 10;
+ e = 1 /p;
+ ++n;
+ }
+ return n;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+
+/*
+ * return true if all the Bezier curve control points are near,
+ * false otherwise
+ */
+inline
+bool is_constant(std::vector<Point> const& A, double precision = EPSILON)
+{
+ for (unsigned int i = 1; i < A.size(); ++i)
+ {
+ if(!are_near(A[i], A[0], precision))
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Make up an orientation line using the control points c[i] and c[j];
+ * the line is returned in the output parameter "l" in the form of a 3 element
+ * vector: l[0] * x + l[1] * y + l[2] == 0; the line is normalized.
+ */
+inline
+void orientation_line (std::vector<double> & l,
+ std::vector<Point> const& c,
+ size_t i, size_t j)
+{
+ l[0] = c[j][Y] - c[i][Y];
+ l[1] = c[i][X] - c[j][X];
+ l[2] = cross(c[i], c[j]);
+ double length = std::sqrt(l[0] * l[0] + l[1] * l[1]);
+ assert (length != 0);
+ l[0] /= length;
+ l[1] /= length;
+ l[2] /= length;
+}
+
+/*
+ * Pick up an orientation line for the Bezier curve "c" and return it in
+ * the output parameter "l"
+ */
+inline
+void pick_orientation_line (std::vector<double> & l,
+ std::vector<Point> const& c)
+{
+ size_t i = c.size();
+ while (--i > 0 && are_near(c[0], c[i]))
+ {}
+ if (i == 0)
+ {
+ // this should never happen because when a new curve portion is created
+ // we check that it is not constant;
+ // however this requires that the precision used in the is_constant
+ // routine has to be the same used here in the are_near test
+ assert(i != 0);
+ }
+ orientation_line(l, c, 0, i);
+ //std::cerr << "i = " << i << std::endl;
+}
+
+/*
+ * Compute the signed distance of the point "P" from the normalized line l
+ */
+inline
+double distance (Point const& P, std::vector<double> const& l)
+{
+ return l[X] * P[X] + l[Y] * P[Y] + l[2];
+}
+
+/*
+ * Compute the min and max distance of the control points of the Bezier
+ * curve "c" from the normalized orientation line "l".
+ * These bounds are returned through the output Interval parameter "bound".
+ */
+inline
+void fat_line_bounds (Interval& bound,
+ std::vector<Point> const& c,
+ std::vector<double> const& l)
+{
+ bound[0] = 0;
+ bound[1] = 0;
+ double d;
+ for (size_t i = 0; i < c.size(); ++i)
+ {
+ d = distance(c[i], l);
+ if (bound[0] > d) bound[0] = d;
+ if (bound[1] < d) bound[1] = d;
+ }
+}
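+
+/* Worked example (illustrative) for the two routines above: for the control
+ * polygon {(0,0), (1,2), (2,0)}, orientation_line() through the first and last
+ * points gives the normalized line (0, -1, 0), i.e. -y = 0; the signed distances
+ * of the control points from it are {0, -2, 0}, so fat_line_bounds() produces
+ * the interval [-2, 0].
+ */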
+
+/*
+ * return the x component of the intersection point between the line
+ * passing through points p1, p2 and the line Y = "y"
+ */
+inline
+double intersect (Point const& p1, Point const& p2, double y)
+{
+ // we are sure that p2[Y] != p1[Y] because this routine is called
+ // only when the lower or the upper bound is crossed
+ double dy = (p2[Y] - p1[Y]);
+ double s = (y - p1[Y]) / dy;
+ return (p2[X]-p1[X])*s + p1[X];
+}
+
+/*
+ * Clip the Bezier curve "B" wrt the fat line defined by the orientation
+ * line "l" and the interval range "bound"; the new parameter interval for
+ * the clipped curve is returned through the output parameter "dom".
+ */
+void clip (Interval& dom,
+ std::vector<Point> const& B,
+ std::vector<double> const& l,
+ Interval const& bound)
+{
+ double n = B.size() - 1; // number of sub-intervals
+ std::vector<Point> D; // distance curve control points
+ D.reserve (B.size());
+ double d;
+ for (size_t i = 0; i < B.size(); ++i)
+ {
+ d = distance (B[i], l);
+ D.push_back (Point(i/n, d));
+ }
+ //print(D);
+ ConvexHull chD(D);
+ std::vector<Point> & p = chD.boundary; // convex hull vertices
+
+ //print(p);
+
+ bool plower, phigher;
+ bool clower, chigher;
+ double t, tmin = 1, tmax = 0;
+ //std::cerr << "bound : " << bound << std::endl;
+
+ plower = (p[0][Y] < bound.min());
+ phigher = (p[0][Y] > bound.max());
+ if (!(plower || phigher)) // inside the fat line
+ {
+ if (tmin > p[0][X]) tmin = p[0][X];
+ if (tmax < p[0][X]) tmax = p[0][X];
+ //std::cerr << "0 : inside " << p[0]
+ // << " : tmin = " << tmin << ", tmax = " << tmax << std::endl;
+ }
+
+ for (size_t i = 1; i < p.size(); ++i)
+ {
+ clower = (p[i][Y] < bound.min());
+ chigher = (p[i][Y] > bound.max());
+ if (!(clower || chigher)) // inside the fat line
+ {
+ if (tmin > p[i][X]) tmin = p[i][X];
+ if (tmax < p[i][X]) tmax = p[i][X];
+ //std::cerr << i << " : inside " << p[i]
+ // << " : tmin = " << tmin << ", tmax = " << tmax
+ // << std::endl;
+ }
+ if (clower != plower) // cross the lower bound
+ {
+ t = intersect(p[i-1], p[i], bound.min());
+ if (tmin > t) tmin = t;
+ if (tmax < t) tmax = t;
+ plower = clower;
+ //std::cerr << i << " : lower " << p[i]
+ // << " : tmin = " << tmin << ", tmax = " << tmax
+ // << std::endl;
+ }
+ if (chigher != phigher) // cross the upper bound
+ {
+ t = intersect(p[i-1], p[i], bound.max());
+ if (tmin > t) tmin = t;
+ if (tmax < t) tmax = t;
+ phigher = chigher;
+ //std::cerr << i << " : higher " << p[i]
+ // << " : tmin = " << tmin << ", tmax = " << tmax
+ // << std::endl;
+ }
+ }
+
+ // we have to test the closing segment for intersection
+ size_t last = p.size() - 1;
+ clower = (p[0][Y] < bound.min());
+ chigher = (p[0][Y] > bound.max());
+ if (clower != plower) // cross the lower bound
+ {
+ t = intersect(p[last], p[0], bound.min());
+ if (tmin > t) tmin = t;
+ if (tmax < t) tmax = t;
+ //std::cerr << "0 : lower " << p[0]
+ // << " : tmin = " << tmin << ", tmax = " << tmax << std::endl;
+ }
+ if (chigher != phigher) // cross the upper bound
+ {
+ t = intersect(p[last], p[0], bound.max());
+ if (tmin > t) tmin = t;
+ if (tmax < t) tmax = t;
+ //std::cerr << "0 : higher " << p[0]
+ // << " : tmin = " << tmin << ", tmax = " << tmax << std::endl;
+ }
+
+ dom[0] = tmin;
+ dom[1] = tmax;
+}
+
+/*
+ * Compute the portion of the Bezier curve "B" wrt the interval "I"
+ */
+void portion (std::vector<Point> & B, Interval const& I)
+{
+ Bezier::Order bo(B.size()-1);
+ Bezier Bx(bo), By(bo);
+ for (size_t i = 0; i < B.size(); ++i)
+ {
+ Bx[i] = B[i][X];
+ By[i] = B[i][Y];
+ }
+ Bx = portion(Bx, I.min(), I.max());
+ By = portion(By, I.min(), I.max());
+ assert (Bx.size() == By.size());
+ B.resize(Bx.size());
+ for (size_t i = 0; i < Bx.size(); ++i)
+ {
+ B[i][X] = Bx[i];
+ B[i][Y] = By[i];
+ }
+}
+
+/*
+ * Map the sub-interval I in [0,1] into the interval J and assign it to J
+ */
+inline
+void map_to(Interval & J, Interval const& I)
+{
+ double length = J.extent();
+ J[1] = I.max() * length + J[0];
+ J[0] = I.min() * length + J[0];
+}
+
+/*
+ * The interval [1,0] is used to represent the empty interval; this routine
+ * is just a helper function for creating such an interval.
+ */
+inline
+Interval make_empty_interval()
+{
+ Interval I(0);
+ I[0] = 1;
+ return I;
+}
+
+
+
+
+const double MAX_PRECISION = 1e-8;
+const double MIN_CLIPPED_SIZE_THRESHOLD = 0.8;
+const Interval UNIT_INTERVAL(0,1);
+const Interval EMPTY_INTERVAL = make_empty_interval();
+const Interval H1_INTERVAL(0, 0.5);
+const Interval H2_INTERVAL(0.5 + MAX_PRECISION, 1.0);
+
+/*
+ * intersection
+ *
+ * input:
+ * A, B: control point sets of two bezier curves
+ * domA, domB: real parameter intervals of the two curves
+ * precision: required computational precision of the returned parameter ranges
+ * output:
+ * domsA, domsB: sets of parameter intervals describing an intersection point
+ *
+ * The parameter intervals are computed with a Bezier clipping algorithm;
+ * when a clipping step doesn't shrink the current interval by more than 20%,
+ * a subdivision step is performed instead.
+ * If during the computation the length of one of the two curve intervals becomes
+ * less than MAX_PRECISION, the routine exits regardless of the precision reached
+ * for the other curve interval.
+ */
+void intersection (std::vector<Interval>& domsA,
+ std::vector<Interval>& domsB,
+ std::vector<Point> const& A,
+ std::vector<Point> const& B,
+ Interval const& domA,
+ Interval const& domB,
+ double precision)
+{
+// std::cerr << ">> curve subdision performed <<" << std::endl;
+// std::cerr << "dom(A) : " << domA << std::endl;
+// std::cerr << "dom(B) : " << domB << std::endl;
+// std::cerr << "angle(A) : " << angle(A) << std::endl;
+// std::cerr << "angle(B) : " << angle(B) << std::endl;
+
+
+ if (precision < MAX_PRECISION)
+ precision = MAX_PRECISION;
+
+ std::vector<Point> pA = A;
+ std::vector<Point> pB = B;
+ std::vector<Point>* C1 = &pA;
+ std::vector<Point>* C2 = &pB;
+
+ Interval dompA = domA;
+ Interval dompB = domB;
+ Interval* dom1 = &dompA;
+ Interval* dom2 = &dompB;
+
+ std::vector<double> bl(3);
+ Interval bound, dom;
+
+
+ size_t iter = 0;
+ while (++iter < 100
+ && (dompA.extent() >= precision || dompB.extent() >= precision))
+ {
+// std::cerr << "iter: " << iter << std::endl;
+
+ pick_orientation_line(bl, *C1);
+ fat_line_bounds(bound, *C1, bl);
+ clip(dom, *C2, bl, bound);
+
+ // [1,0] is utilized to represent an empty interval
+ if (dom == EMPTY_INTERVAL)
+ {
+// std::cerr << "dom: empty" << std::endl;
+ return;
+ }
+// std::cerr << "dom : " << dom << std::endl;
+
+ // all other cases where dom[0] > dom[1] are invalid
+ if (dom.min() > dom.max())
+ {
+ assert(dom.min() < dom.max());
+ }
+
+ map_to(*dom2, dom);
+
+ // it's better to stop before losing computational precision
+ if (dom2->extent() <= MAX_PRECISION)
+ {
+// std::cerr << "beyond max precision limit" << std::endl;
+ break;
+ }
+
+ portion(*C2, dom);
+ if (is_constant(*C2))
+ {
+// std::cerr << "new curve portion is constant" << std::endl;
+ break;
+ }
+        // if we have clipped less than 20% then we need to subdivide the curve
+        // with the largest domain into two sub-curves
+ if (dom.extent() > MIN_CLIPPED_SIZE_THRESHOLD)
+ {
+// std::cerr << "clipped less than 20% : " << dom.extent() << std::endl;
+// std::cerr << "angle(pA) : " << angle(pA) << std::endl;
+// std::cerr << "angle(pB) : " << angle(pB) << std::endl;
+
+ std::vector<Point> pC1, pC2;
+ Interval dompC1, dompC2;
+ if (dompA.extent() > dompB.extent())
+ {
+ if ((dompA.extent() / 2) < MAX_PRECISION)
+ {
+ break;
+ }
+ pC1 = pC2 = pA;
+ portion(pC1, H1_INTERVAL);
+ portion(pC2, H2_INTERVAL);
+ dompC1 = dompC2 = dompA;
+ map_to(dompC1, H1_INTERVAL);
+ map_to(dompC2, H2_INTERVAL);
+ intersection(domsA, domsB, pC1, pB, dompC1, dompB, precision);
+ intersection(domsA, domsB, pC2, pB, dompC2, dompB, precision);
+ }
+ else
+ {
+ if ((dompB.extent() / 2) < MAX_PRECISION)
+ {
+ break;
+ }
+ pC1 = pC2 = pB;
+ portion(pC1, H1_INTERVAL);
+ portion(pC2, H2_INTERVAL);
+ dompC1 = dompC2 = dompB;
+ map_to(dompC1, H1_INTERVAL);
+ map_to(dompC2, H2_INTERVAL);
+ intersection(domsB, domsA, pC1, pA, dompC1, dompA, precision);
+ intersection(domsB, domsA, pC2, pA, dompC2, dompA, precision);
+ }
+ return;
+ }
+
+ using std::swap;
+ swap(C1, C2);
+ swap(dom1, dom2);
+// std::cerr << "dom(pA) : " << dompA << std::endl;
+// std::cerr << "dom(pB) : " << dompB << std::endl;
+ }
+ domsA.push_back(dompA);
+ domsB.push_back(dompB);
+}
+
+} /* end namespace bezier_clipping */ } /* end namespace detail */
+
+
+/*
+ * find_intersections
+ *
+ * input: A, B - control point sets of the two Bezier curves
+ * input: precision - required precision of computation
+ * output: xs - set of pairs of parameter values
+ * at which crossing happens
+ *
+ * This routine is based on the Bezier Clipping Algorithm,
+ * see: Sederberg - Computer Aided Geometric Design
+ */
+void find_intersections (std::vector< std::pair<double, double> > & xs,
+ std::vector<Point> const& A,
+ std::vector<Point> const& B,
+ double precision)
+{
+ std::cout << "find_intersections: intersection-by-clipping.cpp version\n";
+// std::cerr << std::fixed << std::setprecision(16);
+
+ using detail::bezier_clipping::get_precision;
+ using detail::bezier_clipping::operator<<;
+ using detail::bezier_clipping::intersection;
+ using detail::bezier_clipping::UNIT_INTERVAL;
+
+ std::pair<double, double> ci;
+ std::vector<Interval> domsA, domsB;
+ intersection (domsA, domsB, A, B, UNIT_INTERVAL, UNIT_INTERVAL, precision);
+ if (domsA.size() != domsB.size())
+ {
+ assert (domsA.size() == domsB.size());
+ }
+ xs.clear();
+ xs.reserve(domsA.size());
+ for (size_t i = 0; i < domsA.size(); ++i)
+ {
+// std::cerr << i << " : domA : " << domsA[i] << std::endl;
+// std::cerr << "precision A: " << get_precision(domsA[i]) << std::endl;
+// std::cerr << i << " : domB : " << domsB[i] << std::endl;
+// std::cerr << "precision B: " << get_precision(domsB[i]) << std::endl;
+
+ ci.first = domsA[i].middle();
+ ci.second = domsB[i].middle();
+ xs.push_back(ci);
+ }
+}
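+
+/* Usage sketch (illustrative): intersect a cubic with a straight chord, both
+ * given as control point vectors; each entry of xs holds the parameter on A
+ * (first) and the parameter on B (second):
+ *
+ *     std::vector<Point> A = { Point(0,-1), Point(1,3), Point(2,-3), Point(3,1) };
+ *     std::vector<Point> B = { Point(0,0), Point(3,0) };
+ *     std::vector< std::pair<double, double> > xs;
+ *     find_intersections(xs, A, B, 1e-8);
+ */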
+
+} // end namespace Geom
+
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/orphan-code/intersection-by-smashing.cpp b/src/2geom/orphan-code/intersection-by-smashing.cpp
new file mode 100644
index 0000000..02e44b1
--- /dev/null
+++ b/src/2geom/orphan-code/intersection-by-smashing.cpp
@@ -0,0 +1,349 @@
+#include <2geom/d2.h>
+#include <2geom/sbasis.h>
+#include <2geom/sbasis-geometric.h>
+#include <2geom/orphan-code/intersection-by-smashing.h>
+
+#include <cstdlib>
+#include <cstdio>
+#include <vector>
+#include <algorithm>
+
+namespace Geom {
+using namespace Geom;
+
+/*
+ * Computes the top and bottom boundaries of the L_\infty neighborhood
+ * of a curve. The curve is supposed to be a graph over the x-axis.
+ */
+static
+void computeLinfinityNeighborhood( D2<SBasis > const &f, double tol, D2<Piecewise<SBasis> > &topside, D2<Piecewise<SBasis> > &botside ){
+ double signx = ( f[X].at0() > f[X].at1() )? -1 : 1;
+ double signy = ( f[Y].at0() > f[Y].at1() )? -1 : 1;
+
+ Piecewise<D2<SBasis> > top, bot;
+ top = Piecewise<D2<SBasis> > (f);
+ top.cuts.insert( top.cuts.end(), 2);
+ top.segs.insert( top.segs.end(), D2<SBasis>(SBasis(Linear( f[X].at1(), f[X].at1()+2*tol*signx)),
+ SBasis(Linear( f[Y].at1() )) ));
+ bot = Piecewise<D2<SBasis> >(f);
+ bot.cuts.insert( bot.cuts.begin(), - 1 );
+ bot.segs.insert( bot.segs.begin(), D2<SBasis>(SBasis(Linear( f[X].at0()-2*tol*signx, f[X].at0())),
+ SBasis(Linear( f[Y].at0() )) ));
+ top += Point(-tol*signx, tol);
+ bot += Point( tol*signx, -tol);
+
+ if ( signy < 0 ){
+ std::swap( top, bot );
+ top += Point( 0, 2*tol);
+ bot += Point( 0, -2*tol);
+ }
+ topside = make_cuts_independent(top);
+ botside = make_cuts_independent(bot);
+}
+
+
+/*
+ * Compute top and bottom boundaries of the L^infty nbhd of the graph of a *monotonic* function f.
+ * if f is increasing, it is given by [f(t-tol)-tol, f(t+tol)+tol].
+ * if not, it is [f(t+tol)-tol, f(t-tol)+tol].
+ */
+static
+void computeLinfinityNeighborhood( Piecewise<SBasis> const &f, double tol, Piecewise<SBasis> &top, Piecewise<SBasis> &bot){
+ top = f + tol;
+ top.offsetDomain( - tol );
+ top.cuts.insert( top.cuts.end(), f.domain().max() + tol);
+ top.segs.insert( top.segs.end(), SBasis(Linear( f.lastValue() + tol )) );
+
+ bot = f - tol;
+ bot.offsetDomain( tol );
+ bot.cuts.insert( bot.cuts.begin(), f.domain().min() - tol);
+ bot.segs.insert( bot.segs.begin(), SBasis(Linear( f.firstValue() - tol )) );
+
+ if (f.firstValue() > f.lastValue()) {
+ std::swap(top, bot);
+ top += 2 * tol;
+ bot -= 2 * tol;
+ }
+}
+
+/*
+ * Returns the intervals over which the curve keeps its slope
+ * in one of the 8 sectors delimited by x=0, y=0, y=x, y=-x.
+ */
+std::vector<Interval> monotonicSplit(D2<SBasis> const &p){
+ std::vector<Interval> result;
+
+ D2<SBasis> v = derivative(p);
+
+ std::vector<double> someroots;
+ std::vector<double> cuts (2,0.);
+ cuts[1] = 1.;
+
+ someroots = roots(v[X]);
+ cuts.insert( cuts.end(), someroots.begin(), someroots.end() );
+
+ someroots = roots(v[Y]);
+ cuts.insert( cuts.end(), someroots.begin(), someroots.end() );
+
+ //we could split in the middle to avoid computing roots again...
+ someroots = roots(v[X]-v[Y]);
+ cuts.insert( cuts.end(), someroots.begin(), someroots.end() );
+
+ someroots = roots(v[X]+v[Y]);
+ cuts.insert( cuts.end(), someroots.begin(), someroots.end() );
+
+ sort(cuts.begin(),cuts.end());
+ unique(cuts.begin(), cuts.end() );
+
+ for (unsigned i=1; i<cuts.size(); i++){
+ result.push_back( Interval( cuts[i-1], cuts[i] ) );
+ }
+ return result;
+}
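+
+/* Worked example (illustrative): for the quadratic (x, y) = (2t, 4t(1-t)) the
+ * derivative is (2, 4-8t); x' never vanishes, y' vanishes at t = 0.5, x'-y'
+ * at t = 0.25 and x'+y' at t = 0.75, so monotonicSplit() returns the four
+ * intervals [0, 0.25], [0.25, 0.5], [0.5, 0.75] and [0.75, 1].
+ */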
+
+//std::vector<Interval> level_set( D2<SBasis> const &f, Rect region){
+// std::vector<Interval> x_in_reg = level_set( f[X], region[X] );
+// std::vector<Interval> y_in_reg = level_set( f[Y], region[Y] );
+// std::vector<Interval> result = intersect ( x_in_reg, y_in_reg );
+// return result;
+//}
+
+/*TODO: remove this!!!
+ * the minimum would be to move it to piecewise.h but this would be stupid.
+ * The best would be to let 'compose' be aware of extension modes (constant, linear, polynomial..)
+ * (I think the extension modes (at start and end) should be properties of the pwsb).
+ */
+static
+void prolongateByConstants( Piecewise<SBasis> &f, double paddle_width ){
+    if ( f.size() == 0 ) return; // do we have a convention about the domain of empty pwsb?
+ f.cuts.insert( f.cuts.begin(), f.cuts.front() - paddle_width );
+ f.segs.insert( f.segs.begin(), SBasis( f.segs.front().at0() ) );
+ f.cuts.insert( f.cuts.end(), f.cuts.back() + paddle_width );
+ f.segs.insert( f.segs.end(), SBasis( f.segs.back().at1() ) );
+}
+
+static
+bool compareIntersectionsTimesX( SmashIntersection const &inter1, SmashIntersection const &inter2 ){
+    // order intersections by the start of their X time interval
+    return inter1.times[X].min() < inter2.times[X].min();
+}
+/*Fuse contiguous intersection domains
+ *
+ */
+static
+void cleanup_and_fuse( std::vector<SmashIntersection> &inters ){
+ std::sort( inters.begin(), inters.end(), compareIntersectionsTimesX);
+ for (unsigned i=0; i < inters.size(); i++ ){
+ for (unsigned j=i+1; j < inters.size() && inters[i].times[X].intersects( inters[j].times[X]) ; j++ ){
+ if (inters[i].times[Y].intersects( inters[j].times[Y] ) ){
+ inters[i].times.unionWith(inters[j].times);
+ inters[i].bbox.unionWith(inters[j].bbox);
+ inters.erase( inters.begin() + j );
+ }
+ }
+ }
+}
+
+/* Computes the intersection of two sets, each given as an (ordered) union of intervals.
+ */
+static
+std::vector<Interval> intersect( std::vector<Interval> const &a, std::vector<Interval> const &b){
+ std::vector<Interval> result;
+ //TODO: use order to optimize this!
+ for (auto i : a){
+ for (auto j : b){
+ OptInterval c( i );
+ c &= j;
+ if ( c ) {
+ result.push_back( *c );
+ }
+ }
+ }
+ return result;
+}
+
+/* Returns the intervals over which the curves are in the
+ * tol-neighborhood of one another for the L_\infty metric.
+ * WARNING: each curve is supposed to be a graph over the x or y axis
+ * (but not necessarily the same axis for both) and the smaller
+ * the slope the better (typically <= 45°).
+ */
+std::vector<SmashIntersection> monotonic_smash_intersect( D2<SBasis> const &a, D2<SBasis> const &b, double tol){
+ using std::swap;
+
+ // a and b or X and Y may have to be exchanged, so make local copies.
+ D2<SBasis> aa = a;
+ D2<SBasis> bb = b;
+ bool swapresult = false;
+ bool swapcoord = false;//debug only!
+
+ //if the (enlarged) bounding boxes don't intersect, stop.
+ OptRect abounds = bounds_fast( a );
+ OptRect bbounds = bounds_fast( b );
+ if ( !abounds || !bbounds ) return std::vector<SmashIntersection>();
+ abounds->expandBy(tol);
+ if ( !(abounds->intersects(*bbounds))){
+ return std::vector<SmashIntersection>();
+ }
+
+ //Choose the best curve to be re-parametrized by x or y values.
+ OptRect dabounds = bounds_exact(derivative(a));
+ OptRect dbbounds = bounds_exact(derivative(b));
+ if ( dbbounds->min().length() > dabounds->min().length() ){
+ aa=b;
+ bb=a;
+ swap( dabounds, dbbounds );
+ swapresult = true;
+ }
+
+ //Choose the best coordinate to use as new parameter
+ double dxmin = std::min( std::abs((*dabounds)[X].max()), std::abs((*dabounds)[X].min()) );
+ double dymin = std::min( std::abs((*dabounds)[Y].max()), std::abs((*dabounds)[Y].min()) );
+ if ( (*dabounds)[X].max()*(*dabounds)[X].min() < 0 ) dxmin=0;
+ if ( (*dabounds)[Y].max()*(*dabounds)[Y].min() < 0 ) dymin=0;
+ assert (dxmin>=0 && dymin>=0);
+
+ if (dxmin < dymin) {
+ aa = D2<SBasis>( aa[Y], aa[X] );
+ bb = D2<SBasis>( bb[Y], bb[X] );
+ swapcoord = true;
+ }
+
+ //re-parametrize aa by the value of x.
+ Interval x_range_strict( aa[X].at0(), aa[X].at1() );
+ Piecewise<SBasis> y_of_x = pw_compose_inverse(aa[Y],aa[X], 2, 1e-5);
+
+ //Compute top and bottom boundaries of the L^infty nbhd of aa.
+ Piecewise<SBasis> top_ay, bot_ay;
+ computeLinfinityNeighborhood( y_of_x, tol, top_ay, bot_ay);
+
+    Interval ax_range = top_ay.domain();//i.e. aa[X] domain expanded by tol.
+ std::vector<Interval> bx_in_ax_range = level_set(bb[X], ax_range );
+
+ // find times when bb is in the neighborhood of aa.
+ std::vector<Interval> tbs;
+ for (auto & i : bx_in_ax_range){
+ D2<Piecewise<SBasis> > bb_in;
+ bb_in[X] = Piecewise<SBasis> ( portion( bb[X], i ) );
+ bb_in[Y] = Piecewise<SBasis> ( portion( bb[Y], i) );
+ bb_in[X].setDomain( i );
+ bb_in[Y].setDomain( i );
+
+ Piecewise<SBasis> h;
+ Interval level;
+ h = bb_in[Y] - compose( top_ay, bb_in[X] );
+ level = Interval( -infinity(), 0 );
+ std::vector<Interval> rts_lo = level_set( h, level);
+ h = bb_in[Y] - compose( bot_ay, bb_in[X] );
+ level = Interval( 0, infinity());
+ std::vector<Interval> rts_hi = level_set( h, level);
+
+ std::vector<Interval> rts = intersect( rts_lo, rts_hi );
+ tbs.insert(tbs.end(), rts.begin(), rts.end() );
+ }
+
+ std::vector<SmashIntersection> result(tbs.size(), SmashIntersection());
+
+ /* for each solution I, find times when aa is in the neighborhood of bb(I).
+ * (Note: the preimage of bb[X](I) by aa[X], enlarged by tol, is a good approximation of this:
+ * it would give points in the 2*tol neighborhood of bb (if the slope of aa is never more than 1).
+ * + faster computation.
+ * - implies little jumps depending on the subdivision of the input curve into monotonic pieces
+ * and on the choice of preferred axis. If noticeable, these jumps would feel random to the user :-(
+ */
+ for (unsigned j=0; j<tbs.size(); j++){
+ result[j].times[Y] = tbs[j];
+ std::vector<Interval> tas;
+ //TODO: replace this by some option in the "compose(pw,pw)" method!
+ Piecewise<SBasis> fat_y_of_x = y_of_x;
+ prolongateByConstants( fat_y_of_x, 100*(1+tol) );
+
+ D2<Piecewise<SBasis> > top_b, bot_b;
+ D2<SBasis> bbj = portion( bb, tbs[j] );
+ computeLinfinityNeighborhood( bbj, tol, top_b, bot_b );
+
+ Piecewise<SBasis> h;
+ Interval level;
+ h = top_b[Y] - compose( fat_y_of_x, top_b[X] );
+        level = Interval( 0, infinity() );
+ std::vector<Interval> rts_top = level_set( h, level);
+ for (auto & idx : rts_top){
+ idx = Interval( top_b[X].valueAt( idx.min() ),
+ top_b[X].valueAt( idx.max() ) );
+ }
+ assert( rts_top.size() == 1 );
+
+ h = bot_b[Y] - compose( fat_y_of_x, bot_b[X] );
+        level = Interval( -infinity(), 0 );
+ std::vector<Interval> rts_bot = level_set( h, level);
+ for (auto & idx : rts_bot){
+ idx = Interval( bot_b[X].valueAt( idx.min() ),
+ bot_b[X].valueAt( idx.max() ) );
+ }
+ assert( rts_bot.size() == 1 );
+ rts_top = intersect( rts_top, rts_bot );
+ assert (rts_top.size() == 1);
+ Interval x_dom = rts_top[0];
+
+ if ( x_dom.max() <= x_range_strict.min() ){
+ tas.push_back( Interval ( ( aa[X].at0() < aa[X].at1() ) ? 0 : 1 ) );
+ }else if ( x_dom.min() >= x_range_strict.max() ){
+ tas.push_back( Interval ( ( aa[X].at0() < aa[X].at1() ) ? 1 : 0 ) );
+ }else{
+ tas = level_set(aa[X], x_dom );
+ }
+ assert( tas.size()==1 );
+ result[j].times[X] = tas.front();
+
+ result[j].bbox = Rect( bbj.at0(), bbj.at1() );
+ Interval y_dom( aa[Y](result[j].times[X].min()), aa[Y](result[j].times[X].max()) );
+ result[j].bbox.unionWith( Rect( x_dom, y_dom ) );
+ }
+
+ if (swapresult) {
+ for (auto & i : result){
+ swap( i.times[X], i.times[Y]);
+ }
+ }
+ if (swapcoord) {
+ for (auto & i : result){
+ swap( i.bbox[X], i.bbox[Y] );
+ }
+ }
+
+ //TODO: cleanup result? fuse contiguous intersections?
+ return result;
+}
+
+std::vector<SmashIntersection> smash_intersect( D2<SBasis> const &a, D2<SBasis> const &b, double tol){
+ std::vector<SmashIntersection> result;
+
+ std::vector<Interval> acuts = monotonicSplit(a);
+ std::vector<Interval> bcuts = monotonicSplit(b);
+ for (auto & acut : acuts){
+ D2<SBasis> ai = portion( a, acut);
+ for (auto & bcut : bcuts){
+ D2<SBasis> bj = portion( b, bcut);
+ std::vector<SmashIntersection> ai_cap_bj = monotonic_smash_intersect( ai, bj, tol );
+ for (auto & k : ai_cap_bj){
+ k.times[X] = k.times[X] * acut.extent() + acut.min();
+ k.times[Y] = k.times[Y] * bcut.extent() + bcut.min();
+ }
+ result.insert( result.end(), ai_cap_bj.begin(), ai_cap_bj.end() );
+ }
+ }
+ cleanup_and_fuse( result );
+ return result;
+}
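+
+/* Editor's usage sketch (illustrative only; curves a and b of type D2<SBasis>
+ * are assumed to be built elsewhere): smash_intersect() returns the fused
+ * list of parameter ranges where the curves come within tol of each other.
+ *
+ *     std::vector<SmashIntersection> hits = smash_intersect(a, b, 0.1);
+ *     for (SmashIntersection const &h : hits) {
+ *         Interval ta = h.times[X];   // parameter range on a
+ *         Interval tb = h.times[Y];   // parameter range on b
+ *         Rect box   = h.bbox;        // box containing the near-contact
+ *     }
+ */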
+
+}
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/orphan-code/nearestpoint.cpp b/src/2geom/orphan-code/nearestpoint.cpp
new file mode 100644
index 0000000..870ed09
--- /dev/null
+++ b/src/2geom/orphan-code/nearestpoint.cpp
@@ -0,0 +1,405 @@
+/*
+** vim: ts=4 sw=4 et tw=0 wm=0
+**
+** RCS Information:
+** $Author: mjw $
+** $Revision: 1 $
+** $Date: 2006-03-28 15:59:38 +1100 (Tue, 28 Mar 2006) $
+**
+** Solving the Nearest Point-on-Curve Problem and
+** A Bezier Curve-Based Root-Finder
+** by Philip J. Schneider
+** from "Graphics Gems", Academic Press, 1990
+** modified by mwybrow, njh
+*/
+
+/* point_on_curve.c */
+
+static double SquaredLength(const Geom::Point a)
+{
+ return dot(a, a);
+}
+
+
+/*
+ * Forward declarations
+ */
+static int FindRoots(Geom::Point *w, int degree, double *t, int depth);
+static Geom::Point *ConvertToBezierForm( Geom::Point P, Geom::Point *V);
+static double ComputeXIntercept( Geom::Point *V, int degree);
+static int ControlPolygonFlatEnough( Geom::Point *V, int degree);
+static int CrossingCount(Geom::Point *V, int degree);
+static Geom::Point Bez(Geom::Point *V, int degree, double t, Geom::Point *Left,
+ Geom::Point *Right);
+
+int MAXDEPTH = 64; /* Maximum depth for recursion */
+
+#define EPSILON (ldexp(1.0,-MAXDEPTH-1)) /*Flatness control value */
+#define DEGREE 3 /* Cubic Bezier curve */
+#define W_DEGREE 5 /* Degree of eqn to find roots of */
+
+
+/*
+ * NearestPointOnCurve :
+ * Compute the parameter value of the point on a Bezier
+ *              curve segment closest to some arbitrary, user-input point.
+ *              Return the parameter value of that nearest point.
+ *
+ Geom::Point P; The user-supplied point
+ Geom::Point *V; Control points of cubic Bezier
+*/
+double NearestPointOnCurve(Geom::Point P, Geom::Point *V)
+{
+ double t_candidate[W_DEGREE]; /* Possible roots */
+
+ /* Convert problem to 5th-degree Bezier form */
+ Geom::Point *w = ConvertToBezierForm(P, V);
+
+ /* Find all possible roots of 5th-degree equation */
+ int n_solutions = FindRoots(w, W_DEGREE, t_candidate, 0);
+ std::free((char *)w);
+
+ /* Check distance to end of the curve, where t = 1 */
+ double dist = SquaredLength(P - V[DEGREE]);
+ double t = 1.0;
+
+ /* Find distances for candidate points */
+ for (int i = 0; i < n_solutions; i++) {
+ Geom::Point p = Bez(V, DEGREE, t_candidate[i], NULL, NULL);
+ double new_dist = SquaredLength(P - p);
+ if (new_dist < dist) {
+ dist = new_dist;
+ t = t_candidate[i];
+ }
+ }
+
+ /* Return the parameter value t */
+ return t;
+}
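+
+/*
+ * Editor's usage sketch (not part of the original Graphics Gems code): the
+ * control points and query point below are made up for illustration.
+ *
+ *     Geom::Point bez[4] = { Geom::Point(0, 0), Geom::Point(1, 2),
+ *                            Geom::Point(3, 3), Geom::Point(4, 2) };
+ *     Geom::Point query(3.5, 1.0);
+ *     double t = NearestPointOnCurve(query, bez);            // parameter of the nearest point
+ *     Geom::Point nearest = Bez(bez, DEGREE, t, NULL, NULL); // the point itself
+ */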
+
+
+/*
+ * ConvertToBezierForm :
+ * Given a point and a Bezier curve, generate a 5th-degree
+ * Bezier-format equation whose solution finds the point on the
+ * curve nearest the user-defined point.
+ */
+static Geom::Point *ConvertToBezierForm(
+ Geom::Point P, /* The point to find t for */
+ Geom::Point *V) /* The control points */
+{
+ Geom::Point c[DEGREE+1]; /* V(i)'s - P */
+ Geom::Point d[DEGREE]; /* V(i+1) - V(i) */
+ Geom::Point *w; /* Ctl pts of 5th-degree curve */
+ double cdTable[3][4]; /* Dot product of c, d */
+ static double z[3][4] = { /* Precomputed "z" for cubics */
+ {1.0, 0.6, 0.3, 0.1},
+ {0.4, 0.6, 0.6, 0.4},
+ {0.1, 0.3, 0.6, 1.0},
+ };
+
+
+ /*Determine the c's -- these are vectors created by subtracting*/
+ /* point P from each of the control points */
+ for (int i = 0; i <= DEGREE; i++) {
+ c[i] = V[i] - P;
+ }
+ /* Determine the d's -- these are vectors created by subtracting*/
+ /* each control point from the next */
+ for (int i = 0; i <= DEGREE - 1; i++) {
+ d[i] = 3.0*(V[i+1] - V[i]);
+ }
+
+ /* Create the c,d table -- this is a table of dot products of the */
+ /* c's and d's */
+ for (int row = 0; row <= DEGREE - 1; row++) {
+ for (int column = 0; column <= DEGREE; column++) {
+ cdTable[row][column] = dot(d[row], c[column]);
+ }
+ }
+
+ /* Now, apply the z's to the dot products, on the skew diagonal*/
+ /* Also, set up the x-values, making these "points" */
+ w = (Geom::Point *)malloc((unsigned)(W_DEGREE+1) * sizeof(Geom::Point));
+ for (int i = 0; i <= W_DEGREE; i++) {
+ w[i][Geom::Y] = 0.0;
+ w[i][Geom::X] = (double)(i) / W_DEGREE;
+ }
+
+ const int n = DEGREE;
+ const int m = DEGREE-1;
+ for (int k = 0; k <= n + m; k++) {
+ const int lb = std::max(0, k - m);
+ const int ub = std::min(k, n);
+ for (int i = lb; i <= ub; i++) {
+ int j = k - i;
+ w[i+j][Geom::Y] += cdTable[j][i] * z[j][i];
+ }
+ }
+
+ return w;
+}
+
+
+/*
+ * FindRoots :
+ * Given a 5th-degree equation in Bernstein-Bezier form, find
+ * all of the roots in the interval [0, 1]. Return the number
+ * of roots found.
+ */
+static int FindRoots(
+ Geom::Point *w, /* The control points */
+ int degree, /* The degree of the polynomial */
+ double *t, /* RETURN candidate t-values */
+ int depth) /* The depth of the recursion */
+{
+ int i;
+ Geom::Point Left[W_DEGREE+1], /* New left and right */
+ Right[W_DEGREE+1]; /* control polygons */
+ int left_count, /* Solution count from */
+ right_count; /* children */
+ double left_t[W_DEGREE+1], /* Solutions from kids */
+ right_t[W_DEGREE+1];
+
+ switch (CrossingCount(w, degree)) {
+ case 0 : { /* No solutions here */
+ return 0;
+ break;
+ }
+ case 1 : { /* Unique solution */
+ /* Stop recursion when the tree is deep enough */
+ /* if deep enough, return 1 solution at midpoint */
+ if (depth >= MAXDEPTH) {
+ t[0] = (w[0][Geom::X] + w[W_DEGREE][Geom::X]) / 2.0;
+ return 1;
+ }
+ if (ControlPolygonFlatEnough(w, degree)) {
+ t[0] = ComputeXIntercept(w, degree);
+ return 1;
+ }
+ break;
+ }
+ }
+
+ /* Otherwise, solve recursively after */
+ /* subdividing control polygon */
+ Bez(w, degree, 0.5, Left, Right);
+ left_count = FindRoots(Left, degree, left_t, depth+1);
+ right_count = FindRoots(Right, degree, right_t, depth+1);
+
+
+ /* Gather solutions together */
+ for (i = 0; i < left_count; i++) {
+ t[i] = left_t[i];
+ }
+ for (i = 0; i < right_count; i++) {
+ t[i+left_count] = right_t[i];
+ }
+
+ /* Send back total number of solutions */
+ return (left_count+right_count);
+}
+
+
+/*
+ * CrossingCount :
+ * Count the number of times a Bezier control polygon
+ * crosses the 0-axis. This number is >= the number of roots.
+ *
+ */
+static int CrossingCount(
+ Geom::Point *V, /* Control pts of Bezier curve */
+ int degree) /* Degree of Bezier curve */
+{
+ int n_crossings = 0; /* Number of zero-crossings */
+ int old_sign; /* Sign of coefficients */
+
+ old_sign = Geom::sgn(V[0][Geom::Y]);
+ for (int i = 1; i <= degree; i++) {
+ int sign = Geom::sgn(V[i][Geom::Y]);
+ if (sign != old_sign)
+ n_crossings++;
+ old_sign = sign;
+ }
+ return n_crossings;
+}
+
+
+
+/*
+ * ControlPolygonFlatEnough :
+ * Check if the control polygon of a Bezier curve is flat enough
+ * for recursive subdivision to bottom out.
+ *
+ */
+static int ControlPolygonFlatEnough(
+ Geom::Point *V, /* Control points */
+ int degree) /* Degree of polynomial */
+{
+ int i; /* Index variable */
+ double *distance; /* Distances from pts to line */
+ double max_distance_above; /* maximum of these */
+ double max_distance_below;
+ double error; /* Precision of root */
+ //Geom::Point t; /* Vector from V[0] to V[degree]*/
+ double intercept_1,
+ intercept_2,
+ left_intercept,
+ right_intercept;
+ double a, b, c; /* Coefficients of implicit */
+ /* eqn for line from V[0]-V[deg]*/
+
+ /* Find the perpendicular distance */
+ /* from each interior control point to */
+ /* line connecting V[0] and V[degree] */
+ distance = (double *)malloc((unsigned)(degree + 1) * sizeof(double));
+ {
+ double abSquared;
+
+ /* Derive the implicit equation for line connecting first */
+ /* and last control points */
+ a = V[0][Geom::Y] - V[degree][Geom::Y];
+ b = V[degree][Geom::X] - V[0][Geom::X];
+ c = V[0][Geom::X] * V[degree][Geom::Y] - V[degree][Geom::X] * V[0][Geom::Y];
+
+ abSquared = (a * a) + (b * b);
+
+ for (i = 1; i < degree; i++) {
+ /* Compute distance from each of the points to that line */
+ distance[i] = a * V[i][Geom::X] + b * V[i][Geom::Y] + c;
+ if (distance[i] > 0.0) {
+ distance[i] = (distance[i] * distance[i]) / abSquared;
+ }
+ if (distance[i] < 0.0) {
+ distance[i] = -((distance[i] * distance[i]) / abSquared);
+ }
+ }
+ }
+
+
+ /* Find the largest distance */
+ max_distance_above = 0.0;
+ max_distance_below = 0.0;
+ for (i = 1; i < degree; i++) {
+ if (distance[i] < 0.0) {
+ max_distance_below = std::min(max_distance_below, distance[i]);
+ };
+ if (distance[i] > 0.0) {
+ max_distance_above = std::max(max_distance_above, distance[i]);
+ }
+ }
+ free((char *)distance);
+
+ {
+ double det;
+ double a1, b1, c1, a2, b2, c2;
+
+ /* Implicit equation for zero line */
+ a1 = 0.0;
+ b1 = 1.0;
+ c1 = 0.0;
+
+ /* Implicit equation for "above" line */
+ a2 = a;
+ b2 = b;
+ c2 = c + max_distance_above;
+
+ det = a1 * b2 - a2 * b1;
+
+ intercept_1 = (b1 * c2 - b2 * c1) / det;
+
+ /* Implicit equation for "below" line */
+ a2 = a;
+ b2 = b;
+ c2 = c + max_distance_below;
+
+ det = a1 * b2 - a2 * b1;
+
+ intercept_2 = (b1 * c2 - b2 * c1) / det;
+ }
+
+ /* Compute intercepts of bounding box */
+ left_intercept = std::min(intercept_1, intercept_2);
+ right_intercept = std::max(intercept_1, intercept_2);
+
+ error = 0.5 * (right_intercept-left_intercept);
+ if (error < EPSILON) {
+ return 1;
+ }
+ else {
+ return 0;
+ }
+}
+
+
+
+/*
+ * ComputeXIntercept :
+ * Compute intersection of chord from first control point to last
+ * with 0-axis.
+ *
+ */
+static double ComputeXIntercept(
+ Geom::Point *V, /* Control points */
+ int degree) /* Degree of curve */
+{
+ const Geom::Point A = V[degree] - V[0];
+
+ return (A[Geom::X]*V[0][Geom::Y] - A[Geom::Y]*V[0][Geom::X]) / -A[Geom::Y];
+}
+
+
+/*
+ * Bez :
+ * Evaluate a Bezier curve at a particular parameter value
+ * Fill in control points for resulting sub-curves if "Left" and
+ * "Right" are non-null.
+ *
+ */
+static Geom::Point Bez(
+ Geom::Point *V, /* Control pts */
+ int degree, /* Degree of bezier curve */
+ double t, /* Parameter value */
+ Geom::Point *Left, /* RETURN left half ctl pts */
+ Geom::Point *Right) /* RETURN right half ctl pts */
+{
+ Geom::Point Vtemp[W_DEGREE+1][W_DEGREE+1];
+
+
+ /* Copy control points */
+ for (int j =0; j <= degree; j++) {
+ Vtemp[0][j] = V[j];
+ }
+
+ /* Triangle computation */
+ for (int i = 1; i <= degree; i++) {
+ for (int j =0 ; j <= degree - i; j++) {
+ Vtemp[i][j] =
+ (1.0 - t) * Vtemp[i-1][j] + t * Vtemp[i-1][j+1];
+ }
+ }
+
+ if (Left != NULL) {
+ for (int j = 0; j <= degree; j++) {
+ Left[j] = Vtemp[j][0];
+ }
+ }
+ if (Right != NULL) {
+ for (int j = 0; j <= degree; j++) {
+ Right[j] = Vtemp[degree-j][j];
+ }
+ }
+
+ return (Vtemp[degree][0]);
+}
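+
+/*
+ * Editor's note: Bez() is a plain de Casteljau evaluation, so it doubles as a
+ * subdivision routine. For example, splitting the cubic from the sketch above
+ * at t = 0.5:
+ *
+ *     Geom::Point left[4], right[4];
+ *     Geom::Point mid = Bez(bez, DEGREE, 0.5, left, right);
+ *     // left[] and right[] hold the control polygons of the two halves and
+ *     // mid == left[3] == right[0] lies on the curve.
+ */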
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/orphan-code/redblack-toy.cpp b/src/2geom/orphan-code/redblack-toy.cpp
new file mode 100644
index 0000000..01ffa7d
--- /dev/null
+++ b/src/2geom/orphan-code/redblack-toy.cpp
@@ -0,0 +1,327 @@
+/*
+ * Copyright 2009 Evangelos Katsikaros <vkatsikaros at yahoo dot gr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+/*
+ initial toy for redblack trees
+*/
+
+#include <2geom/toys/path-cairo.h>
+#include <2geom/toys/toy-framework-2.h>
+
+#include <2geom/orphan-code/redblacktree.h>
+#include <2geom/orphan-code/redblacktree.cpp>
+
+#include <time.h>
+using std::vector;
+using namespace Geom;
+using namespace std;
+
+
+class RedBlackToy: public Toy
+{
+ PointSetHandle handle_set;
+ Geom::Point starting_point; // during click and drag: start point of click
+ Geom::Point ending_point; // during click and drag: end point of click (release)
+ Geom::Point highlight_point; // not used
+
+ Geom::RedBlackTree rbtree_x;
+ RedBlack* search_result;
+ RedBlack temp_deleted_node;
+
+ // colors we are going to use for different purposes
+ colour color_rect, color_rect_guide; // black(a=0.6), black
+ colour color_select_area, color_select_area_guide; // red(a=0.6), red
+
+ int alter_existing_rect;
+ int add_new_rect;
+
+ Rect rect_chosen; // the rectangle of the search area
+ Rect dummy_draw; // the "helper" rectangle that is shown during the click and drag (before the mouse release)
+ int mode; // insert/alter, search, delete modes
+
+ // printing of the tree
+ int help_counter; // the "x" of the label of each node
+ static const int label_size = 15 ; // size the label of each node
+
+ // used for the keys that switch between modes
+ enum menu_item_t
+ {
+ INSERT = 0,
+ DELETE,
+ SEARCH,
+ TOTAL_ITEMS // this one must be the last item
+ };
+ static const char* menu_items[TOTAL_ITEMS];
+ static const char keys[TOTAL_ITEMS];
+
+
+
+ void draw(cairo_t *cr, std::ostringstream *notify, int width, int height, bool save, std::ostringstream *timer_stream) {
+ cairo_set_line_width( cr, 1 );
+
+ // draw the rects that we have in the handles
+ for( unsigned i=0; i<handle_set.pts.size(); i=i+2 ){
+ Rect r1( handle_set.pts[i], handle_set.pts[i+1] );
+ cairo_rectangle( cr, r1 );
+ }
+ cairo_set_source_rgba( cr, color_rect);
+ cairo_stroke( cr );
+
+ // draw a rect if we click & drag (so that we know what we are going to create)
+ if(add_new_rect){
+ dummy_draw = Rect( starting_point, ending_point );
+ cairo_rectangle( cr, dummy_draw );
+ if( mode == 0){
+ cairo_set_source_rgba( cr, color_rect_guide);
+ }
+ else if( mode == 1){
+ cairo_set_source_rgba( cr, color_select_area_guide );
+ }
+ cairo_stroke( cr );
+ }
+
+ // draw a rect for the search area
+ cairo_rectangle( cr, rect_chosen );
+ cairo_set_source_rgba( cr, color_select_area);
+ cairo_stroke( cr );
+
+ Toy::draw( cr, notify, width, height, save,timer_stream );
+ draw_tree_in_toy( cr ,rbtree_x.root, 0);
+ help_counter=0;
+ }
+
+ void mouse_moved(GdkEventMotion* e){
+ if( !( alter_existing_rect && mode == 1 ) ){
+ Toy::mouse_moved(e);
+ }
+
+ if(add_new_rect){
+ ending_point = Point(e->x, e->y);
+ }
+ }
+
+ void mouse_pressed(GdkEventButton* e) {
+ Toy::mouse_pressed(e);
+ if(e->button == 1){ // left mouse button
+ if( mode == 0 ){ // mode: insert / alter
+ if(!selected) {
+ starting_point = Point(e->x, e->y);
+ ending_point = starting_point;
+ add_new_rect = 1;
+ }
+ else
+ {
+ // TODO find the selected rect
+ // ideas : from Handle *selected ???
+ //std::cout <<find_selected_rect(selected) << std::endl ;
+ alter_existing_rect = 1;
+ }
+ }
+ else if( mode == 1 ){ // mode: search
+ if(!selected) {
+ starting_point = Point(e->x, e->y);
+ ending_point = starting_point;
+ add_new_rect = 1;
+ }
+ else{
+ alter_existing_rect = 1;
+ }
+ }
+ else if( mode == 2) { // mode: delete
+ }
+ }
+ else if(e->button == 2){ //middle button
+ }
+ else if(e->button == 3){ //right button
+ }
+ }
+
+ virtual void mouse_released(GdkEventButton* e) {
+ Toy::mouse_released(e);
+ if( e->button == 1 ) { //left mouse button
+ if( mode == 0) { // mode: insert / alter
+ if( add_new_rect ){
+ ending_point = Point(e->x, e->y);
+ handle_set.push_back(starting_point);
+ handle_set.push_back(ending_point);
+ insert_in_tree_the_last_rect();
+ add_new_rect = 0;
+ }
+ else if( alter_existing_rect ){
+ //TODO update rect (and tree)
+ // delete selected rect
+ // insert altered
+ alter_existing_rect = 0;
+ }
+ }
+ else if( mode == 1 ){ // mode: search
+ if( add_new_rect ){
+ ending_point = Point(e->x, e->y);
+ rect_chosen = Rect(starting_point, ending_point);
+
+ // search in the X axis
+ Coord a = rect_chosen[0].min();
+ Coord b = rect_chosen[0].max();
+ search_result = rbtree_x.search( Interval( a, b ) );
+ if(search_result){
+ std::cout << "Found: (" << search_result->data << ": " << search_result->key()
+ << ", " << search_result->high() << " : " << search_result->subtree_max << ") "
+ << std::endl;
+ }
+ else{
+ std::cout << "Nothing found..."<< std::endl;
+ }
+ add_new_rect = 0;
+ }
+ else if(alter_existing_rect){
+ // do nothing
+ alter_existing_rect = 0;
+ }
+ }
+ else if( mode == 2) { // mode: delete
+
+ }
+ }
+ else if(e->button == 2){ //middle button
+ }
+ else if(e->button == 3){ //right button
+
+ }
+ }
+
+
+ void key_hit(GdkEventKey *e)
+ {
+ char choice = std::toupper(e->keyval);
+ switch ( choice )
+ {
+ case 'A':
+ mode = 0;
+ break;
+ case 'B':
+ mode = 1;
+ break;
+ case 'C':
+ mode = 2;
+ break;
+ }
+ //redraw();
+ }
+
+ void insert_in_tree_the_last_rect(){
+ unsigned i = handle_set.pts.size() - 2;
+ Rect r1(handle_set.pts[i], handle_set.pts[i+1]);
+ // insert in X axis tree
+ rbtree_x.insert(r1, i, 0);
+ rbtree_x.print_tree();
+ };
+
+ void draw_tree_in_toy(cairo_t* cr, Geom::RedBlack* n, int depth = 0) {
+ if(n){
+ if(n->left){
+ draw_tree_in_toy(cr, n->left, depth+1);
+ }
+ help_counter += 1;
+ //drawthisnode(cr, x*10, depth*10);
+ if(n->isRed){
+ cairo_set_source_rgba (cr, color_select_area_guide);
+ }
+ else{
+ cairo_set_source_rgba (cr, color_rect_guide);
+ }
+
+ cairo_stroke(cr);
+
+ Geom::Point text_point = Point( help_counter*15, depth*15 );
+            char label[16]; // room for any int; the original [4] could overflow
+ sprintf( label,"%d",n->data ); // instead of std::itoa(depth, label, 10);
+
+ draw_text(cr, text_point, label);
+ ////////////////////////////////////////////////////////////////
+ if(n->right){
+ draw_tree_in_toy(cr, n->right, depth+1);
+ }
+ }
+ };
+
+/*
+ int find_selected_rect(PointHandle * selected){
+
+ for( unsigned i=0; i<handle_set.pts.size(); i=i+2 ){
+ if( handle_set.pts[i] == selected || handle_set.pts[i+1] == selected ){
+ return i;
+ }
+ }
+
+ return -1;
+ };
+*/
+
+
+public:
+ RedBlackToy(): color_rect(0, 0, 0, 0.6), color_rect_guide(0, 0, 0, 1),
+ color_select_area(1, 0, 0, 0.6 ), color_select_area_guide(1, 0, 0, 1 ),
+ alter_existing_rect(0), add_new_rect(0), mode(0), help_counter(0)
+ {
+ if(handles.empty()) {
+ handles.push_back(&handle_set);
+ }
+        // rect_chosen and dummy_draw are left default-constructed
+ }
+
+
+};
+
+
+
+int main(int argc, char **argv) {
+ std::cout << "---------------------------------------------------------"<< std::endl;
+ std::cout << "Let's play with the Red Black Tree! ONLY Insert works now!!!"<< std::endl;
+ std::cout << " Key A: insert/alter mode "<< std::endl;
+ std::cout << " * Left click and drag on white area: create a rectangle"<< std::endl;
+ std::cout << " *NOT READY: Left click and drag on handler: alter a rectangle"<< std::endl;
+ std::cout << " Key B: search mode "<< std::endl;
+ std::cout << " * Left click and drag on white area: \"search\" for nodes that intersect red area"<< std::endl;
+ std::cout << " NOT READY: Key C: delete mode "<< std::endl;
+    std::cout << "  * Left click on handler: delete a rectangle"<< std::endl;
+ std::cout << "---------------------------------------------------------"<< std::endl;
+ init(argc, argv, new RedBlackToy);
+ return 0;
+}
+
+const char* RedBlackToy::menu_items[] =
+{
+ "Insert / Alter Rectangle",
+ "Search Rectangle",
+ "Delete Reactangle"
+};
+
+const char RedBlackToy::keys[] =
+{
+ 'A', 'B', 'C'
+};
diff --git a/src/2geom/orphan-code/redblacktree.cpp b/src/2geom/orphan-code/redblacktree.cpp
new file mode 100644
index 0000000..bf9a728
--- /dev/null
+++ b/src/2geom/orphan-code/redblacktree.cpp
@@ -0,0 +1,575 @@
+#include <2geom/orphan-code/redblacktree.h>
+#include <algorithm>    // std::max
+#include <iostream>     // std::cout in _REDBLACK_PRINT
+#include <cfloat>       // DBL_MAX
+
+
+// Comment the first definition and uncomment the second one to silence the
+// debug output produced during RedBlack Tree operations.
+#define _REDBLACK_PRINT(x) std::cout << x << std::endl;
+//#define _REDBLACK_PRINT(x) ;
+
+
+namespace Geom{
+
+
+
+RedBlack* RedBlackTree::search(Rect const &r, int dimension){
+ return search( Interval( r[dimension].min(), r[dimension].max() ) );
+ // TODO get rid of dimension
+ // TODO put 2 trees (X, Y axis in one lump)
+}
+
+/*
+INTERVAL-SEARCH(T, i)
+1 x <- root[T]
+2 while x != nil[T] and i does not overlap int[x]
+3 do if left[x] != nil[T] and max[left[x]] >= low[i]
+4 then x <- left[x]
+5 else x <- right[x]
+6 return x
+
+Two intervals i,x overlap in the 4 following cases:
+ 1) |--------| i
+ |---| x
+
+ 2) |-----| i
+ |----------| x
+
+ 3) |------| i
+ |------| x
+
+ 4) |----| i
+ |----| x
+
+And do not overlap when (the one is left or right of the other)
+ 1) |--------| i
+ |---| x
+
+ 2) |-----| i
+ |----------| x
+
+
+*/
+RedBlack* RedBlackTree::search(Interval i){
+ _REDBLACK_PRINT( "==============================================================" << std::endl << "ENTER: search(Interval i) : (" << i.min() << ", " << i.max() << ")" )
+ RedBlack *x;
+ x = root;
+
+ while( x!=0 &&
+ ( i.max() < x->interval.min() ||
+ i.min() > x->interval.max() )
+ ){
+ _REDBLACK_PRINT( "(" << x->data << ": " << x->key() << ", " << x->high() << " : " << x->subtree_max << ") "
+ << " i do not overlap with x")
+
+ if(x->left != 0 && (x->left)->subtree_max >= i.min() ){
+ x = x->left;
+ }
+ else{
+ x = x->right;
+ }
+ }
+ _REDBLACK_PRINT( "RETURN: search" << std::endl )
+ return x;
+}
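+
+/* Editor's usage sketch (illustrative; `tree` stands for some RedBlackTree
+ * instance populated via insert() below, and search() is assumed public, as
+ * the interval-toy's usage suggests):
+ *
+ *     RedBlack *hit = tree.search( Interval(10, 20) );
+ *     if (hit) {
+ *         // hit->data is the shape id; [hit->key(), hit->high()] is the stored interval
+ *     }
+ */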
+
+
+
+void RedBlackTree::insert(Rect const &r, int shape, int dimension) {
+ _REDBLACK_PRINT( "==============================================================" << std::endl << "ENTER: insert(Rect, int, dimension): " << " dimension:" << dimension << " shape:" << shape )
+ insert(r[dimension].min(), r[dimension].max(), shape);
+ _REDBLACK_PRINT( "RETURN: insert(Rect, int, dimension)")
+}
+
+// source: book pp 251
+void RedBlackTree::insert(Coord dimension_min, Coord dimension_max, int shape) {
+ _REDBLACK_PRINT( std::endl << "ENTER: insert(Coord, Coord, int): " << dimension_min << ", " << dimension_max << " , shape: " << shape )
+ // x is the new node we insert
+ RedBlack *x = new RedBlack();
+ x->interval = Interval( dimension_min, dimension_max );
+ x->data = shape;
+ x->isRed = true;
+
+ _REDBLACK_PRINT( " x: " << x << " KEY: " << x->key() << " high: " << x->high() )
+
+ tree_insert(x);
+
+ print_tree();
+
+ _REDBLACK_PRINT( " Begin coloring" )
+ // we now do the coloring of the tree.
+ _REDBLACK_PRINT( " while( x!= root && (x->parent)->isRed )" )
+ while( x!= root && (x->parent)->isRed ){
+ _REDBLACK_PRINT( " ((x->parent)->parent)->left:" << ((x->parent)->parent)->left << " ((x->parent)->parent)->right:" << ((x->parent)->parent)->right )
+
+ if( x->parent == ((x->parent)->parent)->left ){
+ _REDBLACK_PRINT( " Left:" )
+            RedBlack *y = ((x->parent)->parent)->right;
+ if( y == 0 ){
+                /*
+                This 1st branch is not in the book, but is needed: we must check y->isRed,
+                but y may be 0 (a null leaf), which would segfault. A null leaf is black
+                by definition, so do the same as in the final else branch.
+                */
+ _REDBLACK_PRINT( " y==0" )
+ if( x == (x->parent)->right ){
+ x = x->parent;
+ left_rotate(x);
+ }
+ (x->parent)->isRed = false;
+ ((x->parent)->parent)->isRed = true;
+ right_rotate((x->parent)->parent);
+ }
+ else if( y->isRed ){
+ _REDBLACK_PRINT( " y->isRed" )
+ (x->parent)->isRed = false;
+ y->isRed = false;
+ ((x->parent)->parent)->isRed = true;
+ x = (x->parent)->parent;
+ }
+ else{
+ _REDBLACK_PRINT( " !( y->isRed)" )
+ if( x == (x->parent)->right ){
+ x = x->parent;
+ left_rotate(x);
+ }
+ (x->parent)->isRed = false;
+ ((x->parent)->parent)->isRed = true;
+ right_rotate((x->parent)->parent);
+ }
+ }
+ else{ // this branch is the same with the above if clause with "right", "left" exchanged
+ _REDBLACK_PRINT( " Right:" )
+            RedBlack *y = ((x->parent)->parent)->left;
+ if( y == 0 ){
+                /*
+                This 1st branch is not in the book, but is needed: we must check y->isRed,
+                but y may be 0 (a null leaf), which would segfault. A null leaf is black
+                by definition, so do the same as in the final else branch.
+                */
+ _REDBLACK_PRINT( " y==0" )
+ if( x == (x->parent)->left ){
+ x = x->parent;
+ right_rotate(x);
+ }
+ (x->parent)->isRed = false;
+ ((x->parent)->parent)->isRed = true;
+ left_rotate((x->parent)->parent);
+ }
+ else if( y->isRed ){
+ _REDBLACK_PRINT( " y->isRed" )
+ (x->parent)->isRed = false;
+ y->isRed = false;
+ ((x->parent)->parent)->isRed = true;
+ x = (x->parent)->parent;
+ }
+ else{
+ _REDBLACK_PRINT( " !( y->isRed)" )
+ if( x == (x->parent)->left ){
+ x = x->parent;
+ right_rotate(x);
+ }
+ (x->parent)->isRed = false;
+ ((x->parent)->parent)->isRed = true;
+ left_rotate((x->parent)->parent);
+ }
+ }
+ }
+ root->isRed = false;
+
+ // update the max value with a slow/stupid yet certain way, walking all the tree :P
+ // TODO find better way
+ _REDBLACK_PRINT( " Update max" )
+
+ update_max(root);
+
+ _REDBLACK_PRINT( "RETURN: insert(Coord, Coord, int)" << std::endl)
+}
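+
+/* Editor's usage sketch (illustrative; shape ids are arbitrary). The Rect
+ * overload simply projects the rectangle onto one axis before inserting:
+ *
+ *     RedBlackTree tree;
+ *     tree.insert( Rect( Point(0, 0), Point(5, 3) ), 1, 0 );   // keys on the X range [0, 5]
+ *     tree.insert( Rect( Point(3, 1), Point(9, 4) ), 2, 0 );   // keys on the X range [3, 9]
+ *     RedBlack *hit = tree.search( Interval(8, 11) );          // finds shape 2
+ */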
+
+// from book p. 266
+void RedBlackTree::left_rotate(RedBlack* x){
+    // x->right != 0 (assumption, book page 266)
+    // ??? hm problem ???
+    _REDBLACK_PRINT( "ENTER: left_rotate" )
+    RedBlack* y = x->right;
+ x->right = y->left;
+
+ if( y->left != 0){
+ (y->left)->parent = x;
+ }
+
+ y->parent = x->parent;
+
+ if( x->parent == 0){
+ root = y;
+ }
+ else{
+ if( x == (x->parent)->left ){
+ (x->parent)->left = y;
+ }
+ else{
+ (x->parent)->right = y;
+ }
+ }
+ y->left = x;
+ x->parent = y;
+ _REDBLACK_PRINT( "RETURN: left_rotate" << std::endl )
+}
+
+// from book p. 266: right_rotate is inverse of left_rotate
+// same to left_rotate with "right", "left" exchanged
+void RedBlackTree::right_rotate(RedBlack* x){
+    // x->left != 0 (assumption, book page 266, mirrored)
+    // ??? hm problem ??
+    _REDBLACK_PRINT( "ENTER: right_rotate" )
+    _REDBLACK_PRINT( "x->left: " << x->left )
+    RedBlack* y = x->left;
+ x->left = y->right;
+
+ if( y->right != 0){
+ (y->right)->parent = x;
+ }
+
+ y->parent = x->parent;
+
+ if( x->parent == 0){
+ root = y;
+ }
+ else{
+ if( x == (x->parent)->left ){
+ (x->parent)->left = y;
+ }
+ else{
+ (x->parent)->right = y;
+ }
+ }
+ y->right = x;
+ x->parent = y;
+ _REDBLACK_PRINT( "RETURN: right_rotate" << std::endl )
+}
+
+// insertion in binary search tree: book page 251
+// then the redblack insert performs the coloring
+void RedBlackTree::tree_insert(RedBlack* z){
+ _REDBLACK_PRINT( "ENTER: tree_insert(RedBlack* z)" )
+ RedBlack* y = 0; // y <- nil
+
+ RedBlack* x = root;
+
+ _REDBLACK_PRINT( " while x!=0 " )
+ while( x != 0 ){
+ y = x;
+// _REDBLACK_PRINT( " x:" << x << " y:" << y << " z:" << z )
+ _REDBLACK_PRINT( " z->key: " << z->key() << " y->key: " << y->key() << " compare")
+ if( z->key() < x->key() ){
+ _REDBLACK_PRINT( " z smaller: go left" )
+ x = x->left;
+ }
+ else{
+ _REDBLACK_PRINT( " z bigger: go right" )
+ x = x->right;
+ }
+ }
+
+ _REDBLACK_PRINT( " z->parent = y" )
+ z->parent = y;
+
+ if( y == 0 ){
+ _REDBLACK_PRINT( " set z root (empty tree)" )
+ root = z;
+ }
+ else{
+ _REDBLACK_PRINT( " z->key: " << z->key() << " y->key: " << y->key() << " compare")
+ if( z->key() < y->key() ){
+ _REDBLACK_PRINT( " z->key() smaller: y->left = z; " )
+ y->left = z;
+ }
+ else{
+ _REDBLACK_PRINT( " z->key() bigger: y->right = z " )
+ y->right = z;
+ }
+ }
+ _REDBLACK_PRINT( "RETURN: tree_insert(RedBlack* z)" << std::endl )
+}
+
+
+/*
+RB-DELETE(T, z)
+ 1 if left[z] = nil[T] or right[z] = nil[T]
+ 2 then y <- z
+ 3 else y <- TREE-SUCCESSOR(z)
+ 4 if left[y] != nil[T]
+ 5 then x <- left[y]
+ 6 else x <- right[y]
+ 7 p[x] <- p[y]
+ 8 if p[y] = nil[T]
+ 9 then root[T] <- x
+10 else if y = left[p[y]]
+11 then left[p[y]] <- x
+12 else right[p[y]] <- x
+13 if y != z
+14 then key[z] <- key[y]
+15 copy y's satellite data into z
+16 if color[y] = BLACK
+17 then RB-DELETE-FIXUP(T, x)
+18 return y
+*/
+RedBlack* RedBlackTree::erase(RedBlack* z){
+    _REDBLACK_PRINT( "==============================================================" << std::endl << "ENTER: erase(z)" )
+    RedBlack* x = 0;
+    RedBlack* y = 0;
+ if( z->left == 0 || z->right == 0 ){
+ y = z;
+ }
+ else{
+ y = tree_successor(z);
+ }
+
+ if( y->left != 0 ){
+ x = y->left;
+ }
+ else{
+ x = y->right;
+ }
+
+    // the book relies on a NIL sentinel here; without one, x may be null
+    if( x != 0 ){
+        x->parent = y->parent;
+    }
+
+ if( y->parent == 0){
+ root = x;
+ }
+ else {
+ if( y == (y->parent)->left ){
+ (y->parent)->left = x;
+ }
+ else{
+ (y->parent)->right = x;
+ }
+ }
+
+ if( y != z){
+ z->interval = y->interval ; // key[z] <- key[y] TODO check this
+ //copy y's satellite data into z
+ z->data = y->data;
+        // the book copies only the key and the satellite data: z keeps its own
+        // color and its own left/right/parent pointers (y has just been spliced out)
+ }
+
+ if( y->isRed == false){
+ erase_fixup(x);
+ }
+
+ _REDBLACK_PRINT( "Update max" )
+ update_max(root);
+
+ _REDBLACK_PRINT( "RETURN: erase" )
+ return y;
+}
+
+/*
+RB-DELETE-FIXUP(T, x)
+ 1 while x != root[T] and color[x] = BLACK
+ 2 do if x = left[p[x]]
+ 3 then w <- right[p[x]]
+ 4 if color[w] = RED
+ 5 then color[w] <- BLACK Case 1
+ 6 color[p[x]] <- RED Case 1
+ 7 LEFT-ROTATE(T, p[x]) Case 1
+ 8 w <- right[p[x]]
+ 9 if color[left[w]] = BLACK and color[right[w]] = BLACK
+10 then color[w] <- RED Case 2
+11                 x <- p[x]                                  Case 2
+12 else if color[right[w]] = BLACK
+13 then color[left[w]] <- BLACK Case 3
+14 color[w] <- RED Case 3
+15 RIGHT-ROTATE(T, w) Case 3
+16 w <- right[p[x]] Case 3
+17 color[w] <- color[p[x]] Case 4
+18 color[p[x]] <- BLACK Case 4
+19 color[right[w]] <- BLACK Case 4
+20 LEFT-ROTATE(T, p[x]) Case 4
+21 x <- root[T] Case 4
+22 else (same as then clause with "right" and "left" exchanged)
+23 color[x] <- BLACK
+*/
+void RedBlackTree::erase_fixup(RedBlack* x){
+ RedBlack* w = 0;
+ while( x != root && x->isRed == false ){
+ if( x == (x->parent)->left ){
+ w = (x->parent)->right;
+ if(w->isRed == true){
+ w->isRed = false;
+ (w->parent)->isRed = true;
+ left_rotate(x->parent);
+ w = (x->parent)->right;
+ }
+ if( (w->left)->isRed == false && (w->right)->isRed == false ){
+ w->isRed = true;
+ x = x->parent; // TODO understand why this happens ???
+ }
+ else{
+ if( (w->right)->isRed == false ){
+ (w->left)->isRed = false;
+ right_rotate(w);
+ w = (x->parent)->right;
+ }
+ else{ // TODO ??? is this correct ???
+ w->isRed = (x->parent)->isRed;
+ (x->parent)->isRed = false;
+ (w->right)->isRed = false;
+ left_rotate(x->parent);
+ x = root; // TODO ??? is this correct ???
+ }
+ }
+ }
+ else{ // same as then clause with "right" and "left" exchanged
+ w = (x->parent)->left;
+ if(w->isRed == true){
+ w->isRed = false;
+ (w->parent)->isRed = true;
+ right_rotate(x->parent);
+ w = (x->parent)->left;
+ }
+ if( (w->right)->isRed == false && (w->left)->isRed == false ){
+ w->isRed = true;
+ x = x->parent; // ??? is this correct ???
+ }
+ else{
+ if( (w->left)->isRed == false ){
+ (w->right)->isRed = false;
+ left_rotate(w);
+ w = (x->parent)->left;
+ }
+ else{ // TODO ??? is this correct ???
+ w->isRed = (x->parent)->isRed;
+ (x->parent)->isRed = false;
+ (w->left)->isRed = false;
+ right_rotate(x->parent);
+ x = root; // TODO ??? is this correct ???
+ }
+ }
+ }
+ }
+ x->isRed = false;
+}
+
+
+void RedBlackTree::print_tree(){
+ std::cout << "Print RedBlackTree status:" << std::endl;
+ inorder_tree_walk(root);
+}
+
+
+void RedBlackTree::inorder_tree_walk(RedBlack* x){
+ int oops =0;
+ if( x != 0 ){
+ inorder_tree_walk(x->left);
+ std::cout<< "(" << x->data << ": " << x->key() << ", " << x->high() << " : " << x->subtree_max << ") " ;
+
+ if( x->left != 0 ){
+ std::cout<< "L:(" << (x->left)->data << ", " << (x->left)->key() << ") " ;
+ if( x->key() < (x->left)->key()){
+ std::cout<<" !!! ";
+ oops = 1;
+ }
+ }
+ else{
+ std::cout<< "L:0 " ;
+ }
+
+ if( x->right != 0 ){
+ std::cout<< "R:(" << (x->right)->data << ", "<< (x->right)->key() << ") " ;
+ if( x->key() > (x->right)->key() ){
+ std::cout<<" !!! ";
+ oops = 1;
+ }
+ }
+ else{
+ std::cout<< "R:0 " ;
+ }
+
+ if(oops){
+ std::cout<<" ....... !!! Problem " << oops ;
+ }
+ std::cout << std::endl;
+ inorder_tree_walk(x->right);
+ }
+}
+
+// a post-order walk of the tree (children first, then x), not an inorder walk
+void RedBlackTree::update_max(RedBlack* x){
+ Coord max_left, max_right;
+ if( x != 0 ){
+ update_max(x->left);
+ update_max(x->right);
+
+        // check for child
+        // if a child is Nil its contribution is -DBL_MAX, the neutral element for max
+        // (DBL_MIN would be wrong: it is the smallest *positive* double, not the most negative)
+        if( x->left == 0 ){
+            max_left = -DBL_MAX ;
+        }
+        else{
+            max_left = (x->left)->subtree_max;
+        }
+
+        if( x->right == 0 ){
+            max_right = -DBL_MAX ;
+        }
+ }
+ else{
+ max_right = (x->right)->subtree_max;
+ }
+
+ //find max of: x->high(), max_left, max_right
+ Coord temp_max;
+ temp_max = std::max( x->high(), max_left );
+ temp_max = std::max( temp_max, max_right );
+ x->subtree_max = temp_max;
+
+ }
+}
+
+
+RedBlack* RedBlackTree::tree_minimum(RedBlack* x){
+ _REDBLACK_PRINT( "==============================================================" << std::endl << "ENTER: tree_minimum" )
+    while( x->left != 0 ) {
+        x = x->left;
+    }
+ _REDBLACK_PRINT( "RETURN: tree_minimum" << std::endl )
+ return x;
+}
+
+RedBlack* RedBlackTree::tree_successor(RedBlack* x){
+ _REDBLACK_PRINT( "==============================================================" << std::endl << "ENTER: tree_successor" )
+    if( x->right != 0 ){
+        _REDBLACK_PRINT( "RETURN: tree_successor" << std::endl )
+        return tree_minimum(x->right);
+    }
+    RedBlack* y = x->parent;
+    _REDBLACK_PRINT( "y = x->parent: " << y )
+    while( y != 0 && x == y->right ){
+ x = y;
+ y = y->parent;
+ }
+ _REDBLACK_PRINT( "RETURN: tree_successor" << std::endl )
+ return y;
+}
+
+
+};
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/orphan-code/rtree.cpp b/src/2geom/orphan-code/rtree.cpp
new file mode 100644
index 0000000..4264292
--- /dev/null
+++ b/src/2geom/orphan-code/rtree.cpp
@@ -0,0 +1,1350 @@
+#include <2geom/orphan-code/rtree.h>
+#include <limits>
+
+/*
+Based on source (BibTex):
+@inproceedings{DBLP:conf/sigmod/Guttman84,
+ author = {Antonin Guttman},
+ title = {R-Trees: A Dynamic Index Structure for Spatial Searching},
+ booktitle = {SIGMOD Conference},
+ year = {1984},
+ pages = {47-57},
+ ee = {http://doi.acm.org/10.1145/602259.602266, db/conf/sigmod/Guttman84.html},
+}
+*/
+
+/*
+#define _RTREE_PRINT(x) std::cout << x << std::endl;
+#define _RTREE_PRINT_TREE( x, y ) print_tree( x, y );
+#define _RTREE_PRINT_TREE_INS( x, y, z ) print_tree( x, y, z );
+*/
+//comment the following no-op macros (and uncomment the block above) if you want output during RTree operations
+
+
+#define _RTREE_PRINT(x) ;
+#define _RTREE_PRINT_TREE( x, y ) ;
+#define _RTREE_PRINT_TREE_INS( x, y, z ) ;
+
+
+
+/*
+TODO 1
+some if(non_leaf)
+ else // leaf
+could be eliminated when function starts from a leaf
+do leaf action
+then repeat function for non-leafs only
+candidates:
+- adjust_tree
+- condense_tree
+
+TODO 2
+generalize in a different way the splitting techniques
+
+*/
+
+
+namespace Geom{
+
+/*=============================================================================
+ insert
+===============================================================================
+insert a new index entry E into the R-tree:
+
+I1) find position of new record:
+ choose_node will find a leaf node L (position) in which to place r
+I2) add record to leaf node:
+ if L has room for another entry install E
+ else split_node will obtain L and LL containing E and all the old entries of L
+    from the available splitting strategies we chose the quadratic-cost algorithm (just to begin
+    with something)
+ // TODO implement more of them
+I3) propagate changes upward:
+ Invoke adjust_tree on L, also passing LL if a split was performed.
+I4) grow tree taller:
+    if node split propagation caused the root to split
+ create new root whose children are the 2 resulting nodes
+*/
+
+void RTree::insert( Rect const &r, unsigned shape ){
+ _RTREE_PRINT("\n=====================================");
+ _RTREE_PRINT("insert");
+ RTreeRecord_Leaf* leaf_record= new RTreeRecord_Leaf( r, shape );
+ insert( *leaf_record );
+}
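+
+/* Editor's usage sketch (illustrative only; the RTree constructor and its
+ * min/max record parameters live in rtree.h and are assumed to be configured
+ * elsewhere):
+ *
+ *     // RTree tree( ... );                                  // see rtree.h
+ *     tree.insert( Rect( Point(0, 0), Point(2, 1) ), 7 );    // shape id 7
+ *     tree.insert( Rect( Point(5, 5), Point(9, 8) ), 8 );    // shape id 8
+ *     // each insert may split a node (I2) and, via I3/I4, grow a new root.
+ */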
+
+
+
+void RTree::insert( const RTreeRecord_Leaf &leaf_record,
+ const bool &insert_high /* false */,
+ const unsigned &stop_height /* 0 */,
+ const RTreeRecord_NonLeaf &nonleaf_record /* 0 */
+ )
+{
+ _RTREE_PRINT("\n--------------");
+ _RTREE_PRINT("insert private. element:" << leaf_record.data << " insert high:" << insert_high << " stop height:" << stop_height );
+ RTreeNode *position = 0;
+
+ // if tree is unused create the root Node, not described in source, stupid me :P
+ if(root == 0){
+ root = new RTreeNode();
+ }
+
+ _RTREE_PRINT("I1"); // I1
+ if( insert_high == false ){ // choose leaf node
+ position = choose_node( leaf_record.bounding_box );
+ }
+ else { // choose nonleaf node
+ position = choose_node( nonleaf_record.bounding_box, insert_high, stop_height );
+ }
+ _RTREE_PRINT("leaf node chosen: " );
+ _RTREE_PRINT_TREE( position , 0 );
+ std::pair< RTreeNode*, RTreeNode* > node_division;
+
+ bool split_performed = false;
+
+ if( position->children_nodes.size() > 0 ){ // non-leaf node: position
+ // we must reach here only to insert high non leaf node, not insert leaf node
+ assert( insert_high == true );
+
+ // put new element in node temporarily. Later on, if we need to, we will split the node.
+ position->children_nodes.push_back( nonleaf_record );
+ if( position->children_nodes.size() <= max_records ){
+ _RTREE_PRINT("I2 nonleaf: no split: " << position->children_nodes.size() ); // I2
+ }
+ else{
+ _RTREE_PRINT("I2 nonleaf: split: " << position->children_nodes.size() ); // I2
+ node_division = split_node( position );
+ split_performed = true;
+ }
+
+ }
+ else { // leaf node: position:
+ // we must reach here only to insert leaf node, not insert high non leaf node
+ assert( insert_high == false );
+
+
+ // put new element in node temporarily. Later on, if we need to, we will split the node.
+ position->children_leaves.push_back( leaf_record );
+ if( position->children_leaves.size() <= max_records ){
+ _RTREE_PRINT("I2 leaf: no split: " << position->children_leaves.size() ); // I2
+ }
+ else{
+ _RTREE_PRINT("I2 leaf: split: " << position->children_leaves.size() << " max_records:" << max_records); // I2
+ node_division = split_node( position );
+ split_performed = true;
+
+ _RTREE_PRINT(" group A");
+ _RTREE_PRINT_TREE( node_division.first , 3 );
+ _RTREE_PRINT(" group B");
+ _RTREE_PRINT_TREE( node_division.second , 3 );
+
+ }
+
+ }
+
+ _RTREE_PRINT("I3"); // I3
+ bool root_split_performed = adjust_tree( position, node_division, split_performed );
+ _RTREE_PRINT("root split: " << root_split_performed);
+
+
+// _RTREE_PRINT("TREE:");
+// print_tree( root , 2 );
+
+ _RTREE_PRINT("I4"); // I4
+ if( root_split_performed ){
+ std::pair<RTreeNode*, RTreeNode*> root_division;
+ root_division = quadratic_split( root ); // AT5
+
+ Rect first_record_bounding_box;
+ Rect second_record_bounding_box;
+
+ RTreeRecord_NonLeaf first_new_record = create_nonleaf_record_from_rtreenode( first_record_bounding_box, root_division.first );
+ RTreeRecord_NonLeaf second_new_record = create_nonleaf_record_from_rtreenode( second_record_bounding_box, root_division.second );
+ _RTREE_PRINT(" 1st:");
+ _RTREE_PRINT_TREE( first_new_record.data, 5 );
+ _RTREE_PRINT(" 2nd:");
+ _RTREE_PRINT_TREE( second_new_record.data, 5 );
+
+ // *new* root is by definition non-leaf. Install the new records there
+ RTreeNode* new_root = new RTreeNode();
+ new_root->children_nodes.push_back( first_new_record );
+ new_root->children_nodes.push_back( second_new_record );
+
+ delete root;
+
+ root = new_root;
+        tree_height++; // increase tree height
+
+ _RTREE_PRINT_TREE( root, 5 );
+ sanity_check( root, 0 );
+ }
+ _RTREE_PRINT("done");
+
+ /*
+    node_division.second has been saved in the tree;
+    node_division.first was copied into the existing tree node,
+    so we don't need the temporary first group anymore
+ */
+ delete node_division.first;
+}
+
+/* I1 =========================================================================
+
+original: choose_node will find a leaf node L in which to place r
+changed to choose_node will find a node L in which to place r
+the node L is:
+non-leaf: if flag is set. the height of the node is insert_at_height
+leaf: if flag is NOT set
+
+1) Initialize: set N to be the root node
+2) Leaf Check:
+ insert_height = false
+ if N is leaf return N
+ insert_height = true
+3) Choose subtree: If N is not a leaf OR we are not yet at the proper height then
+ let F be an entry in N whose rect Fi needs least enlargement to include r
+ ties resolved with rect of smallest area
+4) descend until a leaf is reached OR proper height is reached: set N to the child node pointed to by F and goto 2.
+*/
+
+// TODO keep stack with visited nodes
+
+RTreeNode* RTree::choose_node( const Rect &r, const bool &insert_high /* false */, const unsigned &stop_height /* 0 */) const {
+
+ _RTREE_PRINT(" CL1");// CL1
+ RTreeNode *pos = root;
+
+ double min_enlargement;
+ double current_enlargement;
+ int node_min_enlargement;
+ unsigned current_height = 0; // zero is the root
+
+ _RTREE_PRINT(" CL2 current_height:" << current_height << " stop_height:" << stop_height << " insert_high:" << insert_high);
+ // CL2 Leaf check && Height check
+ while( ( insert_high ? true : pos->children_nodes.size() != 0 )
+ && ( insert_high ? current_height < stop_height : true ) )
+ /* Leaf check, during insert leaf */
+ /* node height check, during insert non-leaf */
+ {
+ _RTREE_PRINT(" CL3 current_height:" << current_height << " stop_height:" << stop_height ); // CL3
+ min_enlargement = std::numeric_limits<double>::max();
+ current_enlargement = 0;
+ node_min_enlargement = 0;
+
+ for(unsigned i=0; i< pos->children_nodes.size(); i++){
+ current_enlargement = find_enlargement( pos->children_nodes[i].bounding_box, r );
+
+ // TODO tie not solved!
+ if( current_enlargement < min_enlargement ){
+ node_min_enlargement = i;
+ min_enlargement = current_enlargement;
+ }
+ }
+ _RTREE_PRINT(" CL4"); // CL4
+ // descend to the node with the min_enlargement
+ pos = pos->children_nodes[node_min_enlargement].data;
+ current_height++; // increase current visiting height
+ }
+
+ return pos;
+}
+
+
+/*
+find_enlargement:
+
+enlargement that "a" needs in order to include "b"
+b is the new rect we want to insert.
+a is the rect of the node we try to see if b should go in.
+*/
+double RTree::find_enlargement( const Rect &a, const Rect &b ) const{
+
+
+ Rect union_rect(a);
+ union_rect.unionWith(b);
+
+ OptRect a_intersection_b = intersect( a, b );
+
+ // a, b do not intersect
+ if( a_intersection_b.empty() ){
+ _RTREE_PRINT(" find_enlargement (no intersect): " << union_rect.area() - a.area() - b.area() );
+ return union_rect.area() - a.area() - b.area();
+ }
+
+ // a, b intersect
+
+ // a contains b
+ if( a.contains( b ) ){
+ _RTREE_PRINT(" find_enlargement (intersect: a cont b): " << a.area() - b.area() );
+ //return a.area() - b.area();
+ return b.area() - a.area(); // enlargement is negative in this case.
+ }
+
+ // b contains a
+ if( b.contains( a ) ){
+ _RTREE_PRINT(" find_enlargement (intersect: b cont a): " << a.area() - b.area() );
+ return b.area() - a.area();
+ }
+
+ // a partially cover b
+ _RTREE_PRINT(" find_enlargement (intersect: a partial cover b): " << union_rect.area() - a.area() - b.area() - a_intersection_b->area() );
+ return union_rect.area()
+ - ( a.area() - a_intersection_b->area() )
+ - ( b.area() - a_intersection_b->area() );
+}
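+
+/* Editor's worked example: for a = [0,2]x[0,2] and b = [4,6]x[0,2] (disjoint)
+ * the union is [0,6]x[0,2], so the score is 12 - 4 - 4 = 4.  For a partially
+ * overlapping b = [1,3]x[0,2] (intersection area 2) it is
+ * 6 - (4 - 2) - (4 - 2) = 2, and when one rect contains the other the score
+ * goes negative.  So this is a heuristic preference score rather than the
+ * literal area(union) - area(a) enlargement of Guttman's paper.
+ */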
+
+
+/* I2 =========================================================================
+use one split strategy
+*/
+
+std::pair<RTreeNode*, RTreeNode*> RTree::split_node( RTreeNode *s ){
+/*
+ if( split_strategy == LINEAR_COST ){
+ linear_cost_split( ............. );
+ }
+*/
+ return quadratic_split( s ); // else QUADRATIC_SPIT
+}
+
+
+/*-----------------------------------------------------------------------------
+ Quadratic Split
+
+QS1) Pick first entry for each group:
+    Apply pick_seeds to choose 2 entries to be the first elements of the groups. Assign each one of
+    them to one group
+QS2) check if done:
+    a) if all entries have been assigned, stop
+    b) if one group has so few entries that all the rest must be assigned to it, in order for it to
+      have the min number, assign them and stop
+QS3) select entry and assign:
+    Invoke pick_next() to choose the next entry to assign.
+    *[in pick_next] Add it to the group whose covering rectangle will have to be enlarged least to
+    accommodate it. Resolve ties by adding the entry to the group with the smaller area, then to the
+ one with fewer entries, then to either of the two.
+ goto 2.
+*/
+std::pair<RTreeNode*, RTreeNode*> RTree::quadratic_split( RTreeNode *s ) {
+
+ // s is the original leaf node or non-leaf node
+ RTreeNode* group_a = new RTreeNode(); // a is the 1st group
+ RTreeNode* group_b = new RTreeNode(); // b is the 2nd group
+
+
+ _RTREE_PRINT(" QS1"); // QS1
+ std::pair<unsigned, unsigned> initial_seeds;
+ initial_seeds = pick_seeds(s);
+
+ // if non-leaf node: s
+ if( s->children_nodes.size() > 0 ){
+ _RTREE_PRINT(" non-leaf node");
+        // each element is true if the node has been assigned to either "a" or "b"
+ std::vector<bool> assigned_v( s->children_nodes.size() );
+ std::fill( assigned_v.begin(), assigned_v.end(), false );
+
+ group_a->children_nodes.push_back( s->children_nodes[initial_seeds.first] );
+ assert(initial_seeds.first < assigned_v.size());
+ assigned_v[ initial_seeds.first ] = true;
+
+ group_b->children_nodes.push_back( s->children_nodes[initial_seeds.second] );
+ assert(initial_seeds.second < assigned_v.size());
+ assigned_v[ initial_seeds.second ] = true;
+
+ _RTREE_PRINT(" QS2"); // QS2
+ unsigned num_of_not_assigned = s->children_nodes.size() - 2;
+        // so far we have assigned 2 out of all
+
+ while( num_of_not_assigned ){// QS2 a
+ _RTREE_PRINT(" QS2 b, num_of_not_assigned:" << num_of_not_assigned); // QS2 b
+ /*
+ we are on NON leaf node so children of split groups must be nodes
+
+ Check each group to see if one group has so few entries that all the rest must
+              be assigned to it, in order for it to have the min number.
+ */
+ if( group_a->children_nodes.size() + num_of_not_assigned <= min_records ){
+ // add the non-assigned to group_a
+ for(unsigned i = 0; i < assigned_v.size(); i++){
+ if(assigned_v[i] == false){
+ group_a->children_nodes.push_back( s->children_nodes[i] );
+ assigned_v[i] = true;
+ }
+ }
+ break;
+ }
+
+ if( group_b->children_nodes.size() + num_of_not_assigned <= min_records ){
+ // add the non-assigned to group_b
+ for( unsigned i = 0; i < assigned_v.size(); i++ ){
+ if( assigned_v[i] == false ){
+ group_b->children_nodes.push_back( s->children_nodes[i] );
+ assigned_v[i] = true;
+ }
+ }
+ break;
+ }
+
+ _RTREE_PRINT(" QS3"); // QS3
+ std::pair<unsigned, enum_add_to_group> next_element;
+ next_element = pick_next( group_a, group_b, s, assigned_v );
+ if( next_element.second == ADD_TO_GROUP_A ){
+ group_a->children_nodes.push_back( s->children_nodes[ next_element.first ] );
+ }
+ else{
+ group_b->children_nodes.push_back( s->children_nodes[ next_element.first ] );
+ }
+
+ num_of_not_assigned--;
+ }
+ }
+ // else leaf node: s
+ else{
+ _RTREE_PRINT(" leaf node");
+        // each element is true if the node has been assigned to either "a" or "b"
+ std::vector<bool> assigned_v( s->children_leaves.size() );
+ std::fill( assigned_v.begin(), assigned_v.end(), false );
+
+ // assign 1st seed to group a
+ group_a->children_leaves.push_back( s->children_leaves[initial_seeds.first] );
+ assert(initial_seeds.first < assigned_v.size());
+ assigned_v[ initial_seeds.first ] = true;
+
+ // assign 2nd seed to group b
+ group_b->children_leaves.push_back( s->children_leaves[initial_seeds.second] );
+ assert(initial_seeds.second < assigned_v.size());
+ assigned_v[ initial_seeds.second ] = true;
+
+ _RTREE_PRINT(" QS2"); // QS2
+ unsigned num_of_not_assigned = s->children_leaves.size() - 2;
+        // so far we have assigned 2 out of all
+
+ while( num_of_not_assigned ){// QS2 a
+ _RTREE_PRINT(" QS2 b, num_of_not_assigned:" << num_of_not_assigned); // QS2 b
+ /*
+ we are on leaf node so children of split groups must be leaves
+
+ Check each group to see if one group has so few entries that all the rest must
+              be assigned to it, in order for it to have the min number.
+ */
+ if( group_a->children_leaves.size() + num_of_not_assigned <= min_records ){
+ _RTREE_PRINT(" add the non-assigned to group_a");
+ // add the non-assigned to group_a
+ for( unsigned i = 0; i < assigned_v.size(); i++ ){
+ if( assigned_v[i] == false ){
+ group_a->children_leaves.push_back( s->children_leaves[i] );
+ assigned_v[i] = true;
+ }
+ }
+ break;
+ }
+
+ if( group_b->children_leaves.size() + num_of_not_assigned <= min_records ){
+ _RTREE_PRINT(" add the non-assigned to group_b");
+ // add the non-assigned to group_b
+ for( unsigned i = 0; i < assigned_v.size(); i++ ){
+ if( assigned_v[i] == false ){
+ group_b->children_leaves.push_back( s->children_leaves[i] );
+ assigned_v[i] = true;
+ }
+ }
+ break;
+ }
+
+ _RTREE_PRINT(" QS3"); // QS3
+ std::pair<unsigned, enum_add_to_group> next_element;
+ next_element = pick_next(group_a, group_b, s, assigned_v);
+ if( next_element.second == ADD_TO_GROUP_A ){
+ group_a->children_leaves.push_back( s->children_leaves[ next_element.first ] );
+ }
+ else{
+ group_b->children_leaves.push_back( s->children_leaves[ next_element.first ] );
+ }
+
+ num_of_not_assigned--;
+ }
+ }
+ assert( initial_seeds.first != initial_seeds.second );
+ return std::make_pair( group_a, group_b );
+}
+
+/*
+PS1) calculate inefficiency of grouping entries together:
+    For each pair of entries E1 (i), E2 (j) compose rectangle J (i_union_j) including E1, E2.
+    Calculate d = area(i_union_j) - area(i) - area(j)
+PS2) choose the most wasteful pair:
+ Choose pair with largest d
+*/
+
+std::pair<unsigned, unsigned> RTree::pick_seeds( RTreeNode *s ) const{
+ double current_d = 0;
+    double max_d = -std::numeric_limits<double>::max(); // most negative value; ::min() is the smallest positive double
+ unsigned seed_a = 0;
+ unsigned seed_b = 1;
+ _RTREE_PRINT(" pick_seeds");
+
+ // if non leaf node: s
+ if( s->children_nodes.size() > 0 ){
+ _RTREE_PRINT(" non leaf");
+ _RTREE_PRINT(" PS1"); // PS1
+ for( unsigned a = 0; a < s->children_nodes.size(); a++ ){
+ // with b = a+1 we check only the upper (diagonal) half of the pair matrix
+ // and avoid checking b == a (which we don't need)
+ for( unsigned b = a+1; b < s->children_nodes.size(); b++ ){
+ _RTREE_PRINT(" PS2 " << a << " - " << b ); // PS2
+ current_d = find_waste_area( s->children_nodes[a].bounding_box, s->children_nodes[b].bounding_box );
+
+ if( current_d > max_d ){
+ max_d = current_d;
+ seed_a = a;
+ seed_b = b;
+ }
+ }
+ }
+ }
+ // else leaf node: s
+ else{
+ _RTREE_PRINT(" leaf node");
+ _RTREE_PRINT(" PS1"); // PS1
+ for( unsigned a = 0; a < s->children_leaves.size(); a++ ){
+ // with b = a+1 we check only the upper (diagonal) half of the pair matrix
+ // and avoid checking b == a (which we don't need)
+ for( unsigned b = a+1; b < s->children_leaves.size(); b++ ){
+ _RTREE_PRINT(" PS2 " << s->children_leaves[a].data << ":" << s->children_leaves[a].bounding_box.area()
+ << " - " << s->children_leaves[b].data << ":" << s->children_leaves[b].bounding_box.area() ); // PS2
+ current_d = find_waste_area( s->children_leaves[a].bounding_box, s->children_leaves[b].bounding_box );
+
+ if( current_d > max_d ){
+ max_d = current_d;
+ seed_a = a;
+ seed_b = b;
+ }
+ }
+ }
+ }
+ _RTREE_PRINT(" seed_a: " << seed_a << " seed_b: " << seed_b );
+ return std::make_pair( seed_a, seed_b );
+}
+
+/*
+find_waste_area (used in pick_seeds step 1)
+
+for a pair a, b compose a rect union_rect that includes both a and b,
+then calculate area(union_rect) - area(a) - area(b)
+*/
+double RTree::find_waste_area( const Rect &a, const Rect &b ) const{
+ Rect union_rect(a);
+ union_rect.unionWith(b);
+
+ return union_rect.area() - a.area() - b.area();
+}
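+
+/* Worked example (editor's sketch, not part of the original code): for two
+ disjoint 2x2 boxes two units apart, the PS1 waste value is
+
+ Rect a( Point(0, 0), Point(2, 2) ); // area 4
+ Rect b( Point(4, 0), Point(6, 2) ); // area 4
+ Rect j(a);
+ j.unionWith(b); // 6x2 box, area 12
+ double d = j.area() - a.area() - b.area(); // 12 - 4 - 4 = 4
+
+ so the pair that would waste the most space if kept together gets the
+ largest d and is used to seed the two split groups.
+*/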
+
+/*
+pick_next:
+select one remaining entry for classification in a group
+
+PN1) Determine cost of putting each entry in each group:
+ Foreach entry E not yet in a group, calculate
+ d1= area increase required in the cover rect of Group 1 to include E
+ d2= area increase required in the cover rect of Group 2 to include E
+PN2) Find entry with greatest preference for each group:
+ Choose any entry with the maximum difference between d1 and d2
+
+*/
+
+std::pair<unsigned, enum_add_to_group> RTree::pick_next( RTreeNode* group_a,
+ RTreeNode* group_b,
+ RTreeNode* s,
+ std::vector<bool> &assigned_v )
+{
+ double max_increase_difference = std::numeric_limits<double>::lowest(); // most negative double; min() would be the smallest positive value
+ unsigned max_increase_difference_node = 0;
+ double current_increase_difference = 0;
+
+ enum_add_to_group group_to_add = ADD_TO_GROUP_A;
+
+ /*
+ bounding boxes of the 2 new groups. This info isn't stored anywhere yet, since
+ the groups have no parent nodes (normally a node's bounding box is kept in its
+ parent's record).
+ */
+ Rect bounding_box_a;
+ Rect bounding_box_b;
+
+ double increase_area_a = 0;
+ double increase_area_b = 0;
+
+ _RTREE_PRINT(" pick_next, assigned_v.size:" << assigned_v.size() );
+
+ // if non leaf node: one of the 2 groups (both groups are of the same kind, either leaf or non-leaf)
+ if( group_a->children_nodes.size() > 0 ){
+ _RTREE_PRINT(" non leaf");
+
+ // calculate the bounding boxes of the 2 new groups.
+ bounding_box_a = Rect( group_a->children_nodes[0].bounding_box );
+ for( unsigned i = 1; i < group_a->children_nodes.size(); i++ ){
+ bounding_box_a.unionWith( group_a->children_nodes[i].bounding_box );
+ }
+
+ bounding_box_b = Rect( group_b->children_nodes[0].bounding_box );
+ for( unsigned i = 1; i < group_b->children_nodes.size(); i++ ){
+ bounding_box_b.unionWith( group_b->children_nodes[i].bounding_box );
+ }
+
+
+ _RTREE_PRINT(" PN1"); // PN1
+ for( unsigned i = 0; i < assigned_v.size(); i++ ){
+ _RTREE_PRINT(" i:" << i << " assigned:" << assigned_v[i]);
+ if( assigned_v[i] == false ){
+
+ increase_area_a = find_enlargement( bounding_box_a, s->children_nodes[i].bounding_box );
+ increase_area_b = find_enlargement( bounding_box_b, s->children_nodes[i].bounding_box );
+
+ current_increase_difference = std::abs( increase_area_a - increase_area_b );
+ _RTREE_PRINT(" PN2 " << i << ": " << current_increase_difference ); // PN2
+ if( current_increase_difference > max_increase_difference ){
+ max_increase_difference = current_increase_difference;
+ max_increase_difference_node = i;
+
+ // TODO tie not solved!
+ if( increase_area_a < increase_area_b ){
+ group_to_add = ADD_TO_GROUP_A;
+ }
+ else{
+ group_to_add = ADD_TO_GROUP_B;
+ }
+ }
+ }
+ }
+ //assert(max_increase_difference_node >= 0);
+ assert(max_increase_difference_node < assigned_v.size());
+ assigned_v[max_increase_difference_node] = true;
+ _RTREE_PRINT(" ... i:" << max_increase_difference_node << " assigned:" << assigned_v[max_increase_difference_node] );
+ }
+ else{ // else leaf node
+ _RTREE_PRINT(" leaf");
+
+ // calculate the bounding boxes of the 2 new groups
+ bounding_box_a = Rect( group_a->children_leaves[0].bounding_box );
+ for( unsigned i = 1; i < group_a->children_leaves.size(); i++ ){
+ bounding_box_a.unionWith( group_a->children_leaves[i].bounding_box );
+ }
+
+ bounding_box_b = Rect( group_b->children_leaves[0].bounding_box );
+ for( unsigned i = 1; i < group_b->children_leaves.size(); i++ ){
+ bounding_box_b.unionWith( group_b->children_leaves[i].bounding_box );
+ }
+
+ _RTREE_PRINT(" PN1"); // PN1
+ for( unsigned i = 0; i < assigned_v.size(); i++ ){
+ _RTREE_PRINT(" i:" << i << " assigned:" << assigned_v[i]);
+ if( assigned_v[i] == false ){
+ increase_area_a = find_enlargement( bounding_box_a, s->children_leaves[i].bounding_box );
+ increase_area_b = find_enlargement( bounding_box_b, s->children_leaves[i].bounding_box );
+
+ current_increase_difference = std::abs( increase_area_a - increase_area_b );
+ _RTREE_PRINT(" PN2 " << i << ": " << current_increase_difference ); // PN2
+
+ if( current_increase_difference > max_increase_difference ){
+ max_increase_difference = current_increase_difference;
+ max_increase_difference_node = i;
+
+ // TODO tie not solved!
+ if( increase_area_a < increase_area_b ){
+ group_to_add = ADD_TO_GROUP_A;
+ }
+ else{
+ group_to_add = ADD_TO_GROUP_B;
+ }
+ }
+ }
+ }
+ assert(max_increase_difference_node < assigned_v.size());
+ assigned_v[max_increase_difference_node] = true;
+ _RTREE_PRINT(" ... i:" << max_increase_difference_node << " assigned:" << assigned_v[max_increase_difference_node] );
+ }
+
+ _RTREE_PRINT(" node:" << max_increase_difference_node << " added:" << group_to_add );
+ return std::make_pair( max_increase_difference_node, group_to_add );
+}
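+
+/* Editor's note (illustrative sketch, not part of the original code): for a
+ single unassigned entry with bounding box e_box, the PN1/PN2 decision boils
+ down to
+
+ double d1 = find_enlargement( bounding_box_a, e_box ); // growth of group A
+ double d2 = find_enlargement( bounding_box_b, e_box ); // growth of group B
+ double preference = std::abs( d1 - d2 );
+
+ The entry with the largest preference is chosen, and it is added to the group
+ that would have to grow less (ties are currently left unresolved, see the
+ TODO above).
+*/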
+
+/* I3 =========================================================================
+
+adjust_tree:
+Ascend from a leaf node L to root, adjusting covering rectangles and propagating node splits as
+necessary
+
+We modified this one from the source in the step AT1 and AT5
+
+AT1) Initialize:
+ Set N=L
+ IF L was split previously, set NN to be the resulting second node AND
+ (not mentioned in the original source but that's what it should mean)
+ Assign all entries of first node to L
+AT2) check if done:
+ IF N is root stop
+AT3) adjust covering rectangle in parent entry
+ 1) Let P be the parent of N
+ 2) Let EN be the N's entry in P
+ 3) Adjust EN bounding box so that it tightly encloses all entry rectangles in N
+AT4) Propagate node split upward
+ IF N has a partner NN resulting from an earlier split
+ create a new entry ENN with ENN "p" pointing to NN and ENN bounding box enclosing all
+ rectangles in NN
+
+ IF there is room in P add ENN
+ ELSE invoke split_node to produce P and PP containing ENN and all P's old entries.
+AT5) Move up to next level
+ Set N=P,
+ IF a split occurred, set NN=PP
+ goto AT1 (was goto AT2)
+*/
+
+bool RTree::adjust_tree( RTreeNode* position,
+ // modified: it holds the last split group
+ std::pair<RTreeNode*, RTreeNode*> &node_division,
+ bool initial_split_performed)
+{
+ RTreeNode* parent;
+ unsigned child_in_parent; // the element in parent node that points to current position
+ std::pair< RTreeNode*, bool > find_result;
+ bool split_performed = initial_split_performed;
+ bool root_split_performed = false;
+
+ _RTREE_PRINT(" adjust_tree");
+ _RTREE_PRINT(" AT1");
+
+ while( true ){
+ _RTREE_PRINT(" ------- current tree status:");
+ _RTREE_PRINT_TREE_INS(root, 2, true);
+
+ // check for loop BREAK
+ if( position == root ){
+ _RTREE_PRINT(" AT2: found root");
+ if( split_performed ){
+ root_split_performed = true;
+ }
+ break;
+ }
+
+ if( split_performed ){
+ copy_group_a_to_existing_node( position, node_division.first );
+ }
+
+ /*
+ pick an entry, let's say the 1st entry of the current node.
+ Search for this spatial area in the tree and stop at the parent node.
+ Then find the position of the current node's pointer in the parent node.
+ */
+ _RTREE_PRINT(" AT3.1"); // AT3.1 Let P be the parent of N
+ if( position->children_nodes.size() > 0 ){
+ find_result = find_parent( root, position->children_nodes[0].bounding_box, position);
+ }
+ else{
+ find_result = find_parent( root, position->children_leaves[0].bounding_box, position);
+ }
+ parent = find_result.first;
+
+ // parent is a non-leaf, by definition
+ _RTREE_PRINT(" AT3.2"); // AT3.2 Let EN be the N's entry in P
+ for( child_in_parent = 0; child_in_parent < parent->children_nodes.size(); child_in_parent++ ){
+ if( parent->children_nodes[ child_in_parent ].data == position){
+ _RTREE_PRINT(" child_in_parent: " << child_in_parent);
+ break;
+ }
+ }
+
+ _RTREE_PRINT(" AT3.3");
+ // AT3.3 Adjust EN bounding box so that it tightly encloses all entry rectangles in N
+ recalculate_bounding_box( parent, position, child_in_parent );
+
+
+ _RTREE_PRINT(" AT4"); // AT4
+ if( split_performed ){
+ // create new record (from group_b)
+ //RTreeNode* new_node = new RTreeNode();
+ Rect new_record_bounding;
+
+ RTreeRecord_NonLeaf new_record = create_nonleaf_record_from_rtreenode( new_record_bounding, node_division.second );
+
+ // install new entry (group_b)
+ if( parent->children_nodes.size() < max_records ){
+ parent->children_nodes.push_back( new_record );
+ split_performed = false;
+ }
+ else{
+ parent->children_nodes.push_back( new_record );
+ node_division = quadratic_split( parent ); // AT5
+ split_performed = true;
+ }
+
+ }
+ _RTREE_PRINT(" AT5"); // AT5
+ position = parent;
+ }
+
+ return root_split_performed;
+}
+
+/*
+find_parent:
+The source only mentions that we should "find" the parent, but it doesn't say how. So we use a
+modified version of search.
+
+Initially we take the root, a rect belonging to the node whose parent we are looking for, and the node itself.
+
+We do a spatial search for this rect. Whenever we get an intersection with the rect, we check whether the
+child is the node we are looking for.
+If not, we call find_parent again recursively.
+*/
+
+std::pair< RTreeNode*, bool > RTree::find_parent( RTreeNode* subtree_root,
+ Rect search_area,
+ RTreeNode* wanted) const
+{
+ _RTREE_PRINT("find_parent");
+
+ std::pair< RTreeNode*, bool > result;
+ if( subtree_root->children_nodes.size() > 0 ){
+
+ for( unsigned i=0; i < subtree_root->children_nodes.size(); i++ ){
+ if( subtree_root->children_nodes[i].data == wanted){
+ _RTREE_PRINT("FOUND!!"); // non leaf node
+ return std::make_pair( subtree_root, true );
+ }
+
+ if( subtree_root->children_nodes[i].bounding_box.intersects( search_area ) ){
+ result = find_parent( subtree_root->children_nodes[i].data, search_area, wanted);
+ if ( result.second ){
+ break;
+ }
+ }
+ }
+ }
+ return result;
+}
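+
+/* Usage sketch (editor's note): adjust_tree and condense_tree call this as
+
+ std::pair< RTreeNode*, bool > r =
+ find_parent( root, position->children_nodes[0].bounding_box, position );
+ if( r.second ){
+ RTreeNode* parent = r.first;
+ // ...
+ }
+
+ i.e. the bounding box of any record stored in `position` is enough to guide
+ the spatial descent towards its parent.
+*/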
+
+
+void RTree::copy_group_a_to_existing_node( RTreeNode *position, RTreeNode* group_a ){
+ // clear position (the one that was split) and put there all the nodes of group_a
+ if( position->children_nodes.size() > 0 ){
+ _RTREE_PRINT(" copy_group...(): install group A to existing non-leaf node");
+ // non leaf-node: position
+ position->children_nodes.clear();
+ for(auto & children_node : group_a->children_nodes){
+ position->children_nodes.push_back( children_node );
+ }
+ }
+ else{
+ _RTREE_PRINT(" copy_group...(): install group A to existing leaf node");
+ // leaf-node: position
+ position->children_leaves.clear();
+ for(auto & children_leave : group_a->children_leaves){
+ position->children_leaves.push_back( children_leave );
+ }
+ }
+}
+
+
+
+RTreeRecord_NonLeaf RTree::create_nonleaf_record_from_rtreenode( Rect &new_entry_bounding, RTreeNode* rtreenode ){
+
+ if( rtreenode->children_nodes.size() > 0 ){
+ // find the bounding box of the new entry
+ new_entry_bounding = Rect( rtreenode->children_nodes[0].bounding_box );
+ for(unsigned i = 1; i < rtreenode->children_nodes.size(); i++ ){
+ new_entry_bounding.unionWith( rtreenode->children_nodes[ i ].bounding_box );
+ }
+ }
+ else{ // leaf: rtreenode
+ // find the bounding box of the new entry
+ new_entry_bounding = Rect( rtreenode->children_leaves[0].bounding_box );
+ for(unsigned i = 1; i < rtreenode->children_leaves.size(); i++ ){
+ new_entry_bounding.unionWith( rtreenode->children_leaves[ i ].bounding_box );
+ }
+ }
+ return RTreeRecord_NonLeaf( new_entry_bounding, rtreenode );
+}
+
+
+
+/*
+ print the elements of the tree
+ based on ordered tree walking
+*/
+void RTree::print_tree(RTreeNode* subtree_root, int depth ) const{
+
+ if( subtree_root->children_nodes.size() > 0 ){
+
+ // descend in each one of the elements and call print_tree
+ for( unsigned i=0; i < subtree_root->children_nodes.size(); i++ ){
+ //print spaces for indentation
+ for(int j=0; j < depth; j++){
+ std::cout << " " ;
+ }
+
+ std::cout << subtree_root->children_nodes[i].bounding_box << ", " << subtree_root->children_nodes.size() << std::endl ;
+ _RTREE_PRINT_TREE_INS( subtree_root->children_nodes[i].data, depth+1, used_during_insert);
+ }
+
+ }
+ else{
+ for(int j=0; j < depth; j++){
+ std::cout << " " ;
+ }
+ std::cout << subtree_root->children_leaves.size() << ": " ;
+
+ // print all the elements of the leaf node
+ for(auto & children_leave : subtree_root->children_leaves){
+ std::cout << children_leave.data << ", " ;
+ }
+ std::cout << std::endl ;
+
+ }
+}
+
+
+void RTree::sanity_check(RTreeNode* subtree_root, int depth, bool used_during_insert ) const{
+
+ if( subtree_root->children_nodes.size() > 0 ){
+ // descend in each one of the elements and call sanity_check
+ for(auto & children_node : subtree_root->children_nodes){
+ sanity_check( children_node.data, depth+1, used_during_insert);
+ }
+
+
+ // sanity check
+ if( subtree_root != root ){
+ assert( subtree_root->children_nodes.size() >= min_records);
+ }
+/*
+ else{
+ assert( subtree_root->children_nodes.size() >= 1);
+ }
+*/
+
+ if( used_during_insert ){
+ // allow one extra record during insert
+ assert( subtree_root->children_nodes.size() <= max_records + 1 );
+ }
+ else{
+ assert( subtree_root->children_nodes.size() <= max_records );
+ }
+
+ }
+ else{
+ // sanity check
+ if( subtree_root != root ){
+ assert( subtree_root->children_leaves.size() >= min_records);
+ }
+/*
+ else{
+ assert( subtree_root->children_leaves.size() >= 1);
+ }
+*/
+
+ if( used_during_insert ){
+ // allow one extra record during insert
+ assert( subtree_root->children_leaves.size() <= max_records + 1 );
+ }
+ else{
+ assert( subtree_root->children_leaves.size() <= max_records );
+ }
+ }
+}
+
+
+
+/*=============================================================================
+ search
+===============================================================================
+Given an RTree whose root node is T find all index records whose rects overlap search rect S
+S1) Search subtrees:
+ IF T isn't a leaf, check every entry E to determine whether E I overlaps S
+ FOR all overlapping entries invoke Search on the tree whose root node is pointed by E P
+S2) ELSE T is leaf
+ check all entries E to determine whether E I overlaps S
+ IF so E is a qualifying record
+*/
+
+
+void RTree::search( const Rect &search_area, std::vector< int >* result, const RTreeNode* subtree ) const {
+ // S1
+ if( subtree->children_nodes.size() > 0 ){ // non-leaf: subtree
+ for(const auto & children_node : subtree->children_nodes){
+ if( children_node.bounding_box.intersects( search_area ) ){
+ search( search_area, result, children_node.data );
+ }
+ }
+ }
+ // S2
+ else{ // leaf: subtree
+ for(const auto & children_leave : subtree->children_leaves){
+ if( children_leave.bounding_box.intersects( search_area ) ){
+ result->push_back( children_leave.data );
+ }
+ }
+ }
+}
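+
+/* Usage sketch (editor's note; assumes the `root` member is accessible as
+ declared in rtree.h): a spatial query collects the integer payloads of every
+ leaf whose bounding box intersects the query rectangle:
+
+ std::vector< int > hits;
+ rtree.search( Rect( Point(0, 0), Point(10, 10) ), &hits, rtree.root );
+*/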
+
+
+/*=============================================================================
+ erase
+===============================================================================
+we changed step D2)
+D1) Find node containing record
+ Invoke find_leaf() to locate the leaf node L containing E
+ IF no such record is found, stop
+D2) Delete record
+ Remove E from L (it happened in find_leaf step FL2)
+D3) Propagate changes
+ Invoke condense_tree, passing L
+D4) Shorten tree
+ If the root node has only one child after the tree was adjusted, make that child the new root
+
+return
+0 on success
+1 in case no entry was found
+
+*/
+//int RTree::erase( const RTreeRecord_Leaf & record_to_erase ){
+int RTree::erase( const Rect &search_area, const int shape_to_delete ){
+ _RTREE_PRINT("\n=====================================");
+ _RTREE_PRINT("erase element: " << shape_to_delete);
+ // D1 + D2: entry is deleted in find_leaf
+ _RTREE_PRINT("D1 & D2 : find and delete the leaf");
+ RTreeNode* contains_record = find_leaf( root, search_area, shape_to_delete );
+ if( !contains_record ){ // no entry returned from find_leaf
+ return 1; // no entry found
+ }
+
+ // D3
+ //bool root_elimination_performed = condense_tree( contains_record );
+
+ // D4
+
+ //if( root_elimination_performed ){
+ if( root->children_nodes.size() > 0 ){ // non leaf: root
+ // D4
+ if( root->children_nodes.size() == 1 ){
+ _RTREE_PRINT("D4 : non leaf: ");
+ tree_height--;
+ RTreeNode* t = root;
+ root = root->children_nodes[0].data;
+ delete t;
+ }
+
+ }
+ else { // leaf: root
+ // D4
+ // do nothing
+ }
+ sanity_check( root, 0 );
+ return 0; // success
+}
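+
+/* Usage sketch (editor's note): erase() needs both the stored integer handle
+ and a rectangle that intersects its bounding box, because the leaf entry is
+ located spatially (the rectangle name here is hypothetical):
+
+ if( rtree.erase( rect_of_shape_5, 5 ) != 0 ){
+ // 1 means that no matching leaf entry was found
+ }
+*/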
+
+
+/*
+ find_leaf()
+Given an RTree whose root node is T find the leaf node containing index entry E
+
+FL1) Search subtrees
+ IF T is non leaf, check each entry F in T to determine if F I overlaps E I
+ foreach such entry invoke find_leaf on the tree whose root is pointed to by F P until E is
+ found or all entries have been checked
+FL2) search leaf node for record
+ IF T is leaf, check each entry to see if it matches E
+ IF E is found return T
+ AND delete element E (step D2)
+*/
+
+RTreeNode* RTree::find_leaf( RTreeNode* subtree, const Rect &search_area, const int shape_to_delete ) const {
+ // FL1
+ if( subtree->children_nodes.size() > 0 ){ // non-leaf: subtree
+ for(auto & children_node : subtree->children_nodes){
+ if( children_node.bounding_box.intersects( search_area ) ){
+ RTreeNode* t = find_leaf( children_node.data, search_area, shape_to_delete );
+ if( t ){ // if search was successful terminate
+ return t;
+ }
+ }
+ }
+ }
+ // FL2
+ else{ // leaf: subtree
+ for( std::vector< RTreeRecord_Leaf >::iterator it = subtree->children_leaves.begin(); it!=subtree->children_leaves.end(); ++it ){
+ if( it->data == shape_to_delete ){
+ // delete element: implement step D2)
+ subtree->children_leaves.erase( it );
+ return subtree;
+ }
+ }
+ }
+ return 0;
+}
+
+
+/*
+ condense_tree()
+Given a leaf node L from which an entry has been deleted, eliminate the node if it has too few entries and relocate its entries.
+Propagate node elimination upwards as necessary.
+Adjust all covering rects on the path to the root, making them smaller if possible.
+
+CT1) Initialize
+ Set N=L
+ Set Q the set of eliminated nodes to be empty
+CT2) // "Find parent entry" (this title was here in the original source, but it only makes sense in the ELSE branch)
+ IF N is the root
+ goto CT6
+ ELSE
+ 1) Find parent entry
+ 2) let P be the parent of N
+ 3) and let EN be N's entry in P
+CT3) IF N has fewer than m entries
+ Eliminate underfull node
+ 1) delete EN from P
+ 2) and add N to set Q
+CT4) ELSE
+ adjust EN I to tightly contain all entries in N
+CT5) move up one level in tree
+ set N=P and repeat from CT2
+
+CT6) Re insert orphaned entries
+ Re-insert all entries of nodes in set Q
+ Entries from eliminated leaf nodes are re-inserted in tree leaves (like in insert)
+ BUT non-leaf nodes must be placed higher in the tree so that leaves of their dependent subtrees
+ will be on the same level as leaves of the main tree. (on the same height they originally were)
+ (not mentioned in the source description: the criterion for placing the node
+ should presumably again be least enlargement; TODO verify)
+
+*/
+// TODO: this could be merged with adjust_tree, or refactored to reuse some parts, at the cost of readability
+bool RTree::condense_tree( RTreeNode* position )
+{
+ RTreeNode* parent;
+ unsigned child_in_parent = 0; // the element in parent node that points to current position
+
+ std::pair< RTreeNode*, bool > find_result;
+ bool elimination_performed = false;
+ bool root_elimination_performed = false;
+ unsigned current_height = tree_height+1;
+ Rect special_case_bounding_box;
+ _RTREE_PRINT(" condense_tree");
+ _RTREE_PRINT(" CT1");
+ // leaf records that were eliminated due to under-full node
+ std::vector< RTreeRecord_Leaf > Q_leaf_records( 0 );
+
+ // < non-leaf records, their height > that were eliminated due to under-full node
+ std::vector< std::pair< RTreeRecord_NonLeaf, unsigned > > Q_nonleaf_records( 0 );
+
+
+ while( true ){
+
+ // check for loop BREAK
+ if( position == root ){
+ _RTREE_PRINT(" CT2 position is root");
+ if( elimination_performed ){
+ root_elimination_performed = true;
+ }
+ break;
+ }
+
+ /*
+ pick an entry, let's say the 1st entry of the current node.
+ Search for this spatial area in the tree and stop at the parent node.
+ Then find the position of the current node's pointer in the parent node.
+ */
+ /*
+ special case: if an elimination due to an under-full child was performed
+ AND the parent had only 1 record, then this one record was removed.
+ */
+ if( position->children_nodes.size() > 0 ){
+ _RTREE_PRINT(" CT2.1 - 2 non leaf: find parent, P is parent");
+ // CT2.1 find parent. By definition it's nonleaf
+ find_result = find_parent( root, position->children_nodes[0].bounding_box, position);
+ }
+ else if( position->children_nodes.size() == 0
+ && position->children_leaves.size() == 0
+ && elimination_performed )
+ { // special case
+ _RTREE_PRINT(" CT2.1 - 2 special case: find parent, P is parent");
+ // CT2.1 find parent. By definition it's nonleaf
+ find_result = find_parent( root, special_case_bounding_box, position);
+ }
+ else{
+ _RTREE_PRINT(" CT2.1 - 2 leaf: find parent, P is parent");
+ // CT2.1 find parent. By definition it's nonleaf
+ find_result = find_parent( root, position->children_leaves[0].bounding_box, position);
+ }
+ // CT2.2 Let P be the parent of N
+ parent = find_result.first;
+
+
+ // parent is a non-leaf, by definition. Calculate "child_in_parent"
+ _RTREE_PRINT(" CT2.3 find in parent, position's record EN");
+ // CT2.3 Let EN be the N's entry in P
+ for( child_in_parent = 0; child_in_parent < parent->children_nodes.size(); child_in_parent++ ){
+ if( parent->children_nodes[ child_in_parent ].data == position){
+ _RTREE_PRINT(" child_in_parent: " << child_in_parent << " out of " << parent->children_nodes.size() << " (size)" );
+ break;
+ }
+ }
+
+ if( position->children_nodes.size() > 0 ){ // non leaf node: position
+ _RTREE_PRINT(" CT3 nonleaf: eliminate underfull node");
+ // CT3 Eliminate underfull node
+ if( position->children_nodes.size() < min_records ){
+ _RTREE_PRINT(" CT3.2 add N to Q");
+ // CT3.2 add N to set Q ( EN the record that points to N )
+ for(auto & children_node : position->children_nodes){
+ _RTREE_PRINT(" moving non-leaf record to Q");
+ std::pair< RTreeRecord_NonLeaf, unsigned > t = std::make_pair( children_node, current_height-1);
+ Q_nonleaf_records.push_back( t );
+
+ }
+ special_case_bounding_box = parent->children_nodes[ child_in_parent ].bounding_box;
+
+ _RTREE_PRINT(" CT3.1 delete in parent, position's record EN");
+ // CT3.1 delete EN from P ( parent is by definition nonleaf )
+ if( remove_record_from_parent( parent, position ) ){ // TODO: does erase() also delete the pointed-to node ???
+ _RTREE_PRINT(" remove_record_from_parent error ");
+ }
+ elimination_performed = true;
+ }
+ else{
+ _RTREE_PRINT(" CT4 "); /// CT4) if not underfull
+ recalculate_bounding_box( parent, position, child_in_parent );
+ elimination_performed = false;
+ }
+
+ }
+ else{ // leaf node: position
+ _RTREE_PRINT(" CT3 leaf: eliminate underfull node");
+ // CT3 Eliminate underfull node
+ if( position->children_leaves.size() < min_records ){
+ _RTREE_PRINT(" CT3.2 add N to Q " << position->children_leaves.size() );
+ // CT3.2 add N to set Q
+ for(auto & children_leave : position->children_leaves){
+ _RTREE_PRINT(" moving leaf record to Q");
+ Q_leaf_records.push_back( children_leave ); // TODO problem here
+ special_case_bounding_box = children_leave.bounding_box;
+ }
+
+ _RTREE_PRINT(" CT3.1 delete in parent, position's record EN");
+ // CT3.1 delete EN from P ( parent is by definition nonleaf )
+ if( remove_record_from_parent( parent, position ) ){
+ _RTREE_PRINT(" remove_record_from_parent error ");
+ }
+ elimination_performed = true;
+ }
+ else{
+ _RTREE_PRINT(" CT4 "); /// CT4) if not underfull
+ recalculate_bounding_box( parent, position, child_in_parent );
+ elimination_performed = false;
+ }
+ }
+ _RTREE_PRINT(" CT5 ");// CT5) move up one level in tree
+ position = parent;
+
+ current_height--;
+ }
+ // CT6: reinsert
+ _RTREE_PRINT(" ------ Q_leaf");
+ for( std::vector< RTreeRecord_Leaf >::iterator it = Q_leaf_records.begin(); it != Q_leaf_records.end(); ++it ){
+ _RTREE_PRINT(" leaf:" << (*it).data);
+ }
+ _RTREE_PRINT(" ------ Q_nonleaf");
+ for( std::vector< std::pair< RTreeRecord_NonLeaf, unsigned > >::iterator it = Q_nonleaf_records.begin(); it != Q_nonleaf_records.end(); ++it ){
+ _RTREE_PRINT(" ------- " << it->second );
+ _RTREE_PRINT_TREE( it->first.data, 0);
+ }
+
+ _RTREE_PRINT(" CT6 ");
+ for(auto & Q_leaf_record : Q_leaf_records){
+ insert( Q_leaf_record );
+ _RTREE_PRINT(" inserted leaf:" << Q_leaf_record.data << " ------------");
+ _RTREE_PRINT_TREE( root, 0);
+ }
+
+
+ for(auto & Q_nonleaf_record : Q_nonleaf_records){
+ insert( RTreeRecord_Leaf() , true, Q_nonleaf_record.second, Q_nonleaf_record.first );
+ _RTREE_PRINT(" inserted nonleaf------------");
+ _RTREE_PRINT_TREE( root, 0);
+ // TODO this fake RTreeRecord_Leaf() looks stupid. find better way to do this ???
+ }
+
+ return root_elimination_performed;
+}
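+
+/* Editor's note on CT6: leaf records go back through the normal insert(),
+ while each non-leaf record is re-inserted together with the height it was
+ removed from, so that the leaves of its subtree end up on their original
+ level again:
+
+ insert( RTreeRecord_Leaf(), true, saved_height, saved_nonleaf_record );
+
+ The dummy RTreeRecord_Leaf() only satisfies the signature, as the TODO in
+ the loop above already points out (saved_height and saved_nonleaf_record
+ stand for the pair stored in Q_nonleaf_records).
+*/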
+
+
+/*
+given:
+- a parent
+- a child node
+- and the position of the child node in the parent
+recalculate the parent record's bounding box of the child, so that it tightly contains all entries of the child
+
+NOTE! child must really be a child of the parent, otherwise things break. So find parent and child
+before calling this function
+*/
+void RTree::recalculate_bounding_box( RTreeNode* parent, RTreeNode* child, unsigned &child_in_parent ) {
+ if( child->children_nodes.size() > 0 ){
+ _RTREE_PRINT(" non-leaf: recalculate bounding box of parent "); // non leaf-node: child
+ parent->children_nodes[ child_in_parent ].bounding_box = Rect( child->children_nodes[0].bounding_box );
+ for( unsigned i=1; i < child->children_nodes.size(); i++ ){
+ parent->children_nodes[ child_in_parent ].bounding_box.unionWith( child->children_nodes[i].bounding_box );
+ }
+ }
+ else{
+ _RTREE_PRINT(" leaf: recalculate bounding box of parent "); // leaf-node: child
+ parent->children_nodes[ child_in_parent ].bounding_box = Rect( child->children_leaves[0].bounding_box );
+
+ for( unsigned i=1; i < child->children_leaves.size(); i++ ){
+ parent->children_nodes[ child_in_parent ].bounding_box.unionWith( child->children_leaves[i].bounding_box );
+ }
+ }
+}
+
+/*
+given:
+- a parent
+- a child node
+it removes the child record from the parent
+
+NOTE! child must really be a child of the parent, otherwise things break.
+So find parent and child before calling this function
+*/
+int RTree::remove_record_from_parent( RTreeNode* parent, RTreeNode* child ) {
+ _RTREE_PRINT( "remove_record_from_parent()" );
+ for( std::vector< RTreeRecord_NonLeaf >::iterator it = parent->children_nodes.begin(); it!=parent->children_nodes.end(); ++it ){
+ if( it->data == child ){
+ // delete element: implement step D2)
+ parent->children_nodes.erase( it );
+ return 0; // success
+ }
+ }
+ return 1; // failure
+}
+
+/*=============================================================================
+TODO update
+===============================================================================
+*/
+
+
+};
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/parallelogram.cpp b/src/2geom/parallelogram.cpp
new file mode 100644
index 0000000..b477a01
--- /dev/null
+++ b/src/2geom/parallelogram.cpp
@@ -0,0 +1,136 @@
+/*
+ * Authors:
+ * Thomas Holder
+ * Sergei Izmailov
+ *
+ * Copyright 2020 Authors
+ *
+ * SPDX-License-Identifier: LGPL-2.1 or MPL-1.1
+ */
+
+#include <2geom/basic-intersection.h>
+#include <2geom/parallelogram.h>
+
+#include <cassert>
+
+namespace Geom {
+
+namespace {
+/// Return true if `p` is inside a unit rectangle
+inline bool unit_rect_contains(Point const &p)
+{
+ return 0 <= p.x() && p.x() <= 1 && //
+ 0 <= p.y() && p.y() <= 1;
+}
+
+/// N'th corner of a unit rectangle
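+/// Corners are returned in cyclic order (0,0), (1,0), (1,1), (0,1), so that
+/// corner(i) and corner((i + 2) % 4) are opposite ends of a diagonal.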
+inline Point unit_rect_corner(unsigned i)
+{
+ assert(i < 4);
+ unsigned const y = i >> 1;
+ unsigned const x = (i & 1) ^ y;
+ return Point(x, y);
+}
+} // namespace
+
+Point Parallelogram::corner(unsigned i) const
+{
+ assert(i < 4);
+ return unit_rect_corner(i) * m_affine;
+}
+
+Rect Parallelogram::bounds() const
+{
+ Rect rect(corner(0), corner(2));
+ rect.expandTo(corner(1));
+ rect.expandTo(corner(3));
+ return rect;
+}
+
+bool Parallelogram::intersects(Parallelogram const &other) const
+{
+ if (m_affine.isSingular() || other.m_affine.isSingular()) {
+ return false;
+ }
+
+ auto const affine1 = other.m_affine * m_affine.inverse();
+ auto const affine2 = affine1.inverse();
+
+ // case 1: any corner inside the other rectangle
+ for (unsigned i = 0; i != 4; ++i) {
+ auto const p = unit_rect_corner(i);
+ if (unit_rect_contains(p * affine1) || //
+ unit_rect_contains(p * affine2)) {
+ return true;
+ }
+ }
+
+ // case 2: any sides intersect (check diagonals)
+ for (unsigned i = 0; i != 2; ++i) {
+ auto const A = corner(i);
+ auto const B = corner((i + 2) % 4);
+ for (unsigned j = 0; j != 2; ++j) {
+ auto const C = other.corner(j);
+ auto const D = other.corner((j + 2) % 4);
+ if (non_collinear_segments_intersect(A, B, C, D)) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
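+
+/* Usage sketch (editor's note; assumes the Parallelogram(Rect) constructor,
+ * operator*=(Affine) and Rotate from the corresponding 2geom headers):
+ *
+ * Parallelogram a{Rect(Point(0, 0), Point(2, 1))};
+ * Parallelogram b{Rect(Point(1, 0), Point(3, 1))};
+ * b *= Rotate(0.25 * M_PI);
+ * bool hit = a.intersects(b); // corner containment or crossing diagonals
+ */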
+
+bool Parallelogram::contains(Point const &p) const
+{
+ return !m_affine.isSingular() && //
+ unit_rect_contains(p * m_affine.inverse());
+}
+
+bool Parallelogram::contains(Parallelogram const &other) const
+{
+ if (m_affine.isSingular()) {
+ return false;
+ }
+
+ auto const inv = m_affine.inverse();
+
+ for (unsigned i = 0; i != 4; ++i) {
+ if (!unit_rect_contains(other.corner(i) * inv)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+Coord Parallelogram::minExtent() const
+{
+ return std::min(m_affine.expansionX(), //
+ m_affine.expansionY());
+}
+
+Coord Parallelogram::maxExtent() const
+{
+ return std::max(m_affine.expansionX(), //
+ m_affine.expansionY());
+}
+
+bool Parallelogram::isSheared(Coord eps) const
+{
+ return !are_near(dot(m_affine.xAxis(), m_affine.yAxis()), //
+ 0.0, eps);
+}
+
+} // namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/parting-point.cpp b/src/2geom/parting-point.cpp
new file mode 100644
index 0000000..3e3e803
--- /dev/null
+++ b/src/2geom/parting-point.cpp
@@ -0,0 +1,280 @@
+/** @file Implementation of parting_point(Path const&, Path const&, Coord)
+ */
+/* An algorithm to find the first parting point of two paths.
+ *
+ * Authors:
+ * Rafał Siejakowski <rs@rs-math.net>
+ *
+ * Copyright 2022 the Authors.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+#include <2geom/path.h>
+#include <2geom/point.h>
+
+namespace Geom
+{
+
+PathIntersection parting_point(Path const &first, Path const &second, Coord precision)
+{
+ Path const *paths[2] = { &first, &second };
+ Point const starts[2] = { first.initialPoint(), second.initialPoint() };
+
+ if (!are_near(starts[0], starts[1], precision)) {
+ auto const invalid = PathTime(0, -1.0);
+ return PathIntersection(invalid, invalid, middle_point(starts[0], starts[1]));
+ }
+
+ if (first.empty() || second.empty()) {
+ auto const start_time = PathTime(0, 0.0);
+ return PathIntersection(start_time, start_time, middle_point(starts[0], starts[1]));
+ }
+
+ size_t const curve_count[2] = { first.size(), second.size() };
+ Coord const max_time[2] = { first.timeRange().max(), second.timeRange().max() };
+
+ /// Curve indices up until which the paths are known to overlap
+ unsigned pos[2] = { 0, 0 };
+ /// Curve times on the curves with indices pos[] up until which the
+ /// curves are known to overlap ahead of the nodes.
+ Coord curve_times[2] = { 0.0, 0.0 };
+
+ bool leg = 0; ///< Flag indicating which leg is stepping on the ladder
+ bool just_changed_legs = false;
+
+ /* The ladder algorithm takes steps along the two paths, as if they were the stiles of
+ * an imaginary ladder. Note that the nodes (X) on both paths may not coincide:
+ *
+ * paths[0] START--------X-----------X-----------------------X---------X----> ...
+ * paths[1] START--------------X-----------------X-----------X--------------> ...
+ *
+ * The variables pos[0], pos[1] are the indices of the nodes we've cleared so far;
+ * i.e., we know that the portions before pos[] overlap.
+ *
+ * In each iteration of the loop, we move to the next node along one of the paths;
+ * the variable `leg` tells us which path. We find the point nearest to that node
+ * on the first unprocessed curve of the other path and check the curve time.
+ *
+ * Suppose the current node positions are denoted by P; one possible location of
+ * the nearest point (N) to the next node is:
+ *
+ * ----P------------------N--X---- paths[!leg]
+ * --------P--------------X------- paths[leg] (we've stepped forward from P to X)
+ *
+ * We detect this situation when we find that the curve time of N is < 1.0.
+ * We then create a trimmed version of the top curve so that it corresponds to
+ * the current bottom curve:
+ *
+ * ----P----------------------N--X---------- paths[!leg]
+ * [------------------] trimmed curve
+ * --------P------------------X------------- paths[leg]
+ *
+ * Using isNear(), we can compare the trimmed curve to the front curve (P--X) on
+ * paths[leg]; if they are indeed near, then pos[leg] can be incremented.
+ *
+ * Another possibility is that we overstep the end of the other curve:
+ *
+ * ----P-----------------X------------------ paths[!leg]
+ * N
+ * --------P------------------X------------- paths[leg]
+ *
+ * so the nearest point N now coincides with a node on the top path. We detect
+ * this situation by observing that the curve time of N is close to 1. In case
+ * of such overstep, we change legs by flipping the `leg` variable:
+ *
+ * ----P-----------------X------------------ paths[leg]
+ * --------P------------------X------------- paths[!leg]
+ *
+ * We can now continue the stepping procedure, but the next step will be taken on
+ * the path `paths[leg]`, so it should be a shorter step (if it isn't, the paths
+ * must have diverged and we're done):
+ *
+ * ----P-----------------X------------------ paths[leg]
+ * --------P-------------N----X------------- paths[!leg]
+ *
+ * Another piece of data we hold on to are the curve times on the current curves
+ * up until which the paths have been found to coincide. In other words, at every
+ * step of the algorithm we know that the curves agree up to the path-times
+ * PathTime(pos[i], curve_times[i]).
+ *
+ * In the situation mentioned just above, the times (T) will be as follows:
+ *
+ * ----P---T-------------X------------------ paths[leg]
+ *
+ * --------P-------------N----X------------- paths[!leg]
+ * T
+ *
+ * In this example, the time on top path is > 0.0, since the T mark is further
+ * ahead than P on that path. This value of the curve time is needed to correctly
+ * crop the top curve for the purpose of the isNear() comparison:
+ *
+ * ----P---T-------------X---------- paths[leg]
+ * [-------------] comparison curve (cropped from paths[leg])
+ * [-------------] comparison curve (cropped from paths[!leg])
+ * --------P-------------N----X----- paths[!leg]
+ * T
+ *
+ * In fact, the lower end of the curve time range for cropping is always
+ * given by curve_times[i].
+ *
+ * The iteration ends when we find that the two paths have diverged or when we
+ * reach the end. When that happens, the positions and curve times will be
+ * the PathTime components of the actual point of divergence on both paths.
+ */
+
+ /// A closure to crop and compare the curve pieces ([----] in the diagrams above).
+ auto const pieces_agree = [&](Coord time_on_other) -> bool {
+ Curve *pieces[2];
+ // The leg-side curve is always cropped to the end:
+ pieces[ leg] = paths[ leg]->at(pos[ leg]).portion(curve_times[ leg], 1.0);
+ // The other one is cropped to a variable curve time:
+ pieces[!leg] = paths[!leg]->at(pos[!leg]).portion(curve_times[!leg], time_on_other);
+ bool ret = pieces[0]->isNear(*pieces[1], precision);
+ delete pieces[0];
+ delete pieces[1];
+ return ret;
+ };
+
+ /// A closure to skip degenerate curves; returns true if we reached the end.
+ auto const skip_degenerates = [&](size_t which) -> bool {
+ while (paths[which]->at(pos[which]).isDegenerate()) {
+ ++pos[which];
+ curve_times[which] = 0.0;
+ if (pos[which] == curve_count[which]) {
+ return true; // We've reached the end
+ }
+ }
+ return false;
+ };
+
+ // Main loop of the ladder algorithm.
+ while (pos[0] < curve_count[0] && pos[1] < curve_count[1]) {
+ // Skip degenerate curves if any.
+ if (skip_degenerates(0)) {
+ break;
+ }
+ if (skip_degenerates(1)) {
+ break;
+ }
+
+ // Try to step to the next node with the current leg and see what happens.
+ Coord forward_coord = (Coord)(pos[leg] + 1);
+ if (forward_coord > max_time[leg]) {
+ forward_coord = max_time[leg];
+ }
+ auto const step_point = paths[leg]->pointAt(forward_coord);
+ auto const time_on_other = paths[!leg]->at(pos[!leg]).nearestTime(step_point);
+
+ if (are_near(time_on_other, 1.0, precision) &&
+ are_near(step_point, paths[!leg]->pointAt(pos[!leg] + 1), precision))
+ { // The step took us very near to the first uncertified node on the other path.
+ just_changed_legs = false;
+ //
+ // -------PT-----------------X---------- paths[!leg]
+ // --P-----T-----------------X---------- paths[leg]
+ // ^
+ // endpoints (almost) coincide
+ //
+ // We should compare the curves cropped to the end:
+ //
+ // --------T-----------------X---------- paths[!leg]
+ // [-----------------]
+ // [-----------------]
+ // --------T-----------------X---------- paths[leg]
+ if (pieces_agree(1.0)) {
+ // The curves are nearly identical, so we advance both positions
+ // and zero out the forward curve times.
+ for (size_t i = 0; i < 2; i++) {
+ pos[i]++;
+ curve_times[i] = 0.0;
+ }
+ } else { // We've diverged.
+ break;
+ }
+ } else if (time_on_other < 1.0 - precision) {
+ just_changed_legs = false;
+
+ // The other curve is longer than our step! We trim the other curve to the point
+ // nearest to the step point and compare the resulting pieces.
+ //
+ // --------T-----------------N--------X---- paths[!leg]
+ // [-----------------]
+ // [-----------------]
+ // --------T-----------------X------------- paths[leg]
+ //
+ if (pieces_agree(time_on_other)) { // The curve pieces are near to one another!
+ // We can advance our position and zero out the curve time:
+ pos[leg]++;
+ curve_times[leg] = 0.0;
+ // But on the other path, we can only advance the time, not the curve index:
+ curve_times[!leg] = time_on_other;
+ } else { // We've diverged.
+ break;
+ }
+ } else {
+ // The other curve is shorter than ours, which means that we've overstepped.
+ // We change legs and try to take a shorter step in the next iteration.
+ if (just_changed_legs) {
+ // We already changed legs before and it didn't help, i.e., we've diverged.
+ break;
+ } else {
+ leg = !leg;
+ just_changed_legs = true;
+ }
+ }
+ }
+
+ // Compute the parting time on both paths
+ PathTime path_times[2];
+ for (size_t i = 0; i < 2; i++) {
+ path_times[i] = (pos[i] == curve_count[i]) ? PathTime(curve_count[i] - 1, 1.0)
+ : PathTime(pos[i], curve_times[i]);
+ }
+
+ // Get the parting point from the numerically nicest source
+ Point parting_pt;
+ if (curve_times[0] == 0.0) {
+ parting_pt = paths[0]->pointAt(path_times[0]);
+ } else if (curve_times[1] == 0.0) {
+ parting_pt = paths[1]->pointAt(path_times[1]);
+ } else {
+ parting_pt = middle_point(first.pointAt(path_times[0]), second.pointAt(path_times[1]));
+ }
+
+ return PathIntersection(path_times[0], path_times[1], std::move(parting_pt));
+}
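+
+/* Usage sketch (editor's note; assumes the PathIntersection interface from
+ * 2geom/intersection.h):
+ *
+ * PathIntersection split = parting_point(path_a, path_b, 1e-6);
+ * PathTime time_on_a = split.first, time_on_b = split.second;
+ * Point where = split.point(); // the point where the two paths part ways
+ */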
+
+} // namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/path-extrema.cpp b/src/2geom/path-extrema.cpp
new file mode 100644
index 0000000..319ccec
--- /dev/null
+++ b/src/2geom/path-extrema.cpp
@@ -0,0 +1,156 @@
+/** @file Implementation of Path::extrema()
+ */
+/* An algorithm to find the points on a path where a given coordinate
+ * attains its minimum and maximum values.
+ *
+ * Authors:
+ * Rafał Siejakowski <rs@rs-math.net>
+ *
+ * Copyright 2022 the Authors.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+#include <2geom/curve.h>
+#include <2geom/path.h>
+#include <2geom/point.h>
+
+namespace Geom {
+
+/** Returns +1 for positive numbers, -1 for negative numbers, and 0 otherwise. */
+inline static float sign(double number)
+{
+ if (number > 0.0) {
+ return 1.0;
+ } else if (number < 0.0) {
+ return -1.0;
+ }
+ return 0.0;
+}
+
+/** @brief Determine whether the d-coordinate increases or decreases at the given path time.
+ *
+ * @param path A path.
+ * @param time A forward-normalized time on the given path.
+ * @param d The coordinate about which we want to know whether it increases.
+ * @return +1.0 if the coordinate increases, -1.0 if it decreases, 0.0 if it remains constant.
+*/
+static float find_direction_of_travel(Path const &path, PathTime const &time, Dim2 d)
+{
+ if (time.t == 0.0) { // We're at a node point
+ if (time.curve_index == 0) { // Starting point of the path.
+ if (path.closed()) {
+ return sign(path.initialUnitTangent()[d] + path.finalUnitTangent()[d]);
+ } else {
+ return sign(path.initialUnitTangent()[d]);
+ }
+ } else if (time.curve_index == path.size()) { // End point of the path.
+ if (path.closed()) {
+ return sign(path.initialUnitTangent()[d] + path.finalUnitTangent()[d]);
+ } else {
+ return sign(path.finalUnitTangent()[d]);
+ }
+ }
+
+ // Otherwise, check the average of the two unit tangent vectors.
+ auto const outgoing_tangent = path.curveAt(time.curve_index).unitTangentAt(0.0);
+ auto const incoming_tangent = path.curveAt(time.curve_index - 1).unitTangentAt(1.0);
+ return sign(outgoing_tangent[d] + incoming_tangent[d]);
+ }
+ // We're in the middle of a curve
+ return sign(path.curveAt(time.curve_index).unitTangentAt(time.t)[d]);
+}
+
+/* Find information about the points on the path where the specified
+ * coordinate attains its minimum and maximum values.
+ */
+PathExtrema Path::extrema(Dim2 d) const
+{
+ auto const ZERO_TIME = PathTime(0, 0);
+
+ // Handle the trivial case of an empty path.
+ if (empty()) {
+ auto const origin = initialPoint();
+ return PathExtrema{
+ .min_point = origin,
+ .max_point = origin,
+ .glance_direction_at_min = 0.0,
+ .glance_direction_at_max = 0.0,
+ .min_time = ZERO_TIME,
+ .max_time = ZERO_TIME
+ };
+ }
+
+ // Set up the simultaneous min-max search
+ Point min_point = initialPoint(), max_point = min_point;
+ auto min_time = ZERO_TIME, max_time = ZERO_TIME;
+ unsigned curve_counter = 0;
+
+ /// A closure to update the current minimum and maximum values.
+ auto const update_minmax = [&](Point const &new_point, Coord t) {
+ if (new_point[d] < min_point[d]) {
+ min_point = new_point;
+ min_time = PathTime(curve_counter, t);
+ } else if (new_point[d] > max_point[d]) {
+ max_point = new_point;
+ max_time = PathTime(curve_counter, t);
+ }
+ };
+
+ // Iterate through the curves, searching for min and max.
+ for (auto const &curve : _data->curves) {
+ // Check the starting node first
+ update_minmax(curve.initialPoint(), 0.0);
+
+ // Check the critical points (zeroes of the derivative).
+ std::unique_ptr<Curve> const derivative{curve.derivative()};
+ for (auto root : derivative->roots(0.0, d)) {
+ update_minmax(curve.pointAt(root), root);
+ }
+ curve_counter++;
+ }
+
+ auto const other = other_dimension(d);
+ return PathExtrema{
+ .min_point = min_point,
+ .max_point = max_point,
+ .glance_direction_at_min = find_direction_of_travel(*this, min_time, other),
+ .glance_direction_at_max = find_direction_of_travel(*this, max_time, other),
+ .min_time = min_time,
+ .max_time = max_time
+ };
+}
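+
+/* Usage sketch (editor's note): finding the leftmost and rightmost points of a
+ * path together with the vertical direction of travel there:
+ *
+ * PathExtrema ex = path.extrema(Geom::X);
+ * Point leftmost = ex.min_point;
+ * float glance = ex.glance_direction_at_min; // +1, -1 or 0 along Y
+ */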
+
+} // namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/path-intersection.cpp b/src/2geom/path-intersection.cpp
new file mode 100644
index 0000000..280d7ba
--- /dev/null
+++ b/src/2geom/path-intersection.cpp
@@ -0,0 +1,728 @@
+#include <2geom/path-intersection.h>
+
+#include <2geom/ord.h>
+
+//for path_direction:
+#include <2geom/sbasis-geometric.h>
+#include <2geom/line.h>
+#ifdef HAVE_GSL
+#include <gsl/gsl_vector.h>
+#include <gsl/gsl_multiroots.h>
+#endif
+
+namespace Geom {
+
+/// Compute winding number of the path at the specified point
+int winding(Path const &path, Point const &p) {
+ return path.winding(p);
+}
+
+/**
+ * This function should only be applied to simple paths (regions), as otherwise
+ * a boolean winding direction is undefined. It returns true for fill, false for
+ * hole. It currently determines this from the sign of the area (the faster winding-based code below is commented out).
+ */
+bool path_direction(Path const &p) {
+ if(p.empty()) return false;
+
+ /*goto doh;
+ //could probably be more efficient, but this is a quick job
+ double y = p.initialPoint()[Y];
+ double x = p.initialPoint()[X];
+ Cmp res = cmp(p[0].finalPoint()[Y], y);
+ for(unsigned i = 1; i < p.size(); i++) {
+ Cmp final_to_ray = cmp(p[i].finalPoint()[Y], y);
+ Cmp initial_to_ray = cmp(p[i].initialPoint()[Y], y);
+ // if y is included, these will have opposite values, giving order.
+ Cmp c = cmp(final_to_ray, initial_to_ray);
+ if(c != EQUAL_TO) {
+ std::vector<double> rs = p[i].roots(y, Y);
+ for(unsigned j = 0; j < rs.size(); j++) {
+ double nx = p[i].valueAt(rs[j], X);
+ if(nx > x) {
+ x = nx;
+ res = c;
+ }
+ }
+ } else if(final_to_ray == EQUAL_TO) goto doh;
+ }
+ return res < 0;
+
+ doh:*/
+ //Otherwise fallback on area
+
+ Piecewise<D2<SBasis> > pw = p.toPwSb();
+ double area;
+ Point centre;
+ Geom::centroid(pw, centre, area);
+ return area > 0;
+}
+
+//pair intersect code based on njh's pair-intersect
+
+/** A little sugar for appending a list to another */
+template<typename T>
+void append(T &a, T const &b) {
+ a.insert(a.end(), b.begin(), b.end());
+}
+
+/**
+ * Finds the intersection between the lines defined by A0 & A1, and B0 & B1.
+ * The results are returned through the last 3 parameters: the t-values on the
+ * lines and the cross product of the deltas (a useful byproduct). The return value
+ * indicates if the time values are within their proper range on the line segments.
+ */
+bool
+linear_intersect(Point const &A0, Point const &A1, Point const &B0, Point const &B1,
+ double &tA, double &tB, double &det) {
+ bool both_lines_non_zero = (!are_near(A0, A1)) && (!are_near(B0, B1));
+
+ // Cramer's rule as cross products
+ Point Ad = A1 - A0,
+ Bd = B1 - B0,
+ d = B0 - A0;
+ det = cross(Ad, Bd);
+
+ double det_rel = det; // Calculate the determinant of the normalized vectors
+ if (both_lines_non_zero) {
+ det_rel /= Ad.length();
+ det_rel /= Bd.length();
+ }
+
+ if( fabs(det_rel) < 1e-12 ) { // If the cross product is NEARLY zero,
+ // Then one of the line segments might have length zero
+ if (both_lines_non_zero) {
+ // If that's not the case, then we must have either:
+ // - parallel lines, with no intersections, or
+ // - coincident lines, with an infinite number of intersections
+ // Either is quite useless, so we'll just bail out
+ return false;
+ } // Else, one of the line segments is zero, and we might still be able to calculate a single intersection point
+ } // Else we haven't bailed out, and we'll try to calculate the intersections
+
+ double detinv = 1.0 / det;
+ tA = cross(d, Bd) * detinv;
+ tB = cross(d, Ad) * detinv;
+ return (tA >= 0.) && (tA <= 1.) && (tB >= 0.) && (tB <= 1.);
+}
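+
+/* Worked example (editor's note): for the segments (0,0)-(2,0) and (1,-1)-(1,1)
+ * the solver yields tA == 0.5 and tB == 0.5:
+ *
+ * double tA, tB, det;
+ * bool ok = linear_intersect(Point(0, 0), Point(2, 0),
+ * Point(1, -1), Point(1, 1), tA, tB, det);
+ * // ok == true; the intersection point is A0 + tA * (A1 - A0) == (1, 0)
+ */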
+
+
+#if 0
+typedef union dbl_64{
+ long long i64;
+ double d64;
+};
+
+static double EpsilonOf(double value)
+{
+ dbl_64 s;
+ s.d64 = value;
+ if(s.i64 == 0)
+ {
+ s.i64++;
+ return s.d64 - value;
+ }
+ else if(s.i64-- < 0)
+ return s.d64 - value;
+ else
+ return value - s.d64;
+}
+#endif
+
+#ifdef HAVE_GSL
+struct rparams {
+ Curve const &A;
+ Curve const &B;
+};
+
+static int
+intersect_polish_f (const gsl_vector * x, void *params,
+ gsl_vector * f)
+{
+ const double x0 = gsl_vector_get (x, 0);
+ const double x1 = gsl_vector_get (x, 1);
+
+ Geom::Point dx = ((struct rparams *) params)->A(x0) -
+ ((struct rparams *) params)->B(x1);
+
+ gsl_vector_set (f, 0, dx[0]);
+ gsl_vector_set (f, 1, dx[1]);
+
+ return GSL_SUCCESS;
+}
+#endif
+
+static void
+intersect_polish_root (Curve const &A, double &s, Curve const &B, double &t)
+{
+ std::vector<Point> as, bs;
+ as = A.pointAndDerivatives(s, 2);
+ bs = B.pointAndDerivatives(t, 2);
+ Point F = as[0] - bs[0];
+ double best = dot(F, F);
+
+ for(int i = 0; i < 4; i++) {
+
+ /**
+ we want to solve
+ J*(x1 - x0) = f(x0)
+
+ |dA(s)[0] -dB(t)[0]| (X1 - X0) = A(s) - B(t)
+ |dA(s)[1] -dB(t)[1]|
+ **/
+
+ // We're using the standard transformation matrices, which is numerically rather poor. Much better to solve the equation using elimination.
+
+ Affine jack(as[1][0], as[1][1],
+ -bs[1][0], -bs[1][1],
+ 0, 0);
+ Point soln = (F)*jack.inverse();
+ double ns = s - soln[0];
+ double nt = t - soln[1];
+
+ if (ns<0) ns=0;
+ else if (ns>1) ns=1;
+ if (nt<0) nt=0;
+ else if (nt>1) nt=1;
+
+ as = A.pointAndDerivatives(ns, 2);
+ bs = B.pointAndDerivatives(nt, 2);
+ F = as[0] - bs[0];
+ double trial = dot(F, F);
+ if (trial > best*0.1) // we have standards, you know
+ // At this point we could do a line search
+ break;
+ best = trial;
+ s = ns;
+ t = nt;
+ }
+
+#ifdef HAVE_GSL
+ if(0) { // the GSL version is more accurate, but taints this with GPL
+ int status;
+ size_t iter = 0;
+ const size_t n = 2;
+ struct rparams p = {A, B};
+ gsl_multiroot_function f = {&intersect_polish_f, n, &p};
+
+ double x_init[2] = {s, t};
+ gsl_vector *x = gsl_vector_alloc (n);
+
+ gsl_vector_set (x, 0, x_init[0]);
+ gsl_vector_set (x, 1, x_init[1]);
+
+ const gsl_multiroot_fsolver_type *T = gsl_multiroot_fsolver_hybrids;
+ gsl_multiroot_fsolver *sol = gsl_multiroot_fsolver_alloc (T, 2);
+ gsl_multiroot_fsolver_set (sol, &f, x);
+
+ do
+ {
+ iter++;
+ status = gsl_multiroot_fsolver_iterate (sol);
+
+ if (status) /* check if solver is stuck */
+ break;
+
+ status =
+ gsl_multiroot_test_residual (sol->f, 1e-12);
+ }
+ while (status == GSL_CONTINUE && iter < 1000);
+
+ s = gsl_vector_get (sol->x, 0);
+ t = gsl_vector_get (sol->x, 1);
+
+ gsl_multiroot_fsolver_free (sol);
+ gsl_vector_free (x);
+ }
+#endif
+}
+
+/**
+ * This uses the local bounds functions of curves to generically intersect two.
+ * It passes in the curves, time intervals, and keeps track of depth, while
+ * returning the results through the Crossings parameter.
+ */
+void pair_intersect(Curve const & A, double Al, double Ah,
+ Curve const & B, double Bl, double Bh,
+ Crossings &ret, unsigned depth = 0) {
+ // std::cout << depth << "(" << Al << ", " << Ah << ")\n";
+ OptRect Ar = A.boundsLocal(Interval(Al, Ah));
+ if (!Ar) return;
+
+ OptRect Br = B.boundsLocal(Interval(Bl, Bh));
+ if (!Br) return;
+
+ if(! Ar->intersects(*Br)) return;
+
+ //Checks the general linearity of the function
+ if((depth > 12)) { // || (A.boundsLocal(Interval(Al, Ah), 1).maxExtent() < 0.1
+ //&& B.boundsLocal(Interval(Bl, Bh), 1).maxExtent() < 0.1)) {
+ double tA, tB, c;
+ if(linear_intersect(A.pointAt(Al), A.pointAt(Ah),
+ B.pointAt(Bl), B.pointAt(Bh),
+ tA, tB, c)) {
+ tA = tA * (Ah - Al) + Al;
+ tB = tB * (Bh - Bl) + Bl;
+ intersect_polish_root(A, tA,
+ B, tB);
+ if(depth % 2)
+ ret.push_back(Crossing(tB, tA, c < 0));
+ else
+ ret.push_back(Crossing(tA, tB, c > 0));
+ return;
+ }
+ }
+ if(depth > 12) return;
+ double mid = (Bl + Bh)/2;
+ pair_intersect(B, Bl, mid,
+ A, Al, Ah,
+ ret, depth+1);
+ pair_intersect(B, mid, Bh,
+ A, Al, Ah,
+ ret, depth+1);
+}
+
+Crossings pair_intersect(Curve const & A, Interval const &Ad,
+ Curve const & B, Interval const &Bd) {
+ Crossings ret;
+ pair_intersect(A, Ad.min(), Ad.max(), B, Bd.min(), Bd.max(), ret);
+ return ret;
+}
+
+/** A simple wrapper around pair_intersect */
+Crossings SimpleCrosser::crossings(Curve const &a, Curve const &b) {
+ Crossings ret;
+ pair_intersect(a, 0, 1, b, 0, 1, ret);
+ return ret;
+}
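+
+/* Usage sketch (editor's note; Crossing members as declared in 2geom/crossing.h):
+ *
+ * Crossings xs = pair_intersect(curve_a, Interval(0, 1),
+ * curve_b, Interval(0, 1));
+ * for (Crossing const &c : xs) {
+ * // c.ta and c.tb are the curve times of the crossing on A and B
+ * }
+ */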
+
+
+// same as mono_pair below, but for curves instead of paths
+void mono_intersect(Curve const &A, double Al, double Ah,
+ Curve const &B, double Bl, double Bh,
+ Crossings &ret, double tol = 0.1, unsigned depth = 0) {
+ if( Al >= Ah || Bl >= Bh) return;
+ //std::cout << " " << depth << "[" << Al << ", " << Ah << "]" << "[" << Bl << ", " << Bh << "]";
+
+ Point A0 = A.pointAt(Al), A1 = A.pointAt(Ah),
+ B0 = B.pointAt(Bl), B1 = B.pointAt(Bh);
+ //inline code that this implies? (without rect/interval construction)
+ Rect Ar = Rect(A0, A1), Br = Rect(B0, B1);
+ if(!Ar.intersects(Br) || A0 == A1 || B0 == B1) return;
+
+ if(depth > 12 || (Ar.maxExtent() < tol && Br.maxExtent() < tol)) {
+ double tA, tB, c;
+ if(linear_intersect(A.pointAt(Al), A.pointAt(Ah),
+ B.pointAt(Bl), B.pointAt(Bh),
+ tA, tB, c)) {
+ tA = tA * (Ah - Al) + Al;
+ tB = tB * (Bh - Bl) + Bl;
+ intersect_polish_root(A, tA,
+ B, tB);
+ if(depth % 2)
+ ret.push_back(Crossing(tB, tA, c < 0));
+ else
+ ret.push_back(Crossing(tA, tB, c > 0));
+ return;
+ }
+ }
+ if(depth > 12) return;
+ double mid = (Bl + Bh)/2;
+ mono_intersect(B, Bl, mid,
+ A, Al, Ah,
+ ret, tol, depth+1);
+ mono_intersect(B, mid, Bh,
+ A, Al, Ah,
+ ret, tol, depth+1);
+}
+
+Crossings mono_intersect(Curve const & A, Interval const &Ad,
+ Curve const & B, Interval const &Bd) {
+ Crossings ret;
+ mono_intersect(A, Ad.min(), Ad.max(), B, Bd.min(), Bd.max(), ret);
+ return ret;
+}
+
+/**
+ * Takes two paths and time ranges on them, with the invariant that the
+ * paths are monotonic on the range. Splits A when the linear intersection
+ * doesn't exist or is inaccurate. Uses the fact that it is monotonic to
+ * do very fast local bounds.
+ */
+void mono_pair(Path const &A, double Al, double Ah,
+               Path const &B, double Bl, double Bh,
+               Crossings &ret, double tol, unsigned depth = 0) {
+ if( Al >= Ah || Bl >= Bh) return;
+    //std::cout << " " << depth << "[" << Al << ", " << Ah << "]" << "[" << Bl << ", " << Bh << "]";
+
+ Point A0 = A.pointAt(Al), A1 = A.pointAt(Ah),
+ B0 = B.pointAt(Bl), B1 = B.pointAt(Bh);
+ //inline code that this implies? (without rect/interval construction)
+ Rect Ar = Rect(A0, A1), Br = Rect(B0, B1);
+ if(!Ar.intersects(Br) || A0 == A1 || B0 == B1) return;
+
+    if(depth > 12 || (Ar.maxExtent() < 0.1 && Br.maxExtent() < 0.1)) {
+ double tA, tB, c;
+ if(linear_intersect(A0, A1, B0, B1,
+ tA, tB, c)) {
+ tA = tA * (Ah - Al) + Al;
+ tB = tB * (Bh - Bl) + Bl;
+ if(depth % 2)
+ ret.push_back(Crossing(tB, tA, c < 0));
+ else
+ ret.push_back(Crossing(tA, tB, c > 0));
+ return;
+ }
+ }
+ if(depth > 12) return;
+ double mid = (Bl + Bh)/2;
+    mono_pair(B, Bl, mid,
+              A, Al, Ah,
+              ret, tol, depth+1);
+    mono_pair(B, mid, Bh,
+              A, Al, Ah,
+              ret, tol, depth+1);
+}
+
+/** This returns the times when the x or y derivative is 0 in the curve. */
+std::vector<double> curve_mono_splits(Curve const &d) {
+ Curve* deriv = d.derivative();
+ std::vector<double> rs = deriv->roots(0, X);
+ append(rs, deriv->roots(0, Y));
+ delete deriv;
+ std::sort(rs.begin(), rs.end());
+ return rs;
+}
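+
+/* Usage sketch (illustrative only, not part of the library): a half-circle
+ * arc, built with the same EllipticalArc constructor used by Path(Circle)
+ * below, has a vertical extremum, so a split time near 0.5 is expected.
+ *
+ *     EllipticalArc arc(Point(1, 0), 1, 1, 0, false, true, Point(-1, 0));
+ *     std::vector<double> splits = curve_mono_splits(arc);
+ *     // splits holds the times where dx/dt or dy/dt vanishes
+ *     // (about 0.5 for the arc's extremum, possibly plus the endpoints)
+ */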
+
+/** Convenience function to add a value to each entry in a vector of doubles. */
+std::vector<double> offset_doubles(std::vector<double> const &x, double offs) {
+ std::vector<double> ret;
+ for(double i : x) {
+ ret.push_back(i + offs);
+ }
+ return ret;
+}
+
+/**
+ * Finds all the monotonic splits for a path. Only includes the split between
+ * curves if they switch derivative directions at that point.
+ */
+std::vector<double> path_mono_splits(Path const &p) {
+ std::vector<double> ret;
+ if(p.empty()) return ret;
+
+ int pdx = 2, pdy = 2; // Previous derivative direction
+ for(unsigned i = 0; i < p.size(); i++) {
+ std::vector<double> spl = offset_doubles(curve_mono_splits(p[i]), i);
+ int dx = p[i].initialPoint()[X] > (spl.empty() ? p[i].finalPoint()[X] : p.valueAt(spl.front(), X)) ? 1 : 0;
+ int dy = p[i].initialPoint()[Y] > (spl.empty() ? p[i].finalPoint()[Y] : p.valueAt(spl.front(), Y)) ? 1 : 0;
+ //The direction changed, include the split time
+ if(dx != pdx || dy != pdy) {
+ ret.push_back(i);
+ pdx = dx; pdy = dy;
+ }
+ append(ret, spl);
+ }
+ return ret;
+}
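+
+/* Usage sketch (illustrative only, not part of the library; assumes Circle can
+ * be built from a center and radius): the returned values are path times, i.e.
+ * curve index plus local t, so they can be fed straight back into pointAt().
+ *
+ *     Path p = Path(Circle(Point(0, 0), 10));
+ *     std::vector<double> splits = path_mono_splits(p);
+ *     for (double t : splits) {
+ *         Point pt = p.pointAt(t);
+ *         std::cout << pt[X] << ", " << pt[Y] << "\n";
+ *     }
+ */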
+
+/**
+ * Applies path_mono_splits to multiple paths, and returns the results such that
+ * time-set i corresponds to Path i.
+ */
+std::vector<std::vector<double> > paths_mono_splits(PathVector const &ps) {
+ std::vector<std::vector<double> > ret;
+ for(const auto & p : ps)
+ ret.push_back(path_mono_splits(p));
+ return ret;
+}
+
+/**
+ * Processes the bounds for a list of paths and a list of splits on them, yielding a list of rects for each.
+ * Each entry i corresponds to path i of the input. The number of rects in each entry is guaranteed to be the
+ * number of splits for that path, subtracted by one.
+ */
+std::vector<std::vector<Rect> > split_bounds(PathVector const &p, std::vector<std::vector<double> > splits) {
+ std::vector<std::vector<Rect> > ret;
+ for(unsigned i = 0; i < p.size(); i++) {
+ std::vector<Rect> res;
+ for(unsigned j = 1; j < splits[i].size(); j++)
+ res.emplace_back(p[i].pointAt(splits[i][j-1]), p[i].pointAt(splits[i][j]));
+ ret.push_back(res);
+ }
+ return ret;
+}
+
+/**
+ * This is the main routine of "MonoCrosser", and implements a monotonic strategy on multiple curves.
+ * Finds crossings between two sets of paths, yielding a CrossingSet. [0, a.size()) of the return correspond
+ * to the sorted crossings of a with paths of b. The rest of the return, [a.size(), a.size() + b.size()],
+ * corresponds to the sorted crossings of b with paths of a.
+ *
+ * This function does two sweeps: one on the bounds of each path and, after that culling pass, one on the curves within.
+ * This leads to a certain amount of code complexity; however, most of it is factored into the functions above.
+ */
+CrossingSet MonoCrosser::crossings(PathVector const &a, PathVector const &b) {
+ if(b.empty()) return CrossingSet(a.size(), Crossings());
+ CrossingSet results(a.size() + b.size(), Crossings());
+ if(a.empty()) return results;
+
+ std::vector<std::vector<double> > splits_a = paths_mono_splits(a), splits_b = paths_mono_splits(b);
+ std::vector<std::vector<Rect> > bounds_a = split_bounds(a, splits_a), bounds_b = split_bounds(b, splits_b);
+
+ std::vector<Rect> bounds_a_union, bounds_b_union;
+ for(auto & i : bounds_a) bounds_a_union.push_back(union_list(i));
+ for(auto & i : bounds_b) bounds_b_union.push_back(union_list(i));
+
+ std::vector<std::vector<unsigned> > cull = sweep_bounds(bounds_a_union, bounds_b_union);
+ Crossings n;
+ for(unsigned i = 0; i < cull.size(); i++) {
+ for(unsigned jx = 0; jx < cull[i].size(); jx++) {
+ unsigned j = cull[i][jx];
+ unsigned jc = j + a.size();
+ Crossings res;
+
+ //Sweep of the monotonic portions
+ std::vector<std::vector<unsigned> > cull2 = sweep_bounds(bounds_a[i], bounds_b[j]);
+ for(unsigned k = 0; k < cull2.size(); k++) {
+ for(unsigned lx = 0; lx < cull2[k].size(); lx++) {
+ unsigned l = cull2[k][lx];
+ mono_pair(a[i], splits_a[i][k-1], splits_a[i][k],
+ b[j], splits_b[j][l-1], splits_b[j][l],
+ res, .1);
+ }
+ }
+
+ for(auto & re : res) { re.a = i; re.b = jc; }
+
+ merge_crossings(results[i], res, i);
+            merge_crossings(results[jc], res, jc);
+ }
+ }
+
+ return results;
+}
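+
+/* Usage sketch (illustrative only, not part of the library; assumes MonoCrosser
+ * is default-constructible, like SimpleCrosser below): entry i of the result
+ * holds the crossings of a[i], and entry a.size() + j those of b[j].
+ *
+ *     PathVector a, b;
+ *     a.push_back(Path(Rect(Point(0, 0), Point(10, 10))));
+ *     b.push_back(Path(Rect(Point(5, 5), Point(15, 15))));
+ *     MonoCrosser crosser;
+ *     CrossingSet cs = crosser.crossings(a, b);
+ *     for (Crossing const &c : cs[0])        // crossings involving a[0]
+ *         std::cout << c.ta << " vs " << c.tb << "\n";
+ */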
+
+/* This function is similar in code to the MonoCrosser; the main difference is that it deals with
+ * only one set of paths and includes self-intersections.
+CrossingSet crossings_among(PathVector const &p) {
+ CrossingSet results(p.size(), Crossings());
+ if(p.empty()) return results;
+
+ std::vector<std::vector<double> > splits = paths_mono_splits(p);
+ std::vector<std::vector<Rect> > prs = split_bounds(p, splits);
+ std::vector<Rect> rs;
+ for(unsigned i = 0; i < prs.size(); i++) rs.push_back(union_list(prs[i]));
+
+ std::vector<std::vector<unsigned> > cull = sweep_bounds(rs);
+
+ //we actually want to do the self-intersections, so add em in:
+ for(unsigned i = 0; i < cull.size(); i++) cull[i].push_back(i);
+
+ for(unsigned i = 0; i < cull.size(); i++) {
+ for(unsigned jx = 0; jx < cull[i].size(); jx++) {
+ unsigned j = cull[i][jx];
+ Crossings res;
+
+ //Sweep of the monotonic portions
+ std::vector<std::vector<unsigned> > cull2 = sweep_bounds(prs[i], prs[j]);
+ for(unsigned k = 0; k < cull2.size(); k++) {
+ for(unsigned lx = 0; lx < cull2[k].size(); lx++) {
+ unsigned l = cull2[k][lx];
+ mono_pair(p[i], splits[i][k-1], splits[i][k],
+ p[j], splits[j][l-1], splits[j][l],
+ res, .1);
+ }
+ }
+
+ for(unsigned k = 0; k < res.size(); k++) { res[k].a = i; res[k].b = j; }
+
+ merge_crossings(results[i], res, i);
+ merge_crossings(results[j], res, j);
+ }
+ }
+
+ return results;
+}
+*/
+
+
+Crossings curve_self_crossings(Curve const &a) {
+ Crossings res;
+ std::vector<double> spl;
+ spl.push_back(0);
+ append(spl, curve_mono_splits(a));
+ spl.push_back(1);
+ for(unsigned i = 1; i < spl.size(); i++)
+ for(unsigned j = i+1; j < spl.size(); j++)
+ pair_intersect(a, spl[i-1], spl[i], a, spl[j-1], spl[j], res);
+ return res;
+}
+
+/*
+void mono_curve_intersect(Curve const & A, double Al, double Ah,
+ Curve const & B, double Bl, double Bh,
+ Crossings &ret, unsigned depth=0) {
+ // std::cout << depth << "(" << Al << ", " << Ah << ")\n";
+ Point A0 = A.pointAt(Al), A1 = A.pointAt(Ah),
+ B0 = B.pointAt(Bl), B1 = B.pointAt(Bh);
+ //inline code that this implies? (without rect/interval construction)
+ if(!Rect(A0, A1).intersects(Rect(B0, B1)) || A0 == A1 || B0 == B1) return;
+
+ //Checks the general linearity of the function
+ if((depth > 12) || (A.boundsLocal(Interval(Al, Ah), 1).maxExtent() < 0.1
+ && B.boundsLocal(Interval(Bl, Bh), 1).maxExtent() < 0.1)) {
+ double tA, tB, c;
+ if(linear_intersect(A0, A1, B0, B1, tA, tB, c)) {
+ tA = tA * (Ah - Al) + Al;
+ tB = tB * (Bh - Bl) + Bl;
+ if(depth % 2)
+ ret.push_back(Crossing(tB, tA, c < 0));
+ else
+ ret.push_back(Crossing(tA, tB, c > 0));
+ return;
+ }
+ }
+ if(depth > 12) return;
+ double mid = (Bl + Bh)/2;
+ mono_curve_intersect(B, Bl, mid,
+ A, Al, Ah,
+ ret, depth+1);
+ mono_curve_intersect(B, mid, Bh,
+ A, Al, Ah,
+ ret, depth+1);
+}
+
+std::vector<std::vector<double> > curves_mono_splits(Path const &p) {
+ std::vector<std::vector<double> > ret;
+ for(unsigned i = 0; i <= p.size(); i++) {
+ std::vector<double> spl;
+ spl.push_back(0);
+ append(spl, curve_mono_splits(p[i]));
+ spl.push_back(1);
+ ret.push_back(spl);
+ }
+}
+
+std::vector<std::vector<Rect> > curves_split_bounds(Path const &p, std::vector<std::vector<double> > splits) {
+ std::vector<std::vector<Rect> > ret;
+ for(unsigned i = 0; i < splits.size(); i++) {
+ std::vector<Rect> res;
+ for(unsigned j = 1; j < splits[i].size(); j++)
+ res.push_back(Rect(p.pointAt(splits[i][j-1]+i), p.pointAt(splits[i][j]+i)));
+ ret.push_back(res);
+ }
+ return ret;
+}
+
+Crossings path_self_crossings(Path const &p) {
+ Crossings ret;
+ std::vector<std::vector<unsigned> > cull = sweep_bounds(bounds(p));
+ std::vector<std::vector<double> > spl = curves_mono_splits(p);
+ std::vector<std::vector<Rect> > bnds = curves_split_bounds(p, spl);
+ for(unsigned i = 0; i < cull.size(); i++) {
+ Crossings res;
+ for(unsigned k = 1; k < spl[i].size(); k++)
+ for(unsigned l = k+1; l < spl[i].size(); l++)
+ mono_curve_intersect(p[i], spl[i][k-1], spl[i][k], p[i], spl[i][l-1], spl[i][l], res);
+ offset_crossings(res, i, i);
+ append(ret, res);
+ for(unsigned jx = 0; jx < cull[i].size(); jx++) {
+ unsigned j = cull[i][jx];
+ res.clear();
+
+ std::vector<std::vector<unsigned> > cull2 = sweep_bounds(bnds[i], bnds[j]);
+ for(unsigned k = 0; k < cull2.size(); k++) {
+ for(unsigned lx = 0; lx < cull2[k].size(); lx++) {
+ unsigned l = cull2[k][lx];
+ mono_curve_intersect(p[i], spl[i][k-1], spl[i][k], p[j], spl[j][l-1], spl[j][l], res);
+ }
+ }
+
+ //if(fabs(int(i)-j) == 1 || fabs(int(i)-j) == p.size()-1) {
+ Crossings res2;
+ for(unsigned k = 0; k < res.size(); k++) {
+ if(res[k].ta != 0 && res[k].ta != 1 && res[k].tb != 0 && res[k].tb != 1) {
+ res.push_back(res[k]);
+ }
+ }
+ res = res2;
+ //}
+ offset_crossings(res, i, j);
+ append(ret, res);
+ }
+ }
+ return ret;
+}
+*/
+
+Crossings self_crossings(Path const &p) {
+ Crossings ret;
+ std::vector<std::vector<unsigned> > cull = sweep_bounds(bounds(p));
+ for(unsigned i = 0; i < cull.size(); i++) {
+ Crossings res = curve_self_crossings(p[i]);
+ offset_crossings(res, i, i);
+ append(ret, res);
+ for(unsigned jx = 0; jx < cull[i].size(); jx++) {
+ unsigned j = cull[i][jx];
+ res.clear();
+ pair_intersect(p[i], 0, 1, p[j], 0, 1, res);
+
+ //if(fabs(int(i)-j) == 1 || fabs(int(i)-j) == p.size()-1) {
+ Crossings res2;
+ for(auto & re : res) {
+ if(re.ta != 0 && re.ta != 1 && re.tb != 0 && re.tb != 1) {
+ res2.push_back(re);
+ }
+ }
+ res = res2;
+ //}
+ offset_crossings(res, i, j);
+ append(ret, res);
+ }
+ }
+ return ret;
+}
+
+void flip_crossings(Crossings &crs) {
+ for(auto & cr : crs)
+ cr = Crossing(cr.tb, cr.ta, cr.b, cr.a, !cr.dir);
+}
+
+CrossingSet crossings_among(PathVector const &p) {
+ CrossingSet results(p.size(), Crossings());
+ if(p.empty()) return results;
+
+ SimpleCrosser cc;
+
+ std::vector<std::vector<unsigned> > cull = sweep_bounds(bounds(p));
+ for(unsigned i = 0; i < cull.size(); i++) {
+ Crossings res = self_crossings(p[i]);
+ for(auto & re : res) { re.a = re.b = i; }
+ merge_crossings(results[i], res, i);
+ flip_crossings(res);
+ merge_crossings(results[i], res, i);
+ for(unsigned jx = 0; jx < cull[i].size(); jx++) {
+ unsigned j = cull[i][jx];
+
+ Crossings res = cc.crossings(p[i], p[j]);
+ for(auto & re : res) { re.a = i; re.b = j; }
+ merge_crossings(results[i], res, i);
+ merge_crossings(results[j], res, j);
+ }
+ }
+ return results;
+}
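+
+/* Usage sketch (illustrative only, not part of the library): results[i] lists
+ * every crossing that involves path i, including its self-intersections.
+ *
+ *     PathVector pv;
+ *     pv.push_back(Path(Rect(Point(0, 0), Point(10, 10))));
+ *     pv.push_back(Path(Rect(Point(5, 5), Point(15, 15))));
+ *     CrossingSet cs = crossings_among(pv);
+ *     std::cout << cs[0].size() << " crossings involve the first path\n";
+ */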
+
+}
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/path-sink.cpp b/src/2geom/path-sink.cpp
new file mode 100644
index 0000000..1a22c81
--- /dev/null
+++ b/src/2geom/path-sink.cpp
@@ -0,0 +1,104 @@
+/*
+ * callback interface for SVG path data
+ *
+ * Copyright 2007 MenTaLguY <mental@rydia.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ */
+
+#include <2geom/sbasis-to-bezier.h>
+#include <2geom/path-sink.h>
+#include <2geom/exception.h>
+#include <2geom/circle.h>
+#include <2geom/ellipse.h>
+
+namespace Geom {
+
+void PathSink::feed(Curve const &c, bool moveto_initial)
+{
+ c.feed(*this, moveto_initial);
+}
+
+void PathSink::feed(Path const &path) {
+ flush();
+ moveTo(path.front().initialPoint());
+
+ // never output the closing segment to the sink
+ Path::const_iterator iter = path.begin(), last = path.end_open();
+ for (; iter != last; ++iter) {
+ iter->feed(*this, false);
+ }
+ if (path.closed()) {
+ closePath();
+ }
+ flush();
+}
+
+void PathSink::feed(PathVector const &pv) {
+ for (const auto & i : pv) {
+ feed(i);
+ }
+}
+
+void PathSink::feed(Rect const &r) {
+ moveTo(r.corner(0));
+ lineTo(r.corner(1));
+ lineTo(r.corner(2));
+ lineTo(r.corner(3));
+ closePath();
+}
+
+void PathSink::feed(Circle const &e) {
+ Coord r = e.radius();
+ Point c = e.center();
+ Point a = c + Point(0, +r);
+ Point b = c + Point(0, -r);
+
+ moveTo(a);
+ arcTo(r, r, 0, false, false, b);
+ arcTo(r, r, 0, false, false, a);
+ closePath();
+}
+
+void PathSink::feed(Ellipse const &e) {
+ Point s = e.pointAt(0);
+ moveTo(s);
+ arcTo(e.ray(X), e.ray(Y), e.rotationAngle(), false, false, e.pointAt(M_PI));
+ arcTo(e.ray(X), e.ray(Y), e.rotationAngle(), false, false, s);
+ closePath();
+}
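+
+/* Usage sketch (illustrative only, not part of the library): any PathSink
+ * subclass can consume these feed() overloads; SVGPathWriter, used elsewhere
+ * in 2geom, turns the fed geometry into SVG path data.
+ *
+ *     SVGPathWriter writer;
+ *     writer.feed(Rect(Point(0, 0), Point(100, 50)));
+ *     std::cout << writer.str() << "\n";   // e.g. "M 0,0 L 100,0 ..."
+ */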
+
+}
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/path.cpp b/src/2geom/path.cpp
new file mode 100644
index 0000000..aeff503
--- /dev/null
+++ b/src/2geom/path.cpp
@@ -0,0 +1,1161 @@
+/** @file
+ * @brief Path - a sequence of contiguous curves (implementation file)
+ *//*
+ * Authors:
+ * MenTaLguY <mental@rydia.net>
+ * Marco Cecchetti <mrcekets at gmail.com>
+ * Krzysztof Kosiński <tweenk.pl@gmail.com>
+ *
+ * Copyright 2007-2014 authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+#include <2geom/path.h>
+#include <2geom/pathvector.h>
+#include <2geom/transforms.h>
+#include <2geom/circle.h>
+#include <2geom/ellipse.h>
+#include <2geom/convex-hull.h>
+#include <2geom/svg-path-writer.h>
+#include <2geom/sweeper.h>
+#include <algorithm>
+#include <limits>
+#include <optional>
+
+using std::swap;
+using namespace Geom::PathInternal;
+
+namespace Geom {
+
+// this represents an empty interval
+PathInterval::PathInterval()
+ : _from(0, 0.0)
+ , _to(0, 0.0)
+ , _path_size(1)
+ , _cross_start(false)
+ , _reverse(false)
+{}
+
+PathInterval::PathInterval(PathTime const &from, PathTime const &to, bool cross_start, size_type path_size)
+ : _from(from)
+ , _to(to)
+ , _path_size(path_size)
+ , _cross_start(cross_start)
+ , _reverse((to < from) ^ cross_start)
+{
+ if (_reverse) {
+ _to.normalizeForward(_path_size);
+ if (cross_start && _to < to) {
+ // Normalization made us cross start (closed path),
+ // so we don't need to cross the start anymore.
+ _cross_start = false;
+ }
+ if (_from != _to) {
+ _from.normalizeBackward(_path_size);
+ if (cross_start && _from > from) {
+ // Normalization backwards made us logically cross
+ // the start – we shouldn't cross the start again.
+ _cross_start = false;
+ }
+ }
+ } else {
+ _from.normalizeForward(_path_size);
+ if (cross_start && _from < from) {
+ _cross_start = false;
+ }
+ if (_from != _to) {
+ _to.normalizeBackward(_path_size);
+ if (cross_start && _to > to) {
+ _cross_start = false;
+ }
+ }
+ }
+
+ if (_from == _to) {
+ _reverse = false;
+ _cross_start = false;
+ }
+}
+
+bool PathInterval::contains(PathTime const &pos) const {
+ if (_cross_start) {
+ if (_reverse) {
+ return pos >= _to || _from >= pos;
+ } else {
+ return pos >= _from || _to >= pos;
+ }
+ } else {
+ if (_reverse) {
+ return _to <= pos && pos <= _from;
+ } else {
+ return _from <= pos && pos <= _to;
+ }
+ }
+}
+
+PathInterval::size_type PathInterval::curveCount() const
+{
+ if (isDegenerate()) return 0;
+ if (_cross_start) {
+ if (_reverse) {
+ return _path_size - _to.curve_index + _from.curve_index + 1;
+ } else {
+ return _path_size - _from.curve_index + _to.curve_index + 1;
+ }
+ } else {
+ if (_reverse) {
+ return _from.curve_index - _to.curve_index + 1;
+ } else {
+ return _to.curve_index - _from.curve_index + 1;
+ }
+ }
+}
+
+PathTime PathInterval::inside(Coord min_dist) const
+{
+ // If there is some node further than min_dist (in time coord) from the ends,
+ // return that node. Otherwise, return the middle.
+ PathTime result(0, 0.0);
+
+ if (!_cross_start && _from.curve_index == _to.curve_index) {
+ PathTime result(_from.curve_index, lerp(0.5, _from.t, _to.t));
+ return result;
+ }
+ // If _cross_start, then we can be sure that at least one node is in the domain.
+ // If dcurve == 0, it actually means that all curves are included in the domain
+
+ if (_reverse) {
+ size_type dcurve = (_path_size + _from.curve_index - _to.curve_index) % _path_size;
+ bool from_close = _from.t < min_dist;
+ bool to_close = _to.t > 1 - min_dist;
+
+ if (dcurve == 0) {
+ dcurve = _path_size;
+ }
+
+ if (dcurve == 1) {
+ if (from_close || to_close) {
+ result.curve_index = _from.curve_index;
+ Coord tmid = _from.t - ((1 - _to.t) + _from.t) * 0.5;
+ if (tmid < 0) {
+ result.curve_index = (_path_size + result.curve_index - 1) % _path_size;
+ tmid += 1;
+ }
+ result.t = tmid;
+ return result;
+ }
+
+ result.curve_index = _from.curve_index;
+ return result;
+ }
+
+ result.curve_index = (_to.curve_index + 1) % _path_size;
+ if (to_close) {
+ if (dcurve == 2) {
+ result.t = 0.5;
+ } else {
+ result.curve_index = (result.curve_index + 1) % _path_size;
+ }
+ }
+ return result;
+ } else {
+ size_type dcurve = (_path_size + _to.curve_index - _from.curve_index) % _path_size;
+ bool from_close = _from.t > 1 - min_dist;
+ bool to_close = _to.t < min_dist;
+
+ if (dcurve == 0) {
+ dcurve = _path_size;
+ }
+
+ if (dcurve == 1) {
+ if (from_close || to_close) {
+ result.curve_index = _from.curve_index;
+ Coord tmid = ((1 - _from.t) + _to.t) * 0.5 + _from.t;
+ if (tmid >= 1) {
+ result.curve_index = (result.curve_index + 1) % _path_size;
+ tmid -= 1;
+ }
+ result.t = tmid;
+ return result;
+ }
+
+ result.curve_index = _to.curve_index;
+ return result;
+ }
+
+ result.curve_index = (_from.curve_index + 1) % _path_size;
+ if (from_close) {
+ if (dcurve == 2) {
+ result.t = 0.5;
+ } else {
+ result.curve_index = (result.curve_index + 1) % _path_size;
+ }
+ }
+ return result;
+ }
+
+ result.curve_index = _reverse ? _from.curve_index : _to.curve_index;
+ return result;
+}
+
+PathInterval PathInterval::from_direction(PathTime const &from, PathTime const &to, bool reversed, size_type path_size)
+{
+ PathInterval result;
+ result._from = from;
+ result._to = to;
+ result._path_size = path_size;
+
+ if (reversed) {
+ result._to.normalizeForward(path_size);
+ if (result._from != result._to) {
+ result._from.normalizeBackward(path_size);
+ }
+ } else {
+ result._from.normalizeForward(path_size);
+ if (result._from != result._to) {
+ result._to.normalizeBackward(path_size);
+ }
+ }
+
+ if (result._from == result._to) {
+ result._reverse = false;
+ result._cross_start = false;
+ } else {
+ result._reverse = reversed;
+ if (reversed) {
+ result._cross_start = from < to;
+ } else {
+ result._cross_start = to < from;
+ }
+ }
+ return result;
+}
+
+
+Path::Path(Rect const &r)
+ : _data(new PathData())
+ , _closing_seg(new ClosingSegment(r.corner(3), r.corner(0)))
+ , _closed(true)
+ , _exception_on_stitch(true)
+{
+ for (unsigned i = 0; i < 3; ++i) {
+ _data->curves.push_back(new LineSegment(r.corner(i), r.corner(i+1)));
+ }
+ _data->curves.push_back(_closing_seg);
+}
+
+Path::Path(ConvexHull const &ch)
+ : _data(new PathData())
+ , _closing_seg(new ClosingSegment(Point(), Point()))
+ , _closed(true)
+ , _exception_on_stitch(true)
+{
+ if (ch.empty()) {
+ _data->curves.push_back(_closing_seg);
+ return;
+ }
+
+ _closing_seg->setInitial(ch.back());
+ _closing_seg->setFinal(ch.front());
+
+ Point last = ch.front();
+
+ for (std::size_t i = 1; i < ch.size(); ++i) {
+ _data->curves.push_back(new LineSegment(last, ch[i]));
+ last = ch[i];
+ }
+
+ _data->curves.push_back(_closing_seg);
+ _closed = true;
+}
+
+Path::Path(Circle const &c)
+ : _data(new PathData())
+ , _closing_seg(NULL)
+ , _closed(true)
+ , _exception_on_stitch(true)
+{
+ Point p1 = c.pointAt(0);
+ Point p2 = c.pointAt(M_PI);
+ _data->curves.push_back(new EllipticalArc(p1, c.radius(), c.radius(), 0, false, true, p2));
+ _data->curves.push_back(new EllipticalArc(p2, c.radius(), c.radius(), 0, false, true, p1));
+ _closing_seg = new ClosingSegment(p1, p1);
+ _data->curves.push_back(_closing_seg);
+}
+
+Path::Path(Ellipse const &e)
+ : _data(new PathData())
+ , _closing_seg(NULL)
+ , _closed(true)
+ , _exception_on_stitch(true)
+{
+ Point p1 = e.pointAt(0);
+ Point p2 = e.pointAt(M_PI);
+ _data->curves.push_back(new EllipticalArc(p1, e.rays(), e.rotationAngle(), false, true, p2));
+ _data->curves.push_back(new EllipticalArc(p2, e.rays(), e.rotationAngle(), false, true, p1));
+ _closing_seg = new ClosingSegment(p1, p1);
+ _data->curves.push_back(_closing_seg);
+}
+
+void Path::close(bool c)
+{
+ if (c == _closed) return;
+ if (c && _data->curves.size() >= 2) {
+ // when closing, if last segment is linear and ends at initial point,
+ // replace it with the closing segment
+ Sequence::iterator last = _data->curves.end() - 2;
+ if (last->isLineSegment() && last->finalPoint() == initialPoint()) {
+ _closing_seg->setInitial(last->initialPoint());
+ _data->curves.erase(last);
+ }
+ }
+ _closed = c;
+}
+
+void Path::clear()
+{
+ _unshare();
+ _data->curves.pop_back().release();
+ _data->curves.clear();
+ _closing_seg->setInitial(Point(0, 0));
+ _closing_seg->setFinal(Point(0, 0));
+ _data->curves.push_back(_closing_seg);
+ _closed = false;
+}
+
+OptRect Path::boundsFast() const
+{
+ OptRect bounds;
+ if (empty()) {
+ return bounds;
+ }
+ // if the path is not empty, we look for cached bounds
+ if (_data->fast_bounds) {
+ return _data->fast_bounds;
+ }
+
+ bounds = front().boundsFast();
+ const_iterator iter = begin();
+ // the closing path segment can be ignored, because it will always
+ // lie within the bbox of the rest of the path
+ if (iter != end()) {
+ for (++iter; iter != end(); ++iter) {
+ bounds.unionWith(iter->boundsFast());
+ }
+ }
+ _data->fast_bounds = bounds;
+ return bounds;
+}
+
+OptRect Path::boundsExact() const
+{
+ OptRect bounds;
+ if (empty())
+ return bounds;
+ bounds = front().boundsExact();
+ const_iterator iter = begin();
+ // the closing path segment can be ignored, because it will always lie within the bbox of the rest of the path
+ if (iter != end()) {
+ for (++iter; iter != end(); ++iter) {
+ bounds.unionWith(iter->boundsExact());
+ }
+ }
+ return bounds;
+}
+
+Piecewise<D2<SBasis> > Path::toPwSb() const
+{
+ Piecewise<D2<SBasis> > ret;
+ ret.push_cut(0);
+ unsigned i = 1;
+ bool degenerate = true;
+    // A Piecewise<D2<SBasis>> is always open, so if the path is closed, its closing segment is included as well.
+ for (const_iterator it = begin(); it != end_default(); ++it) {
+ if (!it->isDegenerate()) {
+ ret.push(it->toSBasis(), i++);
+ degenerate = false;
+ }
+ }
+ if (degenerate) {
+ // if path only contains degenerate curves, no second cut is added
+ // so we need to create at least one segment manually
+ ret = Piecewise<D2<SBasis> >(initialPoint());
+ }
+ return ret;
+}
+
+template <typename iter>
+iter inc(iter const &x, unsigned n) {
+ iter ret = x;
+ for (unsigned i = 0; i < n; i++)
+ ret++;
+ return ret;
+}
+
+bool Path::operator==(Path const &other) const
+{
+ if (this == &other)
+ return true;
+ if (_closed != other._closed)
+ return false;
+ return _data->curves == other._data->curves;
+}
+
+void Path::start(Point const &p) {
+ if (_data->curves.size() > 1) {
+ clear();
+ }
+ _closing_seg->setInitial(p);
+ _closing_seg->setFinal(p);
+}
+
+Interval Path::timeRange() const
+{
+ Interval ret(0, size_default());
+ return ret;
+}
+
+Curve const &Path::curveAt(Coord t, Coord *rest) const
+{
+ PathTime pos = _factorTime(t);
+ if (rest) {
+ *rest = pos.t;
+ }
+ return at(pos.curve_index);
+}
+
+Point Path::pointAt(Coord t) const
+{
+ return pointAt(_factorTime(t));
+}
+
+Coord Path::valueAt(Coord t, Dim2 d) const
+{
+ return valueAt(_factorTime(t), d);
+}
+
+Curve const &Path::curveAt(PathTime const &pos) const
+{
+ return at(pos.curve_index);
+}
+Point Path::pointAt(PathTime const &pos) const
+{
+ return at(pos.curve_index).pointAt(pos.t);
+}
+Coord Path::valueAt(PathTime const &pos, Dim2 d) const
+{
+ return at(pos.curve_index).valueAt(pos.t, d);
+}
+
+std::vector<PathTime> Path::roots(Coord v, Dim2 d) const
+{
+ std::vector<PathTime> res;
+ for (unsigned i = 0; i < size(); i++) {
+ std::vector<Coord> temp = (*this)[i].roots(v, d);
+ for (double j : temp)
+ res.emplace_back(i, j);
+ }
+ return res;
+}
+
+
+// The class below implements sweepline optimization for curve intersection in paths.
+// Instead of O(N^2), this takes O(N + X), where X is the number of overlaps
+// between the bounding boxes of curves.
+
+struct CurveIntersectionSweepSet
+{
+public:
+ struct CurveRecord {
+ boost::intrusive::list_member_hook<> _hook;
+ Curve const *curve;
+ Rect bounds;
+ std::size_t index;
+ unsigned which;
+
+ CurveRecord(Curve const *pc, std::size_t idx, unsigned w)
+ : curve(pc)
+ , bounds(curve->boundsFast())
+ , index(idx)
+ , which(w)
+ {}
+ };
+
+ typedef std::vector<CurveRecord>::const_iterator ItemIterator;
+
+ CurveIntersectionSweepSet(std::vector<PathIntersection> &result,
+ Path const &a, Path const &b, Coord precision)
+ : _result(result)
+ , _precision(precision)
+ , _sweep_dir(X)
+ {
+ std::size_t asz = a.size(), bsz = b.size();
+ _records.reserve(asz + bsz);
+
+ for (std::size_t i = 0; i < asz; ++i) {
+ _records.emplace_back(&a[i], i, 0);
+ }
+ for (std::size_t i = 0; i < bsz; ++i) {
+ _records.emplace_back(&b[i], i, 1);
+ }
+
+ OptRect abb = a.boundsFast() | b.boundsFast();
+ if (abb && abb->height() > abb->width()) {
+ _sweep_dir = Y;
+ }
+ }
+
+ std::vector<CurveRecord> const &items() { return _records; }
+ Interval itemBounds(ItemIterator ii) {
+ return ii->bounds[_sweep_dir];
+ }
+
+ void addActiveItem(ItemIterator ii) {
+ unsigned w = ii->which;
+ unsigned ow = (w+1) % 2;
+
+ _active[w].push_back(const_cast<CurveRecord&>(*ii));
+
+ for (auto & i : _active[ow]) {
+ if (!ii->bounds.intersects(i.bounds)) continue;
+ std::vector<CurveIntersection> cx = ii->curve->intersect(*i.curve, _precision);
+ for (auto & k : cx) {
+ PathTime tw(ii->index, k.first), tow(i.index, k.second);
+ _result.emplace_back(
+ w == 0 ? tw : tow,
+ w == 0 ? tow : tw,
+ k.point());
+ }
+ }
+ }
+ void removeActiveItem(ItemIterator ii) {
+ ActiveCurveList &acl = _active[ii->which];
+ acl.erase(acl.iterator_to(*ii));
+ }
+
+private:
+ typedef boost::intrusive::list
+ < CurveRecord
+ , boost::intrusive::member_hook
+ < CurveRecord
+ , boost::intrusive::list_member_hook<>
+ , &CurveRecord::_hook
+ >
+ > ActiveCurveList;
+
+ std::vector<CurveRecord> _records;
+ std::vector<PathIntersection> &_result;
+ ActiveCurveList _active[2];
+ Coord _precision;
+ Dim2 _sweep_dir;
+};
+
+std::vector<PathIntersection> Path::intersect(Path const &other, Coord precision) const
+{
+ std::vector<PathIntersection> result;
+
+ CurveIntersectionSweepSet cisset(result, *this, other, precision);
+ Sweeper<CurveIntersectionSweepSet> sweeper(cisset);
+ sweeper.process();
+
+ // preprocessing to remove duplicate intersections at endpoints
+ std::size_t asz = size(), bsz = other.size();
+ for (auto & i : result) {
+ i.first.normalizeForward(asz);
+ i.second.normalizeForward(bsz);
+ }
+ std::sort(result.begin(), result.end());
+ result.erase(std::unique(result.begin(), result.end()), result.end());
+
+ return result;
+}
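+
+/* Usage sketch (illustrative only, not part of the library): the sweepline
+ * pass above keeps the work proportional to the number of overlapping curve
+ * bounding boxes rather than to all curve pairs.
+ *
+ *     Path a(Rect(Point(0, 0), Point(10, 10)));
+ *     Path b(Rect(Point(5, 5), Point(15, 15)));
+ *     std::vector<PathIntersection> xs = a.intersect(b, 1e-6);
+ *     for (PathIntersection const &x : xs) {
+ *         Point pt = x.point();
+ *         std::cout << pt[X] << ", " << pt[Y] << " at a-time "
+ *                   << x.first.curve_index + x.first.t << "\n";
+ *     }
+ */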
+
+int Path::winding(Point const &p) const {
+ int wind = 0;
+
+ /* To handle all the edge cases, we consider the maximum Y edge of the bounding box
+ * as not included in box. This way paths that contain linear horizontal
+ * segments will be treated correctly. */
+ for (const_iterator i = begin(); i != end_closed(); ++i) {
+ Rect bounds = i->boundsFast();
+
+ if (bounds.height() == 0) continue;
+ if (p[X] > bounds.right() || !bounds[Y].lowerContains(p[Y])) {
+ // Ray doesn't intersect bbox, so we ignore this segment
+ continue;
+ }
+
+ if (p[X] < bounds.left()) {
+ /* Ray intersects the curve's bbox, but the point is outside it.
+ * The winding contribution is exactly the same as that
+ * of a linear segment with the same initial and final points. */
+ Point ip = i->initialPoint();
+ Point fp = i->finalPoint();
+ Rect eqbox(ip, fp);
+
+ if (eqbox[Y].lowerContains(p[Y])) {
+ /* The ray intersects the equivalent linear segment.
+ * Determine winding contribution based on its derivative. */
+ if (ip[Y] < fp[Y]) {
+ wind += 1;
+ } else if (ip[Y] > fp[Y]) {
+ wind -= 1;
+ } else {
+ // should never happen, because bounds.height() was not zero
+ assert(false);
+ }
+ }
+ } else {
+ // point is inside bbox
+ wind += i->winding(p);
+ }
+ }
+ return wind;
+}
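+
+/* Usage sketch (illustrative only, not part of the library): a point inside a
+ * closed rectangular path gets a nonzero winding number, a point outside gets 0.
+ *
+ *     Path r(Rect(Point(0, 0), Point(10, 10)));
+ *     int inside  = r.winding(Point(5, 5));    // nonzero
+ *     int outside = r.winding(Point(20, 20));  // 0
+ */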
+
+std::vector<double> Path::allNearestTimes(Point const &_point, double from, double to) const
+{
+ // TODO from and to are not used anywhere.
+ // rewrite this to simplify.
+ using std::swap;
+
+ if (from > to)
+ swap(from, to);
+ const Path &_path = *this;
+ unsigned int sz = _path.size();
+ if (_path.closed())
+ ++sz;
+ if (from < 0 || to > sz) {
+ THROW_RANGEERROR("[from,to] interval out of bounds");
+ }
+ double sif, st = modf(from, &sif);
+ double eif, et = modf(to, &eif);
+ unsigned int si = static_cast<unsigned int>(sif);
+ unsigned int ei = static_cast<unsigned int>(eif);
+ if (si == sz) {
+ --si;
+ st = 1;
+ }
+ if (ei == sz) {
+ --ei;
+ et = 1;
+ }
+ if (si == ei) {
+ std::vector<double> all_nearest = _path[si].allNearestTimes(_point, st, et);
+ for (double & i : all_nearest) {
+ i = si + i;
+ }
+ return all_nearest;
+ }
+ std::vector<double> all_t;
+ std::vector<std::vector<double> > all_np;
+ all_np.push_back(_path[si].allNearestTimes(_point, st));
+ std::vector<unsigned int> ni;
+ ni.push_back(si);
+ double dsq;
+ double mindistsq = distanceSq(_point, _path[si].pointAt(all_np.front().front()));
+ Rect bb(Geom::Point(0, 0), Geom::Point(0, 0));
+ for (unsigned int i = si + 1; i < ei; ++i) {
+ bb = (_path[i].boundsFast());
+ dsq = distanceSq(_point, bb);
+ if (mindistsq < dsq)
+ continue;
+ all_t = _path[i].allNearestTimes(_point);
+ dsq = distanceSq(_point, _path[i].pointAt(all_t.front()));
+ if (mindistsq > dsq) {
+ all_np.clear();
+ all_np.push_back(all_t);
+ ni.clear();
+ ni.push_back(i);
+ mindistsq = dsq;
+ } else if (mindistsq == dsq) {
+ all_np.push_back(all_t);
+ ni.push_back(i);
+ }
+ }
+ bb = (_path[ei].boundsFast());
+ dsq = distanceSq(_point, bb);
+ if (mindistsq >= dsq) {
+ all_t = _path[ei].allNearestTimes(_point, 0, et);
+ dsq = distanceSq(_point, _path[ei].pointAt(all_t.front()));
+ if (mindistsq > dsq) {
+ for (double & i : all_t) {
+ i = ei + i;
+ }
+ return all_t;
+ } else if (mindistsq == dsq) {
+ all_np.push_back(all_t);
+ ni.push_back(ei);
+ }
+ }
+ std::vector<double> all_nearest;
+ for (unsigned int i = 0; i < all_np.size(); ++i) {
+ for (unsigned int j = 0; j < all_np[i].size(); ++j) {
+ all_nearest.push_back(ni[i] + all_np[i][j]);
+ }
+ }
+ all_nearest.erase(std::unique(all_nearest.begin(), all_nearest.end()), all_nearest.end());
+ return all_nearest;
+}
+
+std::vector<Coord> Path::nearestTimePerCurve(Point const &p) const
+{
+ // return a single nearest time for each curve in this path
+ std::vector<Coord> np;
+ for (const_iterator it = begin(); it != end_default(); ++it) {
+ np.push_back(it->nearestTime(p));
+ }
+ return np;
+}
+
+PathTime Path::nearestTime(Point const &p, Coord *dist) const
+{
+ Coord mindist = std::numeric_limits<Coord>::max();
+ PathTime ret;
+
+ if (_data->curves.size() == 1) {
+ // naked moveto
+ ret.curve_index = 0;
+ ret.t = 0;
+ if (dist) {
+ *dist = distance(_closing_seg->initialPoint(), p);
+ }
+ return ret;
+ }
+
+ for (size_type i = 0; i < size_default(); ++i) {
+ Curve const &c = at(i);
+ if (distance(p, c.boundsFast()) >= mindist) continue;
+
+ Coord t = c.nearestTime(p);
+ Coord d = distance(c.pointAt(t), p);
+ if (d < mindist) {
+ mindist = d;
+ ret.curve_index = i;
+ ret.t = t;
+ }
+ }
+ if (dist) {
+ *dist = mindist;
+ }
+
+ return ret;
+}
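+
+/* Usage sketch (illustrative only, not part of the library): nearestTime()
+ * returns a PathTime (curve index plus local t) and optionally the distance.
+ *
+ *     Path r(Rect(Point(0, 0), Point(10, 10)));
+ *     Coord dist;
+ *     PathTime pt = r.nearestTime(Point(12, 5), &dist);
+ *     Point closest = r.pointAt(pt);           // (10, 5), with dist == 2
+ */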
+
+std::vector<Point> Path::nodes() const
+{
+ std::vector<Point> result;
+ size_type path_size = size_closed();
+ for (size_type i = 0; i < path_size; ++i) {
+ result.push_back(_data->curves[i].initialPoint());
+ }
+ return result;
+}
+
+void Path::appendPortionTo(Path &ret, double from, double to) const
+{
+ if (!(from >= 0 && to >= 0)) {
+ THROW_RANGEERROR("from and to must be >=0 in Path::appendPortionTo");
+ }
+ if (to == 0)
+ to = size() + 0.999999;
+ if (from == to) {
+ return;
+ }
+ double fi, ti;
+ double ff = modf(from, &fi), tf = modf(to, &ti);
+ if (tf == 0) {
+ ti--;
+ tf = 1;
+ }
+ const_iterator fromi = inc(begin(), (unsigned)fi);
+ if (fi == ti && from < to) {
+ ret.append(fromi->portion(ff, tf));
+ return;
+ }
+ const_iterator toi = inc(begin(), (unsigned)ti);
+ if (ff != 1.) {
+ // fromv->setInitial(ret.finalPoint());
+ ret.append(fromi->portion(ff, 1.));
+ }
+ if (from >= to) {
+ const_iterator ender = end();
+ if (ender->initialPoint() == ender->finalPoint())
+ ++ender;
+ ret.insert(ret.end(), ++fromi, ender);
+ ret.insert(ret.end(), begin(), toi);
+ } else {
+ ret.insert(ret.end(), ++fromi, toi);
+ }
+ ret.append(toi->portion(0., tf));
+}
+
+void Path::appendPortionTo(Path &target, PathInterval const &ival,
+ std::optional<Point> const &p_from, std::optional<Point> const &p_to) const
+{
+ assert(ival.pathSize() == size_closed());
+
+ if (ival.isDegenerate()) {
+ Point stitch_to = p_from ? *p_from : pointAt(ival.from());
+ target.stitchTo(stitch_to);
+ return;
+ }
+
+ PathTime const &from = ival.from(), &to = ival.to();
+
+ bool reverse = ival.reverse();
+ int di = reverse ? -1 : 1;
+ size_type s = size_closed();
+
+ if (!ival.crossesStart() && from.curve_index == to.curve_index) {
+ Curve *c = (*this)[from.curve_index].portion(from.t, to.t);
+ if (p_from) {
+ c->setInitial(*p_from);
+ }
+ if (p_to) {
+ c->setFinal(*p_to);
+ }
+ target.append(c);
+ } else {
+ Curve *c_first = (*this)[from.curve_index].portion(from.t, reverse ? 0 : 1);
+ if (p_from) {
+ c_first->setInitial(*p_from);
+ }
+ target.append(c_first);
+
+ for (size_type i = (from.curve_index + s + di) % s; i != to.curve_index;
+ i = (i + s + di) % s)
+ {
+ if (reverse) {
+ target.append((*this)[i].reverse());
+ } else {
+ target.append((*this)[i].duplicate());
+ }
+ }
+
+ Curve *c_last = (*this)[to.curve_index].portion(reverse ? 1 : 0, to.t);
+ if (p_to) {
+ c_last->setFinal(*p_to);
+ }
+ target.append(c_last);
+ }
+}
+
+Path Path::reversed() const
+{
+ typedef std::reverse_iterator<Sequence::const_iterator> RIter;
+
+ Path ret(finalPoint());
+ if (empty()) return ret;
+
+ ret._data->curves.pop_back(); // this also deletes the closing segment from ret
+
+ RIter iter(_includesClosingSegment() ? _data->curves.end() : _data->curves.end() - 1);
+ RIter rend(_data->curves.begin());
+
+ if (_closed) {
+ // when the path is closed, there are two cases:
+ if (front().isLineSegment()) {
+ // 1. initial segment is linear: it becomes the new closing segment.
+ rend = RIter(_data->curves.begin() + 1);
+ ret._closing_seg = new ClosingSegment(front().finalPoint(), front().initialPoint());
+ } else {
+ // 2. initial segment is not linear: the closing segment becomes degenerate.
+ // However, skip it if it's already degenerate.
+ Point fp = finalPoint();
+ ret._closing_seg = new ClosingSegment(fp, fp);
+ }
+ } else {
+ // when the path is open, we reverse all real curves, and add a reversed closing segment.
+ ret._closing_seg = static_cast<ClosingSegment *>(_closing_seg->reverse());
+ }
+
+ for (; iter != rend; ++iter) {
+ ret._data->curves.push_back(iter->reverse());
+ }
+ ret._data->curves.push_back(ret._closing_seg);
+ ret._closed = _closed;
+ return ret;
+}
+
+
+void Path::insert(iterator pos, Curve const &curve)
+{
+ _unshare();
+ Sequence::iterator seq_pos(seq_iter(pos));
+ Sequence source;
+ source.push_back(curve.duplicate());
+ do_update(seq_pos, seq_pos, source);
+}
+
+void Path::erase(iterator pos)
+{
+ _unshare();
+ Sequence::iterator seq_pos(seq_iter(pos));
+
+ Sequence stitched;
+ do_update(seq_pos, seq_pos + 1, stitched);
+}
+
+void Path::erase(iterator first, iterator last)
+{
+ _unshare();
+ Sequence::iterator seq_first = seq_iter(first);
+ Sequence::iterator seq_last = seq_iter(last);
+
+ Sequence stitched;
+ do_update(seq_first, seq_last, stitched);
+}
+
+void Path::stitchTo(Point const &p)
+{
+ if (!empty() && _closing_seg->initialPoint() != p) {
+ if (_exception_on_stitch) {
+ THROW_CONTINUITYERROR();
+ }
+ _unshare();
+ do_append(new StitchSegment(_closing_seg->initialPoint(), p));
+ }
+}
+
+void Path::replace(iterator replaced, Curve const &curve)
+{
+ replace(replaced, replaced + 1, curve);
+}
+
+void Path::replace(iterator first_replaced, iterator last_replaced, Curve const &curve)
+{
+ _unshare();
+ Sequence::iterator seq_first_replaced(seq_iter(first_replaced));
+ Sequence::iterator seq_last_replaced(seq_iter(last_replaced));
+ Sequence source(1);
+ source.push_back(curve.duplicate());
+
+ do_update(seq_first_replaced, seq_last_replaced, source);
+}
+
+void Path::replace(iterator replaced, Path const &path)
+{
+ replace(replaced, path.begin(), path.end());
+}
+
+void Path::replace(iterator first, iterator last, Path const &path)
+{
+ replace(first, last, path.begin(), path.end());
+}
+
+void Path::snapEnds(Coord precision)
+{
+ if (!_closed) return;
+ if (_data->curves.size() > 1 && are_near(_closing_seg->length(precision), 0, precision)) {
+ _unshare();
+ _closing_seg->setInitial(_closing_seg->finalPoint());
+ (_data->curves.end() - 1)->setFinal(_closing_seg->finalPoint());
+ }
+}
+
+Path Path::withoutDegenerateCurves() const
+{
+ Sequence cleaned;
+ cleaned.reserve(size());
+
+ for (auto it = begin(); it != end_open(); ++it) {
+ if (!it->isDegenerate()) {
+ cleaned.push_back(it->duplicate());
+ }
+ }
+
+ Path result;
+ result._closed = _closed;
+ result.do_update(result._data->curves.begin(), result._data->curves.end(), cleaned);
+ return result;
+}
+
+// Replace curves between first and last with the contents of source.
+void Path::do_update(Sequence::iterator first, Sequence::iterator last, Sequence &source)
+{
+ // TODO: handle cases where first > last in closed paths?
+ bool last_beyond_closing_segment = (last == _data->curves.end());
+
+ // special case:
+ // if do_update replaces the closing segment, we have to regenerate it
+ if (source.empty()) {
+ if (first == last) return; // nothing to do
+
+ // only removing some segments
+ if ((!_closed && first == _data->curves.begin()) || (!_closed && last == _data->curves.end() - 1) || last_beyond_closing_segment) {
+ // just adjust the closing segment
+ // do nothing
+ } else if (first->initialPoint() != (last - 1)->finalPoint()) {
+ if (_exception_on_stitch) {
+ THROW_CONTINUITYERROR();
+ }
+ source.push_back(new StitchSegment(first->initialPoint(), (last - 1)->finalPoint()));
+ }
+ } else {
+ // replacing
+ if (first == _data->curves.begin() && last == _data->curves.end()) {
+ // special case: replacing everything should work the same in open and closed curves
+ _data->curves.erase(_data->curves.begin(), _data->curves.end() - 1);
+ _closing_seg->setFinal(source.front().initialPoint());
+ _closing_seg->setInitial(source.back().finalPoint());
+ _data->curves.transfer(_data->curves.begin(), source.begin(), source.end(), source);
+ return;
+ }
+
+ // stitch in front
+ if (!_closed && first == _data->curves.begin()) {
+ // not necessary to stitch in front
+ } else if (first->initialPoint() != source.front().initialPoint()) {
+ if (_exception_on_stitch) {
+ THROW_CONTINUITYERROR();
+ }
+ source.insert(source.begin(), new StitchSegment(first->initialPoint(), source.front().initialPoint()));
+ }
+
+ // stitch at the end
+ if ((!_closed && last == _data->curves.end() - 1) || last_beyond_closing_segment) {
+ // repurpose the closing segment as the stitch segment
+ // do nothing
+ } else if (source.back().finalPoint() != (last - 1)->finalPoint()) {
+ if (_exception_on_stitch) {
+ THROW_CONTINUITYERROR();
+ }
+ source.push_back(new StitchSegment(source.back().finalPoint(), (last - 1)->finalPoint()));
+ }
+ }
+
+ // do not erase the closing segment, adjust it instead
+ if (last_beyond_closing_segment) {
+ --last;
+ }
+ _data->curves.erase(first, last);
+ _data->curves.transfer(first, source.begin(), source.end(), source);
+
+ // adjust closing segment
+ if (size_open() == 0) {
+ _closing_seg->setFinal(_closing_seg->initialPoint());
+ } else {
+ _closing_seg->setInitial(back_open().finalPoint());
+ _closing_seg->setFinal(front().initialPoint());
+ }
+
+ checkContinuity();
+}
+
+void Path::do_append(Curve *c)
+{
+ if (&_data->curves.front() == _closing_seg) {
+ _closing_seg->setFinal(c->initialPoint());
+ } else {
+ // if we can't freely move the closing segment, we check whether
+ // the new curve connects with the last non-closing curve
+ if (c->initialPoint() != _closing_seg->initialPoint()) {
+ THROW_CONTINUITYERROR();
+ }
+ if (_closed && c->isLineSegment() &&
+ c->finalPoint() == _closing_seg->finalPoint())
+ {
+ // appending a curve that matches the closing segment has no effect
+ delete c;
+ return;
+ }
+ }
+ _data->curves.insert(_data->curves.end() - 1, c);
+ _closing_seg->setInitial(c->finalPoint());
+}
+
+void Path::checkContinuity() const
+{
+ Sequence::const_iterator i = _data->curves.begin(), j = _data->curves.begin();
+ ++j;
+ for (; j != _data->curves.end(); ++i, ++j) {
+ if (i->finalPoint() != j->initialPoint()) {
+ THROW_CONTINUITYERROR();
+ }
+ }
+ if (_data->curves.front().initialPoint() != _data->curves.back().finalPoint()) {
+ THROW_CONTINUITYERROR();
+ }
+}
+
+// breaks time value into integral and fractional part
+PathTime Path::_factorTime(Coord t) const
+{
+ size_type sz = size_default();
+ if (t < 0 || t > sz) {
+ THROW_RANGEERROR("parameter t out of bounds");
+ }
+
+ PathTime ret;
+ Coord k;
+ ret.t = modf(t, &k);
+ ret.curve_index = k;
+ if (ret.curve_index == sz) {
+ --ret.curve_index;
+ ret.t = 1;
+ }
+ return ret;
+}
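+
+/* Usage sketch (illustrative only, not part of the library): the public
+ * time-based accessors rely on this factoring, e.g. t = 2.25 addresses the
+ * third curve at local time 0.25.
+ *
+ *     Path r(Rect(Point(0, 0), Point(10, 10)));
+ *     Coord rest;
+ *     Curve const &c = r.curveAt(2.25, &rest); // third segment, rest == 0.25
+ *     Point p = c.pointAt(rest);               // same as r.pointAt(2.25)
+ */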
+
+Piecewise<D2<SBasis> > paths_to_pw(PathVector const &paths)
+{
+ Piecewise<D2<SBasis> > ret = paths[0].toPwSb();
+ for (unsigned i = 1; i < paths.size(); i++) {
+ ret.concat(paths[i].toPwSb());
+ }
+ return ret;
+}
+
+bool are_near(Path const &a, Path const &b, Coord precision)
+{
+ if (a.size() != b.size()) return false;
+
+ for (unsigned i = 0; i < a.size(); ++i) {
+ if (!a[i].isNear(b[i], precision)) return false;
+ }
+ return true;
+}
+
+std::ostream &operator<<(std::ostream &out, Path const &path)
+{
+ SVGPathWriter pw;
+ pw.feed(path);
+ out << pw.str();
+ return out;
+}
+
+} // end namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/pathvector.cpp b/src/2geom/pathvector.cpp
new file mode 100644
index 0000000..0683c31
--- /dev/null
+++ b/src/2geom/pathvector.cpp
@@ -0,0 +1,336 @@
+/** @file
+ * @brief PathVector - a sequence of subpaths
+ *//*
+ * Authors:
+ * Johan Engelen <goejendaagh@zonnet.nl>
+ * Krzysztof Kosiński <tweenk.pl@gmail.com>
+ *
+ * Copyright 2008-2014 Authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+#include <2geom/affine.h>
+#include <2geom/path.h>
+#include <2geom/pathvector.h>
+#include <2geom/svg-path-writer.h>
+#include <2geom/sweeper.h>
+#include <optional>
+
+namespace Geom {
+
+//PathVector &PathVector::operator+=(PathVector const &other);
+
+PathVector::size_type PathVector::curveCount() const
+{
+ size_type n = 0;
+ for (const auto & it : *this) {
+ n += it.size_default();
+ }
+ return n;
+}
+
+void PathVector::reverse(bool reverse_paths)
+{
+ if (reverse_paths) {
+ std::reverse(begin(), end());
+ }
+ for (auto & i : *this) {
+ i = i.reversed();
+ }
+}
+
+PathVector PathVector::reversed(bool reverse_paths) const
+{
+ PathVector ret;
+ for (const auto & i : *this) {
+ ret.push_back(i.reversed());
+ }
+ if (reverse_paths) {
+ std::reverse(ret.begin(), ret.end());
+ }
+ return ret;
+}
+
+Path &PathVector::pathAt(Coord t, Coord *rest)
+{
+ return const_cast<Path &>(static_cast<PathVector const*>(this)->pathAt(t, rest));
+}
+Path const &PathVector::pathAt(Coord t, Coord *rest) const
+{
+ PathVectorTime pos = _factorTime(t);
+ if (rest) {
+ *rest = Coord(pos.curve_index) + pos.t;
+ }
+ return at(pos.path_index);
+}
+Curve const &PathVector::curveAt(Coord t, Coord *rest) const
+{
+ PathVectorTime pos = _factorTime(t);
+ if (rest) {
+ *rest = pos.t;
+ }
+ return at(pos.path_index).at(pos.curve_index);
+}
+Coord PathVector::valueAt(Coord t, Dim2 d) const
+{
+ PathVectorTime pos = _factorTime(t);
+ return at(pos.path_index).at(pos.curve_index).valueAt(pos.t, d);
+}
+Point PathVector::pointAt(Coord t) const
+{
+ PathVectorTime pos = _factorTime(t);
+ return at(pos.path_index).at(pos.curve_index).pointAt(pos.t);
+}
+
+OptRect PathVector::boundsFast() const
+{
+ OptRect bound;
+ if (empty()) return bound;
+
+ bound = front().boundsFast();
+ for (const_iterator it = ++begin(); it != end(); ++it) {
+ bound.unionWith(it->boundsFast());
+ }
+ return bound;
+}
+
+OptRect PathVector::boundsExact() const
+{
+ OptRect bound;
+ if (empty()) return bound;
+
+ bound = front().boundsExact();
+ for (const_iterator it = ++begin(); it != end(); ++it) {
+ bound.unionWith(it->boundsExact());
+ }
+ return bound;
+}
+
+void PathVector::snapEnds(Coord precision)
+{
+ for (std::size_t i = 0; i < size(); ++i) {
+ (*this)[i].snapEnds(precision);
+ }
+}
+
+// sweepline optimization
+// this is very similar to CurveIntersectionSweepSet in path.cpp
+// should probably be merged
+class PathIntersectionSweepSet {
+public:
+ struct PathRecord {
+ boost::intrusive::list_member_hook<> _hook;
+ Path const *path;
+ std::size_t index;
+ unsigned which;
+
+ PathRecord(Path const &p, std::size_t i, unsigned w)
+ : path(&p)
+ , index(i)
+ , which(w)
+ {}
+ };
+
+ typedef std::vector<PathRecord>::iterator ItemIterator;
+
+ PathIntersectionSweepSet(std::vector<PVIntersection> &result,
+ PathVector const &a, PathVector const &b, Coord precision)
+ : _result(result)
+ , _precision(precision)
+ {
+ _records.reserve(a.size() + b.size());
+ for (std::size_t i = 0; i < a.size(); ++i) {
+ _records.emplace_back(a[i], i, 0);
+ }
+ for (std::size_t i = 0; i < b.size(); ++i) {
+ _records.emplace_back(b[i], i, 1);
+ }
+ }
+
+ std::vector<PathRecord> &items() { return _records; }
+
+ Interval itemBounds(ItemIterator ii) {
+ OptRect r = ii->path->boundsFast();
+ if (!r) return Interval();
+ return (*r)[X];
+ }
+
+ void addActiveItem(ItemIterator ii) {
+ unsigned w = ii->which;
+ unsigned ow = (ii->which + 1) % 2;
+
+ for (auto & i : _active[ow]) {
+ if (!ii->path->boundsFast().intersects(i.path->boundsFast())) continue;
+ std::vector<PathIntersection> px = ii->path->intersect(*i.path, _precision);
+ for (auto & k : px) {
+ PathVectorTime tw(ii->index, k.first), tow(i.index, k.second);
+ _result.emplace_back(
+ w == 0 ? tw : tow,
+ w == 0 ? tow : tw,
+ k.point());
+ }
+ }
+ _active[w].push_back(*ii);
+ }
+
+ void removeActiveItem(ItemIterator ii) {
+ ActivePathList &apl = _active[ii->which];
+ apl.erase(apl.iterator_to(*ii));
+ }
+
+private:
+ typedef boost::intrusive::list
+ < PathRecord
+ , boost::intrusive::member_hook
+ < PathRecord
+ , boost::intrusive::list_member_hook<>
+ , &PathRecord::_hook
+ >
+ > ActivePathList;
+
+ std::vector<PVIntersection> &_result;
+ std::vector<PathRecord> _records;
+ ActivePathList _active[2];
+ Coord _precision;
+};
+
+std::vector<PVIntersection> PathVector::intersect(PathVector const &other, Coord precision) const
+{
+ std::vector<PVIntersection> result;
+
+ PathIntersectionSweepSet pisset(result, *this, other, precision);
+ Sweeper<PathIntersectionSweepSet> sweeper(pisset);
+ sweeper.process();
+
+ std::sort(result.begin(), result.end());
+
+ return result;
+}
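+
+/* Usage sketch (illustrative only, not part of the library): PVIntersection
+ * times carry the path index in addition to the curve index and local t.
+ *
+ *     PathVector pa, pb;
+ *     pa.push_back(Path(Rect(Point(0, 0), Point(10, 10))));
+ *     pb.push_back(Path(Rect(Point(5, 5), Point(15, 15))));
+ *     std::vector<PVIntersection> xs = pa.intersect(pb, 1e-6);
+ *     for (PVIntersection const &x : xs)
+ *         std::cout << "path " << x.first.path_index
+ *                   << ", curve " << x.first.curve_index << "\n";
+ */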
+
+int PathVector::winding(Point const &p) const
+{
+ int wind = 0;
+ for (const auto & i : *this) {
+ if (!i.boundsFast().contains(p)) continue;
+ wind += i.winding(p);
+ }
+ return wind;
+}
+
+std::optional<PathVectorTime> PathVector::nearestTime(Point const &p, Coord *dist) const
+{
+ std::optional<PathVectorTime> retval;
+
+ Coord mindist = infinity();
+ for (size_type i = 0; i < size(); ++i) {
+ Coord d;
+ PathTime pos = (*this)[i].nearestTime(p, &d);
+ if (d < mindist) {
+ mindist = d;
+ retval = PathVectorTime(i, pos.curve_index, pos.t);
+ }
+ }
+
+ if (dist) {
+ *dist = mindist;
+ }
+ return retval;
+}
+
+std::vector<PathVectorTime> PathVector::allNearestTimes(Point const &p, Coord *dist) const
+{
+ std::vector<PathVectorTime> retval;
+
+ Coord mindist = infinity();
+ for (size_type i = 0; i < size(); ++i) {
+ Coord d;
+ PathTime pos = (*this)[i].nearestTime(p, &d);
+ if (d < mindist) {
+ mindist = d;
+ retval.clear();
+ }
+ if (d <= mindist) {
+ retval.emplace_back(i, pos.curve_index, pos.t);
+ }
+ }
+
+ if (dist) {
+ *dist = mindist;
+ }
+ return retval;
+}
+
+std::vector<Point> PathVector::nodes() const
+{
+ std::vector<Point> result;
+ for (size_type i = 0; i < size(); ++i) {
+ size_type path_size = (*this)[i].size_closed();
+ for (size_type j = 0; j < path_size; ++j) {
+ result.push_back((*this)[i][j].initialPoint());
+ }
+ }
+ return result;
+}
+
+PathVectorTime PathVector::_factorTime(Coord t) const
+{
+ PathVectorTime ret;
+ Coord rest = 0;
+ ret.t = modf(t, &rest);
+ ret.curve_index = rest;
+ for (; ret.path_index < size(); ++ret.path_index) {
+ unsigned s = _data.at(ret.path_index).size_default();
+ if (s > ret.curve_index) break;
+ // special case for the last point
+ if (s == ret.curve_index && ret.path_index + 1 == size()) {
+ --ret.curve_index;
+ ret.t = 1;
+ break;
+ }
+ ret.curve_index -= s;
+ }
+ return ret;
+}
+
+std::ostream &operator<<(std::ostream &out, PathVector const &pv)
+{
+ SVGPathWriter wr;
+ wr.feed(pv);
+ out << wr.str();
+ return out;
+}
+
+} // namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/piecewise.cpp b/src/2geom/piecewise.cpp
new file mode 100644
index 0000000..8714bd6
--- /dev/null
+++ b/src/2geom/piecewise.cpp
@@ -0,0 +1,266 @@
+/*
+ * piecewise.cpp - Piecewise function class
+ *
+ * Copyright 2007 Michael Sloan <mgsloan@gmail.com>
+ * Copyright 2007 JF Barraud
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ */
+
+#include <2geom/piecewise.h>
+#include <iterator>
+#include <map>
+
+namespace Geom {
+
+Piecewise<SBasis> divide(Piecewise<SBasis> const &a, Piecewise<SBasis> const &b, unsigned k) {
+ Piecewise<SBasis> pa = partition(a, b.cuts), pb = partition(b, a.cuts);
+ Piecewise<SBasis> ret = Piecewise<SBasis>();
+ assert(pa.size() == pb.size());
+ ret.cuts = pa.cuts;
+ for (unsigned i = 0; i < pa.size(); i++)
+ ret.push_seg(divide(pa[i], pb[i], k));
+ return ret;
+}
+
+Piecewise<SBasis>
+divide(Piecewise<SBasis> const &a, Piecewise<SBasis> const &b, double tol, unsigned k, double zero) {
+ Piecewise<SBasis> pa = partition(a, b.cuts), pb = partition(b, a.cuts);
+ Piecewise<SBasis> ret = Piecewise<SBasis>();
+ assert(pa.size() == pb.size());
+ for (unsigned i = 0; i < pa.size(); i++){
+ Piecewise<SBasis> divi = divide(pa[i], pb[i], tol, k, zero);
+ divi.setDomain(Interval(pa.cuts[i],pa.cuts[i+1]));
+ ret.concat(divi);
+ }
+ return ret;
+}
+Piecewise<SBasis> divide(Piecewise<SBasis> const &a, SBasis const &b, double tol, unsigned k, double zero){
+ return divide(a,Piecewise<SBasis>(b),tol,k,zero);
+}
+Piecewise<SBasis> divide(SBasis const &a, Piecewise<SBasis> const &b, double tol, unsigned k, double zero){
+ return divide(Piecewise<SBasis>(a),b,tol,k,zero);
+}
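+
+/* Illustrative usage sketch (the values below are made up): divide() approximates the pointwise
+ * quotient a/b by a Piecewise<SBasis>, subdividing the domain wherever the requested tolerance
+ * cannot be reached with the allowed number of terms:
+ *
+ *     SBasis num(Linear(1, 2));   // rises from 1 to 2 over [0, 1]
+ *     SBasis den(Linear(1, 3));   // rises from 1 to 3 over [0, 1]; bounded away from zero
+ *     Piecewise<SBasis> q = divide(num, den, 1e-6, 3, 1e-3);
+ *     // q(0.5) is approximately 1.5 / 2.0 = 0.75
+ */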
+Piecewise<SBasis> divide(SBasis const &a, SBasis const &b, double tol, unsigned k, double zero) {
+ if (b.tailError(0)<2*zero){
+ //TODO: have a better look at sgn(b).
+ double sgn= (b(.5)<0.)?-1.:1;
+ return Piecewise<SBasis>(Linear(sgn/zero)*a);
+ }
+
+ if (fabs(b.at0())>zero && fabs(b.at1())>zero ){
+ SBasis c,r=a;
+ //TODO: what is a good relative tol? atm, c=a/b +/- (tol/a)%...
+
+ k+=1;
+ r.resize(k, Linear(0,0));
+ c.resize(k, Linear(0,0));
+
+ //assert(b.at0()!=0 && b.at1()!=0);
+ for (unsigned i=0; i<k; i++){
+ Linear ci = Linear(r[i][0]/b[0][0],r[i][1]/b[0][1]);
+ c[i]=ci;
+ r-=shift(ci*b,i);
+ }
+
+ if (r.tailError(k)<tol) return Piecewise<SBasis>(c);
+ }
+
+ Piecewise<SBasis> c0,c1;
+ c0 = divide(compose(a,Linear(0.,.5)),compose(b,Linear(0.,.5)),tol,k);
+ c1 = divide(compose(a,Linear(.5,1.)),compose(b,Linear(.5,1.)),tol,k);
+ c0.setDomain(Interval(0.,.5));
+ c1.setDomain(Interval(.5,1.));
+ c0.concat(c1);
+ return c0;
+}
+
+
+//-- compose(pw<T>,SBasis) ---------------
+/*
+ The purpose of the following functions is only to reduce the amount of code in piecewise.h.
+ TODO: use a vector<pair<double,unsigned> > instead of a map<double,unsigned>.
+ */
+
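+// Illustrative example: with values = {1.0, 2.0} and a monotonically increasing g such that
+// g(0) = 0.5 and g(1) = 2.5, the returned map is { 0.0 -> 0, t1 -> 0, t2 -> 1, 1.0 -> 2 },
+// where t1 and t2 are the times at which g crosses 1.0 and 2.0 respectively.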
+std::map<double,unsigned> compose_pullback(std::vector<double> const &values, SBasis const &g){
+ std::map<double,unsigned> result;
+
+ std::vector<std::vector<double> > roots = multi_roots(g, values);
+ for(unsigned i=0; i<roots.size(); i++){
+ for(unsigned j=0; j<roots[i].size();j++){
+ result[roots[i][j]]=i;
+ }
+ }
+    // Also map 0 and 1 to the index of the first value above (or equal to) g(0) and g(1), respectively.
+ if(result.count(0.)==0){
+ unsigned i=0;
+ while (i<values.size()&&(g.at0()>values[i])) i++;
+ result[0.]=i;
+ }
+ if(result.count(1.)==0){
+ unsigned i=0;
+ while (i<values.size()&&(g.at1()>values[i])) i++;
+ result[1.]=i;
+ }
+ return(result);
+}
+
+int compose_findSegIdx(std::map<double,unsigned>::iterator const &cut,
+ std::map<double,unsigned>::iterator const &next,
+ std::vector<double> const &levels,
+ SBasis const &g){
+ double t0=(*cut).first;
+ unsigned idx0=(*cut).second;
+ double t1=(*next).first;
+ unsigned idx1=(*next).second;
+ assert(t0<t1);
+ int idx; //idx of the relevant f.segs
+ if (std::max(idx0,idx1)==levels.size()){ //g([t0,t1]) is above the top level,
+ idx=levels.size()-1;
+ } else if (idx0 != idx1){ //g([t0,t1]) crosses from level idx0 to idx1,
+ idx=std::min(idx0,idx1);
+ } else if(g((t0+t1)/2) < levels[idx0]) { //g([t0,t1]) is a 'U' under level idx0,
+ idx=idx0-1;
+ } else if(g((t0+t1)/2) > levels[idx0]) { //g([t0,t1]) is a 'bump' over level idx0,
+ idx=idx0;
+ } else { //g([t0,t1]) is contained in level idx0!...
+ idx = (idx0==levels.size())? idx0-1:idx0;
+ }
+
+    // Move idx back from 'levels' indexing to f.cuts indexing.
+ idx+=1;
+ return idx;
+}
+
+
+Piecewise<SBasis> pw_compose_inverse(SBasis const &f, SBasis const &g, unsigned order, double zero){
+ Piecewise<SBasis> result;
+
+ assert( f.size()>0 && g.size()>0);
+ SBasis g01 = g;
+ bool flip = ( g01.at0() > g01.at1() );
+
+ //OptInterval g_range = bounds_exact(g);
+ OptInterval g_range( Interval( g.at0(), g.at1() ));
+
+ g01 -= g_range->min();
+ g01 /= g_range->extent();
+ if ( flip ){
+ g01 *= -1.;
+ g01 += 1.;
+ }
+#if 1
+ assert( std::abs( g01.at0() - 0. ) < zero );
+ assert( std::abs( g01.at1() - 1. ) < zero );
+ //g[0][0] = 0.;
+ //g[0][1] = 1.;
+#endif
+
+ SBasis foginv = compose_inverse( f, g01, order, zero );
+ SBasis err = compose( foginv, g01) - f;
+
+ if ( err.tailError(0) < zero ){
+ result = Piecewise<SBasis> (foginv);
+ }else{
+ SBasis g_portion = portion( g01, Interval(0.,.5) );
+ SBasis f_portion = portion( f, Interval(0.,.5) );
+ result = pw_compose_inverse(f_portion, g_portion, order, zero);
+
+ g_portion = portion( g01, Interval(.5, 1.) );
+ f_portion = portion( f, Interval(.5, 1.) );
+ Piecewise<SBasis> result_next;
+ result_next = pw_compose_inverse(f_portion, g_portion, order, zero);
+ result.concat( result_next );
+ }
+ if (flip) {
+ result = reverse(result);
+ }
+ result.setDomain(*g_range);
+ return result;
+}
+
+
+std::vector<double> roots(Piecewise<SBasis> const &f){
+ std::vector<double> result;
+ for (unsigned i=0; i<f.size(); i++){
+ std::vector<double> rts=roots(f.segs[i]);
+
+ for (double rt : rts){
+ result.push_back(f.mapToDomain(rt, i));
+ }
+ }
+ return result;
+}
+
+std::vector<std::vector<double> > multi_roots(Piecewise<SBasis> const &f, std::vector<double> const &values) {
+ std::vector<std::vector<double> > result(values.size());
+ for (unsigned i=0; i<f.size(); i++) {
+ std::vector<std::vector<double> > rts = multi_roots(f.segs[i], values);
+ for(unsigned j=0; j<rts.size(); j++) {
+ for(unsigned r=0; r<rts[j].size(); r++){
+ result[j].push_back(f.mapToDomain(rts[j][r], i));
+ }
+ }
+ }
+ return result;
+}
+
+
+std::vector<Interval> level_set(Piecewise<SBasis> const &f, Interval const &level, double tol){
+ std::vector<Interval> result;
+ for (unsigned i=0; i<f.size(); i++){
+ std::vector<Interval> resulti = level_set( f[i], level, 0., 1., tol);
+ for (unsigned j=0; j<resulti.size(); j++){
+ double a = f.cuts[i] + resulti[j].min() * ( f.cuts[i+1] - f.cuts[i] );
+ double b = f.cuts[i] + resulti[j].max() * ( f.cuts[i+1] - f.cuts[i] );
+ Interval domj( a, b );
+ //Interval domj( f.mapToDomain(resulti[j].min(), i ), f.mapToDomain(resulti[j].max(), i ) );
+
+ if ( j==0 && !result.empty() && result.back().intersects(domj) ){
+ result.back().unionWith(domj);
+ }else{
+ result.push_back(domj);
+ }
+ }
+ }
+ return result;
+}
+std::vector<Interval> level_set(Piecewise<SBasis> const &f, double v, double vtol, double tol){
+ Interval level ( v-vtol, v+vtol );
+ return level_set( f, level, tol);
+}
+
+
+} // namespace Geom
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/planar-graph.h b/src/2geom/planar-graph.h
new file mode 100644
index 0000000..fb5f1ac
--- /dev/null
+++ b/src/2geom/planar-graph.h
@@ -0,0 +1,1252 @@
+/** @file PlanarGraph – a graph geometrically embedded in the plane.
+ */
+/*
+ * Authors:
+ * Rafał Siejakowski <rs@rs-math.net>
+ *
+ * Copyright 2022 the Authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+// WARNING: This is a private header. Do not include it directly.
+
+#ifndef LIB2GEOM_SEEN_PLANAR_GRAPH_H
+#define LIB2GEOM_SEEN_PLANAR_GRAPH_H
+
+#include <algorithm>
+#include <iterator>
+#include <list>
+
+#include <2geom/angle.h>
+#include <2geom/coord.h>
+#include <2geom/line.h>
+#include <2geom/point.h>
+#include <2geom/path.h>
+#include <2geom/path-intersection.h>
+#include <2geom/utils.h>
+
+namespace Geom {
+
+/**
+ * \class PlanarGraph
+ * \brief Planar graph - a graph geometrically embedded in the plane.
+ *
+ * A planar graph is composed of vertices with assigned locations (as points in the plane)
+ * and of edges (arcs), which are imagined as non-intersecting paths in the plane connecting
+ * the vertices. The edges can hold user-supplied labels (e.g., weights) which support event
+ * callbacks for when the graph is reconfigured, allowing the labels to be updated accordingly.
+ *
+ * \tparam EdgeLabel A user-supplied type; an object of this type will be attached to each
+ * edge of the planar graph (e.g., a "weight" of the edge). The type must
+ * satisfy requirements described further below.
+ *
+ * In order to construct a planar graph, you should specify the clumping precision (passed as
+ * a constructor argument) and then use the method insertEdge() to add edges to the graph, as
+ * many times as necessary. The graph will automatically figure out the locations of the
+ * vertices based on the endpoints of the inserted edges. Vertices will be combined into one
+ * when they are positioned within the distance specified as the clumping threshold, and the
+ * inserted edges will be attached to them accordingly. It is also possible to insert paths
+ * (typically, closed) not attached to any vertices, using the method insertDetached().
+ *
+ * After the edges are inserted, the graph is in a potentially degenerate state, where several
+ * edges may exactly coincide in part or in full. If this is not desired, you can regularize
+ * the graph by calling regularize(). During the regularization process, any overlapping edges
+ * are combined into one. Partially overlapping edges are first split into overlapping and
+ * non-overlapping portions, after which the overlapping portions are combined. If the edges
+ * or their parts overlap but run in opposite directions, one of them will be reversed before
+ * being merged with the other one. The overlaps are detected using the precision setting passed
+ * as the clumping precision in the constructor argument.
+ *
+ * Note however that the regularization procedure does NOT detect transverse intersections
+ * between the edge paths: if such intersections are not desired, the user must pass
+ * non-intersecting paths to the insertEdge() method (the paths may still have common endpoints,
+ * which is fine: that's how common vertices are created).
+ *
+ * The insertion of new edges invalidates the regularized status, which you can check at any
+ * time by calling isRegularized().
+ *
+ * The vertices stored by the graph are sorted by increasing X-coordinate, and if they have
+ * equal X-coordinates, by increasing Y-coordinate. Even before regularization, incidences of
+ * edges to each vertex are sorted by increasing azimuth of the outgoing tangent (departure
+ * heading, but in radians, in the interval \f$(-\pi, \pi]\f$). After regularization, the edges
+ * around each vertex are guaranteed to be sorted counterclockwise (when the Y-axis points up)
+ * by where they end up going eventually, even if they're tangent at the vertex and therefore
+ * have equal or nearly equal departure azimuths.
+ *
+ * \note
+ * Requirements on the \c EdgeLabel template parameter type.
+ * In order for the template to instantiate correctly, the following must be satisfied:
+ * \li The \c EdgeLabel type provides a method \c onReverse() which gets called whenever
+ * the orientation of the labeled edge is reversed. This is useful when implementing
+ * a directed graph, since the label can keep track of the logical direction.
+ * \li The \c EdgeLabel type provides a method \c onMergeWith(EdgeLabel const&) which gets
+ * called when the labeled edge is combined with a geometrically identical (coinciding)
+ * edge (both combined edges having the same orientations). The label of the edge merged
+ * with the current one is provided as an argument to the method. This is useful when
+ * implementing a graph with weights: for example, when two edges are merged, you may
+ * want to combine their weights in some way.
+ * \li There is a method \c onDetach() called when the edge is removed from the graph. The
+ * edge objects are never destroyed but may be disconnected from the graph when they're no
+ * longer needed; this allows the user to put the labels of such edges in a "dead" state.
+ * \li The \c EdgeLabel objects must be copy-constructible and copy-assignable. This is
+ * because when an edge is subdivided into two, the new edges replacing it get decorated
+ * with copies of the original edge's label.
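+ *
+ * A minimal usage sketch (the \c DummyLabel type and the geometry below are placeholders
+ * chosen only to illustrate the construction workflow described above):
+ * \code{.cpp}
+ *     struct DummyLabel // Satisfies the EdgeLabel requirements listed above.
+ *     {
+ *         void onReverse() {}
+ *         void onMergeWith(DummyLabel const &) {}
+ *         void onDetach() {}
+ *     };
+ *
+ *     PlanarGraph<DummyLabel> graph(1e-9); // Clumping precision.
+ *     Path edge(Point(0, 0));
+ *     edge.appendNew<LineSegment>(Point(1, 0));
+ *     graph.insertEdge(edge); // Creates vertices at (0, 0) and (1, 0).
+ *     graph.regularize();     // Nothing overlaps here; isRegularized() becomes true.
+ * \endcode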
+ */
+template<typename EdgeLabel>
+#if __cplusplus >= 202002L
+requires requires(EdgeLabel el, EdgeLabel const &other) {
+ el.onReverse();
+ el.onMergeWith(other);
+ el.onDetach();
+ el = other;
+}
+#endif
+class PlanarGraph
+{
+public:
+
+ /** Represents the joint between an edge and a vertex. */
+ struct Incidence
+ {
+ using Sign = bool;
+ inline static Sign const START = false;
+ inline static Sign const END = true;
+
+ double azimuth; ///< Angle of the edge's departure.
+ unsigned index; ///< Index of the edge in the parent graph.
+ Sign sign; ///< Whether this is the start or end of the edge.
+ bool invalid = false; ///< Whether this incidence has been marked for deletion.
+
+ Incidence(unsigned edge_index, double departure_azimuth, Sign which_end)
+ : azimuth{departure_azimuth}
+ , index{edge_index}
+ , sign{which_end}
+ {
+ }
+ ~Incidence() = default;
+
+ /// Compare incidences based on their azimuths in radians.
+ inline bool operator<(Incidence const &other) const { return azimuth < other.azimuth; }
+
+ /// Compare the azimuth of an incidence with the given angle.
+ inline bool operator<(double angle) const { return azimuth < angle; }
+
+ /// Check equality (only tests edges and their ends).
+ inline bool operator==(Incidence const &other) const
+ {
+ return index == other.index && sign == other.sign;
+ }
+ };
+ using IncIt = typename std::list<Incidence>::iterator;
+ using IncConstIt = typename std::list<Incidence>::const_iterator;
+
+ /** Represents the vertex of a planar graph. */
+ class Vertex
+ {
+ private:
+ Point const _position; ///< Geometric position of the vertex.
+ std::list<Incidence> _incidences; ///< List of incidences of edges to this vertex.
+ unsigned mutable _flags = 0; ///< User-settable flags.
+
+ inline static Point::LexLess<X> const _cmp; ///< Point sorting function object.
+
+ public:
+ Vertex(Point const &pos)
+ : _position{pos}
+ {
+ }
+
+ /** Get the geometric position of the vertex. */
+ Point const &point() const { return _position; }
+
+ /** Get the list of incidences to the vertex. */
+ auto const &getIncidences() const { return _incidences; }
+
+ /** Compare vertices based on their coordinates (lexicographically). */
+ bool operator<(Vertex const &other) const { return _cmp(_position, other._position); }
+
+ unsigned flags() const { return _flags; } ///< Get the user flags.
+ void setFlags(unsigned flags) const { _flags = flags; } ///< Set the user flags.
+
+ /** Get the cyclic-next incidence after the passed one, in the CCW direction. */
+ IncConstIt cyclicNextIncidence(IncConstIt it) const { return cyclic_next(it, _incidences); }
+
+ /** Get the cyclic-next incidence after the passed one, in the CW direction. */
+ IncConstIt cyclicPrevIncidence(IncConstIt it) const { return cyclic_prior(it, _incidences); }
+
+ /** The same but with pointers. */
+ Incidence *cyclicNextIncidence(Incidence *from)
+ {
+ return &(*cyclicNextIncidence(_incidencePtr2It(from)));
+ }
+ Incidence *cyclicPrevIncidence(Incidence *from)
+ {
+ return &(*cyclicPrevIncidence(_incidencePtr2It(from)));
+ }
+
+ private:
+ /** Same as above, but not const (so only for private use). */
+ IncIt cyclicNextIncidence(IncIt it) { return cyclic_next(it, _incidences); }
+ IncIt cyclicPrevIncidence(IncIt it) { return cyclic_prior(it, _incidences); }
+
+ /** Insert an incidence; for internal use by the PlanarGraph class. */
+ Incidence &_addIncidence(unsigned edge_index, double azimuth, typename Incidence::Sign sign)
+ {
+ auto where = std::find_if(_incidences.begin(), _incidences.end(), [=](auto &inc) -> bool {
+ return inc.azimuth >= azimuth;
+ });
+ return *(_incidences.emplace(where, edge_index, azimuth, sign));
+ }
+
+ /** Return a valid iterator to an incidence passed by pointer;
+ * if the pointer is invalid, return a start iterator. */
+ IncIt _incidencePtr2It(Incidence *pointer)
+ {
+ auto it = std::find_if(_incidences.begin(), _incidences.end(),
+ [=](Incidence const &i) -> bool { return &i == pointer; });
+ return (it == _incidences.end()) ? _incidences.begin() : it;
+ }
+
+ friend class PlanarGraph<EdgeLabel>;
+ };
+ using VertexIterator = typename std::list<Vertex>::iterator;
+
+ /** Represents an edge of the planar graph. */
+ struct Edge
+ {
+ Vertex *start = nullptr, *end = nullptr; ///< Start and end vertices.
+ Path path; ///< The path associated to the edge.
+ bool detached = false; ///< Whether the edge is detached from the graph.
+ bool inserted_as_detached = false; ///< Whether the edge was inserted as detached.
+ EdgeLabel mutable label; ///< The user-supplied label of the edge.
+
+ /** Construct an edge with a given label. */
+ Edge(Path &&movein_path, EdgeLabel &&movein_label)
+ : path{movein_path}
+ , label{movein_label}
+ {
+ }
+
+ /** Detach the edge from the graph. */
+ void detach()
+ {
+ detached = true;
+ label.onDetach();
+ }
+ };
+ using EdgeIterator = typename std::vector<Edge>::iterator;
+ using EdgeConstIterator = typename std::vector<Edge>::const_iterator;
+
+private:
+ double const _precision; ///< Numerical epsilon for vertex clumping.
+ std::list<Vertex> _vertices; ///< Vertices of the graph.
+ std::vector<Edge> _edges; ///< Edges of the graph.
+ std::vector< std::pair<Vertex *, Incidence *> > _junk; ///< Incidences that should be purged.
+ bool _regularized = true; // An empty graph is (trivially) regularized.
+
+public:
+ PlanarGraph(Coord precision = EPSILON)
+ : _precision{precision}
+ {
+ }
+
+ std::list<Vertex> const &getVertices() const { return _vertices; }
+ std::vector<Edge> const &getEdges() const { return _edges; }
+ Edge const &getEdge(size_t index) const { return _edges.at(index); }
+ size_t getEdgeIndex(Edge const &edge) const { return &edge - _edges.data(); }
+ double getPrecision() const { return _precision; }
+ size_t numVertices() const { return _vertices.size(); }
+ size_t numEdges(bool include_detached = true) const
+ {
+ if (include_detached) {
+ return _edges.size();
+ }
+ return std::count_if(_edges.begin(), _edges.end(),
+ [](Edge const &e) -> bool { return !e.detached; });
+ }
+
+ /** Check if the graph has been regularized. */
+ bool isRegularized() const { return _regularized; }
+
+ // 0x1p-50 is about twice the distance between M_PI and the next representable double.
+ void regularize(double angle_precision = 0x1p-50, bool remove_collapsed_loops = true);
+
+ /** Allocate memory to store the specified number of edges. */
+ void reserveEdgeCapacity(size_t capacity) { _edges.reserve(capacity); }
+
+ unsigned insertEdge(Path &&path, EdgeLabel &&edge = EdgeLabel());
+ unsigned insertDetached(Path &&path, EdgeLabel &&edge = EdgeLabel());
+
+ /** Edge insertion with a copy of the path. */
+ unsigned insertEdge(Path const &path, EdgeLabel &&edge = EdgeLabel())
+ {
+ return insertEdge(Path(path), std::forward<EdgeLabel>(edge));
+ }
+ unsigned insertDetached(Path const &path, EdgeLabel &&edge = EdgeLabel())
+ {
+ return insertDetached(Path(path), std::forward<EdgeLabel>(edge));
+ }
+
+ /** \brief Find the incidence at the specified endpoint of the edge.
+ *
+ * \param edge_index The index of the edge whose incidence we wish to query.
+ * \param sign Which end of the edge do we want an incidence of.
+ * \return A pair consisting of pointers to the vertex and the incidence.
+ * If not found, both pointers will be null.
+ */
+ std::pair<Vertex *, Incidence *>
+ getIncidence(unsigned edge_index, typename Incidence::Sign sign) const
+ {
+ if (edge_index >= _edges.size() || _edges[edge_index].detached) {
+ return {nullptr, nullptr};
+ }
+ Vertex *vertex = (sign == Incidence::START) ? _edges[edge_index].start
+ : _edges[edge_index].end;
+ if (!vertex) {
+ return {nullptr, nullptr};
+ }
+ auto it = std::find(vertex->_incidences.begin(), vertex->_incidences.end(),
+ Incidence(edge_index, 42, sign)); // azimuth doesn't matter.
+ if (it == vertex->_incidences.end()) {
+ return {nullptr, nullptr};
+ }
+ return {vertex, &(*it)};
+ }
+
+ /**
+ * \brief Go clockwise or counterclockwise around the vertex and find the next incidence.
+ * The notions of "clockwise"/"counterclockwise" correspond to the y-axis pointing up.
+ *
+ * \param vertex The vertex around which to orbit.
+ * \param incidence The incidence from which to start traversal.
+ * \param clockwise Whether to go clockwise instead of (default) counterclockwise.
+ * \return The next incidence encountered going in the specified direction.
+ */
+ inline Incidence const &nextIncidence(VertexIterator const &vertex, IncConstIt const &incidence,
+ bool clockwise = false) const
+ {
+        return clockwise ? *(vertex->cyclicPrevIncidence(incidence))
+                         : *(vertex->cyclicNextIncidence(incidence));
+ }
+
+ /** As above, but taking references instead of iterators. */
+ inline Incidence const &nextIncidence(Vertex const &vertex, Incidence const &incidence,
+ bool clockwise = false) const
+ {
+ IncConstIt it = std::find(vertex._incidences.begin(), vertex._incidences.end(), incidence);
+ if (it == vertex._incidences.end()) {
+ return incidence;
+ }
+ return clockwise ? *(vertex.cyclicPrevIncidence(it))
+ : *(vertex.cyclicNextIncidence(it));
+ }
+
+ /** As above, but return an iterator to a const incidence. */
+ inline IncConstIt nextIncidenceIt(Vertex const &vertex, Incidence const &incidence,
+ bool clockwise = false) const
+ {
+ IncConstIt it = std::find(vertex._incidences.begin(), vertex._incidences.end(), incidence);
+ if (it == vertex._incidences.end()) {
+ return vertex._incidences.begin();
+ }
+ return clockwise ? vertex.cyclicPrevIncidence(it)
+ : vertex.cyclicNextIncidence(it);
+ }
+ inline IncConstIt nextIncidenceIt(Vertex const &vertex, IncConstIt const &incidence,
+ bool clockwise = false) const
+ {
+ return clockwise ? vertex.cyclicPrevIncidence(incidence)
+ : vertex.cyclicNextIncidence(incidence);
+ }
+
+ /** As above, but start at the prescribed departure azimuth (in radians).
+ *
+ * \return A pointer to the incidence emanating from the vertex at or immediately after
+ * the specified azimuth, when going around the vertex in the specified direction.
+ * If the vertex has no incidences, return value is nullptr.
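+     *
+     * For example (illustrative values): if the incidences at the vertex have azimuths
+     * {-2.0, 0.5, 1.5}, a counterclockwise query at azimuth 1.0 returns the incidence at 1.5,
+     * whereas a clockwise query returns the one at 0.5.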
+ */
+ Incidence *nextIncidence(VertexIterator const &vertex, double azimuth,
+ bool clockwise = false) const;
+
+ /** Get the incident path, always oriented away from the vertex. */
+ Path getOutgoingPath(Incidence const *incidence) const
+ {
+ return incidence ? _getPathImpl(incidence, Incidence::START) : Path();
+ }
+
+ /** Get the incident path, always oriented towards the vertex. */
+ Path getIncomingPath(Incidence const *incidence) const
+ {
+ return incidence ? _getPathImpl(incidence, Incidence::END) : Path();
+ }
+
+private:
+ inline Path _getPathImpl(Incidence const *incidence, typename Incidence::Sign origin) const
+ {
+ return (incidence->sign == origin) ? _edges[incidence->index].path
+ : _edges[incidence->index].path.reversed();
+ }
+
+ /** Earmark an incidence for future deletion. */
+ inline void _throwAway(Vertex *vertex, Incidence *incidence)
+ {
+ if (!vertex || !incidence) {
+ return;
+ }
+ incidence->invalid = true;
+ _junk.emplace_back(vertex, incidence);
+ }
+
+ // Topological reconfiguration functions; see their definitions for documentation.
+ bool _compareAndReglue(Vertex &vertex, Incidence *first, Incidence *second, bool deloop);
+ Vertex *_ensureVertexAt(Point const &position);
+ void _mergeCoincidingEdges(Incidence *first, Incidence *second);
+ void _mergeShorterLonger(Vertex &vertex, Incidence *shorter, Incidence *longer,
+ PathTime const &time_on_longer);
+ void _mergeWyeConfiguration(Vertex &vertex, Incidence *first, Incidence *second,
+ PathIntersection const &split);
+ void _purgeJunkIncidences();
+ void _reglueLasso(Vertex &vertex, Incidence *first, Incidence *second,
+ PathIntersection const &split);
+ bool _reglueTeardrop(Vertex &vertex, Incidence *first, Incidence *second, bool deloop);
+ void _reglueTangentFan(Vertex &vertex, IncIt const &first, IncIt const &last, bool deloop);
+ void _regularizeVertex(Vertex &vertex, double angle_precision, bool deloop);
+
+ // === Static stuff ===
+
+ /** Return the angle between the vector and the positive X axis or 0 if undefined. */
+ inline static double _getAzimuth(Point const &vec) { return vec.isZero() ? 0.0 : atan2(vec); }
+
+ /** Return path time corresponding to the same point on the reversed path. */
+ inline static PathTime _reversePathTime(PathTime const &time, Path const &path)
+ {
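+        // For example, on a path with 3 curves, PathTime(1, 0.25) maps to PathTime(1, 0.75):
+        // the same point, measured from the other end of the reversed path.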
+ int new_index = path.size() - time.curve_index - 1;
+ Coord new_time = 1.0 - time.t;
+ if (new_index < 0) {
+ new_index = 0;
+ new_time = 0;
+ }
+ return PathTime(new_index, new_time);
+ }
+
+ /** Return path time at the end of the path. */
+ inline static PathTime _pathEnd(Path const &path) { return PathTime(path.size() - 1, 1.0); }
+ inline static auto const PATH_START = PathTime(0, 0);
+
+public:
+ static double closedPathArea(Path const &path);
+ static bool deviatesLeft(Path const &first, Path const &second);
+};
+
+/**
+ * \brief Insert a new vertex or reuse an existing one.
+ *
+ * Ensures that there is a vertex at or near the specified position
+ * (within the distance of _precision).
+ *
+ * \param pos The desired geometric position of the new vertex.
+ * \return A pointer to the inserted vertex or a pre-existing vertex near the
+ * desired position.
+ */
+template<typename EL>
+typename PlanarGraph<EL>::Vertex *PlanarGraph<EL>::_ensureVertexAt(Point const &pos)
+{
+ auto const insert_at_front = [&, this]() -> Vertex* {
+ _vertices.emplace_front(pos);
+ return &(_vertices.front());
+ };
+
+ if (_vertices.empty()) {
+ return insert_at_front();
+ }
+
+ // TODO: Use a heap?
+ auto it = std::find_if(_vertices.begin(), _vertices.end(), [&](Vertex const &v) -> bool {
+ return Vertex::_cmp(pos, v._position); // existing vertex position > pos.
+ });
+
+ if (it != _vertices.end()) {
+ if (are_near(pos, it->_position, _precision)) {
+ return &(*it); // Reuse existing vertex.
+ }
+ if (it == _vertices.begin()) {
+ return insert_at_front();
+ }
+ }
+ // Look at the previous element, reuse if near, insert before `it` otherwise.
+ return &(*(are_near(pos, std::prev(it)->_position, _precision) ? std::prev(it)
+ : _vertices.emplace(it, pos)));
+}
+
+/**
+ * \brief Move-insert a new labeled edge into the planar graph.
+ *
+ * \param path The geometric path of the edge.
+ * \param label Optionally, the label (extra user data) associated to this edge.
+ * If absent, a default-constructed label will be used.
+ * \return The index of the inserted edge.
+ */
+template<typename EdgeLabel>
+unsigned PlanarGraph<EdgeLabel>::insertEdge(Path &&path, EdgeLabel &&label)
+{
+ unsigned edge_index = _edges.size();
+ auto &inserted = _edges.emplace_back(std::forward<Path>(path),
+ std::forward<EdgeLabel>(label));
+
+ // Calculate the outgoing azimuths at both endpoints.
+ double const start_azimuth = _getAzimuth(inserted.path.initialUnitTangent());
+ double const end_azimuth = _getAzimuth(-inserted.path.finalUnitTangent());
+
+ // Get the endpoints into the graph.
+ auto start = _ensureVertexAt(inserted.path.initialPoint());
+ auto end = _ensureVertexAt(inserted.path.finalPoint());
+
+ // Inform the edge about its endpoints.
+ inserted.start = start;
+ inserted.end = end;
+
+ // Add incidences at the endpoints.
+ start->_addIncidence(edge_index, start_azimuth, Incidence::START);
+ end->_addIncidence(edge_index, end_azimuth, Incidence::END);
+
+ _regularized = false;
+ return edge_index;
+}
+
+/**
+ * \brief Move-insert a new labeled edge but do not connect it to the graph.
+ *
+ * Although the graph will hold the path data of an edge inserted in this way, the edge
+ * will not be connected to any vertex. This can be used to store information about closed
+ * paths (loops) in the instance, without having to specify starting points for them.
+ *
+ * \param path The geometric path of the edge.
+ * \param label Optionally, the label (extra user data) associated to this edge; if absent,
+ * the label will be default-constructed.
+ * \return The index of the inserted edge.
+ */
+template<typename EdgeLabel>
+unsigned PlanarGraph<EdgeLabel>::insertDetached(Path &&path, EdgeLabel &&label)
+{
+ unsigned edge_index = _edges.size();
+ auto &inserted = _edges.emplace_back(std::forward<Path>(path),
+ std::forward<EdgeLabel>(label));
+ inserted.detached = true;
+ inserted.inserted_as_detached = true;
+ return edge_index;
+}
+
+/** Remove incidences previously marked as junk. */
+template<typename EdgeLabel>
+void PlanarGraph<EdgeLabel>::_purgeJunkIncidences()
+{
+ for (auto &[vertex, incidence] : _junk) {
+ Incidence *to_remove = incidence;
+ auto it = std::find_if(vertex->_incidences.begin(), vertex->_incidences.end(),
+ [=](Incidence const &inc) -> bool { return &inc == to_remove; });
+ if (it != vertex->_incidences.end()) {
+ vertex->_incidences.erase(it);
+ }
+ }
+ _junk.clear();
+}
+
+/**
+ * \brief Merge overlapping edges or their portions, adding vertices if necessary.
+ *
+ * \param angle_precision The numerical epsilon for radian angle comparisons.
+ * \param remove_collapsed_loops Whether to detach edges with both ends incident to the same
+ * vertex (loops) when these loops don't enclose any area.
+ *
+ * This function performs the following operations:
+ * \li Edges that are tangent at a vertex but don't otherwise overlap are sorted correctly
+ * in the counterclockwise cyclic order around the vertex.
+ * \li Degenerate loops which don't enclose any area are removed if the argument is true.
+ * \li Edges that coincide completely are reversed if needed and merged into one.
+ * \li Edges that coincide partially are split into overlapping and non-overlapping portions.
+ * Any overlapping portions are oriented consistently and then merged.
+ * \li As a sub-case of the above, any non-degenerate loop with an initial self-overlap
+ * (a "lasso") is replaced with a shorter non-overlapping loop and a simple path leading
+ * to it.
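+ *
+ * As an illustrative sketch (\c graph and \c some_path are hypothetical), inserting the same
+ * open path twice and then regularizing leaves only one connected edge:
+ * \code{.cpp}
+ *     graph.insertEdge(some_path);
+ *     graph.insertEdge(some_path); // A fully coinciding duplicate.
+ *     graph.regularize();
+ *     // graph.numEdges(false) is now 1: the duplicate has been merged and detached.
+ * \endcode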
+ */
+template<typename EdgeLabel>
+void PlanarGraph<EdgeLabel>::regularize(double angle_precision, bool remove_collapsed_loops)
+{
+ for (auto it = _vertices.begin(); it != _vertices.end(); ++it) {
+ // Note: the list of vertices may grow during the execution of this loop,
+ // so don't replace it with a range-for (which stores the end iterator).
+ // We want the loop to carry on going over the elements it inserted.
+ if (it->_incidences.size() < 2) {
+ continue;
+ }
+ _regularizeVertex(*it, angle_precision, remove_collapsed_loops);
+ }
+ _purgeJunkIncidences();
+ _regularized = true;
+}
+
+/**
+ * \brief Analyze and regularize all edges emanating from a given vertex.
+ *
+ * This function goes through the list of incidences at the vertex (roughly sorted by
+ * azimuth, i.e., departure heading in radians), picking out runs of mutually tangent
+ * edges and calling _reglueTangentFan() on each run. The algorithm is quite complicated
+ * because the incidences have to be treated as a cyclic container and a run of mutually
+ * tangent edges may straddle the "end" of the list, including the possibility that the
+ * entire list is a single such run.
+ *
+ * \param vertex The vertex whose incidences should be analyzed.
+ * \param angle_precision The numerical epsilon for radian angle comparisons.
+ * \param deloop Whether loops that don't enclose any area should be detached.
+ */
+template<typename EdgeLabel>
+void PlanarGraph<EdgeLabel>::_regularizeVertex(typename PlanarGraph<EdgeLabel>::Vertex &vertex,
+ double angle_precision, bool deloop)
+{
+ auto &incidences = vertex._incidences;
+
+ /// Compare two polar angles in the interval [-π, π] modulo 2π to within angle_precision:
+ auto const angles_equal = [=](double az1, double az2) -> bool {
+ static double const twopi = 2.0 * M_PI;
+ return are_near(std::fmod(az1 + twopi, twopi), std::fmod(az2 + twopi, twopi),
+ angle_precision);
+ };
+
+ IncIt run_begin; // First element in the last discovered run of equal azimuths.
+
+ /// Find and reglue runs of nearly identical azimuths in the specified range.
+ auto const process_runs = [&](IncIt begin, IncIt end) -> bool
+ {
+ double current_azimuth = 42; // Invalid radian angle.
+ bool in_a_run = false;
+
+ for (auto it = begin; it != end; ++it) {
+ bool const equal = angles_equal(it->azimuth, current_azimuth);
+ if (equal && !in_a_run) {
+ run_begin = std::prev(it); // Save to enclosing scope.
+ in_a_run = true;
+ } else if (!equal && in_a_run) {
+ _reglueTangentFan(vertex, run_begin, std::prev(it), deloop);
+ in_a_run = false;
+ }
+ current_azimuth = it->azimuth;
+ }
+ return in_a_run;
+ };
+
+ double const last_azimuth = incidences.back().azimuth;
+
+ if (angles_equal(incidences.front().azimuth, last_azimuth)) {
+ // The cyclic list contains a run of equal azimuths which straddles the "end".
+ // This means that we must skip the part of this run on the "begin" side on the
+ // first pass and handle it once we've traversed the remainder of the list.
+
+ bool processed = false; ///< Whether we've cleared the straddling run.
+ double previous_azimuth = last_azimuth;
+ IncIt straddling_run_last;
+
+ for (auto it = incidences.begin(); it != incidences.end(); ++it) {
+ if (!angles_equal(it->azimuth, previous_azimuth)) {
+ straddling_run_last = std::prev(it);
+ process_runs(it, incidences.end());
+ processed = true;
+ break;
+ }
+ previous_azimuth = it->azimuth;
+ }
+ if (processed) {
+ // Find the first element of the straddling run.
+ auto it = std::prev(incidences.end());
+ while (angles_equal(it->azimuth, last_azimuth)) {
+ --it;
+ }
+ ++it; // Now we're at the start of the straddling run.
+ _reglueTangentFan(vertex, it, straddling_run_last, deloop);
+ } else {
+ // We never encountered anything outside of the straddling run: reglue everything.
+ _reglueTangentFan(vertex, incidences.begin(), std::prev(incidences.end()), deloop);
+ }
+ } else if (process_runs(incidences.begin(), incidences.end())) {
+ // Our run got rudely interrupted by the end of the container; reglue till the end.
+ _reglueTangentFan(vertex, run_begin, std::prev(incidences.end()), deloop);
+ }
+}
+
+/**
+ * \brief Regularize a fan of mutually tangent edges emanating from a vertex.
+ *
+ * This function compares the tangent edges pairwise and ensures that the sequence of their
+ * incidences to the vertex ends up being sorted by the ultimate direction in which the
+ * emanating edges fan out, in the counterclockwise order.
+ *
+ * If a partial or complete overlap between edges is detected, these edges are reglued.
+ *
+ * \param vertex The vertex from which the fan emanates.
+ * \param first An iterator pointing to the first incidence in the fan.
+ * \param last An iterator pointing to the last incidence in the fan.
+ * NOTE: This iterator must point to the actual last incidence, not "past" it.
+ * The reason is that we're iterating over a cyclic collection, so there
+ * isn't really a meaningful end.
+ * \param deloop Whether loops that don't enclose any area should be detached.
+ */
+template<typename EL>
+void PlanarGraph<EL>::_reglueTangentFan(typename PlanarGraph<EL>::Vertex &vertex,
+ typename PlanarGraph<EL>::IncIt const &first,
+ typename PlanarGraph<EL>::IncIt const &last, bool deloop)
+{
+ // Search all pairs (triangular pattern), skipping invalid incidences.
+ for (auto it = first; it != last; it = vertex.cyclicNextIncidence(it)) {
+ if (it->invalid) {
+ continue;
+ }
+ for (auto is = vertex.cyclicNextIncidence(it); true; is = vertex.cyclicNextIncidence(is)) {
+ if (!is->invalid && _compareAndReglue(vertex, &(*it), &(*is), deloop)) {
+ // Swap the incidences, effectively implementing "bubble sort".
+ std::swap(*it, *is);
+ }
+ if (is == last) {
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * \brief Compare a pair of edges emanating from the same vertex in the same direction.
+ *
+ * If the edges overlap in part or in full, they get reglued, which means that the topology
+ * of the graph may get modified. Otherwise, if the detailed comparison shows that the edges
+ * aren't correctly ordered around the vertex (because the second edge deviates to the right
+ * instead of to the left of the first, when looking away from the vertex), then the function
+ * will return true, signalling that the incidences should be swapped.
+ *
+ * \param vertex The vertex where the mutually tangent paths meet.
+ * \param first The incidence appearing as the first one in the provisional cyclic order.
+ * \param second The incidence appearing as the second one in the provisional cyclic order.
+ * \param deloop Whether to detach collapsed loops (backtracks) which don't enclose any area.
+ * \return Whether the incidences should be swapped.
+ */
+template<typename EL>
+bool PlanarGraph<EL>::_compareAndReglue(typename PlanarGraph<EL>::Vertex &vertex,
+ typename PlanarGraph<EL>::Incidence *first,
+ typename PlanarGraph<EL>::Incidence *second, bool deloop)
+{
+ if (first->index == second->index) {
+ return _reglueTeardrop(vertex, first, second, deloop);
+ }
+
+ // Get paths corresponding to the edges but travelling away from the vertex.
+ auto first_path_out = getOutgoingPath(first);
+ auto second_path_out = getOutgoingPath(second);
+ auto split = parting_point(first_path_out, second_path_out, _precision);
+
+ if (are_near(split.point(), vertex.point(), _precision)) {
+ // Paths deviate immediately, so no gluing is needed. The incidences should
+ // be swapped if the first edge path departs to the left of the second one.
+ return deviatesLeft(first_path_out, second_path_out);
+ }
+
+ // Determine the nature of the initial overlap between the paths.
+ bool const till_end_of_1st = are_near(split.point(), first_path_out.finalPoint(), _precision);
+ bool const till_end_of_2nd = are_near(split.point(), second_path_out.finalPoint(), _precision);
+
+ if (till_end_of_1st && till_end_of_2nd) { // Paths coincide completely.
+ _mergeCoincidingEdges(first, second);
+ } else if (till_end_of_1st) {
+ // Paths coincide until the end of the 1st one, which however isn't the end of the
+ // 2nd one; for example, the first one could be the vertical riser of the letter L
+ // whereas the second one – the entire letter stroke.
+ _mergeShorterLonger(vertex, first, second, split.second);
+ } else if (till_end_of_2nd) {
+        // The same, but with the second edge shorter than the first one.
+ _mergeShorterLonger(vertex, second, first, split.first);
+ } else { // A Y-shaped split.
+ _mergeWyeConfiguration(vertex, first, second, split);
+ }
+ return false; // We've glued so no need to swap anything.
+}
+
+/**
+ * \brief Analyze a loop path with a self-tangency at the attachment point (a teardrop).
+ *
+ * The following steps are taken:
+ * \li If the loop encloses zero area and \c deloop is true, the loop is detached.
+ * \li If the two arms of the loop split out immediately, the loop is left alone and we
+ * only check whether the incidences should be swapped.
+ * \li If the loop overlaps itself near the vertex, resembling a lasso, we split it into
+ * a shorter simple path and a smaller loop attached to the end of the shorter path.
+ *
+ * \param vertex The vertex at which the teardrop originates.
+ * \param first The first incidence of the loop to the vertex.
+ * \param second The second incidence of the loop to the vertex.
+ * \param deloop Whether the loop should be removed if it doesn't enclose any area
+ * (i.e., the path exactly backtracks on itself).
+ * \return Whether the two incidences of the loop to the vertex should be swapped.
+ */
+template<typename EL>
+bool PlanarGraph<EL>::_reglueTeardrop(typename PlanarGraph<EL>::Vertex &vertex,
+ typename PlanarGraph<EL>::Incidence *first,
+ typename PlanarGraph<EL>::Incidence *second, bool deloop)
+{
+ // Calculate the area enclosed by the teardrop.
+    // The convention is that the unit circle (cos(t), sin(t)), t from 0 to 2pi,
+ // encloses an area of +pi.
+ auto &edge = _edges[first->index];
+ Path loop = edge.path; loop.close();
+ double signed_area = closedPathArea(loop);
+
+ if (deloop && are_near(signed_area, 0.0, _precision)) {
+ edge.detach();
+ _throwAway(&vertex, first);
+ _throwAway(&vertex, second);
+ return false;
+ }
+
+ auto split = parting_point(loop, loop.reversed(), _precision);
+ if (are_near(split.point(), vertex.point(), _precision)) {
+ // The loop spreads out immediately. We simply check if the incidences should be swapped.
+ // We want them to be ordered such that the signed area encircled by the path going out
+ // at the first incidence and coming back at the second (with this orientation) is > 0.
+ return (first->sign == Incidence::START) ^ (signed_area > 0.0);
+ }
+
+ // The loop encloses a nonzero area, but the two branches don't separate at the starting
+ // point. Instead, they travel together for a while before they split like a lasso.
+ _reglueLasso(vertex, first, second, split);
+ return false;
+}
+
+/**
+ * \brief Reglue a lasso-shaped loop, separating it into the "free rope" and the "hoop".
+ *
+ * The lasso is an edge looping back to the same vertex, where the closed path encloses
+ * a non-zero area, but its two branches don't separate at the starting point. Instead,
+ * they travel together for a while (forming the doubled-up "free rope") before they
+ * split like a lasso. This function cuts the lasso at the split point:
+ * \code{.unparsed}
+ * ____ ____
+ * / \ / \
+ * VERTEX =====< | ==> VERTEX ------ NEW + NEW < |
+ * \____/ (lasso) (rope) \____/ (hoop)
+ *
+ * \endcode
+ *
+ * \param vertex A reference to the vertex where the lasso is attached.
+ * \param first The first incidence of the lasso to the vertex.
+ * \param second The second incidence of the lasso to the vertex.
+ * \param split The point where the free rope of the lasso ends and the hoop begins.
+ */
+template<typename EL>
+void PlanarGraph<EL>::_reglueLasso(typename PlanarGraph<EL>::Vertex &vertex,
+ typename PlanarGraph<EL>::Incidence *first,
+ typename PlanarGraph<EL>::Incidence *second,
+ PathIntersection const &split)
+{
+ unsigned lasso = first->index;
+
+ // Create the "free rope" path.
+ auto rope = _edges[lasso].path.portion(PATH_START, split.first);
+ rope.setInitial(vertex.point());
+ rope.setFinal(split.point());
+ double const rope_final_backward_azimuth = _getAzimuth(-rope.finalUnitTangent());
+
+ // Compute the new label of the rope edge.
+ auto oriented_as_loop = _edges[lasso].label;
+ auto reversed = oriented_as_loop; reversed.onReverse();
+ oriented_as_loop.onMergeWith(reversed);
+
+ // Insert the rope and its endpoint.
+ unsigned const rope_index = _edges.size();
+ auto &rope_edge = _edges.emplace_back(std::move(rope), std::move(oriented_as_loop));
+ auto const new_split_vertex = _ensureVertexAt(split.point());
+
+ // Reuse lasso's first incidence as the incidence to the rope (azimuth can stay).
+ first->index = rope_index;
+ first->sign = Incidence::START;
+
+ // Connect the rope to the newly created split vertex.
+ new_split_vertex->_addIncidence(rope_index, rope_final_backward_azimuth, Incidence::END);
+ rope_edge.start = &vertex;
+ rope_edge.end = new_split_vertex;
+
+ // Insert the hoop
+ auto hoop = _edges[lasso].path.portion(split.first,
+ _reversePathTime(split.second, _edges[lasso].path));
+ hoop.setInitial(split.point());
+ hoop.setFinal(split.point());
+ insertEdge(std::move(hoop), EL(_edges[lasso].label));
+
+ // Detach the original lasso edge and mark the second incidence for cleanup.
+ _edges[lasso].detach();
+ _throwAway(&vertex, second);
+}
+
+/**
+ * \brief Completely coalesce two fully overlapping edges.
+ *
+ * In practice, the first edge stays and the second one gets detached from the graph.
+ *
+ * \param first An iterator to the first edge's incidence to a common vertex.
+ * \param second An iterator to the second edge's incidence to a common vertex.
+ */
+template<typename EL>
+void PlanarGraph<EL>::_mergeCoincidingEdges(typename PlanarGraph<EL>::Incidence *first,
+ typename PlanarGraph<EL>::Incidence *second)
+{
+    auto &survivor = _edges[first->index];
+ auto &casualty = _edges[second->index];
+
+ auto other_label = casualty.label;
+ if (first->sign != second->sign) { // Logically reverse the label before merging.
+ other_label.onReverse();
+ }
+    survivor.label.onMergeWith(other_label);
+
+ // Mark both incidences of the second edge as junk and detach it.
+ auto [start_vertex, start_inc] = getIncidence(second->index, Incidence::START);
+ _throwAway(start_vertex, start_inc);
+ auto [end_vertex, end_inc] = getIncidence(second->index, Incidence::END);
+ _throwAway(end_vertex, end_inc);
+ casualty.detach();
+}
+
+/**
+ * \brief Merge a longer edge with a shorter edge that overlaps it.
+ *
+ * In practice, the shorter edge remains unchanged and the longer one is trimmed to
+ * become just the part extending past the shorter one.
+ *
+ * \param vertex The vertex where the overlap starts.
+ * \param shorter The incidence of the shorter edge to the common vertex.
+ * \param longer The incidence of the longer edge to the common vertex.
+ * \param time_on_longer The PathTime on the longer edge at which it passes through
+ * the endpoint of the shorter edge.
+ */
+template<typename EL>
+void PlanarGraph<EL>::_mergeShorterLonger(typename PlanarGraph<EL>::Vertex &vertex,
+ typename PlanarGraph<EL>::Incidence *shorter,
+ typename PlanarGraph<EL>::Incidence *longer,
+ PathTime const &time_on_longer)
+{
+ auto &shorter_edge = _edges[shorter->index];
+ auto &longer_edge = _edges[longer->index];
+
+ // Get the vertices at the far ends of both edges.
+ auto shorter_far_end = (shorter->sign == Incidence::START) ? shorter_edge.end
+ : shorter_edge.start;
+ /// Whether the longer edge heads out of the vertex.
+ bool const longer_out = (longer->sign == Incidence::START);
+ auto longer_far_end = longer_out ? longer_edge.end : longer_edge.start;
+
+ // Copy the longer edge's label and merge with that of the shorter.
+ auto longer_label = longer_edge.label;
+ if (shorter->sign != longer->sign) {
+ longer_label.onReverse();
+ }
+ shorter_edge.label.onMergeWith(longer_label);
+
+ // Create the trimmed path (longer minus shorter).
+ Path trimmed;
+ double trimmed_departure_azimuth;
+ if (longer_out) {
+ trimmed = longer_edge.path.portion(time_on_longer, _pathEnd(longer_edge.path));
+ longer_edge.start = shorter_far_end;
+ trimmed.setInitial(shorter_far_end->point());
+ trimmed.setFinal(longer_far_end->point());
+ trimmed_departure_azimuth = _getAzimuth(trimmed.initialUnitTangent());
+ } else {
+ trimmed = longer_edge.path.portion(PATH_START, _reversePathTime(time_on_longer,
+ longer_edge.path));
+ longer_edge.end = shorter_far_end;
+ trimmed.setInitial(longer_far_end->point());
+ trimmed.setFinal(shorter_far_end->point());
+ trimmed_departure_azimuth = _getAzimuth(-trimmed.finalUnitTangent());
+ }
+
+ // Set the trimmed path as the new path of the longer edge and set up the incidences:
+ longer_edge.path = std::move(trimmed);
+ shorter_far_end->_addIncidence(longer->index, trimmed_departure_azimuth, longer->sign);
+
+ // Throw away the old start incidence of the longer edge.
+ _throwAway(&vertex, longer);
+}
+
+/**
+ * \brief Merge a pair of partially overlapping edges, producing a Y-split at a new vertex.
+ *
+ * This topological modification is performed by inserting a new vertex at the three-way
+ * point (where the two paths separate) and clipping the original edges to that point.
+ * In this way, the original edges become the "arms" of the Y-shape. In addition, a new
+ * edge is inserted, forming the "stem" of the Y.
+ *
+ * \param vertex The vertex from which the partially overlapping edges originate (bottom of Y).
+ * \param first The incidence to the first edge (whose path is the stem and one arm of the Y).
+ * \param second The incidence to the second edge (stem and the other arm of the Y).
+ * \param fork The splitting point of the two paths.
+ */
+template<typename EL>
+void PlanarGraph<EL>::_mergeWyeConfiguration(typename PlanarGraph<EL>::Vertex &vertex,
+ typename PlanarGraph<EL>::Incidence *first,
+ typename PlanarGraph<EL>::Incidence *second,
+ PathIntersection const &fork)
+{
+ bool const first_is_out = (first->sign == Incidence::START);
+ bool const second_is_out = (second->sign == Incidence::START);
+
+ auto &first_edge = _edges[first->index];
+ auto &second_edge = _edges[second->index];
+
+ // Calculate the path forming the stem of the Y:
+ auto stem_path = getOutgoingPath(first).portion(PATH_START, fork.first);
+ stem_path.setInitial(vertex.point());
+ stem_path.setFinal(fork.point());
+
+ /// A closure to clip the path of an original edge to the fork point.
+ auto const clip_to_fork = [&](PathTime const &t, Edge &e, bool out) {
+ if (out) { // Trim from time to end
+ e.path = e.path.portion(t, _pathEnd(e.path));
+ e.path.setInitial(fork.point());
+ } else { // Trim from reverse-end to reverse-time
+ e.path = e.path.portion(PATH_START, _reversePathTime(t, e.path));
+ e.path.setFinal(fork.point());
+ }
+ };
+
+ /// A closure to find the departing azimuth of an edge at the fork point.
+ auto const departing_azimuth = [&](Edge const &e, bool out) -> double {
+ return _getAzimuth((out) ? e.path.initialUnitTangent()
+ : -e.path.finalUnitTangent());
+ };
+
+ // Clip the paths obtaining the arms of the Y.
+ clip_to_fork(fork.first, first_edge, first_is_out);
+ clip_to_fork(fork.second, second_edge, second_is_out);
+
+ // Create the fork vertex and set up its incidences.
+ auto const fork_vertex = _ensureVertexAt(fork.point());
+ fork_vertex->_addIncidence(first->index, departing_azimuth(first_edge, first_is_out),
+ first->sign);
+ fork_vertex->_addIncidence(second->index, departing_azimuth(second_edge, second_is_out),
+ second->sign);
+
+ // Repoint the ends of the edges that were clipped
+ (first_is_out ? first_edge.start : first_edge.end) = fork_vertex;
+ (second_is_out ? second_edge.start : second_edge.end) = fork_vertex;
+
+ /// A closure to get a consistently oriented label of an edge.
+ auto upwards_oriented_label = [&](Edge const &e, bool out) -> EL {
+ auto label = e.label;
+ if (!out) {
+ label.onReverse();
+ }
+ return label;
+ };
+
+ auto stem_label = upwards_oriented_label(first_edge, first_is_out);
+ stem_label.onMergeWith(upwards_oriented_label(second_edge, second_is_out));
+ auto stem_departure_from_fork = _getAzimuth(-stem_path.finalUnitTangent());
+
+ // Insert the stem of the Y-configuration.
+ unsigned const stem_index = _edges.size();
+ auto &stem_edge = _edges.emplace_back(std::move(stem_path), std::move(stem_label));
+ stem_edge.start = &vertex;
+ stem_edge.end = fork_vertex;
+
+ // Set up the incidences.
+ fork_vertex->_addIncidence(stem_index, stem_departure_from_fork, Incidence::END);
+ first->index = stem_index;
+ first->sign = Incidence::START;
+ _throwAway(&vertex, second);
+}
+
+template<typename EL>
+typename PlanarGraph<EL>::Incidence*
+PlanarGraph<EL>::nextIncidence(typename PlanarGraph<EL>::VertexIterator const &vertex,
+ double azimuth, bool clockwise) const
+{
+    auto &incidences = vertex->_incidences;
+ Incidence *result = nullptr;
+
+ if (incidences.empty()) {
+ return result;
+ }
+ // Normalize azimuth to the interval [-pi; pi].
+ auto angle = Angle(azimuth);
+
+ if (clockwise) { // Go backwards and find a lower bound
+ auto it = std::find_if(incidences.rbegin(), incidences.rend(), [=](auto inc) -> bool {
+ return inc.azimuth <= angle;
+ });
+ if (it == incidences.rend()) {
+ // azimuth is lower than the azimuths of all incidences;
+ // going clockwise we wrap back to the highest azimuth (last incidence).
+ return &incidences.back();
+ }
+ result = &(*it);
+ } else {
+        auto it = std::find_if(incidences.begin(), incidences.end(), [=](auto inc) -> bool {
+ return inc.azimuth >= angle;
+ });
+ if (it == incidences.end()) {
+ // azimuth is higher than the azimuths of all incidences;
+ // going counterclockwise we wrap back to the lowest azimuth.
+ return &incidences.front();
+ }
+ result = &(*it);
+ }
+ return result;
+}
+
+/** Return the signed area enclosed by a closed path. */
+template<typename EL>
+double PlanarGraph<EL>::closedPathArea(Path const &path)
+{
+ double area;
+ Point _;
+ centroid(path.toPwSb(), _, area);
+ return -area; // Our convention is that the Y-axis points up
+}
+
+/** \brief Determine whether the first path deviates to the left of the second.
+ *
+ * The two paths are assumed to have identical or nearly identical starting points
+ * but not an overlapping initial portion. The concept of "left" is based on the
+ * y-axis pointing up.
+ *
+ * \param first The first path.
+ * \param second The second path.
+ *
+ * \return True if the first path deviates towards the left of the second;
+ * False if the first path deviates towards the right of the second.
+ */
+template<typename EL>
+bool PlanarGraph<EL>::deviatesLeft(Path const &first, Path const &second)
+{
+ auto start = middle_point(first.initialPoint(), second.initialPoint());
+ auto tangent_between = middle_point(first.initialUnitTangent(), second.initialUnitTangent());
+ if (tangent_between.isZero()) {
+ return false;
+ }
+ auto tangent_line = Line::from_origin_and_vector(start, tangent_between);
+
+ // Find the first non-degenerate curves on both paths
+ std::unique_ptr<Curve> c[2];
+ auto const find_first_nondegen = [](std::unique_ptr<Curve> &pointer, Path const &path) {
+ for (auto const &c : path) {
+ if (!c.isDegenerate()) {
+ pointer.reset(c.duplicate());
+ return;
+ }
+ }
+ };
+
+ find_first_nondegen(c[0], first);
+ find_first_nondegen(c[1], second);
+ if (!c[0] || !c[1]) {
+ return false;
+ }
+
+ // Find the bounding boxes
+ Rect const bounding_boxes[] {
+ c[0]->boundsExact(),
+ c[1]->boundsExact()
+ };
+
+ // For a bounding box, find the corner that goes the furthest in the direction of the
+ // tangent vector.
+ auto const furthest_corner = [&](Rect const &r) -> unsigned {
+ Coord max_dot = dot(r.corner(0) - start, tangent_between);
+ unsigned result = 0;
+ for (unsigned i = 1; i < 4; i++) {
+            auto current_dot = dot(r.corner(i) - start, tangent_between);
+ if (current_dot > max_dot) {
+ max_dot = current_dot;
+ result = i;
+ } else if (current_dot == max_dot) {
+ // Disambiguate based on proximity to the tangent line.
+ auto const offset = start + tangent_between;
+ if (distance(offset, r.corner(i)) < distance(offset, r.corner(result))) {
+ result = i;
+ }
+ }
+ }
+ return result;
+ };
+
+ // Calculate the corner points overlooking the "rift" between the paths.
+ Point corner_points[2];
+ for (size_t i : {0, 1}) {
+ corner_points[i] = bounding_boxes[i].corner(furthest_corner(bounding_boxes[i]));
+ }
+
+ // Find a vantage point from which we can best observe the splitting paths.
+ Point vantage_point;
+ bool found = false;
+ if (corner_points[0] != corner_points[1]) {
+ auto line_connecting_corners = Line(corner_points[0], corner_points[1]);
+ auto xing = line_connecting_corners.intersect(tangent_line);
+ if (!xing.empty()) {
+ vantage_point = xing[0].point();
+ found = true;
+ }
+ }
+ if (!found) {
+ vantage_point = tangent_line.pointAt(tangent_line.timeAtProjection(corner_points[0]));
+ }
+
+ // Move to twice as far in the direction of the vantage point.
+ vantage_point += vantage_point - start;
+
+ // Find the points on both curves that are nearest to the vantage point.
+ Coord nearest[2];
+ for (size_t i : {0, 1}) {
+ nearest[i] = c[i]->nearestTime(vantage_point);
+ }
+
+ // Clip to the nearest points and examine the closed contour.
+ Path closed_contour(start);
+ closed_contour.setStitching(true);
+ closed_contour.append(c[0]->portion(0, nearest[0]));
+ closed_contour = closed_contour.reversed();
+ closed_contour.setStitching(true);
+ closed_contour.append(c[1]->portion(0, nearest[1]));
+ closed_contour.close();
+ return !path_direction(closed_contour); // Reverse to match the convention that y-axis is up.
+}
+
+} // namespace Geom
+
+#endif // LIB2GEOM_SEEN_PLANAR_GRAPH_H
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/point.cpp b/src/2geom/point.cpp
new file mode 100644
index 0000000..cbe53c4
--- /dev/null
+++ b/src/2geom/point.cpp
@@ -0,0 +1,274 @@
+/**
+ * \file
+ * \brief Cartesian point / 2D vector and related operations
+ *//*
+ * Authors:
+ * Michael G. Sloan <mgsloan@gmail.com>
+ * Nathan Hurst <njh@njhurst.com>
+ * Krzysztof Kosiński <tweenk.pl@gmail.com>
+ *
+ * Copyright (C) 2006-2009 Authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+#include <assert.h>
+#include <math.h>
+#include <2geom/angle.h>
+#include <2geom/coord.h>
+#include <2geom/point.h>
+#include <2geom/transforms.h>
+
+namespace Geom {
+
+/**
+ * @class Point
+ * @brief Two-dimensional point that doubles as a vector.
+ *
+ * Points in 2Geom are represented in Cartesian coordinates, e.g. as a pair of numbers
+ * that store the X and Y coordinates. Each point is also a vector in \f$\mathbb{R}^2\f$
+ * from the origin (point at 0,0) to the stored coordinates,
+ * and has methods implementing several vector operations (like length()).
+ *
+ * @section OpNotePoint Operator note
+ *
+ * Most operators are provided by Boost operator helpers, so they are not visible in this class.
+ * If @a p, @a q, @a r denote points, @a s a floating-point scalar, and @a m a transformation matrix,
+ * then the following operations are available:
+ * @code
+ p += q; p -= q; r = p + q; r = p - q;
+ p *= s; p /= s; q = p * s; q = s * p; q = p / s;
+ p *= m; q = p * m; q = m * p;
+ @endcode
+ * It is possible to left-multiply a point by a matrix, even though mathematically speaking
+ * this is undefined. The result is a point identical to that obtained by right-multiplying.
+ *
+ * @ingroup Primitives */
+
+Point Point::polar(Coord angle) {
+ Point ret;
+ Coord remainder = Angle(angle).radians0();
+ if (are_near(remainder, 0) || are_near(remainder, 2*M_PI)) {
+ ret[X] = 1;
+ ret[Y] = 0;
+ } else if (are_near(remainder, M_PI/2)) {
+ ret[X] = 0;
+ ret[Y] = 1;
+ } else if (are_near(remainder, M_PI)) {
+ ret[X] = -1;
+ ret[Y] = 0;
+ } else if (are_near(remainder, 3*M_PI/2)) {
+ ret[X] = 0;
+ ret[Y] = -1;
+ } else {
+ sincos(angle, ret[Y], ret[X]);
+ }
+ return ret;
+}
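+
+/* Usage sketch (illustrative only; variable names are not part of the library):
+ * @code
+ * Geom::Point e1 = Geom::Point::polar(0); // exactly (1, 0)
+ * Geom::Point e2 = Geom::Point::polar(M_PI/2); // exactly (0, 1)
+ * Geom::Point p = Geom::Point::polar(0.3); // (cos 0.3, sin 0.3)
+ * @endcode
+ * The cardinal directions are special-cased above so that they come out exact
+ * rather than merely close to exact.
+ */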
+
+/** @brief Normalize the vector representing the point.
+ * After this method returns, the length of the vector will be 1 (unless both coordinates are
+ * zero - the zero point will be returned then). The function tries to handle infinite
+ * coordinates gracefully. If any of the coordinates are NaN, the function will do nothing.
+ * @post \f$-\epsilon < \left|this\right| - 1 < \epsilon\f$
+ * @see unit_vector(Geom::Point const &) */
+void Point::normalize() {
+ double len = hypot(_pt[0], _pt[1]);
+ if(len == 0) return;
+ if(std::isnan(len)) return;
+ static double const inf = HUGE_VAL;
+ if(len != inf) {
+ *this /= len;
+ } else {
+ unsigned n_inf_coords = 0;
+ /* Delay updating pt in case neither coord is infinite. */
+ Point tmp;
+ for ( unsigned i = 0 ; i < 2 ; ++i ) {
+ if ( _pt[i] == inf ) {
+ ++n_inf_coords;
+ tmp[i] = 1.0;
+ } else if ( _pt[i] == -inf ) {
+ ++n_inf_coords;
+ tmp[i] = -1.0;
+ } else {
+ tmp[i] = 0.0;
+ }
+ }
+ switch (n_inf_coords) {
+ case 0: {
+ /* Can happen if both coords are near +/-DBL_MAX. */
+ *this /= 4.0;
+ len = hypot(_pt[0], _pt[1]);
+ assert(len != inf);
+ *this /= len;
+ break;
+ }
+ case 1: {
+ *this = tmp;
+ break;
+ }
+ case 2: {
+ *this = tmp * sqrt(0.5);
+ break;
+ }
+ }
+ }
+}
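+
+/* Usage sketch (illustrative only; names are hypothetical):
+ * @code
+ * Geom::Point p(HUGE_VAL, 3.0);
+ * p.normalize(); // p becomes (1, 0): a single infinite coordinate dominates
+ * Geom::Point q(3.0, -4.0);
+ * q.normalize(); // q becomes (0.6, -0.8)
+ * @endcode
+ */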
+
+/** @brief Compute the first norm (Manhattan distance) of @a p.
+ * This is equal to the sum of the absolute values of the coordinates.
+ * @return \f$|p_X| + |p_Y|\f$
+ * @relates Point */
+Coord L1(Point const &p) {
+ Coord d = 0;
+ for ( int i = 0 ; i < 2 ; i++ ) {
+ d += fabs(p[i]);
+ }
+ return d;
+}
+
+/** @brief Compute the infinity norm (maximum norm) of @a p.
+ * @return \f$\max(|p_X|, |p_Y|)\f$
+ * @relates Point */
+Coord LInfty(Point const &p) {
+ Coord const a(fabs(p[0]));
+ Coord const b(fabs(p[1]));
+ return ( a < b || std::isnan(b)
+ ? b
+ : a );
+}
+
+/** @brief True if the point has both coordinates zero.
+ * NaNs are treated as not equal to zero.
+ * @relates Point */
+bool is_zero(Point const &p) {
+ return ( p[0] == 0 &&
+ p[1] == 0 );
+}
+
+/** @brief True if the point has a length near 1. The are_near() function is used.
+ * @relates Point */
+bool is_unit_vector(Point const &p, Coord eps) {
+ return are_near(L2(p), 1.0, eps);
+}
+/** @brief Return the angle between the point and the +X axis.
+ * @return Angle in \f$(-\pi, \pi]\f$.
+ * @relates Point */
+Coord atan2(Point const &p) {
+ return std::atan2(p[Y], p[X]);
+}
+
+/** @brief Compute the angle between a and b relative to the origin.
+ * The computation is done by projecting b onto the basis defined by a, rot90(a).
+ * @return Angle in \f$(-\pi, \pi]\f$.
+ * @relates Point */
+Coord angle_between(Point const &a, Point const &b) {
+ return std::atan2(cross(a,b), dot(a,b));
+}
+
+/** @brief Create a normalized version of a point.
+ * This is equivalent to copying the point and calling its normalize() method.
+ * The returned point will be (0,0) if the argument has both coordinates equal to zero.
+ * If any coordinate is NaN, this function will do nothing.
+ * @param a Input point
+ * @return Point on the unit circle in the same direction from origin as a, or the origin
+ * if a has both coordinates equal to zero
+ * @relates Point */
+Point unit_vector(Point const &a)
+{
+ Point ret(a);
+ ret.normalize();
+ return ret;
+}
+/** @brief Return the "absolute value" of the point's vector.
+ * This is defined in terms of the default lexicographical ordering. If the point is "smaller"
+ * than the origin (0, 0), its negation is returned. You can check whether
+ * the points' vectors have the same direction (i.e. lie
+ * on the same line passing through the origin) using
+ * @code abs(a).normalize() == abs(b).normalize() @endcode
+ * To check with some margin of error, use
+ * @code are_near(abs(a).normalize(), abs(b).normalize()) @endcode
+ * Although naively this should take the absolute value of each coordinate, such an operation
+ * is not very useful.
+ * @relates Point */
+Point abs(Point const &b)
+{
+ Point ret;
+ if (b[Y] < 0.0) {
+ ret = -b;
+ } else if (b[Y] == 0.0) {
+ ret = b[X] < 0.0 ? -b : b;
+ } else {
+ ret = b;
+ }
+ return ret;
+}
+
+/** @brief Transform the point by the specified matrix. */
+Point &Point::operator*=(Affine const &m) {
+ double x = _pt[X], y = _pt[Y];
+ for(int i = 0; i < 2; i++) {
+ _pt[i] = x * m[i] + y * m[i + 2] + m[i + 4];
+ }
+ return *this;
+}
+
+/** @brief Snap the angle B - A - dir to multiples of \f$2\pi/n\f$.
+ * The 'dir' argument must be normalized (have unit length), otherwise the result
+ * is undefined.
+ * @return Point with the same distance from A as B, with a snapped angle.
+ * @post distance(A, B) == distance(A, result)
+ * @post angle_between(result - A, dir) == \f$2k\pi/n, k \in \mathbb{N}\f$
+ * @relates Point */
+Point constrain_angle(Point const &A, Point const &B, unsigned int n, Point const &dir)
+{
+ // for special cases we could perhaps use explicit testing (which might be faster)
+ if (n == 0) {
+ return B;
+ }
+ Point diff(B - A);
+ double angle = -angle_between(diff, dir);
+ double k = round(angle * (double)n / (2.0*M_PI));
+ return A + dir * Rotate(k * 2.0 * M_PI / (double)n) * L2(diff);
+}
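+
+/* Usage sketch (illustrative only; names are hypothetical):
+ * @code
+ * Geom::Point A(0, 0), B(10, 1);
+ * Geom::Point snapped = Geom::constrain_angle(A, B, 8, Geom::Point(1, 0));
+ * // snapped lies at distance(A, B) from A, in the multiple of 45 degrees
+ * // (2*pi/8) closest to the direction A -> B; here that is the +X axis.
+ * @endcode
+ */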
+
+std::ostream &operator<<(std::ostream &out, const Geom::Point &p)
+{
+ out << "(" << format_coord_nice(p[X]) << ", "
+ << format_coord_nice(p[Y]) << ")";
+ return out;
+}
+
+} // end namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/polynomial.cpp b/src/2geom/polynomial.cpp
new file mode 100644
index 0000000..9737bd0
--- /dev/null
+++ b/src/2geom/polynomial.cpp
@@ -0,0 +1,337 @@
+/**
+ * \file
+ * \brief Polynomial in canonical (monomial) basis
+ *//*
+ * Authors:
+ * MenTaLguY <mental@rydia.net>
+ * Krzysztof Kosiński <tweenk.pl@gmail.com>
+ *
+ * Copyright 2007-2015 Authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+#include <algorithm>
+#include <2geom/polynomial.h>
+#include <2geom/math-utils.h>
+#include <math.h>
+
+#ifdef HAVE_GSL
+#include <gsl/gsl_poly.h>
+#endif
+
+namespace Geom {
+
+#ifndef M_PI
+# define M_PI 3.14159265358979323846
+#endif
+
+Poly Poly::operator*(const Poly& p) const {
+ Poly result;
+ result.resize(degree() + p.degree()+1);
+
+ for(unsigned i = 0; i < size(); i++) {
+ for(unsigned j = 0; j < p.size(); j++) {
+ result[i+j] += (*this)[i] * p[j];
+ }
+ }
+ return result;
+}
+
+/*double Poly::eval(double x) const {
+ return gsl_poly_eval(&coeff[0], size(), x);
+ }*/
+
+void Poly::normalize() {
+ // Guard against an all-zero polynomial, which would otherwise call back()
+ // on an empty vector.
+ while(!empty() && back() == 0)
+ pop_back();
+}
+
+void Poly::monicify() {
+ normalize();
+
+ double scale = 1./back(); // unitize
+
+ for(unsigned i = 0; i < size(); i++) {
+ (*this)[i] *= scale;
+ }
+}
+
+
+#ifdef HAVE_GSL
+std::vector<std::complex<double> > solve(Poly const & pp) {
+ Poly p(pp);
+ p.normalize();
+ gsl_poly_complex_workspace * w
+ = gsl_poly_complex_workspace_alloc (p.size());
+
+ gsl_complex_packed_ptr z = new double[p.degree()*2];
+ double* a = new double[p.size()];
+ for(unsigned int i = 0; i < p.size(); i++)
+ a[i] = p[i];
+ std::vector<std::complex<double> > roots;
+ //roots.resize(p.degree());
+
+ gsl_poly_complex_solve (a, p.size(), w, z);
+ delete[]a;
+
+ gsl_poly_complex_workspace_free (w);
+
+ for (unsigned int i = 0; i < p.degree(); i++) {
+ roots.emplace_back(z[2*i] ,z[2*i+1]);
+ //printf ("z%d = %+.18f %+.18f\n", i, z[2*i], z[2*i+1]);
+ }
+ delete[] z;
+ return roots;
+}
+
+std::vector<double > solve_reals(Poly const & p) {
+ std::vector<std::complex<double> > roots = solve(p);
+ std::vector<double> real_roots;
+
+ for(auto & root : roots) {
+ if(root.imag() == 0) // should be more lenient perhaps
+ real_roots.push_back(root.real());
+ }
+ return real_roots;
+}
+#endif
+
+double polish_root(Poly const & p, double guess, double tol) {
+ Poly dp = derivative(p);
+
+ double fn = p(guess);
+ while(fabs(fn) > tol) {
+ guess -= fn/dp(guess);
+ fn = p(guess);
+ }
+ return guess;
+}
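+
+/* Usage sketch (illustrative only): Newton-polishing a root of x^2 - 2.
+ * Coefficients are stored lowest degree first, as in Poly::operator* above,
+ * and the loop assumes the initial guess is close enough to converge (there
+ * is no iteration cap).
+ * @code
+ * Geom::Poly p;
+ * p.push_back(-2); p.push_back(0); p.push_back(1); // -2 + 0*x + x^2
+ * double r = Geom::polish_root(p, 1.5, 1e-12); // converges to sqrt(2)
+ * @endcode
+ */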
+
+Poly integral(Poly const & p) {
+ Poly result;
+
+ result.reserve(p.size()+1);
+ result.push_back(0); // arbitrary const
+ for(unsigned i = 0; i < p.size(); i++) {
+ result.push_back(p[i]/(i+1));
+ }
+ return result;
+
+}
+
+Poly derivative(Poly const & p) {
+ Poly result;
+
+ if(p.size() <= 1)
+ return Poly(0);
+ result.reserve(p.size()-1);
+ for(unsigned i = 1; i < p.size(); i++) {
+ result.push_back(i*p[i]);
+ }
+ return result;
+}
+
+Poly compose(Poly const & a, Poly const & b) {
+ Poly result;
+
+ for(unsigned i = a.size(); i > 0; i--) {
+ result = Poly(a[i-1]) + result * b;
+ }
+ return result;
+
+}
+
+/* This version is backwards - dividing taylor terms
+Poly divide(Poly const &a, Poly const &b, Poly &r) {
+ Poly c;
+ r = a; // remainder
+
+ const unsigned k = a.size();
+ r.resize(k, 0);
+ c.resize(k, 0);
+
+ for(unsigned i = 0; i < k; i++) {
+ double ci = r[i]/b[0];
+ c[i] += ci;
+ Poly bb = ci*b;
+ std::cout << ci <<"*" << b << ", r= " << r << std::endl;
+ r -= bb.shifted(i);
+ }
+
+ return c;
+}
+*/
+
+Poly divide(Poly const &a, Poly const &b, Poly &r) {
+ Poly c;
+ r = a; // remainder
+ assert(b.size() > 0);
+
+ const unsigned k = a.degree();
+ const unsigned l = b.degree();
+ c.resize(k, 0.);
+
+ for(unsigned i = k; i >= l; i--) {
+ //assert(i >= 0);
+ double ci = r.back()/b.back();
+ c[i-l] += ci;
+ Poly bb = ci*b;
+ //std::cout << ci <<"*(" << b.shifted(i-l) << ") = "
+ // << bb.shifted(i-l) << " r= " << r << std::endl;
+ r -= bb.shifted(i-l);
+ r.pop_back();
+ }
+ //std::cout << "r= " << r << std::endl;
+ r.normalize();
+ c.normalize();
+
+ return c;
+}
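+
+/* Usage sketch (illustrative only): dividing x^2 - 1 by x - 1.
+ * Coefficients are stored lowest degree first.
+ * @code
+ * Geom::Poly a, b, r;
+ * a.push_back(-1); a.push_back(0); a.push_back(1); // x^2 - 1
+ * b.push_back(-1); b.push_back(1); // x - 1
+ * Geom::Poly q = Geom::divide(a, b, r); // q is 1 + x, r is the zero remainder
+ * @endcode
+ */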
+
+Poly gcd(Poly const &a, Poly const &b, const double /*tol*/) {
+ if(a.size() < b.size())
+ return gcd(b, a);
+ if(b.size() <= 0)
+ return a;
+ if(b.size() == 1)
+ return a;
+ Poly r;
+ divide(a, b, r);
+ return gcd(b, r);
+}
+
+
+
+
+std::vector<Coord> solve_quadratic(Coord a, Coord b, Coord c)
+{
+ std::vector<Coord> result;
+
+ if (a == 0) {
+ // linear equation
+ if (b == 0) return result;
+ result.push_back(-c/b);
+ return result;
+ }
+
+ Coord delta = b*b - 4*a*c;
+
+ if (delta == 0) {
+ // one root
+ result.push_back(-b / (2*a));
+ } else if (delta > 0) {
+ // two roots
+ Coord delta_sqrt = sqrt(delta);
+
+ // Use different formulas depending on sign of b to preserve
+ // numerical stability. See e.g.:
+ // http://people.csail.mit.edu/bkph/articles/Quadratics.pdf
+ int sign = b >= 0 ? 1 : -1;
+ Coord t = -0.5 * (b + sign * delta_sqrt);
+ result.push_back(t / a);
+ result.push_back(c / t);
+ }
+ // no roots otherwise
+
+ std::sort(result.begin(), result.end());
+ return result;
+}
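+
+/* Usage sketch (illustrative only):
+ * @code
+ * std::vector<Geom::Coord> r = Geom::solve_quadratic(1, -5, 6);
+ * // r == {2, 3}; computing one root as t/a and the other as c/t avoids the
+ * // catastrophic cancellation of the textbook formula when b*b >> 4*a*c.
+ * @endcode
+ */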
+
+
+std::vector<Coord> solve_cubic(Coord a, Coord b, Coord c, Coord d)
+{
+ // based on:
+ // http://mathworld.wolfram.com/CubicFormula.html
+
+ if (a == 0) {
+ return solve_quadratic(b, c, d);
+ }
+ if (d == 0) {
+ // divide by x
+ std::vector<Coord> result = solve_quadratic(a, b, c);
+ result.push_back(0);
+ std::sort(result.begin(), result.end());
+ return result;
+ }
+
+ std::vector<Coord> result;
+
+ // 1. divide everything by a to bring to canonical form
+ b /= a;
+ c /= a;
+ d /= a;
+
+ // 2. eliminate x^2 term: x^3 + 3Qx - 2R = 0
+ Coord Q = (3*c - b*b) / 9;
+ Coord R = (-27 * d + b * (9*c - 2*b*b)) / 54;
+
+ // 3. compute polynomial discriminant
+ Coord D = Q*Q*Q + R*R;
+ Coord term1 = b/3;
+
+ if (D > 0) {
+ // only one real root
+ Coord S = cbrt(R + sqrt(D));
+ Coord T = cbrt(R - sqrt(D));
+ result.push_back(-b/3 + S + T);
+ } else if (D == 0) {
+ // 3 real roots, 2 of which are equal
+ Coord rroot = cbrt(R);
+ result.reserve(3);
+ result.push_back(-term1 + 2*rroot);
+ result.push_back(-term1 - rroot);
+ result.push_back(-term1 - rroot);
+ } else {
+ // 3 distinct real roots
+ assert(Q < 0);
+ Coord theta = acos(R / sqrt(-Q*Q*Q));
+ Coord rroot = 2 * sqrt(-Q);
+ result.reserve(3);
+ result.push_back(-term1 + rroot * cos(theta / 3));
+ result.push_back(-term1 + rroot * cos((theta + 2*M_PI) / 3));
+ result.push_back(-term1 + rroot * cos((theta + 4*M_PI) / 3));
+ }
+
+ std::sort(result.begin(), result.end());
+ return result;
+}
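+
+/* Usage sketch (illustrative only):
+ * @code
+ * std::vector<Geom::Coord> r = Geom::solve_cubic(1, -6, 11, -6);
+ * // x^3 - 6x^2 + 11x - 6 = (x-1)(x-2)(x-3), so r == {1, 2, 3}
+ * // (up to rounding; this input takes the D < 0 branch).
+ * @endcode
+ */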
+
+
+/*Poly divide_out_root(Poly const & p, double x) {
+ assert(1);
+ }*/
+
+} //namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/rect.cpp b/src/2geom/rect.cpp
new file mode 100644
index 0000000..60dcc87
--- /dev/null
+++ b/src/2geom/rect.cpp
@@ -0,0 +1,187 @@
+/* Axis-aligned rectangle
+ *
+ * Authors:
+ * Michael Sloan <mgsloan@gmail.com>
+ * Krzysztof Kosiński <tweenk.pl@gmail.com>
+ * Copyright 2007-2011 Authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+#include <2geom/rect.h>
+#include <2geom/transforms.h>
+
+namespace Geom {
+
+Point align_factors(Align g) {
+ Point p;
+ switch (g) {
+ case ALIGN_XMIN_YMIN:
+ p[X] = 0.0;
+ p[Y] = 0.0;
+ break;
+ case ALIGN_XMID_YMIN:
+ p[X] = 0.5;
+ p[Y] = 0.0;
+ break;
+ case ALIGN_XMAX_YMIN:
+ p[X] = 1.0;
+ p[Y] = 0.0;
+ break;
+ case ALIGN_XMIN_YMID:
+ p[X] = 0.0;
+ p[Y] = 0.5;
+ break;
+ case ALIGN_XMID_YMID:
+ p[X] = 0.5;
+ p[Y] = 0.5;
+ break;
+ case ALIGN_XMAX_YMID:
+ p[X] = 1.0;
+ p[Y] = 0.5;
+ break;
+ case ALIGN_XMIN_YMAX:
+ p[X] = 0.0;
+ p[Y] = 1.0;
+ break;
+ case ALIGN_XMID_YMAX:
+ p[X] = 0.5;
+ p[Y] = 1.0;
+ break;
+ case ALIGN_XMAX_YMAX:
+ p[X] = 1.0;
+ p[Y] = 1.0;
+ break;
+ default:
+ break;
+ }
+ return p;
+}
+
+
+/** @brief Transform the rectangle by an affine.
+ * The result of the transformation might not be axis-aligned. The return value
+ * of this operation will be the smallest axis-aligned rectangle containing
+ * all points of the true result. */
+Rect &Rect::operator*=(Affine const &m) {
+ Point pts[4];
+ for (unsigned i=0; i<4; ++i) pts[i] = corner(i) * m;
+ Coord minx = std::min(std::min(pts[0][X], pts[1][X]), std::min(pts[2][X], pts[3][X]));
+ Coord miny = std::min(std::min(pts[0][Y], pts[1][Y]), std::min(pts[2][Y], pts[3][Y]));
+ Coord maxx = std::max(std::max(pts[0][X], pts[1][X]), std::max(pts[2][X], pts[3][X]));
+ Coord maxy = std::max(std::max(pts[0][Y], pts[1][Y]), std::max(pts[2][Y], pts[3][Y]));
+ f[X].setMin(minx); f[X].setMax(maxx);
+ f[Y].setMin(miny); f[Y].setMax(maxy);
+ return *this;
+}
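+
+/* Usage sketch (illustrative only; assumes Rotate converts to Affine in the
+ * same way Translate does in transformTo() below):
+ * @code
+ * Geom::Rect r(Geom::Point(0, 0), Geom::Point(1, 1));
+ * r *= Geom::Rotate(M_PI / 4);
+ * // r is now the axis-aligned bounding box of the rotated unit square,
+ * // a box of width and height sqrt(2).
+ * @endcode
+ */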
+
+Affine Rect::transformTo(Rect const &viewport, Aspect const &aspect) const
+{
+ // 1. translate viewbox to origin
+ Geom::Affine total = Translate(-min());
+
+ // 2. compute scale
+ Geom::Point vdims = viewport.dimensions();
+ Geom::Point dims = dimensions();
+ Geom::Scale scale(vdims[X] / dims[X], vdims[Y] / dims[Y]);
+
+ if (aspect.align == ALIGN_NONE) {
+ // apply non-uniform scale
+ total *= scale * Translate(viewport.min());
+ } else {
+ double uscale = 0;
+ if (aspect.expansion == EXPANSION_MEET) {
+ uscale = std::min(scale[X], scale[Y]);
+ } else {
+ uscale = std::max(scale[X], scale[Y]);
+ }
+ scale = Scale(uscale);
+
+ // compute offset for align
+ Geom::Point offset = vdims - dims * scale;
+ offset *= Scale(align_factors(aspect.align));
+ total *= scale * Translate(viewport.min() + offset);
+ }
+
+ return total;
+}
+
+Coord distanceSq(Point const &p, Rect const &rect)
+{
+ double dx = 0, dy = 0;
+ if ( p[X] < rect.left() ) {
+ dx = p[X] - rect.left();
+ } else if ( p[X] > rect.right() ) {
+ dx = rect.right() - p[X];
+ }
+ if (p[Y] < rect.top() ) {
+ dy = rect.top() - p[Y];
+ } else if ( p[Y] > rect.bottom() ) {
+ dy = p[Y] - rect.bottom();
+ }
+ return dx*dx+dy*dy;
+}
+
+/** @brief Returns the smallest distance between p and rect.
+ * @relates Rect */
+Coord distance(Point const &p, Rect const &rect)
+{
+ // copy of distanceSq, because we need to use hypot()
+ double dx = 0, dy = 0;
+ if ( p[X] < rect.left() ) {
+ dx = p[X] - rect.left();
+ } else if ( p[X] > rect.right() ) {
+ dx = rect.right() - p[X];
+ }
+ if (p[Y] < rect.top() ) {
+ dy = rect.top() - p[Y];
+ } else if ( p[Y] > rect.bottom() ) {
+ dy = p[Y] - rect.bottom();
+ }
+ return hypot(dx, dy);
+}
+
+Coord distanceSq(Point const &p, OptRect const &rect)
+{
+ if (!rect) return std::numeric_limits<Coord>::max();
+ return distanceSq(p, *rect);
+}
+Coord distance(Point const &p, OptRect const &rect)
+{
+ if (!rect) return std::numeric_limits<Coord>::max();
+ return distance(p, *rect);
+}
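+
+/* Usage sketch (illustrative only):
+ * @code
+ * Geom::Rect r(Geom::Point(0, 0), Geom::Point(1, 1));
+ * Geom::Coord d1 = Geom::distance(Geom::Point(3, 0.5), r); // 2: nearest edge is x == 1
+ * Geom::Coord d2 = Geom::distance(Geom::Point(0.5, 0.5), r); // 0: the point is inside
+ * @endcode
+ */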
+
+} // namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/recursive-bezier-intersection.cpp b/src/2geom/recursive-bezier-intersection.cpp
new file mode 100644
index 0000000..20b07d9
--- /dev/null
+++ b/src/2geom/recursive-bezier-intersection.cpp
@@ -0,0 +1,476 @@
+
+
+
+#include <2geom/basic-intersection.h>
+#include <2geom/sbasis-to-bezier.h>
+#include <2geom/exception.h>
+
+
+#include <gsl/gsl_vector.h>
+#include <gsl/gsl_multiroots.h>
+
+
+unsigned intersect_steps = 0;
+
+using std::vector;
+
+namespace Geom {
+
+class OldBezier {
+public:
+ std::vector<Geom::Point> p;
+ OldBezier() {
+ }
+ void split(double t, OldBezier &a, OldBezier &b) const;
+ Point operator()(double t) const;
+
+ ~OldBezier() {}
+
+ void bounds(double &minax, double &maxax,
+ double &minay, double &maxay) {
+ // Compute bounding box for a
+ minax = p[0][X]; // These are the most likely to be extremal
+ maxax = p.back()[X];
+ if( minax > maxax )
+ std::swap(minax, maxax);
+ for(unsigned i = 1; i < p.size()-1; i++) {
+ if( p[i][X] < minax )
+ minax = p[i][X];
+ else if( p[i][X] > maxax )
+ maxax = p[i][X];
+ }
+
+ minay = p[0][Y]; // These are the most likely to be extremal
+ maxay = p.back()[Y];
+ if( minay > maxay )
+ std::swap(minay, maxay);
+ for(unsigned i = 1; i < p.size()-1; i++) {
+ if( p[i][Y] < minay )
+ minay = p[i][Y];
+ else if( p[i][Y] > maxay )
+ maxay = p[i][Y];
+ }
+
+ }
+
+};
+
+static void
+find_intersections_bezier_recursive(std::vector<std::pair<double, double> > & xs,
+ OldBezier a,
+ OldBezier b);
+
+void
+find_intersections_bezier_recursive( std::vector<std::pair<double, double> > &xs,
+ vector<Geom::Point> const & A,
+ vector<Geom::Point> const & B,
+ double /*precision*/) {
+ OldBezier a, b;
+ a.p = A;
+ b.p = B;
+ return find_intersections_bezier_recursive(xs, a,b);
+}
+
+
+/*
+ * Split the curve at parameter t, returning the two parts through the output
+ * arguments. The de Casteljau triangle of intermediate control points is kept
+ * in temporary storage.
+ */
+void OldBezier::split(double t, OldBezier &left, OldBezier &right) const {
+ const unsigned sz = p.size();
+ //Geom::Point Vtemp[sz][sz];
+ // Allocate the full triangle up front; reserve() alone leaves the inner
+ // vectors empty, making the copy and indexing below undefined behaviour.
+ std::vector< std::vector< Geom::Point > > Vtemp(sz, std::vector<Geom::Point>(sz));
+
+ /* Copy control points */
+ std::copy(p.begin(), p.end(), Vtemp[0].begin());
+
+ /* Triangle computation */
+ for (unsigned i = 1; i < sz; i++) {
+ for (unsigned j = 0; j < sz - i; j++) {
+ Vtemp[i][j] = lerp(t, Vtemp[i-1][j], Vtemp[i-1][j+1]);
+ }
+ }
+
+ left.p.resize(sz);
+ right.p.resize(sz);
+ for (unsigned j = 0; j < sz; j++)
+ left.p[j] = Vtemp[j][0];
+ for (unsigned j = 0; j < sz; j++)
+ right.p[j] = Vtemp[sz-1-j][j];
+}
+
+#if 0
+/*
+ * split the curve at the midpoint, returning an array with the two parts
+ * Temporary storage is minimized by using part of the storage for the result
+ * to hold an intermediate value until it is no longer needed.
+ */
+Point OldBezier::operator()(double t) const {
+ const unsigned sz = p.size();
+ Geom::Point Vtemp[sz][sz];
+
+ /* Copy control points */
+ std::copy(p.begin(), p.end(), Vtemp[0]);
+
+ /* Triangle computation */
+ for (unsigned i = 1; i < sz; i++) {
+ for (unsigned j = 0; j < sz - i; j++) {
+ Vtemp[i][j] = lerp(t, Vtemp[i-1][j], Vtemp[i-1][j+1]);
+ }
+ }
+ return Vtemp[sz-1][0];
+}
+#endif
+
+// suggested by Sederberg.
+Point OldBezier::operator()(double const t) const {
+ size_t const n = p.size()-1;
+ Point r;
+ for(int dim = 0; dim < 2; dim++) {
+ double const u = 1.0 - t;
+ double bc = 1;
+ double tn = 1;
+ double tmp = p[0][dim]*u;
+ for(size_t i=1; i<n; i++){
+ tn = tn*t;
+ bc = bc*(n-i+1)/i;
+ tmp = (tmp + tn*bc*p[i][dim])*u;
+ }
+ r[dim] = (tmp + tn*t*p[n][dim]);
+ }
+ return r;
+}
+
+
+/*
+ * Test the bounding boxes of two OldBezier curves for interference.
+ * Several observations:
+ * First, it is cheaper to compute the bounding box of the second curve
+ * and test its bounding box for interference than to use a more direct
+ * approach of comparing all control points of the second curve with
+ * the various edges of the bounding box of the first curve to test
+ * for interference.
+ * Second, after a few subdivisions it is highly probable that two corners
+ * of the bounding box of a given Bezier curve are the first and last
+ * control point. Once this happens once, it happens for all subsequent
+ * subcurves. It might be worth putting in a test and then short-circuit
+ * code for further subdivision levels.
+ * Third, in the final comparison (the interference test) the comparisons
+ * should both permit equality. We want to find intersections even if they
+ * occur at the ends of segments.
+ * Finally, there are tighter bounding boxes that can be derived. It isn't
+ * clear whether the higher probability of rejection (and hence fewer
+ * subdivisions and tests) is worth the extra work.
+ */
+
+bool intersect_BB( OldBezier a, OldBezier b ) {
+ double minax, maxax, minay, maxay;
+ a.bounds(minax, maxax, minay, maxay);
+ double minbx, maxbx, minby, maxby;
+ b.bounds(minbx, maxbx, minby, maxby);
+ // Test bounding box of b against bounding box of a
+ // Not >= : need boundary case
+ return !( ( minax > maxbx ) || ( minay > maxby )
+ || ( minbx > maxax ) || ( minby > maxay ) );
+}
+
+/*
+ * Recursively intersect two curves keeping track of their real parameters
+ * and depths of intersection.
+ * The results are returned in a 2-D array of doubles indicating the parameters
+ * for which intersections are found. The parameters are in the order the
+ * intersections were found, which is probably not in sorted order.
+ * When an intersection is found, the parameter value for each of the two
+ * is stored in the index elements array, and the index is incremented.
+ *
+ * If either of the curves has subdivisions left before it is straight
+ * (depth > 0)
+ * that curve (possibly both) is (are) subdivided at its (their) midpoint(s).
+ * the depth(s) is (are) decremented, and the parameter value(s) corresponding
+ * to the midpoints(s) is (are) computed.
+ * Then each of the subcurves of one curve is intersected with each of the
+ * subcurves of the other curve, first by testing the bounding boxes for
+ * interference. If there is any bounding box interference, the corresponding
+ * subcurves are recursively intersected.
+ *
+ * If neither curve has subdivisions left, the line segments from the first
+ * to last control point of each segment are intersected. (Actually, only the
+ * parameter values corresponding to the intersection point are found.)
+ *
+ * The a priori flatness test is probably more efficient than testing at each
+ * level of recursion, although a test after three or four levels would
+ * probably be worthwhile, since many curves become flat faster than their
+ * asymptotic rate for the first few levels of recursion.
+ *
+ * The bounding box test fails much more frequently than it succeeds, providing
+ * substantial pruning of the search space.
+ *
+ * Each (sub)curve is subdivided only once, hence it is not possible that for
+ * one final line intersection test the subdivision was at one level, while
+ * for another final line intersection test the subdivision (of the same curve)
+ * was at another. Since the line segments share endpoints, the intersection
+ * is robust: a near-tangential intersection will yield zero or two
+ * intersections.
+ */
+void recursively_intersect( OldBezier a, double t0, double t1, int deptha,
+ OldBezier b, double u0, double u1, int depthb,
+ std::vector<std::pair<double, double> > &parameters)
+{
+ intersect_steps ++;
+ //std::cout << deptha << std::endl;
+ if( deptha > 0 )
+ {
+ OldBezier A[2];
+ a.split(0.5, A[0], A[1]);
+ double tmid = (t0+t1)*0.5;
+ deptha--;
+ if( depthb > 0 )
+ {
+ OldBezier B[2];
+ b.split(0.5, B[0], B[1]);
+ double umid = (u0+u1)*0.5;
+ depthb--;
+ if( intersect_BB( A[0], B[0] ) )
+ recursively_intersect( A[0], t0, tmid, deptha,
+ B[0], u0, umid, depthb,
+ parameters );
+ if( intersect_BB( A[1], B[0] ) )
+ recursively_intersect( A[1], tmid, t1, deptha,
+ B[0], u0, umid, depthb,
+ parameters );
+ if( intersect_BB( A[0], B[1] ) )
+ recursively_intersect( A[0], t0, tmid, deptha,
+ B[1], umid, u1, depthb,
+ parameters );
+ if( intersect_BB( A[1], B[1] ) )
+ recursively_intersect( A[1], tmid, t1, deptha,
+ B[1], umid, u1, depthb,
+ parameters );
+ }
+ else
+ {
+ if( intersect_BB( A[0], b ) )
+ recursively_intersect( A[0], t0, tmid, deptha,
+ b, u0, u1, depthb,
+ parameters );
+ if( intersect_BB( A[1], b ) )
+ recursively_intersect( A[1], tmid, t1, deptha,
+ b, u0, u1, depthb,
+ parameters );
+ }
+ }
+ else
+ if( depthb > 0 )
+ {
+ OldBezier B[2];
+ b.split(0.5, B[0], B[1]);
+ double umid = (u0 + u1)*0.5;
+ depthb--;
+ if( intersect_BB( a, B[0] ) )
+ recursively_intersect( a, t0, t1, deptha,
+ B[0], u0, umid, depthb,
+ parameters );
+ if( intersect_BB( a, B[1] ) )
+ recursively_intersect( a, t0, t1, deptha,
+ B[1], umid, u1, depthb,
+ parameters );
+ }
+ else // Both segments are fully subdivided; now do line segments
+ {
+ double xlk = a.p.back()[X] - a.p[0][X];
+ double ylk = a.p.back()[Y] - a.p[0][Y];
+ double xnm = b.p.back()[X] - b.p[0][X];
+ double ynm = b.p.back()[Y] - b.p[0][Y];
+ double xmk = b.p[0][X] - a.p[0][X];
+ double ymk = b.p[0][Y] - a.p[0][Y];
+ double det = xnm * ylk - ynm * xlk;
+ if( 1.0 + det == 1.0 )
+ return;
+ else
+ {
+ double detinv = 1.0 / det;
+ double s = ( xnm * ymk - ynm *xmk ) * detinv;
+ double t = ( xlk * ymk - ylk * xmk ) * detinv;
+ if( ( s < 0.0 ) || ( s > 1.0 ) || ( t < 0.0 ) || ( t > 1.0 ) )
+ return;
+ parameters.emplace_back(t0 + s * ( t1 - t0 ),
+ u0 + t * ( u1 - u0 ));
+ }
+ }
+}
+
+inline double log4( double x ) { return log(x)/log(4.); }
+
+/*
+ * Wang's theorem is used to estimate the level of subdivision required,
+ * but only if the bounding boxes interfere at the top level.
+ * Assuming there is a possible intersection, recursively_intersect is
+ * used to find all the parameters corresponding to intersection points.
+ * these are then sorted and returned in an array.
+ */
+
+double Lmax(Point p) {
+ return std::max(fabs(p[X]), fabs(p[Y]));
+}
+
+
+unsigned wangs_theorem(OldBezier /*a*/) {
+ return 6; // seems a good approximation!
+
+ /*
+ const double INV_EPS = (1L<<14); // The value of 1.0 / (1L<<14) is enough for most applications
+
+ double la1 = Lmax( ( a.p[2] - a.p[1] ) - (a.p[1] - a.p[0]) );
+ double la2 = Lmax( ( a.p[3] - a.p[2] ) - (a.p[2] - a.p[1]) );
+ double l0 = std::max(la1, la2);
+ unsigned ra;
+ if( l0 * 0.75 * M_SQRT2 + 1.0 == 1.0 )
+ ra = 0;
+ else
+ ra = (unsigned)ceil( log4( M_SQRT2 * 6.0 / 8.0 * INV_EPS * l0 ) );
+ //std::cout << ra << std::endl;
+ return ra;*/
+}
+
+struct rparams
+{
+ OldBezier &A;
+ OldBezier &B;
+};
+
+/*static int
+intersect_polish_f (const gsl_vector * x, void *params,
+ gsl_vector * f)
+{
+ const double x0 = gsl_vector_get (x, 0);
+ const double x1 = gsl_vector_get (x, 1);
+
+ Geom::Point dx = ((struct rparams *) params)->A(x0) -
+ ((struct rparams *) params)->B(x1);
+
+ gsl_vector_set (f, 0, dx[0]);
+ gsl_vector_set (f, 1, dx[1]);
+
+ return GSL_SUCCESS;
+}*/
+
+/*union dbl_64{
+ long long i64;
+ double d64;
+};*/
+
+/*static double EpsilonBy(double value, int eps)
+{
+ dbl_64 s;
+ s.d64 = value;
+ s.i64 += eps;
+ return s.d64;
+}*/
+
+/*
+static void intersect_polish_root (OldBezier &A, double &s,
+ OldBezier &B, double &t) {
+ const gsl_multiroot_fsolver_type *T;
+ gsl_multiroot_fsolver *sol;
+
+ int status;
+ size_t iter = 0;
+
+ const size_t n = 2;
+ struct rparams p = {A, B};
+ gsl_multiroot_function f = {&intersect_polish_f, n, &p};
+
+ double x_init[2] = {s, t};
+ gsl_vector *x = gsl_vector_alloc (n);
+
+ gsl_vector_set (x, 0, x_init[0]);
+ gsl_vector_set (x, 1, x_init[1]);
+
+ T = gsl_multiroot_fsolver_hybrids;
+ sol = gsl_multiroot_fsolver_alloc (T, 2);
+ gsl_multiroot_fsolver_set (sol, &f, x);
+
+ do
+ {
+ iter++;
+ status = gsl_multiroot_fsolver_iterate (sol);
+
+ if (status) // check if solver is stuck
+ break;
+
+ status =
+ gsl_multiroot_test_residual (sol->f, 1e-12);
+ }
+ while (status == GSL_CONTINUE && iter < 1000);
+
+ s = gsl_vector_get (sol->x, 0);
+ t = gsl_vector_get (sol->x, 1);
+
+ gsl_multiroot_fsolver_free (sol);
+ gsl_vector_free (x);
+
+ // This code does a neighbourhood search for minor improvements.
+ double best_v = L1(A(s) - B(t));
+ //std::cout << "------\n" << best_v << std::endl;
+ Point best(s,t);
+ while (true) {
+ Point trial = best;
+ double trial_v = best_v;
+ for(int nsi = -1; nsi < 2; nsi++) {
+ for(int nti = -1; nti < 2; nti++) {
+ Point n(EpsilonBy(best[0], nsi),
+ EpsilonBy(best[1], nti));
+ double c = L1(A(n[0]) - B(n[1]));
+ //std::cout << c << "; ";
+ if (c < trial_v) {
+ trial = n;
+ trial_v = c;
+ }
+ }
+ }
+ if(trial == best) {
+ //std::cout << "\n" << s << " -> " << s - best[0] << std::endl;
+ //std::cout << t << " -> " << t - best[1] << std::endl;
+ //std::cout << best_v << std::endl;
+ s = best[0];
+ t = best[1];
+ return;
+ } else {
+ best = trial;
+ best_v = trial_v;
+ }
+ }
+}*/
+
+
+void find_intersections_bezier_recursive( std::vector<std::pair<double, double> > &xs,
+ OldBezier a, OldBezier b)
+{
+ if( intersect_BB( a, b ) )
+ {
+ recursively_intersect( a, 0., 1., wangs_theorem(a),
+ b, 0., 1., wangs_theorem(b),
+ xs);
+ }
+ /*for(unsigned i = 0; i < xs.size(); i++)
+ intersect_polish_root(a, xs[i].first,
+ b, xs[i].second);*/
+ std::sort(xs.begin(), xs.end());
+}
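+
+/* Usage sketch (illustrative only; the control points are made up for the example):
+ * @code
+ * std::vector<Geom::Point> A = { Geom::Point(0, 0), Geom::Point(1, 2),
+ *     Geom::Point(2, -2), Geom::Point(3, 0) };
+ * std::vector<Geom::Point> B = { Geom::Point(0, -1), Geom::Point(1, 1),
+ *     Geom::Point(2, 1), Geom::Point(3, -1) };
+ * std::vector<std::pair<double, double> > xs;
+ * Geom::find_intersections_bezier_recursive(xs, A, B, 1e-9);
+ * // each entry of xs is a (t, u) pair with A(t) approximately equal to B(u),
+ * // sorted by t; the precision argument is currently unused.
+ * @endcode
+ */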
+
+
+};
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/sbasis-2d.cpp b/src/2geom/sbasis-2d.cpp
new file mode 100644
index 0000000..53b09cd
--- /dev/null
+++ b/src/2geom/sbasis-2d.cpp
@@ -0,0 +1,202 @@
+#include <2geom/sbasis-2d.h>
+#include <2geom/sbasis-geometric.h>
+
+namespace Geom{
+
+SBasis extract_u(SBasis2d const &a, double u) {
+ SBasis sb(a.vs, Linear());
+ double s = u*(1-u);
+
+ for(unsigned vi = 0; vi < a.vs; vi++) {
+ double sk = 1;
+ Linear bo(0,0);
+ for(unsigned ui = 0; ui < a.us; ui++) {
+ bo += (extract_u(a.index(ui, vi), u))*sk;
+ sk *= s;
+ }
+ sb[vi] = bo;
+ }
+
+ return sb;
+}
+
+SBasis extract_v(SBasis2d const &a, double v) {
+ SBasis sb(a.us, Linear());
+ double s = v*(1-v);
+
+ for(unsigned ui = 0; ui < a.us; ui++) {
+ double sk = 1;
+ Linear bo(0,0);
+ for(unsigned vi = 0; vi < a.vs; vi++) {
+ bo += (extract_v(a.index(ui, vi), v))*sk;
+ sk *= s;
+ }
+ sb[ui] = bo;
+ }
+
+ return sb;
+}
+
+SBasis compose(Linear2d const &a, D2<SBasis> const &p) {
+ D2<SBasis> omp(-p[X] + 1, -p[Y] + 1);
+ return multiply(omp[0], omp[1])*a[0] +
+ multiply(p[0], omp[1])*a[1] +
+ multiply(omp[0], p[1])*a[2] +
+ multiply(p[0], p[1])*a[3];
+}
+
+SBasis
+compose(SBasis2d const &fg, D2<SBasis> const &p) {
+ SBasis B;
+ SBasis s[2];
+ SBasis ss[2];
+ for(unsigned dim = 0; dim < 2; dim++)
+ s[dim] = p[dim]*(Linear(1) - p[dim]);
+ ss[1] = Linear(1);
+ for(unsigned vi = 0; vi < fg.vs; vi++) {
+ ss[0] = ss[1];
+ for(unsigned ui = 0; ui < fg.us; ui++) {
+ unsigned i = ui + vi*fg.us;
+ B += ss[0]*compose(fg[i], p);
+ ss[0] *= s[0];
+ }
+ ss[1] *= s[1];
+ }
+ return B;
+}
+
+D2<SBasis>
+compose_each(D2<SBasis2d> const &fg, D2<SBasis> const &p) {
+ return D2<SBasis>(compose(fg[X], p), compose(fg[Y], p));
+}
+
+SBasis2d partial_derivative(SBasis2d const &f, int dim) {
+ SBasis2d result;
+ for(unsigned i = 0; i < f.size(); i++) {
+ result.push_back(Linear2d(0,0,0,0));
+ }
+ result.us = f.us;
+ result.vs = f.vs;
+
+ for(unsigned i = 0; i < f.us; i++) {
+ for(unsigned j = 0; j < f.vs; j++) {
+ Linear2d lin = f.index(i,j);
+ Linear2d dlin(lin[1+dim]-lin[0], lin[1+2*dim]-lin[dim], lin[3-dim]-lin[2*(1-dim)], lin[3]-lin[2-dim]);
+ result[i+j*result.us] += dlin;
+ unsigned di = dim?j:i;
+ if (di>=1){
+ float motpi = dim?-1:1;
+ Linear2d ds_lin_low( lin[0], -motpi*lin[1], motpi*lin[2], -lin[3] );
+ result[(i+dim-1)+(j-dim)*result.us] += di*ds_lin_low;
+
+ Linear2d ds_lin_hi( lin[1+dim]-lin[0], lin[1+2*dim]-lin[dim], lin[3]-lin[2-dim], lin[3-dim]-lin[2-dim] );
+ result[i+j*result.us] += di*ds_lin_hi;
+ }
+ }
+ }
+ return result;
+}
+
+/**
+ * Finds a path which traces the 0 contour of f, traversing from A to B as a single d2<sbasis>.
+ * degmax specifies the degree (degree = 2*degmax-1, so a degmax of 2 generates a cubic fit).
+ * The algorithm is based on dividing out derivatives at each end point and does not use the curvature for fitting.
+ * It is less accurate than sb2d_cubic_solve, although this may be fixed in the future.
+ */
+D2<SBasis>
+sb2dsolve(SBasis2d const &f, Geom::Point const &A, Geom::Point const &B, unsigned degmax){
+ //g_warning("check f(A)= %f = f(B) = %f =0!", f.apply(A[X],A[Y]), f.apply(B[X],B[Y]));
+
+ SBasis2d dfdu = partial_derivative(f, 0);
+ SBasis2d dfdv = partial_derivative(f, 1);
+ Geom::Point dfA(dfdu.apply(A[X],A[Y]),dfdv.apply(A[X],A[Y]));
+ Geom::Point dfB(dfdu.apply(B[X],B[Y]),dfdv.apply(B[X],B[Y]));
+ Geom::Point nA = dfA/(dfA[X]*dfA[X]+dfA[Y]*dfA[Y]);
+ Geom::Point nB = dfB/(dfB[X]*dfB[X]+dfB[Y]*dfB[Y]);
+
+ D2<SBasis>result(SBasis(degmax, Linear()), SBasis(degmax, Linear()));
+ double fact_k=1;
+ double sign = 1.;
+ for(int dim = 0; dim < 2; dim++)
+ result[dim][0] = Linear(A[dim],B[dim]);
+ for(unsigned k=1; k<degmax; k++){
+ // these two lines make the solutions worse!
+ //fact_k *= k;
+ //sign = -sign;
+ SBasis f_on_curve = compose(f,result);
+ Linear reste = f_on_curve[k];
+ double ax = -reste[0]/fact_k*nA[X];
+ double ay = -reste[0]/fact_k*nA[Y];
+ double bx = -sign*reste[1]/fact_k*nB[X];
+ double by = -sign*reste[1]/fact_k*nB[Y];
+
+ result[X][k] = Linear(ax,bx);
+ result[Y][k] = Linear(ay,by);
+ //sign *= 3;
+ }
+ return result;
+}
+
+/**
+ * Finds a path which traces the 0 contour of f, traversing from A to B as a single cubic d2<sbasis>.
+ * The algorithm is based on matching direction and curvature at each end point.
+ */
+//TODO: handle the case when B is "behind" A for the natural orientation of the level set.
+//TODO: more generally, there might be up to 4 solutions. Choose the best one!
+D2<SBasis>
+sb2d_cubic_solve(SBasis2d const &f, Geom::Point const &A, Geom::Point const &B){
+ D2<SBasis>result;//(Linear(A[X],B[X]),Linear(A[Y],B[Y]));
+ //g_warning("check 0 = %f = %f!", f.apply(A[X],A[Y]), f.apply(B[X],B[Y]));
+
+ SBasis2d f_u = partial_derivative(f , 0);
+ SBasis2d f_v = partial_derivative(f , 1);
+ SBasis2d f_uu = partial_derivative(f_u, 0);
+ SBasis2d f_uv = partial_derivative(f_v, 0);
+ SBasis2d f_vv = partial_derivative(f_v, 1);
+
+ Geom::Point dfA(f_u.apply(A[X],A[Y]),f_v.apply(A[X],A[Y]));
+ Geom::Point dfB(f_u.apply(B[X],B[Y]),f_v.apply(B[X],B[Y]));
+
+ Geom::Point V0 = rot90(dfA);
+ Geom::Point V1 = rot90(dfB);
+
+ double D2fVV0 = f_uu.apply(A[X],A[Y])*V0[X]*V0[X]+
+ 2*f_uv.apply(A[X],A[Y])*V0[X]*V0[Y]+
+ f_vv.apply(A[X],A[Y])*V0[Y]*V0[Y];
+ double D2fVV1 = f_uu.apply(B[X],B[Y])*V1[X]*V1[X]+
+ 2*f_uv.apply(B[X],B[Y])*V1[X]*V1[Y]+
+ f_vv.apply(B[X],B[Y])*V1[Y]*V1[Y];
+
+ std::vector<D2<SBasis> > candidates = cubics_fitting_curvature(A,B,V0,V1,D2fVV0,D2fVV1);
+ if (candidates.empty()) {
+ return D2<SBasis>(SBasis(Linear(A[X],B[X])), SBasis(Linear(A[Y],B[Y])));
+ }
+ //TODO: I'm sure std algorithm could do that for me...
+ double error = -1;
+ unsigned best = 0;
+ for (unsigned i=0; i<candidates.size(); i++){
+ Interval bounds = *bounds_fast(compose(f,candidates[i]));
+ double new_error = (fabs(bounds.max())>fabs(bounds.min()) ? fabs(bounds.max()) : fabs(bounds.min()) );
+ if ( new_error < error || error < 0 ){
+ error = new_error;
+ best = i;
+ }
+ }
+ return candidates[best];
+}
+
+
+
+
+};
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/sbasis-geometric.cpp b/src/2geom/sbasis-geometric.cpp
new file mode 100644
index 0000000..7039c6d
--- /dev/null
+++ b/src/2geom/sbasis-geometric.cpp
@@ -0,0 +1,790 @@
+/** Geometric operators on D2<SBasis> (1D->2D).
+ * Copyright 2012 JBC Engelen
+ * Copyright 2007 JF Barraud
+ * Copyright 2007 N Hurst
+ *
+ * The functions defined in this file relate to 2d geometric operations such as arc length,
+ * unit_vector, curvature, and centroid. Most are built on top of unit_vector, which takes an
+ * arbitrary D2 and returns a D2 of unit length with the same direction.
+ *
+ * Todo/think about:
+ * arclength D2 -> sbasis (giving arclength function)
+ * does uniform_speed return natural parameterisation?
+ * integrate sb2d code from normal-bundle
+ * angle(md<2>) -> sbasis (gives angle from vector - discontinuous?)
+ * osculating circle center?
+ *
+ **/
+
+#include <2geom/sbasis-geometric.h>
+#include <2geom/sbasis.h>
+#include <2geom/sbasis-math.h>
+
+//namespace Geom{
+using namespace Geom;
+using namespace std;
+
+//Some utils first.
+//TODO: remove this!!
+/**
+ * Return a list of doubles that appear in both a and b to within error tol
+ * a, b, vector of double
+ * tol tolerance
+ */
+static vector<double>
+vect_intersect(vector<double> const &a, vector<double> const &b, double tol=0.){
+ vector<double> inter;
+ unsigned i=0,j=0;
+ while ( i<a.size() && j<b.size() ){
+ if (fabs(a[i]-b[j])<tol){
+ inter.push_back(a[i]);
+ i+=1;
+ j+=1;
+ }else if (a[i]<b[j]){
+ i+=1;
+ }else if (a[i]>b[j]){
+ j+=1;
+ }
+ }
+ return inter;
+}
+
+//------------------------------------------------------------------------------
+static SBasis divide_by_sk(SBasis const &a, int k) {
+ if ( k>=(int)a.size()){
+ //make sure a is 0?
+ return SBasis();
+ }
+ if(k < 0) return shift(a,-k);
+ SBasis c;
+ c.insert(c.begin(), a.begin()+k, a.end());
+ return c;
+}
+
+static SBasis divide_by_t0k(SBasis const &a, int k) {
+ if(k < 0) {
+ SBasis c = Linear(0,1);
+ for (int i=2; i<=-k; i++){
+ c*=c;
+ }
+ c*=a;
+ return(c);
+ }else{
+ SBasis c = Linear(1,0);
+ for (int i=2; i<=k; i++){
+ c*=c;
+ }
+ c*=a;
+ return(divide_by_sk(c,k));
+ }
+}
+
+static SBasis divide_by_t1k(SBasis const &a, int k) {
+ if(k < 0) {
+ SBasis c = Linear(1,0);
+ for (int i=2; i<=-k; i++){
+ c*=c;
+ }
+ c*=a;
+ return(c);
+ }else{
+ SBasis c = Linear(0,1);
+ for (int i=2; i<=k; i++){
+ c*=c;
+ }
+ c*=a;
+ return(divide_by_sk(c,k));
+ }
+}
+
+static D2<SBasis> RescaleForNonVanishingEnds(D2<SBasis> const &MM, double ZERO=1.e-4){
+ D2<SBasis> M = MM;
+ //TODO: divide by all the s at once!!!
+ while ((M[0].size()>1||M[1].size()>1) &&
+ fabs(M[0].at0())<ZERO &&
+ fabs(M[1].at0())<ZERO &&
+ fabs(M[0].at1())<ZERO &&
+ fabs(M[1].at1())<ZERO){
+ M[0] = divide_by_sk(M[0],1);
+ M[1] = divide_by_sk(M[1],1);
+ }
+ while ((M[0].size()>1||M[1].size()>1) &&
+ fabs(M[0].at0())<ZERO && fabs(M[1].at0())<ZERO){
+ M[0] = divide_by_t0k(M[0],1);
+ M[1] = divide_by_t0k(M[1],1);
+ }
+ while ((M[0].size()>1||M[1].size()>1) &&
+ fabs(M[0].at1())<ZERO && fabs(M[1].at1())<ZERO){
+ M[0] = divide_by_t1k(M[0],1);
+ M[1] = divide_by_t1k(M[1],1);
+ }
+ return M;
+}
+
+/*static D2<SBasis> RescaleForNonVanishing(D2<SBasis> const &MM, double ZERO=1.e-4){
+ std::vector<double> levels;
+ levels.push_back(-ZERO);
+ levels.push_back(ZERO);
+ //std::vector<std::vector<double> > mr = multi_roots(MM, levels);
+ }*/
+
+
+//=================================================================
+//TODO: what's this for?!?!
+Piecewise<D2<SBasis> >
+Geom::cutAtRoots(Piecewise<D2<SBasis> > const &M, double ZERO){
+ vector<double> rts;
+ for (unsigned i=0; i<M.size(); i++){
+ vector<double> seg_rts = roots((M.segs[i])[0]);
+ seg_rts = vect_intersect(seg_rts, roots((M.segs[i])[1]), ZERO);
+ Linear mapToDom = Linear(M.cuts[i],M.cuts[i+1]);
+ for (double & seg_rt : seg_rts){
+ seg_rt= mapToDom(seg_rt);
+ }
+ rts.insert(rts.end(),seg_rts.begin(),seg_rts.end());
+ }
+ return partition(M,rts);
+}
+
+/** Return a function which gives the angle of vect at each point.
+ \param vect a piecewise parametric curve.
+ \param tol the maximum error allowed.
+ \param order the maximum degree to use for approximation
+ \relates Piecewise
+*/
+Piecewise<SBasis>
+Geom::atan2(Piecewise<D2<SBasis> > const &vect, double tol, unsigned order){
+ Piecewise<SBasis> result;
+ Piecewise<D2<SBasis> > v = cutAtRoots(vect,tol);
+ result.cuts.push_back(v.cuts.front());
+ for (unsigned i=0; i<v.size(); i++){
+
+ D2<SBasis> vi = RescaleForNonVanishingEnds(v.segs[i]);
+ SBasis x=vi[0], y=vi[1];
+ Piecewise<SBasis> angle;
+ angle = divide (x*derivative(y)-y*derivative(x), x*x+y*y, tol, order);
+
+ //TODO: I don't understand this - sign.
+ angle = integral(-angle);
+ Point vi0 = vi.at0();
+ angle += -std::atan2(vi0[1],vi0[0]) - angle[0].at0();
+ //TODO: deal with 2*pi jumps from one seg to the other...
+ //TODO: not exact at t=1 because of the integral.
+ //TODO: force continuity?
+
+ angle.setDomain(Interval(v.cuts[i],v.cuts[i+1]));
+ result.concat(angle);
+ }
+ return result;
+}
+/** Return a function which gives the angle of vect at each point.
+ \param vect a piecewise parametric curve.
+ \param tol the maximum error allowed.
+ \param order the maximum degree to use for approximation
+ \relates Piecewise, D2
+*/
+Piecewise<SBasis>
+Geom::atan2(D2<SBasis> const &vect, double tol, unsigned order){
+ return atan2(Piecewise<D2<SBasis> >(vect),tol,order);
+}
+
+/** tan2 is the pseudo-inverse of atan2. It takes an angle and returns a unit_vector that points in the direction of angle.
+ \param angle a piecewise function of angle wrt t.
+ \param tol the maximum error allowed.
+ \param order the maximum degree to use for approximation
+ \relates D2, SBasis
+*/
+D2<Piecewise<SBasis> >
+Geom::tan2(SBasis const &angle, double tol, unsigned order){
+ return tan2(Piecewise<SBasis>(angle), tol, order);
+}
+
+/** tan2 is the pseudo-inverse of atan2. It takes an angle and returns a unit_vector that points in the direction of angle.
+ \param angle a piecewise function of angle wrt t.
+ \param tol the maximum error allowed.
+ \param order the maximum degree to use for approximation
+ \relates Piecewise, D2
+*/
+D2<Piecewise<SBasis> >
+Geom::tan2(Piecewise<SBasis> const &angle, double tol, unsigned order){
+ return D2<Piecewise<SBasis> >(cos(angle, tol, order), sin(angle, tol, order));
+}
+
+/** Return a Piecewise<D2<SBasis> > which points in the same direction as V_in, but has unit_length.
+ \param V_in the original path.
+ \param tol the maximum error allowed.
+ \param order the maximum degree to use for approximation
+
+unitVector(x,y) is computed as (b,-a) where a and b are solutions of:
+ ax+by=0 (eqn1) and a^2+b^2=1 (eqn2)
+
+ \relates Piecewise, D2
+*/
+Piecewise<D2<SBasis> >
+Geom::unitVector(D2<SBasis> const &V_in, double tol, unsigned order){
+ //TODO: Handle vanishing vectors...
+ // -This approach is numerically bad. Find a stable way to rescale V_in to have non vanishing ends.
+ // -This done, unitVector will have jumps at zeros: fill the gaps with arcs of circles.
+ D2<SBasis> V = RescaleForNonVanishingEnds(V_in);
+
+ if (V[0].isZero(tol) && V[1].isZero(tol))
+ return Piecewise<D2<SBasis> >(D2<SBasis>(Linear(1),SBasis()));
+ SBasis x = V[0], y = V[1];
+ SBasis r_eqn1, r_eqn2;
+
+ Point v0 = unit_vector(V.at0());
+ Point v1 = unit_vector(V.at1());
+ SBasis a = SBasis(order+1, Linear(0.));
+ a[0] = Linear(-v0[1],-v1[1]);
+ SBasis b = SBasis(order+1, Linear(0.));
+ b[0] = Linear( v0[0], v1[0]);
+
+ r_eqn1 = -(a*x+b*y);
+ r_eqn2 = Linear(1.)-(a*a+b*b);
+
+ for (unsigned k=1; k<=order; k++){
+ double r0 = (k<r_eqn1.size())? r_eqn1.at(k).at0() : 0;
+ double r1 = (k<r_eqn1.size())? r_eqn1.at(k).at1() : 0;
+ double rr0 = (k<r_eqn2.size())? r_eqn2.at(k).at0() : 0;
+ double rr1 = (k<r_eqn2.size())? r_eqn2.at(k).at1() : 0;
+ double a0,a1,b0,b1;// coeffs in a[k] and b[k]
+
+ //the equations to solve at this point are:
+ // a0*x(0)+b0*y(0)=r0 & 2*a0*a(0)+2*b0*b(0)=rr0
+ //and
+ // a1*x(1)+b1*y(1)=r1 & 2*a1*a(1)+2*b1*b(1)=rr1
+ a0 = r0/dot(v0,V.at0())*v0[0]-rr0/2*v0[1];
+ b0 = r0/dot(v0,V.at0())*v0[1]+rr0/2*v0[0];
+ a1 = r1/dot(v1,V.at1())*v1[0]-rr1/2*v1[1];
+ b1 = r1/dot(v1,V.at1())*v1[1]+rr1/2*v1[0];
+
+ a[k] = Linear(a0,a1);
+ b[k] = Linear(b0,b1);
+
+ //TODO: use "incremental" rather than explicit formulas.
+ r_eqn1 = -(a*x+b*y);
+ r_eqn2 = Linear(1)-(a*a+b*b);
+ }
+
+ //our candidate is:
+ D2<SBasis> unitV;
+ unitV[0] = b;
+ unitV[1] = -a;
+
+ //is it good?
+ double rel_tol = std::max(1.,std::max(V_in[0].tailError(0),V_in[1].tailError(0)))*tol;
+ if (r_eqn1.tailError(order)>rel_tol || r_eqn2.tailError(order)>tol){
+ //if not: subdivide and concat results.
+ Piecewise<D2<SBasis> > unitV0, unitV1;
+ unitV0 = unitVector(compose(V,Linear(0,.5)),tol,order);
+ unitV1 = unitVector(compose(V,Linear(.5,1)),tol,order);
+ unitV0.setDomain(Interval(0.,.5));
+ unitV1.setDomain(Interval(.5,1.));
+ unitV0.concat(unitV1);
+ return(unitV0);
+ }else{
+ //if yes: return it as pw.
+ Piecewise<D2<SBasis> > result;
+ result=(Piecewise<D2<SBasis> >)unitV;
+ return result;
+ }
+}
+
+/** Return a Piecewise<D2<SBasis> > which points in the same direction as V_in, but has unit_length.
+ \param V_in the original path.
+ \param tol the maximum error allowed.
+ \param order the maximum degree to use for approximation
+
+unitVector(x,y) is computed as (b,-a) where a and b are solutions of:
+ ax+by=0 (eqn1) and a^2+b^2=1 (eqn2)
+
+ \relates Piecewise
+*/
+Piecewise<D2<SBasis> >
+Geom::unitVector(Piecewise<D2<SBasis> > const &V, double tol, unsigned order){
+ Piecewise<D2<SBasis> > result;
+ Piecewise<D2<SBasis> > VV = cutAtRoots(V);
+ result.cuts.push_back(VV.cuts.front());
+ for (unsigned i=0; i<VV.size(); i++){
+ Piecewise<D2<SBasis> > unit_seg;
+ unit_seg = unitVector(VV.segs[i],tol, order);
+ unit_seg.setDomain(Interval(VV.cuts[i],VV.cuts[i+1]));
+ result.concat(unit_seg);
+ }
+ return result;
+}
+
+/** returns a function giving the arclength at each point in M.
+ \param M the Element.
+ \param tol the maximum error allowed.
+ \relates Piecewise
+*/
+Piecewise<SBasis>
+Geom::arcLengthSb(Piecewise<D2<SBasis> > const &M, double tol){
+ Piecewise<D2<SBasis> > dM = derivative(M);
+ Piecewise<SBasis> dMlength = sqrt(dot(dM,dM),tol,3);
+ Piecewise<SBasis> length = integral(dMlength);
+ length-=length.segs.front().at0();
+ return length;
+}
+
+/** returns a function giving the arclength at each point in M.
+ \param M the Element.
+ \param tol the maximum error allowed.
+ \relates Piecewise, D2
+*/
+Piecewise<SBasis>
+Geom::arcLengthSb(D2<SBasis> const &M, double tol){
+ return arcLengthSb(Piecewise<D2<SBasis> >(M), tol);
+}
+
+#if 0
+double
+Geom::length(D2<SBasis> const &M,
+ double tol){
+ Piecewise<SBasis> length = arcLengthSb(M, tol);
+ return length.segs.back().at1();
+}
+double
+Geom::length(Piecewise<D2<SBasis> > const &M,
+ double tol){
+ Piecewise<SBasis> length = arcLengthSb(M, tol);
+ return length.segs.back().at1();
+}
+#endif
+
+/** returns a function giving the curvature at each point in M.
+ \param M the Element.
+ \param tol the maximum error allowed.
+ \relates Piecewise, D2
+ \todo claimed incomplete. Check.
+*/
+Piecewise<SBasis>
+Geom::curvature(D2<SBasis> const &M, double tol) {
+ D2<SBasis> dM=derivative(M);
+ Piecewise<D2<SBasis> > unitv = unitVector(dM,tol);
+ Piecewise<SBasis> dMlength = dot(Piecewise<D2<SBasis> >(dM),unitv);
+ Piecewise<SBasis> k = cross(derivative(unitv),unitv);
+ k = divide(k,dMlength,tol,3);
+ return(k);
+}
+
+/** returns a function giving the curvature at each point in M.
+ \param M the Element.
+ \param tol the maximum error allowed.
+ \relates Piecewise
+ \todo claimed incomplete. Check.
+*/
+Piecewise<SBasis>
+Geom::curvature(Piecewise<D2<SBasis> > const &V, double tol){
+ Piecewise<SBasis> result;
+ Piecewise<D2<SBasis> > VV = cutAtRoots(V);
+ result.cuts.push_back(VV.cuts.front());
+ for (unsigned i=0; i<VV.size(); i++){
+ Piecewise<SBasis> curv_seg;
+ curv_seg = curvature(VV.segs[i],tol);
+ curv_seg.setDomain(Interval(VV.cuts[i],VV.cuts[i+1]));
+ result.concat(curv_seg);
+ }
+ return result;
+}
+
+//=================================================================
+
+/** Reparameterise M to have unit speed.
+ \param M the Element.
+ \param tol the maximum error allowed.
+ \param order the maximum degree to use for approximation
+ \relates Piecewise, D2
+*/
+Piecewise<D2<SBasis> >
+Geom::arc_length_parametrization(D2<SBasis> const &M,
+ unsigned order,
+ double tol){
+ Piecewise<D2<SBasis> > u;
+ u.push_cut(0);
+
+ Piecewise<SBasis> s = arcLengthSb(Piecewise<D2<SBasis> >(M),tol);
+ for (unsigned i=0; i < s.size();i++){
+ double t0=s.cuts[i],t1=s.cuts[i+1];
+ if ( are_near(s(t0),s(t1)) ) {
+ continue;
+ }
+ D2<SBasis> sub_M = compose(M,Linear(t0,t1));
+ D2<SBasis> sub_u;
+ for (unsigned dim=0;dim<2;dim++){
+ SBasis sub_s = s.segs[i];
+ sub_s-=sub_s.at0();
+ sub_s/=sub_s.at1();
+ sub_u[dim]=compose_inverse(sub_M[dim],sub_s, order, tol);
+ }
+ u.push(sub_u,s(t1));
+ }
+ return u;
+}
+
+/** Reparameterise M to have unit speed.
+ \param M the Element.
+ \param tol the maximum error allowed.
+ \param order the maximum degree to use for approximation
+ \relates Piecewise
+*/
+Piecewise<D2<SBasis> >
+Geom::arc_length_parametrization(Piecewise<D2<SBasis> > const &M,
+ unsigned order,
+ double tol){
+ Piecewise<D2<SBasis> > result;
+ for (unsigned i=0; i<M.size(); i++) {
+ result.concat( arc_length_parametrization(M[i],order,tol) );
+ }
+ return result;
+}
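+
+#if 0
+// Illustrative usage sketch (added for documentation; not part of the original
+// 2geom sources). The function name and sample values are hypothetical.
+// The returned piecewise curve traces the same points as the input, but its
+// parameter is (approximately) arc length, so its domain runs from 0 to the
+// total length of the curve.
+static void example_arc_length_parametrization() {
+    D2<SBasis> curve;
+    curve[X] = multiply(SBasis(Linear(0., 1.)), SBasis(Linear(0., 1.)));  // x = t^2
+    curve[Y] = SBasis(Linear(0., 1.));                                    // y = t
+    Piecewise<D2<SBasis> > by_length = arc_length_parametrization(curve, 2, .01);
+    double total_length = by_length.cuts.back() - by_length.cuts.front();
+    (void)total_length;
+}
+#endif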
+
+#include <gsl/gsl_integration.h>
+static double sb_length_integrating(double t, void* param) {
+ SBasis* pc = (SBasis*)param;
+ return sqrt((*pc)(t));
+}
+
+/** Calculates the length of a D2<SBasis> through gsl integration.
+ \param B the Element.
+ \param tol the maximum error allowed.
+ \param result variable to be incremented with the length of the path
+ \param abs_error variable to be incremented with the estimated error
+ \relates D2
+If you only want the length, this routine may be faster/more accurate.
+*/
+void Geom::length_integrating(D2<SBasis> const &B, double &result, double &abs_error, double tol) {
+ D2<SBasis> dB = derivative(B);
+ SBasis dB2 = dot(dB, dB);
+
+ gsl_function F;
+ gsl_integration_workspace * w
+ = gsl_integration_workspace_alloc (20);
+ F.function = &sb_length_integrating;
+ F.params = (void*)&dB2;
+ double quad_result, err;
+ /* We could probably use the non adaptive code here if we removed any cusps first. */
+
+ gsl_integration_qag (&F, 0, 1, 0, tol, 20,
+ GSL_INTEG_GAUSS21, w, &quad_result, &err);
+
+ abs_error += err;
+ result += quad_result;
+}
+
+/** Calculates the length of a D2<SBasis> through gsl integration.
+ \param s the Element.
+ \param tol the maximum error allowed.
+ \relates D2
+If you only want the total length, this routine is faster and more accurate than constructing an arcLengthSb.
+*/
+double
+Geom::length(D2<SBasis> const &s,
+ double tol){
+ double result = 0;
+ double abs_error = 0;
+ length_integrating(s, result, abs_error, tol);
+ return result;
+}
+/** Calculates the length of a Piecewise<D2<SBasis> > through gsl integration.
+ \param s the Element.
+ \param tol the maximum error allowed.
+ \relates Piecewise
+If you only want the total length, this routine is faster and more accurate than constructing an arcLengthSb.
+*/
+double
+Geom::length(Piecewise<D2<SBasis> > const &s,
+ double tol){
+ double result = 0;
+ double abs_error = 0;
+ for (unsigned i=0; i < s.size();i++){
+ length_integrating(s[i], result, abs_error, tol);
+ }
+ return result;
+}
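+
+#if 0
+// Illustrative usage sketch (added for documentation; not part of the original
+// 2geom sources). The function name and sample values are hypothetical.
+// When only the total length is needed, call length() directly instead of
+// building the whole arc-length function with arcLengthSb().
+static void example_length() {
+    D2<SBasis> segment;
+    segment[X] = SBasis(Linear(0., 3.));
+    segment[Y] = SBasis(Linear(0., 4.));
+    double len = length(segment, 1e-9);           // 5, up to the integration tolerance
+    double acc = 0, err = 0;
+    length_integrating(segment, acc, err, 1e-9);  // also reports an error estimate
+    (void)len; (void)acc; (void)err;
+}
+#endif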
+
+/**
+ * Centroid using sbasis integration.
+ \param p the Element.
+ \param centroid on return contains the centroid of the shape
+ \param area on return contains the signed area of the shape.
+ \relates Piecewise
+This approach uses Green's theorem to compute the area and centroid via integrals. For curved shapes this is much faster than converting to a polyline. Note that without an uncross operation the output is the signed area, not the absolute area.
+
+ * Returned values:
+ 0 for normal execution;
+ 2 if area is zero, meaning centroid is meaningless.
+
+ */
+unsigned Geom::centroid(Piecewise<D2<SBasis> > const &p, Point& centroid, double &area) {
+ Point centroid_tmp(0,0);
+ double atmp = 0;
+ for(unsigned i = 0; i < p.size(); i++) {
+ SBasis curl = dot(p[i], rot90(derivative(p[i])));
+ SBasis A = integral(curl);
+ D2<SBasis> C = integral(multiply(curl, p[i]));
+ atmp += A.at1() - A.at0();
+ centroid_tmp += C.at1()- C.at0(); // first moment.
+ }
+// join ends
+ centroid_tmp *= 2;
+ Point final = p[p.size()-1].at1(), initial = p[0].at0();
+ const double ai = cross(final, initial);
+ atmp += ai;
+ centroid_tmp += (final + initial)*ai; // first moment.
+
+ area = atmp / 2;
+ if (atmp != 0) {
+ centroid = centroid_tmp / (3 * atmp);
+ return 0;
+ }
+ return 2;
+}
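+
+#if 0
+// Illustrative usage sketch (added for documentation; not part of the original
+// 2geom sources). The function name is hypothetical. The input should describe
+// a closed outline; the code above closes any gap between the last and first
+// point with a straight chord.
+static void example_centroid(Piecewise<D2<SBasis> > const &closed_path) {
+    Point c;
+    double area = 0;
+    unsigned status = centroid(closed_path, c, area);
+    if (status == 0) {
+        // c is the centroid and area the signed area (sign depends on orientation).
+    } else {
+        // status == 2: the area is zero, so the centroid is meaningless.
+    }
+}
+#endif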
+
+/**
+ * Find cubics with prescribed curvatures at both ends.
+ *
+ * This requires solving a system of the form
+ *
+ * \f[
+ * \lambda_1 = a_0 \lambda_0^2 + c_0
+ * \lambda_0 = a_1 \lambda_1^2 + c_1
+ * \f]
+ *
+ * which is a degree-4 equation in lambda_0.
+ * Below are helper functions dedicated to solving it, assuming a0 != 0 and a1 != 0.
+ */
+
+static OptInterval
+find_bounds_for_lambda0(double aa0,double aa1,double cc0,double cc1,
+ int insist_on_speeds_signs){
+
+ double a0=aa0,a1=aa1,c0=cc0,c1=cc1;
+ Interval result;
+ bool flip = a1<0;
+ if (a1<0){a1=-a1; c1=-c1;}
+ if (a0<0){a0=-a0; c0=-c0;}
+ double a = (a0<a1 ? a0 : a1);
+ double c = (c0<c1 ? c0 : c1);
+ double delta = 1-4*a*c;
+ if ( delta < 0 )
+ return OptInterval();//return empty interval
+ double lambda_max = (1+std::sqrt(delta))/2/a;
+
+ result = Interval(c,lambda_max);
+ if (flip)
+ result *= -1;
+ if (insist_on_speeds_signs == 1){
+ if (result.max() < 0)//Caution: setMin with max<new min...
+ return OptInterval();//return empty interval
+ result.setMin(0);
+ }
+ result = Interval(result.min()-.1,result.max()+.1);//just in case all our approx. were exact...
+ return result;
+}
+
+static
+std::vector<double>
+solve_lambda0(double a0,double a1,double c0,double c1,
+ int insist_on_speeds_signs){
+
+ SBasis p(3, Linear());
+ p[0] = Linear( a1*c0*c0+c1, a1*a0*(a0+ 2*c0) +a1*c0*c0 +c1 -1 );
+ p[1] = Linear( -a1*a0*(a0+2*c0), -a1*a0*(3*a0+2*c0) );
+ p[2] = Linear( a1*a0*a0 );
+
+ OptInterval domain = find_bounds_for_lambda0(a0,a1,c0,c1,insist_on_speeds_signs);
+ if ( !domain )
+ return std::vector<double>();
+ p = compose(p,Linear(domain->min(),domain->max()));
+ std::vector<double>rts = roots(p);
+ for (double & rt : rts){
+ rt = domain->min() + rt * domain->extent();
+ }
+ return rts;
+}
+
+/**
+* \brief returns the cubics fitting direction and curvature of a given
+* input curve at two points.
+*
+* The input can be the
+* value, speed, and acceleration
+* or
+* value, speed, and cross(acceleration,speed)
+* of the original curve at both ends.
+* (the second is often technically useful, as it avoids unnecessary division by |v|^2)
+* Recall that K=1/R=cross(acceleration,speed)/|speed|^3.
+*
+* Moreover, a 7-th argument 'insist_on_speed_signs' can be supplied to select solutions:
+* If insist_on_speed_signs == 1, only consider solutions where speeds at both ends are positively
+* proportional to the given ones.
+* If insist_on_speed_signs == 0, allow speeds to point in the opposite direction (both at the same time)
+* If insist_on_speed_signs == -1, allow speeds to point in either direction independently.
+*
+* \relates D2
+*/
+std::vector<D2<SBasis> >
+Geom::cubics_fitting_curvature(Point const &M0, Point const &M1,
+ Point const &dM0, Point const &dM1,
+ double d2M0xdM0, double d2M1xdM1,
+ int insist_on_speed_signs,
+ double epsilon){
+ std::vector<D2<SBasis> > result;
+
+ //speed of cubic bezier will be lambda0*dM0 and lambda1*dM1,
+ //with lambda0 and lambda1 s.t. curvature at both ends is the same
+ //as the curvature of the given curve.
+ std::vector<double> lambda0,lambda1;
+ double dM1xdM0=cross(dM1,dM0);
+ if (fabs(dM1xdM0)<epsilon){
+ if (fabs(d2M0xdM0)<epsilon || fabs(d2M1xdM1)<epsilon){
+ return result;
+ }
+ double lbda02 = 6.*cross(M1-M0,dM0)/d2M0xdM0;
+ double lbda12 =-6.*cross(M1-M0,dM1)/d2M1xdM1;
+ if (lbda02<0 || lbda12<0){
+ return result;
+ }
+ lambda0.push_back(std::sqrt(lbda02) );
+ lambda1.push_back(std::sqrt(lbda12) );
+ }else{
+ //solve: lambda1 = a0 lambda0^2 + c0
+ // lambda0 = a1 lambda1^2 + c1
+ double a0,c0,a1,c1;
+ a0 = -d2M0xdM0/2/dM1xdM0;
+ c0 = 3*cross(M1-M0,dM0)/dM1xdM0;
+ a1 = -d2M1xdM1/2/dM1xdM0;
+ c1 = -3*cross(M1-M0,dM1)/dM1xdM0;
+
+ if (fabs(a0)<epsilon){
+ lambda1.push_back( c0 );
+ lambda0.push_back( a1*c0*c0 + c1 );
+ }else if (fabs(a1)<epsilon){
+ lambda0.push_back( c1 );
+ lambda1.push_back( a0*c1*c1 + c0 );
+ }else{
+            //find lambda0 by solving a degree 4 equation d0+d1*X+...+d4*X^4=0
+ vector<double> solns=solve_lambda0(a0,a1,c0,c1,insist_on_speed_signs);
+ for (double lbda0 : solns){
+ double lbda1=c0+a0*lbda0*lbda0;
+ //is this solution pointing in the + direction at both ends?
+ if (lbda0>=0. && lbda1>=0.){
+ lambda0.push_back( lbda0);
+ lambda1.push_back( lbda1);
+ }
+ //is this solution pointing in the - direction at both ends?
+ else if (lbda0<=0. && lbda1<=0. && insist_on_speed_signs<=0){
+ lambda0.push_back( lbda0);
+ lambda1.push_back( lbda1);
+ }
+                //otherwise this solution points in the + direction at one end and the - direction at the other.
+ else if (insist_on_speed_signs<0){
+ lambda0.push_back( lbda0);
+ lambda1.push_back( lbda1);
+ }
+ }
+ }
+ }
+
+ for (unsigned i=0; i<lambda0.size(); i++){
+ Point V0 = lambda0[i]*dM0;
+ Point V1 = lambda1[i]*dM1;
+ D2<SBasis> cubic;
+ for(unsigned dim=0;dim<2;dim++){
+ SBasis c(2, Linear());
+ c[0] = Linear(M0[dim],M1[dim]);
+ c[1] = Linear( M0[dim]-M1[dim]+V0[dim],
+ -M0[dim]+M1[dim]-V1[dim]);
+ cubic[dim] = c;
+ }
+#if 0
+ Piecewise<SBasis> k = curvature(result);
+ double dM0_l = dM0.length();
+ double dM1_l = dM1.length();
+ g_warning("Target radii: %f, %f", dM0_l*dM0_l*dM0_l/d2M0xdM0,dM1_l*dM1_l*dM1_l/d2M1xdM1);
+ g_warning("Obtained radii: %f, %f",1/k.valueAt(0),1/k.valueAt(1));
+#endif
+ result.push_back(cubic);
+ }
+ return(result);
+}
+
+std::vector<D2<SBasis> >
+Geom::cubics_fitting_curvature(Point const &M0, Point const &M1,
+ Point const &dM0, Point const &dM1,
+ Point const &d2M0, Point const &d2M1,
+ int insist_on_speed_signs,
+ double epsilon){
+ double d2M0xdM0 = cross(d2M0,dM0);
+ double d2M1xdM1 = cross(d2M1,dM1);
+ return cubics_fitting_curvature(M0,M1,dM0,dM1,d2M0xdM0,d2M1xdM1,insist_on_speed_signs,epsilon);
+}
+
+std::vector<D2<SBasis> >
+Geom::cubics_with_prescribed_curvature(Point const &M0, Point const &M1,
+ Point const &dM0, Point const &dM1,
+ double k0, double k1,
+ int insist_on_speed_signs,
+ double epsilon){
+ double length;
+ length = dM0.length();
+ double d2M0xdM0 = k0*length*length*length;
+ length = dM1.length();
+ double d2M1xdM1 = k1*length*length*length;
+ return cubics_fitting_curvature(M0,M1,dM0,dM1,d2M0xdM0,d2M1xdM1,insist_on_speed_signs,epsilon);
+}
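+
+#if 0
+// Illustrative usage sketch (added for documentation; not part of the original
+// 2geom sources). The function name and sample values are hypothetical.
+// Ask for cubics joining two points with prescribed tangent directions and
+// curvatures at the ends; the result may contain zero, one or several cubics.
+static void example_cubics_with_prescribed_curvature() {
+    Point M0(0, 0), M1(10, 10);     // endpoints
+    Point dM0(1, 0), dM1(0, 1);     // tangent directions at the ends
+    double k0 = .1, k1 = .1;        // prescribed curvatures (sign follows cross())
+    std::vector<D2<SBasis> > fits =
+        cubics_with_prescribed_curvature(M0, M1, dM0, dM1, k0, k1, 1, 1e-5);
+    // Each element matches position, tangent direction and curvature at both
+    // ends; the vector is empty if no such cubic exists for these data.
+    (void)fits;
+}
+#endif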
+
+
+namespace Geom {
+/**
+* \brief returns all the parameter values of A whose tangent passes through P.
+* \relates D2
+*/
+std::vector<double> find_tangents(Point P, D2<SBasis> const &A) {
+ SBasis crs (cross(A - P, derivative(A)));
+ return roots(crs);
+}
+
+/**
+* \brief returns all the parameter values of A whose normal passes through P.
+* \relates D2
+*/
+std::vector<double> find_normals(Point P, D2<SBasis> const &A) {
+ SBasis crs (dot(A - P, derivative(A)));
+ return roots(crs);
+}
+
+/**
+* \brief returns all the parameter values of A whose normal is parallel to vector V.
+* \relates D2
+*/
+std::vector<double> find_normals_by_vector(Point V, D2<SBasis> const &A) {
+ SBasis crs = dot(derivative(A), V);
+ return roots(crs);
+}
+/**
+* \brief returns all the parameter values of A whose tangent is parallel to vector V.
+* \relates D2
+*/
+std::vector<double> find_tangents_by_vector(Point V, D2<SBasis> const &A) {
+ SBasis crs = dot(derivative(A), rot90(V));
+ return roots(crs);
+}
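+
+#if 0
+// Illustrative usage sketch (added for documentation; not part of the original
+// 2geom sources). The function name and the sample point are hypothetical.
+// Find the points of A whose tangent line passes through an external point P.
+static void example_find_tangents(D2<SBasis> const &A) {
+    Point P(5, 5);
+    std::vector<double> ts = find_tangents(P, A);
+    for (double t : ts) {
+        Point tangency = A.valueAt(t);   // point on the curve whose tangent hits P
+        (void)tangency;
+    }
+}
+#endif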
+
+}
+//}; // namespace
+
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/sbasis-math.cpp b/src/2geom/sbasis-math.cpp
new file mode 100644
index 0000000..547f9af
--- /dev/null
+++ b/src/2geom/sbasis-math.cpp
@@ -0,0 +1,379 @@
+/*
+ * sbasis-math.cpp - some std functions to work with (pw)s-basis
+ *
+ * Authors:
+ * Jean-Francois Barraud
+ *
+ * Copyright (C) 2006-2007 authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+//this is a first attempt at defining sqrt, cos, sin, etc...
+//TODO: define a truncated compose(sb,sb, order) and extend it to pw<sb>.
+//TODO: in all these functions, compute 'order' according to 'tol'.
+
+#include <2geom/d2.h>
+#include <2geom/sbasis-math.h>
+#include <stdio.h>
+#include <math.h>
+//#define ZERO 1e-3
+
+
+namespace Geom {
+
+
+//-|x|-----------------------------------------------------------------------
+/** Return the absolute value of a function pointwise.
+ \param f function
+*/
+Piecewise<SBasis> abs(SBasis const &f){
+ return abs(Piecewise<SBasis>(f));
+}
+/** Return the absolute value of a function pointwise.
+ \param f function
+*/
+Piecewise<SBasis> abs(Piecewise<SBasis> const &f){
+ Piecewise<SBasis> absf=partition(f,roots(f));
+ for (unsigned i=0; i<absf.size(); i++){
+ if (absf.segs[i](.5)<0) absf.segs[i]*=-1;
+ }
+ return absf;
+}
+
+//-max(x,y), min(x,y)--------------------------------------------------------
+/** Return the greater of the two functions pointwise.
+ \param f, g two functions
+*/
+Piecewise<SBasis> max( SBasis const &f, SBasis const &g){
+ return max(Piecewise<SBasis>(f),Piecewise<SBasis>(g));
+}
+/** Return the greater of the two functions pointwise.
+ \param f, g two functions
+*/
+Piecewise<SBasis> max(Piecewise<SBasis> const &f, SBasis const &g){
+ return max(f,Piecewise<SBasis>(g));
+}
+/** Return the greater of the two functions pointwise.
+ \param f, g two functions
+*/
+Piecewise<SBasis> max( SBasis const &f, Piecewise<SBasis> const &g){
+ return max(Piecewise<SBasis>(f),g);
+}
+/** Return the greater of the two functions pointwise.
+ \param f, g two functions
+*/
+Piecewise<SBasis> max(Piecewise<SBasis> const &f, Piecewise<SBasis> const &g){
+ Piecewise<SBasis> max=partition(f,roots(f-g));
+ Piecewise<SBasis> gg =partition(g,max.cuts);
+ max = partition(max,gg.cuts);
+ for (unsigned i=0; i<max.size(); i++){
+ if (max.segs[i](.5)<gg.segs[i](.5)) max.segs[i]=gg.segs[i];
+ }
+ return max;
+}
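+
+#if 0
+// Illustrative usage sketch (added for documentation; not part of the original
+// 2geom sources). The function name and sample values are hypothetical.
+// The pointwise max of two functions is piecewise, with cuts where they cross.
+static void example_max() {
+    SBasis f = Linear(0., 1.);        // f(t) = t
+    SBasis g = Linear(1., 0.);        // g(t) = 1 - t
+    Piecewise<SBasis> h = max(f, g);  // equals g on [0,.5] and f on [.5,1]
+    double v = h(0.25);               // 0.75
+    (void)v;
+}
+#endif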
+
+/** Return the lesser of the two functions pointwise.
+ \param f, g two functions
+*/
+Piecewise<SBasis>
+min( SBasis const &f, SBasis const &g){ return -max(-f,-g); }
+/** Return the lesser of the two functions pointwise.
+ \param f, g two functions
+*/
+Piecewise<SBasis>
+min(Piecewise<SBasis> const &f, SBasis const &g){ return -max(-f,-g); }
+/** Return the lesser of the two functions pointwise.
+ \param f, g two functions
+*/
+Piecewise<SBasis>
+min( SBasis const &f, Piecewise<SBasis> const &g){ return -max(-f,-g); }
+/** Return the lesser of the two functions pointwise.
+ \param f, g two functions
+*/
+Piecewise<SBasis>
+min(Piecewise<SBasis> const &f, Piecewise<SBasis> const &g){ return -max(-f,-g); }
+
+
+//-sign(x)---------------------------------------------------------------
+/** Return the sign of the function pointwise.
+ \param f function
+*/
+Piecewise<SBasis> signSb(SBasis const &f){
+ return signSb(Piecewise<SBasis>(f));
+}
+/** Return the sign of the function pointwise.
+ \param f function
+*/
+Piecewise<SBasis> signSb(Piecewise<SBasis> const &f){
+ Piecewise<SBasis> sign=partition(f,roots(f));
+ for (unsigned i=0; i<sign.size(); i++){
+ sign.segs[i] = (sign.segs[i](.5)<0)? Linear(-1.):Linear(1.);
+ }
+ return sign;
+}
+
+//-Sqrt----------------------------------------------------------
+static Piecewise<SBasis> sqrt_internal(SBasis const &f,
+ double tol,
+ int order){
+ SBasis sqrtf;
+ if(f.isZero() || order == 0){
+ return Piecewise<SBasis>(sqrtf);
+ }
+ if (f.at0()<-tol*tol && f.at1()<-tol*tol){
+ return sqrt_internal(-f,tol,order);
+ }else if (f.at0()>tol*tol && f.at1()>tol*tol){
+ sqrtf.resize(order+1, Linear(0,0));
+ sqrtf[0] = Linear(std::sqrt(f[0][0]), std::sqrt(f[0][1]));
+ SBasis r = f - multiply(sqrtf, sqrtf); // remainder
+ for(unsigned i = 1; int(i) <= order && i<r.size(); i++) {
+ Linear ci(r[i][0]/(2*sqrtf[0][0]), r[i][1]/(2*sqrtf[0][1]));
+ SBasis cisi = shift(ci, i);
+ r -= multiply(shift((sqrtf*2 + cisi), i), SBasis(ci));
+ r.truncate(order+1);
+ sqrtf[i] = ci;
+ if(r.tailError(i) == 0) // if exact
+ break;
+ }
+ }else{
+ sqrtf = Linear(std::sqrt(fabs(f.at0())), std::sqrt(fabs(f.at1())));
+ }
+
+ double err = (f - multiply(sqrtf, sqrtf)).tailError(0);
+ if (err<tol){
+ return Piecewise<SBasis>(sqrtf);
+ }
+
+ Piecewise<SBasis> sqrtf0,sqrtf1;
+ sqrtf0 = sqrt_internal(compose(f,Linear(0.,.5)),tol,order);
+ sqrtf1 = sqrt_internal(compose(f,Linear(.5,1.)),tol,order);
+ sqrtf0.setDomain(Interval(0.,.5));
+ sqrtf1.setDomain(Interval(.5,1.));
+ sqrtf0.concat(sqrtf1);
+ return sqrtf0;
+}
+
+/** Compute the sqrt of a function.
+ \param f function
+*/
+Piecewise<SBasis> sqrt(SBasis const &f, double tol, int order){
+ return sqrt(max(f,Linear(tol*tol)),tol,order);
+}
+
+/** Compute the sqrt of a function.
+ \param f function
+*/
+Piecewise<SBasis> sqrt(Piecewise<SBasis> const &f, double tol, int order){
+ Piecewise<SBasis> result;
+ Piecewise<SBasis> zero = Piecewise<SBasis>(Linear(tol*tol));
+ zero.setDomain(f.domain());
+ Piecewise<SBasis> ff=max(f,zero);
+
+ for (unsigned i=0; i<ff.size(); i++){
+ Piecewise<SBasis> sqrtfi = sqrt_internal(ff.segs[i],tol,order);
+ sqrtfi.setDomain(Interval(ff.cuts[i],ff.cuts[i+1]));
+ result.concat(sqrtfi);
+ }
+ return result;
+}
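+
+#if 0
+// Illustrative usage sketch (added for documentation; not part of the original
+// 2geom sources). The function name and sample values are hypothetical.
+// sqrt() returns a piecewise approximation within the given tolerance; values
+// below ~tol^2 are clamped before the root is taken.
+static void example_sqrt() {
+    SBasis f = Linear(1., 4.);               // f(t) = 1 + 3t
+    Piecewise<SBasis> r = sqrt(f, 1e-6, 3);
+    double r_end = r(1.);                    // ~2, since f(1) = 4
+    (void)r_end;
+}
+#endif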
+
+//-Yet another sin/cos--------------------------------------------------------------
+
+/** Compute the sine of a function.
+ \param f function
+ \param tol maximum error
+ \param order maximum degree polynomial to use
+*/
+Piecewise<SBasis> sin( SBasis const &f, double tol, int order){return(cos(-f+M_PI/2,tol,order));}
+/** Compute the sine of a function.
+ \param f function
+ \param tol maximum error
+ \param order maximum degree polynomial to use
+*/
+Piecewise<SBasis> sin(Piecewise<SBasis> const &f, double tol, int order){return(cos(-f+M_PI/2,tol,order));}
+
+/** Compute the cosine of a function.
+ \param f function
+ \param tol maximum error
+ \param order maximum degree polynomial to use
+*/
+Piecewise<SBasis> cos(Piecewise<SBasis> const &f, double tol, int order){
+ Piecewise<SBasis> result;
+ for (unsigned i=0; i<f.size(); i++){
+ Piecewise<SBasis> cosfi = cos(f.segs[i],tol,order);
+ cosfi.setDomain(Interval(f.cuts[i],f.cuts[i+1]));
+ result.concat(cosfi);
+ }
+ return result;
+}
+
+/** Compute the cosine of a function.
+ \param f function
+ \param tol maximum error
+ \param order maximum degree polynomial to use
+*/
+Piecewise<SBasis> cos( SBasis const &f, double tol, int order){
+ double alpha = (f.at0()+f.at1())/2.;
+ SBasis x = f-alpha;
+ double d = x.tailError(0),err=1;
+    //estimate cos(x) - sum_{k=0}^{order} (-1)^k x^{2k}/(2k)! by the first neglected term
+ for (int i=1; i<=2*order; i++) err*=d/i;
+
+ if (err<tol){
+ SBasis xk=Linear(1), c=Linear(1), s=Linear(0);
+ for (int k=1; k<=2*order; k+=2){
+ xk*=x/k;
+            //also take truncation errors into account...
+ err+=xk.tailError(order);
+ xk.truncate(order);
+ s+=xk;
+ xk*=-x/(k+1);
+            //also take truncation errors into account...
+ err+=xk.tailError(order);
+ xk.truncate(order);
+ c+=xk;
+ }
+ if (err<tol){
+ return Piecewise<SBasis>(std::cos(alpha)*c-std::sin(alpha)*s);
+ }
+ }
+ Piecewise<SBasis> c0,c1;
+ c0 = cos(compose(f,Linear(0.,.5)),tol,order);
+ c1 = cos(compose(f,Linear(.5,1.)),tol,order);
+ c0.setDomain(Interval(0.,.5));
+ c1.setDomain(Interval(.5,1.));
+ c0.concat(c1);
+ return c0;
+}
+
+//--1/x------------------------------------------------------------
+//TODO: this implementation is just wrong. Remove or redo!
+
+void truncateResult(Piecewise<SBasis> &f, int order){
+ if (order>=0){
+ for (auto & seg : f.segs){
+ seg.truncate(order);
+ }
+ }
+}
+
+Piecewise<SBasis> reciprocalOnDomain(Interval range, double tol){
+ Piecewise<SBasis> reciprocal_fn;
+ //TODO: deduce R from tol...
+ double R=2.;
+ SBasis reciprocal1_R=reciprocal(Linear(1,R),3);
+ double a=range.min(), b=range.max();
+ if (a*b<0){
+ b=std::max(fabs(a),fabs(b));
+ a=0;
+ }else if (b<0){
+ a=-range.max();
+ b=-range.min();
+ }
+
+ if (a<=tol){
+ reciprocal_fn.push_cut(0);
+ int i0=(int) floor(std::log(tol)/std::log(R));
+ a = std::pow(R,i0);
+ reciprocal_fn.push(Linear(1/a),a);
+ }else{
+ int i0=(int) floor(std::log(a)/std::log(R));
+ a = std::pow(R,i0);
+ reciprocal_fn.cuts.push_back(a);
+ }
+
+ while (a<b){
+ reciprocal_fn.push(reciprocal1_R/a,R*a);
+ a*=R;
+ }
+ if (range.min()<0 || range.max()<0){
+ Piecewise<SBasis>reciprocal_fn_neg;
+ //TODO: define reverse(pw<sb>);
+ reciprocal_fn_neg.cuts.push_back(-reciprocal_fn.cuts.back());
+ for (unsigned i=0; i<reciprocal_fn.size(); i++){
+ int idx=reciprocal_fn.segs.size()-1-i;
+ reciprocal_fn_neg.push_seg(-reverse(reciprocal_fn.segs.at(idx)));
+ reciprocal_fn_neg.push_cut(-reciprocal_fn.cuts.at(idx));
+ }
+ if (range.max()>0){
+ reciprocal_fn_neg.concat(reciprocal_fn);
+ }
+ reciprocal_fn=reciprocal_fn_neg;
+ }
+
+ return(reciprocal_fn);
+}
+
+Piecewise<SBasis> reciprocal(SBasis const &f, double tol, int order){
+ Piecewise<SBasis> reciprocal_fn=reciprocalOnDomain(*bounds_fast(f), tol);
+ Piecewise<SBasis> result=compose(reciprocal_fn,f);
+ truncateResult(result,order);
+ return(result);
+}
+Piecewise<SBasis> reciprocal(Piecewise<SBasis> const &f, double tol, int order){
+ Piecewise<SBasis> reciprocal_fn=reciprocalOnDomain(*bounds_fast(f), tol);
+ Piecewise<SBasis> result=compose(reciprocal_fn,f);
+ truncateResult(result,order);
+ return(result);
+}
+
+/**
+ * \brief Returns a Piecewise SBasis with prescribed values at prescribed times.
+ *
+ * \param times: vector of times at which the values are given. Should be sorted in increasing order.
+ * \param values: vector of prescribed values. Should have the same size as times and be sorted accordingly.
+ * \param smoothness: (defaults to 1) regularity class of the result: 0=piecewise linear, 1=continuous derivative, etc...
+ */
+Piecewise<SBasis> interpolate(std::vector<double> times, std::vector<double> values, unsigned smoothness){
+ assert ( values.size() == times.size() );
+ if ( values.empty() ) return Piecewise<SBasis>();
+ if ( values.size() == 1 ) return Piecewise<SBasis>(values[0]);//what about time??
+
+ SBasis sk = shift(Linear(1.),smoothness);
+ SBasis bump_in = integral(sk);
+ bump_in -= bump_in.at0();
+ bump_in /= bump_in.at1();
+ SBasis bump_out = reverse( bump_in );
+
+ Piecewise<SBasis> result;
+ result.cuts.push_back(times[0]);
+ for (unsigned i = 0; i<values.size()-1; i++){
+ result.push(bump_out*values[i]+bump_in*values[i+1],times[i+1]);
+ }
+ return result;
+}
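+
+#if 0
+// Illustrative usage sketch (added for documentation; not part of the original
+// 2geom sources). The function name and sample values are hypothetical.
+// Build a function with a continuous derivative passing through (0,0), (1,2), (3,1).
+static void example_interpolate() {
+    std::vector<double> times, values;
+    times.push_back(0);  values.push_back(0);
+    times.push_back(1);  values.push_back(2);
+    times.push_back(3);  values.push_back(1);
+    Piecewise<SBasis> f = interpolate(times, values, 1);
+    // f(0) == 0, f(1) == 2, f(3) == 1, with a continuous first derivative (smoothness 1).
+    (void)f;
+}
+#endif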
+
+}
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/sbasis-poly.cpp b/src/2geom/sbasis-poly.cpp
new file mode 100644
index 0000000..ffee43f
--- /dev/null
+++ b/src/2geom/sbasis-poly.cpp
@@ -0,0 +1,59 @@
+#include <2geom/sbasis-poly.h>
+
+namespace Geom{
+
+/** Changes the basis of p to be sbasis.
+ \param p the Monomial basis polynomial
+ \returns the Symmetric basis polynomial
+
+This algorithm is horribly slow and numerically terrible. Only for testing.
+*/
+SBasis poly_to_sbasis(Poly const & p) {
+ SBasis x = Linear(0, 1);
+ SBasis r;
+
+ for(int i = p.size()-1; i >= 0; i--) {
+ r = SBasis(Linear(p[i], p[i])) + multiply(x, r);
+ }
+ r.normalize();
+ return r;
+
+}
+
+/** Changes the basis of p to be monomial.
+ \param p the Symmetric basis polynomial
+ \returns the Monomial basis polynomial
+
+This algorithm is horribly slow and numerically terrible. Only for testing.
+*/
+Poly sbasis_to_poly(SBasis const & sb) {
+ if(sb.isZero())
+ return Poly();
+ Poly S; // (1-x)x = -1*x^2 + 1*x + 0
+ Poly A, B;
+ B.push_back(0);
+ B.push_back(1);
+ A.push_back(1);
+ A.push_back(-1);
+ S = A*B;
+ Poly r;
+
+ for(int i = sb.size()-1; i >= 0; i--) {
+ r = S*r + sb[i][0]*A + sb[i][1]*B;
+ }
+ r.normalize();
+ return r;
+}
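+
+#if 0
+// Illustrative usage sketch (added for documentation; not part of the original
+// 2geom sources). The function name and sample coefficients are hypothetical.
+// Round-trip a small polynomial through the symmetric power basis (testing only,
+// as the conversions above are slow and numerically fragile).
+static void example_poly_sbasis_roundtrip() {
+    Poly p;             // p(x) = 2 - 3x + x^2, coefficients in increasing degree
+    p.push_back(2);
+    p.push_back(-3);
+    p.push_back(1);
+    SBasis sb = poly_to_sbasis(p);
+    Poly back = sbasis_to_poly(sb);   // numerically close to p
+    (void)back;
+}
+#endif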
+
+};
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/sbasis-roots.cpp b/src/2geom/sbasis-roots.cpp
new file mode 100644
index 0000000..ee006d2
--- /dev/null
+++ b/src/2geom/sbasis-roots.cpp
@@ -0,0 +1,656 @@
+/**
+ * @file
+ * @brief Root finding for sbasis functions.
+ *//*
+ * Authors:
+ * Nathan Hurst <njh@njhurst.com>
+ * JF Barraud
+ * Copyright 2006-2007 Authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ */
+
+ /*
+ * It is more efficient to find roots of f(t) = c_0, c_1, ... all at once, rather than iterating.
+ *
+ * Todo/think about:
+ * multi-roots using bernstein method, one approach would be:
+ sort c
+ take median and find roots of that
+ whenever a segment lies entirely on one side of the median,
+ find the median of the half and recurse.
+
+ in essence we are implementing quicksort on a continuous function
+
+ * the gsl poly roots finder is faster than bernstein too, but we don't use it for 3 reasons:
+
+ a) it requires conversion to poly, which is numerically unstable
+
+ b) it requires gsl (which is currently not a dependency, and would bring in a whole slew of unrelated stuff)
+
+ c) it finds all roots, even complex ones. We don't want to accidentally treat a nearly real root as a real root
+
+From memory gsl poly roots was about 10 times faster than bernstein in the case where all the roots
+are in [0,1] for polys of order 5. I spent some time working out whether eigenvalue root finding
+could be done directly in sbasis space, but the maths was too hard for me. -- njh
+
+jfbarraud: eigenvalue root finding could be done directly in sbasis space ?
+
+njh: I don't know, I think it should. You would make a matrix whose characteristic polynomial was
+correct, but do it by putting the sbasis terms in the right spots in the matrix. normal eigenvalue
+root finding makes a matrix that is a diagonal + a row along the top. This matrix has the property
+that its characteristic poly is just the poly whose coefficients are along the top row.
+
+Now an sbasis function is a linear combination of the poly coeffs. So it seems to me that you
+should be able to put the sbasis coeffs directly into a matrix in the right spots so that the
+characteristic poly is the sbasis. You'll still have problems b) and c).
+
+We might be able to lift an eigenvalue solver and include that directly into 2geom. Eigenvalues
+also allow you to find intersections of multiple curves but require solving n*m x n*m matrices.
+
+ **/
+
+#include <cmath>
+#include <map>
+
+#include <2geom/sbasis.h>
+#include <2geom/sbasis-to-bezier.h>
+#include <2geom/solver.h>
+
+using namespace std;
+
+namespace Geom{
+
+/** Find the smallest interval that bounds a
+ \param a sbasis function
+ \returns interval
+
+*/
+
+#ifdef USE_SBASIS_OF
+OptInterval bounds_exact(SBasisOf<double> const &a) {
+ Interval result = Interval(a.at0(), a.at1());
+ SBasisOf<double> df = derivative(a);
+ vector<double>extrema = roots(df);
+ for (unsigned i=0; i<extrema.size(); i++){
+ result.extendTo(a(extrema[i]));
+ }
+ return result;
+}
+#else
+OptInterval bounds_exact(SBasis const &a) {
+ Interval result = Interval(a.at0(), a.at1());
+ SBasis df = derivative(a);
+ vector<double>extrema = roots(df);
+ for (double i : extrema){
+ result.expandTo(a(i));
+ }
+ return result;
+}
+#endif
+
+/** Find a small interval that bounds a
+ \param a sbasis function
+ \returns interval
+
+*/
+// I have no idea how this works, some clever bounding argument by jfb.
+#ifdef USE_SBASIS_OF
+OptInterval bounds_fast(const SBasisOf<double> &sb, int order) {
+#else
+OptInterval bounds_fast(const SBasis &sb, int order) {
+#endif
+ Interval res(0,0); // an empty sbasis is 0.
+
+ for(int j = sb.size()-1; j>=order; j--) {
+ double a=sb[j][0];
+ double b=sb[j][1];
+
+ double v, t = 0;
+ v = res.min();
+ if (v<0) t = ((b-a)/v+1)*0.5;
+ if (v>=0 || t<0 || t>1) {
+ res.setMin(std::min(a,b));
+ } else {
+ res.setMin(lerp(t, a+v*t, b));
+ }
+
+ v = res.max();
+ if (v>0) t = ((b-a)/v+1)*0.5;
+ if (v<=0 || t<0 || t>1) {
+ res.setMax(std::max(a,b));
+ }else{
+ res.setMax(lerp(t, a+v*t, b));
+ }
+ }
+ if (order>0) res*=std::pow(.25,order);
+ return res;
+}
+
+/** Find a small interval that bounds a(t) for t in i to order order
+ \param sb sbasis function
+ \param i domain interval
+ \param order number of terms
+ \return interval
+
+*/
+#ifdef USE_SBASIS_OF
+OptInterval bounds_local(const SBasisOf<double> &sb, const OptInterval &i, int order) {
+#else
+OptInterval bounds_local(const SBasis &sb, const OptInterval &i, int order) {
+#endif
+ double t0=i->min(), t1=i->max(), lo=0., hi=0.;
+ for(int j = sb.size()-1; j>=order; j--) {
+ double a=sb[j][0];
+ double b=sb[j][1];
+
+ double t = 0;
+ if (lo<0) t = ((b-a)/lo+1)*0.5;
+ if (lo>=0 || t<t0 || t>t1) {
+ lo = std::min(a*(1-t0)+b*t0+lo*t0*(1-t0),a*(1-t1)+b*t1+lo*t1*(1-t1));
+ }else{
+ lo = lerp(t, a+lo*t, b);
+ }
+
+ if (hi>0) t = ((b-a)/hi+1)*0.5;
+ if (hi<=0 || t<t0 || t>t1) {
+ hi = std::max(a*(1-t0)+b*t0+hi*t0*(1-t0),a*(1-t1)+b*t1+hi*t1*(1-t1));
+ }else{
+ hi = lerp(t, a+hi*t, b);
+ }
+ }
+ Interval res = Interval(lo,hi);
+ if (order>0) res*=std::pow(.25,order);
+ return res;
+}
+
+//-- multi_roots ------------------------------------
+// goal: solve f(t)=c for several c at once.
+/* algo: -compute f at both ends of the given segment [a,b].
+ -compute bounds m<df(t)<M for df on the segment.
+ let c and C be the levels below and above f(a):
+ going from f(a) down to c with slope m takes at least time (f(a)-c)/m
+ going from f(a) up to C with slope M takes at least time (C-f(a))/M
+ From this we conclude there are no roots before a'=a+min((f(a)-c)/m,(C-f(a))/M).
+ Do the same for b: compute some b' such that there are no roots in (b',b].
+ -if [a',b'] is not empty, repeat the process with [a',(a'+b')/2] and [(a'+b')/2,b'].
+ unfortunately, extra care is needed about rounding errors, and also to avoid the repetition of roots,
+ making things tricky and unpleasant...
+*/
+//TODO: Make sure the code is "rounding-errors proof" and take care about repetition of roots!
+
+
+static int upper_level(vector<double> const &levels,double x,double tol=0.){
+ return(upper_bound(levels.begin(),levels.end(),x-tol)-levels.begin());
+}
+
+#ifdef USE_SBASIS_OF
+static void multi_roots_internal(SBasis const &f,
+ SBasis const &df,
+#else
+static void multi_roots_internal(SBasis const &f,
+ SBasis const &df,
+#endif
+ std::vector<double> const &levels,
+ std::vector<std::vector<double> > &roots,
+ double htol,
+ double vtol,
+ double a,
+ double fa,
+ double b,
+ double fb){
+
+ if (f.isZero(0)){
+ int idx;
+ idx=upper_level(levels,0,vtol);
+ if (idx<(int)levels.size()&&fabs(levels.at(idx))<=vtol){
+ roots[idx].push_back(a);
+ roots[idx].push_back(b);
+ }
+ return;
+ }
+////useful?
+// if (f.size()==1){
+// int idxa=upper_level(levels,fa);
+// int idxb=upper_level(levels,fb);
+// if (fa==fb){
+// if (fa==levels[idxa]){
+// roots[a]=idxa;
+// roots[b]=idxa;
+// }
+// return;
+// }
+// int idx_min=std::min(idxa,idxb);
+// int idx_max=std::max(idxa,idxb);
+// if (idx_max==levels.size()) idx_max-=1;
+// for(int i=idx_min;i<=idx_max; i++){
+// double t=a+(b-a)*(levels[i]-fa)/(fb-fa);
+// if(a<t&&t<b) roots[t]=i;
+// }
+// return;
+// }
+ if ((b-a)<htol){
+ //TODO: use different tol for t and f ?
+ //TODO: unsigned idx ? (remove int casts when fixed)
+ int idx=std::min(upper_level(levels,fa,vtol),upper_level(levels,fb,vtol));
+ if (idx==(int)levels.size()) idx-=1;
+ double c=levels.at(idx);
+ if((fa-c)*(fb-c)<=0||fabs(fa-c)<vtol||fabs(fb-c)<vtol){
+ roots[idx].push_back((a+b)/2);
+ }
+ return;
+ }
+
+ int idxa=upper_level(levels,fa,vtol);
+ int idxb=upper_level(levels,fb,vtol);
+
+ Interval bs = *bounds_local(df,Interval(a,b));
+
+ //first times when a level (higher or lower) can be reached from a or b.
+ double ta_hi,tb_hi,ta_lo,tb_lo;
+ ta_hi=ta_lo=b+1;//default values => no root there.
+ tb_hi=tb_lo=a-1;//default values => no root there.
+
+ if (idxa<(int)levels.size() && fabs(fa-levels.at(idxa))<vtol){//a can be considered a root.
+ //ta_hi=ta_lo=a;
+ roots[idxa].push_back(a);
+ ta_hi=ta_lo=a+htol;
+ }else{
+ if (bs.max()>0 && idxa<(int)levels.size())
+ ta_hi=a+(levels.at(idxa )-fa)/bs.max();
+ if (bs.min()<0 && idxa>0)
+ ta_lo=a+(levels.at(idxa-1)-fa)/bs.min();
+ }
+ if (idxb<(int)levels.size() && fabs(fb-levels.at(idxb))<vtol){//b can be considered a root.
+ //tb_hi=tb_lo=b;
+ roots[idxb].push_back(b);
+ tb_hi=tb_lo=b-htol;
+ }else{
+ if (bs.min()<0 && idxb<(int)levels.size())
+ tb_hi=b+(levels.at(idxb )-fb)/bs.min();
+ if (bs.max()>0 && idxb>0)
+ tb_lo=b+(levels.at(idxb-1)-fb)/bs.max();
+ }
+
+ double t0,t1;
+ t0=std::min(ta_hi,ta_lo);
+ t1=std::max(tb_hi,tb_lo);
+ //hum, rounding errors frighten me! so I add this +tol...
+ if (t0>t1+htol) return;//no root here.
+
+ if (fabs(t1-t0)<htol){
+ multi_roots_internal(f,df,levels,roots,htol,vtol,t0,f(t0),t1,f(t1));
+ }else{
+ double t,t_left,t_right,ft,ft_left,ft_right;
+ t_left =t_right =t =(t0+t1)/2;
+ ft_left=ft_right=ft=f(t);
+ int idx=upper_level(levels,ft,vtol);
+ if (idx<(int)levels.size() && fabs(ft-levels.at(idx))<vtol){//t can be considered a root.
+ roots[idx].push_back(t);
+ //we do not want to count it twice (from the left and from the right)
+ t_left =t-htol/2;
+ t_right=t+htol/2;
+ ft_left =f(t_left);
+ ft_right=f(t_right);
+ }
+ multi_roots_internal(f,df,levels,roots,htol,vtol,t0 ,f(t0) ,t_left,ft_left);
+ multi_roots_internal(f,df,levels,roots,htol,vtol,t_right,ft_right,t1 ,f(t1) );
+ }
+}
+
+/** Solve f(t)=c for several c at once.
+ \param f sbasis function
+ \param levels vector of 'y' values
+    \param htol tolerance in t (horizontal)
+    \param vtol tolerance in the function value (vertical)
+    \param a, b left and right bounds of the search interval
+ \returns a vector of vectors, one for each y giving roots
+
+Effectively computes:
+results[i] = roots(f - y_i) for each level y_i
+
+* algo: -compute f at both ends of the given segment [a,b].
+ -compute bounds m<df(t)<M for df on the segment.
+ let c and C be the levels below and above f(a):
+ going from f(a) down to c with slope m takes at least time (f(a)-c)/m
+ going from f(a) up to C with slope M takes at least time (C-f(a))/M
+ From this we conclude there are no roots before a'=a+min((f(a)-c)/m,(C-f(a))/M).
+ Do the same for b: compute some b' such that there are no roots in (b',b].
+ -if [a',b'] is not empty, repeat the process with [a',(a'+b')/2] and [(a'+b')/2,b'].
+ unfortunately, extra care is needed about rounding errors, and also to avoid the repetition of roots,
+ making things tricky and unpleasant...
+
+TODO: Make sure the code is "rounding-errors proof" and take care about repetition of roots!
+*/
+std::vector<std::vector<double> > multi_roots(SBasis const &f,
+ std::vector<double> const &levels,
+ double htol,
+ double vtol,
+ double a,
+ double b){
+
+ std::vector<std::vector<double> > roots(levels.size(), std::vector<double>());
+
+ SBasis df=derivative(f);
+ multi_roots_internal(f,df,levels,roots,htol,vtol,a,f(a),b,f(b));
+
+ return(roots);
+}
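+
+#if 0
+// Illustrative usage sketch (added for documentation; not part of the original
+// 2geom sources). The function name and sample values are hypothetical.
+// Solve f(t) = 0.25 and f(t) = 0.75 on [0,1] in a single call; the levels must
+// be sorted in increasing order.
+static void example_multi_roots() {
+    SBasis f = multiply(SBasis(Linear(0., 1.)), SBasis(Linear(0., 1.)));  // f(t) = t^2
+    std::vector<double> levels;
+    levels.push_back(0.25);
+    levels.push_back(0.75);
+    std::vector<std::vector<double> > sols = multi_roots(f, levels, 1e-7, 1e-7, 0, 1);
+    // sols[0] holds the solutions of f(t) = 0.25 (~{0.5}),
+    // sols[1] holds the solutions of f(t) = 0.75 (~{0.866}).
+    (void)sols;
+}
+#endif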
+
+
+static bool compareIntervalMin( Interval I, Interval J ){
+ return I.min()<J.min();
+}
+static bool compareIntervalMax( Interval I, Interval J ){
+ return I.max()<J.max();
+}
+
+//find the first interval whose max is >= x
+static unsigned upper_level(vector<Interval> const &levels, double x ){
+ return( lower_bound( levels.begin(), levels.end(), Interval(x,x), compareIntervalMax) - levels.begin() );
+}
+
+static std::vector<Interval> fuseContiguous(std::vector<Interval> const &sets, double tol=0.){
+ std::vector<Interval> result;
+ if (sets.empty() ) return result;
+ result.push_back( sets.front() );
+ for (unsigned i=1; i < sets.size(); i++ ){
+ if ( result.back().max() + tol >= sets[i].min() ){
+ result.back().unionWith( sets[i] );
+ }else{
+ result.push_back( sets[i] );
+ }
+ }
+ return result;
+}
+
+/** level_sets internal method.
+* algorithm: (~adaptation of Newton's method based on the mean value theorem, 'accroissements finis')
+ -compute f at both ends of the given segment [a,b].
+ -compute bounds m<df(t)<M for df on the segment.
+ Suppose f(a) is between two 'levels' c and C. Then
+ f won't enter c before a + (f(a)-c.max())/m
+ f won't enter C before a + (C.min()-f(a))/M
+ From this we conclude nothing happens before a'=a+min((f(a)-c.max())/m,(C.min()-f(a))/M).
+ We do the same for b: compute some b' such that nothing happens in (b',b].
+ -if [a',b'] is not empty, repeat the process with [a',(a'+b')/2] and [(a'+b')/2,b'].
+
+ If f(a) or f(b) belongs to some 'level' C, then use the same argument to find a' or b' such
+ that f remains in C on [a,a'] or [b',b]. In case f is monotonic, we also know f won't enter another
+ level before or after some time, allowing us to restrict the search a little more.
+
+ unfortunately, extra care is needed about rounding errors, and also to avoid the repetition of roots,
+ making things tricky and unpleasant...
+*/
+
+static void level_sets_internal(SBasis const &f,
+ SBasis const &df,
+ std::vector<Interval> const &levels,
+ std::vector<std::vector<Interval> > &solsets,
+ double a,
+ double fa,
+ double b,
+ double fb,
+ double tol=1e-5){
+
+ if (f.isZero(0)){
+ unsigned idx;
+ idx=upper_level( levels, 0. );
+ if (idx<levels.size() && levels[idx].contains(0.)){
+ solsets[idx].push_back( Interval(a,b) ) ;
+ }
+ return;
+ }
+
+ unsigned idxa=upper_level(levels,fa);
+ unsigned idxb=upper_level(levels,fb);
+
+ Interval bs = *bounds_local(df,Interval(a,b));
+
+ //first times when a level (higher or lower) can be reached from a or b.
+ double ta_hi; // f remains below next level for t<ta_hi
+ double ta_lo; // f remains above prev level for t<ta_lo
+ double tb_hi; // f remains below next level for t>tb_hi
+    double tb_lo; // f remains above prev level for t>tb_lo
+
+ ta_hi=ta_lo=b+1;//default values => no root there.
+ tb_hi=tb_lo=a-1;//default values => no root there.
+
+ //--- if f(a) belongs to a level.-------
+ if ( idxa < levels.size() && levels[idxa].contains( fa ) ){
+ //find the first time when we may exit this level.
+ ta_lo = a + ( levels[idxa].min() - fa)/bs.min();
+ ta_hi = a + ( levels[idxa].max() - fa)/bs.max();
+ if ( ta_lo < a || ta_lo > b ) ta_lo = b;
+ if ( ta_hi < a || ta_hi > b ) ta_hi = b;
+ //move to that time for the next iteration.
+ solsets[idxa].push_back( Interval( a, std::min( ta_lo, ta_hi ) ) );
+ }else{
+        //--- if f(a) does not belong to a level.-------
+ if ( idxa == 0 ){
+ ta_lo = b;
+ }else{
+ ta_lo = a + ( levels[idxa-1].max() - fa)/bs.min();
+ if ( ta_lo < a ) ta_lo = b;
+ }
+ if ( idxa == levels.size() ){
+ ta_hi = b;
+ }else{
+ ta_hi = a + ( levels[idxa].min() - fa)/bs.max();
+ if ( ta_hi < a ) ta_hi = b;
+ }
+ }
+
+ //--- if f(b) belongs to a level.-------
+ if (idxb<levels.size() && levels.at(idxb).contains(fb)){
+ //find the first time from b when we may exit this level.
+ tb_lo = b + ( levels[idxb].min() - fb ) / bs.max();
+ tb_hi = b + ( levels[idxb].max() - fb ) / bs.min();
+ if ( tb_lo > b || tb_lo < a ) tb_lo = a;
+ if ( tb_hi > b || tb_hi < a ) tb_hi = a;
+ //move to that time for the next iteration.
+ solsets[idxb].push_back( Interval( std::max( tb_lo, tb_hi ), b) );
+ }else{
+ //--- if f(b) does not belong to a level.-------
+ if ( idxb == 0 ){
+ tb_lo = a;
+ }else{
+ tb_lo = b + ( levels[idxb-1].max() - fb)/bs.max();
+ if ( tb_lo > b ) tb_lo = a;
+ }
+ if ( idxb == levels.size() ){
+ tb_hi = a;
+ }else{
+ tb_hi = b + ( levels[idxb].min() - fb)/bs.min();
+ if ( tb_hi > b ) tb_hi = a;
+ }
+
+
+ if ( bs.min() < 0 && idxb < levels.size() )
+ tb_hi = b + ( levels[idxb ].min() - fb ) / bs.min();
+ if ( bs.max() > 0 && idxb > 0 )
+ tb_lo = b + ( levels[idxb-1].max() - fb ) / bs.max();
+ }
+
+ //let [t0,t1] be the next interval where to search.
+ double t0=std::min(ta_hi,ta_lo);
+ double t1=std::max(tb_hi,tb_lo);
+
+ if (t0>=t1) return;//no root here.
+
+ //if the interval is smaller than our resolution:
+ //pretend f simultaneously meets all the levels between f(t0) and f(t1)...
+ if ( t1 - t0 <= tol ){
+ Interval f_t0t1 ( f(t0), f(t1) );
+ unsigned idxmin = std::min(idxa, idxb);
+ unsigned idxmax = std::max(idxa, idxb);
+        //push [t0,t1] into all crossed levels. Cheat to avoid overlapping intervals on different levels?
+ if ( idxmax > idxmin ){
+ for (unsigned idx = idxmin; idx < idxmax; idx++){
+ solsets[idx].push_back( Interval( t0, t1 ) );
+ }
+ }
+ if ( idxmax < levels.size() && f_t0t1.intersects( levels[idxmax] ) ){
+ solsets[idxmax].push_back( Interval( t0, t1 ) );
+ }
+ return;
+ }
+
+ //To make sure we finally exit the level jump at least by tol:
+ t0 = std::min( std::max( t0, a + tol ), b );
+ t1 = std::max( std::min( t1, b - tol ), a );
+
+ double t =(t0+t1)/2;
+ double ft=f(t);
+ level_sets_internal( f, df, levels, solsets, t0, f(t0), t, ft );
+ level_sets_internal( f, df, levels, solsets, t, ft, t1, f(t1) );
+}
+
+std::vector<std::vector<Interval> > level_sets(SBasis const &f,
+ std::vector<Interval> const &levels,
+ double a, double b, double tol){
+
+ std::vector<std::vector<Interval> > solsets(levels.size(), std::vector<Interval>());
+
+ SBasis df=derivative(f);
+ level_sets_internal(f,df,levels,solsets,a,f(a),b,f(b),tol);
+ // Fuse overlapping intervals...
+ for (auto & solset : solsets){
+ if ( solset.size() == 0 ) continue;
+ std::sort( solset.begin(), solset.end(), compareIntervalMin );
+ solset = fuseContiguous( solset, tol );
+ }
+ return solsets;
+}
+
+std::vector<Interval> level_set (SBasis const &f, double level, double vtol, double a, double b, double tol){
+ Interval fat_level( level - vtol, level + vtol );
+ return level_set(f, fat_level, a, b, tol);
+}
+std::vector<Interval> level_set (SBasis const &f, Interval const &level, double a, double b, double tol){
+ std::vector<Interval> levels(1,level);
+ return level_sets(f,levels, a, b, tol).front() ;
+}
+std::vector<std::vector<Interval> > level_sets (SBasis const &f, std::vector<double> const &levels, double vtol, double a, double b, double tol){
+ std::vector<Interval> fat_levels( levels.size(), Interval());
+ for (unsigned i = 0; i < levels.size(); i++){
+ fat_levels[i] = Interval( levels[i]-vtol, levels[i]+vtol);
+ }
+ return level_sets(f, fat_levels, a, b, tol);
+}
+
+
+//-------------------------------------
+//-------------------------------------
+
+
+void subdiv_sbasis(SBasis const & s,
+ std::vector<double> & roots,
+ double left, double right) {
+ OptInterval bs = bounds_fast(s);
+ if(!bs || bs->min() > 0 || bs->max() < 0)
+ return; // no roots here
+ if(s.tailError(1) < 1e-7) {
+ double t = s[0][0] / (s[0][0] - s[0][1]);
+ roots.push_back(left*(1-t) + t*right);
+ return;
+ }
+ double middle = (left + right)/2;
+ subdiv_sbasis(compose(s, Linear(0, 0.5)), roots, left, middle);
+ subdiv_sbasis(compose(s, Linear(0.5, 1.)), roots, middle, right);
+}
+
+// It is faster to use the bernstein root finder for small degree polynomials (< 100?).
+
+std::vector<double> roots1(SBasis const & s) {
+ std::vector<double> res;
+ double d = s[0][0] - s[0][1];
+ if(d != 0) {
+ double r = s[0][0] / d;
+ if(0 <= r && r <= 1)
+ res.push_back(r);
+ }
+ return res;
+}
+
+std::vector<double> roots1(SBasis const & s, Interval const ivl) {
+ std::vector<double> res;
+ double d = s[0][0] - s[0][1];
+ if(d != 0) {
+ double r = s[0][0] / d;
+ if(ivl.contains(r))
+ res.push_back(r);
+ }
+ return res;
+}
+
+/** Find all t s.t s(t) = 0
+ \param a sbasis function
+ \see Bezier::roots
+ \returns vector of zeros (roots)
+
+*/
+std::vector<double> roots(SBasis const & s) {
+ switch(s.size()) {
+ case 0:
+ assert(false);
+ return std::vector<double>();
+ case 1:
+ return roots1(s);
+ default:
+ {
+ Bezier bz;
+ sbasis_to_bezier(bz, s);
+ return bz.roots();
+ }
+ }
+}
+std::vector<double> roots(SBasis const & s, Interval const ivl) {
+ switch(s.size()) {
+ case 0:
+ assert(false);
+ return std::vector<double>();
+ case 1:
+ return roots1(s, ivl);
+ default:
+ {
+ Bezier bz;
+ sbasis_to_bezier(bz, s);
+ return bz.roots(ivl);
+ }
+ }
+}
+
+};
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/sbasis-to-bezier.cpp b/src/2geom/sbasis-to-bezier.cpp
new file mode 100644
index 0000000..5580956
--- /dev/null
+++ b/src/2geom/sbasis-to-bezier.cpp
@@ -0,0 +1,584 @@
+/*
+ * Symmetric Power Basis - Bernstein Basis conversion routines
+ *
+ * Authors:
+ * Marco Cecchetti <mrcekets at gmail.com>
+ * Nathan Hurst <njh@mail.csse.monash.edu.au>
+ *
+ * Copyright 2007-2008 authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+
+#include <2geom/sbasis-to-bezier.h>
+#include <2geom/d2.h>
+#include <2geom/choose.h>
+#include <2geom/path-sink.h>
+#include <2geom/exception.h>
+#include <2geom/convex-hull.h>
+
+#include <iostream>
+
+
+
+
+namespace Geom
+{
+
+/*
+ * Symmetric Power Basis - Bernstein Basis conversion routines
+ *
+ * some remark about precision:
+ * interval [0,1], subdivisions: 10^3
+ * - bezier_to_sbasis : up to degree ~72 precision is at least 10^-5
+ * up to degree ~87 precision is at least 10^-3
+ * - sbasis_to_bezier : up to order ~63 precision is at least 10^-15
+ * precision is at least 10^-14 even beyond order 200
+ *
+ * interval [-1,1], subdivisions: 10^3
+ * - bezier_to_sbasis : up to degree ~21 precision is at least 10^-5
+ * up to degree ~24 precision is at least 10^-3
+ * - sbasis_to_bezier : up to order ~11 precision is at least 10^-5
+ * up to order ~13 precision is at least 10^-3
+ *
+ * interval [-10,10], subdivisions: 10^3
+ * - bezier_to_sbasis : up to degree ~7 precision is at least 10^-5
+ * up to degree ~8 precision is at least 10^-3
+ * - sbasis_to_bezier : up to order ~3 precision is at least 10^-5
+ * up to order ~4 precision is at least 10^-3
+ *
+ * references:
+ * this implementation is based on the following article:
+ * J.Sanchez-Reyes - The Symmetric Analogue of the Polynomial Power Basis
+ */
+
+/** Changes the basis of sb to be Bernstein.
+ \param sb the Symmetric basis polynomial
+ \param bz on return, the equivalent Bernstein basis polynomial
+ \param sz the requested number of bezier handles (0 = deduce from sb)
+
+ If the degree is even, q is the order in the symmetrical power basis;
+ if the degree is odd, q is the order + 1.
+ n is always the polynomial degree, i.e. the Bezier order.
+*/
+void sbasis_to_bezier (Bezier & bz, SBasis const& sb, size_t sz)
+{
+ assert(sb.size() > 0);
+
+ size_t q, n;
+ bool even;
+ if (sz == 0)
+ {
+ q = sb.size();
+ if (sb[q-1][0] == sb[q-1][1])
+ {
+ even = true;
+ --q;
+ n = 2*q;
+ }
+ else
+ {
+ even = false;
+ n = 2*q-1;
+ }
+ }
+ else
+ {
+ q = (sz > 2*sb.size()-1) ? sb.size() : (sz+1)/2;
+ n = sz-1;
+ even = false;
+ }
+ bz.clear();
+ bz.resize(n+1);
+ for (size_t k = 0; k < q; ++k)
+ {
+ int Tjk = 1;
+ for (size_t j = k; j < n-k; ++j) // j <= n-k-1
+ {
+ bz[j] += (Tjk * sb[k][0]);
+ bz[n-j] += (Tjk * sb[k][1]); // n-k <-> [k][1]
+ // assert(Tjk == binomial(n-2*k-1, j-k));
+ binomial_increment_k(Tjk, n-2*k-1, j-k);
+ }
+ }
+ if (even)
+ {
+ bz[q] += sb[q][0];
+ }
+ // the resulting coefficients are with respect to the scaled Bernstein
+ // basis so we need to divide them by (n, j) binomial coefficient
+ int bcj = n;
+ for (size_t j = 1; j < n; ++j)
+ {
+ bz[j] /= bcj;
+ // assert(bcj == binomial(n, j));
+ binomial_increment_k(bcj, n, j);
+ }
+ bz[0] = sb[0][0];
+ bz[n] = sb[0][1];
+}
+
+void sbasis_to_bezier(D2<Bezier> &bz, D2<SBasis> const &sb, size_t sz)
+{
+ if (sz == 0) {
+ sz = std::max(sb[X].size(), sb[Y].size())*2;
+ }
+ sbasis_to_bezier(bz[X], sb[X], sz);
+ sbasis_to_bezier(bz[Y], sb[Y], sz);
+}
+
+/** Changes the basis of p to be Bernstein.
+ \param p the D2 Symmetric basis polynomial
+ \returns the D2 Bernstein basis polynomial
+
+ sz is always the polynomial degree, i. e. the Bezier order
+*/
+void sbasis_to_bezier (std::vector<Point> & bz, D2<SBasis> const& sb, size_t sz)
+{
+ D2<Bezier> bez;
+ sbasis_to_bezier(bez, sb, sz);
+ bz = bezier_points(bez);
+}
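+
+#if 0
+// Illustrative usage sketch (added for documentation; not part of the original
+// 2geom sources). The function name is hypothetical.
+// Convert a D2<SBasis> into the control points of a (possibly approximating)
+// cubic Bezier by requesting 4 handles.
+static void example_sbasis_to_bezier(D2<SBasis> const &curve) {
+    std::vector<Point> handles;
+    sbasis_to_bezier(handles, curve, 4);   // 4 control points => cubic
+    // handles[0] and handles[3] are the curve's endpoints, handles[1] and
+    // handles[2] the inner control points; if the input needs a higher degree,
+    // the higher-order terms are truncated and the cubic is only approximate.
+    (void)handles;
+}
+#endif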
+
+/** Changes the basis of p to be Bernstein.
+ \param p the D2 Symmetric basis polynomial
+ \returns the D2 Bernstein basis cubic polynomial
+
+The output Bezier is always cubic.
+For the general asymmetric case, fit the SBasis function value at the midpoint.
+For the parallel, symmetric case, find the point of closest approach to the midpoint.
+For the parallel, anti-symmetric case, fit the SBasis slope at the midpoint.
+*/
+void sbasis_to_cubic_bezier (std::vector<Point> & bz, D2<SBasis> const& sb)
+{
+ double delx[2], dely[2];
+ double xprime[2], yprime[2];
+ double midx = 0;
+ double midy = 0;
+ double midx_0, midy_0;
+ double numer[2], numer_0[2];
+ double denom;
+ double div;
+
+ if ((sb[X].size() == 0) || (sb[Y].size() == 0)) {
+ THROW_RANGEERROR("size of sb is too small");
+ }
+
+ sbasis_to_bezier(bz, sb, 4); // zeroth-order estimate
+ if ((sb[X].size() < 3) && (sb[Y].size() < 3))
+ return; // cubic bezier estimate is exact
+ Geom::ConvexHull bezhull(bz);
+
+// calculate first derivatives of x and y wrt t
+
+ for (int i = 0; i < 2; ++i) {
+ xprime[i] = sb[X][0][1] - sb[X][0][0];
+ yprime[i] = sb[Y][0][1] - sb[Y][0][0];
+ }
+ if (sb[X].size() > 1) {
+ xprime[0] += sb[X][1][0];
+ xprime[1] -= sb[X][1][1];
+ }
+ if (sb[Y].size() > 1) {
+ yprime[0] += sb[Y][1][0];
+ yprime[1] -= sb[Y][1][1];
+ }
+
+// calculate midpoint at t = 0.5
+
+ div = 2;
+ for (auto i : sb[X]) {
+ midx += (i[0] + i[1])/div;
+ div *= 4;
+ }
+
+ div = 2;
+ for (auto i : sb[Y]) {
+ midy += (i[0] + i[1])/div;
+ div *= 4;
+ }
+
+// is midpoint in hull: if not, the solution will be ill-conditioned, LP Bug 1428683
+
+ if (!bezhull.contains(Geom::Point(midx, midy)))
+ return;
+
+// calculate Bezier control arms
+
+ midx = 8*midx - 4*bz[0][X] - 4*bz[3][X]; // re-define relative to center
+ midy = 8*midy - 4*bz[0][Y] - 4*bz[3][Y];
+ midx_0 = sb[X].size() > 1 ? sb[X][1][0] + sb[X][1][1] : 0; // zeroth order estimate
+ midy_0 = sb[Y].size() > 1 ? sb[Y][1][0] + sb[Y][1][1] : 0;
+
+ if ((std::abs(xprime[0]) < EPSILON) && (std::abs(yprime[0]) < EPSILON)
+ && ((std::abs(xprime[1]) > EPSILON) || (std::abs(yprime[1]) > EPSILON))) { // degenerate handle at 0 : use distance of closest approach
+ numer[0] = midx*xprime[1] + midy*yprime[1];
+ denom = 3.0*(xprime[1]*xprime[1] + yprime[1]*yprime[1]);
+ delx[0] = 0;
+ dely[0] = 0;
+ delx[1] = -xprime[1]*numer[0]/denom;
+ dely[1] = -yprime[1]*numer[0]/denom;
+ } else if ((std::abs(xprime[1]) < EPSILON) && (std::abs(yprime[1]) < EPSILON)
+ && ((std::abs(xprime[0]) > EPSILON) || (std::abs(yprime[0]) > EPSILON))) { // degenerate handle at 1 : ditto
+ numer[1] = midx*xprime[0] + midy*yprime[0];
+ denom = 3.0*(xprime[0]*xprime[0] + yprime[0]*yprime[0]);
+ delx[0] = xprime[0]*numer[1]/denom;
+ dely[0] = yprime[0]*numer[1]/denom;
+ delx[1] = 0;
+ dely[1] = 0;
+ } else if (std::abs(xprime[1]*yprime[0] - yprime[1]*xprime[0]) > // general case : fit mid fxn value
+ 0.002 * std::abs(xprime[1]*xprime[0] + yprime[1]*yprime[0])) { // approx. 0.1 degree of angle
+ double test1 = (bz[1][Y] - bz[0][Y])*(bz[3][X] - bz[0][X]) - (bz[1][X] - bz[0][X])*(bz[3][Y] - bz[0][Y]);
+ double test2 = (bz[2][Y] - bz[0][Y])*(bz[3][X] - bz[0][X]) - (bz[2][X] - bz[0][X])*(bz[3][Y] - bz[0][Y]);
+ if (test1*test2 < 0) // reject anti-symmetric case, LP Bug 1428267 & Bug 1428683
+ return;
+ denom = 3.0*(xprime[1]*yprime[0] - yprime[1]*xprime[0]);
+ for (int i = 0; i < 2; ++i) {
+ numer_0[i] = xprime[1 - i]*midy_0 - yprime[1 - i]*midx_0;
+ numer[i] = xprime[1 - i]*midy - yprime[1 - i]*midx;
+ delx[i] = xprime[i]*numer[i]/denom;
+ dely[i] = yprime[i]*numer[i]/denom;
+ if (numer_0[i]*numer[i] < 0) // check for reversal of direction, LP Bug 1544680
+ return;
+ }
+ if (std::abs((numer[0] - numer_0[0])*numer_0[1]) > 10.0*std::abs((numer[1] - numer_0[1])*numer_0[0]) // check for asymmetry
+ || std::abs((numer[1] - numer_0[1])*numer_0[0]) > 10.0*std::abs((numer[0] - numer_0[0])*numer_0[1]))
+ return;
+ } else if ((xprime[0]*xprime[1] < 0) || (yprime[0]*yprime[1] < 0)) { // symmetric case : use distance of closest approach
+ numer[0] = midx*xprime[0] + midy*yprime[0];
+ denom = 6.0*(xprime[0]*xprime[0] + yprime[0]*yprime[0]);
+ delx[0] = xprime[0]*numer[0]/denom;
+ dely[0] = yprime[0]*numer[0]/denom;
+ delx[1] = -delx[0];
+ dely[1] = -dely[0];
+ } else { // anti-symmetric case : fit mid slope
+ // calculate slope at t = 0.5
+ midx = 0;
+ div = 1;
+ for (auto i : sb[X]) {
+ midx += (i[1] - i[0])/div;
+ div *= 4;
+ }
+ midy = 0;
+ div = 1;
+ for (auto i : sb[Y]) {
+ midy += (i[1] - i[0])/div;
+ div *= 4;
+ }
+ if (midx*yprime[0] != midy*xprime[0]) {
+ denom = midx*yprime[0] - midy*xprime[0];
+ numer[0] = midx*(bz[3][Y] - bz[0][Y]) - midy*(bz[3][X] - bz[0][X]);
+ for (int i = 0; i < 2; ++i) {
+ delx[i] = xprime[0]*numer[0]/denom;
+ dely[i] = yprime[0]*numer[0]/denom;
+ }
+ } else { // linear case
+ for (int i = 0; i < 2; ++i) {
+ delx[i] = (bz[3][X] - bz[0][X])/3;
+ dely[i] = (bz[3][Y] - bz[0][Y])/3;
+ }
+ }
+ }
+ bz[1][X] = bz[0][X] + delx[0];
+ bz[1][Y] = bz[0][Y] + dely[0];
+ bz[2][X] = bz[3][X] - delx[1];
+ bz[2][Y] = bz[3][Y] - dely[1];
+}
+
+/** Changes the basis of p to be sbasis.
+ \param p the Bernstein basis polynomial
+ \returns the Symmetric basis polynomial
+
+ if the degree is even q is the order in the symmetrical power basis,
+ if the degree is odd q is the order + 1
+ n is always the polynomial degree, i. e. the Bezier order
+*/
+void bezier_to_sbasis (SBasis & sb, Bezier const& bz)
+{
+ size_t n = bz.order();
+ size_t q = (n+1) / 2;
+ size_t even = (n & 1u) ? 0 : 1;
+ sb.clear();
+ sb.resize(q + even, Linear(0, 0));
+ int nck = 1;
+ for (size_t k = 0; k < q; ++k)
+ {
+ int Tjk = nck;
+ for (size_t j = k; j < q; ++j)
+ {
+ sb[j][0] += (Tjk * bz[k]);
+ sb[j][1] += (Tjk * bz[n-k]); // n-j <-> [j][1]
+ // assert(Tjk == sgn(j, k) * binomial(n-j-k, j-k) * binomial(n, k));
+ binomial_increment_k(Tjk, n-j-k, j-k);
+ binomial_decrement_n(Tjk, n-j-k, j-k+1);
+ Tjk = -Tjk;
+ }
+ Tjk = -nck;
+ for (size_t j = k+1; j < q; ++j)
+ {
+ sb[j][0] += (Tjk * bz[n-k]);
+ sb[j][1] += (Tjk * bz[k]); // n-j <-> [j][1]
+ // assert(Tjk == sgn(j, k) * binomial(n-j-k-1, j-k-1) * binomial(n, k));
+ binomial_increment_k(Tjk, n-j-k-1, j-k-1);
+ binomial_decrement_n(Tjk, n-j-k-1, j-k);
+ Tjk = -Tjk;
+ }
+ // assert(nck == binomial(n, k));
+ binomial_increment_k(nck, n, k);
+ }
+ if (even)
+ {
+ int Tjk = q & 1 ? -1 : 1;
+ for (size_t k = 0; k < q; ++k)
+ {
+ sb[q][0] += (Tjk * (bz[k] + bz[n-k]));
+ // assert(Tjk == sgn(q,k) * binomial(n, k));
+ binomial_increment_k(Tjk, n, k);
+ Tjk = -Tjk;
+ }
+ // assert(Tjk == binomial(n, q));
+ sb[q][0] += Tjk * bz[q];
+ sb[q][1] = sb[q][0];
+ }
+ sb[0][0] = bz[0];
+ sb[0][1] = bz[n];
+}
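+
+// Illustrative usage (not part of the library; a minimal sketch assuming the
+// Geom::Bezier and Geom::SBasis types declared in the 2geom headers):
+//
+//   Geom::Bezier bz(0.0, 2.0, -1.0, 0.5);  // a cubic in Bernstein form
+//   Geom::SBasis sb;
+//   Geom::bezier_to_sbasis(sb, bz);        // the same polynomial in the s-power basis
+//   double mid = sb.valueAt(0.5);          // equals bz.valueAt(0.5) up to rounding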
+
+/** Changes the basis of the d2 control points bz to sbasis.
+ \param sb the resulting d2 symmetric power basis polynomial (output)
+ \param bz the control points of the d2 Bernstein basis polynomial
+
+ If the degree is even, q is the order in the symmetric power basis;
+ if the degree is odd, q is the order + 1.
+ n is always the polynomial degree, i.e. the Bezier order.
+*/
+void bezier_to_sbasis (D2<SBasis> & sb, std::vector<Point> const& bz)
+{
+ size_t n = bz.size() - 1;
+ size_t q = (n+1) / 2;
+ size_t even = (n & 1u) ? 0 : 1;
+ sb[X].clear();
+ sb[Y].clear();
+ sb[X].resize(q + even, Linear(0, 0));
+ sb[Y].resize(q + even, Linear(0, 0));
+ int nck = 1;
+ for (size_t k = 0; k < q; ++k)
+ {
+ int Tjk = nck;
+ for (size_t j = k; j < q; ++j)
+ {
+ sb[X][j][0] += (Tjk * bz[k][X]);
+ sb[X][j][1] += (Tjk * bz[n-k][X]);
+ sb[Y][j][0] += (Tjk * bz[k][Y]);
+ sb[Y][j][1] += (Tjk * bz[n-k][Y]);
+ // assert(Tjk == sgn(j, k) * binomial(n-j-k, j-k) * binomial(n, k));
+ binomial_increment_k(Tjk, n-j-k, j-k);
+ binomial_decrement_n(Tjk, n-j-k, j-k+1);
+ Tjk = -Tjk;
+ }
+ Tjk = -nck;
+ for (size_t j = k+1; j < q; ++j)
+ {
+ sb[X][j][0] += (Tjk * bz[n-k][X]);
+ sb[X][j][1] += (Tjk * bz[k][X]);
+ sb[Y][j][0] += (Tjk * bz[n-k][Y]);
+ sb[Y][j][1] += (Tjk * bz[k][Y]);
+ // assert(Tjk == sgn(j, k) * binomial(n-j-k-1, j-k-1) * binomial(n, k));
+ binomial_increment_k(Tjk, n-j-k-1, j-k-1);
+ binomial_decrement_n(Tjk, n-j-k-1, j-k);
+ Tjk = -Tjk;
+ }
+ // assert(nck == binomial(n, k));
+ binomial_increment_k(nck, n, k);
+ }
+ if (even)
+ {
+ int Tjk = q & 1 ? -1 : 1;
+ for (size_t k = 0; k < q; ++k)
+ {
+ sb[X][q][0] += (Tjk * (bz[k][X] + bz[n-k][X]));
+ sb[Y][q][0] += (Tjk * (bz[k][Y] + bz[n-k][Y]));
+ // assert(Tjk == sgn(q,k) * binomial(n, k));
+ binomial_increment_k(Tjk, n, k);
+ Tjk = -Tjk;
+ }
+ // assert(Tjk == binomial(n, q));
+ sb[X][q][0] += Tjk * bz[q][X];
+ sb[X][q][1] = sb[X][q][0];
+ sb[Y][q][0] += Tjk * bz[q][Y];
+ sb[Y][q][1] = sb[Y][q][0];
+ }
+ sb[X][0][0] = bz[0][X];
+ sb[X][0][1] = bz[n][X];
+ sb[Y][0][0] = bz[0][Y];
+ sb[Y][0][1] = bz[n][Y];
+}
+
+} // namespace Geom
+
+#if 0
+/*
+* This version works by inverting a reasonable upper bound on the error term after subdividing the
+* curve at $a$. We keep biting off pieces until there is no more curve left.
+*
+* Derivation: The tail of the power series is $a_ks^k + a_{k+1}s^{k+1} + \ldots = e$. A
+* subdivision at $a$ results in a tail error of $e*A^k, A = (1-a)a$. Let this be the desired
+* tolerance tol $= e*A^k$ and invert, getting $A = (tol/e)^{1/k}$ and $a = 1/2 - \sqrt{1/4 - A}$.
+*/
+void
+subpath_from_sbasis_incremental(Geom::OldPathSetBuilder &pb, D2<SBasis> B, double tol, bool initial) {
+ const unsigned k = 2; // cubic bezier
+ double te = B.tail_error(k);
+    assert(B[0].isFinite());
+    assert(B[1].isFinite());
+
+ //std::cout << "tol = " << tol << std::endl;
+ while(1) {
+ double A = std::sqrt(tol/te); // pow(te, 1./k)
+ double a = A;
+ if(A < 1) {
+ A = std::min(A, 0.25);
+ a = 0.5 - std::sqrt(0.25 - A); // quadratic formula
+ if(a > 1) a = 1; // clamp to the end of the segment
+ } else
+ a = 1;
+ assert(a > 0);
+ //std::cout << "te = " << te << std::endl;
+ //std::cout << "A = " << A << "; a=" << a << std::endl;
+ D2<SBasis> Bs = compose(B, Linear(0, a));
+ assert(Bs.tail_error(k));
+ std::vector<Geom::Point> bez = sbasis_to_bezier(Bs, 2);
+ reverse(bez.begin(), bez.end());
+ if (initial) {
+ pb.start_subpath(bez[0]);
+ initial = false;
+ }
+ pb.push_cubic(bez[1], bez[2], bez[3]);
+
+// move to next piece of curve
+ if(a >= 1) break;
+ B = compose(B, Linear(a, 1));
+ te = B.tail_error(k);
+ }
+}
+
+#endif
+
+namespace Geom{
+
+/** Build a path from a d2 sbasis, appending the generated curves to a PathBuilder.
+ \param pb the PathBuilder that receives the curves
+ \param B the d2 symmetric basis polynomial
+ \param tol the approximation tolerance
+
+ If only_cubicbeziers is true, the resulting path may only contain CubicBezier curves.
+*/
+void build_from_sbasis(Geom::PathBuilder &pb, D2<SBasis> const &B, double tol, bool only_cubicbeziers) {
+ if (!B.isFinite()) {
+ THROW_EXCEPTION("assertion failed: B.isFinite()");
+ }
+ if(tail_error(B, 3) < tol || sbasis_size(B) == 2) { // nearly cubic enough
+ if( !only_cubicbeziers && (sbasis_size(B) <= 1) ) {
+ pb.lineTo(B.at1());
+ } else {
+ std::vector<Geom::Point> bez;
+// sbasis_to_bezier(bez, B, 4);
+ sbasis_to_cubic_bezier(bez, B);
+ pb.curveTo(bez[1], bez[2], bez[3]);
+ }
+ } else {
+ build_from_sbasis(pb, compose(B, Linear(0, 0.5)), tol, only_cubicbeziers);
+ build_from_sbasis(pb, compose(B, Linear(0.5, 1)), tol, only_cubicbeziers);
+ }
+}
+
+/** Make a path from a d2 sbasis.
+ \param B the d2 symmetric basis polynomial
+ \param tol the approximation tolerance
+ \returns a Path
+
+ If only_cubicbeziers is true, the resulting path may only contain CubicBezier curves.
+*/
+Path
+path_from_sbasis(D2<SBasis> const &B, double tol, bool only_cubicbeziers) {
+ PathBuilder pb;
+ pb.moveTo(B.at0());
+ build_from_sbasis(pb, B, tol, only_cubicbeziers);
+ pb.flush();
+ return pb.peek().front();
+}
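+
+// Illustrative usage (a sketch; B stands for any finite D2<SBasis> segment):
+//
+//   Geom::Path p = Geom::path_from_sbasis(B, 0.1, false);  // tolerance 0.1, lines allowed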
+
+/** Make a PathVector from a piecewise d2 sbasis.
+ \param B the piecewise d2 symmetric basis polynomial
+ \param tol the approximation tolerance
+ \returns a PathVector
+
+ If only_cubicbeziers is true, the resulting path may only contain CubicBezier curves.
+ TODO: some of this logic should be lifted into svg-path
+*/
+PathVector
+path_from_piecewise(Geom::Piecewise<Geom::D2<Geom::SBasis> > const &B, double tol, bool only_cubicbeziers) {
+ Geom::PathBuilder pb;
+ if(B.size() == 0) return pb.peek();
+ Geom::Point start = B[0].at0();
+ pb.moveTo(start);
+ for(unsigned i = 0; ; i++) {
+ if ( (i+1 == B.size())
+ || !are_near(B[i+1].at0(), B[i].at1(), tol) )
+ {
+ //start of a new path
+ if (are_near(start, B[i].at1()) && sbasis_size(B[i]) <= 1) {
+ pb.closePath();
+ //last line seg already there (because of .closePath())
+ goto no_add;
+ }
+ build_from_sbasis(pb, B[i], tol, only_cubicbeziers);
+ if (are_near(start, B[i].at1())) {
+ //it's closed, the last closing segment was not a straight line so it needed to be added, but still make it closed here with degenerate straight line.
+ pb.closePath();
+ }
+ no_add:
+ if (i+1 >= B.size()) {
+ break;
+ }
+ start = B[i+1].at0();
+ pb.moveTo(start);
+ } else {
+ build_from_sbasis(pb, B[i], tol, only_cubicbeziers);
+ }
+ }
+ pb.flush();
+ return pb.peek();
+}
+
+}
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/sbasis.cpp b/src/2geom/sbasis.cpp
new file mode 100644
index 0000000..ceaae3f
--- /dev/null
+++ b/src/2geom/sbasis.cpp
@@ -0,0 +1,681 @@
+/*
+ * sbasis.cpp - S-power basis function class + supporting classes
+ *
+ * Authors:
+ * Nathan Hurst <njh@mail.csse.monash.edu.au>
+ * Michael Sloan <mgsloan@gmail.com>
+ *
+ * Copyright (C) 2006-2007 authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+#include <cmath>
+
+#include <2geom/sbasis.h>
+#include <2geom/math-utils.h>
+
+namespace Geom {
+
+#ifndef M_PI
+# define M_PI 3.14159265358979323846
+#endif
+
+/** bound the error from term truncation
+ \param tail first term to chop
+ \returns the largest possible error this truncation could give
+*/
+double SBasis::tailError(unsigned tail) const {
+ Interval bs = *bounds_fast(*this, tail);
+ return std::max(fabs(bs.min()),fabs(bs.max()));
+}
+
+/** test all coefficients are finite
+*/
+bool SBasis::isFinite() const {
+ for(unsigned i = 0; i < size(); i++) {
+ if(!(*this)[i].isFinite())
+ return false;
+ }
+ return true;
+}
+
+/** Compute the value and the first n derivatives
+ \param t position to evaluate
+ \param n number of derivatives (not counting value)
+ \returns a vector with the value and the n derivative evaluations
+
+There is an elegant way to compute the value and n derivatives of a polynomial using a variant of Horner's rule. Someone will someday work out how to do the same for sbasis.
+*/
+std::vector<double> SBasis::valueAndDerivatives(double t, unsigned n) const {
+ std::vector<double> ret(n+1);
+ ret[0] = valueAt(t);
+ SBasis tmp = *this;
+ for(unsigned i = 1; i < n+1; i++) {
+ tmp.derive();
+ ret[i] = tmp.valueAt(t);
+ }
+ return ret;
+}
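+
+// Example (a sketch, for some SBasis f): value, first and second derivative at t = 0.5.
+//
+//   std::vector<double> vals = f.valueAndDerivatives(0.5, 2);
+//   // vals[0] == f(0.5), vals[1] == f'(0.5), vals[2] == f''(0.5)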
+
+
+/** Compute the pointwise sum of a and b (Exact)
+ \param a,b sbasis functions
+ \returns sbasis equal to a+b
+
+*/
+SBasis operator+(const SBasis& a, const SBasis& b) {
+ const unsigned out_size = std::max(a.size(), b.size());
+ const unsigned min_size = std::min(a.size(), b.size());
+ SBasis result(out_size, Linear());
+
+ for(unsigned i = 0; i < min_size; i++) {
+ result[i] = a[i] + b[i];
+ }
+ for(unsigned i = min_size; i < a.size(); i++)
+ result[i] = a[i];
+ for(unsigned i = min_size; i < b.size(); i++)
+ result[i] = b[i];
+
+ assert(result.size() == out_size);
+ return result;
+}
+
+/** Compute the pointwise difference of a and b (Exact)
+ \param a,b sbasis functions
+ \returns sbasis equal to a-b
+
+*/
+SBasis operator-(const SBasis& a, const SBasis& b) {
+ const unsigned out_size = std::max(a.size(), b.size());
+ const unsigned min_size = std::min(a.size(), b.size());
+ SBasis result(out_size, Linear());
+
+ for(unsigned i = 0; i < min_size; i++) {
+ result[i] = a[i] - b[i];
+ }
+ for(unsigned i = min_size; i < a.size(); i++)
+ result[i] = a[i];
+ for(unsigned i = min_size; i < b.size(); i++)
+ result[i] = -b[i];
+
+ assert(result.size() == out_size);
+ return result;
+}
+
+/** Compute the pointwise sum of a and b and store in a (Exact)
+ \param a,b sbasis functions
+ \returns sbasis equal to a+b
+
+*/
+SBasis& operator+=(SBasis& a, const SBasis& b) {
+ const unsigned out_size = std::max(a.size(), b.size());
+ const unsigned min_size = std::min(a.size(), b.size());
+ a.resize(out_size);
+
+ for(unsigned i = 0; i < min_size; i++)
+ a[i] += b[i];
+ for(unsigned i = min_size; i < b.size(); i++)
+ a[i] = b[i];
+
+ assert(a.size() == out_size);
+ return a;
+}
+
+/** Compute the pointwise difference of a and b and store in a (Exact)
+ \param a,b sbasis functions
+ \returns sbasis equal to a-b
+
+*/
+SBasis& operator-=(SBasis& a, const SBasis& b) {
+ const unsigned out_size = std::max(a.size(), b.size());
+ const unsigned min_size = std::min(a.size(), b.size());
+ a.resize(out_size);
+
+ for(unsigned i = 0; i < min_size; i++)
+ a[i] -= b[i];
+ for(unsigned i = min_size; i < b.size(); i++)
+ a[i] = -b[i];
+
+ assert(a.size() == out_size);
+ return a;
+}
+
+/** Compute the product of a with the scalar k (Exact)
+ \param a sbasis function
+ \param k scalar
+ \returns sbasis equal to a*k
+
+*/
+SBasis operator*(SBasis const &a, double k) {
+ SBasis c(a.size(), Linear());
+ for(unsigned i = 0; i < a.size(); i++)
+ c[i] = a[i] * k;
+ return c;
+}
+
+/** Compute the product of a with the scalar b and store the result in a (Exact)
+ \param a sbasis function
+ \param b scalar
+ \returns a, now equal to a*b
+
+*/
+SBasis& operator*=(SBasis& a, double b) {
+ if (a.isZero()) return a;
+ if (b == 0)
+ a.clear();
+ else
+ for(auto & i : a)
+ i *= b;
+ return a;
+}
+
+/** multiply a by x^sh (Exact)
+ \param a sbasis function
+ \param sh power
+ \returns a * x^sh
+
+*/
+SBasis shift(SBasis const &a, int sh) {
+ size_t n = a.size()+sh;
+ SBasis c(n, Linear());
+ size_t m = std::max(0, sh);
+
+ for(int i = 0; i < sh; i++)
+ c[i] = Linear(0,0);
+ for(size_t i = m, j = std::max(0,-sh); i < n; i++, j++)
+ c[i] = a[j];
+ return c;
+}
+
+/** multiply a by x^sh (Exact)
+ \param a linear function
+ \param sh power
+ \returns a* x^sh
+
+*/
+SBasis shift(Linear const &a, int sh) {
+ size_t n = 1+sh;
+ SBasis c(n, Linear());
+
+ for(int i = 0; i < sh; i++)
+ c[i] = Linear(0,0);
+ if(sh >= 0)
+ c[sh] = a;
+ return c;
+}
+
+#if 0
+SBasis multiply(SBasis const &a, SBasis const &b) {
+ // c = {a0*b0 - shift(1, a.Tri*b.Tri), a1*b1 - shift(1, a.Tri*b.Tri)}
+
+ // shift(1, a.Tri*b.Tri)
+ SBasis c(a.size() + b.size(), Linear(0,0));
+ if(a.isZero() || b.isZero())
+ return c;
+ for(unsigned j = 0; j < b.size(); j++) {
+ for(unsigned i = j; i < a.size()+j; i++) {
+ double tri = b[j].tri()*a[i-j].tri();
+ c[i+1/*shift*/] += Linear(-tri);
+ }
+ }
+ for(unsigned j = 0; j < b.size(); j++) {
+ for(unsigned i = j; i < a.size()+j; i++) {
+ for(unsigned dim = 0; dim < 2; dim++)
+ c[i][dim] += b[j][dim]*a[i-j][dim];
+ }
+ }
+ c.normalize();
+ //assert(!(0 == c.back()[0] && 0 == c.back()[1]));
+ return c;
+}
+#else
+
+/** Compute the pointwise product of a and b adding c (Exact)
+ \param a,b,c sbasis functions
+ \returns sbasis equal to a*b+c
+
+The added term is almost free
+*/
+SBasis multiply_add(SBasis const &a, SBasis const &b, SBasis c) {
+ if(a.isZero() || b.isZero())
+ return c;
+ c.resize(a.size() + b.size(), Linear(0,0));
+ for(unsigned j = 0; j < b.size(); j++) {
+ for(unsigned i = j; i < a.size()+j; i++) {
+ double tri = b[j].tri()*a[i-j].tri();
+ c[i+1/*shift*/] += Linear(-tri);
+ }
+ }
+ for(unsigned j = 0; j < b.size(); j++) {
+ for(unsigned i = j; i < a.size()+j; i++) {
+ for(unsigned dim = 0; dim < 2; dim++)
+ c[i][dim] += b[j][dim]*a[i-j][dim];
+ }
+ }
+ c.normalize();
+ //assert(!(0 == c.back()[0] && 0 == c.back()[1]));
+ return c;
+}
+
+/** Compute the pointwise product of a and b (Exact)
+ \param a,b sbasis functions
+ \returns sbasis equal to a*b
+
+*/
+SBasis multiply(SBasis const &a, SBasis const &b) {
+ if(a.isZero() || b.isZero()) {
+ SBasis c(1, Linear(0,0));
+ return c;
+ }
+ SBasis c(a.size() + b.size(), Linear(0,0));
+ return multiply_add(a, b, c);
+}
+#endif
+/** Compute the integral of c (Exact)
+ \param c sbasis function
+ \returns sbasis integral(c)
+
+*/
+SBasis integral(SBasis const &c) {
+ SBasis a;
+ a.resize(c.size() + 1, Linear(0,0));
+ a[0] = Linear(0,0);
+
+ for(unsigned k = 1; k < c.size() + 1; k++) {
+ double ahat = -c[k-1].tri()/(2*k);
+ a[k][0] = a[k][1] = ahat;
+ }
+ double aTri = 0;
+ for(int k = c.size()-1; k >= 0; k--) {
+ aTri = (c[k].hat() + (k+1)*aTri/2)/(2*k+1);
+ a[k][0] -= aTri/2;
+ a[k][1] += aTri/2;
+ }
+ a.normalize();
+ return a;
+}
+
+/** Compute the derivative of a (Exact)
+ \param a sbasis functions
+ \returns sbasis da/dt
+
+*/
+SBasis derivative(SBasis const &a) {
+ SBasis c;
+ c.resize(a.size(), Linear(0,0));
+ if(a.isZero())
+ return c;
+
+ for(unsigned k = 0; k < a.size()-1; k++) {
+ double d = (2*k+1)*(a[k][1] - a[k][0]);
+
+ c[k][0] = d + (k+1)*a[k+1][0];
+ c[k][1] = d - (k+1)*a[k+1][1];
+ }
+ int k = a.size()-1;
+ double d = (2*k+1)*(a[k][1] - a[k][0]);
+ if (d == 0 && k > 0) {
+ c.pop_back();
+ } else {
+ c[k][0] = d;
+ c[k][1] = d;
+ }
+
+ return c;
+}
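+
+// Note: derivative() and integral() are mutually inverse in the mathematical sense;
+// up to floating-point rounding and trailing-term normalization, derivative(integral(a))
+// reproduces a.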
+
+/** Compute the derivative of this inplace (Exact)
+
+*/
+void SBasis::derive() { // in place version
+ if(isZero()) return;
+ for(unsigned k = 0; k < size()-1; k++) {
+ double d = (2*k+1)*((*this)[k][1] - (*this)[k][0]);
+
+ (*this)[k][0] = d + (k+1)*(*this)[k+1][0];
+ (*this)[k][1] = d - (k+1)*(*this)[k+1][1];
+ }
+ int k = size()-1;
+ double d = (2*k+1)*((*this)[k][1] - (*this)[k][0]);
+ if (d == 0 && k > 0) {
+ pop_back();
+ } else {
+ (*this)[k][0] = d;
+ (*this)[k][1] = d;
+ }
+}
+
+/** Compute the sqrt of a
+ \param a sbasis function
+ \param k number of terms to compute
+ \returns sbasis \f[ \sqrt{a} \f]
+
+It is recommended to use the piecewise version unless you have good reason.
+TODO: convert int k to unsigned k, and remove cast
+*/
+SBasis sqrt(SBasis const &a, int k) {
+ SBasis c;
+ if(a.isZero() || k == 0)
+ return c;
+ c.resize(k, Linear(0,0));
+ c[0] = Linear(std::sqrt(a[0][0]), std::sqrt(a[0][1]));
+ SBasis r = a - multiply(c, c); // remainder
+
+ for(unsigned i = 1; i <= (unsigned)k && i<r.size(); i++) {
+ Linear ci(r[i][0]/(2*c[0][0]), r[i][1]/(2*c[0][1]));
+ SBasis cisi = shift(ci, i);
+ r -= multiply(shift((c*2 + cisi), i), SBasis(ci));
+ r.truncate(k+1);
+ c += cisi;
+ if(r.tailError(i) == 0) // if exact
+ break;
+ }
+
+ return c;
+}
+
+/** Compute the reciprocal of a
+ \param a linear function
+ \param k number of terms to compute
+ \returns sbasis 1/a
+
+It is recommended to use the piecewise version unless you have good reason.
+*/
+SBasis reciprocal(Linear const &a, int k) {
+ SBasis c;
+ assert(!a.isZero());
+ c.resize(k, Linear(0,0));
+ double r_s0 = (a.tri()*a.tri())/(-a[0]*a[1]);
+ double r_s0k = 1;
+ for(unsigned i = 0; i < (unsigned)k; i++) {
+ c[i] = Linear(r_s0k/a[0], r_s0k/a[1]);
+ r_s0k *= r_s0;
+ }
+ return c;
+}
+
+/** Compute a / b to k terms
+ \param a,b sbasis functions
+ \returns sbasis a/b
+
+It is recommended to use the piecewise version unless you have good reason.
+*/
+SBasis divide(SBasis const &a, SBasis const &b, int k) {
+ SBasis c;
+ assert(!a.isZero());
+ SBasis r = a; // remainder
+
+ k++;
+ r.resize(k, Linear(0,0));
+ c.resize(k, Linear(0,0));
+
+ for(unsigned i = 0; i < (unsigned)k; i++) {
+ Linear ci(r[i][0]/b[0][0], r[i][1]/b[0][1]); //H0
+ c[i] += ci;
+ r -= shift(multiply(ci,b), i);
+ r.truncate(k+1);
+ if(r.tailError(i) == 0) // if exact
+ break;
+ }
+
+ return c;
+}
+
+/** Compute a composed with b
+ \param a,b sbasis functions
+ \returns sbasis a(b(t))
+
+ Returns a_0 + s*(a_1 + s*(a_2 + ...)), where s = (1-u)u and a_k = (1-u)*a^0_k + u*a^1_k.
+*/
+SBasis compose(SBasis const &a, SBasis const &b) {
+ SBasis s = multiply((SBasis(Linear(1,1))-b), b);
+ SBasis r;
+
+ for(int i = a.size()-1; i >= 0; i--) {
+ r = multiply_add(r, s, SBasis(Linear(a[i][0])) - b*a[i][0] + b*a[i][1]);
+ }
+ return r;
+}
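+
+// Example (a sketch, for any SBasis a): composing with a Linear restricts the
+// parameter range, mirroring the use of compose() in portion() below.
+//
+//   Geom::SBasis first_half = Geom::compose(a, Geom::Linear(0, 0.5));
+//   // first_half.valueAt(1.0) == a.valueAt(0.5), up to rounding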
+
+/** Compute a composed with b to k terms
+ \param a,b sbasis functions
+ \returns sbasis a(b(t))
+
+ Returns a_0 + s*(a_1 + s*(a_2 + ...)) truncated to k terms, where s = (1-u)u and a_k = (1-u)*a^0_k + u*a^1_k.
+*/
+SBasis compose(SBasis const &a, SBasis const &b, unsigned k) {
+ SBasis s = multiply((SBasis(Linear(1,1))-b), b);
+ SBasis r;
+
+ for(int i = a.size()-1; i >= 0; i--) {
+ r = multiply_add(r, s, SBasis(Linear(a[i][0])) - b*a[i][0] + b*a[i][1]);
+ }
+ r.truncate(k);
+ return r;
+}
+
+SBasis portion(const SBasis &t, double from, double to) {
+ double fv = t.valueAt(from);
+ double tv = t.valueAt(to);
+ SBasis ret = compose(t, Linear(from, to));
+ ret.at0() = fv;
+ ret.at1() = tv;
+ return ret;
+}
+
+/*
+Inversion algorithm. The notation is certainly very misleading. The
+pseudocode should say:
+
+c(v) := 0
+r(u) := r_0(u) := u
+for i:=0 to k do
+ c_i(v) := H_0(r_i(u)/(t_1)^i; u)
+ c(v) := c(v) + c_i(v)*t^i
+  r(u) := r(u) - c_i(u)*(t(u))^i
+endfor
+*/
+
+//#define DEBUG_INVERSION 1
+
+/** find the function a^-1 such that a^-1 composed with a to k terms is the identity function
+ \param a sbasis function
+ \returns sbasis a^-1 s.t. a^-1(a(t)) = t
+
+ The function must have 'unit range'("a00 = 0 and a01 = 1") and be monotonic.
+*/
+SBasis inverse(SBasis a, int k) {
+ assert(a.size() > 0);
+ double a0 = a[0][0];
+ if(a0 != 0) {
+ a -= a0;
+ }
+ double a1 = a[0][1];
+    assert(a1 != 0); // not invertible.
+
+ if(a1 != 1) {
+ a /= a1;
+ }
+ SBasis c(k, Linear()); // c(v) := 0
+ if(a.size() >= 2 && k == 2) {
+ c[0] = Linear(0,1);
+ Linear t1(1+a[1][0], 1-a[1][1]); // t_1
+ c[1] = Linear(-a[1][0]/t1[0], -a[1][1]/t1[1]);
+ } else if(a.size() >= 2) { // non linear
+ SBasis r = Linear(0,1); // r(u) := r_0(u) := u
+ Linear t1(1./(1+a[1][0]), 1./(1-a[1][1])); // 1./t_1
+ Linear one(1,1);
+ Linear t1i = one; // t_1^0
+ SBasis one_minus_a = SBasis(one) - a;
+ SBasis t = multiply(one_minus_a, a); // t(u)
+ SBasis ti(one); // t(u)^0
+#ifdef DEBUG_INVERSION
+ std::cout << "a=" << a << std::endl;
+ std::cout << "1-a=" << one_minus_a << std::endl;
+ std::cout << "t1=" << t1 << std::endl;
+ //assert(t1 == t[1]);
+#endif
+
+ //c.resize(k+1, Linear(0,0));
+ for(unsigned i = 0; i < (unsigned)k; i++) { // for i:=0 to k do
+#ifdef DEBUG_INVERSION
+ std::cout << "-------" << i << ": ---------" <<std::endl;
+ std::cout << "r=" << r << std::endl
+ << "c=" << c << std::endl
+ << "ti=" << ti << std::endl
+ << std::endl;
+#endif
+ if(r.size() <= i) // ensure enough space in the remainder, probably not needed
+ r.resize(i+1, Linear(0,0));
+ Linear ci(r[i][0]*t1i[0], r[i][1]*t1i[1]); // c_i(v) := H_0(r_i(u)/(t_1)^i; u)
+#ifdef DEBUG_INVERSION
+ std::cout << "t1i=" << t1i << std::endl;
+ std::cout << "ci=" << ci << std::endl;
+#endif
+ for(int dim = 0; dim < 2; dim++) // t1^-i *= 1./t1
+ t1i[dim] *= t1[dim];
+ c[i] = ci; // c(v) := c(v) + c_i(v)*t^i
+ // change from v to u parameterisation
+ SBasis civ = one_minus_a*ci[0] + a*ci[1];
+ // r(u) := r(u) - c_i(u)*(t(u))^i
+ // We can truncate this to the number of final terms, as no following terms can
+ // contribute to the result.
+ r -= multiply(civ,ti);
+ r.truncate(k);
+ if(r.tailError(i) == 0)
+ break; // yay!
+ ti = multiply(ti,t);
+ }
+#ifdef DEBUG_INVERSION
+ std::cout << "##########################" << std::endl;
+#endif
+ } else
+ c = Linear(0,1); // linear
+ c -= a0; // invert the offset
+ c /= a1; // invert the slope
+ return c;
+}
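+
+// Illustrative usage (a sketch; f is assumed monotonic with f(0) = 0 and f(1) = 1):
+//
+//   Geom::SBasis g = Geom::inverse(f, 3);   // 3-term approximation of f^-1
+//   Geom::SBasis id = Geom::compose(g, f);  // close to the identity Linear(0, 1)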
+
+/** Compute the sine of b to k terms
+ \param b linear function
+ \param k number of terms to compute
+ \returns sbasis sin(b)
+
+It is recommended to use the piecewise version unless you have good reason.
+*/
+SBasis sin(Linear b, int k) {
+ SBasis s(k+2, Linear());
+ s[0] = Linear(std::sin(b[0]), std::sin(b[1]));
+ double tr = s[0].tri();
+ double t2 = b.tri();
+ s[1] = Linear(std::cos(b[0])*t2 - tr, -std::cos(b[1])*t2 + tr);
+
+ t2 *= t2;
+ for(int i = 0; i < k; i++) {
+ Linear bo(4*(i+1)*s[i+1][0] - 2*s[i+1][1],
+ -2*s[i+1][0] + 4*(i+1)*s[i+1][1]);
+ bo -= s[i]*(t2/(i+1));
+
+
+ s[i+2] = bo/double(i+2);
+ }
+
+ return s;
+}
+
+/** Compute the cosine of bo to k terms
+ \param bo linear function
+ \param k number of terms to compute
+ \returns sbasis cos(bo)
+
+It is recommended to use the piecewise version unless you have good reason.
+*/
+SBasis cos(Linear bo, int k) {
+ return sin(Linear(bo[0] + M_PI/2,
+ bo[1] + M_PI/2),
+ k);
+}
+
+/** compute f o g^-1, i.e. f composed with the inverse of g.
+ \param f,g sbasis functions
+ \returns sbasis f(g^-1(t)).
+
+("zero" is the double comparison threshold; beware that we might divide by "zero".)
+TODO: compute order according to tol?
+TODO: requires g(0)=0 & g(1)=1 atm... adaptation to other cases should be obvious!
+*/
+SBasis compose_inverse(SBasis const &f, SBasis const &g, unsigned order, double zero){
+ SBasis result(order, Linear(0.)); //result
+ SBasis r=f; //remainder
+ SBasis Pk=Linear(1)-g,Qk=g,sg=Pk*Qk;
+ Pk.truncate(order);
+ Qk.truncate(order);
+ Pk.resize(order,Linear(0.));
+ Qk.resize(order,Linear(0.));
+ r.resize(order,Linear(0.));
+
+ int vs = valuation(sg,zero);
+ if (vs == 0) { // to prevent infinite loop
+ return result;
+ }
+
+ for (unsigned k=0; k<order; k+=vs){
+ double p10 = Pk.at(k)[0];// we have to solve the linear system:
+ double p01 = Pk.at(k)[1];//
+ double q10 = Qk.at(k)[0];// p10*a + q10*b = r10
+ double q01 = Qk.at(k)[1];// &
+ double r10 = r.at(k)[0];// p01*a + q01*b = r01
+ double r01 = r.at(k)[1];//
+ double a,b;
+ double det = p10*q01-p01*q10;
+
+ //TODO: handle det~0!!
+ if (fabs(det)<zero){
+ a=b=0;
+ }else{
+ a=( q01*r10-q10*r01)/det;
+ b=(-p01*r10+p10*r01)/det;
+ }
+ result[k] = Linear(a,b);
+ r=r-Pk*a-Qk*b;
+
+ Pk=Pk*sg;
+ Qk=Qk*sg;
+
+ Pk.resize(order,Linear(0.)); // truncates if too high order, expands with zeros if too low
+ Qk.resize(order,Linear(0.));
+ r.resize(order,Linear(0.));
+
+ }
+ result.normalize();
+ return result;
+}
+
+}
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/self-intersect.cpp b/src/2geom/self-intersect.cpp
new file mode 100644
index 0000000..4fe4d9e
--- /dev/null
+++ b/src/2geom/self-intersect.cpp
@@ -0,0 +1,313 @@
+/**
+ * @file Implementation of Path::intersectSelf() and PathVector::intersectSelf().
+ */
+/* An algorithm for finding self-intersections of paths and path-vectors.
+ *
+ * Authors:
+ * Rafał Siejakowski <rs@rs-math.net>
+ *
+ * (C) Copyright 2022 Authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+#include <list>
+
+#include <2geom/coord.h>
+#include <2geom/curve.h>
+#include <2geom/path.h>
+#include <2geom/pathvector.h>
+#include <2geom/point.h>
+#include <2geom/sweeper.h>
+
+namespace Geom {
+
+/** @brief The PathSelfIntersector class is a sweepset class used for intersecting curves in the
+ * same path with one another. It is intended to be used as the template parameter of Sweeper.
+ */
+class PathSelfIntersector
+{
+public:
+ using ItemIterator = Path::iterator;
+
+private:
+ Path _path; ///< The path searched for self-crossings, cleaned of degenerate curves.
+ std::list<ItemIterator> _active; ///< List of active curves during the sweepline passage.
+ std::vector<PathIntersection> _crossings; ///< Stores the crossings found.
+ std::vector<size_t> _original_indices; ///< Curve indices before removal of degenerate curves.
+ double const _precision; ///< Numerical epsilon.
+
+public:
+ PathSelfIntersector(Path const &path, double precision)
+ : _path{path.initialPoint()}
+ , _precision{precision}
+ {
+ _original_indices.reserve(path.size());
+ for (size_t i = 0; i < path.size(); i++) {
+ if (!path[i].isDegenerate()) {
+ _path.append(path[i]);
+ _original_indices.push_back(i);
+ }
+ }
+ _path.close(path.closed());
+ }
+
+ // === SweepSet API ===
+ auto &items() { return _path; }
+ Interval itemBounds(ItemIterator curve) const { return curve->boundsFast()[X]; }
+ /// Callback for when the sweepline starts intersecting a new item.
+ void addActiveItem(ItemIterator incoming)
+ {
+ _intersectWithActive(incoming);
+ _intersectWithSelf(incoming);
+ _active.push_back(incoming);
+ }
+ /// Callback for when the sweepline stops intersecting an item.
+ void removeActiveItem(ItemIterator to_remove)
+ {
+ auto it = std::find(_active.begin(), _active.end(), to_remove);
+ _active.erase(it);
+ }
+ // ===
+
+ std::vector<PathIntersection> &&moveOutCrossings() { return std::move(_crossings); }
+
+private:
+ /** Find and store all intersections of a curve with itself. */
+ void _intersectWithSelf(ItemIterator curve)
+ {
+ size_t const index = std::distance(_path.begin(), curve);
+ for (auto &&self_x : curve->intersectSelf(_precision)) {
+ _appendCurveCrossing(std::move(self_x), index, index);
+ }
+ }
+
+ /** Find and store all intersections of a curve with the active curves. */
+ void _intersectWithActive(ItemIterator curve)
+ {
+ size_t const index = std::distance(_path.begin(), curve);
+ for (auto const &other : _active) {
+ if (!curve->boundsFast().intersects(other->boundsFast())) {
+ continue;
+ }
+
+ size_t const other_index = std::distance(_path.begin(), other);
+ auto const &[smaller, larger] = std::minmax(index, other_index);
+ /// Whether the curves meet at a common node in the path.
+ bool consecutive = smaller + 1 == larger;
+ /// Whether the curves meet at the closure point of the path.
+ bool wraparound = _path.closed() && smaller == 0 && larger + 1 == _path.size();
+ for (auto &&xing : curve->intersect(*other, _precision)) {
+ _appendCurveCrossing(std::move(xing), index, other_index, consecutive, wraparound);
+ }
+ }
+ }
+
+ /** Append a curve crossing to the store as long as it satisfies nondegeneracy criteria. */
+ void _appendCurveCrossing(CurveIntersection &&xing, size_t first_index, size_t second_index,
+ bool consecutive = false, bool wraparound = false)
+ {
+ // Filter out crossings that aren't real but rather represent the agreement of final
+ // and initial points of consecutive curves – a consequence of the path's continuity.
+ auto const should_exclude = [&](bool flipped) -> bool {
+ // Filter out spurious self-intersections by using squared geometric average.
+ bool const first_is_first = (first_index < second_index) ^ flipped;
+ double const geom2 = first_is_first ? (1.0 - xing.first) * xing.second
+ : (1.0 - xing.second) * xing.first;
+ return geom2 < EPSILON;
+ };
+
+ if ((consecutive && should_exclude(false)) || (wraparound && should_exclude(true))) {
+ return;
+ }
+
+ // Convert curve indices to the original ones (before the removal of degenerate curves).
+ _crossings.emplace_back(PathTime(_original_indices[first_index], xing.first),
+ PathTime(_original_indices[second_index], xing.second),
+ xing.point());
+ }
+};
+
+// Compute all crossings of a path with itself.
+std::vector<PathIntersection> Path::intersectSelf(Coord precision) const
+{
+ auto intersector = PathSelfIntersector(*this, precision);
+ Sweeper(intersector).process();
+ auto result = intersector.moveOutCrossings();
+ std::sort(result.begin(), result.end());
+ return result;
+}
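+
+// Illustrative usage (a sketch, for some Geom::Path path):
+//
+//   std::vector<Geom::PathIntersection> xs = path.intersectSelf(Geom::EPSILON);
+//   for (auto const &x : xs) {
+//       Geom::Point where = x.point();  // location of the self-crossing
+//   }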
+
+/**
+ * @brief The PathVectorSelfIntersector class is an implementation of a SweepSet whose intended
+ * use is the search for self-intersections in a single PathVector. It's designed to be used as
+ * the template parameter for the Sweeper class template.
+ */
+class PathVectorSelfIntersector
+{
+public:
+ using ItemIterator = PathVector::const_iterator;
+
+private:
+ PathVector const &_pathvector; ///< A reference to the path-vector searched for self-crossings.
+ std::list<ItemIterator> _active; ///< A list of active paths during sweepline passage.
+ std::vector<PathVectorIntersection> _crossings; ///< Stores the crossings found.
+ double const _precision; ///< Numerical epsilon.
+
+public:
+ PathVectorSelfIntersector(PathVector const &subject, double precision)
+ : _pathvector{subject}
+ , _precision{precision}
+ {
+ }
+
+ // == SweepSet API ===
+ auto const &items() { return _pathvector; }
+ Interval itemBounds(ItemIterator path)
+ {
+ auto const r = path->boundsFast();
+ return r ? (*r)[X] : Interval(); // Sweeplines are vertical
+ }
+
+ /// Callback for when the sweepline starts intersecting a new item.
+ void addActiveItem(ItemIterator incoming)
+ {
+ _intersectWithActive(incoming);
+ _intersectWithSelf(incoming);
+ _active.push_back(incoming);
+ }
+
+ /// Callback for when the sweepline stops intersecting an item.
+ void removeActiveItem(ItemIterator to_remove)
+ {
+ auto it = std::find(_active.begin(), _active.end(), to_remove);
+ _active.erase(it);
+ }
+ // ===
+
+ std::vector<PathVectorIntersection> &&moveOutCrossings() { return std::move(_crossings); }
+
+private:
+ /**
+ * @brief Find all intersections of the path pointed to by the given
+ * iterator with all currently active paths and store results
+ * in the instance of the class.
+ *
+ * @param it An iterator to a path to be intersected with the active ones.
+ */
+ void _intersectWithActive(ItemIterator &it);
+
+ /**
+ * @brief Find all intersections of the path pointed to by the given
+ * iterator with itself and store the results in the class instance.
+ *
+ * @param it An iterator to a path which will be intersected with itself.
+ */
+ void _intersectWithSelf(ItemIterator &it);
+
+ /// Append a path crossing to the store.
+ void _appendPathCrossing(PathIntersection const &xing, size_t first_index, size_t second_index)
+ {
+ auto const first_time = PathVectorTime(first_index, xing.first);
+ auto const second_time = PathVectorTime(second_index, xing.second);
+ _crossings.emplace_back(first_time, second_time, xing.point());
+ }
+
+public:
+
+ std::vector<PathVectorIntersection>
+ filterDeduplicate(std::vector<PathVectorIntersection> &&xings) const;
+};
+
+/** Remove duplicate intersections (artifacts of the path/curve crossing algorithms). */
+std::vector<PathVectorIntersection>
+PathVectorSelfIntersector::filterDeduplicate(std::vector<PathVectorIntersection> &&xings) const
+{
+ std::vector<PathVectorIntersection> result;
+ result.reserve(xings.size());
+
+ auto const are_same_times = [&](Coord a1, Coord a2, Coord b1, Coord b2) -> bool {
+ return (are_near(a1, b1) && are_near(a2, b2)) ||
+ (are_near(a1, b2) && are_near(a2, b1));
+ };
+
+ Coord last_time_1 = -1.0, last_time_2 = -1.0; // Invalid path times
+ for (auto &&x : xings) {
+ auto const current_1 = x.first.asFlatTime(), current_2 = x.second.asFlatTime();
+ if (!are_same_times(current_1, current_2, last_time_1, last_time_2)) {
+ result.push_back(std::move(x));
+ }
+ last_time_1 = current_1;
+ last_time_2 = current_2;
+ }
+
+ return result;
+}
+
+/** Compute and store intersections of a path with all active paths. */
+void PathVectorSelfIntersector::_intersectWithActive(ItemIterator &it)
+{
+ auto const start = _pathvector.begin();
+ for (auto &path : _active) {
+ if (!path->boundsFast().intersects(it->boundsFast())) {
+ continue;
+ }
+ for (auto &&xing : path->intersect(*it, _precision)) {
+ _appendPathCrossing(std::move(xing), std::distance(start, path),
+ std::distance(start, it));
+ }
+ }
+}
+
+/** Compute and store intersections of a constituent path with itself. */
+void PathVectorSelfIntersector::_intersectWithSelf(ItemIterator &it)
+{
+ size_t const path_index = std::distance(_pathvector.begin(), it);
+ for (auto &&xing : it->intersectSelf(_precision)) {
+ _appendPathCrossing(std::move(xing), path_index, path_index);
+ }
+}
+
+// Compute self-intersections in a path-vector.
+std::vector<PathVectorIntersection> PathVector::intersectSelf(Coord precision) const
+{
+ auto intersector = PathVectorSelfIntersector(*this, precision);
+ Sweeper(intersector).process();
+ auto result = intersector.moveOutCrossings();
+ std::sort(result.begin(), result.end());
+ return (result.size() > 1) ? intersector.filterDeduplicate(std::move(result)) : result;
+}
+
+} // namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/solve-bezier-one-d.cpp b/src/2geom/solve-bezier-one-d.cpp
new file mode 100644
index 0000000..b82d20b
--- /dev/null
+++ b/src/2geom/solve-bezier-one-d.cpp
@@ -0,0 +1,243 @@
+
+#include <2geom/solver.h>
+#include <2geom/choose.h>
+#include <2geom/bezier.h>
+#include <2geom/point.h>
+
+#include <cmath>
+#include <algorithm>
+//#include <valarray>
+
+/*** Find the zeros of the Bernstein function. The code subdivides until it is happy with the
+ * linearity of the function. This requires an O(degree^2) subdivision for each step, even when
+ * there is only one solution.
+ */
+
+namespace Geom {
+
+template<class t>
+static int SGN(t x) { return (x > 0 ? 1 : (x < 0 ? -1 : 0)); }
+
+//const unsigned MAXDEPTH = 23; // Maximum depth for recursion. Using floats means 23 bits precision max
+
+//const double BEPSILON = ldexp(1.0,(-MAXDEPTH-1)); /*Flatness control value */
+//const double SECANT_EPSILON = 1e-13; // secant method converges much faster, get a bit more precision
+/**
+ * This code is called _a lot_. It includes some manual memory management to reduce the amount of mallocing that goes on; it is possible that this will eventually hurt performance.
+ **/
+class Bernsteins
+{
+public:
+ static constexpr size_t MAX_DEPTH = 53;
+ size_t degree, N;
+ std::vector<double> &solutions;
+
+ Bernsteins(size_t _degree, std::vector<double> &sol)
+ : degree(_degree), N(degree+1), solutions(sol)
+ {
+ }
+
+ unsigned
+ control_poly_flat_enough(double const *V);
+
+ void
+ find_bernstein_roots(double const *w, /* The control points */
+ unsigned depth, /* The depth of the recursion */
+ double left_t, double right_t);
+};
+/*
+ * find_bernstein_roots : Given an equation in Bernstein-Bezier form, find all
+ * of the roots in the open interval (0, 1) and append them to solutions.
+ */
+void
+find_bernstein_roots(double const *w, /* The control points */
+ unsigned degree, /* The degree of the polynomial */
+ std::vector<double> &solutions, /* RETURN candidate t-values */
+ unsigned depth, /* The depth of the recursion */
+ double left_t, double right_t, bool /*use_secant*/)
+{
+ Bernsteins B(degree, solutions);
+ B.find_bernstein_roots(w, depth, left_t, right_t);
+}
+
+void
+find_bernstein_roots(std::vector<double> &solutions, /* RETURN candidate t-values */
+ Geom::Bezier const &bz, /* The control points */
+ double left_t, double right_t)
+{
+ Bernsteins B(bz.degree(), solutions);
+ Geom::Bezier& bzl = const_cast<Geom::Bezier&>(bz);
+ double* w = &(bzl[0]);
+ B.find_bernstein_roots(w, 0, left_t, right_t);
+}
+
+
+
+void Bernsteins::find_bernstein_roots(double const *w, /* The control points */
+ unsigned depth, /* The depth of the recursion */
+ double left_t,
+ double right_t)
+{
+
+ size_t n_crossings = 0;
+
+ int old_sign = SGN(w[0]);
+ //std::cout << "w[0] = " << w[0] << std::endl;
+ for (size_t i = 1; i < N; i++)
+ {
+ //std::cout << "w[" << i << "] = " << w[i] << std::endl;
+ int sign = SGN(w[i]);
+ if (sign != 0)
+ {
+ if (sign != old_sign && old_sign != 0)
+ {
+ ++n_crossings;
+ }
+ old_sign = sign;
+ }
+ }
+ //std::cout << "n_crossings = " << n_crossings << std::endl;
+ if (n_crossings == 0) return; // no solutions here
+
+ if (n_crossings == 1) /* Unique solution */
+ {
+ //std::cout << "depth = " << depth << std::endl;
+ /* Stop recursion when the tree is deep enough */
+ /* if deep enough, return 1 solution at midpoint */
+ if (depth > MAX_DEPTH)
+ {
+ //printf("bottom out %d\n", depth);
+ const double Ax = right_t - left_t;
+ const double Ay = w[degree] - w[0];
+
+ solutions.push_back(left_t - Ax*w[0] / Ay);
+ return;
+ }
+
+
+ double s = 0, t = 1;
+ double e = 1e-10;
+ int side = 0;
+ double r, fs = w[0], ft = w[degree];
+
+ for (size_t n = 0; n < 100; ++n)
+ {
+ r = (fs*t - ft*s) / (fs - ft);
+ if (fabs(t-s) < e * fabs(t+s)) break;
+
+ double fr = bernstein_value_at(r, w, degree);
+
+ if (fr * ft > 0)
+ {
+ t = r; ft = fr;
+ if (side == -1) fs /= 2;
+ side = -1;
+ }
+ else if (fs * fr > 0)
+ {
+ s = r; fs = fr;
+ if (side == +1) ft /= 2;
+ side = +1;
+ }
+ else break;
+ }
+ solutions.push_back(r*right_t + (1-r)*left_t);
+ return;
+
+ }
+
+ /* Otherwise, solve recursively after subdividing control polygon */
+// double Left[N], /* New left and right */
+// Right[N]; /* control polygons */
+ //const double t = 0.5;
+ double* LR = new double[2*N];
+ double* Left = LR;
+ double* Right = LR + N;
+
+ std::copy(w, w + N, Right);
+
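+    // In-place de Casteljau subdivision at t = 0.5: each pass averages adjacent
+    // entries of Right; after pass i, Right[0] holds the i-th control point of the
+    // left half and is copied into Left[i]. When the loop finishes, Left and Right
+    // hold the control polygons of the left and right halves respectively.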
+ Left[0] = Right[0];
+ for (size_t i = 1; i < N; ++i)
+ {
+ for (size_t j = 0; j < N-i; ++j)
+ {
+ Right[j] = (Right[j] + Right[j+1]) * 0.5;
+ }
+ Left[i] = Right[0];
+ }
+
+ double mid_t = (left_t + right_t) * 0.5;
+
+
+ find_bernstein_roots(Left, depth+1, left_t, mid_t);
+
+
+ /* Solution is exactly on the subdivision point. */
+ if (Right[0] == 0)
+ {
+ solutions.push_back(mid_t);
+ }
+
+ find_bernstein_roots(Right, depth+1, mid_t, right_t);
+ delete[] LR;
+}
+
+#if 0
+/*
+ * control_poly_flat_enough :
+ * Check if the control polygon of a Bernstein curve is flat enough
+ * for recursive subdivision to bottom out.
+ *
+ */
+unsigned
+Bernsteins::control_poly_flat_enough(double const *V)
+{
+ /* Find the perpendicular distance from each interior control point to line connecting V[0] and
+ * V[degree] */
+
+ /* Derive the implicit equation for line connecting first */
+ /* and last control points */
+ const double a = V[0] - V[degree];
+
+ double max_distance_above = 0.0;
+ double max_distance_below = 0.0;
+ double ii = 0, dii = 1./degree;
+ for (unsigned i = 1; i < degree; i++) {
+ ii += dii;
+ /* Compute distance from each of the points to that line */
+ const double d = (a + V[i]) * ii - a;
+ double dist = d*d;
+ // Find the largest distance
+ if (d < 0.0)
+ max_distance_below = std::min(max_distance_below, -dist);
+ else
+ max_distance_above = std::max(max_distance_above, dist);
+ }
+
+ const double abSquared = 1./((a * a) + 1);
+
+ const double intercept_1 = (a - max_distance_above * abSquared);
+ const double intercept_2 = (a - max_distance_below * abSquared);
+
+ /* Compute bounding interval*/
+ const double left_intercept = std::min(intercept_1, intercept_2);
+ const double right_intercept = std::max(intercept_1, intercept_2);
+
+ const double error = 0.5 * (right_intercept - left_intercept);
+ //printf("error %g %g %g\n", error, a, BEPSILON * a);
+ return error < BEPSILON * a;
+}
+#endif
+
+} // namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/solve-bezier-parametric.cpp b/src/2geom/solve-bezier-parametric.cpp
new file mode 100644
index 0000000..2fb3f41
--- /dev/null
+++ b/src/2geom/solve-bezier-parametric.cpp
@@ -0,0 +1,189 @@
+#include <2geom/bezier.h>
+#include <2geom/point.h>
+#include <2geom/solver.h>
+#include <algorithm>
+
+namespace Geom {
+
+/*** Find the zeros of the parametric function in 2d defined by two beziers X(t), Y(t). The code subdivides until it is happy with the linearity of the bezier. This requires an O(n^2) subdivision for each step, even when there is only one solution.
+ *
+ * Perhaps it would be better to subdivide particularly around nodes with changing sign, rather than simply cutting in half.
+ */
+
+#define SGN(a) (((a)<0) ? -1 : 1)
+
+/*
+ * Forward declarations
+ */
+unsigned
+crossing_count(Geom::Point const *V, unsigned degree);
+static unsigned
+control_poly_flat_enough(Geom::Point const *V, unsigned degree);
+static double
+compute_x_intercept(Geom::Point const *V, unsigned degree);
+
+const unsigned MAXDEPTH = 64; /* Maximum depth for recursion */
+
+const double BEPSILON = ldexp(1.0,-MAXDEPTH-1); /*Flatness control value */
+
+unsigned total_steps, total_subs;
+
+/*
+ * find_parametric_bezier_roots : Given an equation in Bernstein-Bezier form, find all
+ * of the roots in the interval [0, 1] and append them to solutions.
+ */
+void
+find_parametric_bezier_roots(Geom::Point const *w, /* The control points */
+ unsigned degree, /* The degree of the polynomial */
+ std::vector<double> &solutions, /* RETURN candidate t-values */
+ unsigned depth) /* The depth of the recursion */
+{
+ total_steps++;
+ const unsigned max_crossings = crossing_count(w, degree);
+ switch (max_crossings) {
+ case 0: /* No solutions here */
+ return;
+
+ case 1:
+ /* Unique solution */
+ /* Stop recursion when the tree is deep enough */
+ /* if deep enough, return 1 solution at midpoint */
+ if (depth >= MAXDEPTH) {
+ solutions.push_back((w[0][Geom::X] + w[degree][Geom::X]) / 2.0);
+ return;
+ }
+
+ // I thought secant method would be faster here, but it'aint. -- njh
+
+ if (control_poly_flat_enough(w, degree)) {
+ solutions.push_back(compute_x_intercept(w, degree));
+ return;
+ }
+ break;
+ }
+
+ /* Otherwise, solve recursively after subdividing control polygon */
+
+ //Geom::Point Left[degree+1], /* New left and right */
+ // Right[degree+1]; /* control polygons */
+ std::vector<Geom::Point> Left( degree+1 ), Right(degree+1);
+
+ casteljau_subdivision(0.5, w, Left.data(), Right.data(), degree);
+ total_subs ++;
+ find_parametric_bezier_roots(Left.data(), degree, solutions, depth+1);
+ find_parametric_bezier_roots(Right.data(), degree, solutions, depth+1);
+}
+
+
+/*
+ * crossing_count:
+ * Count the number of times a Bezier control polygon
+ * crosses the 0-axis. This number is >= the number of roots.
+ *
+ */
+unsigned
+crossing_count(Geom::Point const *V, /* Control pts of Bezier curve */
+ unsigned degree) /* Degree of Bezier curve */
+{
+ unsigned n_crossings = 0; /* Number of zero-crossings */
+
+ int old_sign = SGN(V[0][Geom::Y]);
+ for (unsigned i = 1; i <= degree; i++) {
+ int sign = SGN(V[i][Geom::Y]);
+ if (sign != old_sign)
+ n_crossings++;
+ old_sign = sign;
+ }
+ return n_crossings;
+}
+
+
+
+/*
+ * control_poly_flat_enough :
+ * Check if the control polygon of a Bezier curve is flat enough
+ * for recursive subdivision to bottom out.
+ *
+ */
+static unsigned
+control_poly_flat_enough(Geom::Point const *V, /* Control points */
+ unsigned degree) /* Degree of polynomial */
+{
+ /* Find the perpendicular distance from each interior control point to line connecting V[0] and
+ * V[degree] */
+
+ /* Derive the implicit equation for line connecting first */
+ /* and last control points */
+ const double a = V[0][Geom::Y] - V[degree][Geom::Y];
+ const double b = V[degree][Geom::X] - V[0][Geom::X];
+ const double c = V[0][Geom::X] * V[degree][Geom::Y] - V[degree][Geom::X] * V[0][Geom::Y];
+
+ const double abSquared = (a * a) + (b * b);
+
+ //double distance[degree]; /* Distances from pts to line */
+ std::vector<double> distance(degree); /* Distances from pts to line */
+ for (unsigned i = 1; i < degree; i++) {
+ /* Compute distance from each of the points to that line */
+ double & dist(distance[i-1]);
+ const double d = a * V[i][Geom::X] + b * V[i][Geom::Y] + c;
+ dist = d*d / abSquared;
+ if (d < 0.0)
+ dist = -dist;
+ }
+
+
+ // Find the largest distance
+ double max_distance_above = 0.0;
+ double max_distance_below = 0.0;
+ for (unsigned i = 0; i < degree-1; i++) {
+ const double d = distance[i];
+ if (d < 0.0)
+ max_distance_below = std::min(max_distance_below, d);
+ if (d > 0.0)
+ max_distance_above = std::max(max_distance_above, d);
+ }
+
+ const double intercept_1 = (c + max_distance_above) / -a;
+ const double intercept_2 = (c + max_distance_below) / -a;
+
+ /* Compute bounding interval*/
+ const double left_intercept = std::min(intercept_1, intercept_2);
+ const double right_intercept = std::max(intercept_1, intercept_2);
+
+ const double error = 0.5 * (right_intercept - left_intercept);
+
+ if (error < BEPSILON)
+ return 1;
+
+ return 0;
+}
+
+
+
+/*
+ * compute_x_intercept :
+ * Compute intersection of chord from first control point to last
+ * with 0-axis.
+ *
+ */
+static double
+compute_x_intercept(Geom::Point const *V, /* Control points */
+ unsigned degree) /* Degree of curve */
+{
+ const Geom::Point A = V[degree] - V[0];
+
+ return (A[Geom::X]*V[0][Geom::Y] - A[Geom::Y]*V[0][Geom::X]) / -A[Geom::Y];
+}
+
+} // namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/solve-bezier.cpp b/src/2geom/solve-bezier.cpp
new file mode 100644
index 0000000..4ff42bb
--- /dev/null
+++ b/src/2geom/solve-bezier.cpp
@@ -0,0 +1,304 @@
+
+#include <2geom/solver.h>
+#include <2geom/choose.h>
+#include <2geom/bezier.h>
+#include <2geom/point.h>
+
+#include <cmath>
+#include <algorithm>
+
+/*** Find the zeros of a Bezier. The code subdivides until it is happy with the linearity of the
+ * function. This requires an O(degree^2) subdivision for each step, even when there is only one
+ * solution.
+ *
+ * We try fairly hard to correctly handle multiple roots.
+ */
+
+//#define debug(x) do{x;}while(0)
+#define debug(x)
+
+namespace Geom{
+
+template<class t>
+static int SGN(t x) { return (x > 0 ? 1 : (x < 0 ? -1 : 0)); }
+
+class Bernsteins{
+public:
+ static const size_t MAX_DEPTH = 22;
+ std::vector<double> &solutions;
+ //std::vector<double> dsolutions;
+
+ Bernsteins(std::vector<double> & sol)
+ : solutions(sol)
+ {}
+
+ void subdivide(double const *V,
+ double t,
+ double *Left,
+ double *Right);
+
+ double secant(Bezier const &bz);
+
+
+ void find_bernstein_roots(Bezier const &bz, unsigned depth,
+ double left_t, double right_t);
+};
+
+template <typename T>
+inline std::ostream &operator<< (std::ostream &out_file, const std::vector<T> & b) {
+ out_file << "[";
+ for(unsigned i = 0; i < b.size(); i++) {
+ out_file << b[i] << ", ";
+ }
+ return out_file << "]";
+}
+
+void convex_hull_marching(Bezier const &src_bz, Bezier bz,
+ std::vector<double> &solutions,
+ double left_t,
+ double right_t)
+{
+ while(bz.order() > 0 && bz[0] == 0) {
+ std::cout << "deflate\n";
+ bz = bz.deflate();
+ solutions.push_back(left_t);
+ }
+ std::cout << std::endl;
+ if (bz.order() > 0) {
+
+ int old_sign = SGN(bz[0]);
+
+ double left_bound = 0;
+ double dt = 0;
+ for (size_t i = 1; i < bz.size(); i++)
+ {
+ int sign = SGN(bz[i]);
+ if (sign != old_sign)
+ {
+ dt = double(i) / bz.order();
+ left_bound = dt * bz[0] / (bz[0] - bz[i]);
+ break;
+ }
+ old_sign = sign;
+ }
+ if (dt == 0) return;
+ std::cout << bz << std::endl;
+ std::cout << "dt = " << dt << std::endl;
+ std::cout << "left_t = " << left_t << std::endl;
+ std::cout << "right_t = " << right_t << std::endl;
+ std::cout << "left bound = " << left_bound
+ << " = " << bz(left_bound) << std::endl;
+ double new_left_t = left_bound * (right_t - left_t) + left_t;
+ std::cout << "new_left_t = " << new_left_t << std::endl;
+ Bezier bzr = portion(src_bz, new_left_t, 1);
+ while(bzr.order() > 0 && bzr[0] == 0) {
+ std::cout << "deflate\n";
+ bzr = bzr.deflate();
+ solutions.push_back(new_left_t);
+ }
+ if (left_t < new_left_t) {
+ convex_hull_marching(src_bz, bzr,
+ solutions,
+ new_left_t, right_t);
+ } else {
+ std::cout << "epsilon reached\n";
+ while(bzr.order() > 0 && fabs(bzr[0]) <= 1e-10) {
+ std::cout << "deflate\n";
+ bzr = bzr.deflate();
+ std::cout << bzr << std::endl;
+ solutions.push_back(new_left_t);
+ }
+
+ }
+ }
+}
+
+void
+Bezier::find_bezier_roots(std::vector<double> &solutions,
+ double left_t, double right_t) const {
+ Bezier bz = *this;
+ //convex_hull_marching(bz, bz, solutions, left_t, right_t);
+ //return;
+
+ // a constant bezier, even if identically zero, has no roots
+ if (bz.isConstant()) {
+ return;
+ }
+
+ while(bz[0] == 0) {
+ debug(std::cout << "deflate\n");
+ bz = bz.deflate();
+ solutions.push_back(0);
+ }
+ if (bz.degree() == 1) {
+ debug(std::cout << "linear\n");
+
+ if (SGN(bz[0]) != SGN(bz[1])) {
+ double d = bz[0] - bz[1];
+ if(d != 0) {
+ double r = bz[0] / d;
+ if(0 <= r && r <= 1)
+ solutions.push_back(r);
+ }
+ }
+ return;
+ }
+
+ //std::cout << "initial = " << bz << std::endl;
+ Bernsteins B(solutions);
+ B.find_bernstein_roots(bz, 0, left_t, right_t);
+ //std::cout << solutions << std::endl;
+}
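+
+// Illustrative usage (a sketch, for some Geom::Bezier b): collect the roots in [0, 1].
+//
+//   std::vector<double> roots;
+//   b.find_bezier_roots(roots, 0, 1);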
+
+void Bernsteins::find_bernstein_roots(Bezier const &bz,
+ unsigned depth,
+ double left_t,
+ double right_t)
+{
+ debug(std::cout << left_t << ", " << right_t << std::endl);
+ size_t n_crossings = 0;
+
+ int old_sign = SGN(bz[0]);
+ //std::cout << "w[0] = " << bz[0] << std::endl;
+ for (size_t i = 1; i < bz.size(); i++)
+ {
+ //std::cout << "w[" << i << "] = " << w[i] << std::endl;
+ int sign = SGN(bz[i]);
+ if (sign != 0)
+ {
+ if (sign != old_sign && old_sign != 0)
+ {
+ ++n_crossings;
+ }
+ old_sign = sign;
+ }
+ }
+ // if last control point is zero, that counts as crossing too
+ if (SGN(bz[bz.size()-1]) == 0) {
+ ++n_crossings;
+ }
+
+ //std::cout << "n_crossings = " << n_crossings << std::endl;
+ if (n_crossings == 0) return; // no solutions here
+
+ if (n_crossings == 1) /* Unique solution */
+ {
+ //std::cout << "depth = " << depth << std::endl;
+ /* Stop recursion when the tree is deep enough */
+ /* if deep enough, return 1 solution at midpoint */
+ if (depth > MAX_DEPTH)
+ {
+ //printf("bottom out %d\n", depth);
+ const double Ax = right_t - left_t;
+ const double Ay = bz.at1() - bz.at0();
+
+ solutions.push_back(left_t - Ax*bz.at0() / Ay);
+ return;
+ }
+
+ double r = secant(bz);
+ solutions.push_back(r*right_t + (1-r)*left_t);
+ return;
+ }
+ /* Otherwise, solve recursively after subdividing control polygon */
+ Bezier::Order o(bz);
+ Bezier Left(o), Right = bz;
+ double split_t = (left_t + right_t) * 0.5;
+
+ // If subdivision is working poorly, split around the leftmost root of the derivative
+ if (depth > 2) {
+ debug(std::cout << "derivative mode\n");
+ Bezier dbz = derivative(bz);
+
+ debug(std::cout << "initial = " << dbz << std::endl);
+ std::vector<double> dsolutions = dbz.roots(Interval(left_t, right_t));
+ debug(std::cout << "dsolutions = " << dsolutions << std::endl);
+
+ double dsplit_t = 0.5;
+ if(!dsolutions.empty()) {
+ dsplit_t = dsolutions[0];
+ split_t = left_t + (right_t - left_t)*dsplit_t;
+ debug(std::cout << "split_value = " << bz(split_t) << std::endl);
+ debug(std::cout << "splitting around " << dsplit_t << " = "
+ << split_t << "\n");
+
+ }
+ std::pair<Bezier, Bezier> LR = bz.subdivide(dsplit_t);
+ Left = LR.first;
+ Right = LR.second;
+ } else {
+ // split at midpoint, because it is cheap
+ Left[0] = Right[0];
+ for (size_t i = 1; i < bz.size(); ++i)
+ {
+ for (size_t j = 0; j < bz.size()-i; ++j)
+ {
+ Right[j] = (Right[j] + Right[j+1]) * 0.5;
+ }
+ Left[i] = Right[0];
+ }
+ }
+ debug(std::cout << "Solution is exactly on the subdivision point.\n");
+ debug(std::cout << Left << " , " << Right << std::endl);
+ Left = reverse(Left);
+ while(Right.order() > 0 && fabs(Right[0]) <= 1e-10) {
+ debug(std::cout << "deflate\n");
+ Right = Right.deflate();
+ Left = Left.deflate();
+ solutions.push_back(split_t);
+ }
+ Left = reverse(Left);
+ if (Right.order() > 0) {
+ debug(std::cout << Left << " , " << Right << std::endl);
+ find_bernstein_roots(Left, depth+1, left_t, split_t);
+ find_bernstein_roots(Right, depth+1, split_t, right_t);
+ }
+}
+
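+// The routine below is the Illinois variant of the false-position (regula falsi)
+// method: when the same endpoint is retained twice in a row, that endpoint's stored
+// function value is halved, which avoids the slow one-sided convergence of plain
+// regula falsi. It assumes bz.at0() and bz.at1() bracket a sign change.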
+double Bernsteins::secant(Bezier const &bz) {
+ double s = 0, t = 1;
+ double e = 1e-14;
+ int side = 0;
+ double r, fs = bz.at0(), ft = bz.at1();
+
+ for (size_t n = 0; n < 100; ++n)
+ {
+ r = (fs*t - ft*s) / (fs - ft);
+ if (fabs(t-s) < e * fabs(t+s)) {
+ debug(std::cout << "error small " << fabs(t-s)
+ << ", accepting solution " << r
+ << "after " << n << "iterations\n");
+ return r;
+ }
+
+ double fr = bz.valueAt(r);
+
+ if (fr * ft > 0)
+ {
+ t = r; ft = fr;
+ if (side == -1) fs /= 2;
+ side = -1;
+ }
+ else if (fs * fr > 0)
+ {
+ s = r; fs = fr;
+ if (side == +1) ft /= 2;
+ side = +1;
+ }
+ else break;
+ }
+ return r;
+}
+
+} // namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
diff --git a/src/2geom/svg-path-parser.cpp b/src/2geom/svg-path-parser.cpp
new file mode 100644
index 0000000..6a1cb15
--- /dev/null
+++ b/src/2geom/svg-path-parser.cpp
@@ -0,0 +1,1615 @@
+
+#line 1 "svg-path-parser.rl"
+/**
+ * \file
+ * \brief parse SVG path specifications
+ *
+ * Copyright 2007 MenTaLguY <mental@rydia.net>
+ * Copyright 2007 Aaron Spike <aaron@ekips.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ */
+
+#include <cstdio>
+#include <cstring>
+#include <cmath>
+#include <vector>
+#include <glib.h>
+
+#include <2geom/point.h>
+#include <2geom/svg-path-parser.h>
+#include <2geom/angle.h>
+
+namespace Geom {
+
+
+#line 48 "svg-path-parser.cpp"
+static const char _svg_path_actions[] = {
+ 0, 1, 0, 1, 1, 1, 2, 1,
+ 3, 1, 4, 1, 5, 1, 15, 2,
+ 1, 0, 2, 1, 6, 2, 1, 7,
+ 2, 1, 8, 2, 1, 9, 2, 1,
+ 10, 2, 1, 11, 2, 1, 12, 2,
+ 1, 13, 2, 1, 14, 2, 2, 0,
+ 2, 3, 0, 2, 4, 0, 2, 5,
+ 0, 3, 1, 6, 0, 3, 1, 7,
+ 0, 3, 1, 8, 0, 3, 1, 9,
+ 0, 3, 1, 10, 0, 3, 1, 11,
+ 0, 3, 1, 12, 0, 3, 1, 13,
+ 0, 3, 1, 14, 0
+};
+
+static const short _svg_path_key_offsets[] = {
+ 0, 0, 9, 18, 21, 23, 35, 45,
+ 48, 50, 53, 55, 67, 77, 80, 82,
+ 91, 103, 112, 121, 130, 133, 135, 147,
+ 157, 160, 162, 174, 184, 187, 189, 198,
+ 205, 211, 218, 225, 231, 241, 251, 254,
+ 256, 268, 278, 281, 283, 295, 304, 316,
+ 325, 335, 339, 341, 348, 352, 354, 364,
+ 368, 370, 380, 389, 398, 401, 403, 415,
+ 425, 428, 430, 442, 452, 455, 457, 469,
+ 479, 482, 484, 496, 506, 509, 511, 523,
+ 533, 536, 538, 550, 559, 571, 580, 592,
+ 601, 613, 622, 634, 643, 647, 649, 658,
+ 667, 670, 672, 676, 678, 687, 696, 705,
+ 708, 710, 722, 732, 735, 737, 749, 759,
+ 762, 764, 776, 786, 789, 791, 803, 812,
+ 824, 833, 845, 854, 858, 860, 869, 878,
+ 881, 883, 895, 905, 908, 910, 922, 932,
+ 935, 937, 949, 959, 962, 964, 976, 985,
+ 997, 1006, 1018, 1027, 1031, 1033, 1042, 1051,
+ 1054, 1056, 1068, 1078, 1081, 1083, 1095, 1104,
+ 1108, 1110, 1119, 1128, 1131, 1133, 1137, 1139,
+ 1148, 1157, 1166, 1175, 1184, 1196, 1205, 1209,
+ 1211, 1220, 1229, 1238, 1247, 1251, 1253, 1263,
+ 1267, 1269, 1279, 1283, 1285, 1295, 1299, 1301,
+ 1311, 1315, 1317, 1327, 1331, 1333, 1343, 1347,
+ 1349, 1359, 1363, 1365, 1375, 1379, 1381, 1391,
+ 1395, 1397, 1407, 1411, 1413, 1423, 1427, 1429,
+ 1439, 1443, 1445, 1455, 1459, 1461, 1470, 1474,
+ 1476, 1486, 1498, 1507, 1517, 1524, 1528, 1530,
+ 1534, 1536, 1546, 1552, 1584, 1614, 1646, 1678,
+ 1710, 1740, 1772, 1802, 1834, 1864, 1896, 1926,
+ 1958, 1988, 2020, 2050, 2082, 2112, 2144, 2174,
+ 2206, 2236, 2268, 2298, 2330, 2360, 2392, 2422,
+ 2454, 2484, 2508, 2532, 2564, 2594, 2624, 2656
+};
+
+static const char _svg_path_trans_keys[] = {
+ 13, 32, 43, 45, 46, 9, 10, 48,
+ 57, 13, 32, 43, 45, 46, 9, 10,
+ 48, 57, 46, 48, 57, 48, 57, 13,
+ 32, 44, 46, 69, 101, 9, 10, 43,
+ 45, 48, 57, 13, 32, 44, 46, 9,
+ 10, 43, 45, 48, 57, 46, 48, 57,
+ 48, 57, 46, 48, 57, 48, 57, 13,
+ 32, 44, 46, 69, 101, 9, 10, 43,
+ 45, 48, 57, 13, 32, 44, 46, 9,
+ 10, 43, 45, 48, 57, 46, 48, 57,
+ 48, 57, 13, 32, 43, 45, 46, 9,
+ 10, 48, 57, 13, 32, 44, 46, 69,
+ 101, 9, 10, 43, 45, 48, 57, 13,
+ 32, 43, 45, 46, 9, 10, 48, 57,
+ 13, 32, 43, 45, 46, 9, 10, 48,
+ 57, 13, 32, 43, 45, 46, 9, 10,
+ 48, 57, 46, 48, 57, 48, 57, 13,
+ 32, 44, 46, 69, 101, 9, 10, 43,
+ 45, 48, 57, 13, 32, 44, 46, 9,
+ 10, 43, 45, 48, 57, 46, 48, 57,
+ 48, 57, 13, 32, 44, 46, 69, 101,
+ 9, 10, 43, 45, 48, 57, 13, 32,
+ 44, 46, 9, 10, 43, 45, 48, 57,
+ 46, 48, 57, 48, 57, 13, 32, 44,
+ 69, 101, 9, 10, 48, 57, 13, 32,
+ 44, 48, 49, 9, 10, 13, 32, 48,
+ 49, 9, 10, 13, 32, 44, 48, 49,
+ 9, 10, 13, 32, 44, 48, 49, 9,
+ 10, 13, 32, 48, 49, 9, 10, 13,
+ 32, 44, 46, 9, 10, 43, 45, 48,
+ 57, 13, 32, 44, 46, 9, 10, 43,
+ 45, 48, 57, 46, 48, 57, 48, 57,
+ 13, 32, 44, 46, 69, 101, 9, 10,
+ 43, 45, 48, 57, 13, 32, 44, 46,
+ 9, 10, 43, 45, 48, 57, 46, 48,
+ 57, 48, 57, 13, 32, 44, 46, 69,
+ 101, 9, 10, 43, 45, 48, 57, 13,
+ 32, 43, 45, 46, 9, 10, 48, 57,
+ 13, 32, 44, 46, 69, 101, 9, 10,
+ 43, 45, 48, 57, 13, 32, 43, 45,
+ 46, 9, 10, 48, 57, 13, 32, 44,
+ 46, 69, 101, 9, 10, 48, 57, 43,
+ 45, 48, 57, 48, 57, 13, 32, 44,
+ 9, 10, 48, 57, 43, 45, 48, 57,
+ 48, 57, 13, 32, 44, 46, 9, 10,
+ 43, 45, 48, 57, 43, 45, 48, 57,
+ 48, 57, 13, 32, 44, 46, 9, 10,
+ 43, 45, 48, 57, 13, 32, 43, 45,
+ 46, 9, 10, 48, 57, 13, 32, 43,
+ 45, 46, 9, 10, 48, 57, 46, 48,
+ 57, 48, 57, 13, 32, 44, 46, 69,
+ 101, 9, 10, 43, 45, 48, 57, 13,
+ 32, 44, 46, 9, 10, 43, 45, 48,
+ 57, 46, 48, 57, 48, 57, 13, 32,
+ 44, 46, 69, 101, 9, 10, 43, 45,
+ 48, 57, 13, 32, 44, 46, 9, 10,
+ 43, 45, 48, 57, 46, 48, 57, 48,
+ 57, 13, 32, 44, 46, 69, 101, 9,
+ 10, 43, 45, 48, 57, 13, 32, 44,
+ 46, 9, 10, 43, 45, 48, 57, 46,
+ 48, 57, 48, 57, 13, 32, 44, 46,
+ 69, 101, 9, 10, 43, 45, 48, 57,
+ 13, 32, 44, 46, 9, 10, 43, 45,
+ 48, 57, 46, 48, 57, 48, 57, 13,
+ 32, 44, 46, 69, 101, 9, 10, 43,
+ 45, 48, 57, 13, 32, 44, 46, 9,
+ 10, 43, 45, 48, 57, 46, 48, 57,
+ 48, 57, 13, 32, 44, 46, 69, 101,
+ 9, 10, 43, 45, 48, 57, 13, 32,
+ 43, 45, 46, 9, 10, 48, 57, 13,
+ 32, 44, 46, 69, 101, 9, 10, 43,
+ 45, 48, 57, 13, 32, 43, 45, 46,
+ 9, 10, 48, 57, 13, 32, 44, 46,
+ 69, 101, 9, 10, 43, 45, 48, 57,
+ 13, 32, 43, 45, 46, 9, 10, 48,
+ 57, 13, 32, 44, 46, 69, 101, 9,
+ 10, 43, 45, 48, 57, 13, 32, 43,
+ 45, 46, 9, 10, 48, 57, 13, 32,
+ 44, 46, 69, 101, 9, 10, 43, 45,
+ 48, 57, 13, 32, 43, 45, 46, 9,
+ 10, 48, 57, 43, 45, 48, 57, 48,
+ 57, 13, 32, 43, 45, 46, 9, 10,
+ 48, 57, 13, 32, 43, 45, 46, 9,
+ 10, 48, 57, 46, 48, 57, 48, 57,
+ 43, 45, 48, 57, 48, 57, 13, 32,
+ 43, 45, 46, 9, 10, 48, 57, 13,
+ 32, 43, 45, 46, 9, 10, 48, 57,
+ 13, 32, 43, 45, 46, 9, 10, 48,
+ 57, 46, 48, 57, 48, 57, 13, 32,
+ 44, 46, 69, 101, 9, 10, 43, 45,
+ 48, 57, 13, 32, 44, 46, 9, 10,
+ 43, 45, 48, 57, 46, 48, 57, 48,
+ 57, 13, 32, 44, 46, 69, 101, 9,
+ 10, 43, 45, 48, 57, 13, 32, 44,
+ 46, 9, 10, 43, 45, 48, 57, 46,
+ 48, 57, 48, 57, 13, 32, 44, 46,
+ 69, 101, 9, 10, 43, 45, 48, 57,
+ 13, 32, 44, 46, 9, 10, 43, 45,
+ 48, 57, 46, 48, 57, 48, 57, 13,
+ 32, 44, 46, 69, 101, 9, 10, 43,
+ 45, 48, 57, 13, 32, 43, 45, 46,
+ 9, 10, 48, 57, 13, 32, 44, 46,
+ 69, 101, 9, 10, 43, 45, 48, 57,
+ 13, 32, 43, 45, 46, 9, 10, 48,
+ 57, 13, 32, 44, 46, 69, 101, 9,
+ 10, 43, 45, 48, 57, 13, 32, 43,
+ 45, 46, 9, 10, 48, 57, 43, 45,
+ 48, 57, 48, 57, 13, 32, 43, 45,
+ 46, 9, 10, 48, 57, 13, 32, 43,
+ 45, 46, 9, 10, 48, 57, 46, 48,
+ 57, 48, 57, 13, 32, 44, 46, 69,
+ 101, 9, 10, 43, 45, 48, 57, 13,
+ 32, 44, 46, 9, 10, 43, 45, 48,
+ 57, 46, 48, 57, 48, 57, 13, 32,
+ 44, 46, 69, 101, 9, 10, 43, 45,
+ 48, 57, 13, 32, 44, 46, 9, 10,
+ 43, 45, 48, 57, 46, 48, 57, 48,
+ 57, 13, 32, 44, 46, 69, 101, 9,
+ 10, 43, 45, 48, 57, 13, 32, 44,
+ 46, 9, 10, 43, 45, 48, 57, 46,
+ 48, 57, 48, 57, 13, 32, 44, 46,
+ 69, 101, 9, 10, 43, 45, 48, 57,
+ 13, 32, 43, 45, 46, 9, 10, 48,
+ 57, 13, 32, 44, 46, 69, 101, 9,
+ 10, 43, 45, 48, 57, 13, 32, 43,
+ 45, 46, 9, 10, 48, 57, 13, 32,
+ 44, 46, 69, 101, 9, 10, 43, 45,
+ 48, 57, 13, 32, 43, 45, 46, 9,
+ 10, 48, 57, 43, 45, 48, 57, 48,
+ 57, 13, 32, 43, 45, 46, 9, 10,
+ 48, 57, 13, 32, 43, 45, 46, 9,
+ 10, 48, 57, 46, 48, 57, 48, 57,
+ 13, 32, 44, 46, 69, 101, 9, 10,
+ 43, 45, 48, 57, 13, 32, 44, 46,
+ 9, 10, 43, 45, 48, 57, 46, 48,
+ 57, 48, 57, 13, 32, 44, 46, 69,
+ 101, 9, 10, 43, 45, 48, 57, 13,
+ 32, 43, 45, 46, 9, 10, 48, 57,
+ 43, 45, 48, 57, 48, 57, 13, 32,
+ 43, 45, 46, 9, 10, 48, 57, 13,
+ 32, 43, 45, 46, 9, 10, 48, 57,
+ 46, 48, 57, 48, 57, 43, 45, 48,
+ 57, 48, 57, 13, 32, 43, 45, 46,
+ 9, 10, 48, 57, 13, 32, 43, 45,
+ 46, 9, 10, 48, 57, 13, 32, 43,
+ 45, 46, 9, 10, 48, 57, 13, 32,
+ 43, 45, 46, 9, 10, 48, 57, 13,
+ 32, 43, 45, 46, 9, 10, 48, 57,
+ 13, 32, 44, 46, 69, 101, 9, 10,
+ 43, 45, 48, 57, 13, 32, 43, 45,
+ 46, 9, 10, 48, 57, 43, 45, 48,
+ 57, 48, 57, 13, 32, 43, 45, 46,
+ 9, 10, 48, 57, 13, 32, 43, 45,
+ 46, 9, 10, 48, 57, 13, 32, 43,
+ 45, 46, 9, 10, 48, 57, 13, 32,
+ 43, 45, 46, 9, 10, 48, 57, 43,
+ 45, 48, 57, 48, 57, 13, 32, 44,
+ 46, 9, 10, 43, 45, 48, 57, 43,
+ 45, 48, 57, 48, 57, 13, 32, 44,
+ 46, 9, 10, 43, 45, 48, 57, 43,
+ 45, 48, 57, 48, 57, 13, 32, 44,
+ 46, 9, 10, 43, 45, 48, 57, 43,
+ 45, 48, 57, 48, 57, 13, 32, 44,
+ 46, 9, 10, 43, 45, 48, 57, 43,
+ 45, 48, 57, 48, 57, 13, 32, 44,
+ 46, 9, 10, 43, 45, 48, 57, 43,
+ 45, 48, 57, 48, 57, 13, 32, 44,
+ 46, 9, 10, 43, 45, 48, 57, 43,
+ 45, 48, 57, 48, 57, 13, 32, 44,
+ 46, 9, 10, 43, 45, 48, 57, 43,
+ 45, 48, 57, 48, 57, 13, 32, 44,
+ 46, 9, 10, 43, 45, 48, 57, 43,
+ 45, 48, 57, 48, 57, 13, 32, 44,
+ 46, 9, 10, 43, 45, 48, 57, 43,
+ 45, 48, 57, 48, 57, 13, 32, 44,
+ 46, 9, 10, 43, 45, 48, 57, 43,
+ 45, 48, 57, 48, 57, 13, 32, 44,
+ 46, 9, 10, 43, 45, 48, 57, 43,
+ 45, 48, 57, 48, 57, 13, 32, 44,
+ 46, 9, 10, 43, 45, 48, 57, 43,
+ 45, 48, 57, 48, 57, 13, 32, 44,
+ 46, 9, 10, 43, 45, 48, 57, 43,
+ 45, 48, 57, 48, 57, 13, 32, 43,
+ 45, 46, 9, 10, 48, 57, 43, 45,
+ 48, 57, 48, 57, 13, 32, 44, 46,
+ 9, 10, 43, 45, 48, 57, 13, 32,
+ 44, 46, 69, 101, 9, 10, 43, 45,
+ 48, 57, 13, 32, 43, 45, 46, 9,
+ 10, 48, 57, 13, 32, 44, 46, 9,
+ 10, 43, 45, 48, 57, 13, 32, 44,
+ 48, 49, 9, 10, 43, 45, 48, 57,
+ 48, 57, 43, 45, 48, 57, 48, 57,
+ 13, 32, 44, 46, 9, 10, 43, 45,
+ 48, 57, 13, 32, 77, 109, 9, 10,
+ 13, 32, 44, 46, 65, 67, 69, 72,
+ 76, 77, 81, 83, 84, 86, 90, 97,
+ 99, 101, 104, 108, 109, 113, 115, 116,
+ 118, 122, 9, 10, 43, 45, 48, 57,
+ 13, 32, 44, 46, 65, 67, 72, 76,
+ 77, 81, 83, 84, 86, 90, 97, 99,
+ 104, 108, 109, 113, 115, 116, 118, 122,
+ 9, 10, 43, 45, 48, 57, 13, 32,
+ 44, 46, 65, 67, 69, 72, 76, 77,
+ 81, 83, 84, 86, 90, 97, 99, 101,
+ 104, 108, 109, 113, 115, 116, 118, 122,
+ 9, 10, 43, 45, 48, 57, 13, 32,
+ 44, 46, 65, 67, 69, 72, 76, 77,
+ 81, 83, 84, 86, 90, 97, 99, 101,
+ 104, 108, 109, 113, 115, 116, 118, 122,
+ 9, 10, 43, 45, 48, 57, 13, 32,
+ 44, 46, 65, 67, 69, 72, 76, 77,
+ 81, 83, 84, 86, 90, 97, 99, 101,
+ 104, 108, 109, 113, 115, 116, 118, 122,
+ 9, 10, 43, 45, 48, 57, 13, 32,
+ 44, 46, 65, 67, 72, 76, 77, 81,
+ 83, 84, 86, 90, 97, 99, 104, 108,
+ 109, 113, 115, 116, 118, 122, 9, 10,
+ 43, 45, 48, 57, 13, 32, 44, 46,
+ 65, 67, 69, 72, 76, 77, 81, 83,
+ 84, 86, 90, 97, 99, 101, 104, 108,
+ 109, 113, 115, 116, 118, 122, 9, 10,
+ 43, 45, 48, 57, 13, 32, 44, 46,
+ 65, 67, 72, 76, 77, 81, 83, 84,
+ 86, 90, 97, 99, 104, 108, 109, 113,
+ 115, 116, 118, 122, 9, 10, 43, 45,
+ 48, 57, 13, 32, 44, 46, 65, 67,
+ 69, 72, 76, 77, 81, 83, 84, 86,
+ 90, 97, 99, 101, 104, 108, 109, 113,
+ 115, 116, 118, 122, 9, 10, 43, 45,
+ 48, 57, 13, 32, 44, 46, 65, 67,
+ 72, 76, 77, 81, 83, 84, 86, 90,
+ 97, 99, 104, 108, 109, 113, 115, 116,
+ 118, 122, 9, 10, 43, 45, 48, 57,
+ 13, 32, 44, 46, 65, 67, 69, 72,
+ 76, 77, 81, 83, 84, 86, 90, 97,
+ 99, 101, 104, 108, 109, 113, 115, 116,
+ 118, 122, 9, 10, 43, 45, 48, 57,
+ 13, 32, 44, 46, 65, 67, 72, 76,
+ 77, 81, 83, 84, 86, 90, 97, 99,
+ 104, 108, 109, 113, 115, 116, 118, 122,
+ 9, 10, 43, 45, 48, 57, 13, 32,
+ 44, 46, 65, 67, 69, 72, 76, 77,
+ 81, 83, 84, 86, 90, 97, 99, 101,
+ 104, 108, 109, 113, 115, 116, 118, 122,
+ 9, 10, 43, 45, 48, 57, 13, 32,
+ 44, 46, 65, 67, 72, 76, 77, 81,
+ 83, 84, 86, 90, 97, 99, 104, 108,
+ 109, 113, 115, 116, 118, 122, 9, 10,
+ 43, 45, 48, 57, 13, 32, 44, 46,
+ 65, 67, 69, 72, 76, 77, 81, 83,
+ 84, 86, 90, 97, 99, 101, 104, 108,
+ 109, 113, 115, 116, 118, 122, 9, 10,
+ 43, 45, 48, 57, 13, 32, 44, 46,
+ 65, 67, 72, 76, 77, 81, 83, 84,
+ 86, 90, 97, 99, 104, 108, 109, 113,
+ 115, 116, 118, 122, 9, 10, 43, 45,
+ 48, 57, 13, 32, 44, 46, 65, 67,
+ 69, 72, 76, 77, 81, 83, 84, 86,
+ 90, 97, 99, 101, 104, 108, 109, 113,
+ 115, 116, 118, 122, 9, 10, 43, 45,
+ 48, 57, 13, 32, 44, 46, 65, 67,
+ 72, 76, 77, 81, 83, 84, 86, 90,
+ 97, 99, 104, 108, 109, 113, 115, 116,
+ 118, 122, 9, 10, 43, 45, 48, 57,
+ 13, 32, 44, 46, 65, 67, 69, 72,
+ 76, 77, 81, 83, 84, 86, 90, 97,
+ 99, 101, 104, 108, 109, 113, 115, 116,
+ 118, 122, 9, 10, 43, 45, 48, 57,
+ 13, 32, 44, 46, 65, 67, 72, 76,
+ 77, 81, 83, 84, 86, 90, 97, 99,
+ 104, 108, 109, 113, 115, 116, 118, 122,
+ 9, 10, 43, 45, 48, 57, 13, 32,
+ 44, 46, 65, 67, 69, 72, 76, 77,
+ 81, 83, 84, 86, 90, 97, 99, 101,
+ 104, 108, 109, 113, 115, 116, 118, 122,
+ 9, 10, 43, 45, 48, 57, 13, 32,
+ 44, 46, 65, 67, 72, 76, 77, 81,
+ 83, 84, 86, 90, 97, 99, 104, 108,
+ 109, 113, 115, 116, 118, 122, 9, 10,
+ 43, 45, 48, 57, 13, 32, 44, 46,
+ 65, 67, 69, 72, 76, 77, 81, 83,
+ 84, 86, 90, 97, 99, 101, 104, 108,
+ 109, 113, 115, 116, 118, 122, 9, 10,
+ 43, 45, 48, 57, 13, 32, 44, 46,
+ 65, 67, 72, 76, 77, 81, 83, 84,
+ 86, 90, 97, 99, 104, 108, 109, 113,
+ 115, 116, 118, 122, 9, 10, 43, 45,
+ 48, 57, 13, 32, 44, 46, 65, 67,
+ 69, 72, 76, 77, 81, 83, 84, 86,
+ 90, 97, 99, 101, 104, 108, 109, 113,
+ 115, 116, 118, 122, 9, 10, 43, 45,
+ 48, 57, 13, 32, 44, 46, 65, 67,
+ 72, 76, 77, 81, 83, 84, 86, 90,
+ 97, 99, 104, 108, 109, 113, 115, 116,
+ 118, 122, 9, 10, 43, 45, 48, 57,
+ 13, 32, 44, 46, 65, 67, 69, 72,
+ 76, 77, 81, 83, 84, 86, 90, 97,
+ 99, 101, 104, 108, 109, 113, 115, 116,
+ 118, 122, 9, 10, 43, 45, 48, 57,
+ 13, 32, 44, 46, 65, 67, 72, 76,
+ 77, 81, 83, 84, 86, 90, 97, 99,
+ 104, 108, 109, 113, 115, 116, 118, 122,
+ 9, 10, 43, 45, 48, 57, 13, 32,
+ 44, 46, 65, 67, 69, 72, 76, 77,
+ 81, 83, 84, 86, 90, 97, 99, 101,
+ 104, 108, 109, 113, 115, 116, 118, 122,
+ 9, 10, 43, 45, 48, 57, 13, 32,
+ 44, 46, 65, 67, 72, 76, 77, 81,
+ 83, 84, 86, 90, 97, 99, 104, 108,
+ 109, 113, 115, 116, 118, 122, 9, 10,
+ 43, 45, 48, 57, 13, 32, 65, 67,
+ 72, 76, 77, 81, 83, 84, 86, 90,
+ 97, 99, 104, 108, 109, 113, 115, 116,
+ 118, 122, 9, 10, 13, 32, 65, 67,
+ 72, 76, 77, 81, 83, 84, 86, 90,
+ 97, 99, 104, 108, 109, 113, 115, 116,
+ 118, 122, 9, 10, 13, 32, 44, 46,
+ 65, 67, 69, 72, 76, 77, 81, 83,
+ 84, 86, 90, 97, 99, 101, 104, 108,
+ 109, 113, 115, 116, 118, 122, 9, 10,
+ 43, 45, 48, 57, 13, 32, 44, 46,
+ 65, 67, 72, 76, 77, 81, 83, 84,
+ 86, 90, 97, 99, 104, 108, 109, 113,
+ 115, 116, 118, 122, 9, 10, 43, 45,
+ 48, 57, 13, 32, 44, 46, 65, 67,
+ 72, 76, 77, 81, 83, 84, 86, 90,
+ 97, 99, 104, 108, 109, 113, 115, 116,
+ 118, 122, 9, 10, 43, 45, 48, 57,
+ 13, 32, 44, 46, 65, 67, 69, 72,
+ 76, 77, 81, 83, 84, 86, 90, 97,
+ 99, 101, 104, 108, 109, 113, 115, 116,
+ 118, 122, 9, 10, 43, 45, 48, 57,
+ 13, 32, 44, 46, 65, 67, 72, 76,
+ 77, 81, 83, 84, 86, 90, 97, 99,
+ 104, 108, 109, 113, 115, 116, 118, 122,
+ 9, 10, 43, 45, 48, 57, 0
+};
+
+static const char _svg_path_single_lengths[] = {
+ 0, 5, 5, 1, 0, 6, 4, 1,
+ 0, 1, 0, 6, 4, 1, 0, 5,
+ 6, 5, 5, 5, 1, 0, 6, 4,
+ 1, 0, 6, 4, 1, 0, 5, 5,
+ 4, 5, 5, 4, 4, 4, 1, 0,
+ 6, 4, 1, 0, 6, 5, 6, 5,
+ 6, 2, 0, 3, 2, 0, 4, 2,
+ 0, 4, 5, 5, 1, 0, 6, 4,
+ 1, 0, 6, 4, 1, 0, 6, 4,
+ 1, 0, 6, 4, 1, 0, 6, 4,
+ 1, 0, 6, 5, 6, 5, 6, 5,
+ 6, 5, 6, 5, 2, 0, 5, 5,
+ 1, 0, 2, 0, 5, 5, 5, 1,
+ 0, 6, 4, 1, 0, 6, 4, 1,
+ 0, 6, 4, 1, 0, 6, 5, 6,
+ 5, 6, 5, 2, 0, 5, 5, 1,
+ 0, 6, 4, 1, 0, 6, 4, 1,
+ 0, 6, 4, 1, 0, 6, 5, 6,
+ 5, 6, 5, 2, 0, 5, 5, 1,
+ 0, 6, 4, 1, 0, 6, 5, 2,
+ 0, 5, 5, 1, 0, 2, 0, 5,
+ 5, 5, 5, 5, 6, 5, 2, 0,
+ 5, 5, 5, 5, 2, 0, 4, 2,
+ 0, 4, 2, 0, 4, 2, 0, 4,
+ 2, 0, 4, 2, 0, 4, 2, 0,
+ 4, 2, 0, 4, 2, 0, 4, 2,
+ 0, 4, 2, 0, 4, 2, 0, 4,
+ 2, 0, 4, 2, 0, 5, 2, 0,
+ 4, 6, 5, 4, 5, 2, 0, 2,
+ 0, 4, 4, 26, 24, 26, 26, 26,
+ 24, 26, 24, 26, 24, 26, 24, 26,
+ 24, 26, 24, 26, 24, 26, 24, 26,
+ 24, 26, 24, 26, 24, 26, 24, 26,
+ 24, 22, 22, 26, 24, 24, 26, 24
+};
+
+static const char _svg_path_range_lengths[] = {
+ 0, 2, 2, 1, 1, 3, 3, 1,
+ 1, 1, 1, 3, 3, 1, 1, 2,
+ 3, 2, 2, 2, 1, 1, 3, 3,
+ 1, 1, 3, 3, 1, 1, 2, 1,
+ 1, 1, 1, 1, 3, 3, 1, 1,
+ 3, 3, 1, 1, 3, 2, 3, 2,
+ 2, 1, 1, 2, 1, 1, 3, 1,
+ 1, 3, 2, 2, 1, 1, 3, 3,
+ 1, 1, 3, 3, 1, 1, 3, 3,
+ 1, 1, 3, 3, 1, 1, 3, 3,
+ 1, 1, 3, 2, 3, 2, 3, 2,
+ 3, 2, 3, 2, 1, 1, 2, 2,
+ 1, 1, 1, 1, 2, 2, 2, 1,
+ 1, 3, 3, 1, 1, 3, 3, 1,
+ 1, 3, 3, 1, 1, 3, 2, 3,
+ 2, 3, 2, 1, 1, 2, 2, 1,
+ 1, 3, 3, 1, 1, 3, 3, 1,
+ 1, 3, 3, 1, 1, 3, 2, 3,
+ 2, 3, 2, 1, 1, 2, 2, 1,
+ 1, 3, 3, 1, 1, 3, 2, 1,
+ 1, 2, 2, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 3, 2, 1, 1,
+ 2, 2, 2, 2, 1, 1, 3, 1,
+ 1, 3, 1, 1, 3, 1, 1, 3,
+ 1, 1, 3, 1, 1, 3, 1, 1,
+ 3, 1, 1, 3, 1, 1, 3, 1,
+ 1, 3, 1, 1, 3, 1, 1, 3,
+ 1, 1, 3, 1, 1, 2, 1, 1,
+ 3, 3, 2, 3, 1, 1, 1, 1,
+ 1, 3, 1, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 1, 1, 3, 3, 3, 3, 3
+};
+
+static const short _svg_path_index_offsets[] = {
+ 0, 0, 8, 16, 19, 21, 31, 39,
+ 42, 44, 47, 49, 59, 67, 70, 72,
+ 80, 90, 98, 106, 114, 117, 119, 129,
+ 137, 140, 142, 152, 160, 163, 165, 173,
+ 180, 186, 193, 200, 206, 214, 222, 225,
+ 227, 237, 245, 248, 250, 260, 268, 278,
+ 286, 295, 299, 301, 307, 311, 313, 321,
+ 325, 327, 335, 343, 351, 354, 356, 366,
+ 374, 377, 379, 389, 397, 400, 402, 412,
+ 420, 423, 425, 435, 443, 446, 448, 458,
+ 466, 469, 471, 481, 489, 499, 507, 517,
+ 525, 535, 543, 553, 561, 565, 567, 575,
+ 583, 586, 588, 592, 594, 602, 610, 618,
+ 621, 623, 633, 641, 644, 646, 656, 664,
+ 667, 669, 679, 687, 690, 692, 702, 710,
+ 720, 728, 738, 746, 750, 752, 760, 768,
+ 771, 773, 783, 791, 794, 796, 806, 814,
+ 817, 819, 829, 837, 840, 842, 852, 860,
+ 870, 878, 888, 896, 900, 902, 910, 918,
+ 921, 923, 933, 941, 944, 946, 956, 964,
+ 968, 970, 978, 986, 989, 991, 995, 997,
+ 1005, 1013, 1021, 1029, 1037, 1047, 1055, 1059,
+ 1061, 1069, 1077, 1085, 1093, 1097, 1099, 1107,
+ 1111, 1113, 1121, 1125, 1127, 1135, 1139, 1141,
+ 1149, 1153, 1155, 1163, 1167, 1169, 1177, 1181,
+ 1183, 1191, 1195, 1197, 1205, 1209, 1211, 1219,
+ 1223, 1225, 1233, 1237, 1239, 1247, 1251, 1253,
+ 1261, 1265, 1267, 1275, 1279, 1281, 1289, 1293,
+ 1295, 1303, 1313, 1321, 1329, 1336, 1340, 1342,
+ 1346, 1348, 1356, 1362, 1392, 1420, 1450, 1480,
+ 1510, 1538, 1568, 1596, 1626, 1654, 1684, 1712,
+ 1742, 1770, 1800, 1828, 1858, 1886, 1916, 1944,
+ 1974, 2002, 2032, 2060, 2090, 2118, 2148, 2176,
+ 2206, 2234, 2258, 2282, 2312, 2340, 2368, 2398
+};
+
+static const short _svg_path_indicies[] = {
+ 0, 0, 2, 2, 3, 0, 4, 1,
+ 5, 5, 6, 6, 7, 5, 8, 1,
+ 9, 10, 1, 11, 1, 12, 12, 14,
+ 15, 16, 16, 12, 13, 11, 1, 17,
+ 17, 19, 20, 17, 18, 21, 1, 22,
+ 23, 1, 24, 1, 25, 26, 1, 27,
+ 1, 28, 28, 30, 31, 32, 32, 28,
+ 29, 27, 1, 33, 33, 35, 36, 33,
+ 34, 37, 1, 38, 39, 1, 40, 1,
+ 41, 41, 42, 42, 43, 41, 44, 1,
+ 28, 28, 30, 27, 32, 32, 28, 29,
+ 26, 1, 35, 35, 34, 34, 36, 35,
+ 37, 1, 45, 45, 46, 46, 47, 45,
+ 48, 1, 49, 49, 50, 50, 51, 49,
+ 52, 1, 53, 54, 1, 55, 1, 56,
+ 56, 58, 59, 60, 60, 56, 57, 55,
+ 1, 61, 61, 63, 64, 61, 62, 65,
+ 1, 66, 67, 1, 68, 1, 69, 69,
+ 71, 72, 73, 73, 69, 70, 68, 1,
+ 74, 74, 76, 77, 74, 75, 78, 1,
+ 79, 80, 1, 81, 1, 82, 82, 83,
+ 84, 84, 82, 81, 1, 85, 85, 86,
+ 87, 88, 85, 1, 86, 86, 87, 88,
+ 86, 1, 89, 89, 90, 91, 92, 89,
+ 1, 93, 93, 94, 95, 96, 93, 1,
+ 94, 94, 95, 96, 94, 1, 97, 97,
+ 99, 100, 97, 98, 101, 1, 102, 102,
+ 104, 105, 102, 103, 106, 1, 107, 108,
+ 1, 109, 1, 110, 110, 112, 113, 114,
+ 114, 110, 111, 109, 1, 115, 115, 117,
+ 118, 115, 116, 119, 1, 120, 121, 1,
+ 122, 1, 56, 56, 58, 55, 60, 60,
+ 56, 57, 54, 1, 63, 63, 62, 62,
+ 64, 63, 65, 1, 69, 69, 71, 68,
+ 73, 73, 69, 70, 67, 1, 76, 76,
+ 75, 75, 77, 76, 78, 1, 82, 82,
+ 83, 81, 84, 84, 82, 80, 1, 123,
+ 123, 124, 1, 124, 1, 82, 82, 83,
+ 82, 124, 1, 125, 125, 126, 1, 126,
+ 1, 69, 69, 71, 72, 69, 70, 126,
+ 1, 127, 127, 128, 1, 128, 1, 56,
+ 56, 58, 59, 56, 57, 128, 1, 129,
+ 129, 130, 130, 131, 129, 132, 1, 133,
+ 133, 134, 134, 135, 133, 136, 1, 137,
+ 138, 1, 139, 1, 140, 140, 142, 143,
+ 144, 144, 140, 141, 139, 1, 145, 145,
+ 147, 148, 145, 146, 149, 1, 150, 151,
+ 1, 152, 1, 153, 153, 155, 156, 157,
+ 157, 153, 154, 152, 1, 158, 158, 160,
+ 161, 158, 159, 162, 1, 163, 164, 1,
+ 165, 1, 166, 166, 168, 169, 170, 170,
+ 166, 167, 165, 1, 171, 171, 173, 174,
+ 171, 172, 175, 1, 176, 177, 1, 178,
+ 1, 179, 179, 181, 182, 183, 183, 179,
+ 180, 178, 1, 184, 184, 186, 187, 184,
+ 185, 188, 1, 189, 190, 1, 191, 1,
+ 192, 192, 194, 195, 196, 196, 192, 193,
+ 191, 1, 197, 197, 199, 200, 197, 198,
+ 201, 1, 202, 203, 1, 204, 1, 140,
+ 140, 142, 139, 144, 144, 140, 141, 138,
+ 1, 147, 147, 146, 146, 148, 147, 149,
+ 1, 153, 153, 155, 152, 157, 157, 153,
+ 154, 151, 1, 160, 160, 159, 159, 161,
+ 160, 162, 1, 166, 166, 168, 165, 170,
+ 170, 166, 167, 164, 1, 173, 173, 172,
+ 172, 174, 173, 175, 1, 179, 179, 181,
+ 178, 183, 183, 179, 180, 177, 1, 186,
+ 186, 185, 185, 187, 186, 188, 1, 192,
+ 192, 194, 191, 196, 196, 192, 193, 190,
+ 1, 199, 199, 198, 198, 200, 199, 201,
+ 1, 205, 205, 206, 1, 206, 1, 207,
+ 207, 208, 208, 209, 207, 210, 1, 211,
+ 211, 212, 212, 213, 211, 214, 1, 215,
+ 216, 1, 217, 1, 218, 218, 219, 1,
+ 219, 1, 220, 220, 221, 221, 222, 220,
+ 223, 1, 224, 224, 225, 225, 226, 224,
+ 227, 1, 228, 228, 229, 229, 230, 228,
+ 231, 1, 232, 233, 1, 234, 1, 235,
+ 235, 237, 238, 239, 239, 235, 236, 234,
+ 1, 240, 240, 242, 243, 240, 241, 244,
+ 1, 245, 246, 1, 247, 1, 248, 248,
+ 250, 251, 252, 252, 248, 249, 247, 1,
+ 253, 253, 255, 256, 253, 254, 257, 1,
+ 258, 259, 1, 260, 1, 261, 261, 263,
+ 264, 265, 265, 261, 262, 260, 1, 266,
+ 266, 268, 269, 266, 267, 270, 1, 271,
+ 272, 1, 273, 1, 235, 235, 237, 234,
+ 239, 239, 235, 236, 233, 1, 242, 242,
+ 241, 241, 243, 242, 244, 1, 248, 248,
+ 250, 247, 252, 252, 248, 249, 246, 1,
+ 255, 255, 254, 254, 256, 255, 257, 1,
+ 261, 261, 263, 260, 265, 265, 261, 262,
+ 259, 1, 268, 268, 267, 267, 269, 268,
+ 270, 1, 274, 274, 275, 1, 275, 1,
+ 276, 276, 277, 277, 278, 276, 279, 1,
+ 280, 280, 281, 281, 282, 280, 283, 1,
+ 284, 285, 1, 286, 1, 287, 287, 289,
+ 290, 291, 291, 287, 288, 286, 1, 292,
+ 292, 294, 295, 292, 293, 296, 1, 297,
+ 298, 1, 299, 1, 300, 300, 302, 303,
+ 304, 304, 300, 301, 299, 1, 305, 305,
+ 307, 308, 305, 306, 309, 1, 310, 311,
+ 1, 312, 1, 313, 313, 315, 316, 317,
+ 317, 313, 314, 312, 1, 318, 318, 320,
+ 321, 318, 319, 322, 1, 323, 324, 1,
+ 325, 1, 287, 287, 289, 286, 291, 291,
+ 287, 288, 285, 1, 294, 294, 293, 293,
+ 295, 294, 296, 1, 300, 300, 302, 299,
+ 304, 304, 300, 301, 298, 1, 307, 307,
+ 306, 306, 308, 307, 309, 1, 313, 313,
+ 315, 312, 317, 317, 313, 314, 311, 1,
+ 320, 320, 319, 319, 321, 320, 322, 1,
+ 326, 326, 327, 1, 327, 1, 328, 328,
+ 329, 329, 330, 328, 331, 1, 332, 332,
+ 333, 333, 334, 332, 335, 1, 336, 337,
+ 1, 338, 1, 339, 339, 341, 342, 343,
+ 343, 339, 340, 338, 1, 344, 344, 346,
+ 347, 344, 345, 348, 1, 349, 350, 1,
+ 351, 1, 339, 339, 341, 338, 343, 343,
+ 339, 340, 337, 1, 346, 346, 345, 345,
+ 347, 346, 348, 1, 352, 352, 353, 1,
+ 353, 1, 354, 354, 355, 355, 356, 354,
+ 357, 1, 358, 358, 359, 359, 360, 358,
+ 361, 1, 362, 363, 1, 364, 1, 365,
+ 365, 366, 1, 366, 1, 367, 367, 368,
+ 368, 369, 367, 370, 1, 371, 371, 372,
+ 372, 373, 371, 374, 1, 375, 375, 376,
+ 376, 377, 375, 378, 1, 379, 379, 380,
+ 380, 381, 379, 382, 1, 383, 383, 384,
+ 384, 385, 383, 386, 1, 12, 12, 14,
+ 11, 16, 16, 12, 13, 10, 1, 19,
+ 19, 18, 18, 20, 19, 21, 1, 387,
+ 387, 388, 1, 388, 1, 389, 389, 390,
+ 390, 391, 389, 392, 1, 393, 393, 394,
+ 394, 395, 393, 396, 1, 397, 397, 398,
+ 398, 399, 397, 400, 1, 401, 401, 402,
+ 402, 403, 401, 404, 1, 405, 405, 406,
+ 1, 406, 1, 12, 12, 14, 15, 12,
+ 13, 406, 1, 407, 407, 408, 1, 408,
+ 1, 339, 339, 341, 342, 339, 340, 408,
+ 1, 409, 409, 410, 1, 410, 1, 313,
+ 313, 315, 316, 313, 314, 410, 1, 411,
+ 411, 412, 1, 412, 1, 300, 300, 302,
+ 303, 300, 301, 412, 1, 413, 413, 414,
+ 1, 414, 1, 287, 287, 289, 290, 287,
+ 288, 414, 1, 415, 415, 416, 1, 416,
+ 1, 261, 261, 263, 264, 261, 262, 416,
+ 1, 417, 417, 418, 1, 418, 1, 248,
+ 248, 250, 251, 248, 249, 418, 1, 419,
+ 419, 420, 1, 420, 1, 235, 235, 237,
+ 238, 235, 236, 420, 1, 421, 421, 422,
+ 1, 422, 1, 192, 192, 194, 195, 192,
+ 193, 422, 1, 423, 423, 424, 1, 424,
+ 1, 179, 179, 181, 182, 179, 180, 424,
+ 1, 425, 425, 426, 1, 426, 1, 166,
+ 166, 168, 169, 166, 167, 426, 1, 427,
+ 427, 428, 1, 428, 1, 153, 153, 155,
+ 156, 153, 154, 428, 1, 429, 429, 430,
+ 1, 430, 1, 140, 140, 142, 143, 140,
+ 141, 430, 1, 431, 431, 432, 1, 432,
+ 1, 117, 117, 116, 116, 118, 117, 119,
+ 1, 433, 433, 434, 1, 434, 1, 110,
+ 110, 112, 113, 110, 111, 434, 1, 110,
+ 110, 112, 109, 114, 114, 110, 111, 108,
+ 1, 104, 104, 103, 103, 105, 104, 106,
+ 1, 435, 435, 437, 438, 435, 436, 439,
+ 1, 440, 440, 441, 442, 443, 440, 1,
+ 444, 444, 445, 1, 445, 1, 446, 446,
+ 447, 1, 447, 1, 28, 28, 30, 31,
+ 28, 29, 447, 1, 448, 448, 449, 450,
+ 448, 1, 451, 451, 453, 454, 455, 456,
+ 457, 458, 459, 460, 461, 462, 463, 464,
+ 465, 466, 467, 457, 468, 469, 470, 471,
+ 472, 473, 474, 465, 451, 452, 24, 1,
+ 475, 475, 41, 43, 476, 477, 478, 479,
+ 449, 480, 481, 482, 483, 484, 485, 486,
+ 487, 488, 450, 489, 490, 491, 492, 484,
+ 475, 42, 44, 1, 493, 493, 495, 496,
+ 497, 498, 499, 500, 501, 502, 503, 504,
+ 505, 506, 507, 508, 509, 499, 510, 511,
+ 512, 513, 514, 515, 516, 507, 493, 494,
+ 40, 1, 493, 493, 495, 40, 497, 498,
+ 499, 500, 501, 502, 503, 504, 505, 506,
+ 507, 508, 509, 499, 510, 511, 512, 513,
+ 514, 515, 516, 507, 493, 494, 39, 1,
+ 517, 517, 519, 520, 521, 522, 523, 524,
+ 525, 526, 527, 528, 529, 530, 531, 532,
+ 533, 523, 534, 535, 536, 537, 538, 539,
+ 540, 531, 517, 518, 122, 1, 541, 541,
+ 49, 51, 476, 477, 478, 479, 449, 480,
+ 481, 482, 483, 484, 485, 486, 487, 488,
+ 450, 489, 490, 491, 492, 484, 541, 50,
+ 52, 1, 542, 542, 544, 545, 546, 547,
+ 548, 549, 550, 551, 552, 553, 554, 555,
+ 556, 557, 558, 548, 559, 560, 561, 562,
+ 563, 564, 565, 556, 542, 543, 204, 1,
+ 566, 566, 133, 135, 476, 477, 478, 479,
+ 449, 480, 481, 482, 483, 484, 485, 486,
+ 487, 488, 450, 489, 490, 491, 492, 484,
+ 566, 134, 136, 1, 542, 542, 544, 204,
+ 546, 547, 548, 549, 550, 551, 552, 553,
+ 554, 555, 556, 557, 558, 548, 559, 560,
+ 561, 562, 563, 564, 565, 556, 542, 543,
+ 203, 1, 542, 542, 544, 545, 546, 547,
+ 549, 550, 551, 552, 553, 554, 555, 556,
+ 557, 558, 559, 560, 561, 562, 563, 564,
+ 565, 556, 542, 543, 206, 1, 567, 567,
+ 569, 570, 571, 572, 573, 574, 575, 576,
+ 577, 578, 579, 580, 581, 582, 583, 573,
+ 584, 585, 586, 587, 588, 589, 590, 581,
+ 567, 568, 217, 1, 591, 591, 211, 213,
+ 476, 477, 478, 479, 449, 480, 481, 482,
+ 483, 484, 485, 486, 487, 488, 450, 489,
+ 490, 491, 492, 484, 591, 212, 214, 1,
+ 567, 567, 569, 217, 571, 572, 573, 574,
+ 575, 576, 577, 578, 579, 580, 581, 582,
+ 583, 573, 584, 585, 586, 587, 588, 589,
+ 590, 581, 567, 568, 216, 1, 567, 567,
+ 569, 570, 571, 572, 574, 575, 576, 577,
+ 578, 579, 580, 581, 582, 583, 584, 585,
+ 586, 587, 588, 589, 590, 581, 567, 568,
+ 219, 1, 592, 592, 594, 595, 596, 597,
+ 598, 599, 600, 601, 602, 603, 604, 605,
+ 606, 607, 608, 598, 609, 610, 611, 612,
+ 613, 614, 615, 606, 592, 593, 273, 1,
+ 616, 616, 228, 230, 476, 477, 478, 479,
+ 449, 480, 481, 482, 483, 484, 485, 486,
+ 487, 488, 450, 489, 490, 491, 492, 484,
+ 616, 229, 231, 1, 592, 592, 594, 273,
+ 596, 597, 598, 599, 600, 601, 602, 603,
+ 604, 605, 606, 607, 608, 598, 609, 610,
+ 611, 612, 613, 614, 615, 606, 592, 593,
+ 272, 1, 592, 592, 594, 595, 596, 597,
+ 599, 600, 601, 602, 603, 604, 605, 606,
+ 607, 608, 609, 610, 611, 612, 613, 614,
+ 615, 606, 592, 593, 275, 1, 617, 617,
+ 619, 620, 621, 622, 623, 624, 625, 626,
+ 627, 628, 629, 630, 631, 632, 633, 623,
+ 634, 635, 636, 637, 638, 639, 640, 631,
+ 617, 618, 325, 1, 641, 641, 280, 282,
+ 476, 477, 478, 479, 449, 480, 481, 482,
+ 483, 484, 485, 486, 487, 488, 450, 489,
+ 490, 491, 492, 484, 641, 281, 283, 1,
+ 617, 617, 619, 325, 621, 622, 623, 624,
+ 625, 626, 627, 628, 629, 630, 631, 632,
+ 633, 623, 634, 635, 636, 637, 638, 639,
+ 640, 631, 617, 618, 324, 1, 617, 617,
+ 619, 620, 621, 622, 624, 625, 626, 627,
+ 628, 629, 630, 631, 632, 633, 634, 635,
+ 636, 637, 638, 639, 640, 631, 617, 618,
+ 327, 1, 642, 642, 644, 645, 646, 647,
+ 648, 649, 650, 651, 652, 653, 654, 655,
+ 656, 657, 658, 648, 659, 660, 661, 662,
+ 663, 664, 665, 656, 642, 643, 351, 1,
+ 666, 666, 332, 334, 476, 477, 478, 479,
+ 449, 480, 481, 482, 483, 484, 485, 486,
+ 487, 488, 450, 489, 490, 491, 492, 484,
+ 666, 333, 335, 1, 642, 642, 644, 351,
+ 646, 647, 648, 649, 650, 651, 652, 653,
+ 654, 655, 656, 657, 658, 648, 659, 660,
+ 661, 662, 663, 664, 665, 656, 642, 643,
+ 350, 1, 642, 642, 644, 645, 646, 647,
+ 649, 650, 651, 652, 653, 654, 655, 656,
+ 657, 658, 659, 660, 661, 662, 663, 664,
+ 665, 656, 642, 643, 353, 1, 667, 667,
+ 669, 670, 671, 672, 673, 674, 675, 676,
+ 677, 678, 679, 680, 681, 682, 683, 673,
+ 684, 685, 686, 687, 688, 689, 690, 681,
+ 667, 668, 364, 1, 691, 691, 358, 360,
+ 476, 477, 478, 479, 449, 480, 481, 482,
+ 483, 484, 485, 486, 487, 488, 450, 489,
+ 490, 491, 492, 484, 691, 359, 361, 1,
+ 667, 667, 669, 364, 671, 672, 673, 674,
+ 675, 676, 677, 678, 679, 680, 681, 682,
+ 683, 673, 684, 685, 686, 687, 688, 689,
+ 690, 681, 667, 668, 363, 1, 667, 667,
+ 669, 670, 671, 672, 674, 675, 676, 677,
+ 678, 679, 680, 681, 682, 683, 684, 685,
+ 686, 687, 688, 689, 690, 681, 667, 668,
+ 366, 1, 692, 692, 693, 694, 695, 696,
+ 697, 698, 699, 700, 701, 702, 703, 704,
+ 705, 706, 707, 708, 709, 710, 711, 702,
+ 692, 1, 712, 712, 476, 477, 478, 479,
+ 449, 480, 481, 482, 483, 484, 485, 486,
+ 487, 488, 450, 489, 490, 491, 492, 484,
+ 712, 1, 451, 451, 453, 24, 455, 456,
+ 457, 458, 459, 460, 461, 462, 463, 464,
+ 465, 466, 467, 457, 468, 469, 470, 471,
+ 472, 473, 474, 465, 451, 452, 23, 1,
+ 451, 451, 453, 454, 455, 456, 458, 459,
+ 460, 461, 462, 463, 464, 465, 466, 467,
+ 468, 469, 470, 471, 472, 473, 474, 465,
+ 451, 452, 388, 1, 517, 517, 519, 520,
+ 521, 522, 524, 525, 526, 527, 528, 529,
+ 530, 531, 532, 533, 534, 535, 536, 537,
+ 538, 539, 540, 531, 517, 518, 432, 1,
+ 517, 517, 519, 122, 521, 522, 523, 524,
+ 525, 526, 527, 528, 529, 530, 531, 532,
+ 533, 523, 534, 535, 536, 537, 538, 539,
+ 540, 531, 517, 518, 121, 1, 493, 493,
+ 495, 496, 497, 498, 500, 501, 502, 503,
+ 504, 505, 506, 507, 508, 509, 510, 511,
+ 512, 513, 514, 515, 516, 507, 493, 494,
+ 445, 1, 0
+};
+
+static const short _svg_path_trans_targs[] = {
+ 2, 0, 3, 4, 172, 2, 3, 4,
+ 172, 4, 172, 5, 6, 7, 173, 8,
+ 180, 6, 7, 173, 8, 267, 8, 267,
+ 235, 10, 16, 11, 12, 13, 17, 14,
+ 231, 12, 13, 17, 14, 238, 14, 238,
+ 237, 15, 9, 10, 16, 19, 20, 21,
+ 44, 19, 20, 21, 44, 21, 44, 22,
+ 23, 24, 45, 25, 55, 23, 24, 45,
+ 25, 46, 25, 46, 26, 27, 28, 47,
+ 29, 52, 27, 28, 47, 29, 48, 29,
+ 48, 30, 31, 32, 49, 31, 32, 33,
+ 228, 34, 35, 36, 227, 34, 35, 36,
+ 227, 37, 38, 226, 39, 225, 37, 38,
+ 226, 39, 225, 39, 225, 40, 41, 42,
+ 221, 43, 222, 41, 42, 221, 43, 270,
+ 43, 270, 239, 50, 51, 53, 54, 56,
+ 57, 59, 60, 61, 82, 59, 60, 61,
+ 82, 61, 82, 62, 63, 64, 83, 65,
+ 216, 63, 64, 83, 65, 84, 65, 84,
+ 66, 67, 68, 85, 69, 213, 67, 68,
+ 85, 69, 86, 69, 86, 70, 71, 72,
+ 87, 73, 210, 71, 72, 87, 73, 88,
+ 73, 88, 74, 75, 76, 89, 77, 207,
+ 75, 76, 89, 77, 90, 77, 90, 78,
+ 79, 80, 91, 81, 204, 79, 80, 91,
+ 81, 243, 81, 243, 241, 93, 244, 95,
+ 96, 97, 247, 95, 96, 97, 247, 97,
+ 247, 245, 99, 248, 15, 9, 10, 16,
+ 102, 103, 104, 117, 102, 103, 104, 117,
+ 104, 117, 105, 106, 107, 118, 108, 201,
+ 106, 107, 118, 108, 119, 108, 119, 109,
+ 110, 111, 120, 112, 198, 110, 111, 120,
+ 112, 121, 112, 121, 113, 114, 115, 122,
+ 116, 195, 114, 115, 122, 116, 251, 116,
+ 251, 249, 124, 252, 126, 127, 128, 141,
+ 126, 127, 128, 141, 128, 141, 129, 130,
+ 131, 142, 132, 192, 130, 131, 142, 132,
+ 143, 132, 143, 133, 134, 135, 144, 136,
+ 189, 134, 135, 144, 136, 145, 136, 145,
+ 137, 138, 139, 146, 140, 186, 138, 139,
+ 146, 140, 255, 140, 255, 253, 148, 256,
+ 150, 151, 152, 157, 150, 151, 152, 157,
+ 152, 157, 153, 154, 155, 158, 156, 183,
+ 154, 155, 158, 156, 259, 156, 259, 257,
+ 160, 260, 162, 163, 164, 263, 162, 163,
+ 164, 263, 164, 263, 261, 166, 264, 19,
+ 20, 21, 44, 59, 60, 61, 82, 95,
+ 96, 97, 247, 15, 9, 10, 16, 2,
+ 3, 4, 172, 175, 268, 102, 103, 104,
+ 117, 126, 127, 128, 141, 150, 151, 152,
+ 157, 162, 163, 164, 263, 181, 182, 184,
+ 185, 187, 188, 190, 191, 193, 194, 196,
+ 197, 199, 200, 202, 203, 205, 206, 208,
+ 209, 211, 212, 214, 215, 217, 218, 220,
+ 269, 223, 224, 37, 38, 226, 39, 225,
+ 34, 35, 36, 227, 230, 271, 232, 233,
+ 234, 1, 171, 236, 9, 15, 10, 18,
+ 58, 174, 94, 100, 1, 101, 125, 149,
+ 161, 265, 167, 168, 169, 170, 171, 176,
+ 177, 178, 179, 236, 18, 58, 94, 100,
+ 101, 125, 149, 161, 265, 167, 168, 169,
+ 170, 176, 177, 178, 179, 236, 9, 15,
+ 10, 18, 58, 229, 94, 100, 1, 101,
+ 125, 149, 161, 265, 167, 168, 169, 170,
+ 171, 176, 177, 178, 179, 240, 20, 19,
+ 21, 18, 58, 219, 94, 100, 1, 101,
+ 125, 149, 161, 265, 167, 168, 169, 170,
+ 171, 176, 177, 178, 179, 240, 242, 60,
+ 59, 61, 18, 58, 92, 94, 100, 1,
+ 101, 125, 149, 161, 265, 167, 168, 169,
+ 170, 171, 176, 177, 178, 179, 242, 246,
+ 96, 95, 97, 18, 58, 98, 94, 100,
+ 1, 101, 125, 149, 161, 265, 167, 168,
+ 169, 170, 171, 176, 177, 178, 179, 246,
+ 250, 103, 102, 104, 18, 58, 123, 94,
+ 100, 1, 101, 125, 149, 161, 265, 167,
+ 168, 169, 170, 171, 176, 177, 178, 179,
+ 250, 254, 127, 126, 128, 18, 58, 147,
+ 94, 100, 1, 101, 125, 149, 161, 265,
+ 167, 168, 169, 170, 171, 176, 177, 178,
+ 179, 254, 258, 151, 150, 152, 18, 58,
+ 159, 94, 100, 1, 101, 125, 149, 161,
+ 265, 167, 168, 169, 170, 171, 176, 177,
+ 178, 179, 258, 262, 163, 162, 164, 18,
+ 58, 165, 94, 100, 1, 101, 125, 149,
+ 161, 265, 167, 168, 169, 170, 171, 176,
+ 177, 178, 179, 262, 266, 18, 58, 94,
+ 100, 1, 101, 125, 149, 161, 265, 167,
+ 168, 169, 170, 171, 176, 177, 178, 179,
+ 266
+};
+
+static const char _svg_path_trans_actions[] = {
+ 9, 0, 51, 51, 51, 0, 1, 1,
+ 1, 0, 0, 0, 3, 15, 3, 15,
+ 0, 0, 1, 0, 1, 1, 0, 0,
+ 0, 0, 0, 0, 3, 15, 3, 15,
+ 0, 0, 1, 0, 1, 1, 0, 0,
+ 0, 0, 1, 1, 1, 9, 51, 51,
+ 51, 0, 1, 1, 1, 0, 0, 0,
+ 3, 15, 3, 15, 0, 0, 1, 0,
+ 1, 1, 0, 0, 0, 3, 15, 3,
+ 15, 0, 0, 1, 0, 1, 1, 0,
+ 0, 0, 3, 3, 0, 0, 0, 0,
+ 0, 7, 7, 7, 7, 0, 0, 0,
+ 0, 7, 48, 7, 48, 48, 0, 1,
+ 0, 1, 1, 0, 0, 0, 3, 15,
+ 3, 15, 0, 0, 1, 0, 1, 1,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 9, 51, 51, 51, 0, 1, 1,
+ 1, 0, 0, 0, 3, 15, 3, 15,
+ 0, 0, 1, 0, 1, 1, 0, 0,
+ 0, 3, 15, 3, 15, 0, 0, 1,
+ 0, 1, 1, 0, 0, 0, 3, 15,
+ 3, 15, 0, 0, 1, 0, 1, 1,
+ 0, 0, 0, 3, 15, 3, 15, 0,
+ 0, 1, 0, 1, 1, 0, 0, 0,
+ 3, 15, 3, 15, 0, 0, 1, 0,
+ 1, 1, 0, 0, 0, 0, 0, 9,
+ 51, 51, 51, 0, 1, 1, 1, 0,
+ 0, 0, 0, 0, 9, 51, 51, 51,
+ 9, 51, 51, 51, 0, 1, 1, 1,
+ 0, 0, 0, 3, 15, 3, 15, 0,
+ 0, 1, 0, 1, 1, 0, 0, 0,
+ 3, 15, 3, 15, 0, 0, 1, 0,
+ 1, 1, 0, 0, 0, 3, 15, 3,
+ 15, 0, 0, 1, 0, 1, 1, 0,
+ 0, 0, 0, 0, 9, 51, 51, 51,
+ 0, 1, 1, 1, 0, 0, 0, 3,
+ 15, 3, 15, 0, 0, 1, 0, 1,
+ 1, 0, 0, 0, 3, 15, 3, 15,
+ 0, 0, 1, 0, 1, 1, 0, 0,
+ 0, 3, 15, 3, 15, 0, 0, 1,
+ 0, 1, 1, 0, 0, 0, 0, 0,
+ 9, 51, 51, 51, 0, 1, 1, 1,
+ 0, 0, 0, 3, 15, 3, 15, 0,
+ 0, 1, 0, 1, 1, 0, 0, 0,
+ 0, 0, 9, 51, 51, 51, 0, 1,
+ 1, 1, 0, 0, 0, 0, 0, 11,
+ 54, 54, 54, 11, 54, 54, 54, 11,
+ 54, 54, 54, 11, 54, 54, 54, 11,
+ 54, 54, 54, 0, 0, 11, 54, 54,
+ 54, 11, 54, 54, 54, 11, 54, 54,
+ 54, 11, 54, 54, 54, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 5, 45, 5, 45, 45,
+ 5, 5, 5, 5, 0, 0, 0, 0,
+ 0, 0, 0, 18, 57, 18, 57, 18,
+ 18, 0, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 21, 61, 21,
+ 61, 21, 21, 0, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 42, 89, 42,
+ 89, 42, 42, 0, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 0, 30, 73,
+ 30, 73, 30, 30, 0, 30, 30, 30,
+ 30, 30, 30, 30, 30, 30, 30, 30,
+ 30, 30, 30, 30, 30, 30, 0, 24,
+ 65, 24, 65, 24, 24, 0, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 0,
+ 36, 81, 36, 81, 36, 36, 0, 36,
+ 36, 36, 36, 36, 36, 36, 36, 36,
+ 36, 36, 36, 36, 36, 36, 36, 36,
+ 0, 33, 77, 33, 77, 33, 33, 0,
+ 33, 33, 33, 33, 33, 33, 33, 33,
+ 33, 33, 33, 33, 33, 33, 33, 33,
+ 33, 0, 39, 85, 39, 85, 39, 39,
+ 0, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 0, 27, 69, 27, 69, 27,
+ 27, 0, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 0, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 0
+};
+
+static const char _svg_path_eof_actions[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 18, 0, 21, 21, 42,
+ 0, 30, 0, 30, 30, 24, 0, 24,
+ 24, 36, 0, 36, 36, 33, 0, 33,
+ 33, 39, 0, 39, 39, 27, 0, 27,
+ 27, 13, 0, 18, 18, 42, 42, 21
+};
+
+static const int svg_path_start = 234;
+static const int svg_path_first_final = 234;
+
+static const int svg_path_en_main = 234;
+
+
+#line 47 "svg-path-parser.rl"
+
+
+SVGPathParser::SVGPathParser(PathSink &sink)
+ : _absolute(false)
+ , _sink(sink)
+ , _z_snap_threshold(0)
+ , _curve(NULL)
+{
+ reset();
+}
+
+SVGPathParser::~SVGPathParser()
+{
+ delete _curve;
+}
+
+void SVGPathParser::reset() {
+ _absolute = false;
+ _current = _initial = Point(0, 0);
+ _quad_tangent = _cubic_tangent = Point(0, 0);
+ _params.clear();
+ delete _curve;
+ _curve = NULL;
+
+
+#line 1113 "svg-path-parser.cpp"
+ {
+ cs = svg_path_start;
+ }
+
+#line 73 "svg-path-parser.rl"
+
+}
+
+void SVGPathParser::parse(char const *str, int len)
+{
+ if (len < 0) {
+ len = std::strlen(str);
+ }
+ _parse(str, str + len, true);
+}
+
+void SVGPathParser::parse(std::string const &s)
+{
+ _parse(s.c_str(), s.c_str() + s.size(), true);
+}
+
+void SVGPathParser::feed(char const *str, int len)
+{
+ if (len < 0) {
+ len = std::strlen(str);
+ }
+ _parse(str, str + len, false);
+}
+
+void SVGPathParser::feed(std::string const &s)
+{
+ _parse(s.c_str(), s.c_str() + s.size(), false);
+}
+
+void SVGPathParser::finish()
+{
+ char const *empty = "";
+ _parse(empty, empty, true);
+}
+
+void SVGPathParser::_push(Coord value)
+{
+ _params.push_back(value);
+}
+
+Coord SVGPathParser::_pop()
+{
+ Coord value = _params.back();
+ _params.pop_back();
+ return value;
+}
+
+bool SVGPathParser::_pop_flag()
+{
+ return _pop() != 0.0;
+}
+
+Coord SVGPathParser::_pop_coord(Dim2 axis)
+{
+ if (_absolute) {
+ return _pop();
+ } else {
+ return _pop() + _current[axis];
+ }
+}
+
+Point SVGPathParser::_pop_point()
+{
+ Coord y = _pop_coord(Y);
+ Coord x = _pop_coord(X);
+ return Point(x, y);
+}
+
+void SVGPathParser::_moveTo(Point const &p)
+{
+ _pushCurve(NULL); // flush
+ _sink.moveTo(p);
+ _quad_tangent = _cubic_tangent = _current = _initial = p;
+}
+
+void SVGPathParser::_lineTo(Point const &p)
+{
+ _pushCurve(new LineSegment(_current, p));
+ _quad_tangent = _cubic_tangent = _current = p;
+}
+
+void SVGPathParser::_curveTo(Point const &c0, Point const &c1, Point const &p)
+{
+ _pushCurve(new CubicBezier(_current, c0, c1, p));
+ _quad_tangent = _current = p;
+ _cubic_tangent = p + ( p - c1 );
+}
+
+void SVGPathParser::_quadTo(Point const &c, Point const &p)
+{
+ _pushCurve(new QuadraticBezier(_current, c, p));
+ _cubic_tangent = _current = p;
+ _quad_tangent = p + ( p - c );
+}
+
+void SVGPathParser::_arcTo(Coord rx, Coord ry, Coord angle,
+ bool large_arc, bool sweep, Point const &p)
+{
+ if (_current == p) {
+ return; // ignore invalid (ambiguous) arc segments where start and end point are the same (per SVG spec)
+ }
+
+ _pushCurve(new EllipticalArc(_current, fabs(rx), fabs(ry), angle, large_arc, sweep, p));
+ _quad_tangent = _cubic_tangent = _current = p;
+}
+
+void SVGPathParser::_closePath()
+{
+ if (_curve && (!_absolute || !_moveto_was_absolute) &&
+ are_near(_initial, _current, _z_snap_threshold))
+ {
+ _curve->setFinal(_initial);
+ }
+
+ _pushCurve(NULL); // flush
+ _sink.closePath();
+ _quad_tangent = _cubic_tangent = _current = _initial;
+}
+
+void SVGPathParser::_pushCurve(Curve *c)
+{
+ if (_curve) {
+ _sink.feed(*_curve, false);
+ delete _curve;
+ }
+ _curve = c;
+}
+
+void SVGPathParser::_parse(char const *str, char const *strend, bool finish)
+{
+ char const *p = str;
+ char const *pe = strend;
+ char const *eof = finish ? pe : NULL;
+ char const *start = NULL;
+
+
+#line 1255 "svg-path-parser.cpp"
+ {
+ int _klen;
+ unsigned int _trans;
+ const char *_acts;
+ unsigned int _nacts;
+ const char *_keys;
+
+ if ( p == pe )
+ goto _test_eof;
+ if ( cs == 0 )
+ goto _out;
+_resume:
+ _keys = _svg_path_trans_keys + _svg_path_key_offsets[cs];
+ _trans = _svg_path_index_offsets[cs];
+
+ _klen = _svg_path_single_lengths[cs];
+ if ( _klen > 0 ) {
+ const char *_lower = _keys;
+ const char *_mid;
+ const char *_upper = _keys + _klen - 1;
+ while (1) {
+ if ( _upper < _lower )
+ break;
+
+ _mid = _lower + ((_upper-_lower) >> 1);
+ if ( (*p) < *_mid )
+ _upper = _mid - 1;
+ else if ( (*p) > *_mid )
+ _lower = _mid + 1;
+ else {
+ _trans += (unsigned int)(_mid - _keys);
+ goto _match;
+ }
+ }
+ _keys += _klen;
+ _trans += _klen;
+ }
+
+ _klen = _svg_path_range_lengths[cs];
+ if ( _klen > 0 ) {
+ const char *_lower = _keys;
+ const char *_mid;
+ const char *_upper = _keys + (_klen<<1) - 2;
+ while (1) {
+ if ( _upper < _lower )
+ break;
+
+ _mid = _lower + (((_upper-_lower) >> 1) & ~1);
+ if ( (*p) < _mid[0] )
+ _upper = _mid - 2;
+ else if ( (*p) > _mid[1] )
+ _lower = _mid + 2;
+ else {
+ _trans += (unsigned int)((_mid - _keys)>>1);
+ goto _match;
+ }
+ }
+ _trans += _klen;
+ }
+
+_match:
+ _trans = _svg_path_indicies[_trans];
+ cs = _svg_path_trans_targs[_trans];
+
+ if ( _svg_path_trans_actions[_trans] == 0 )
+ goto _again;
+
+ _acts = _svg_path_actions + _svg_path_trans_actions[_trans];
+ _nacts = (unsigned int) *_acts++;
+ while ( _nacts-- > 0 )
+ {
+ switch ( *_acts++ )
+ {
+ case 0:
+#line 209 "svg-path-parser.rl"
+ {
+ start = p;
+ }
+ break;
+ case 1:
+#line 213 "svg-path-parser.rl"
+ {
+ if (start) {
+ std::string buf(start, p);
+ _push(g_ascii_strtod(buf.c_str(), NULL));
+ start = NULL;
+ } else {
+ std::string buf(str, p);
+ _push(g_ascii_strtod((_number_part + buf).c_str(), NULL));
+ _number_part.clear();
+ }
+ }
+ break;
+ case 2:
+#line 225 "svg-path-parser.rl"
+ {
+ _push(1.0);
+ }
+ break;
+ case 3:
+#line 229 "svg-path-parser.rl"
+ {
+ _push(0.0);
+ }
+ break;
+ case 4:
+#line 233 "svg-path-parser.rl"
+ {
+ _absolute = true;
+ }
+ break;
+ case 5:
+#line 237 "svg-path-parser.rl"
+ {
+ _absolute = false;
+ }
+ break;
+ case 6:
+#line 241 "svg-path-parser.rl"
+ {
+ _moveto_was_absolute = _absolute;
+ _moveTo(_pop_point());
+ }
+ break;
+ case 7:
+#line 246 "svg-path-parser.rl"
+ {
+ _lineTo(_pop_point());
+ }
+ break;
+ case 8:
+#line 250 "svg-path-parser.rl"
+ {
+ _lineTo(Point(_pop_coord(X), _current[Y]));
+ }
+ break;
+ case 9:
+#line 254 "svg-path-parser.rl"
+ {
+ _lineTo(Point(_current[X], _pop_coord(Y)));
+ }
+ break;
+ case 10:
+#line 258 "svg-path-parser.rl"
+ {
+ Point p = _pop_point();
+ Point c1 = _pop_point();
+ Point c0 = _pop_point();
+ _curveTo(c0, c1, p);
+ }
+ break;
+ case 11:
+#line 265 "svg-path-parser.rl"
+ {
+ Point p = _pop_point();
+ Point c1 = _pop_point();
+ _curveTo(_cubic_tangent, c1, p);
+ }
+ break;
+ case 12:
+#line 271 "svg-path-parser.rl"
+ {
+ Point p = _pop_point();
+ Point c = _pop_point();
+ _quadTo(c, p);
+ }
+ break;
+ case 13:
+#line 277 "svg-path-parser.rl"
+ {
+ Point p = _pop_point();
+ _quadTo(_quad_tangent, p);
+ }
+ break;
+ case 14:
+#line 282 "svg-path-parser.rl"
+ {
+ Point point = _pop_point();
+ bool sweep = _pop_flag();
+ bool large_arc = _pop_flag();
+ double angle = rad_from_deg(_pop());
+ double ry = _pop();
+ double rx = _pop();
+
+ _arcTo(rx, ry, angle, large_arc, sweep, point);
+ }
+ break;
+ case 15:
+#line 293 "svg-path-parser.rl"
+ {
+ _closePath();
+ }
+ break;
+#line 1449 "svg-path-parser.cpp"
+ }
+ }
+
+_again:
+ if ( cs == 0 )
+ goto _out;
+ if ( ++p != pe )
+ goto _resume;
+ _test_eof: {}
+ if ( p == eof )
+ {
+ const char *__acts = _svg_path_actions + _svg_path_eof_actions[cs];
+ unsigned int __nacts = (unsigned int) *__acts++;
+ while ( __nacts-- > 0 ) {
+ switch ( *__acts++ ) {
+ case 1:
+#line 213 "svg-path-parser.rl"
+ {
+ if (start) {
+ std::string buf(start, p);
+ _push(g_ascii_strtod(buf.c_str(), NULL));
+ start = NULL;
+ } else {
+ std::string buf(str, p);
+ _push(g_ascii_strtod((_number_part + buf).c_str(), NULL));
+ _number_part.clear();
+ }
+ }
+ break;
+ case 6:
+#line 241 "svg-path-parser.rl"
+ {
+ _moveto_was_absolute = _absolute;
+ _moveTo(_pop_point());
+ }
+ break;
+ case 7:
+#line 246 "svg-path-parser.rl"
+ {
+ _lineTo(_pop_point());
+ }
+ break;
+ case 8:
+#line 250 "svg-path-parser.rl"
+ {
+ _lineTo(Point(_pop_coord(X), _current[Y]));
+ }
+ break;
+ case 9:
+#line 254 "svg-path-parser.rl"
+ {
+ _lineTo(Point(_current[X], _pop_coord(Y)));
+ }
+ break;
+ case 10:
+#line 258 "svg-path-parser.rl"
+ {
+ Point p = _pop_point();
+ Point c1 = _pop_point();
+ Point c0 = _pop_point();
+ _curveTo(c0, c1, p);
+ }
+ break;
+ case 11:
+#line 265 "svg-path-parser.rl"
+ {
+ Point p = _pop_point();
+ Point c1 = _pop_point();
+ _curveTo(_cubic_tangent, c1, p);
+ }
+ break;
+ case 12:
+#line 271 "svg-path-parser.rl"
+ {
+ Point p = _pop_point();
+ Point c = _pop_point();
+ _quadTo(c, p);
+ }
+ break;
+ case 13:
+#line 277 "svg-path-parser.rl"
+ {
+ Point p = _pop_point();
+ _quadTo(_quad_tangent, p);
+ }
+ break;
+ case 14:
+#line 282 "svg-path-parser.rl"
+ {
+ Point point = _pop_point();
+ bool sweep = _pop_flag();
+ bool large_arc = _pop_flag();
+ double angle = rad_from_deg(_pop());
+ double ry = _pop();
+ double rx = _pop();
+
+ _arcTo(rx, ry, angle, large_arc, sweep, point);
+ }
+ break;
+ case 15:
+#line 293 "svg-path-parser.rl"
+ {
+ _closePath();
+ }
+ break;
+#line 1555 "svg-path-parser.cpp"
+ }
+ }
+ }
+
+ _out: {}
+ }
+
+#line 435 "svg-path-parser.rl"
+
+
+ if (finish) {
+ if (cs < svg_path_first_final) {
+ throw SVGPathParseError();
+ }
+ } else if (start != NULL) {
+ _number_part = std::string(start, pe);
+ }
+
+ if (finish) {
+ _pushCurve(NULL);
+ _sink.flush();
+ reset();
+ }
+}
+
+void parse_svg_path(char const *str, PathSink &sink)
+{
+ SVGPathParser parser(sink);
+ parser.parse(str);
+}
+
+void parse_svg_path_file(FILE *fi, PathSink &sink)
+{
+ static const size_t BUFFER_SIZE = 4096;
+ char buffer[BUFFER_SIZE];
+ size_t bytes_read;
+ SVGPathParser parser(sink);
+
+ while (true) {
+ bytes_read = fread(buffer, 1, BUFFER_SIZE, fi);
+ if (bytes_read < BUFFER_SIZE) {
+ parser.parse(buffer, bytes_read);
+ break;
+ } else {
+ parser.feed(buffer, bytes_read);
+ }
+ }
+}
+
+} // namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=ragel:cindent:expandtab:shiftwidth=4:softtabstop=4:fileencoding=utf-8:textwidth=99 :
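
The parser above exposes two entry points: `parse()` for complete path data and `feed()`/`finish()` for data arriving in chunks, with a number that straddles a chunk boundary carried over in `_number_part`. Below is a usage sketch of the chunked interface. It assumes `Geom::PathBuilder` (a `PathSink` that collects the result into a `PathVector`, expected in `2geom/path-sink.h`) as the concrete sink; that class and its `peek()` accessor are assumptions, not something shown in this patch.

#include <2geom/svg-path-parser.h>
#include <2geom/path-sink.h>
#include <cstdio>

int main()
{
    Geom::PathBuilder builder;          // assumed concrete PathSink implementation
    Geom::SVGPathParser parser(builder);

    // "M 0,0 L 10,10 Z" delivered in two chunks; the "10" after 'L' is split
    // across the boundary and reassembled via _number_part.
    parser.feed("M 0,0 L 1");
    parser.feed("0,10 Z");
    parser.finish();                    // throws SVGPathParseError on malformed input

    std::printf("parsed %d subpath(s)\n", (int) builder.peek().size());
}
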
diff --git a/src/2geom/svg-path-parser.rl b/src/2geom/svg-path-parser.rl
new file mode 100644
index 0000000..7b3eb5a
--- /dev/null
+++ b/src/2geom/svg-path-parser.rl
@@ -0,0 +1,487 @@
+/**
+ * \file
+ * \brief parse SVG path specifications
+ *
+ * Copyright 2007 MenTaLguY <mental@rydia.net>
+ * Copyright 2007 Aaron Spike <aaron@ekips.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ */
+
+#include <cstdio>
+#include <cstring>
+#include <cmath>
+#include <vector>
+#include <glib.h>
+
+#include <2geom/point.h>
+#include <2geom/svg-path-parser.h>
+#include <2geom/angle.h>
+
+namespace Geom {
+
+%%{
+ machine svg_path;
+ write data noerror;
+}%%
+
+SVGPathParser::SVGPathParser(PathSink &sink)
+ : _absolute(false)
+ , _sink(sink)
+ , _z_snap_threshold(0)
+ , _curve(NULL)
+{
+ reset();
+}
+
+SVGPathParser::~SVGPathParser()
+{
+ delete _curve;
+}
+
+void SVGPathParser::reset() {
+ _absolute = false;
+ _current = _initial = Point(0, 0);
+ _quad_tangent = _cubic_tangent = Point(0, 0);
+ _params.clear();
+ delete _curve;
+ _curve = NULL;
+
+ %%{
+ write init;
+ }%%
+}
+
+void SVGPathParser::parse(char const *str, int len)
+{
+ if (len < 0) {
+ len = std::strlen(str);
+ }
+ _parse(str, str + len, true);
+}
+
+void SVGPathParser::parse(std::string const &s)
+{
+ _parse(s.c_str(), s.c_str() + s.size(), true);
+}
+
+void SVGPathParser::feed(char const *str, int len)
+{
+ if (len < 0) {
+ len = std::strlen(str);
+ }
+ _parse(str, str + len, false);
+}
+
+void SVGPathParser::feed(std::string const &s)
+{
+ _parse(s.c_str(), s.c_str() + s.size(), false);
+}
+
+void SVGPathParser::finish()
+{
+ char const *empty = "";
+ _parse(empty, empty, true);
+}
+
+void SVGPathParser::_push(Coord value)
+{
+ _params.push_back(value);
+}
+
+Coord SVGPathParser::_pop()
+{
+ Coord value = _params.back();
+ _params.pop_back();
+ return value;
+}
+
+bool SVGPathParser::_pop_flag()
+{
+ return _pop() != 0.0;
+}
+
+Coord SVGPathParser::_pop_coord(Dim2 axis)
+{
+ if (_absolute) {
+ return _pop();
+ } else {
+ return _pop() + _current[axis];
+ }
+}
+
+Point SVGPathParser::_pop_point()
+{
+ Coord y = _pop_coord(Y);
+ Coord x = _pop_coord(X);
+ return Point(x, y);
+}
+
+void SVGPathParser::_moveTo(Point const &p)
+{
+ _pushCurve(NULL); // flush
+ _sink.moveTo(p);
+ _quad_tangent = _cubic_tangent = _current = _initial = p;
+}
+
+void SVGPathParser::_lineTo(Point const &p)
+{
+ _pushCurve(new LineSegment(_current, p));
+ _quad_tangent = _cubic_tangent = _current = p;
+}
+
+void SVGPathParser::_curveTo(Point const &c0, Point const &c1, Point const &p)
+{
+ _pushCurve(new CubicBezier(_current, c0, c1, p));
+ _quad_tangent = _current = p;
+ _cubic_tangent = p + ( p - c1 );
+}
+
+void SVGPathParser::_quadTo(Point const &c, Point const &p)
+{
+ _pushCurve(new QuadraticBezier(_current, c, p));
+ _cubic_tangent = _current = p;
+ _quad_tangent = p + ( p - c );
+}
+
+void SVGPathParser::_arcTo(Coord rx, Coord ry, Coord angle,
+ bool large_arc, bool sweep, Point const &p)
+{
+ if (_current == p) {
+ return; // ignore invalid (ambiguous) arc segments where start and end point are the same (per SVG spec)
+ }
+
+ _pushCurve(new EllipticalArc(_current, fabs(rx), fabs(ry), angle, large_arc, sweep, p));
+ _quad_tangent = _cubic_tangent = _current = p;
+}
+
+void SVGPathParser::_closePath()
+{
+ if (_curve && (!_absolute || !_moveto_was_absolute) &&
+ are_near(_initial, _current, _z_snap_threshold))
+ {
+ _curve->setFinal(_initial);
+ }
+
+ _pushCurve(NULL); // flush
+ _sink.closePath();
+ _quad_tangent = _cubic_tangent = _current = _initial;
+}
+
+void SVGPathParser::_pushCurve(Curve *c)
+{
+ if (_curve) {
+ _sink.feed(*_curve, false);
+ delete _curve;
+ }
+ _curve = c;
+}
+
+void SVGPathParser::_parse(char const *str, char const *strend, bool finish)
+{
+ char const *p = str;
+ char const *pe = strend;
+ char const *eof = finish ? pe : NULL;
+ char const *start = NULL;
+
+ %%{
+ action start_number {
+ start = p;
+ }
+
+ action push_number {
+ if (start) {
+ std::string buf(start, p);
+ _push(g_ascii_strtod(buf.c_str(), NULL));
+ start = NULL;
+ } else {
+ std::string buf(str, p);
+ _push(g_ascii_strtod((_number_part + buf).c_str(), NULL));
+ _number_part.clear();
+ }
+ }
+
+ action push_true {
+ _push(1.0);
+ }
+
+ action push_false {
+ _push(0.0);
+ }
+
+ action mode_abs {
+ _absolute = true;
+ }
+
+ action mode_rel {
+ _absolute = false;
+ }
+
+ action moveto {
+ _moveto_was_absolute = _absolute;
+ _moveTo(_pop_point());
+ }
+
+ action lineto {
+ _lineTo(_pop_point());
+ }
+
+ action horizontal_lineto {
+ _lineTo(Point(_pop_coord(X), _current[Y]));
+ }
+
+ action vertical_lineto {
+ _lineTo(Point(_current[X], _pop_coord(Y)));
+ }
+
+ action curveto {
+ Point p = _pop_point();
+ Point c1 = _pop_point();
+ Point c0 = _pop_point();
+ _curveTo(c0, c1, p);
+ }
+
+ action smooth_curveto {
+ Point p = _pop_point();
+ Point c1 = _pop_point();
+ _curveTo(_cubic_tangent, c1, p);
+ }
+
+ action quadratic_bezier_curveto {
+ Point p = _pop_point();
+ Point c = _pop_point();
+ _quadTo(c, p);
+ }
+
+ action smooth_quadratic_bezier_curveto {
+ Point p = _pop_point();
+ _quadTo(_quad_tangent, p);
+ }
+
+ action elliptical_arc {
+ Point point = _pop_point();
+ bool sweep = _pop_flag();
+ bool large_arc = _pop_flag();
+ double angle = rad_from_deg(_pop());
+ double ry = _pop();
+ double rx = _pop();
+
+ _arcTo(rx, ry, angle, large_arc, sweep, point);
+ }
+
+ action closepath {
+ _closePath();
+ }
+
+ wsp = (' ' | 9 | 10 | 13);
+ sign = ('+' | '-');
+ digit_sequence = digit+;
+ exponent = ('e' | 'E') sign? digit_sequence;
+ fractional_constant =
+ digit_sequence? '.' digit_sequence
+ | digit_sequence '.';
+ floating_point_constant =
+ fractional_constant exponent?
+ | digit_sequence exponent;
+ integer_constant = digit_sequence;
+ comma = ',';
+ comma_wsp = (wsp+ comma? wsp*) | (comma wsp*);
+
+ flag = ('0' %push_false | '1' %push_true);
+
+ number =
+ ( sign? integer_constant
+ | sign? floating_point_constant )
+ >start_number %push_number;
+
+ nonnegative_number =
+ ( integer_constant
+ | floating_point_constant)
+ >start_number %push_number;
+
+ coordinate = number $(number,1) %(number,0);
+ coordinate_pair = (coordinate $(coordinate_pair_a,1) %(coordinate_pair_a,0) comma_wsp? coordinate $(coordinate_pair_b,1) %(coordinate_pair_b,0)) $(coordinate_pair,1) %(coordinate_pair,0);
+ elliptical_arc_argument =
+ (number $(elliptical_arg_a,1) %(elliptical_arg_a,0) comma_wsp?
+ number $(elliptical_arg_b,1) %(elliptical_arg_b,0) comma_wsp?
+ number comma_wsp
+ flag comma_wsp? flag comma_wsp?
+ coordinate_pair)
+ %elliptical_arc;
+ elliptical_arc_argument_sequence =
+ elliptical_arc_argument $1 %0
+ (comma_wsp? elliptical_arc_argument $1 %0)*;
+ elliptical_arc =
+ ('A' %mode_abs| 'a' %mode_rel) wsp*
+ elliptical_arc_argument_sequence;
+
+ smooth_quadratic_bezier_curveto_argument =
+ coordinate_pair %smooth_quadratic_bezier_curveto;
+ smooth_quadratic_bezier_curveto_argument_sequence =
+ smooth_quadratic_bezier_curveto_argument $1 %0
+ (comma_wsp?
+ smooth_quadratic_bezier_curveto_argument $1 %0)*;
+ smooth_quadratic_bezier_curveto =
+ ('T' %mode_abs| 't' %mode_rel) wsp*
+ smooth_quadratic_bezier_curveto_argument_sequence;
+
+ quadratic_bezier_curveto_argument =
+ (coordinate_pair $1 %0 comma_wsp? coordinate_pair)
+ %quadratic_bezier_curveto;
+ quadratic_bezier_curveto_argument_sequence =
+ quadratic_bezier_curveto_argument $1 %0
+ (comma_wsp? quadratic_bezier_curveto_argument $1 %0)*;
+ quadratic_bezier_curveto =
+ ('Q' %mode_abs| 'q' %mode_rel) wsp*
+ quadratic_bezier_curveto_argument_sequence;
+
+ smooth_curveto_argument =
+ (coordinate_pair $1 %0 comma_wsp? coordinate_pair)
+ %smooth_curveto;
+ smooth_curveto_argument_sequence =
+ smooth_curveto_argument $1 %0
+ (comma_wsp? smooth_curveto_argument $1 %0)*;
+ smooth_curveto =
+ ('S' %mode_abs| 's' %mode_rel)
+ wsp* smooth_curveto_argument_sequence;
+
+ curveto_argument =
+ (coordinate_pair $1 %0 comma_wsp?
+ coordinate_pair $1 %0 comma_wsp?
+ coordinate_pair)
+ %curveto;
+ curveto_argument_sequence =
+ curveto_argument $1 %0
+ (comma_wsp? curveto_argument $1 %0)*;
+ curveto =
+ ('C' %mode_abs| 'c' %mode_rel)
+ wsp* curveto_argument_sequence;
+
+ vertical_lineto_argument = coordinate %vertical_lineto;
+ vertical_lineto_argument_sequence =
+ vertical_lineto_argument $(vertical_lineto_argument_a,1) %(vertical_lineto_argument_a,0)
+ (comma_wsp? vertical_lineto_argument $(vertical_lineto_argument_b,1) %(vertical_lineto_argument_b,0))*;
+ vertical_lineto =
+ ('V' %mode_abs| 'v' %mode_rel)
+ wsp* vertical_lineto_argument_sequence;
+
+ horizontal_lineto_argument = coordinate %horizontal_lineto;
+ horizontal_lineto_argument_sequence =
+ horizontal_lineto_argument $(horizontal_lineto_argument_a,1) %(horizontal_lineto_argument_a,0)
+ (comma_wsp? horizontal_lineto_argument $(horizontal_lineto_argument_b,1) %(horizontal_lineto_argument_b,0))*;
+ horizontal_lineto =
+ ('H' %mode_abs| 'h' %mode_rel)
+ wsp* horizontal_lineto_argument_sequence;
+
+ lineto_argument = coordinate_pair %lineto;
+ lineto_argument_sequence =
+ lineto_argument $1 %0
+ (comma_wsp? lineto_argument $1 %0)*;
+ lineto =
+ ('L' %mode_abs| 'l' %mode_rel) wsp*
+ lineto_argument_sequence;
+
+ closepath = ('Z' | 'z') %closepath;
+
+ moveto_argument = coordinate_pair %moveto;
+ moveto_argument_sequence =
+ moveto_argument $1 %0
+ (comma_wsp? lineto_argument $1 %0)*;
+ moveto =
+ ('M' %mode_abs | 'm' %mode_rel)
+ wsp* moveto_argument_sequence;
+
+ drawto_command =
+ closepath | lineto |
+ horizontal_lineto | vertical_lineto |
+ curveto | smooth_curveto |
+ quadratic_bezier_curveto |
+ smooth_quadratic_bezier_curveto |
+ elliptical_arc;
+
+ drawto_commands = drawto_command (wsp* drawto_command)*;
+ moveto_drawto_command_group = moveto wsp* drawto_commands?;
+ moveto_drawto_command_groups =
+ moveto_drawto_command_group
+ (wsp* moveto_drawto_command_group)*;
+
+ svg_path = wsp* moveto_drawto_command_groups? wsp*;
+
+
+ main := svg_path;
+
+ write exec;
+ }%%
+
+ if (finish) {
+ if (cs < svg_path_first_final) {
+ throw SVGPathParseError();
+ }
+ } else if (start != NULL) {
+ _number_part = std::string(start, pe);
+ }
+
+ if (finish) {
+ _pushCurve(NULL);
+ _sink.flush();
+ reset();
+ }
+}
+
+void parse_svg_path(char const *str, PathSink &sink)
+{
+ SVGPathParser parser(sink);
+ parser.parse(str);
+}
+
+void parse_svg_path_file(FILE *fi, PathSink &sink)
+{
+ static const size_t BUFFER_SIZE = 4096;
+ char buffer[BUFFER_SIZE];
+ size_t bytes_read;
+ SVGPathParser parser(sink);
+
+ while (true) {
+ bytes_read = fread(buffer, 1, BUFFER_SIZE, fi);
+ if (bytes_read < BUFFER_SIZE) {
+ parser.parse(buffer, bytes_read);
+ break;
+ } else {
+ parser.feed(buffer, bytes_read);
+ }
+ }
+}
+
+} // namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=ragel:cindent:expandtab:shiftwidth=4:softtabstop=4:fileencoding=utf-8:textwidth=99 :
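A minimal usage sketch for the parser entry point defined above. It assumes Geom::PathBuilder (a PathSink that accumulates the parsed curves into a PathVector) and its peek() accessor are available from <2geom/path-sink.h>; any other PathSink subclass would be driven the same way. Note that parse() flushes the sink itself once the string is fully consumed.

    #include <2geom/path-sink.h>
    #include <2geom/pathvector.h>
    #include <2geom/svg-path-parser.h>
    #include <iostream>

    int main()
    {
        Geom::PathBuilder builder;                 // assumed PathVector-building sink
        Geom::parse_svg_path("M 10,10 L 90,10 90,90 Z", builder);
        Geom::PathVector const &pv = builder.peek();
        std::cout << pv.size() << " subpath(s) parsed\n";   // expected: 1
        return 0;
    }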
diff --git a/src/2geom/svg-path-writer.cpp b/src/2geom/svg-path-writer.cpp
new file mode 100644
index 0000000..1b8cabe
--- /dev/null
+++ b/src/2geom/svg-path-writer.cpp
@@ -0,0 +1,296 @@
+/** @file
+ * @brief Path sink which writes an SVG-compatible command string
+ *//*
+ * Authors:
+ * Krzysztof Kosiński <tweenk.pl@gmail.com>
+ *
+ * Copyright 2014 Authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+#include <cmath>
+#include <iomanip>
+#include <2geom/coord.h>
+#include <2geom/svg-path-writer.h>
+#include <glib.h>
+
+namespace Geom {
+
+static inline bool is_digit(char c) {
+ return c >= '0' && c <= '9';
+}
+
+SVGPathWriter::SVGPathWriter()
+ : _epsilon(0)
+ , _precision(-1)
+ , _optimize(false)
+ , _use_shorthands(true)
+ , _command(0)
+{
+ // always use C locale for number formatting
+ _ns.imbue(std::locale::classic());
+ _ns.unsetf(std::ios::floatfield);
+}
+
+void SVGPathWriter::moveTo(Point const &p)
+{
+ _setCommand('M');
+ _current_pars.push_back(p[X]);
+ _current_pars.push_back(p[Y]);
+
+ _current = _subpath_start = _quad_tangent = _cubic_tangent = p;
+ if (!_optimize) {
+ flush();
+ }
+}
+
+void SVGPathWriter::lineTo(Point const &p)
+{
+ // _current is updated only in the snapped dimension, to avoid drift across many almost-aligned segments.
+ // The magnitude comparisons ensure that the smaller of the two offsets is the one rounded to zero.
+ bool written = false;
+ if (_use_shorthands) {
+ Point r = _current - p;
+ if (are_near(p[X], _current[X], _epsilon) && std::abs(r[X]) < std::abs(r[Y])) {
+ // emit vlineto
+ _setCommand('V');
+ _current_pars.push_back(p[Y]);
+ _current[Y] = p[Y];
+ written = true;
+ } else if (are_near(p[Y], _current[Y], _epsilon) && std::abs(r[Y]) < std::abs(r[X])) {
+ // emit hlineto
+ _setCommand('H');
+ _current_pars.push_back(p[X]);
+ _current[X] = p[X];
+ written = true;
+ }
+ }
+
+ if (!written) {
+ // emit normal lineto
+ if (_command != 'M' && _command != 'L') {
+ _setCommand('L');
+ }
+ _current_pars.push_back(p[X]);
+ _current_pars.push_back(p[Y]);
+ _current = p;
+ }
+
+ _cubic_tangent = _quad_tangent = _current;
+ if (!_optimize) {
+ flush();
+ }
+}
+
+void SVGPathWriter::quadTo(Point const &c, Point const &p)
+{
+ bool shorthand = _use_shorthands && are_near(c, _quad_tangent, _epsilon);
+
+ _setCommand(shorthand ? 'T' : 'Q');
+ if (!shorthand) {
+ _current_pars.push_back(c[X]);
+ _current_pars.push_back(c[Y]);
+ }
+ _current_pars.push_back(p[X]);
+ _current_pars.push_back(p[Y]);
+
+ _current = _cubic_tangent = p;
+ _quad_tangent = p + (p - c);
+ if (!_optimize) {
+ flush();
+ }
+}
+
+void SVGPathWriter::curveTo(Point const &p1, Point const &p2, Point const &p3)
+{
+ bool shorthand = _use_shorthands && are_near(p1, _cubic_tangent, _epsilon);
+
+ _setCommand(shorthand ? 'S' : 'C');
+ if (!shorthand) {
+ _current_pars.push_back(p1[X]);
+ _current_pars.push_back(p1[Y]);
+ }
+ _current_pars.push_back(p2[X]);
+ _current_pars.push_back(p2[Y]);
+ _current_pars.push_back(p3[X]);
+ _current_pars.push_back(p3[Y]);
+
+ _current = _quad_tangent = p3;
+ _cubic_tangent = p3 + (p3 - p2);
+ if (!_optimize) {
+ flush();
+ }
+}
+
+void SVGPathWriter::arcTo(double rx, double ry, double angle,
+ bool large_arc, bool sweep, Point const &p)
+{
+ _setCommand('A');
+ _current_pars.push_back(rx);
+ _current_pars.push_back(ry);
+ _current_pars.push_back(deg_from_rad(angle));
+ _current_pars.push_back(large_arc ? 1. : 0.);
+ _current_pars.push_back(sweep ? 1. : 0.);
+ _current_pars.push_back(p[X]);
+ _current_pars.push_back(p[Y]);
+
+ _current = _quad_tangent = _cubic_tangent = p;
+ if (!_optimize) {
+ flush();
+ }
+}
+
+void SVGPathWriter::closePath()
+{
+ flush();
+ if (_optimize) {
+ _s << "z";
+ } else {
+ _s << " z";
+ }
+ _current = _quad_tangent = _cubic_tangent = _subpath_start;
+}
+
+void SVGPathWriter::flush()
+{
+ if (_command == 0 || _current_pars.empty()) return;
+
+ if (_optimize) {
+ _s << _command;
+ } else {
+ if (_s.tellp() != 0) {
+ _s << ' ';
+ }
+ _s << _command;
+ }
+
+ char lastchar = _command;
+ bool contained_dot = false;
+
+ for (double par : _current_pars) {
+ // TODO: optimize the use of absolute / relative coords
+ std::string cs = _formatCoord(par);
+
+ // Separator handling logic.
+ // Floating point values can end with a digit or dot
+ // and start with a digit, a plus or minus sign, or a dot.
+ // The following cases require a separator:
+ // * digit-digit
+ // * digit-dot (only if the previous number didn't contain a dot)
+ // * dot-digit
+ if (_optimize) {
+ // first character of the formatted coordinate
+ char firstchar = cs.front();
+ if (is_digit(lastchar)) {
+ if (is_digit(firstchar)) {
+ _s << " ";
+ } else if (firstchar == '.' && !contained_dot) {
+ _s << " ";
+ }
+ } else if (lastchar == '.' && is_digit(firstchar)) {
+ _s << " ";
+ }
+ _s << cs;
+
+ // last character of the formatted coordinate
+ lastchar = cs.back();
+ contained_dot = cs.find('.') != std::string::npos;
+ } else {
+ _s << " " << cs;
+ }
+ }
+ _current_pars.clear();
+ _command = 0;
+}
+
+void SVGPathWriter::clear()
+{
+ _s.clear();
+ _s.str("");
+ _ns.clear();
+ _ns.str("");
+ _command = 0;
+ _current_pars.clear();
+ _current = Point(0,0);
+ _subpath_start = Point(0,0);
+}
+
+void SVGPathWriter::setPrecision(int prec)
+{
+ _precision = prec;
+ if (prec < 0) {
+ _epsilon = 0;
+ } else {
+ _epsilon = std::pow(10., -prec);
+ _ns << std::setprecision(_precision);
+ }
+}
+
+void SVGPathWriter::_setCommand(char cmd)
+{
+ if (_command != 0 && _command != cmd) {
+ flush();
+ }
+ _command = cmd;
+}
+
+std::string SVGPathWriter::_formatCoord(Coord par)
+{
+ std::string ret;
+ if (_precision < 0) {
+ ret = format_coord_shortest(par);
+ } else {
+ _ns << par;
+ ret = _ns.str();
+ _ns.clear();
+ _ns.str("");
+ }
+ return ret;
+}
+
+
+std::string write_svg_path(PathVector const &pv, int prec, bool optimize, bool shorthands)
+{
+ SVGPathWriter writer;
+ writer.setPrecision(prec);
+ writer.setOptimize(optimize);
+ writer.setUseShorthands(shorthands);
+
+ writer.feed(pv);
+ return writer.str();
+}
+
+} // namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
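A short sketch driving SVGPathWriter directly through the methods shown above; the printed string is what the shorthand and separator logic should produce for these inputs, not a guaranteed canonical form.

    #include <2geom/point.h>
    #include <2geom/svg-path-writer.h>
    #include <iostream>

    int main()
    {
        Geom::SVGPathWriter w;
        w.setPrecision(3);                 // also sets the snapping epsilon to 1e-3
        w.setOptimize(true);               // emit commands and separators only when required
        w.moveTo(Geom::Point(10, 10));
        w.lineTo(Geom::Point(10, 90));     // X unchanged: written as a 'V' shorthand
        w.lineTo(Geom::Point(90, 90));     // Y unchanged: written as an 'H' shorthand
        w.closePath();
        w.flush();                         // harmless; closePath() has already flushed
        std::cout << w.str() << "\n";      // expected: M10 10V90H90z
        return 0;
    }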
diff --git a/src/2geom/sweep-bounds.cpp b/src/2geom/sweep-bounds.cpp
new file mode 100644
index 0000000..2f31b67
--- /dev/null
+++ b/src/2geom/sweep-bounds.cpp
@@ -0,0 +1,154 @@
+#include <2geom/sweep-bounds.h>
+
+#include <algorithm>
+
+namespace Geom {
+
+struct Event {
+ double x;
+ unsigned ix;
+ bool closing;
+ Event(double pos, unsigned i, bool c) : x(pos), ix(i), closing(c) {}
+// Lexicographic ordering: by x first; at equal x, opening events sort before closing events
+ bool operator<(Event const &other) const {
+ if(x < other.x) return true;
+ if(x > other.x) return false;
+ return closing < other.closing;
+ }
+ bool operator==(Event const &other) const {
+ return other.x == x && other.ix == ix && other.closing == closing;
+ }
+};
+
+std::vector<std::vector<unsigned> > fake_cull(unsigned a, unsigned b);
+
+/**
+ * \brief Find all pairs of mutually intersecting bounding rectangles within a single list.
+ *
+ * \param rs: vector of Rect.
+ * \param d: dimension to sweep along
+ *
+ * In the returned vector `pairs`, index j appears in pairs[i] exactly when rs[i] and rs[j]
+ * overlap in both dimensions and rs[i] starts no later than rs[j] along the sweep dimension d.
+ */
+
+std::vector<std::vector<unsigned> > sweep_bounds(std::vector<Rect> rs, Dim2 d) {
+ std::vector<Event> events; events.reserve(rs.size()*2);
+ std::vector<std::vector<unsigned> > pairs(rs.size());
+
+ for(unsigned i = 0; i < rs.size(); i++) {
+ events.emplace_back(rs[i][d].min(), i, false);
+ events.emplace_back(rs[i][d].max(), i, true);
+ }
+ std::sort(events.begin(), events.end());
+
+ std::vector<unsigned> open;
+ for(auto & event : events) {
+ unsigned ix = event.ix;
+ if(event.closing) {
+ std::vector<unsigned>::iterator iter = std::find(open.begin(), open.end(), ix);
+ // ix is guaranteed to be present: its opening event was processed before this closing event
+ open.erase(iter);
+ } else {
+ for(unsigned int jx : open) {
+ if(rs[jx][1-d].intersects(rs[ix][1-d])) {
+ pairs[jx].push_back(ix);
+ }
+ }
+ open.push_back(ix);
+ }
+ }
+ return pairs;
+}
+
+/**
+ * \brief Make a list of pairs of red-blue intersections between two lists of Rects.
+ *
+ * \param a: vector of Rect.
+ * \param b: vector of Rect.
+ * \param d: dimension to scan along
+ *
+ * In the returned vector `pairs`, index j appears in pairs[i] exactly when a[i] and b[j]
+ * overlap in both dimensions; only cross intersections between the two lists are reported.
+ */
+std::vector<std::vector<unsigned> > sweep_bounds(std::vector<Rect> a, std::vector<Rect> b, Dim2 d) {
+ std::vector<std::vector<unsigned> > pairs(a.size());
+ if(a.empty() || b.empty()) return pairs;
+ std::vector<Event> events[2];
+ events[0].reserve(a.size()*2);
+ events[1].reserve(b.size()*2);
+
+ for(unsigned n = 0; n < 2; n++) {
+ unsigned sz = n ? b.size() : a.size();
+ events[n].reserve(sz*2);
+ for(unsigned i = 0; i < sz; i++) {
+ Rect r = n ? b[i] : a[i];
+ events[n].emplace_back(r[d].min(), i, false);
+ events[n].emplace_back(r[d].max(), i, true);
+ }
+ std::sort(events[n].begin(), events[n].end());
+ }
+
+ std::vector<unsigned> open[2];
+ bool n = events[1].front() < events[0].front();
+ { // Declaring the loop counters outside the for statement keeps some legacy compilers (MS VS C++) happy.
+ unsigned i[] = {0,0};
+ for(; i[n] < events[n].size();) {
+ unsigned ix = events[n][i[n]].ix;
+ bool closing = events[n][i[n]].closing;
+ //std::cout << n << "[" << ix << "] - " << (closing ? "closer" : "opener") << "\n";
+ if(closing) {
+ open[n].erase(std::find(open[n].begin(), open[n].end(), ix));
+ } else {
+ if(n) {
+ //n = 1
+ //opening a B, add to all open a
+ for(unsigned int jx : open[0]) {
+ if(a[jx][1-d].intersects(b[ix][1-d])) {
+ pairs[jx].push_back(ix);
+ }
+ }
+ } else {
+ //n = 0
+ //opening an A, add all open b
+ for(unsigned int jx : open[1]) {
+ if(b[jx][1-d].intersects(a[ix][1-d])) {
+ pairs[ix].push_back(jx);
+ }
+ }
+ }
+ open[n].push_back(ix);
+ }
+ i[n]++;
+ if(i[n]>=events[n].size()) {break;}
+ n = (events[!n][i[!n]] < events[n][i[n]]) ? !n : n;
+ }}
+ return pairs;
+}
+
+//Fake cull, until the switch to the real sweep is made.
+std::vector<std::vector<unsigned> > fake_cull(unsigned a, unsigned b) {
+ std::vector<std::vector<unsigned> > ret;
+
+ std::vector<unsigned> all;
+ for(unsigned j = 0; j < b; j++)
+ all.push_back(j);
+
+ for(unsigned i = 0; i < a; i++)
+ ret.push_back(all);
+
+ return ret;
+}
+
+} // namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
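A small usage sketch for the single-list sweep_bounds above. The two-point Rect constructor and the Geom::X dimension constant are assumed from the rest of the library; the returned pairs are candidates whose bounding rectangles overlap, not confirmed curve intersections.

    #include <2geom/rect.h>
    #include <2geom/sweep-bounds.h>
    #include <iostream>
    #include <vector>

    int main()
    {
        std::vector<Geom::Rect> boxes;
        boxes.push_back(Geom::Rect(Geom::Point(0, 0), Geom::Point(2, 2)));
        boxes.push_back(Geom::Rect(Geom::Point(1, 1), Geom::Point(3, 3)));   // overlaps boxes[0]
        boxes.push_back(Geom::Rect(Geom::Point(5, 5), Geom::Point(6, 6)));   // disjoint from both

        std::vector<std::vector<unsigned> > pairs = Geom::sweep_bounds(boxes, Geom::X);
        for (unsigned i = 0; i < pairs.size(); ++i) {
            for (unsigned j : pairs[i]) {
                std::cout << "boxes " << i << " and " << j << " may intersect\n";   // expected: 0 and 1
            }
        }
        return 0;
    }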
diff --git a/src/2geom/transforms.cpp b/src/2geom/transforms.cpp
new file mode 100644
index 0000000..41d3952
--- /dev/null
+++ b/src/2geom/transforms.cpp
@@ -0,0 +1,205 @@
+/**
+ * @file
+ * @brief Affine transformation classes
+ *//*
+ * Authors:
+ * ? <?@?.?>
+ * Krzysztof Kosiński <tweenk.pl@gmail.com>
+ * Johan Engelen
+ *
+ * Copyright ?-2012 Authors
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ */
+
+#include <boost/concept_check.hpp>
+#include <2geom/point.h>
+#include <2geom/transforms.h>
+#include <2geom/rect.h>
+
+namespace Geom {
+
+/** @brief Zoom between rectangles.
+ * Given two rectangles, compute a zoom that maps one to the other.
+ * Rectangles are assumed to have the same aspect ratio. */
+Zoom Zoom::map_rect(Rect const &old_r, Rect const &new_r)
+{
+ Zoom ret;
+ ret._scale = new_r.width() / old_r.width();
+ ret._trans = new_r.min() - old_r.min();
+ return ret;
+}
+
+// Point transformation methods.
+Point &Point::operator*=(Translate const &t)
+{
+ _pt[X] += t.vec[X];
+ _pt[Y] += t.vec[Y];
+ return *this;
+}
+Point &Point::operator*=(Scale const &s)
+{
+ _pt[X] *= s.vec[X];
+ _pt[Y] *= s.vec[Y];
+ return *this;
+}
+Point &Point::operator*=(Rotate const &r)
+{
+ double x = _pt[X], y = _pt[Y];
+ _pt[X] = x * r.vec[X] - y * r.vec[Y];
+ _pt[Y] = y * r.vec[X] + x * r.vec[Y];
+ return *this;
+}
+Point &Point::operator*=(HShear const &h)
+{
+ _pt[X] += h.f * _pt[Y];
+ return *this;
+}
+Point &Point::operator*=(VShear const &v)
+{
+ _pt[Y] += v.f * _pt[X];
+ return *this;
+}
+Point &Point::operator*=(Zoom const &z)
+{
+ _pt[X] += z._trans[X];
+ _pt[Y] += z._trans[Y];
+ _pt[X] *= z._scale;
+ _pt[Y] *= z._scale;
+ return *this;
+}
+
+// Affine multiplication methods.
+
+/** @brief Combine this transformation with a translation. */
+Affine &Affine::operator*=(Translate const &t) {
+ _c[4] += t[X];
+ _c[5] += t[Y];
+ return *this;
+}
+
+/** @brief Combine this transformation with scaling. */
+Affine &Affine::operator*=(Scale const &s) {
+ _c[0] *= s[X]; _c[1] *= s[Y];
+ _c[2] *= s[X]; _c[3] *= s[Y];
+ _c[4] *= s[X]; _c[5] *= s[Y];
+ return *this;
+}
+
+/** @brief Combine this transformation with a rotation. */
+Affine &Affine::operator*=(Rotate const &r) {
+ // TODO: we just convert the Rotate to an Affine and use the existing operator*=()
+ // is there a better way?
+ *this *= (Affine) r;
+ return *this;
+}
+
+/** @brief Combine this transformation with horizontal shearing (skew). */
+Affine &Affine::operator*=(HShear const &h) {
+ _c[0] += h.f * _c[1];
+ _c[2] += h.f * _c[3];
+ _c[4] += h.f * _c[5];
+ return *this;
+}
+
+/** @brief Combine this transformation with vertical shearing (skew). */
+Affine &Affine::operator*=(VShear const &v) {
+ _c[1] += v.f * _c[0];
+ _c[3] += v.f * _c[2];
+ _c[5] += v.f * _c[4];
+ return *this;
+}
+
+Affine &Affine::operator*=(Zoom const &z) {
+ _c[0] *= z._scale; _c[1] *= z._scale;
+ _c[2] *= z._scale; _c[3] *= z._scale;
+ _c[4] += z._trans[X]; _c[5] += z._trans[Y];
+ _c[4] *= z._scale; _c[5] *= z._scale;
+ return *this;
+}
+
+Affine Rotate::around(Point const &p, Coord angle)
+{
+ Affine result = Translate(-p) * Rotate(angle) * Translate(p);
+ return result;
+}
+
+Affine reflection(Point const & vector, Point const & origin)
+{
+ Geom::Point vn = unit_vector(vector);
+ Coord cx2 = vn[X] * vn[X];
+ Coord cy2 = vn[Y] * vn[Y];
+ Coord c2xy = 2 * vn[X] * vn[Y];
+ Affine mirror ( cx2 - cy2, c2xy,
+ c2xy, cy2 - cx2,
+ 0, 0 );
+ return Translate(-origin) * mirror * Translate(origin);
+}
+
+// this checks whether the requirements of TransformConcept are satisfied for all transforms.
+// if you add a new transform type, include it here!
+void check_transforms()
+{
+#ifdef BOOST_CONCEPT_ASSERT
+ BOOST_CONCEPT_ASSERT((TransformConcept<Translate>));
+ BOOST_CONCEPT_ASSERT((TransformConcept<Scale>));
+ BOOST_CONCEPT_ASSERT((TransformConcept<Rotate>));
+ BOOST_CONCEPT_ASSERT((TransformConcept<HShear>));
+ BOOST_CONCEPT_ASSERT((TransformConcept<VShear>));
+ BOOST_CONCEPT_ASSERT((TransformConcept<Zoom>));
+ BOOST_CONCEPT_ASSERT((TransformConcept<Affine>)); // Affine is also a transform
+#endif
+
+ // check inter-transform multiplication
+ Affine m;
+ Translate t(Translate::identity());
+ Scale s(Scale::identity());
+ Rotate r(Rotate::identity());
+ HShear h(HShear::identity());
+ VShear v(VShear::identity());
+ Zoom z(Zoom::identity());
+
+ // notice that the first column is always the same and enumerates all transform types,
+ // while the second one changes to each transform type in turn.
+ // cppcheck-suppress redundantAssignment
+ m = t * t; m = t * s; m = t * r; m = t * h; m = t * v; m = t * z;
+ m = s * t; m = s * s; m = s * r; m = s * h; m = s * v; m = s * z;
+ m = r * t; m = r * s; m = r * r; m = r * h; m = r * v; m = r * z;
+ m = h * t; m = h * s; m = h * r; m = h * h; m = h * v; m = h * z;
+ m = v * t; m = v * s; m = v * r; m = v * h; m = v * v; m = v * z;
+ m = z * t; m = z * s; m = z * r; m = z * h; m = z * v; m = z * z;
+}
+
+} // namespace Geom
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
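A brief sketch composing the transforms defined above. 2geom multiplies transforms in application order (the leftmost factor is applied first, as Rotate::around illustrates) and transforms points with Point * Affine; M_PI and the sample coordinates are illustrative only.

    #include <2geom/point.h>
    #include <2geom/transforms.h>
    #include <cmath>
    #include <iostream>

    int main()
    {
        using namespace Geom;

        // Rotate (1,0) by 90 degrees counterclockwise about the point (1,1).
        Affine rot = Rotate::around(Point(1, 1), M_PI / 2);
        Point p = Point(1, 0) * rot;                           // expected near (2, 1)

        // Reflect the result across the Y axis: the line through the origin along (0,1).
        Point q = p * reflection(Point(0, 1), Point(0, 0));    // expected near (-2, 1)

        std::cout << p[X] << "," << p[Y] << "  " << q[X] << "," << q[Y] << "\n";
        return 0;
    }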
diff --git a/src/2geom/utils.cpp b/src/2geom/utils.cpp
new file mode 100644
index 0000000..83d93cc
--- /dev/null
+++ b/src/2geom/utils.cpp
@@ -0,0 +1,86 @@
+/** Various utility functions.
+ *
+ * Copyright 2008 Marco Cecchetti <mrcekets at gmail.com>
+ * Copyright 2007 Johan Engelen <goejendaagh@zonnet.nl>
+ * Copyright 2006 Michael G. Sloan <mgsloan@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ */
+
+
+#include <2geom/utils.h>
+
+
+namespace Geom
+{
+
+// Fill bc with all binomial coefficients of degree n, i.e. bc[k] = C(n, k) for k = 0..n.
+void binomial_coefficients(std::vector<size_t>& bc, std::size_t n)
+{
+ size_t s = n+1;
+ bc.clear();
+ bc.resize(s);
+ bc[0] = 1;
+ for (size_t i = 1; i < n; ++i)
+ {
+ size_t k = i >> 1;
+ if (i & 1u)
+ {
+ bc[k+1] = bc[k] << 1;
+ }
+ for (size_t j = k; j > 0; --j)
+ {
+ bc[j] += bc[j-1];
+ }
+ }
+ s >>= 1;
+ for (size_t i = 0; i < s; ++i)
+ {
+ bc[n-i] = bc[i];
+ }
+}
+
+} // end namespace Geom
+
+
+
+
+
+
+
+
+
+
+
+/*
+ Local Variables:
+ mode:c++
+ c-file-style:"stroustrup"
+ c-file-offsets:((innamespace . 0)(inline-open . 0)(case-label . +))
+ indent-tabs-mode:nil
+ fill-column:99
+ End:
+*/
+// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:fileencoding=utf-8:textwidth=99 :
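A worked check for binomial_coefficients above: for n = 4 the main loop builds the left half of the Pascal-triangle row and the final loop mirrors it, giving 1 4 6 4 1. A minimal sketch:

    #include <2geom/utils.h>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main()
    {
        std::vector<std::size_t> bc;
        Geom::binomial_coefficients(bc, 4);    // fills bc with C(4,0) .. C(4,4)
        for (std::size_t c : bc) {
            std::cout << c << ' ';             // expected output: 1 4 6 4 1
        }
        std::cout << '\n';
        return 0;
    }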