author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
commit     26a029d407be480d791972afb5975cf62c9360a6 (patch)
tree       f435a8308119effd964b339f76abb83a57c29483  /third_party/rust/regex-automata/src/meta
parent     Initial commit. (diff)
Adding upstream version 124.0.1. (upstream/124.0.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/regex-automata/src/meta')
-rw-r--r--  third_party/rust/regex-automata/src/meta/error.rs          241
-rw-r--r--  third_party/rust/regex-automata/src/meta/limited.rs        267
-rw-r--r--  third_party/rust/regex-automata/src/meta/literal.rs         81
-rw-r--r--  third_party/rust/regex-automata/src/meta/mod.rs             62
-rw-r--r--  third_party/rust/regex-automata/src/meta/regex.rs         3649
-rw-r--r--  third_party/rust/regex-automata/src/meta/reverse_inner.rs  220
-rw-r--r--  third_party/rust/regex-automata/src/meta/stopat.rs         224
-rw-r--r--  third_party/rust/regex-automata/src/meta/strategy.rs      1908
-rw-r--r--  third_party/rust/regex-automata/src/meta/wrappers.rs      1348
9 files changed, 8000 insertions, 0 deletions
diff --git a/third_party/rust/regex-automata/src/meta/error.rs b/third_party/rust/regex-automata/src/meta/error.rs
new file mode 100644
index 0000000000..ea9a3160e0
--- /dev/null
+++ b/third_party/rust/regex-automata/src/meta/error.rs
@@ -0,0 +1,241 @@
+use regex_syntax::{ast, hir};
+
+use crate::{nfa, util::search::MatchError, PatternID};
+
+/// An error that occurs when construction of a `Regex` fails.
+///
+/// A build error is generally a result of one of two possible failure
+/// modes. First is a parse or syntax error in the concrete syntax of a
+/// pattern. Second is that the construction of the underlying regex matcher
+/// fails, usually because it gets too big with respect to limits like
+/// [`Config::nfa_size_limit`](crate::meta::Config::nfa_size_limit).
+///
+/// This error provides only limited introspection capabilities. You can:
+///
+/// * Ask for the [`PatternID`] of the pattern that caused an error, if one
+/// is available. This is available for things like syntax errors, but not for
+/// cases where build limits are exceeded.
+/// * Ask for the underlying syntax error, but only if the error is a syntax
+/// error.
+/// * Ask for a human readable message corresponding to the underlying error.
+/// * The `BuildError::source` method (from the `std::error::Error`
+/// trait implementation) may be used to query for an underlying error if one
+/// exists. There are no API guarantees about which error is returned.
+///
+/// When the `std` feature is enabled, this implements `std::error::Error`.
+#[derive(Clone, Debug)]
+pub struct BuildError {
+ kind: BuildErrorKind,
+}
+
+#[derive(Clone, Debug)]
+enum BuildErrorKind {
+ Syntax { pid: PatternID, err: regex_syntax::Error },
+ NFA(nfa::thompson::BuildError),
+}
+
+impl BuildError {
+ /// If it is known which pattern ID caused this build error to occur, then
+ /// this method returns it.
+ ///
+ /// Some errors are not associated with a particular pattern. However, any
+ /// errors that occur as part of parsing a pattern are guaranteed to be
+ /// associated with a pattern ID.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, PatternID};
+ ///
+ /// let err = Regex::new_many(&["a", "b", r"\p{Foo}", "c"]).unwrap_err();
+ /// assert_eq!(Some(PatternID::must(2)), err.pattern());
+ /// ```
+ pub fn pattern(&self) -> Option<PatternID> {
+ match self.kind {
+ BuildErrorKind::Syntax { pid, .. } => Some(pid),
+ _ => None,
+ }
+ }
+
+ /// If this error occurred because the regex exceeded the configured size
+ /// limit before being built, then this returns the configured size limit.
+ ///
+ /// The limit returned is what was configured, and corresponds to the
+ /// maximum amount of heap usage in bytes.
+ pub fn size_limit(&self) -> Option<usize> {
+ match self.kind {
+ BuildErrorKind::NFA(ref err) => err.size_limit(),
+ _ => None,
+ }
+ }
+
+ /// If this error corresponds to a syntax error, then a reference to it is
+ /// returned by this method.
+ pub fn syntax_error(&self) -> Option<&regex_syntax::Error> {
+ match self.kind {
+ BuildErrorKind::Syntax { ref err, .. } => Some(err),
+ _ => None,
+ }
+ }
+
+ pub(crate) fn ast(pid: PatternID, err: ast::Error) -> BuildError {
+ let err = regex_syntax::Error::from(err);
+ BuildError { kind: BuildErrorKind::Syntax { pid, err } }
+ }
+
+ pub(crate) fn hir(pid: PatternID, err: hir::Error) -> BuildError {
+ let err = regex_syntax::Error::from(err);
+ BuildError { kind: BuildErrorKind::Syntax { pid, err } }
+ }
+
+ pub(crate) fn nfa(err: nfa::thompson::BuildError) -> BuildError {
+ BuildError { kind: BuildErrorKind::NFA(err) }
+ }
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for BuildError {
+ fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+ match self.kind {
+ BuildErrorKind::Syntax { ref err, .. } => Some(err),
+ BuildErrorKind::NFA(ref err) => Some(err),
+ }
+ }
+}
+
+impl core::fmt::Display for BuildError {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ match self.kind {
+ BuildErrorKind::Syntax { pid, .. } => {
+ write!(f, "error parsing pattern {}", pid.as_usize())
+ }
+ BuildErrorKind::NFA(_) => write!(f, "error building NFA"),
+ }
+ }
+}
+
+/// An error that occurs when a search should be retried.
+///
+/// This retry error distinguishes between two different failure modes.
+///
+/// The first is one where potential quadratic behavior has been detected.
+/// In this case, whatever optimization that led to this behavior should be
+/// stopped, and the next best strategy should be used.
+///
+/// The second indicates that the underlying regex engine has failed for some
+/// reason. This usually occurs because either a lazy DFA's cache has become
+/// ineffective or because a non-ASCII byte has been seen *and* a Unicode word
+/// boundary was used in one of the patterns. In this failure case, a different
+/// regex engine that won't fail in these ways (PikeVM, backtracker or the
+/// one-pass DFA) should be used.
+///
+/// This is an internal error only and should never bleed into the public
+/// API.
+#[derive(Debug)]
+pub(crate) enum RetryError {
+ Quadratic(RetryQuadraticError),
+ Fail(RetryFailError),
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for RetryError {}
+
+impl core::fmt::Display for RetryError {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ match *self {
+ RetryError::Quadratic(ref err) => err.fmt(f),
+ RetryError::Fail(ref err) => err.fmt(f),
+ }
+ }
+}
+
+impl From<MatchError> for RetryError {
+ fn from(merr: MatchError) -> RetryError {
+ RetryError::Fail(RetryFailError::from(merr))
+ }
+}
+
+/// An error that occurs when potential quadratic behavior has been detected
+/// when applying either the "reverse suffix" or "reverse inner" optimizations.
+///
+/// When this error occurs, callers should abandon the "reverse" optimization
+/// and use a normal forward search.
+#[derive(Debug)]
+pub(crate) struct RetryQuadraticError(());
+
+impl RetryQuadraticError {
+ pub(crate) fn new() -> RetryQuadraticError {
+ RetryQuadraticError(())
+ }
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for RetryQuadraticError {}
+
+impl core::fmt::Display for RetryQuadraticError {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ write!(f, "regex engine gave up to avoid quadratic behavior")
+ }
+}
+
+impl From<RetryQuadraticError> for RetryError {
+ fn from(err: RetryQuadraticError) -> RetryError {
+ RetryError::Quadratic(err)
+ }
+}
+
+/// An error that occurs when a regex engine "gives up" for some reason before
+/// finishing a search. Usually this occurs because of heuristic Unicode word
+/// boundary support or because of ineffective cache usage in the lazy DFA.
+///
+/// When this error occurs, callers should retry the regex search with a
+/// different regex engine.
+///
+/// Note that this has convenient `From` impls that will automatically
+/// convert a `MatchError` into this error. This works because the meta
+/// regex engine internals guarantee that errors like `HaystackTooLong` and
+/// `UnsupportedAnchored` will never occur. The only errors left are `Quit` and
+/// `GaveUp`, which both correspond to this "failure" error.
+#[derive(Debug)]
+pub(crate) struct RetryFailError {
+ offset: usize,
+}
+
+impl RetryFailError {
+ pub(crate) fn from_offset(offset: usize) -> RetryFailError {
+ RetryFailError { offset }
+ }
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for RetryFailError {}
+
+impl core::fmt::Display for RetryFailError {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ write!(f, "regex engine failed at offset {:?}", self.offset)
+ }
+}
+
+impl From<RetryFailError> for RetryError {
+ fn from(err: RetryFailError) -> RetryError {
+ RetryError::Fail(err)
+ }
+}
+
+impl From<MatchError> for RetryFailError {
+ fn from(merr: MatchError) -> RetryFailError {
+ use crate::util::search::MatchErrorKind::*;
+
+ match *merr.kind() {
+ Quit { offset, .. } => RetryFailError::from_offset(offset),
+ GaveUp { offset } => RetryFailError::from_offset(offset),
+ // These can never occur because we avoid them by construction
+ // or with higher level control flow logic. For example, the
+ // backtracker's wrapper will never hand out a backtracker engine
+ // when the haystack would be too long.
+ HaystackTooLong { .. } | UnsupportedAnchored { .. } => {
+ unreachable!("found impossible error in meta engine: {}", merr)
+ }
+ }
+ }
+}
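
The introspection methods above compose as in the following minimal sketch. The patterns are illustrative (mirroring the doc-test for `BuildError::pattern`), while `Regex::new_many`, `BuildError::pattern`, `BuildError::syntax_error`, and `BuildError::size_limit` are the items defined in this file and in `meta::Regex`:

    use regex_automata::meta::Regex;

    fn main() {
        // Pattern 1 has a syntax error (\p{Foo} is not a valid Unicode
        // class), so the error carries its PatternID and the underlying
        // regex-syntax error, but no size limit.
        let err = Regex::new_many(&["a", r"\p{Foo}"]).unwrap_err();
        assert_eq!(Some(1), err.pattern().map(|pid| pid.as_usize()));
        assert!(err.syntax_error().is_some());
        assert!(err.size_limit().is_none());
    }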
diff --git a/third_party/rust/regex-automata/src/meta/limited.rs b/third_party/rust/regex-automata/src/meta/limited.rs
new file mode 100644
index 0000000000..192a2625e4
--- /dev/null
+++ b/third_party/rust/regex-automata/src/meta/limited.rs
@@ -0,0 +1,267 @@
+/*!
+This module defines two bespoke reverse DFA searching routines. (One for the
+lazy DFA and one for the fully compiled DFA.) These routines differ from the
+usual ones by permitting the caller to specify a minimum starting position.
+That is, the search will begin at `input.end()` and will usually stop at
+`input.start()`, unless `min_start > input.start()`, in which case, the search
+will stop at `min_start`.
+
+In other words, this lets you say, "no, the search must not extend past this
+point, even if it's within the bounds of the given `Input`." And if the search
+*does* want to go past that point, it stops and returns a "may be quadratic"
+error, which indicates that the caller should retry using some other technique.
+
+These routines specifically exist to protect against quadratic behavior when
+employing the "reverse suffix" and "reverse inner" optimizations. Without the
+backstop these routines provide, it is possible for parts of the haystack to
+get re-scanned over and over again. The backstop not only prevents this, but
+*tells you when it is happening* so that you can change the strategy.
+
+Why can't we just use the normal search routines? We could use the normal
+search routines and just set the start bound on the provided `Input` to our
+`min_start` position. The problem here is that it's impossible to distinguish
+between "no match because we reached the end of input" and "determined there
+was no match well before the end of input." The former case is what we care
+about with respect to quadratic behavior. The latter case is totally fine.
+
+Why don't we modify the normal search routines to report the position at which
+the search stops? I considered this, and I still wonder if it is indeed the
+right thing to do. However, I think the straight-forward thing to do there
+would be to complicate the return type signature of almost every search routine
+in this crate, which I really do not want to do. It therefore might make more
+sense to provide a richer way for search routines to report meta data, but that
+was beyond my bandwidth to work on at the time of writing.
+
+See the 'opt/reverse-inner' and 'opt/reverse-suffix' benchmarks in rebar for a
+real demonstration of how quadratic behavior is mitigated.
+*/
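
The backstop described above can be illustrated with a stripped-down sketch that is not part of this crate's API (every name below is hypothetical): a reverse scan that refuses to cross `min_start` and reports the refusal, analogous to returning `RetryError::Quadratic` so the caller can switch strategies.

    /// Scan `haystack` backwards from `end` looking for `target`, but refuse
    /// to cross `min_start`: report the refusal instead of silently
    /// re-scanning bytes an earlier search already covered.
    fn rev_find_with_backstop(
        haystack: &[u8],
        end: usize,
        min_start: usize,
        target: u8,
    ) -> Result<Option<usize>, &'static str> {
        let mut at = end;
        while at > 0 {
            at -= 1;
            if at < min_start {
                return Err("crossed min_start: retry with another strategy");
            }
            if haystack[at] == target {
                return Ok(Some(at));
            }
        }
        Ok(None)
    }

    fn main() {
        let hay = b"abcxdef";
        assert_eq!(Ok(Some(3)), rev_find_with_backstop(hay, hay.len(), 0, b'x'));
        assert_eq!(
            Err("crossed min_start: retry with another strategy"),
            rev_find_with_backstop(hay, hay.len(), 5, b'x'),
        );
    }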
+
+use crate::{
+ meta::error::{RetryError, RetryQuadraticError},
+ HalfMatch, Input, MatchError,
+};
+
+#[cfg(feature = "dfa-build")]
+pub(crate) fn dfa_try_search_half_rev(
+ dfa: &crate::dfa::dense::DFA<alloc::vec::Vec<u32>>,
+ input: &Input<'_>,
+ min_start: usize,
+) -> Result<Option<HalfMatch>, RetryError> {
+ use crate::dfa::Automaton;
+
+ let mut mat = None;
+ let mut sid = dfa.start_state_reverse(input)?;
+ if input.start() == input.end() {
+ dfa_eoi_rev(dfa, input, &mut sid, &mut mat)?;
+ return Ok(mat);
+ }
+ let mut at = input.end() - 1;
+ loop {
+ sid = dfa.next_state(sid, input.haystack()[at]);
+ if dfa.is_special_state(sid) {
+ if dfa.is_match_state(sid) {
+ let pattern = dfa.match_pattern(sid, 0);
+ // Since reverse searches report the beginning of a
+ // match and the beginning is inclusive (not exclusive
+ // like the end of a match), we add 1 to make it
+ // inclusive.
+ mat = Some(HalfMatch::new(pattern, at + 1));
+ } else if dfa.is_dead_state(sid) {
+ return Ok(mat);
+ } else if dfa.is_quit_state(sid) {
+ if mat.is_some() {
+ return Ok(mat);
+ }
+ return Err(MatchError::quit(input.haystack()[at], at).into());
+ }
+ }
+ if at == input.start() {
+ break;
+ }
+ at -= 1;
+ if at < min_start {
+ trace!(
+ "reached position {} which is before the previous literal \
+ match, quitting to avoid quadratic behavior",
+ at,
+ );
+ return Err(RetryError::Quadratic(RetryQuadraticError::new()));
+ }
+ }
+ let was_dead = dfa.is_dead_state(sid);
+ dfa_eoi_rev(dfa, input, &mut sid, &mut mat)?;
+ // If we reach the beginning of the search and we could otherwise still
+ // potentially keep matching if there was more to match, then we actually
+ // return an error to indicate giving up on this optimization. Why? Because
+ // we can't prove that the real match begins at where we would report it.
+ //
+ // This only happens when all of the following are true:
+ //
+ // 1) We reach the starting point of our search span.
+ // 2) The match we found is before the starting point.
+ // 3) The FSM reports we could possibly find a longer match.
+ //
+ // We need (1) because otherwise the search stopped before the starting
+ // point and there is no possible way to find a more leftmost position.
+ //
+ // We need (2) because if the match found has an offset equal to the minimum
+ // possible offset, then there is no possible more leftmost match.
+ //
+ // We need (3) because if the FSM couldn't continue anyway (i.e., it's in
+ // a dead state), then we know we couldn't find anything more leftmost
+ // than what we have. (We have to check the state we were in prior to the
+ // EOI transition since the EOI transition will usually bring us to a dead
+ // state by virtue of representing the end-of-input.)
+ if at == input.start()
+ && mat.map_or(false, |m| m.offset() > input.start())
+ && !was_dead
+ {
+ trace!(
+ "reached beginning of search at offset {} without hitting \
+ a dead state, quitting to avoid potential false positive match",
+ at,
+ );
+ return Err(RetryError::Quadratic(RetryQuadraticError::new()));
+ }
+ Ok(mat)
+}
+
+#[cfg(feature = "hybrid")]
+pub(crate) fn hybrid_try_search_half_rev(
+ dfa: &crate::hybrid::dfa::DFA,
+ cache: &mut crate::hybrid::dfa::Cache,
+ input: &Input<'_>,
+ min_start: usize,
+) -> Result<Option<HalfMatch>, RetryError> {
+ let mut mat = None;
+ let mut sid = dfa.start_state_reverse(cache, input)?;
+ if input.start() == input.end() {
+ hybrid_eoi_rev(dfa, cache, input, &mut sid, &mut mat)?;
+ return Ok(mat);
+ }
+ let mut at = input.end() - 1;
+ loop {
+ sid = dfa
+ .next_state(cache, sid, input.haystack()[at])
+ .map_err(|_| MatchError::gave_up(at))?;
+ if sid.is_tagged() {
+ if sid.is_match() {
+ let pattern = dfa.match_pattern(cache, sid, 0);
+ // Since reverse searches report the beginning of a
+ // match and the beginning is inclusive (not exclusive
+ // like the end of a match), we add 1 to make it
+ // inclusive.
+ mat = Some(HalfMatch::new(pattern, at + 1));
+ } else if sid.is_dead() {
+ return Ok(mat);
+ } else if sid.is_quit() {
+ if mat.is_some() {
+ return Ok(mat);
+ }
+ return Err(MatchError::quit(input.haystack()[at], at).into());
+ }
+ }
+ if at == input.start() {
+ break;
+ }
+ at -= 1;
+ if at < min_start {
+ trace!(
+ "reached position {} which is before the previous literal \
+ match, quitting to avoid quadratic behavior",
+ at,
+ );
+ return Err(RetryError::Quadratic(RetryQuadraticError::new()));
+ }
+ }
+ let was_dead = sid.is_dead();
+ hybrid_eoi_rev(dfa, cache, input, &mut sid, &mut mat)?;
+ // See the comments in the full DFA routine above for why we need this.
+ if at == input.start()
+ && mat.map_or(false, |m| m.offset() > input.start())
+ && !was_dead
+ {
+ trace!(
+ "reached beginning of search at offset {} without hitting \
+ a dead state, quitting to avoid potential false positive match",
+ at,
+ );
+ return Err(RetryError::Quadratic(RetryQuadraticError::new()));
+ }
+ Ok(mat)
+}
+
+#[cfg(feature = "dfa-build")]
+#[cfg_attr(feature = "perf-inline", inline(always))]
+fn dfa_eoi_rev(
+ dfa: &crate::dfa::dense::DFA<alloc::vec::Vec<u32>>,
+ input: &Input<'_>,
+ sid: &mut crate::util::primitives::StateID,
+ mat: &mut Option<HalfMatch>,
+) -> Result<(), MatchError> {
+ use crate::dfa::Automaton;
+
+ let sp = input.get_span();
+ if sp.start > 0 {
+ let byte = input.haystack()[sp.start - 1];
+ *sid = dfa.next_state(*sid, byte);
+ if dfa.is_match_state(*sid) {
+ let pattern = dfa.match_pattern(*sid, 0);
+ *mat = Some(HalfMatch::new(pattern, sp.start));
+ } else if dfa.is_quit_state(*sid) {
+ if mat.is_some() {
+ return Ok(());
+ }
+ return Err(MatchError::quit(byte, sp.start - 1));
+ }
+ } else {
+ *sid = dfa.next_eoi_state(*sid);
+ if dfa.is_match_state(*sid) {
+ let pattern = dfa.match_pattern(*sid, 0);
+ *mat = Some(HalfMatch::new(pattern, 0));
+ }
+ // N.B. We don't have to check 'is_quit' here because the EOI
+ // transition can never lead to a quit state.
+ debug_assert!(!dfa.is_quit_state(*sid));
+ }
+ Ok(())
+}
+
+#[cfg(feature = "hybrid")]
+#[cfg_attr(feature = "perf-inline", inline(always))]
+fn hybrid_eoi_rev(
+ dfa: &crate::hybrid::dfa::DFA,
+ cache: &mut crate::hybrid::dfa::Cache,
+ input: &Input<'_>,
+ sid: &mut crate::hybrid::LazyStateID,
+ mat: &mut Option<HalfMatch>,
+) -> Result<(), MatchError> {
+ let sp = input.get_span();
+ if sp.start > 0 {
+ let byte = input.haystack()[sp.start - 1];
+ *sid = dfa
+ .next_state(cache, *sid, byte)
+ .map_err(|_| MatchError::gave_up(sp.start))?;
+ if sid.is_match() {
+ let pattern = dfa.match_pattern(cache, *sid, 0);
+ *mat = Some(HalfMatch::new(pattern, sp.start));
+ } else if sid.is_quit() {
+ if mat.is_some() {
+ return Ok(());
+ }
+ return Err(MatchError::quit(byte, sp.start - 1));
+ }
+ } else {
+ *sid = dfa
+ .next_eoi_state(cache, *sid)
+ .map_err(|_| MatchError::gave_up(sp.start))?;
+ if sid.is_match() {
+ let pattern = dfa.match_pattern(cache, *sid, 0);
+ *mat = Some(HalfMatch::new(pattern, 0));
+ }
+ // N.B. We don't have to check 'is_quit' here because the EOI
+ // transition can never lead to a quit state.
+ debug_assert!(!sid.is_quit());
+ }
+ Ok(())
+}
diff --git a/third_party/rust/regex-automata/src/meta/literal.rs b/third_party/rust/regex-automata/src/meta/literal.rs
new file mode 100644
index 0000000000..a68b93b7a6
--- /dev/null
+++ b/third_party/rust/regex-automata/src/meta/literal.rs
@@ -0,0 +1,81 @@
+use alloc::{vec, vec::Vec};
+
+use regex_syntax::hir::Hir;
+
+use crate::{meta::regex::RegexInfo, util::search::MatchKind};
+
+/// Pull out an alternation of literals from the given sequence of HIR
+/// expressions.
+///
+/// There are numerous ways for this to fail. Generally, this only applies
+/// to regexes of the form 'foo|bar|baz|...|quux'. It can also fail if there
+/// are "too few" alternates, in which case, the regex engine is likely faster.
+///
+/// And currently, this only returns something when 'hirs.len() == 1'.
+pub(crate) fn alternation_literals(
+ info: &RegexInfo,
+ hirs: &[&Hir],
+) -> Option<Vec<Vec<u8>>> {
+ use regex_syntax::hir::{HirKind, Literal};
+
+ // Might as well skip the work below if we know we can't build an
+ // Aho-Corasick searcher.
+ if !cfg!(feature = "perf-literal-multisubstring") {
+ return None;
+ }
+ // This is pretty hacky, but basically, if `is_alternation_literal` is
+ // true, then we can make several assumptions about the structure of our
+ // HIR. This is what justifies the `unreachable!` statements below.
+ if hirs.len() != 1
+ || !info.props()[0].look_set().is_empty()
+ || info.props()[0].explicit_captures_len() > 0
+ || !info.props()[0].is_alternation_literal()
+ || info.config().get_match_kind() != MatchKind::LeftmostFirst
+ {
+ return None;
+ }
+ let hir = &hirs[0];
+ let alts = match *hir.kind() {
+ HirKind::Alternation(ref alts) => alts,
+ _ => return None, // one literal isn't worth it
+ };
+
+ let mut lits = vec![];
+ for alt in alts {
+ let mut lit = vec![];
+ match *alt.kind() {
+ HirKind::Literal(Literal(ref bytes)) => {
+ lit.extend_from_slice(bytes)
+ }
+ HirKind::Concat(ref exprs) => {
+ for e in exprs {
+ match *e.kind() {
+ HirKind::Literal(Literal(ref bytes)) => {
+ lit.extend_from_slice(bytes);
+ }
+ _ => unreachable!("expected literal, got {:?}", e),
+ }
+ }
+ }
+ _ => unreachable!("expected literal or concat, got {:?}", alt),
+ }
+ lits.push(lit);
+ }
+ // Why do this? Well, when the number of literals is small, it's likely
+ // that we'll use the lazy DFA which is in turn likely to be faster than
+ // Aho-Corasick in such cases. Primarily because Aho-Corasick doesn't have
+ // a "lazy DFA" but either a contiguous NFA or a full DFA. We rarely use
+ // the latter because it is so hungry (in time and space), and the former
+ // is decently fast, but not as fast as a well oiled lazy DFA.
+ //
+ // However, once the number starts getting large, the lazy DFA is likely
+ // to start thrashing because of the modest default cache size. When
+ // exactly does this happen? Dunno. But at whatever point that is (we make
+ // a guess below based on ad hoc benchmarking), we'll want to cut over to
+ // Aho-Corasick, where even the contiguous NFA is likely to do much better.
+ if lits.len() < 3000 {
+ debug!("skipping Aho-Corasick because there are too few literals");
+ return None;
+ }
+ Some(lits)
+}
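
To see the shape this routine looks for, here is an illustrative check (a sketch, not this crate's code path) that uses the `regex-syntax` crate directly via `regex_syntax::parse`; the gate above consults the same `is_alternation_literal` property through `RegexInfo` rather than re-parsing:

    use regex_syntax::parse;

    fn main() {
        // 'foo|bar|baz' is an alternation of plain literals, so the
        // extraction above could, in principle, pull the literals out.
        let hir = parse("foo|bar|baz").unwrap();
        assert!(hir.properties().is_alternation_literal());

        // Any non-literal structure (here, the repetition 'a+') makes the
        // property false, and the extraction bails out early.
        let hir = parse("foo|ba+r").unwrap();
        assert!(!hir.properties().is_alternation_literal());
    }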
diff --git a/third_party/rust/regex-automata/src/meta/mod.rs b/third_party/rust/regex-automata/src/meta/mod.rs
new file mode 100644
index 0000000000..01f430fcb7
--- /dev/null
+++ b/third_party/rust/regex-automata/src/meta/mod.rs
@@ -0,0 +1,62 @@
+/*!
+Provides a regex matcher that composes several other regex matchers
+automatically.
+
+This module is home to a meta [`Regex`], which provides a convenient high
+level API for executing regular expressions in linear time.
+
+# Comparison with the `regex` crate
+
+A meta `Regex` is the implementation used directly by the `regex` crate.
+Indeed, the `regex` crate API is essentially just a light wrapper over a meta
+`Regex`. This means that if you need the full flexibility offered by this
+API, then you should be able to switch to using this API directly without
+any changes in match semantics or syntax. However, there are some API level
+differences:
+
+* The `regex` crate API returns match objects that include references to the
+haystack itself, which in turn makes it easy to access the matching strings
+without having to slice the haystack yourself. In contrast, a meta `Regex`
+returns match objects that only have offsets in them.
+* At time of writing, a meta `Regex` doesn't have some of the convenience
+routines that the `regex` crate has, such as replacements. Note though that
+[`Captures::interpolate_string`](crate::util::captures::Captures::interpolate_string)
+will handle the replacement string interpolation for you.
+* A meta `Regex` supports the [`Input`](crate::Input) abstraction, which
+provides a way to configure a search in more ways than is supported by the
+`regex` crate. For example, [`Input::anchored`](crate::Input::anchored) can
+be used to run an anchored search, regardless of whether the pattern is itself
+anchored with a `^`.
+* A meta `Regex` supports multi-pattern searching everywhere.
+Indeed, every [`Match`](crate::Match) returned by the search APIs
+includes a [`PatternID`](crate::PatternID) indicating which pattern
+matched. In the single pattern case, all matches correspond to
+[`PatternID::ZERO`](crate::PatternID::ZERO). In contrast, the `regex` crate
+has distinct `Regex` and `RegexSet` APIs. The former only supports a single
+pattern, while the latter supports multiple patterns but cannot report the
+offsets of a match.
+* A meta `Regex` provides the explicit capability of bypassing its internal
+memory pool for automatically acquiring mutable scratch space required by its
+internal regex engines. Namely, a [`Cache`] can be explicitly provided to lower
+level routines such as [`Regex::search_with`].
+
+*/
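
As a minimal sketch of the last point above (the pattern and haystack are arbitrary), a `Cache` can be created up front with `Regex::create_cache` and handed to `Regex::search_with`, bypassing the internal pool:

    use regex_automata::{meta::Regex, Input, Match};

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        let re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?;
        // Manage the mutable scratch space explicitly instead of letting the
        // Regex pluck a Cache from its internal pool on every search.
        let mut cache = re.create_cache();
        let input = Input::new("due on 2010-03-14, maybe");
        assert_eq!(
            Some(Match::must(0, 7..17)),
            re.search_with(&mut cache, &input),
        );
        Ok(())
    }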
+
+pub use self::{
+ error::BuildError,
+ regex::{
+ Builder, Cache, CapturesMatches, Config, FindMatches, Regex, Split,
+ SplitN,
+ },
+};
+
+mod error;
+#[cfg(any(feature = "dfa-build", feature = "hybrid"))]
+mod limited;
+mod literal;
+mod regex;
+mod reverse_inner;
+#[cfg(any(feature = "dfa-build", feature = "hybrid"))]
+mod stopat;
+mod strategy;
+mod wrappers;
diff --git a/third_party/rust/regex-automata/src/meta/regex.rs b/third_party/rust/regex-automata/src/meta/regex.rs
new file mode 100644
index 0000000000..3a04b14d88
--- /dev/null
+++ b/third_party/rust/regex-automata/src/meta/regex.rs
@@ -0,0 +1,3649 @@
+use core::{
+ borrow::Borrow,
+ panic::{RefUnwindSafe, UnwindSafe},
+};
+
+use alloc::{boxed::Box, sync::Arc, vec, vec::Vec};
+
+use regex_syntax::{
+ ast,
+ hir::{self, Hir},
+};
+
+use crate::{
+ meta::{
+ error::BuildError,
+ strategy::{self, Strategy},
+ wrappers,
+ },
+ nfa::thompson::WhichCaptures,
+ util::{
+ captures::{Captures, GroupInfo},
+ iter,
+ pool::{Pool, PoolGuard},
+ prefilter::Prefilter,
+ primitives::{NonMaxUsize, PatternID},
+ search::{HalfMatch, Input, Match, MatchKind, PatternSet, Span},
+ },
+};
+
+/// A type alias for our pool of meta::Cache that fixes the type parameters to
+/// what we use for the meta regex below.
+type CachePool = Pool<Cache, CachePoolFn>;
+
+/// Same as above, but for the guard returned by a pool.
+type CachePoolGuard<'a> = PoolGuard<'a, Cache, CachePoolFn>;
+
+/// The type of the closure we use to create new caches. We need to spell out
+/// all of the marker traits or else we risk leaking !MARKER impls.
+type CachePoolFn =
+ Box<dyn Fn() -> Cache + Send + Sync + UnwindSafe + RefUnwindSafe>;
+
+/// A regex matcher that works by composing several other regex matchers
+/// automatically.
+///
+/// In effect, a meta regex papers over a lot of the quirks or performance
+/// problems in each of the regex engines in this crate. Its goal is to provide
+/// an infallible and simple API that "just does the right thing" in the common
+/// case.
+///
+/// A meta regex is the implementation of a `Regex` in the `regex` crate.
+/// Indeed, the `regex` crate API is essentially just a light wrapper over
+/// this type. This includes the `regex` crate's `RegexSet` API!
+///
+/// # Composition
+///
+/// This is called a "meta" matcher precisely because it uses other regex
+/// matchers to provide a convenient high level regex API. Here are some
+/// examples of how other regex matchers are composed:
+///
+/// * When calling [`Regex::captures`], instead of immediately
+/// running a slower but more capable regex engine like the
+/// [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM), the meta regex engine
+/// will usually first look for the bounds of a match with a higher throughput
+/// regex engine like a [lazy DFA](crate::hybrid). Only when a match is found
+/// is a slower engine like `PikeVM` used to find the matching span for each
+/// capture group.
+/// * While higher throughput engines like the lazy DFA cannot handle
+/// Unicode word boundaries in general, they can still be used on pure ASCII
+/// haystacks by pretending that Unicode word boundaries are just plain ASCII
+/// word boundaries. However, if a haystack is not ASCII, the meta regex engine
+/// will automatically switch to a (possibly slower) regex engine that supports
+/// Unicode word boundaries in general.
+/// * In some cases where a regex pattern is just a simple literal or a small
+/// set of literals, an actual regex engine won't be used at all. Instead,
+/// substring or multi-substring search algorithms will be employed.
+///
+/// There are many other forms of composition happening too, but the above
+/// should give a general idea. In particular, it may perhaps be surprising
+/// that *multiple* regex engines might get executed for a single search. That
+/// is, the decision of what regex engine to use is not _just_ based on the
+/// pattern, but also based on the dynamic execution of the search itself.
+///
+/// The primary reason for this composition is performance. The fundamental
+/// tension is that the faster engines tend to be less capable, and the more
+/// capable engines tend to be slower.
+///
+/// Note that the forms of composition that are allowed are determined by
+/// compile time crate features and configuration. For example, if the `hybrid`
+/// feature isn't enabled, or if [`Config::hybrid`] has been disabled, then the
+/// meta regex engine will never use a lazy DFA.
+///
+/// # Synchronization and cloning
+///
+/// Most of the regex engines in this crate require some kind of mutable
+/// "scratch" space to read and write from while performing a search. Since
+/// a meta regex composes these regex engines, a meta regex also requires
+/// mutable scratch space. This scratch space is called a [`Cache`].
+///
+/// Most regex engines _also_ usually have a read-only component, typically
+/// a [Thompson `NFA`](crate::nfa::thompson::NFA).
+///
+/// In order to make the `Regex` API convenient, most of the routines hide
+/// the fact that a `Cache` is needed at all. To achieve this, a [memory
+/// pool](crate::util::pool::Pool) is used internally to retrieve `Cache`
+/// values in a thread safe way that also permits reuse. This in turn implies
+/// that every such search call requires some form of synchronization. Usually
+/// this synchronization is fast enough to not notice, but in some cases, it
+/// can be a bottleneck. This typically occurs when all of the following are
+/// true:
+///
+/// * The same `Regex` is shared across multiple threads simultaneously,
+/// usually via a [`util::lazy::Lazy`](crate::util::lazy::Lazy) or something
+/// similar from the `once_cell` or `lazy_static` crates.
+/// * The primary unit of work in each thread is a regex search.
+/// * Searches are run on very short haystacks.
+///
+/// This particular case can lead to high contention on the pool used by a
+/// `Regex` internally, which can in turn increase latency to a noticeable
+/// effect. This cost can be mitigated in one of the following ways:
+///
+/// * Use a distinct copy of a `Regex` in each thread, usually by cloning it.
+/// Cloning a `Regex` _does not_ do a deep copy of its read-only component.
+/// But it does lead to each `Regex` having its own memory pool, which in
+/// turn eliminates the problem of contention. In general, this technique should
+/// not result in any additional memory usage when compared to sharing the same
+/// `Regex` across multiple threads simultaneously.
+/// * Use lower level APIs, like [`Regex::search_with`], which permit passing
+/// a `Cache` explicitly. In this case, it is up to you to determine how best
+/// to provide a `Cache`. For example, you might put a `Cache` in thread-local
+/// storage if your use case allows for it.
+///
+/// Overall, this is an issue that happens rarely in practice, but it can
+/// happen.
+///
+/// # Warning: spin-locks may be used in alloc-only mode
+///
+/// When this crate is built without the `std` feature and the high level APIs
+/// on a `Regex` are used, then a spin-lock will be used to synchronize access
+/// to an internal pool of `Cache` values. This may be undesirable because
+/// a spin-lock is [effectively impossible to implement correctly in user
+/// space][spinlocks-are-bad]. That is, more concretely, the spin-lock could
+/// result in a deadlock.
+///
+/// [spinlocks-are-bad]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html
+///
+/// If one wants to avoid the use of spin-locks when the `std` feature is
+/// disabled, then you must use APIs that accept a `Cache` value explicitly.
+/// For example, [`Regex::search_with`].
+///
+/// # Example
+///
+/// ```
+/// use regex_automata::meta::Regex;
+///
+/// let re = Regex::new(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}$")?;
+/// assert!(re.is_match("2010-03-14"));
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+///
+/// # Example: anchored search
+///
+/// This example shows how to use [`Input::anchored`] to run an anchored
+/// search, even when the regex pattern itself isn't anchored. An anchored
+/// search guarantees that if a match is found, then the start offset of the
+/// match corresponds to the offset at which the search was started.
+///
+/// ```
+/// use regex_automata::{meta::Regex, Anchored, Input, Match};
+///
+/// let re = Regex::new(r"\bfoo\b")?;
+/// let input = Input::new("xx foo xx").range(3..).anchored(Anchored::Yes);
+/// // The offsets are in terms of the original haystack.
+/// assert_eq!(Some(Match::must(0, 3..6)), re.find(input));
+///
+/// // Notice that no match occurs here, because \b still takes the
+/// // surrounding context into account, even if it means looking back
+/// // before the start of your search.
+/// let hay = "xxfoo xx";
+/// let input = Input::new(hay).range(2..).anchored(Anchored::Yes);
+/// assert_eq!(None, re.find(input));
+/// // Indeed, you cannot achieve the above by simply slicing the
+/// // haystack itself, since the regex engine can't see the
+/// // surrounding context. This is why 'Input' permits setting
+/// // the bounds of a search!
+/// let input = Input::new(&hay[2..]).anchored(Anchored::Yes);
+/// // WRONG!
+/// assert_eq!(Some(Match::must(0, 0..3)), re.find(input));
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+///
+/// # Example: earliest search
+///
+/// This example shows how to use [`Input::earliest`] to run a search that
+/// might stop before finding the typical leftmost match.
+///
+/// ```
+/// use regex_automata::{meta::Regex, Anchored, Input, Match};
+///
+/// let re = Regex::new(r"[a-z]{3}|b")?;
+/// let input = Input::new("abc").earliest(true);
+/// assert_eq!(Some(Match::must(0, 1..2)), re.find(input));
+///
+/// // Note that "earliest" isn't really a match semantic unto itself.
+/// // Instead, it is merely an instruction to whatever regex engine
+/// // gets used internally to quit as soon as it can. For example,
+/// // this regex uses a different search technique, and winds up
+/// // producing a different (but valid) match!
+/// let re = Regex::new(r"abc|b")?;
+/// let input = Input::new("abc").earliest(true);
+/// assert_eq!(Some(Match::must(0, 0..3)), re.find(input));
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+///
+/// # Example: change the line terminator
+///
+/// This example shows how to enable multi-line mode by default and change
+/// the line terminator to the NUL byte:
+///
+/// ```
+/// use regex_automata::{meta::Regex, util::syntax, Match};
+///
+/// let re = Regex::builder()
+/// .syntax(syntax::Config::new().multi_line(true))
+/// .configure(Regex::config().line_terminator(b'\x00'))
+/// .build(r"^foo$")?;
+/// let hay = "\x00foo\x00";
+/// assert_eq!(Some(Match::must(0, 1..4)), re.find(hay));
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+#[derive(Debug)]
+pub struct Regex {
+ /// The actual regex implementation.
+ imp: Arc<RegexI>,
+ /// A thread safe pool of caches.
+ ///
+ /// For the higher level search APIs, a `Cache` is automatically plucked
+ /// from this pool before running a search. The lower level `with` methods
+ /// permit the caller to provide their own cache, thereby bypassing
+ /// accesses to this pool.
+ ///
+ /// Note that we put this outside the `Arc` so that cloning a `Regex`
+ /// results in creating a fresh `CachePool`. This in turn permits callers
+ /// to clone regexes into separate threads where each such regex gets
+ /// the pool's "thread owner" optimization. Otherwise, if one shares the
+ /// `Regex` directly, then the pool will go through a slower mutex path for
+ /// all threads except for the "owner."
+ pool: CachePool,
+}
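
A minimal sketch of the clone-per-thread mitigation described in the doc comment above (the pattern, haystack, and thread count are arbitrary). Each clone shares the read-only internals but owns a fresh cache pool, so the threads never contend on a shared pool:

    use regex_automata::meta::Regex;

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        let re = Regex::new(r"[0-9]+")?;
        let handles: Vec<_> = (0..4)
            .map(|_| {
                // Cloning is cheap: only the pool is per-clone state.
                let re = re.clone();
                std::thread::spawn(move || re.is_match("abc 123 xyz"))
            })
            .collect();
        for handle in handles {
            assert!(handle.join().unwrap());
        }
        Ok(())
    }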
+
+/// The internal implementation of `Regex`, split out so that it can be wrapped
+/// in an `Arc`.
+#[derive(Debug)]
+struct RegexI {
+ /// The core matching engine.
+ ///
+ /// Why is this reference counted when RegexI is already wrapped in an Arc?
+ /// Well, we need to capture this in a closure to our `Pool` below in order
+ /// to create new `Cache` values when needed. So since it needs to be in
+ /// two places, we make it reference counted.
+ ///
+ /// We make `RegexI` itself reference counted too so that `Regex` itself
+ /// stays extremely small and very cheap to clone.
+ strat: Arc<dyn Strategy>,
+ /// Metadata about the regexes driving the strategy. The metadata is also
+ /// usually stored inside the strategy too, but we put it here as well
+ /// so that we can get quick access to it (without virtual calls) before
+ /// executing the regex engine. For example, we use this metadata to
+ /// detect a subset of cases where we know a match is impossible, and can
+ /// thus avoid calling into the strategy at all.
+ ///
+ /// Since `RegexInfo` is stored in multiple places, it is also reference
+ /// counted.
+ info: RegexInfo,
+}
+
+/// Convenience constructors for a `Regex` using the default configuration.
+impl Regex {
+ /// Builds a `Regex` from a single pattern string using the default
+ /// configuration.
+ ///
+ /// If there was a problem parsing the pattern or a problem turning it into
+ /// a regex matcher, then an error is returned.
+ ///
+ /// If you want to change the configuration of a `Regex`, use a [`Builder`]
+ /// with a [`Config`].
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Match};
+ ///
+ /// let re = Regex::new(r"(?Rm)^foo$")?;
+ /// let hay = "\r\nfoo\r\n";
+ /// assert_eq!(Some(Match::must(0, 2..5)), re.find(hay));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn new(pattern: &str) -> Result<Regex, BuildError> {
+ Regex::builder().build(pattern)
+ }
+
+ /// Builds a `Regex` from many pattern strings using the default
+ /// configuration.
+ ///
+ /// If there was a problem parsing any of the patterns or a problem turning
+ /// them into a regex matcher, then an error is returned.
+ ///
+ /// If you want to change the configuration of a `Regex`, use a [`Builder`]
+ /// with a [`Config`].
+ ///
+ /// # Example: simple lexer
+ ///
+ /// This simplistic example leverages the multi-pattern support to build a
+ /// simple little lexer. The pattern ID in the match tells you which regex
+ /// matched, which in turn might be used to map back to the "type" of the
+ /// token returned by the lexer.
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Match};
+ ///
+ /// let re = Regex::new_many(&[
+ /// r"[[:space:]]",
+ /// r"[A-Za-z0-9][A-Za-z0-9_]+",
+ /// r"->",
+ /// r".",
+ /// ])?;
+ /// let haystack = "fn is_boss(bruce: i32, springsteen: String) -> bool;";
+ /// let matches: Vec<Match> = re.find_iter(haystack).collect();
+ /// assert_eq!(matches, vec![
+ /// Match::must(1, 0..2), // 'fn'
+ /// Match::must(0, 2..3), // ' '
+ /// Match::must(1, 3..10), // 'is_boss'
+ /// Match::must(3, 10..11), // '('
+ /// Match::must(1, 11..16), // 'bruce'
+ /// Match::must(3, 16..17), // ':'
+ /// Match::must(0, 17..18), // ' '
+ /// Match::must(1, 18..21), // 'i32'
+ /// Match::must(3, 21..22), // ','
+ /// Match::must(0, 22..23), // ' '
+ /// Match::must(1, 23..34), // 'springsteen'
+ /// Match::must(3, 34..35), // ':'
+ /// Match::must(0, 35..36), // ' '
+ /// Match::must(1, 36..42), // 'String'
+ /// Match::must(3, 42..43), // ')'
+ /// Match::must(0, 43..44), // ' '
+ /// Match::must(2, 44..46), // '->'
+ /// Match::must(0, 46..47), // ' '
+ /// Match::must(1, 47..51), // 'bool'
+ /// Match::must(3, 51..52), // ';'
+ /// ]);
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ ///
+ /// One can write a lexer like the above using a regex like
+ /// `(?P<space>[[:space:]])|(?P<ident>[A-Za-z0-9][A-Za-z0-9_]+)|...`,
+ /// but then you need to ask whether each capture group matched to determine
+ /// which branch in the regex matched, and thus, which token the match
+ /// corresponds to. In contrast, the above example includes the pattern ID
+ /// in the match. There's no need to use capture groups at all.
+ ///
+ /// # Example: finding the pattern that caused an error
+ ///
+ /// When a syntax error occurs, it is possible to ask which pattern
+ /// caused the syntax error.
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, PatternID};
+ ///
+ /// let err = Regex::new_many(&["a", "b", r"\p{Foo}", "c"]).unwrap_err();
+ /// assert_eq!(Some(PatternID::must(2)), err.pattern());
+ /// ```
+ ///
+ /// # Example: zero patterns is valid
+ ///
+ /// Building a regex with zero patterns results in a regex that never
+ /// matches anything. Because this routine is generic, passing an empty
+ /// slice usually requires a turbo-fish (or something else to help type
+ /// inference).
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, util::syntax, Match};
+ ///
+ /// let re = Regex::new_many::<&str>(&[])?;
+ /// assert_eq!(None, re.find(""));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn new_many<P: AsRef<str>>(
+ patterns: &[P],
+ ) -> Result<Regex, BuildError> {
+ Regex::builder().build_many(patterns)
+ }
+
+ /// Return a default configuration for a `Regex`.
+ ///
+ /// This is a convenience routine to avoid needing to import the [`Config`]
+ /// type when customizing the construction of a `Regex`.
+ ///
+ /// # Example: lower the NFA size limit
+ ///
+ /// In some cases, the default size limit might be too big. The size limit
+ /// can be lowered, which will prevent large regex patterns from compiling.
+ ///
+ /// ```
+ /// # if cfg!(miri) { return Ok(()); } // miri takes too long
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let result = Regex::builder()
+ /// .configure(Regex::config().nfa_size_limit(Some(20 * (1<<10))))
+ /// // Not even 20KB is enough to build a single large Unicode class!
+ /// .build(r"\pL");
+ /// assert!(result.is_err());
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn config() -> Config {
+ Config::new()
+ }
+
+ /// Return a builder for configuring the construction of a `Regex`.
+ ///
+ /// This is a convenience routine to avoid needing to import the
+ /// [`Builder`] type in common cases.
+ ///
+ /// # Example: change the line terminator
+ ///
+ /// This example shows how to enable multi-line mode by default and change
+ /// the line terminator to the NUL byte:
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, util::syntax, Match};
+ ///
+ /// let re = Regex::builder()
+ /// .syntax(syntax::Config::new().multi_line(true))
+ /// .configure(Regex::config().line_terminator(b'\x00'))
+ /// .build(r"^foo$")?;
+ /// let hay = "\x00foo\x00";
+ /// assert_eq!(Some(Match::must(0, 1..4)), re.find(hay));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn builder() -> Builder {
+ Builder::new()
+ }
+}
+
+/// High level convenience routines for using a regex to search a haystack.
+impl Regex {
+ /// Returns true if and only if this regex matches the given haystack.
+ ///
+ /// This routine may short circuit if it knows that scanning future input
+ /// will never lead to a different result. (Consider how this might make
+ /// a difference given the regex `a+` on the haystack `aaaaaaaaaaaaaaa`.
+ /// This routine _may_ stop after it sees the first `a`, but routines like
+ /// `find` need to continue searching because `+` is greedy by default.)
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let re = Regex::new("foo[0-9]+bar")?;
+ ///
+ /// assert!(re.is_match("foo12345bar"));
+ /// assert!(!re.is_match("foobar"));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ ///
+ /// # Example: consistency with search APIs
+ ///
+ /// `is_match` is guaranteed to return `true` whenever `find` returns a
+ /// match. This includes searches that are executed entirely within a
+ /// codepoint:
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Input};
+ ///
+ /// let re = Regex::new("a*")?;
+ ///
+ /// // This doesn't match because the default configuration bans empty
+ /// // matches from splitting a codepoint.
+ /// assert!(!re.is_match(Input::new("☃").span(1..2)));
+ /// assert_eq!(None, re.find(Input::new("☃").span(1..2)));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ ///
+ /// Notice that when UTF-8 mode is disabled, then the above reports a
+ /// match because the restriction against zero-width matches that split a
+ /// codepoint has been lifted:
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Input, Match};
+ ///
+ /// let re = Regex::builder()
+ /// .configure(Regex::config().utf8_empty(false))
+ /// .build("a*")?;
+ ///
+ /// assert!(re.is_match(Input::new("☃").span(1..2)));
+ /// assert_eq!(
+ /// Some(Match::must(0, 1..1)),
+ /// re.find(Input::new("☃").span(1..2)),
+ /// );
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ ///
+ /// A similar idea applies when using line anchors with CRLF mode enabled,
+ /// which prevents them from matching between a `\r` and a `\n`.
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Input, Match};
+ ///
+ /// let re = Regex::new(r"(?Rm:$)")?;
+ /// assert!(!re.is_match(Input::new("\r\n").span(1..1)));
+ /// // A regular line anchor, which only considers \n as a
+ /// // line terminator, will match.
+ /// let re = Regex::new(r"(?m:$)")?;
+ /// assert!(re.is_match(Input::new("\r\n").span(1..1)));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ #[inline]
+ pub fn is_match<'h, I: Into<Input<'h>>>(&self, input: I) -> bool {
+ let input = input.into().earliest(true);
+ if self.imp.info.is_impossible(&input) {
+ return false;
+ }
+ let mut guard = self.pool.get();
+ let result = self.imp.strat.is_match(&mut guard, &input);
+ // See 'Regex::search' for why we put the guard back explicitly.
+ PoolGuard::put(guard);
+ result
+ }
+
+ /// Executes a leftmost search and returns the first match that is found,
+ /// if one exists.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Match};
+ ///
+ /// let re = Regex::new("foo[0-9]+")?;
+ /// assert_eq!(Some(Match::must(0, 0..8)), re.find("foo12345"));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ #[inline]
+ pub fn find<'h, I: Into<Input<'h>>>(&self, input: I) -> Option<Match> {
+ self.search(&input.into())
+ }
+
+ /// Executes a leftmost forward search and writes the spans of capturing
+ /// groups that participated in a match into the provided [`Captures`]
+ /// value. If no match was found, then [`Captures::is_match`] is guaranteed
+ /// to return `false`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Span};
+ ///
+ /// let re = Regex::new(r"^([0-9]{4})-([0-9]{2})-([0-9]{2})$")?;
+ /// let mut caps = re.create_captures();
+ ///
+ /// re.captures("2010-03-14", &mut caps);
+ /// assert!(caps.is_match());
+ /// assert_eq!(Some(Span::from(0..4)), caps.get_group(1));
+ /// assert_eq!(Some(Span::from(5..7)), caps.get_group(2));
+ /// assert_eq!(Some(Span::from(8..10)), caps.get_group(3));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ #[inline]
+ pub fn captures<'h, I: Into<Input<'h>>>(
+ &self,
+ input: I,
+ caps: &mut Captures,
+ ) {
+ self.search_captures(&input.into(), caps)
+ }
+
+ /// Returns an iterator over all non-overlapping leftmost matches in
+ /// the given haystack. If no match exists, then the iterator yields no
+ /// elements.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Match};
+ ///
+ /// let re = Regex::new("foo[0-9]+")?;
+ /// let haystack = "foo1 foo12 foo123";
+ /// let matches: Vec<Match> = re.find_iter(haystack).collect();
+ /// assert_eq!(matches, vec![
+ /// Match::must(0, 0..4),
+ /// Match::must(0, 5..10),
+ /// Match::must(0, 11..17),
+ /// ]);
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ #[inline]
+ pub fn find_iter<'r, 'h, I: Into<Input<'h>>>(
+ &'r self,
+ input: I,
+ ) -> FindMatches<'r, 'h> {
+ let cache = self.pool.get();
+ let it = iter::Searcher::new(input.into());
+ FindMatches { re: self, cache, it }
+ }
+
+ /// Returns an iterator over all non-overlapping `Captures` values. If no
+ /// match exists, then the iterator yields no elements.
+ ///
+ /// This yields the same matches as [`Regex::find_iter`], but it includes
+ /// the spans of all capturing groups that participate in each match.
+ ///
+ /// **Tip:** See [`util::iter::Searcher`](crate::util::iter::Searcher) for
+ /// how to correctly iterate over all matches in a haystack while avoiding
+ /// the creation of a new `Captures` value for every match. (Which you are
+ /// forced to do with an `Iterator`.)
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Span};
+ ///
+ /// let re = Regex::new("foo(?P<numbers>[0-9]+)")?;
+ ///
+ /// let haystack = "foo1 foo12 foo123";
+ /// let matches: Vec<Span> = re
+ /// .captures_iter(haystack)
+ /// // The unwrap is OK since 'numbers' matches if the pattern matches.
+ /// .map(|caps| caps.get_group_by_name("numbers").unwrap())
+ /// .collect();
+ /// assert_eq!(matches, vec![
+ /// Span::from(3..4),
+ /// Span::from(8..10),
+ /// Span::from(14..17),
+ /// ]);
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ #[inline]
+ pub fn captures_iter<'r, 'h, I: Into<Input<'h>>>(
+ &'r self,
+ input: I,
+ ) -> CapturesMatches<'r, 'h> {
+ let cache = self.pool.get();
+ let caps = self.create_captures();
+ let it = iter::Searcher::new(input.into());
+ CapturesMatches { re: self, cache, caps, it }
+ }
+
+ /// Returns an iterator of spans of the haystack given, delimited by a
+ /// match of the regex. Namely, each element of the iterator corresponds to
+ /// a part of the haystack that *isn't* matched by the regular expression.
+ ///
+ /// # Example
+ ///
+ /// To split a string delimited by arbitrary amounts of spaces or tabs:
+ ///
+ /// ```
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let re = Regex::new(r"[ \t]+")?;
+ /// let hay = "a b \t c\td e";
+ /// let fields: Vec<&str> = re.split(hay).map(|span| &hay[span]).collect();
+ /// assert_eq!(fields, vec!["a", "b", "c", "d", "e"]);
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ ///
+ /// # Example: more cases
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let re = Regex::new(r" ")?;
+ /// let hay = "Mary had a little lamb";
+ /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
+ /// assert_eq!(got, vec!["Mary", "had", "a", "little", "lamb"]);
+ ///
+ /// let re = Regex::new(r"X")?;
+ /// let hay = "";
+ /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
+ /// assert_eq!(got, vec![""]);
+ ///
+ /// let re = Regex::new(r"X")?;
+ /// let hay = "lionXXtigerXleopard";
+ /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
+ /// assert_eq!(got, vec!["lion", "", "tiger", "leopard"]);
+ ///
+ /// let re = Regex::new(r"::")?;
+ /// let hay = "lion::tiger::leopard";
+ /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
+ /// assert_eq!(got, vec!["lion", "tiger", "leopard"]);
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ ///
+ /// If a haystack contains multiple contiguous matches, you will end up
+ /// with empty spans yielded by the iterator:
+ ///
+ /// ```
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let re = Regex::new(r"X")?;
+ /// let hay = "XXXXaXXbXc";
+ /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
+ /// assert_eq!(got, vec!["", "", "", "", "a", "", "b", "c"]);
+ ///
+ /// let re = Regex::new(r"/")?;
+ /// let hay = "(///)";
+ /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
+ /// assert_eq!(got, vec!["(", "", "", ")"]);
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ ///
+ /// Separators at the start or end of a haystack are neighbored by empty
+ /// spans.
+ ///
+ /// ```
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let re = Regex::new(r"0")?;
+ /// let hay = "010";
+ /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
+ /// assert_eq!(got, vec!["", "1", ""]);
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ ///
+ /// When the empty string is used as a regex, it splits at every valid
+ /// UTF-8 boundary by default (which includes the beginning and end of the
+ /// haystack):
+ ///
+ /// ```
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let re = Regex::new(r"")?;
+ /// let hay = "rust";
+ /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
+ /// assert_eq!(got, vec!["", "r", "u", "s", "t", ""]);
+ ///
+ /// // Splitting by an empty string is UTF-8 aware by default!
+ /// let re = Regex::new(r"")?;
+ /// let hay = "☃";
+ /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
+ /// assert_eq!(got, vec!["", "☃", ""]);
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ ///
+ /// But note that UTF-8 mode for empty strings can be disabled, which will
+ /// then result in a match at every byte offset in the haystack,
+ /// including between every UTF-8 code unit.
+ ///
+ /// ```
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let re = Regex::builder()
+ /// .configure(Regex::config().utf8_empty(false))
+ /// .build(r"")?;
+ /// let hay = "☃".as_bytes();
+ /// let got: Vec<&[u8]> = re.split(hay).map(|sp| &hay[sp]).collect();
+ /// assert_eq!(got, vec![
+ /// // Writing byte string slices is just brutal. The problem is that
+ /// // b"foo" has type &[u8; 3] instead of &[u8].
+ /// &[][..], &[b'\xE2'][..], &[b'\x98'][..], &[b'\x83'][..], &[][..],
+ /// ]);
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ ///
+ /// Contiguous separators (which commonly show up with whitespace) can lead to
+ /// possibly surprising behavior. For example, this code is correct:
+ ///
+ /// ```
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let re = Regex::new(r" ")?;
+ /// let hay = " a b c";
+ /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
+ /// assert_eq!(got, vec!["", "", "", "", "a", "", "b", "c"]);
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ ///
+ /// It does *not* give you `["a", "b", "c"]`. For that behavior, you'd want
+ /// to match contiguous space characters:
+ ///
+ /// ```
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let re = Regex::new(r" +")?;
+ /// let hay = " a b c";
+ /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect();
+ /// // N.B. This does still include a leading empty span because ' +'
+ /// // matches at the beginning of the haystack.
+ /// assert_eq!(got, vec!["", "a", "b", "c"]);
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ #[inline]
+ pub fn split<'r, 'h, I: Into<Input<'h>>>(
+ &'r self,
+ input: I,
+ ) -> Split<'r, 'h> {
+ Split { finder: self.find_iter(input), last: 0 }
+ }
+
+ /// Returns an iterator of at most `limit` spans of the haystack given,
+ /// delimited by a match of the regex. (A `limit` of `0` will return no
+ /// spans.) Namely, each element of the iterator corresponds to a part
+ /// of the haystack that *isn't* matched by the regular expression. The
+ /// remainder of the haystack that is not split will be the last element in
+ /// the iterator.
+ ///
+ /// # Example
+ ///
+ /// Get the first two words in some haystack:
+ ///
+ /// ```
+ /// # if cfg!(miri) { return Ok(()); } // miri takes too long
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let re = Regex::new(r"\W+").unwrap();
+ /// let hay = "Hey! How are you?";
+ /// let fields: Vec<&str> =
+ /// re.splitn(hay, 3).map(|span| &hay[span]).collect();
+ /// assert_eq!(fields, vec!["Hey", "How", "are you?"]);
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ ///
+ /// # Examples: more cases
+ ///
+ /// ```
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let re = Regex::new(r" ")?;
+ /// let hay = "Mary had a little lamb";
+ /// let got: Vec<&str> = re.splitn(hay, 3).map(|sp| &hay[sp]).collect();
+ /// assert_eq!(got, vec!["Mary", "had", "a little lamb"]);
+ ///
+ /// let re = Regex::new(r"X")?;
+ /// let hay = "";
+ /// let got: Vec<&str> = re.splitn(hay, 3).map(|sp| &hay[sp]).collect();
+ /// assert_eq!(got, vec![""]);
+ ///
+ /// let re = Regex::new(r"X")?;
+ /// let hay = "lionXXtigerXleopard";
+ /// let got: Vec<&str> = re.splitn(hay, 3).map(|sp| &hay[sp]).collect();
+ /// assert_eq!(got, vec!["lion", "", "tigerXleopard"]);
+ ///
+ /// let re = Regex::new(r"::")?;
+ /// let hay = "lion::tiger::leopard";
+ /// let got: Vec<&str> = re.splitn(hay, 2).map(|sp| &hay[sp]).collect();
+ /// assert_eq!(got, vec!["lion", "tiger::leopard"]);
+ ///
+ /// let re = Regex::new(r"X")?;
+ /// let hay = "abcXdef";
+ /// let got: Vec<&str> = re.splitn(hay, 1).map(|sp| &hay[sp]).collect();
+ /// assert_eq!(got, vec!["abcXdef"]);
+ ///
+ /// let re = Regex::new(r"X")?;
+ /// let hay = "abcdef";
+ /// let got: Vec<&str> = re.splitn(hay, 2).map(|sp| &hay[sp]).collect();
+ /// assert_eq!(got, vec!["abcdef"]);
+ ///
+ /// let re = Regex::new(r"X")?;
+ /// let hay = "abcXdef";
+ /// let got: Vec<&str> = re.splitn(hay, 0).map(|sp| &hay[sp]).collect();
+ /// assert!(got.is_empty());
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn splitn<'r, 'h, I: Into<Input<'h>>>(
+ &'r self,
+ input: I,
+ limit: usize,
+ ) -> SplitN<'r, 'h> {
+ SplitN { splits: self.split(input), limit }
+ }
+}
+
+/// Lower level search routines that give more control.
+impl Regex {
+ /// Returns the start and end offset of the leftmost match. If no match
+ /// exists, then `None` is returned.
+ ///
+ /// This is like [`Regex::find`], but it accepts a concrete `&Input`
+ /// instead of an `Into<Input>`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Input, Match};
+ ///
+ /// let re = Regex::new(r"Samwise|Sam")?;
+ /// let input = Input::new(
+ /// "one of the chief characters, Samwise the Brave",
+ /// );
+ /// assert_eq!(Some(Match::must(0, 29..36)), re.search(&input));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ #[inline]
+ pub fn search(&self, input: &Input<'_>) -> Option<Match> {
+ if self.imp.info.is_impossible(input) {
+ return None;
+ }
+ let mut guard = self.pool.get();
+ let result = self.imp.strat.search(&mut guard, input);
+ // We do this dance with the guard and explicitly put it back in the
+ // pool because it seems to result in better codegen. If we let the
+ // guard's Drop impl put it back in the pool, then functions like
+ // ptr::drop_in_place get called and they *don't* get inlined. This
+ // isn't usually a big deal, but in latency sensitive benchmarks the
+ // extra function call can matter.
+ //
+ // I used `rebar measure -f '^grep/every-line$' -e meta` to measure
+ // the effects here.
+ //
+ // Note that this doesn't eliminate the latency effects of using the
+ // pool. There is still some (minor) cost for the "thread owner" of the
+ // pool. (i.e., The thread that first calls a regex search routine.)
+ // However, for other threads using the regex, the pool access can be
+ // quite expensive as it goes through a mutex. Callers can avoid this
+ // by either cloning the Regex (which creates a distinct copy of the
+ // pool), or callers can use the lower level APIs that accept a 'Cache'
+ // directly and do their own handling.
+ PoolGuard::put(guard);
+ result
+ }
+
+ /// Returns the end offset of the leftmost match. If no match exists, then
+ /// `None` is returned.
+ ///
+ /// This is distinct from [`Regex::search`] in that it only returns the end
+ /// of a match and not the start of the match. Depending on a variety of
+ /// implementation details, this _may_ permit the regex engine to do less
+ /// overall work. For example, if a DFA is being used to execute a search,
+ /// then the start of a match usually requires running a separate DFA in
+ /// reverse to find the start of a match. If one only needs the end of
+ /// a match, then the separate reverse scan to find the start of a match
+ /// can be skipped. (Note that the reverse scan is avoided even when using
+ /// `Regex::search` when possible, for example, in the case of an anchored
+ /// search.)
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Input, HalfMatch};
+ ///
+ /// let re = Regex::new(r"Samwise|Sam")?;
+ /// let input = Input::new(
+ /// "one of the chief characters, Samwise the Brave",
+ /// );
+ /// assert_eq!(Some(HalfMatch::must(0, 36)), re.search_half(&input));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ #[inline]
+ pub fn search_half(&self, input: &Input<'_>) -> Option<HalfMatch> {
+ if self.imp.info.is_impossible(input) {
+ return None;
+ }
+ let mut guard = self.pool.get();
+ let result = self.imp.strat.search_half(&mut guard, input);
+ // See 'Regex::search' for why we put the guard back explicitly.
+ PoolGuard::put(guard);
+ result
+ }
+
+ /// Executes a leftmost forward search and writes the spans of capturing
+ /// groups that participated in a match into the provided [`Captures`]
+ /// value. If no match was found, then [`Captures::is_match`] is guaranteed
+ /// to return `false`.
+ ///
+ /// This is like [`Regex::captures`], but it accepts a concrete `&Input`
+ /// instead of an `Into<Input>`.
+ ///
+ /// # Example: specific pattern search
+ ///
+ /// This example shows how to build a multi-pattern `Regex` that permits
+ /// searching for specific patterns.
+ ///
+ /// ```
+ /// use regex_automata::{
+ /// meta::Regex,
+ /// Anchored, Match, PatternID, Input,
+ /// };
+ ///
+ /// let re = Regex::new_many(&["[a-z0-9]{6}", "[a-z][a-z0-9]{5}"])?;
+ /// let mut caps = re.create_captures();
+ /// let haystack = "foo123";
+ ///
+ /// // Since we are using the default leftmost-first match and both
+ /// // patterns match at the same starting position, only the first pattern
+ /// // will be returned in this case when doing a search for any of the
+ /// // patterns.
+ /// let expected = Some(Match::must(0, 0..6));
+ /// re.search_captures(&Input::new(haystack), &mut caps);
+ /// assert_eq!(expected, caps.get_match());
+ ///
+ /// // But if we want to check whether some other pattern matches, then we
+ /// // can provide its pattern ID.
+ /// let expected = Some(Match::must(1, 0..6));
+ /// let input = Input::new(haystack)
+ /// .anchored(Anchored::Pattern(PatternID::must(1)));
+ /// re.search_captures(&input, &mut caps);
+ /// assert_eq!(expected, caps.get_match());
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ ///
+ /// # Example: specifying the bounds of a search
+ ///
+ /// This example shows how providing the bounds of a search can produce
+ /// different results than simply sub-slicing the haystack.
+ ///
+ /// ```
+ /// # if cfg!(miri) { return Ok(()); } // miri takes too long
+ /// use regex_automata::{meta::Regex, Match, Input};
+ ///
+ /// let re = Regex::new(r"\b[0-9]{3}\b")?;
+ /// let mut caps = re.create_captures();
+ /// let haystack = "foo123bar";
+ ///
+ /// // Since we sub-slice the haystack, the search doesn't know about
+ /// // the larger context and assumes that `123` is surrounded by word
+ /// // boundaries. And of course, the match position is reported relative
+ /// // to the sub-slice as well, which means we get `0..3` instead of
+ /// // `3..6`.
+ /// let expected = Some(Match::must(0, 0..3));
+ /// let input = Input::new(&haystack[3..6]);
+ /// re.search_captures(&input, &mut caps);
+ /// assert_eq!(expected, caps.get_match());
+ ///
+ /// // But if we provide the bounds of the search within the context of the
+ /// // entire haystack, then the search can take the surrounding context
+ /// // into account. (And if we did find a match, it would be reported
+ /// // as a valid offset into `haystack` instead of its sub-slice.)
+ /// let expected = None;
+ /// let input = Input::new(haystack).range(3..6);
+ /// re.search_captures(&input, &mut caps);
+ /// assert_eq!(expected, caps.get_match());
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ #[inline]
+ pub fn search_captures(&self, input: &Input<'_>, caps: &mut Captures) {
+ caps.set_pattern(None);
+ let pid = self.search_slots(input, caps.slots_mut());
+ caps.set_pattern(pid);
+ }
+
+ /// Executes a leftmost forward search and writes the spans of capturing
+ /// groups that participated in a match into the provided `slots`, and
+ /// returns the matching pattern ID. The contents of the slots for patterns
+ /// other than the matching pattern are unspecified. If no match was found,
+ /// then `None` is returned and the contents of `slots` is unspecified.
+ ///
+ /// This is like [`Regex::search`], but it accepts a raw slots slice
+ /// instead of a `Captures` value. This is useful in contexts where you
+ /// don't want or need to allocate a `Captures`.
+ ///
+ /// It is legal to pass _any_ number of slots to this routine. If the regex
+ /// engine would otherwise write a slot offset that doesn't fit in the
+ /// provided slice, then it is simply skipped. In general though, there are
+ /// usually three slice lengths you might want to use:
+ ///
+ /// * An empty slice, if you only care about which pattern matched.
+ /// * A slice with [`pattern_len() * 2`](Regex::pattern_len) slots, if you
+ /// only care about the overall match spans for each matching pattern.
+ /// * A slice with
+ /// [`slot_len()`](crate::util::captures::GroupInfo::slot_len) slots, which
+ /// permits recording match offsets for every capturing group in every
+ /// pattern.
+ ///
+ /// # Example
+ ///
+ /// This example shows how to find the overall match offsets in a
+ /// multi-pattern search without allocating a `Captures` value. Indeed, we
+ /// can put our slots right on the stack.
+ ///
+ /// ```
+ /// # if cfg!(miri) { return Ok(()); } // miri takes too long
+ /// use regex_automata::{meta::Regex, PatternID, Input};
+ ///
+ /// let re = Regex::new_many(&[
+ /// r"\pL+",
+ /// r"\d+",
+ /// ])?;
+ /// let input = Input::new("!@#123");
+ ///
+ /// // We only care about the overall match offsets here, so we just
+ /// // allocate two slots for each pattern. Each slot records the start
+ /// // and end of the match.
+ /// let mut slots = [None; 4];
+ /// let pid = re.search_slots(&input, &mut slots);
+ /// assert_eq!(Some(PatternID::must(1)), pid);
+ ///
+ /// // The overall match offsets are always at 'pid * 2' and 'pid * 2 + 1'.
+ /// // See 'GroupInfo' for more details on the mapping between groups and
+ /// // slot indices.
+ /// let slot_start = pid.unwrap().as_usize() * 2;
+ /// let slot_end = slot_start + 1;
+ /// assert_eq!(Some(3), slots[slot_start].map(|s| s.get()));
+ /// assert_eq!(Some(6), slots[slot_end].map(|s| s.get()));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
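+ ///
+ /// If all you care about is which pattern matched, it is valid to pass an
+ /// empty slot slice. A minimal sketch of that case, mirroring the example
+ /// above:
+ ///
+ /// ```
+ /// # if cfg!(miri) { return Ok(()); } // miri takes too long
+ /// use regex_automata::{meta::Regex, PatternID, Input};
+ ///
+ /// let re = Regex::new_many(&[
+ /// r"\pL+",
+ /// r"\d+",
+ /// ])?;
+ /// let input = Input::new("!@#123");
+ ///
+ /// // With no slots at all, no offsets are recorded, but we still learn
+ /// // which pattern matched.
+ /// let pid = re.search_slots(&input, &mut []);
+ /// assert_eq!(Some(PatternID::must(1)), pid);
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```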
+ #[inline]
+ pub fn search_slots(
+ &self,
+ input: &Input<'_>,
+ slots: &mut [Option<NonMaxUsize>],
+ ) -> Option<PatternID> {
+ if self.imp.info.is_impossible(input) {
+ return None;
+ }
+ let mut guard = self.pool.get();
+ let result = self.imp.strat.search_slots(&mut guard, input, slots);
+ // See 'Regex::search' for why we put the guard back explicitly.
+ PoolGuard::put(guard);
+ result
+ }
+
+ /// Writes the set of patterns that match anywhere in the given search
+ /// configuration to `patset`. If multiple patterns match at the same
+ /// position and this `Regex` was configured with [`MatchKind::All`]
+ /// semantics, then all matching patterns are written to the given set.
+ ///
+ /// Unless all of the patterns in this `Regex` are anchored, this will,
+ /// generally speaking, scan the entire haystack.
+ ///
+ /// This search routine *does not* clear the pattern set. This gives some
+ /// flexibility to the caller (e.g., running multiple searches with the
+ /// same pattern set), but does make the API bug-prone if you're reusing
+ /// the same pattern set for multiple searches but intend them to be
+ /// independent.
+ ///
+ /// If a pattern ID matched but the given `PatternSet` does not have
+ /// sufficient capacity to store it, then it is not inserted and silently
+ /// dropped.
+ ///
+ /// # Example
+ ///
+ /// This example shows how to find all matching patterns in a haystack,
+ /// even when some patterns match at the same position as other patterns.
+ /// It is important that we configure the `Regex` with [`MatchKind::All`]
+ /// semantics here, or else overlapping matches will not be reported.
+ ///
+ /// ```
+ /// # if cfg!(miri) { return Ok(()); } // miri takes too long
+ /// use regex_automata::{meta::Regex, Input, MatchKind, PatternSet};
+ ///
+ /// let patterns = &[
+ /// r"\w+", r"\d+", r"\pL+", r"foo", r"bar", r"barfoo", r"foobar",
+ /// ];
+ /// let re = Regex::builder()
+ /// .configure(Regex::config().match_kind(MatchKind::All))
+ /// .build_many(patterns)?;
+ ///
+ /// let input = Input::new("foobar");
+ /// let mut patset = PatternSet::new(re.pattern_len());
+ /// re.which_overlapping_matches(&input, &mut patset);
+ /// let expected = vec![0, 2, 3, 4, 6];
+ /// let got: Vec<usize> = patset.iter().map(|p| p.as_usize()).collect();
+ /// assert_eq!(expected, got);
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
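+ ///
+ /// # Example: the pattern set is not cleared
+ ///
+ /// Since this routine never clears the given `PatternSet`, it can be used
+ /// to accumulate the matching patterns across several searches. A small
+ /// sketch of that behavior:
+ ///
+ /// ```
+ /// # if cfg!(miri) { return Ok(()); } // miri takes too long
+ /// use regex_automata::{meta::Regex, Input, MatchKind, PatternSet};
+ ///
+ /// let re = Regex::builder()
+ /// .configure(Regex::config().match_kind(MatchKind::All))
+ /// .build_many(&[r"foo", r"[0-9]+"])?;
+ ///
+ /// let mut patset = PatternSet::new(re.pattern_len());
+ /// // The first search inserts pattern 0, the second inserts pattern 1 and
+ /// // neither removes what was already recorded in the set.
+ /// re.which_overlapping_matches(&Input::new("foo"), &mut patset);
+ /// re.which_overlapping_matches(&Input::new("123"), &mut patset);
+ /// let got: Vec<usize> = patset.iter().map(|p| p.as_usize()).collect();
+ /// assert_eq!(vec![0, 1], got);
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```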
+ #[inline]
+ pub fn which_overlapping_matches(
+ &self,
+ input: &Input<'_>,
+ patset: &mut PatternSet,
+ ) {
+ if self.imp.info.is_impossible(input) {
+ return;
+ }
+ let mut guard = self.pool.get();
+ let result = self
+ .imp
+ .strat
+ .which_overlapping_matches(&mut guard, input, patset);
+ // See 'Regex::search' for why we put the guard back explicitly.
+ PoolGuard::put(guard);
+ result
+ }
+}
+
+/// Lower level search routines that give more control, and require the caller
+/// to provide an explicit [`Cache`] parameter.
+impl Regex {
+ /// This is like [`Regex::search`], but requires the caller to
+ /// explicitly pass a [`Cache`].
+ ///
+ /// # Why pass a `Cache` explicitly?
+ ///
+ /// Passing a `Cache` explicitly will bypass the use of an internal memory
+ /// pool used by `Regex` to get a `Cache` for a search. The use of this
+ /// pool can be slower in some cases when a `Regex` is used from multiple
+ /// threads simultaneously. Typically, performance only becomes an issue
+ /// when there is heavy contention, which in turn usually only occurs
+ /// when each thread's primary unit of work is a regex search on a small
+ /// haystack.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Input, Match};
+ ///
+ /// let re = Regex::new(r"Samwise|Sam")?;
+ /// let mut cache = re.create_cache();
+ /// let input = Input::new(
+ /// "one of the chief characters, Samwise the Brave",
+ /// );
+ /// assert_eq!(
+ /// Some(Match::must(0, 29..36)),
+ /// re.search_with(&mut cache, &input),
+ /// );
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
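+ ///
+ /// # Example: one `Cache` per thread
+ ///
+ /// As a rough sketch of how one might sidestep contention on the internal
+ /// pool entirely, each thread can create and reuse its own `Cache`:
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Input, Match};
+ ///
+ /// let re = Regex::new(r"Samwise|Sam")?;
+ /// std::thread::scope(|s| {
+ /// for _ in 0..2 {
+ /// s.spawn(|| {
+ /// // Each thread gets its own scratch space, so these searches never
+ /// // touch the shared pool inside the `Regex`.
+ /// let mut cache = re.create_cache();
+ /// let input = Input::new("Sam I am");
+ /// assert_eq!(
+ /// Some(Match::must(0, 0..3)),
+ /// re.search_with(&mut cache, &input),
+ /// );
+ /// });
+ /// }
+ /// });
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```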
+ #[inline]
+ pub fn search_with(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ ) -> Option<Match> {
+ if self.imp.info.is_impossible(input) {
+ return None;
+ }
+ self.imp.strat.search(cache, input)
+ }
+
+ /// This is like [`Regex::search_half`], but requires the caller to
+ /// explicitly pass a [`Cache`].
+ ///
+ /// # Why pass a `Cache` explicitly?
+ ///
+ /// Passing a `Cache` explicitly will bypass the use of an internal memory
+ /// pool used by `Regex` to get a `Cache` for a search. The use of this
+ /// pool can be slower in some cases when a `Regex` is used from multiple
+ /// threads simultaneously. Typically, performance only becomes an issue
+ /// when there is heavy contention, which in turn usually only occurs
+ /// when each thread's primary unit of work is a regex search on a small
+ /// haystack.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Input, HalfMatch};
+ ///
+ /// let re = Regex::new(r"Samwise|Sam")?;
+ /// let mut cache = re.create_cache();
+ /// let input = Input::new(
+ /// "one of the chief characters, Samwise the Brave",
+ /// );
+ /// assert_eq!(
+ /// Some(HalfMatch::must(0, 36)),
+ /// re.search_half_with(&mut cache, &input),
+ /// );
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ #[inline]
+ pub fn search_half_with(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ ) -> Option<HalfMatch> {
+ if self.imp.info.is_impossible(input) {
+ return None;
+ }
+ self.imp.strat.search_half(cache, input)
+ }
+
+ /// This is like [`Regex::search_captures`], but requires the caller to
+ /// explicitly pass a [`Cache`].
+ ///
+ /// # Why pass a `Cache` explicitly?
+ ///
+ /// Passing a `Cache` explicitly will bypass the use of an internal memory
+ /// pool used by `Regex` to get a `Cache` for a search. The use of this
+ /// pool can be slower in some cases when a `Regex` is used from multiple
+ /// threads simultaneously. Typically, performance only becomes an issue
+ /// when there is heavy contention, which in turn usually only occurs
+ /// when each thread's primary unit of work is a regex search on a small
+ /// haystack.
+ ///
+ /// # Example: specific pattern search
+ ///
+ /// This example shows how to build a multi-pattern `Regex` that permits
+ /// searching for specific patterns.
+ ///
+ /// ```
+ /// use regex_automata::{
+ /// meta::Regex,
+ /// Anchored, Match, PatternID, Input,
+ /// };
+ ///
+ /// let re = Regex::new_many(&["[a-z0-9]{6}", "[a-z][a-z0-9]{5}"])?;
+ /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures());
+ /// let haystack = "foo123";
+ ///
+ /// // Since we are using the default leftmost-first match and both
+ /// // patterns match at the same starting position, only the first pattern
+ /// // will be returned in this case when doing a search for any of the
+ /// // patterns.
+ /// let expected = Some(Match::must(0, 0..6));
+ /// re.search_captures_with(&mut cache, &Input::new(haystack), &mut caps);
+ /// assert_eq!(expected, caps.get_match());
+ ///
+ /// // But if we want to check whether some other pattern matches, then we
+ /// // can provide its pattern ID.
+ /// let expected = Some(Match::must(1, 0..6));
+ /// let input = Input::new(haystack)
+ /// .anchored(Anchored::Pattern(PatternID::must(1)));
+ /// re.search_captures_with(&mut cache, &input, &mut caps);
+ /// assert_eq!(expected, caps.get_match());
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ ///
+ /// # Example: specifying the bounds of a search
+ ///
+ /// This example shows how providing the bounds of a search can produce
+ /// different results than simply sub-slicing the haystack.
+ ///
+ /// ```
+ /// # if cfg!(miri) { return Ok(()); } // miri takes too long
+ /// use regex_automata::{meta::Regex, Match, Input};
+ ///
+ /// let re = Regex::new(r"\b[0-9]{3}\b")?;
+ /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures());
+ /// let haystack = "foo123bar";
+ ///
+ /// // Since we sub-slice the haystack, the search doesn't know about
+ /// // the larger context and assumes that `123` is surrounded by word
+ /// // boundaries. And of course, the match position is reported relative
+ /// // to the sub-slice as well, which means we get `0..3` instead of
+ /// // `3..6`.
+ /// let expected = Some(Match::must(0, 0..3));
+ /// let input = Input::new(&haystack[3..6]);
+ /// re.search_captures_with(&mut cache, &input, &mut caps);
+ /// assert_eq!(expected, caps.get_match());
+ ///
+ /// // But if we provide the bounds of the search within the context of the
+ /// // entire haystack, then the search can take the surrounding context
+ /// // into account. (And if we did find a match, it would be reported
+ /// // as a valid offset into `haystack` instead of its sub-slice.)
+ /// let expected = None;
+ /// let input = Input::new(haystack).range(3..6);
+ /// re.search_captures_with(&mut cache, &input, &mut caps);
+ /// assert_eq!(expected, caps.get_match());
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ #[inline]
+ pub fn search_captures_with(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ caps: &mut Captures,
+ ) {
+ caps.set_pattern(None);
+ let pid = self.search_slots_with(cache, input, caps.slots_mut());
+ caps.set_pattern(pid);
+ }
+
+ /// This is like [`Regex::search_slots`], but requires the caller to
+ /// explicitly pass a [`Cache`].
+ ///
+ /// # Why pass a `Cache` explicitly?
+ ///
+ /// Passing a `Cache` explicitly will bypass the use of an internal memory
+ /// pool used by `Regex` to get a `Cache` for a search. The use of this
+ /// pool can be slower in some cases when a `Regex` is used from multiple
+ /// threads simultaneously. Typically, performance only becomes an issue
+ /// when there is heavy contention, which in turn usually only occurs
+ /// when each thread's primary unit of work is a regex search on a small
+ /// haystack.
+ ///
+ /// # Example
+ ///
+ /// This example shows how to find the overall match offsets in a
+ /// multi-pattern search without allocating a `Captures` value. Indeed, we
+ /// can put our slots right on the stack.
+ ///
+ /// ```
+ /// # if cfg!(miri) { return Ok(()); } // miri takes too long
+ /// use regex_automata::{meta::Regex, PatternID, Input};
+ ///
+ /// let re = Regex::new_many(&[
+ /// r"\pL+",
+ /// r"\d+",
+ /// ])?;
+ /// let mut cache = re.create_cache();
+ /// let input = Input::new("!@#123");
+ ///
+ /// // We only care about the overall match offsets here, so we just
+ /// // allocate two slots for each pattern. Each slot records the start
+ /// // and end of the match.
+ /// let mut slots = [None; 4];
+ /// let pid = re.search_slots_with(&mut cache, &input, &mut slots);
+ /// assert_eq!(Some(PatternID::must(1)), pid);
+ ///
+ /// // The overall match offsets are always at 'pid * 2' and 'pid * 2 + 1'.
+ /// // See 'GroupInfo' for more details on the mapping between groups and
+ /// // slot indices.
+ /// let slot_start = pid.unwrap().as_usize() * 2;
+ /// let slot_end = slot_start + 1;
+ /// assert_eq!(Some(3), slots[slot_start].map(|s| s.get()));
+ /// assert_eq!(Some(6), slots[slot_end].map(|s| s.get()));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ #[inline]
+ pub fn search_slots_with(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ slots: &mut [Option<NonMaxUsize>],
+ ) -> Option<PatternID> {
+ if self.imp.info.is_impossible(input) {
+ return None;
+ }
+ self.imp.strat.search_slots(cache, input, slots)
+ }
+
+ /// This is like [`Regex::which_overlapping_matches`], but requires the
+ /// caller to explicitly pass a [`Cache`].
+ ///
+ /// # Why pass a `Cache` explicitly?
+ ///
+ /// Passing a `Cache` explicitly will bypass the use of an internal memory
+ /// pool used by `Regex` to get a `Cache` for a search. The use of this
+ /// pool can be slower in some cases when a `Regex` is used from multiple
+ /// threads simultaneously. Typically, performance only becomes an issue
+ /// when there is heavy contention, which in turn usually only occurs
+ /// when each thread's primary unit of work is a regex search on a small
+ /// haystack.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # if cfg!(miri) { return Ok(()); } // miri takes too long
+ /// use regex_automata::{meta::Regex, Input, MatchKind, PatternSet};
+ ///
+ /// let patterns = &[
+ /// r"\w+", r"\d+", r"\pL+", r"foo", r"bar", r"barfoo", r"foobar",
+ /// ];
+ /// let re = Regex::builder()
+ /// .configure(Regex::config().match_kind(MatchKind::All))
+ /// .build_many(patterns)?;
+ /// let mut cache = re.create_cache();
+ ///
+ /// let input = Input::new("foobar");
+ /// let mut patset = PatternSet::new(re.pattern_len());
+ /// re.which_overlapping_matches_with(&mut cache, &input, &mut patset);
+ /// let expected = vec![0, 2, 3, 4, 6];
+ /// let got: Vec<usize> = patset.iter().map(|p| p.as_usize()).collect();
+ /// assert_eq!(expected, got);
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ #[inline]
+ pub fn which_overlapping_matches_with(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ patset: &mut PatternSet,
+ ) {
+ if self.imp.info.is_impossible(input) {
+ return;
+ }
+ self.imp.strat.which_overlapping_matches(cache, input, patset)
+ }
+}
+
+/// Various non-search routines for querying properties of a `Regex` and
+/// convenience routines for creating [`Captures`] and [`Cache`] values.
+impl Regex {
+ /// Creates a new object for recording capture group offsets. This is used
+ /// in search APIs like [`Regex::captures`] and [`Regex::search_captures`].
+ ///
+ /// This is a convenience routine for
+ /// `Captures::all(re.group_info().clone())`. Callers may build other types
+ /// of `Captures` values that record less information (and thus require
+ /// less work from the regex engine) using [`Captures::matches`] and
+ /// [`Captures::empty`].
+ ///
+ /// # Example
+ ///
+ /// This shows some alternatives to [`Regex::create_captures`]:
+ ///
+ /// ```
+ /// use regex_automata::{
+ /// meta::Regex,
+ /// util::captures::Captures,
+ /// Match, PatternID, Span,
+ /// };
+ ///
+ /// let re = Regex::new(r"(?<first>[A-Z][a-z]+) (?<last>[A-Z][a-z]+)")?;
+ ///
+ /// // This is equivalent to Regex::create_captures. It stores matching
+ /// // offsets for all groups in the regex.
+ /// let mut all = Captures::all(re.group_info().clone());
+ /// re.captures("Bruce Springsteen", &mut all);
+ /// assert_eq!(Some(Match::must(0, 0..17)), all.get_match());
+ /// assert_eq!(Some(Span::from(0..5)), all.get_group_by_name("first"));
+ /// assert_eq!(Some(Span::from(6..17)), all.get_group_by_name("last"));
+ ///
+ /// // In this version, we only care about the implicit groups, which
+ /// // means offsets for the explicit groups will be unavailable. It can
+ /// // sometimes be faster to ask for fewer groups, since the underlying
+ /// // regex engine needs to do less work to keep track of them.
+ /// let mut matches = Captures::matches(re.group_info().clone());
+ /// re.captures("Bruce Springsteen", &mut matches);
+ /// // We still get the overall match info.
+ /// assert_eq!(Some(Match::must(0, 0..17)), matches.get_match());
+ /// // But now the explicit groups are unavailable.
+ /// assert_eq!(None, matches.get_group_by_name("first"));
+ /// assert_eq!(None, matches.get_group_by_name("last"));
+ ///
+ /// // Finally, in this version, we don't ask to keep track of offsets for
+ /// // *any* groups. All we get back is whether a match occurred, and if
+ /// // so, the ID of the pattern that matched.
+ /// let mut empty = Captures::empty(re.group_info().clone());
+ /// re.captures("Bruce Springsteen", &mut empty);
+ /// // it's a match!
+ /// assert!(empty.is_match());
+ /// // for pattern ID 0
+ /// assert_eq!(Some(PatternID::ZERO), empty.pattern());
+ /// // Match offsets are unavailable.
+ /// assert_eq!(None, empty.get_match());
+ /// // And of course, explicit groups are unavailable too.
+ /// assert_eq!(None, empty.get_group_by_name("first"));
+ /// assert_eq!(None, empty.get_group_by_name("last"));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn create_captures(&self) -> Captures {
+ Captures::all(self.group_info().clone())
+ }
+
+ /// Creates a new cache for use with lower level search APIs like
+ /// [`Regex::search_with`].
+ ///
+ /// The cache returned should only be used for searches for this `Regex`.
+ /// If you want to reuse the cache for another `Regex`, then you must call
+ /// [`Cache::reset`] with that `Regex`.
+ ///
+ /// This is a convenience routine for [`Cache::new`].
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Input, Match};
+ ///
+ /// let re = Regex::new(r"(?-u)m\w+\s+m\w+")?;
+ /// let mut cache = re.create_cache();
+ /// let input = Input::new("crazy janey and her mission man");
+ /// assert_eq!(
+ /// Some(Match::must(0, 20..31)),
+ /// re.search_with(&mut cache, &input),
+ /// );
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn create_cache(&self) -> Cache {
+ self.imp.strat.create_cache()
+ }
+
+ /// Returns the total number of patterns in this regex.
+ ///
+ /// The standard [`Regex::new`] constructor always results in a `Regex`
+ /// with a single pattern, but [`Regex::new_many`] permits building a
+ /// multi-pattern regex.
+ ///
+ /// A `Regex` guarantees that the maximum possible `PatternID` returned in
+ /// any match is `Regex::pattern_len() - 1`. In the case where the number
+ /// of patterns is `0`, a match is impossible.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let re = Regex::new(r"(?m)^[a-z]$")?;
+ /// assert_eq!(1, re.pattern_len());
+ ///
+ /// let re = Regex::new_many::<&str>(&[])?;
+ /// assert_eq!(0, re.pattern_len());
+ ///
+ /// let re = Regex::new_many(&["a", "b", "c"])?;
+ /// assert_eq!(3, re.pattern_len());
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn pattern_len(&self) -> usize {
+ self.imp.info.pattern_len()
+ }
+
+ /// Returns the total number of capturing groups.
+ ///
+ /// This includes the implicit capturing group corresponding to the
+ /// entire match. Therefore, the minimum value returned is `1`.
+ ///
+ /// # Example
+ ///
+ /// This shows a few patterns and how many capture groups they have.
+ ///
+ /// ```
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let len = |pattern| {
+ /// Regex::new(pattern).map(|re| re.captures_len())
+ /// };
+ ///
+ /// assert_eq!(1, len("a")?);
+ /// assert_eq!(2, len("(a)")?);
+ /// assert_eq!(3, len("(a)|(b)")?);
+ /// assert_eq!(5, len("(a)(b)|(c)(d)")?);
+ /// assert_eq!(2, len("(a)|b")?);
+ /// assert_eq!(2, len("a|(b)")?);
+ /// assert_eq!(2, len("(b)*")?);
+ /// assert_eq!(2, len("(b)+")?);
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ ///
+ /// # Example: multiple patterns
+ ///
+ /// This routine also works for multiple patterns. The total number is
+ /// the sum of the capture groups of each pattern.
+ ///
+ /// ```
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let len = |patterns| {
+ /// Regex::new_many(patterns).map(|re| re.captures_len())
+ /// };
+ ///
+ /// assert_eq!(2, len(&["a", "b"])?);
+ /// assert_eq!(4, len(&["(a)", "(b)"])?);
+ /// assert_eq!(6, len(&["(a)|(b)", "(c)|(d)"])?);
+ /// assert_eq!(8, len(&["(a)(b)|(c)(d)", "(x)(y)"])?);
+ /// assert_eq!(3, len(&["(a)", "b"])?);
+ /// assert_eq!(3, len(&["a", "(b)"])?);
+ /// assert_eq!(4, len(&["(a)", "(b)*"])?);
+ /// assert_eq!(4, len(&["(a)+", "(b)+"])?);
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn captures_len(&self) -> usize {
+ self.imp
+ .info
+ .props_union()
+ .explicit_captures_len()
+ .saturating_add(self.pattern_len())
+ }
+
+ /// Returns the total number of capturing groups that appear in every
+ /// possible match.
+ ///
+ /// If the number of capture groups can vary depending on the match, then
+ /// this returns `None`. That is, a value is only returned when the number
+ /// of matching groups is invariant or "static."
+ ///
+ /// Note that like [`Regex::captures_len`], this **does** include the
+ /// implicit capturing group corresponding to the entire match. Therefore,
+ /// when a non-None value is returned, it is guaranteed to be at least `1`.
+ /// Stated differently, a return value of `Some(0)` is impossible.
+ ///
+ /// # Example
+ ///
+ /// This shows a few cases where a static number of capture groups is
+ /// available and a few cases where it is not.
+ ///
+ /// ```
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let len = |pattern| {
+ /// Regex::new(pattern).map(|re| re.static_captures_len())
+ /// };
+ ///
+ /// assert_eq!(Some(1), len("a")?);
+ /// assert_eq!(Some(2), len("(a)")?);
+ /// assert_eq!(Some(2), len("(a)|(b)")?);
+ /// assert_eq!(Some(3), len("(a)(b)|(c)(d)")?);
+ /// assert_eq!(None, len("(a)|b")?);
+ /// assert_eq!(None, len("a|(b)")?);
+ /// assert_eq!(None, len("(b)*")?);
+ /// assert_eq!(Some(2), len("(b)+")?);
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ ///
+ /// # Example: multiple patterns
+ ///
+ /// This property extends to regexes with multiple patterns as well. In
+ /// order for there to be a static number of capture groups in this case,
+ /// every pattern must have the same static number.
+ ///
+ /// ```
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let len = |patterns| {
+ /// Regex::new_many(patterns).map(|re| re.static_captures_len())
+ /// };
+ ///
+ /// assert_eq!(Some(1), len(&["a", "b"])?);
+ /// assert_eq!(Some(2), len(&["(a)", "(b)"])?);
+ /// assert_eq!(Some(2), len(&["(a)|(b)", "(c)|(d)"])?);
+ /// assert_eq!(Some(3), len(&["(a)(b)|(c)(d)", "(x)(y)"])?);
+ /// assert_eq!(None, len(&["(a)", "b"])?);
+ /// assert_eq!(None, len(&["a", "(b)"])?);
+ /// assert_eq!(None, len(&["(a)", "(b)*"])?);
+ /// assert_eq!(Some(2), len(&["(a)+", "(b)+"])?);
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ #[inline]
+ pub fn static_captures_len(&self) -> Option<usize> {
+ self.imp
+ .info
+ .props_union()
+ .static_explicit_captures_len()
+ .map(|len| len.saturating_add(1))
+ }
+
+ /// Return information about the capture groups in this `Regex`.
+ ///
+ /// A `GroupInfo` is an immutable object that can be cheaply cloned. It
+ /// is responsible for maintaining a mapping between the capture groups
+ /// in the concrete syntax of zero or more regex patterns and their
+ /// internal representation used by some of the regex matchers. It is also
+ /// responsible for maintaining a mapping between the name of each group
+ /// (if one exists) and its corresponding group index.
+ ///
+ /// A `GroupInfo` is ultimately what is used to build a [`Captures`] value,
+ /// which is some mutable space where group offsets are stored as a result
+ /// of a search.
+ ///
+ /// # Example
+ ///
+ /// This shows some alternatives to [`Regex::create_captures`]:
+ ///
+ /// ```
+ /// use regex_automata::{
+ /// meta::Regex,
+ /// util::captures::Captures,
+ /// Match, PatternID, Span,
+ /// };
+ ///
+ /// let re = Regex::new(r"(?<first>[A-Z][a-z]+) (?<last>[A-Z][a-z]+)")?;
+ ///
+ /// // This is equivalent to Regex::create_captures. It stores matching
+ /// // offsets for all groups in the regex.
+ /// let mut all = Captures::all(re.group_info().clone());
+ /// re.captures("Bruce Springsteen", &mut all);
+ /// assert_eq!(Some(Match::must(0, 0..17)), all.get_match());
+ /// assert_eq!(Some(Span::from(0..5)), all.get_group_by_name("first"));
+ /// assert_eq!(Some(Span::from(6..17)), all.get_group_by_name("last"));
+ ///
+ /// // In this version, we only care about the implicit groups, which
+ /// // means offsets for the explicit groups will be unavailable. It can
+ /// // sometimes be faster to ask for fewer groups, since the underlying
+ /// // regex engine needs to do less work to keep track of them.
+ /// let mut matches = Captures::matches(re.group_info().clone());
+ /// re.captures("Bruce Springsteen", &mut matches);
+ /// // We still get the overall match info.
+ /// assert_eq!(Some(Match::must(0, 0..17)), matches.get_match());
+ /// // But now the explicit groups are unavailable.
+ /// assert_eq!(None, matches.get_group_by_name("first"));
+ /// assert_eq!(None, matches.get_group_by_name("last"));
+ ///
+ /// // Finally, in this version, we don't ask to keep track of offsets for
+ /// // *any* groups. All we get back is whether a match occurred, and if
+ /// // so, the ID of the pattern that matched.
+ /// let mut empty = Captures::empty(re.group_info().clone());
+ /// re.captures("Bruce Springsteen", &mut empty);
+ /// // it's a match!
+ /// assert!(empty.is_match());
+ /// // for pattern ID 0
+ /// assert_eq!(Some(PatternID::ZERO), empty.pattern());
+ /// // Match offsets are unavailable.
+ /// assert_eq!(None, empty.get_match());
+ /// // And of course, explicit groups are unavailable too.
+ /// assert_eq!(None, empty.get_group_by_name("first"));
+ /// assert_eq!(None, empty.get_group_by_name("last"));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ #[inline]
+ pub fn group_info(&self) -> &GroupInfo {
+ self.imp.strat.group_info()
+ }
+
+ /// Returns the configuration object used to build this `Regex`.
+ ///
+ /// If no configuration object was explicitly passed, then the
+ /// configuration returned represents the default.
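+ ///
+ /// # Example
+ ///
+ /// A small sketch showing one way to inspect the returned configuration,
+ /// here via its `get_match_kind` getter. With no explicit configuration,
+ /// the defaults are reported:
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, MatchKind};
+ ///
+ /// let re = Regex::new(r"foo")?;
+ /// assert_eq!(MatchKind::LeftmostFirst, re.get_config().get_match_kind());
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```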
+ #[inline]
+ pub fn get_config(&self) -> &Config {
+ self.imp.info.config()
+ }
+
+ /// Returns true if this regex has a high chance of being "accelerated."
+ ///
+ /// The precise meaning of "accelerated" is specifically left unspecified,
+ /// but the general meaning is that the search has a high likelihood of
+ /// running faster than a character-at-a-time loop inside a standard
+ /// regex engine.
+ ///
+ /// When a regex is accelerated, it is only a *probabilistic* claim. That
+ /// is, just because the regex is believed to be accelerated, that doesn't
+ /// mean it will definitely execute searches very fast. Similarly, if a
+ /// regex is *not* accelerated, that is also a probabilistic claim. That
+ /// is, a regex for which `is_accelerated` returns `false` could still run
+ /// searches more quickly than a regex for which `is_accelerated` returns
+ /// `true`.
+ ///
+ /// Whether a regex is marked as accelerated or not is dependent on
+ /// implementation details that may change in a semver compatible release.
+ /// That is, a regex that is accelerated in a `x.y.1` release might not be
+ /// accelerated in a `x.y.2` release.
+ ///
+ /// Basically, the value of acceleration boils down to a hedge: a hodge
+ /// podge of internal heuristics combine to make a probabilistic guess
+ /// that this regex search may run "fast." The value in knowing this from
+ /// a caller's perspective is that it may act as a signal that no further
+ /// work should be done to accelerate a search. For example, a grep-like
+ /// tool might try to do some extra work extracting literals from a regex
+ /// to create its own heuristic acceleration strategies. But it might
+ /// choose to defer to this crate's acceleration strategy if one exists.
+ /// This routine permits querying whether such a strategy is active for a
+ /// particular regex.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use regex_automata::meta::Regex;
+ ///
+ /// // A simple literal is very likely to be accelerated.
+ /// let re = Regex::new(r"foo")?;
+ /// assert!(re.is_accelerated());
+ ///
+ /// // A regex with no literals is likely to not be accelerated.
+ /// let re = Regex::new(r"\w")?;
+ /// assert!(!re.is_accelerated());
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ #[inline]
+ pub fn is_accelerated(&self) -> bool {
+ self.imp.strat.is_accelerated()
+ }
+
+ /// Returns the total approximate heap memory, in bytes, used by this `Regex`.
+ ///
+ /// Note that currently, there is no high level configuration for setting
+ /// a limit on the specific value returned by this routine. Instead, the
+ /// following configuration knobs can be used to control heap memory at a
+ /// somewhat lower level:
+ ///
+ /// * [`Config::nfa_size_limit`] controls how big _any_ of the NFAs are
+ /// allowed to be.
+ /// * [`Config::onepass_size_limit`] controls how big the one-pass DFA is
+ /// allowed to be.
+ /// * [`Config::hybrid_cache_capacity`] controls how much memory the lazy
+ /// DFA is permitted to allocate to store its transition table.
+ /// * [`Config::dfa_size_limit`] controls how big a fully compiled DFA is
+ /// allowed to be.
+ /// * [`Config::dfa_state_limit`] controls the conditions under which the
+ /// meta regex engine will even attempt to build a fully compiled DFA.
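+ ///
+ /// # Example
+ ///
+ /// A minimal sketch of querying heap usage. The exact number is an
+ /// implementation detail and may change between releases, so the assertion
+ /// below only checks that *some* heap memory is in use:
+ ///
+ /// ```
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let re = Regex::new(r"(?i)Sam(wise)?\s+Gamgee")?;
+ /// // A compiled regex of this shape needs at least an NFA on the heap.
+ /// assert!(re.memory_usage() > 0);
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```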
+ #[inline]
+ pub fn memory_usage(&self) -> usize {
+ self.imp.strat.memory_usage()
+ }
+}
+
+impl Clone for Regex {
+ fn clone(&self) -> Regex {
+ let imp = Arc::clone(&self.imp);
+ let pool = {
+ let strat = Arc::clone(&imp.strat);
+ let create: CachePoolFn = Box::new(move || strat.create_cache());
+ Pool::new(create)
+ };
+ Regex { imp, pool }
+ }
+}
+
+#[derive(Clone, Debug)]
+pub(crate) struct RegexInfo(Arc<RegexInfoI>);
+
+#[derive(Clone, Debug)]
+struct RegexInfoI {
+ config: Config,
+ props: Vec<hir::Properties>,
+ props_union: hir::Properties,
+}
+
+impl RegexInfo {
+ fn new(config: Config, hirs: &[&Hir]) -> RegexInfo {
+ // Collect all of the properties from each of the HIRs, and also
+ // union them into one big set of properties representing all HIRs
+ // as if they were in one big alternation.
+ let mut props = vec![];
+ for hir in hirs.iter() {
+ props.push(hir.properties().clone());
+ }
+ let props_union = hir::Properties::union(&props);
+
+ RegexInfo(Arc::new(RegexInfoI { config, props, props_union }))
+ }
+
+ pub(crate) fn config(&self) -> &Config {
+ &self.0.config
+ }
+
+ pub(crate) fn props(&self) -> &[hir::Properties] {
+ &self.0.props
+ }
+
+ pub(crate) fn props_union(&self) -> &hir::Properties {
+ &self.0.props_union
+ }
+
+ pub(crate) fn pattern_len(&self) -> usize {
+ self.props().len()
+ }
+
+ pub(crate) fn memory_usage(&self) -> usize {
+ self.props().iter().map(|p| p.memory_usage()).sum::<usize>()
+ + self.props_union().memory_usage()
+ }
+
+ /// Returns true when the search is guaranteed to be anchored. That is,
+ /// when a match is reported, its offset is guaranteed to correspond to
+ /// the start of the search.
+ ///
+ /// This includes returning true when `input` _isn't_ anchored but the
+ /// underlying regex is.
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn is_anchored_start(&self, input: &Input<'_>) -> bool {
+ input.get_anchored().is_anchored() || self.is_always_anchored_start()
+ }
+
+ /// Returns true when this regex is always anchored to the start of a
+ /// search. And in particular, that regardless of an `Input` configuration,
+ /// if any match is reported it must start at `0`.
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn is_always_anchored_start(&self) -> bool {
+ use regex_syntax::hir::Look;
+ self.props_union().look_set_prefix().contains(Look::Start)
+ }
+
+ /// Returns true when this regex is always anchored to the end of a
+ /// search. And in particular, that regardless of an `Input` configuration,
+ /// if any match is reported it must end at the end of the haystack.
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn is_always_anchored_end(&self) -> bool {
+ use regex_syntax::hir::Look;
+ self.props_union().look_set_suffix().contains(Look::End)
+ }
+
+ /// Returns true if and only if it is known that a match is impossible
+ /// for the given input. This is useful for short-circuiting and avoiding
+ /// running the regex engine if it's known no match can be reported.
+ ///
+ /// Note that this doesn't necessarily detect every possible case. For
+ /// example, when `pattern_len() == 0`, a match is impossible, but that
+ /// case is so rare that it's fine to be handled by the regex engine
+ /// itself. That is, it's not worth the cost of adding it here in order to
+ /// make it a little faster. The reason is that this is called for every
+ /// search, so there is some cost to adding checks here. Arguably, some of
+ /// the checks that are here already probably shouldn't be here...
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn is_impossible(&self, input: &Input<'_>) -> bool {
+ // The underlying regex is anchored, so if we don't start the search
+ // at position 0, a match is impossible, because the anchor can only
+ // match at position 0.
+ if input.start() > 0 && self.is_always_anchored_start() {
+ return true;
+ }
+ // Same idea, but for the end anchor.
+ if input.end() < input.haystack().len()
+ && self.is_always_anchored_end()
+ {
+ return true;
+ }
+ // If the haystack is smaller than the minimum length required, then
+ // we know there can be no match.
+ let minlen = match self.props_union().minimum_len() {
+ None => return false,
+ Some(minlen) => minlen,
+ };
+ if input.get_span().len() < minlen {
+ return true;
+ }
+ // Same idea as minimum, but for maximum. This is trickier. We can
+ // only apply the maximum when we know the entire span that we're
+ // searching *has* to match according to the regex (and possibly the
+ // input configuration). If we know there is too much for the regex
+ // to match, we can bail early.
+ //
+ // I don't think we can apply the maximum otherwise unfortunately.
+ if self.is_anchored_start(input) && self.is_always_anchored_end() {
+ let maxlen = match self.props_union().maximum_len() {
+ None => return false,
+ Some(maxlen) => maxlen,
+ };
+ if input.get_span().len() > maxlen {
+ return true;
+ }
+ }
+ false
+ }
+}
+
+/// An iterator over all non-overlapping matches.
+///
+/// The iterator yields a [`Match`] value until no more matches could be found.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'r` represents the lifetime of the `Regex` that produced this iterator.
+/// * `'h` represents the lifetime of the haystack being searched.
+///
+/// This iterator can be created with the [`Regex::find_iter`] method.
+#[derive(Debug)]
+pub struct FindMatches<'r, 'h> {
+ re: &'r Regex,
+ cache: CachePoolGuard<'r>,
+ it: iter::Searcher<'h>,
+}
+
+impl<'r, 'h> FindMatches<'r, 'h> {
+ /// Returns the `Regex` value that created this iterator.
+ #[inline]
+ pub fn regex(&self) -> &'r Regex {
+ self.re
+ }
+
+ /// Returns the current `Input` associated with this iterator.
+ ///
+ /// The `start` position on the given `Input` may change during iteration,
+ /// but all other values are guaranteed to remain invariant.
+ #[inline]
+ pub fn input<'s>(&'s self) -> &'s Input<'h> {
+ self.it.input()
+ }
+}
+
+impl<'r, 'h> Iterator for FindMatches<'r, 'h> {
+ type Item = Match;
+
+ #[inline]
+ fn next(&mut self) -> Option<Match> {
+ let FindMatches { re, ref mut cache, ref mut it } = *self;
+ it.advance(|input| Ok(re.search_with(cache, input)))
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ // If all we care about is a count of matches, then we only need to
+ // find the end position of each match. This can give us a 2x perf
+ // boost in some cases, because it avoids needing to do a reverse scan
+ // to find the start of a match.
+ let FindMatches { re, mut cache, it } = self;
+ // This does the deref for PoolGuard once instead of every iter.
+ let cache = &mut *cache;
+ it.into_half_matches_iter(
+ |input| Ok(re.search_half_with(cache, input)),
+ )
+ .count()
+ }
+}
+
+impl<'r, 'h> core::iter::FusedIterator for FindMatches<'r, 'h> {}
+
+/// An iterator over all non-overlapping leftmost matches with their capturing
+/// groups.
+///
+/// The iterator yields a [`Captures`] value until no more matches could be
+/// found.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'r` represents the lifetime of the `Regex` that produced this iterator.
+/// * `'h` represents the lifetime of the haystack being searched.
+///
+/// This iterator can be created with the [`Regex::captures_iter`] method.
+#[derive(Debug)]
+pub struct CapturesMatches<'r, 'h> {
+ re: &'r Regex,
+ cache: CachePoolGuard<'r>,
+ caps: Captures,
+ it: iter::Searcher<'h>,
+}
+
+impl<'r, 'h> CapturesMatches<'r, 'h> {
+ /// Returns the `Regex` value that created this iterator.
+ #[inline]
+ pub fn regex(&self) -> &'r Regex {
+ self.re
+ }
+
+ /// Returns the current `Input` associated with this iterator.
+ ///
+ /// The `start` position on the given `Input` may change during iteration,
+ /// but all other values are guaranteed to remain invariant.
+ #[inline]
+ pub fn input<'s>(&'s self) -> &'s Input<'h> {
+ self.it.input()
+ }
+}
+
+impl<'r, 'h> Iterator for CapturesMatches<'r, 'h> {
+ type Item = Captures;
+
+ #[inline]
+ fn next(&mut self) -> Option<Captures> {
+ // Splitting 'self' apart seems necessary to appease borrowck.
+ let CapturesMatches { re, ref mut cache, ref mut caps, ref mut it } =
+ *self;
+ let _ = it.advance(|input| {
+ re.search_captures_with(cache, input, caps);
+ Ok(caps.get_match())
+ });
+ if caps.is_match() {
+ Some(caps.clone())
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ let CapturesMatches { re, mut cache, it, .. } = self;
+ // This does the deref for PoolGuard once instead of every iter.
+ let cache = &mut *cache;
+ it.into_half_matches_iter(
+ |input| Ok(re.search_half_with(cache, input)),
+ )
+ .count()
+ }
+}
+
+impl<'r, 'h> core::iter::FusedIterator for CapturesMatches<'r, 'h> {}
+
+/// Yields all substrings delimited by a regular expression match.
+///
+/// The spans correspond to the offsets between matches.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'r` represents the lifetime of the `Regex` that produced this iterator.
+/// * `'h` represents the lifetime of the haystack being searched.
+///
+/// This iterator can be created with the [`Regex::split`] method.
+#[derive(Debug)]
+pub struct Split<'r, 'h> {
+ finder: FindMatches<'r, 'h>,
+ last: usize,
+}
+
+impl<'r, 'h> Split<'r, 'h> {
+ /// Returns the current `Input` associated with this iterator.
+ ///
+ /// The `start` position on the given `Input` may change during iteration,
+ /// but all other values are guaranteed to remain invariant.
+ #[inline]
+ pub fn input<'s>(&'s self) -> &'s Input<'h> {
+ self.finder.input()
+ }
+}
+
+impl<'r, 'h> Iterator for Split<'r, 'h> {
+ type Item = Span;
+
+ fn next(&mut self) -> Option<Span> {
+ match self.finder.next() {
+ None => {
+ let len = self.finder.it.input().haystack().len();
+ if self.last > len {
+ None
+ } else {
+ let span = Span::from(self.last..len);
+ self.last = len + 1; // Next call will return None
+ Some(span)
+ }
+ }
+ Some(m) => {
+ let span = Span::from(self.last..m.start());
+ self.last = m.end();
+ Some(span)
+ }
+ }
+ }
+}
+
+impl<'r, 'h> core::iter::FusedIterator for Split<'r, 'h> {}
+
+/// Yields at most `N` spans delimited by a regular expression match.
+///
+/// The spans correspond to the offsets between matches. The last span will be
+/// whatever remains after splitting.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'r` represents the lifetime of the `Regex` that produced this iterator.
+/// * `'h` represents the lifetime of the haystack being searched.
+///
+/// This iterator can be created with the [`Regex::splitn`] method.
+#[derive(Debug)]
+pub struct SplitN<'r, 'h> {
+ splits: Split<'r, 'h>,
+ limit: usize,
+}
+
+impl<'r, 'h> SplitN<'r, 'h> {
+ /// Returns the current `Input` associated with this iterator.
+ ///
+ /// The `start` position on the given `Input` may change during iteration,
+ /// but all other values are guaranteed to remain invariant.
+ #[inline]
+ pub fn input<'s>(&'s self) -> &'s Input<'h> {
+ self.splits.input()
+ }
+}
+
+impl<'r, 'h> Iterator for SplitN<'r, 'h> {
+ type Item = Span;
+
+ fn next(&mut self) -> Option<Span> {
+ if self.limit == 0 {
+ return None;
+ }
+
+ self.limit -= 1;
+ if self.limit > 0 {
+ return self.splits.next();
+ }
+
+ let len = self.splits.finder.it.input().haystack().len();
+ if self.splits.last > len {
+ // We've already returned all substrings.
+ None
+ } else {
+ // self.limit is now 0, so future calls will return None immediately
+ Some(Span::from(self.splits.last..len))
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, Some(self.limit))
+ }
+}
+
+impl<'r, 'h> core::iter::FusedIterator for SplitN<'r, 'h> {}
+
+/// Represents mutable scratch space used by regex engines during a search.
+///
+/// Most of the regex engines in this crate require some kind of
+/// mutable state in order to execute a search. This mutable state is
+ /// explicitly separated from the core regex object (such as a
+/// [`thompson::NFA`](crate::nfa::thompson::NFA)) so that the read-only regex
+/// object can be shared across multiple threads simultaneously without any
+/// synchronization. Conversely, a `Cache` must either be duplicated if using
+/// the same `Regex` from multiple threads, or else there must be some kind of
+/// synchronization that guarantees exclusive access while it's in use by one
+/// thread.
+///
+ /// A `Regex` attempts to do this synchronization for you by using an
+ /// internal memory pool of caches. Its size scales roughly with the number
+ /// of simultaneous regex searches.
+///
+ /// For cases where one does not want to rely on a `Regex`'s internal memory
+/// pool, lower level routines such as [`Regex::search_with`] are provided
+/// that permit callers to pass a `Cache` into the search routine explicitly.
+///
+ /// General advice is that the memory pool is often more than good enough.
+/// However, it may be possible to observe the effects of its latency,
+/// especially when searching many small haystacks from many threads
+/// simultaneously.
+///
+/// Caches can be created from their corresponding `Regex` via
+/// [`Regex::create_cache`]. A cache can only be used with either the `Regex`
+/// that created it, or the `Regex` that was most recently used to reset it
+/// with [`Cache::reset`]. Using a cache with any other `Regex` may result in
+/// panics or incorrect results.
+///
+/// # Example
+///
+/// ```
+/// use regex_automata::{meta::Regex, Input, Match};
+///
+/// let re = Regex::new(r"(?-u)m\w+\s+m\w+")?;
+/// let mut cache = re.create_cache();
+/// let input = Input::new("crazy janey and her mission man");
+/// assert_eq!(
+/// Some(Match::must(0, 20..31)),
+/// re.search_with(&mut cache, &input),
+/// );
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+#[derive(Debug, Clone)]
+pub struct Cache {
+ pub(crate) capmatches: Captures,
+ pub(crate) pikevm: wrappers::PikeVMCache,
+ pub(crate) backtrack: wrappers::BoundedBacktrackerCache,
+ pub(crate) onepass: wrappers::OnePassCache,
+ pub(crate) hybrid: wrappers::HybridCache,
+ pub(crate) revhybrid: wrappers::ReverseHybridCache,
+}
+
+impl Cache {
+ /// Creates a new `Cache` for use with this regex.
+ ///
+ /// The cache returned should only be used for searches for the given
+ /// `Regex`. If you want to reuse the cache for another `Regex`, then you
+ /// must call [`Cache::reset`] with that `Regex`.
+ pub fn new(re: &Regex) -> Cache {
+ re.create_cache()
+ }
+
+ /// Reset this cache such that it can be used for searching with the given
+ /// `Regex` (and only that `Regex`).
+ ///
+ /// A cache reset permits potentially reusing memory already allocated in
+ /// this cache with a different `Regex`.
+ ///
+ /// # Example
+ ///
+ /// This shows how to re-purpose a cache for use with a different `Regex`.
+ ///
+ /// ```
+ /// # if cfg!(miri) { return Ok(()); } // miri takes too long
+ /// use regex_automata::{meta::Regex, Match, Input};
+ ///
+ /// let re1 = Regex::new(r"\w")?;
+ /// let re2 = Regex::new(r"\W")?;
+ ///
+ /// let mut cache = re1.create_cache();
+ /// assert_eq!(
+ /// Some(Match::must(0, 0..2)),
+ /// re1.search_with(&mut cache, &Input::new("Δ")),
+ /// );
+ ///
+ /// // Using 'cache' with re2 is not allowed. It may result in panics or
+ /// // incorrect results. In order to re-purpose the cache, we must reset
+ /// // it with the Regex we'd like to use it with.
+ /// //
+ /// // Similarly, after this reset, using the cache with 're1' is also not
+ /// // allowed.
+ /// cache.reset(&re2);
+ /// assert_eq!(
+ /// Some(Match::must(0, 0..3)),
+ /// re2.search_with(&mut cache, &Input::new("☃")),
+ /// );
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn reset(&mut self, re: &Regex) {
+ re.imp.strat.reset_cache(self)
+ }
+
+ /// Returns the heap memory usage, in bytes, of this cache.
+ ///
+ /// This does **not** include the stack size used up by this cache. To
+ /// compute that, use `std::mem::size_of::<Cache>()`.
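+ ///
+ /// # Example
+ ///
+ /// A small sketch of querying a cache's heap usage. The exact value is an
+ /// implementation detail, so this only demonstrates the call itself:
+ ///
+ /// ```
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let re = Regex::new(r"(?m)^\w{2,10}$")?;
+ /// let cache = re.create_cache();
+ /// // Pair this with `std::mem::size_of::<Cache>()` to account for the
+ /// // stack size as well.
+ /// let _heap_bytes = cache.memory_usage();
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```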
+ pub fn memory_usage(&self) -> usize {
+ let mut bytes = 0;
+ bytes += self.pikevm.memory_usage();
+ bytes += self.backtrack.memory_usage();
+ bytes += self.onepass.memory_usage();
+ bytes += self.hybrid.memory_usage();
+ bytes += self.revhybrid.memory_usage();
+ bytes
+ }
+}
+
+/// An object describing the configuration of a `Regex`.
+///
+/// This configuration only includes options for the
+/// non-syntax behavior of a `Regex`, and can be applied via the
+/// [`Builder::configure`] method. For configuring the syntax options, see
+/// [`util::syntax::Config`](crate::util::syntax::Config).
+///
+/// # Example: lower the NFA size limit
+///
+/// In some cases, the default size limit might be too big. The size limit can
+/// be lowered, which will prevent large regex patterns from compiling.
+///
+/// ```
+/// # if cfg!(miri) { return Ok(()); } // miri takes too long
+/// use regex_automata::meta::Regex;
+///
+/// let result = Regex::builder()
+/// .configure(Regex::config().nfa_size_limit(Some(20 * (1<<10))))
+/// // Not even 20KB is enough to build a single large Unicode class!
+/// .build(r"\pL");
+/// assert!(result.is_err());
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+#[derive(Clone, Debug, Default)]
+pub struct Config {
+ // As with other configuration types in this crate, we put all our knobs
+ // in options so that we can distinguish between "default" and "not set."
+ // This makes it possible to easily combine multiple configurations
+ // without default values overwriting explicitly specified values. See the
+ // 'overwrite' method.
+ //
+ // For docs on the fields below, see the corresponding method setters.
+ match_kind: Option<MatchKind>,
+ utf8_empty: Option<bool>,
+ autopre: Option<bool>,
+ pre: Option<Option<Prefilter>>,
+ which_captures: Option<WhichCaptures>,
+ nfa_size_limit: Option<Option<usize>>,
+ onepass_size_limit: Option<Option<usize>>,
+ hybrid_cache_capacity: Option<usize>,
+ hybrid: Option<bool>,
+ dfa: Option<bool>,
+ dfa_size_limit: Option<Option<usize>>,
+ dfa_state_limit: Option<Option<usize>>,
+ onepass: Option<bool>,
+ backtrack: Option<bool>,
+ byte_classes: Option<bool>,
+ line_terminator: Option<u8>,
+}
+
+impl Config {
+ /// Create a new configuration object for a `Regex`.
+ pub fn new() -> Config {
+ Config::default()
+ }
+
+ /// Set the match semantics for a `Regex`.
+ ///
+ /// The default value is [`MatchKind::LeftmostFirst`].
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Match, MatchKind};
+ ///
+ /// // By default, leftmost-first semantics are used, which
+ /// // disambiguates matches at the same position by selecting
+ /// // the one that corresponds earlier in the pattern.
+ /// let re = Regex::new("sam|samwise")?;
+ /// assert_eq!(Some(Match::must(0, 0..3)), re.find("samwise"));
+ ///
+ /// // But with 'all' semantics, match priority is ignored
+ /// // and all match states are included. When coupled with
+ /// // a leftmost search, the search will report the last
+ /// // possible match.
+ /// let re = Regex::builder()
+ /// .configure(Regex::config().match_kind(MatchKind::All))
+ /// .build("sam|samwise")?;
+ /// assert_eq!(Some(Match::must(0, 0..7)), re.find("samwise"));
+ /// // Beware that this can lead to skipping matches!
+ /// // Usually 'all' is used for anchored reverse searches
+ /// // only, or for overlapping searches.
+ /// assert_eq!(Some(Match::must(0, 4..11)), re.find("sam samwise"));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn match_kind(self, kind: MatchKind) -> Config {
+ Config { match_kind: Some(kind), ..self }
+ }
+
+ /// Toggles whether empty matches are permitted to occur between the code
+ /// units of a UTF-8 encoded codepoint.
+ ///
+ /// This should generally be enabled when searching a `&str` or anything that
+ /// you otherwise know is valid UTF-8. It should be disabled in all other
+ /// cases. Namely, if the haystack is not valid UTF-8 and this is enabled,
+ /// then behavior is unspecified.
+ ///
+ /// By default, this is enabled.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Match};
+ ///
+ /// let re = Regex::new("")?;
+ /// let got: Vec<Match> = re.find_iter("☃").collect();
+ /// // Matches only occur at the beginning and end of the snowman.
+ /// assert_eq!(got, vec![
+ /// Match::must(0, 0..0),
+ /// Match::must(0, 3..3),
+ /// ]);
+ ///
+ /// let re = Regex::builder()
+ /// .configure(Regex::config().utf8_empty(false))
+ /// .build("")?;
+ /// let got: Vec<Match> = re.find_iter("☃").collect();
+ /// // Matches now occur at every position!
+ /// assert_eq!(got, vec![
+ /// Match::must(0, 0..0),
+ /// Match::must(0, 1..1),
+ /// Match::must(0, 2..2),
+ /// Match::must(0, 3..3),
+ /// ]);
+ ///
+ /// Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn utf8_empty(self, yes: bool) -> Config {
+ Config { utf8_empty: Some(yes), ..self }
+ }
+
+ /// Toggles whether automatic prefilter support is enabled.
+ ///
+ /// If this is disabled and [`Config::prefilter`] is not set, then the
+ /// meta regex engine will not use any prefilters. This can sometimes
+ /// be beneficial in cases where you know (or have measured) that the
+ /// prefilter leads to overall worse search performance.
+ ///
+ /// By default, this is enabled.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # if cfg!(miri) { return Ok(()); } // miri takes too long
+ /// use regex_automata::{meta::Regex, Match};
+ ///
+ /// let re = Regex::builder()
+ /// .configure(Regex::config().auto_prefilter(false))
+ /// .build(r"Bruce \w+")?;
+ /// let hay = "Hello Bruce Springsteen!";
+ /// assert_eq!(Some(Match::must(0, 6..23)), re.find(hay));
+ ///
+ /// Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn auto_prefilter(self, yes: bool) -> Config {
+ Config { autopre: Some(yes), ..self }
+ }
+
+ /// Overrides and sets the prefilter to use inside a `Regex`.
+ ///
+ /// This permits one to forcefully set a prefilter in cases where the
+ /// caller knows better than whatever the automatic prefilter logic is
+ /// capable of.
+ ///
+ /// By default, this is set to `None` and an automatic prefilter will be
+ /// used if one could be built. (Assuming [`Config::auto_prefilter`] is
+ /// enabled, which it is by default.)
+ ///
+ /// # Example
+ ///
+ /// This example shows how to set your own prefilter. In the case of a
+ /// pattern like `Bruce \w+`, the automatic prefilter is likely to be
+ /// constructed in a way that it will look for occurrences of `Bruce `.
+ /// In most cases, this is the best choice. But in some cases, it may be
+ /// the case that running `memchr` on `B` is the best choice. One can
+ /// achieve that behavior by overriding the automatic prefilter logic
+ /// and providing a prefilter that just matches `B`.
+ ///
+ /// ```
+ /// # if cfg!(miri) { return Ok(()); } // miri takes too long
+ /// use regex_automata::{
+ /// meta::Regex,
+ /// util::prefilter::Prefilter,
+ /// Match, MatchKind,
+ /// };
+ ///
+ /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["B"])
+ /// .expect("a prefilter");
+ /// let re = Regex::builder()
+ /// .configure(Regex::config().prefilter(Some(pre)))
+ /// .build(r"Bruce \w+")?;
+ /// let hay = "Hello Bruce Springsteen!";
+ /// assert_eq!(Some(Match::must(0, 6..23)), re.find(hay));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ ///
+ /// # Example: incorrect prefilters can lead to incorrect results!
+ ///
+ /// Be warned that setting an incorrect prefilter can lead to missed
+ /// matches. So if you use this option, ensure your prefilter can _never_
+ /// report false negatives. (A false positive is, on the other hand, quite
+ /// okay and generally unavoidable.)
+ ///
+ /// ```
+ /// # if cfg!(miri) { return Ok(()); } // miri takes too long
+ /// use regex_automata::{
+ /// meta::Regex,
+ /// util::prefilter::Prefilter,
+ /// Match, MatchKind,
+ /// };
+ ///
+ /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["Z"])
+ /// .expect("a prefilter");
+ /// let re = Regex::builder()
+ /// .configure(Regex::config().prefilter(Some(pre)))
+ /// .build(r"Bruce \w+")?;
+ /// let hay = "Hello Bruce Springsteen!";
+ /// // Oops! No match found, but there should be one!
+ /// assert_eq!(None, re.find(hay));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn prefilter(self, pre: Option<Prefilter>) -> Config {
+ Config { pre: Some(pre), ..self }
+ }
+
+ /// Configures what kinds of groups are compiled as "capturing" in the
+ /// underlying regex engine.
+ ///
+ /// This is set to [`WhichCaptures::All`] by default. Callers may wish to
+ /// use [`WhichCaptures::Implicit`] in cases where one wants to avoid the
+ /// overhead of capture states for explicit groups.
+ ///
+ /// Note that another approach to avoiding the overhead of capture groups
+ /// is by using non-capturing groups in the regex pattern. That is,
+ /// `(?:a)` instead of `(a)`. This option is useful when you can't control
+ /// the concrete syntax but know that you don't need the underlying capture
+ /// states. For example, using `WhichCaptures::Implicit` will behave as if
+ /// all explicit capturing groups in the pattern were non-capturing.
+ ///
+ /// Setting this to `WhichCaptures::None` is usually not the right thing to
+ /// do. When no capture states are compiled, some regex engines (such as
+ /// the `PikeVM`) won't be able to report match offsets. This will manifest
+ /// as no match being found.
+ ///
+ /// # Example
+ ///
+ /// This example demonstrates how the results of capture groups can change
+ /// based on this option. First we show the default (all capture groups in
+ /// the pattern are capturing):
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Match, Span};
+ ///
+ /// let re = Regex::new(r"foo([0-9]+)bar")?;
+ /// let hay = "foo123bar";
+ ///
+ /// let mut caps = re.create_captures();
+ /// re.captures(hay, &mut caps);
+ /// assert_eq!(Some(Span::from(0..9)), caps.get_group(0));
+ /// assert_eq!(Some(Span::from(3..6)), caps.get_group(1));
+ ///
+ /// Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ ///
+ /// And now we show the behavior when we only include implicit capture
+ /// groups. In this case, we can only find the overall match span, but the
+ /// spans of any other explicit group don't exist because they are treated
+ /// as non-capturing. (In effect, when `WhichCaptures::Implicit` is used,
+ /// there is no real point in using [`Regex::captures`] since it will never
+ /// be able to report more information than [`Regex::find`].)
+ ///
+ /// ```
+ /// use regex_automata::{
+ /// meta::Regex,
+ /// nfa::thompson::WhichCaptures,
+ /// Match,
+ /// Span,
+ /// };
+ ///
+ /// let re = Regex::builder()
+ /// .configure(Regex::config().which_captures(WhichCaptures::Implicit))
+ /// .build(r"foo([0-9]+)bar")?;
+ /// let hay = "foo123bar";
+ ///
+ /// let mut caps = re.create_captures();
+ /// re.captures(hay, &mut caps);
+ /// assert_eq!(Some(Span::from(0..9)), caps.get_group(0));
+ /// assert_eq!(None, caps.get_group(1));
+ ///
+ /// Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn which_captures(mut self, which_captures: WhichCaptures) -> Config {
+ self.which_captures = Some(which_captures);
+ self
+ }
+
+ /// Sets the size limit, in bytes, to enforce on the construction of every
+ /// NFA built by the meta regex engine.
+ ///
+ /// Setting it to `None` disables the limit. This is not recommended if
+ /// you're compiling untrusted patterns.
+ ///
+ /// Note that this limit is applied to _each_ NFA built, and if any of
+ /// them exceeds the limit, then construction will fail. This limit does
+ /// _not_ correspond to the total memory used by all NFAs in the meta regex
+ /// engine.
+ ///
+ /// This defaults to some reasonable number that permits most reasonable
+ /// patterns.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # if cfg!(miri) { return Ok(()); } // miri takes too long
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let result = Regex::builder()
+ /// .configure(Regex::config().nfa_size_limit(Some(20 * (1<<10))))
+ /// // Not even 20KB is enough to build a single large Unicode class!
+ /// .build(r"\pL");
+ /// assert!(result.is_err());
+ ///
+ /// // But notice that building such a regex with the exact same limit
+ /// // can succeed depending on other aspects of the configuration. For
+ /// // example, a single *forward* NFA will (at time of writing) fit into
+ /// // the 20KB limit, but a *reverse* NFA of the same pattern will not.
+ /// // So if one configures a meta regex such that a reverse NFA is never
+ /// // needed and thus never built, then the 20KB limit will be enough for
+ /// // a pattern like \pL!
+ /// let result = Regex::builder()
+ /// .configure(Regex::config()
+ /// .nfa_size_limit(Some(20 * (1<<10)))
+ /// // The DFAs are the only thing that (currently) need a reverse
+ /// // NFA. So if both are disabled, the meta regex engine will
+ /// // skip building the reverse NFA. Note that this isn't an API
+ /// // guarantee. A future semver compatible version may introduce
+ /// // new use cases for a reverse NFA.
+ /// .hybrid(false)
+ /// .dfa(false)
+ /// )
+ /// // This time, 20KB is enough, since only the forward NFA is built.
+ /// .build(r"\pL");
+ /// assert!(result.is_ok());
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn nfa_size_limit(self, limit: Option<usize>) -> Config {
+ Config { nfa_size_limit: Some(limit), ..self }
+ }
+
+ /// Sets the size limit, in bytes, for the one-pass DFA.
+ ///
+ /// Setting it to `None` disables the limit. Disabling the limit is
+ /// strongly discouraged when compiling untrusted patterns. Even if the
+ /// patterns are trusted, it still may not be a good idea, since a one-pass
+ /// DFA can use a lot of memory. With that said, as the size of a regex
+ /// increases, the likelihood of it being one-pass decreases.
+ ///
+ /// This defaults to some reasonable number that permits most reasonable
+ /// one-pass patterns.
+ ///
+ /// # Example
+ ///
+ /// This shows how to set the one-pass DFA size limit. Note that since
+ /// a one-pass DFA is an optional component of the meta regex engine,
+ /// this size limit only impacts what is built internally and will never
+ /// determine whether a `Regex` itself fails to build.
+ ///
+ /// ```
+ /// # if cfg!(miri) { return Ok(()); } // miri takes too long
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let result = Regex::builder()
+ /// .configure(Regex::config().onepass_size_limit(Some(2 * (1<<20))))
+ /// .build(r"\pL{5}");
+ /// assert!(result.is_ok());
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn onepass_size_limit(self, limit: Option<usize>) -> Config {
+ Config { onepass_size_limit: Some(limit), ..self }
+ }
+
+ /// Set the cache capacity, in bytes, for the lazy DFA.
+ ///
+ /// The cache capacity of the lazy DFA determines approximately how much
+ /// heap memory it is allowed to use to store its state transitions. The
+ /// state transitions are computed at search time, and if the cache fills
+ /// up, it is cleared. At this point, any previously generated state
+ /// transitions are lost and are re-generated if they're needed again.
+ ///
+ /// This sort of cache filling and clearing works quite well _so long as
+ /// cache clearing happens infrequently_. If it happens too often, then the
+ /// meta regex engine will stop using the lazy DFA and switch over to a
+ /// different regex engine.
+ ///
+ /// In cases where the cache is cleared too often, it may be possible to
+ /// give the cache more space and reduce (or eliminate) how often it is
+ /// cleared. Similarly, sometimes a regex is so big that the lazy DFA isn't
+ /// used at all if its cache capacity isn't big enough.
+ ///
+ /// The capacity set here is a _limit_ on how much memory is used. The
+ /// actual memory used is only allocated as it's needed.
+ ///
+ /// Determining the right value for this is a little tricky and will likely
+ /// require some profiling. Enabling the `logging` feature and setting the
+ /// log level to `trace` will also tell you how often the cache is being
+ /// cleared.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # if cfg!(miri) { return Ok(()); } // miri takes too long
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let result = Regex::builder()
+ /// .configure(Regex::config().hybrid_cache_capacity(20 * (1<<20)))
+ /// .build(r"\pL{5}");
+ /// assert!(result.is_ok());
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn hybrid_cache_capacity(self, limit: usize) -> Config {
+ Config { hybrid_cache_capacity: Some(limit), ..self }
+ }
+
+ /// Sets the size limit, in bytes, for heap memory used for a fully
+ /// compiled DFA.
+ ///
+ /// **NOTE:** If you increase this, you'll likely also need to increase
+ /// [`Config::dfa_state_limit`].
+ ///
+ /// In contrast to the lazy DFA, building a full DFA requires computing
+ /// all of its state transitions up front. This can be a very expensive
+ /// process, and runs in worst case `2^n` time and space (where `n` is
+ /// proportional to the size of the regex). However, a full DFA unlocks
+ /// some additional optimization opportunities.
+ ///
+ /// Because full DFAs can be so expensive, the default limits for them are
+ /// incredibly small. Generally speaking, if your regex is moderately big
+ /// or if you're using Unicode features (`\w` is Unicode-aware by default
+ /// for example), then you can expect that the meta regex engine won't even
+ /// attempt to build a DFA for it.
+ ///
+ /// If this and [`Config::dfa_state_limit`] are set to `None`, then the
+ /// meta regex will not use any sort of limits when deciding whether to
+ /// build a DFA. This in turn makes construction of a `Regex` take
+ /// worst case exponential time and space. Even short patterns can result
+ /// in huge space blow ups. So it is strongly recommended to keep some kind
+ /// of limit set!
+ ///
+ /// The default is set to a small number that permits some simple regexes
+ /// to get compiled into DFAs in reasonable time.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # if cfg!(miri) { return Ok(()); } // miri takes too long
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let result = Regex::builder()
+ /// // 100MB is much bigger than the default.
+ /// .configure(Regex::config()
+ /// .dfa_size_limit(Some(100 * (1<<20)))
+ /// // We don't care about size too much here, so just
+ /// // remove the NFA state limit altogether.
+ /// .dfa_state_limit(None))
+ /// .build(r"\pL{5}");
+ /// assert!(result.is_ok());
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn dfa_size_limit(self, limit: Option<usize>) -> Config {
+ Config { dfa_size_limit: Some(limit), ..self }
+ }
+
+ /// Sets a limit on the total number of NFA states, beyond which compilation
+ /// of a full DFA is not attempted.
+ ///
+ /// This limit works in concert with [`Config::dfa_size_limit`]. Namely,
+ /// whereas `Config::dfa_size_limit` is applied by attempting to construct
+ /// a DFA, this limit is used to avoid the attempt in the first place. This
+ /// is useful to avoid hefty initialization costs associated with building
+ /// a DFA for cases where it is obvious the DFA will ultimately be too big.
+ ///
+ /// By default, this is set to a very small number.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # if cfg!(miri) { return Ok(()); } // miri takes too long
+ /// use regex_automata::meta::Regex;
+ ///
+ /// let result = Regex::builder()
+ /// .configure(Regex::config()
+ /// // Sometimes the default state limit rejects DFAs even
+ /// // if they would fit in the size limit. Here, we disable
+ /// // the check on the number of NFA states and just rely on
+ /// // the size limit.
+ /// .dfa_state_limit(None))
+ /// .build(r"(?-u)\w{30}");
+ /// assert!(result.is_ok());
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn dfa_state_limit(self, limit: Option<usize>) -> Config {
+ Config { dfa_state_limit: Some(limit), ..self }
+ }
+
+ /// Whether to attempt to shrink the size of the alphabet for the regex
+ /// pattern or not. When enabled, the alphabet is shrunk into a set of
+ /// equivalence classes, where every byte in the same equivalence class
+ /// cannot discriminate between a match and a non-match.
+ ///
+ /// **WARNING:** This is only useful for debugging DFAs. Disabling this
+ /// does not yield any speed advantages. Indeed, disabling it can result
+ /// in much higher memory usage. Disabling byte classes is useful for
+ /// debugging the actual generated transitions because it lets one see the
+ /// transitions defined on actual bytes instead of the equivalence classes.
+ ///
+ /// This option is enabled by default and should never be disabled unless
+ /// one is debugging the meta regex engine's internals.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Match};
+ ///
+ /// let re = Regex::builder()
+ /// .configure(Regex::config().byte_classes(false))
+ /// .build(r"[a-z]+")?;
+ /// let hay = "!!quux!!";
+ /// assert_eq!(Some(Match::must(0, 2..6)), re.find(hay));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn byte_classes(self, yes: bool) -> Config {
+ Config { byte_classes: Some(yes), ..self }
+ }
+
+ /// Set the line terminator to be used by the `^` and `$` anchors in
+ /// multi-line mode.
+ ///
+ /// This option has no effect when CRLF mode is enabled. That is,
+ /// regardless of this setting, `(?Rm:^)` and `(?Rm:$)` will always treat
+ /// `\r` and `\n` as line terminators (and will never match between a `\r`
+ /// and a `\n`).
+ ///
+ /// By default, `\n` is the line terminator.
+ ///
+ /// **Warning**: This does not change the behavior of `.`. To do that,
+ /// you'll need to configure the syntax option
+ /// [`syntax::Config::line_terminator`](crate::util::syntax::Config::line_terminator)
+ /// in addition to this. Otherwise, `.` will continue to match any
+ /// character other than `\n`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, util::syntax, Match};
+ ///
+ /// let re = Regex::builder()
+ /// .syntax(syntax::Config::new().multi_line(true))
+ /// .configure(Regex::config().line_terminator(b'\x00'))
+ /// .build(r"^foo$")?;
+ /// let hay = "\x00foo\x00";
+ /// assert_eq!(Some(Match::must(0, 1..4)), re.find(hay));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn line_terminator(self, byte: u8) -> Config {
+ Config { line_terminator: Some(byte), ..self }
+ }
+
+ /// Toggle whether the hybrid NFA/DFA (also known as the "lazy DFA") should
+ /// be available for use by the meta regex engine.
+ ///
+ /// Enabling this does not necessarily mean that the lazy DFA will
+ /// definitely be used. It just means that it will be _available_ for use
+ /// if the meta regex engine thinks it will be useful.
+ ///
+ /// When the `hybrid` crate feature is enabled, then this is enabled by
+ /// default. Otherwise, if the crate feature is disabled, then this is
+ /// always disabled, regardless of its setting by the caller.
+ pub fn hybrid(self, yes: bool) -> Config {
+ Config { hybrid: Some(yes), ..self }
+ }
+
+ /// Toggle whether a fully compiled DFA should be available for use by the
+ /// meta regex engine.
+ ///
+ /// Enabling this does not necessarily mean that a DFA will definitely be
+ /// used. It just means that it will be _available_ for use if the meta
+ /// regex engine thinks it will be useful.
+ ///
+ /// When the `dfa-build` crate feature is enabled, then this is enabled by
+ /// default. Otherwise, if the crate feature is disabled, then this is
+ /// always disabled, regardless of its setting by the caller.
+ pub fn dfa(self, yes: bool) -> Config {
+ Config { dfa: Some(yes), ..self }
+ }
+
+ /// Toggle whether a one-pass DFA should be available for use by the meta
+ /// regex engine.
+ ///
+ /// Enabling this does not necessarily mean that a one-pass DFA will
+ /// definitely be used. It just means that it will be _available_ for
+ /// use if the meta regex engine thinks it will be useful. (Indeed, a
+ /// one-pass DFA can only be used when the regex is one-pass. See the
+ /// [`dfa::onepass`](crate::dfa::onepass) module for more details.)
+ ///
+ /// When the `dfa-onepass` crate feature is enabled, then this is enabled
+ /// by default. Otherwise, if the crate feature is disabled, then this is
+ /// always disabled, regardless of its setting by the caller.
+ pub fn onepass(self, yes: bool) -> Config {
+ Config { onepass: Some(yes), ..self }
+ }
+
+ /// Toggle whether a bounded backtracking regex engine should be available
+ /// for use by the meta regex engine.
+ ///
+ /// Enabling this does not necessarily mean that a bounded backtracker will
+ /// definitely be used. It just means that it will be _available_ for use
+ /// if the meta regex engine thinks it will be useful.
+ ///
+ /// When the `nfa-backtrack` crate feature is enabled, then this is enabled
+ /// by default. Otherwise, if the crate feature is disabled, then this is
+ /// always disabled, regardless of its setting by the caller.
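+ ///
+ /// # Example
+ ///
+ /// A minimal sketch showing that disabling all of the optional engines
+ /// still produces a working `Regex`: the meta regex engine can always fall
+ /// back to the `PikeVM`. Match results are unchanged; only performance
+ /// characteristics differ. (The pattern and haystack here are arbitrary.)
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Match};
+ ///
+ /// let re = Regex::builder()
+ /// .configure(Regex::config()
+ /// .hybrid(false)
+ /// .dfa(false)
+ /// .onepass(false)
+ /// .backtrack(false))
+ /// .build(r"[a-z0-9]+")?;
+ /// assert_eq!(Some(Match::must(0, 0..6)), re.find("sam123"));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```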
+ pub fn backtrack(self, yes: bool) -> Config {
+ Config { backtrack: Some(yes), ..self }
+ }
+
+ /// Returns the match kind on this configuration, as set by
+ /// [`Config::match_kind`].
+ ///
+ /// If it was not explicitly set, then a default value is returned.
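+ ///
+ /// # Example
+ ///
+ /// A small sketch of inspecting the default:
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, MatchKind};
+ ///
+ /// // A fresh configuration reports leftmost-first semantics.
+ /// assert_eq!(MatchKind::LeftmostFirst, Regex::config().get_match_kind());
+ /// ```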
+ pub fn get_match_kind(&self) -> MatchKind {
+ self.match_kind.unwrap_or(MatchKind::LeftmostFirst)
+ }
+
+ /// Returns whether empty matches must fall on valid UTF-8 boundaries, as
+ /// set by [`Config::utf8_empty`].
+ ///
+ /// If it was not explicitly set, then a default value is returned.
+ pub fn get_utf8_empty(&self) -> bool {
+ self.utf8_empty.unwrap_or(true)
+ }
+
+ /// Returns whether automatic prefilters are enabled, as set by
+ /// [`Config::auto_prefilter`].
+ ///
+ /// If it was not explicitly set, then a default value is returned.
+ pub fn get_auto_prefilter(&self) -> bool {
+ self.autopre.unwrap_or(true)
+ }
+
+ /// Returns a manually set prefilter, if one was set by
+ /// [`Config::prefilter`].
+ ///
+ /// If it was not explicitly set, then a default value is returned.
+ pub fn get_prefilter(&self) -> Option<&Prefilter> {
+ self.pre.as_ref().unwrap_or(&None).as_ref()
+ }
+
+ /// Returns the capture configuration, as set by
+ /// [`Config::which_captures`].
+ ///
+ /// If it was not explicitly set, then a default value is returned.
+ pub fn get_which_captures(&self) -> WhichCaptures {
+ self.which_captures.unwrap_or(WhichCaptures::All)
+ }
+
+ /// Returns NFA size limit, as set by [`Config::nfa_size_limit`].
+ ///
+ /// If it was not explicitly set, then a default value is returned.
+ pub fn get_nfa_size_limit(&self) -> Option<usize> {
+ self.nfa_size_limit.unwrap_or(Some(10 * (1 << 20)))
+ }
+
+ /// Returns one-pass DFA size limit, as set by
+ /// [`Config::onepass_size_limit`].
+ ///
+ /// If it was not explicitly set, then a default value is returned.
+ pub fn get_onepass_size_limit(&self) -> Option<usize> {
+ self.onepass_size_limit.unwrap_or(Some(1 * (1 << 20)))
+ }
+
+ /// Returns hybrid NFA/DFA cache capacity, as set by
+ /// [`Config::hybrid_cache_capacity`].
+ ///
+ /// If it was not explicitly set, then a default value is returned.
+ pub fn get_hybrid_cache_capacity(&self) -> usize {
+ self.hybrid_cache_capacity.unwrap_or(2 * (1 << 20))
+ }
+
+ /// Returns DFA size limit, as set by [`Config::dfa_size_limit`].
+ ///
+ /// If it was not explicitly set, then a default value is returned.
+ pub fn get_dfa_size_limit(&self) -> Option<usize> {
+ // The default for this is VERY small because building a full DFA is
+ // ridiculously costly. But for regexes that are very small, it can be
+ // beneficial to use a full DFA. In particular, a full DFA can enable
+ // additional optimizations via something called "accelerated" states.
+ // Namely, when there's a state with only a few outgoing transitions,
+ // we can temporarily suspend walking the transition table and use memchr
+ // for just those outgoing transitions to skip ahead very quickly.
+ //
+ // Generally speaking, if Unicode is enabled in your regex and you're
+ // using some kind of Unicode feature, then it's going to blow this
+ // size limit. Moreover, Unicode tends to defeat the "accelerated"
+ // state optimization too, so it's a double whammy.
+ //
+ // We also use a limit on the number of NFA states to avoid even
+ // starting the DFA construction process. Namely, DFA construction
+ // itself could make lots of initial allocs proportional to the size
+ // of the NFA, and if the NFA is large, it doesn't make sense to pay
+ // that cost if we know it's likely to be blown by a large margin.
+ self.dfa_size_limit.unwrap_or(Some(40 * (1 << 10)))
+ }
+
+ /// Returns DFA size limit in terms of the number of states in the NFA, as
+ /// set by [`Config::dfa_state_limit`].
+ ///
+ /// If it was not explicitly set, then a default value is returned.
+ pub fn get_dfa_state_limit(&self) -> Option<usize> {
+ // Again, as with the size limit, we keep this very small.
+ self.dfa_state_limit.unwrap_or(Some(30))
+ }
+
+ /// Returns whether byte classes are enabled, as set by
+ /// [`Config::byte_classes`].
+ ///
+ /// If it was not explicitly set, then a default value is returned.
+ pub fn get_byte_classes(&self) -> bool {
+ self.byte_classes.unwrap_or(true)
+ }
+
+ /// Returns the line terminator for this configuration, as set by
+ /// [`Config::line_terminator`].
+ ///
+ /// If it was not explicitly set, then a default value is returned.
+ pub fn get_line_terminator(&self) -> u8 {
+ self.line_terminator.unwrap_or(b'\n')
+ }
+
+ /// Returns whether the hybrid NFA/DFA regex engine may be used, as set by
+ /// [`Config::hybrid`].
+ ///
+ /// If it was not explicitly set, then a default value is returned.
+ pub fn get_hybrid(&self) -> bool {
+ #[cfg(feature = "hybrid")]
+ {
+ self.hybrid.unwrap_or(true)
+ }
+ #[cfg(not(feature = "hybrid"))]
+ {
+ false
+ }
+ }
+
+ /// Returns whether the DFA regex engine may be used, as set by
+ /// [`Config::dfa`].
+ ///
+ /// If it was not explicitly set, then a default value is returned.
+ pub fn get_dfa(&self) -> bool {
+ #[cfg(feature = "dfa-build")]
+ {
+ self.dfa.unwrap_or(true)
+ }
+ #[cfg(not(feature = "dfa-build"))]
+ {
+ false
+ }
+ }
+
+ /// Returns whether the one-pass DFA regex engine may be used, as set by
+ /// [`Config::onepass`].
+ ///
+ /// If it was not explicitly set, then a default value is returned.
+ pub fn get_onepass(&self) -> bool {
+ #[cfg(feature = "dfa-onepass")]
+ {
+ self.onepass.unwrap_or(true)
+ }
+ #[cfg(not(feature = "dfa-onepass"))]
+ {
+ false
+ }
+ }
+
+ /// Returns whether the bounded backtracking regex engine may be used, as
+ /// set by [`Config::backtrack`].
+ ///
+ /// If it was not explicitly set, then a default value is returned.
+ pub fn get_backtrack(&self) -> bool {
+ #[cfg(feature = "nfa-backtrack")]
+ {
+ self.backtrack.unwrap_or(true)
+ }
+ #[cfg(not(feature = "nfa-backtrack"))]
+ {
+ false
+ }
+ }
+
+ /// Overwrite the default configuration such that the options in `o` are
+ /// always used. If an option in `o` is not set, then the corresponding
+ /// option in `self` is used. If it's not set in `self` either, then it
+ /// remains not set.
+ pub(crate) fn overwrite(&self, o: Config) -> Config {
+ Config {
+ match_kind: o.match_kind.or(self.match_kind),
+ utf8_empty: o.utf8_empty.or(self.utf8_empty),
+ autopre: o.autopre.or(self.autopre),
+ pre: o.pre.or_else(|| self.pre.clone()),
+ which_captures: o.which_captures.or(self.which_captures),
+ nfa_size_limit: o.nfa_size_limit.or(self.nfa_size_limit),
+ onepass_size_limit: o
+ .onepass_size_limit
+ .or(self.onepass_size_limit),
+ hybrid_cache_capacity: o
+ .hybrid_cache_capacity
+ .or(self.hybrid_cache_capacity),
+ hybrid: o.hybrid.or(self.hybrid),
+ dfa: o.dfa.or(self.dfa),
+ dfa_size_limit: o.dfa_size_limit.or(self.dfa_size_limit),
+ dfa_state_limit: o.dfa_state_limit.or(self.dfa_state_limit),
+ onepass: o.onepass.or(self.onepass),
+ backtrack: o.backtrack.or(self.backtrack),
+ byte_classes: o.byte_classes.or(self.byte_classes),
+ line_terminator: o.line_terminator.or(self.line_terminator),
+ }
+ }
+}
+
+/// A builder for configuring and constructing a `Regex`.
+///
+/// The builder permits configuring two different aspects of a `Regex`:
+///
+/// * [`Builder::configure`] will set high-level configuration options as
+/// described by a [`Config`].
+/// * [`Builder::syntax`] will set the syntax level configuration options
+/// as described by a [`util::syntax::Config`](crate::util::syntax::Config).
+/// This only applies when building a `Regex` from pattern strings.
+///
+/// Once configured, the builder can then be used to construct a `Regex` from
+/// one of 4 different inputs:
+///
+/// * [`Builder::build`] creates a regex from a single pattern string.
+/// * [`Builder::build_many`] creates a regex from many pattern strings.
+/// * [`Builder::build_from_hir`] creates a regex from a
+/// [`regex-syntax::Hir`](Hir) expression.
+/// * [`Builder::build_many_from_hir`] creates a regex from many
+/// [`regex-syntax::Hir`](Hir) expressions.
+///
+/// The latter two methods in particular provide a way to construct a fully
+/// featured regular expression matcher directly from an `Hir` expression
+/// without having to first convert it to a string. (This is in contrast to the
+/// top-level `regex` crate which intentionally provides no such API in order
+/// to avoid making `regex-syntax` a public dependency.)
+///
+/// As a convenience, this builder may be created via [`Regex::builder`], which
+/// may help avoid an extra import.
+///
+/// # Example: change the line terminator
+///
+/// This example shows how to enable multi-line mode by default and change the
+/// line terminator to the NUL byte:
+///
+/// ```
+/// use regex_automata::{meta::Regex, util::syntax, Match};
+///
+/// let re = Regex::builder()
+/// .syntax(syntax::Config::new().multi_line(true))
+/// .configure(Regex::config().line_terminator(b'\x00'))
+/// .build(r"^foo$")?;
+/// let hay = "\x00foo\x00";
+/// assert_eq!(Some(Match::must(0, 1..4)), re.find(hay));
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+///
+/// # Example: disable UTF-8 requirement
+///
+/// By default, regex patterns are required to match UTF-8. This includes
+/// regex patterns that can produce matches of length zero. In the case of an
+/// empty match, by default, matches will not appear between the code units of
+/// a UTF-8 encoded codepoint.
+///
+/// However, it can be useful to disable this requirement, particularly if
+/// you're searching things like `&[u8]` that are not known to be valid UTF-8.
+///
+/// ```
+/// use regex_automata::{meta::Regex, util::syntax, Match};
+///
+/// let mut builder = Regex::builder();
+/// // Disables the requirement that non-empty matches match UTF-8.
+/// builder.syntax(syntax::Config::new().utf8(false));
+/// // Disables the requirement that empty matches match UTF-8 boundaries.
+/// builder.configure(Regex::config().utf8_empty(false));
+///
+/// // We can match raw bytes via \xZZ syntax, but we need to disable
+/// // Unicode mode to do that. We could disable it everywhere, or just
+/// // selectively, as shown here.
+/// let re = builder.build(r"(?-u:\xFF)foo(?-u:\xFF)")?;
+/// let hay = b"\xFFfoo\xFF";
+/// assert_eq!(Some(Match::must(0, 0..5)), re.find(hay));
+///
+/// // We can also match between code units.
+/// let re = builder.build(r"")?;
+/// let hay = "☃";
+/// assert_eq!(re.find_iter(hay).collect::<Vec<Match>>(), vec![
+/// Match::must(0, 0..0),
+/// Match::must(0, 1..1),
+/// Match::must(0, 2..2),
+/// Match::must(0, 3..3),
+/// ]);
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+#[derive(Clone, Debug)]
+pub struct Builder {
+ config: Config,
+ ast: ast::parse::ParserBuilder,
+ hir: hir::translate::TranslatorBuilder,
+}
+
+impl Builder {
+ /// Creates a new builder for configuring and constructing a [`Regex`].
+ pub fn new() -> Builder {
+ Builder {
+ config: Config::default(),
+ ast: ast::parse::ParserBuilder::new(),
+ hir: hir::translate::TranslatorBuilder::new(),
+ }
+ }
+
+ /// Builds a `Regex` from a single pattern string.
+ ///
+ /// If there was a problem parsing the pattern or a problem turning it into
+ /// a regex matcher, then an error is returned.
+ ///
+ /// # Example
+ ///
+ /// This example shows how to configure syntax options.
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, util::syntax, Match};
+ ///
+ /// let re = Regex::builder()
+ /// .syntax(syntax::Config::new().crlf(true).multi_line(true))
+ /// .build(r"^foo$")?;
+ /// let hay = "\r\nfoo\r\n";
+ /// assert_eq!(Some(Match::must(0, 2..5)), re.find(hay));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn build(&self, pattern: &str) -> Result<Regex, BuildError> {
+ self.build_many(&[pattern])
+ }
+
+ /// Builds a `Regex` from many pattern strings.
+ ///
+ /// If there was a problem parsing any of the patterns or a problem turning
+ /// them into a regex matcher, then an error is returned.
+ ///
+ /// # Example: finding the pattern that caused an error
+ ///
+ /// When a syntax error occurs, it is possible to ask which pattern
+ /// caused the syntax error.
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, PatternID};
+ ///
+ /// let err = Regex::builder()
+ /// .build_many(&["a", "b", r"\p{Foo}", "c"])
+ /// .unwrap_err();
+ /// assert_eq!(Some(PatternID::must(2)), err.pattern());
+ /// ```
+ ///
+ /// # Example: zero patterns is valid
+ ///
+ /// Building a regex with zero patterns results in a regex that never
+ /// matches anything. Because this routine is generic, passing an empty
+ /// slice usually requires a turbo-fish (or something else to help type
+ /// inference).
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, util::syntax, Match};
+ ///
+ /// let re = Regex::builder()
+ /// .build_many::<&str>(&[])?;
+ /// assert_eq!(None, re.find(""));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn build_many<P: AsRef<str>>(
+ &self,
+ patterns: &[P],
+ ) -> Result<Regex, BuildError> {
+ use crate::util::primitives::IteratorIndexExt;
+ log! {
+ debug!("building meta regex with {} patterns:", patterns.len());
+ for (pid, p) in patterns.iter().with_pattern_ids() {
+ let p = p.as_ref();
+ // We might split a grapheme with this truncation logic, but
+ // that's fine. We at least avoid splitting a codepoint.
+ let maxoff = p
+ .char_indices()
+ .map(|(i, ch)| i + ch.len_utf8())
+ .take(1000)
+ .last()
+ .unwrap_or(0);
+ if maxoff < p.len() {
+ debug!("{:?}: {}[... snip ...]", pid, &p[..maxoff]);
+ } else {
+ debug!("{:?}: {}", pid, p);
+ }
+ }
+ }
+ let (mut asts, mut hirs) = (vec![], vec![]);
+ for (pid, p) in patterns.iter().with_pattern_ids() {
+ let ast = self
+ .ast
+ .build()
+ .parse(p.as_ref())
+ .map_err(|err| BuildError::ast(pid, err))?;
+ asts.push(ast);
+ }
+ for ((pid, p), ast) in
+ patterns.iter().with_pattern_ids().zip(asts.iter())
+ {
+ let hir = self
+ .hir
+ .build()
+ .translate(p.as_ref(), ast)
+ .map_err(|err| BuildError::hir(pid, err))?;
+ hirs.push(hir);
+ }
+ self.build_many_from_hir(&hirs)
+ }
+
+ /// Builds a `Regex` directly from an `Hir` expression.
+ ///
+ /// This is useful if you needed to parse a pattern string into an `Hir`
+ /// for other reasons (such as analysis or transformations). This routine
+ /// permits building a `Regex` directly from the `Hir` expression instead
+ /// of first converting the `Hir` back to a pattern string.
+ ///
+ /// When using this method, any options set via [`Builder::syntax`] are
+ /// ignored. Namely, the syntax options only apply when parsing a pattern
+ /// string, which isn't relevant here.
+ ///
+ /// If there was a problem building the underlying regex matcher for the
+ /// given `Hir`, then an error is returned.
+ ///
+ /// # Example
+ ///
+ /// This example shows how one can hand-construct an `Hir` expression and
+ /// build a regex from it without doing any parsing at all.
+ ///
+ /// ```
+ /// use {
+ /// regex_automata::{meta::Regex, Match},
+ /// regex_syntax::hir::{Hir, Look},
+ /// };
+ ///
+ /// // (?Rm)^foo$
+ /// let hir = Hir::concat(vec![
+ /// Hir::look(Look::StartCRLF),
+ /// Hir::literal("foo".as_bytes()),
+ /// Hir::look(Look::EndCRLF),
+ /// ]);
+ /// let re = Regex::builder()
+ /// .build_from_hir(&hir)?;
+ /// let hay = "\r\nfoo\r\n";
+ /// assert_eq!(Some(Match::must(0, 2..5)), re.find(hay));
+ ///
+ /// Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn build_from_hir(&self, hir: &Hir) -> Result<Regex, BuildError> {
+ self.build_many_from_hir(&[hir])
+ }
+
+ /// Builds a `Regex` directly from many `Hir` expressions.
+ ///
+ /// This is useful if you needed to parse pattern strings into `Hir`
+ /// expressions for other reasons (such as analysis or transformations).
+ /// This routine permits building a `Regex` directly from the `Hir`
+ /// expressions instead of first converting the `Hir` expressions back to
+ /// pattern strings.
+ ///
+ /// When using this method, any options set via [`Builder::syntax`] are
+ /// ignored. Namely, the syntax options only apply when parsing a pattern
+ /// string, which isn't relevant here.
+ ///
+ /// If there was a problem building the underlying regex matcher for the
+ /// given `Hir` expressions, then an error is returned.
+ ///
+ /// Note that unlike [`Builder::build_many`], this can only fail as a
+ /// result of building the underlying matcher. In that case, there is
+ /// no single `Hir` expression that can be isolated as a reason for the
+ /// failure. So if this routine fails, it's not possible to determine which
+ /// `Hir` expression caused the failure.
+ ///
+ /// # Example
+ ///
+ /// This example shows how one can hand-construct multiple `Hir`
+ /// expressions and build a single regex from them without doing any
+ /// parsing at all.
+ ///
+ /// ```
+ /// use {
+ /// regex_automata::{meta::Regex, Match},
+ /// regex_syntax::hir::{Hir, Look},
+ /// };
+ ///
+ /// // (?Rm)^foo$
+ /// let hir1 = Hir::concat(vec![
+ /// Hir::look(Look::StartCRLF),
+ /// Hir::literal("foo".as_bytes()),
+ /// Hir::look(Look::EndCRLF),
+ /// ]);
+ /// // (?Rm)^bar$
+ /// let hir2 = Hir::concat(vec![
+ /// Hir::look(Look::StartCRLF),
+ /// Hir::literal("bar".as_bytes()),
+ /// Hir::look(Look::EndCRLF),
+ /// ]);
+ /// let re = Regex::builder()
+ /// .build_many_from_hir(&[&hir1, &hir2])?;
+ /// let hay = "\r\nfoo\r\nbar";
+ /// let got: Vec<Match> = re.find_iter(hay).collect();
+ /// let expected = vec![
+ /// Match::must(0, 2..5),
+ /// Match::must(1, 7..10),
+ /// ];
+ /// assert_eq!(expected, got);
+ ///
+ /// Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn build_many_from_hir<H: Borrow<Hir>>(
+ &self,
+ hirs: &[H],
+ ) -> Result<Regex, BuildError> {
+ let config = self.config.clone();
+ // We collect the HIRs into a vec so we can write internal routines
+ // with '&[&Hir]'. That is, we don't use generics everywhere, in order to
+ // keep code bloat down.
+ let hirs: Vec<&Hir> = hirs.iter().map(|hir| hir.borrow()).collect();
+ let info = RegexInfo::new(config, &hirs);
+ let strat = strategy::new(&info, &hirs)?;
+ let pool = {
+ let strat = Arc::clone(&strat);
+ let create: CachePoolFn = Box::new(move || strat.create_cache());
+ Pool::new(create)
+ };
+ Ok(Regex { imp: Arc::new(RegexI { strat, info }), pool })
+ }
+
+ /// Configure the behavior of a `Regex`.
+ ///
+ /// This configuration controls non-syntax options related to the behavior
+ /// of a `Regex`. This includes things like whether empty matches can split
+ /// a codepoint, prefilters, line terminators and a long list of options
+ /// for configuring which regex engines the meta regex engine will be able
+ /// to use internally.
+ ///
+ /// # Example
+ ///
+ /// This example shows how to disable UTF-8 empty mode. This will permit
+ /// empty matches to occur between the UTF-8 encoding of a codepoint.
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Match};
+ ///
+ /// let re = Regex::new("")?;
+ /// let got: Vec<Match> = re.find_iter("☃").collect();
+ /// // Matches only occur at the beginning and end of the snowman.
+ /// assert_eq!(got, vec![
+ /// Match::must(0, 0..0),
+ /// Match::must(0, 3..3),
+ /// ]);
+ ///
+ /// let re = Regex::builder()
+ /// .configure(Regex::config().utf8_empty(false))
+ /// .build("")?;
+ /// let got: Vec<Match> = re.find_iter("☃").collect();
+ /// // Matches now occur at every position!
+ /// assert_eq!(got, vec![
+ /// Match::must(0, 0..0),
+ /// Match::must(0, 1..1),
+ /// Match::must(0, 2..2),
+ /// Match::must(0, 3..3),
+ /// ]);
+ ///
+ /// Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
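+ ///
+ /// # Example: multiple calls to `configure`
+ ///
+ /// A minimal sketch of how repeated calls compose: each call merges its
+ /// settings into the builder, and any option left unset by a later call
+ /// keeps its previously configured value. The expected span below follows
+ /// the example in [`Config::match_kind`].
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, Match, MatchKind};
+ ///
+ /// let mut builder = Regex::builder();
+ /// // First set 'all' match semantics...
+ /// builder.configure(Regex::config().match_kind(MatchKind::All));
+ /// // ...and then, separately, allow empty matches to split a codepoint.
+ /// // The match kind set above remains in effect.
+ /// builder.configure(Regex::config().utf8_empty(false));
+ /// let re = builder.build("sam|samwise")?;
+ /// assert_eq!(Some(Match::must(0, 0..7)), re.find("samwise"));
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```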
+ pub fn configure(&mut self, config: Config) -> &mut Builder {
+ self.config = self.config.overwrite(config);
+ self
+ }
+
+ /// Configure the syntax options when parsing a pattern string while
+ /// building a `Regex`.
+ ///
+ /// These options _only_ apply when [`Builder::build`] or [`Builder::build_many`]
+ /// are used. The other build methods accept `Hir` values, which have
+ /// already been parsed.
+ ///
+ /// # Example
+ ///
+ /// This example shows how to enable case insensitive mode.
+ ///
+ /// ```
+ /// use regex_automata::{meta::Regex, util::syntax, Match};
+ ///
+ /// let re = Regex::builder()
+ /// .syntax(syntax::Config::new().case_insensitive(true))
+ /// .build(r"δ")?;
+ /// assert_eq!(Some(Match::must(0, 0..2)), re.find(r"Δ"));
+ ///
+ /// Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ pub fn syntax(
+ &mut self,
+ config: crate::util::syntax::Config,
+ ) -> &mut Builder {
+ config.apply_ast(&mut self.ast);
+ config.apply_hir(&mut self.hir);
+ self
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ // I found this in the course of building out the benchmark suite for
+ // rebar.
+ #[test]
+ fn regression() {
+ env_logger::init();
+
+ let re = Regex::new(r"[a-zA-Z]+ing").unwrap();
+ assert_eq!(1, re.find_iter("tingling").count());
+ }
+}
diff --git a/third_party/rust/regex-automata/src/meta/reverse_inner.rs b/third_party/rust/regex-automata/src/meta/reverse_inner.rs
new file mode 100644
index 0000000000..3d78779f6f
--- /dev/null
+++ b/third_party/rust/regex-automata/src/meta/reverse_inner.rs
@@ -0,0 +1,220 @@
+/*!
+A module dedicated to plucking inner literals out of a regex pattern, and
+then constructing a prefilter for them. We also include a regex pattern
+"prefix" that corresponds to the bits of the regex that need to match before
+the literals do. The reverse inner optimization then proceeds by looking for
+matches of the inner literal(s), and then doing a reverse search of the prefix
+from the start of the literal match to find the overall start position of the
+match.
+
+The essential invariant we want to uphold here is that the literals we return
+reflect a set where *at least* one of them must match in order for the overall
+regex to match. We also need to maintain the invariant that the regex prefix
+returned corresponds to the entirety of the regex up until the literals we
+return.
+
+This somewhat limits what we can do. That is, if we have a regex like
+`\w+(@!|%%)\w+`, then we can pluck the `{@!, %%}` out and build a prefilter
+from it. Then we just need to compile `\w+` in reverse. No fuss no muss. But if
+we have a regex like `\d+@!|\w+%%`, then we get kind of stymied. Technically,
+we could still extract `{@!, %%}`, and it is true that at least one of them must
+match. But then, what is our regex prefix? Again, in theory, that could be
+`\d+|\w+`, but that's not quite right, because the `\d+` only matches when `@!`
+matches, and `\w+` only matches when `%%` matches.
+
+All of that is technically possible to do, but it seemingly requires a lot of
+sophistication and machinery. Probably the way to tackle that is with some kind
+of formalism, approaching this problem more generally.
+
+For now, the code below basically just looks for a top-level concatenation.
+And if it can find one, it looks for literals in each of the direct child
+sub-expressions of that concatenation. If some good ones are found, we return
+those and a concatenation of the Hir expressions seen up to that point.
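+
+As a rough illustration of the literal-extraction half of this, the sketch
+below uses the public `regex-syntax` literal extractor on the inner
+sub-expression `(@!|%%)` from the example above. (This is just an
+approximation for exposition; the crate-private routines below operate on the
+already-parsed HIR of the whole pattern rather than parsing a fragment.)
+
+```
+use regex_syntax::{hir::literal::{ExtractKind, Extractor}, Parser};
+
+// Parse just the inner sub-expression and extract its prefix literals.
+let hir = Parser::new().parse("(@!|%%)").unwrap();
+let mut extractor = Extractor::new();
+extractor.kind(ExtractKind::Prefix);
+let seq = extractor.extract(&hir);
+// Both inner literals are plucked out. At least one of them must match for
+// the overall regex to match, which is exactly the invariant described above.
+assert_eq!(Some(2), seq.len());
+```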
+*/
+
+use alloc::vec::Vec;
+
+use regex_syntax::hir::{self, literal, Hir, HirKind};
+
+use crate::{util::prefilter::Prefilter, MatchKind};
+
+/// Attempts to extract an "inner" prefilter from the given HIR expressions. If
+/// one was found, then a concatenation of the HIR expressions that precede it
+/// is returned.
+///
+/// The idea here is that the prefilter returned can be used to find candidate
+/// matches. And then the HIR returned can be used to build a reverse regex
+/// matcher, which will find the start of the candidate match. Finally, the
+/// match still has to be confirmed with a normal anchored forward scan to find
+/// the end position of the match.
+///
+/// Note that this assumes leftmost-first match semantics, so callers must
+/// not call this otherwise.
+pub(crate) fn extract(hirs: &[&Hir]) -> Option<(Hir, Prefilter)> {
+ if hirs.len() != 1 {
+ debug!(
+ "skipping reverse inner optimization since it only \
+ supports 1 pattern, {} were given",
+ hirs.len(),
+ );
+ return None;
+ }
+ let mut concat = match top_concat(hirs[0]) {
+ Some(concat) => concat,
+ None => {
+ debug!(
+ "skipping reverse inner optimization because a top-level \
+ concatenation could not be found",
+ );
+ return None;
+ }
+ };
+ // We skip the first HIR because if it did have a prefix prefilter in it,
+ // we probably wouldn't be here looking for an inner prefilter.
+ for i in 1..concat.len() {
+ let hir = &concat[i];
+ let pre = match prefilter(hir) {
+ None => continue,
+ Some(pre) => pre,
+ };
+ // Even if we got a prefilter, if it isn't considered "fast," then we
+ // probably don't want to bother with it. Namely, since the reverse
+ // inner optimization requires some overhead, it likely only makes
+ // sense if the prefilter scan itself is (believed to be) much faster
+ // than the regex engine.
+ if !pre.is_fast() {
+ debug!(
+ "skipping extracted inner prefilter because \
+ it probably isn't fast"
+ );
+ continue;
+ }
+ let concat_suffix = Hir::concat(concat.split_off(i));
+ let concat_prefix = Hir::concat(concat);
+ // Look for a prefilter again. Why? Because above we only looked for
+ // a prefilter on the individual 'hir', but we might be able to find
+ // something better and more discriminatory by looking at the entire
+ // suffix. We don't do this above to avoid making this loop worst case
+ // quadratic in the length of 'concat'.
+ let pre2 = match prefilter(&concat_suffix) {
+ None => pre,
+ Some(pre2) => {
+ if pre2.is_fast() {
+ pre2
+ } else {
+ pre
+ }
+ }
+ };
+ return Some((concat_prefix, pre2));
+ }
+ debug!(
+ "skipping reverse inner optimization because a top-level \
+ sub-expression with a fast prefilter could not be found"
+ );
+ None
+}
+
+/// Attempt to extract a prefilter from an HIR expression.
+///
+/// We do a little massaging here to try to ensure that the prefilter we get
+/// out of this is *probably* fast. Basically, the false positive rate has a
+/// much higher impact for things like the reverse inner optimization because
+/// more work potentially needs to be done for each candidate match.
+///
+/// Note that this assumes leftmost-first match semantics, so callers must
+/// not call this otherwise.
+fn prefilter(hir: &Hir) -> Option<Prefilter> {
+ let mut extractor = literal::Extractor::new();
+ extractor.kind(literal::ExtractKind::Prefix);
+ let mut prefixes = extractor.extract(hir);
+ debug!(
+ "inner prefixes (len={:?}) extracted before optimization: {:?}",
+ prefixes.len(),
+ prefixes
+ );
+ // Since these are inner literals, we know they cannot be exact. But the
+ // extractor doesn't know this. We mark them as inexact because this might
+ // impact literal optimization. Namely, optimization weights "all literals
+ // are exact" as very high, because it presumes that any match results in
+ // an overall match. But of course, that is not the case here.
+ //
+ // In practice, this avoids plucking out an ASCII-only \s as an alternation
+ // of single-byte whitespace characters.
+ prefixes.make_inexact();
+ prefixes.optimize_for_prefix_by_preference();
+ debug!(
+ "inner prefixes (len={:?}) extracted after optimization: {:?}",
+ prefixes.len(),
+ prefixes
+ );
+ prefixes
+ .literals()
+ .and_then(|lits| Prefilter::new(MatchKind::LeftmostFirst, lits))
+}
+
+/// Looks for a "top level" HirKind::Concat item in the given HIR. This will
+/// try to return one even if it's embedded in a capturing group, but is
+/// otherwise pretty conservative in what is returned.
+///
+/// The HIR returned is a complete copy of the concat with all capturing
+/// groups removed. In effect, the concat returned is "flattened" with respect
+/// to capturing groups. This makes the detection logic above for prefixes
+/// a bit simpler, and it works because 1) capturing groups never influence
+/// whether a match occurs or not and 2) capturing groups are not used when
+/// doing the reverse inner search to find the start of the match.
+fn top_concat(mut hir: &Hir) -> Option<Vec<Hir>> {
+ loop {
+ hir = match hir.kind() {
+ HirKind::Empty
+ | HirKind::Literal(_)
+ | HirKind::Class(_)
+ | HirKind::Look(_)
+ | HirKind::Repetition(_)
+ | HirKind::Alternation(_) => return None,
+ HirKind::Capture(hir::Capture { ref sub, .. }) => sub,
+ HirKind::Concat(ref subs) => {
+ // We are careful to only do the flattening/copy when we know
+ // we have a "top level" concat we can inspect. This avoids
+ // doing extra work in cases where we definitely won't use it.
+ // (This might still be wasted work if we can't go on to find
+ // some literals to extract.)
+ let concat =
+ Hir::concat(subs.iter().map(|h| flatten(h)).collect());
+ return match concat.into_kind() {
+ HirKind::Concat(xs) => Some(xs),
+ // It is actually possible for this case to occur, because
+ // 'Hir::concat' might simplify the expression to the point
+ // that concatenations are actually removed. One wonders
+ // whether this leads to other cases where we should be
+ // extracting literals, but in theory, I believe if we do
+ // get here, then it means that a "real" prefilter failed
+ // to be extracted and we should probably leave well enough
+ // alone. (A "real" prefilter is unbothered by "top-level
+ // concats" and "capturing groups.")
+ _ => return None,
+ };
+ }
+ };
+ }
+}
+
+/// Returns a copy of the given HIR but with all capturing groups removed.
+fn flatten(hir: &Hir) -> Hir {
+ match hir.kind() {
+ HirKind::Empty => Hir::empty(),
+ HirKind::Literal(hir::Literal(ref x)) => Hir::literal(x.clone()),
+ HirKind::Class(ref x) => Hir::class(x.clone()),
+ HirKind::Look(ref x) => Hir::look(x.clone()),
+ HirKind::Repetition(ref x) => Hir::repetition(x.with(flatten(&x.sub))),
+ // This is the interesting case. We just drop the group information
+ // entirely and use the child HIR itself.
+ HirKind::Capture(hir::Capture { ref sub, .. }) => flatten(sub),
+ HirKind::Alternation(ref xs) => {
+ Hir::alternation(xs.iter().map(|x| flatten(x)).collect())
+ }
+ HirKind::Concat(ref xs) => {
+ Hir::concat(xs.iter().map(|x| flatten(x)).collect())
+ }
+ }
+}
diff --git a/third_party/rust/regex-automata/src/meta/stopat.rs b/third_party/rust/regex-automata/src/meta/stopat.rs
new file mode 100644
index 0000000000..e8d716689c
--- /dev/null
+++ b/third_party/rust/regex-automata/src/meta/stopat.rs
@@ -0,0 +1,224 @@
+/*!
+This module defines two bespoke forward DFA search routines. One for the lazy
+DFA and one for the fully compiled DFA. These routines differ from the normal
+ones by reporting the position at which the search terminates when a match
+*isn't* found.
+
+This position at which a search terminates is useful in contexts where the meta
+regex engine runs optimizations that could go quadratic if we aren't careful.
+Namely, a regex search *could* scan to the end of the haystack only to report a
+non-match. If the caller doesn't know that the search scanned to the end of the
+haystack, it might restart the search at the next literal candidate it finds
+and repeat the process.
+
+Reporting the position at which the search stopped gives the caller a way to
+determine the point beyond which subsequent scans should not go. This is
+principally used in the "reverse inner" optimization, which works
+like this:
+
+1. Look for a match of an inner literal. Say, 'Z' in '\w+Z\d+'.
+2. At the spot where 'Z' matches, do a reverse anchored search from there for
+'\w+'.
+3. If the reverse search matches, it corresponds to the start position of a
+(possible) match. At this point, do a forward anchored search to find the end
+position. If an end position is found, then we have a match and we know its
+bounds.
+
+If the forward anchored search in (3) searches the entire rest of the haystack
+but reports a non-match, then a naive implementation of the above will continue
+back at step 1 looking for more candidates. There might still be a match to be
+found! It's possible. But we already scanned the whole haystack. So if we keep
+repeating the process, then we might wind up taking quadratic time in the size
+of the haystack, which is not great.
+
+So if the forward anchored search in (3) reports the position at which it
+stops, then we can detect whether quadratic behavior might be occurring in
+steps (1) and (2). For (1), it occurs if the literal candidate found occurs
+*before* the end of the previous search in (3), since that means we're now
+going to look for another match in a place where the forward search has already
+scanned. It is *correct* to do so, but our technique has become inefficient.
+For (2), quadratic behavior occurs similarly when its reverse search extends
+past the point where the previous forward search in (3) terminated. Indeed, to
+implement (2), we use the sibling 'limited' module for ensuring our reverse
+scan doesn't go further than we want.
+
+See the 'opt/reverse-inner' benchmarks in rebar for a real demonstration of
+how quadratic behavior is mitigated.
+*/
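+
+// A rough sketch of the caller's loop (illustrative names only; the real
+// thing is 'ReverseInner::try_search_full' in 'meta/strategy.rs'), showing
+// how the "stopped at" position from step (3) feeds back into the candidate
+// check in step (1). (The analogous bound for step (2) is enforced by the
+// sibling 'limited' module.)
+//
+//     let mut min_pre_start = 0;
+//     loop {
+//         // step (1): find the next inner literal candidate
+//         let lit = find_inner_literal(haystack, span)?;
+//         if lit.start < min_pre_start {
+//             // A previous forward scan already covered these bytes, so
+//             // bail out of the optimization to avoid quadratic behavior.
+//             return Err(quadratic());
+//         }
+//         // step (2): reverse anchored scan to find the match start
+//         let start = reverse_limited_scan(lit)?;
+//         // step (3): forward anchored scan that reports where it stopped
+//         match forward_scan_stopat(start) {
+//             Ok(end) => return Ok(Some(start..end)),
+//             // No match, but remember where the forward scan stopped.
+//             Err(stopped_at) => min_pre_start = stopped_at,
+//         }
+//         span.start = lit.start + 1;
+//     }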
+
+use crate::{meta::error::RetryFailError, HalfMatch, Input, MatchError};
+
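+// Both routines below use a nested result to distinguish three outcomes:
+// 'Ok(Ok(m))' means a match was found, 'Ok(Err(at))' means no match was
+// found and the search stopped at offset 'at', and 'Err(..)' means the
+// underlying DFA gave up or quit and the caller needs to retry with a
+// different (infallible) regex engine.
+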
+#[cfg(feature = "dfa-build")]
+pub(crate) fn dfa_try_search_half_fwd(
+ dfa: &crate::dfa::dense::DFA<alloc::vec::Vec<u32>>,
+ input: &Input<'_>,
+) -> Result<Result<HalfMatch, usize>, RetryFailError> {
+ use crate::dfa::{accel, Automaton};
+
+ let mut mat = None;
+ let mut sid = dfa.start_state_forward(input)?;
+ let mut at = input.start();
+ while at < input.end() {
+ sid = dfa.next_state(sid, input.haystack()[at]);
+ if dfa.is_special_state(sid) {
+ if dfa.is_match_state(sid) {
+ let pattern = dfa.match_pattern(sid, 0);
+ mat = Some(HalfMatch::new(pattern, at));
+ if input.get_earliest() {
+ return Ok(mat.ok_or(at));
+ }
+ if dfa.is_accel_state(sid) {
+ let needs = dfa.accelerator(sid);
+ at = accel::find_fwd(needs, input.haystack(), at)
+ .unwrap_or(input.end());
+ continue;
+ }
+ } else if dfa.is_accel_state(sid) {
+ let needs = dfa.accelerator(sid);
+ at = accel::find_fwd(needs, input.haystack(), at)
+ .unwrap_or(input.end());
+ continue;
+ } else if dfa.is_dead_state(sid) {
+ return Ok(mat.ok_or(at));
+ } else if dfa.is_quit_state(sid) {
+ if mat.is_some() {
+ return Ok(mat.ok_or(at));
+ }
+ return Err(MatchError::quit(input.haystack()[at], at).into());
+ } else {
+ // Ideally we wouldn't use a DFA that specialized start states
+ // and thus 'is_start_state()' could never be true here, but in
+ // practice we reuse the DFA created for the full regex which
+ // will specialize start states whenever there is a prefilter.
+ debug_assert!(dfa.is_start_state(sid));
+ }
+ }
+ at += 1;
+ }
+ dfa_eoi_fwd(dfa, input, &mut sid, &mut mat)?;
+ Ok(mat.ok_or(at))
+}
+
+#[cfg(feature = "hybrid")]
+pub(crate) fn hybrid_try_search_half_fwd(
+ dfa: &crate::hybrid::dfa::DFA,
+ cache: &mut crate::hybrid::dfa::Cache,
+ input: &Input<'_>,
+) -> Result<Result<HalfMatch, usize>, RetryFailError> {
+ let mut mat = None;
+ let mut sid = dfa.start_state_forward(cache, input)?;
+ let mut at = input.start();
+ while at < input.end() {
+ sid = dfa
+ .next_state(cache, sid, input.haystack()[at])
+ .map_err(|_| MatchError::gave_up(at))?;
+ if sid.is_tagged() {
+ if sid.is_match() {
+ let pattern = dfa.match_pattern(cache, sid, 0);
+ mat = Some(HalfMatch::new(pattern, at));
+ if input.get_earliest() {
+ return Ok(mat.ok_or(at));
+ }
+ } else if sid.is_dead() {
+ return Ok(mat.ok_or(at));
+ } else if sid.is_quit() {
+ if mat.is_some() {
+ return Ok(mat.ok_or(at));
+ }
+ return Err(MatchError::quit(input.haystack()[at], at).into());
+ } else {
+ // We should NEVER get an unknown state ID back from
+ // dfa.next_state().
+ debug_assert!(!sid.is_unknown());
+ // Ideally we wouldn't use a lazy DFA that specialized start
+ // states and thus 'sid.is_start()' could never be true here,
+ // but in practice we reuse the lazy DFA created for the full
+ // regex which will specialize start states whenever there is
+ // a prefilter.
+ debug_assert!(sid.is_start());
+ }
+ }
+ at += 1;
+ }
+ hybrid_eoi_fwd(dfa, cache, input, &mut sid, &mut mat)?;
+ Ok(mat.ok_or(at))
+}
+
+#[cfg(feature = "dfa-build")]
+#[cfg_attr(feature = "perf-inline", inline(always))]
+fn dfa_eoi_fwd(
+ dfa: &crate::dfa::dense::DFA<alloc::vec::Vec<u32>>,
+ input: &Input<'_>,
+ sid: &mut crate::util::primitives::StateID,
+ mat: &mut Option<HalfMatch>,
+) -> Result<(), MatchError> {
+ use crate::dfa::Automaton;
+
+ let sp = input.get_span();
+ match input.haystack().get(sp.end) {
+ Some(&b) => {
+ *sid = dfa.next_state(*sid, b);
+ if dfa.is_match_state(*sid) {
+ let pattern = dfa.match_pattern(*sid, 0);
+ *mat = Some(HalfMatch::new(pattern, sp.end));
+ } else if dfa.is_quit_state(*sid) {
+ if mat.is_some() {
+ return Ok(());
+ }
+ return Err(MatchError::quit(b, sp.end));
+ }
+ }
+ None => {
+ *sid = dfa.next_eoi_state(*sid);
+ if dfa.is_match_state(*sid) {
+ let pattern = dfa.match_pattern(*sid, 0);
+ *mat = Some(HalfMatch::new(pattern, input.haystack().len()));
+ }
+ // N.B. We don't have to check 'is_quit' here because the EOI
+ // transition can never lead to a quit state.
+ debug_assert!(!dfa.is_quit_state(*sid));
+ }
+ }
+ Ok(())
+}
+
+#[cfg(feature = "hybrid")]
+#[cfg_attr(feature = "perf-inline", inline(always))]
+fn hybrid_eoi_fwd(
+ dfa: &crate::hybrid::dfa::DFA,
+ cache: &mut crate::hybrid::dfa::Cache,
+ input: &Input<'_>,
+ sid: &mut crate::hybrid::LazyStateID,
+ mat: &mut Option<HalfMatch>,
+) -> Result<(), MatchError> {
+ let sp = input.get_span();
+ match input.haystack().get(sp.end) {
+ Some(&b) => {
+ *sid = dfa
+ .next_state(cache, *sid, b)
+ .map_err(|_| MatchError::gave_up(sp.end))?;
+ if sid.is_match() {
+ let pattern = dfa.match_pattern(cache, *sid, 0);
+ *mat = Some(HalfMatch::new(pattern, sp.end));
+ } else if sid.is_quit() {
+ if mat.is_some() {
+ return Ok(());
+ }
+ return Err(MatchError::quit(b, sp.end));
+ }
+ }
+ None => {
+ *sid = dfa
+ .next_eoi_state(cache, *sid)
+ .map_err(|_| MatchError::gave_up(input.haystack().len()))?;
+ if sid.is_match() {
+ let pattern = dfa.match_pattern(cache, *sid, 0);
+ *mat = Some(HalfMatch::new(pattern, input.haystack().len()));
+ }
+ // N.B. We don't have to check 'is_quit' here because the EOI
+ // transition can never lead to a quit state.
+ debug_assert!(!sid.is_quit());
+ }
+ }
+ Ok(())
+}
diff --git a/third_party/rust/regex-automata/src/meta/strategy.rs b/third_party/rust/regex-automata/src/meta/strategy.rs
new file mode 100644
index 0000000000..ea6c6ab576
--- /dev/null
+++ b/third_party/rust/regex-automata/src/meta/strategy.rs
@@ -0,0 +1,1908 @@
+use core::{
+ fmt::Debug,
+ panic::{RefUnwindSafe, UnwindSafe},
+};
+
+use alloc::sync::Arc;
+
+use regex_syntax::hir::{literal, Hir};
+
+use crate::{
+ meta::{
+ error::{BuildError, RetryError, RetryFailError, RetryQuadraticError},
+ regex::{Cache, RegexInfo},
+ reverse_inner, wrappers,
+ },
+ nfa::thompson::{self, WhichCaptures, NFA},
+ util::{
+ captures::{Captures, GroupInfo},
+ look::LookMatcher,
+ prefilter::{self, Prefilter, PrefilterI},
+ primitives::{NonMaxUsize, PatternID},
+ search::{Anchored, HalfMatch, Input, Match, MatchKind, PatternSet},
+ },
+};
+
+/// A trait that represents a single meta strategy. Its main utility is in
+/// providing a way to do dynamic dispatch over a few choices.
+///
+/// Why dynamic dispatch? I actually don't have a super compelling reason, and
+/// importantly, I have not benchmarked it with the main alternative: an enum.
+/// I went with dynamic dispatch initially because the regex engine search code
+/// really can't be inlined into caller code in most cases because it's just
+/// too big. In other words, it is already expected that every regex search
+/// will entail at least the cost of a function call.
+///
+/// I do wonder whether using enums would result in better codegen overall
+/// though. It's a worthwhile experiment to try. Probably the most interesting
+/// benchmark to run in such a case would be one with a high match count. That
+/// is, a benchmark to test the overall latency of a search call.
+pub(super) trait Strategy:
+ Debug + Send + Sync + RefUnwindSafe + UnwindSafe + 'static
+{
+ fn group_info(&self) -> &GroupInfo;
+
+ fn create_cache(&self) -> Cache;
+
+ fn reset_cache(&self, cache: &mut Cache);
+
+ fn is_accelerated(&self) -> bool;
+
+ fn memory_usage(&self) -> usize;
+
+ fn search(&self, cache: &mut Cache, input: &Input<'_>) -> Option<Match>;
+
+ fn search_half(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ ) -> Option<HalfMatch>;
+
+ fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool;
+
+ fn search_slots(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ slots: &mut [Option<NonMaxUsize>],
+ ) -> Option<PatternID>;
+
+ fn which_overlapping_matches(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ patset: &mut PatternSet,
+ );
+}
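+
+// The concrete strategies defined below are: 'Pre' (a prefilter used
+// directly as the matcher), 'Core' (the general composition of regex
+// engines) and three wrappers around 'Core' that each add a reverse-scan
+// optimization: 'ReverseAnchored', 'ReverseSuffix' and 'ReverseInner'.
+// The 'new' constructor below picks between them.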
+
+pub(super) fn new(
+ info: &RegexInfo,
+ hirs: &[&Hir],
+) -> Result<Arc<dyn Strategy>, BuildError> {
+ // At this point, we're committed to a regex engine of some kind. So pull
+ // out a prefilter if we can, which will feed to each of the constituent
+ // regex engines.
+ let pre = if info.is_always_anchored_start() {
+ // PERF: I'm not sure we necessarily want to do this... We may want to
+ // run a prefilter for quickly rejecting in some cases. The problem
+ // is that anchored searches overlap quite a bit with the use case
+ // of "run a regex on every line to extract data." In that case, the
+ // regex always matches, so running a prefilter doesn't really help us
+ // there. The main place where a prefilter helps in an anchored search
+ // is if the anchored search is not expected to match frequently. That
+ // is, the prefilter gives us a way to possibly reject a haystack very
+ // quickly.
+ //
+ // Maybe we should use a prefilter, but only for longer haystacks?
+ // Or maybe we should only use a prefilter when we think it's "fast"?
+ //
+ // Interestingly, I think we currently lack the infrastructure for
+ // disabling a prefilter based on haystack length. That would probably
+ // need to be a new 'Input' option. (Interestingly, an 'Input' used to
+ // carry a 'Prefilter' with it, but I moved away from that.)
+ debug!("skipping literal extraction since regex is anchored");
+ None
+ } else if let Some(pre) = info.config().get_prefilter() {
+ debug!(
+ "skipping literal extraction since the caller provided a prefilter"
+ );
+ Some(pre.clone())
+ } else if info.config().get_auto_prefilter() {
+ let kind = info.config().get_match_kind();
+ let prefixes = crate::util::prefilter::prefixes(kind, hirs);
+ // If we can build a full `Strategy` from just the extracted prefixes,
+ // then we can short-circuit and avoid building a regex engine at all.
+ if let Some(pre) = Pre::from_prefixes(info, &prefixes) {
+ debug!(
+ "found that the regex can be broken down to a literal \
+ search, avoiding the regex engine entirely",
+ );
+ return Ok(pre);
+ }
+ // This now attempts another short-circuit of the regex engine: if we
+ // have a huge alternation of just plain literals, then we can just use
+ // Aho-Corasick for that and avoid the regex engine entirely.
+ //
+ // You might think this case would just be handled by
+ // `Pre::from_prefixes`, but that technique relies on heuristic literal
+ // extraction from the corresponding `Hir`. That works, but part of those
+ // heuristics is to limit the size and number of literals returned. This case
+ // will specifically handle patterns with very large alternations.
+ //
+ // One wonders if we should just roll this into our heuristic literal
+ // extraction, and then I think this case could disappear entirely.
+ if let Some(pre) = Pre::from_alternation_literals(info, hirs) {
+ debug!(
+ "found plain alternation of literals, \
+ avoiding regex engine entirely and using Aho-Corasick"
+ );
+ return Ok(pre);
+ }
+ prefixes.literals().and_then(|strings| {
+ debug!(
+ "creating prefilter from {} literals: {:?}",
+ strings.len(),
+ strings,
+ );
+ Prefilter::new(kind, strings)
+ })
+ } else {
+ debug!("skipping literal extraction since prefilters were disabled");
+ None
+ };
+ let mut core = Core::new(info.clone(), pre.clone(), hirs)?;
+ // Now that we have our core regex engines built, there are a few cases
+ // where we can do a little bit better than just a normal "search forward
+ // and maybe use a prefilter when in a start state." However, these cases
+ // may not always work or otherwise build on top of the Core searcher.
+ // For example, the reverse anchored optimization seems like it might
+ // always work, but only the DFAs support reverse searching and the DFAs
+ // might give up or quit for various reasons. If we had, e.g., a PikeVM that
+ // supported reverse searching, then we could avoid building a full Core
+ // engine for this case.
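+ // As a rough illustration only (the exact choice depends on the config
+ // and on what literal extraction finds): a pattern that is always
+ // anchored at the end but not at the start can use the reverse anchored
+ // strategy, something like '[a-z]+ing' can use the reverse suffix
+ // strategy (scan for 'ing', then confirm backwards) and something like
+ // '\w+Z\d+' can use the reverse inner strategy (scan for 'Z').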
+ core = match ReverseAnchored::new(core) {
+ Err(core) => core,
+ Ok(ra) => {
+ debug!("using reverse anchored strategy");
+ return Ok(Arc::new(ra));
+ }
+ };
+ core = match ReverseSuffix::new(core, hirs) {
+ Err(core) => core,
+ Ok(rs) => {
+ debug!("using reverse suffix strategy");
+ return Ok(Arc::new(rs));
+ }
+ };
+ core = match ReverseInner::new(core, hirs) {
+ Err(core) => core,
+ Ok(ri) => {
+ debug!("using reverse inner strategy");
+ return Ok(Arc::new(ri));
+ }
+ };
+ debug!("using core strategy");
+ Ok(Arc::new(core))
+}
+
+#[derive(Clone, Debug)]
+struct Pre<P> {
+ pre: P,
+ group_info: GroupInfo,
+}
+
+impl<P: PrefilterI> Pre<P> {
+ fn new(pre: P) -> Arc<dyn Strategy> {
+ // The only thing we support when we use prefilters directly as a
+ // strategy is the start and end of the overall match for a single
+ // pattern. In other words, exactly one implicit capturing group. Which
+ // is exactly what we use here for a GroupInfo.
+ let group_info = GroupInfo::new([[None::<&str>]]).unwrap();
+ Arc::new(Pre { pre, group_info })
+ }
+}
+
+// This is a little weird, but we don't actually care about the type parameter
+// here because we're selecting which underlying prefilter to use. So we just
+// define it on an arbitrary type.
+impl Pre<()> {
+ /// Given a sequence of prefixes, attempt to return a full `Strategy` using
+ /// just the prefixes.
+ ///
+ /// Basically, this occurs when the prefixes given are not just prefixes,
+ /// but an enumeration of the entire language matched by the regular
+ /// expression.
+ ///
+ /// A number of other conditions need to be true too. For example, there
+ /// must be only one pattern, it must have zero explicit capture groups,
+ /// there must be no look-around assertions, and so on.
+ ///
+ /// Note that this ignores `Config::get_auto_prefilter` because if this
+ /// returns something, then it isn't a prefilter but a matcher itself.
+ /// Therefore, it shouldn't suffer from the problems typical to prefilters
+ /// (such as a high false positive rate).
+ fn from_prefixes(
+ info: &RegexInfo,
+ prefixes: &literal::Seq,
+ ) -> Option<Arc<dyn Strategy>> {
+ let kind = info.config().get_match_kind();
+ // Check to see if our prefixes are exact, which means we might be
+ // able to bypass the regex engine entirely and just rely on literal
+ // searches.
+ if !prefixes.is_exact() {
+ return None;
+ }
+ // We also require that we have a single regex pattern. Namely,
+ // we reuse the prefilter infrastructure to implement the search, and
+ // prefilters only report spans. Prefilters don't know about pattern
+ // IDs. The multi-regex case isn't a lost cause, we might still use
+ // Aho-Corasick and we might still just use a regular prefilter, but
+ // that's done below.
+ if info.pattern_len() != 1 {
+ return None;
+ }
+ // We can't have any capture groups either. The literal engines don't
+ // know how to deal with things like '(foo)(bar)'. In that case, a
+ // prefilter will just be used and then the regex engine will resolve
+ // the capture groups.
+ if info.props()[0].explicit_captures_len() != 0 {
+ return None;
+ }
+ // We also require that it has zero look-around assertions. Namely,
+ // literal extraction treats look-around assertions as if they match
+ // *every* empty string. But of course, that isn't true. So for
+ // example, 'foo\bquux' never matches anything, but 'fooquux' is
+ // extracted from that as an exact literal. Such cases should just run
+ // the regex engine. 'fooquux' will be used as a normal prefilter, and
+ // then the regex engine will try to look for an actual match.
+ if !info.props()[0].look_set().is_empty() {
+ return None;
+ }
+ // Finally, currently, our prefilters are all oriented around
+ // leftmost-first match semantics, so don't try to use them if the
+ // caller asked for anything else.
+ if kind != MatchKind::LeftmostFirst {
+ return None;
+ }
+ // The above seems like a lot of requirements to meet, but it applies
+ // to a lot of cases. 'foo', '[abc][123]' and 'foo|bar|quux' all meet
+ // the above criteria, for example.
+ //
+ // Note that this is effectively a latency optimization. If we didn't
+ // do this, then the extracted literals would still get bundled into
+ // a prefilter, and every regex engine capable of running unanchored
+ // searches supports prefilters. So this optimization merely sidesteps
+ // having to run the regex engine at all to confirm the match. Thus, it
+ // decreases the latency of a match.
+
+ // OK because we know the set is exact and thus finite.
+ let prefixes = prefixes.literals().unwrap();
+ debug!(
+ "trying to bypass regex engine by creating \
+ prefilter from {} literals: {:?}",
+ prefixes.len(),
+ prefixes,
+ );
+ let choice = match prefilter::Choice::new(kind, prefixes) {
+ Some(choice) => choice,
+ None => {
+ debug!(
+ "regex bypass failed because no prefilter could be built"
+ );
+ return None;
+ }
+ };
+ let strat: Arc<dyn Strategy> = match choice {
+ prefilter::Choice::Memchr(pre) => Pre::new(pre),
+ prefilter::Choice::Memchr2(pre) => Pre::new(pre),
+ prefilter::Choice::Memchr3(pre) => Pre::new(pre),
+ prefilter::Choice::Memmem(pre) => Pre::new(pre),
+ prefilter::Choice::Teddy(pre) => Pre::new(pre),
+ prefilter::Choice::ByteSet(pre) => Pre::new(pre),
+ prefilter::Choice::AhoCorasick(pre) => Pre::new(pre),
+ };
+ Some(strat)
+ }
+
+ /// Attempts to extract an alternation of literals, and if it's deemed
+ /// worth doing, returns an Aho-Corasick prefilter as a strategy.
+ ///
+ /// And currently, this only returns something when 'hirs.len() == 1'. This
+ /// could in theory do something if there are multiple HIRs where all of
+ /// them are alternation of literals, but I haven't had the time to go down
+ /// that path yet.
+ fn from_alternation_literals(
+ info: &RegexInfo,
+ hirs: &[&Hir],
+ ) -> Option<Arc<dyn Strategy>> {
+ use crate::util::prefilter::AhoCorasick;
+
+ let lits = crate::meta::literal::alternation_literals(info, hirs)?;
+ let ac = AhoCorasick::new(MatchKind::LeftmostFirst, &lits)?;
+ Some(Pre::new(ac))
+ }
+}
+
+// This implements Strategy for anything that implements PrefilterI.
+//
+// Note that this must only be used for regexes of length 1. Multi-regexes
+// don't work here. The prefilter interface only provides the span of a match
+// and not the pattern ID. (I did consider making it more expressive, but I
+// couldn't figure out how to tie everything together elegantly.) Thus, so long
+// as the regex only contains one pattern, we can simply assume that a match
+// corresponds to PatternID::ZERO. And indeed, that's what we do here.
+//
+// In practice, since this impl is used to report matches directly and thus
+// completely bypasses the regex engine, we only wind up using this under the
+// following restrictions:
+//
+// * There must be only one pattern. As explained above.
+// * The literal sequence must be finite and only contain exact literals.
+// * There must not be any look-around assertions. If there are, the literals
+// extracted might be exact, but a match doesn't necessarily imply an overall
+// match. As a trivial example, 'foo\bbar' does not match 'foobar'.
+// * The pattern must not have any explicit capturing groups. If it does, the
+// caller might expect them to be resolved. e.g., 'foo(bar)'.
+//
+// So when all of those things are true, we use a prefilter directly as a
+// strategy.
+//
+// In the case where the number of patterns is more than 1, we don't use this
+// but do use a special Aho-Corasick strategy if all of the regexes are just
+// simple literals or alternations of literals. (We also use the Aho-Corasick
+// strategy when len(patterns)==1 if the number of literals is large. In that
+// case, literal extraction gives up and will return an infinite set.)
+impl<P: PrefilterI> Strategy for Pre<P> {
+ fn group_info(&self) -> &GroupInfo {
+ &self.group_info
+ }
+
+ fn create_cache(&self) -> Cache {
+ Cache {
+ capmatches: Captures::all(self.group_info().clone()),
+ pikevm: wrappers::PikeVMCache::none(),
+ backtrack: wrappers::BoundedBacktrackerCache::none(),
+ onepass: wrappers::OnePassCache::none(),
+ hybrid: wrappers::HybridCache::none(),
+ revhybrid: wrappers::ReverseHybridCache::none(),
+ }
+ }
+
+ fn reset_cache(&self, _cache: &mut Cache) {}
+
+ fn is_accelerated(&self) -> bool {
+ self.pre.is_fast()
+ }
+
+ fn memory_usage(&self) -> usize {
+ self.pre.memory_usage()
+ }
+
+ fn search(&self, _cache: &mut Cache, input: &Input<'_>) -> Option<Match> {
+ if input.is_done() {
+ return None;
+ }
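+ // For an anchored search, the prefilter match must begin exactly at the
+ // start of the span ('prefix'), while an unanchored search is free to
+ // scan ahead for a candidate ('find').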
+ if input.get_anchored().is_anchored() {
+ return self
+ .pre
+ .prefix(input.haystack(), input.get_span())
+ .map(|sp| Match::new(PatternID::ZERO, sp));
+ }
+ self.pre
+ .find(input.haystack(), input.get_span())
+ .map(|sp| Match::new(PatternID::ZERO, sp))
+ }
+
+ fn search_half(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ ) -> Option<HalfMatch> {
+ self.search(cache, input).map(|m| HalfMatch::new(m.pattern(), m.end()))
+ }
+
+ fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool {
+ self.search(cache, input).is_some()
+ }
+
+ fn search_slots(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ slots: &mut [Option<NonMaxUsize>],
+ ) -> Option<PatternID> {
+ let m = self.search(cache, input)?;
+ if let Some(slot) = slots.get_mut(0) {
+ *slot = NonMaxUsize::new(m.start());
+ }
+ if let Some(slot) = slots.get_mut(1) {
+ *slot = NonMaxUsize::new(m.end());
+ }
+ Some(m.pattern())
+ }
+
+ fn which_overlapping_matches(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ patset: &mut PatternSet,
+ ) {
+ if self.search(cache, input).is_some() {
+ patset.insert(PatternID::ZERO);
+ }
+ }
+}
+
+#[derive(Debug)]
+struct Core {
+ info: RegexInfo,
+ pre: Option<Prefilter>,
+ nfa: NFA,
+ nfarev: Option<NFA>,
+ pikevm: wrappers::PikeVM,
+ backtrack: wrappers::BoundedBacktracker,
+ onepass: wrappers::OnePass,
+ hybrid: wrappers::Hybrid,
+ dfa: wrappers::DFA,
+}
+
+impl Core {
+ fn new(
+ info: RegexInfo,
+ pre: Option<Prefilter>,
+ hirs: &[&Hir],
+ ) -> Result<Core, BuildError> {
+ let mut lookm = LookMatcher::new();
+ lookm.set_line_terminator(info.config().get_line_terminator());
+ let thompson_config = thompson::Config::new()
+ .utf8(info.config().get_utf8_empty())
+ .nfa_size_limit(info.config().get_nfa_size_limit())
+ .shrink(false)
+ .which_captures(info.config().get_which_captures())
+ .look_matcher(lookm);
+ let nfa = thompson::Compiler::new()
+ .configure(thompson_config.clone())
+ .build_many_from_hir(hirs)
+ .map_err(BuildError::nfa)?;
+ // It's possible for the PikeVM or the BB to fail to build, even though
+ // at this point, we already have a full NFA in hand. They can fail
+ // when a Unicode word boundary is used but where Unicode word boundary
+ // support is disabled at compile time, thus making it impossible to
+ // match. (Construction can also fail if the NFA was compiled without
+ // captures, but we always enable that above.)
+ let pikevm = wrappers::PikeVM::new(&info, pre.clone(), &nfa)?;
+ let backtrack =
+ wrappers::BoundedBacktracker::new(&info, pre.clone(), &nfa)?;
+ // The onepass engine can of course fail to build, but we expect it to
+ // fail in many cases because it is an optimization that doesn't apply
+ // to all regexes. The 'OnePass' wrapper encapsulates this failure (and
+ // logs a message if it occurs).
+ let onepass = wrappers::OnePass::new(&info, &nfa);
+ // We try to encapsulate whether a particular regex engine should be
+ // used within each respective wrapper, but the DFAs need a reverse NFA
+ // to build themselves, and we really do not want to build a reverse NFA if
+ // we know we aren't going to use the lazy DFA. So we do a config check
+ // up front, which is in practice the only way we won't try to use the
+ // DFA.
+ let (nfarev, hybrid, dfa) =
+ if !info.config().get_hybrid() && !info.config().get_dfa() {
+ (None, wrappers::Hybrid::none(), wrappers::DFA::none())
+ } else {
+ // FIXME: Technically, we don't quite yet KNOW that we need
+ // a reverse NFA. It's possible for the DFAs below to both
+ // fail to build just based on the forward NFA. In which case,
+ // building the reverse NFA was totally wasted work. But...
+ // fixing this requires breaking DFA construction apart into
+ // two pieces: one for the forward part and another for the
+ // reverse part. Quite annoying. Making it worse, when building
+ // both DFAs fails, it's quite likely that the NFA is large and
+ // that it will take quite some time to build the reverse NFA
+ // too. So... it's really probably worth it to do this!
+ let nfarev = thompson::Compiler::new()
+ // Currently, reverse NFAs don't support capturing groups,
+ // so we MUST disable them. But even if we didn't have to,
+ // we would, because nothing in this crate does anything
+ // useful with capturing groups in reverse. And of course,
+ // the lazy DFA ignores capturing groups in all cases.
+ .configure(
+ thompson_config
+ .clone()
+ .which_captures(WhichCaptures::None)
+ .reverse(true),
+ )
+ .build_many_from_hir(hirs)
+ .map_err(BuildError::nfa)?;
+ let dfa = if !info.config().get_dfa() {
+ wrappers::DFA::none()
+ } else {
+ wrappers::DFA::new(&info, pre.clone(), &nfa, &nfarev)
+ };
+ let hybrid = if !info.config().get_hybrid() {
+ wrappers::Hybrid::none()
+ } else if dfa.is_some() {
+ debug!("skipping lazy DFA because we have a full DFA");
+ wrappers::Hybrid::none()
+ } else {
+ wrappers::Hybrid::new(&info, pre.clone(), &nfa, &nfarev)
+ };
+ (Some(nfarev), hybrid, dfa)
+ };
+ Ok(Core {
+ info,
+ pre,
+ nfa,
+ nfarev,
+ pikevm,
+ backtrack,
+ onepass,
+ hybrid,
+ dfa,
+ })
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn try_search_mayfail(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ ) -> Option<Result<Option<Match>, RetryFailError>> {
+ if let Some(e) = self.dfa.get(input) {
+ trace!("using full DFA for search at {:?}", input.get_span());
+ Some(e.try_search(input))
+ } else if let Some(e) = self.hybrid.get(input) {
+ trace!("using lazy DFA for search at {:?}", input.get_span());
+ Some(e.try_search(&mut cache.hybrid, input))
+ } else {
+ None
+ }
+ }
+
+ fn search_nofail(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ ) -> Option<Match> {
+ let caps = &mut cache.capmatches;
+ caps.set_pattern(None);
+ // We manually inline 'search_slots_nofail' here because we need to
+ // borrow from 'cache.capmatches' in this method, but if we do, then
+ // we can't pass 'cache' wholesale to 'search_slots_nofail'. It's a
+ // classic example of how the borrow checker inhibits decomposition.
+ // There are of course work-arounds (more types and/or interior
+ // mutability), but that's more annoying than this IMO.
+ let pid = if let Some(ref e) = self.onepass.get(input) {
+ trace!("using OnePass for search at {:?}", input.get_span());
+ e.search_slots(&mut cache.onepass, input, caps.slots_mut())
+ } else if let Some(ref e) = self.backtrack.get(input) {
+ trace!(
+ "using BoundedBacktracker for search at {:?}",
+ input.get_span()
+ );
+ e.search_slots(&mut cache.backtrack, input, caps.slots_mut())
+ } else {
+ trace!("using PikeVM for search at {:?}", input.get_span());
+ let e = self.pikevm.get();
+ e.search_slots(&mut cache.pikevm, input, caps.slots_mut())
+ };
+ caps.set_pattern(pid);
+ caps.get_match()
+ }
+
+ fn search_half_nofail(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ ) -> Option<HalfMatch> {
+ // Only the lazy/full DFA returns half-matches, since the DFA requires
+ // a reverse scan to find the start position. These fallback regex
+ // engines can find the start and end in a single pass, so we just do
+ // that and throw away the start offset to conform to the API.
+ let m = self.search_nofail(cache, input)?;
+ Some(HalfMatch::new(m.pattern(), m.end()))
+ }
+
+ fn search_slots_nofail(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ slots: &mut [Option<NonMaxUsize>],
+ ) -> Option<PatternID> {
+ if let Some(ref e) = self.onepass.get(input) {
+ trace!(
+ "using OnePass for capture search at {:?}",
+ input.get_span()
+ );
+ e.search_slots(&mut cache.onepass, input, slots)
+ } else if let Some(ref e) = self.backtrack.get(input) {
+ trace!(
+ "using BoundedBacktracker for capture search at {:?}",
+ input.get_span()
+ );
+ e.search_slots(&mut cache.backtrack, input, slots)
+ } else {
+ trace!(
+ "using PikeVM for capture search at {:?}",
+ input.get_span()
+ );
+ let e = self.pikevm.get();
+ e.search_slots(&mut cache.pikevm, input, slots)
+ }
+ }
+
+ fn is_match_nofail(&self, cache: &mut Cache, input: &Input<'_>) -> bool {
+ if let Some(ref e) = self.onepass.get(input) {
+ trace!(
+ "using OnePass for is-match search at {:?}",
+ input.get_span()
+ );
+ e.search_slots(&mut cache.onepass, input, &mut []).is_some()
+ } else if let Some(ref e) = self.backtrack.get(input) {
+ trace!(
+ "using BoundedBacktracker for is-match search at {:?}",
+ input.get_span()
+ );
+ e.is_match(&mut cache.backtrack, input)
+ } else {
+ trace!(
+ "using PikeVM for is-match search at {:?}",
+ input.get_span()
+ );
+ let e = self.pikevm.get();
+ e.is_match(&mut cache.pikevm, input)
+ }
+ }
+
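+ // Returns true if the caller asked for more than just the implicit
+ // slots (i.e., the start/end pair for each pattern's overall match),
+ // which means a capture-aware engine has to run to resolve the explicit
+ // groups.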
+ fn is_capture_search_needed(&self, slots_len: usize) -> bool {
+ slots_len > self.nfa.group_info().implicit_slot_len()
+ }
+}
+
+impl Strategy for Core {
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn group_info(&self) -> &GroupInfo {
+ self.nfa.group_info()
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn create_cache(&self) -> Cache {
+ Cache {
+ capmatches: Captures::all(self.group_info().clone()),
+ pikevm: self.pikevm.create_cache(),
+ backtrack: self.backtrack.create_cache(),
+ onepass: self.onepass.create_cache(),
+ hybrid: self.hybrid.create_cache(),
+ revhybrid: wrappers::ReverseHybridCache::none(),
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn reset_cache(&self, cache: &mut Cache) {
+ cache.pikevm.reset(&self.pikevm);
+ cache.backtrack.reset(&self.backtrack);
+ cache.onepass.reset(&self.onepass);
+ cache.hybrid.reset(&self.hybrid);
+ }
+
+ fn is_accelerated(&self) -> bool {
+ self.pre.as_ref().map_or(false, |pre| pre.is_fast())
+ }
+
+ fn memory_usage(&self) -> usize {
+ self.info.memory_usage()
+ + self.pre.as_ref().map_or(0, |pre| pre.memory_usage())
+ + self.nfa.memory_usage()
+ + self.nfarev.as_ref().map_or(0, |nfa| nfa.memory_usage())
+ + self.onepass.memory_usage()
+ + self.dfa.memory_usage()
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn search(&self, cache: &mut Cache, input: &Input<'_>) -> Option<Match> {
+ // We manually inline try_search_mayfail here because letting the
+ // compiler do it seems to produce pretty crappy codegen.
+ return if let Some(e) = self.dfa.get(input) {
+ trace!("using full DFA for full search at {:?}", input.get_span());
+ match e.try_search(input) {
+ Ok(x) => x,
+ Err(_err) => {
+ trace!("full DFA search failed: {}", _err);
+ self.search_nofail(cache, input)
+ }
+ }
+ } else if let Some(e) = self.hybrid.get(input) {
+ trace!("using lazy DFA for full search at {:?}", input.get_span());
+ match e.try_search(&mut cache.hybrid, input) {
+ Ok(x) => x,
+ Err(_err) => {
+ trace!("lazy DFA search failed: {}", _err);
+ self.search_nofail(cache, input)
+ }
+ }
+ } else {
+ self.search_nofail(cache, input)
+ };
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn search_half(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ ) -> Option<HalfMatch> {
+ // The main difference with 'search' is that if we're using a DFA, we
+ // can use a single forward scan without needing to run the reverse
+ // DFA.
+ if let Some(e) = self.dfa.get(input) {
+ trace!("using full DFA for half search at {:?}", input.get_span());
+ match e.try_search_half_fwd(input) {
+ Ok(x) => x,
+ Err(_err) => {
+ trace!("full DFA half search failed: {}", _err);
+ self.search_half_nofail(cache, input)
+ }
+ }
+ } else if let Some(e) = self.hybrid.get(input) {
+ trace!("using lazy DFA for half search at {:?}", input.get_span());
+ match e.try_search_half_fwd(&mut cache.hybrid, input) {
+ Ok(x) => x,
+ Err(_err) => {
+ trace!("lazy DFA half search failed: {}", _err);
+ self.search_half_nofail(cache, input)
+ }
+ }
+ } else {
+ self.search_half_nofail(cache, input)
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool {
+ if let Some(e) = self.dfa.get(input) {
+ trace!(
+ "using full DFA for is-match search at {:?}",
+ input.get_span()
+ );
+ match e.try_search_half_fwd(input) {
+ Ok(x) => x.is_some(),
+ Err(_err) => {
+ trace!("full DFA half search failed: {}", _err);
+ self.is_match_nofail(cache, input)
+ }
+ }
+ } else if let Some(e) = self.hybrid.get(input) {
+ trace!(
+ "using lazy DFA for is-match search at {:?}",
+ input.get_span()
+ );
+ match e.try_search_half_fwd(&mut cache.hybrid, input) {
+ Ok(x) => x.is_some(),
+ Err(_err) => {
+ trace!("lazy DFA half search failed: {}", _err);
+ self.is_match_nofail(cache, input)
+ }
+ }
+ } else {
+ self.is_match_nofail(cache, input)
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn search_slots(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ slots: &mut [Option<NonMaxUsize>],
+ ) -> Option<PatternID> {
+ // Even if the regex has explicit capture groups, if the caller didn't
+ // provide any explicit slots, then it doesn't make sense to try and do
+ // extra work to get offsets for those slots. Ideally the caller should
+ // realize this and not call this routine in the first place, but alas,
+ // we try to save the caller from themselves if they do.
+ if !self.is_capture_search_needed(slots.len()) {
+ trace!("asked for slots unnecessarily, trying fast path");
+ let m = self.search(cache, input)?;
+ copy_match_to_slots(m, slots);
+ return Some(m.pattern());
+ }
+ // If the onepass DFA is available for this search (which only happens
+ // when it's anchored), then skip running a fallible DFA. The onepass
+ // DFA isn't as fast as a full or lazy DFA, but it is typically quite
+ // a bit faster than the backtracker or the PikeVM. So it isn't as
+ // advantageous to try and do a full/lazy DFA scan first.
+ //
+ // We still theorize that it's better to do a full/lazy DFA scan, even
+ // when it's anchored, because it's usually much faster and permits us
+ // to say "no match" much more quickly. This does hurt the case of,
+ // say, parsing each line in a log file into capture groups, because
+ // in that case, the line always matches. So the lazy DFA scan is
+ // usually just wasted work. But, the lazy DFA is usually quite fast
+ // and doesn't cost too much here.
+ if self.onepass.get(&input).is_some() {
+ return self.search_slots_nofail(cache, &input, slots);
+ }
+ let m = match self.try_search_mayfail(cache, input) {
+ Some(Ok(Some(m))) => m,
+ Some(Ok(None)) => return None,
+ Some(Err(_err)) => {
+ trace!("fast capture search failed: {}", _err);
+ return self.search_slots_nofail(cache, input, slots);
+ }
+ None => {
+ return self.search_slots_nofail(cache, input, slots);
+ }
+ };
+ // At this point, now that we've found the bounds of the
+ // match, we need to re-run something that can resolve
+ // capturing groups. But we only need to run on it on the
+ // match bounds and not the entire haystack.
+ trace!(
+ "match found at {}..{} in capture search, \
+ using another engine to find captures",
+ m.start(),
+ m.end(),
+ );
+ let input = input
+ .clone()
+ .span(m.start()..m.end())
+ .anchored(Anchored::Pattern(m.pattern()));
+ Some(
+ self.search_slots_nofail(cache, &input, slots)
+ .expect("should find a match"),
+ )
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn which_overlapping_matches(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ patset: &mut PatternSet,
+ ) {
+ if let Some(e) = self.dfa.get(input) {
+ trace!(
+ "using full DFA for overlapping search at {:?}",
+ input.get_span()
+ );
+ let _err = match e.try_which_overlapping_matches(input, patset) {
+ Ok(()) => return,
+ Err(err) => err,
+ };
+ trace!("fast overlapping search failed: {}", _err);
+ } else if let Some(e) = self.hybrid.get(input) {
+ trace!(
+ "using lazy DFA for overlapping search at {:?}",
+ input.get_span()
+ );
+ let _err = match e.try_which_overlapping_matches(
+ &mut cache.hybrid,
+ input,
+ patset,
+ ) {
+ Ok(()) => {
+ return;
+ }
+ Err(err) => err,
+ };
+ trace!("fast overlapping search failed: {}", _err);
+ }
+ trace!(
+ "using PikeVM for overlapping search at {:?}",
+ input.get_span()
+ );
+ let e = self.pikevm.get();
+ e.which_overlapping_matches(&mut cache.pikevm, input, patset)
+ }
+}
+
+#[derive(Debug)]
+struct ReverseAnchored {
+ core: Core,
+}
+
+impl ReverseAnchored {
+ fn new(core: Core) -> Result<ReverseAnchored, Core> {
+ if !core.info.is_always_anchored_end() {
+ debug!(
+ "skipping reverse anchored optimization because \
+ the regex is not always anchored at the end"
+ );
+ return Err(core);
+ }
+ // Note that the caller can still request an anchored search even when
+ // the regex isn't anchored at the start. We detect that case in the
+ // search routines below and just fallback to the core engine. This
+ // is fine because both searches are anchored. It's just a matter of
+ // picking one. Falling back to the core engine is a little simpler,
+ // since if we used the reverse anchored approach, we'd have to add an
+ // extra check to ensure the match reported starts at the place where
+ // the caller requested the search to start.
+ if core.info.is_always_anchored_start() {
+ debug!(
+ "skipping reverse anchored optimization because \
+ the regex is also anchored at the start"
+ );
+ return Err(core);
+ }
+ // Only DFAs can do reverse searches (currently), so we need one of
+ // them in order to do this optimization. It's possible (although
+ // pretty unlikely) that we have neither and need to give up.
+ if !core.hybrid.is_some() && !core.dfa.is_some() {
+ debug!(
+ "skipping reverse anchored optimization because \
+ we don't have a lazy DFA or a full DFA"
+ );
+ return Err(core);
+ }
+ Ok(ReverseAnchored { core })
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn try_search_half_anchored_rev(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ ) -> Result<Option<HalfMatch>, RetryFailError> {
+ // We of course always want an anchored search. In theory, the
+ // underlying regex engines should automatically enable anchored
+ // searches since the regex is itself anchored, but this more clearly
+ // expresses intent and is always correct.
+ let input = input.clone().anchored(Anchored::Yes);
+ if let Some(e) = self.core.dfa.get(&input) {
+ trace!(
+ "using full DFA for reverse anchored search at {:?}",
+ input.get_span()
+ );
+ e.try_search_half_rev(&input)
+ } else if let Some(e) = self.core.hybrid.get(&input) {
+ trace!(
+ "using lazy DFA for reverse anchored search at {:?}",
+ input.get_span()
+ );
+ e.try_search_half_rev(&mut cache.hybrid, &input)
+ } else {
+ unreachable!("ReverseAnchored always has a DFA")
+ }
+ }
+}
+
+// Note that in this impl, we don't check that 'input.end() ==
+// input.haystack().len()'. In particular, when that condition is false, a
+// match is always impossible because we know that the regex is always anchored
+// at the end (or else 'ReverseAnchored' won't be built). We don't check that
+// here because the 'Regex' wrapper actually does that for us in all cases.
+// Thus, in this impl, we can actually assume that the end position in 'input'
+// is equivalent to the length of the haystack.
+impl Strategy for ReverseAnchored {
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn group_info(&self) -> &GroupInfo {
+ self.core.group_info()
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn create_cache(&self) -> Cache {
+ self.core.create_cache()
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn reset_cache(&self, cache: &mut Cache) {
+ self.core.reset_cache(cache);
+ }
+
+ fn is_accelerated(&self) -> bool {
+ // Since this is anchored at the end, a reverse anchored search is
+ // almost certainly guaranteed to result in a much faster search than
+ // a standard forward search.
+ true
+ }
+
+ fn memory_usage(&self) -> usize {
+ self.core.memory_usage()
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn search(&self, cache: &mut Cache, input: &Input<'_>) -> Option<Match> {
+ if input.get_anchored().is_anchored() {
+ return self.core.search(cache, input);
+ }
+ match self.try_search_half_anchored_rev(cache, input) {
+ Err(_err) => {
+ trace!("fast reverse anchored search failed: {}", _err);
+ self.core.search_nofail(cache, input)
+ }
+ Ok(None) => None,
+ Ok(Some(hm)) => {
+ Some(Match::new(hm.pattern(), hm.offset()..input.end()))
+ }
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn search_half(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ ) -> Option<HalfMatch> {
+ if input.get_anchored().is_anchored() {
+ return self.core.search_half(cache, input);
+ }
+ match self.try_search_half_anchored_rev(cache, input) {
+ Err(_err) => {
+ trace!("fast reverse anchored search failed: {}", _err);
+ self.core.search_half_nofail(cache, input)
+ }
+ Ok(None) => None,
+ Ok(Some(hm)) => {
+ // Careful here! 'search_half' is expected to behave like a *forward*
+ // search that only cares about the *end* position of a match. But 'hm'
+ // came from a reverse scan, so 'hm.offset()' is the *start* of the
+ // match. So we just throw that away here and, since we know we
+ // have a match, return the only possible position at which a
+ // match can occur: input.end().
+ Some(HalfMatch::new(hm.pattern(), input.end()))
+ }
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool {
+ if input.get_anchored().is_anchored() {
+ return self.core.is_match(cache, input);
+ }
+ match self.try_search_half_anchored_rev(cache, input) {
+ Err(_err) => {
+ trace!("fast reverse anchored search failed: {}", _err);
+ self.core.is_match_nofail(cache, input)
+ }
+ Ok(None) => false,
+ Ok(Some(_)) => true,
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn search_slots(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ slots: &mut [Option<NonMaxUsize>],
+ ) -> Option<PatternID> {
+ if input.get_anchored().is_anchored() {
+ return self.core.search_slots(cache, input, slots);
+ }
+ match self.try_search_half_anchored_rev(cache, input) {
+ Err(_err) => {
+ trace!("fast reverse anchored search failed: {}", _err);
+ self.core.search_slots_nofail(cache, input, slots)
+ }
+ Ok(None) => None,
+ Ok(Some(hm)) => {
+ if !self.core.is_capture_search_needed(slots.len()) {
+ trace!("asked for slots unnecessarily, skipping captures");
+ let m = Match::new(hm.pattern(), hm.offset()..input.end());
+ copy_match_to_slots(m, slots);
+ return Some(m.pattern());
+ }
+ let start = hm.offset();
+ let input = input
+ .clone()
+ .span(start..input.end())
+ .anchored(Anchored::Pattern(hm.pattern()));
+ self.core.search_slots_nofail(cache, &input, slots)
+ }
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn which_overlapping_matches(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ patset: &mut PatternSet,
+ ) {
+ // It seems like this could probably benefit from a reverse anchored
+ // optimization, perhaps by doing an overlapping reverse search (which
+ // the DFAs do support). I haven't given it much thought though, and
+ // I'm currently focused more on the single pattern case.
+ self.core.which_overlapping_matches(cache, input, patset)
+ }
+}
+
+#[derive(Debug)]
+struct ReverseSuffix {
+ core: Core,
+ pre: Prefilter,
+}
+
+impl ReverseSuffix {
+ fn new(core: Core, hirs: &[&Hir]) -> Result<ReverseSuffix, Core> {
+ if !core.info.config().get_auto_prefilter() {
+ debug!(
+ "skipping reverse suffix optimization because \
+ automatic prefilters are disabled"
+ );
+ return Err(core);
+ }
+ // Like the reverse inner optimization, we don't do this for regexes
+ // that are always anchored. It could lead to scanning too much, but it
+ // could also say "no match" much more quickly than running the regex
+ // engine if the initial literal scan doesn't match. With that said,
+ // the reverse suffix optimization has lower overhead, since it only
+ // requires a reverse scan after a literal match to confirm or reject
+ // the match. (Although, in the case of confirmation, it then needs to
+ // do another forward scan to find the end position.)
+ //
+ // Note that the caller can still request an anchored search even
+ // when the regex isn't anchored. We detect that case in the search
+ // routines below and just fallback to the core engine. Currently this
+ // optimization assumes all searches are unanchored, so if we do want
+ // to enable this optimization for anchored searches, it will need a
+ // little work to support it.
+ if core.info.is_always_anchored_start() {
+ debug!(
+ "skipping reverse suffix optimization because \
+ the regex is always anchored at the start",
+ );
+ return Err(core);
+ }
+ // Only DFAs can do reverse searches (currently), so we need one of
+ // them in order to do this optimization. It's possible (although
+ // pretty unlikely) that we have neither and need to give up.
+ if !core.hybrid.is_some() && !core.dfa.is_some() {
+ debug!(
+ "skipping reverse suffix optimization because \
+ we don't have a lazy DFA or a full DFA"
+ );
+ return Err(core);
+ }
+ if core.pre.as_ref().map_or(false, |p| p.is_fast()) {
+ debug!(
+ "skipping reverse suffix optimization because \
+ we already have a prefilter that we think is fast"
+ );
+ return Err(core);
+ }
+ let kind = core.info.config().get_match_kind();
+ let suffixes = crate::util::prefilter::suffixes(kind, hirs);
+ let lcs = match suffixes.longest_common_suffix() {
+ None => {
+ debug!(
+ "skipping reverse suffix optimization because \
+ a longest common suffix could not be found",
+ );
+ return Err(core);
+ }
+ Some(lcs) if lcs.is_empty() => {
+ debug!(
+ "skipping reverse suffix optimization because \
+ the longest common suffix is the empty string",
+ );
+ return Err(core);
+ }
+ Some(lcs) => lcs,
+ };
+ let pre = match Prefilter::new(kind, &[lcs]) {
+ Some(pre) => pre,
+ None => {
+ debug!(
+ "skipping reverse suffix optimization because \
+ a prefilter could not be constructed from the \
+ longest common suffix",
+ );
+ return Err(core);
+ }
+ };
+ if !pre.is_fast() {
+ debug!(
+ "skipping reverse suffix optimization because \
+ while we have a suffix prefilter, it is not \
+ believed to be 'fast'"
+ );
+ return Err(core);
+ }
+ Ok(ReverseSuffix { core, pre })
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn try_search_half_start(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ ) -> Result<Option<HalfMatch>, RetryError> {
+ let mut span = input.get_span();
+ let mut min_start = 0;
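+ // 'min_start' is (roughly) how far back the limited reverse scan is
+ // allowed to go. It gets bumped to the end of each suffix candidate
+ // we've already tried, so a later reverse scan that would need to
+ // re-walk those bytes reports an error instead (see the sibling
+ // 'limited' module) and we fall back to the core engine.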
+ loop {
+ let litmatch = match self.pre.find(input.haystack(), span) {
+ None => return Ok(None),
+ Some(span) => span,
+ };
+ trace!("reverse suffix scan found suffix match at {:?}", litmatch);
+ let revinput = input
+ .clone()
+ .anchored(Anchored::Yes)
+ .span(input.start()..litmatch.end);
+ match self
+ .try_search_half_rev_limited(cache, &revinput, min_start)?
+ {
+ None => {
+ if span.start >= span.end {
+ break;
+ }
+ span.start = litmatch.start.checked_add(1).unwrap();
+ }
+ Some(hm) => return Ok(Some(hm)),
+ }
+ min_start = litmatch.end;
+ }
+ Ok(None)
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn try_search_half_fwd(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ ) -> Result<Option<HalfMatch>, RetryFailError> {
+ if let Some(e) = self.core.dfa.get(&input) {
+ trace!(
+ "using full DFA for forward reverse suffix search at {:?}",
+ input.get_span()
+ );
+ e.try_search_half_fwd(&input)
+ } else if let Some(e) = self.core.hybrid.get(&input) {
+ trace!(
+ "using lazy DFA for forward reverse suffix search at {:?}",
+ input.get_span()
+ );
+ e.try_search_half_fwd(&mut cache.hybrid, &input)
+ } else {
+ unreachable!("ReverseSuffix always has a DFA")
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn try_search_half_rev_limited(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ min_start: usize,
+ ) -> Result<Option<HalfMatch>, RetryError> {
+ if let Some(e) = self.core.dfa.get(&input) {
+ trace!(
+ "using full DFA for reverse suffix search at {:?}, \
+ but will be stopped at {} to avoid quadratic behavior",
+ input.get_span(),
+ min_start,
+ );
+ e.try_search_half_rev_limited(&input, min_start)
+ } else if let Some(e) = self.core.hybrid.get(&input) {
+ trace!(
+ "using lazy DFA for reverse inner search at {:?}, \
+ but will be stopped at {} to avoid quadratic behavior",
+ input.get_span(),
+ min_start,
+ );
+ e.try_search_half_rev_limited(&mut cache.hybrid, &input, min_start)
+ } else {
+ unreachable!("ReverseSuffix always has a DFA")
+ }
+ }
+}
+
+impl Strategy for ReverseSuffix {
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn group_info(&self) -> &GroupInfo {
+ self.core.group_info()
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn create_cache(&self) -> Cache {
+ self.core.create_cache()
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn reset_cache(&self, cache: &mut Cache) {
+ self.core.reset_cache(cache);
+ }
+
+ fn is_accelerated(&self) -> bool {
+ self.pre.is_fast()
+ }
+
+ fn memory_usage(&self) -> usize {
+ self.core.memory_usage() + self.pre.memory_usage()
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn search(&self, cache: &mut Cache, input: &Input<'_>) -> Option<Match> {
+ if input.get_anchored().is_anchored() {
+ return self.core.search(cache, input);
+ }
+ match self.try_search_half_start(cache, input) {
+ Err(RetryError::Quadratic(_err)) => {
+ trace!("reverse suffix optimization failed: {}", _err);
+ self.core.search(cache, input)
+ }
+ Err(RetryError::Fail(_err)) => {
+ trace!("reverse suffix reverse fast search failed: {}", _err);
+ self.core.search_nofail(cache, input)
+ }
+ Ok(None) => None,
+ Ok(Some(hm_start)) => {
+ let fwdinput = input
+ .clone()
+ .anchored(Anchored::Pattern(hm_start.pattern()))
+ .span(hm_start.offset()..input.end());
+ match self.try_search_half_fwd(cache, &fwdinput) {
+ Err(_err) => {
+ trace!(
+ "reverse suffix forward fast search failed: {}",
+ _err
+ );
+ self.core.search_nofail(cache, input)
+ }
+ Ok(None) => {
+ unreachable!(
+ "suffix match plus reverse match implies \
+ there must be a match",
+ )
+ }
+ Ok(Some(hm_end)) => Some(Match::new(
+ hm_start.pattern(),
+ hm_start.offset()..hm_end.offset(),
+ )),
+ }
+ }
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn search_half(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ ) -> Option<HalfMatch> {
+ if input.get_anchored().is_anchored() {
+ return self.core.search_half(cache, input);
+ }
+ match self.try_search_half_start(cache, input) {
+ Err(RetryError::Quadratic(_err)) => {
+ trace!("reverse suffix half optimization failed: {}", _err);
+ self.core.search_half(cache, input)
+ }
+ Err(RetryError::Fail(_err)) => {
+ trace!(
+ "reverse suffix reverse fast half search failed: {}",
+ _err
+ );
+ self.core.search_half_nofail(cache, input)
+ }
+ Ok(None) => None,
+ Ok(Some(hm_start)) => {
+ // This is a bit subtle. It is tempting to just stop searching
+ // at this point and return a half-match with an offset
+ // corresponding to where the suffix was found. But the suffix
+ // match does not necessarily correspond to the end of the
+ // proper leftmost-first match. Consider /[a-z]+ing/ against
+ // 'tingling'. The first suffix match is the first 'ing', and
+ // the /[a-z]+/ matches the 't'. So if we stopped here, then
+ // we'd report 'ting' as the match. But 'tingling' is the
+ // correct match because of greediness.
+ let fwdinput = input
+ .clone()
+ .anchored(Anchored::Pattern(hm_start.pattern()))
+ .span(hm_start.offset()..input.end());
+ match self.try_search_half_fwd(cache, &fwdinput) {
+ Err(_err) => {
+ trace!(
+ "reverse suffix forward fast search failed: {}",
+ _err
+ );
+ self.core.search_half_nofail(cache, input)
+ }
+ Ok(None) => {
+ unreachable!(
+ "suffix match plus reverse match implies \
+ there must be a match",
+ )
+ }
+ Ok(Some(hm_end)) => Some(hm_end),
+ }
+ }
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool {
+ if input.get_anchored().is_anchored() {
+ return self.core.is_match(cache, input);
+ }
+ match self.try_search_half_start(cache, input) {
+ Err(RetryError::Quadratic(_err)) => {
+ trace!("reverse suffix half optimization failed: {}", _err);
+ self.core.is_match_nofail(cache, input)
+ }
+ Err(RetryError::Fail(_err)) => {
+ trace!(
+ "reverse suffix reverse fast half search failed: {}",
+ _err
+ );
+ self.core.is_match_nofail(cache, input)
+ }
+ Ok(None) => false,
+ Ok(Some(_)) => true,
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn search_slots(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ slots: &mut [Option<NonMaxUsize>],
+ ) -> Option<PatternID> {
+ if input.get_anchored().is_anchored() {
+ return self.core.search_slots(cache, input, slots);
+ }
+ if !self.core.is_capture_search_needed(slots.len()) {
+ trace!("asked for slots unnecessarily, trying fast path");
+ let m = self.search(cache, input)?;
+ copy_match_to_slots(m, slots);
+ return Some(m.pattern());
+ }
+ let hm_start = match self.try_search_half_start(cache, input) {
+ Err(RetryError::Quadratic(_err)) => {
+ trace!(
+ "reverse suffix captures optimization failed: {}",
+ _err
+ );
+ return self.core.search_slots(cache, input, slots);
+ }
+ Err(RetryError::Fail(_err)) => {
+ trace!(
+ "reverse suffix reverse fast captures search failed: {}",
+ _err
+ );
+ return self.core.search_slots_nofail(cache, input, slots);
+ }
+ Ok(None) => return None,
+ Ok(Some(hm_start)) => hm_start,
+ };
+ trace!(
+ "match found at {}..{} in capture search, \
+ using another engine to find captures",
+ hm_start.offset(),
+ input.end(),
+ );
+ let start = hm_start.offset();
+ let input = input
+ .clone()
+ .span(start..input.end())
+ .anchored(Anchored::Pattern(hm_start.pattern()));
+ self.core.search_slots_nofail(cache, &input, slots)
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn which_overlapping_matches(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ patset: &mut PatternSet,
+ ) {
+ self.core.which_overlapping_matches(cache, input, patset)
+ }
+}
+
+#[derive(Debug)]
+struct ReverseInner {
+ core: Core,
+ preinner: Prefilter,
+ nfarev: NFA,
+ hybrid: wrappers::ReverseHybrid,
+ dfa: wrappers::ReverseDFA,
+}
+
+impl ReverseInner {
+ fn new(core: Core, hirs: &[&Hir]) -> Result<ReverseInner, Core> {
+ if !core.info.config().get_auto_prefilter() {
+ debug!(
+ "skipping reverse inner optimization because \
+ automatic prefilters are disabled"
+ );
+ return Err(core);
+ }
+ // Currently we hard-code the assumption of leftmost-first match
+ // semantics. This isn't a huge deal because 'all' semantics tend to
+ // only be used for forward overlapping searches with multiple regexes,
+ // and this optimization only supports a single pattern at the moment.
+ if core.info.config().get_match_kind() != MatchKind::LeftmostFirst {
+ debug!(
+ "skipping reverse inner optimization because \
+ match kind is {:?} but this only supports leftmost-first",
+ core.info.config().get_match_kind(),
+ );
+ return Err(core);
+ }
+ // It's likely that a reverse inner scan has too much overhead for it
+ // to be worth it when the regex is anchored at the start. It is
+ // possible for it to be quite a bit faster if the initial literal
+ // scan fails to detect a match, in which case, we can say "no match"
+ // very quickly. But this could be undesirable, e.g., scanning too far
+ // or when the literal scan matches. If it matches, then confirming the
+ // match requires a reverse scan followed by a forward scan to confirm
+ // or reject, which is a fair bit of work.
+ //
+ // Note that the caller can still request an anchored search even
+ // when the regex isn't anchored. We detect that case in the search
+ // routines below and just fallback to the core engine. Currently this
+ // optimization assumes all searches are unanchored, so if we do want
+ // to enable this optimization for anchored searches, it will need a
+ // little work to support it.
+ if core.info.is_always_anchored_start() {
+ debug!(
+ "skipping reverse inner optimization because \
+ the regex is always anchored at the start",
+ );
+ return Err(core);
+ }
+ // Only DFAs can do reverse searches (currently), so we need one of
+ // them in order to do this optimization. It's possible (although
+ // pretty unlikely) that we have neither and need to give up.
+ if !core.hybrid.is_some() && !core.dfa.is_some() {
+ debug!(
+ "skipping reverse inner optimization because \
+ we don't have a lazy DFA or a full DFA"
+ );
+ return Err(core);
+ }
+ if core.pre.as_ref().map_or(false, |p| p.is_fast()) {
+ debug!(
+ "skipping reverse inner optimization because \
+ we already have a prefilter that we think is fast"
+ );
+ return Err(core);
+ } else if core.pre.is_some() {
+ debug!(
+ "core engine has a prefix prefilter, but it is \
+ probably not fast, so continuing with attempt to \
+ use reverse inner prefilter"
+ );
+ }
+ let (concat_prefix, preinner) = match reverse_inner::extract(hirs) {
+ Some(x) => x,
+ // N.B. the 'extract' function emits debug messages explaining
+ // why we bailed out here.
+ None => return Err(core),
+ };
+ debug!("building reverse NFA for prefix before inner literal");
+ let mut lookm = LookMatcher::new();
+ lookm.set_line_terminator(core.info.config().get_line_terminator());
+ let thompson_config = thompson::Config::new()
+ .reverse(true)
+ .utf8(core.info.config().get_utf8_empty())
+ .nfa_size_limit(core.info.config().get_nfa_size_limit())
+ .shrink(false)
+ .which_captures(WhichCaptures::None)
+ .look_matcher(lookm);
+ let result = thompson::Compiler::new()
+ .configure(thompson_config)
+ .build_from_hir(&concat_prefix);
+ let nfarev = match result {
+ Ok(nfarev) => nfarev,
+ Err(_err) => {
+ debug!(
+ "skipping reverse inner optimization because the \
+ reverse NFA failed to build: {}",
+ _err,
+ );
+ return Err(core);
+ }
+ };
+ debug!("building reverse DFA for prefix before inner literal");
+ let dfa = if !core.info.config().get_dfa() {
+ wrappers::ReverseDFA::none()
+ } else {
+ wrappers::ReverseDFA::new(&core.info, &nfarev)
+ };
+ let hybrid = if !core.info.config().get_hybrid() {
+ wrappers::ReverseHybrid::none()
+ } else if dfa.is_some() {
+ debug!(
+ "skipping lazy DFA for reverse inner optimization \
+ because we have a full DFA"
+ );
+ wrappers::ReverseHybrid::none()
+ } else {
+ wrappers::ReverseHybrid::new(&core.info, &nfarev)
+ };
+ Ok(ReverseInner { core, preinner, nfarev, hybrid, dfa })
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn try_search_full(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ ) -> Result<Option<Match>, RetryError> {
+ let mut span = input.get_span();
+ let mut min_match_start = 0;
+ let mut min_pre_start = 0;
+ loop {
+ let litmatch = match self.preinner.find(input.haystack(), span) {
+ None => return Ok(None),
+ Some(span) => span,
+ };
+ if litmatch.start < min_pre_start {
+ trace!(
+ "found inner prefilter match at {:?}, which starts \
+ before the end of the last forward scan at {}, \
+ quitting to avoid quadratic behavior",
+ litmatch,
+ min_pre_start,
+ );
+ return Err(RetryError::Quadratic(RetryQuadraticError::new()));
+ }
+ trace!("reverse inner scan found inner match at {:?}", litmatch);
+ let revinput = input
+ .clone()
+ .anchored(Anchored::Yes)
+ .span(input.start()..litmatch.start);
+ // Note that in addition to the literal search above scanning past
+ // our minimum start point, this routine can also return an error
+ // as a result of detecting possible quadratic behavior if the
+ // reverse scan goes past the minimum start point. That is, the
+ // literal search might not, but the reverse regex search for the
+ // prefix might!
+ match self.try_search_half_rev_limited(
+ cache,
+ &revinput,
+ min_match_start,
+ )? {
+ None => {
+ if span.start >= span.end {
+ break;
+ }
+ span.start = litmatch.start.checked_add(1).unwrap();
+ }
+ Some(hm_start) => {
+ let fwdinput = input
+ .clone()
+ .anchored(Anchored::Pattern(hm_start.pattern()))
+ .span(hm_start.offset()..input.end());
+ match self.try_search_half_fwd_stopat(cache, &fwdinput)? {
+ Err(stopat) => {
+ min_pre_start = stopat;
+ span.start =
+ litmatch.start.checked_add(1).unwrap();
+ }
+ Ok(hm_end) => {
+ return Ok(Some(Match::new(
+ hm_start.pattern(),
+ hm_start.offset()..hm_end.offset(),
+ )))
+ }
+ }
+ }
+ }
+ min_match_start = litmatch.end;
+ }
+ Ok(None)
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn try_search_half_fwd_stopat(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ ) -> Result<Result<HalfMatch, usize>, RetryFailError> {
+ if let Some(e) = self.core.dfa.get(&input) {
+ trace!(
+ "using full DFA for forward reverse inner search at {:?}",
+ input.get_span()
+ );
+ e.try_search_half_fwd_stopat(&input)
+ } else if let Some(e) = self.core.hybrid.get(&input) {
+ trace!(
+ "using lazy DFA for forward reverse inner search at {:?}",
+ input.get_span()
+ );
+ e.try_search_half_fwd_stopat(&mut cache.hybrid, &input)
+ } else {
+ unreachable!("ReverseInner always has a DFA")
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn try_search_half_rev_limited(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ min_start: usize,
+ ) -> Result<Option<HalfMatch>, RetryError> {
+ if let Some(e) = self.dfa.get(&input) {
+ trace!(
+ "using full DFA for reverse inner search at {:?}, \
+ but will be stopped at {} to avoid quadratic behavior",
+ input.get_span(),
+ min_start,
+ );
+ e.try_search_half_rev_limited(&input, min_start)
+ } else if let Some(e) = self.hybrid.get(&input) {
+ trace!(
+ "using lazy DFA for reverse inner search at {:?}, \
+ but will be stopped at {} to avoid quadratic behavior",
+ input.get_span(),
+ min_start,
+ );
+ e.try_search_half_rev_limited(
+ &mut cache.revhybrid,
+ &input,
+ min_start,
+ )
+ } else {
+ unreachable!("ReverseInner always has a DFA")
+ }
+ }
+}
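+
+// A minimal, self-contained sketch of the three-phase control flow that
+// `ReverseInner::try_search_full` above implements, using plain string
+// searches instead of this crate's engines: find the inner literal, scan
+// backwards for the prefix, then scan forwards for the rest of the match.
+// The pattern, helper name and haystack below are toy stand-ins for
+// illustration only; the real routine also retries after a failed
+// confirmation and guards against quadratic re-scanning, which this toy
+// skips.
+#[cfg(test)]
+mod reverse_inner_flow_sketch {
+    // Toy stand-in for the regex `[a-z]+@[a-z]+` with inner literal `@`.
+    fn find_match(haystack: &str) -> Option<(usize, usize)> {
+        // Phase 1: prefilter scan for the inner literal.
+        let lit = haystack.find('@')?;
+        // Phase 2: reverse scan for the prefix `[a-z]+` ending at the literal.
+        let start = haystack[..lit]
+            .rfind(|c: char| !c.is_ascii_lowercase())
+            .map_or(0, |i| i + 1);
+        // Phase 3: forward scan for the suffix `[a-z]+` after the literal.
+        let end = haystack[lit + 1..]
+            .find(|c: char| !c.is_ascii_lowercase())
+            .map_or(haystack.len(), |i| lit + 1 + i);
+        if start == lit || end == lit + 1 {
+            return None;
+        }
+        Some((start, end))
+    }
+
+    #[test]
+    fn toy_reverse_inner() {
+        assert_eq!(Some((4, 11)), find_match("to: foo@bar baz"));
+    }
+}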
+
+impl Strategy for ReverseInner {
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn group_info(&self) -> &GroupInfo {
+ self.core.group_info()
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn create_cache(&self) -> Cache {
+ let mut cache = self.core.create_cache();
+ cache.revhybrid = self.hybrid.create_cache();
+ cache
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn reset_cache(&self, cache: &mut Cache) {
+ self.core.reset_cache(cache);
+ cache.revhybrid.reset(&self.hybrid);
+ }
+
+ fn is_accelerated(&self) -> bool {
+ self.preinner.is_fast()
+ }
+
+ fn memory_usage(&self) -> usize {
+ self.core.memory_usage()
+ + self.preinner.memory_usage()
+ + self.nfarev.memory_usage()
+ + self.dfa.memory_usage()
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn search(&self, cache: &mut Cache, input: &Input<'_>) -> Option<Match> {
+ if input.get_anchored().is_anchored() {
+ return self.core.search(cache, input);
+ }
+ match self.try_search_full(cache, input) {
+ Err(RetryError::Quadratic(_err)) => {
+ trace!("reverse inner optimization failed: {}", _err);
+ self.core.search(cache, input)
+ }
+ Err(RetryError::Fail(_err)) => {
+ trace!("reverse inner fast search failed: {}", _err);
+ self.core.search_nofail(cache, input)
+ }
+ Ok(matornot) => matornot,
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn search_half(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ ) -> Option<HalfMatch> {
+ if input.get_anchored().is_anchored() {
+ return self.core.search_half(cache, input);
+ }
+ match self.try_search_full(cache, input) {
+ Err(RetryError::Quadratic(_err)) => {
+ trace!("reverse inner half optimization failed: {}", _err);
+ self.core.search_half(cache, input)
+ }
+ Err(RetryError::Fail(_err)) => {
+ trace!("reverse inner fast half search failed: {}", _err);
+ self.core.search_half_nofail(cache, input)
+ }
+ Ok(None) => None,
+ Ok(Some(m)) => Some(HalfMatch::new(m.pattern(), m.end())),
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool {
+ if input.get_anchored().is_anchored() {
+ return self.core.is_match(cache, input);
+ }
+ match self.try_search_full(cache, input) {
+ Err(RetryError::Quadratic(_err)) => {
+ trace!("reverse inner half optimization failed: {}", _err);
+ self.core.is_match_nofail(cache, input)
+ }
+ Err(RetryError::Fail(_err)) => {
+ trace!("reverse inner fast half search failed: {}", _err);
+ self.core.is_match_nofail(cache, input)
+ }
+ Ok(None) => false,
+ Ok(Some(_)) => true,
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn search_slots(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ slots: &mut [Option<NonMaxUsize>],
+ ) -> Option<PatternID> {
+ if input.get_anchored().is_anchored() {
+ return self.core.search_slots(cache, input, slots);
+ }
+ if !self.core.is_capture_search_needed(slots.len()) {
+ trace!("asked for slots unnecessarily, trying fast path");
+ let m = self.search(cache, input)?;
+ copy_match_to_slots(m, slots);
+ return Some(m.pattern());
+ }
+ let m = match self.try_search_full(cache, input) {
+ Err(RetryError::Quadratic(_err)) => {
+ trace!("reverse inner captures optimization failed: {}", _err);
+ return self.core.search_slots(cache, input, slots);
+ }
+ Err(RetryError::Fail(_err)) => {
+ trace!("reverse inner fast captures search failed: {}", _err);
+ return self.core.search_slots_nofail(cache, input, slots);
+ }
+ Ok(None) => return None,
+ Ok(Some(m)) => m,
+ };
+ trace!(
+ "match found at {}..{} in capture search, \
+ using another engine to find captures",
+ m.start(),
+ m.end(),
+ );
+ let input = input
+ .clone()
+ .span(m.start()..m.end())
+ .anchored(Anchored::Pattern(m.pattern()));
+ self.core.search_slots_nofail(cache, &input, slots)
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn which_overlapping_matches(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ patset: &mut PatternSet,
+ ) {
+ self.core.which_overlapping_matches(cache, input, patset)
+ }
+}
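+
+// A sketch, via this crate's public API, of the trick used by `search_slots`
+// above: once a non-capturing engine has reported the overall match bounds,
+// captures are recovered by re-running a capture-capable search anchored to
+// exactly that span and pattern. The pattern and haystack here are arbitrary
+// examples, and this is illustrative rather than an exhaustive test.
+#[cfg(test)]
+mod anchored_capture_sketch {
+    use crate::{meta::Regex, Anchored, Input};
+
+    #[test]
+    fn rerun_anchored_for_captures() {
+        let re = Regex::new(r"(\w+)@(\w+)").unwrap();
+        let hay = "send to foo@bar now";
+        // Stand-in for the half-match bounds found by a DFA.
+        let m = re.find(hay).unwrap();
+        // Re-run a capture search restricted to the known match.
+        let input = Input::new(hay)
+            .span(m.start()..m.end())
+            .anchored(Anchored::Pattern(m.pattern()));
+        let mut caps = re.create_captures();
+        re.search_captures(&input, &mut caps);
+        let g1 = caps.get_group(1).unwrap();
+        let g2 = caps.get_group(2).unwrap();
+        assert_eq!("foo", &hay[g1.start..g1.end]);
+        assert_eq!("bar", &hay[g2.start..g2.end]);
+    }
+}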
+
+/// Copies the offsets in the given match to the corresponding positions in
+/// `slots`.
+///
+/// In effect, this sets the slots corresponding to the implicit group for the
+/// pattern in the given match. If the indices for the corresponding slots do
+/// not exist, then no slots are set.
+///
+/// This is useful when the caller provides slots (or captures), but you use a
+/// regex engine that doesn't operate on slots (like a lazy DFA). This function
+/// lets you map the match you get back to the slots provided by the caller.
+#[cfg_attr(feature = "perf-inline", inline(always))]
+fn copy_match_to_slots(m: Match, slots: &mut [Option<NonMaxUsize>]) {
+ let slot_start = m.pattern().as_usize() * 2;
+ let slot_end = slot_start + 1;
+ if let Some(slot) = slots.get_mut(slot_start) {
+ *slot = NonMaxUsize::new(m.start());
+ }
+ if let Some(slot) = slots.get_mut(slot_end) {
+ *slot = NonMaxUsize::new(m.end());
+ }
+}
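+
+// A minimal sketch exercising `copy_match_to_slots` above: the implicit
+// group for pattern `i` occupies slots `2*i` (start) and `2*i + 1` (end),
+// so a match for pattern 1 fills slots 2 and 3 and leaves the rest alone.
+#[cfg(test)]
+mod copy_match_to_slots_sketch {
+    use crate::{util::primitives::NonMaxUsize, Match, PatternID};
+
+    #[test]
+    fn implicit_group_slots() {
+        let m = Match::new(PatternID::must(1), 3..7);
+        let mut slots: [Option<NonMaxUsize>; 6] = [None; 6];
+        super::copy_match_to_slots(m, &mut slots);
+        assert_eq!(NonMaxUsize::new(3), slots[2]);
+        assert_eq!(NonMaxUsize::new(7), slots[3]);
+        assert_eq!(None, slots[0]);
+    }
+}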
diff --git a/third_party/rust/regex-automata/src/meta/wrappers.rs b/third_party/rust/regex-automata/src/meta/wrappers.rs
new file mode 100644
index 0000000000..08110d9bb8
--- /dev/null
+++ b/third_party/rust/regex-automata/src/meta/wrappers.rs
@@ -0,0 +1,1348 @@
+/*!
+This module contains a boat load of wrappers around each of our internal regex
+engines. They encapsulate a few things:
+
+1. The wrappers manage the conditional existence of the regex engine. Namely,
+the PikeVM is the only required regex engine. The rest are optional. These
+wrappers present a uniform API regardless of which engines are available. And
+availability might be determined by compile time features or by dynamic
+configuration via `meta::Config`. Encapsulating the conditional compilation
+features is in particular a huge simplification for the higher level code that
+composes these engines.
+2. The wrappers manage construction of each engine, including skipping it if
+the engine is unavailable or configured to not be used.
+3. The wrappers manage whether an engine *can* be used for a particular
+search configuration. For example, `BoundedBacktracker::get` only returns a
+backtracking engine when the haystack is no bigger than the maximum supported
+length. The wrappers also sometimes take a position on when an engine *ought*
+to be used, but only in cases where the logic is extremely local to the engine
+itself. Otherwise, things like "choose between the backtracker and the one-pass
+DFA" are managed by the higher level meta strategy code.
+
+There are also corresponding wrappers for the various `Cache` types for each
+regex engine that needs them. If an engine is unavailable or not used, then a
+cache for it will *not* actually be allocated.
+*/
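+
+// As a rough roadmap, the optional engines below all follow the same shape
+// (the PikeVM, being the one required engine, is the exception); the lazy
+// DFA wrapper defined later in this file is representative:
+//
+//     pub(crate) struct Hybrid(Option<HybridEngine>);
+//
+//     pub(crate) struct HybridEngine(
+//         #[cfg(feature = "hybrid")] hybrid::regex::Regex,
+//         #[cfg(not(feature = "hybrid"))] (),
+//     );
+//
+// Construction returns `None` instead of an error when an engine is disabled
+// or unavailable, and `get` decides at search time whether the engine may be
+// used for a particular `Input`.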
+
+use alloc::vec::Vec;
+
+use crate::{
+ meta::{
+ error::{BuildError, RetryError, RetryFailError},
+ regex::RegexInfo,
+ },
+ nfa::thompson::{pikevm, NFA},
+ util::{prefilter::Prefilter, primitives::NonMaxUsize},
+ HalfMatch, Input, Match, MatchKind, PatternID, PatternSet,
+};
+
+#[cfg(feature = "dfa-build")]
+use crate::dfa;
+#[cfg(feature = "dfa-onepass")]
+use crate::dfa::onepass;
+#[cfg(feature = "hybrid")]
+use crate::hybrid;
+#[cfg(feature = "nfa-backtrack")]
+use crate::nfa::thompson::backtrack;
+
+#[derive(Debug)]
+pub(crate) struct PikeVM(PikeVMEngine);
+
+impl PikeVM {
+ pub(crate) fn new(
+ info: &RegexInfo,
+ pre: Option<Prefilter>,
+ nfa: &NFA,
+ ) -> Result<PikeVM, BuildError> {
+ PikeVMEngine::new(info, pre, nfa).map(PikeVM)
+ }
+
+ pub(crate) fn create_cache(&self) -> PikeVMCache {
+ PikeVMCache::new(self)
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn get(&self) -> &PikeVMEngine {
+ &self.0
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct PikeVMEngine(pikevm::PikeVM);
+
+impl PikeVMEngine {
+ pub(crate) fn new(
+ info: &RegexInfo,
+ pre: Option<Prefilter>,
+ nfa: &NFA,
+ ) -> Result<PikeVMEngine, BuildError> {
+ let pikevm_config = pikevm::Config::new()
+ .match_kind(info.config().get_match_kind())
+ .prefilter(pre);
+ let engine = pikevm::Builder::new()
+ .configure(pikevm_config)
+ .build_from_nfa(nfa.clone())
+ .map_err(BuildError::nfa)?;
+ debug!("PikeVM built");
+ Ok(PikeVMEngine(engine))
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn is_match(
+ &self,
+ cache: &mut PikeVMCache,
+ input: &Input<'_>,
+ ) -> bool {
+ self.0.is_match(cache.0.as_mut().unwrap(), input.clone())
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn search_slots(
+ &self,
+ cache: &mut PikeVMCache,
+ input: &Input<'_>,
+ slots: &mut [Option<NonMaxUsize>],
+ ) -> Option<PatternID> {
+ self.0.search_slots(cache.0.as_mut().unwrap(), input, slots)
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn which_overlapping_matches(
+ &self,
+ cache: &mut PikeVMCache,
+ input: &Input<'_>,
+ patset: &mut PatternSet,
+ ) {
+ self.0.which_overlapping_matches(
+ cache.0.as_mut().unwrap(),
+ input,
+ patset,
+ )
+ }
+}
+
+#[derive(Clone, Debug)]
+pub(crate) struct PikeVMCache(Option<pikevm::Cache>);
+
+impl PikeVMCache {
+ pub(crate) fn none() -> PikeVMCache {
+ PikeVMCache(None)
+ }
+
+ pub(crate) fn new(builder: &PikeVM) -> PikeVMCache {
+ PikeVMCache(Some(builder.get().0.create_cache()))
+ }
+
+ pub(crate) fn reset(&mut self, builder: &PikeVM) {
+ self.0.as_mut().unwrap().reset(&builder.get().0);
+ }
+
+ pub(crate) fn memory_usage(&self) -> usize {
+ self.0.as_ref().map_or(0, |c| c.memory_usage())
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct BoundedBacktracker(Option<BoundedBacktrackerEngine>);
+
+impl BoundedBacktracker {
+ pub(crate) fn new(
+ info: &RegexInfo,
+ pre: Option<Prefilter>,
+ nfa: &NFA,
+ ) -> Result<BoundedBacktracker, BuildError> {
+ BoundedBacktrackerEngine::new(info, pre, nfa).map(BoundedBacktracker)
+ }
+
+ pub(crate) fn create_cache(&self) -> BoundedBacktrackerCache {
+ BoundedBacktrackerCache::new(self)
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn get(
+ &self,
+ input: &Input<'_>,
+ ) -> Option<&BoundedBacktrackerEngine> {
+ let engine = self.0.as_ref()?;
+ // It is difficult to make the backtracker give up early if it is
+ // guaranteed to eventually wind up in a match state. This is because
+ // of the greedy nature of a backtracker: it just blindly mushes
+ // forward. Every other regex engine is able to give up more quickly,
+ // so even if the backtracker might be able to zip through faster than
+ // (say) the PikeVM, we prefer the theoretical benefit that some other
+ // engine might be able to scan much less of the haystack than the
+ // backtracker.
+ //
+ // Now, if the haystack is really short already, then we allow the
+ // backtracker to run. (This hasn't been litigated quantitatively with
+ // benchmarks. Just a hunch.)
+ if input.get_earliest() && input.haystack().len() > 128 {
+ return None;
+ }
+ // If the backtracker is just going to return an error because the
+ // haystack is too long, then obviously do not use it.
+ if input.get_span().len() > engine.max_haystack_len() {
+ return None;
+ }
+ Some(engine)
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct BoundedBacktrackerEngine(
+ #[cfg(feature = "nfa-backtrack")] backtrack::BoundedBacktracker,
+ #[cfg(not(feature = "nfa-backtrack"))] (),
+);
+
+impl BoundedBacktrackerEngine {
+ pub(crate) fn new(
+ info: &RegexInfo,
+ pre: Option<Prefilter>,
+ nfa: &NFA,
+ ) -> Result<Option<BoundedBacktrackerEngine>, BuildError> {
+ #[cfg(feature = "nfa-backtrack")]
+ {
+ if !info.config().get_backtrack()
+ || info.config().get_match_kind() != MatchKind::LeftmostFirst
+ {
+ return Ok(None);
+ }
+ let backtrack_config = backtrack::Config::new().prefilter(pre);
+ let engine = backtrack::Builder::new()
+ .configure(backtrack_config)
+ .build_from_nfa(nfa.clone())
+ .map_err(BuildError::nfa)?;
+ debug!("BoundedBacktracker built");
+ Ok(Some(BoundedBacktrackerEngine(engine)))
+ }
+ #[cfg(not(feature = "nfa-backtrack"))]
+ {
+ Ok(None)
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn is_match(
+ &self,
+ cache: &mut BoundedBacktrackerCache,
+ input: &Input<'_>,
+ ) -> bool {
+ #[cfg(feature = "nfa-backtrack")]
+ {
+ // OK because we only permit access to this engine when we know
+ // the haystack is short enough for the backtracker to run without
+ // reporting an error.
+ self.0
+ .try_is_match(cache.0.as_mut().unwrap(), input.clone())
+ .unwrap()
+ }
+ #[cfg(not(feature = "nfa-backtrack"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn search_slots(
+ &self,
+ cache: &mut BoundedBacktrackerCache,
+ input: &Input<'_>,
+ slots: &mut [Option<NonMaxUsize>],
+ ) -> Option<PatternID> {
+ #[cfg(feature = "nfa-backtrack")]
+ {
+ // OK because we only permit access to this engine when we know
+ // the haystack is short enough for the backtracker to run without
+ // reporting an error.
+ self.0
+ .try_search_slots(cache.0.as_mut().unwrap(), input, slots)
+ .unwrap()
+ }
+ #[cfg(not(feature = "nfa-backtrack"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn max_haystack_len(&self) -> usize {
+ #[cfg(feature = "nfa-backtrack")]
+ {
+ self.0.max_haystack_len()
+ }
+ #[cfg(not(feature = "nfa-backtrack"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+}
+
+#[derive(Clone, Debug)]
+pub(crate) struct BoundedBacktrackerCache(
+ #[cfg(feature = "nfa-backtrack")] Option<backtrack::Cache>,
+ #[cfg(not(feature = "nfa-backtrack"))] (),
+);
+
+impl BoundedBacktrackerCache {
+ pub(crate) fn none() -> BoundedBacktrackerCache {
+ #[cfg(feature = "nfa-backtrack")]
+ {
+ BoundedBacktrackerCache(None)
+ }
+ #[cfg(not(feature = "nfa-backtrack"))]
+ {
+ BoundedBacktrackerCache(())
+ }
+ }
+
+ pub(crate) fn new(
+ builder: &BoundedBacktracker,
+ ) -> BoundedBacktrackerCache {
+ #[cfg(feature = "nfa-backtrack")]
+ {
+ BoundedBacktrackerCache(
+ builder.0.as_ref().map(|e| e.0.create_cache()),
+ )
+ }
+ #[cfg(not(feature = "nfa-backtrack"))]
+ {
+ BoundedBacktrackerCache(())
+ }
+ }
+
+ pub(crate) fn reset(&mut self, builder: &BoundedBacktracker) {
+ #[cfg(feature = "nfa-backtrack")]
+ if let Some(ref e) = builder.0 {
+ self.0.as_mut().unwrap().reset(&e.0);
+ }
+ }
+
+ pub(crate) fn memory_usage(&self) -> usize {
+ #[cfg(feature = "nfa-backtrack")]
+ {
+ self.0.as_ref().map_or(0, |c| c.memory_usage())
+ }
+ #[cfg(not(feature = "nfa-backtrack"))]
+ {
+ 0
+ }
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct OnePass(Option<OnePassEngine>);
+
+impl OnePass {
+ pub(crate) fn new(info: &RegexInfo, nfa: &NFA) -> OnePass {
+ OnePass(OnePassEngine::new(info, nfa))
+ }
+
+ pub(crate) fn create_cache(&self) -> OnePassCache {
+ OnePassCache::new(self)
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn get(&self, input: &Input<'_>) -> Option<&OnePassEngine> {
+ let engine = self.0.as_ref()?;
+ if !input.get_anchored().is_anchored()
+ && !engine.get_nfa().is_always_start_anchored()
+ {
+ return None;
+ }
+ Some(engine)
+ }
+
+ pub(crate) fn memory_usage(&self) -> usize {
+ self.0.as_ref().map_or(0, |e| e.memory_usage())
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct OnePassEngine(
+ #[cfg(feature = "dfa-onepass")] onepass::DFA,
+ #[cfg(not(feature = "dfa-onepass"))] (),
+);
+
+impl OnePassEngine {
+ pub(crate) fn new(info: &RegexInfo, nfa: &NFA) -> Option<OnePassEngine> {
+ #[cfg(feature = "dfa-onepass")]
+ {
+ if !info.config().get_onepass() {
+ return None;
+ }
+ // In order to even attempt building a one-pass DFA, we require
+ // that we either have at least one explicit capturing group or
+ // there's a Unicode word boundary somewhere. If we don't have
+ // either of these things, then the lazy DFA will almost certainly
+ // be usable and much faster. The only case where it might
+ // not is if the lazy DFA isn't utilizing its cache effectively,
+ // but in those cases, the underlying regex is almost certainly
+ // not one-pass or is too big to fit within the current one-pass
+ // implementation limits.
+ if info.props_union().explicit_captures_len() == 0
+ && !info.props_union().look_set().contains_word_unicode()
+ {
+ debug!("not building OnePass because it isn't worth it");
+ return None;
+ }
+ let onepass_config = onepass::Config::new()
+ .match_kind(info.config().get_match_kind())
+ // Like for the lazy DFA, we unconditionally enable this
+ // because it doesn't cost much and makes the API more
+ // flexible.
+ .starts_for_each_pattern(true)
+ .byte_classes(info.config().get_byte_classes())
+ .size_limit(info.config().get_onepass_size_limit());
+ let result = onepass::Builder::new()
+ .configure(onepass_config)
+ .build_from_nfa(nfa.clone());
+ let engine = match result {
+ Ok(engine) => engine,
+ Err(_err) => {
+ debug!("OnePass failed to build: {}", _err);
+ return None;
+ }
+ };
+ debug!("OnePass built, {} bytes", engine.memory_usage());
+ Some(OnePassEngine(engine))
+ }
+ #[cfg(not(feature = "dfa-onepass"))]
+ {
+ None
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn search_slots(
+ &self,
+ cache: &mut OnePassCache,
+ input: &Input<'_>,
+ slots: &mut [Option<NonMaxUsize>],
+ ) -> Option<PatternID> {
+ #[cfg(feature = "dfa-onepass")]
+ {
+ // OK because we only permit getting a OnePassEngine when we know
+ // the search is anchored and thus an error cannot occur.
+ self.0
+ .try_search_slots(cache.0.as_mut().unwrap(), input, slots)
+ .unwrap()
+ }
+ #[cfg(not(feature = "dfa-onepass"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+
+ pub(crate) fn memory_usage(&self) -> usize {
+ #[cfg(feature = "dfa-onepass")]
+ {
+ self.0.memory_usage()
+ }
+ #[cfg(not(feature = "dfa-onepass"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ fn get_nfa(&self) -> &NFA {
+ #[cfg(feature = "dfa-onepass")]
+ {
+ self.0.get_nfa()
+ }
+ #[cfg(not(feature = "dfa-onepass"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+}
+
+#[derive(Clone, Debug)]
+pub(crate) struct OnePassCache(
+ #[cfg(feature = "dfa-onepass")] Option<onepass::Cache>,
+ #[cfg(not(feature = "dfa-onepass"))] (),
+);
+
+impl OnePassCache {
+ pub(crate) fn none() -> OnePassCache {
+ #[cfg(feature = "dfa-onepass")]
+ {
+ OnePassCache(None)
+ }
+ #[cfg(not(feature = "dfa-onepass"))]
+ {
+ OnePassCache(())
+ }
+ }
+
+ pub(crate) fn new(builder: &OnePass) -> OnePassCache {
+ #[cfg(feature = "dfa-onepass")]
+ {
+ OnePassCache(builder.0.as_ref().map(|e| e.0.create_cache()))
+ }
+ #[cfg(not(feature = "dfa-onepass"))]
+ {
+ OnePassCache(())
+ }
+ }
+
+ pub(crate) fn reset(&mut self, builder: &OnePass) {
+ #[cfg(feature = "dfa-onepass")]
+ if let Some(ref e) = builder.0 {
+ self.0.as_mut().unwrap().reset(&e.0);
+ }
+ }
+
+ pub(crate) fn memory_usage(&self) -> usize {
+ #[cfg(feature = "dfa-onepass")]
+ {
+ self.0.as_ref().map_or(0, |c| c.memory_usage())
+ }
+ #[cfg(not(feature = "dfa-onepass"))]
+ {
+ 0
+ }
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct Hybrid(Option<HybridEngine>);
+
+impl Hybrid {
+ pub(crate) fn none() -> Hybrid {
+ Hybrid(None)
+ }
+
+ pub(crate) fn new(
+ info: &RegexInfo,
+ pre: Option<Prefilter>,
+ nfa: &NFA,
+ nfarev: &NFA,
+ ) -> Hybrid {
+ Hybrid(HybridEngine::new(info, pre, nfa, nfarev))
+ }
+
+ pub(crate) fn create_cache(&self) -> HybridCache {
+ HybridCache::new(self)
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn get(&self, _input: &Input<'_>) -> Option<&HybridEngine> {
+ let engine = self.0.as_ref()?;
+ Some(engine)
+ }
+
+ pub(crate) fn is_some(&self) -> bool {
+ self.0.is_some()
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct HybridEngine(
+ #[cfg(feature = "hybrid")] hybrid::regex::Regex,
+ #[cfg(not(feature = "hybrid"))] (),
+);
+
+impl HybridEngine {
+ pub(crate) fn new(
+ info: &RegexInfo,
+ pre: Option<Prefilter>,
+ nfa: &NFA,
+ nfarev: &NFA,
+ ) -> Option<HybridEngine> {
+ #[cfg(feature = "hybrid")]
+ {
+ if !info.config().get_hybrid() {
+ return None;
+ }
+ let dfa_config = hybrid::dfa::Config::new()
+ .match_kind(info.config().get_match_kind())
+ .prefilter(pre.clone())
+ // Enabling this is necessary for ensuring we can service any
+ // kind of 'Input' search without error. For the lazy DFA,
+ // this is not particularly costly, since the start states are
+ // generated lazily.
+ .starts_for_each_pattern(true)
+ .byte_classes(info.config().get_byte_classes())
+ .unicode_word_boundary(true)
+ .specialize_start_states(pre.is_some())
+ .cache_capacity(info.config().get_hybrid_cache_capacity())
+ // This makes it possible for building a lazy DFA to
+ // fail even though the NFA has already been built. Namely,
+ // if the cache capacity is too small to fit some minimum
+ // number of states (which is small, like 4 or 5), then the
+ // DFA will refuse to build.
+ //
+ // We shouldn't enable this to make building always work, since
+ // this could cause the allocation of a cache bigger than the
+ // provided capacity amount.
+ //
+ // This is effectively the only reason why building a lazy DFA
+ // could fail. If it does, then we simply suppress the error
+ // and return None.
+ .skip_cache_capacity_check(false)
+ // This and enabling heuristic Unicode word boundary support
+ // above make it so the lazy DFA can quit at match time.
+ .minimum_cache_clear_count(Some(3))
+ .minimum_bytes_per_state(Some(10));
+ let result = hybrid::dfa::Builder::new()
+ .configure(dfa_config.clone())
+ .build_from_nfa(nfa.clone());
+ let fwd = match result {
+ Ok(fwd) => fwd,
+ Err(_err) => {
+ debug!("forward lazy DFA failed to build: {}", _err);
+ return None;
+ }
+ };
+ let result = hybrid::dfa::Builder::new()
+ .configure(
+ dfa_config
+ .clone()
+ .match_kind(MatchKind::All)
+ .prefilter(None)
+ .specialize_start_states(false),
+ )
+ .build_from_nfa(nfarev.clone());
+ let rev = match result {
+ Ok(rev) => rev,
+ Err(_err) => {
+ debug!("reverse lazy DFA failed to build: {}", _err);
+ return None;
+ }
+ };
+ let engine =
+ hybrid::regex::Builder::new().build_from_dfas(fwd, rev);
+ debug!("lazy DFA built");
+ Some(HybridEngine(engine))
+ }
+ #[cfg(not(feature = "hybrid"))]
+ {
+ None
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn try_search(
+ &self,
+ cache: &mut HybridCache,
+ input: &Input<'_>,
+ ) -> Result<Option<Match>, RetryFailError> {
+ #[cfg(feature = "hybrid")]
+ {
+ let cache = cache.0.as_mut().unwrap();
+ self.0.try_search(cache, input).map_err(|e| e.into())
+ }
+ #[cfg(not(feature = "hybrid"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn try_search_half_fwd(
+ &self,
+ cache: &mut HybridCache,
+ input: &Input<'_>,
+ ) -> Result<Option<HalfMatch>, RetryFailError> {
+ #[cfg(feature = "hybrid")]
+ {
+ let fwd = self.0.forward();
+ let mut fwdcache = cache.0.as_mut().unwrap().as_parts_mut().0;
+ fwd.try_search_fwd(&mut fwdcache, input).map_err(|e| e.into())
+ }
+ #[cfg(not(feature = "hybrid"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn try_search_half_fwd_stopat(
+ &self,
+ cache: &mut HybridCache,
+ input: &Input<'_>,
+ ) -> Result<Result<HalfMatch, usize>, RetryFailError> {
+ #[cfg(feature = "hybrid")]
+ {
+ let dfa = self.0.forward();
+ let mut cache = cache.0.as_mut().unwrap().as_parts_mut().0;
+ crate::meta::stopat::hybrid_try_search_half_fwd(
+ dfa, &mut cache, input,
+ )
+ }
+ #[cfg(not(feature = "hybrid"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn try_search_half_rev(
+ &self,
+ cache: &mut HybridCache,
+ input: &Input<'_>,
+ ) -> Result<Option<HalfMatch>, RetryFailError> {
+ #[cfg(feature = "hybrid")]
+ {
+ let rev = self.0.reverse();
+ let mut revcache = cache.0.as_mut().unwrap().as_parts_mut().1;
+ rev.try_search_rev(&mut revcache, input).map_err(|e| e.into())
+ }
+ #[cfg(not(feature = "hybrid"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn try_search_half_rev_limited(
+ &self,
+ cache: &mut HybridCache,
+ input: &Input<'_>,
+ min_start: usize,
+ ) -> Result<Option<HalfMatch>, RetryError> {
+ #[cfg(feature = "hybrid")]
+ {
+ let dfa = self.0.reverse();
+ let mut cache = cache.0.as_mut().unwrap().as_parts_mut().1;
+ crate::meta::limited::hybrid_try_search_half_rev(
+ dfa, &mut cache, input, min_start,
+ )
+ }
+ #[cfg(not(feature = "hybrid"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+
+ #[inline]
+ pub(crate) fn try_which_overlapping_matches(
+ &self,
+ cache: &mut HybridCache,
+ input: &Input<'_>,
+ patset: &mut PatternSet,
+ ) -> Result<(), RetryFailError> {
+ #[cfg(feature = "hybrid")]
+ {
+ let fwd = self.0.forward();
+ let mut fwdcache = cache.0.as_mut().unwrap().as_parts_mut().0;
+ fwd.try_which_overlapping_matches(&mut fwdcache, input, patset)
+ .map_err(|e| e.into())
+ }
+ #[cfg(not(feature = "hybrid"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+}
+
+#[derive(Clone, Debug)]
+pub(crate) struct HybridCache(
+ #[cfg(feature = "hybrid")] Option<hybrid::regex::Cache>,
+ #[cfg(not(feature = "hybrid"))] (),
+);
+
+impl HybridCache {
+ pub(crate) fn none() -> HybridCache {
+ #[cfg(feature = "hybrid")]
+ {
+ HybridCache(None)
+ }
+ #[cfg(not(feature = "hybrid"))]
+ {
+ HybridCache(())
+ }
+ }
+
+ pub(crate) fn new(builder: &Hybrid) -> HybridCache {
+ #[cfg(feature = "hybrid")]
+ {
+ HybridCache(builder.0.as_ref().map(|e| e.0.create_cache()))
+ }
+ #[cfg(not(feature = "hybrid"))]
+ {
+ HybridCache(())
+ }
+ }
+
+ pub(crate) fn reset(&mut self, builder: &Hybrid) {
+ #[cfg(feature = "hybrid")]
+ if let Some(ref e) = builder.0 {
+ self.0.as_mut().unwrap().reset(&e.0);
+ }
+ }
+
+ pub(crate) fn memory_usage(&self) -> usize {
+ #[cfg(feature = "hybrid")]
+ {
+ self.0.as_ref().map_or(0, |c| c.memory_usage())
+ }
+ #[cfg(not(feature = "hybrid"))]
+ {
+ 0
+ }
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct DFA(Option<DFAEngine>);
+
+impl DFA {
+ pub(crate) fn none() -> DFA {
+ DFA(None)
+ }
+
+ pub(crate) fn new(
+ info: &RegexInfo,
+ pre: Option<Prefilter>,
+ nfa: &NFA,
+ nfarev: &NFA,
+ ) -> DFA {
+ DFA(DFAEngine::new(info, pre, nfa, nfarev))
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn get(&self, _input: &Input<'_>) -> Option<&DFAEngine> {
+ let engine = self.0.as_ref()?;
+ Some(engine)
+ }
+
+ pub(crate) fn is_some(&self) -> bool {
+ self.0.is_some()
+ }
+
+ pub(crate) fn memory_usage(&self) -> usize {
+ self.0.as_ref().map_or(0, |e| e.memory_usage())
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct DFAEngine(
+ #[cfg(feature = "dfa-build")] dfa::regex::Regex,
+ #[cfg(not(feature = "dfa-build"))] (),
+);
+
+impl DFAEngine {
+ pub(crate) fn new(
+ info: &RegexInfo,
+ pre: Option<Prefilter>,
+ nfa: &NFA,
+ nfarev: &NFA,
+ ) -> Option<DFAEngine> {
+ #[cfg(feature = "dfa-build")]
+ {
+ if !info.config().get_dfa() {
+ return None;
+ }
+ // If our NFA is anything but small, don't even bother with a DFA.
+ if let Some(state_limit) = info.config().get_dfa_state_limit() {
+ if nfa.states().len() > state_limit {
+ debug!(
+ "skipping full DFA because NFA has {} states, \
+ which exceeds the heuristic limit of {}",
+ nfa.states().len(),
+ state_limit,
+ );
+ return None;
+ }
+ }
+ // We cut the size limit in four because the total heap used by
+ // DFA construction is determinization aux memory and the DFA
+ // itself, and those things are configured independently in the
+ // lower level DFA builder API. And then split that in two because
+ // of forward and reverse DFAs.
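+ // For example, a configured DFA size limit of 40 MiB leaves roughly
+ // 10 MiB each for the forward DFA, its determinization scratch memory,
+ // the reverse DFA and its determinization scratch memory.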
+ let size_limit = info.config().get_dfa_size_limit().map(|n| n / 4);
+ let dfa_config = dfa::dense::Config::new()
+ .match_kind(info.config().get_match_kind())
+ .prefilter(pre.clone())
+ // Enabling this is necessary for ensuring we can service any
+ // kind of 'Input' search without error. For the full DFA, this
+ // can be quite costly. But since we have such a small bound
+ // on the size of the DFA, in practice, any multi-pattern regex is
+ // probably going to blow the limit anyway.
+ .starts_for_each_pattern(true)
+ .byte_classes(info.config().get_byte_classes())
+ .unicode_word_boundary(true)
+ .specialize_start_states(pre.is_some())
+ .determinize_size_limit(size_limit)
+ .dfa_size_limit(size_limit);
+ let result = dfa::dense::Builder::new()
+ .configure(dfa_config.clone())
+ .build_from_nfa(&nfa);
+ let fwd = match result {
+ Ok(fwd) => fwd,
+ Err(_err) => {
+ debug!("forward full DFA failed to build: {}", _err);
+ return None;
+ }
+ };
+ let result = dfa::dense::Builder::new()
+ .configure(
+ dfa_config
+ .clone()
+ // We never need unanchored reverse searches, so
+ // there's no point in building it into the DFA, which
+ // WILL take more space. (This isn't done for the lazy
+ // DFA because the DFA is, well, lazy. It doesn't pay
+ // the cost for supporting unanchored searches unless
+ // you actually do an unanchored search, which we
+ // don't.)
+ .start_kind(dfa::StartKind::Anchored)
+ .match_kind(MatchKind::All)
+ .prefilter(None)
+ .specialize_start_states(false),
+ )
+ .build_from_nfa(&nfarev);
+ let rev = match result {
+ Ok(rev) => rev,
+ Err(_err) => {
+ debug!("reverse full DFA failed to build: {}", _err);
+ return None;
+ }
+ };
+ let engine = dfa::regex::Builder::new().build_from_dfas(fwd, rev);
+ debug!(
+ "fully compiled forward and reverse DFAs built, {} bytes",
+ engine.forward().memory_usage()
+ + engine.reverse().memory_usage(),
+ );
+ Some(DFAEngine(engine))
+ }
+ #[cfg(not(feature = "dfa-build"))]
+ {
+ None
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn try_search(
+ &self,
+ input: &Input<'_>,
+ ) -> Result<Option<Match>, RetryFailError> {
+ #[cfg(feature = "dfa-build")]
+ {
+ self.0.try_search(input).map_err(|e| e.into())
+ }
+ #[cfg(not(feature = "dfa-build"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn try_search_half_fwd(
+ &self,
+ input: &Input<'_>,
+ ) -> Result<Option<HalfMatch>, RetryFailError> {
+ #[cfg(feature = "dfa-build")]
+ {
+ use crate::dfa::Automaton;
+ self.0.forward().try_search_fwd(input).map_err(|e| e.into())
+ }
+ #[cfg(not(feature = "dfa-build"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn try_search_half_fwd_stopat(
+ &self,
+ input: &Input<'_>,
+ ) -> Result<Result<HalfMatch, usize>, RetryFailError> {
+ #[cfg(feature = "dfa-build")]
+ {
+ let dfa = self.0.forward();
+ crate::meta::stopat::dfa_try_search_half_fwd(dfa, input)
+ }
+ #[cfg(not(feature = "dfa-build"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn try_search_half_rev(
+ &self,
+ input: &Input<'_>,
+ ) -> Result<Option<HalfMatch>, RetryFailError> {
+ #[cfg(feature = "dfa-build")]
+ {
+ use crate::dfa::Automaton;
+ self.0.reverse().try_search_rev(&input).map_err(|e| e.into())
+ }
+ #[cfg(not(feature = "dfa-build"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn try_search_half_rev_limited(
+ &self,
+ input: &Input<'_>,
+ min_start: usize,
+ ) -> Result<Option<HalfMatch>, RetryError> {
+ #[cfg(feature = "dfa-build")]
+ {
+ let dfa = self.0.reverse();
+ crate::meta::limited::dfa_try_search_half_rev(
+ dfa, input, min_start,
+ )
+ }
+ #[cfg(not(feature = "dfa-build"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+
+ #[inline]
+ pub(crate) fn try_which_overlapping_matches(
+ &self,
+ input: &Input<'_>,
+ patset: &mut PatternSet,
+ ) -> Result<(), RetryFailError> {
+ #[cfg(feature = "dfa-build")]
+ {
+ use crate::dfa::Automaton;
+ self.0
+ .forward()
+ .try_which_overlapping_matches(input, patset)
+ .map_err(|e| e.into())
+ }
+ #[cfg(not(feature = "dfa-build"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+
+ pub(crate) fn memory_usage(&self) -> usize {
+ #[cfg(feature = "dfa-build")]
+ {
+ self.0.forward().memory_usage() + self.0.reverse().memory_usage()
+ }
+ #[cfg(not(feature = "dfa-build"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct ReverseHybrid(Option<ReverseHybridEngine>);
+
+impl ReverseHybrid {
+ pub(crate) fn none() -> ReverseHybrid {
+ ReverseHybrid(None)
+ }
+
+ pub(crate) fn new(info: &RegexInfo, nfarev: &NFA) -> ReverseHybrid {
+ ReverseHybrid(ReverseHybridEngine::new(info, nfarev))
+ }
+
+ pub(crate) fn create_cache(&self) -> ReverseHybridCache {
+ ReverseHybridCache::new(self)
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn get(
+ &self,
+ _input: &Input<'_>,
+ ) -> Option<&ReverseHybridEngine> {
+ let engine = self.0.as_ref()?;
+ Some(engine)
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct ReverseHybridEngine(
+ #[cfg(feature = "hybrid")] hybrid::dfa::DFA,
+ #[cfg(not(feature = "hybrid"))] (),
+);
+
+impl ReverseHybridEngine {
+ pub(crate) fn new(
+ info: &RegexInfo,
+ nfarev: &NFA,
+ ) -> Option<ReverseHybridEngine> {
+ #[cfg(feature = "hybrid")]
+ {
+ if !info.config().get_hybrid() {
+ return None;
+ }
+ // Since we only use this for reverse searches, we can hard-code
+ // a number of things like match semantics, prefilters, starts
+ // for each pattern and so on.
+ let dfa_config = hybrid::dfa::Config::new()
+ .match_kind(MatchKind::All)
+ .prefilter(None)
+ .starts_for_each_pattern(false)
+ .byte_classes(info.config().get_byte_classes())
+ .unicode_word_boundary(true)
+ .specialize_start_states(false)
+ .cache_capacity(info.config().get_hybrid_cache_capacity())
+ .skip_cache_capacity_check(false)
+ .minimum_cache_clear_count(Some(3))
+ .minimum_bytes_per_state(Some(10));
+ let result = hybrid::dfa::Builder::new()
+ .configure(dfa_config)
+ .build_from_nfa(nfarev.clone());
+ let rev = match result {
+ Ok(rev) => rev,
+ Err(_err) => {
+ debug!("lazy reverse DFA failed to build: {}", _err);
+ return None;
+ }
+ };
+ debug!("lazy reverse DFA built");
+ Some(ReverseHybridEngine(rev))
+ }
+ #[cfg(not(feature = "hybrid"))]
+ {
+ None
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn try_search_half_rev_limited(
+ &self,
+ cache: &mut ReverseHybridCache,
+ input: &Input<'_>,
+ min_start: usize,
+ ) -> Result<Option<HalfMatch>, RetryError> {
+ #[cfg(feature = "hybrid")]
+ {
+ let dfa = &self.0;
+ let mut cache = cache.0.as_mut().unwrap();
+ crate::meta::limited::hybrid_try_search_half_rev(
+ dfa, &mut cache, input, min_start,
+ )
+ }
+ #[cfg(not(feature = "hybrid"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+}
+
+#[derive(Clone, Debug)]
+pub(crate) struct ReverseHybridCache(
+ #[cfg(feature = "hybrid")] Option<hybrid::dfa::Cache>,
+ #[cfg(not(feature = "hybrid"))] (),
+);
+
+impl ReverseHybridCache {
+ pub(crate) fn none() -> ReverseHybridCache {
+ #[cfg(feature = "hybrid")]
+ {
+ ReverseHybridCache(None)
+ }
+ #[cfg(not(feature = "hybrid"))]
+ {
+ ReverseHybridCache(())
+ }
+ }
+
+ pub(crate) fn new(builder: &ReverseHybrid) -> ReverseHybridCache {
+ #[cfg(feature = "hybrid")]
+ {
+ ReverseHybridCache(builder.0.as_ref().map(|e| e.0.create_cache()))
+ }
+ #[cfg(not(feature = "hybrid"))]
+ {
+ ReverseHybridCache(())
+ }
+ }
+
+ pub(crate) fn reset(&mut self, builder: &ReverseHybrid) {
+ #[cfg(feature = "hybrid")]
+ if let Some(ref e) = builder.0 {
+ self.0.as_mut().unwrap().reset(&e.0);
+ }
+ }
+
+ pub(crate) fn memory_usage(&self) -> usize {
+ #[cfg(feature = "hybrid")]
+ {
+ self.0.as_ref().map_or(0, |c| c.memory_usage())
+ }
+ #[cfg(not(feature = "hybrid"))]
+ {
+ 0
+ }
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct ReverseDFA(Option<ReverseDFAEngine>);
+
+impl ReverseDFA {
+ pub(crate) fn none() -> ReverseDFA {
+ ReverseDFA(None)
+ }
+
+ pub(crate) fn new(info: &RegexInfo, nfarev: &NFA) -> ReverseDFA {
+ ReverseDFA(ReverseDFAEngine::new(info, nfarev))
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn get(&self, _input: &Input<'_>) -> Option<&ReverseDFAEngine> {
+ let engine = self.0.as_ref()?;
+ Some(engine)
+ }
+
+ pub(crate) fn is_some(&self) -> bool {
+ self.0.is_some()
+ }
+
+ pub(crate) fn memory_usage(&self) -> usize {
+ self.0.as_ref().map_or(0, |e| e.memory_usage())
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct ReverseDFAEngine(
+ #[cfg(feature = "dfa-build")] dfa::dense::DFA<Vec<u32>>,
+ #[cfg(not(feature = "dfa-build"))] (),
+);
+
+impl ReverseDFAEngine {
+ pub(crate) fn new(
+ info: &RegexInfo,
+ nfarev: &NFA,
+ ) -> Option<ReverseDFAEngine> {
+ #[cfg(feature = "dfa-build")]
+ {
+ if !info.config().get_dfa() {
+ return None;
+ }
+ // If our NFA is anything but small, don't even bother with a DFA.
+ if let Some(state_limit) = info.config().get_dfa_state_limit() {
+ if nfarev.states().len() > state_limit {
+ debug!(
+ "skipping full reverse DFA because NFA has {} states, \
+ which exceeds the heuristic limit of {}",
+ nfarev.states().len(),
+ state_limit,
+ );
+ return None;
+ }
+ }
+ // We cut the size limit in two because the total heap used by DFA
+ // construction is determinization aux memory and the DFA itself,
+ // and those things are configured independently in the lower level
+ // DFA builder API.
+ let size_limit = info.config().get_dfa_size_limit().map(|n| n / 2);
+ // Since we only use this for reverse searches, we can hard-code
+ // a number of things like match semantics, prefilters, starts
+ // for each pattern and so on. We also disable acceleration since
+ // it's incompatible with limited searches (which is the only
+ // operation we support for this kind of engine at the moment).
+ let dfa_config = dfa::dense::Config::new()
+ .match_kind(MatchKind::All)
+ .prefilter(None)
+ .accelerate(false)
+ .start_kind(dfa::StartKind::Anchored)
+ .starts_for_each_pattern(false)
+ .byte_classes(info.config().get_byte_classes())
+ .unicode_word_boundary(true)
+ .specialize_start_states(false)
+ .determinize_size_limit(size_limit)
+ .dfa_size_limit(size_limit);
+ let result = dfa::dense::Builder::new()
+ .configure(dfa_config)
+ .build_from_nfa(&nfarev);
+ let rev = match result {
+ Ok(rev) => rev,
+ Err(_err) => {
+ debug!("full reverse DFA failed to build: {}", _err);
+ return None;
+ }
+ };
+ debug!(
+ "fully compiled reverse DFA built, {} bytes",
+ rev.memory_usage()
+ );
+ Some(ReverseDFAEngine(rev))
+ }
+ #[cfg(not(feature = "dfa-build"))]
+ {
+ None
+ }
+ }
+
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub(crate) fn try_search_half_rev_limited(
+ &self,
+ input: &Input<'_>,
+ min_start: usize,
+ ) -> Result<Option<HalfMatch>, RetryError> {
+ #[cfg(feature = "dfa-build")]
+ {
+ let dfa = &self.0;
+ crate::meta::limited::dfa_try_search_half_rev(
+ dfa, input, min_start,
+ )
+ }
+ #[cfg(not(feature = "dfa-build"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+
+ pub(crate) fn memory_usage(&self) -> usize {
+ #[cfg(feature = "dfa-build")]
+ {
+ self.0.memory_usage()
+ }
+ #[cfg(not(feature = "dfa-build"))]
+ {
+ // Impossible to reach because this engine is never constructed
+ // if the requisite features aren't enabled.
+ unreachable!()
+ }
+ }
+}