Mirror of https://github.com/github/codeql.git, synced 2025-12-17 01:03:14 +01:00

Merge pull request #9422 from erik-krogh/refacReDoS

Refactorizations of the ReDoS libraries
@@ -485,22 +485,27 @@
      "ruby/ql/lib/codeql/ruby/security/internal/SensitiveDataHeuristics.qll"
    ],
    "ReDoS Util Python/JS/Ruby/Java": [
      "javascript/ql/lib/semmle/javascript/security/performance/ReDoSUtil.qll",
      "python/ql/lib/semmle/python/security/performance/ReDoSUtil.qll",
      "ruby/ql/lib/codeql/ruby/security/performance/ReDoSUtil.qll",
      "java/ql/lib/semmle/code/java/security/performance/ReDoSUtil.qll"
      "javascript/ql/lib/semmle/javascript/security/regexp/NfaUtils.qll",
      "python/ql/lib/semmle/python/security/regexp/NfaUtils.qll",
      "ruby/ql/lib/codeql/ruby/security/regexp/NfaUtils.qll",
      "java/ql/lib/semmle/code/java/security/regexp/NfaUtils.qll"
    ],
    "ReDoS Exponential Python/JS/Ruby/Java": [
      "javascript/ql/lib/semmle/javascript/security/performance/ExponentialBackTracking.qll",
      "python/ql/lib/semmle/python/security/performance/ExponentialBackTracking.qll",
      "ruby/ql/lib/codeql/ruby/security/performance/ExponentialBackTracking.qll",
      "java/ql/lib/semmle/code/java/security/performance/ExponentialBackTracking.qll"
      "javascript/ql/lib/semmle/javascript/security/regexp/ExponentialBackTracking.qll",
      "python/ql/lib/semmle/python/security/regexp/ExponentialBackTracking.qll",
      "ruby/ql/lib/codeql/ruby/security/regexp/ExponentialBackTracking.qll",
      "java/ql/lib/semmle/code/java/security/regexp/ExponentialBackTracking.qll"
    ],
    "ReDoS Polynomial Python/JS/Ruby/Java": [
      "javascript/ql/lib/semmle/javascript/security/performance/SuperlinearBackTracking.qll",
      "python/ql/lib/semmle/python/security/performance/SuperlinearBackTracking.qll",
      "ruby/ql/lib/codeql/ruby/security/performance/SuperlinearBackTracking.qll",
      "java/ql/lib/semmle/code/java/security/performance/SuperlinearBackTracking.qll"
      "javascript/ql/lib/semmle/javascript/security/regexp/SuperlinearBackTracking.qll",
      "python/ql/lib/semmle/python/security/regexp/SuperlinearBackTracking.qll",
      "ruby/ql/lib/codeql/ruby/security/regexp/SuperlinearBackTracking.qll",
      "java/ql/lib/semmle/code/java/security/regexp/SuperlinearBackTracking.qll"
    ],
    "RegexpMatching Python/JS/Ruby": [
      "javascript/ql/lib/semmle/javascript/security/regexp/RegexpMatching.qll",
      "python/ql/lib/semmle/python/security/regexp/RegexpMatching.qll",
      "ruby/ql/lib/codeql/ruby/security/regexp/RegexpMatching.qll"
    ],
    "BadTagFilterQuery Python/JS/Ruby": [
      "javascript/ql/lib/semmle/javascript/security/BadTagFilterQuery.qll",
java/ql/lib/change-notes/2022-05-25-redos-refac.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
category: deprecated
---
* The utility files previously in the `semmle.code.java.security.performance` package have been moved to the `semmle.code.java.security.regexp` package.
  The previous files still exist as deprecated aliases.
@@ -1,374 +1,4 @@
/**
 * This library implements the analysis described in the following two papers:
 *
 * James Kirrage, Asiri Rathnayake, Hayo Thielecke: Static Analysis for
 * Regular Expression Denial-of-Service Attacks. NSS 2013.
 * (http://www.cs.bham.ac.uk/~hxt/research/reg-exp-sec.pdf)
 * Asiri Rathnayake, Hayo Thielecke: Static Analysis for Regular Expression
 * Exponential Runtime via Substructural Logics. 2014.
 * (https://www.cs.bham.ac.uk/~hxt/research/redos_full.pdf)
 *
 * The basic idea is to search for overlapping cycles in the NFA, that is,
 * states `q` such that there are two distinct paths from `q` to itself
 * that consume the same word `w`.
 *
 * For any such state `q`, an attack string can be constructed as follows:
 * concatenate a prefix `v` that takes the NFA to `q` with `n` copies of
 * the word `w` that leads back to `q` along two different paths, followed
 * by a suffix `x` that is _not_ accepted in state `q`. A backtracking
 * implementation will need to explore at least 2^n different ways of going
 * from `q` back to itself while trying to match the `n` copies of `w`
 * before finally giving up.
 *
 * Now in order to identify overlapping cycles, all we have to do is find
 * pumpable forks, that is, states `q` that can transition to two different
 * states `r1` and `r2` on the same input symbol `c`, such that there are
 * paths from both `r1` and `r2` to `q` that consume the same word. The latter
 * condition is equivalent to saying that `(q, q)` is reachable from `(r1, r2)`
 * in the product NFA.
 *
 * This is what the library does. It makes a simple attempt to construct a
 * prefix `v` leading into `q`, but only to improve the alert message.
 * And the library tries to prove the existence of a suffix that ensures
 * rejection. This check might fail, which can cause false positives.
 *
 * Finally, sometimes it depends on the translation whether the NFA generated
 * for a regular expression has a pumpable fork or not. We implement one
 * particular translation, which may result in false positives or negatives
 * relative to some particular JavaScript engine.
 *
 * More precisely, the library constructs an NFA from a regular expression `r`
 * as follows:
 *
 * * Every sub-term `t` gives rise to an NFA state `Match(t,i)`, representing
 *   the state of the automaton before attempting to match the `i`th character in `t`.
 * * There is one accepting state `Accept(r)`.
 * * There is a special `AcceptAnySuffix(r)` state, which accepts any suffix string
 *   by using an epsilon transition to `Accept(r)` and an any transition to itself.
 * * Transitions between states may be labelled with epsilon, or an abstract
 *   input symbol.
 * * Each abstract input symbol represents a set of concrete input characters:
 *   either a single character, a set of characters represented by a
 *   character class, or the set of all characters.
 * * The product automaton is constructed lazily, starting with pair states
 *   `(q, q)` where `q` is a fork, and proceeding along an over-approximate
 *   step relation.
 * * The over-approximate step relation allows transitions along pairs of
 *   abstract input symbols where the symbols have overlap in the characters they accept.
 * * Once a trace of pairs of abstract input symbols that leads from a fork
 *   back to itself has been identified, we attempt to construct a concrete
 *   string corresponding to it, which may fail.
 * * Lastly we ensure that any state reached by repeating `n` copies of `w` has
 *   a suffix `x` (possibly empty) that is most likely __not__ accepted.
 */

/** DEPRECATED. Import `semmle.code.java.security.regexp.ExponentialBackTracking` instead. */

import ReDoSUtil

/**
 * Holds if state `s` might be inside a backtracking repetition.
 */
pragma[noinline]
private predicate stateInsideBacktracking(State s) {
  s.getRepr().getParent*() instanceof MaybeBacktrackingRepetition
}

/**
 * An infinitely repeating quantifier that might backtrack.
 */
private class MaybeBacktrackingRepetition extends InfiniteRepetitionQuantifier {
  MaybeBacktrackingRepetition() {
    exists(RegExpTerm child |
      child instanceof RegExpAlt or
      child instanceof RegExpQuantifier
    |
      child.getParent+() = this
    )
  }
}

/**
 * A state in the product automaton.
 */
private newtype TStatePair =
  /**
   * We lazily only construct those states that we are actually
   * going to need: `(q, q)` for every fork state `q`, and any
   * pair of states that can be reached from a pair that we have
   * already constructed. To cut down on the number of states,
   * we only represent states `(q1, q2)` where `q1` is lexicographically
   * no bigger than `q2`.
   *
   * States are only constructed if both states in the pair are
   * inside a repetition that might backtrack.
   */
  MkStatePair(State q1, State q2) {
    isFork(q1, _, _, _, _) and q2 = q1
    or
    (step(_, _, _, q1, q2) or step(_, _, _, q2, q1)) and
    rankState(q1) <= rankState(q2)
  }

/**
 * Gets a unique number for a `state`.
 * Is used to create an ordering of states, where states with the same `toString()` will be ordered differently.
 */
private int rankState(State state) {
  state =
    rank[result](State s, Location l |
      l = s.getRepr().getLocation()
    |
      s order by l.getStartLine(), l.getStartColumn(), s.toString()
    )
}

/**
 * A state in the product automaton.
 */
private class StatePair extends TStatePair {
  State q1;
  State q2;

  StatePair() { this = MkStatePair(q1, q2) }

  /** Gets a textual representation of this element. */
  string toString() { result = "(" + q1 + ", " + q2 + ")" }

  /** Gets the first component of the state pair. */
  State getLeft() { result = q1 }

  /** Gets the second component of the state pair. */
  State getRight() { result = q2 }
}

/**
 * Holds for `(fork, fork)` state pairs when `isFork(fork, _, _, _, _)` holds.
 *
 * Used in `statePairDistToFork`.
 */
private predicate isStatePairFork(StatePair p) {
  exists(State fork | p = MkStatePair(fork, fork) and isFork(fork, _, _, _, _))
}

/**
 * Holds if there are transitions from the components of `q` to the corresponding
 * components of `r`.
 *
 * Used in `statePairDistToFork`.
 */
private predicate reverseStep(StatePair r, StatePair q) { step(q, _, _, r) }

/**
 * Gets the minimum length of a path from `q` to `r` in the
 * product automaton.
 */
private int statePairDistToFork(StatePair q, StatePair r) =
  shortestDistances(isStatePairFork/1, reverseStep/2)(r, q, result)

/**
 * Holds if there are transitions from `q` to `r1` and from `q` to `r2`
 * labelled with `s1` and `s2`, respectively, where `s1` and `s2` do not
 * trivially have an empty intersection.
 *
 * This predicate only holds for states associated with regular expressions
 * that have at least one repetition quantifier in them (otherwise the
 * expression cannot be vulnerable to ReDoS attacks anyway).
 */
pragma[noopt]
private predicate isFork(State q, InputSymbol s1, InputSymbol s2, State r1, State r2) {
  stateInsideBacktracking(q) and
  exists(State q1, State q2 |
    q1 = epsilonSucc*(q) and
    delta(q1, s1, r1) and
    q2 = epsilonSucc*(q) and
    delta(q2, s2, r2) and
    // Use pragma[noopt] to prevent intersect(s1,s2) from being the starting point of the join.
    // From (s1,s2) it would find a huge number of intermediate state pairs (q1,q2) originating from different literals,
    // and discover at the end that no `q` can reach both `q1` and `q2` by epsilon transitions.
    exists(intersect(s1, s2))
  |
    s1 != s2
    or
    r1 != r2
    or
    r1 = r2 and q1 != q2
    or
    // If q can reach itself by epsilon transitions, then there are two distinct paths to the q1/q2 state:
    // one that uses the loop and one that doesn't. The engine will separately attempt to match with each path,
    // despite ending in the same state. The "fork" thus arises from the choice of whether to use the loop or not.
    // To avoid every state in the loop becoming a fork state,
    // we arbitrarily pick the InfiniteRepetitionQuantifier state as the canonical fork state for the loop
    // (every epsilon-loop must contain such a state).
    //
    // We additionally require that there exists another InfiniteRepetitionQuantifier `mid` on the path from `q` to itself.
    // This is done to avoid flagging regular expressions such as `/(a?)*b/`, which only has polynomial runtime and is detected by `js/polynomial-redos`.
    // The code below is therefore a heuristic that only flags regular expressions such as `/(a*)*b/`,
    // and does not flag regular expressions such as `/(a?b?)c/`, but the latter pattern is not used frequently.
    r1 = r2 and
    q1 = q2 and
    epsilonSucc+(q) = q and
    exists(RegExpTerm term | term = q.getRepr() | term instanceof InfiniteRepetitionQuantifier) and
    // One of the mid states is an infinite quantifier itself
    exists(State mid, RegExpTerm term |
      mid = epsilonSucc+(q) and
      term = mid.getRepr() and
      term instanceof InfiniteRepetitionQuantifier and
      q = epsilonSucc+(mid) and
      not mid = q
    )
  ) and
  stateInsideBacktracking(r1) and
  stateInsideBacktracking(r2)
}

/**
 * Gets the state pair `(q1, q2)` or `(q2, q1)`; note that only
 * one or the other is defined.
 */
private StatePair mkStatePair(State q1, State q2) {
  result = MkStatePair(q1, q2) or result = MkStatePair(q2, q1)
}

/**
 * Holds if there are transitions from the components of `q` to the corresponding
 * components of `r` labelled with `s1` and `s2`, respectively.
 */
private predicate step(StatePair q, InputSymbol s1, InputSymbol s2, StatePair r) {
  exists(State r1, State r2 | step(q, s1, s2, r1, r2) and r = mkStatePair(r1, r2))
}

/**
 * Holds if there are transitions from the components of `q` to `r1` and `r2`
 * labelled with `s1` and `s2`, respectively.
 *
 * We only consider transitions where the resulting states `(r1, r2)` are both
 * inside a repetition that might backtrack.
 */
pragma[noopt]
private predicate step(StatePair q, InputSymbol s1, InputSymbol s2, State r1, State r2) {
  exists(State q1, State q2 | q.getLeft() = q1 and q.getRight() = q2 |
    deltaClosed(q1, s1, r1) and
    deltaClosed(q2, s2, r2) and
    // use noopt to force the join on `intersect` to happen last.
    exists(intersect(s1, s2))
  ) and
  stateInsideBacktracking(r1) and
  stateInsideBacktracking(r2)
}

private newtype TTrace =
  Nil() or
  Step(InputSymbol s1, InputSymbol s2, TTrace t) { isReachableFromFork(_, _, s1, s2, t, _) }

/**
 * A list of pairs of input symbols that describe a path in the product automaton
 * starting from some fork state.
 */
private class Trace extends TTrace {
  /** Gets a textual representation of this element. */
  string toString() {
    this = Nil() and result = "Nil()"
    or
    exists(InputSymbol s1, InputSymbol s2, Trace t | this = Step(s1, s2, t) |
      result = "Step(" + s1 + ", " + s2 + ", " + t + ")"
    )
  }
}

/**
 * Holds if `r` is reachable from `(fork, fork)` under input `w`, and there is
 * a path from `r` back to `(fork, fork)` with `rem` steps.
 */
private predicate isReachableFromFork(State fork, StatePair r, Trace w, int rem) {
  exists(InputSymbol s1, InputSymbol s2, Trace v |
    isReachableFromFork(fork, r, s1, s2, v, rem) and
    w = Step(s1, s2, v)
  )
}

private predicate isReachableFromFork(
  State fork, StatePair r, InputSymbol s1, InputSymbol s2, Trace v, int rem
) {
  // base case
  exists(State q1, State q2 |
    isFork(fork, s1, s2, q1, q2) and
    r = MkStatePair(q1, q2) and
    v = Nil() and
    rem = statePairDistToFork(r, MkStatePair(fork, fork))
  )
  or
  // recursive case
  exists(StatePair p |
    isReachableFromFork(fork, p, v, rem + 1) and
    step(p, s1, s2, r) and
    rem = statePairDistToFork(r, MkStatePair(fork, fork))
  )
}

/**
 * Gets a state in the product automaton from which `(fork, fork)` is
 * reachable in zero or more epsilon transitions.
 */
private StatePair getAForkPair(State fork) {
  isFork(fork, _, _, _, _) and
  result = MkStatePair(epsilonPred*(fork), epsilonPred*(fork))
}

private predicate hasSuffix(Trace suffix, Trace t, int i) {
  // Declaring `t` to be a `RelevantTrace` currently causes a redundant check in the
  // recursive case, so instead we check it explicitly here.
  t instanceof RelevantTrace and
  i = 0 and
  suffix = t
  or
  hasSuffix(Step(_, _, suffix), t, i - 1)
}

pragma[noinline]
private predicate hasTuple(InputSymbol s1, InputSymbol s2, Trace t, int i) {
  hasSuffix(Step(s1, s2, _), t, i)
}

private class RelevantTrace extends Trace, Step {
  RelevantTrace() {
    exists(State fork, StatePair q |
      isReachableFromFork(fork, q, this, _) and
      q = getAForkPair(fork)
    )
  }

  pragma[noinline]
  private string intersect(int i) {
    exists(InputSymbol s1, InputSymbol s2 |
      hasTuple(s1, s2, this, i) and
      result = intersect(s1, s2)
    )
  }

  /** Gets a string corresponding to this trace. */
  // the pragma is needed for the case where `intersect(s1, s2)` has multiple values,
  // not for recursion
  language[monotonicAggregates]
  string concretise() {
    result = strictconcat(int i | hasTuple(_, _, this, i) | this.intersect(i) order by i desc)
  }
}

/**
 * Holds if `fork` is a pumpable fork with word `w`.
 */
private predicate isPumpable(State fork, string w) {
  exists(StatePair q, RelevantTrace t |
    isReachableFromFork(fork, q, t, _) and
    q = getAForkPair(fork) and
    w = t.concretise()
  )
}

/**
 * An instantiation of `ReDoSConfiguration` for exponential backtracking.
 */
class ExponentialReDoSConfiguration extends ReDoSConfiguration {
  ExponentialReDoSConfiguration() { this = "ExponentialReDoSConfiguration" }

  override predicate isReDoSCandidate(State state, string pump) { isPumpable(state, pump) }
}

deprecated import semmle.code.java.security.regexp.ExponentialBackTracking as Dep
import Dep
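The attack-string construction described in the library comment above (a prefix `v`, many copies of a pumpable word `w`, and a rejecting suffix `x`) can be observed directly against a backtracking matcher. The standalone Java sketch below is illustrative only and is not part of this pull request; the pattern `(a+)+b` and the class name are chosen purely for the demonstration.

import java.util.regex.Pattern;

// Illustrative only: `(a+)+b` has a pumpable fork, because the inner `a+` and the
// outer `(...)+` give two distinct ways to consume each run of 'a's. An input of
// n 'a's with no 'b' (the rejecting suffix) forces roughly 2^n backtracking attempts.
public class ExponentialBacktrackingDemo {
    public static void main(String[] args) {
        Pattern vulnerable = Pattern.compile("(a+)+b");
        for (int n = 18; n <= 26; n += 2) {
            String attack = "a".repeat(n); // n copies of the pump word "a", no accepting suffix
            long start = System.nanoTime();
            boolean matched = vulnerable.matcher(attack).matches();
            long millis = (System.nanoTime() - start) / 1_000_000;
            System.out.println("n=" + n + " matched=" + matched + " time=" + millis + "ms");
        }
    }
}

On a typical JVM the measured time roughly doubles for each extra 'a', which is the exponential blow-up that the `isFork`/`isPumpable` machinery above is designed to detect statically.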
@@ -1,56 +1,4 @@
/** Definitions and configurations for the Polynomial ReDoS query */
/** DEPRECATED. Import `semmle.code.java.security.regexp.PolynomialReDoSQuery` instead. */

import semmle.code.java.security.performance.SuperlinearBackTracking
import semmle.code.java.dataflow.DataFlow
import semmle.code.java.regex.RegexTreeView
import semmle.code.java.regex.RegexFlowConfigs
import semmle.code.java.dataflow.FlowSources

/** A sink for polynomial redos queries, where a regex is matched. */
class PolynomialRedosSink extends DataFlow::Node {
  RegExpLiteral reg;

  PolynomialRedosSink() { regexMatchedAgainst(reg.getRegex(), this.asExpr()) }

  /** Gets the regex that is matched against this node. */
  RegExpTerm getRegExp() { result.getParent() = reg }
}

/**
 * A method whose result typically has a limited length,
 * such as HTTP headers, and values derived from them.
 */
private class LengthRestrictedMethod extends Method {
  LengthRestrictedMethod() {
    this.getName().toLowerCase().matches(["%header%", "%requesturi%", "%requesturl%", "%cookie%"])
    or
    this.getDeclaringType().getName().toLowerCase().matches("%cookie%") and
    this.getName().matches("get%")
    or
    this.getDeclaringType().getName().toLowerCase().matches("%request%") and
    this.getName().toLowerCase().matches(["%get%path%", "get%user%", "%querystring%"])
  }
}

/** A configuration for Polynomial ReDoS queries. */
class PolynomialRedosConfig extends TaintTracking::Configuration {
  PolynomialRedosConfig() { this = "PolynomialRedosConfig" }

  override predicate isSource(DataFlow::Node src) { src instanceof RemoteFlowSource }

  override predicate isSink(DataFlow::Node sink) { sink instanceof PolynomialRedosSink }

  override predicate isSanitizer(DataFlow::Node node) {
    node.getType() instanceof PrimitiveType or
    node.getType() instanceof BoxedType or
    node.asExpr().(MethodAccess).getMethod() instanceof LengthRestrictedMethod
  }
}

/** Holds if there is flow from `source` to `sink` that is matched against the regexp term `regexp`, which is vulnerable to polynomial ReDoS. */
predicate hasPolynomialReDoSResult(
  DataFlow::PathNode source, DataFlow::PathNode sink, PolynomialBackTrackingTerm regexp
) {
  any(PolynomialRedosConfig config).hasFlowPath(source, sink) and
  regexp.getRootTerm() = sink.getNode().(PolynomialRedosSink).getRegExp()
}

deprecated import semmle.code.java.security.regexp.PolynomialReDoSQuery as Dep
import Dep
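For context, the configuration above flags code where remote input reaches a match against a regex containing a superlinear backtracking term. The Java fragment below is a hypothetical example of that shape; the class, parameter name, and regex are invented for illustration (and assume the servlet API on the classpath), and it is not taken from the pull request.

import javax.servlet.http.HttpServletRequest;

public class OrderIdValidator {
    /**
     * Hypothetical vulnerable shape: a remote flow source (a request parameter)
     * flows, without sanitization, into a regex match whose pattern contains a
     * superlinear backtracking term (`\d*` followed by the overlapping `\w*`).
     */
    public boolean isValidOrderId(HttpServletRequest request) {
        String id = request.getParameter("orderId"); // source: remote user input
        return id.matches("\\d*5\\w*");              // sink: regex matched against tainted data
    }
}

A value of primitive or boxed type, or one returned by a length-restricted method such as a header getter, would be treated as sanitized by `isSanitizer` above.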
File diff suppressed because it is too large
@@ -1,454 +1,4 @@
/**
 * Provides classes for working with regular expressions that can
 * perform backtracking in superlinear time.
 */

/** DEPRECATED. Import `semmle.code.java.security.regexp.SuperlinearBackTracking` instead. */

import ReDoSUtil

/*
 * This module implements the analysis described in the paper:
 * Valentin Wustholz, Oswaldo Olivo, Marijn J. H. Heule, and Isil Dillig:
 * Static Detection of DoS Vulnerabilities in
 * Programs that use Regular Expressions
 * (Extended Version).
 * (https://arxiv.org/pdf/1701.04045.pdf)
 *
 * Theorem 3 from the paper describes the basic idea.
 *
 * The following explains the idea using variables and predicate names that are used in the implementation:
 * We consider a pair of repetitions, which we will call `pivot` and `succ`.
 *
 * We create a product automaton of 3-tuples of states (see `StateTuple`).
 * There exists a transition `(a,b,c) -> (d,e,f)` in the product automaton
 * iff there exist three transitions in the NFA `a->d, b->e, c->f` where those three
 * transitions all match a shared character `char`. (see `getAThreewayIntersect`)
 *
 * We start a search in the product automaton at `(pivot, pivot, succ)`,
 * and search for a series of transitions (a `Trace`), such that we end
 * at `(pivot, succ, succ)` (see `isReachableFromStartTuple`).
 *
 * For example, consider the regular expression `/^\d*5\w*$/`.
 * The search will start at the tuple `(\d*, \d*, \w*)` and search
 * for a path to `(\d*, \w*, \w*)`.
 * This path exists, and consists of a single transition in the product automaton,
 * where the three corresponding NFA edges all match the character `"5"`.
 *
 * The start-state in the NFA has an any-transition to itself; this allows us to
 * flag regular expressions such as `/a*$/` - which does not have a start anchor -
 * and can thus start matching anywhere.
 *
 * The implementation is not perfect.
 * It has the same suffix detection issue as the `js/redos` query, which can cause false positives.
 * It also doesn't find all transitions in the product automaton, which can cause false negatives.
 */

/**
 * An instantiation of `ReDoSConfiguration` for superlinear ReDoS.
 */
class SuperLinearReDoSConfiguration extends ReDoSConfiguration {
  SuperLinearReDoSConfiguration() { this = "SuperLinearReDoSConfiguration" }

  override predicate isReDoSCandidate(State state, string pump) { isPumpable(_, state, pump) }
}

/**
 * Gets any root (start) state of a regular expression.
 */
private State getRootState() { result = mkMatch(any(RegExpRoot r)) }

private newtype TStateTuple =
  MkStateTuple(State q1, State q2, State q3) {
    // starts at (pivot, pivot, succ)
    isStartLoops(q1, q3) and q1 = q2
    or
    step(_, _, _, _, q1, q2, q3) and FeasibleTuple::isFeasibleTuple(q1, q2, q3)
  }

/**
 * A state in the product automaton.
 * The product automaton contains 3-tuples of states.
 *
 * We lazily only construct those states that we are actually
 * going to need.
 * Either a start state `(pivot, pivot, succ)`, or a state
 * where there exists a transition from an already existing state.
 *
 * The exponential variant of this query (`js/redos`) uses an optimization
 * trick where `q1 <= q2`. This trick cannot be used here as the order
 * of the elements matters.
 */
class StateTuple extends TStateTuple {
  State q1;
  State q2;
  State q3;

  StateTuple() { this = MkStateTuple(q1, q2, q3) }

  /**
   * Gets a string representation of this tuple.
   */
  string toString() { result = "(" + q1 + ", " + q2 + ", " + q3 + ")" }

  /**
   * Holds if this tuple is `(r1, r2, r3)`.
   */
  pragma[noinline]
  predicate isTuple(State r1, State r2, State r3) { r1 = q1 and r2 = q2 and r3 = q3 }
}

/**
 * A module for determining feasible tuples for the product automaton.
 *
 * The implementation is split into many predicates for performance reasons.
 */
private module FeasibleTuple {
  /**
   * Holds if the tuple `(r1, r2, r3)` might be on a path from a start-state to an end-state in the product automaton.
   */
  pragma[inline]
  predicate isFeasibleTuple(State r1, State r2, State r3) {
    // The first element is either inside a repetition (or the start state itself)
    isRepetitionOrStart(r1) and
    // The last element is inside a repetition
    stateInsideRepetition(r3) and
    // The states are reachable in the NFA in the order r1 -> r2 -> r3
    delta+(r1) = r2 and
    delta+(r2) = r3 and
    // The first element can reach a beginning (the "pivot" state in a `(pivot, succ)` pair).
    canReachABeginning(r1) and
    // The last element can reach a target (the "succ" state in a `(pivot, succ)` pair).
    canReachATarget(r3)
  }

  /**
   * Holds if `s` is either inside a repetition, or is the start state (which is a repetition).
   */
  pragma[noinline]
  private predicate isRepetitionOrStart(State s) { stateInsideRepetition(s) or s = getRootState() }

  /**
   * Holds if state `s` might be inside a backtracking repetition.
   */
  pragma[noinline]
  private predicate stateInsideRepetition(State s) {
    s.getRepr().getParent*() instanceof InfiniteRepetitionQuantifier
  }

  /**
   * Holds if there exists a path in the NFA from `s` to a "pivot" state
   * (from a `(pivot, succ)` pair that starts the search).
   */
  pragma[noinline]
  private predicate canReachABeginning(State s) {
    delta+(s) = any(State pivot | isStartLoops(pivot, _))
  }

  /**
   * Holds if there exists a path in the NFA from `s` to a "succ" state
   * (from a `(pivot, succ)` pair that starts the search).
   */
  pragma[noinline]
  private predicate canReachATarget(State s) { delta+(s) = any(State succ | isStartLoops(_, succ)) }
}

/**
 * Holds if `pivot` and `succ` are a pair of loops that could be the beginning of a quadratic blowup.
 *
 * There is a slight implementation difference compared to the paper: this predicate requires that `pivot != succ`.
 * The case where `pivot = succ` causes exponential backtracking and is handled by the `js/redos` query.
 */
predicate isStartLoops(State pivot, State succ) {
  pivot != succ and
  succ.getRepr() instanceof InfiniteRepetitionQuantifier and
  delta+(pivot) = succ and
  (
    pivot.getRepr() instanceof InfiniteRepetitionQuantifier
    or
    pivot = mkMatch(any(RegExpRoot root))
  )
}

/**
 * Gets a state for which there exists a transition in the NFA from `s`.
 */
State delta(State s) { delta(s, _, result) }

/**
 * Holds if there are transitions from the components of `q` to the corresponding
 * components of `r` labelled with `s1`, `s2`, and `s3`, respectively.
 */
pragma[noinline]
predicate step(StateTuple q, InputSymbol s1, InputSymbol s2, InputSymbol s3, StateTuple r) {
  exists(State r1, State r2, State r3 |
    step(q, s1, s2, s3, r1, r2, r3) and r = MkStateTuple(r1, r2, r3)
  )
}

/**
 * Holds if there are transitions from the components of `q` to `r1`, `r2`, and `r3`
 * labelled with `s1`, `s2`, and `s3`, respectively.
 */
pragma[noopt]
predicate step(
  StateTuple q, InputSymbol s1, InputSymbol s2, InputSymbol s3, State r1, State r2, State r3
) {
  exists(State q1, State q2, State q3 | q.isTuple(q1, q2, q3) |
    deltaClosed(q1, s1, r1) and
    deltaClosed(q2, s2, r2) and
    deltaClosed(q3, s3, r3) and
    // use noopt to force the join on `getAThreewayIntersect` to happen last.
    exists(getAThreewayIntersect(s1, s2, s3))
  )
}

/**
 * Gets a char that is matched by all the edges `s1`, `s2`, and `s3`.
 *
 * The result is not complete, and might miss some combination of edges that share some character.
 */
pragma[noinline]
string getAThreewayIntersect(InputSymbol s1, InputSymbol s2, InputSymbol s3) {
  result = minAndMaxIntersect(s1, s2) and result = [intersect(s2, s3), intersect(s1, s3)]
  or
  result = minAndMaxIntersect(s1, s3) and result = [intersect(s2, s3), intersect(s1, s2)]
  or
  result = minAndMaxIntersect(s2, s3) and result = [intersect(s1, s2), intersect(s1, s3)]
}

/**
 * Gets the minimum and maximum characters that intersect between `a` and `b`.
 * This predicate is used to limit the size of `getAThreewayIntersect`.
 */
pragma[noinline]
string minAndMaxIntersect(InputSymbol a, InputSymbol b) {
  result = [min(intersect(a, b)), max(intersect(a, b))]
}

private newtype TTrace =
  Nil() or
  Step(InputSymbol s1, InputSymbol s2, InputSymbol s3, TTrace t) {
    exists(StateTuple p |
      isReachableFromStartTuple(_, _, p, t, _) and
      step(p, s1, s2, s3, _)
    )
    or
    exists(State pivot, State succ | isStartLoops(pivot, succ) |
      t = Nil() and step(MkStateTuple(pivot, pivot, succ), s1, s2, s3, _)
    )
  }

/**
 * A list of tuples of input symbols that describe a path in the product automaton
 * starting from some start state.
 */
class Trace extends TTrace {
  /**
   * Gets a string representation of this Trace that can be used for debug purposes.
   */
  string toString() {
    this = Nil() and result = "Nil()"
    or
    exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, Trace t | this = Step(s1, s2, s3, t) |
      result = "Step(" + s1 + ", " + s2 + ", " + s3 + ", " + t + ")"
    )
  }
}

/**
 * Holds if there exists a transition from `r` to `q` in the product automaton.
 * Notice that the arguments are flipped, and thus the direction is backwards.
 */
pragma[noinline]
predicate tupleDeltaBackwards(StateTuple q, StateTuple r) { step(r, _, _, _, q) }

/**
 * Holds if `tuple` is an end state in our search.
 * That means there exists a pair of loops `(pivot, succ)` such that `tuple = (pivot, succ, succ)`.
 */
predicate isEndTuple(StateTuple tuple) { tuple = getAnEndTuple(_, _) }

/**
 * Gets the minimum length of a path from `r` to some end state `end`.
 *
 * The implementation searches backwards from the end-tuple.
 * This approach was chosen because it is much more efficient if the first predicate given to `shortestDistances` is small.
 * The `end` argument must always be an end state.
 */
int distBackFromEnd(StateTuple r, StateTuple end) =
  shortestDistances(isEndTuple/1, tupleDeltaBackwards/2)(end, r, result)

/**
 * Holds if there exists a pair of repetitions `(pivot, succ)` in the regular expression such that:
 * `tuple` is reachable from `(pivot, pivot, succ)` in the product automaton,
 * and there is a distance of `dist` from `tuple` to the nearest end-tuple `(pivot, succ, succ)`,
 * and a path from a start-state to `tuple` follows the transitions in `trace`.
 */
predicate isReachableFromStartTuple(State pivot, State succ, StateTuple tuple, Trace trace, int dist) {
  // base case. The first step is inlined to start the search after all possible 1-steps, and not just the ones with the shortest path.
  exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, State q1, State q2, State q3 |
    isStartLoops(pivot, succ) and
    step(MkStateTuple(pivot, pivot, succ), s1, s2, s3, tuple) and
    tuple = MkStateTuple(q1, q2, q3) and
    trace = Step(s1, s2, s3, Nil()) and
    dist = distBackFromEnd(tuple, MkStateTuple(pivot, succ, succ))
  )
  or
  // recursive case
  exists(StateTuple p, Trace v, InputSymbol s1, InputSymbol s2, InputSymbol s3 |
    isReachableFromStartTuple(pivot, succ, p, v, dist + 1) and
    dist = isReachableFromStartTupleHelper(pivot, succ, tuple, p, s1, s2, s3) and
    trace = Step(s1, s2, s3, v)
  )
}

/**
 * Helper predicate for the recursive case in `isReachableFromStartTuple`.
 */
pragma[noinline]
private int isReachableFromStartTupleHelper(
  State pivot, State succ, StateTuple r, StateTuple p, InputSymbol s1, InputSymbol s2,
  InputSymbol s3
) {
  result = distBackFromEnd(r, MkStateTuple(pivot, succ, succ)) and
  step(p, s1, s2, s3, r)
}

/**
 * Gets the tuple `(pivot, succ, succ)` from the product automaton.
 */
StateTuple getAnEndTuple(State pivot, State succ) {
  isStartLoops(pivot, succ) and
  result = MkStateTuple(pivot, succ, succ)
}

private predicate hasSuffix(Trace suffix, Trace t, int i) {
  // Declaring `t` to be a `RelevantTrace` currently causes a redundant check in the
  // recursive case, so instead we check it explicitly here.
  t instanceof RelevantTrace and
  i = 0 and
  suffix = t
  or
  hasSuffix(Step(_, _, _, suffix), t, i - 1)
}

pragma[noinline]
private predicate hasTuple(InputSymbol s1, InputSymbol s2, InputSymbol s3, Trace t, int i) {
  hasSuffix(Step(s1, s2, s3, _), t, i)
}

private class RelevantTrace extends Trace, Step {
  RelevantTrace() {
    exists(State pivot, State succ, StateTuple q |
      isReachableFromStartTuple(pivot, succ, q, this, _) and
      q = getAnEndTuple(pivot, succ)
    )
  }

  pragma[noinline]
  private string getAThreewayIntersect(int i) {
    exists(InputSymbol s1, InputSymbol s2, InputSymbol s3 |
      hasTuple(s1, s2, s3, this, i) and
      result = getAThreewayIntersect(s1, s2, s3)
    )
  }

  /** Gets a string corresponding to this trace. */
  // the pragma is needed for the case where `getAThreewayIntersect(s1, s2, s3)` has multiple values,
  // not for recursion
  language[monotonicAggregates]
  string concretise() {
    result =
      strictconcat(int i |
        hasTuple(_, _, _, this, i)
      |
        this.getAThreewayIntersect(i) order by i desc
      )
  }
}

/**
 * Holds if matching repetitions of `pump` can:
 * 1) Transition from `pivot` back to `pivot`.
 * 2) Transition from `pivot` to `succ`.
 * 3) Transition from `succ` to `succ`.
 *
 * From theorem 3 in the paper linked at the top of this file we can therefore conclude that
 * the regular expression has polynomial backtracking - if a rejecting suffix exists.
 *
 * This predicate is used by `SuperLinearReDoSConfiguration`, and the final results are
 * available in the `hasReDoSResult` predicate.
 */
predicate isPumpable(State pivot, State succ, string pump) {
  exists(StateTuple q, RelevantTrace t |
    isReachableFromStartTuple(pivot, succ, q, t, _) and
    q = getAnEndTuple(pivot, succ) and
    pump = t.concretise()
  )
}

/**
 * Holds if repetitions of `pump` at `t` will cause polynomial backtracking.
 */
predicate polynimalReDoS(RegExpTerm t, string pump, string prefixMsg, RegExpTerm prev) {
  exists(State s, State pivot |
    hasReDoSResult(t, pump, s, prefixMsg) and
    isPumpable(pivot, s, _) and
    prev = pivot.getRepr()
  )
}

/**
 * Gets a message for why `term` can cause polynomial backtracking.
 */
string getReasonString(RegExpTerm term, string pump, string prefixMsg, RegExpTerm prev) {
  polynimalReDoS(term, pump, prefixMsg, prev) and
  result =
    "Strings " + prefixMsg + "with many repetitions of '" + pump +
      "' can start matching anywhere after the start of the preceding " + prev
}

/**
 * A term that may cause a regular expression engine to perform a
 * polynomial number of match attempts, relative to the input length.
 */
class PolynomialBackTrackingTerm extends InfiniteRepetitionQuantifier {
  string reason;
  string pump;
  string prefixMsg;
  RegExpTerm prev;

  PolynomialBackTrackingTerm() {
    reason = getReasonString(this, pump, prefixMsg, prev) and
    // there might be many reasons for this term to have polynomial backtracking - we pick the shortest one.
    reason = min(string msg | msg = getReasonString(this, _, _, _) | msg order by msg.length(), msg)
  }

  /**
   * Holds if all non-empty successors to the polynomial backtracking term match the end of the line.
   */
  predicate isAtEndLine() {
    forall(RegExpTerm succ | this.getSuccessor+() = succ and not matchesEpsilon(succ) |
      succ instanceof RegExpDollar
    )
  }

  /**
   * Gets the string that should be repeated to cause this regular expression to perform polynomially.
   */
  string getPumpString() { result = pump }

  /**
   * Gets a message describing which prefix a matching string must start with for this term to cause polynomial backtracking.
   */
  string getPrefixMessage() { result = prefixMsg }

  /**
   * Gets a predecessor to `this`, which also loops on the pump string, and thereby causes polynomial backtracking.
   */
  RegExpTerm getPreviousLoop() { result = prev }

  /**
   * Gets the reason for the number of match attempts.
   */
  string getReason() { result = reason }
}

deprecated import semmle.code.java.security.regexp.SuperlinearBackTracking as Dep
import Dep
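The quadratic behaviour that this module targets can be reproduced with the `/^\d*5\w*$/` example from the comment above: `\d*` (the `pivot`) and `\w*` (the `succ`) both accept the digit `5`, so a long run of `5`s followed by a rejecting character makes a backtracking engine try every split point between the two loops. The Java snippet below only illustrates that effect and is not part of the pull request.

import java.util.regex.Pattern;

// Illustrative only: the match fails on the trailing '!', and the engine retries the
// boundary between \d* and \w* at every position, giving O(n^2) work overall.
public class PolynomialBacktrackingDemo {
    public static void main(String[] args) {
        Pattern vulnerable = Pattern.compile("^\\d*5\\w*$");
        for (int n = 2_500; n <= 20_000; n *= 2) {
            String attack = "5".repeat(n) + "!"; // pump string "5" repeated n times, then a rejecting suffix
            long start = System.nanoTime();
            boolean matched = vulnerable.matcher(attack).matches();
            long millis = (System.nanoTime() - start) / 1_000_000;
            System.out.println("n=" + n + " matched=" + matched + " time=" + millis + "ms");
        }
    }
}

Doubling `n` roughly quadruples the running time, which is the superlinear (here quadratic) backtracking that `isPumpable(pivot, succ, pump)` reports with the pump string "5".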
@@ -0,0 +1,344 @@
/**
 * This library implements the analysis described in the following two papers:
 *
 * James Kirrage, Asiri Rathnayake, Hayo Thielecke: Static Analysis for
 * Regular Expression Denial-of-Service Attacks. NSS 2013.
 * (http://www.cs.bham.ac.uk/~hxt/research/reg-exp-sec.pdf)
 * Asiri Rathnayake, Hayo Thielecke: Static Analysis for Regular Expression
 * Exponential Runtime via Substructural Logics. 2014.
 * (https://www.cs.bham.ac.uk/~hxt/research/redos_full.pdf)
 *
 * The basic idea is to search for overlapping cycles in the NFA, that is,
 * states `q` such that there are two distinct paths from `q` to itself
 * that consume the same word `w`.
 *
 * For any such state `q`, an attack string can be constructed as follows:
 * concatenate a prefix `v` that takes the NFA to `q` with `n` copies of
 * the word `w` that leads back to `q` along two different paths, followed
 * by a suffix `x` that is _not_ accepted in state `q`. A backtracking
 * implementation will need to explore at least 2^n different ways of going
 * from `q` back to itself while trying to match the `n` copies of `w`
 * before finally giving up.
 *
 * Now in order to identify overlapping cycles, all we have to do is find
 * pumpable forks, that is, states `q` that can transition to two different
 * states `r1` and `r2` on the same input symbol `c`, such that there are
 * paths from both `r1` and `r2` to `q` that consume the same word. The latter
 * condition is equivalent to saying that `(q, q)` is reachable from `(r1, r2)`
 * in the product NFA.
 *
 * This is what the library does. It makes a simple attempt to construct a
 * prefix `v` leading into `q`, but only to improve the alert message.
 * And the library tries to prove the existence of a suffix that ensures
 * rejection. This check might fail, which can cause false positives.
 *
 * Finally, sometimes it depends on the translation whether the NFA generated
 * for a regular expression has a pumpable fork or not. We implement one
 * particular translation, which may result in false positives or negatives
 * relative to some particular JavaScript engine.
 *
 * More precisely, the library constructs an NFA from a regular expression `r`
 * as follows:
 *
 * * Every sub-term `t` gives rise to an NFA state `Match(t,i)`, representing
 *   the state of the automaton before attempting to match the `i`th character in `t`.
 * * There is one accepting state `Accept(r)`.
 * * There is a special `AcceptAnySuffix(r)` state, which accepts any suffix string
 *   by using an epsilon transition to `Accept(r)` and an any transition to itself.
 * * Transitions between states may be labelled with epsilon, or an abstract
 *   input symbol.
 * * Each abstract input symbol represents a set of concrete input characters:
 *   either a single character, a set of characters represented by a
 *   character class, or the set of all characters.
 * * The product automaton is constructed lazily, starting with pair states
 *   `(q, q)` where `q` is a fork, and proceeding along an over-approximate
 *   step relation.
 * * The over-approximate step relation allows transitions along pairs of
 *   abstract input symbols where the symbols have overlap in the characters they accept.
 * * Once a trace of pairs of abstract input symbols that leads from a fork
 *   back to itself has been identified, we attempt to construct a concrete
 *   string corresponding to it, which may fail.
 * * Lastly we ensure that any state reached by repeating `n` copies of `w` has
 *   a suffix `x` (possibly empty) that is most likely __not__ accepted.
 */

import NfaUtils

/**
 * Holds if state `s` might be inside a backtracking repetition.
 */
pragma[noinline]
private predicate stateInsideBacktracking(State s) {
  s.getRepr().getParent*() instanceof MaybeBacktrackingRepetition
}

/**
 * An infinitely repeating quantifier that might backtrack.
 */
private class MaybeBacktrackingRepetition extends InfiniteRepetitionQuantifier {
  MaybeBacktrackingRepetition() {
    exists(RegExpTerm child |
      child instanceof RegExpAlt or
      child instanceof RegExpQuantifier
    |
      child.getParent+() = this
    )
  }
}

/**
 * A state in the product automaton.
 */
private newtype TStatePair =
  /**
   * We lazily only construct those states that we are actually
   * going to need: `(q, q)` for every fork state `q`, and any
   * pair of states that can be reached from a pair that we have
   * already constructed. To cut down on the number of states,
   * we only represent states `(q1, q2)` where `q1` is lexicographically
   * no bigger than `q2`.
   *
   * States are only constructed if both states in the pair are
   * inside a repetition that might backtrack.
   */
  MkStatePair(State q1, State q2) {
    isFork(q1, _, _, _, _) and q2 = q1
    or
    (step(_, _, _, q1, q2) or step(_, _, _, q2, q1)) and
    rankState(q1) <= rankState(q2)
  }

/**
 * Gets a unique number for a `state`.
 * Is used to create an ordering of states, where states with the same `toString()` will be ordered differently.
 */
private int rankState(State state) {
  state =
    rank[result](State s, Location l |
      l = s.getRepr().getLocation()
    |
      s order by l.getStartLine(), l.getStartColumn(), s.toString()
    )
}

/**
 * A state in the product automaton.
 */
private class StatePair extends TStatePair {
  State q1;
  State q2;

  StatePair() { this = MkStatePair(q1, q2) }

  /** Gets a textual representation of this element. */
  string toString() { result = "(" + q1 + ", " + q2 + ")" }

  /** Gets the first component of the state pair. */
  State getLeft() { result = q1 }

  /** Gets the second component of the state pair. */
  State getRight() { result = q2 }
}

/**
 * Holds for `(fork, fork)` state pairs when `isFork(fork, _, _, _, _)` holds.
 *
 * Used in `statePairDistToFork`.
 */
private predicate isStatePairFork(StatePair p) {
  exists(State fork | p = MkStatePair(fork, fork) and isFork(fork, _, _, _, _))
}

/**
 * Holds if there are transitions from the components of `q` to the corresponding
 * components of `r`.
 *
 * Used in `statePairDistToFork`.
 */
private predicate reverseStep(StatePair r, StatePair q) { step(q, _, _, r) }

/**
 * Gets the minimum length of a path from `q` to `r` in the
 * product automaton.
 */
private int statePairDistToFork(StatePair q, StatePair r) =
  shortestDistances(isStatePairFork/1, reverseStep/2)(r, q, result)

/**
 * Holds if there are transitions from `q` to `r1` and from `q` to `r2`
 * labelled with `s1` and `s2`, respectively, where `s1` and `s2` do not
 * trivially have an empty intersection.
 *
 * This predicate only holds for states associated with regular expressions
 * that have at least one repetition quantifier in them (otherwise the
 * expression cannot be vulnerable to ReDoS attacks anyway).
 */
pragma[noopt]
private predicate isFork(State q, InputSymbol s1, InputSymbol s2, State r1, State r2) {
  stateInsideBacktracking(q) and
  exists(State q1, State q2 |
    q1 = epsilonSucc*(q) and
    delta(q1, s1, r1) and
    q2 = epsilonSucc*(q) and
    delta(q2, s2, r2) and
    // Use pragma[noopt] to prevent intersect(s1,s2) from being the starting point of the join.
    // From (s1,s2) it would find a huge number of intermediate state pairs (q1,q2) originating from different literals,
    // and discover at the end that no `q` can reach both `q1` and `q2` by epsilon transitions.
    exists(intersect(s1, s2))
  |
    s1 != s2
    or
    r1 != r2
    or
    r1 = r2 and q1 != q2
    or
    // If q can reach itself by epsilon transitions, then there are two distinct paths to the q1/q2 state:
    // one that uses the loop and one that doesn't. The engine will separately attempt to match with each path,
    // despite ending in the same state. The "fork" thus arises from the choice of whether to use the loop or not.
    // To avoid every state in the loop becoming a fork state,
    // we arbitrarily pick the InfiniteRepetitionQuantifier state as the canonical fork state for the loop
    // (every epsilon-loop must contain such a state).
    //
    // We additionally require that there exists another InfiniteRepetitionQuantifier `mid` on the path from `q` to itself.
    // This is done to avoid flagging regular expressions such as `/(a?)*b/`, which only has polynomial runtime and is detected by `js/polynomial-redos`.
    // The code below is therefore a heuristic that only flags regular expressions such as `/(a*)*b/`,
    // and does not flag regular expressions such as `/(a?b?)c/`, but the latter pattern is not used frequently.
    r1 = r2 and
    q1 = q2 and
    epsilonSucc+(q) = q and
    exists(RegExpTerm term | term = q.getRepr() | term instanceof InfiniteRepetitionQuantifier) and
    // One of the mid states is an infinite quantifier itself
    exists(State mid, RegExpTerm term |
      mid = epsilonSucc+(q) and
      term = mid.getRepr() and
      term instanceof InfiniteRepetitionQuantifier and
      q = epsilonSucc+(mid) and
      not mid = q
    )
  ) and
  stateInsideBacktracking(r1) and
  stateInsideBacktracking(r2)
}

/**
 * Gets the state pair `(q1, q2)` or `(q2, q1)`; note that only
 * one or the other is defined.
 */
private StatePair mkStatePair(State q1, State q2) {
  result = MkStatePair(q1, q2) or result = MkStatePair(q2, q1)
}

/**
 * Holds if there are transitions from the components of `q` to the corresponding
 * components of `r` labelled with `s1` and `s2`, respectively.
 */
private predicate step(StatePair q, InputSymbol s1, InputSymbol s2, StatePair r) {
  exists(State r1, State r2 | step(q, s1, s2, r1, r2) and r = mkStatePair(r1, r2))
}

/**
 * Holds if there are transitions from the components of `q` to `r1` and `r2`
 * labelled with `s1` and `s2`, respectively.
 *
 * We only consider transitions where the resulting states `(r1, r2)` are both
 * inside a repetition that might backtrack.
 */
pragma[noopt]
private predicate step(StatePair q, InputSymbol s1, InputSymbol s2, State r1, State r2) {
  exists(State q1, State q2 | q.getLeft() = q1 and q.getRight() = q2 |
    deltaClosed(q1, s1, r1) and
    deltaClosed(q2, s2, r2) and
    // use noopt to force the join on `intersect` to happen last.
    exists(intersect(s1, s2))
  ) and
  stateInsideBacktracking(r1) and
  stateInsideBacktracking(r2)
}

private newtype TTrace =
  Nil() or
  Step(InputSymbol s1, InputSymbol s2, TTrace t) { isReachableFromFork(_, _, s1, s2, t, _) }

/**
 * A list of pairs of input symbols that describe a path in the product automaton
 * starting from some fork state.
 */
private class Trace extends TTrace {
  /** Gets a textual representation of this element. */
  string toString() {
    this = Nil() and result = "Nil()"
    or
    exists(InputSymbol s1, InputSymbol s2, Trace t | this = Step(s1, s2, t) |
      result = "Step(" + s1 + ", " + s2 + ", " + t + ")"
    )
  }
}

/**
 * Holds if `r` is reachable from `(fork, fork)` under input `w`, and there is
 * a path from `r` back to `(fork, fork)` with `rem` steps.
 */
private predicate isReachableFromFork(State fork, StatePair r, Trace w, int rem) {
  exists(InputSymbol s1, InputSymbol s2, Trace v |
    isReachableFromFork(fork, r, s1, s2, v, rem) and
    w = Step(s1, s2, v)
  )
}

private predicate isReachableFromFork(
  State fork, StatePair r, InputSymbol s1, InputSymbol s2, Trace v, int rem
) {
  // base case
  exists(State q1, State q2 |
    isFork(fork, s1, s2, q1, q2) and
    r = MkStatePair(q1, q2) and
    v = Nil() and
    rem = statePairDistToFork(r, MkStatePair(fork, fork))
  )
  or
  // recursive case
  exists(StatePair p |
    isReachableFromFork(fork, p, v, rem + 1) and
    step(p, s1, s2, r) and
    rem = statePairDistToFork(r, MkStatePair(fork, fork))
  )
}

/**
 * Gets a state in the product automaton from which `(fork, fork)` is
 * reachable in zero or more epsilon transitions.
 */
private StatePair getAForkPair(State fork) {
  isFork(fork, _, _, _, _) and
  result = MkStatePair(epsilonPred*(fork), epsilonPred*(fork))
}

/** An implementation of a chain containing chars for use by `Concretizer`. */
private module CharTreeImpl implements CharTree {
  class CharNode = Trace;

  CharNode getPrev(CharNode t) { t = Step(_, _, result) }

  /** Holds if `n` is a trace that is used by `concretize` in `isPumpable`. */
  predicate isARelevantEnd(CharNode n) {
    exists(State f | isReachableFromFork(f, getAForkPair(f), n, _))
  }

  string getChar(CharNode t) {
    exists(InputSymbol s1, InputSymbol s2 | t = Step(s1, s2, _) | result = intersect(s1, s2))
  }
}

/**
 * Holds if `fork` is a pumpable fork with word `w`.
 */
private predicate isPumpable(State fork, string w) {
  exists(StatePair q, Trace t |
    isReachableFromFork(fork, q, t, _) and
    q = getAForkPair(fork) and
    w = Concretizer<CharTreeImpl>::concretize(t)
  )
}

/** Holds if `state` has exponential ReDoS. */
predicate hasReDoSResult = ReDoSPruning<isPumpable/2>::hasReDoSResult/4;
java/ql/lib/semmle/code/java/security/regexp/NfaUtils.qll (new file, 1319 lines)
File diff suppressed because it is too large
@@ -0,0 +1,56 @@
|
||||
/** Definitions and configurations for the Polynomial ReDoS query */
|
||||
|
||||
import semmle.code.java.security.regexp.SuperlinearBackTracking
|
||||
import semmle.code.java.dataflow.DataFlow
|
||||
import semmle.code.java.regex.RegexTreeView
|
||||
import semmle.code.java.regex.RegexFlowConfigs
|
||||
import semmle.code.java.dataflow.FlowSources
|
||||
|
||||
/** A sink for polynomial redos queries, where a regex is matched. */
|
||||
class PolynomialRedosSink extends DataFlow::Node {
|
||||
RegExpLiteral reg;
|
||||
|
||||
PolynomialRedosSink() { regexMatchedAgainst(reg.getRegex(), this.asExpr()) }
|
||||
|
||||
/** Gets the regex that is matched against this node. */
|
||||
RegExpTerm getRegExp() { result.getParent() = reg }
|
||||
}
|
||||
|
||||
/**
|
||||
* A method whose result typically has a limited length,
|
||||
* such as HTTP headers, and values derrived from them.
|
||||
*/
|
||||
private class LengthRestrictedMethod extends Method {
|
||||
LengthRestrictedMethod() {
|
||||
this.getName().toLowerCase().matches(["%header%", "%requesturi%", "%requesturl%", "%cookie%"])
|
||||
or
|
||||
this.getDeclaringType().getName().toLowerCase().matches("%cookie%") and
|
||||
this.getName().matches("get%")
|
||||
or
|
||||
this.getDeclaringType().getName().toLowerCase().matches("%request%") and
|
||||
this.getName().toLowerCase().matches(["%get%path%", "get%user%", "%querystring%"])
|
||||
}
|
||||
}
|
||||
|
||||
/** A configuration for Polynomial ReDoS queries. */
|
||||
class PolynomialRedosConfig extends TaintTracking::Configuration {
|
||||
PolynomialRedosConfig() { this = "PolynomialRedosConfig" }
|
||||
|
||||
override predicate isSource(DataFlow::Node src) { src instanceof RemoteFlowSource }
|
||||
|
||||
override predicate isSink(DataFlow::Node sink) { sink instanceof PolynomialRedosSink }
|
||||
|
||||
override predicate isSanitizer(DataFlow::Node node) {
|
||||
node.getType() instanceof PrimitiveType or
|
||||
node.getType() instanceof BoxedType or
|
||||
node.asExpr().(MethodAccess).getMethod() instanceof LengthRestrictedMethod
|
||||
}
|
||||
}
|
||||
|
||||
/** Holds if there is flow from `source` to `sink` that is matched against the regexp term `regexp` that is vulnerable to Polynomial ReDoS. */
|
||||
predicate hasPolynomialReDoSResult(
|
||||
DataFlow::PathNode source, DataFlow::PathNode sink, PolynomialBackTrackingTerm regexp
|
||||
) {
|
||||
any(PolynomialRedosConfig config).hasFlowPath(source, sink) and
|
||||
regexp.getRootTerm() = sink.getNode().(PolynomialRedosSink).getRegExp()
|
||||
}
|
||||
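Before the next hunk, a hedged Java sketch of the flow shape this configuration targets; the class, method, and regex below are hypothetical and only stand in for a remote source reaching a `PolynomialRedosSink`.

// Hypothetical example (not from the diff): a remote value reaches a match
// against a regex whose two loops (".*" and "\d+") can consume the same digits.
public class UserProfileController {
    // Imagine `bio` arrives from an HTTP request body (a RemoteFlowSource).
    boolean endsWithNumber(String bio) {
        // Sink: a long run of digits followed by "!" is re-scanned quadratically.
        return bio.matches(".*\\d+$");
    }
    // By contrast, values returned by methods matched by LengthRestrictedMethod
    // (e.g. header getters) are treated as short and act as sanitizers.
}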
@@ -0,0 +1,418 @@
|
||||
/**
|
||||
* Provides classes for working with regular expressions that can
|
||||
* perform backtracking in superlinear time.
|
||||
*/
|
||||
|
||||
import NfaUtils
|
||||
|
||||
/*
|
||||
* This module implements the analysis described in the paper:
|
||||
* Valentin Wustholz, Oswaldo Olivo, Marijn J. H. Heule, and Isil Dillig:
|
||||
* Static Detection of DoS Vulnerabilities in
|
||||
* Programs that use Regular Expressions
|
||||
* (Extended Version).
|
||||
* (https://arxiv.org/pdf/1701.04045.pdf)
|
||||
*
|
||||
* Theorem 3 from the paper describes the basic idea.
|
||||
*
|
||||
* The following explains the idea using variables and predicate names that are used in the implementation:
|
||||
* We consider a pair of repetitions, which we will call `pivot` and `succ`.
|
||||
*
|
||||
* We create a product automaton of 3-tuples of states (see `StateTuple`).
|
||||
* There exists a transition `(a,b,c) -> (d,e,f)` in the product automaton
|
||||
* iff there exists three transitions in the NFA `a->d, b->e, c->f` where those three
|
||||
* transitions all match a shared character `char`. (see `getAThreewayIntersect`)
|
||||
*
|
||||
* We start a search in the product automaton at `(pivot, pivot, succ)`,
|
||||
* and search for a series of transitions (a `Trace`), such that we end
|
||||
* at `(pivot, succ, succ)` (see `isReachableFromStartTuple`).
|
||||
*
|
||||
* For example, consider the regular expression `/^\d*5\w*$/`.
|
||||
* The search will start at the tuple `(\d*, \d*, \w*)` and search
|
||||
* for a path to `(\d*, \w*, \w*)`.
|
||||
* This path exists, and consists of a single transition in the product automaton,
|
||||
* where the three corresponding NFA edges all match the character `"5"`.
|
||||
*
|
||||
 * The start-state in the NFA has an any-transition to itself; this allows us to
|
||||
* flag regular expressions such as `/a*$/` - which does not have a start anchor -
|
||||
* and can thus start matching anywhere.
|
||||
*
|
||||
* The implementation is not perfect.
|
||||
* It has the same suffix detection issue as the `js/redos` query, which can cause false positives.
|
||||
* It also doesn't find all transitions in the product automaton, which can cause false negatives.
|
||||
*/
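The `/^\d*5\w*$/` example above can be exercised directly; the following Java timing sketch (the input sizes are illustrative) shows the quadratic growth the product-automaton search predicts.

import java.util.regex.Pattern;

// Pivot = \d*, succ = \w*; the character "5" labels the product-automaton
// transition from (\d*, \d*, \w*) to (\d*, \w*, \w*). Repeating the pump "5"
// and ending with a rejecting " " yields quadratic matching time.
public class SuperlinearDemo {
    public static void main(String[] args) {
        Pattern p = Pattern.compile("^\\d*5\\w*$");
        for (int n = 10_000; n <= 40_000; n *= 2) {
            String attack = "5".repeat(n) + " "; // trailing space rejects both \w* and $
            long t0 = System.nanoTime();
            p.matcher(attack).matches();
            // Doubling n roughly quadruples the time (O(n^2) backtracking).
            System.out.printf("n=%d: %d ms%n", n, (System.nanoTime() - t0) / 1_000_000);
        }
    }
}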
|
||||
|
||||
/**
|
||||
* Gets any root (start) state of a regular expression.
|
||||
*/
|
||||
private State getRootState() { result = mkMatch(any(RegExpRoot r)) }
|
||||
|
||||
private newtype TStateTuple =
|
||||
MkStateTuple(State q1, State q2, State q3) {
|
||||
// starts at (pivot, pivot, succ)
|
||||
isStartLoops(q1, q3) and q1 = q2
|
||||
or
|
||||
step(_, _, _, _, q1, q2, q3) and FeasibleTuple::isFeasibleTuple(q1, q2, q3)
|
||||
}
|
||||
|
||||
/**
|
||||
* A state in the product automaton.
|
||||
* The product automaton contains 3-tuples of states.
|
||||
*
|
||||
* We lazily only construct those states that we are actually
|
||||
* going to need.
|
||||
* Either a start state `(pivot, pivot, succ)`, or a state
|
||||
* where there exists a transition from an already existing state.
|
||||
*
|
||||
* The exponential variant of this query (`js/redos`) uses an optimization
|
||||
* trick where `q1 <= q2`. This trick cannot be used here as the order
|
||||
* of the elements matter.
|
||||
*/
|
||||
class StateTuple extends TStateTuple {
|
||||
State q1;
|
||||
State q2;
|
||||
State q3;
|
||||
|
||||
StateTuple() { this = MkStateTuple(q1, q2, q3) }
|
||||
|
||||
/**
|
||||
 * Gets a string representation of this tuple.
|
||||
*/
|
||||
string toString() { result = "(" + q1 + ", " + q2 + ", " + q3 + ")" }
|
||||
|
||||
/**
|
||||
* Holds if this tuple is `(r1, r2, r3)`.
|
||||
*/
|
||||
pragma[noinline]
|
||||
predicate isTuple(State r1, State r2, State r3) { r1 = q1 and r2 = q2 and r3 = q3 }
|
||||
}
|
||||
|
||||
/**
|
||||
* A module for determining feasible tuples for the product automaton.
|
||||
*
|
||||
* The implementation is split into many predicates for performance reasons.
|
||||
*/
|
||||
private module FeasibleTuple {
|
||||
/**
|
||||
* Holds if the tuple `(r1, r2, r3)` might be on path from a start-state to an end-state in the product automaton.
|
||||
*/
|
||||
pragma[inline]
|
||||
predicate isFeasibleTuple(State r1, State r2, State r3) {
|
||||
// The first element is either inside a repetition (or the start state itself)
|
||||
isRepetitionOrStart(r1) and
|
||||
// The last element is inside a repetition
|
||||
stateInsideRepetition(r3) and
|
||||
// The states are reachable in the NFA in the order r1 -> r2 -> r3
|
||||
delta+(r1) = r2 and
|
||||
delta+(r2) = r3 and
|
||||
// The first element can reach a beginning (the "pivot" state in a `(pivot, succ)` pair).
|
||||
canReachABeginning(r1) and
|
||||
// The last element can reach a target (the "succ" state in a `(pivot, succ)` pair).
|
||||
canReachATarget(r3)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `s` is either inside a repetition, or is the start state (which is a repetition).
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate isRepetitionOrStart(State s) { stateInsideRepetition(s) or s = getRootState() }
|
||||
|
||||
/**
|
||||
* Holds if state `s` might be inside a backtracking repetition.
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate stateInsideRepetition(State s) {
|
||||
s.getRepr().getParent*() instanceof InfiniteRepetitionQuantifier
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there exists a path in the NFA from `s` to a "pivot" state
|
||||
* (from a `(pivot, succ)` pair that starts the search).
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate canReachABeginning(State s) {
|
||||
delta+(s) = any(State pivot | isStartLoops(pivot, _))
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there exists a path in the NFA from `s` to a "succ" state
|
||||
* (from a `(pivot, succ)` pair that starts the search).
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate canReachATarget(State s) { delta+(s) = any(State succ | isStartLoops(_, succ)) }
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `pivot` and `succ` are a pair of loops that could be the beginning of a quadratic blowup.
|
||||
*
|
||||
* There is a slight implementation difference compared to the paper: this predicate requires that `pivot != succ`.
|
||||
* The case where `pivot = succ` causes exponential backtracking and is handled by the `js/redos` query.
|
||||
*/
|
||||
predicate isStartLoops(State pivot, State succ) {
|
||||
pivot != succ and
|
||||
succ.getRepr() instanceof InfiniteRepetitionQuantifier and
|
||||
delta+(pivot) = succ and
|
||||
(
|
||||
pivot.getRepr() instanceof InfiniteRepetitionQuantifier
|
||||
or
|
||||
pivot = mkMatch(any(RegExpRoot root))
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
 * Gets a state for which there exists a transition in the NFA from `s`.
|
||||
*/
|
||||
State delta(State s) { delta(s, _, result) }
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to the corresponding
|
||||
* components of `r` labelled with `s1`, `s2`, and `s3`, respectively.
|
||||
*/
|
||||
pragma[noinline]
|
||||
predicate step(StateTuple q, InputSymbol s1, InputSymbol s2, InputSymbol s3, StateTuple r) {
|
||||
exists(State r1, State r2, State r3 |
|
||||
step(q, s1, s2, s3, r1, r2, r3) and r = MkStateTuple(r1, r2, r3)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
 * Holds if there are transitions from the components of `q` to `r1`, `r2`, and `r3`
|
||||
* labelled with `s1`, `s2`, and `s3`, respectively.
|
||||
*/
|
||||
pragma[noopt]
|
||||
predicate step(
|
||||
StateTuple q, InputSymbol s1, InputSymbol s2, InputSymbol s3, State r1, State r2, State r3
|
||||
) {
|
||||
exists(State q1, State q2, State q3 | q.isTuple(q1, q2, q3) |
|
||||
deltaClosed(q1, s1, r1) and
|
||||
deltaClosed(q2, s2, r2) and
|
||||
deltaClosed(q3, s3, r3) and
|
||||
// use noopt to force the join on `getAThreewayIntersect` to happen last.
|
||||
exists(getAThreewayIntersect(s1, s2, s3))
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a char that is matched by all the edges `s1`, `s2`, and `s3`.
|
||||
*
|
||||
* The result is not complete, and might miss some combination of edges that share some character.
|
||||
*/
|
||||
pragma[noinline]
|
||||
string getAThreewayIntersect(InputSymbol s1, InputSymbol s2, InputSymbol s3) {
|
||||
result = minAndMaxIntersect(s1, s2) and result = [intersect(s2, s3), intersect(s1, s3)]
|
||||
or
|
||||
result = minAndMaxIntersect(s1, s3) and result = [intersect(s2, s3), intersect(s1, s2)]
|
||||
or
|
||||
result = minAndMaxIntersect(s2, s3) and result = [intersect(s1, s2), intersect(s1, s3)]
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the minimum and maximum characters that intersect between `a` and `b`.
|
||||
* This predicate is used to limit the size of `getAThreewayIntersect`.
|
||||
*/
|
||||
pragma[noinline]
|
||||
string minAndMaxIntersect(InputSymbol a, InputSymbol b) {
|
||||
result = [min(intersect(a, b)), max(intersect(a, b))]
|
||||
}
|
||||
|
||||
private newtype TTrace =
|
||||
Nil() or
|
||||
Step(InputSymbol s1, InputSymbol s2, InputSymbol s3, TTrace t) {
|
||||
isReachableFromStartTuple(_, _, t, s1, s2, s3, _, _)
|
||||
}
|
||||
|
||||
/**
|
||||
* A list of tuples of input symbols that describe a path in the product automaton
|
||||
* starting from some start state.
|
||||
*/
|
||||
class Trace extends TTrace {
|
||||
/**
|
||||
* Gets a string representation of this Trace that can be used for debug purposes.
|
||||
*/
|
||||
string toString() {
|
||||
this = Nil() and result = "Nil()"
|
||||
or
|
||||
exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, Trace t | this = Step(s1, s2, s3, t) |
|
||||
result = "Step(" + s1 + ", " + s2 + ", " + s3 + ", " + t + ")"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there exists a transition from `r` to `q` in the product automaton.
|
||||
* Notice that the arguments are flipped, and thus the direction is backwards.
|
||||
*/
|
||||
pragma[noinline]
|
||||
predicate tupleDeltaBackwards(StateTuple q, StateTuple r) { step(r, _, _, _, q) }
|
||||
|
||||
/**
|
||||
* Holds if `tuple` is an end state in our search.
|
||||
* That means there exists a pair of loops `(pivot, succ)` such that `tuple = (pivot, succ, succ)`.
|
||||
*/
|
||||
predicate isEndTuple(StateTuple tuple) { tuple = getAnEndTuple(_, _) }
|
||||
|
||||
/**
|
||||
 * Gets the minimum length of a path from `r` to some end state `end`.
|
||||
*
|
||||
* The implementation searches backwards from the end-tuple.
|
||||
* This approach was chosen because it is way more efficient if the first predicate given to `shortestDistances` is small.
|
||||
* The `end` argument must always be an end state.
|
||||
*/
|
||||
int distBackFromEnd(StateTuple r, StateTuple end) =
|
||||
shortestDistances(isEndTuple/1, tupleDeltaBackwards/2)(end, r, result)
|
||||
|
||||
/**
|
||||
* Holds if there exists a pair of repetitions `(pivot, succ)` in the regular expression such that:
|
||||
* `tuple` is reachable from `(pivot, pivot, succ)` in the product automaton,
|
||||
* and there is a distance of `dist` from `tuple` to the nearest end-tuple `(pivot, succ, succ)`,
|
||||
* and a path from a start-state to `tuple` follows the transitions in `trace`.
|
||||
*/
|
||||
private predicate isReachableFromStartTuple(
|
||||
State pivot, State succ, StateTuple tuple, Trace trace, int dist
|
||||
) {
|
||||
exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, Trace v |
|
||||
isReachableFromStartTuple(pivot, succ, v, s1, s2, s3, tuple, dist) and
|
||||
trace = Step(s1, s2, s3, v)
|
||||
)
|
||||
}
|
||||
|
||||
private predicate isReachableFromStartTuple(
|
||||
State pivot, State succ, Trace trace, InputSymbol s1, InputSymbol s2, InputSymbol s3,
|
||||
StateTuple tuple, int dist
|
||||
) {
|
||||
// base case.
|
||||
exists(State q1, State q2, State q3 |
|
||||
isStartLoops(pivot, succ) and
|
||||
step(MkStateTuple(pivot, pivot, succ), s1, s2, s3, tuple) and
|
||||
tuple = MkStateTuple(q1, q2, q3) and
|
||||
trace = Nil() and
|
||||
dist = distBackFromEnd(tuple, MkStateTuple(pivot, succ, succ))
|
||||
)
|
||||
or
|
||||
// recursive case
|
||||
exists(StateTuple p |
|
||||
isReachableFromStartTuple(pivot, succ, p, trace, dist + 1) and
|
||||
dist = distBackFromEnd(tuple, MkStateTuple(pivot, succ, succ)) and
|
||||
step(p, s1, s2, s3, tuple)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the tuple `(pivot, succ, succ)` from the product automaton.
|
||||
*/
|
||||
StateTuple getAnEndTuple(State pivot, State succ) {
|
||||
isStartLoops(pivot, succ) and
|
||||
result = MkStateTuple(pivot, succ, succ)
|
||||
}
|
||||
|
||||
/** An implementation of a chain containing chars for use by `Concretizer`. */
|
||||
private module CharTreeImpl implements CharTree {
|
||||
class CharNode = Trace;
|
||||
|
||||
CharNode getPrev(CharNode t) { t = Step(_, _, _, result) }
|
||||
|
||||
/** Holds if `n` is used in `isPumpable`. */
|
||||
predicate isARelevantEnd(CharNode n) {
|
||||
exists(State pivot, State succ |
|
||||
isReachableFromStartTuple(pivot, succ, getAnEndTuple(pivot, succ), n, _)
|
||||
)
|
||||
}
|
||||
|
||||
string getChar(CharNode t) {
|
||||
exists(InputSymbol s1, InputSymbol s2, InputSymbol s3 | t = Step(s1, s2, s3, _) |
|
||||
result = getAThreewayIntersect(s1, s2, s3)
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if matching repetitions of `pump` can:
|
||||
* 1) Transition from `pivot` back to `pivot`.
|
||||
* 2) Transition from `pivot` to `succ`.
|
||||
* 3) Transition from `succ` to `succ`.
|
||||
*
|
||||
* From theorem 3 in the paper linked in the top of this file we can therefore conclude that
|
||||
* the regular expression has polynomial backtracking - if a rejecting suffix exists.
|
||||
*
|
||||
* This predicate is used by `SuperLinearReDoSConfiguration`, and the final results are
|
||||
* available in the `hasReDoSResult` predicate.
|
||||
*/
|
||||
predicate isPumpable(State pivot, State succ, string pump) {
|
||||
exists(StateTuple q, Trace t |
|
||||
isReachableFromStartTuple(pivot, succ, q, t, _) and
|
||||
q = getAnEndTuple(pivot, succ) and
|
||||
pump = Concretizer<CharTreeImpl>::concretize(t)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if states starting in `state` can have polynomial backtracking with the string `pump`.
|
||||
*/
|
||||
predicate isReDoSCandidate(State state, string pump) { isPumpable(_, state, pump) }
|
||||
|
||||
/**
|
||||
* Holds if repetitions of `pump` at `t` will cause polynomial backtracking.
|
||||
*/
|
||||
predicate polynomialReDoS(RegExpTerm t, string pump, string prefixMsg, RegExpTerm prev) {
|
||||
exists(State s, State pivot |
|
||||
ReDoSPruning<isReDoSCandidate/2>::hasReDoSResult(t, pump, s, prefixMsg) and
|
||||
isPumpable(pivot, s, _) and
|
||||
prev = pivot.getRepr()
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a message for why `term` can cause polynomial backtracking.
|
||||
*/
|
||||
string getReasonString(RegExpTerm term, string pump, string prefixMsg, RegExpTerm prev) {
|
||||
polynomialReDoS(term, pump, prefixMsg, prev) and
|
||||
result =
|
||||
"Strings " + prefixMsg + "with many repetitions of '" + pump +
|
||||
"' can start matching anywhere after the start of the preceeding " + prev
|
||||
}
|
||||
|
||||
/**
|
||||
* A term that may cause a regular expression engine to perform a
|
||||
* polynomial number of match attempts, relative to the input length.
|
||||
*/
|
||||
class PolynomialBackTrackingTerm extends InfiniteRepetitionQuantifier {
|
||||
string reason;
|
||||
string pump;
|
||||
string prefixMsg;
|
||||
RegExpTerm prev;
|
||||
|
||||
PolynomialBackTrackingTerm() {
|
||||
reason = getReasonString(this, pump, prefixMsg, prev) and
|
||||
// there might be many reasons for this term to have polynomial backtracking - we pick the shortest one.
|
||||
reason = min(string msg | msg = getReasonString(this, _, _, _) | msg order by msg.length(), msg)
|
||||
}
|
||||
|
||||
/**
|
||||
 * Holds if all non-empty successors to the polynomial backtracking term match the end of the line.
|
||||
*/
|
||||
predicate isAtEndLine() {
|
||||
forall(RegExpTerm succ | this.getSuccessor+() = succ and not matchesEpsilon(succ) |
|
||||
succ instanceof RegExpDollar
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the string that should be repeated to cause this regular expression to perform polynomially.
|
||||
*/
|
||||
string getPumpString() { result = pump }
|
||||
|
||||
/**
|
||||
* Gets a message for which prefix a matching string must start with for this term to cause polynomial backtracking.
|
||||
*/
|
||||
string getPrefixMessage() { result = prefixMsg }
|
||||
|
||||
/**
|
||||
* Gets a predecessor to `this`, which also loops on the pump string, and thereby causes polynomial backtracking.
|
||||
*/
|
||||
RegExpTerm getPreviousLoop() { result = prev }
|
||||
|
||||
/**
|
||||
* Gets the reason for the number of match attempts.
|
||||
*/
|
||||
string getReason() { result = reason }
|
||||
}
|
||||
@@ -13,7 +13,7 @@
|
||||
*/
|
||||
|
||||
import java
|
||||
import semmle.code.java.security.performance.PolynomialReDoSQuery
|
||||
import semmle.code.java.security.regexp.PolynomialReDoSQuery
|
||||
import DataFlow::PathGraph
|
||||
|
||||
from DataFlow::PathNode source, DataFlow::PathNode sink, PolynomialBackTrackingTerm regexp
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
*/
|
||||
|
||||
import java
|
||||
import semmle.code.java.security.performance.ExponentialBackTracking
|
||||
import semmle.code.java.security.regexp.ExponentialBackTracking
|
||||
|
||||
from RegExpTerm t, string pump, State s, string prefixMsg
|
||||
where
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import java
|
||||
import TestUtilities.InlineExpectationsTest
|
||||
import semmle.code.java.security.performance.PolynomialReDoSQuery
|
||||
import semmle.code.java.security.regexp.PolynomialReDoSQuery
|
||||
|
||||
class HasPolyRedos extends InlineExpectationsTest {
|
||||
HasPolyRedos() { this = "HasPolyRedos" }
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import java
|
||||
import TestUtilities.InlineExpectationsTest
|
||||
import semmle.code.java.security.performance.ExponentialBackTracking
|
||||
import semmle.code.java.security.regexp.ExponentialBackTracking
|
||||
import semmle.code.java.regex.regex
|
||||
|
||||
class HasExpRedos extends InlineExpectationsTest {
|
||||
|
||||
javascript/ql/lib/change-notes/2022-05-25-redos-refac.md (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
---
|
||||
category: deprecated
|
||||
---
|
||||
* The utility files previously in the `semmle.javascript.security.performance` package have been moved to the `semmle.javascript.security.regexp` package.
|
||||
The previous files still exist as deprecated aliases.
|
||||
@@ -2,196 +2,27 @@
|
||||
 * Provides predicates for reasoning about bad tag filter vulnerabilities.
|
||||
*/
|
||||
|
||||
import performance.ReDoSUtil
|
||||
import regexp.RegexpMatching
|
||||
|
||||
/**
|
||||
* A module for determining if a regexp matches a given string,
|
||||
* and reasoning about which capture groups are filled by a given string.
|
||||
* Holds if the regexp `root` should be tested against `str`.
|
||||
* Implements the `isRegexpMatchingCandidateSig` signature from `RegexpMatching`.
|
||||
* `ignorePrefix` toggles whether the regular expression should be treated as accepting any prefix if it's unanchored.
|
||||
* `testWithGroups` toggles whether it's tested which groups are filled by a given input string.
|
||||
*/
|
||||
private module RegexpMatching {
|
||||
/**
|
||||
* A class to test whether a regular expression matches a string.
|
||||
* Override this class and extend `test`/`testWithGroups` to configure which strings should be tested for acceptance by this regular expression.
|
||||
* The result can afterwards be read from the `matches` predicate.
|
||||
*
|
||||
* Strings in the `testWithGroups` predicate are also tested for which capture groups are filled by the given string.
|
||||
* The result is available in the `fillCaptureGroup` predicate.
|
||||
*/
|
||||
abstract class MatchedRegExp extends RegExpTerm {
|
||||
MatchedRegExp() { this.isRootTerm() }
|
||||
|
||||
/**
|
||||
* Holds if it should be tested whether this regular expression matches `str`.
|
||||
*
|
||||
* If `ignorePrefix` is true, then a regexp without a start anchor will be treated as if it had a start anchor.
|
||||
* E.g. a regular expression `/foo$/` will match any string that ends with "foo",
|
||||
* but if `ignorePrefix` is true, it will only match "foo".
|
||||
*/
|
||||
predicate test(string str, boolean ignorePrefix) {
|
||||
none() // maybe overridden in subclasses
|
||||
}
|
||||
|
||||
/**
|
||||
* Same as `test(..)`, but where the `fillsCaptureGroup` afterwards tells which capture groups were filled by the given string.
|
||||
*/
|
||||
predicate testWithGroups(string str, boolean ignorePrefix) {
|
||||
none() // maybe overridden in subclasses
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if this RegExp matches `str`, where `str` is either in the `test` or `testWithGroups` predicate.
|
||||
*/
|
||||
final predicate matches(string str) {
|
||||
exists(State state | state = getAState(this, str.length() - 1, str, _) |
|
||||
epsilonSucc*(state) = Accept(_)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if matching `str` may fill capture group number `g`.
|
||||
* Only holds if `str` is in the `testWithGroups` predicate.
|
||||
*/
|
||||
final predicate fillsCaptureGroup(string str, int g) {
|
||||
exists(State s |
|
||||
s = getAStateThatReachesAccept(this, _, str, _) and
|
||||
g = group(s.getRepr())
|
||||
)
|
||||
}
|
||||
}
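In plain Java terms, "fills capture group `g`" corresponds to `group(g)` being non-null after a match; a minimal sketch with an illustrative regex (not one from the library):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Illustrative only: which alternative (and hence which group) a test string exercises.
public class CaptureGroupProbe {
    public static void main(String[] args) {
        Pattern p = Pattern.compile("<(script)>|<(!--)");
        Matcher m = p.matcher("<script>");
        if (m.find()) {
            System.out.println("group 1 filled: " + (m.group(1) != null)); // true
            System.out.println("group 2 filled: " + (m.group(2) != null)); // false
        }
    }
}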
|
||||
|
||||
/**
|
||||
* Gets a state the regular expression `reg` can be in after matching the `i`th char in `str`.
|
||||
 * The regular expression is modeled as a non-deterministic finite automaton,
|
||||
* the regular expression can therefore be in multiple states after matching a character.
|
||||
*
|
||||
* It's a forward search to all possible states, and there is thus no guarantee that the state is on a path to an accepting state.
|
||||
*/
|
||||
private State getAState(MatchedRegExp reg, int i, string str, boolean ignorePrefix) {
|
||||
// start state, the -1 position before any chars have been matched
|
||||
i = -1 and
|
||||
(
|
||||
reg.test(str, ignorePrefix)
|
||||
or
|
||||
reg.testWithGroups(str, ignorePrefix)
|
||||
) and
|
||||
result.getRepr().getRootTerm() = reg and
|
||||
isStartState(result)
|
||||
private predicate isBadTagFilterCandidate(
|
||||
RootTerm root, string str, boolean ignorePrefix, boolean testWithGroups
|
||||
) {
|
||||
// the regexp must mention "<" and ">" explicitly.
|
||||
forall(string angleBracket | angleBracket = ["<", ">"] |
|
||||
any(RegExpConstant term | term.getValue().matches("%" + angleBracket + "%")).getRootTerm() =
|
||||
root
|
||||
) and
|
||||
ignorePrefix = true and
|
||||
(
|
||||
str = ["<!-- foo -->", "<!-- foo --!>", "<!- foo ->", "<foo>", "<script>"] and
|
||||
testWithGroups = true
|
||||
or
|
||||
// recursive case
|
||||
result = getAStateAfterMatching(reg, _, str, i, _, ignorePrefix)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the next state after the `prev` state from `reg`.
|
||||
* `prev` is the state after matching `fromIndex` chars in `str`,
|
||||
* and the result is the state after matching `toIndex` chars in `str`.
|
||||
*
|
||||
* This predicate is used as a step relation in the forwards search (`getAState`),
|
||||
* and also as a step relation in the later backwards search (`getAStateThatReachesAccept`).
|
||||
*/
|
||||
private State getAStateAfterMatching(
|
||||
MatchedRegExp reg, State prev, string str, int toIndex, int fromIndex, boolean ignorePrefix
|
||||
) {
|
||||
// the basic recursive case - outlined into a noopt helper to make performance work out.
|
||||
result = getAStateAfterMatchingAux(reg, prev, str, toIndex, fromIndex, ignorePrefix)
|
||||
or
|
||||
// we can skip past word boundaries if the next char is a non-word char.
|
||||
fromIndex = toIndex and
|
||||
prev.getRepr() instanceof RegExpWordBoundary and
|
||||
prev = getAState(reg, toIndex, str, ignorePrefix) and
|
||||
after(prev.getRepr()) = result and
|
||||
str.charAt(toIndex + 1).regexpMatch("\\W") // \W matches any non-word char.
|
||||
}
|
||||
|
||||
pragma[noopt]
|
||||
private State getAStateAfterMatchingAux(
|
||||
MatchedRegExp reg, State prev, string str, int toIndex, int fromIndex, boolean ignorePrefix
|
||||
) {
|
||||
prev = getAState(reg, fromIndex, str, ignorePrefix) and
|
||||
fromIndex = toIndex - 1 and
|
||||
exists(string char | char = str.charAt(toIndex) | specializedDeltaClosed(prev, char, result)) and
|
||||
not discardedPrefixStep(prev, result, ignorePrefix)
|
||||
}
|
||||
|
||||
/** Holds if a step from `prev` to `next` should be discarded when the `ignorePrefix` flag is set. */
|
||||
private predicate discardedPrefixStep(State prev, State next, boolean ignorePrefix) {
|
||||
prev = mkMatch(any(RegExpRoot r)) and
|
||||
ignorePrefix = true and
|
||||
next = prev
|
||||
}
|
||||
|
||||
// The `deltaClosed` relation specialized to the chars that exist in strings tested by a `MatchedRegExp`.
|
||||
private predicate specializedDeltaClosed(State prev, string char, State next) {
|
||||
deltaClosed(prev, specializedGetAnInputSymbolMatching(char), next)
|
||||
}
|
||||
|
||||
// The `getAnInputSymbolMatching` relation specialized to the chars that exist in strings tested by a `MatchedRegExp`.
|
||||
pragma[noinline]
|
||||
private InputSymbol specializedGetAnInputSymbolMatching(string char) {
|
||||
exists(string s, MatchedRegExp r |
|
||||
r.test(s, _)
|
||||
or
|
||||
r.testWithGroups(s, _)
|
||||
|
|
||||
char = s.charAt(_)
|
||||
) and
|
||||
result = getAnInputSymbolMatching(char)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the `i`th state on a path to the accepting state when `reg` matches `str`.
|
||||
* Starts with an accepting state as found by `getAState` and searches backwards
|
||||
* to the start state through the reachable states (as found by `getAState`).
|
||||
*
|
||||
* This predicate holds the invariant that the result state can be reached with `i` steps from a start state,
|
||||
* and an accepting state can be found after (`str.length() - 1 - i`) steps from the result.
|
||||
* The result state is therefore always on a valid path where `reg` accepts `str`.
|
||||
*
|
||||
* This predicate is only used to find which capture groups a regular expression has filled,
|
||||
* and thus the search is only performed for the strings in the `testWithGroups(..)` predicate.
|
||||
*/
|
||||
private State getAStateThatReachesAccept(
|
||||
MatchedRegExp reg, int i, string str, boolean ignorePrefix
|
||||
) {
|
||||
// base case, reaches an accepting state from the last state in `getAState(..)`
|
||||
reg.testWithGroups(str, ignorePrefix) and
|
||||
i = str.length() - 1 and
|
||||
result = getAState(reg, i, str, ignorePrefix) and
|
||||
epsilonSucc*(result) = Accept(_)
|
||||
or
|
||||
// recursive case. `next` is the next state to be matched after matching `prev`.
|
||||
// this predicate is doing a backwards search, so `prev` is the result we are looking for.
|
||||
exists(State next, State prev, int fromIndex, int toIndex |
|
||||
next = getAStateThatReachesAccept(reg, toIndex, str, ignorePrefix) and
|
||||
next = getAStateAfterMatching(reg, prev, str, toIndex, fromIndex, ignorePrefix) and
|
||||
i = fromIndex and
|
||||
result = prev
|
||||
)
|
||||
}
|
||||
|
||||
/** Gets the capture group number that `term` belongs to. */
|
||||
private int group(RegExpTerm term) {
|
||||
exists(RegExpGroup grp | grp.getNumber() = result | term.getParent*() = grp)
|
||||
}
|
||||
}
|
||||
|
||||
/** A class to test whether a regular expression matches certain HTML tags. */
|
||||
class HtmlMatchingRegExp extends RegexpMatching::MatchedRegExp {
|
||||
HtmlMatchingRegExp() {
|
||||
// the regexp must mention "<" and ">" explicitly.
|
||||
forall(string angleBracket | angleBracket = ["<", ">"] |
|
||||
any(RegExpConstant term | term.getValue().matches("%" + angleBracket + "%")).getRootTerm() =
|
||||
this
|
||||
)
|
||||
}
|
||||
|
||||
override predicate testWithGroups(string str, boolean ignorePrefix) {
|
||||
ignorePrefix = true and
|
||||
str = ["<!-- foo -->", "<!-- foo --!>", "<!- foo ->", "<foo>", "<script>"]
|
||||
}
|
||||
|
||||
override predicate test(string str, boolean ignorePrefix) {
|
||||
ignorePrefix = true and
|
||||
str =
|
||||
[
|
||||
"<!-- foo -->", "<!- foo ->", "<!-- foo --!>", "<!-- foo\n -->", "<script>foo</script>",
|
||||
@@ -200,7 +31,23 @@ class HtmlMatchingRegExp extends RegexpMatching::MatchedRegExp {
|
||||
"<script src='foo'></script>", "<SCRIPT>foo</SCRIPT>", "<script\tsrc=\"foo\"/>",
|
||||
"<script\tsrc='foo'></script>", "<sCrIpT>foo</ScRiPt>", "<script src=\"foo\">foo</script >",
|
||||
"<script src=\"foo\">foo</script foo=\"bar\">", "<script src=\"foo\">foo</script\t\n bar>"
|
||||
]
|
||||
] and
|
||||
testWithGroups = false
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* A regexp that matches some string from the `isBadTagFilterCandidate` predicate.
|
||||
*/
|
||||
class HtmlMatchingRegExp extends RootTerm {
|
||||
HtmlMatchingRegExp() { RegexpMatching<isBadTagFilterCandidate/4>::matches(this, _) }
|
||||
|
||||
/** Holds if this regexp matches `str`, where `str` is one of the strings from `isBadTagFilterCandidate`. */
|
||||
predicate matches(string str) { RegexpMatching<isBadTagFilterCandidate/4>::matches(this, str) }
|
||||
|
||||
/** Holds if this regexp fills capture group `g` when matching `str`, where `str` is one of the strings from `isBadTagFilterCandidate`. */
|
||||
predicate fillsCaptureGroup(string str, int g) {
|
||||
RegexpMatching<isBadTagFilterCandidate/4>::fillsCaptureGroup(this, str, g)
|
||||
}
|
||||
}
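As a concrete illustration of why strings like `<!-- foo --!>` are among the candidates (the filter regex below is hypothetical, not from the diff): HTML parsers also treat `--!>` as closing a comment, so a filter that only recognises `-->` lets that input through.

import java.util.regex.Pattern;

// A naive comment-stripping filter misses the "--!>" closing variant.
public class BadCommentFilterDemo {
    public static void main(String[] args) {
        Pattern naiveCommentFilter = Pattern.compile("<!--[\\s\\S]*?-->");
        System.out.println(naiveCommentFilter.matcher("<!-- foo -->").find());  // true: filtered
        System.out.println(naiveCommentFilter.matcher("<!-- foo --!>").find()); // false: slips through
    }
}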
|
||||
|
||||
|
||||
@@ -1,374 +1,4 @@
|
||||
/**
|
||||
* This library implements the analysis described in the following two papers:
|
||||
*
|
||||
* James Kirrage, Asiri Rathnayake, Hayo Thielecke: Static Analysis for
|
||||
* Regular Expression Denial-of-Service Attacks. NSS 2013.
|
||||
* (http://www.cs.bham.ac.uk/~hxt/research/reg-exp-sec.pdf)
|
||||
* Asiri Rathnayake, Hayo Thielecke: Static Analysis for Regular Expression
|
||||
* Exponential Runtime via Substructural Logics. 2014.
|
||||
* (https://www.cs.bham.ac.uk/~hxt/research/redos_full.pdf)
|
||||
*
|
||||
* The basic idea is to search for overlapping cycles in the NFA, that is,
|
||||
* states `q` such that there are two distinct paths from `q` to itself
|
||||
* that consume the same word `w`.
|
||||
*
|
||||
* For any such state `q`, an attack string can be constructed as follows:
|
||||
* concatenate a prefix `v` that takes the NFA to `q` with `n` copies of
|
||||
* the word `w` that leads back to `q` along two different paths, followed
|
||||
* by a suffix `x` that is _not_ accepted in state `q`. A backtracking
|
||||
* implementation will need to explore at least 2^n different ways of going
|
||||
* from `q` back to itself while trying to match the `n` copies of `w`
|
||||
* before finally giving up.
|
||||
*
|
||||
* Now in order to identify overlapping cycles, all we have to do is find
|
||||
* pumpable forks, that is, states `q` that can transition to two different
|
||||
* states `r1` and `r2` on the same input symbol `c`, such that there are
|
||||
* paths from both `r1` and `r2` to `q` that consume the same word. The latter
|
||||
* condition is equivalent to saying that `(q, q)` is reachable from `(r1, r2)`
|
||||
* in the product NFA.
|
||||
*
|
||||
* This is what the library does. It makes a simple attempt to construct a
|
||||
* prefix `v` leading into `q`, but only to improve the alert message.
|
||||
* And the library tries to prove the existence of a suffix that ensures
|
||||
* rejection. This check might fail, which can cause false positives.
|
||||
*
|
||||
* Finally, sometimes it depends on the translation whether the NFA generated
|
||||
* for a regular expression has a pumpable fork or not. We implement one
|
||||
* particular translation, which may result in false positives or negatives
|
||||
* relative to some particular JavaScript engine.
|
||||
*
|
||||
* More precisely, the library constructs an NFA from a regular expression `r`
|
||||
* as follows:
|
||||
*
|
||||
* * Every sub-term `t` gives rise to an NFA state `Match(t,i)`, representing
|
||||
* the state of the automaton before attempting to match the `i`th character in `t`.
|
||||
* * There is one accepting state `Accept(r)`.
|
||||
* * There is a special `AcceptAnySuffix(r)` state, which accepts any suffix string
|
||||
* by using an epsilon transition to `Accept(r)` and an any transition to itself.
|
||||
* * Transitions between states may be labelled with epsilon, or an abstract
|
||||
* input symbol.
|
||||
* * Each abstract input symbol represents a set of concrete input characters:
|
||||
* either a single character, a set of characters represented by a
|
||||
* character class, or the set of all characters.
|
||||
* * The product automaton is constructed lazily, starting with pair states
|
||||
* `(q, q)` where `q` is a fork, and proceeding along an over-approximate
|
||||
* step relation.
|
||||
* * The over-approximate step relation allows transitions along pairs of
|
||||
* abstract input symbols where the symbols have overlap in the characters they accept.
|
||||
* * Once a trace of pairs of abstract input symbols that leads from a fork
|
||||
* back to itself has been identified, we attempt to construct a concrete
|
||||
* string corresponding to it, which may fail.
|
||||
* * Lastly we ensure that any state reached by repeating `n` copies of `w` has
|
||||
 * a suffix `x` (possibly empty) that is most likely __not__ accepted.
|
||||
*/
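A hedged Java illustration of the `v + w^n + x` construction described above; the regex `^(a|aa)+$` is an illustrative pumpable fork, not one taken from this library.

import java.util.regex.Pattern;

// The group in ^(a|aa)+$ can consume "aa" either as one "aa" iteration or as
// two "a" iterations, so with prefix v = "", pump w = "aa" and rejecting
// suffix x = "b" a backtracking engine explores exponentially many splits.
public class PumpableForkDemo {
    public static void main(String[] args) {
        Pattern p = Pattern.compile("^(a|aa)+$");
        String attack = "aa".repeat(20) + "b"; // v + w^n + x with n = 20
        long t0 = System.nanoTime();
        System.out.println(p.matcher(attack).matches()); // false, after a long wait
        System.out.println((System.nanoTime() - t0) / 1_000_000 + " ms");
    }
}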
|
||||
/** DEPRECATED. Import `semmle.javascript.security.regexp.ExponentialBackTracking` instead. */
|
||||
|
||||
import ReDoSUtil
|
||||
|
||||
/**
|
||||
* Holds if state `s` might be inside a backtracking repetition.
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate stateInsideBacktracking(State s) {
|
||||
s.getRepr().getParent*() instanceof MaybeBacktrackingRepetition
|
||||
}
|
||||
|
||||
/**
|
||||
 * An infinitely repeating quantifier that might backtrack.
|
||||
*/
|
||||
private class MaybeBacktrackingRepetition extends InfiniteRepetitionQuantifier {
|
||||
MaybeBacktrackingRepetition() {
|
||||
exists(RegExpTerm child |
|
||||
child instanceof RegExpAlt or
|
||||
child instanceof RegExpQuantifier
|
||||
|
|
||||
child.getParent+() = this
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A state in the product automaton.
|
||||
*/
|
||||
private newtype TStatePair =
|
||||
/**
|
||||
* We lazily only construct those states that we are actually
|
||||
* going to need: `(q, q)` for every fork state `q`, and any
|
||||
* pair of states that can be reached from a pair that we have
|
||||
* already constructed. To cut down on the number of states,
|
||||
* we only represent states `(q1, q2)` where `q1` is lexicographically
|
||||
* no bigger than `q2`.
|
||||
*
|
||||
* States are only constructed if both states in the pair are
|
||||
* inside a repetition that might backtrack.
|
||||
*/
|
||||
MkStatePair(State q1, State q2) {
|
||||
isFork(q1, _, _, _, _) and q2 = q1
|
||||
or
|
||||
(step(_, _, _, q1, q2) or step(_, _, _, q2, q1)) and
|
||||
rankState(q1) <= rankState(q2)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a unique number for a `state`.
|
||||
* Is used to create an ordering of states, where states with the same `toString()` will be ordered differently.
|
||||
*/
|
||||
private int rankState(State state) {
|
||||
state =
|
||||
rank[result](State s, Location l |
|
||||
l = s.getRepr().getLocation()
|
||||
|
|
||||
s order by l.getStartLine(), l.getStartColumn(), s.toString()
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* A state in the product automaton.
|
||||
*/
|
||||
private class StatePair extends TStatePair {
|
||||
State q1;
|
||||
State q2;
|
||||
|
||||
StatePair() { this = MkStatePair(q1, q2) }
|
||||
|
||||
/** Gets a textual representation of this element. */
|
||||
string toString() { result = "(" + q1 + ", " + q2 + ")" }
|
||||
|
||||
/** Gets the first component of the state pair. */
|
||||
State getLeft() { result = q1 }
|
||||
|
||||
/** Gets the second component of the state pair. */
|
||||
State getRight() { result = q2 }
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds for `(fork, fork)` state pairs when `isFork(fork, _, _, _, _)` holds.
|
||||
*
|
||||
* Used in `statePairDistToFork`
|
||||
*/
|
||||
private predicate isStatePairFork(StatePair p) {
|
||||
exists(State fork | p = MkStatePair(fork, fork) and isFork(fork, _, _, _, _))
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to the corresponding
|
||||
* components of `r`.
|
||||
*
|
||||
* Used in `statePairDistToFork`
|
||||
*/
|
||||
private predicate reverseStep(StatePair r, StatePair q) { step(q, _, _, r) }
|
||||
|
||||
/**
|
||||
* Gets the minimum length of a path from `q` to `r` in the
|
||||
* product automaton.
|
||||
*/
|
||||
private int statePairDistToFork(StatePair q, StatePair r) =
|
||||
shortestDistances(isStatePairFork/1, reverseStep/2)(r, q, result)
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from `q` to `r1` and from `q` to `r2`
|
||||
* labelled with `s1` and `s2`, respectively, where `s1` and `s2` do not
|
||||
* trivially have an empty intersection.
|
||||
*
|
||||
* This predicate only holds for states associated with regular expressions
|
||||
* that have at least one repetition quantifier in them (otherwise the
|
||||
* expression cannot be vulnerable to ReDoS attacks anyway).
|
||||
*/
|
||||
pragma[noopt]
|
||||
private predicate isFork(State q, InputSymbol s1, InputSymbol s2, State r1, State r2) {
|
||||
stateInsideBacktracking(q) and
|
||||
exists(State q1, State q2 |
|
||||
q1 = epsilonSucc*(q) and
|
||||
delta(q1, s1, r1) and
|
||||
q2 = epsilonSucc*(q) and
|
||||
delta(q2, s2, r2) and
|
||||
// Use pragma[noopt] to prevent intersect(s1,s2) from being the starting point of the join.
|
||||
// From (s1,s2) it would find a huge number of intermediate state pairs (q1,q2) originating from different literals,
|
||||
// and discover at the end that no `q` can reach both `q1` and `q2` by epsilon transitions.
|
||||
exists(intersect(s1, s2))
|
||||
|
|
||||
s1 != s2
|
||||
or
|
||||
r1 != r2
|
||||
or
|
||||
r1 = r2 and q1 != q2
|
||||
or
|
||||
// If q can reach itself by epsilon transitions, then there are two distinct paths to the q1/q2 state:
|
||||
// one that uses the loop and one that doesn't. The engine will separately attempt to match with each path,
|
||||
// despite ending in the same state. The "fork" thus arises from the choice of whether to use the loop or not.
|
||||
// To avoid every state in the loop becoming a fork state,
|
||||
// we arbitrarily pick the InfiniteRepetitionQuantifier state as the canonical fork state for the loop
|
||||
// (every epsilon-loop must contain such a state).
|
||||
//
|
||||
 * We additionally require that there exists another InfiniteRepetitionQuantifier `mid` on the path from `q` to itself.
|
||||
// This is done to avoid flagging regular expressions such as `/(a?)*b/` - that only has polynomial runtime, and is detected by `js/polynomial-redos`.
|
||||
 * The below code is therefore a heuristic that only flags regular expressions such as `/(a*)*b/`,
|
||||
 * and does not flag regular expressions such as `/(a?b?)*c/`, but the latter pattern is not used frequently.
|
||||
r1 = r2 and
|
||||
q1 = q2 and
|
||||
epsilonSucc+(q) = q and
|
||||
exists(RegExpTerm term | term = q.getRepr() | term instanceof InfiniteRepetitionQuantifier) and
|
||||
// One of the mid states is an infinite quantifier itself
|
||||
exists(State mid, RegExpTerm term |
|
||||
mid = epsilonSucc+(q) and
|
||||
term = mid.getRepr() and
|
||||
term instanceof InfiniteRepetitionQuantifier and
|
||||
q = epsilonSucc+(mid) and
|
||||
not mid = q
|
||||
)
|
||||
) and
|
||||
stateInsideBacktracking(r1) and
|
||||
stateInsideBacktracking(r2)
|
||||
}
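A rough Java comparison of the two shapes the heuristic above distinguishes (timings are indicative only): `(a*)*b` is flagged here, while `(a?)*b` only degrades polynomially and is left to the polynomial query.

import java.util.regex.Pattern;

// Indicative timing contrast between a nested infinite repetition and an
// optional-only repetition on the same almost-matching input.
public class ForkHeuristicDemo {
    static long timeMatch(String regex, String input) {
        Pattern p = Pattern.compile(regex);
        long t0 = System.nanoTime();
        p.matcher(input).matches();
        return (System.nanoTime() - t0) / 1_000_000;
    }

    public static void main(String[] args) {
        String input = "a".repeat(28) + "!";
        System.out.println("(a*)*b : " + timeMatch("(a*)*b", input) + " ms"); // seconds
        System.out.println("(a?)*b : " + timeMatch("(a?)*b", input) + " ms"); // milliseconds
    }
}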
|
||||
|
||||
/**
|
||||
* Gets the state pair `(q1, q2)` or `(q2, q1)`; note that only
|
||||
* one or the other is defined.
|
||||
*/
|
||||
private StatePair mkStatePair(State q1, State q2) {
|
||||
result = MkStatePair(q1, q2) or result = MkStatePair(q2, q1)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to the corresponding
|
||||
* components of `r` labelled with `s1` and `s2`, respectively.
|
||||
*/
|
||||
private predicate step(StatePair q, InputSymbol s1, InputSymbol s2, StatePair r) {
|
||||
exists(State r1, State r2 | step(q, s1, s2, r1, r2) and r = mkStatePair(r1, r2))
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to `r1` and `r2`
|
||||
* labelled with `s1` and `s2`, respectively.
|
||||
*
|
||||
* We only consider transitions where the resulting states `(r1, r2)` are both
|
||||
* inside a repetition that might backtrack.
|
||||
*/
|
||||
pragma[noopt]
|
||||
private predicate step(StatePair q, InputSymbol s1, InputSymbol s2, State r1, State r2) {
|
||||
exists(State q1, State q2 | q.getLeft() = q1 and q.getRight() = q2 |
|
||||
deltaClosed(q1, s1, r1) and
|
||||
deltaClosed(q2, s2, r2) and
|
||||
// use noopt to force the join on `intersect` to happen last.
|
||||
exists(intersect(s1, s2))
|
||||
) and
|
||||
stateInsideBacktracking(r1) and
|
||||
stateInsideBacktracking(r2)
|
||||
}
|
||||
|
||||
private newtype TTrace =
|
||||
Nil() or
|
||||
Step(InputSymbol s1, InputSymbol s2, TTrace t) { isReachableFromFork(_, _, s1, s2, t, _) }
|
||||
|
||||
/**
|
||||
* A list of pairs of input symbols that describe a path in the product automaton
|
||||
* starting from some fork state.
|
||||
*/
|
||||
private class Trace extends TTrace {
|
||||
/** Gets a textual representation of this element. */
|
||||
string toString() {
|
||||
this = Nil() and result = "Nil()"
|
||||
or
|
||||
exists(InputSymbol s1, InputSymbol s2, Trace t | this = Step(s1, s2, t) |
|
||||
result = "Step(" + s1 + ", " + s2 + ", " + t + ")"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `r` is reachable from `(fork, fork)` under input `w`, and there is
|
||||
* a path from `r` back to `(fork, fork)` with `rem` steps.
|
||||
*/
|
||||
private predicate isReachableFromFork(State fork, StatePair r, Trace w, int rem) {
|
||||
exists(InputSymbol s1, InputSymbol s2, Trace v |
|
||||
isReachableFromFork(fork, r, s1, s2, v, rem) and
|
||||
w = Step(s1, s2, v)
|
||||
)
|
||||
}
|
||||
|
||||
private predicate isReachableFromFork(
|
||||
State fork, StatePair r, InputSymbol s1, InputSymbol s2, Trace v, int rem
|
||||
) {
|
||||
// base case
|
||||
exists(State q1, State q2 |
|
||||
isFork(fork, s1, s2, q1, q2) and
|
||||
r = MkStatePair(q1, q2) and
|
||||
v = Nil() and
|
||||
rem = statePairDistToFork(r, MkStatePair(fork, fork))
|
||||
)
|
||||
or
|
||||
// recursive case
|
||||
exists(StatePair p |
|
||||
isReachableFromFork(fork, p, v, rem + 1) and
|
||||
step(p, s1, s2, r) and
|
||||
rem = statePairDistToFork(r, MkStatePair(fork, fork))
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a state in the product automaton from which `(fork, fork)` is
|
||||
* reachable in zero or more epsilon transitions.
|
||||
*/
|
||||
private StatePair getAForkPair(State fork) {
|
||||
isFork(fork, _, _, _, _) and
|
||||
result = MkStatePair(epsilonPred*(fork), epsilonPred*(fork))
|
||||
}
|
||||
|
||||
private predicate hasSuffix(Trace suffix, Trace t, int i) {
|
||||
// Declaring `t` to be a `RelevantTrace` currently causes a redundant check in the
|
||||
// recursive case, so instead we check it explicitly here.
|
||||
t instanceof RelevantTrace and
|
||||
i = 0 and
|
||||
suffix = t
|
||||
or
|
||||
hasSuffix(Step(_, _, suffix), t, i - 1)
|
||||
}
|
||||
|
||||
pragma[noinline]
|
||||
private predicate hasTuple(InputSymbol s1, InputSymbol s2, Trace t, int i) {
|
||||
hasSuffix(Step(s1, s2, _), t, i)
|
||||
}
|
||||
|
||||
private class RelevantTrace extends Trace, Step {
|
||||
RelevantTrace() {
|
||||
exists(State fork, StatePair q |
|
||||
isReachableFromFork(fork, q, this, _) and
|
||||
q = getAForkPair(fork)
|
||||
)
|
||||
}
|
||||
|
||||
pragma[noinline]
|
||||
private string intersect(int i) {
|
||||
exists(InputSymbol s1, InputSymbol s2 |
|
||||
hasTuple(s1, s2, this, i) and
|
||||
result = intersect(s1, s2)
|
||||
)
|
||||
}
|
||||
|
||||
/** Gets a string corresponding to this trace. */
|
||||
// the pragma is needed for the case where `intersect(s1, s2)` has multiple values,
|
||||
// not for recursion
|
||||
language[monotonicAggregates]
|
||||
string concretise() {
|
||||
result = strictconcat(int i | hasTuple(_, _, this, i) | this.intersect(i) order by i desc)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `fork` is a pumpable fork with word `w`.
|
||||
*/
|
||||
private predicate isPumpable(State fork, string w) {
|
||||
exists(StatePair q, RelevantTrace t |
|
||||
isReachableFromFork(fork, q, t, _) and
|
||||
q = getAForkPair(fork) and
|
||||
w = t.concretise()
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* An instantiation of `ReDoSConfiguration` for exponential backtracking.
|
||||
*/
|
||||
class ExponentialReDoSConfiguration extends ReDoSConfiguration {
|
||||
ExponentialReDoSConfiguration() { this = "ExponentialReDoSConfiguration" }
|
||||
|
||||
override predicate isReDoSCandidate(State state, string pump) { isPumpable(state, pump) }
|
||||
}
|
||||
deprecated import semmle.javascript.security.regexp.ExponentialBackTracking as Dep
|
||||
import Dep
|
||||
|
||||
@@ -1,42 +1,7 @@
|
||||
/**
|
||||
* Provides a taint tracking configuration for reasoning about
|
||||
* polynomial regular expression denial-of-service attacks.
|
||||
*
|
||||
* Note, for performance reasons: only import this file if
|
||||
* `PolynomialReDoS::Configuration` is needed, otherwise
|
||||
* `PolynomialReDoSCustomizations` should be imported instead.
|
||||
*/
|
||||
/** DEPRECATED. Import `PolynomialReDoSQuery` instead. */
|
||||
|
||||
import javascript
|
||||
private import semmle.javascript.security.regexp.PolynomialReDoSQuery as PolynomialReDoSQuery // ignore-query-import
|
||||
|
||||
module PolynomialReDoS {
|
||||
import PolynomialReDoSCustomizations::PolynomialReDoS
|
||||
|
||||
class Configuration extends TaintTracking::Configuration {
|
||||
Configuration() { this = "PolynomialReDoS" }
|
||||
|
||||
override predicate isSource(DataFlow::Node source) { source instanceof Source }
|
||||
|
||||
override predicate isSink(DataFlow::Node sink) { sink instanceof Sink }
|
||||
|
||||
override predicate isSanitizerGuard(TaintTracking::SanitizerGuardNode node) {
|
||||
super.isSanitizerGuard(node) or
|
||||
node instanceof LengthGuard
|
||||
}
|
||||
|
||||
override predicate isSanitizer(DataFlow::Node node) {
|
||||
super.isSanitizer(node) or
|
||||
node instanceof Sanitizer
|
||||
}
|
||||
|
||||
override predicate hasFlowPath(DataFlow::SourcePathNode source, DataFlow::SinkPathNode sink) {
|
||||
super.hasFlowPath(source, sink) and
|
||||
// require that there is a path without unmatched return steps
|
||||
DataFlow::hasPathWithoutUnmatchedReturn(source, sink)
|
||||
}
|
||||
|
||||
override predicate isAdditionalTaintStep(DataFlow::Node pred, DataFlow::Node succ) {
|
||||
DataFlow::localFieldStep(pred, succ)
|
||||
}
|
||||
}
|
||||
}
|
||||
/** DEPRECATED. Import `PolynomialReDoSQuery` instead. */
|
||||
deprecated module PolynomialReDoS = PolynomialReDoSQuery;
|
||||
|
||||
@@ -1,147 +1,4 @@
|
||||
/**
|
||||
* Provides default sources, sinks and sanitizers for reasoning about
|
||||
* polynomial regular expression denial-of-service attacks, as well
|
||||
* as extension points for adding your own.
|
||||
*/
|
||||
/** DEPRECATED. Import `semmle.javascript.security.regexp.PolynomialReDoSCustomizations` instead. */
|
||||
|
||||
import javascript
|
||||
import SuperlinearBackTracking
|
||||
|
||||
module PolynomialReDoS {
|
||||
/**
|
||||
* A data flow source node for polynomial regular expression denial-of-service vulnerabilities.
|
||||
*/
|
||||
abstract class Source extends DataFlow::Node {
|
||||
/**
|
||||
 * Gets the kind of source that is being accessed.
|
||||
*
|
||||
* Is either a kind from `HTTP::RequestInputAccess::getKind()`, or "library".
|
||||
*/
|
||||
abstract string getKind();
|
||||
|
||||
/**
|
||||
* Gets a string that describes the source.
|
||||
* For use in the alert message.
|
||||
*/
|
||||
string describe() { result = "a user-provided value" }
|
||||
}
|
||||
|
||||
/**
|
||||
* A data flow sink node for polynomial regular expression denial-of-service vulnerabilities.
|
||||
*/
|
||||
abstract class Sink extends DataFlow::Node {
|
||||
abstract RegExpTerm getRegExp();
|
||||
|
||||
/**
|
||||
* Gets the node to highlight in the alert message.
|
||||
*/
|
||||
DataFlow::Node getHighlight() { result = this }
|
||||
}
|
||||
|
||||
/**
|
||||
* A sanitizer for polynomial regular expression denial-of-service vulnerabilities.
|
||||
*/
|
||||
abstract class Sanitizer extends DataFlow::Node { }
|
||||
|
||||
/**
|
||||
* A remote input to a server, seen as a source for polynomial
|
||||
* regular expression denial-of-service vulnerabilities.
|
||||
*/
|
||||
class RequestInputAccessAsSource extends Source instanceof HTTP::RequestInputAccess {
|
||||
override string getKind() { result = HTTP::RequestInputAccess.super.getKind() }
|
||||
}
|
||||
|
||||
/**
|
||||
* A use of a superlinear backtracking term, seen as a sink for polynomial
|
||||
* regular expression denial-of-service vulnerabilities.
|
||||
*/
|
||||
class PolynomialBackTrackingTermUse extends Sink {
|
||||
PolynomialBackTrackingTerm term;
|
||||
DataFlow::MethodCallNode mcn;
|
||||
|
||||
PolynomialBackTrackingTermUse() {
|
||||
exists(DataFlow::Node regexp, string name |
|
||||
term.getRootTerm() = RegExp::getRegExpFromNode(regexp)
|
||||
|
|
||||
this = mcn.getArgument(0) and
|
||||
regexp = mcn.getReceiver() and
|
||||
name = ["match", "split", "matchAll", "replace", "replaceAll", "search"]
|
||||
or
|
||||
this = mcn.getReceiver() and
|
||||
regexp = mcn.getArgument(0) and
|
||||
(name = "test" or name = "exec")
|
||||
)
|
||||
}
|
||||
|
||||
override RegExpTerm getRegExp() { result = term }
|
||||
|
||||
override DataFlow::Node getHighlight() { result = mcn }
|
||||
}
|
||||
|
||||
/**
|
||||
* An operation that limits the length of a string, seen as a sanitizer.
|
||||
*/
|
||||
class StringLengthLimiter extends Sanitizer {
|
||||
StringLengthLimiter() {
|
||||
this.(StringReplaceCall).isGlobal() and
|
||||
// not lone char classes - they don't remove any repeated pattern.
|
||||
not exists(RegExpTerm root | root = this.(StringReplaceCall).getRegExp().getRoot() |
|
||||
isCharClassLike(root)
|
||||
)
|
||||
or
|
||||
this.(DataFlow::MethodCallNode).getMethodName() = StringOps::substringMethodName()
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `term` matches a set of strings of length 1.
|
||||
*/
|
||||
predicate isCharClassLike(RegExpTerm term) {
|
||||
term instanceof RegExpCharacterClass
|
||||
or
|
||||
term instanceof RegExpCharacterClassEscape
|
||||
or
|
||||
term.(RegExpConstant).getValue().length() = 1
|
||||
or
|
||||
exists(RegExpAlt alt | term = alt |
|
||||
forall(RegExpTerm choice | choice = alt.getAlternative() | isCharClassLike(choice))
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
 * A check on the length of a string, seen as a sanitizer guard.
|
||||
*/
|
||||
class LengthGuard extends TaintTracking::SanitizerGuardNode, DataFlow::ValueNode {
|
||||
DataFlow::Node input;
|
||||
boolean polarity;
|
||||
|
||||
LengthGuard() {
|
||||
exists(RelationalComparison cmp, DataFlow::PropRead length |
|
||||
this.asExpr() = cmp and
|
||||
length.accesses(input, "length")
|
||||
|
|
||||
length.flowsTo(cmp.getLesserOperand().flow()) and polarity = true
|
||||
or
|
||||
length.flowsTo(cmp.getGreaterOperand().flow()) and polarity = false
|
||||
)
|
||||
}
|
||||
|
||||
override predicate sanitizes(boolean outcome, Expr e) {
|
||||
outcome = polarity and
|
||||
e = input.asExpr()
|
||||
}
|
||||
}
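The shape of check this guard recognises, sketched in Java for consistency with the other examples (the JavaScript equivalent would compare `input.length` before calling `test` or `match`); the bound 256 is an arbitrary illustrative value.

import java.util.regex.Pattern;

// A length comparison dominating the branch in which the match runs; inputs
// that reach the regex are bounded, so superlinear backtracking stays cheap.
class LengthGuardShape {
    private static final Pattern P = Pattern.compile("^\\d*5\\w*$");

    static boolean safeCheck(String input) {
        if (input.length() < 256) {            // the guard
            return P.matcher(input).matches(); // guarded use of the regex
        }
        return false;                          // long inputs never reach the regex
    }
}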
|
||||
|
||||
private import semmle.javascript.PackageExports as Exports
|
||||
|
||||
/**
|
||||
* A parameter of an exported function, seen as a source for polynomial-redos.
|
||||
*/
|
||||
class ExternalInputSource extends Source, DataFlow::SourceNode {
|
||||
ExternalInputSource() { this = Exports::getALibraryInputParameter() }
|
||||
|
||||
override string getKind() { result = "library" }
|
||||
|
||||
override string describe() { result = "library input" }
|
||||
}
|
||||
}
|
||||
deprecated import semmle.javascript.security.regexp.PolynomialReDoSCustomizations as Dep
|
||||
import Dep
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,454 +1,4 @@
|
||||
/**
|
||||
* Provides classes for working with regular expressions that can
|
||||
* perform backtracking in superlinear time.
|
||||
*/
|
||||
/** DEPRECATED. Import `semmle.javascript.security.regexp.SuperlinearBackTracking` instead. */
|
||||
|
||||
import ReDoSUtil
|
||||
|
||||
/*
|
||||
* This module implements the analysis described in the paper:
|
||||
* Valentin Wustholz, Oswaldo Olivo, Marijn J. H. Heule, and Isil Dillig:
|
||||
* Static Detection of DoS Vulnerabilities in
|
||||
* Programs that use Regular Expressions
|
||||
* (Extended Version).
|
||||
* (https://arxiv.org/pdf/1701.04045.pdf)
|
||||
*
|
||||
* Theorem 3 from the paper describes the basic idea.
|
||||
*
|
||||
* The following explains the idea using variables and predicate names that are used in the implementation:
|
||||
* We consider a pair of repetitions, which we will call `pivot` and `succ`.
|
||||
*
|
||||
* We create a product automaton of 3-tuples of states (see `StateTuple`).
|
||||
* There exists a transition `(a,b,c) -> (d,e,f)` in the product automaton
|
||||
* iff there exists three transitions in the NFA `a->d, b->e, c->f` where those three
|
||||
* transitions all match a shared character `char`. (see `getAThreewayIntersect`)
|
||||
*
|
||||
* We start a search in the product automaton at `(pivot, pivot, succ)`,
|
||||
* and search for a series of transitions (a `Trace`), such that we end
|
||||
* at `(pivot, succ, succ)` (see `isReachableFromStartTuple`).
|
||||
*
|
||||
* For example, consider the regular expression `/^\d*5\w*$/`.
|
||||
* The search will start at the tuple `(\d*, \d*, \w*)` and search
|
||||
* for a path to `(\d*, \w*, \w*)`.
|
||||
* This path exists, and consists of a single transition in the product automaton,
|
||||
* where the three corresponding NFA edges all match the character `"5"`.
|
||||
*
|
||||
 * The start-state in the NFA has an any-transition to itself; this allows us to
|
||||
* flag regular expressions such as `/a*$/` - which does not have a start anchor -
|
||||
* and can thus start matching anywhere.
|
||||
*
|
||||
* The implementation is not perfect.
|
||||
* It has the same suffix detection issue as the `js/redos` query, which can cause false positives.
|
||||
* It also doesn't find all transitions in the product automaton, which can cause false negatives.
|
||||
*/
|
||||
|
||||
/**
|
||||
 * An instantiation of `ReDoSConfiguration` for superlinear ReDoS.
|
||||
*/
|
||||
class SuperLinearReDoSConfiguration extends ReDoSConfiguration {
|
||||
SuperLinearReDoSConfiguration() { this = "SuperLinearReDoSConfiguration" }
|
||||
|
||||
override predicate isReDoSCandidate(State state, string pump) { isPumpable(_, state, pump) }
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets any root (start) state of a regular expression.
|
||||
*/
|
||||
private State getRootState() { result = mkMatch(any(RegExpRoot r)) }
|
||||
|
||||
private newtype TStateTuple =
|
||||
MkStateTuple(State q1, State q2, State q3) {
|
||||
// starts at (pivot, pivot, succ)
|
||||
isStartLoops(q1, q3) and q1 = q2
|
||||
or
|
||||
step(_, _, _, _, q1, q2, q3) and FeasibleTuple::isFeasibleTuple(q1, q2, q3)
|
||||
}
|
||||
|
||||
/**
|
||||
* A state in the product automaton.
|
||||
* The product automaton contains 3-tuples of states.
|
||||
*
|
||||
* We lazily only construct those states that we are actually
|
||||
* going to need.
|
||||
* Either a start state `(pivot, pivot, succ)`, or a state
|
||||
* where there exists a transition from an already existing state.
|
||||
*
|
||||
* The exponential variant of this query (`js/redos`) uses an optimization
|
||||
* trick where `q1 <= q2`. This trick cannot be used here as the order
|
||||
* of the elements matter.
|
||||
*/
|
||||
class StateTuple extends TStateTuple {
|
||||
State q1;
|
||||
State q2;
|
||||
State q3;
|
||||
|
||||
StateTuple() { this = MkStateTuple(q1, q2, q3) }
|
||||
|
||||
/**
|
||||
* Gets a string representation of this tuple.
|
||||
*/
|
||||
string toString() { result = "(" + q1 + ", " + q2 + ", " + q3 + ")" }
|
||||
|
||||
/**
|
||||
* Holds if this tuple is `(r1, r2, r3)`.
|
||||
*/
|
||||
pragma[noinline]
|
||||
predicate isTuple(State r1, State r2, State r3) { r1 = q1 and r2 = q2 and r3 = q3 }
|
||||
}
|
||||
|
||||
/**
|
||||
* A module for determining feasible tuples for the product automaton.
|
||||
*
|
||||
* The implementation is split into many predicates for performance reasons.
|
||||
*/
|
||||
private module FeasibleTuple {
|
||||
/**
|
||||
* Holds if the tuple `(r1, r2, r3)` might be on path from a start-state to an end-state in the product automaton.
|
||||
*/
|
||||
pragma[inline]
|
||||
predicate isFeasibleTuple(State r1, State r2, State r3) {
|
||||
// The first element is either inside a repetition (or the start state itself)
|
||||
isRepetitionOrStart(r1) and
|
||||
// The last element is inside a repetition
|
||||
stateInsideRepetition(r3) and
|
||||
// The states are reachable in the NFA in the order r1 -> r2 -> r3
|
||||
delta+(r1) = r2 and
|
||||
delta+(r2) = r3 and
|
||||
// The first element can reach a beginning (the "pivot" state in a `(pivot, succ)` pair).
|
||||
canReachABeginning(r1) and
|
||||
// The last element can reach a target (the "succ" state in a `(pivot, succ)` pair).
|
||||
canReachATarget(r3)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `s` is either inside a repetition, or is the start state (which is a repetition).
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate isRepetitionOrStart(State s) { stateInsideRepetition(s) or s = getRootState() }
|
||||
|
||||
/**
|
||||
* Holds if state `s` might be inside a backtracking repetition.
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate stateInsideRepetition(State s) {
|
||||
s.getRepr().getParent*() instanceof InfiniteRepetitionQuantifier
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there exists a path in the NFA from `s` to a "pivot" state
|
||||
* (from a `(pivot, succ)` pair that starts the search).
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate canReachABeginning(State s) {
|
||||
delta+(s) = any(State pivot | isStartLoops(pivot, _))
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there exists a path in the NFA from `s` to a "succ" state
|
||||
* (from a `(pivot, succ)` pair that starts the search).
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate canReachATarget(State s) { delta+(s) = any(State succ | isStartLoops(_, succ)) }
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `pivot` and `succ` are a pair of loops that could be the beginning of a quadratic blowup.
|
||||
*
|
||||
* There is a slight implementation difference compared to the paper: this predicate requires that `pivot != succ`.
|
||||
* The case where `pivot = succ` causes exponential backtracking and is handled by the `js/redos` query.
|
||||
*/
|
||||
predicate isStartLoops(State pivot, State succ) {
|
||||
pivot != succ and
|
||||
succ.getRepr() instanceof InfiniteRepetitionQuantifier and
|
||||
delta+(pivot) = succ and
|
||||
(
|
||||
pivot.getRepr() instanceof InfiniteRepetitionQuantifier
|
||||
or
|
||||
pivot = mkMatch(any(RegExpRoot root))
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a state for which there exists a transition in the NFA from `s`.
|
||||
*/
|
||||
State delta(State s) { delta(s, _, result) }
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to the corresponding
|
||||
* components of `r` labelled with `s1`, `s2`, and `s3`, respectively.
|
||||
*/
|
||||
pragma[noinline]
|
||||
predicate step(StateTuple q, InputSymbol s1, InputSymbol s2, InputSymbol s3, StateTuple r) {
|
||||
exists(State r1, State r2, State r3 |
|
||||
step(q, s1, s2, s3, r1, r2, r3) and r = MkStateTuple(r1, r2, r3)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to `r1`, `r2`, and `r3
|
||||
* labelled with `s1`, `s2`, and `s3`, respectively.
|
||||
*/
|
||||
pragma[noopt]
|
||||
predicate step(
|
||||
StateTuple q, InputSymbol s1, InputSymbol s2, InputSymbol s3, State r1, State r2, State r3
|
||||
) {
|
||||
exists(State q1, State q2, State q3 | q.isTuple(q1, q2, q3) |
|
||||
deltaClosed(q1, s1, r1) and
|
||||
deltaClosed(q2, s2, r2) and
|
||||
deltaClosed(q3, s3, r3) and
|
||||
// use noopt to force the join on `getAThreewayIntersect` to happen last.
|
||||
exists(getAThreewayIntersect(s1, s2, s3))
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a char that is matched by all the edges `s1`, `s2`, and `s3`.
|
||||
*
|
||||
* The result is not complete, and might miss some combination of edges that share some character.
|
||||
*/
|
||||
pragma[noinline]
|
||||
string getAThreewayIntersect(InputSymbol s1, InputSymbol s2, InputSymbol s3) {
|
||||
result = minAndMaxIntersect(s1, s2) and result = [intersect(s2, s3), intersect(s1, s3)]
|
||||
or
|
||||
result = minAndMaxIntersect(s1, s3) and result = [intersect(s2, s3), intersect(s1, s2)]
|
||||
or
|
||||
result = minAndMaxIntersect(s2, s3) and result = [intersect(s1, s2), intersect(s1, s3)]
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the minimum and maximum characters that intersect between `a` and `b`.
|
||||
* This predicate is used to limit the size of `getAThreewayIntersect`.
|
||||
*/
|
||||
pragma[noinline]
|
||||
string minAndMaxIntersect(InputSymbol a, InputSymbol b) {
|
||||
result = [min(intersect(a, b)), max(intersect(a, b))]
|
||||
}
|
||||
|
||||
private newtype TTrace =
|
||||
Nil() or
|
||||
Step(InputSymbol s1, InputSymbol s2, InputSymbol s3, TTrace t) {
|
||||
exists(StateTuple p |
|
||||
isReachableFromStartTuple(_, _, p, t, _) and
|
||||
step(p, s1, s2, s3, _)
|
||||
)
|
||||
or
|
||||
exists(State pivot, State succ | isStartLoops(pivot, succ) |
|
||||
t = Nil() and step(MkStateTuple(pivot, pivot, succ), s1, s2, s3, _)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* A list of tuples of input symbols that describe a path in the product automaton
|
||||
* starting from some start state.
|
||||
*/
|
||||
class Trace extends TTrace {
|
||||
/**
|
||||
* Gets a string representation of this Trace that can be used for debug purposes.
|
||||
*/
|
||||
string toString() {
|
||||
this = Nil() and result = "Nil()"
|
||||
or
|
||||
exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, Trace t | this = Step(s1, s2, s3, t) |
|
||||
result = "Step(" + s1 + ", " + s2 + ", " + s3 + ", " + t + ")"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there exists a transition from `r` to `q` in the product automaton.
|
||||
* Notice that the arguments are flipped, and thus the direction is backwards.
|
||||
*/
|
||||
pragma[noinline]
|
||||
predicate tupleDeltaBackwards(StateTuple q, StateTuple r) { step(r, _, _, _, q) }
|
||||
|
||||
/**
|
||||
* Holds if `tuple` is an end state in our search.
|
||||
* That means there exists a pair of loops `(pivot, succ)` such that `tuple = (pivot, succ, succ)`.
|
||||
*/
|
||||
predicate isEndTuple(StateTuple tuple) { tuple = getAnEndTuple(_, _) }
|
||||
|
||||
/**
|
||||
* Gets the minimum length of a path from `r` to an end state `end`.
|
||||
*
|
||||
* The implementation searches backwards from the end-tuple.
|
||||
* This approach was chosen because it is way more efficient if the first predicate given to `shortestDistances` is small.
|
||||
* The `end` argument must always be an end state.
|
||||
*/
|
||||
int distBackFromEnd(StateTuple r, StateTuple end) =
|
||||
shortestDistances(isEndTuple/1, tupleDeltaBackwards/2)(end, r, result)
|
||||
|
||||
/**
|
||||
* Holds if there exists a pair of repetitions `(pivot, succ)` in the regular expression such that:
|
||||
* `tuple` is reachable from `(pivot, pivot, succ)` in the product automaton,
|
||||
* and there is a distance of `dist` from `tuple` to the nearest end-tuple `(pivot, succ, succ)`,
|
||||
* and a path from a start-state to `tuple` follows the transitions in `trace`.
|
||||
*/
|
||||
predicate isReachableFromStartTuple(State pivot, State succ, StateTuple tuple, Trace trace, int dist) {
|
||||
// base case. The first step is inlined to start the search after all possible 1-steps, and not just the ones with the shortest path.
|
||||
exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, State q1, State q2, State q3 |
|
||||
isStartLoops(pivot, succ) and
|
||||
step(MkStateTuple(pivot, pivot, succ), s1, s2, s3, tuple) and
|
||||
tuple = MkStateTuple(q1, q2, q3) and
|
||||
trace = Step(s1, s2, s3, Nil()) and
|
||||
dist = distBackFromEnd(tuple, MkStateTuple(pivot, succ, succ))
|
||||
)
|
||||
or
|
||||
// recursive case
|
||||
exists(StateTuple p, Trace v, InputSymbol s1, InputSymbol s2, InputSymbol s3 |
|
||||
isReachableFromStartTuple(pivot, succ, p, v, dist + 1) and
|
||||
dist = isReachableFromStartTupleHelper(pivot, succ, tuple, p, s1, s2, s3) and
|
||||
trace = Step(s1, s2, s3, v)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper predicate for the recursive case in `isReachableFromStartTuple`.
|
||||
*/
|
||||
pragma[noinline]
|
||||
private int isReachableFromStartTupleHelper(
|
||||
State pivot, State succ, StateTuple r, StateTuple p, InputSymbol s1, InputSymbol s2,
|
||||
InputSymbol s3
|
||||
) {
|
||||
result = distBackFromEnd(r, MkStateTuple(pivot, succ, succ)) and
|
||||
step(p, s1, s2, s3, r)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the tuple `(pivot, succ, succ)` from the product automaton.
|
||||
*/
|
||||
StateTuple getAnEndTuple(State pivot, State succ) {
|
||||
isStartLoops(pivot, succ) and
|
||||
result = MkStateTuple(pivot, succ, succ)
|
||||
}
|
||||
|
||||
private predicate hasSuffix(Trace suffix, Trace t, int i) {
|
||||
// Declaring `t` to be a `RelevantTrace` currently causes a redundant check in the
|
||||
// recursive case, so instead we check it explicitly here.
|
||||
t instanceof RelevantTrace and
|
||||
i = 0 and
|
||||
suffix = t
|
||||
or
|
||||
hasSuffix(Step(_, _, _, suffix), t, i - 1)
|
||||
}
|
||||
|
||||
pragma[noinline]
|
||||
private predicate hasTuple(InputSymbol s1, InputSymbol s2, InputSymbol s3, Trace t, int i) {
|
||||
hasSuffix(Step(s1, s2, s3, _), t, i)
|
||||
}
|
||||
|
||||
private class RelevantTrace extends Trace, Step {
|
||||
RelevantTrace() {
|
||||
exists(State pivot, State succ, StateTuple q |
|
||||
isReachableFromStartTuple(pivot, succ, q, this, _) and
|
||||
q = getAnEndTuple(pivot, succ)
|
||||
)
|
||||
}
|
||||
|
||||
pragma[noinline]
|
||||
private string getAThreewayIntersect(int i) {
|
||||
exists(InputSymbol s1, InputSymbol s2, InputSymbol s3 |
|
||||
hasTuple(s1, s2, s3, this, i) and
|
||||
result = getAThreewayIntersect(s1, s2, s3)
|
||||
)
|
||||
}
|
||||
|
||||
/** Gets a string corresponding to this trace. */
|
||||
// the pragma is needed for the case where `getAThreewayIntersect(s1, s2, s3)` has multiple values,
|
||||
// not for recursion
|
||||
language[monotonicAggregates]
|
||||
string concretise() {
|
||||
result =
|
||||
strictconcat(int i |
|
||||
hasTuple(_, _, _, this, i)
|
||||
|
|
||||
this.getAThreewayIntersect(i) order by i desc
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if matching repetitions of `pump` can:
|
||||
* 1) Transition from `pivot` back to `pivot`.
|
||||
* 2) Transition from `pivot` to `succ`.
|
||||
* 3) Transition from `succ` to `succ`.
|
||||
*
|
||||
* From theorem 3 in the paper linked in the top of this file we can therefore conclude that
|
||||
* the regular expression has polynomial backtracking - if a rejecting suffix exists.
|
||||
*
|
||||
* This predicate is used by `SuperLinearReDoSConfiguration`, and the final results are
|
||||
* available in the `hasReDoSResult` predicate.
|
||||
*/
|
||||
predicate isPumpable(State pivot, State succ, string pump) {
|
||||
exists(StateTuple q, RelevantTrace t |
|
||||
isReachableFromStartTuple(pivot, succ, q, t, _) and
|
||||
q = getAnEndTuple(pivot, succ) and
|
||||
pump = t.concretise()
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if repetitions of `pump` at `t` will cause polynomial backtracking.
|
||||
*/
|
||||
predicate polynimalReDoS(RegExpTerm t, string pump, string prefixMsg, RegExpTerm prev) {
|
||||
exists(State s, State pivot |
|
||||
hasReDoSResult(t, pump, s, prefixMsg) and
|
||||
isPumpable(pivot, s, _) and
|
||||
prev = pivot.getRepr()
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a message for why `term` can cause polynomial backtracking.
|
||||
*/
|
||||
string getReasonString(RegExpTerm term, string pump, string prefixMsg, RegExpTerm prev) {
|
||||
polynimalReDoS(term, pump, prefixMsg, prev) and
|
||||
result =
|
||||
"Strings " + prefixMsg + "with many repetitions of '" + pump +
|
||||
"' can start matching anywhere after the start of the preceeding " + prev
|
||||
}
|
||||
|
||||
/**
|
||||
* A term that may cause a regular expression engine to perform a
|
||||
* polynomial number of match attempts, relative to the input length.
|
||||
*/
|
||||
class PolynomialBackTrackingTerm extends InfiniteRepetitionQuantifier {
|
||||
string reason;
|
||||
string pump;
|
||||
string prefixMsg;
|
||||
RegExpTerm prev;
|
||||
|
||||
PolynomialBackTrackingTerm() {
|
||||
reason = getReasonString(this, pump, prefixMsg, prev) and
|
||||
// there might be many reasons for this term to have polynomial backtracking - we pick the shortest one.
|
||||
reason = min(string msg | msg = getReasonString(this, _, _, _) | msg order by msg.length(), msg)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if all non-empty successors to the polynomial backtracking term matches the end of the line.
|
||||
*/
|
||||
predicate isAtEndLine() {
|
||||
forall(RegExpTerm succ | this.getSuccessor+() = succ and not matchesEpsilon(succ) |
|
||||
succ instanceof RegExpDollar
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the string that should be repeated to cause this regular expression to perform polynomially.
|
||||
*/
|
||||
string getPumpString() { result = pump }
|
||||
|
||||
/**
|
||||
* Gets a message for which prefix a matching string must start with for this term to cause polynomial backtracking.
|
||||
*/
|
||||
string getPrefixMessage() { result = prefixMsg }
|
||||
|
||||
/**
|
||||
* Gets a predecessor to `this`, which also loops on the pump string, and thereby causes polynomial backtracking.
|
||||
*/
|
||||
RegExpTerm getPreviousLoop() { result = prev }
|
||||
|
||||
/**
|
||||
* Gets the reason for the number of match attempts.
|
||||
*/
|
||||
string getReason() { result = reason }
|
||||
}
|
||||
deprecated import semmle.javascript.security.regexp.SuperlinearBackTracking as Dep
|
||||
import Dep
|
||||
|
||||
@@ -0,0 +1,344 @@
/**
* This library implements the analysis described in the following two papers:
*
* James Kirrage, Asiri Rathnayake, Hayo Thielecke: Static Analysis for
* Regular Expression Denial-of-Service Attacks. NSS 2013.
* (http://www.cs.bham.ac.uk/~hxt/research/reg-exp-sec.pdf)
* Asiri Rathnayake, Hayo Thielecke: Static Analysis for Regular Expression
* Exponential Runtime via Substructural Logics. 2014.
* (https://www.cs.bham.ac.uk/~hxt/research/redos_full.pdf)
*
* The basic idea is to search for overlapping cycles in the NFA, that is,
* states `q` such that there are two distinct paths from `q` to itself
* that consume the same word `w`.
*
* For any such state `q`, an attack string can be constructed as follows:
* concatenate a prefix `v` that takes the NFA to `q` with `n` copies of
* the word `w` that leads back to `q` along two different paths, followed
* by a suffix `x` that is _not_ accepted in state `q`. A backtracking
* implementation will need to explore at least 2^n different ways of going
* from `q` back to itself while trying to match the `n` copies of `w`
* before finally giving up.
*
* Now in order to identify overlapping cycles, all we have to do is find
* pumpable forks, that is, states `q` that can transition to two different
* states `r1` and `r2` on the same input symbol `c`, such that there are
* paths from both `r1` and `r2` to `q` that consume the same word. The latter
* condition is equivalent to saying that `(q, q)` is reachable from `(r1, r2)`
* in the product NFA.
*
* This is what the library does. It makes a simple attempt to construct a
* prefix `v` leading into `q`, but only to improve the alert message.
* And the library tries to prove the existence of a suffix that ensures
* rejection. This check might fail, which can cause false positives.
*
* Finally, sometimes it depends on the translation whether the NFA generated
* for a regular expression has a pumpable fork or not. We implement one
* particular translation, which may result in false positives or negatives
* relative to some particular JavaScript engine.
*
* More precisely, the library constructs an NFA from a regular expression `r`
* as follows:
*
* * Every sub-term `t` gives rise to an NFA state `Match(t,i)`, representing
* the state of the automaton before attempting to match the `i`th character in `t`.
* * There is one accepting state `Accept(r)`.
* * There is a special `AcceptAnySuffix(r)` state, which accepts any suffix string
* by using an epsilon transition to `Accept(r)` and an any transition to itself.
* * Transitions between states may be labelled with epsilon, or an abstract
* input symbol.
* * Each abstract input symbol represents a set of concrete input characters:
* either a single character, a set of characters represented by a
* character class, or the set of all characters.
* * The product automaton is constructed lazily, starting with pair states
* `(q, q)` where `q` is a fork, and proceeding along an over-approximate
* step relation.
* * The over-approximate step relation allows transitions along pairs of
* abstract input symbols where the symbols have overlap in the characters they accept.
* * Once a trace of pairs of abstract input symbols that leads from a fork
* back to itself has been identified, we attempt to construct a concrete
* string corresponding to it, which may fail.
* * Lastly we ensure that any state reached by repeating `n` copies of `w` has
* a suffix `x` (possibly empty) that is most likely __not__ accepted.
*/

import NfaUtils

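As a concrete instance of the attack-string construction described above, the following JavaScript/TypeScript timing sketch (illustrative only, not part of this commit; the helper names are made up) uses `/(a*)*b/`, a pattern this file also discusses below: the prefix `v` is empty, the pumped word `w` is `"a"`, and the absence of a final `"b"` plays the role of the rejecting suffix `x`.

// Illustrative sketch: rejection time for /(a*)*b/ grows roughly exponentially in n.
const exponential = /(a*)*b/;

function measureMs(n: number): number {
  const attack = "a".repeat(n); // v + n copies of w, with no accepting suffix
  const start = Date.now();
  exponential.test(attack); // a backtracking engine explores ~2^n ways to split the "a"s
  return Date.now() - start;
}

for (let n = 16; n <= 24; n++) {
  console.log(n, measureMs(n) + "ms"); // the time roughly doubles per extra character
}
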
/**
|
||||
* Holds if state `s` might be inside a backtracking repetition.
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate stateInsideBacktracking(State s) {
|
||||
s.getRepr().getParent*() instanceof MaybeBacktrackingRepetition
|
||||
}
|
||||
|
||||
/**
|
||||
* An infinitely repeating quantifier that might backtrack.
|
||||
*/
|
||||
private class MaybeBacktrackingRepetition extends InfiniteRepetitionQuantifier {
|
||||
MaybeBacktrackingRepetition() {
|
||||
exists(RegExpTerm child |
|
||||
child instanceof RegExpAlt or
|
||||
child instanceof RegExpQuantifier
|
||||
|
|
||||
child.getParent+() = this
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
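Purely as an illustration of the syntactic filter above (not part of this commit), these are the kinds of regex literals it keeps or discards:

const kept1 = /(a|b)*x/;  // an alternation below an infinite repetition
const kept2 = /(a*)*x/;   // a nested quantifier below an infinite repetition
const dropped = /(ab)*x/; // no alternation or quantifier below the star
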
/**
|
||||
* A state in the product automaton.
|
||||
*/
|
||||
private newtype TStatePair =
|
||||
/**
|
||||
* We lazily only construct those states that we are actually
|
||||
* going to need: `(q, q)` for every fork state `q`, and any
|
||||
* pair of states that can be reached from a pair that we have
|
||||
* already constructed. To cut down on the number of states,
|
||||
* we only represent states `(q1, q2)` where `q1` is lexicographically
|
||||
* no bigger than `q2`.
|
||||
*
|
||||
* States are only constructed if both states in the pair are
|
||||
* inside a repetition that might backtrack.
|
||||
*/
|
||||
MkStatePair(State q1, State q2) {
|
||||
isFork(q1, _, _, _, _) and q2 = q1
|
||||
or
|
||||
(step(_, _, _, q1, q2) or step(_, _, _, q2, q1)) and
|
||||
rankState(q1) <= rankState(q2)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a unique number for a `state`.
|
||||
* It is used to create an ordering of states, where states with the same `toString()` will be ordered differently.
|
||||
*/
|
||||
private int rankState(State state) {
|
||||
state =
|
||||
rank[result](State s, Location l |
|
||||
l = s.getRepr().getLocation()
|
||||
|
|
||||
s order by l.getStartLine(), l.getStartColumn(), s.toString()
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* A state in the product automaton.
|
||||
*/
|
||||
private class StatePair extends TStatePair {
|
||||
State q1;
|
||||
State q2;
|
||||
|
||||
StatePair() { this = MkStatePair(q1, q2) }
|
||||
|
||||
/** Gets a textual representation of this element. */
|
||||
string toString() { result = "(" + q1 + ", " + q2 + ")" }
|
||||
|
||||
/** Gets the first component of the state pair. */
|
||||
State getLeft() { result = q1 }
|
||||
|
||||
/** Gets the second component of the state pair. */
|
||||
State getRight() { result = q2 }
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds for `(fork, fork)` state pairs when `isFork(fork, _, _, _, _)` holds.
|
||||
*
|
||||
* Used in `statePairDistToFork`
|
||||
*/
|
||||
private predicate isStatePairFork(StatePair p) {
|
||||
exists(State fork | p = MkStatePair(fork, fork) and isFork(fork, _, _, _, _))
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to the corresponding
|
||||
* components of `r`.
|
||||
*
|
||||
* Used in `statePairDistToFork`
|
||||
*/
|
||||
private predicate reverseStep(StatePair r, StatePair q) { step(q, _, _, r) }
|
||||
|
||||
/**
|
||||
* Gets the minimum length of a path from `q` to `r` in the
|
||||
* product automaton.
|
||||
*/
|
||||
private int statePairDistToFork(StatePair q, StatePair r) =
|
||||
shortestDistances(isStatePairFork/1, reverseStep/2)(r, q, result)
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from `q` to `r1` and from `q` to `r2`
|
||||
* labelled with `s1` and `s2`, respectively, where `s1` and `s2` do not
|
||||
* trivially have an empty intersection.
|
||||
*
|
||||
* This predicate only holds for states associated with regular expressions
|
||||
* that have at least one repetition quantifier in them (otherwise the
|
||||
* expression cannot be vulnerable to ReDoS attacks anyway).
|
||||
*/
|
||||
pragma[noopt]
|
||||
private predicate isFork(State q, InputSymbol s1, InputSymbol s2, State r1, State r2) {
|
||||
stateInsideBacktracking(q) and
|
||||
exists(State q1, State q2 |
|
||||
q1 = epsilonSucc*(q) and
|
||||
delta(q1, s1, r1) and
|
||||
q2 = epsilonSucc*(q) and
|
||||
delta(q2, s2, r2) and
|
||||
// Use pragma[noopt] to prevent intersect(s1,s2) from being the starting point of the join.
|
||||
// From (s1,s2) it would find a huge number of intermediate state pairs (q1,q2) originating from different literals,
|
||||
// and discover at the end that no `q` can reach both `q1` and `q2` by epsilon transitions.
|
||||
exists(intersect(s1, s2))
|
||||
|
|
||||
s1 != s2
|
||||
or
|
||||
r1 != r2
|
||||
or
|
||||
r1 = r2 and q1 != q2
|
||||
or
|
||||
// If q can reach itself by epsilon transitions, then there are two distinct paths to the q1/q2 state:
|
||||
// one that uses the loop and one that doesn't. The engine will separately attempt to match with each path,
|
||||
// despite ending in the same state. The "fork" thus arises from the choice of whether to use the loop or not.
|
||||
// To avoid every state in the loop becoming a fork state,
|
||||
// we arbitrarily pick the InfiniteRepetitionQuantifier state as the canonical fork state for the loop
|
||||
// (every epsilon-loop must contain such a state).
|
||||
//
|
||||
// We additionally require that there exists another InfiniteRepetitionQuantifier `mid` on the path from `q` to itself.
|
||||
// This is done to avoid flagging regular expressions such as `/(a?)*b/` - that only has polynomial runtime, and is detected by `js/polynomial-redos`.
|
||||
// The below code is therefore a heuristic that only flags regular expressions such as `/(a*)*b/`,
// and does not flag regular expressions such as `/(a?b?)*c/`, but the latter pattern is not used frequently.
|
||||
r1 = r2 and
|
||||
q1 = q2 and
|
||||
epsilonSucc+(q) = q and
|
||||
exists(RegExpTerm term | term = q.getRepr() | term instanceof InfiniteRepetitionQuantifier) and
|
||||
// One of the mid states is an infinite quantifier itself
|
||||
exists(State mid, RegExpTerm term |
|
||||
mid = epsilonSucc+(q) and
|
||||
term = mid.getRepr() and
|
||||
term instanceof InfiniteRepetitionQuantifier and
|
||||
q = epsilonSucc+(mid) and
|
||||
not mid = q
|
||||
)
|
||||
) and
|
||||
stateInsideBacktracking(r1) and
|
||||
stateInsideBacktracking(r2)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the state pair `(q1, q2)` or `(q2, q1)`; note that only
|
||||
* one or the other is defined.
|
||||
*/
|
||||
private StatePair mkStatePair(State q1, State q2) {
|
||||
result = MkStatePair(q1, q2) or result = MkStatePair(q2, q1)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to the corresponding
|
||||
* components of `r` labelled with `s1` and `s2`, respectively.
|
||||
*/
|
||||
private predicate step(StatePair q, InputSymbol s1, InputSymbol s2, StatePair r) {
|
||||
exists(State r1, State r2 | step(q, s1, s2, r1, r2) and r = mkStatePair(r1, r2))
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to `r1` and `r2`
|
||||
* labelled with `s1` and `s2`, respectively.
|
||||
*
|
||||
* We only consider transitions where the resulting states `(r1, r2)` are both
|
||||
* inside a repetition that might backtrack.
|
||||
*/
|
||||
pragma[noopt]
|
||||
private predicate step(StatePair q, InputSymbol s1, InputSymbol s2, State r1, State r2) {
|
||||
exists(State q1, State q2 | q.getLeft() = q1 and q.getRight() = q2 |
|
||||
deltaClosed(q1, s1, r1) and
|
||||
deltaClosed(q2, s2, r2) and
|
||||
// use noopt to force the join on `intersect` to happen last.
|
||||
exists(intersect(s1, s2))
|
||||
) and
|
||||
stateInsideBacktracking(r1) and
|
||||
stateInsideBacktracking(r2)
|
||||
}
|
||||
|
||||
private newtype TTrace =
|
||||
Nil() or
|
||||
Step(InputSymbol s1, InputSymbol s2, TTrace t) { isReachableFromFork(_, _, s1, s2, t, _) }
|
||||
|
||||
/**
|
||||
* A list of pairs of input symbols that describe a path in the product automaton
|
||||
* starting from some fork state.
|
||||
*/
|
||||
private class Trace extends TTrace {
|
||||
/** Gets a textual representation of this element. */
|
||||
string toString() {
|
||||
this = Nil() and result = "Nil()"
|
||||
or
|
||||
exists(InputSymbol s1, InputSymbol s2, Trace t | this = Step(s1, s2, t) |
|
||||
result = "Step(" + s1 + ", " + s2 + ", " + t + ")"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `r` is reachable from `(fork, fork)` under input `w`, and there is
|
||||
* a path from `r` back to `(fork, fork)` with `rem` steps.
|
||||
*/
|
||||
private predicate isReachableFromFork(State fork, StatePair r, Trace w, int rem) {
|
||||
exists(InputSymbol s1, InputSymbol s2, Trace v |
|
||||
isReachableFromFork(fork, r, s1, s2, v, rem) and
|
||||
w = Step(s1, s2, v)
|
||||
)
|
||||
}
|
||||
|
||||
private predicate isReachableFromFork(
|
||||
State fork, StatePair r, InputSymbol s1, InputSymbol s2, Trace v, int rem
|
||||
) {
|
||||
// base case
|
||||
exists(State q1, State q2 |
|
||||
isFork(fork, s1, s2, q1, q2) and
|
||||
r = MkStatePair(q1, q2) and
|
||||
v = Nil() and
|
||||
rem = statePairDistToFork(r, MkStatePair(fork, fork))
|
||||
)
|
||||
or
|
||||
// recursive case
|
||||
exists(StatePair p |
|
||||
isReachableFromFork(fork, p, v, rem + 1) and
|
||||
step(p, s1, s2, r) and
|
||||
rem = statePairDistToFork(r, MkStatePair(fork, fork))
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a state in the product automaton from which `(fork, fork)` is
|
||||
* reachable in zero or more epsilon transitions.
|
||||
*/
|
||||
private StatePair getAForkPair(State fork) {
|
||||
isFork(fork, _, _, _, _) and
|
||||
result = MkStatePair(epsilonPred*(fork), epsilonPred*(fork))
|
||||
}
|
||||
|
||||
/** An implementation of a chain containing chars for use by `Concretizer`. */
|
||||
private module CharTreeImpl implements CharTree {
|
||||
class CharNode = Trace;
|
||||
|
||||
CharNode getPrev(CharNode t) { t = Step(_, _, result) }
|
||||
|
||||
/** Holds if `n` is a trace that is used by `concretize` in `isPumpable`. */
|
||||
predicate isARelevantEnd(CharNode n) {
|
||||
exists(State f | isReachableFromFork(f, getAForkPair(f), n, _))
|
||||
}
|
||||
|
||||
string getChar(CharNode t) {
|
||||
exists(InputSymbol s1, InputSymbol s2 | t = Step(s1, s2, _) | result = intersect(s1, s2))
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `fork` is a pumpable fork with word `w`.
|
||||
*/
|
||||
private predicate isPumpable(State fork, string w) {
|
||||
exists(StatePair q, Trace t |
|
||||
isReachableFromFork(fork, q, t, _) and
|
||||
q = getAForkPair(fork) and
|
||||
w = Concretizer<CharTreeImpl>::concretize(t)
|
||||
)
|
||||
}
|
||||
|
||||
/** Holds if `state` has exponential ReDoS */
|
||||
predicate hasReDoSResult = ReDoSPruning<isPumpable/2>::hasReDoSResult/4;
1319
javascript/ql/lib/semmle/javascript/security/regexp/NfaUtils.qll
Normal file
File diff suppressed because it is too large
@@ -1,5 +1,5 @@
/**
* Provides JavaScript-specific definitions for use in the ReDoSUtil module.
* Provides JavaScript-specific definitions for use in the NfaUtils module.
*/

import javascript
@@ -0,0 +1,148 @@
/**
* Provides default sources, sinks and sanitizers for reasoning about
* polynomial regular expression denial-of-service attacks, as well
* as extension points for adding your own.
*/

import javascript
import SuperlinearBackTracking

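To illustrate the model this file sets up, here is a hypothetical JavaScript/TypeScript snippet (not part of this commit; the function names are made up): the exported parameter is what `ExternalInputSource` below treats as a "library input" source, matching it against a superlinear regexp is the kind of sink the query flags, and the length check and truncation are what `LengthGuard` and `StringLengthLimiter` below treat as sanitizers.

// Hypothetical library code, for illustration only.
export function hasDigitsThenWord(userInput: string): boolean {
  // source: parameter of an exported function; sink: superlinear regexp match
  return /^\d*5\w*$/.test(userInput);
}

export function hasDigitsThenWordBounded(userInput: string): boolean {
  if (userInput.length > 100) return false; // length guard (the polarity = false case)
  return /^\d*5\w*$/.test(userInput.slice(0, 100)); // substring call limits the pumpable length
}
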
/** Module containing sources, sinks, and sanitizers for polynomial regular expression denial-of-service attacks. */
|
||||
module PolynomialReDoS {
|
||||
/**
|
||||
* A data flow source node for polynomial regular expression denial-of-service vulnerabilities.
|
||||
*/
|
||||
abstract class Source extends DataFlow::Node {
|
||||
/**
|
||||
* Gets the kind of source that is being accessed.
|
||||
*
|
||||
* Is either a kind from `HTTP::RequestInputAccess::getKind()`, or "library".
|
||||
*/
|
||||
abstract string getKind();
|
||||
|
||||
/**
|
||||
* Gets a string that describes the source.
|
||||
* For use in the alert message.
|
||||
*/
|
||||
string describe() { result = "a user-provided value" }
|
||||
}
|
||||
|
||||
/**
|
||||
* A data flow sink node for polynomial regular expression denial-of-service vulnerabilities.
|
||||
*/
|
||||
abstract class Sink extends DataFlow::Node {
|
||||
abstract RegExpTerm getRegExp();
|
||||
|
||||
/**
|
||||
* Gets the node to highlight in the alert message.
|
||||
*/
|
||||
DataFlow::Node getHighlight() { result = this }
|
||||
}
|
||||
|
||||
/**
|
||||
* A sanitizer for polynomial regular expression denial-of-service vulnerabilities.
|
||||
*/
|
||||
abstract class Sanitizer extends DataFlow::Node { }
|
||||
|
||||
/**
|
||||
* A remote input to a server, seen as a source for polynomial
|
||||
* regular expression denial-of-service vulnerabilities.
|
||||
*/
|
||||
class RequestInputAccessAsSource extends Source instanceof HTTP::RequestInputAccess {
|
||||
override string getKind() { result = HTTP::RequestInputAccess.super.getKind() }
|
||||
}
|
||||
|
||||
/**
|
||||
* A use of a superlinear backtracking term, seen as a sink for polynomial
|
||||
* regular expression denial-of-service vulnerabilities.
|
||||
*/
|
||||
class PolynomialBackTrackingTermUse extends Sink {
|
||||
PolynomialBackTrackingTerm term;
|
||||
DataFlow::MethodCallNode mcn;
|
||||
|
||||
PolynomialBackTrackingTermUse() {
|
||||
exists(DataFlow::Node regexp, string name |
|
||||
term.getRootTerm() = RegExp::getRegExpFromNode(regexp)
|
||||
|
|
||||
this = mcn.getArgument(0) and
|
||||
regexp = mcn.getReceiver() and
|
||||
name = ["match", "split", "matchAll", "replace", "replaceAll", "search"]
|
||||
or
|
||||
this = mcn.getReceiver() and
|
||||
regexp = mcn.getArgument(0) and
|
||||
(name = "test" or name = "exec")
|
||||
)
|
||||
}
|
||||
|
||||
override RegExpTerm getRegExp() { result = term }
|
||||
|
||||
override DataFlow::Node getHighlight() { result = mcn }
|
||||
}
|
||||
|
||||
/**
|
||||
* An operation that limits the length of a string, seen as a sanitizer.
|
||||
*/
|
||||
class StringLengthLimiter extends Sanitizer {
|
||||
StringLengthLimiter() {
|
||||
this.(StringReplaceCall).isGlobal() and
|
||||
// not lone char classes - they don't remove any repeated pattern.
|
||||
not exists(RegExpTerm root | root = this.(StringReplaceCall).getRegExp().getRoot() |
|
||||
isCharClassLike(root)
|
||||
)
|
||||
or
|
||||
this.(DataFlow::MethodCallNode).getMethodName() = StringOps::substringMethodName()
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `term` matches a set of strings of length 1.
|
||||
*/
|
||||
predicate isCharClassLike(RegExpTerm term) {
|
||||
term instanceof RegExpCharacterClass
|
||||
or
|
||||
term instanceof RegExpCharacterClassEscape
|
||||
or
|
||||
term.(RegExpConstant).getValue().length() = 1
|
||||
or
|
||||
exists(RegExpAlt alt | term = alt |
|
||||
forall(RegExpTerm choice | choice = alt.getAlternative() | isCharClassLike(choice))
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* A check on the length of a string, seen as a sanitizer guard.
|
||||
*/
|
||||
class LengthGuard extends TaintTracking::SanitizerGuardNode, DataFlow::ValueNode {
|
||||
DataFlow::Node input;
|
||||
boolean polarity;
|
||||
|
||||
LengthGuard() {
|
||||
exists(RelationalComparison cmp, DataFlow::PropRead length |
|
||||
this.asExpr() = cmp and
|
||||
length.accesses(input, "length")
|
||||
|
|
||||
length.flowsTo(cmp.getLesserOperand().flow()) and polarity = true
|
||||
or
|
||||
length.flowsTo(cmp.getGreaterOperand().flow()) and polarity = false
|
||||
)
|
||||
}
|
||||
|
||||
override predicate sanitizes(boolean outcome, Expr e) {
|
||||
outcome = polarity and
|
||||
e = input.asExpr()
|
||||
}
|
||||
}
|
||||
|
||||
private import semmle.javascript.PackageExports as Exports
|
||||
|
||||
/**
|
||||
* A parameter of an exported function, seen as a source for polynomial-redos.
|
||||
*/
|
||||
class ExternalInputSource extends Source, DataFlow::SourceNode {
|
||||
ExternalInputSource() { this = Exports::getALibraryInputParameter() }
|
||||
|
||||
override string getKind() { result = "library" }
|
||||
|
||||
override string describe() { result = "library input" }
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,40 @@
/**
* Provides a taint tracking configuration for reasoning about
* polynomial regular expression denial-of-service attacks.
*
* Note, for performance reasons: only import this file if
* `PolynomialReDoS::Configuration` is needed, otherwise
* `PolynomialReDoSCustomizations` should be imported instead.
*/

import javascript
import PolynomialReDoSCustomizations::PolynomialReDoS

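As a hypothetical illustration (not part of this commit) of the extra taint step this configuration adds, `DataFlow::localFieldStep` lets a tainted value travel through an instance field before it reaches the regexp:

// Illustrative only; the class and member names are made up.
class NameChecker {
  private name = "";

  setFromRequest(query: { name?: string }): void {
    this.name = String(query.name); // tainted value stored in a field
  }

  isSuspicious(): boolean {
    return /^\d*5\w*$/.test(this.name); // value read back from the field reaches the sink
  }
}
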
/** A taint-tracking configuration for reasoning about polynomial regular expression denial-of-service attacks. */
|
||||
class Configuration extends TaintTracking::Configuration {
|
||||
Configuration() { this = "PolynomialReDoS" }
|
||||
|
||||
override predicate isSource(DataFlow::Node source) { source instanceof Source }
|
||||
|
||||
override predicate isSink(DataFlow::Node sink) { sink instanceof Sink }
|
||||
|
||||
override predicate isSanitizerGuard(TaintTracking::SanitizerGuardNode node) {
|
||||
super.isSanitizerGuard(node) or
|
||||
node instanceof LengthGuard
|
||||
}
|
||||
|
||||
override predicate isSanitizer(DataFlow::Node node) {
|
||||
super.isSanitizer(node) or
|
||||
node instanceof Sanitizer
|
||||
}
|
||||
|
||||
override predicate hasFlowPath(DataFlow::SourcePathNode source, DataFlow::SinkPathNode sink) {
|
||||
super.hasFlowPath(source, sink) and
|
||||
// require that there is a path without unmatched return steps
|
||||
DataFlow::hasPathWithoutUnmatchedReturn(source, sink)
|
||||
}
|
||||
|
||||
override predicate isAdditionalTaintStep(DataFlow::Node pred, DataFlow::Node succ) {
|
||||
DataFlow::localFieldStep(pred, succ)
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,157 @@
/**
* Provides predicates for reasoning about which strings are matched by a regular expression,
* and for testing which capture groups are filled when a particular regexp matches a string.
*/

import NfaUtils

/** A root term */
class RootTerm extends RegExpTerm {
RootTerm() { this.isRootTerm() }
}

/**
* Holds if it should be tested whether `root` matches `str`.
*
* If `ignorePrefix` is true, then a regexp without a start anchor will be treated as if it had a start anchor.
* E.g. a regular expression `/foo$/` will match any string that ends with "foo",
* but if `ignorePrefix` is true, it will only match "foo".
*
* If `testWithGroups` is true, then the `RegexpMatching::fillsCaptureGroup` predicate can be used to determine which capture
* groups are filled by a string.
*/
signature predicate isRegexpMatchingCandidateSig(
RootTerm root, string str, boolean ignorePrefix, boolean testWithGroups
);

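A dynamic counterpart of what the module below computes statically, as a hypothetical JavaScript/TypeScript sketch (not part of this commit; the mapping in the comments is approximate):

const re = /^(ab)(c)?$/;
const m = "ab".match(re);
console.log(m && m[1]); // "ab"      - roughly: matches(root, "ab") and fillsCaptureGroup(root, "ab", 1)
console.log(m && m[2]); // undefined - roughly: fillsCaptureGroup(root, "ab", 2) does not hold
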
/**
* A module for determining if a regexp matches a given string,
* and reasoning about which capture groups are filled by a given string.
*
* The module parameter `isCandidate` determines which strings should be tested,
* and the results can be read from the `matches` and `fillsCaptureGroup` predicates.
*/
module RegexpMatching<isRegexpMatchingCandidateSig/4 isCandidate> {
/**
|
||||
* Gets a state the regular expression `reg` can be in after matching the `i`th char in `str`.
|
||||
* The regular expression is modeled as a non-deterministic finite automaton,
|
||||
* the regular expression can therefore be in multiple states after matching a character.
|
||||
*
|
||||
* It's a forward search to all possible states, and there is thus no guarantee that the state is on a path to an accepting state.
|
||||
*/
|
||||
private State getAState(RootTerm reg, int i, string str, boolean ignorePrefix) {
|
||||
// start state, the -1 position before any chars have been matched
|
||||
i = -1 and
|
||||
isCandidate(reg, str, ignorePrefix, _) and
|
||||
result.getRepr().getRootTerm() = reg and
|
||||
isStartState(result)
|
||||
or
|
||||
// recursive case
|
||||
result = getAStateAfterMatching(reg, _, str, i, _, ignorePrefix)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the next state after the `prev` state from `reg`.
|
||||
* `prev` is the state after matching `fromIndex` chars in `str`,
|
||||
* and the result is the state after matching `toIndex` chars in `str`.
|
||||
*
|
||||
* This predicate is used as a step relation in the forwards search (`getAState`),
|
||||
* and also as a step relation in the later backwards search (`getAStateThatReachesAccept`).
|
||||
*/
|
||||
private State getAStateAfterMatching(
|
||||
RootTerm reg, State prev, string str, int toIndex, int fromIndex, boolean ignorePrefix
|
||||
) {
|
||||
// the basic recursive case - outlined into a noopt helper to make performance work out.
|
||||
result = getAStateAfterMatchingAux(reg, prev, str, toIndex, fromIndex, ignorePrefix)
|
||||
or
|
||||
// we can skip past word boundaries if the next char is a non-word char.
|
||||
fromIndex = toIndex and
|
||||
prev.getRepr() instanceof RegExpWordBoundary and
|
||||
prev = getAState(reg, toIndex, str, ignorePrefix) and
|
||||
after(prev.getRepr()) = result and
|
||||
str.charAt(toIndex + 1).regexpMatch("\\W") // \W matches any non-word char.
|
||||
}
|
||||
|
||||
pragma[noopt]
|
||||
private State getAStateAfterMatchingAux(
|
||||
RootTerm reg, State prev, string str, int toIndex, int fromIndex, boolean ignorePrefix
|
||||
) {
|
||||
prev = getAState(reg, fromIndex, str, ignorePrefix) and
|
||||
fromIndex = toIndex - 1 and
|
||||
exists(string char | char = str.charAt(toIndex) | specializedDeltaClosed(prev, char, result)) and
|
||||
not discardedPrefixStep(prev, result, ignorePrefix)
|
||||
}
|
||||
|
||||
/** Holds if a step from `prev` to `next` should be discarded when the `ignorePrefix` flag is set. */
|
||||
private predicate discardedPrefixStep(State prev, State next, boolean ignorePrefix) {
|
||||
prev = mkMatch(any(RegExpRoot r)) and
|
||||
ignorePrefix = true and
|
||||
next = prev
|
||||
}
|
||||
|
||||
// The `deltaClosed` relation specialized to the chars that exist in the candidate strings (see `isCandidate`).
|
||||
private predicate specializedDeltaClosed(State prev, string char, State next) {
|
||||
deltaClosed(prev, specializedGetAnInputSymbolMatching(char), next)
|
||||
}
|
||||
|
||||
// The `getAnInputSymbolMatching` relation specialized to the chars that exist in the candidate strings (see `isCandidate`).
|
||||
pragma[noinline]
|
||||
private InputSymbol specializedGetAnInputSymbolMatching(string char) {
|
||||
exists(string s, RootTerm r | isCandidate(r, s, _, _) | char = s.charAt(_)) and
|
||||
result = getAnInputSymbolMatching(char)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the `i`th state on a path to the accepting state when `reg` matches `str`.
|
||||
* Starts with an accepting state as found by `getAState` and searches backwards
|
||||
* to the start state through the reachable states (as found by `getAState`).
|
||||
*
|
||||
* This predicate satisfies the invariant that the result state can be reached with `i` steps from a start state,
|
||||
* and an accepting state can be found after (`str.length() - 1 - i`) steps from the result.
|
||||
* The result state is therefore always on a valid path where `reg` accepts `str`.
|
||||
*
|
||||
* This predicate is only used to find which capture groups a regular expression has filled,
|
||||
* and thus the search is only performed for the strings in the `testWithGroups(..)` predicate.
|
||||
*/
|
||||
private State getAStateThatReachesAccept(RootTerm reg, int i, string str, boolean ignorePrefix) {
|
||||
// base case, reaches an accepting state from the last state in `getAState(..)`
|
||||
isCandidate(reg, str, ignorePrefix, true) and
|
||||
i = str.length() - 1 and
|
||||
result = getAState(reg, i, str, ignorePrefix) and
|
||||
epsilonSucc*(result) = Accept(_)
|
||||
or
|
||||
// recursive case. `next` is the next state to be matched after matching `prev`.
|
||||
// this predicate is doing a backwards search, so `prev` is the result we are looking for.
|
||||
exists(State next, State prev, int fromIndex, int toIndex |
|
||||
next = getAStateThatReachesAccept(reg, toIndex, str, ignorePrefix) and
|
||||
next = getAStateAfterMatching(reg, prev, str, toIndex, fromIndex, ignorePrefix) and
|
||||
i = fromIndex and
|
||||
result = prev
|
||||
)
|
||||
}
|
||||
|
||||
/** Gets the capture group number that `term` belongs to. */
|
||||
private int group(RegExpTerm term) {
|
||||
exists(RegExpGroup grp | grp.getNumber() = result | term.getParent*() = grp)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `reg` matches `str`, where `str` is in the `isCandidate` predicate.
|
||||
*/
|
||||
predicate matches(RootTerm reg, string str) {
|
||||
exists(State state | state = getAState(reg, str.length() - 1, str, _) |
|
||||
epsilonSucc*(state) = Accept(_)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if matching `str` against `reg` may fill capture group number `g`.
|
||||
* Only holds if `str` is in the `testWithGroups` predicate.
|
||||
*/
|
||||
predicate fillsCaptureGroup(RootTerm reg, string str, int g) {
|
||||
exists(State s |
|
||||
s = getAStateThatReachesAccept(reg, _, str, _) and
|
||||
g = group(s.getRepr())
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,418 @@
/**
* Provides classes for working with regular expressions that can
* perform backtracking in superlinear time.
*/

import NfaUtils

/*
* This module implements the analysis described in the paper:
* Valentin Wustholz, Oswaldo Olivo, Marijn J. H. Heule, and Isil Dillig:
* Static Detection of DoS Vulnerabilities in
* Programs that use Regular Expressions
* (Extended Version).
* (https://arxiv.org/pdf/1701.04045.pdf)
*
* Theorem 3 from the paper describes the basic idea.
*
* The following explains the idea using variables and predicate names that are used in the implementation:
* We consider a pair of repetitions, which we will call `pivot` and `succ`.
*
* We create a product automaton of 3-tuples of states (see `StateTuple`).
* There exists a transition `(a,b,c) -> (d,e,f)` in the product automaton
* iff there exist three transitions in the NFA `a->d, b->e, c->f` where those three
* transitions all match a shared character `char`. (see `getAThreewayIntersect`)
*
* We start a search in the product automaton at `(pivot, pivot, succ)`,
* and search for a series of transitions (a `Trace`), such that we end
* at `(pivot, succ, succ)` (see `isReachableFromStartTuple`).
*
* For example, consider the regular expression `/^\d*5\w*$/`.
* The search will start at the tuple `(\d*, \d*, \w*)` and search
* for a path to `(\d*, \w*, \w*)`.
* This path exists, and consists of a single transition in the product automaton,
* where the three corresponding NFA edges all match the character `"5"`.
*
* The start-state in the NFA has an any-transition to itself; this allows us to
* flag regular expressions such as `/a*$/` - which does not have a start anchor -
* and can thus start matching anywhere.
*
* The implementation is not perfect.
* It has the same suffix detection issue as the `js/redos` query, which can cause false positives.
* It also doesn't find all transitions in the product automaton, which can cause false negatives.
*/

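The `/^\d*5\w*$/` example above can be observed dynamically with the following JavaScript/TypeScript sketch (illustrative only, not part of this commit): repeated `"5"`s can be split between `\d*` and `\w*` in linearly many ways, and a trailing `"!"` forces rejection, so the match time grows roughly quadratically in the input length.

const superlinear = /^\d*5\w*$/;

function measureMs(n: number): number {
  const attack = "5".repeat(n) + "!"; // pump string "5", rejecting suffix "!"
  const start = Date.now();
  superlinear.test(attack);
  return Date.now() - start;
}

for (let n = 2000; n <= 16000; n *= 2) {
  console.log(n, measureMs(n) + "ms"); // the time grows roughly 4x each time n doubles
}
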
/**
|
||||
* Gets any root (start) state of a regular expression.
|
||||
*/
|
||||
private State getRootState() { result = mkMatch(any(RegExpRoot r)) }
|
||||
|
||||
private newtype TStateTuple =
|
||||
MkStateTuple(State q1, State q2, State q3) {
|
||||
// starts at (pivot, pivot, succ)
|
||||
isStartLoops(q1, q3) and q1 = q2
|
||||
or
|
||||
step(_, _, _, _, q1, q2, q3) and FeasibleTuple::isFeasibleTuple(q1, q2, q3)
|
||||
}
|
||||
|
||||
/**
|
||||
* A state in the product automaton.
|
||||
* The product automaton contains 3-tuples of states.
|
||||
*
|
||||
* We lazily only construct those states that we are actually
|
||||
* going to need.
|
||||
* Either a start state `(pivot, pivot, succ)`, or a state
|
||||
* where there exists a transition from an already existing state.
|
||||
*
|
||||
* The exponential variant of this query (`js/redos`) uses an optimization
|
||||
* trick where `q1 <= q2`. This trick cannot be used here as the order
|
||||
* of the elements matter.
|
||||
*/
|
||||
class StateTuple extends TStateTuple {
|
||||
State q1;
|
||||
State q2;
|
||||
State q3;
|
||||
|
||||
StateTuple() { this = MkStateTuple(q1, q2, q3) }
|
||||
|
||||
/**
|
||||
* Gets a string representation of this tuple.
|
||||
*/
|
||||
string toString() { result = "(" + q1 + ", " + q2 + ", " + q3 + ")" }
|
||||
|
||||
/**
|
||||
* Holds if this tuple is `(r1, r2, r3)`.
|
||||
*/
|
||||
pragma[noinline]
|
||||
predicate isTuple(State r1, State r2, State r3) { r1 = q1 and r2 = q2 and r3 = q3 }
|
||||
}
|
||||
|
||||
/**
|
||||
* A module for determining feasible tuples for the product automaton.
|
||||
*
|
||||
* The implementation is split into many predicates for performance reasons.
|
||||
*/
|
||||
private module FeasibleTuple {
|
||||
/**
|
||||
* Holds if the tuple `(r1, r2, r3)` might be on a path from a start-state to an end-state in the product automaton.
|
||||
*/
|
||||
pragma[inline]
|
||||
predicate isFeasibleTuple(State r1, State r2, State r3) {
|
||||
// The first element is either inside a repetition (or the start state itself)
|
||||
isRepetitionOrStart(r1) and
|
||||
// The last element is inside a repetition
|
||||
stateInsideRepetition(r3) and
|
||||
// The states are reachable in the NFA in the order r1 -> r2 -> r3
|
||||
delta+(r1) = r2 and
|
||||
delta+(r2) = r3 and
|
||||
// The first element can reach a beginning (the "pivot" state in a `(pivot, succ)` pair).
|
||||
canReachABeginning(r1) and
|
||||
// The last element can reach a target (the "succ" state in a `(pivot, succ)` pair).
|
||||
canReachATarget(r3)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `s` is either inside a repetition, or is the start state (which is a repetition).
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate isRepetitionOrStart(State s) { stateInsideRepetition(s) or s = getRootState() }
|
||||
|
||||
/**
|
||||
* Holds if state `s` might be inside a backtracking repetition.
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate stateInsideRepetition(State s) {
|
||||
s.getRepr().getParent*() instanceof InfiniteRepetitionQuantifier
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there exists a path in the NFA from `s` to a "pivot" state
|
||||
* (from a `(pivot, succ)` pair that starts the search).
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate canReachABeginning(State s) {
|
||||
delta+(s) = any(State pivot | isStartLoops(pivot, _))
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there exists a path in the NFA from `s` to a "succ" state
|
||||
* (from a `(pivot, succ)` pair that starts the search).
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate canReachATarget(State s) { delta+(s) = any(State succ | isStartLoops(_, succ)) }
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `pivot` and `succ` are a pair of loops that could be the beginning of a quadratic blowup.
|
||||
*
|
||||
* There is a slight implementation difference compared to the paper: this predicate requires that `pivot != succ`.
|
||||
* The case where `pivot = succ` causes exponential backtracking and is handled by the `js/redos` query.
|
||||
*/
|
||||
predicate isStartLoops(State pivot, State succ) {
|
||||
pivot != succ and
|
||||
succ.getRepr() instanceof InfiniteRepetitionQuantifier and
|
||||
delta+(pivot) = succ and
|
||||
(
|
||||
pivot.getRepr() instanceof InfiniteRepetitionQuantifier
|
||||
or
|
||||
pivot = mkMatch(any(RegExpRoot root))
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a state for which there exists a transition in the NFA from `s`.
|
||||
*/
|
||||
State delta(State s) { delta(s, _, result) }
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to the corresponding
|
||||
* components of `r` labelled with `s1`, `s2`, and `s3`, respectively.
|
||||
*/
|
||||
pragma[noinline]
|
||||
predicate step(StateTuple q, InputSymbol s1, InputSymbol s2, InputSymbol s3, StateTuple r) {
|
||||
exists(State r1, State r2, State r3 |
|
||||
step(q, s1, s2, s3, r1, r2, r3) and r = MkStateTuple(r1, r2, r3)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to `r1`, `r2`, and `r3
|
||||
* labelled with `s1`, `s2`, and `s3`, respectively.
|
||||
*/
|
||||
pragma[noopt]
|
||||
predicate step(
|
||||
StateTuple q, InputSymbol s1, InputSymbol s2, InputSymbol s3, State r1, State r2, State r3
|
||||
) {
|
||||
exists(State q1, State q2, State q3 | q.isTuple(q1, q2, q3) |
|
||||
deltaClosed(q1, s1, r1) and
|
||||
deltaClosed(q2, s2, r2) and
|
||||
deltaClosed(q3, s3, r3) and
|
||||
// use noopt to force the join on `getAThreewayIntersect` to happen last.
|
||||
exists(getAThreewayIntersect(s1, s2, s3))
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a char that is matched by all the edges `s1`, `s2`, and `s3`.
|
||||
*
|
||||
* The result is not complete, and might miss some combination of edges that share some character.
|
||||
*/
|
||||
pragma[noinline]
|
||||
string getAThreewayIntersect(InputSymbol s1, InputSymbol s2, InputSymbol s3) {
|
||||
result = minAndMaxIntersect(s1, s2) and result = [intersect(s2, s3), intersect(s1, s3)]
|
||||
or
|
||||
result = minAndMaxIntersect(s1, s3) and result = [intersect(s2, s3), intersect(s1, s2)]
|
||||
or
|
||||
result = minAndMaxIntersect(s2, s3) and result = [intersect(s1, s2), intersect(s1, s3)]
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the minimum and maximum characters that intersect between `a` and `b`.
|
||||
* This predicate is used to limit the size of `getAThreewayIntersect`.
|
||||
*/
|
||||
pragma[noinline]
|
||||
string minAndMaxIntersect(InputSymbol a, InputSymbol b) {
|
||||
result = [min(intersect(a, b)), max(intersect(a, b))]
|
||||
}
|
||||
|
||||
private newtype TTrace =
|
||||
Nil() or
|
||||
Step(InputSymbol s1, InputSymbol s2, InputSymbol s3, TTrace t) {
|
||||
isReachableFromStartTuple(_, _, t, s1, s2, s3, _, _)
|
||||
}
|
||||
|
||||
/**
|
||||
* A list of tuples of input symbols that describe a path in the product automaton
|
||||
* starting from some start state.
|
||||
*/
|
||||
class Trace extends TTrace {
|
||||
/**
|
||||
* Gets a string representation of this Trace that can be used for debug purposes.
|
||||
*/
|
||||
string toString() {
|
||||
this = Nil() and result = "Nil()"
|
||||
or
|
||||
exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, Trace t | this = Step(s1, s2, s3, t) |
|
||||
result = "Step(" + s1 + ", " + s2 + ", " + s3 + ", " + t + ")"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there exists a transition from `r` to `q` in the product automaton.
|
||||
* Notice that the arguments are flipped, and thus the direction is backwards.
|
||||
*/
|
||||
pragma[noinline]
|
||||
predicate tupleDeltaBackwards(StateTuple q, StateTuple r) { step(r, _, _, _, q) }
|
||||
|
||||
/**
|
||||
* Holds if `tuple` is an end state in our search.
|
||||
* That means there exists a pair of loops `(pivot, succ)` such that `tuple = (pivot, succ, succ)`.
|
||||
*/
|
||||
predicate isEndTuple(StateTuple tuple) { tuple = getAnEndTuple(_, _) }
|
||||
|
||||
/**
|
||||
* Gets the minimum length of a path from `r` to an end state `end`.
|
||||
*
|
||||
* The implementation searches backwards from the end-tuple.
|
||||
* This approach was chosen because it is way more efficient if the first predicate given to `shortestDistances` is small.
|
||||
* The `end` argument must always be an end state.
|
||||
*/
|
||||
int distBackFromEnd(StateTuple r, StateTuple end) =
|
||||
shortestDistances(isEndTuple/1, tupleDeltaBackwards/2)(end, r, result)

/**
 * Holds if there exists a pair of repetitions `(pivot, succ)` in the regular expression such that:
 * `tuple` is reachable from `(pivot, pivot, succ)` in the product automaton,
 * and there is a distance of `dist` from `tuple` to the nearest end-tuple `(pivot, succ, succ)`,
 * and a path from a start-state to `tuple` follows the transitions in `trace`.
 */
private predicate isReachableFromStartTuple(
  State pivot, State succ, StateTuple tuple, Trace trace, int dist
) {
  exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, Trace v |
    isReachableFromStartTuple(pivot, succ, v, s1, s2, s3, tuple, dist) and
    trace = Step(s1, s2, s3, v)
  )
}

private predicate isReachableFromStartTuple(
  State pivot, State succ, Trace trace, InputSymbol s1, InputSymbol s2, InputSymbol s3,
  StateTuple tuple, int dist
) {
  // base case.
  exists(State q1, State q2, State q3 |
    isStartLoops(pivot, succ) and
    step(MkStateTuple(pivot, pivot, succ), s1, s2, s3, tuple) and
    tuple = MkStateTuple(q1, q2, q3) and
    trace = Nil() and
    dist = distBackFromEnd(tuple, MkStateTuple(pivot, succ, succ))
  )
  or
  // recursive case
  exists(StateTuple p |
    isReachableFromStartTuple(pivot, succ, p, trace, dist + 1) and
    dist = distBackFromEnd(tuple, MkStateTuple(pivot, succ, succ)) and
    step(p, s1, s2, s3, tuple)
  )
}
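
// Note on the search: because the recursive case requires the distance to drop by
// exactly one per transition (`dist + 1` above), only shortest paths towards an end
// tuple are explored, which keeps the set of reachable tuples (and traces) small.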

/**
 * Gets the tuple `(pivot, succ, succ)` from the product automaton.
 */
StateTuple getAnEndTuple(State pivot, State succ) {
  isStartLoops(pivot, succ) and
  result = MkStateTuple(pivot, succ, succ)
}

/** An implementation of a chain containing chars for use by `Concretizer`. */
private module CharTreeImpl implements CharTree {
  class CharNode = Trace;

  CharNode getPrev(CharNode t) { t = Step(_, _, _, result) }

  /** Holds if `n` is used in `isPumpable`. */
  predicate isARelevantEnd(CharNode n) {
    exists(State pivot, State succ |
      isReachableFromStartTuple(pivot, succ, getAnEndTuple(pivot, succ), n, _)
    )
  }

  string getChar(CharNode t) {
    exists(InputSymbol s1, InputSymbol s2, InputSymbol s3 | t = Step(s1, s2, s3, _) |
      result = getAThreewayIntersect(s1, s2, s3)
    )
  }
}

/**
 * Holds if matching repetitions of `pump` can:
 * 1) Transition from `pivot` back to `pivot`.
 * 2) Transition from `pivot` to `succ`.
 * 3) Transition from `succ` to `succ`.
 *
 * From theorem 3 in the paper linked at the top of this file we can therefore conclude that
 * the regular expression has polynomial backtracking - if a rejecting suffix exists.
 *
 * This predicate is used by `SuperLinearReDoSConfiguration`, and the final results are
 * available in the `hasReDoSResult` predicate.
 */
predicate isPumpable(State pivot, State succ, string pump) {
  exists(StateTuple q, Trace t |
    isReachableFromStartTuple(pivot, succ, q, t, _) and
    q = getAnEndTuple(pivot, succ) and
    pump = Concretizer<CharTreeImpl>::concretize(t)
  )
}
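
// Worked example (a sketch): for a regular expression like /^a*a*$/, `isPumpable` holds
// with `pivot` in the first `a*` loop, `succ` in the second `a*` loop, and pump = "a":
// repeating "a" loops on `pivot`, crosses from `pivot` to `succ`, and loops on `succ`,
// so an input such as "aaa...ab" (a rejecting suffix) can trigger quadratic backtracking
// in a backtracking engine.
//
// A minimal debug query over this predicate might look as follows (assuming a
// JavaScript database, and that `State`, `isPumpable`, and `State.getRepr()` are
// visible to the importing query, as they are elsewhere in this library):
//
//   import semmle.javascript.security.regexp.SuperlinearBackTracking
//
//   from State pivot, State succ, string pump
//   where isPumpable(pivot, succ, pump)
//   select pivot.getRepr(), succ.getRepr(), pump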

/**
 * Holds if states starting in `state` can have polynomial backtracking with the string `pump`.
 */
predicate isReDoSCandidate(State state, string pump) { isPumpable(_, state, pump) }

/**
 * Holds if repetitions of `pump` at `t` will cause polynomial backtracking.
 */
predicate polynomialReDoS(RegExpTerm t, string pump, string prefixMsg, RegExpTerm prev) {
  exists(State s, State pivot |
    ReDoSPruning<isReDoSCandidate/2>::hasReDoSResult(t, pump, s, prefixMsg) and
    isPumpable(pivot, s, _) and
    prev = pivot.getRepr()
  )
}
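
// Continuing the /^a*a*$/ sketch: here `t` would be the second `a*`, `pump` = "a",
// `prefixMsg` the (possibly empty) prefix description from `hasReDoSResult`, and
// `prev` the first `a*`, i.e. the earlier loop that also consumes the pump string.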

/**
 * Gets a message for why `term` can cause polynomial backtracking.
 */
string getReasonString(RegExpTerm term, string pump, string prefixMsg, RegExpTerm prev) {
  polynomialReDoS(term, pump, prefixMsg, prev) and
  result =
    "Strings " + prefixMsg + "with many repetitions of '" + pump +
      "' can start matching anywhere after the start of the preceeding " + prev
}
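
// For the /^a*a*$/ sketch this yields a message of the form
// "Strings with many repetitions of 'a' can start matching anywhere after the start of
// the preceeding a*", matching the format seen in the test expectations further below.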

/**
 * A term that may cause a regular expression engine to perform a
 * polynomial number of match attempts, relative to the input length.
 */
class PolynomialBackTrackingTerm extends InfiniteRepetitionQuantifier {
  string reason;
  string pump;
  string prefixMsg;
  RegExpTerm prev;

  PolynomialBackTrackingTerm() {
    reason = getReasonString(this, pump, prefixMsg, prev) and
    // there might be many reasons for this term to have polynomial backtracking - we pick the shortest one.
    reason = min(string msg | msg = getReasonString(this, _, _, _) | msg order by msg.length(), msg)
  }

  /**
   * Holds if all non-empty successors to the polynomial backtracking term match the end of the line.
   */
  predicate isAtEndLine() {
    forall(RegExpTerm succ | this.getSuccessor+() = succ and not matchesEpsilon(succ) |
      succ instanceof RegExpDollar
    )
  }
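
  // For example, in /\s+$/ the only non-epsilon successor of `\s+` is `$`, so
  // `isAtEndLine()` holds; in /\s+,/ the successor `,` is not a `RegExpDollar`,
  // so it does not.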

  /**
   * Gets the string that should be repeated to cause this regular expression to perform polynomially.
   */
  string getPumpString() { result = pump }

  /**
   * Gets a message for which prefix a matching string must start with for this term to cause polynomial backtracking.
   */
  string getPrefixMessage() { result = prefixMsg }

  /**
   * Gets a predecessor to `this`, which also loops on the pump string, and thereby causes polynomial backtracking.
   */
  RegExpTerm getPreviousLoop() { result = prev }

  /**
   * Gets the reason for the number of match attempts.
   */
  string getReason() { result = reason }
}
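
// Usage sketch (not part of this change): with the relocated module, a query against a
// JavaScript database can list the detected terms much like the deleted test query
// further below, e.g.:
//
//   import semmle.javascript.security.regexp.SuperlinearBackTracking
//
//   from PolynomialBackTrackingTerm t
//   select t, t.getReason(), t.getPumpString()
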
@@ -14,8 +14,8 @@
 */

import javascript
import semmle.javascript.security.performance.PolynomialReDoS::PolynomialReDoS
import semmle.javascript.security.performance.SuperlinearBackTracking
import semmle.javascript.security.regexp.PolynomialReDoSQuery
import semmle.javascript.security.regexp.SuperlinearBackTracking
import DataFlow::PathGraph

from

@@ -15,8 +15,8 @@
 */

import javascript
import semmle.javascript.security.performance.ReDoSUtil
import semmle.javascript.security.performance.ExponentialBackTracking
import semmle.javascript.security.regexp.NfaUtils
import semmle.javascript.security.regexp.ExponentialBackTracking

from RegExpTerm t, string pump, State s, string prefixMsg
where hasReDoSResult(t, pump, s, prefixMsg)

@@ -57,7 +57,7 @@ DangerousPrefix getADangerousMatchedPrefix(EmptyReplaceRegExpTerm t) {
  not exists(EmptyReplaceRegExpTerm pred | pred = t.getPredecessor+() and not pred.isNullable())
}

private import semmle.javascript.security.performance.ReDoSUtil as ReDoSUtil
private import semmle.javascript.security.regexp.NfaUtils as NfaUtils

/**
 * Gets a char from a dangerous prefix that is matched by `t`.
@@ -69,8 +69,8 @@ DangerousPrefixSubstring getADangerousMatchedChar(EmptyReplaceRegExpTerm t) {
  t.getAMatchedString() = result
  or
  // A substring matched by some character class. This is only used to match the "word" part of a HTML tag (e.g. "iframe" in "<iframe").
  exists(ReDoSUtil::CharacterClass cc |
    cc = ReDoSUtil::getCanonicalCharClass(t) and
  exists(NfaUtils::CharacterClass cc |
    cc = NfaUtils::getCanonicalCharClass(t) and
    cc.matches(result) and
    result.regexpMatch("\\w") and
    // excluding character classes that match ">" (e.g. /<[^<]*>/), as these might consume nested HTML tags, and thus prevent the dangerous pattern this query is looking for.

@@ -1,4 +0,0 @@
import semmle.javascript.security.performance.SuperlinearBackTracking

from PolynomialBackTrackingTerm t
select t, t.getReason()
@@ -1,5 +1,5 @@
| highlight.js:2:26:2:979 | ((traffic-flow\|traffic-generator\|firewall\|scheduler\|aaa\|accounting\|address-list\|address\|align\|area\|bandwidth-server\|bfd\|bgp\|bridge\|client\|clock\|community\|config\|connection\|console\|customer\|default\|dhcp-client\|dhcp-server\|discovery\|dns\|e-mail\|ethernet\|filter\|firewall\|firmware\|gps\|graphing\|group\|hardware\|health\|hotspot\|identity\|igmp-proxy\|incoming\|instance\|interface\|ip\|ipsec\|ipv6\|irq\|l2tp-server\|lcd\|ldp\|logging\|mac-server\|mac-winbox\|mangle\|manual\|mirror\|mme\|mpls\|nat\|nd\|neighbor\|network\|note\|ntp\|ospf\|ospf-v3\|ovpn-server\|page\|peer\|pim\|ping\|policy\|pool\|port\|ppp\|pppoe-client\|pptp-server\|prefix\|profile\|proposal\|proxy\|queue\|radius\|resource\|rip\|ripng\|route\|routing\|screen\|script\|security-profiles\|server\|service\|service-port\|settings\|shares\|smb\|sms\|sniffer\|snmp\|snooper\|socks\|sstp-server\|system\|tool\|tracking\|type\|upgrade\|upnp\|user-manager\|users\|user\|vlan\|secret\|vrrp\|watchdog\|web-access\|wireless\|pptp\|pppoe\|lan\|wan\|layer7-protocol\|lease\|simple\|raw);?\\s)+ | Strings starting with '/' and with many repetitions of 'ip ' can start matching anywhere after the start of the preceeding (\\.\\.\\/\|\\/\|\\s)((traffic-flow\|traffic-generator\|firewall\|scheduler\|aaa\|accounting\|address-list\|address\|align\|area\|bandwidth-server\|bfd\|bgp\|bridge\|client\|clock\|community\|config\|connection\|console\|customer\|default\|dhcp-client\|dhcp-server\|discovery\|dns\|e-mail\|ethernet\|filter\|firewall\|firmware\|gps\|graphing\|group\|hardware\|health\|hotspot\|identity\|igmp-proxy\|incoming\|instance\|interface\|ip\|ipsec\|ipv6\|irq\|l2tp-server\|lcd\|ldp\|logging\|mac-server\|mac-winbox\|mangle\|manual\|mirror\|mme\|mpls\|nat\|nd\|neighbor\|network\|note\|ntp\|ospf\|ospf-v3\|ovpn-server\|page\|peer\|pim\|ping\|policy\|pool\|port\|ppp\|pppoe-client\|pptp-server\|prefix\|profile\|proposal\|proxy\|queue\|radius\|resource\|rip\|ripng\|route\|routing\|screen\|script\|security-profiles\|server\|service\|service-port\|settings\|shares\|smb\|sms\|sniffer\|snmp\|snooper\|socks\|sstp-server\|system\|tool\|tracking\|type\|upgrade\|upnp\|user-manager\|users\|user\|vlan\|secret\|vrrp\|watchdog\|web-access\|wireless\|pptp\|pppoe\|lan\|wan\|layer7-protocol\|lease\|simple\|raw);?\\s)+X |
|
||||
| highlight.js:3:27:3:971 | ((traffic-flow\|traffic-generator\|firewall\|scheduler\|aaa\|accounting\|address-list\|address\|align\|area\|bandwidth-server\|bfd\|bgp\|bridge\|client\|clock\|community\|config\|connection\|console\|customer\|default\|dhcp-client\|dhcp-server\|discovery\|dns\|e-mail\|ethernet\|filter\|firmware\|gps\|graphing\|group\|hardware\|health\|hotspot\|identity\|igmp-proxy\|incoming\|instance\|interface\|ip\|ipsec\|ipv6\|irq\|l2tp-server\|lcd\|ldp\|logging\|mac-server\|mac-winbox\|mangle\|manual\|mirror\|mme\|mpls\|nat\|nd\|neighbor\|network\|note\|ntp\|ospf\|ospf-v3\|ovpn-server\|page\|peer\|pim\|ping\|policy\|pool\|port\|ppp\|pppoe-client\|pptp-server\|prefix\|profile\|proposal\|proxy\|queue\|radius\|resource\|rip\|ripng\|route\|routing\|screen\|script\|security-profiles\|server\|service\|service-port\|settings\|shares\|smb\|sms\|sniffer\|snmp\|snooper\|socks\|sstp-server\|system\|tool\|tracking\|type\|upgrade\|upnp\|user-manager\|users\|user\|vlan\|secret\|vrrp\|watchdog\|web-access\|wireless\|pptp\|pppoe\|lan\|wan\|layer7-protocol\|lease\|simple\|raw);?\\s)+ | Strings starting with '/' and with many repetitions of 'ip ' can start matching anywhere after the start of the preceeding (\\.\\.\\/\|\\/\|\\s)((traffic-flow\|traffic-generator\|firewall\|scheduler\|aaa\|accounting\|address-list\|address\|align\|area\|bandwidth-server\|bfd\|bgp\|bridge\|client\|clock\|community\|config\|connection\|console\|customer\|default\|dhcp-client\|dhcp-server\|discovery\|dns\|e-mail\|ethernet\|filter\|firmware\|gps\|graphing\|group\|hardware\|health\|hotspot\|identity\|igmp-proxy\|incoming\|instance\|interface\|ip\|ipsec\|ipv6\|irq\|l2tp-server\|lcd\|ldp\|logging\|mac-server\|mac-winbox\|mangle\|manual\|mirror\|mme\|mpls\|nat\|nd\|neighbor\|network\|note\|ntp\|ospf\|ospf-v3\|ovpn-server\|page\|peer\|pim\|ping\|policy\|pool\|port\|ppp\|pppoe-client\|pptp-server\|prefix\|profile\|proposal\|proxy\|queue\|radius\|resource\|rip\|ripng\|route\|routing\|screen\|script\|security-profiles\|server\|service\|service-port\|settings\|shares\|smb\|sms\|sniffer\|snmp\|snooper\|socks\|sstp-server\|system\|tool\|tracking\|type\|upgrade\|upnp\|user-manager\|users\|user\|vlan\|secret\|vrrp\|watchdog\|web-access\|wireless\|pptp\|pppoe\|lan\|wan\|layer7-protocol\|lease\|simple\|raw);?\\s)+X |
|
||||
| highlight.js:2:26:2:979 | ((traffic-flow\|traffic-generator\|firewall\|scheduler\|aaa\|accounting\|address-list\|address\|align\|area\|bandwidth-server\|bfd\|bgp\|bridge\|client\|clock\|community\|config\|connection\|console\|customer\|default\|dhcp-client\|dhcp-server\|discovery\|dns\|e-mail\|ethernet\|filter\|firewall\|firmware\|gps\|graphing\|group\|hardware\|health\|hotspot\|identity\|igmp-proxy\|incoming\|instance\|interface\|ip\|ipsec\|ipv6\|irq\|l2tp-server\|lcd\|ldp\|logging\|mac-server\|mac-winbox\|mangle\|manual\|mirror\|mme\|mpls\|nat\|nd\|neighbor\|network\|note\|ntp\|ospf\|ospf-v3\|ovpn-server\|page\|peer\|pim\|ping\|policy\|pool\|port\|ppp\|pppoe-client\|pptp-server\|prefix\|profile\|proposal\|proxy\|queue\|radius\|resource\|rip\|ripng\|route\|routing\|screen\|script\|security-profiles\|server\|service\|service-port\|settings\|shares\|smb\|sms\|sniffer\|snmp\|snooper\|socks\|sstp-server\|system\|tool\|tracking\|type\|upgrade\|upnp\|user-manager\|users\|user\|vlan\|secret\|vrrp\|watchdog\|web-access\|wireless\|pptp\|pppoe\|lan\|wan\|layer7-protocol\|lease\|simple\|raw);?\\s)+ | Strings starting with '/' and with many repetitions of 'ip\\t' can start matching anywhere after the start of the preceeding (\\.\\.\\/\|\\/\|\\s)((traffic-flow\|traffic-generator\|firewall\|scheduler\|aaa\|accounting\|address-list\|address\|align\|area\|bandwidth-server\|bfd\|bgp\|bridge\|client\|clock\|community\|config\|connection\|console\|customer\|default\|dhcp-client\|dhcp-server\|discovery\|dns\|e-mail\|ethernet\|filter\|firewall\|firmware\|gps\|graphing\|group\|hardware\|health\|hotspot\|identity\|igmp-proxy\|incoming\|instance\|interface\|ip\|ipsec\|ipv6\|irq\|l2tp-server\|lcd\|ldp\|logging\|mac-server\|mac-winbox\|mangle\|manual\|mirror\|mme\|mpls\|nat\|nd\|neighbor\|network\|note\|ntp\|ospf\|ospf-v3\|ovpn-server\|page\|peer\|pim\|ping\|policy\|pool\|port\|ppp\|pppoe-client\|pptp-server\|prefix\|profile\|proposal\|proxy\|queue\|radius\|resource\|rip\|ripng\|route\|routing\|screen\|script\|security-profiles\|server\|service\|service-port\|settings\|shares\|smb\|sms\|sniffer\|snmp\|snooper\|socks\|sstp-server\|system\|tool\|tracking\|type\|upgrade\|upnp\|user-manager\|users\|user\|vlan\|secret\|vrrp\|watchdog\|web-access\|wireless\|pptp\|pppoe\|lan\|wan\|layer7-protocol\|lease\|simple\|raw);?\\s)+X |
|
||||
| highlight.js:3:27:3:971 | ((traffic-flow\|traffic-generator\|firewall\|scheduler\|aaa\|accounting\|address-list\|address\|align\|area\|bandwidth-server\|bfd\|bgp\|bridge\|client\|clock\|community\|config\|connection\|console\|customer\|default\|dhcp-client\|dhcp-server\|discovery\|dns\|e-mail\|ethernet\|filter\|firmware\|gps\|graphing\|group\|hardware\|health\|hotspot\|identity\|igmp-proxy\|incoming\|instance\|interface\|ip\|ipsec\|ipv6\|irq\|l2tp-server\|lcd\|ldp\|logging\|mac-server\|mac-winbox\|mangle\|manual\|mirror\|mme\|mpls\|nat\|nd\|neighbor\|network\|note\|ntp\|ospf\|ospf-v3\|ovpn-server\|page\|peer\|pim\|ping\|policy\|pool\|port\|ppp\|pppoe-client\|pptp-server\|prefix\|profile\|proposal\|proxy\|queue\|radius\|resource\|rip\|ripng\|route\|routing\|screen\|script\|security-profiles\|server\|service\|service-port\|settings\|shares\|smb\|sms\|sniffer\|snmp\|snooper\|socks\|sstp-server\|system\|tool\|tracking\|type\|upgrade\|upnp\|user-manager\|users\|user\|vlan\|secret\|vrrp\|watchdog\|web-access\|wireless\|pptp\|pppoe\|lan\|wan\|layer7-protocol\|lease\|simple\|raw);?\\s)+ | Strings starting with '/' and with many repetitions of 'ip\\t' can start matching anywhere after the start of the preceeding (\\.\\.\\/\|\\/\|\\s)((traffic-flow\|traffic-generator\|firewall\|scheduler\|aaa\|accounting\|address-list\|address\|align\|area\|bandwidth-server\|bfd\|bgp\|bridge\|client\|clock\|community\|config\|connection\|console\|customer\|default\|dhcp-client\|dhcp-server\|discovery\|dns\|e-mail\|ethernet\|filter\|firmware\|gps\|graphing\|group\|hardware\|health\|hotspot\|identity\|igmp-proxy\|incoming\|instance\|interface\|ip\|ipsec\|ipv6\|irq\|l2tp-server\|lcd\|ldp\|logging\|mac-server\|mac-winbox\|mangle\|manual\|mirror\|mme\|mpls\|nat\|nd\|neighbor\|network\|note\|ntp\|ospf\|ospf-v3\|ovpn-server\|page\|peer\|pim\|ping\|policy\|pool\|port\|ppp\|pppoe-client\|pptp-server\|prefix\|profile\|proposal\|proxy\|queue\|radius\|resource\|rip\|ripng\|route\|routing\|screen\|script\|security-profiles\|server\|service\|service-port\|settings\|shares\|smb\|sms\|sniffer\|snmp\|snooper\|socks\|sstp-server\|system\|tool\|tracking\|type\|upgrade\|upnp\|user-manager\|users\|user\|vlan\|secret\|vrrp\|watchdog\|web-access\|wireless\|pptp\|pppoe\|lan\|wan\|layer7-protocol\|lease\|simple\|raw);?\\s)+X |
|
||||
| highlight.js:6:12:6:695 | (Add\|Clear\|Close\|Copy\|Enter\|Exit\|Find\|Format\|Get\|Hide\|Join\|Lock\|Move\|New\|Open\|Optimize\|Pop\|Push\|Redo\|Remove\|Rename\|Reset\|Resize\|Search\|Select\|Set\|Show\|Skip\|Split\|Step\|Switch\|Undo\|Unlock\|Watch\|Backup\|Checkpoint\|Compare\|Compress\|Convert\|ConvertFrom\|ConvertTo\|Dismount\|Edit\|Expand\|Export\|Group\|Import\|Initialize\|Limit\|Merge\|New\|Out\|Publish\|Restore\|Save\|Sync\|Unpublish\|Update\|Approve\|Assert\|Complete\|Confirm\|Deny\|Disable\|Enable\|Install\|Invoke\|Register\|Request\|Restart\|Resume\|Start\|Stop\|Submit\|Suspend\|Uninstall\|Unregister\|Wait\|Debug\|Measure\|Ping\|Repair\|Resolve\|Test\|Trace\|Connect\|Disconnect\|Read\|Receive\|Send\|Write\|Block\|Grant\|Protect\|Revoke\|Unblock\|Unprotect\|Use\|ForEach\|Sort\|Tee\|Where)+ | Strings with many repetitions of 'Add' can start matching anywhere after the start of the preceeding (Add\|Clear\|Close\|Copy\|Enter\|Exit\|Find\|Format\|Get\|Hide\|Join\|Lock\|Move\|New\|Open\|Optimize\|Pop\|Push\|Redo\|Remove\|Rename\|Reset\|Resize\|Search\|Select\|Set\|Show\|Skip\|Split\|Step\|Switch\|Undo\|Unlock\|Watch\|Backup\|Checkpoint\|Compare\|Compress\|Convert\|ConvertFrom\|ConvertTo\|Dismount\|Edit\|Expand\|Export\|Group\|Import\|Initialize\|Limit\|Merge\|New\|Out\|Publish\|Restore\|Save\|Sync\|Unpublish\|Update\|Approve\|Assert\|Complete\|Confirm\|Deny\|Disable\|Enable\|Install\|Invoke\|Register\|Request\|Restart\|Resume\|Start\|Stop\|Submit\|Suspend\|Uninstall\|Unregister\|Wait\|Debug\|Measure\|Ping\|Repair\|Resolve\|Test\|Trace\|Connect\|Disconnect\|Read\|Receive\|Send\|Write\|Block\|Grant\|Protect\|Revoke\|Unblock\|Unprotect\|Use\|ForEach\|Sort\|Tee\|Where)+(-)[\\w\\d]+ |
|
||||
| highlight.js:7:13:7:692 | (Add\|Clear\|Close\|Copy\|Enter\|Exit\|Find\|Format\|Get\|Hide\|Join\|Lock\|Move\|New\|Open\|Optimize\|Pop\|Push\|Redo\|Remove\|Rename\|Reset\|Resize\|Search\|Select\|Set\|Show\|Skip\|Split\|Step\|Switch\|Undo\|Unlock\|Watch\|Backup\|Checkpoint\|Compare\|Compress\|Convert\|ConvertFrom\|ConvertTo\|Dismount\|Edit\|Expand\|Export\|Group\|Import\|Initialize\|Limit\|Merge\|Out\|Publish\|Restore\|Save\|Sync\|Unpublish\|Update\|Approve\|Assert\|Complete\|Confirm\|Deny\|Disable\|Enable\|Install\|Invoke\|Register\|Request\|Restart\|Resume\|Start\|Stop\|Submit\|Suspend\|Uninstall\|Unregister\|Wait\|Debug\|Measure\|Ping\|Repair\|Resolve\|Test\|Trace\|Connect\|Disconnect\|Read\|Receive\|Send\|Write\|Block\|Grant\|Protect\|Revoke\|Unblock\|Unprotect\|Use\|ForEach\|Sort\|Tee\|Where)+ | Strings with many repetitions of 'Add' can start matching anywhere after the start of the preceeding (Add\|Clear\|Close\|Copy\|Enter\|Exit\|Find\|Format\|Get\|Hide\|Join\|Lock\|Move\|New\|Open\|Optimize\|Pop\|Push\|Redo\|Remove\|Rename\|Reset\|Resize\|Search\|Select\|Set\|Show\|Skip\|Split\|Step\|Switch\|Undo\|Unlock\|Watch\|Backup\|Checkpoint\|Compare\|Compress\|Convert\|ConvertFrom\|ConvertTo\|Dismount\|Edit\|Expand\|Export\|Group\|Import\|Initialize\|Limit\|Merge\|Out\|Publish\|Restore\|Save\|Sync\|Unpublish\|Update\|Approve\|Assert\|Complete\|Confirm\|Deny\|Disable\|Enable\|Install\|Invoke\|Register\|Request\|Restart\|Resume\|Start\|Stop\|Submit\|Suspend\|Uninstall\|Unregister\|Wait\|Debug\|Measure\|Ping\|Repair\|Resolve\|Test\|Trace\|Connect\|Disconnect\|Read\|Receive\|Send\|Write\|Block\|Grant\|Protect\|Revoke\|Unblock\|Unprotect\|Use\|ForEach\|Sort\|Tee\|Where)+(-)[\\w\\d]+ |
|
||||
| highlight.js:14:17:14:52 | [a-z0-9&#*=?@\\\\><:,()$[\\]_.{}!+%^-]+ | Strings with many repetitions of '!' can start matching anywhere after the start of the preceeding ([ ]*[a-z0-9&#*=?@\\\\><:,()$[\\]_.{}!+%^-]+)+ |
|
||||
@@ -41,20 +41,20 @@
|
||||
| lib/subLib5/feature.js:2:3:2:4 | a* | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding a*b |
|
||||
| lib/subLib5/main.js:2:3:2:4 | a* | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding a*b |
|
||||
| lib/sublib/factory.js:13:14:13:15 | f* | Strings with many repetitions of 'f' can start matching anywhere after the start of the preceeding f*g |
|
||||
| polynomial-redos.js:7:24:7:26 | \\s+ | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\s+$ |
|
||||
| polynomial-redos.js:7:24:7:26 | \\s+ | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding \\s+$ |
|
||||
| polynomial-redos.js:8:17:8:18 | * | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding *, * |
|
||||
| polynomial-redos.js:9:19:9:21 | \\s* | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\s*\\n\\s* |
|
||||
| polynomial-redos.js:9:19:9:21 | \\s* | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding \\s*\\n\\s* |
|
||||
| polynomial-redos.js:11:19:11:20 | .* | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding .*[/\\\\] |
|
||||
| polynomial-redos.js:12:19:12:20 | .* | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding .*\\. |
|
||||
| polynomial-redos.js:15:28:15:35 | [\\s\\S]*? | Strings starting with '`' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding `+ |
|
||||
| polynomial-redos.js:15:41:15:43 | \\s* | Strings starting with '`_' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| polynomial-redos.js:15:28:15:35 | [\\s\\S]*? | Strings starting with '`' and with many repetitions of '\\t' can start matching anywhere after the start of the preceeding `+ |
|
||||
| polynomial-redos.js:15:41:15:43 | \\s* | Strings starting with '`_' and with many repetitions of '\\t' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| polynomial-redos.js:16:25:16:32 | [\\s\\S]*? | Strings starting with '`' and with many repetitions of '``' can start matching anywhere after the start of the preceeding `+ |
|
||||
| polynomial-redos.js:17:5:17:6 | .* | Strings with many repetitions of ',' can start matching anywhere after the start of the preceeding (.*,)+ |
|
||||
| polynomial-redos.js:17:11:17:12 | .+ | Strings starting with ',' and with many repetitions of ',,' can start matching anywhere after the start of the preceeding .* |
|
||||
| polynomial-redos.js:18:83:18:100 | [\\u0600-\\u06FF\\/]+ | Strings with many repetitions of '/' can start matching anywhere after the start of the preceeding [\\u0600-\\u06FF\\/]+(\\s*?[\\u0600-\\u06FF]+){1,2} |
|
||||
| polynomial-redos.js:19:17:19:22 | [0-9]* | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding [0-9]*['a-z\\u00A0-\\u05FF\\u0700-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF]{1,256} |
|
||||
| polynomial-redos.js:20:56:20:58 | \\d+ | Strings with many repetitions of '9' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| polynomial-redos.js:22:57:22:59 | \\d+ | Strings with many repetitions of '9' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| polynomial-redos.js:20:56:20:58 | \\d+ | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| polynomial-redos.js:22:57:22:59 | \\d+ | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| polynomial-redos.js:25:37:25:56 | [a-zA-Z0-9+\\/ \\t\\n]+ | Strings starting with '-\\t' and with many repetitions of '\\t\\t' can start matching anywhere after the start of the preceeding [ \\t]+ |
|
||||
| polynomial-redos.js:25:63:25:64 | .* | Strings starting with '-\\t\\t' and with many repetitions of '=' can start matching anywhere after the start of the preceeding [=]* |
|
||||
| polynomial-redos.js:30:19:30:22 | [?]+ | Strings with many repetitions of '?' can start matching anywhere after the start of the preceeding [?]+.*$ |
|
||||
@@ -70,7 +70,7 @@
|
||||
| polynomial-redos.js:38:34:38:35 | .* | Strings starting with '<href="!"' and with many repetitions of 'href="!"' can start matching anywhere after the start of the preceeding .* |
|
||||
| polynomial-redos.js:40:51:40:63 | [?\\x21-\\x7E]* | Strings starting with ',-+' and with many repetitions of '++' can start matching anywhere after the start of the preceeding [A-Za-z0-9+/]+ |
|
||||
| polynomial-redos.js:43:67:43:72 | [^\\n]+ | Strings starting with '-\\t+\\t' and with many repetitions of '\\t\\t' can start matching anywhere after the start of the preceeding [\\n \\t]+ |
|
||||
| polynomial-redos.js:48:22:48:24 | \\s* | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\s*\\n\\s* |
|
||||
| polynomial-redos.js:48:22:48:24 | \\s* | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding \\s*\\n\\s* |
|
||||
| polynomial-redos.js:50:4:50:5 | .* | Strings starting with 'Y' and with many repetitions of 'Y' can start matching anywhere after the start of the preceeding Y.*X |
|
||||
| polynomial-redos.js:51:11:51:17 | (YH\|J)* | Strings starting with 'K' and with many repetitions of 'YH' can start matching anywhere after the start of the preceeding B?(YH\|K)(YH\|J)*X |
|
||||
| polynomial-redos.js:52:12:52:13 | .* | Strings starting with 'K' and with many repetitions of 'K' can start matching anywhere after the start of the preceeding B?(YH\|K).*X |
|
||||
@@ -92,7 +92,7 @@
|
||||
| polynomial-redos.js:68:8:68:9 | .* | Strings starting with 'X' and with many repetitions of 'X' can start matching anywhere after the start of the preceeding [^Y].*$ |
|
||||
| polynomial-redos.js:69:8:69:9 | .* | Strings starting with 'X' and with many repetitions of 'X' can start matching anywhere after the start of the preceeding [^Y].*$ |
|
||||
| polynomial-redos.js:71:51:71:63 | [?\\x21-\\x7E]* | Strings starting with ',-+' and with many repetitions of '++' can start matching anywhere after the start of the preceeding [A-Za-z0-9+/]+ |
|
||||
| polynomial-redos.js:73:50:73:51 | .* | Strings starting with 'MSIE 9.9' and with many repetitions of '9' can start matching anywhere after the start of the preceeding \\d+ |
|
||||
| polynomial-redos.js:73:50:73:51 | .* | Strings starting with 'MSIE 0.0' and with many repetitions of '0' can start matching anywhere after the start of the preceeding \\d+ |
|
||||
| polynomial-redos.js:75:18:75:19 | .* | Strings starting with '<' and with many repetitions of '<' can start matching anywhere after the start of the preceeding <.*class="([^"]+)".*> |
|
||||
| polynomial-redos.js:75:35:75:36 | .* | Strings starting with '<class="!"' and with many repetitions of 'class="!"' can start matching anywhere after the start of the preceeding .* |
|
||||
| polynomial-redos.js:77:18:77:19 | .* | Strings starting with 'Y' and with many repetitions of 'Y' can start matching anywhere after the start of the preceeding Y.*X |
|
||||
@@ -102,7 +102,7 @@
|
||||
| polynomial-redos.js:86:25:86:29 | (ab)* | Strings starting with 'ab' and with many repetitions of 'ab' can start matching anywhere after the start of the preceeding (ab)* |
|
||||
| polynomial-redos.js:88:18:88:19 | a* | Strings starting with 'a' and with many repetitions of 'a' can start matching anywhere after the start of the preceeding aa*X |
|
||||
| polynomial-redos.js:89:20:89:21 | a* | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding a* |
|
||||
| polynomial-redos.js:90:19:90:20 | a* | Strings starting with 'a' and with many repetitions of 'a' can start matching anywhere after the start of the preceeding \\wa*X |
|
||||
| polynomial-redos.js:90:19:90:20 | a* | Strings starting with '0' and with many repetitions of 'a' can start matching anywhere after the start of the preceeding \\wa*X |
|
||||
| polynomial-redos.js:94:28:94:37 | ([2-5]\|B)* | Strings with many repetitions of '3' can start matching anywhere after the start of the preceeding ([3-7]\|A)* |
|
||||
| polynomial-redos.js:95:21:95:30 | ([2-5]\|B)* | Strings with many repetitions of '2' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| polynomial-redos.js:96:28:96:30 | \\d* | Strings with many repetitions of '3' can start matching anywhere after the start of the preceeding ([3-7]\|A)* |
|
||||
@@ -110,54 +110,54 @@
|
||||
| polynomial-redos.js:100:18:100:19 | a+ | Strings starting with 'a' and with many repetitions of 'a' can start matching anywhere after the start of the preceeding aa+X |
|
||||
| polynomial-redos.js:101:17:101:18 | a+ | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding a+X |
|
||||
| polynomial-redos.js:102:20:102:21 | a+ | Strings starting with 'a' and with many repetitions of 'a' can start matching anywhere after the start of the preceeding a+ |
|
||||
| polynomial-redos.js:103:19:103:20 | a+ | Strings starting with 'a' and with many repetitions of 'a' can start matching anywhere after the start of the preceeding \\wa+X |
|
||||
| polynomial-redos.js:103:19:103:20 | a+ | Strings starting with '0' and with many repetitions of 'a' can start matching anywhere after the start of the preceeding \\wa+X |
|
||||
| polynomial-redos.js:104:17:104:18 | a+ | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding a+b+c+ |
|
||||
| polynomial-redos.js:107:28:107:37 | ([2-5]\|B)+ | Strings starting with '3' and with many repetitions of '3' can start matching anywhere after the start of the preceeding ([3-7]\|A)+ |
|
||||
| polynomial-redos.js:108:21:108:30 | ([2-5]\|B)+ | Strings starting with '9' and with many repetitions of '2' can start matching anywhere after the start of the preceeding \\d+ |
|
||||
| polynomial-redos.js:108:21:108:30 | ([2-5]\|B)+ | Strings starting with '0' and with many repetitions of '2' can start matching anywhere after the start of the preceeding \\d+ |
|
||||
| polynomial-redos.js:109:28:109:30 | \\d+ | Strings starting with '3' and with many repetitions of '3' can start matching anywhere after the start of the preceeding ([3-7]\|A)+ |
|
||||
| polynomial-redos.js:111:17:111:19 | \\s* | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\s*$ |
|
||||
| polynomial-redos.js:112:17:112:19 | \\s+ | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\s+$ |
|
||||
| polynomial-redos.js:111:17:111:19 | \\s* | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding \\s*$ |
|
||||
| polynomial-redos.js:112:17:112:19 | \\s+ | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding \\s+$ |
|
||||
| polynomial-redos.js:114:22:114:24 | \\w* | Strings starting with '5' and with many repetitions of '5' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| polynomial-redos.js:116:21:116:28 | [\\d\\D]*? | Strings starting with '/*' and with many repetitions of 'a/*' can start matching anywhere after the start of the preceeding \\/\\*[\\d\\D]*?\\*\\/ |
|
||||
| polynomial-redos.js:118:17:118:23 | (#\\d+)+ | Strings with many repetitions of '9' can start matching anywhere after the start of the preceeding \\d+ |
|
||||
| polynomial-redos.js:124:33:124:35 | \\s+ | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\s+$ |
|
||||
| polynomial-redos.js:118:17:118:23 | (#\\d+)+ | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding \\d+ |
|
||||
| polynomial-redos.js:124:33:124:35 | \\s+ | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding \\s+$ |
|
||||
| polynomial-redos.js:130:21:130:22 | c+ | Strings starting with 'c' and with many repetitions of 'c' can start matching anywhere after the start of the preceeding cc+D |
|
||||
| polynomial-redos.js:133:22:133:23 | f+ | Strings starting with 'f' and with many repetitions of 'f' can start matching anywhere after the start of the preceeding ff+G |
|
||||
| regexplib/address.js:27:3:27:5 | \\s* | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding (\\s*\\(?0\\d{4}\\)?(\\s*\|-)\\d{3}(\\s*\|-)\\d{3}\\s*) |
|
||||
| regexplib/address.js:27:48:27:50 | \\s* | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding (\\s*\\(?0\\d{3}\\)?(\\s*\|-)\\d{3}(\\s*\|-)\\d{4}\\s*) |
|
||||
| regexplib/address.js:27:93:27:95 | \\s* | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding (\\s*(7\|8)(\\d{7}\|\\d{3}(\\-\|\\s{1})\\d{4})\\s*) |
|
||||
| regexplib/address.js:27:3:27:5 | \\s* | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding (\\s*\\(?0\\d{4}\\)?(\\s*\|-)\\d{3}(\\s*\|-)\\d{3}\\s*) |
|
||||
| regexplib/address.js:27:48:27:50 | \\s* | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding (\\s*\\(?0\\d{3}\\)?(\\s*\|-)\\d{3}(\\s*\|-)\\d{4}\\s*) |
|
||||
| regexplib/address.js:27:93:27:95 | \\s* | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding (\\s*(7\|8)(\\d{7}\|\\d{3}(\\-\|\\s{1})\\d{4})\\s*) |
|
||||
| regexplib/address.js:38:39:38:45 | [ 0-9]* | Strings starting with 'po' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding [ \|\\.]* |
|
||||
| regexplib/address.js:51:220:51:222 | \\w+ | Strings starting with 'C/O ' and with many repetitions of 'a' can start matching anywhere after the start of the preceeding \\x20* |
|
||||
| regexplib/address.js:51:331:51:344 | [a-zA-Z0-9\\-]+ | Strings starting with '9 a#' and with many repetitions of 'FL0' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/address.js:51:399:51:401 | \\s+ | Strings starting with '9 a' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\x20* |
|
||||
| regexplib/address.js:51:415:51:419 | \\x20+ | Strings starting with '9 a 9' and with many repetitions of ' 0 ' can start matching anywhere after the start of the preceeding \\x20* |
|
||||
| regexplib/address.js:51:420:51:422 | \\w+ | Strings starting with '9 a 9 ' and with many repetitions of 'a 0 ' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/address.js:51:616:51:618 | \\w+ | Strings starting with '9 a C/O ' and with many repetitions of 'a' can start matching anywhere after the start of the preceeding \\x20* |
|
||||
| regexplib/address.js:51:727:51:740 | [a-zA-Z0-9\\-]+ | Strings starting with '9 a 9 a#' and with many repetitions of 'FL0' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/address.js:51:796:51:798 | \\s+ | Strings starting with '9 a ' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\s+ |
|
||||
| regexplib/address.js:51:803:51:811 | [A-Za-z]+ | Strings starting with '9 a ' and with many repetitions of 'A' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/address.js:51:220:51:222 | \\w+ | Strings starting with 'C/O ' and with many repetitions of '0' can start matching anywhere after the start of the preceeding \\x20* |
|
||||
| regexplib/address.js:51:331:51:344 | [a-zA-Z0-9\\-]+ | Strings starting with '0 0#' and with many repetitions of 'FL0' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/address.js:51:399:51:401 | \\s+ | Strings starting with '0 0' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\x20* |
|
||||
| regexplib/address.js:51:415:51:419 | \\x20+ | Strings starting with '0 0\\t0' and with many repetitions of ' 0 ' can start matching anywhere after the start of the preceeding \\x20* |
|
||||
| regexplib/address.js:51:420:51:422 | \\w+ | Strings starting with '0 0\\t0 ' and with many repetitions of '0 0 ' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/address.js:51:616:51:618 | \\w+ | Strings starting with '0 0\\tC/O ' and with many repetitions of '0' can start matching anywhere after the start of the preceeding \\x20* |
|
||||
| regexplib/address.js:51:727:51:740 | [a-zA-Z0-9\\-]+ | Strings starting with '0 0\\t0 0#' and with many repetitions of 'FL0' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/address.js:51:796:51:798 | \\s+ | Strings starting with '0 0\\t' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\s+ |
|
||||
| regexplib/address.js:51:803:51:811 | [A-Za-z]+ | Strings starting with '0 0\\t\\t' and with many repetitions of 'A' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/address.js:67:379:67:755 | [a-zA-Z0-9ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝßàáâãäåæçèéêëìíîïñòóôõöøùúûüýÿ\\.\\,\\-\\/\\' ]+ | Strings starting with '#' and with many repetitions of '#' can start matching anywhere after the start of the preceeding [a-zA-Z0-9ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝßàáâãäåæçèéêëìíîïñòóôõöøùúûüýÿ\\.\\,\\-\\/\\']+ |
|
||||
| regexplib/address.js:69:3:69:5 | \\s* | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding (\\s*\\(?0\\d{4}\\)?(\\s*\|-)\\d{3}(\\s*\|-)\\d{3}\\s*) |
|
||||
| regexplib/address.js:69:48:69:50 | \\s* | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding (\\s*\\(?0\\d{3}\\)?(\\s*\|-)\\d{3}(\\s*\|-)\\d{4}\\s*) |
|
||||
| regexplib/address.js:69:93:69:95 | \\s* | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding (\\s*(7\|8)(\\d{7}\|\\d{3}(\\-\|\\s{1})\\d{4})\\s*) |
|
||||
| regexplib/address.js:75:220:75:222 | \\w+ | Strings starting with 'C/O ' and with many repetitions of 'a' can start matching anywhere after the start of the preceeding \\x20* |
|
||||
| regexplib/address.js:75:331:75:344 | [a-zA-Z0-9\\-]+ | Strings starting with '9 a#' and with many repetitions of 'FL0' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/address.js:75:399:75:401 | \\s+ | Strings starting with '9 a' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\x20* |
|
||||
| regexplib/address.js:75:415:75:419 | \\x20+ | Strings starting with '9 a 9' and with many repetitions of ' 0 ' can start matching anywhere after the start of the preceeding \\x20* |
|
||||
| regexplib/address.js:75:420:75:422 | \\w+ | Strings starting with '9 a 9 ' and with many repetitions of 'a 0 ' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/address.js:75:616:75:618 | \\w+ | Strings starting with '9 a C/O ' and with many repetitions of 'a' can start matching anywhere after the start of the preceeding \\x20* |
|
||||
| regexplib/address.js:75:727:75:740 | [a-zA-Z0-9\\-]+ | Strings starting with '9 a 9 a#' and with many repetitions of 'FL0' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/address.js:75:796:75:798 | \\s+ | Strings starting with '9 a ' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\s+ |
|
||||
| regexplib/address.js:75:803:75:811 | [A-Za-z]+ | Strings starting with '9 a ' and with many repetitions of 'A' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/address.js:69:3:69:5 | \\s* | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding (\\s*\\(?0\\d{4}\\)?(\\s*\|-)\\d{3}(\\s*\|-)\\d{3}\\s*) |
|
||||
| regexplib/address.js:69:48:69:50 | \\s* | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding (\\s*\\(?0\\d{3}\\)?(\\s*\|-)\\d{3}(\\s*\|-)\\d{4}\\s*) |
|
||||
| regexplib/address.js:69:93:69:95 | \\s* | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding (\\s*(7\|8)(\\d{7}\|\\d{3}(\\-\|\\s{1})\\d{4})\\s*) |
|
||||
| regexplib/address.js:75:220:75:222 | \\w+ | Strings starting with 'C/O ' and with many repetitions of '0' can start matching anywhere after the start of the preceeding \\x20* |
|
||||
| regexplib/address.js:75:331:75:344 | [a-zA-Z0-9\\-]+ | Strings starting with '0 0#' and with many repetitions of 'FL0' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/address.js:75:399:75:401 | \\s+ | Strings starting with '0 0' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\x20* |
|
||||
| regexplib/address.js:75:415:75:419 | \\x20+ | Strings starting with '0 0\\t0' and with many repetitions of ' 0 ' can start matching anywhere after the start of the preceeding \\x20* |
|
||||
| regexplib/address.js:75:420:75:422 | \\w+ | Strings starting with '0 0\\t0 ' and with many repetitions of '0 0 ' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/address.js:75:616:75:618 | \\w+ | Strings starting with '0 0\\tC/O ' and with many repetitions of '0' can start matching anywhere after the start of the preceeding \\x20* |
|
||||
| regexplib/address.js:75:727:75:740 | [a-zA-Z0-9\\-]+ | Strings starting with '0 0\\t0 0#' and with many repetitions of 'FL0' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/address.js:75:796:75:798 | \\s+ | Strings starting with '0 0\\t' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\s+ |
|
||||
| regexplib/address.js:75:803:75:811 | [A-Za-z]+ | Strings starting with '0 0\\t\\t' and with many repetitions of 'A' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/address.js:85:15:85:49 | ([0-9]\|[ ]\|[-]\|[\\(]\|[\\)]\|ext.\|[,])+ | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding (?<Telephone>([0-9]\|[ ]\|[-]\|[\\(]\|[\\)]\|ext.\|[,])+)([ ]\|[:]\|\\t\|[-])*(?<Where>Home\|Office\|Work\|Away\|Fax\|FAX\|Phone) |
|
||||
| regexplib/address.js:85:51:85:67 | ([ ]\|[:]\|\\t\|[-])* | Strings starting with '0' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding ([0-9]\|[ ]\|[-]\|[\\(]\|[\\)]\|ext.\|[,])+ |
|
||||
| regexplib/address.js:93:3:93:5 | \\s* | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding (\\s*\\(?0\\d{4}\\)?(\\s*\|-)\\d{3}(\\s*\|-)\\d{3}\\s*) |
|
||||
| regexplib/address.js:93:48:93:50 | \\s* | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding (\\s*\\(?0\\d{3}\\)?(\\s*\|-)\\d{3}(\\s*\|-)\\d{4}\\s*) |
|
||||
| regexplib/address.js:93:93:93:95 | \\s* | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding (\\s*(7\|8)(\\d{7}\|\\d{3}(\\-\|\\s{1})\\d{4})\\s*) |
|
||||
| regexplib/address.js:93:3:93:5 | \\s* | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding (\\s*\\(?0\\d{4}\\)?(\\s*\|-)\\d{3}(\\s*\|-)\\d{3}\\s*) |
|
||||
| regexplib/address.js:93:48:93:50 | \\s* | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding (\\s*\\(?0\\d{3}\\)?(\\s*\|-)\\d{3}(\\s*\|-)\\d{4}\\s*) |
|
||||
| regexplib/address.js:93:93:93:95 | \\s* | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding (\\s*(7\|8)(\\d{7}\|\\d{3}(\\-\|\\s{1})\\d{4})\\s*) |
|
||||
| regexplib/address.js:95:379:95:755 | [a-zA-Z0-9ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝßàáâãäåæçèéêëìíîïñòóôõöøùúûüýÿ\\.\\,\\-\\/\\' ]+ | Strings starting with '#' and with many repetitions of '#' can start matching anywhere after the start of the preceeding [a-zA-Z0-9ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝßàáâãäåæçèéêëìíîïñòóôõöøùúûüýÿ\\.\\,\\-\\/\\']+ |
|
||||
| regexplib/address.js:99:19:99:24 | [0-9]* | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding [0-9]* |
|
||||
| regexplib/email.js:1:16:1:22 | [-.\\w]* | Strings starting with '0' and with many repetitions of '0' can start matching anywhere after the start of the preceeding ([-.\\w]*[0-9a-zA-Z])* |
|
||||
| regexplib/email.js:2:5:2:12 | [-._\\w]* | Strings starting with 'a' and with many repetitions of '0' can start matching anywhere after the start of the preceeding (\\w[-._\\w]*\\w@\\w[-._\\w]*\\w\\.\\w{2,3}) |
|
||||
| regexplib/email.js:2:5:2:12 | [-._\\w]* | Strings starting with '0' and with many repetitions of '0' can start matching anywhere after the start of the preceeding (\\w[-._\\w]*\\w@\\w[-._\\w]*\\w\\.\\w{2,3}) |
|
||||
| regexplib/email.js:5:24:5:35 | [a-zA-Z0-9]+ | Strings starting with '0' and with many repetitions of '0' can start matching anywhere after the start of the preceeding ([_\\.\\-]?[a-zA-Z0-9]+)* |
|
||||
| regexplib/email.js:5:63:5:74 | [a-zA-Z0-9]+ | Strings starting with '0@0' and with many repetitions of '0' can start matching anywhere after the start of the preceeding [A-Za-z0-9]+ |
|
||||
| regexplib/email.js:8:16:8:49 | [^ \\t\\(\\)\\<\\>@,;\\:\\\\\\"\\.\\[\\]\\r\\n]+ | Strings with many repetitions of '!' can start matching anywhere after the start of the preceeding (?<user>(?:(?:[^ \\t\\(\\)\\<\\>@,;\\:\\\\\\"\\.\\[\\]\\r\\n]+)\|(?:\\"(?:(?:[^\\"\\\\\\r\\n])\|(?:\\\\.))*\\"))(?:\\.(?:(?:[^ \\t\\(\\)\\<\\>@,;\\:\\\\\\"\\.\\[\\]\\r\\n]+)\|(?:\\"(?:(?:[^\\"\\\\\\r\\n])\|(?:\\\\.))*\\")))*)@(?<domain>(?:(?:[^ \\t\\(\\)\\<\\>@,;\\:\\\\\\"\\.\\[\\]\\r\\n]+)\|(?:\\[(?:(?:[^\\[\\]\\\\\\r\\n])\|(?:\\\\.))*\\]))(?:\\.(?:(?:[^ \\t\\(\\)\\<\\>@,;\\:\\\\\\"\\.\\[\\]\\r\\n]+)\|(?:\\[(?:(?:[^\\[\\]\\\\\\r\\n])\|(?:\\\\.))*\\])))*) |
|
||||
@@ -165,20 +165,20 @@
|
||||
| regexplib/email.js:8:89:8:174 | (?:\\.(?:(?:[^ \\t\\(\\)\\<\\>@,;\\:\\\\\\"\\.\\[\\]\\r\\n]+)\|(?:\\"(?:(?:[^\\"\\\\\\r\\n])\|(?:\\\\.))*\\")))* | Strings starting with '!' and with many repetitions of '.!' can start matching anywhere after the start of the preceeding (?<user>(?:(?:[^ \\t\\(\\)\\<\\>@,;\\:\\\\\\"\\.\\[\\]\\r\\n]+)\|(?:\\"(?:(?:[^\\"\\\\\\r\\n])\|(?:\\\\.))*\\"))(?:\\.(?:(?:[^ \\t\\(\\)\\<\\>@,;\\:\\\\\\"\\.\\[\\]\\r\\n]+)\|(?:\\"(?:(?:[^\\"\\\\\\r\\n])\|(?:\\\\.))*\\")))*)@(?<domain>(?:(?:[^ \\t\\(\\)\\<\\>@,;\\:\\\\\\"\\.\\[\\]\\r\\n]+)\|(?:\\[(?:(?:[^\\[\\]\\\\\\r\\n])\|(?:\\\\.))*\\]))(?:\\.(?:(?:[^ \\t\\(\\)\\<\\>@,;\\:\\\\\\"\\.\\[\\]\\r\\n]+)\|(?:\\[(?:(?:[^\\[\\]\\\\\\r\\n])\|(?:\\\\.))*\\])))*) |
|
||||
| regexplib/email.js:8:100:8:133 | [^ \\t\\(\\)\\<\\>@,;\\:\\\\\\"\\.\\[\\]\\r\\n]+ | Strings starting with '!.' and with many repetitions of '!.!' can start matching anywhere after the start of the preceeding (?<user>(?:(?:[^ \\t\\(\\)\\<\\>@,;\\:\\\\\\"\\.\\[\\]\\r\\n]+)\|(?:\\"(?:(?:[^\\"\\\\\\r\\n])\|(?:\\\\.))*\\"))(?:\\.(?:(?:[^ \\t\\(\\)\\<\\>@,;\\:\\\\\\"\\.\\[\\]\\r\\n]+)\|(?:\\"(?:(?:[^\\"\\\\\\r\\n])\|(?:\\\\.))*\\")))*)@(?<domain>(?:(?:[^ \\t\\(\\)\\<\\>@,;\\:\\\\\\"\\.\\[\\]\\r\\n]+)\|(?:\\[(?:(?:[^\\[\\]\\\\\\r\\n])\|(?:\\\\.))*\\]))(?:\\.(?:(?:[^ \\t\\(\\)\\<\\>@,;\\:\\\\\\"\\.\\[\\]\\r\\n]+)\|(?:\\[(?:(?:[^\\[\\]\\\\\\r\\n])\|(?:\\\\.))*\\])))*) |
|
||||
| regexplib/email.js:8:141:8:168 | (?:(?:[^\\"\\\\\\r\\n])\|(?:\\\\.))* | Strings starting with '!."' and with many repetitions of '".!."' can start matching anywhere after the start of the preceeding (?<user>(?:(?:[^ \\t\\(\\)\\<\\>@,;\\:\\\\\\"\\.\\[\\]\\r\\n]+)\|(?:\\"(?:(?:[^\\"\\\\\\r\\n])\|(?:\\\\.))*\\"))(?:\\.(?:(?:[^ \\t\\(\\)\\<\\>@,;\\:\\\\\\"\\.\\[\\]\\r\\n]+)\|(?:\\"(?:(?:[^\\"\\\\\\r\\n])\|(?:\\\\.))*\\")))*)@(?<domain>(?:(?:[^ \\t\\(\\)\\<\\>@,;\\:\\\\\\"\\.\\[\\]\\r\\n]+)\|(?:\\[(?:(?:[^\\[\\]\\\\\\r\\n])\|(?:\\\\.))*\\]))(?:\\.(?:(?:[^ \\t\\(\\)\\<\\>@,;\\:\\\\\\"\\.\\[\\]\\r\\n]+)\|(?:\\[(?:(?:[^\\[\\]\\\\\\r\\n])\|(?:\\\\.))*\\])))*) |
|
||||
| regexplib/email.js:12:2:12:4 | \\w+ | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding \\w+([-+.]\\w+)*@\\w+([-.]\\w+)*\\.\\w+([-.]\\w+)*([,;]\\s*\\w+([-+.]\\w+)*@\\w+([-.]\\w+)*\\.\\w+([-.]\\w+)*)* |
|
||||
| regexplib/email.js:12:5:12:15 | ([-+.]\\w+)* | Strings starting with 'a' and with many repetitions of '+a' can start matching anywhere after the start of the preceeding \\w+([-+.]\\w+)*@\\w+([-.]\\w+)*\\.\\w+([-.]\\w+)*([,;]\\s*\\w+([-+.]\\w+)*@\\w+([-.]\\w+)*\\.\\w+([-.]\\w+)*)* |
|
||||
| regexplib/email.js:12:11:12:13 | \\w+ | Strings starting with 'a+' and with many repetitions of 'a+' can start matching anywhere after the start of the preceeding \\w+([-+.]\\w+)*@\\w+([-.]\\w+)*\\.\\w+([-.]\\w+)*([,;]\\s*\\w+([-+.]\\w+)*@\\w+([-.]\\w+)*\\.\\w+([-.]\\w+)*)* |
|
||||
| regexplib/email.js:12:2:12:4 | \\w+ | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding \\w+([-+.]\\w+)*@\\w+([-.]\\w+)*\\.\\w+([-.]\\w+)*([,;]\\s*\\w+([-+.]\\w+)*@\\w+([-.]\\w+)*\\.\\w+([-.]\\w+)*)* |
|
||||
| regexplib/email.js:12:5:12:15 | ([-+.]\\w+)* | Strings starting with '0' and with many repetitions of '+0' can start matching anywhere after the start of the preceeding \\w+([-+.]\\w+)*@\\w+([-.]\\w+)*\\.\\w+([-.]\\w+)*([,;]\\s*\\w+([-+.]\\w+)*@\\w+([-.]\\w+)*\\.\\w+([-.]\\w+)*)* |
|
||||
| regexplib/email.js:12:11:12:13 | \\w+ | Strings starting with '0+' and with many repetitions of '0+' can start matching anywhere after the start of the preceeding \\w+([-+.]\\w+)*@\\w+([-.]\\w+)*\\.\\w+([-.]\\w+)*([,;]\\s*\\w+([-+.]\\w+)*@\\w+([-.]\\w+)*\\.\\w+([-.]\\w+)*)* |
|
||||
| regexplib/email.js:13:47:13:49 | \\s+ | Strings starting with 'A' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding [a-zA-Z]* |
|
||||
| regexplib/email.js:15:6:15:13 | [\\w-\\.]* | Strings starting with 'a' and with many repetitions of '0' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/email.js:15:28:15:30 | \\w* | Strings starting with 'a@a' and with many repetitions of 'aa' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/email.js:20:3:20:6 | \\w+? | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding (\\w+?@\\w+?\\x2E.+) |
|
||||
| regexplib/email.js:15:6:15:13 | [\\w-\\.]* | Strings starting with '0' and with many repetitions of '0' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/email.js:15:28:15:30 | \\w* | Strings starting with '0@0' and with many repetitions of '00' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/email.js:20:3:20:6 | \\w+? | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding (\\w+?@\\w+?\\x2E.+) |
|
||||
| regexplib/email.js:25:67:25:78 | [a-zA-Z0-9]+ | Strings starting with '0' and with many repetitions of '0' can start matching anywhere after the start of the preceeding ([_\\.\\-]?[a-zA-Z0-9]+)* |
|
||||
| regexplib/email.js:25:106:25:117 | [a-zA-Z0-9]+ | Strings starting with '0@0' and with many repetitions of '0' can start matching anywhere after the start of the preceeding [A-Za-z0-9]+ |
|
||||
| regexplib/email.js:25:212:25:223 | [a-zA-Z0-9]+ | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding ([_\\.\\-]?[a-zA-Z0-9]+)* |
|
||||
| regexplib/email.js:25:251:25:262 | [a-zA-Z0-9]+ | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding [A-Za-z0-9]+ |
|
||||
| regexplib/email.js:28:2:28:4 | \\w+ | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding \\w+[\\w-\\.]*\\@\\w+((-\\w+)\|(\\w*))\\.[a-z]{2,3}$ |
|
||||
| regexplib/email.js:28:5:28:12 | [\\w-\\.]* | Strings starting with 'a' and with many repetitions of '0' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/email.js:28:27:28:29 | \\w* | Strings starting with 'a@a' and with many repetitions of 'aa' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/email.js:28:2:28:4 | \\w+ | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding \\w+[\\w-\\.]*\\@\\w+((-\\w+)\|(\\w*))\\.[a-z]{2,3}$ |
|
||||
| regexplib/email.js:28:5:28:12 | [\\w-\\.]* | Strings starting with '0' and with many repetitions of '0' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/email.js:28:27:28:29 | \\w* | Strings starting with '0@0' and with many repetitions of '00' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/email.js:28:73:28:87 | [0-9a-zA-Z'\\.]+ | Strings with many repetitions of ''' can start matching anywhere after the start of the preceeding ([0-9a-zA-Z'\\.]+)@([0-9a-zA-Z']+)\\.([0-9a-zA-Z']+)$ |
|
||||
| regexplib/email.js:28:125:28:139 | [0-9a-zA-Z'\\.]+ | Strings with many repetitions of ''' can start matching anywhere after the start of the preceeding ([0-9a-zA-Z'\\.]+)@([0-9a-zA-Z']+)\\*+$ |
|
||||
| regexplib/email.js:29:2:29:7 | [\\w-]+ | Strings with many repetitions of '-' can start matching anywhere after the start of the preceeding [\\w-]+@([\\w-]+\\.)+[\\w-]+ |
|
||||
@@ -193,11 +193,11 @@
|
||||
| regexplib/markup.js:1:21:1:22 | .* | Strings starting with '<=' and with many repetitions of '==' can start matching anywhere after the start of the preceeding .* |
|
||||
| regexplib/markup.js:1:40:1:44 | [^<]* | Strings starting with '<=.jpg' and with many repetitions of ';' can start matching anywhere after the start of the preceeding .* |
|
||||
| regexplib/markup.js:2:3:2:7 | [^>]* | Strings starting with '<' and with many repetitions of '<' can start matching anywhere after the start of the preceeding <[^>]*> |
|
||||
| regexplib/markup.js:3:440:3:456 | (\\s(?<attr>.+?))* | Strings starting with '<?i:q' and with many repetitions of ' a' can start matching anywhere after the start of the preceeding .+? |
|
||||
| regexplib/markup.js:3:451:3:453 | .+? | Strings starting with '<?i:q ' and with many repetitions of ' a' can start matching anywhere after the start of the preceeding (\\s(?<attr>.+?))* |
|
||||
| regexplib/markup.js:3:440:3:456 | (\\s(?<attr>.+?))* | Strings starting with '<?i:q' and with many repetitions of '\\ta' can start matching anywhere after the start of the preceeding .+? |
|
||||
| regexplib/markup.js:3:451:3:453 | .+? | Strings starting with '<?i:q\\t' and with many repetitions of '\\ta' can start matching anywhere after the start of the preceeding (\\s(?<attr>.+?))* |
|
||||
| regexplib/markup.js:5:1525:5:1527 | \\s* | Strings starting with '?'DateLiteral' ?# Per the VB Spec : DateLiteral ::= '#' DateOrTime '#' # ?'DateOrTime' DateValue ?# TimeValue ::= HourValue : MinuteValue 10 ?# Hour 01 - 24 : 60 ?# Minute 01 - 60 : ?# Optional Minute :01 - :60 ' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/markup.js:6:11:6:25 | [\\w\\*\\)\\(\\,\\s]+ | Strings starting with 'SELECT ' and with many repetitions of 'SELECT\\t' can start matching anywhere after the start of the preceeding (SELECT\\s[\\w\\*\\)\\(\\,\\s]+\\sFROM\\s[\\w]+) |
|
||||
| regexplib/markup.js:6:99:6:113 | [\\s\\w\\d\\)\\(\\,]* | Strings starting with ' INSERT INTO 0' and with many repetitions of '0' can start matching anywhere after the start of the preceeding [\\d\\w]+ |
|
||||
| regexplib/markup.js:6:11:6:25 | [\\w\\*\\)\\(\\,\\s]+ | Strings starting with 'SELECT\\t' and with many repetitions of 'SELECT\\t' can start matching anywhere after the start of the preceeding (SELECT\\s[\\w\\*\\)\\(\\,\\s]+\\sFROM\\s[\\w]+) |
|
||||
| regexplib/markup.js:6:99:6:113 | [\\s\\w\\d\\)\\(\\,]* | Strings starting with ' INSERT\\tINTO\\t0' and with many repetitions of '0' can start matching anywhere after the start of the preceeding [\\d\\w]+ |
|
||||
| regexplib/markup.js:7:15:7:21 | [^\\\\"]* | Strings starting with '"!' and with many repetitions of '\\\\"!!' can start matching anywhere after the start of the preceeding "([^"](?:\\\\.\|[^\\\\"]*)*)" |
|
||||
| regexplib/markup.js:9:6:9:13 | [\\s\\S]*? | Strings starting with '<!--' and with many repetitions of '<!--' can start matching anywhere after the start of the preceeding <!--[\\s\\S]*?--[ \\t\\n\\r]*> |
|
||||
| regexplib/markup.js:11:6:11:8 | .*? | Strings starting with '<!--' and with many repetitions of '<!--' can start matching anywhere after the start of the preceeding <!--.*?--> |
|
||||
@@ -211,44 +211,44 @@
|
||||
| regexplib/markup.js:14:24:14:25 | .* | Strings starting with '<>' and with many repetitions of '>a' can start matching anywhere after the start of the preceeding .* |
|
||||
| regexplib/markup.js:15:16:15:18 | .*? | Strings starting with '<img' and with many repetitions of '<imga' can start matching anywhere after the start of the preceeding <(\\/{0,1})img(.*?)(\\/{0,1})\\> |
|
||||
| regexplib/markup.js:16:5:16:9 | [^>]* | Strings starting with 'src' and with many repetitions of 'src' can start matching anywhere after the start of the preceeding src[^>]*[^/].(?:jpg\|bmp\|gif)(?:\\"\|\\') |
|
||||
| regexplib/markup.js:17:8:17:24 | (\\s(\\w*=".*?")?)* | Strings starting with '<a' and with many repetitions of ' =""' can start matching anywhere after the start of the preceeding .*? |
|
||||
| regexplib/markup.js:17:12:17:14 | \\w* | Strings starting with '<a ' and with many repetitions of '="" a' can start matching anywhere after the start of the preceeding .*? |
|
||||
| regexplib/markup.js:17:17:17:19 | .*? | Strings starting with '<a ="' and with many repetitions of ' =""' can start matching anywhere after the start of the preceeding \\w* |
|
||||
| regexplib/markup.js:17:40:17:42 | .*? | Strings starting with '<a>' and with many repetitions of '">' can start matching anywhere after the start of the preceeding .*? |
|
||||
| regexplib/markup.js:19:2:19:12 | (<meta\\s+)* | Strings with many repetitions of '<meta ' can start matching anywhere after the start of the preceeding (<meta\\s+)*((name\\s*=\\s*("\|')(?<name>[^'("\|')]*)("\|')){1}\|content\\s*=\\s*("\|')(?<content>[^'("\|')]*)("\|')\|scheme\\s*=\\s*("\|')(?<scheme>[^'("\|')]*)("\|')) |
|
||||
| regexplib/markup.js:19:8:19:10 | \\s+ | Strings starting with '<meta' and with many repetitions of ' <meta' can start matching anywhere after the start of the preceeding (<meta\\s+)*((name\\s*=\\s*("\|')(?<name>[^'("\|')]*)("\|')){1}\|content\\s*=\\s*("\|')(?<content>[^'("\|')]*)("\|')\|scheme\\s*=\\s*("\|')(?<scheme>[^'("\|')]*)("\|')) |
|
||||
| regexplib/markup.js:17:8:17:24 | (\\s(\\w*=".*?")?)* | Strings starting with '<0' and with many repetitions of '\\t=""' can start matching anywhere after the start of the preceeding .*? |
|
||||
| regexplib/markup.js:17:12:17:14 | \\w* | Strings starting with '<0\\t' and with many repetitions of '=""\\t0' can start matching anywhere after the start of the preceeding .*? |
|
||||
| regexplib/markup.js:17:17:17:19 | .*? | Strings starting with '<0\\t="' and with many repetitions of '\\t=""' can start matching anywhere after the start of the preceeding \\w* |
|
||||
| regexplib/markup.js:17:40:17:42 | .*? | Strings starting with '<0>' and with many repetitions of '">' can start matching anywhere after the start of the preceeding .*? |
|
||||
| regexplib/markup.js:19:2:19:12 | (<meta\\s+)* | Strings with many repetitions of '<meta\\t' can start matching anywhere after the start of the preceeding (<meta\\s+)*((name\\s*=\\s*("\|')(?<name>[^'("\|')]*)("\|')){1}\|content\\s*=\\s*("\|')(?<content>[^'("\|')]*)("\|')\|scheme\\s*=\\s*("\|')(?<scheme>[^'("\|')]*)("\|')) |
|
||||
| regexplib/markup.js:19:8:19:10 | \\s+ | Strings starting with '<meta' and with many repetitions of '\\t<meta' can start matching anywhere after the start of the preceeding (<meta\\s+)*((name\\s*=\\s*("\|')(?<name>[^'("\|')]*)("\|')){1}\|content\\s*=\\s*("\|')(?<content>[^'("\|')]*)("\|')\|scheme\\s*=\\s*("\|')(?<scheme>[^'("\|')]*)("\|')) |
|
||||
| regexplib/markup.js:20:52:20:53 | .* | Strings with many repetitions of '=color' can start matching anywhere after the start of the preceeding [^>]+ |
|
||||
| regexplib/markup.js:20:155:20:156 | '+ | Strings with many repetitions of '''' can start matching anywhere after the start of the preceeding '+ |
|
||||
| regexplib/markup.js:20:197:20:198 | "+ | Strings with many repetitions of '""' can start matching anywhere after the start of the preceeding "+ |
|
||||
| regexplib/markup.js:20:245:20:247 | .*? | Strings with many repetitions of 'color: # IF found THEN move ahead "" # single or double # or no quotes\\t' can start matching anywhere after the start of the preceeding .*? |
|
||||
| regexplib/markup.js:20:274:20:276 | .*? | Strings starting with '<font # Match start of Font Tag ' and with many repetitions of '<font # Match start of Font Tag a' can start matching anywhere after the start of the preceeding <\\*?font # Match start of Font Tag (?(?=[^>]+color.*>) #IF\\/THEN lookahead color in tag (.*?color\\s*?[=\|:]\\s*?) # IF found THEN move ahead ('+\\#*?[\\w\\s]*'+ # CAPTURE ColorName\\/Hex \|"+\\#*?[\\w\\s]*"+ # single or double \|\\#*\\w*\\b) # or no quotes\t.*?> # & move to end of tag \|.*?> # ELSE move to end of Tag ) # Close the If\\/Then lookahead # Use Multiline and IgnoreCase # Replace the matches from RE with MatchEvaluator below: # if m.Groups(1).Value<>"" then # Return "<font color=" & m.Groups(1).Value & ">" # else # Return "<font>" # end if |
|
||||
| regexplib/markup.js:24:39:24:41 | \\s+ | Strings starting with '<A' and with many repetitions of ' - != ' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/markup.js:24:43:24:45 | \\S+ | Strings starting with '<A ' and with many repetitions of '- !=' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/markup.js:24:48:24:50 | \\s* | Strings starting with '<A !' and with many repetitions of ' =- ! ' can start matching anywhere after the start of the preceeding \\s+ |
|
||||
| regexplib/markup.js:24:52:24:54 | \\s* | Strings starting with '<A !=' and with many repetitions of '- !=' can start matching anywhere after the start of the preceeding \\S+ |
|
||||
| regexplib/markup.js:24:39:24:41 | \\s+ | Strings starting with '<A' and with many repetitions of '\\t-\\t!=\\t' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/markup.js:24:43:24:45 | \\S+ | Strings starting with '<A\\t' and with many repetitions of '-\\t!=' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/markup.js:24:48:24:50 | \\s* | Strings starting with '<A\\t!' and with many repetitions of '\\t=-\\t!\\t' can start matching anywhere after the start of the preceeding \\s+ |
|
||||
| regexplib/markup.js:24:52:24:54 | \\s* | Strings starting with '<A\\t!=' and with many repetitions of '-\\t!=' can start matching anywhere after the start of the preceeding \\S+ |
|
||||
| regexplib/markup.js:25:11:25:15 | [^>]* | Strings starting with '<A' and with many repetitions of '<A' can start matching anywhere after the start of the preceeding <[a-zA-Z][^>]*\\son\\w+=(\\w+\|'[^']*'\|"[^"]*")[^>]*> |
|
||||
| regexplib/markup.js:25:45:25:49 | [^>]* | Strings starting with '<A ona=a' and with many repetitions of '0' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/markup.js:25:45:25:49 | [^>]* | Strings starting with '<A\\ton0=0' and with many repetitions of '0' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/markup.js:27:3:27:7 | [^>]* | Strings starting with '<' and with many repetitions of '<' can start matching anywhere after the start of the preceeding <[^>]*name[\\s]*=[\\s]*"?[^\\w_]*"?[^>]*> |
|
||||
| regexplib/markup.js:27:34:27:38 | [^>]* | Strings starting with '<name=' and with many repetitions of '\\t' can start matching anywhere after the start of the preceeding [\\s]* |
|
||||
| regexplib/markup.js:29:6:29:13 | [\\d\\D]*? | Strings starting with '/*' and with many repetitions of 'a/*' can start matching anywhere after the start of the preceeding \\/\\*[\\d\\D]*?\\*\\/ |
|
||||
| regexplib/markup.js:33:10:33:11 | .* | Strings starting with '?s/*' and with many repetitions of '?s/*' can start matching anywhere after the start of the preceeding (?s)\\/\\*.*\\*\\/ |
|
||||
| regexplib/markup.js:34:4:34:10 | [^\\s>]* | Strings starting with '<' and with many repetitions of '<!' can start matching anywhere after the start of the preceeding <([^\\s>]*)(\\s[^<]*)> |
|
||||
| regexplib/markup.js:37:15:37:19 | [\\w]* | Strings starting with '[a' and with many repetitions of '0' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/markup.js:40:23:40:25 | \\w+ | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding (\\/?(?<step>\\w+))+ |
|
||||
| regexplib/markup.js:40:59:40:61 | \\s* | Strings starting with 'a[' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/markup.js:37:15:37:19 | [\\w]* | Strings starting with '[0' and with many repetitions of '0' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/markup.js:40:23:40:25 | \\w+ | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding (\\/?(?<step>\\w+))+ |
|
||||
| regexplib/markup.js:40:59:40:61 | \\s* | Strings starting with '0[' and with many repetitions of '\\t' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/markup.js:43:11:43:15 | [^>]* | Strings starting with '<A' and with many repetitions of '<A' can start matching anywhere after the start of the preceeding <[a-zA-Z][^>]*\\son\\w+=(\\w+\|'[^']*'\|"[^"]*")[^>]*> |
|
||||
| regexplib/markup.js:43:45:43:49 | [^>]* | Strings starting with '<A ona=a' and with many repetitions of '0' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/markup.js:43:45:43:49 | [^>]* | Strings starting with '<A\\ton0=0' and with many repetitions of '0' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/markup.js:44:3:44:7 | [^>]* | Strings starting with '<' and with many repetitions of '<' can start matching anywhere after the start of the preceeding <[^>]*name[\\s]*=[\\s]*"?[^\\w_]*"?[^>]*> |
|
||||
| regexplib/markup.js:44:34:44:38 | [^>]* | Strings starting with '<name=' and with many repetitions of '\\t' can start matching anywhere after the start of the preceeding [\\s]* |
|
||||
| regexplib/markup.js:45:6:45:13 | [\\d\\D]*? | Strings starting with '/*' and with many repetitions of 'a/*' can start matching anywhere after the start of the preceeding \\/\\*[\\d\\D]*?\\*\\/ |
|
||||
| regexplib/markup.js:47:39:47:41 | \\s+ | Strings starting with '<A' and with many repetitions of ' - != ' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/markup.js:47:43:47:45 | \\S+ | Strings starting with '<A ' and with many repetitions of '- !=' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/markup.js:47:48:47:50 | \\s* | Strings starting with '<A !' and with many repetitions of ' =- ! ' can start matching anywhere after the start of the preceeding \\s+ |
|
||||
| regexplib/markup.js:47:52:47:54 | \\s* | Strings starting with '<A !=' and with many repetitions of '- !=' can start matching anywhere after the start of the preceeding \\S+ |
|
||||
| regexplib/markup.js:47:39:47:41 | \\s+ | Strings starting with '<A' and with many repetitions of '\\t-\\t!=\\t' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/markup.js:47:43:47:45 | \\S+ | Strings starting with '<A\\t' and with many repetitions of '-\\t!=' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/markup.js:47:48:47:50 | \\s* | Strings starting with '<A\\t!' and with many repetitions of '\\t=-\\t!\\t' can start matching anywhere after the start of the preceeding \\s+ |
|
||||
| regexplib/markup.js:47:52:47:54 | \\s* | Strings starting with '<A\\t!=' and with many repetitions of '-\\t!=' can start matching anywhere after the start of the preceeding \\S+ |
|
||||
| regexplib/markup.js:48:6:48:13 | [\\s\\S]*? | Strings starting with '<!--' and with many repetitions of '<!--' can start matching anywhere after the start of the preceeding <!--[\\s\\S]*?--> |
|
||||
| regexplib/markup.js:53:15:53:19 | [\\w]* | Strings starting with '[a' and with many repetitions of '0' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/markup.js:56:23:56:25 | \\w+ | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding (\\/?(?<step>\\w+))+ |
|
||||
| regexplib/markup.js:56:59:56:61 | \\s* | Strings starting with 'a[' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/markup.js:53:15:53:19 | [\\w]* | Strings starting with '[0' and with many repetitions of '0' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/markup.js:56:23:56:25 | \\w+ | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding (\\/?(?<step>\\w+))+ |
|
||||
| regexplib/markup.js:56:59:56:61 | \\s* | Strings starting with '0[' and with many repetitions of '\\t' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/markup.js:61:74:61:79 | [0-9]+ | Strings starting with '<Assembly:AssemblyVersion("0a' and with many repetitions of '00' can start matching anywhere after the start of the preceeding [0-9]+ |
|
||||
| regexplib/markup.js:61:91:61:96 | [0-9]+ | Strings starting with '<Assembly:AssemblyVersion("0a' and with many repetitions of '00' can start matching anywhere after the start of the preceeding [0-9]+ |
|
||||
| regexplib/markup.js:61:98:61:103 | [0-9]+ | Strings starting with '<Assembly:AssemblyVersion("0a0a' and with many repetitions of '00' can start matching anywhere after the start of the preceeding [0-9]+ |
|
||||
@@ -273,34 +273,35 @@
|
||||
| regexplib/misc.js:83:90:83:92 | \\d* | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding [0-9]+ |
|
||||
| regexplib/misc.js:90:4:90:11 | ([a-z])+ | Strings with many repetitions of 'aa' can start matching anywhere after the start of the preceeding (([a-z])+.)+ |
|
||||
| regexplib/misc.js:93:3:93:4 | .* | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding (.*\\.([wW][mM][aA])\|([mM][pP][3])$) |
|
||||
| regexplib/misc.js:95:25:95:26 | .+ | Strings starting with 'at ' and with many repetitions of 'at a' can start matching anywhere after the start of the preceeding (at\\s)(?<fullClassName>.+)(\\.)(?<methodName>[^\\.]*)(\\()(?<parameters>[^\\)]*)(\\))((\\sin\\s)(?<fileName>.+)(:line )(?<lineNumber>[\\d]*))? |
|
||||
| regexplib/misc.js:95:71:95:76 | [^\\)]* | Strings starting with 'at a.(' and with many repetitions of '((' can start matching anywhere after the start of the preceeding .+ |
|
||||
| regexplib/misc.js:95:103:95:104 | .+ | Strings starting with 'at a.() in ' and with many repetitions of '() in -' can start matching anywhere after the start of the preceeding .+ |
|
||||
| regexplib/misc.js:95:25:95:26 | .+ | Strings starting with 'at\\t' and with many repetitions of 'at\\ta' can start matching anywhere after the start of the preceeding (at\\s)(?<fullClassName>.+)(\\.)(?<methodName>[^\\.]*)(\\()(?<parameters>[^\\)]*)(\\))((\\sin\\s)(?<fileName>.+)(:line )(?<lineNumber>[\\d]*))? |
|
||||
| regexplib/misc.js:95:71:95:76 | [^\\)]* | Strings starting with 'at\\ta.(' and with many repetitions of '((' can start matching anywhere after the start of the preceeding .+ |
|
||||
| regexplib/misc.js:95:103:95:104 | .+ | Strings starting with 'at\\ta.()\\tin\\t' and with many repetitions of '()\\tin\\t-' can start matching anywhere after the start of the preceeding .+ |
|
||||
| regexplib/misc.js:101:52:101:70 | [a-z0-9\\/\\.\\?\\=\\&]* | Strings starting with '".htm' and with many repetitions of '.asp' can start matching anywhere after the start of the preceeding [a-z0-9\\/\\.\\?\\=\\&]* |
|
||||
| regexplib/misc.js:112:3:112:5 | \\s* | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding (\\s*\\(?0\\d{4}\\)?\\s*\\d{6}\\s*) |
|
||||
| regexplib/misc.js:112:32:112:34 | \\s* | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding (\\s*\\(?0\\d{3}\\)?\\s*\\d{3}\\s*\\d{4}\\s*) |
|
||||
| regexplib/misc.js:112:3:112:5 | \\s* | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding (\\s*\\(?0\\d{4}\\)?\\s*\\d{6}\\s*) |
|
||||
| regexplib/misc.js:112:32:112:34 | \\s* | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding (\\s*\\(?0\\d{3}\\)?\\s*\\d{3}\\s*\\d{4}\\s*) |
|
||||
| regexplib/misc.js:114:6:114:8 | \\\|+ | Strings starting with 'a' and with many repetitions of '\|' can start matching anywhere after the start of the preceeding .+ |
|
||||
| regexplib/misc.js:116:3:116:4 | .* | Strings starting with '{' and with many repetitions of '{' can start matching anywhere after the start of the preceeding {.*} |
|
||||
| regexplib/misc.js:117:25:117:26 | .+ | Strings starting with '{a}' and with many repetitions of 'a)' can start matching anywhere after the start of the preceeding .+ |
|
||||
| regexplib/misc.js:119:20:119:22 | \\w+ | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding (NOT)?(\\s*\\(*)\\s*(\\w+)\\s*(=\|<>\|<\|>\|LIKE\|IN)\\s*(\\(([^\\)]*)\\)\|'([^']*)'\|(-?\\d*\\.?\\d+))(\\s*\\)*\\s*)(AND\|OR)? |
|
||||
| regexplib/misc.js:119:20:119:22 | \\w+ | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding (NOT)?(\\s*\\(*)\\s*(\\w+)\\s*(=\|<>\|<\|>\|LIKE\|IN)\\s*(\\(([^\\)]*)\\)\|'([^']*)'\|(-?\\d*\\.?\\d+))(\\s*\\)*\\s*)(AND\|OR)? |
|
||||
| regexplib/misc.js:119:52:119:57 | [^\\)]* | Strings starting with '0=(' and with many repetitions of '0<((' can start matching anywhere after the start of the preceeding (NOT)?(\\s*\\(*)\\s*(\\w+)\\s*(=\|<>\|<\|>\|LIKE\|IN)\\s*(\\(([^\\)]*)\\)\|'([^']*)'\|(-?\\d*\\.?\\d+))(\\s*\\)*\\s*)(AND\|OR)? |
|
||||
| regexplib/misc.js:123:36:123:38 | .*? | Strings starting with '?se[A' and with many repetitions of '?se[Aa' can start matching anywhere after the start of the preceeding (?s)(?:\\e\\[(?:(\\d+);?)*([A-Za-z])(.*?))(?=\\e\\[\|\\z) |
|
||||
| regexplib/misc.js:126:15:126:20 | [a-z]+ | Strings starting with 'a' and with many repetitions of 'aa' can start matching anywhere after the start of the preceeding [a-z]+ |
|
||||
| regexplib/misc.js:141:15:141:19 | [^;]+ | Strings starting with '{\\\\f\\\\' and with many repetitions of '{\\\\f\\\\:' can start matching anywhere after the start of the preceeding (\\{\\\\f\\d*)\\\\([^;]+;) |
|
||||
| regexplib/misc.js:144:52:144:70 | [a-z0-9\\/\\.\\?\\=\\&]* | Strings starting with '".htm' and with many repetitions of '.asp' can start matching anywhere after the start of the preceeding [a-z0-9\\/\\.\\?\\=\\&]* |
|
||||
| regexplib/misc.js:148:12:148:18 | [^\\s>]+ | Strings starting with '<' and with many repetitions of '<!' can start matching anywhere after the start of the preceeding \\w?<\\s?\\/?[^\\s>]+(\\s+[^"'=]+(=("[^"]*")\|('[^\\']*')\|([^\\s"'>]*))?)*\\s*\\/?> |
|
||||
| regexplib/misc.js:148:20:148:22 | \\s+ | Strings starting with '<!' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding [^"'=]+ |
|
||||
| regexplib/misc.js:148:23:148:29 | [^"'=]+ | Strings starting with '<! ' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\s+ |
|
||||
| regexplib/misc.js:148:34:148:38 | [^"]* | Strings starting with '<! !="' and with many repetitions of '<" !="' can start matching anywhere after the start of the preceeding \\w?<\\s?\\/?[^\\s>]+(\\s+[^"'=]+(=("[^"]*")\|('[^\\']*')\|([^\\s"'>]*))?)*\\s*\\/?> |
|
||||
| regexplib/misc.js:148:44:148:49 | [^\\']* | Strings starting with '<! !'' and with many repetitions of '<' !'' can start matching anywhere after the start of the preceeding \\w?<\\s?\\/?[^\\s>]+(\\s+[^"'=]+(=("[^"]*")\|('[^\\']*')\|([^\\s"'>]*))?)*\\s*\\/?> |
|
||||
| regexplib/misc.js:148:68:148:70 | \\s* | Strings starting with '<!' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\s+ |
|
||||
| regexplib/misc.js:148:20:148:22 | \\s+ | Strings starting with '<!' and with many repetitions of '\\t\\t' can start matching anywhere after the start of the preceeding [^"'=]+ |
|
||||
| regexplib/misc.js:148:23:148:29 | [^"'=]+ | Strings starting with '<!\\t' and with many repetitions of '\\t' can start matching anywhere after the start of the preceeding \\s+ |
|
||||
| regexplib/misc.js:148:34:148:38 | [^"]* | Strings starting with '<!\\t!="' and with many repetitions of '<"\\t!="' can start matching anywhere after the start of the preceeding \\w?<\\s?\\/?[^\\s>]+(\\s+[^"'=]+(=("[^"]*")\|('[^\\']*')\|([^\\s"'>]*))?)*\\s*\\/?> |
|
||||
| regexplib/misc.js:148:44:148:49 | [^\\']* | Strings starting with '<!\\t!'' and with many repetitions of '<'\\t!'' can start matching anywhere after the start of the preceeding \\w?<\\s?\\/?[^\\s>]+(\\s+[^"'=]+(=("[^"]*")\|('[^\\']*')\|([^\\s"'>]*))?)*\\s*\\/?> |
|
||||
| regexplib/misc.js:148:68:148:70 | \\s* | Strings starting with '<!' and with many repetitions of '\\t\\t' can start matching anywhere after the start of the preceeding \\s+ |
|
||||
| regexplib/misc.js:162:18:162:23 | [0-9]+ | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| regexplib/misc.js:162:24:162:26 | \\d* | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| regexplib/misc.js:162:36:162:38 | \\d* | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding [0-9]+ |
|
||||
| regexplib/misc.js:162:72:162:77 | [0-9]+ | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| regexplib/misc.js:162:78:162:80 | \\d* | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| regexplib/misc.js:162:90:162:92 | \\d* | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding [0-9]+ |
|
||||
| regexplib/misc.js:163:41:163:43 | \\s* | Strings starting with 'SR' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/misc.js:163:75:163:77 | \\s* | Strings starting with '99' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/misc.js:163:41:163:43 | \\s* | Strings starting with 'SR' and with many repetitions of '\\t' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/misc.js:163:75:163:77 | \\s* | Strings starting with '00' and with many repetitions of '\\t' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/misc.js:164:31:164:45 | [^a-z\\:\\,\\(\\)]* | Strings starting with '#' and with many repetitions of '#' can start matching anywhere after the start of the preceeding ([A-Zäöü0-9\\/][^a-z\\:\\,\\(\\)]*[A-Zäöü0-9])($\|[\\.\\:\\,\\;\\)\\-\\ \\+]\|s\\b) |
|
||||
| regexplib/misc.js:168:3:168:4 | .* | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding (.*\\.([wW][mM][aA])\|([mM][pP][3])$) |
|
||||
| regexplib/misc.js:173:4:173:11 | ([a-z])+ | Strings with many repetitions of 'aa' can start matching anywhere after the start of the preceeding (([a-z])+.)+ |
|
||||
@@ -308,14 +309,14 @@
|
||||
| regexplib/numbers.js:3:14:3:19 | [0-9]+ | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding [0-9]* |
|
||||
| regexplib/numbers.js:5:18:5:23 | [0-9]+ | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding [0-9]* |
|
||||
| regexplib/numbers.js:5:34:5:39 | [0-9]* | Strings starting with '0' and with many repetitions of '00' can start matching anywhere after the start of the preceeding [0-9]+ |
|
||||
| regexplib/numbers.js:6:10:6:12 | \\d* | Strings with many repetitions of '9' can start matching anywhere after the start of the preceeding (\\d\|,)* |
|
||||
| regexplib/numbers.js:9:14:9:16 | \\d* | Strings with many repetitions of '9' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| regexplib/numbers.js:13:9:13:11 | \\d* | Strings with many repetitions of '9' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| regexplib/numbers.js:16:14:16:16 | \\d* | Strings starting with '0a' and with many repetitions of '09' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| regexplib/numbers.js:6:10:6:12 | \\d* | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding (\\d\|,)* |
|
||||
| regexplib/numbers.js:9:14:9:16 | \\d* | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| regexplib/numbers.js:13:9:13:11 | \\d* | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| regexplib/numbers.js:16:14:16:16 | \\d* | Strings starting with '0a' and with many repetitions of '00' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| regexplib/strings.js:2:2:2:3 | .* | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding .*[Pp]en[Ii1][\\$s].* |
|
||||
| regexplib/strings.js:14:35:14:37 | \\w* | Strings starting with 'A' and with many repetitions of 'A' can start matching anywhere after the start of the preceeding \\w* |
|
||||
| regexplib/strings.js:14:35:14:37 | \\w* | Strings starting with 'A' and with many repetitions of '0' can start matching anywhere after the start of the preceeding \\w* |
|
||||
| regexplib/strings.js:14:61:14:63 | \\w* | Strings starting with 'AA' and with many repetitions of 'A' can start matching anywhere after the start of the preceeding \\w* |
|
||||
| regexplib/strings.js:14:107:14:109 | \\w* | Strings starting with 'AAA' and with many repetitions of 'A' can start matching anywhere after the start of the preceeding \\w* |
|
||||
| regexplib/strings.js:14:107:14:109 | \\w* | Strings starting with 'AAA' and with many repetitions of '0' can start matching anywhere after the start of the preceeding \\w* |
|
||||
| regexplib/strings.js:19:31:19:57 | [a-zæøå0-9]+ | Strings starting with '#@' and with many repetitions of '##' can start matching anywhere after the start of the preceeding [a-zæøå0-9]+ |
|
||||
| regexplib/strings.js:19:69:19:95 | [a-zæøå0-9]+ | Strings starting with '#@#' and with many repetitions of '##' can start matching anywhere after the start of the preceeding [a-zæøå0-9]+ |
|
||||
| regexplib/strings.js:20:3:20:20 | ((\\\\")\|[^"(\\\\")])+ | Strings starting with '"' and with many repetitions of '\\\\"' can start matching anywhere after the start of the preceeding "((\\\\")\|[^"(\\\\")])+" |
|
||||
@@ -325,41 +326,43 @@
|
||||
| regexplib/strings.js:26:18:26:20 | \\s* | Strings starting with '\\t' and with many repetitions of '\\t' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/strings.js:29:2:29:3 | .* | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding .*\\$AVE |
|
||||
| regexplib/strings.js:30:2:30:3 | .* | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding .*[Pp]re[Ss\\$]cr[iI1]pt.* |
|
||||
| regexplib/strings.js:32:35:32:37 | \\w* | Strings starting with 'A' and with many repetitions of 'A' can start matching anywhere after the start of the preceeding \\w* |
|
||||
| regexplib/strings.js:32:35:32:37 | \\w* | Strings starting with 'A' and with many repetitions of '0' can start matching anywhere after the start of the preceeding \\w* |
|
||||
| regexplib/strings.js:32:61:32:63 | \\w+ | Strings starting with 'AA' and with many repetitions of 'A' can start matching anywhere after the start of the preceeding \\w* |
|
||||
| regexplib/strings.js:32:119:32:121 | \\w* | Strings with many repetitions of 'aA' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/strings.js:40:3:40:5 | \\w+ | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding (\\w+)\\s+\\1 |
|
||||
| regexplib/strings.js:32:119:32:121 | \\w* | Strings with many repetitions of '00' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/strings.js:40:3:40:5 | \\w+ | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding (\\w+)\\s+\\1 |
|
||||
| regexplib/strings.js:48:3:48:12 | [^\\.\\?\\!]* | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding ([^\\.\\?\\!]*)[\\.\\?\\!] |
|
||||
| regexplib/strings.js:49:3:49:5 | \\S+ | Strings with many repetitions of '!' can start matching anywhere after the start of the preceeding (\\S+)\\x20{2,}(?=\\S+) |
|
||||
| regexplib/strings.js:53:4:53:12 | [a-z0-9]+ | Strings with many repetitions of '0.00' can start matching anywhere after the start of the preceeding [a-z0-9]+ |
|
||||
| regexplib/strings.js:53:25:53:33 | [a-z0-9]+ | Strings starting with '0' and with many repetitions of '0' can start matching anywhere after the start of the preceeding [a-z0-9]+ |
|
||||
| regexplib/strings.js:53:44:53:52 | [a-z0-9]+ | Strings with many repetitions of '00' can start matching anywhere after the start of the preceeding [a-z0-9]+ |
|
||||
| regexplib/strings.js:53:65:53:73 | [a-z0-9]+ | Strings starting with '0' and with many repetitions of '0' can start matching anywhere after the start of the preceeding [a-z0-9]+ |
|
||||
| regexplib/strings.js:54:20:54:22 | \\w+ | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding (NOT)?(\\s*\\(*)\\s*(\\w+)\\s*(=\|<>\|<\|>\|LIKE\|IN)\\s*(\\(([^\\)]*)\\)\|'([^']*)'\|(-?\\d*\\.?\\d+))(\\s*\\)*\\s*)(AND\|OR)? |
|
||||
| regexplib/strings.js:54:20:54:22 | \\w+ | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding (NOT)?(\\s*\\(*)\\s*(\\w+)\\s*(=\|<>\|<\|>\|LIKE\|IN)\\s*(\\(([^\\)]*)\\)\|'([^']*)'\|(-?\\d*\\.?\\d+))(\\s*\\)*\\s*)(AND\|OR)? |
|
||||
| regexplib/strings.js:54:52:54:57 | [^\\)]* | Strings starting with '0=(' and with many repetitions of '0<((' can start matching anywhere after the start of the preceeding (NOT)?(\\s*\\(*)\\s*(\\w+)\\s*(=\|<>\|<\|>\|LIKE\|IN)\\s*(\\(([^\\)]*)\\)\|'([^']*)'\|(-?\\d*\\.?\\d+))(\\s*\\)*\\s*)(AND\|OR)? |
|
||||
| regexplib/strings.js:56:52:56:53 | .+ | Strings starting with 'PRN.' and with many repetitions of '.' can start matching anywhere after the start of the preceeding .* |
|
||||
| regexplib/strings.js:57:36:57:38 | .*? | Strings starting with '?se[A' and with many repetitions of '?se[Aa' can start matching anywhere after the start of the preceeding (?s)(?:\\e\\[(?:(\\d+);?)*([A-Za-z])(.*?))(?=\\e\\[\|\\z) |
|
||||
| regexplib/strings.js:64:3:64:5 | \\w+ | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding (\\w+)\\s+\\1 |
|
||||
| regexplib/strings.js:64:3:64:5 | \\w+ | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding (\\w+)\\s+\\1 |
|
||||
| regexplib/strings.js:70:6:70:17 | [a-zA-Z,\\s]+ | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/strings.js:70:18:70:20 | \\s* | Strings starting with '\\t' and with many repetitions of '\\t' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/strings.js:72:35:72:37 | \\w* | Strings starting with 'A' and with many repetitions of 'A' can start matching anywhere after the start of the preceeding \\w* |
|
||||
| regexplib/strings.js:72:35:72:37 | \\w* | Strings starting with 'A' and with many repetitions of '0' can start matching anywhere after the start of the preceeding \\w* |
|
||||
| regexplib/strings.js:72:61:72:63 | \\w+ | Strings starting with 'AA' and with many repetitions of 'A' can start matching anywhere after the start of the preceeding \\w* |
|
||||
| regexplib/strings.js:72:119:72:121 | \\w* | Strings with many repetitions of 'aA' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/strings.js:72:119:72:121 | \\w* | Strings with many repetitions of '00' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| regexplib/strings.js:73:2:73:3 | .* | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding .*\\$AVE |
|
||||
| regexplib/strings.js:74:2:74:3 | .* | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding .*[Pp]re[Ss\\$]cr[iI1]pt.* |
|
||||
| regexplib/strings.js:75:2:75:3 | .* | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding .*[Vv][Ii1]agr.* |
|
||||
| regexplib/strings.js:76:2:76:3 | .* | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding .*[Oo0][Ee][Mm].* |
|
||||
| regexplib/strings.js:81:36:81:38 | .*? | Strings starting with '?se[A' and with many repetitions of '?se[Aa' can start matching anywhere after the start of the preceeding (?s)(?:\\e\\[(?:(\\d+);?)*([A-Za-z])(.*?))(?=\\e\\[\|\\z) |
|
||||
| regexplib/strings.js:82:20:82:22 | \\w+ | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding (NOT)?(\\s*\\(*)\\s*(\\w+)\\s*(=\|<>\|<\|>\|LIKE\|IN)\\s*(\\(([^\\)]*)\\)\|'([^']*)'\|(-?\\d*\\.?\\d+))(\\s*\\)*\\s*)(AND\|OR)? |
|
||||
| regexplib/strings.js:82:20:82:22 | \\w+ | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding (NOT)?(\\s*\\(*)\\s*(\\w+)\\s*(=\|<>\|<\|>\|LIKE\|IN)\\s*(\\(([^\\)]*)\\)\|'([^']*)'\|(-?\\d*\\.?\\d+))(\\s*\\)*\\s*)(AND\|OR)? |
|
||||
| regexplib/strings.js:82:52:82:57 | [^\\)]* | Strings starting with '0=(' and with many repetitions of '0<((' can start matching anywhere after the start of the preceeding (NOT)?(\\s*\\(*)\\s*(\\w+)\\s*(=\|<>\|<\|>\|LIKE\|IN)\\s*(\\(([^\\)]*)\\)\|'([^']*)'\|(-?\\d*\\.?\\d+))(\\s*\\)*\\s*)(AND\|OR)? |
|
||||
| regexplib/strings.js:88:3:88:12 | [^\\.\\?\\!]* | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding ([^\\.\\?\\!]*)[\\.\\?\\!] |
|
||||
| regexplib/strings.js:89:3:89:5 | \\S+ | Strings with many repetitions of '!' can start matching anywhere after the start of the preceeding (\\S+)\\x20{2,}(?=\\S+) |
|
||||
| regexplib/uri.js:3:149:3:151 | .*? | Strings starting with 'ftp:// ' and with many repetitions of '/ /' can start matching anywhere after the start of the preceeding .* |
|
||||
| regexplib/uri.js:3:188:3:190 | \\w+ | Strings starting with 'ftp:// ' and with many repetitions of '00' can start matching anywhere after the start of the preceeding .* |
|
||||
| regexplib/uri.js:3:193:3:198 | [^\\#]+ | Strings starting with 'ftp:// a=' and with many repetitions of '00=' can start matching anywhere after the start of the preceeding .* |
|
||||
| regexplib/uri.js:3:206:3:208 | \\w+ | Strings starting with 'ftp:// a="' and with many repetitions of '00' can start matching anywhere after the start of the preceeding .* |
|
||||
| regexplib/uri.js:3:211:3:213 | \\w+ | Strings starting with 'ftp:// a="a=' and with many repetitions of '00=' can start matching anywhere after the start of the preceeding .* |
|
||||
| regexplib/uri.js:5:42:5:43 | .* | Strings starting with 'A:\\\\a' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding [\\w ]* |
|
||||
| regexplib/uri.js:3:193:3:198 | [^\\#]+ | Strings starting with 'ftp:// 0=' and with many repetitions of '00=' can start matching anywhere after the start of the preceeding .* |
|
||||
| regexplib/uri.js:3:206:3:208 | \\w+ | Strings starting with 'ftp:// 0="' and with many repetitions of '00' can start matching anywhere after the start of the preceeding .* |
|
||||
| regexplib/uri.js:3:211:3:213 | \\w+ | Strings starting with 'ftp:// 0="0=' and with many repetitions of '00=' can start matching anywhere after the start of the preceeding .* |
|
||||
| regexplib/uri.js:5:42:5:43 | .* | Strings starting with 'A:\\\\0' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding [\\w ]* |
|
||||
| regexplib/uri.js:14:175:14:213 | [\\d\\w\\.\\/\\%\\+\\-\\=\\&\\?\\:\\\\\\"\\'\\,\\\|\\~\\;]* | Strings with many repetitions of '.ac+' can start matching anywhere after the start of the preceeding [\\d\\w\\.\\/\\+\\-\\?\\:]* |
|
||||
| regexplib/uri.js:17:42:17:43 | .* | Strings starting with 'A:\\\\a' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding [\\w ]* |
|
||||
| regexplib/uri.js:17:42:17:43 | .* | Strings starting with 'A:\\\\0' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding [\\w ]* |
|
||||
| regexplib/uri.js:18:4:18:46 | ([A-Za-z0-9'~`!@#$%&^_+=\\(\\){},\\-\\[\\]\\;])+? | Strings starting with 'A' and with many repetitions of 'A' can start matching anywhere after the start of the preceeding \\A([A-Za-z0-9'~`!@#$%&^_+=\\(\\){},\\-\\[\\]\\;])+?([ A-Za-z0-9'~` !@#$%&^_+=\\(\\){},\\-\\[\\];]\|([.]))*?(?(3)(([ A-Za-z0-9'~`!@#$ %&^_+=\\(\\){},\\-\\[\\]\\;]*?)([A-Za-z0-9'~`!@#$%&^_+=\\(\\){},\\-\\[ \\];])+\\z)\|(\\z)) |
|
||||
| regexplib/uri.js:18:47:18:96 | ([ A-Za-z0-9'~` !@#$%&^_+=\\(\\){},\\-\\[\\];]\|([.]))*? | Strings starting with 'A!' and with many repetitions of '!' can start matching anywhere after the start of the preceeding ([A-Za-z0-9'~`!@#$%&^_+=\\(\\){},\\-\\[\\]\\;])+? |
|
||||
| regexplib/uri.js:18:148:18:189 | ([A-Za-z0-9'~`!@#$%&^_+=\\(\\){},\\-\\[ \\];])+ | Strings starting with 'A!?3' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding [ A-Za-z0-9'~`!@#$ %&^_+=\\(\\){},\\-\\[\\]\\;]*? |
|
||||
@@ -389,7 +392,7 @@
|
||||
| regexplib/uri.js:38:20:38:28 | [a-z0-9]+ | Strings starting with 'a' and with many repetitions of '0' can start matching anywhere after the start of the preceeding [a-z]+ |
|
||||
| regexplib/uri.js:38:35:38:40 | [a-z]+ | Strings starting with 'a.' and with many repetitions of 'aa' can start matching anywhere after the start of the preceeding [a-z0-9]+ |
|
||||
| regexplib/uri.js:38:52:38:60 | [a-z0-9]+ | Strings starting with 'a.a' and with many repetitions of '0' can start matching anywhere after the start of the preceeding [a-z]+ |
|
||||
| regexplib/uri.js:39:7:39:9 | .*? | Strings starting with '<a' and with many repetitions of ' ' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/uri.js:39:7:39:9 | .*? | Strings starting with '<a' and with many repetitions of '\\t' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| regexplib/uri.js:39:43:39:45 | .*? | Strings with many repetitions of '>a' can start matching anywhere after the start of the preceeding .*? |
|
||||
| regexplib/uri.js:41:16:41:31 | [a-zA-Z0-9\\-\\.]+ | Strings starting with '0' and with many repetitions of '00' can start matching anywhere after the start of the preceeding [a-zA-Z0-9]+ |
|
||||
| regexplib/uri.js:44:2:44:4 | .*? | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding .*?$(?<!\\.aspx) |
|
||||
@@ -445,15 +448,15 @@
|
||||
| tst.js:125:15:125:28 | ([a-z]\|[d-h])* | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding (([a-z]\|[d-h])*)" |
|
||||
| tst.js:128:15:128:30 | ([^a-z]\|[^0-9])* | Strings with many repetitions of '/' can start matching anywhere after the start of the preceeding (([^a-z]\|[^0-9])*)" |
|
||||
| tst.js:131:15:131:25 | (\\d\|[0-9])* | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding ((\\d\|[0-9])*)" |
|
||||
| tst.js:134:15:134:22 | (\\s\|\\s)* | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding ((\\s\|\\s)*)" |
|
||||
| tst.js:137:15:137:21 | (\\w\|G)* | Strings with many repetitions of 'G' can start matching anywhere after the start of the preceeding ((\\w\|G)*)" |
|
||||
| tst.js:140:16:140:23 | (\\s\|\\d)* | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding ((\\s\|\\d)*)" |
|
||||
| tst.js:143:15:143:22 | (\\d\|\\w)* | Strings with many repetitions of '9' can start matching anywhere after the start of the preceeding ((\\d\|\\w)*)" |
|
||||
| tst.js:146:15:146:21 | (\\d\|5)* | Strings with many repetitions of '5' can start matching anywhere after the start of the preceeding ((\\d\|5)*)" |
|
||||
| tst.js:149:15:149:24 | (\\s\|[\\f])* | Strings with many repetitions of '\u000c' can start matching anywhere after the start of the preceeding ((\\s\|[\\f])*)" |
|
||||
| tst.js:152:15:152:28 | (\\s\|[\\v]\|\\\\v)* | Strings with many repetitions of '\u000b' can start matching anywhere after the start of the preceeding ((\\s\|[\\v]\|\\\\v)*)" |
|
||||
| tst.js:134:15:134:22 | (\\s\|\\s)* | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding ((\\s\|\\s)*)" |
|
||||
| tst.js:137:15:137:21 | (\\w\|G)* | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding ((\\w\|G)*)" |
|
||||
| tst.js:140:16:140:23 | (\\s\|\\d)* | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding ((\\s\|\\d)*)" |
|
||||
| tst.js:143:15:143:22 | (\\d\|\\w)* | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding ((\\d\|\\w)*)" |
|
||||
| tst.js:146:15:146:21 | (\\d\|5)* | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding ((\\d\|5)*)" |
|
||||
| tst.js:149:15:149:24 | (\\s\|[\\f])* | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding ((\\s\|[\\f])*)" |
|
||||
| tst.js:152:15:152:28 | (\\s\|[\\v]\|\\\\v)* | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding ((\\s\|[\\v]\|\\\\v)*)" |
|
||||
| tst.js:155:15:155:24 | (\\f\|[\\f])* | Strings with many repetitions of '\u000c' can start matching anywhere after the start of the preceeding ((\\f\|[\\f])*)" |
|
||||
| tst.js:158:15:158:22 | (\\W\|\\D)* | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding ((\\W\|\\D)*)" |
|
||||
| tst.js:158:15:158:22 | (\\W\|\\D)* | Strings with many repetitions of '/' can start matching anywhere after the start of the preceeding ((\\W\|\\D)*)" |
|
||||
| tst.js:161:15:161:22 | (\\S\|\\w)* | Strings with many repetitions of '!' can start matching anywhere after the start of the preceeding ((\\S\|\\w)*)" |
|
||||
| tst.js:164:15:164:24 | (\\S\|[\\w])* | Strings with many repetitions of '!' can start matching anywhere after the start of the preceeding ((\\S\|[\\w])*)" |
|
||||
| tst.js:167:15:167:27 | (1s\|[\\da-z])* | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding ((1s\|[\\da-z])*)" |
|
||||
@@ -461,7 +464,7 @@
|
||||
| tst.js:173:16:173:20 | [\\d]+ | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding ([\\d]+)* |
|
||||
| tst.js:185:16:185:21 | [^>a]+ | Strings with many repetitions of '=' can start matching anywhere after the start of the preceeding ([^>a]+)* |
|
||||
| tst.js:188:17:188:19 | \\s* | Strings starting with '\\n' and with many repetitions of '\\n' can start matching anywhere after the start of the preceeding (\\n\\s*)+$ |
|
||||
| tst.js:191:18:191:20 | \\s+ | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding .* |
|
||||
| tst.js:191:18:191:20 | \\s+ | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding .* |
|
||||
| tst.js:191:31:191:35 | [^)]* | Strings starting with '(?#' and with many repetitions of '(?#' can start matching anywhere after the start of the preceeding .* |
|
||||
| tst.js:194:53:194:61 | [a-zA-Z]+ | Strings starting with '{[A(A)' and with many repetitions of 'AA' can start matching anywhere after the start of the preceeding [ a-zA-Z{}]+ |
|
||||
| tst.js:194:68:194:79 | [ a-zA-Z{}]+ | Strings starting with '{[A(A)A:' and with many repetitions of ' A: ' can start matching anywhere after the start of the preceeding \\s* |
|
||||
@@ -477,23 +480,23 @@
|
||||
| tst.js:227:15:227:22 | ([^X]b)+ | Strings with many repetitions of 'Wb' can start matching anywhere after the start of the preceeding (([^X]b)+)* |
|
||||
| tst.js:239:15:239:19 | (ab)+ | Strings with many repetitions of 'ab' can start matching anywhere after the start of the preceeding ((ab)+)* |
|
||||
| tst.js:248:18:248:19 | A* | Strings with many repetitions of 'A' can start matching anywhere after the start of the preceeding A* |
|
||||
| tst.js:254:15:254:17 | \\w* | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| tst.js:254:15:254:17 | \\w* | Strings with many repetitions of '0' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| tst.js:254:27:254:29 | \\w* | Strings starting with 'foobarbaz' and with many repetitions of 'foobarbaz' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| tst.js:254:39:254:41 | \\w* | Strings starting with 'foobarbazfoobarbaz' and with many repetitions of 'foobarbaz' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| tst.js:254:51:254:53 | \\w* | Strings starting with 'foobarbazfoobarbazfoobarbaz' and with many repetitions of 'foobarbaz' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| tst.js:254:63:254:65 | \\s* | Strings starting with 'foobarbazfoobarbazfoobarbazfoobarbaz' and with many repetitions of 'foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz' can start matching anywhere after the start of the preceeding \\d* |
|
||||
| tst.js:254:75:254:77 | \\d* | Strings starting with 'foobarbazfoobarbazfoobarbazfoobarbazfoobarbaz' and with many repetitions of 'foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| tst.js:257:14:257:116 | (.thisisagoddamnlongstringforstresstestingthequery\|\\sthisisagoddamnlongstringforstresstestingthequery)* | Strings with many repetitions of ' thisisagoddamnlongstringforstresstestingthequery' can start matching anywhere after the start of the preceeding (.thisisagoddamnlongstringforstresstestingthequery\|\\sthisisagoddamnlongstringforstresstestingthequery)*- |
|
||||
| tst.js:260:14:260:77 | (thisisagoddamnlongstringforstresstestingthequery\|this\\w+query)* | Strings with many repetitions of 'thisaquery' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| tst.js:257:14:257:116 | (.thisisagoddamnlongstringforstresstestingthequery\|\\sthisisagoddamnlongstringforstresstestingthequery)* | Strings with many repetitions of '\\tthisisagoddamnlongstringforstresstestingthequery' can start matching anywhere after the start of the preceeding (.thisisagoddamnlongstringforstresstestingthequery\|\\sthisisagoddamnlongstringforstresstestingthequery)*- |
|
||||
| tst.js:260:14:260:77 | (thisisagoddamnlongstringforstresstestingthequery\|this\\w+query)* | Strings with many repetitions of 'this0query' can start matching anywhere after the start of the preceeding \\w+ |
|
||||
| tst.js:260:68:260:70 | \\w+ | Strings starting with 'this' and with many repetitions of 'this' can start matching anywhere after the start of the preceeding (thisisagoddamnlongstringforstresstestingthequery\|this\\w+query)* |
|
||||
| tst.js:263:15:263:117 | (thisisagoddamnlongstringforstresstestingthequery\|imanotherbutunrelatedstringcomparedtotheotherstring)* | Strings with many repetitions of 'thisisagoddamnlongstringforstresstestingthequery' can start matching anywhere after the start of the preceeding (thisisagoddamnlongstringforstresstestingthequery\|imanotherbutunrelatedstringcomparedtotheotherstring)*- |
|
||||
| tst.js:272:21:272:22 | b+ | Strings with many repetitions of 'b' can start matching anywhere after the start of the preceeding (b+)+ |
|
||||
| tst.js:275:25:275:27 | \\s+ | Strings starting with '<a' and with many repetitions of ' 0=" 0=" ' can start matching anywhere after the start of the preceeding [^"]* |
|
||||
| tst.js:275:28:275:30 | \\w+ | Strings starting with '<a ' and with many repetitions of '0=" 0=" ' can start matching anywhere after the start of the preceeding [^"]* |
|
||||
| tst.js:275:34:275:36 | \\s* | Strings starting with '<a a' and with many repetitions of '=" 0=" 0 ' can start matching anywhere after the start of the preceeding [^"]* |
|
||||
| tst.js:275:48:275:52 | [^"]* | Strings starting with '<a a="' and with many repetitions of '" a="' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| tst.js:275:60:275:64 | [^']* | Strings starting with '<a a='' and with many repetitions of '' a='' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| tst.js:275:68:275:74 | [^>\\s]+ | Strings starting with '<a a=' and with many repetitions of '" a="' can start matching anywhere after the start of the preceeding [^"]* |
|
||||
| tst.js:275:25:275:27 | \\s+ | Strings starting with '<0' and with many repetitions of '\\t0="\\t0="\\t' can start matching anywhere after the start of the preceeding [^"]* |
|
||||
| tst.js:275:28:275:30 | \\w+ | Strings starting with '<0\\t' and with many repetitions of '0="\\t0="\\t' can start matching anywhere after the start of the preceeding [^"]* |
|
||||
| tst.js:275:34:275:36 | \\s* | Strings starting with '<0\\t0' and with many repetitions of '="\\t0="\\t0\\t' can start matching anywhere after the start of the preceeding [^"]* |
|
||||
| tst.js:275:48:275:52 | [^"]* | Strings starting with '<0\\t0="' and with many repetitions of '"\\t0="' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| tst.js:275:60:275:64 | [^']* | Strings starting with '<0\\t0='' and with many repetitions of ''\\t0='' can start matching anywhere after the start of the preceeding \\s* |
|
||||
| tst.js:275:68:275:74 | [^>\\s]+ | Strings starting with '<0\\t0=' and with many repetitions of '"\\t0="' can start matching anywhere after the start of the preceeding [^"]* |
|
||||
| tst.js:281:16:281:17 | a+ | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding (a+)* |
|
||||
| tst.js:284:16:284:17 | a+ | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding (a+)* |
|
||||
| tst.js:290:16:290:17 | a+ | Strings with many repetitions of 'a' can start matching anywhere after the start of the preceeding (a+)* |
|
||||
@@ -502,7 +505,7 @@
|
||||
| tst.js:296:21:296:22 | a+ | Strings with many repetitions of 'a;' can start matching anywhere after the start of the preceeding ((;\|^)a+)+$ |
|
||||
| tst.js:299:90:299:91 | e+ | Strings starting with '00000000000000' and with many repetitions of 'e' can start matching anywhere after the start of the preceeding (e+)+ |
|
||||
| tst.js:302:18:302:19 | c+ | Strings starting with 'ab' and with many repetitions of 'c' can start matching anywhere after the start of the preceeding (c+)+ |
|
||||
| tst.js:305:18:305:20 | \\s+ | Strings with many repetitions of ' ' can start matching anywhere after the start of the preceeding (\\s+)* |
|
||||
| tst.js:305:18:305:20 | \\s+ | Strings with many repetitions of '\\t' can start matching anywhere after the start of the preceeding (\\s+)* |
|
||||
| tst.js:308:16:308:24 | ([^/]\|X)+ | Strings with many repetitions of '.' can start matching anywhere after the start of the preceeding (([^/]\|X)+)(\\/[^]*)*$ |
|
||||
| tst.js:311:17:311:28 | (x([^Y]+)?)* | Strings with many repetitions of 'xX' can start matching anywhere after the start of the preceeding [^Y]+ |
|
||||
| tst.js:311:20:311:24 | [^Y]+ | Strings starting with 'x' and with many repetitions of 'xX' can start matching anywhere after the start of the preceeding (x([^Y]+)?)* |
|
||||
@@ -0,0 +1,4 @@
import semmle.javascript.security.regexp.SuperlinearBackTracking

from PolynomialBackTrackingTerm t
select t, t.getReason()
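The four lines above are the newly added debug query over the relocated SuperlinearBackTracking library. For comparison only, a minimal sketch of the analogous query against the relocated exponential-backtracking library is given below; the `RegExpTerm`, `State`, and `hasReDoSResult` names and the `NfaUtils`/`ExponentialBackTracking` module paths are assumed from the existing NfaUtils-based API and are illustrative, not part of this change.

// Hedged sketch (assumed API): list exponential-backtracking candidates
// via the relocated ExponentialBackTracking/NfaUtils libraries.
import javascript
import semmle.javascript.security.regexp.NfaUtils
import semmle.javascript.security.regexp.ExponentialBackTracking

from RegExpTerm t, string pump, State s, string prefixMsg
where hasReDoSResult(t, pump, s, prefixMsg)
select t,
  "This part of the regular expression may cause exponential backtracking on strings " +
    prefixMsg + "containing many repetitions of '" + pump + "'."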
@@ -437,19 +437,19 @@ edges
|
||||
| lib/subLib5/feature.js:2:2:2:17 | /a*b/.test(name) | lib/subLib5/feature.js:1:28:1:31 | name | lib/subLib5/feature.js:2:13:2:16 | name | This $@ that depends on $@ may run slow on strings with many repetitions of 'a'. | lib/subLib5/feature.js:2:3:2:4 | a* | regular expression | lib/subLib5/feature.js:1:28:1:31 | name | library input |
|
||||
| lib/subLib5/main.js:2:2:2:17 | /a*b/.test(name) | lib/subLib5/main.js:1:28:1:31 | name | lib/subLib5/main.js:2:13:2:16 | name | This $@ that depends on $@ may run slow on strings with many repetitions of 'a'. | lib/subLib5/main.js:2:3:2:4 | a* | regular expression | lib/subLib5/main.js:1:28:1:31 | name | library input |
|
||||
| lib/sublib/factory.js:13:13:13:28 | /f*g/.test(name) | lib/sublib/factory.js:12:26:12:29 | name | lib/sublib/factory.js:13:24:13:27 | name | This $@ that depends on $@ may run slow on strings with many repetitions of 'f'. | lib/sublib/factory.js:13:14:13:15 | f* | regular expression | lib/sublib/factory.js:12:26:12:29 | name | library input |
|
||||
| polynomial-redos.js:7:2:7:34 | tainted ... /g, '') | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:7:2:7:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of ' '. | polynomial-redos.js:7:24:7:26 | \\s+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:7:2:7:34 | tainted ... /g, '') | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:7:2:7:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of '\\t'. | polynomial-redos.js:7:24:7:26 | \\s+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:8:2:8:23 | tainted ... *, */) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:8:2:8:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of ' '. | polynomial-redos.js:8:17:8:18 | * | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:9:2:9:34 | tainted ... g, ' ') | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:9:2:9:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of ' '. | polynomial-redos.js:9:19:9:21 | \\s* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:9:2:9:34 | tainted ... g, ' ') | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:9:2:9:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of '\\t'. | polynomial-redos.js:9:19:9:21 | \\s* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:11:2:11:31 | tainted ... ]/, '') | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:11:2:11:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of 'a'. | polynomial-redos.js:11:19:11:20 | .* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:12:2:12:28 | tainted ... ./, '') | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:12:2:12:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of 'a'. | polynomial-redos.js:12:19:12:20 | .* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:15:2:15:52 | tainted ... (?!`)/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:15:2:15:8 | tainted | This $@ that depends on $@ may run slow on strings starting with '`' and with many repetitions of ' '. | polynomial-redos.js:15:28:15:35 | [\\s\\S]*? | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:15:2:15:52 | tainted ... (?!`)/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:15:2:15:8 | tainted | This $@ that depends on $@ may run slow on strings starting with '`_' and with many repetitions of ' '. | polynomial-redos.js:15:41:15:43 | \\s* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:15:2:15:52 | tainted ... (?!`)/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:15:2:15:8 | tainted | This $@ that depends on $@ may run slow on strings starting with '`' and with many repetitions of '\\t'. | polynomial-redos.js:15:28:15:35 | [\\s\\S]*? | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:15:2:15:52 | tainted ... (?!`)/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:15:2:15:8 | tainted | This $@ that depends on $@ may run slow on strings starting with '`_' and with many repetitions of '\\t'. | polynomial-redos.js:15:41:15:43 | \\s* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:16:2:16:46 | tainted ... (?!`)/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:16:2:16:8 | tainted | This $@ that depends on $@ may run slow on strings starting with '`' and with many repetitions of '``'. | polynomial-redos.js:16:25:16:32 | [\\s\\S]*? | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:17:2:17:30 | /^(.*,) ... ainted) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:17:23:17:29 | tainted | This $@ that depends on $@ may run slow on strings starting with ',' and with many repetitions of ',,'. | polynomial-redos.js:17:11:17:12 | .+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:17:2:17:30 | /^(.*,) ... ainted) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:17:23:17:29 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of ','. | polynomial-redos.js:17:5:17:6 | .* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:18:2:18:130 | tainted ... 1,2}/i) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:18:2:18:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of '/'. | polynomial-redos.js:18:83:18:100 | [\\u0600-\\u06FF\\/]+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:19:2:19:148 | tainted ... 1,2}/i) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:19:2:19:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of '0'. | polynomial-redos.js:19:17:19:22 | [0-9]* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:20:2:20:63 | tainted ... d+)?$/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:20:2:20:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of '9'. | polynomial-redos.js:20:56:20:58 | \\d+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:20:2:20:63 | tainted ... d+)?$/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:20:2:20:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of '0'. | polynomial-redos.js:20:56:20:58 | \\d+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:25:2:25:68 | tainted ... (.*)$/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:25:2:25:8 | tainted | This $@ that depends on $@ may run slow on strings starting with '-\\t' and with many repetitions of '\\t\\t'. | polynomial-redos.js:25:37:25:56 | [a-zA-Z0-9+\\/ \\t\\n]+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:25:2:25:68 | tainted ... (.*)$/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:25:2:25:8 | tainted | This $@ that depends on $@ may run slow on strings starting with '-\\t\\t' and with many repetitions of '='. | polynomial-redos.js:25:63:25:64 | .* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:30:2:30:32 | tainted ... /g, "") | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:30:2:30:8 | tainted | This $@ that depends on $@ may run slow on strings starting with '?' and with many repetitions of '?'. | polynomial-redos.js:30:23:30:24 | .* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
@@ -463,7 +463,7 @@ edges
|
||||
| polynomial-redos.js:38:2:38:38 | tainted ... )".*>/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:38:2:38:8 | tainted | This $@ that depends on $@ may run slow on strings starting with '<href="!"' and with many repetitions of 'href="!"'. | polynomial-redos.js:38:34:38:35 | .* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:40:2:40:67 | tainted ... E]*)$/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:40:2:40:8 | tainted | This $@ that depends on $@ may run slow on strings starting with ',-+' and with many repetitions of '++'. | polynomial-redos.js:40:51:40:63 | [?\\x21-\\x7E]* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:43:2:43:78 | tainted ... +))?$/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:43:2:43:8 | tainted | This $@ that depends on $@ may run slow on strings starting with '-\\t+\\t' and with many repetitions of '\\t\\t'. | polynomial-redos.js:43:67:43:72 | [^\\n]+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:48:2:48:37 | tainted ... g, ' ') | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:48:2:48:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of ' '. | polynomial-redos.js:48:22:48:24 | \\s* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:48:2:48:37 | tainted ... g, ' ') | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:48:2:48:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of '\\t'. | polynomial-redos.js:48:22:48:24 | \\s* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:50:2:50:21 | /Y.*X/.test(tainted) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:50:14:50:20 | tainted | This $@ that depends on $@ may run slow on strings starting with 'Y' and with many repetitions of 'Y'. | polynomial-redos.js:50:4:50:5 | .* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:51:2:51:33 | /B?(YH\| ... ainted) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:51:26:51:32 | tainted | This $@ that depends on $@ may run slow on strings starting with 'K' and with many repetitions of 'YH'. | polynomial-redos.js:51:11:51:17 | (YH\|J)* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:52:3:52:29 | /B?(YH\| ... ainted) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:52:22:52:28 | tainted | This $@ that depends on $@ may run slow on strings starting with 'K' and with many repetitions of 'K'. | polynomial-redos.js:52:12:52:13 | .* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
@@ -484,7 +484,7 @@ edges
|
||||
| polynomial-redos.js:67:3:67:25 | /[^Y].* ... ainted) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:67:18:67:24 | tainted | This $@ that depends on $@ may run slow on strings starting with 'X' and with many repetitions of 'Z'. | polynomial-redos.js:67:8:67:9 | .* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:69:3:69:26 | /[^Y].* ... q.body) | polynomial-redos.js:69:18:69:25 | req.body | polynomial-redos.js:69:18:69:25 | req.body | This $@ that depends on $@ may run slow on strings starting with 'X' and with many repetitions of 'X'. | polynomial-redos.js:69:8:69:9 | .* | regular expression | polynomial-redos.js:69:18:69:25 | req.body | a user-provided value |
|
||||
| polynomial-redos.js:71:2:71:67 | tainted ... E]*)$/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:71:2:71:8 | tainted | This $@ that depends on $@ may run slow on strings starting with ',-+' and with many repetitions of '++'. | polynomial-redos.js:71:51:71:63 | [?\\x21-\\x7E]* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:73:2:73:60 | tainted ... LWP7")) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:73:2:73:8 | tainted | This $@ that depends on $@ may run slow on strings starting with 'MSIE 9.9' and with many repetitions of '9'. | polynomial-redos.js:73:50:73:51 | .* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:73:2:73:60 | tainted ... LWP7")) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:73:2:73:8 | tainted | This $@ that depends on $@ may run slow on strings starting with 'MSIE 0.0' and with many repetitions of '0'. | polynomial-redos.js:73:50:73:51 | .* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:75:2:75:39 | tainted ... )".*>/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:75:2:75:8 | tainted | This $@ that depends on $@ may run slow on strings starting with '<' and with many repetitions of '<'. | polynomial-redos.js:75:18:75:19 | .* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:75:2:75:39 | tainted ... )".*>/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:75:2:75:8 | tainted | This $@ that depends on $@ may run slow on strings starting with '<class="!"' and with many repetitions of 'class="!"'. | polynomial-redos.js:75:35:75:36 | .* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:77:2:77:22 | tainted ... /Y.*X/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:77:2:77:8 | tainted | This $@ that depends on $@ may run slow on strings starting with 'Y' and with many repetitions of 'Y'. | polynomial-redos.js:77:18:77:19 | .* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
@@ -493,7 +493,7 @@ edges
|
||||
| polynomial-redos.js:86:2:86:32 | tainted ... ab)*X/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:86:2:86:8 | tainted | This $@ that depends on $@ may run slow on strings starting with 'ab' and with many repetitions of 'ab'. | polynomial-redos.js:86:25:86:29 | (ab)* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:88:2:88:22 | tainted ... /aa*X/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:88:2:88:8 | tainted | This $@ that depends on $@ may run slow on strings starting with 'a' and with many repetitions of 'a'. | polynomial-redos.js:88:18:88:19 | a* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:89:2:89:24 | tainted ... a*a*X/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:89:2:89:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of 'a'. | polynomial-redos.js:89:20:89:21 | a* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:90:2:90:23 | tainted ... \\wa*X/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:90:2:90:8 | tainted | This $@ that depends on $@ may run slow on strings starting with 'a' and with many repetitions of 'a'. | polynomial-redos.js:90:19:90:20 | a* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:90:2:90:23 | tainted ... \\wa*X/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:90:2:90:8 | tainted | This $@ that depends on $@ may run slow on strings starting with '0' and with many repetitions of 'a'. | polynomial-redos.js:90:19:90:20 | a* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:94:2:94:40 | tainted ... \|B)*X/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:94:2:94:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of '3'. | polynomial-redos.js:94:28:94:37 | ([2-5]\|B)* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:95:2:95:33 | tainted ... \|B)*X/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:95:2:95:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of '2'. | polynomial-redos.js:95:21:95:30 | ([2-5]\|B)* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:96:2:96:33 | tainted ... *\\d*X/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:96:2:96:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of '3'. | polynomial-redos.js:96:28:96:30 | \\d* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
@@ -501,16 +501,16 @@ edges
|
||||
| polynomial-redos.js:100:2:100:22 | tainted ... /aa+X/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:100:2:100:8 | tainted | This $@ that depends on $@ may run slow on strings starting with 'a' and with many repetitions of 'a'. | polynomial-redos.js:100:18:100:19 | a+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:101:2:101:21 | tainted.match(/a+X/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:101:2:101:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of 'a'. | polynomial-redos.js:101:17:101:18 | a+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:102:2:102:24 | tainted ... a+a+X/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:102:2:102:8 | tainted | This $@ that depends on $@ may run slow on strings starting with 'a' and with many repetitions of 'a'. | polynomial-redos.js:102:20:102:21 | a+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:103:2:103:23 | tainted ... \\wa+X/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:103:2:103:8 | tainted | This $@ that depends on $@ may run slow on strings starting with 'a' and with many repetitions of 'a'. | polynomial-redos.js:103:19:103:20 | a+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:103:2:103:23 | tainted ... \\wa+X/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:103:2:103:8 | tainted | This $@ that depends on $@ may run slow on strings starting with '0' and with many repetitions of 'a'. | polynomial-redos.js:103:19:103:20 | a+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:104:2:104:24 | tainted ... +b+c+/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:104:2:104:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of 'a'. | polynomial-redos.js:104:17:104:18 | a+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:107:2:107:40 | tainted ... \|B)+X/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:107:2:107:8 | tainted | This $@ that depends on $@ may run slow on strings starting with '3' and with many repetitions of '3'. | polynomial-redos.js:107:28:107:37 | ([2-5]\|B)+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:108:2:108:33 | tainted ... \|B)+X/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:108:2:108:8 | tainted | This $@ that depends on $@ may run slow on strings starting with '9' and with many repetitions of '2'. | polynomial-redos.js:108:21:108:30 | ([2-5]\|B)+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:108:2:108:33 | tainted ... \|B)+X/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:108:2:108:8 | tainted | This $@ that depends on $@ may run slow on strings starting with '0' and with many repetitions of '2'. | polynomial-redos.js:108:21:108:30 | ([2-5]\|B)+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:109:2:109:33 | tainted ... +\\d+X/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:109:2:109:8 | tainted | This $@ that depends on $@ may run slow on strings starting with '3' and with many repetitions of '3'. | polynomial-redos.js:109:28:109:30 | \\d+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:111:2:111:22 | tainted ... /\\s*$/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:111:2:111:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of ' '. | polynomial-redos.js:111:17:111:19 | \\s* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:112:2:112:22 | tainted ... /\\s+$/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:112:2:112:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of ' '. | polynomial-redos.js:112:17:112:19 | \\s+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:111:2:111:22 | tainted ... /\\s*$/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:111:2:111:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of '\\t'. | polynomial-redos.js:111:17:111:19 | \\s* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:112:2:112:22 | tainted ... /\\s+$/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:112:2:112:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of '\\t'. | polynomial-redos.js:112:17:112:19 | \\s+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:114:2:114:27 | tainted ... 5\\w*$/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:114:2:114:8 | tainted | This $@ that depends on $@ may run slow on strings starting with '5' and with many repetitions of '5'. | polynomial-redos.js:114:22:114:24 | \\w* | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:116:2:116:35 | tainted ... \\*\\//g) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:116:2:116:8 | tainted | This $@ that depends on $@ may run slow on strings starting with '/*' and with many repetitions of 'a/*'. | polynomial-redos.js:116:21:116:28 | [\\d\\D]*? | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:118:2:118:25 | tainted ... \\d+)+/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:118:2:118:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of '9'. | polynomial-redos.js:118:17:118:23 | (#\\d+)+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:124:12:124:43 | result. ... /g, '') | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:124:12:124:17 | result | This $@ that depends on $@ may run slow on strings with many repetitions of ' '. | polynomial-redos.js:124:33:124:35 | \\s+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:118:2:118:25 | tainted ... \\d+)+/) | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:118:2:118:8 | tainted | This $@ that depends on $@ may run slow on strings with many repetitions of '0'. | polynomial-redos.js:118:17:118:23 | (#\\d+)+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:124:12:124:43 | result. ... /g, '') | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:124:12:124:17 | result | This $@ that depends on $@ may run slow on strings with many repetitions of '\\t'. | polynomial-redos.js:124:33:124:35 | \\s+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:130:2:130:31 | modifie ... g, "b") | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:130:2:130:9 | modified | This $@ that depends on $@ may run slow on strings starting with 'c' and with many repetitions of 'c'. | polynomial-redos.js:130:21:130:22 | c+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
| polynomial-redos.js:133:2:133:32 | modifie ... g, "b") | polynomial-redos.js:5:16:5:32 | req.query.tainted | polynomial-redos.js:133:2:133:10 | modified2 | This $@ that depends on $@ may run slow on strings starting with 'f' and with many repetitions of 'f'. | polynomial-redos.js:133:22:133:23 | f+ | regular expression | polynomial-redos.js:5:16:5:32 | req.query.tainted | a user-provided value |
|
||||
@@ -1,4 +1,4 @@
|
||||
| highlight.js:2:26:2:979 | ((traffic-flow\|traffic-generator\|firewall\|scheduler\|aaa\|accounting\|address-list\|address\|align\|area\|bandwidth-server\|bfd\|bgp\|bridge\|client\|clock\|community\|config\|connection\|console\|customer\|default\|dhcp-client\|dhcp-server\|discovery\|dns\|e-mail\|ethernet\|filter\|firewall\|firmware\|gps\|graphing\|group\|hardware\|health\|hotspot\|identity\|igmp-proxy\|incoming\|instance\|interface\|ip\|ipsec\|ipv6\|irq\|l2tp-server\|lcd\|ldp\|logging\|mac-server\|mac-winbox\|mangle\|manual\|mirror\|mme\|mpls\|nat\|nd\|neighbor\|network\|note\|ntp\|ospf\|ospf-v3\|ovpn-server\|page\|peer\|pim\|ping\|policy\|pool\|port\|ppp\|pppoe-client\|pptp-server\|prefix\|profile\|proposal\|proxy\|queue\|radius\|resource\|rip\|ripng\|route\|routing\|screen\|script\|security-profiles\|server\|service\|service-port\|settings\|shares\|smb\|sms\|sniffer\|snmp\|snooper\|socks\|sstp-server\|system\|tool\|tracking\|type\|upgrade\|upnp\|user-manager\|users\|user\|vlan\|secret\|vrrp\|watchdog\|web-access\|wireless\|pptp\|pppoe\|lan\|wan\|layer7-protocol\|lease\|simple\|raw);?\\s)+ | This part of the regular expression may cause exponential backtracking on strings starting with '/' and containing many repetitions of 'firewall '. |
|
||||
| highlight.js:2:26:2:979 | ((traffic-flow\|traffic-generator\|firewall\|scheduler\|aaa\|accounting\|address-list\|address\|align\|area\|bandwidth-server\|bfd\|bgp\|bridge\|client\|clock\|community\|config\|connection\|console\|customer\|default\|dhcp-client\|dhcp-server\|discovery\|dns\|e-mail\|ethernet\|filter\|firewall\|firmware\|gps\|graphing\|group\|hardware\|health\|hotspot\|identity\|igmp-proxy\|incoming\|instance\|interface\|ip\|ipsec\|ipv6\|irq\|l2tp-server\|lcd\|ldp\|logging\|mac-server\|mac-winbox\|mangle\|manual\|mirror\|mme\|mpls\|nat\|nd\|neighbor\|network\|note\|ntp\|ospf\|ospf-v3\|ovpn-server\|page\|peer\|pim\|ping\|policy\|pool\|port\|ppp\|pppoe-client\|pptp-server\|prefix\|profile\|proposal\|proxy\|queue\|radius\|resource\|rip\|ripng\|route\|routing\|screen\|script\|security-profiles\|server\|service\|service-port\|settings\|shares\|smb\|sms\|sniffer\|snmp\|snooper\|socks\|sstp-server\|system\|tool\|tracking\|type\|upgrade\|upnp\|user-manager\|users\|user\|vlan\|secret\|vrrp\|watchdog\|web-access\|wireless\|pptp\|pppoe\|lan\|wan\|layer7-protocol\|lease\|simple\|raw);?\\s)+ | This part of the regular expression may cause exponential backtracking on strings starting with '/' and containing many repetitions of 'firewall\\t'. |
|
||||
| highlight.js:6:12:6:695 | (Add\|Clear\|Close\|Copy\|Enter\|Exit\|Find\|Format\|Get\|Hide\|Join\|Lock\|Move\|New\|Open\|Optimize\|Pop\|Push\|Redo\|Remove\|Rename\|Reset\|Resize\|Search\|Select\|Set\|Show\|Skip\|Split\|Step\|Switch\|Undo\|Unlock\|Watch\|Backup\|Checkpoint\|Compare\|Compress\|Convert\|ConvertFrom\|ConvertTo\|Dismount\|Edit\|Expand\|Export\|Group\|Import\|Initialize\|Limit\|Merge\|New\|Out\|Publish\|Restore\|Save\|Sync\|Unpublish\|Update\|Approve\|Assert\|Complete\|Confirm\|Deny\|Disable\|Enable\|Install\|Invoke\|Register\|Request\|Restart\|Resume\|Start\|Stop\|Submit\|Suspend\|Uninstall\|Unregister\|Wait\|Debug\|Measure\|Ping\|Repair\|Resolve\|Test\|Trace\|Connect\|Disconnect\|Read\|Receive\|Send\|Write\|Block\|Grant\|Protect\|Revoke\|Unblock\|Unprotect\|Use\|ForEach\|Sort\|Tee\|Where)+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'New'. |
|
||||
| highlight.js:10:22:10:32 | (\\\\.\|[^/])* | This part of the regular expression may cause exponential backtracking on strings starting with 's/' and containing many repetitions of '\\\\.'. |
|
||||
| highlight.js:10:35:10:45 | (\\\\.\|[^/])* | This part of the regular expression may cause exponential backtracking on strings starting with 's//' and containing many repetitions of '\\\\.'. |
|
||||
@@ -18,12 +18,12 @@
|
||||
| polynomial-redos.js:17:5:17:6 | .* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of ','. |
|
||||
| polynomial-redos.js:41:52:41:63 | [\\x21-\\x7E]* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '?'. |
|
||||
| polynomial-redos.js:46:33:46:45 | [a-zA-Z_0-9]* | This part of the regular expression may cause exponential backtracking on strings starting with 'A' and containing many repetitions of 'A'. |
|
||||
| regexplib/address.js:51:220:51:222 | \\w+ | This part of the regular expression may cause exponential backtracking on strings starting with 'C/O ' and containing many repetitions of 'a'. |
|
||||
| regexplib/address.js:51:616:51:618 | \\w+ | This part of the regular expression may cause exponential backtracking on strings starting with '9 a C/O ' and containing many repetitions of 'a'. |
|
||||
| regexplib/address.js:51:803:51:811 | [A-Za-z]+ | This part of the regular expression may cause exponential backtracking on strings starting with '9 a ' and containing many repetitions of 'A'. |
|
||||
| regexplib/address.js:75:220:75:222 | \\w+ | This part of the regular expression may cause exponential backtracking on strings starting with 'C/O ' and containing many repetitions of 'a'. |
|
||||
| regexplib/address.js:75:616:75:618 | \\w+ | This part of the regular expression may cause exponential backtracking on strings starting with '9 a C/O ' and containing many repetitions of 'a'. |
|
||||
| regexplib/address.js:75:803:75:811 | [A-Za-z]+ | This part of the regular expression may cause exponential backtracking on strings starting with '9 a ' and containing many repetitions of 'A'. |
|
||||
| regexplib/address.js:51:220:51:222 | \\w+ | This part of the regular expression may cause exponential backtracking on strings starting with 'C/O ' and containing many repetitions of '0'. |
|
||||
| regexplib/address.js:51:616:51:618 | \\w+ | This part of the regular expression may cause exponential backtracking on strings starting with '0 0\\tC/O ' and containing many repetitions of '0'. |
|
||||
| regexplib/address.js:51:803:51:811 | [A-Za-z]+ | This part of the regular expression may cause exponential backtracking on strings starting with '0 0\\t\\t' and containing many repetitions of 'A'. |
|
||||
| regexplib/address.js:75:220:75:222 | \\w+ | This part of the regular expression may cause exponential backtracking on strings starting with 'C/O ' and containing many repetitions of '0'. |
|
||||
| regexplib/address.js:75:616:75:618 | \\w+ | This part of the regular expression may cause exponential backtracking on strings starting with '0 0\\tC/O ' and containing many repetitions of '0'. |
|
||||
| regexplib/address.js:75:803:75:811 | [A-Za-z]+ | This part of the regular expression may cause exponential backtracking on strings starting with '0 0\\t\\t' and containing many repetitions of 'A'. |
|
||||
| regexplib/dates.js:27:341:27:348 | [^\\(\\)]* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '''. |
|
||||
| regexplib/email.js:1:16:1:22 | [-.\\w]* | This part of the regular expression may cause exponential backtracking on strings starting with '0' and containing many repetitions of '0'. |
|
||||
| regexplib/email.js:5:24:5:35 | [a-zA-Z0-9]+ | This part of the regular expression may cause exponential backtracking on strings starting with '0' and containing many repetitions of '0'. |
|
||||
@@ -35,52 +35,52 @@
|
||||
| regexplib/email.js:25:106:25:117 | [a-zA-Z0-9]+ | This part of the regular expression may cause exponential backtracking on strings starting with '0@0' and containing many repetitions of '0'. |
|
||||
| regexplib/email.js:25:212:25:223 | [a-zA-Z0-9]+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. |
|
||||
| regexplib/email.js:25:251:25:262 | [a-zA-Z0-9]+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. |
|
||||
| regexplib/email.js:32:10:32:25 | (?:\\w[\\.\\-\\+]?)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. |
|
||||
| regexplib/email.js:32:41:32:61 | (?:\\w[\\.\\-\\+]?){0,62} | This part of the regular expression may cause exponential backtracking on strings starting with 'a@' and containing many repetitions of 'a'. |
|
||||
| regexplib/email.js:32:10:32:25 | (?:\\w[\\.\\-\\+]?)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. |
|
||||
| regexplib/email.js:32:41:32:61 | (?:\\w[\\.\\-\\+]?){0,62} | This part of the regular expression may cause exponential backtracking on strings starting with '0@' and containing many repetitions of '0'. |
|
||||
| regexplib/email.js:33:16:33:22 | [-.\\w]* | This part of the regular expression may cause exponential backtracking on strings starting with '0' and containing many repetitions of '0'. |
|
||||
| regexplib/email.js:33:38:33:51 | ([0-9a-zA-Z])+ | This part of the regular expression may cause exponential backtracking on strings starting with '0@' and containing many repetitions of '00.'. |
|
||||
| regexplib/email.js:33:53:33:58 | [-\\w]* | This part of the regular expression may cause exponential backtracking on strings starting with '0@0' and containing many repetitions of '0'. |
|
||||
| regexplib/email.js:34:24:34:35 | [a-zA-Z0-9]+ | This part of the regular expression may cause exponential backtracking on strings starting with '0' and containing many repetitions of '0'. |
|
||||
| regexplib/email.js:34:63:34:74 | [a-zA-Z0-9]+ | This part of the regular expression may cause exponential backtracking on strings starting with '0@0' and containing many repetitions of '0'. |
|
||||
| regexplib/markup.js:3:451:3:453 | .+? | This part of the regular expression may cause exponential backtracking on strings starting with '<?i:q ' and containing many repetitions of 'a '. |
|
||||
| regexplib/markup.js:3:451:3:453 | .+? | This part of the regular expression may cause exponential backtracking on strings starting with '<?i:q\\t' and containing many repetitions of 'a\\t'. |
|
||||
| regexplib/markup.js:7:15:7:21 | [^\\\\"]* | This part of the regular expression may cause exponential backtracking on strings starting with '"!' and containing many repetitions of '!'. |
|
||||
| regexplib/markup.js:13:6:13:12 | [^"']+? | This part of the regular expression may cause exponential backtracking on strings starting with '<' and containing many repetitions of '!'. |
|
||||
| regexplib/markup.js:13:14:13:16 | .+? | This part of the regular expression may cause exponential backtracking on strings starting with '<' and containing many repetitions of 'a"'. |
|
||||
| regexplib/markup.js:17:17:17:19 | .*? | This part of the regular expression may cause exponential backtracking on strings starting with '<a ="' and containing many repetitions of '" ="'. |
|
||||
| regexplib/markup.js:24:43:24:45 | \\S+ | This part of the regular expression may cause exponential backtracking on strings starting with '<A ' and containing many repetitions of '!=- '. |
|
||||
| regexplib/markup.js:24:47:24:118 | (\\s*=\\s*([-\\w\\.]{1,1024}\|"[^"]{0,1024}"\|'[^']{0,1024}'))? | This part of the regular expression may cause exponential backtracking on strings starting with '<A !' and containing many repetitions of ' =- !'. |
|
||||
| regexplib/markup.js:37:29:37:56 | [a-zA-Z0-9\|:\|\\/\|=\|-\|.\|\\?\|&]* | This part of the regular expression may cause exponential backtracking on strings starting with '[a=' and containing many repetitions of '='. |
|
||||
| regexplib/markup.js:40:23:40:25 | \\w+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. |
|
||||
| regexplib/markup.js:40:132:40:134 | \\s* | This part of the regular expression may cause exponential backtracking on strings starting with 'a[@a=''' and containing many repetitions of ' @a<""'. |
|
||||
| regexplib/markup.js:47:43:47:45 | \\S+ | This part of the regular expression may cause exponential backtracking on strings starting with '<A ' and containing many repetitions of '!=- '. |
|
||||
| regexplib/markup.js:47:47:47:118 | (\\s*=\\s*([-\\w\\.]{1,1024}\|"[^"]{0,1024}"\|'[^']{0,1024}'))? | This part of the regular expression may cause exponential backtracking on strings starting with '<A !' and containing many repetitions of ' =- !'. |
|
||||
| regexplib/markup.js:53:29:53:56 | [a-zA-Z0-9\|:\|\\/\|=\|-\|.\|\\?\|&]* | This part of the regular expression may cause exponential backtracking on strings starting with '[a=' and containing many repetitions of '='. |
|
||||
| regexplib/markup.js:56:23:56:25 | \\w+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. |
|
||||
| regexplib/markup.js:56:132:56:134 | \\s* | This part of the regular expression may cause exponential backtracking on strings starting with 'a[@a=''' and containing many repetitions of ' @a<""'. |
|
||||
| regexplib/markup.js:17:17:17:19 | .*? | This part of the regular expression may cause exponential backtracking on strings starting with '<0\\t="' and containing many repetitions of '"\\t="'. |
|
||||
| regexplib/markup.js:24:43:24:45 | \\S+ | This part of the regular expression may cause exponential backtracking on strings starting with '<A\\t' and containing many repetitions of '!=-\\t'. |
|
||||
| regexplib/markup.js:24:47:24:118 | (\\s*=\\s*([-\\w\\.]{1,1024}\|"[^"]{0,1024}"\|'[^']{0,1024}'))? | This part of the regular expression may cause exponential backtracking on strings starting with '<A\\t!' and containing many repetitions of '\\t=-\\t!'. |
|
||||
| regexplib/markup.js:37:29:37:56 | [a-zA-Z0-9\|:\|\\/\|=\|-\|.\|\\?\|&]* | This part of the regular expression may cause exponential backtracking on strings starting with '[0=' and containing many repetitions of '='. |
|
||||
| regexplib/markup.js:40:23:40:25 | \\w+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. |
|
||||
| regexplib/markup.js:40:132:40:134 | \\s* | This part of the regular expression may cause exponential backtracking on strings starting with '0[@0=''' and containing many repetitions of '\\t@0<""'. |
|
||||
| regexplib/markup.js:47:43:47:45 | \\S+ | This part of the regular expression may cause exponential backtracking on strings starting with '<A\\t' and containing many repetitions of '!=-\\t'. |
|
||||
| regexplib/markup.js:47:47:47:118 | (\\s*=\\s*([-\\w\\.]{1,1024}\|"[^"]{0,1024}"\|'[^']{0,1024}'))? | This part of the regular expression may cause exponential backtracking on strings starting with '<A\\t!' and containing many repetitions of '\\t=-\\t!'. |
|
||||
| regexplib/markup.js:53:29:53:56 | [a-zA-Z0-9\|:\|\\/\|=\|-\|.\|\\?\|&]* | This part of the regular expression may cause exponential backtracking on strings starting with '[0=' and containing many repetitions of '='. |
|
||||
| regexplib/markup.js:56:23:56:25 | \\w+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. |
|
||||
| regexplib/markup.js:56:132:56:134 | \\s* | This part of the regular expression may cause exponential backtracking on strings starting with '0[@0=''' and containing many repetitions of '\\t@0<""'. |
|
||||
| regexplib/misc.js:4:36:4:44 | [a-zA-Z]* | This part of the regular expression may cause exponential backtracking on strings starting with 'A' and containing many repetitions of 'A'. |
|
||||
| regexplib/misc.js:15:56:15:118 | (([^\\\\/:\\*\\?"\\\|<>\\. ])\|([^\\\\/:\\*\\?"\\\|<>]*[^\\\\/:\\*\\?"\\\|<>\\. ]))? | This part of the regular expression may cause exponential backtracking on strings starting with '!' and containing many repetitions of '!\\\\!'. |
|
||||
| regexplib/misc.js:24:56:24:118 | (([^\\\\/:\\*\\?"\\\|<>\\. ])\|([^\\\\/:\\*\\?"\\\|<>]*[^\\\\/:\\*\\?"\\\|<>\\. ]))? | This part of the regular expression may cause exponential backtracking on strings starting with '!' and containing many repetitions of '!\\\\!'. |
|
||||
| regexplib/misc.js:79:3:79:25 | (\\/w\|\\/W\|[^<>+?$%{}&])+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '/W'. |
|
||||
| regexplib/misc.js:90:4:90:11 | ([a-z])+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'aa'. |
|
||||
| regexplib/misc.js:123:17:123:19 | \\d+ | This part of the regular expression may cause exponential backtracking on strings starting with '?se[' and containing many repetitions of '9'. |
|
||||
| regexplib/misc.js:123:17:123:19 | \\d+ | This part of the regular expression may cause exponential backtracking on strings starting with '?se[' and containing many repetitions of '0'. |
|
||||
| regexplib/misc.js:142:3:142:25 | (\\/w\|\\/W\|[^<>+?$%{}&])+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '/W'. |
|
||||
| regexplib/misc.js:148:20:148:22 | \\s+ | This part of the regular expression may cause exponential backtracking on strings starting with '<!' and containing many repetitions of ' '. |
|
||||
| regexplib/misc.js:148:23:148:29 | [^"'=]+ | This part of the regular expression may cause exponential backtracking on strings starting with '<! ' and containing many repetitions of '! '. |
|
||||
| regexplib/misc.js:148:20:148:22 | \\s+ | This part of the regular expression may cause exponential backtracking on strings starting with '<!' and containing many repetitions of '\\t\\t'. |
|
||||
| regexplib/misc.js:148:23:148:29 | [^"'=]+ | This part of the regular expression may cause exponential backtracking on strings starting with '<!\\t' and containing many repetitions of '!\\t'. |
|
||||
| regexplib/misc.js:173:4:173:11 | ([a-z])+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'aa'. |
|
||||
| regexplib/strings.js:19:31:19:57 | [a-zæøå0-9]+ | This part of the regular expression may cause exponential backtracking on strings starting with '#@' and containing many repetitions of '#'. |
|
||||
| regexplib/strings.js:19:69:19:95 | [a-zæøå0-9]+ | This part of the regular expression may cause exponential backtracking on strings starting with '#@#' and containing many repetitions of '##'. |
|
||||
| regexplib/strings.js:47:3:47:5 | \\S* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '!'. |
|
||||
| regexplib/strings.js:53:4:53:12 | [a-z0-9]+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '00.'. |
|
||||
| regexplib/strings.js:53:14:53:24 | [\\-a-z0-9]* | This part of the regular expression may cause exponential backtracking on strings starting with '0' and containing many repetitions of '00.0'. |
|
||||
| regexplib/strings.js:57:17:57:19 | \\d+ | This part of the regular expression may cause exponential backtracking on strings starting with '?se[' and containing many repetitions of '9'. |
|
||||
| regexplib/strings.js:81:17:81:19 | \\d+ | This part of the regular expression may cause exponential backtracking on strings starting with '?se[' and containing many repetitions of '9'. |
|
||||
| regexplib/strings.js:57:17:57:19 | \\d+ | This part of the regular expression may cause exponential backtracking on strings starting with '?se[' and containing many repetitions of '0'. |
|
||||
| regexplib/strings.js:81:17:81:19 | \\d+ | This part of the regular expression may cause exponential backtracking on strings starting with '?se[' and containing many repetitions of '0'. |
|
||||
| regexplib/strings.js:91:3:91:5 | \\S* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '!'. |
|
||||
| regexplib/uri.js:3:128:3:129 | .* | This part of the regular expression may cause exponential backtracking on strings starting with 'ftp:// /' and containing many repetitions of '/'. |
|
||||
| regexplib/uri.js:3:193:3:198 | [^\\#]+ | This part of the regular expression may cause exponential backtracking on strings starting with 'ftp:// a=' and containing many repetitions of '"0='. |
|
||||
| regexplib/uri.js:3:200:3:215 | (?:\\&?\\w+\\=\\w+)* | This part of the regular expression may cause exponential backtracking on strings starting with 'ftp:// a="' and containing many repetitions of 'a=0'. |
|
||||
| regexplib/uri.js:3:211:3:213 | \\w+ | This part of the regular expression may cause exponential backtracking on strings starting with 'ftp:// a="a=' and containing many repetitions of 'aaa='. |
|
||||
| regexplib/uri.js:5:42:5:43 | .* | This part of the regular expression may cause exponential backtracking on strings starting with 'A:\\\\a' and containing many repetitions of '\\\\a'. |
|
||||
| regexplib/uri.js:17:42:17:43 | .* | This part of the regular expression may cause exponential backtracking on strings starting with 'A:\\\\a' and containing many repetitions of '\\\\a'. |
|
||||
| regexplib/uri.js:3:193:3:198 | [^\\#]+ | This part of the regular expression may cause exponential backtracking on strings starting with 'ftp:// 0=' and containing many repetitions of '"0='. |
|
||||
| regexplib/uri.js:3:200:3:215 | (?:\\&?\\w+\\=\\w+)* | This part of the regular expression may cause exponential backtracking on strings starting with 'ftp:// 0="' and containing many repetitions of '0=0'. |
|
||||
| regexplib/uri.js:3:211:3:213 | \\w+ | This part of the regular expression may cause exponential backtracking on strings starting with 'ftp:// 0="0=' and containing many repetitions of '000='. |
|
||||
| regexplib/uri.js:5:42:5:43 | .* | This part of the regular expression may cause exponential backtracking on strings starting with 'A:\\\\0' and containing many repetitions of '\\\\0'. |
|
||||
| regexplib/uri.js:17:42:17:43 | .* | This part of the regular expression may cause exponential backtracking on strings starting with 'A:\\\\0' and containing many repetitions of '\\\\0'. |
|
||||
| regexplib/uri.js:38:35:38:40 | [a-z]+ | This part of the regular expression may cause exponential backtracking on strings starting with 'a.' and containing many repetitions of 'a'. |
|
||||
| regexplib/uri.js:38:52:38:60 | [a-z0-9]+ | This part of the regular expression may cause exponential backtracking on strings starting with 'a.a' and containing many repetitions of '0a'. |
|
||||
| regexplib/uri.js:55:35:55:40 | [a-z]+ | This part of the regular expression may cause exponential backtracking on strings starting with 'a.' and containing many repetitions of 'a'. |
|
||||
@@ -88,9 +88,9 @@
|
||||
| regexplib/uri.js:63:393:63:429 | [a-zA-Z0-9\\.\\,\\?\\'\\\\/\\+&%\\$#\\=~_\\-@]* | This part of the regular expression may cause exponential backtracking on strings starting with 'ftp://1.1.1.0/.' and containing many repetitions of '/#'. |
|
||||
| tst.js:4:18:4:32 | (?:__\|[\\s\\S])+? | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '__'. |
|
||||
| tst.js:4:42:4:58 | (?:\\*\\*\|[\\s\\S])+? | This part of the regular expression may cause exponential backtracking on strings starting with '*' and containing many repetitions of '**'. |
|
||||
| tst.js:19:24:19:43 | (?:[^"\\\\]\|\\\\\\\\\|\\\\.)+ | This part of the regular expression may cause exponential backtracking on strings starting with ' "' and containing many repetitions of '\\\\\\\\'. |
|
||||
| tst.js:19:47:19:66 | (?:[^'\\\\]\|\\\\\\\\\|\\\\.)+ | This part of the regular expression may cause exponential backtracking on strings starting with ' '' and containing many repetitions of '\\\\\\\\'. |
|
||||
| tst.js:19:71:19:90 | (?:[^)\\\\]\|\\\\\\\\\|\\\\.)+ | This part of the regular expression may cause exponential backtracking on strings starting with ' (' and containing many repetitions of '\\\\\\\\'. |
|
||||
| tst.js:19:24:19:43 | (?:[^"\\\\]\|\\\\\\\\\|\\\\.)+ | This part of the regular expression may cause exponential backtracking on strings starting with '\\t"' and containing many repetitions of '\\\\\\\\'. |
|
||||
| tst.js:19:47:19:66 | (?:[^'\\\\]\|\\\\\\\\\|\\\\.)+ | This part of the regular expression may cause exponential backtracking on strings starting with '\\t'' and containing many repetitions of '\\\\\\\\'. |
|
||||
| tst.js:19:71:19:90 | (?:[^)\\\\]\|\\\\\\\\\|\\\\.)+ | This part of the regular expression may cause exponential backtracking on strings starting with '\\t(' and containing many repetitions of '\\\\\\\\'. |
|
||||
| tst.js:31:54:31:55 | .* | This part of the regular expression may cause exponential backtracking on strings starting with '!\|\\n-\|\\n' and containing many repetitions of '\|\|\\n'. |
|
||||
| tst.js:36:23:36:32 | (\\\\\\/\|.)*? | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\\\\/'. |
|
||||
| tst.js:41:27:41:28 | .* | This part of the regular expression may cause exponential backtracking on strings starting with '#' and containing many repetitions of '#'. |
|
||||
@@ -119,14 +119,14 @@
|
||||
| tst.js:125:15:125:28 | ([a-z]\|[d-h])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'd'. |
|
||||
| tst.js:128:15:128:30 | ([^a-z]\|[^0-9])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '/'. |
|
||||
| tst.js:131:15:131:25 | (\\d\|[0-9])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. |
|
||||
| tst.js:134:15:134:22 | (\\s\|\\s)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of ' '. |
|
||||
| tst.js:134:15:134:22 | (\\s\|\\s)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\\t'. |
|
||||
| tst.js:137:15:137:21 | (\\w\|G)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'G'. |
|
||||
| tst.js:143:15:143:22 | (\\d\|\\w)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. |
|
||||
| tst.js:146:15:146:21 | (\\d\|5)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '5'. |
|
||||
| tst.js:149:15:149:24 | (\\s\|[\\f])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\u000c'. |
|
||||
| tst.js:152:15:152:28 | (\\s\|[\\v]\|\\\\v)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\u000b'. |
|
||||
| tst.js:155:15:155:24 | (\\f\|[\\f])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\u000c'. |
|
||||
| tst.js:158:15:158:22 | (\\W\|\\D)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of ' '. |
|
||||
| tst.js:158:15:158:22 | (\\W\|\\D)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '/'. |
|
||||
| tst.js:161:15:161:22 | (\\S\|\\w)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. |
|
||||
| tst.js:164:15:164:24 | (\\S\|[\\w])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. |
|
||||
| tst.js:167:15:167:27 | (1s\|[\\da-z])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '1s'. |
|
||||
@@ -134,7 +134,7 @@
|
||||
| tst.js:173:16:173:20 | [\\d]+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. |
|
||||
| tst.js:185:16:185:21 | [^>a]+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '='. |
|
||||
| tst.js:188:17:188:19 | \\s* | This part of the regular expression may cause exponential backtracking on strings starting with '\\n' and containing many repetitions of '\\n'. |
|
||||
| tst.js:191:18:191:20 | \\s+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of ' '. |
|
||||
| tst.js:191:18:191:20 | \\s+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\\t'. |
|
||||
| tst.js:194:68:194:79 | [ a-zA-Z{}]+ | This part of the regular expression may cause exponential backtracking on strings starting with '{[A(A)A:' and containing many repetitions of ' A:'. |
|
||||
| tst.js:194:81:194:82 | ,? | This part of the regular expression may cause exponential backtracking on strings starting with '{[A(A)A: ' and containing many repetitions of ',A: '. |
|
||||
| tst.js:197:15:197:16 | a+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. |
|
||||
@@ -152,18 +152,18 @@
|
||||
| tst.js:254:27:254:29 | \\w* | This part of the regular expression may cause exponential backtracking on strings starting with 'foobarbaz' and containing many repetitions of 'foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz'. |
|
||||
| tst.js:254:39:254:41 | \\w* | This part of the regular expression may cause exponential backtracking on strings starting with 'foobarbazfoobarbaz' and containing many repetitions of 'foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz'. |
|
||||
| tst.js:254:51:254:53 | \\w* | This part of the regular expression may cause exponential backtracking on strings starting with 'foobarbazfoobarbazfoobarbaz' and containing many repetitions of 'foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz'. |
|
||||
| tst.js:257:14:257:116 | (.thisisagoddamnlongstringforstresstestingthequery\|\\sthisisagoddamnlongstringforstresstestingthequery)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of ' thisisagoddamnlongstringforstresstestingthequery'. |
|
||||
| tst.js:257:14:257:116 | (.thisisagoddamnlongstringforstresstestingthequery\|\\sthisisagoddamnlongstringforstresstestingthequery)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\\tthisisagoddamnlongstringforstresstestingthequery'. |
|
||||
| tst.js:260:14:260:77 | (thisisagoddamnlongstringforstresstestingthequery\|this\\w+query)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'thisisagoddamnlongstringforstresstestingthequery'. |
|
||||
| tst.js:260:68:260:70 | \\w+ | This part of the regular expression may cause exponential backtracking on strings starting with 'this' and containing many repetitions of 'aquerythis'. |
|
||||
| tst.js:260:68:260:70 | \\w+ | This part of the regular expression may cause exponential backtracking on strings starting with 'this' and containing many repetitions of '0querythis'. |
|
||||
| tst.js:272:21:272:22 | b+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'b'. |
|
||||
| tst.js:275:38:275:40 | \\s* | This part of the regular expression may cause exponential backtracking on strings starting with '<a a=' and containing many repetitions of '"" a='. |
|
||||
| tst.js:275:38:275:40 | \\s* | This part of the regular expression may cause exponential backtracking on strings starting with '<0\\t0=' and containing many repetitions of '""\\t0='. |
|
||||
| tst.js:281:16:281:17 | a+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. |
|
||||
| tst.js:284:16:284:17 | a+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. |
|
||||
| tst.js:290:16:290:17 | a+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. |
|
||||
| tst.js:293:22:293:23 | a+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. |
|
||||
| tst.js:299:90:299:91 | e+ | This part of the regular expression may cause exponential backtracking on strings starting with '00000000000000' and containing many repetitions of 'e'. |
|
||||
| tst.js:302:18:302:19 | c+ | This part of the regular expression may cause exponential backtracking on strings starting with 'ab' and containing many repetitions of 'c'. |
|
||||
| tst.js:305:18:305:20 | \\s+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of ' '. |
|
||||
| tst.js:305:18:305:20 | \\s+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\\t'. |
|
||||
| tst.js:308:16:308:24 | ([^/]\|X)+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'X'. |
|
||||
| tst.js:311:20:311:24 | [^Y]+ | This part of the regular expression may cause exponential backtracking on strings starting with 'x' and containing many repetitions of 'Xx'. |
|
||||
| tst.js:314:15:314:16 | a* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. |
|
||||
5
python/ql/lib/change-notes/2022-05-25-redos-refac.md
Normal file
5
python/ql/lib/change-notes/2022-05-25-redos-refac.md
Normal file
@@ -0,0 +1,5 @@
|
||||
---
|
||||
category: deprecated
|
||||
---
|
||||
* The utility files previously in the `semmle.python.security.performance` package have been moved to the `semmle.python.security.regexp` package.
|
||||
The previous files still exist as deprecated aliases.
|
||||
@@ -2,196 +2,27 @@
|
||||
* Provides predicates for reasoning about bad tag filter vulnerabilities.
|
||||
*/
|
||||
|
||||
import performance.ReDoSUtil
|
||||
import regexp.RegexpMatching
|
||||
|
||||
/**
|
||||
* A module for determining if a regexp matches a given string,
|
||||
* and reasoning about which capture groups are filled by a given string.
|
||||
* Holds if the regexp `root` should be tested against `str`.
|
||||
* Implements the `isRegexpMatchingCandidateSig` signature from `RegexpMatching`.
|
||||
* `ignorePrefix` toggles whether the regular expression should be treated as accepting any prefix if it's unanchored.
|
||||
* `testWithGroups` toggles whether it's tested which groups are filled by a given input string.
|
||||
*/
|
||||
private module RegexpMatching {
|
||||
/**
|
||||
* A class to test whether a regular expression matches a string.
|
||||
* Override this class and extend `test`/`testWithGroups` to configure which strings should be tested for acceptance by this regular expression.
|
||||
* The result can afterwards be read from the `matches` predicate.
|
||||
*
|
||||
* Strings in the `testWithGroups` predicate are also tested for which capture groups are filled by the given string.
|
||||
* The result is available in the `fillCaptureGroup` predicate.
|
||||
*/
|
||||
abstract class MatchedRegExp extends RegExpTerm {
|
||||
MatchedRegExp() { this.isRootTerm() }
|
||||
|
||||
/**
|
||||
* Holds if it should be tested whether this regular expression matches `str`.
|
||||
*
|
||||
* If `ignorePrefix` is true, then a regexp without a start anchor will be treated as if it had a start anchor.
|
||||
* E.g. a regular expression `/foo$/` will match any string that ends with "foo",
|
||||
* but if `ignorePrefix` is true, it will only match "foo".
|
||||
*/
|
||||
predicate test(string str, boolean ignorePrefix) {
|
||||
none() // maybe overridden in subclasses
|
||||
}
|
||||
|
||||
/**
|
||||
* Same as `test(..)`, but where the `fillsCaptureGroup` afterwards tells which capture groups were filled by the given string.
|
||||
*/
|
||||
predicate testWithGroups(string str, boolean ignorePrefix) {
|
||||
none() // maybe overridden in subclasses
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if this RegExp matches `str`, where `str` is either in the `test` or `testWithGroups` predicate.
|
||||
*/
|
||||
final predicate matches(string str) {
|
||||
exists(State state | state = getAState(this, str.length() - 1, str, _) |
|
||||
epsilonSucc*(state) = Accept(_)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if matching `str` may fill capture group number `g`.
|
||||
* Only holds if `str` is in the `testWithGroups` predicate.
|
||||
*/
|
||||
final predicate fillsCaptureGroup(string str, int g) {
|
||||
exists(State s |
|
||||
s = getAStateThatReachesAccept(this, _, str, _) and
|
||||
g = group(s.getRepr())
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a state the regular expression `reg` can be in after matching the `i`th char in `str`.
|
||||
* The regular expression is modeled as a non-deterministic finite automaton,
|
||||
* the regular expression can therefore be in multiple states after matching a character.
|
||||
*
|
||||
* It's a forward search to all possible states, and there is thus no guarantee that the state is on a path to an accepting state.
|
||||
*/
|
||||
private State getAState(MatchedRegExp reg, int i, string str, boolean ignorePrefix) {
|
||||
// start state, the -1 position before any chars have been matched
|
||||
i = -1 and
|
||||
(
|
||||
reg.test(str, ignorePrefix)
|
||||
or
|
||||
reg.testWithGroups(str, ignorePrefix)
|
||||
) and
|
||||
result.getRepr().getRootTerm() = reg and
|
||||
isStartState(result)
|
||||
private predicate isBadTagFilterCandidate(
|
||||
RootTerm root, string str, boolean ignorePrefix, boolean testWithGroups
|
||||
) {
|
||||
// the regexp must mention "<" and ">" explicitly.
|
||||
forall(string angleBracket | angleBracket = ["<", ">"] |
|
||||
any(RegExpConstant term | term.getValue().matches("%" + angleBracket + "%")).getRootTerm() =
|
||||
root
|
||||
) and
|
||||
ignorePrefix = true and
|
||||
(
|
||||
str = ["<!-- foo -->", "<!-- foo --!>", "<!- foo ->", "<foo>", "<script>"] and
|
||||
testWithGroups = true
|
||||
or
|
||||
// recursive case
|
||||
result = getAStateAfterMatching(reg, _, str, i, _, ignorePrefix)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the next state after the `prev` state from `reg`.
|
||||
* `prev` is the state after matching `fromIndex` chars in `str`,
|
||||
* and the result is the state after matching `toIndex` chars in `str`.
|
||||
*
|
||||
* This predicate is used as a step relation in the forwards search (`getAState`),
|
||||
* and also as a step relation in the later backwards search (`getAStateThatReachesAccept`).
|
||||
*/
|
||||
private State getAStateAfterMatching(
|
||||
MatchedRegExp reg, State prev, string str, int toIndex, int fromIndex, boolean ignorePrefix
|
||||
) {
|
||||
// the basic recursive case - outlined into a noopt helper to make performance work out.
|
||||
result = getAStateAfterMatchingAux(reg, prev, str, toIndex, fromIndex, ignorePrefix)
|
||||
or
|
||||
// we can skip past word boundaries if the next char is a non-word char.
|
||||
fromIndex = toIndex and
|
||||
prev.getRepr() instanceof RegExpWordBoundary and
|
||||
prev = getAState(reg, toIndex, str, ignorePrefix) and
|
||||
after(prev.getRepr()) = result and
|
||||
str.charAt(toIndex + 1).regexpMatch("\\W") // \W matches any non-word char.
|
||||
}
|
||||
|
||||
pragma[noopt]
|
||||
private State getAStateAfterMatchingAux(
|
||||
MatchedRegExp reg, State prev, string str, int toIndex, int fromIndex, boolean ignorePrefix
|
||||
) {
|
||||
prev = getAState(reg, fromIndex, str, ignorePrefix) and
|
||||
fromIndex = toIndex - 1 and
|
||||
exists(string char | char = str.charAt(toIndex) | specializedDeltaClosed(prev, char, result)) and
|
||||
not discardedPrefixStep(prev, result, ignorePrefix)
|
||||
}
|
||||
|
||||
/** Holds if a step from `prev` to `next` should be discarded when the `ignorePrefix` flag is set. */
|
||||
private predicate discardedPrefixStep(State prev, State next, boolean ignorePrefix) {
|
||||
prev = mkMatch(any(RegExpRoot r)) and
|
||||
ignorePrefix = true and
|
||||
next = prev
|
||||
}
|
||||
|
||||
// The `deltaClosed` relation specialized to the chars that exists in strings tested by a `MatchedRegExp`.
|
||||
private predicate specializedDeltaClosed(State prev, string char, State next) {
|
||||
deltaClosed(prev, specializedGetAnInputSymbolMatching(char), next)
|
||||
}
|
||||
|
||||
// The `getAnInputSymbolMatching` relation specialized to the chars that exists in strings tested by a `MatchedRegExp`.
|
||||
pragma[noinline]
|
||||
private InputSymbol specializedGetAnInputSymbolMatching(string char) {
|
||||
exists(string s, MatchedRegExp r |
|
||||
r.test(s, _)
|
||||
or
|
||||
r.testWithGroups(s, _)
|
||||
|
|
||||
char = s.charAt(_)
|
||||
) and
|
||||
result = getAnInputSymbolMatching(char)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the `i`th state on a path to the accepting state when `reg` matches `str`.
|
||||
* Starts with an accepting state as found by `getAState` and searches backwards
|
||||
* to the start state through the reachable states (as found by `getAState`).
|
||||
*
|
||||
* This predicate holds the invariant that the result state can be reached with `i` steps from a start state,
|
||||
* and an accepting state can be found after (`str.length() - 1 - i`) steps from the result.
|
||||
* The result state is therefore always on a valid path where `reg` accepts `str`.
|
||||
*
|
||||
* This predicate is only used to find which capture groups a regular expression has filled,
|
||||
* and thus the search is only performed for the strings in the `testWithGroups(..)` predicate.
|
||||
*/
|
||||
private State getAStateThatReachesAccept(
|
||||
MatchedRegExp reg, int i, string str, boolean ignorePrefix
|
||||
) {
|
||||
// base case, reaches an accepting state from the last state in `getAState(..)`
|
||||
reg.testWithGroups(str, ignorePrefix) and
|
||||
i = str.length() - 1 and
|
||||
result = getAState(reg, i, str, ignorePrefix) and
|
||||
epsilonSucc*(result) = Accept(_)
|
||||
or
|
||||
// recursive case. `next` is the next state to be matched after matching `prev`.
|
||||
// this predicate is doing a backwards search, so `prev` is the result we are looking for.
|
||||
exists(State next, State prev, int fromIndex, int toIndex |
|
||||
next = getAStateThatReachesAccept(reg, toIndex, str, ignorePrefix) and
|
||||
next = getAStateAfterMatching(reg, prev, str, toIndex, fromIndex, ignorePrefix) and
|
||||
i = fromIndex and
|
||||
result = prev
|
||||
)
|
||||
}
|
||||
|
||||
/** Gets the capture group number that `term` belongs to. */
|
||||
private int group(RegExpTerm term) {
|
||||
exists(RegExpGroup grp | grp.getNumber() = result | term.getParent*() = grp)
|
||||
}
|
||||
}
|
||||
|
||||
/** A class to test whether a regular expression matches certain HTML tags. */
|
||||
class HtmlMatchingRegExp extends RegexpMatching::MatchedRegExp {
|
||||
HtmlMatchingRegExp() {
|
||||
// the regexp must mention "<" and ">" explicitly.
|
||||
forall(string angleBracket | angleBracket = ["<", ">"] |
|
||||
any(RegExpConstant term | term.getValue().matches("%" + angleBracket + "%")).getRootTerm() =
|
||||
this
|
||||
)
|
||||
}
|
||||
|
||||
override predicate testWithGroups(string str, boolean ignorePrefix) {
|
||||
ignorePrefix = true and
|
||||
str = ["<!-- foo -->", "<!-- foo --!>", "<!- foo ->", "<foo>", "<script>"]
|
||||
}
|
||||
|
||||
override predicate test(string str, boolean ignorePrefix) {
|
||||
ignorePrefix = true and
|
||||
str =
|
||||
[
|
||||
"<!-- foo -->", "<!- foo ->", "<!-- foo --!>", "<!-- foo\n -->", "<script>foo</script>",
|
||||
@@ -200,7 +31,23 @@ class HtmlMatchingRegExp extends RegexpMatching::MatchedRegExp {
|
||||
"<script src='foo'></script>", "<SCRIPT>foo</SCRIPT>", "<script\tsrc=\"foo\"/>",
|
||||
"<script\tsrc='foo'></script>", "<sCrIpT>foo</ScRiPt>", "<script src=\"foo\">foo</script >",
|
||||
"<script src=\"foo\">foo</script foo=\"bar\">", "<script src=\"foo\">foo</script\t\n bar>"
|
||||
]
|
||||
] and
|
||||
testWithGroups = false
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* A regexp that matches some string from the `isBadTagFilterCandidate` predicate.
|
||||
*/
|
||||
class HtmlMatchingRegExp extends RootTerm {
|
||||
HtmlMatchingRegExp() { RegexpMatching<isBadTagFilterCandidate/4>::matches(this, _) }
|
||||
|
||||
/** Holds if this regexp matches `str`, where `str` is one of the strings from `isBadTagFilterCandidate`. */
|
||||
predicate matches(string str) { RegexpMatching<isBadTagFilterCandidate/4>::matches(this, str) }
|
||||
|
||||
/** Holds if this regexp fills capture group `g` when matching `str`, where `str` is one of the strings from `isBadTagFilterCandidate`. */
|
||||
predicate fillsCaptureGroup(string str, int g) {
|
||||
RegexpMatching<isBadTagFilterCandidate/4>::fillsCaptureGroup(this, str, g)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
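For intuition (not part of the diff), the probe strings above can be exercised outside QL with a quick Python sketch: a hypothetical, deliberately weak comment-stripping pattern is run against the same inputs, showing which ones it fails to recognise and which capture groups get filled, loosely analogous to the `matches` and `fillsCaptureGroup` predicates.

```python
import re

# Hypothetical example of a bad filter (not taken from the diff): it only
# recognises the conventional "-->" terminator, so the "--!>" variant, which
# HTML parsers generally treat as closing the comment, slips through.
bad_comment_filter = re.compile(r"<!--(.*?)-->", re.DOTALL)

probes = ["<!-- foo -->", "<!-- foo --!>", "<!- foo ->", "<foo>", "<script>"]

for probe in probes:
    m = bad_comment_filter.search(probe)
    if m is None:
        print(f"{probe!r}: no match (the filter would leave this input untouched)")
    else:
        # m.groups() shows which capture groups were filled, loosely
        # analogous to fillsCaptureGroup above.
        print(f"{probe!r}: matched, groups = {m.groups()}")
```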
@@ -1,374 +1,4 @@
|
||||
/**
|
||||
* This library implements the analysis described in the following two papers:
|
||||
*
|
||||
* James Kirrage, Asiri Rathnayake, Hayo Thielecke: Static Analysis for
|
||||
* Regular Expression Denial-of-Service Attacks. NSS 2013.
|
||||
* (http://www.cs.bham.ac.uk/~hxt/research/reg-exp-sec.pdf)
|
||||
* Asiri Rathnayake, Hayo Thielecke: Static Analysis for Regular Expression
|
||||
* Exponential Runtime via Substructural Logics. 2014.
|
||||
* (https://www.cs.bham.ac.uk/~hxt/research/redos_full.pdf)
|
||||
*
|
||||
* The basic idea is to search for overlapping cycles in the NFA, that is,
|
||||
* states `q` such that there are two distinct paths from `q` to itself
|
||||
* that consume the same word `w`.
|
||||
*
|
||||
* For any such state `q`, an attack string can be constructed as follows:
|
||||
* concatenate a prefix `v` that takes the NFA to `q` with `n` copies of
|
||||
* the word `w` that leads back to `q` along two different paths, followed
|
||||
* by a suffix `x` that is _not_ accepted in state `q`. A backtracking
|
||||
* implementation will need to explore at least 2^n different ways of going
|
||||
* from `q` back to itself while trying to match the `n` copies of `w`
|
||||
* before finally giving up.
|
||||
*
|
||||
* Now in order to identify overlapping cycles, all we have to do is find
|
||||
* pumpable forks, that is, states `q` that can transition to two different
|
||||
* states `r1` and `r2` on the same input symbol `c`, such that there are
|
||||
* paths from both `r1` and `r2` to `q` that consume the same word. The latter
|
||||
* condition is equivalent to saying that `(q, q)` is reachable from `(r1, r2)`
|
||||
* in the product NFA.
|
||||
*
|
||||
* This is what the library does. It makes a simple attempt to construct a
|
||||
* prefix `v` leading into `q`, but only to improve the alert message.
|
||||
* And the library tries to prove the existence of a suffix that ensures
|
||||
* rejection. This check might fail, which can cause false positives.
|
||||
*
|
||||
* Finally, sometimes it depends on the translation whether the NFA generated
|
||||
* for a regular expression has a pumpable fork or not. We implement one
|
||||
* particular translation, which may result in false positives or negatives
|
||||
* relative to some particular JavaScript engine.
|
||||
*
|
||||
* More precisely, the library constructs an NFA from a regular expression `r`
|
||||
* as follows:
|
||||
*
|
||||
* * Every sub-term `t` gives rise to an NFA state `Match(t,i)`, representing
|
||||
* the state of the automaton before attempting to match the `i`th character in `t`.
|
||||
* * There is one accepting state `Accept(r)`.
|
||||
* * There is a special `AcceptAnySuffix(r)` state, which accepts any suffix string
|
||||
* by using an epsilon transition to `Accept(r)` and an any transition to itself.
|
||||
* * Transitions between states may be labelled with epsilon, or an abstract
|
||||
* input symbol.
|
||||
* * Each abstract input symbol represents a set of concrete input characters:
|
||||
* either a single character, a set of characters represented by a
|
||||
* character class, or the set of all characters.
|
||||
* * The product automaton is constructed lazily, starting with pair states
|
||||
* `(q, q)` where `q` is a fork, and proceeding along an over-approximate
|
||||
* step relation.
|
||||
* * The over-approximate step relation allows transitions along pairs of
|
||||
* abstract input symbols where the symbols have overlap in the characters they accept.
|
||||
* * Once a trace of pairs of abstract input symbols that leads from a fork
|
||||
* back to itself has been identified, we attempt to construct a concrete
|
||||
* string corresponding to it, which may fail.
|
||||
* * Lastly we ensure that any state reached by repeating `n` copies of `w` has
|
||||
* a suffix `x` (possibly empty) that is most likely __not__ accepted.
|
||||
*/
|
||||
/** DEPRECATED. Import `semmle.python.security.regexp.ExponentialBackTracking` instead. */
|
||||
|
||||
import ReDoSUtil
|
||||
|
||||
/**
|
||||
* Holds if state `s` might be inside a backtracking repetition.
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate stateInsideBacktracking(State s) {
|
||||
s.getRepr().getParent*() instanceof MaybeBacktrackingRepetition
|
||||
}
|
||||
|
||||
/**
|
||||
* An infinitely repeating quantifier that might backtrack.
|
||||
*/
|
||||
private class MaybeBacktrackingRepetition extends InfiniteRepetitionQuantifier {
|
||||
MaybeBacktrackingRepetition() {
|
||||
exists(RegExpTerm child |
|
||||
child instanceof RegExpAlt or
|
||||
child instanceof RegExpQuantifier
|
||||
|
|
||||
child.getParent+() = this
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A state in the product automaton.
|
||||
*/
|
||||
private newtype TStatePair =
|
||||
/**
|
||||
* We lazily only construct those states that we are actually
|
||||
* going to need: `(q, q)` for every fork state `q`, and any
|
||||
* pair of states that can be reached from a pair that we have
|
||||
* already constructed. To cut down on the number of states,
|
||||
* we only represent states `(q1, q2)` where `q1` is lexicographically
|
||||
* no bigger than `q2`.
|
||||
*
|
||||
* States are only constructed if both states in the pair are
|
||||
* inside a repetition that might backtrack.
|
||||
*/
|
||||
MkStatePair(State q1, State q2) {
|
||||
isFork(q1, _, _, _, _) and q2 = q1
|
||||
or
|
||||
(step(_, _, _, q1, q2) or step(_, _, _, q2, q1)) and
|
||||
rankState(q1) <= rankState(q2)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a unique number for a `state`.
|
||||
* Is used to create an ordering of states, where states with the same `toString()` will be ordered differently.
|
||||
*/
|
||||
private int rankState(State state) {
|
||||
state =
|
||||
rank[result](State s, Location l |
|
||||
l = s.getRepr().getLocation()
|
||||
|
|
||||
s order by l.getStartLine(), l.getStartColumn(), s.toString()
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* A state in the product automaton.
|
||||
*/
|
||||
private class StatePair extends TStatePair {
|
||||
State q1;
|
||||
State q2;
|
||||
|
||||
StatePair() { this = MkStatePair(q1, q2) }
|
||||
|
||||
/** Gets a textual representation of this element. */
|
||||
string toString() { result = "(" + q1 + ", " + q2 + ")" }
|
||||
|
||||
/** Gets the first component of the state pair. */
|
||||
State getLeft() { result = q1 }
|
||||
|
||||
/** Gets the second component of the state pair. */
|
||||
State getRight() { result = q2 }
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds for `(fork, fork)` state pairs when `isFork(fork, _, _, _, _)` holds.
|
||||
*
|
||||
* Used in `statePairDistToFork`
|
||||
*/
|
||||
private predicate isStatePairFork(StatePair p) {
|
||||
exists(State fork | p = MkStatePair(fork, fork) and isFork(fork, _, _, _, _))
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to the corresponding
|
||||
* components of `r`.
|
||||
*
|
||||
* Used in `statePairDistToFork`
|
||||
*/
|
||||
private predicate reverseStep(StatePair r, StatePair q) { step(q, _, _, r) }
|
||||
|
||||
/**
|
||||
* Gets the minimum length of a path from `q` to `r` in the
|
||||
* product automaton.
|
||||
*/
|
||||
private int statePairDistToFork(StatePair q, StatePair r) =
|
||||
shortestDistances(isStatePairFork/1, reverseStep/2)(r, q, result)
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from `q` to `r1` and from `q` to `r2`
|
||||
* labelled with `s1` and `s2`, respectively, where `s1` and `s2` do not
|
||||
* trivially have an empty intersection.
|
||||
*
|
||||
* This predicate only holds for states associated with regular expressions
|
||||
* that have at least one repetition quantifier in them (otherwise the
|
||||
* expression cannot be vulnerable to ReDoS attacks anyway).
|
||||
*/
|
||||
pragma[noopt]
|
||||
private predicate isFork(State q, InputSymbol s1, InputSymbol s2, State r1, State r2) {
|
||||
stateInsideBacktracking(q) and
|
||||
exists(State q1, State q2 |
|
||||
q1 = epsilonSucc*(q) and
|
||||
delta(q1, s1, r1) and
|
||||
q2 = epsilonSucc*(q) and
|
||||
delta(q2, s2, r2) and
|
||||
// Use pragma[noopt] to prevent intersect(s1,s2) from being the starting point of the join.
|
||||
// From (s1,s2) it would find a huge number of intermediate state pairs (q1,q2) originating from different literals,
|
||||
// and discover at the end that no `q` can reach both `q1` and `q2` by epsilon transitions.
|
||||
exists(intersect(s1, s2))
|
||||
|
|
||||
s1 != s2
|
||||
or
|
||||
r1 != r2
|
||||
or
|
||||
r1 = r2 and q1 != q2
|
||||
or
|
||||
// If q can reach itself by epsilon transitions, then there are two distinct paths to the q1/q2 state:
|
||||
// one that uses the loop and one that doesn't. The engine will separately attempt to match with each path,
|
||||
// despite ending in the same state. The "fork" thus arises from the choice of whether to use the loop or not.
|
||||
// To avoid every state in the loop becoming a fork state,
|
||||
// we arbitrarily pick the InfiniteRepetitionQuantifier state as the canonical fork state for the loop
|
||||
// (every epsilon-loop must contain such a state).
|
||||
//
|
||||
// We additionally require that there exists another InfiniteRepetitionQuantifier `mid` on the path from `q` to itself.
|
||||
// This is done to avoid flagging regular expressions such as `/(a?)*b/` - that only has polynomial runtime, and is detected by `js/polynomial-redos`.
|
||||
// The below code is therefore a heuristic that only flags regular expressions such as `/(a*)*b/`,
|
||||
// and does not flag regular expressions such as `/(a?b?)c/`, but the latter pattern is not used frequently.
|
||||
r1 = r2 and
|
||||
q1 = q2 and
|
||||
epsilonSucc+(q) = q and
|
||||
exists(RegExpTerm term | term = q.getRepr() | term instanceof InfiniteRepetitionQuantifier) and
|
||||
// One of the mid states is an infinite quantifier itself
|
||||
exists(State mid, RegExpTerm term |
|
||||
mid = epsilonSucc+(q) and
|
||||
term = mid.getRepr() and
|
||||
term instanceof InfiniteRepetitionQuantifier and
|
||||
q = epsilonSucc+(mid) and
|
||||
not mid = q
|
||||
)
|
||||
) and
|
||||
stateInsideBacktracking(r1) and
|
||||
stateInsideBacktracking(r2)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the state pair `(q1, q2)` or `(q2, q1)`; note that only
|
||||
* one or the other is defined.
|
||||
*/
|
||||
private StatePair mkStatePair(State q1, State q2) {
|
||||
result = MkStatePair(q1, q2) or result = MkStatePair(q2, q1)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to the corresponding
|
||||
* components of `r` labelled with `s1` and `s2`, respectively.
|
||||
*/
|
||||
private predicate step(StatePair q, InputSymbol s1, InputSymbol s2, StatePair r) {
|
||||
exists(State r1, State r2 | step(q, s1, s2, r1, r2) and r = mkStatePair(r1, r2))
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to `r1` and `r2`
|
||||
* labelled with `s1` and `s2`, respectively.
|
||||
*
|
||||
* We only consider transitions where the resulting states `(r1, r2)` are both
|
||||
* inside a repetition that might backtrack.
|
||||
*/
|
||||
pragma[noopt]
|
||||
private predicate step(StatePair q, InputSymbol s1, InputSymbol s2, State r1, State r2) {
|
||||
exists(State q1, State q2 | q.getLeft() = q1 and q.getRight() = q2 |
|
||||
deltaClosed(q1, s1, r1) and
|
||||
deltaClosed(q2, s2, r2) and
|
||||
// use noopt to force the join on `intersect` to happen last.
|
||||
exists(intersect(s1, s2))
|
||||
) and
|
||||
stateInsideBacktracking(r1) and
|
||||
stateInsideBacktracking(r2)
|
||||
}
|
||||
|
||||
private newtype TTrace =
|
||||
Nil() or
|
||||
Step(InputSymbol s1, InputSymbol s2, TTrace t) { isReachableFromFork(_, _, s1, s2, t, _) }
|
||||
|
||||
/**
|
||||
* A list of pairs of input symbols that describe a path in the product automaton
|
||||
* starting from some fork state.
|
||||
*/
|
||||
private class Trace extends TTrace {
|
||||
/** Gets a textual representation of this element. */
|
||||
string toString() {
|
||||
this = Nil() and result = "Nil()"
|
||||
or
|
||||
exists(InputSymbol s1, InputSymbol s2, Trace t | this = Step(s1, s2, t) |
|
||||
result = "Step(" + s1 + ", " + s2 + ", " + t + ")"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `r` is reachable from `(fork, fork)` under input `w`, and there is
|
||||
* a path from `r` back to `(fork, fork)` with `rem` steps.
|
||||
*/
|
||||
private predicate isReachableFromFork(State fork, StatePair r, Trace w, int rem) {
|
||||
exists(InputSymbol s1, InputSymbol s2, Trace v |
|
||||
isReachableFromFork(fork, r, s1, s2, v, rem) and
|
||||
w = Step(s1, s2, v)
|
||||
)
|
||||
}
|
||||
|
||||
private predicate isReachableFromFork(
|
||||
State fork, StatePair r, InputSymbol s1, InputSymbol s2, Trace v, int rem
|
||||
) {
|
||||
// base case
|
||||
exists(State q1, State q2 |
|
||||
isFork(fork, s1, s2, q1, q2) and
|
||||
r = MkStatePair(q1, q2) and
|
||||
v = Nil() and
|
||||
rem = statePairDistToFork(r, MkStatePair(fork, fork))
|
||||
)
|
||||
or
|
||||
// recursive case
|
||||
exists(StatePair p |
|
||||
isReachableFromFork(fork, p, v, rem + 1) and
|
||||
step(p, s1, s2, r) and
|
||||
rem = statePairDistToFork(r, MkStatePair(fork, fork))
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a state in the product automaton from which `(fork, fork)` is
|
||||
* reachable in zero or more epsilon transitions.
|
||||
*/
|
||||
private StatePair getAForkPair(State fork) {
|
||||
isFork(fork, _, _, _, _) and
|
||||
result = MkStatePair(epsilonPred*(fork), epsilonPred*(fork))
|
||||
}
|
||||
|
||||
private predicate hasSuffix(Trace suffix, Trace t, int i) {
|
||||
// Declaring `t` to be a `RelevantTrace` currently causes a redundant check in the
|
||||
// recursive case, so instead we check it explicitly here.
|
||||
t instanceof RelevantTrace and
|
||||
i = 0 and
|
||||
suffix = t
|
||||
or
|
||||
hasSuffix(Step(_, _, suffix), t, i - 1)
|
||||
}
|
||||
|
||||
pragma[noinline]
|
||||
private predicate hasTuple(InputSymbol s1, InputSymbol s2, Trace t, int i) {
|
||||
hasSuffix(Step(s1, s2, _), t, i)
|
||||
}
|
||||
|
||||
private class RelevantTrace extends Trace, Step {
|
||||
RelevantTrace() {
|
||||
exists(State fork, StatePair q |
|
||||
isReachableFromFork(fork, q, this, _) and
|
||||
q = getAForkPair(fork)
|
||||
)
|
||||
}
|
||||
|
||||
pragma[noinline]
|
||||
private string intersect(int i) {
|
||||
exists(InputSymbol s1, InputSymbol s2 |
|
||||
hasTuple(s1, s2, this, i) and
|
||||
result = intersect(s1, s2)
|
||||
)
|
||||
}
|
||||
|
||||
/** Gets a string corresponding to this trace. */
|
||||
// the pragma is needed for the case where `intersect(s1, s2)` has multiple values,
|
||||
// not for recursion
|
||||
language[monotonicAggregates]
|
||||
string concretise() {
|
||||
result = strictconcat(int i | hasTuple(_, _, this, i) | this.intersect(i) order by i desc)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `fork` is a pumpable fork with word `w`.
|
||||
*/
|
||||
private predicate isPumpable(State fork, string w) {
|
||||
exists(StatePair q, RelevantTrace t |
|
||||
isReachableFromFork(fork, q, t, _) and
|
||||
q = getAForkPair(fork) and
|
||||
w = t.concretise()
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* An instantiation of `ReDoSConfiguration` for exponential backtracking.
|
||||
*/
|
||||
class ExponentialReDoSConfiguration extends ReDoSConfiguration {
|
||||
ExponentialReDoSConfiguration() { this = "ExponentialReDoSConfiguration" }
|
||||
|
||||
override predicate isReDoSCandidate(State state, string pump) { isPumpable(state, pump) }
|
||||
}
|
||||
deprecated import semmle.python.security.regexp.ExponentialBackTracking as Dep
|
||||
import Dep
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,454 +1,4 @@
|
||||
/**
|
||||
* Provides classes for working with regular expressions that can
|
||||
* perform backtracking in superlinear time.
|
||||
*/
|
||||
/** DEPRECATED. Import `semmle.python.security.regexp.SuperlinearBackTracking` instead. */
|
||||
|
||||
import ReDoSUtil
|
||||
|
||||
/*
|
||||
* This module implements the analysis described in the paper:
|
||||
* Valentin Wustholz, Oswaldo Olivo, Marijn J. H. Heule, and Isil Dillig:
|
||||
* Static Detection of DoS Vulnerabilities in
|
||||
* Programs that use Regular Expressions
|
||||
* (Extended Version).
|
||||
* (https://arxiv.org/pdf/1701.04045.pdf)
|
||||
*
|
||||
* Theorem 3 from the paper describes the basic idea.
|
||||
*
|
||||
* The following explains the idea using variables and predicate names that are used in the implementation:
|
||||
* We consider a pair of repetitions, which we will call `pivot` and `succ`.
|
||||
*
|
||||
* We create a product automaton of 3-tuples of states (see `StateTuple`).
|
||||
* There exists a transition `(a,b,c) -> (d,e,f)` in the product automaton
|
||||
* iff there exists three transitions in the NFA `a->d, b->e, c->f` where those three
|
||||
* transitions all match a shared character `char`. (see `getAThreewayIntersect`)
|
||||
*
|
||||
* We start a search in the product automaton at `(pivot, pivot, succ)`,
|
||||
* and search for a series of transitions (a `Trace`), such that we end
|
||||
* at `(pivot, succ, succ)` (see `isReachableFromStartTuple`).
|
||||
*
|
||||
* For example, consider the regular expression `/^\d*5\w*$/`.
|
||||
* The search will start at the tuple `(\d*, \d*, \w*)` and search
|
||||
* for a path to `(\d*, \w*, \w*)`.
|
||||
* This path exists, and consists of a single transition in the product automaton,
|
||||
* where the three corresponding NFA edges all match the character `"5"`.
|
||||
*
|
||||
* The start-state in the NFA has an any-transition to itself, this allows us to
|
||||
* flag regular expressions such as `/a*$/` - which does not have a start anchor -
|
||||
* and can thus start matching anywhere.
|
||||
*
|
||||
* The implementation is not perfect.
|
||||
* It has the same suffix detection issue as the `js/redos` query, which can cause false positives.
|
||||
* It also doesn't find all transitions in the product automaton, which can cause false negatives.
|
||||
*/
|
||||
|
||||
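As a rough empirical companion to the `/^\d*5\w*$/` example above (not part of the diff), the sketch below uses Python's backtracking `re` engine: the pump string `"5"` is matched by the `\d*` loop, by the literal `5`, and by the `\w*` loop, and a trailing `"!"` forces rejection, so every split point between the two loops is retried. Absolute timings depend on interpreter and machine; only the growth rate matters.

```python
import re
import time

# Quadratic-blowup demonstration for ^\d*5\w*$ (a sketch, not part of the diff).
# The pump "5" can be consumed by \d*, by the literal 5, and by \w*; the
# trailing "!" guarantees rejection, so the engine retries every split point.
pattern = re.compile(r"^\d*5\w*$")

for n in (2_000, 4_000, 8_000):
    attack = "5" * n + "!"
    start = time.perf_counter()
    assert pattern.match(attack) is None  # the "!" can never be matched
    elapsed = time.perf_counter() - start
    print(f"n = {n:5d}: {elapsed:.3f}s")  # roughly quadruples when n doubles
```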
/**
|
||||
* An instantiation of `ReDoSConfiguration` for superlinear ReDoS.
|
||||
*/
|
||||
class SuperLinearReDoSConfiguration extends ReDoSConfiguration {
|
||||
SuperLinearReDoSConfiguration() { this = "SuperLinearReDoSConfiguration" }
|
||||
|
||||
override predicate isReDoSCandidate(State state, string pump) { isPumpable(_, state, pump) }
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets any root (start) state of a regular expression.
|
||||
*/
|
||||
private State getRootState() { result = mkMatch(any(RegExpRoot r)) }
|
||||
|
||||
private newtype TStateTuple =
|
||||
MkStateTuple(State q1, State q2, State q3) {
|
||||
// starts at (pivot, pivot, succ)
|
||||
isStartLoops(q1, q3) and q1 = q2
|
||||
or
|
||||
step(_, _, _, _, q1, q2, q3) and FeasibleTuple::isFeasibleTuple(q1, q2, q3)
|
||||
}
|
||||
|
||||
/**
|
||||
* A state in the product automaton.
|
||||
* The product automaton contains 3-tuples of states.
|
||||
*
|
||||
* We lazily only construct those states that we are actually
|
||||
* going to need.
|
||||
* Either a start state `(pivot, pivot, succ)`, or a state
|
||||
* where there exists a transition from an already existing state.
|
||||
*
|
||||
* The exponential variant of this query (`js/redos`) uses an optimization
|
||||
* trick where `q1 <= q2`. This trick cannot be used here as the order
|
||||
* of the elements matters.
|
||||
*/
|
||||
class StateTuple extends TStateTuple {
|
||||
State q1;
|
||||
State q2;
|
||||
State q3;
|
||||
|
||||
StateTuple() { this = MkStateTuple(q1, q2, q3) }
|
||||
|
||||
/**
|
||||
* Gets a string representation of this tuple.
|
||||
*/
|
||||
string toString() { result = "(" + q1 + ", " + q2 + ", " + q3 + ")" }
|
||||
|
||||
/**
|
||||
* Holds if this tuple is `(r1, r2, r3)`.
|
||||
*/
|
||||
pragma[noinline]
|
||||
predicate isTuple(State r1, State r2, State r3) { r1 = q1 and r2 = q2 and r3 = q3 }
|
||||
}
|
||||
|
||||
/**
|
||||
* A module for determining feasible tuples for the product automaton.
|
||||
*
|
||||
* The implementation is split into many predicates for performance reasons.
|
||||
*/
|
||||
private module FeasibleTuple {
|
||||
/**
|
||||
* Holds if the tuple `(r1, r2, r3)` might be on path from a start-state to an end-state in the product automaton.
|
||||
*/
|
||||
pragma[inline]
|
||||
predicate isFeasibleTuple(State r1, State r2, State r3) {
|
||||
// The first element is either inside a repetition (or the start state itself)
|
||||
isRepetitionOrStart(r1) and
|
||||
// The last element is inside a repetition
|
||||
stateInsideRepetition(r3) and
|
||||
// The states are reachable in the NFA in the order r1 -> r2 -> r3
|
||||
delta+(r1) = r2 and
|
||||
delta+(r2) = r3 and
|
||||
// The first element can reach a beginning (the "pivot" state in a `(pivot, succ)` pair).
|
||||
canReachABeginning(r1) and
|
||||
// The last element can reach a target (the "succ" state in a `(pivot, succ)` pair).
|
||||
canReachATarget(r3)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `s` is either inside a repetition, or is the start state (which is a repetition).
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate isRepetitionOrStart(State s) { stateInsideRepetition(s) or s = getRootState() }
|
||||
|
||||
/**
|
||||
* Holds if state `s` might be inside a backtracking repetition.
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate stateInsideRepetition(State s) {
|
||||
s.getRepr().getParent*() instanceof InfiniteRepetitionQuantifier
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there exists a path in the NFA from `s` to a "pivot" state
|
||||
* (from a `(pivot, succ)` pair that starts the search).
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate canReachABeginning(State s) {
|
||||
delta+(s) = any(State pivot | isStartLoops(pivot, _))
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there exists a path in the NFA from `s` to a "succ" state
|
||||
* (from a `(pivot, succ)` pair that starts the search).
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate canReachATarget(State s) { delta+(s) = any(State succ | isStartLoops(_, succ)) }
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `pivot` and `succ` are a pair of loops that could be the beginning of a quadratic blowup.
|
||||
*
|
||||
* There is a slight implementation difference compared to the paper: this predicate requires that `pivot != succ`.
|
||||
* The case where `pivot = succ` causes exponential backtracking and is handled by the `js/redos` query.
|
||||
*/
|
||||
predicate isStartLoops(State pivot, State succ) {
|
||||
pivot != succ and
|
||||
succ.getRepr() instanceof InfiniteRepetitionQuantifier and
|
||||
delta+(pivot) = succ and
|
||||
(
|
||||
pivot.getRepr() instanceof InfiniteRepetitionQuantifier
|
||||
or
|
||||
pivot = mkMatch(any(RegExpRoot root))
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a state for which there exists a transition in the NFA from `s`.
|
||||
*/
|
||||
State delta(State s) { delta(s, _, result) }
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to the corresponding
|
||||
* components of `r` labelled with `s1`, `s2`, and `s3`, respectively.
|
||||
*/
|
||||
pragma[noinline]
|
||||
predicate step(StateTuple q, InputSymbol s1, InputSymbol s2, InputSymbol s3, StateTuple r) {
|
||||
exists(State r1, State r2, State r3 |
|
||||
step(q, s1, s2, s3, r1, r2, r3) and r = MkStateTuple(r1, r2, r3)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to `r1`, `r2`, and `r3`
|
||||
* labelled with `s1`, `s2`, and `s3`, respectively.
|
||||
*/
|
||||
pragma[noopt]
|
||||
predicate step(
|
||||
StateTuple q, InputSymbol s1, InputSymbol s2, InputSymbol s3, State r1, State r2, State r3
|
||||
) {
|
||||
exists(State q1, State q2, State q3 | q.isTuple(q1, q2, q3) |
|
||||
deltaClosed(q1, s1, r1) and
|
||||
deltaClosed(q2, s2, r2) and
|
||||
deltaClosed(q3, s3, r3) and
|
||||
// use noopt to force the join on `getAThreewayIntersect` to happen last.
|
||||
exists(getAThreewayIntersect(s1, s2, s3))
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a char that is matched by all the edges `s1`, `s2`, and `s3`.
|
||||
*
|
||||
* The result is not complete, and might miss some combination of edges that share some character.
|
||||
*/
|
||||
pragma[noinline]
|
||||
string getAThreewayIntersect(InputSymbol s1, InputSymbol s2, InputSymbol s3) {
|
||||
result = minAndMaxIntersect(s1, s2) and result = [intersect(s2, s3), intersect(s1, s3)]
|
||||
or
|
||||
result = minAndMaxIntersect(s1, s3) and result = [intersect(s2, s3), intersect(s1, s2)]
|
||||
or
|
||||
result = minAndMaxIntersect(s2, s3) and result = [intersect(s1, s2), intersect(s1, s3)]
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the minimum and maximum characters that intersect between `a` and `b`.
|
||||
* This predicate is used to limit the size of `getAThreewayIntersect`.
|
||||
*/
|
||||
pragma[noinline]
|
||||
string minAndMaxIntersect(InputSymbol a, InputSymbol b) {
|
||||
result = [min(intersect(a, b)), max(intersect(a, b))]
|
||||
}
|
||||
|
||||
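A rough Python model of this under-approximation (not part of the diff): input symbols are represented as plain character sets, and only the minimum and maximum of each pairwise intersection are kept, so shared characters can be missed, exactly as the comment on `getAThreewayIntersect` warns.

```python
def min_and_max_intersect(a, b):
    """Model of minAndMaxIntersect: keep only the smallest and largest shared char."""
    common = a & b
    return {min(common), max(common)} if common else set()

def threeway_intersect(s1, s2, s3):
    """Model of getAThreewayIntersect: an under-approximation of s1 & s2 & s3."""
    return (
        (min_and_max_intersect(s1, s2) & s3)
        | (min_and_max_intersect(s1, s3) & s2)
        | (min_and_max_intersect(s2, s3) & s1)
    )

digits = set("0123456789")
word = set("abc0123456789_")  # a toy stand-in for \w
print(sorted(threeway_intersect(digits, {"5"}, word)))  # ['5']

# Incompleteness: the true three-way intersection below is {'2', '5', '7'},
# but '5' is dropped because only the extremes of each pairwise intersection
# are considered.
print(sorted(threeway_intersect(set("2357"), set("1257"), set("0257"))))  # ['2', '7']
```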
private newtype TTrace =
|
||||
Nil() or
|
||||
Step(InputSymbol s1, InputSymbol s2, InputSymbol s3, TTrace t) {
|
||||
exists(StateTuple p |
|
||||
isReachableFromStartTuple(_, _, p, t, _) and
|
||||
step(p, s1, s2, s3, _)
|
||||
)
|
||||
or
|
||||
exists(State pivot, State succ | isStartLoops(pivot, succ) |
|
||||
t = Nil() and step(MkStateTuple(pivot, pivot, succ), s1, s2, s3, _)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* A list of tuples of input symbols that describe a path in the product automaton
|
||||
* starting from some start state.
|
||||
*/
|
||||
class Trace extends TTrace {
|
||||
/**
|
||||
* Gets a string representation of this Trace that can be used for debug purposes.
|
||||
*/
|
||||
string toString() {
|
||||
this = Nil() and result = "Nil()"
|
||||
or
|
||||
exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, Trace t | this = Step(s1, s2, s3, t) |
|
||||
result = "Step(" + s1 + ", " + s2 + ", " + s3 + ", " + t + ")"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there exists a transition from `r` to `q` in the product automaton.
|
||||
* Notice that the arguments are flipped, and thus the direction is backwards.
|
||||
*/
|
||||
pragma[noinline]
|
||||
predicate tupleDeltaBackwards(StateTuple q, StateTuple r) { step(r, _, _, _, q) }
|
||||
|
||||
/**
|
||||
* Holds if `tuple` is an end state in our search.
|
||||
* That means there exists a pair of loops `(pivot, succ)` such that `tuple = (pivot, succ, succ)`.
|
||||
*/
|
||||
predicate isEndTuple(StateTuple tuple) { tuple = getAnEndTuple(_, _) }
|
||||
|
||||
/**
|
||||
* Gets the minimum length of a path from `r` to some end state `end`.
|
||||
*
|
||||
* The implementation searches backwards from the end-tuple.
|
||||
* This approach was chosen because it is way more efficient if the first predicate given to `shortestDistances` is small.
|
||||
* The `end` argument must always be an end state.
|
||||
*/
|
||||
int distBackFromEnd(StateTuple r, StateTuple end) =
|
||||
shortestDistances(isEndTuple/1, tupleDeltaBackwards/2)(end, r, result)
|
||||
|
||||
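A minimal Python sketch of this design choice (not part of the diff): distances are computed by a multi-source BFS that starts from the small set of end tuples and walks the reversed step relation, rather than searching forwards from every reachable tuple. The node names are hypothetical placeholders for state tuples.

```python
from collections import deque

def dist_back_from_end(reverse_edges, end_nodes):
    """Multi-source BFS from the (few) end tuples over reversed transitions.

    reverse_edges maps a node to the nodes that can step *to* it.
    """
    dist = {end: 0 for end in end_nodes}
    queue = deque(end_nodes)
    while queue:
        node = queue.popleft()
        for pred in reverse_edges.get(node, ()):
            if pred not in dist:
                dist[pred] = dist[node] + 1
                queue.append(pred)
    return dist

print(dist_back_from_end({"t1": ["t0"], "end": ["t1"]}, ["end"]))
# {'end': 0, 't1': 1, 't0': 2}
```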
/**
|
||||
* Holds if there exists a pair of repetitions `(pivot, succ)` in the regular expression such that:
|
||||
* `tuple` is reachable from `(pivot, pivot, succ)` in the product automaton,
|
||||
* and there is a distance of `dist` from `tuple` to the nearest end-tuple `(pivot, succ, succ)`,
|
||||
* and a path from a start-state to `tuple` follows the transitions in `trace`.
|
||||
*/
|
||||
predicate isReachableFromStartTuple(State pivot, State succ, StateTuple tuple, Trace trace, int dist) {
|
||||
// base case. The first step is inlined to start the search after all possible 1-steps, and not just the ones with the shortest path.
|
||||
exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, State q1, State q2, State q3 |
|
||||
isStartLoops(pivot, succ) and
|
||||
step(MkStateTuple(pivot, pivot, succ), s1, s2, s3, tuple) and
|
||||
tuple = MkStateTuple(q1, q2, q3) and
|
||||
trace = Step(s1, s2, s3, Nil()) and
|
||||
dist = distBackFromEnd(tuple, MkStateTuple(pivot, succ, succ))
|
||||
)
|
||||
or
|
||||
// recursive case
|
||||
exists(StateTuple p, Trace v, InputSymbol s1, InputSymbol s2, InputSymbol s3 |
|
||||
isReachableFromStartTuple(pivot, succ, p, v, dist + 1) and
|
||||
dist = isReachableFromStartTupleHelper(pivot, succ, tuple, p, s1, s2, s3) and
|
||||
trace = Step(s1, s2, s3, v)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper predicate for the recursive case in `isReachableFromStartTuple`.
|
||||
*/
|
||||
pragma[noinline]
|
||||
private int isReachableFromStartTupleHelper(
|
||||
State pivot, State succ, StateTuple r, StateTuple p, InputSymbol s1, InputSymbol s2,
|
||||
InputSymbol s3
|
||||
) {
|
||||
result = distBackFromEnd(r, MkStateTuple(pivot, succ, succ)) and
|
||||
step(p, s1, s2, s3, r)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the tuple `(pivot, succ, succ)` from the product automaton.
|
||||
*/
|
||||
StateTuple getAnEndTuple(State pivot, State succ) {
|
||||
isStartLoops(pivot, succ) and
|
||||
result = MkStateTuple(pivot, succ, succ)
|
||||
}
|
||||
|
||||
private predicate hasSuffix(Trace suffix, Trace t, int i) {
|
||||
// Declaring `t` to be a `RelevantTrace` currently causes a redundant check in the
|
||||
// recursive case, so instead we check it explicitly here.
|
||||
t instanceof RelevantTrace and
|
||||
i = 0 and
|
||||
suffix = t
|
||||
or
|
||||
hasSuffix(Step(_, _, _, suffix), t, i - 1)
|
||||
}
|
||||
|
||||
pragma[noinline]
|
||||
private predicate hasTuple(InputSymbol s1, InputSymbol s2, InputSymbol s3, Trace t, int i) {
|
||||
hasSuffix(Step(s1, s2, s3, _), t, i)
|
||||
}
|
||||
|
||||
private class RelevantTrace extends Trace, Step {
|
||||
RelevantTrace() {
|
||||
exists(State pivot, State succ, StateTuple q |
|
||||
isReachableFromStartTuple(pivot, succ, q, this, _) and
|
||||
q = getAnEndTuple(pivot, succ)
|
||||
)
|
||||
}
|
||||
|
||||
pragma[noinline]
|
||||
private string getAThreewayIntersect(int i) {
|
||||
exists(InputSymbol s1, InputSymbol s2, InputSymbol s3 |
|
||||
hasTuple(s1, s2, s3, this, i) and
|
||||
result = getAThreewayIntersect(s1, s2, s3)
|
||||
)
|
||||
}
|
||||
|
||||
/** Gets a string corresponding to this trace. */
|
||||
// the pragma is needed for the case where `getAThreewayIntersect(s1, s2, s3)` has multiple values,
|
||||
// not for recursion
|
||||
language[monotonicAggregates]
|
||||
string concretise() {
|
||||
result =
|
||||
strictconcat(int i |
|
||||
hasTuple(_, _, _, this, i)
|
||||
|
|
||||
this.getAThreewayIntersect(i) order by i desc
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if matching repetitions of `pump` can:
|
||||
* 1) Transition from `pivot` back to `pivot`.
|
||||
* 2) Transition from `pivot` to `succ`.
|
||||
* 3) Transition from `succ` to `succ`.
|
||||
*
|
||||
* From theorem 3 in the paper linked in the top of this file we can therefore conclude that
|
||||
* the regular expression has polynomial backtracking - if a rejecting suffix exists.
|
||||
*
|
||||
* This predicate is used by `SuperLinearReDoSConfiguration`, and the final results are
|
||||
* available in the `hasReDoSResult` predicate.
|
||||
*/
|
||||
predicate isPumpable(State pivot, State succ, string pump) {
|
||||
exists(StateTuple q, RelevantTrace t |
|
||||
isReachableFromStartTuple(pivot, succ, q, t, _) and
|
||||
q = getAnEndTuple(pivot, succ) and
|
||||
pump = t.concretise()
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if repetitions of `pump` at `t` will cause polynomial backtracking.
|
||||
*/
|
||||
predicate polynimalReDoS(RegExpTerm t, string pump, string prefixMsg, RegExpTerm prev) {
|
||||
exists(State s, State pivot |
|
||||
hasReDoSResult(t, pump, s, prefixMsg) and
|
||||
isPumpable(pivot, s, _) and
|
||||
prev = pivot.getRepr()
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a message for why `term` can cause polynomial backtracking.
|
||||
*/
|
||||
string getReasonString(RegExpTerm term, string pump, string prefixMsg, RegExpTerm prev) {
|
||||
polynimalReDoS(term, pump, prefixMsg, prev) and
|
||||
result =
|
||||
"Strings " + prefixMsg + "with many repetitions of '" + pump +
|
||||
"' can start matching anywhere after the start of the preceeding " + prev
|
||||
}
|
||||
|
||||
/**
|
||||
* A term that may cause a regular expression engine to perform a
|
||||
* polynomial number of match attempts, relative to the input length.
|
||||
*/
|
||||
class PolynomialBackTrackingTerm extends InfiniteRepetitionQuantifier {
|
||||
string reason;
|
||||
string pump;
|
||||
string prefixMsg;
|
||||
RegExpTerm prev;
|
||||
|
||||
PolynomialBackTrackingTerm() {
|
||||
reason = getReasonString(this, pump, prefixMsg, prev) and
|
||||
// there might be many reasons for this term to have polynomial backtracking - we pick the shortest one.
|
||||
reason = min(string msg | msg = getReasonString(this, _, _, _) | msg order by msg.length(), msg)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if all non-empty successors to the polynomial backtracking term match the end of the line.
|
||||
*/
|
||||
predicate isAtEndLine() {
|
||||
forall(RegExpTerm succ | this.getSuccessor+() = succ and not matchesEpsilon(succ) |
|
||||
succ instanceof RegExpDollar
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the string that should be repeated to cause this regular expression to perform polynomially.
|
||||
*/
|
||||
string getPumpString() { result = pump }
|
||||
|
||||
/**
|
||||
* Gets a message for which prefix a matching string must start with for this term to cause polynomial backtracking.
|
||||
*/
|
||||
string getPrefixMessage() { result = prefixMsg }
|
||||
|
||||
/**
|
||||
* Gets a predecessor to `this`, which also loops on the pump string, and thereby causes polynomial backtracking.
|
||||
*/
|
||||
RegExpTerm getPreviousLoop() { result = prev }
|
||||
|
||||
/**
|
||||
* Gets the reason for the number of match attempts.
|
||||
*/
|
||||
string getReason() { result = reason }
|
||||
}
|
||||
deprecated import semmle.python.security.regexp.SuperlinearBackTracking as Dep
|
||||
import Dep
|
||||
|
||||
@@ -0,0 +1,344 @@
|
||||
/**
|
||||
* This library implements the analysis described in the following two papers:
|
||||
*
|
||||
* James Kirrage, Asiri Rathnayake, Hayo Thielecke: Static Analysis for
|
||||
* Regular Expression Denial-of-Service Attacks. NSS 2013.
|
||||
* (http://www.cs.bham.ac.uk/~hxt/research/reg-exp-sec.pdf)
|
||||
* Asiri Rathnayake, Hayo Thielecke: Static Analysis for Regular Expression
|
||||
* Exponential Runtime via Substructural Logics. 2014.
|
||||
* (https://www.cs.bham.ac.uk/~hxt/research/redos_full.pdf)
|
||||
*
|
||||
* The basic idea is to search for overlapping cycles in the NFA, that is,
|
||||
* states `q` such that there are two distinct paths from `q` to itself
|
||||
* that consume the same word `w`.
|
||||
*
|
||||
* For any such state `q`, an attack string can be constructed as follows:
|
||||
* concatenate a prefix `v` that takes the NFA to `q` with `n` copies of
|
||||
* the word `w` that leads back to `q` along two different paths, followed
|
||||
* by a suffix `x` that is _not_ accepted in state `q`. A backtracking
|
||||
* implementation will need to explore at least 2^n different ways of going
|
||||
* from `q` back to itself while trying to match the `n` copies of `w`
|
||||
* before finally giving up.
|
||||
*
|
||||
* Now in order to identify overlapping cycles, all we have to do is find
|
||||
* pumpable forks, that is, states `q` that can transition to two different
|
||||
* states `r1` and `r2` on the same input symbol `c`, such that there are
|
||||
* paths from both `r1` and `r2` to `q` that consume the same word. The latter
|
||||
* condition is equivalent to saying that `(q, q)` is reachable from `(r1, r2)`
|
||||
* in the product NFA.
|
||||
*
|
||||
* This is what the library does. It makes a simple attempt to construct a
|
||||
* prefix `v` leading into `q`, but only to improve the alert message.
|
||||
* And the library tries to prove the existence of a suffix that ensures
|
||||
* rejection. This check might fail, which can cause false positives.
|
||||
*
|
||||
* Finally, sometimes it depends on the translation whether the NFA generated
|
||||
* for a regular expression has a pumpable fork or not. We implement one
|
||||
* particular translation, which may result in false positives or negatives
|
||||
* relative to some particular JavaScript engine.
|
||||
*
|
||||
* More precisely, the library constructs an NFA from a regular expression `r`
|
||||
* as follows:
|
||||
*
|
||||
* * Every sub-term `t` gives rise to an NFA state `Match(t,i)`, representing
|
||||
* the state of the automaton before attempting to match the `i`th character in `t`.
|
||||
* * There is one accepting state `Accept(r)`.
|
||||
* * There is a special `AcceptAnySuffix(r)` state, which accepts any suffix string
|
||||
* by using an epsilon transition to `Accept(r)` and an any transition to itself.
|
||||
* * Transitions between states may be labelled with epsilon, or an abstract
|
||||
* input symbol.
|
||||
* * Each abstract input symbol represents a set of concrete input characters:
|
||||
* either a single character, a set of characters represented by a
|
||||
* character class, or the set of all characters.
|
||||
* * The product automaton is constructed lazily, starting with pair states
|
||||
* `(q, q)` where `q` is a fork, and proceeding along an over-approximate
|
||||
* step relation.
|
||||
* * The over-approximate step relation allows transitions along pairs of
|
||||
* abstract input symbols where the symbols have overlap in the characters they accept.
|
||||
* * Once a trace of pairs of abstract input symbols that leads from a fork
|
||||
* back to itself has been identified, we attempt to construct a concrete
|
||||
* string corresponding to it, which may fail.
|
||||
* * Lastly we ensure that any state reached by repeating `n` copies of `w` has
|
||||
* a suffix `x` (possibly empty) that is most likely __not__ accepted.
|
||||
*/
|
||||
|
||||
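For intuition (not part of the diff), the attack-string construction described above can be reproduced with Python's backtracking `re` engine on the classic pattern `(a+)+b`: the prefix `v` is empty, the pump `w` is `"a"` (after each `a` the engine can either extend the inner `a+` or start a new iteration of the outer `+`), and the suffix `"!"` guarantees rejection, so on the order of 2^n paths are explored.

```python
import re
import time

# Exponential-backtracking demonstration (a sketch, not part of the diff).
pattern = re.compile(r"^(a+)+b$")

for n in (16, 18, 20, 22):
    attack = "a" * n + "!"  # empty prefix, n copies of the pump "a", rejecting suffix "!"
    start = time.perf_counter()
    assert pattern.match(attack) is None
    print(f"n = {n}: {time.perf_counter() - start:.3f}s")  # roughly 4x per extra two a's
```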
import NfaUtils
|
||||
|
||||
/**
|
||||
* Holds if state `s` might be inside a backtracking repetition.
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate stateInsideBacktracking(State s) {
|
||||
s.getRepr().getParent*() instanceof MaybeBacktrackingRepetition
|
||||
}
|
||||
|
||||
/**
|
||||
* An infinitely repeating quantifier that might backtrack.
|
||||
*/
|
||||
private class MaybeBacktrackingRepetition extends InfiniteRepetitionQuantifier {
|
||||
MaybeBacktrackingRepetition() {
|
||||
exists(RegExpTerm child |
|
||||
child instanceof RegExpAlt or
|
||||
child instanceof RegExpQuantifier
|
||||
|
|
||||
child.getParent+() = this
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A state in the product automaton.
|
||||
*/
|
||||
private newtype TStatePair =
|
||||
/**
|
||||
* We lazily only construct those states that we are actually
|
||||
* going to need: `(q, q)` for every fork state `q`, and any
|
||||
* pair of states that can be reached from a pair that we have
|
||||
* already constructed. To cut down on the number of states,
|
||||
* we only represent states `(q1, q2)` where `q1` is lexicographically
|
||||
* no bigger than `q2`.
|
||||
*
|
||||
* States are only constructed if both states in the pair are
|
||||
* inside a repetition that might backtrack.
|
||||
*/
|
||||
MkStatePair(State q1, State q2) {
|
||||
isFork(q1, _, _, _, _) and q2 = q1
|
||||
or
|
||||
(step(_, _, _, q1, q2) or step(_, _, _, q2, q1)) and
|
||||
rankState(q1) <= rankState(q2)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a unique number for a `state`.
|
||||
* Is used to create an ordering of states, where states with the same `toString()` will be ordered differently.
|
||||
*/
|
||||
private int rankState(State state) {
|
||||
state =
|
||||
rank[result](State s, Location l |
|
||||
l = s.getRepr().getLocation()
|
||||
|
|
||||
s order by l.getStartLine(), l.getStartColumn(), s.toString()
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* A state in the product automaton.
|
||||
*/
|
||||
private class StatePair extends TStatePair {
|
||||
State q1;
|
||||
State q2;
|
||||
|
||||
StatePair() { this = MkStatePair(q1, q2) }
|
||||
|
||||
/** Gets a textual representation of this element. */
|
||||
string toString() { result = "(" + q1 + ", " + q2 + ")" }
|
||||
|
||||
/** Gets the first component of the state pair. */
|
||||
State getLeft() { result = q1 }
|
||||
|
||||
/** Gets the second component of the state pair. */
|
||||
State getRight() { result = q2 }
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds for `(fork, fork)` state pairs when `isFork(fork, _, _, _, _)` holds.
|
||||
*
|
||||
* Used in `statePairDistToFork`
|
||||
*/
|
||||
private predicate isStatePairFork(StatePair p) {
|
||||
exists(State fork | p = MkStatePair(fork, fork) and isFork(fork, _, _, _, _))
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to the corresponding
|
||||
* components of `r`.
|
||||
*
|
||||
* Used in `statePairDistToFork`
|
||||
*/
|
||||
private predicate reverseStep(StatePair r, StatePair q) { step(q, _, _, r) }
|
||||
|
||||
/**
|
||||
* Gets the minimum length of a path from `q` to `r` in the
|
||||
* product automaton.
|
||||
*/
|
||||
private int statePairDistToFork(StatePair q, StatePair r) =
|
||||
shortestDistances(isStatePairFork/1, reverseStep/2)(r, q, result)
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from `q` to `r1` and from `q` to `r2`
|
||||
* labelled with `s1` and `s2`, respectively, where `s1` and `s2` do not
|
||||
* trivially have an empty intersection.
|
||||
*
|
||||
* This predicate only holds for states associated with regular expressions
|
||||
* that have at least one repetition quantifier in them (otherwise the
|
||||
* expression cannot be vulnerable to ReDoS attacks anyway).
|
||||
*/
|
||||
pragma[noopt]
|
||||
private predicate isFork(State q, InputSymbol s1, InputSymbol s2, State r1, State r2) {
|
||||
stateInsideBacktracking(q) and
|
||||
exists(State q1, State q2 |
|
||||
q1 = epsilonSucc*(q) and
|
||||
delta(q1, s1, r1) and
|
||||
q2 = epsilonSucc*(q) and
|
||||
delta(q2, s2, r2) and
|
||||
// Use pragma[noopt] to prevent intersect(s1,s2) from being the starting point of the join.
|
||||
// From (s1,s2) it would find a huge number of intermediate state pairs (q1,q2) originating from different literals,
|
||||
// and discover at the end that no `q` can reach both `q1` and `q2` by epsilon transitions.
|
||||
exists(intersect(s1, s2))
|
||||
|
|
||||
s1 != s2
|
||||
or
|
||||
r1 != r2
|
||||
or
|
||||
r1 = r2 and q1 != q2
|
||||
or
|
||||
// If q can reach itself by epsilon transitions, then there are two distinct paths to the q1/q2 state:
|
||||
// one that uses the loop and one that doesn't. The engine will separately attempt to match with each path,
|
||||
// despite ending in the same state. The "fork" thus arises from the choice of whether to use the loop or not.
|
||||
// To avoid every state in the loop becoming a fork state,
|
||||
// we arbitrarily pick the InfiniteRepetitionQuantifier state as the canonical fork state for the loop
|
||||
// (every epsilon-loop must contain such a state).
|
||||
//
|
||||
// We additionally require that there exists another InfiniteRepetitionQuantifier `mid` on the path from `q` to itself.
|
||||
// This is done to avoid flagging regular expressions such as `/(a?)*b/` - that only has polynomial runtime, and is detected by `js/polynomial-redos`.
|
||||
// The below code is therefore a heuristic that only flags regular expressions such as `/(a*)*b/`,
|
||||
// and does not flag regular expressions such as `/(a?b?)c/`, but the latter pattern is not used frequently.
|
||||
r1 = r2 and
|
||||
q1 = q2 and
|
||||
epsilonSucc+(q) = q and
|
||||
exists(RegExpTerm term | term = q.getRepr() | term instanceof InfiniteRepetitionQuantifier) and
|
||||
// One of the mid states is an infinite quantifier itself
|
||||
exists(State mid, RegExpTerm term |
|
||||
mid = epsilonSucc+(q) and
|
||||
term = mid.getRepr() and
|
||||
term instanceof InfiniteRepetitionQuantifier and
|
||||
q = epsilonSucc+(mid) and
|
||||
not mid = q
|
||||
)
|
||||
) and
|
||||
stateInsideBacktracking(r1) and
|
||||
stateInsideBacktracking(r2)
|
||||
}
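As a concrete illustration of the heuristic in `isFork` above (not part of the library, and assuming CPython's backtracking `re` engine), the pattern `(a*)*b` that the comment says should be flagged does blow up on rejecting inputs, while `(a?)*b` is deferred to the polynomial query:

import re
import time

exponential = re.compile(r'(a*)*b')  # nested unbounded repetition, as in the comment above

for n in range(14, 23, 2):
    s = 'a' * n  # the missing final 'b' forces the engine to try every split of the a's
    start = time.perf_counter()
    exponential.match(s)
    print(n, round(time.perf_counter() - start, 3))
# The time roughly doubles for every extra 'a'; `(a?)*b`, by contrast, only
# backtracks polynomially and is left to the polynomial ReDoS query.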
|
||||
|
||||
/**
|
||||
* Gets the state pair `(q1, q2)` or `(q2, q1)`; note that only
|
||||
* one or the other is defined.
|
||||
*/
|
||||
private StatePair mkStatePair(State q1, State q2) {
|
||||
result = MkStatePair(q1, q2) or result = MkStatePair(q2, q1)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to the corresponding
|
||||
* components of `r` labelled with `s1` and `s2`, respectively.
|
||||
*/
|
||||
private predicate step(StatePair q, InputSymbol s1, InputSymbol s2, StatePair r) {
|
||||
exists(State r1, State r2 | step(q, s1, s2, r1, r2) and r = mkStatePair(r1, r2))
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to `r1` and `r2`
|
||||
* labelled with `s1` and `s2`, respectively.
|
||||
*
|
||||
* We only consider transitions where the resulting states `(r1, r2)` are both
|
||||
* inside a repetition that might backtrack.
|
||||
*/
|
||||
pragma[noopt]
|
||||
private predicate step(StatePair q, InputSymbol s1, InputSymbol s2, State r1, State r2) {
|
||||
exists(State q1, State q2 | q.getLeft() = q1 and q.getRight() = q2 |
|
||||
deltaClosed(q1, s1, r1) and
|
||||
deltaClosed(q2, s2, r2) and
|
||||
// use noopt to force the join on `intersect` to happen last.
|
||||
exists(intersect(s1, s2))
|
||||
) and
|
||||
stateInsideBacktracking(r1) and
|
||||
stateInsideBacktracking(r2)
|
||||
}
|
||||
|
||||
private newtype TTrace =
|
||||
Nil() or
|
||||
Step(InputSymbol s1, InputSymbol s2, TTrace t) { isReachableFromFork(_, _, s1, s2, t, _) }
|
||||
|
||||
/**
|
||||
* A list of pairs of input symbols that describe a path in the product automaton
|
||||
* starting from some fork state.
|
||||
*/
|
||||
private class Trace extends TTrace {
|
||||
/** Gets a textual representation of this element. */
|
||||
string toString() {
|
||||
this = Nil() and result = "Nil()"
|
||||
or
|
||||
exists(InputSymbol s1, InputSymbol s2, Trace t | this = Step(s1, s2, t) |
|
||||
result = "Step(" + s1 + ", " + s2 + ", " + t + ")"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `r` is reachable from `(fork, fork)` under input `w`, and there is
|
||||
* a path from `r` back to `(fork, fork)` with `rem` steps.
|
||||
*/
|
||||
private predicate isReachableFromFork(State fork, StatePair r, Trace w, int rem) {
|
||||
exists(InputSymbol s1, InputSymbol s2, Trace v |
|
||||
isReachableFromFork(fork, r, s1, s2, v, rem) and
|
||||
w = Step(s1, s2, v)
|
||||
)
|
||||
}
|
||||
|
||||
private predicate isReachableFromFork(
|
||||
State fork, StatePair r, InputSymbol s1, InputSymbol s2, Trace v, int rem
|
||||
) {
|
||||
// base case
|
||||
exists(State q1, State q2 |
|
||||
isFork(fork, s1, s2, q1, q2) and
|
||||
r = MkStatePair(q1, q2) and
|
||||
v = Nil() and
|
||||
rem = statePairDistToFork(r, MkStatePair(fork, fork))
|
||||
)
|
||||
or
|
||||
// recursive case
|
||||
exists(StatePair p |
|
||||
isReachableFromFork(fork, p, v, rem + 1) and
|
||||
step(p, s1, s2, r) and
|
||||
rem = statePairDistToFork(r, MkStatePair(fork, fork))
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a state in the product automaton from which `(fork, fork)` is
|
||||
* reachable in zero or more epsilon transitions.
|
||||
*/
|
||||
private StatePair getAForkPair(State fork) {
|
||||
isFork(fork, _, _, _, _) and
|
||||
result = MkStatePair(epsilonPred*(fork), epsilonPred*(fork))
|
||||
}
|
||||
|
||||
/** An implementation of a chain containing chars for use by `Concretizer`. */
|
||||
private module CharTreeImpl implements CharTree {
|
||||
class CharNode = Trace;
|
||||
|
||||
CharNode getPrev(CharNode t) { t = Step(_, _, result) }
|
||||
|
||||
/** Holds if `n` is a trace that is used by `concretize` in `isPumpable`. */
|
||||
predicate isARelevantEnd(CharNode n) {
|
||||
exists(State f | isReachableFromFork(f, getAForkPair(f), n, _))
|
||||
}
|
||||
|
||||
string getChar(CharNode t) {
|
||||
exists(InputSymbol s1, InputSymbol s2 | t = Step(s1, s2, _) | result = intersect(s1, s2))
|
||||
}
|
||||
}
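A rough Python picture of what `Concretizer` does with the `CharTreeImpl` above (illustrative only; the trace representation below is invented for the example): walk the steps of a trace and pick one character from the intersection of the two edge labels at every step, yielding a concrete pump string.

def concretize(trace):
    """trace: list of (chars_on_first_edge, chars_on_second_edge), oldest step first."""
    # one witness character per pair of input symbols is enough to build the pump string
    return ''.join(min(set(a) & set(b)) for a, b in trace)

# two steps whose edge labels overlap: the pump string is one shared char per step
print(concretize([('0123456789', '5'), ('0123456789', '0123456789')]))  # prints "50"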
|
||||
|
||||
/**
|
||||
* Holds if `fork` is a pumpable fork with word `w`.
|
||||
*/
|
||||
private predicate isPumpable(State fork, string w) {
|
||||
exists(StatePair q, Trace t |
|
||||
isReachableFromFork(fork, q, t, _) and
|
||||
q = getAForkPair(fork) and
|
||||
w = Concretizer<CharTreeImpl>::concretize(t)
|
||||
)
|
||||
}
|
||||
|
||||
/** Holds if `state` has exponential ReDoS */
|
||||
predicate hasReDoSResult = ReDoSPruning<isPumpable/2>::hasReDoSResult/4;
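To see why a pumpable fork is harmful, here is a deliberately naive backtracking matcher (a sketch, not the library's NFA) that counts match attempts. In `(a|a)*b` every 'a' can be consumed along two distinct paths that end in the same state, so the attempt count doubles per character on a rejecting input, whereas plain `a*b` stays linear:

def count_attempts(terms, s):
    attempts = 0

    def match(terms, i):
        nonlocal attempts
        attempts += 1
        if not terms:
            return i == len(s)  # anchored: the whole string must be consumed
        head, rest = terms[0], terms[1:]
        kind = head[0]
        if kind == 'char':
            return i < len(s) and s[i] == head[1] and match(rest, i + 1)
        if kind == 'alt':   # ('alt', left, right)
            return match(head[1] + rest, i) or match(head[2] + rest, i)
        if kind == 'star':  # ('star', body); the bodies used here never match empty
            return match(head[1] + [head] + rest, i) or match(rest, i)
        raise ValueError(kind)

    match(terms, 0)
    return attempts

A, B = ('char', 'a'), ('char', 'b')
forked = [('star', [('alt', [A], [A])]), B]  # (a|a)*b -- a pumpable fork, pump string "a"
plain = [('star', [A]), B]                   # a*b     -- no fork

for n in range(2, 15, 2):
    s = 'a' * n  # rejecting input: the final 'b' is missing
    print(n, count_attempts(forked, s), count_attempts(plain, s))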
|
||||
python/ql/lib/semmle/python/security/regexp/NfaUtils.qll (1319 lines)
File diff suppressed because it is too large
@@ -1,5 +1,5 @@
|
||||
/**
|
||||
* Provides Python-specific definitions for use in the ReDoSUtil module.
|
||||
* Provides Python-specific definitions for use in the NfaUtils module.
|
||||
*/
|
||||
|
||||
import python
|
||||
python/ql/lib/semmle/python/security/regexp/RegexpMatching.qll (new file, 157 lines)
@@ -0,0 +1,157 @@
|
||||
/**
|
||||
* Provides predicates for reasoning about which strings are matched by a regular expression,
|
||||
* and for testing which capture groups are filled when a particular regexp matches a string.
|
||||
*/
|
||||
|
||||
import NfaUtils
|
||||
|
||||
/** A root term */
|
||||
class RootTerm extends RegExpTerm {
|
||||
RootTerm() { this.isRootTerm() }
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if it should be tested whether `root` matches `str`.
|
||||
*
|
||||
* If `ignorePrefix` is true, then a regexp without a start anchor will be treated as if it had a start anchor.
|
||||
* E.g. a regular expression `/foo$/` will match any string that ends with "foo",
|
||||
* but if `ignorePrefix` is true, it will only match "foo".
|
||||
*
|
||||
* If `testWithGroups` is true, then the `RegexpMatching::fillsCaptureGroup` predicate can be used to determine which capture
|
||||
* groups are filled by a string.
|
||||
*/
|
||||
signature predicate isRegexpMatchingCandidateSig(
|
||||
RootTerm root, string str, boolean ignorePrefix, boolean testWithGroups
|
||||
);
|
||||
|
||||
/**
|
||||
* A module for determining if a regexp matches a given string,
|
||||
* and reasoning about which capture groups are filled by a given string.
|
||||
*
|
||||
* The module parameter `isCandidate` determines which strings should be tested,
|
||||
* and the results can be read from the `matches` and `fillsCaptureGroup` predicates.
|
||||
*/
|
||||
module RegexpMatching<isRegexpMatchingCandidateSig/4 isCandidate> {
|
||||
/**
|
||||
* Gets a state the regular expression `reg` can be in after matching the `i`th char in `str`.
|
||||
* The regular expression is modeled as a non-deterministic finite automaton;
|
||||
* the regular expression can therefore be in multiple states after matching a character.
|
||||
*
|
||||
* It's a forward search to all possible states, and there is thus no guarantee that the state is on a path to an accepting state.
|
||||
*/
|
||||
private State getAState(RootTerm reg, int i, string str, boolean ignorePrefix) {
|
||||
// start state, the -1 position before any chars have been matched
|
||||
i = -1 and
|
||||
isCandidate(reg, str, ignorePrefix, _) and
|
||||
result.getRepr().getRootTerm() = reg and
|
||||
isStartState(result)
|
||||
or
|
||||
// recursive case
|
||||
result = getAStateAfterMatching(reg, _, str, i, _, ignorePrefix)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the next state after the `prev` state from `reg`.
|
||||
* `prev` is the state after matching `fromIndex` chars in `str`,
|
||||
* and the result is the state after matching `toIndex` chars in `str`.
|
||||
*
|
||||
* This predicate is used as a step relation in the forwards search (`getAState`),
|
||||
* and also as a step relation in the later backwards search (`getAStateThatReachesAccept`).
|
||||
*/
|
||||
private State getAStateAfterMatching(
|
||||
RootTerm reg, State prev, string str, int toIndex, int fromIndex, boolean ignorePrefix
|
||||
) {
|
||||
// the basic recursive case - outlined into a noopt helper to make performance work out.
|
||||
result = getAStateAfterMatchingAux(reg, prev, str, toIndex, fromIndex, ignorePrefix)
|
||||
or
|
||||
// we can skip past word boundaries if the next char is a non-word char.
|
||||
fromIndex = toIndex and
|
||||
prev.getRepr() instanceof RegExpWordBoundary and
|
||||
prev = getAState(reg, toIndex, str, ignorePrefix) and
|
||||
after(prev.getRepr()) = result and
|
||||
str.charAt(toIndex + 1).regexpMatch("\\W") // \W matches any non-word char.
|
||||
}
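For reference (a plain Python illustration, not library code): the zero-width `\b` assertion succeeds exactly when the next character is a non-word character, which is the situation the special case above steps over without consuming input.

import re

print(bool(re.fullmatch(r'foo\b!', 'foo!')))      # True: '!' is a non-word char, so \b holds
print(bool(re.fullmatch(r'foo\bbar', 'foobar')))  # False: 'b' is a word char, so there is no boundary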
|
||||
|
||||
pragma[noopt]
|
||||
private State getAStateAfterMatchingAux(
|
||||
RootTerm reg, State prev, string str, int toIndex, int fromIndex, boolean ignorePrefix
|
||||
) {
|
||||
prev = getAState(reg, fromIndex, str, ignorePrefix) and
|
||||
fromIndex = toIndex - 1 and
|
||||
exists(string char | char = str.charAt(toIndex) | specializedDeltaClosed(prev, char, result)) and
|
||||
not discardedPrefixStep(prev, result, ignorePrefix)
|
||||
}
|
||||
|
||||
/** Holds if a step from `prev` to `next` should be discarded when the `ignorePrefix` flag is set. */
|
||||
private predicate discardedPrefixStep(State prev, State next, boolean ignorePrefix) {
|
||||
prev = mkMatch(any(RegExpRoot r)) and
|
||||
ignorePrefix = true and
|
||||
next = prev
|
||||
}
|
||||
|
||||
// The `deltaClosed` relation specialized to the chars that exist in the strings from the `isCandidate` predicate.
|
||||
private predicate specializedDeltaClosed(State prev, string char, State next) {
|
||||
deltaClosed(prev, specializedGetAnInputSymbolMatching(char), next)
|
||||
}
|
||||
|
||||
// The `getAnInputSymbolMatching` relation specialized to the chars that exist in the strings from the `isCandidate` predicate.
|
||||
pragma[noinline]
|
||||
private InputSymbol specializedGetAnInputSymbolMatching(string char) {
|
||||
exists(string s, RootTerm r | isCandidate(r, s, _, _) | char = s.charAt(_)) and
|
||||
result = getAnInputSymbolMatching(char)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the `i`th state on a path to the accepting state when `reg` matches `str`.
|
||||
* Starts with an accepting state as found by `getAState` and searches backwards
|
||||
* to the start state through the reachable states (as found by `getAState`).
|
||||
*
|
||||
* This predicate satisfies the invariant that the result state can be reached with `i` steps from a start state,
|
||||
* and an accepting state can be found after (`str.length() - 1 - i`) steps from the result.
|
||||
* The result state is therefore always on a valid path where `reg` accepts `str`.
|
||||
*
|
||||
* This predicate is only used to find which capture groups a regular expression has filled,
|
||||
* and thus the search is only performed for the strings in the `testWithGroups(..)` predicate.
|
||||
*/
|
||||
private State getAStateThatReachesAccept(RootTerm reg, int i, string str, boolean ignorePrefix) {
|
||||
// base case, reaches an accepting state from the last state in `getAState(..)`
|
||||
isCandidate(reg, str, ignorePrefix, true) and
|
||||
i = str.length() - 1 and
|
||||
result = getAState(reg, i, str, ignorePrefix) and
|
||||
epsilonSucc*(result) = Accept(_)
|
||||
or
|
||||
// recursive case. `next` is the next state to be matched after matching `prev`.
|
||||
// this predicate is doing a backwards search, so `prev` is the result we are looking for.
|
||||
exists(State next, State prev, int fromIndex, int toIndex |
|
||||
next = getAStateThatReachesAccept(reg, toIndex, str, ignorePrefix) and
|
||||
next = getAStateAfterMatching(reg, prev, str, toIndex, fromIndex, ignorePrefix) and
|
||||
i = fromIndex and
|
||||
result = prev
|
||||
)
|
||||
}
|
||||
|
||||
/** Gets the capture group number that `term` belongs to. */
|
||||
private int group(RegExpTerm term) {
|
||||
exists(RegExpGroup grp | grp.getNumber() = result | term.getParent*() = grp)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `reg` matches `str`, where `str` is in the `isCandidate` predicate.
|
||||
*/
|
||||
predicate matches(RootTerm reg, string str) {
|
||||
exists(State state | state = getAState(reg, str.length() - 1, str, _) |
|
||||
epsilonSucc*(state) = Accept(_)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if matching `str` against `reg` may fill capture group number `g`.
|
||||
* Only holds if `str` is in the `testWithGroups` predicate.
|
||||
*/
|
||||
predicate fillsCaptureGroup(RootTerm reg, string str, int g) {
|
||||
exists(State s |
|
||||
s = getAStateThatReachesAccept(reg, _, str, _) and
|
||||
g = group(s.getRepr())
|
||||
)
|
||||
}
|
||||
}
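A Python analogue of what `matches` and `fillsCaptureGroup` report for a handful of candidate strings (illustrative only; the filter regexp below is a made-up example, not taken from the library):

import re

filter_re = re.compile(r'<!--(.*?)-->|<(\w+)>')  # hypothetical example regexp

for s in ['<!-- foo -->', '<script>', '<!-- foo --!>']:
    m = filter_re.fullmatch(s)
    if m is None:
        print(s, '-> no match')
    else:
        filled = [g for g in range(1, filter_re.groups + 1) if m.group(g) is not None]
        print(s, '-> matches, fills capture groups', filled)
# '<!-- foo -->' fills group 1, '<script>' fills group 2, '<!-- foo --!>' does not match.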
|
||||
@@ -0,0 +1,418 @@
|
||||
/**
|
||||
* Provides classes for working with regular expressions that can
|
||||
* perform backtracking in superlinear time.
|
||||
*/
|
||||
|
||||
import NfaUtils
|
||||
|
||||
/*
|
||||
* This module implements the analysis described in the paper:
|
||||
* Valentin Wustholz, Oswaldo Olivo, Marijn J. H. Heule, and Isil Dillig:
|
||||
* Static Detection of DoS Vulnerabilities in
|
||||
* Programs that use Regular Expressions
|
||||
* (Extended Version).
|
||||
* (https://arxiv.org/pdf/1701.04045.pdf)
|
||||
*
|
||||
* Theorem 3 from the paper describes the basic idea.
|
||||
*
|
||||
* The following explains the idea using variables and predicate names that are used in the implementation:
|
||||
* We consider a pair of repetitions, which we will call `pivot` and `succ`.
|
||||
*
|
||||
* We create a product automaton of 3-tuples of states (see `StateTuple`).
|
||||
* There exists a transition `(a,b,c) -> (d,e,f)` in the product automaton
|
||||
* iff there exist three transitions in the NFA `a->d, b->e, c->f` where those three
|
||||
* transitions all match a shared character `char`. (see `getAThreewayIntersect`)
|
||||
*
|
||||
* We start a search in the product automaton at `(pivot, pivot, succ)`,
|
||||
* and search for a series of transitions (a `Trace`), such that we end
|
||||
* at `(pivot, succ, succ)` (see `isReachableFromStartTuple`).
|
||||
*
|
||||
* For example, consider the regular expression `/^\d*5\w*$/`.
|
||||
* The search will start at the tuple `(\d*, \d*, \w*)` and search
|
||||
* for a path to `(\d*, \w*, \w*)`.
|
||||
* This path exists, and consists of a single transition in the product automaton,
|
||||
* where the three corresponding NFA edges all match the character `"5"`.
|
||||
*
|
||||
* The start-state in the NFA has an any-transition to itself; this allows us to
|
||||
* flag regular expressions such as `/a*$/` - which does not have a start anchor -
|
||||
* and can thus start matching anywhere.
|
||||
*
|
||||
* The implementation is not perfect.
|
||||
* It has the same suffix detection issue as the `js/redos` query, which can cause false positives.
|
||||
* It also doesn't find all transitions in the product automaton, which can cause false negatives.
|
||||
*/
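The `/^\d*5\w*$/` example above can be reproduced directly with CPython's backtracking engine (an illustration, not part of the analysis): strings of '5' followed by a rejecting character make `\d*` and `\w*` compete for the same characters, so the matching time grows roughly quadratically.

import re
import time

pattern = re.compile(r'^\d*5\w*$')

for n in (2_000, 4_000, 8_000):
    s = '5' * n + '!'  # the trailing '!' guarantees rejection after much backtracking
    start = time.perf_counter()
    pattern.match(s)
    print(n, round(time.perf_counter() - start, 3))
# Doubling n roughly quadruples the time; the largest input may take a second or two.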
|
||||
|
||||
/**
|
||||
* Gets any root (start) state of a regular expression.
|
||||
*/
|
||||
private State getRootState() { result = mkMatch(any(RegExpRoot r)) }
|
||||
|
||||
private newtype TStateTuple =
|
||||
MkStateTuple(State q1, State q2, State q3) {
|
||||
// starts at (pivot, pivot, succ)
|
||||
isStartLoops(q1, q3) and q1 = q2
|
||||
or
|
||||
step(_, _, _, _, q1, q2, q3) and FeasibleTuple::isFeasibleTuple(q1, q2, q3)
|
||||
}
|
||||
|
||||
/**
|
||||
* A state in the product automaton.
|
||||
* The product automaton contains 3-tuples of states.
|
||||
*
|
||||
* We lazily only construct those states that we are actually
|
||||
* going to need.
|
||||
* Either a start state `(pivot, pivot, succ)`, or a state
|
||||
* where there exists a transition from an already existing state.
|
||||
*
|
||||
* The exponential variant of this query (`js/redos`) uses an optimization
|
||||
* trick where `q1 <= q2`. This trick cannot be used here as the order
|
||||
* of the elements matters.
|
||||
*/
|
||||
class StateTuple extends TStateTuple {
|
||||
State q1;
|
||||
State q2;
|
||||
State q3;
|
||||
|
||||
StateTuple() { this = MkStateTuple(q1, q2, q3) }
|
||||
|
||||
/**
|
||||
* Gets a string representation of this tuple.
|
||||
*/
|
||||
string toString() { result = "(" + q1 + ", " + q2 + ", " + q3 + ")" }
|
||||
|
||||
/**
|
||||
* Holds if this tuple is `(r1, r2, r3)`.
|
||||
*/
|
||||
pragma[noinline]
|
||||
predicate isTuple(State r1, State r2, State r3) { r1 = q1 and r2 = q2 and r3 = q3 }
|
||||
}
|
||||
|
||||
/**
|
||||
* A module for determining feasible tuples for the product automaton.
|
||||
*
|
||||
* The implementation is split into many predicates for performance reasons.
|
||||
*/
|
||||
private module FeasibleTuple {
|
||||
/**
|
||||
* Holds if the tuple `(r1, r2, r3)` might be on path from a start-state to an end-state in the product automaton.
|
||||
*/
|
||||
pragma[inline]
|
||||
predicate isFeasibleTuple(State r1, State r2, State r3) {
|
||||
// The first element is either inside a repetition (or the start state itself)
|
||||
isRepetitionOrStart(r1) and
|
||||
// The last element is inside a repetition
|
||||
stateInsideRepetition(r3) and
|
||||
// The states are reachable in the NFA in the order r1 -> r2 -> r3
|
||||
delta+(r1) = r2 and
|
||||
delta+(r2) = r3 and
|
||||
// The first element can reach a beginning (the "pivot" state in a `(pivot, succ)` pair).
|
||||
canReachABeginning(r1) and
|
||||
// The last element can reach a target (the "succ" state in a `(pivot, succ)` pair).
|
||||
canReachATarget(r3)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `s` is either inside a repetition, or is the start state (which is a repetition).
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate isRepetitionOrStart(State s) { stateInsideRepetition(s) or s = getRootState() }
|
||||
|
||||
/**
|
||||
* Holds if state `s` might be inside a backtracking repetition.
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate stateInsideRepetition(State s) {
|
||||
s.getRepr().getParent*() instanceof InfiniteRepetitionQuantifier
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there exists a path in the NFA from `s` to a "pivot" state
|
||||
* (from a `(pivot, succ)` pair that starts the search).
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate canReachABeginning(State s) {
|
||||
delta+(s) = any(State pivot | isStartLoops(pivot, _))
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there exists a path in the NFA from `s` to a "succ" state
|
||||
* (from a `(pivot, succ)` pair that starts the search).
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate canReachATarget(State s) { delta+(s) = any(State succ | isStartLoops(_, succ)) }
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `pivot` and `succ` are a pair of loops that could be the beginning of a quadratic blowup.
|
||||
*
|
||||
* There is a slight implementation difference compared to the paper: this predicate requires that `pivot != succ`.
|
||||
* The case where `pivot = succ` causes exponential backtracking and is handled by the `js/redos` query.
|
||||
*/
|
||||
predicate isStartLoops(State pivot, State succ) {
|
||||
pivot != succ and
|
||||
succ.getRepr() instanceof InfiniteRepetitionQuantifier and
|
||||
delta+(pivot) = succ and
|
||||
(
|
||||
pivot.getRepr() instanceof InfiniteRepetitionQuantifier
|
||||
or
|
||||
pivot = mkMatch(any(RegExpRoot root))
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a state for which there exists a transition in the NFA from `s`.
|
||||
*/
|
||||
State delta(State s) { delta(s, _, result) }
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to the corresponding
|
||||
* components of `r` labelled with `s1`, `s2`, and `s3`, respectively.
|
||||
*/
|
||||
pragma[noinline]
|
||||
predicate step(StateTuple q, InputSymbol s1, InputSymbol s2, InputSymbol s3, StateTuple r) {
|
||||
exists(State r1, State r2, State r3 |
|
||||
step(q, s1, s2, s3, r1, r2, r3) and r = MkStateTuple(r1, r2, r3)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to `r1`, `r2`, and `r3`
|
||||
* labelled with `s1`, `s2`, and `s3`, respectively.
|
||||
*/
|
||||
pragma[noopt]
|
||||
predicate step(
|
||||
StateTuple q, InputSymbol s1, InputSymbol s2, InputSymbol s3, State r1, State r2, State r3
|
||||
) {
|
||||
exists(State q1, State q2, State q3 | q.isTuple(q1, q2, q3) |
|
||||
deltaClosed(q1, s1, r1) and
|
||||
deltaClosed(q2, s2, r2) and
|
||||
deltaClosed(q3, s3, r3) and
|
||||
// use noopt to force the join on `getAThreewayIntersect` to happen last.
|
||||
exists(getAThreewayIntersect(s1, s2, s3))
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a char that is matched by all the edges `s1`, `s2`, and `s3`.
|
||||
*
|
||||
* The result is not complete, and might miss some combination of edges that share some character.
|
||||
*/
|
||||
pragma[noinline]
|
||||
string getAThreewayIntersect(InputSymbol s1, InputSymbol s2, InputSymbol s3) {
|
||||
result = minAndMaxIntersect(s1, s2) and result = [intersect(s2, s3), intersect(s1, s3)]
|
||||
or
|
||||
result = minAndMaxIntersect(s1, s3) and result = [intersect(s2, s3), intersect(s1, s2)]
|
||||
or
|
||||
result = minAndMaxIntersect(s2, s3) and result = [intersect(s1, s2), intersect(s1, s3)]
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the minimum and maximum characters that intersect between `a` and `b`.
|
||||
* This predicate is used to limit the size of `getAThreewayIntersect`.
|
||||
*/
|
||||
pragma[noinline]
|
||||
string minAndMaxIntersect(InputSymbol a, InputSymbol b) {
|
||||
result = [min(intersect(a, b)), max(intersect(a, b))]
|
||||
}
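For the `/^\d*5\w*$/` example, the three edges are `\d`, the literal `5` and `\w`, and a shared witness character can be found with ordinary set intersection (a Python sketch using an ASCII view of the character classes, purely for illustration):

import string

edge_digit = set(string.digits)                              # \d
edge_literal = {'5'}                                         # the literal '5'
edge_word = set(string.ascii_letters + string.digits + '_')  # \w, ASCII only

print(sorted(edge_digit & edge_literal & edge_word))  # ['5'] -- one character matched by all three edges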
|
||||
|
||||
private newtype TTrace =
|
||||
Nil() or
|
||||
Step(InputSymbol s1, InputSymbol s2, InputSymbol s3, TTrace t) {
|
||||
isReachableFromStartTuple(_, _, t, s1, s2, s3, _, _)
|
||||
}
|
||||
|
||||
/**
|
||||
* A list of tuples of input symbols that describe a path in the product automaton
|
||||
* starting from some start state.
|
||||
*/
|
||||
class Trace extends TTrace {
|
||||
/**
|
||||
* Gets a string representation of this Trace that can be used for debug purposes.
|
||||
*/
|
||||
string toString() {
|
||||
this = Nil() and result = "Nil()"
|
||||
or
|
||||
exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, Trace t | this = Step(s1, s2, s3, t) |
|
||||
result = "Step(" + s1 + ", " + s2 + ", " + s3 + ", " + t + ")"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there exists a transition from `r` to `q` in the product automaton.
|
||||
* Notice that the arguments are flipped, and thus the direction is backwards.
|
||||
*/
|
||||
pragma[noinline]
|
||||
predicate tupleDeltaBackwards(StateTuple q, StateTuple r) { step(r, _, _, _, q) }
|
||||
|
||||
/**
|
||||
* Holds if `tuple` is an end state in our search.
|
||||
* That means there exists a pair of loops `(pivot, succ)` such that `tuple = (pivot, succ, succ)`.
|
||||
*/
|
||||
predicate isEndTuple(StateTuple tuple) { tuple = getAnEndTuple(_, _) }
|
||||
|
||||
/**
|
||||
* Gets the minimum length of a path from `r` to some end state `end`.
|
||||
*
|
||||
* The implementation searches backwards from the end-tuple.
|
||||
* This approach was chosen because it is much more efficient when the first predicate given to `shortestDistances` is small.
|
||||
* The `end` argument must always be an end state.
|
||||
*/
|
||||
int distBackFromEnd(StateTuple r, StateTuple end) =
|
||||
shortestDistances(isEndTuple/1, tupleDeltaBackwards/2)(end, r, result)
|
||||
|
||||
/**
|
||||
* Holds if there exists a pair of repetitions `(pivot, succ)` in the regular expression such that:
|
||||
* `tuple` is reachable from `(pivot, pivot, succ)` in the product automaton,
|
||||
* and there is a distance of `dist` from `tuple` to the nearest end-tuple `(pivot, succ, succ)`,
|
||||
* and a path from a start-state to `tuple` follows the transitions in `trace`.
|
||||
*/
|
||||
private predicate isReachableFromStartTuple(
|
||||
State pivot, State succ, StateTuple tuple, Trace trace, int dist
|
||||
) {
|
||||
exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, Trace v |
|
||||
isReachableFromStartTuple(pivot, succ, v, s1, s2, s3, tuple, dist) and
|
||||
trace = Step(s1, s2, s3, v)
|
||||
)
|
||||
}
|
||||
|
||||
private predicate isReachableFromStartTuple(
|
||||
State pivot, State succ, Trace trace, InputSymbol s1, InputSymbol s2, InputSymbol s3,
|
||||
StateTuple tuple, int dist
|
||||
) {
|
||||
// base case.
|
||||
exists(State q1, State q2, State q3 |
|
||||
isStartLoops(pivot, succ) and
|
||||
step(MkStateTuple(pivot, pivot, succ), s1, s2, s3, tuple) and
|
||||
tuple = MkStateTuple(q1, q2, q3) and
|
||||
trace = Nil() and
|
||||
dist = distBackFromEnd(tuple, MkStateTuple(pivot, succ, succ))
|
||||
)
|
||||
or
|
||||
// recursive case
|
||||
exists(StateTuple p |
|
||||
isReachableFromStartTuple(pivot, succ, p, trace, dist + 1) and
|
||||
dist = distBackFromEnd(tuple, MkStateTuple(pivot, succ, succ)) and
|
||||
step(p, s1, s2, s3, tuple)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the tuple `(pivot, succ, succ)` from the product automaton.
|
||||
*/
|
||||
StateTuple getAnEndTuple(State pivot, State succ) {
|
||||
isStartLoops(pivot, succ) and
|
||||
result = MkStateTuple(pivot, succ, succ)
|
||||
}
|
||||
|
||||
/** An implementation of a chain containing chars for use by `Concretizer`. */
|
||||
private module CharTreeImpl implements CharTree {
|
||||
class CharNode = Trace;
|
||||
|
||||
CharNode getPrev(CharNode t) { t = Step(_, _, _, result) }
|
||||
|
||||
/** Holds if `n` is used in `isPumpable`. */
|
||||
predicate isARelevantEnd(CharNode n) {
|
||||
exists(State pivot, State succ |
|
||||
isReachableFromStartTuple(pivot, succ, getAnEndTuple(pivot, succ), n, _)
|
||||
)
|
||||
}
|
||||
|
||||
string getChar(CharNode t) {
|
||||
exists(InputSymbol s1, InputSymbol s2, InputSymbol s3 | t = Step(s1, s2, s3, _) |
|
||||
result = getAThreewayIntersect(s1, s2, s3)
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if matching repetitions of `pump` can:
|
||||
* 1) Transition from `pivot` back to `pivot`.
|
||||
* 2) Transition from `pivot` to `succ`.
|
||||
* 3) Transition from `succ` to `succ`.
|
||||
*
|
||||
* From theorem 3 in the paper linked at the top of this file we can therefore conclude that
|
||||
* the regular expression has polynomial backtracking - if a rejecting suffix exists.
|
||||
*
|
||||
* This predicate is used by `SuperLinearReDoSConfiguration`, and the final results are
|
||||
* available in the `hasReDoSResult` predicate.
|
||||
*/
|
||||
predicate isPumpable(State pivot, State succ, string pump) {
|
||||
exists(StateTuple q, Trace t |
|
||||
isReachableFromStartTuple(pivot, succ, q, t, _) and
|
||||
q = getAnEndTuple(pivot, succ) and
|
||||
pump = Concretizer<CharTreeImpl>::concretize(t)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if states starting in `state` can have polynomial backtracking with the string `pump`.
|
||||
*/
|
||||
predicate isReDoSCandidate(State state, string pump) { isPumpable(_, state, pump) }
|
||||
|
||||
/**
|
||||
* Holds if repetitions of `pump` at `t` will cause polynomial backtracking.
|
||||
*/
|
||||
predicate polynomialReDoS(RegExpTerm t, string pump, string prefixMsg, RegExpTerm prev) {
|
||||
exists(State s, State pivot |
|
||||
ReDoSPruning<isReDoSCandidate/2>::hasReDoSResult(t, pump, s, prefixMsg) and
|
||||
isPumpable(pivot, s, _) and
|
||||
prev = pivot.getRepr()
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a message for why `term` can cause polynomial backtracking.
|
||||
*/
|
||||
string getReasonString(RegExpTerm term, string pump, string prefixMsg, RegExpTerm prev) {
|
||||
polynomialReDoS(term, pump, prefixMsg, prev) and
|
||||
result =
|
||||
"Strings " + prefixMsg + "with many repetitions of '" + pump +
|
||||
"' can start matching anywhere after the start of the preceeding " + prev
|
||||
}
|
||||
|
||||
/**
|
||||
* A term that may cause a regular expression engine to perform a
|
||||
* polynomial number of match attempts, relative to the input length.
|
||||
*/
|
||||
class PolynomialBackTrackingTerm extends InfiniteRepetitionQuantifier {
|
||||
string reason;
|
||||
string pump;
|
||||
string prefixMsg;
|
||||
RegExpTerm prev;
|
||||
|
||||
PolynomialBackTrackingTerm() {
|
||||
reason = getReasonString(this, pump, prefixMsg, prev) and
|
||||
// there might be many reasons for this term to have polynomial backtracking - we pick the shortest one.
|
||||
reason = min(string msg | msg = getReasonString(this, _, _, _) | msg order by msg.length(), msg)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if all non-empty successors to the polynomial backtracking term match the end of the line.
|
||||
*/
|
||||
predicate isAtEndLine() {
|
||||
forall(RegExpTerm succ | this.getSuccessor+() = succ and not matchesEpsilon(succ) |
|
||||
succ instanceof RegExpDollar
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the string that should be repeated to cause this regular expression to perform a polynomial number of match attempts.
|
||||
*/
|
||||
string getPumpString() { result = pump }
|
||||
|
||||
/**
|
||||
* Gets a message for which prefix a matching string must start with for this term to cause polynomial backtracking.
|
||||
*/
|
||||
string getPrefixMessage() { result = prefixMsg }
|
||||
|
||||
/**
|
||||
* Gets a predecessor to `this`, which also loops on the pump string, and thereby causes polynomial backtracking.
|
||||
*/
|
||||
RegExpTerm getPreviousLoop() { result = prev }
|
||||
|
||||
/**
|
||||
* Gets the reason for the number of match attempts.
|
||||
*/
|
||||
string getReason() { result = reason }
|
||||
}
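The situation `isAtEndLine` above describes can be seen with a pattern such as `\s+$`, whose only non-empty successor is the end-of-line anchor (a CPython illustration, not library code): searching it in text that ends with a long whitespace run followed by a non-space character backtracks quadratically.

import re
import time

pattern = re.compile(r'\s+$')

for n in (2_000, 4_000, 8_000):
    s = ' ' * n + 'x'  # the trailing 'x' makes every attempted match fail
    start = time.perf_counter()
    pattern.search(s)
    print(n, round(time.perf_counter() - start, 4))
# Doubling the whitespace run roughly quadruples the search time.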
|
||||
@@ -13,7 +13,7 @@
|
||||
*/
|
||||
|
||||
import python
|
||||
import semmle.python.security.performance.SuperlinearBackTracking
|
||||
import semmle.python.security.regexp.SuperlinearBackTracking
|
||||
import semmle.python.security.dataflow.PolynomialReDoSQuery
|
||||
import DataFlow::PathGraph
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
*/
|
||||
|
||||
import python
|
||||
import semmle.python.security.performance.ExponentialBackTracking
|
||||
import semmle.python.security.regexp.ExponentialBackTracking
|
||||
|
||||
from RegExpTerm t, string pump, State s, string prefixMsg
|
||||
where
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import python
|
||||
import semmle.python.security.performance.SuperlinearBackTracking
|
||||
import semmle.python.security.regexp.SuperlinearBackTracking
|
||||
|
||||
from PolynomialBackTrackingTerm t
|
||||
select t.getRegex(), t, t.getReason()
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
| KnownCVEs.py:15:22:15:24 | \\d+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '9'. |
|
||||
| KnownCVEs.py:15:22:15:24 | \\d+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. |
|
||||
| KnownCVEs.py:30:24:31:25 | .* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of ','. |
|
||||
| KnownCVEs.py:35:18:35:81 | ([-/:,#%.'"\\s!\\w]\|\\w-\\w\|'[\\s\\w]+'\\s*\|"[\\s\\w]+"\|\\([\\d,%\\.\\s]+\\))* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '"\\t"'. |
|
||||
| redos.py:6:28:6:42 | (?:__\|[\\s\\S])+? | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '__'. |
|
||||
@@ -31,7 +31,7 @@
|
||||
| redos.py:127:25:127:38 | ([a-z]\|[d-h])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'd'. |
|
||||
| redos.py:130:25:130:40 | ([^a-z]\|[^0-9])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '/'. |
|
||||
| redos.py:133:25:133:35 | (\\d\|[0-9])* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. |
|
||||
| redos.py:136:25:136:32 | (\\s\|\\s)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of ' '. |
|
||||
| redos.py:136:25:136:32 | (\\s\|\\s)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\\t'. |
|
||||
| redos.py:139:25:139:31 | (\\w\|G)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'G'. |
|
||||
| redos.py:145:25:145:32 | (\\d\|\\w)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. |
|
||||
| redos.py:148:25:148:31 | (\\d\|5)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '5'. |
|
||||
@@ -46,7 +46,7 @@
|
||||
| redos.py:175:26:175:30 | [\\d]+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '0'. |
|
||||
| redos.py:187:26:187:31 | [^>a]+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '='. |
|
||||
| redos.py:190:27:190:29 | \\s* | This part of the regular expression may cause exponential backtracking on strings starting with '\\n' and containing many repetitions of '\\n'. |
|
||||
| redos.py:193:28:193:30 | \\s+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of ' '. |
|
||||
| redos.py:193:28:193:30 | \\s+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\\t'. |
|
||||
| redos.py:196:78:196:89 | [ a-zA-Z{}]+ | This part of the regular expression may cause exponential backtracking on strings starting with '{[A(A)A:' and containing many repetitions of ' A:'. |
|
||||
| redos.py:196:91:196:92 | ,? | This part of the regular expression may cause exponential backtracking on strings starting with '{[A(A)A: ' and containing many repetitions of ',A: '. |
|
||||
| redos.py:199:25:199:26 | a+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. |
|
||||
@@ -65,20 +65,20 @@
|
||||
| redos.py:256:37:256:39 | \\w* | This part of the regular expression may cause exponential backtracking on strings starting with 'foobarbaz' and containing many repetitions of 'foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz'. |
|
||||
| redos.py:256:49:256:51 | \\w* | This part of the regular expression may cause exponential backtracking on strings starting with 'foobarbazfoobarbaz' and containing many repetitions of 'foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz'. |
|
||||
| redos.py:256:61:256:63 | \\w* | This part of the regular expression may cause exponential backtracking on strings starting with 'foobarbazfoobarbazfoobarbaz' and containing many repetitions of 'foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz'. |
|
||||
| redos.py:259:24:259:126 | (.thisisagoddamnlongstringforstresstestingthequery\|\\sthisisagoddamnlongstringforstresstestingthequery)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of ' thisisagoddamnlongstringforstresstestingthequery'. |
|
||||
| redos.py:259:24:259:126 | (.thisisagoddamnlongstringforstresstestingthequery\|\\sthisisagoddamnlongstringforstresstestingthequery)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\\tthisisagoddamnlongstringforstresstestingthequery'. |
|
||||
| redos.py:262:24:262:87 | (thisisagoddamnlongstringforstresstestingthequery\|this\\w+query)* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'thisisagoddamnlongstringforstresstestingthequery'. |
|
||||
| redos.py:262:78:262:80 | \\w+ | This part of the regular expression may cause exponential backtracking on strings starting with 'this' and containing many repetitions of 'aquerythis'. |
|
||||
| redos.py:262:78:262:80 | \\w+ | This part of the regular expression may cause exponential backtracking on strings starting with 'this' and containing many repetitions of '0querythis'. |
|
||||
| redos.py:268:28:268:39 | ([\ufffd\ufffd]\|[\ufffd\ufffd])* | This part of the regular expression may cause exponential backtracking on strings starting with 'foo' and containing many repetitions of '\ufffd'. |
|
||||
| redos.py:271:28:271:41 | ((\ufffd\|\ufffd)\|(\ufffd\|\ufffd))* | This part of the regular expression may cause exponential backtracking on strings starting with 'foo' and containing many repetitions of '\ufffd'. |
|
||||
| redos.py:274:31:274:32 | b+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'b'. |
|
||||
| redos.py:277:48:277:50 | \\s* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '"" a='. |
|
||||
| redos.py:277:48:277:50 | \\s* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '""\\t0='. |
|
||||
| redos.py:283:26:283:27 | a+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. |
|
||||
| redos.py:286:26:286:27 | a+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. |
|
||||
| redos.py:292:26:292:27 | a+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. |
|
||||
| redos.py:295:35:295:36 | a+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. |
|
||||
| redos.py:301:100:301:101 | e+ | This part of the regular expression may cause exponential backtracking on strings starting with ';00000000000000' and containing many repetitions of 'e'. |
|
||||
| redos.py:304:28:304:29 | c+ | This part of the regular expression may cause exponential backtracking on strings starting with 'ab' and containing many repetitions of 'c'. |
|
||||
| redos.py:307:28:307:30 | \\s+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of ' '. |
|
||||
| redos.py:307:28:307:30 | \\s+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of '\\t'. |
|
||||
| redos.py:310:26:310:34 | ([^/]\|X)+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'X'. |
|
||||
| redos.py:313:30:313:34 | [^Y]+ | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'Xx'. |
|
||||
| redos.py:316:25:316:26 | a* | This part of the regular expression may cause exponential backtracking on strings containing many repetitions of 'a'. |
|
||||
|
||||
ruby/ql/lib/change-notes/2022-05-25-redos-refac.md (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
---
|
||||
category: deprecated
|
||||
---
|
||||
* The utility files previously in the `codeql.ruby.security.performance` package have been moved to the `codeql.ruby.security.regexp` package.
|
||||
The previous files still exist as deprecated aliases.
|
||||
@@ -2,196 +2,27 @@
|
||||
* Provides predicates for reasoning about bad tag filter vulnerabilities.
|
||||
*/
|
||||
|
||||
import performance.ReDoSUtil
|
||||
import regexp.RegexpMatching
|
||||
|
||||
/**
|
||||
* A module for determining if a regexp matches a given string,
|
||||
* and reasoning about which capture groups are filled by a given string.
|
||||
* Holds if the regexp `root` should be tested against `str`.
|
||||
* Implements the `isRegexpMatchingCandidateSig` signature from `RegexpMatching`.
|
||||
* `ignorePrefix` toggles whether the regular expression should be treated as accepting any prefix if it's unanchored.
|
||||
* `testWithGroups` toggles whether it's tested which groups are filled by a given input string.
|
||||
*/
|
||||
private module RegexpMatching {
|
||||
/**
|
||||
* A class to test whether a regular expression matches a string.
|
||||
* Override this class and extend `test`/`testWithGroups` to configure which strings should be tested for acceptance by this regular expression.
|
||||
* The result can afterwards be read from the `matches` predicate.
|
||||
*
|
||||
* Strings in the `testWithGroups` predicate are also tested for which capture groups are filled by the given string.
|
||||
* The result is available in the `fillCaptureGroup` predicate.
|
||||
*/
|
||||
abstract class MatchedRegExp extends RegExpTerm {
|
||||
MatchedRegExp() { this.isRootTerm() }
|
||||
|
||||
/**
|
||||
* Holds if it should be tested whether this regular expression matches `str`.
|
||||
*
|
||||
* If `ignorePrefix` is true, then a regexp without a start anchor will be treated as if it had a start anchor.
|
||||
* E.g. a regular expression `/foo$/` will match any string that ends with "foo",
|
||||
* but if `ignorePrefix` is true, it will only match "foo".
|
||||
*/
|
||||
predicate test(string str, boolean ignorePrefix) {
|
||||
none() // maybe overridden in subclasses
|
||||
}
|
||||
|
||||
/**
|
||||
* Same as `test(..)`, but where the `fillsCaptureGroup` afterwards tells which capture groups were filled by the given string.
|
||||
*/
|
||||
predicate testWithGroups(string str, boolean ignorePrefix) {
|
||||
none() // maybe overridden in subclasses
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if this RegExp matches `str`, where `str` is either in the `test` or `testWithGroups` predicate.
|
||||
*/
|
||||
final predicate matches(string str) {
|
||||
exists(State state | state = getAState(this, str.length() - 1, str, _) |
|
||||
epsilonSucc*(state) = Accept(_)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if matching `str` may fill capture group number `g`.
|
||||
* Only holds if `str` is in the `testWithGroups` predicate.
|
||||
*/
|
||||
final predicate fillsCaptureGroup(string str, int g) {
|
||||
exists(State s |
|
||||
s = getAStateThatReachesAccept(this, _, str, _) and
|
||||
g = group(s.getRepr())
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a state the regular expression `reg` can be in after matching the `i`th char in `str`.
|
||||
* The regular expression is modeled as a non-deterministic finite automaton,
|
||||
* the regular expression can therefore be in multiple states after matching a character.
|
||||
*
|
||||
* It's a forward search to all possible states, and there is thus no guarantee that the state is on a path to an accepting state.
|
||||
*/
|
||||
private State getAState(MatchedRegExp reg, int i, string str, boolean ignorePrefix) {
|
||||
// start state, the -1 position before any chars have been matched
|
||||
i = -1 and
|
||||
(
|
||||
reg.test(str, ignorePrefix)
|
||||
or
|
||||
reg.testWithGroups(str, ignorePrefix)
|
||||
) and
|
||||
result.getRepr().getRootTerm() = reg and
|
||||
isStartState(result)
|
||||
private predicate isBadTagFilterCandidate(
|
||||
RootTerm root, string str, boolean ignorePrefix, boolean testWithGroups
|
||||
) {
|
||||
// the regexp must mention "<" and ">" explicitly.
|
||||
forall(string angleBracket | angleBracket = ["<", ">"] |
|
||||
any(RegExpConstant term | term.getValue().matches("%" + angleBracket + "%")).getRootTerm() =
|
||||
root
|
||||
) and
|
||||
ignorePrefix = true and
|
||||
(
|
||||
str = ["<!-- foo -->", "<!-- foo --!>", "<!- foo ->", "<foo>", "<script>"] and
|
||||
testWithGroups = true
|
||||
or
|
||||
// recursive case
|
||||
result = getAStateAfterMatching(reg, _, str, i, _, ignorePrefix)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the next state after the `prev` state from `reg`.
|
||||
* `prev` is the state after matching `fromIndex` chars in `str`,
|
||||
* and the result is the state after matching `toIndex` chars in `str`.
|
||||
*
|
||||
* This predicate is used as a step relation in the forwards search (`getAState`),
|
||||
* and also as a step relation in the later backwards search (`getAStateThatReachesAccept`).
|
||||
*/
|
||||
private State getAStateAfterMatching(
|
||||
MatchedRegExp reg, State prev, string str, int toIndex, int fromIndex, boolean ignorePrefix
|
||||
) {
|
||||
// the basic recursive case - outlined into a noopt helper to make performance work out.
|
||||
result = getAStateAfterMatchingAux(reg, prev, str, toIndex, fromIndex, ignorePrefix)
|
||||
or
|
||||
// we can skip past word boundaries if the next char is a non-word char.
|
||||
fromIndex = toIndex and
|
||||
prev.getRepr() instanceof RegExpWordBoundary and
|
||||
prev = getAState(reg, toIndex, str, ignorePrefix) and
|
||||
after(prev.getRepr()) = result and
|
||||
str.charAt(toIndex + 1).regexpMatch("\\W") // \W matches any non-word char.
|
||||
}
|
||||
|
||||
pragma[noopt]
|
||||
private State getAStateAfterMatchingAux(
|
||||
MatchedRegExp reg, State prev, string str, int toIndex, int fromIndex, boolean ignorePrefix
|
||||
) {
|
||||
prev = getAState(reg, fromIndex, str, ignorePrefix) and
|
||||
fromIndex = toIndex - 1 and
|
||||
exists(string char | char = str.charAt(toIndex) | specializedDeltaClosed(prev, char, result)) and
|
||||
not discardedPrefixStep(prev, result, ignorePrefix)
|
||||
}
|
||||
|
||||
/** Holds if a step from `prev` to `next` should be discarded when the `ignorePrefix` flag is set. */
|
||||
private predicate discardedPrefixStep(State prev, State next, boolean ignorePrefix) {
|
||||
prev = mkMatch(any(RegExpRoot r)) and
|
||||
ignorePrefix = true and
|
||||
next = prev
|
||||
}
|
||||
|
||||
// The `deltaClosed` relation specialized to the chars that exist in strings tested by a `MatchedRegExp`.
|
||||
private predicate specializedDeltaClosed(State prev, string char, State next) {
|
||||
deltaClosed(prev, specializedGetAnInputSymbolMatching(char), next)
|
||||
}
|
||||
|
||||
// The `getAnInputSymbolMatching` relation specialized to the chars that exist in strings tested by a `MatchedRegExp`.
|
||||
pragma[noinline]
|
||||
private InputSymbol specializedGetAnInputSymbolMatching(string char) {
|
||||
exists(string s, MatchedRegExp r |
|
||||
r.test(s, _)
|
||||
or
|
||||
r.testWithGroups(s, _)
|
||||
|
|
||||
char = s.charAt(_)
|
||||
) and
|
||||
result = getAnInputSymbolMatching(char)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the `i`th state on a path to the accepting state when `reg` matches `str`.
|
||||
* Starts with an accepting state as found by `getAState` and searches backwards
|
||||
* to the start state through the reachable states (as found by `getAState`).
|
||||
*
|
||||
* This predicate holds the invariant that the result state can be reached with `i` steps from a start state,
|
||||
* and an accepting state can be found after (`str.length() - 1 - i`) steps from the result.
|
||||
* The result state is therefore always on a valid path where `reg` accepts `str`.
|
||||
*
|
||||
* This predicate is only used to find which capture groups a regular expression has filled,
|
||||
* and thus the search is only performed for the strings in the `testWithGroups(..)` predicate.
|
||||
*/
|
||||
private State getAStateThatReachesAccept(
|
||||
MatchedRegExp reg, int i, string str, boolean ignorePrefix
|
||||
) {
|
||||
// base case, reaches an accepting state from the last state in `getAState(..)`
|
||||
reg.testWithGroups(str, ignorePrefix) and
|
||||
i = str.length() - 1 and
|
||||
result = getAState(reg, i, str, ignorePrefix) and
|
||||
epsilonSucc*(result) = Accept(_)
|
||||
or
|
||||
// recursive case. `next` is the next state to be matched after matching `prev`.
|
||||
// this predicate is doing a backwards search, so `prev` is the result we are looking for.
|
||||
exists(State next, State prev, int fromIndex, int toIndex |
|
||||
next = getAStateThatReachesAccept(reg, toIndex, str, ignorePrefix) and
|
||||
next = getAStateAfterMatching(reg, prev, str, toIndex, fromIndex, ignorePrefix) and
|
||||
i = fromIndex and
|
||||
result = prev
|
||||
)
|
||||
}
|
||||
|
||||
/** Gets the capture group number that `term` belongs to. */
|
||||
private int group(RegExpTerm term) {
|
||||
exists(RegExpGroup grp | grp.getNumber() = result | term.getParent*() = grp)
|
||||
}
|
||||
}
|
||||
|
||||
/** A class to test whether a regular expression matches certain HTML tags. */
|
||||
class HtmlMatchingRegExp extends RegexpMatching::MatchedRegExp {
|
||||
HtmlMatchingRegExp() {
|
||||
// the regexp must mention "<" and ">" explicitly.
|
||||
forall(string angleBracket | angleBracket = ["<", ">"] |
|
||||
any(RegExpConstant term | term.getValue().matches("%" + angleBracket + "%")).getRootTerm() =
|
||||
this
|
||||
)
|
||||
}
|
||||
|
||||
override predicate testWithGroups(string str, boolean ignorePrefix) {
|
||||
ignorePrefix = true and
|
||||
str = ["<!-- foo -->", "<!-- foo --!>", "<!- foo ->", "<foo>", "<script>"]
|
||||
}
|
||||
|
||||
override predicate test(string str, boolean ignorePrefix) {
|
||||
ignorePrefix = true and
|
||||
str =
|
||||
[
|
||||
"<!-- foo -->", "<!- foo ->", "<!-- foo --!>", "<!-- foo\n -->", "<script>foo</script>",
|
||||
@@ -200,7 +31,23 @@ class HtmlMatchingRegExp extends RegexpMatching::MatchedRegExp {
|
||||
"<script src='foo'></script>", "<SCRIPT>foo</SCRIPT>", "<script\tsrc=\"foo\"/>",
|
||||
"<script\tsrc='foo'></script>", "<sCrIpT>foo</ScRiPt>", "<script src=\"foo\">foo</script >",
|
||||
"<script src=\"foo\">foo</script foo=\"bar\">", "<script src=\"foo\">foo</script\t\n bar>"
|
||||
]
|
||||
] and
|
||||
testWithGroups = false
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* A regexp that matches some string from the `isBadTagFilterCandidate` predicate.
|
||||
*/
|
||||
class HtmlMatchingRegExp extends RootTerm {
|
||||
HtmlMatchingRegExp() { RegexpMatching<isBadTagFilterCandidate/4>::matches(this, _) }
|
||||
|
||||
/** Holds if this regexp matches `str`, where `str` is one of the strings from `isBadTagFilterCandidate`. */
|
||||
predicate matches(string str) { RegexpMatching<isBadTagFilterCandidate/4>::matches(this, str) }
|
||||
|
||||
/** Holds if this regexp fills capture group `g` when matching `str`, where `str` is one of the strings from `isBadTagFilterCandidate`. */
|
||||
predicate fillsCaptureGroup(string str, int g) {
|
||||
RegexpMatching<isBadTagFilterCandidate/4>::fillsCaptureGroup(this, str, g)
|
||||
}
|
||||
}
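Why strings like `<!-- foo --!>` are among the candidates (an illustrative Python check with a hypothetical filter, not a regexp from the library): HTML parsers also close a comment at `--!>`, so a filter that only recognises `-->` lets such a comment through.

import re

comment_filter = re.compile(r'<!--.*?-->')  # hypothetical sanitizer pattern

for s in ['<!-- foo -->', '<!-- foo --!>']:
    print(s, '->', bool(comment_filter.fullmatch(s)))
# '<!-- foo -->'  -> True
# '<!-- foo --!>' -> False: the filter misses it even though browsers treat it as closing the comment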
|
||||
|
||||
|
||||
@@ -1,374 +1,4 @@
|
||||
/**
|
||||
* This library implements the analysis described in the following two papers:
|
||||
*
|
||||
* James Kirrage, Asiri Rathnayake, Hayo Thielecke: Static Analysis for
|
||||
* Regular Expression Denial-of-Service Attacks. NSS 2013.
|
||||
* (http://www.cs.bham.ac.uk/~hxt/research/reg-exp-sec.pdf)
|
||||
* Asiri Rathnayake, Hayo Thielecke: Static Analysis for Regular Expression
|
||||
* Exponential Runtime via Substructural Logics. 2014.
|
||||
* (https://www.cs.bham.ac.uk/~hxt/research/redos_full.pdf)
|
||||
*
|
||||
* The basic idea is to search for overlapping cycles in the NFA, that is,
|
||||
* states `q` such that there are two distinct paths from `q` to itself
|
||||
* that consume the same word `w`.
|
||||
*
|
||||
* For any such state `q`, an attack string can be constructed as follows:
|
||||
* concatenate a prefix `v` that takes the NFA to `q` with `n` copies of
|
||||
* the word `w` that leads back to `q` along two different paths, followed
|
||||
* by a suffix `x` that is _not_ accepted in state `q`. A backtracking
|
||||
* implementation will need to explore at least 2^n different ways of going
|
||||
* from `q` back to itself while trying to match the `n` copies of `w`
|
||||
* before finally giving up.
|
||||
*
|
||||
* Now in order to identify overlapping cycles, all we have to do is find
|
||||
* pumpable forks, that is, states `q` that can transition to two different
|
||||
* states `r1` and `r2` on the same input symbol `c`, such that there are
|
||||
* paths from both `r1` and `r2` to `q` that consume the same word. The latter
|
||||
* condition is equivalent to saying that `(q, q)` is reachable from `(r1, r2)`
|
||||
* in the product NFA.
|
||||
*
|
||||
* This is what the library does. It makes a simple attempt to construct a
|
||||
* prefix `v` leading into `q`, but only to improve the alert message.
|
||||
* And the library tries to prove the existence of a suffix that ensures
|
||||
* rejection. This check might fail, which can cause false positives.
|
||||
*
|
||||
* Finally, sometimes it depends on the translation whether the NFA generated
|
||||
* for a regular expression has a pumpable fork or not. We implement one
|
||||
* particular translation, which may result in false positives or negatives
|
||||
* relative to some particular JavaScript engine.
|
||||
*
|
||||
* More precisely, the library constructs an NFA from a regular expression `r`
|
||||
* as follows:
|
||||
*
|
||||
* * Every sub-term `t` gives rise to an NFA state `Match(t,i)`, representing
|
||||
* the state of the automaton before attempting to match the `i`th character in `t`.
|
||||
* * There is one accepting state `Accept(r)`.
|
||||
* * There is a special `AcceptAnySuffix(r)` state, which accepts any suffix string
|
||||
* by using an epsilon transition to `Accept(r)` and an any transition to itself.
|
||||
* * Transitions between states may be labelled with epsilon, or an abstract
|
||||
* input symbol.
|
||||
* * Each abstract input symbol represents a set of concrete input characters:
|
||||
* either a single character, a set of characters represented by a
|
||||
* character class, or the set of all characters.
|
||||
* * The product automaton is constructed lazily, starting with pair states
|
||||
* `(q, q)` where `q` is a fork, and proceeding along an over-approximate
|
||||
* step relation.
|
||||
* * The over-approximate step relation allows transitions along pairs of
|
||||
* abstract input symbols where the symbols have overlap in the characters they accept.
|
||||
* * Once a trace of pairs of abstract input symbols that leads from a fork
|
||||
* back to itself has been identified, we attempt to construct a concrete
|
||||
* string corresponding to it, which may fail.
|
||||
* * Lastly we ensure that any state reached by repeating `n` copies of `w` has
|
||||
* a suffix `x` (possibly empty) that is most likely __not__ accepted.
|
||||
*/
|
||||
/** DEPRECATED. Import `codeql.ruby.security.regexp.ExponentialBackTracking` instead. */
|
||||
|
||||
import ReDoSUtil
|
||||
|
||||
/**
|
||||
* Holds if state `s` might be inside a backtracking repetition.
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate stateInsideBacktracking(State s) {
|
||||
s.getRepr().getParent*() instanceof MaybeBacktrackingRepetition
|
||||
}
|
||||
|
||||
/**
|
||||
* An infinitely repeating quantifier that might backtrack.
|
||||
*/
|
||||
private class MaybeBacktrackingRepetition extends InfiniteRepetitionQuantifier {
|
||||
MaybeBacktrackingRepetition() {
|
||||
exists(RegExpTerm child |
|
||||
child instanceof RegExpAlt or
|
||||
child instanceof RegExpQuantifier
|
||||
|
|
||||
child.getParent+() = this
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A state in the product automaton.
|
||||
*/
|
||||
private newtype TStatePair =
|
||||
/**
|
||||
* We lazily only construct those states that we are actually
|
||||
* going to need: `(q, q)` for every fork state `q`, and any
|
||||
* pair of states that can be reached from a pair that we have
|
||||
* already constructed. To cut down on the number of states,
|
||||
* we only represent states `(q1, q2)` where `q1` is lexicographically
|
||||
* no bigger than `q2`.
|
||||
*
|
||||
* States are only constructed if both states in the pair are
|
||||
* inside a repetition that might backtrack.
|
||||
*/
|
||||
MkStatePair(State q1, State q2) {
|
||||
isFork(q1, _, _, _, _) and q2 = q1
|
||||
or
|
||||
(step(_, _, _, q1, q2) or step(_, _, _, q2, q1)) and
|
||||
rankState(q1) <= rankState(q2)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a unique number for a `state`.
|
||||
* Is used to create an ordering of states, where states with the same `toString()` will be ordered differently.
|
||||
*/
|
||||
private int rankState(State state) {
|
||||
state =
|
||||
rank[result](State s, Location l |
|
||||
l = s.getRepr().getLocation()
|
||||
|
|
||||
s order by l.getStartLine(), l.getStartColumn(), s.toString()
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* A state in the product automaton.
|
||||
*/
|
||||
private class StatePair extends TStatePair {
|
||||
State q1;
|
||||
State q2;
|
||||
|
||||
StatePair() { this = MkStatePair(q1, q2) }
|
||||
|
||||
/** Gets a textual representation of this element. */
|
||||
string toString() { result = "(" + q1 + ", " + q2 + ")" }
|
||||
|
||||
/** Gets the first component of the state pair. */
|
||||
State getLeft() { result = q1 }
|
||||
|
||||
/** Gets the second component of the state pair. */
|
||||
State getRight() { result = q2 }
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds for `(fork, fork)` state pairs when `isFork(fork, _, _, _, _)` holds.
|
||||
*
|
||||
* Used in `statePairDistToFork`
|
||||
*/
|
||||
private predicate isStatePairFork(StatePair p) {
|
||||
exists(State fork | p = MkStatePair(fork, fork) and isFork(fork, _, _, _, _))
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to the corresponding
|
||||
* components of `r`.
|
||||
*
|
||||
* Used in `statePairDistToFork`
|
||||
*/
|
||||
private predicate reverseStep(StatePair r, StatePair q) { step(q, _, _, r) }
|
||||
|
||||
/**
|
||||
* Gets the minimum length of a path from `q` to `r` in the
|
||||
* product automaton.
|
||||
*/
|
||||
private int statePairDistToFork(StatePair q, StatePair r) =
|
||||
shortestDistances(isStatePairFork/1, reverseStep/2)(r, q, result)
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from `q` to `r1` and from `q` to `r2`
|
||||
* labelled with `s1` and `s2`, respectively, where `s1` and `s2` do not
|
||||
* trivially have an empty intersection.
|
||||
*
|
||||
* This predicate only holds for states associated with regular expressions
|
||||
* that have at least one repetition quantifier in them (otherwise the
|
||||
* expression cannot be vulnerable to ReDoS attacks anyway).
|
||||
*/
|
||||
pragma[noopt]
|
||||
private predicate isFork(State q, InputSymbol s1, InputSymbol s2, State r1, State r2) {
|
||||
stateInsideBacktracking(q) and
|
||||
exists(State q1, State q2 |
|
||||
q1 = epsilonSucc*(q) and
|
||||
delta(q1, s1, r1) and
|
||||
q2 = epsilonSucc*(q) and
|
||||
delta(q2, s2, r2) and
|
||||
// Use pragma[noopt] to prevent intersect(s1,s2) from being the starting point of the join.
|
||||
// From (s1,s2) it would find a huge number of intermediate state pairs (q1,q2) originating from different literals,
|
||||
// and discover at the end that no `q` can reach both `q1` and `q2` by epsilon transitions.
|
||||
exists(intersect(s1, s2))
|
||||
|
|
||||
s1 != s2
|
||||
or
|
||||
r1 != r2
|
||||
or
|
||||
r1 = r2 and q1 != q2
|
||||
or
|
||||
// If q can reach itself by epsilon transitions, then there are two distinct paths to the q1/q2 state:
|
||||
// one that uses the loop and one that doesn't. The engine will separately attempt to match with each path,
|
||||
// despite ending in the same state. The "fork" thus arises from the choice of whether to use the loop or not.
|
||||
// To avoid every state in the loop becoming a fork state,
|
||||
// we arbitrarily pick the InfiniteRepetitionQuantifier state as the canonical fork state for the loop
|
||||
// (every epsilon-loop must contain such a state).
|
||||
//
|
||||
// We additionally require that there exists another InfiniteRepetitionQuantifier `mid` on the path from `q` to itself.
|
||||
// This is done to avoid flagging regular expressions such as `/(a?)*b/` - that only has polynomial runtime, and is detected by `js/polynomial-redos`.
|
||||
// The below code is therefore a heuristic that only flags regular expressions such as `/(a*)*b/`,
// and does not flag regular expressions such as `/(a?b?)*c/`, but the latter pattern is not used frequently.
|
||||
r1 = r2 and
|
||||
q1 = q2 and
|
||||
epsilonSucc+(q) = q and
|
||||
exists(RegExpTerm term | term = q.getRepr() | term instanceof InfiniteRepetitionQuantifier) and
|
||||
// One of the mid states is an infinite quantifier itself
|
||||
exists(State mid, RegExpTerm term |
|
||||
mid = epsilonSucc+(q) and
|
||||
term = mid.getRepr() and
|
||||
term instanceof InfiniteRepetitionQuantifier and
|
||||
q = epsilonSucc+(mid) and
|
||||
not mid = q
|
||||
)
|
||||
) and
|
||||
stateInsideBacktracking(r1) and
|
||||
stateInsideBacktracking(r2)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the state pair `(q1, q2)` or `(q2, q1)`; note that only
|
||||
* one or the other is defined.
|
||||
*/
|
||||
private StatePair mkStatePair(State q1, State q2) {
|
||||
result = MkStatePair(q1, q2) or result = MkStatePair(q2, q1)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to the corresponding
|
||||
* components of `r` labelled with `s1` and `s2`, respectively.
|
||||
*/
|
||||
private predicate step(StatePair q, InputSymbol s1, InputSymbol s2, StatePair r) {
|
||||
exists(State r1, State r2 | step(q, s1, s2, r1, r2) and r = mkStatePair(r1, r2))
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to `r1` and `r2`
|
||||
* labelled with `s1` and `s2`, respectively.
|
||||
*
|
||||
* We only consider transitions where the resulting states `(r1, r2)` are both
|
||||
* inside a repetition that might backtrack.
|
||||
*/
|
||||
pragma[noopt]
|
||||
private predicate step(StatePair q, InputSymbol s1, InputSymbol s2, State r1, State r2) {
|
||||
exists(State q1, State q2 | q.getLeft() = q1 and q.getRight() = q2 |
|
||||
deltaClosed(q1, s1, r1) and
|
||||
deltaClosed(q2, s2, r2) and
|
||||
// use noopt to force the join on `intersect` to happen last.
|
||||
exists(intersect(s1, s2))
|
||||
) and
|
||||
stateInsideBacktracking(r1) and
|
||||
stateInsideBacktracking(r2)
|
||||
}
|
||||
|
||||
private newtype TTrace =
|
||||
Nil() or
|
||||
Step(InputSymbol s1, InputSymbol s2, TTrace t) { isReachableFromFork(_, _, s1, s2, t, _) }
|
||||
|
||||
/**
|
||||
* A list of pairs of input symbols that describe a path in the product automaton
|
||||
* starting from some fork state.
|
||||
*/
|
||||
private class Trace extends TTrace {
|
||||
/** Gets a textual representation of this element. */
|
||||
string toString() {
|
||||
this = Nil() and result = "Nil()"
|
||||
or
|
||||
exists(InputSymbol s1, InputSymbol s2, Trace t | this = Step(s1, s2, t) |
|
||||
result = "Step(" + s1 + ", " + s2 + ", " + t + ")"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `r` is reachable from `(fork, fork)` under input `w`, and there is
|
||||
* a path from `r` back to `(fork, fork)` with `rem` steps.
|
||||
*/
|
||||
private predicate isReachableFromFork(State fork, StatePair r, Trace w, int rem) {
|
||||
exists(InputSymbol s1, InputSymbol s2, Trace v |
|
||||
isReachableFromFork(fork, r, s1, s2, v, rem) and
|
||||
w = Step(s1, s2, v)
|
||||
)
|
||||
}
|
||||
|
||||
private predicate isReachableFromFork(
|
||||
State fork, StatePair r, InputSymbol s1, InputSymbol s2, Trace v, int rem
|
||||
) {
|
||||
// base case
|
||||
exists(State q1, State q2 |
|
||||
isFork(fork, s1, s2, q1, q2) and
|
||||
r = MkStatePair(q1, q2) and
|
||||
v = Nil() and
|
||||
rem = statePairDistToFork(r, MkStatePair(fork, fork))
|
||||
)
|
||||
or
|
||||
// recursive case
|
||||
exists(StatePair p |
|
||||
isReachableFromFork(fork, p, v, rem + 1) and
|
||||
step(p, s1, s2, r) and
|
||||
rem = statePairDistToFork(r, MkStatePair(fork, fork))
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a state in the product automaton from which `(fork, fork)` is
|
||||
* reachable in zero or more epsilon transitions.
|
||||
*/
|
||||
private StatePair getAForkPair(State fork) {
|
||||
isFork(fork, _, _, _, _) and
|
||||
result = MkStatePair(epsilonPred*(fork), epsilonPred*(fork))
|
||||
}
|
||||
|
||||
private predicate hasSuffix(Trace suffix, Trace t, int i) {
|
||||
// Declaring `t` to be a `RelevantTrace` currently causes a redundant check in the
|
||||
// recursive case, so instead we check it explicitly here.
|
||||
t instanceof RelevantTrace and
|
||||
i = 0 and
|
||||
suffix = t
|
||||
or
|
||||
hasSuffix(Step(_, _, suffix), t, i - 1)
|
||||
}
|
||||
|
||||
pragma[noinline]
|
||||
private predicate hasTuple(InputSymbol s1, InputSymbol s2, Trace t, int i) {
|
||||
hasSuffix(Step(s1, s2, _), t, i)
|
||||
}
|
||||
|
||||
private class RelevantTrace extends Trace, Step {
|
||||
RelevantTrace() {
|
||||
exists(State fork, StatePair q |
|
||||
isReachableFromFork(fork, q, this, _) and
|
||||
q = getAForkPair(fork)
|
||||
)
|
||||
}
|
||||
|
||||
pragma[noinline]
|
||||
private string intersect(int i) {
|
||||
exists(InputSymbol s1, InputSymbol s2 |
|
||||
hasTuple(s1, s2, this, i) and
|
||||
result = intersect(s1, s2)
|
||||
)
|
||||
}
|
||||
|
||||
/** Gets a string corresponding to this trace. */
|
||||
// the pragma is needed for the case where `intersect(s1, s2)` has multiple values,
|
||||
// not for recursion
|
||||
language[monotonicAggregates]
|
||||
string concretise() {
|
||||
result = strictconcat(int i | hasTuple(_, _, this, i) | this.intersect(i) order by i desc)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `fork` is a pumpable fork with word `w`.
|
||||
*/
|
||||
private predicate isPumpable(State fork, string w) {
|
||||
exists(StatePair q, RelevantTrace t |
|
||||
isReachableFromFork(fork, q, t, _) and
|
||||
q = getAForkPair(fork) and
|
||||
w = t.concretise()
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* An instantiation of `ReDoSConfiguration` for exponential backtracking.
|
||||
*/
|
||||
class ExponentialReDoSConfiguration extends ReDoSConfiguration {
|
||||
ExponentialReDoSConfiguration() { this = "ExponentialReDoSConfiguration" }
|
||||
|
||||
override predicate isReDoSCandidate(State state, string pump) { isPumpable(state, pump) }
|
||||
}
|
||||
deprecated import codeql.ruby.security.regexp.ExponentialBackTracking as Dep
|
||||
import Dep
|
||||
|
||||
@@ -1,130 +1,4 @@
|
||||
/**
|
||||
* Provides default sources, sinks and sanitizers for reasoning about
|
||||
* polynomial regular expression denial-of-service attacks, as well
|
||||
* as extension points for adding your own.
|
||||
*/
|
||||
/** DEPRECATED. Import `codeql.ruby.security.regexp.PolynomialReDoSCustomizations` instead. */
|
||||
|
||||
private import codeql.ruby.AST as AST
|
||||
private import codeql.ruby.CFG
|
||||
private import codeql.ruby.DataFlow
|
||||
private import codeql.ruby.dataflow.RemoteFlowSources
|
||||
private import codeql.ruby.Regexp
|
||||
private import codeql.ruby.security.performance.SuperlinearBackTracking
|
||||
|
||||
module PolynomialReDoS {
|
||||
/**
|
||||
* A data flow source node for polynomial regular expression denial-of-service vulnerabilities.
|
||||
*/
|
||||
abstract class Source extends DataFlow::Node { }
|
||||
|
||||
/**
|
||||
* A data flow sink node for polynomial regular expression denial-of-service vulnerabilities.
|
||||
*/
|
||||
abstract class Sink extends DataFlow::Node {
|
||||
/** Gets the regex that is being executed by this node. */
|
||||
abstract RegExpTerm getRegExp();
|
||||
|
||||
/** Gets the node to highlight in the alert message. */
|
||||
DataFlow::Node getHighlight() { result = this }
|
||||
}
|
||||
|
||||
/**
|
||||
* A sanitizer for polynomial regular expression denial-of-service vulnerabilities.
|
||||
*/
|
||||
abstract class Sanitizer extends DataFlow::Node { }
|
||||
|
||||
/**
|
||||
* DEPRECATED: Use `Sanitizer` instead.
|
||||
*
|
||||
* A sanitizer guard for polynomial regular expression denial of service
|
||||
* vulnerabilities.
|
||||
*/
|
||||
abstract deprecated class SanitizerGuard extends DataFlow::BarrierGuard { }
|
||||
|
||||
/**
|
||||
* A source of remote user input, considered as a flow source.
|
||||
*/
|
||||
class RemoteFlowSourceAsSource extends Source, RemoteFlowSource { }
|
||||
|
||||
/**
|
||||
* Gets the AST of a regular expression object that can flow to `node`.
|
||||
*/
|
||||
RegExpTerm getRegExpObjectFromNode(DataFlow::Node node) {
|
||||
exists(DataFlow::LocalSourceNode regexp |
|
||||
regexp.flowsTo(node) and
|
||||
result = regexp.asExpr().(CfgNodes::ExprNodes::RegExpLiteralCfgNode).getExpr().getParsed()
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* A regexp match against a superlinear backtracking term, seen as a sink for
|
||||
* polynomial regular expression denial-of-service vulnerabilities.
|
||||
*/
|
||||
class PolynomialBackTrackingTermMatch extends Sink {
|
||||
PolynomialBackTrackingTerm term;
|
||||
DataFlow::ExprNode matchNode;
|
||||
|
||||
PolynomialBackTrackingTermMatch() {
|
||||
exists(DataFlow::Node regexp |
|
||||
term.getRootTerm() = getRegExpObjectFromNode(regexp) and
|
||||
(
|
||||
// `=~` or `!~`
|
||||
exists(CfgNodes::ExprNodes::BinaryOperationCfgNode op |
|
||||
matchNode.asExpr() = op and
|
||||
(
|
||||
op.getExpr() instanceof AST::RegExpMatchExpr or
|
||||
op.getExpr() instanceof AST::NoRegExpMatchExpr
|
||||
) and
|
||||
(
|
||||
this.asExpr() = op.getLeftOperand() and regexp.asExpr() = op.getRightOperand()
|
||||
or
|
||||
this.asExpr() = op.getRightOperand() and regexp.asExpr() = op.getLeftOperand()
|
||||
)
|
||||
)
|
||||
or
|
||||
// Any of the methods on `String` that take a regexp.
|
||||
exists(CfgNodes::ExprNodes::MethodCallCfgNode call |
|
||||
matchNode.asExpr() = call and
|
||||
call.getExpr().getMethodName() =
|
||||
[
|
||||
"[]", "gsub", "gsub!", "index", "match", "match?", "partition", "rindex",
|
||||
"rpartition", "scan", "slice!", "split", "sub", "sub!"
|
||||
] and
|
||||
this.asExpr() = call.getReceiver() and
|
||||
regexp.asExpr() = call.getArgument(0)
|
||||
)
|
||||
or
|
||||
// A call to `match` or `match?` where the regexp is the receiver.
|
||||
exists(CfgNodes::ExprNodes::MethodCallCfgNode call |
|
||||
matchNode.asExpr() = call and
|
||||
call.getExpr().getMethodName() = ["match", "match?"] and
|
||||
regexp.asExpr() = call.getReceiver() and
|
||||
this.asExpr() = call.getArgument(0)
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
|
||||
override RegExpTerm getRegExp() { result = term }
|
||||
|
||||
override DataFlow::Node getHighlight() { result = matchNode }
|
||||
}
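// As a sketch of using the extension points above: an additional sink for the receiver
// of a call to a hypothetical `my_match(regexp)` helper. The method name is an
// assumption for illustration; a real model should target an actual API.
class MyMatchCallAsSink extends Sink {
  RegExpTerm regex;

  MyMatchCallAsSink() {
    exists(CfgNodes::ExprNodes::MethodCallCfgNode call, DataFlow::Node regexpArg |
      call.getExpr().getMethodName() = "my_match" and
      this.asExpr() = call.getReceiver() and
      regexpArg.asExpr() = call.getArgument(0) and
      regex = getRegExpObjectFromNode(regexpArg)
    )
  }

  override RegExpTerm getRegExp() { result = regex }
}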
|
||||
|
||||
private predicate lengthGuard(CfgNodes::ExprCfgNode g, CfgNode node, boolean branch) {
|
||||
exists(DataFlow::Node input, DataFlow::CallNode length, DataFlow::ExprNode operand |
|
||||
length.asExpr().getExpr().(AST::MethodCall).getMethodName() = "length" and
|
||||
length.getReceiver() = input and
|
||||
length.flowsTo(operand) and
|
||||
operand.getExprNode() = g.(CfgNodes::ExprNodes::RelationalOperationCfgNode).getAnOperand() and
|
||||
node = input.asExpr() and
|
||||
branch = true
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* A check on the length of a string, seen as a sanitizer guard.
|
||||
*/
|
||||
class LengthGuard extends Sanitizer {
|
||||
LengthGuard() { this = DataFlow::BarrierGuard<lengthGuard/3>::getABarrierNode() }
|
||||
}
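// A further sanitizer sketch: the result of a call to a `truncate`-style method, treated
// as bounded-length input. Both the method name and the assumption that truncation is a
// sufficient barrier are illustrative only.
class TruncateCallAsSanitizer extends Sanitizer {
  TruncateCallAsSanitizer() {
    exists(CfgNodes::ExprNodes::MethodCallCfgNode call |
      this.asExpr() = call and
      call.getExpr().getMethodName() = "truncate"
    )
  }
}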
|
||||
}
|
||||
deprecated import codeql.ruby.security.regexp.PolynomialReDoSCustomizations as Dep
|
||||
import Dep
|
||||
|
||||
@@ -1,37 +1,4 @@
|
||||
/**
|
||||
* Provides a taint tracking configuration for reasoning about polynomial
|
||||
* regular expression denial-of-service attacks.
|
||||
*
|
||||
* Note, for performance reasons: only import this file if `Configuration` is
|
||||
* needed. Otherwise, `PolynomialReDoSCustomizations` should be imported
|
||||
* instead.
|
||||
*/
|
||||
/** DEPRECATED. Import `codeql.ruby.security.regexp.PolynomialReDoSQuery` instead. */
|
||||
|
||||
private import codeql.ruby.DataFlow
|
||||
private import codeql.ruby.TaintTracking
|
||||
|
||||
/**
|
||||
* Provides a taint-tracking configuration for detecting polynomial regular
|
||||
* expression denial of service vulnerabilities.
|
||||
*/
|
||||
module PolynomialReDoS {
|
||||
import PolynomialReDoSCustomizations::PolynomialReDoS
|
||||
|
||||
/**
|
||||
* A taint-tracking configuration for detecting polynomial regular expression
|
||||
* denial of service vulnerabilities.
|
||||
*/
|
||||
class Configuration extends TaintTracking::Configuration {
|
||||
Configuration() { this = "PolynomialReDoS" }
|
||||
|
||||
override predicate isSource(DataFlow::Node source) { source instanceof Source }
|
||||
|
||||
override predicate isSink(DataFlow::Node sink) { sink instanceof Sink }
|
||||
|
||||
override predicate isSanitizer(DataFlow::Node node) { node instanceof Sanitizer }
|
||||
|
||||
deprecated override predicate isSanitizerGuard(DataFlow::BarrierGuard node) {
|
||||
node instanceof SanitizerGuard
|
||||
}
|
||||
}
|
||||
}
|
||||
deprecated import codeql.ruby.security.regexp.PolynomialReDoSQuery as Dep
|
||||
import Dep
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,87 +1,4 @@
|
||||
/**
|
||||
* Provides default sources, sinks and sanitizers for reasoning about regexp
|
||||
* injection vulnerabilities, as well as extension points for adding your own.
|
||||
*/
|
||||
/** DEPRECATED. Import `codeql.ruby.security.regexp.RegExpInjectionCustomizations` instead. */
|
||||
|
||||
private import ruby
|
||||
private import codeql.ruby.DataFlow
|
||||
private import codeql.ruby.Concepts
|
||||
private import codeql.ruby.Frameworks
|
||||
private import codeql.ruby.dataflow.RemoteFlowSources
|
||||
private import codeql.ruby.dataflow.BarrierGuards
|
||||
private import codeql.ruby.ApiGraphs
|
||||
|
||||
/**
|
||||
* Provides default sources, sinks and sanitizers for detecting
|
||||
* regexp injection vulnerabilities, as well as extension points for
|
||||
* adding your own.
|
||||
*/
|
||||
module RegExpInjection {
|
||||
/**
|
||||
* A data flow source for regexp injection vulnerabilities.
|
||||
*/
|
||||
abstract class Source extends DataFlow::Node { }
|
||||
|
||||
/**
|
||||
* A data flow sink for regexp injection vulnerabilities.
|
||||
*/
|
||||
abstract class Sink extends DataFlow::Node { }
|
||||
|
||||
/**
|
||||
* DEPRECATED: Use `Sanitizer` instead.
|
||||
*
|
||||
* A sanitizer guard for regexp injection vulnerabilities.
|
||||
*/
|
||||
abstract deprecated class SanitizerGuard extends DataFlow::BarrierGuard { }
|
||||
|
||||
/**
|
||||
* A data flow sanitizer for regexp injection vulnerabilities.
|
||||
*/
|
||||
abstract class Sanitizer extends DataFlow::Node { }
|
||||
|
||||
/**
|
||||
* A source of remote user input, considered as a flow source.
|
||||
*/
|
||||
class RemoteFlowSourceAsSource extends Source, RemoteFlowSource { }
|
||||
|
||||
/** A regexp literal, considered as a flow sink. */
|
||||
class RegExpLiteralAsSink extends Sink {
|
||||
RegExpLiteralAsSink() { this.asExpr().getExpr() instanceof RegExpLiteral }
|
||||
}
|
||||
|
||||
/**
|
||||
* The first argument of a call to `Regexp.new` or `Regexp.compile`,
|
||||
* considered as a flow sink.
|
||||
*/
|
||||
class ConstructedRegExpAsSink extends Sink {
|
||||
ConstructedRegExpAsSink() {
|
||||
exists(API::Node regexp, DataFlow::CallNode callNode |
|
||||
regexp = API::getTopLevelMember("Regexp") and
|
||||
(callNode = regexp.getAnInstantiation() or callNode = regexp.getAMethodCall("compile")) and
|
||||
this = callNode.getArgument(0)
|
||||
)
|
||||
}
|
||||
}
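// An extension sketch: the first argument of a call to a hypothetical
// `MyFramework.grep` class method, considered as an additional flow sink.
// The constant and method names are assumptions for illustration.
class MyFrameworkGrepArgumentAsSink extends Sink {
  MyFrameworkGrepArgumentAsSink() {
    this = API::getTopLevelMember("MyFramework").getAMethodCall("grep").getArgument(0)
  }
}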
|
||||
|
||||
/**
|
||||
* A comparison with a constant string, considered as a sanitizer-guard.
|
||||
*/
|
||||
class StringConstCompareAsSanitizer extends Sanitizer, StringConstCompareBarrier { }
|
||||
|
||||
/**
|
||||
* An inclusion check against an array of constant strings, considered as a
|
||||
* sanitizer-guard.
|
||||
*/
|
||||
class StringConstArrayInclusionCallAsSanitizer extends Sanitizer,
|
||||
StringConstArrayInclusionCallBarrier { }
|
||||
|
||||
/**
|
||||
* A call to `Regexp.escape` (or its alias, `Regexp.quote`), considered as a
|
||||
* sanitizer.
|
||||
*/
|
||||
class RegexpEscapeSanitization extends Sanitizer {
|
||||
RegexpEscapeSanitization() {
|
||||
this = API::getTopLevelMember("Regexp").getAMethodCall(["escape", "quote"])
|
||||
}
|
||||
}
|
||||
}
|
||||
deprecated import codeql.ruby.security.regexp.RegExpInjectionCustomizations as Dep
|
||||
import Dep
|
||||
|
||||
@@ -1,28 +1,4 @@
|
||||
/**
|
||||
* Provides a taint-tracking configuration for detecting regexp injection vulnerabilities.
|
||||
*
|
||||
* Note, for performance reasons: only import this file if `Configuration` is needed,
|
||||
* otherwise `RegExpInjectionCustomizations` should be imported instead.
|
||||
*/
|
||||
/** DEPRECATED. Import `codeql.ruby.security.regexp.RegExpInjectionQuery` instead. */
|
||||
|
||||
import codeql.ruby.DataFlow
|
||||
import codeql.ruby.TaintTracking
|
||||
import RegExpInjectionCustomizations
|
||||
import codeql.ruby.dataflow.BarrierGuards
|
||||
|
||||
/**
|
||||
* A taint-tracking configuration for detecting regexp injection vulnerabilities.
|
||||
*/
|
||||
class Configuration extends TaintTracking::Configuration {
|
||||
Configuration() { this = "RegExpInjection" }
|
||||
|
||||
override predicate isSource(DataFlow::Node source) { source instanceof RegExpInjection::Source }
|
||||
|
||||
override predicate isSink(DataFlow::Node sink) { sink instanceof RegExpInjection::Sink }
|
||||
|
||||
deprecated override predicate isSanitizerGuard(DataFlow::BarrierGuard guard) {
|
||||
guard instanceof RegExpInjection::SanitizerGuard
|
||||
}
|
||||
|
||||
override predicate isSanitizer(DataFlow::Node node) { node instanceof RegExpInjection::Sanitizer }
|
||||
}
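// A minimal path-problem query sketch built on this configuration, as a separate `.ql`
// file (the alert message is illustrative, not the shipped `rb/regexp-injection` query):
import codeql.ruby.DataFlow
import codeql.ruby.security.regexp.RegExpInjectionQuery
import DataFlow::PathGraph

from Configuration config, DataFlow::PathNode source, DataFlow::PathNode sink
where config.hasFlowPath(source, sink)
select sink.getNode(), source, sink, "This regular expression depends on a $@.",
  source.getNode(), "user-provided value"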
|
||||
deprecated import codeql.ruby.security.regexp.RegExpInjectionQuery as Dep
|
||||
import Dep
|
||||
|
||||
@@ -1,454 +1,4 @@
|
||||
/**
|
||||
* Provides classes for working with regular expressions that can
|
||||
* perform backtracking in superlinear time.
|
||||
*/
|
||||
/** DEPRECATED. Import `codeql.ruby.security.regexp.SuperlinearBackTracking` instead. */
|
||||
|
||||
import ReDoSUtil
|
||||
|
||||
/*
* This module implements the analysis described in the paper:
*   Valentin Wustholz, Oswaldo Olivo, Marijn J. H. Heule, and Isil Dillig:
*   Static Detection of DoS Vulnerabilities in
*   Programs that use Regular Expressions
*   (Extended Version).
*   (https://arxiv.org/pdf/1701.04045.pdf)
*
* Theorem 3 from the paper describes the basic idea.
*
* The following explains the idea using the variable and predicate names that are used in the implementation:
* We consider a pair of repetitions, which we will call `pivot` and `succ`.
*
* We create a product automaton of 3-tuples of states (see `StateTuple`).
* There exists a transition `(a,b,c) -> (d,e,f)` in the product automaton
* iff there exist three transitions in the NFA `a->d, b->e, c->f` where those three
* transitions all match a shared character `char` (see `getAThreewayIntersect`).
*
* We start a search in the product automaton at `(pivot, pivot, succ)`,
* and search for a series of transitions (a `Trace`), such that we end
* at `(pivot, succ, succ)` (see `isReachableFromStartTuple`).
*
* For example, consider the regular expression `/^\d*5\w*$/`.
* The search will start at the tuple `(\d*, \d*, \w*)` and search
* for a path to `(\d*, \w*, \w*)`.
* This path exists, and consists of a single transition in the product automaton,
* where the three corresponding NFA edges all match the character `"5"`.
*
* The start-state in the NFA has an any-transition to itself; this allows us to
* flag regular expressions such as `/a*$/` - which does not have a start anchor -
* and can thus start matching anywhere.
*
* The implementation is not perfect.
* It has the same suffix detection issue as the `js/redos` query, which can cause false positives.
* It also doesn't find all transitions in the product automaton, which can cause false negatives.
*/
|
||||
|
||||
/**
|
||||
* An instantiation of `ReDoSConfiguration` for superlinear ReDoS.
|
||||
*/
|
||||
class SuperLinearReDoSConfiguration extends ReDoSConfiguration {
|
||||
SuperLinearReDoSConfiguration() { this = "SuperLinearReDoSConfiguration" }
|
||||
|
||||
override predicate isReDoSCandidate(State state, string pump) { isPumpable(_, state, pump) }
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets any root (start) state of a regular expression.
|
||||
*/
|
||||
private State getRootState() { result = mkMatch(any(RegExpRoot r)) }
|
||||
|
||||
private newtype TStateTuple =
|
||||
MkStateTuple(State q1, State q2, State q3) {
|
||||
// starts at (pivot, pivot, succ)
|
||||
isStartLoops(q1, q3) and q1 = q2
|
||||
or
|
||||
step(_, _, _, _, q1, q2, q3) and FeasibleTuple::isFeasibleTuple(q1, q2, q3)
|
||||
}
|
||||
|
||||
/**
|
||||
* A state in the product automaton.
|
||||
* The product automaton contains 3-tuples of states.
|
||||
*
|
||||
* We lazily only construct those states that we are actually
|
||||
* going to need.
|
||||
* Either a start state `(pivot, pivot, succ)`, or a state
|
||||
* where there exists a transition from an already existing state.
|
||||
*
|
||||
* The exponential variant of this query (`js/redos`) uses an optimization
|
||||
* trick where `q1 <= q2`. This trick cannot be used here as the order
|
||||
* of the elements matter.
|
||||
*/
|
||||
class StateTuple extends TStateTuple {
|
||||
State q1;
|
||||
State q2;
|
||||
State q3;
|
||||
|
||||
StateTuple() { this = MkStateTuple(q1, q2, q3) }
|
||||
|
||||
/**
|
||||
* Gets a string representation of this tuple.
|
||||
*/
|
||||
string toString() { result = "(" + q1 + ", " + q2 + ", " + q3 + ")" }
|
||||
|
||||
/**
|
||||
* Holds if this tuple is `(r1, r2, r3)`.
|
||||
*/
|
||||
pragma[noinline]
|
||||
predicate isTuple(State r1, State r2, State r3) { r1 = q1 and r2 = q2 and r3 = q3 }
|
||||
}
|
||||
|
||||
/**
|
||||
* A module for determining feasible tuples for the product automaton.
|
||||
*
|
||||
* The implementation is split into many predicates for performance reasons.
|
||||
*/
|
||||
private module FeasibleTuple {
|
||||
/**
|
||||
* Holds if the tuple `(r1, r2, r3)` might be on a path from a start-state to an end-state in the product automaton.
|
||||
*/
|
||||
pragma[inline]
|
||||
predicate isFeasibleTuple(State r1, State r2, State r3) {
|
||||
// The first element is either inside a repetition (or the start state itself)
|
||||
isRepetitionOrStart(r1) and
|
||||
// The last element is inside a repetition
|
||||
stateInsideRepetition(r3) and
|
||||
// The states are reachable in the NFA in the order r1 -> r2 -> r3
|
||||
delta+(r1) = r2 and
|
||||
delta+(r2) = r3 and
|
||||
// The first element can reach a beginning (the "pivot" state in a `(pivot, succ)` pair).
|
||||
canReachABeginning(r1) and
|
||||
// The last element can reach a target (the "succ" state in a `(pivot, succ)` pair).
|
||||
canReachATarget(r3)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `s` is either inside a repetition, or is the start state (which is a repetition).
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate isRepetitionOrStart(State s) { stateInsideRepetition(s) or s = getRootState() }
|
||||
|
||||
/**
|
||||
* Holds if state `s` might be inside a backtracking repetition.
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate stateInsideRepetition(State s) {
|
||||
s.getRepr().getParent*() instanceof InfiniteRepetitionQuantifier
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there exists a path in the NFA from `s` to a "pivot" state
|
||||
* (from a `(pivot, succ)` pair that starts the search).
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate canReachABeginning(State s) {
|
||||
delta+(s) = any(State pivot | isStartLoops(pivot, _))
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there exists a path in the NFA from `s` to a "succ" state
|
||||
* (from a `(pivot, succ)` pair that starts the search).
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate canReachATarget(State s) { delta+(s) = any(State succ | isStartLoops(_, succ)) }
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `pivot` and `succ` are a pair of loops that could be the beginning of a quadratic blowup.
|
||||
*
|
||||
* There is a slight implementation difference compared to the paper: this predicate requires that `pivot != succ`.
|
||||
* The case where `pivot = succ` causes exponential backtracking and is handled by the `js/redos` query.
|
||||
*/
|
||||
predicate isStartLoops(State pivot, State succ) {
|
||||
pivot != succ and
|
||||
succ.getRepr() instanceof InfiniteRepetitionQuantifier and
|
||||
delta+(pivot) = succ and
|
||||
(
|
||||
pivot.getRepr() instanceof InfiniteRepetitionQuantifier
|
||||
or
|
||||
pivot = mkMatch(any(RegExpRoot root))
|
||||
)
|
||||
}
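// For the `/^\d*5\w*$/` example in the module documentation, `pivot` is the state for
// `\d*` and `succ` the state for `\w*`. A standalone debug query sketch (a separate `.ql`
// file, using the non-deprecated module path) that lists such candidate pairs:
import codeql.ruby.security.regexp.SuperlinearBackTracking

from State pivot, State succ
where isStartLoops(pivot, succ)
select pivot.getRepr(), succ.getRepr()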
|
||||
|
||||
/**
|
||||
* Gets a state for which there exists a transition in the NFA from `s`.
|
||||
*/
|
||||
State delta(State s) { delta(s, _, result) }
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to the corresponding
|
||||
* components of `r` labelled with `s1`, `s2`, and `s3`, respectively.
|
||||
*/
|
||||
pragma[noinline]
|
||||
predicate step(StateTuple q, InputSymbol s1, InputSymbol s2, InputSymbol s3, StateTuple r) {
|
||||
exists(State r1, State r2, State r3 |
|
||||
step(q, s1, s2, s3, r1, r2, r3) and r = MkStateTuple(r1, r2, r3)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to `r1`, `r2`, and `r3`
|
||||
* labelled with `s1`, `s2`, and `s3`, respectively.
|
||||
*/
|
||||
pragma[noopt]
|
||||
predicate step(
|
||||
StateTuple q, InputSymbol s1, InputSymbol s2, InputSymbol s3, State r1, State r2, State r3
|
||||
) {
|
||||
exists(State q1, State q2, State q3 | q.isTuple(q1, q2, q3) |
|
||||
deltaClosed(q1, s1, r1) and
|
||||
deltaClosed(q2, s2, r2) and
|
||||
deltaClosed(q3, s3, r3) and
|
||||
// use noopt to force the join on `getAThreewayIntersect` to happen last.
|
||||
exists(getAThreewayIntersect(s1, s2, s3))
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a char that is matched by all the edges `s1`, `s2`, and `s3`.
|
||||
*
|
||||
* The result is not complete, and might miss some combination of edges that share some character.
|
||||
*/
|
||||
pragma[noinline]
|
||||
string getAThreewayIntersect(InputSymbol s1, InputSymbol s2, InputSymbol s3) {
|
||||
result = minAndMaxIntersect(s1, s2) and result = [intersect(s2, s3), intersect(s1, s3)]
|
||||
or
|
||||
result = minAndMaxIntersect(s1, s3) and result = [intersect(s2, s3), intersect(s1, s2)]
|
||||
or
|
||||
result = minAndMaxIntersect(s2, s3) and result = [intersect(s1, s2), intersect(s1, s3)]
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the minimum and maximum characters that intersect between `a` and `b`.
|
||||
* This predicate is used to limit the size of `getAThreewayIntersect`.
|
||||
*/
|
||||
pragma[noinline]
|
||||
string minAndMaxIntersect(InputSymbol a, InputSymbol b) {
|
||||
result = [min(intersect(a, b)), max(intersect(a, b))]
|
||||
}
|
||||
|
||||
private newtype TTrace =
|
||||
Nil() or
|
||||
Step(InputSymbol s1, InputSymbol s2, InputSymbol s3, TTrace t) {
|
||||
exists(StateTuple p |
|
||||
isReachableFromStartTuple(_, _, p, t, _) and
|
||||
step(p, s1, s2, s3, _)
|
||||
)
|
||||
or
|
||||
exists(State pivot, State succ | isStartLoops(pivot, succ) |
|
||||
t = Nil() and step(MkStateTuple(pivot, pivot, succ), s1, s2, s3, _)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* A list of tuples of input symbols that describe a path in the product automaton
|
||||
* starting from some start state.
|
||||
*/
|
||||
class Trace extends TTrace {
|
||||
/**
|
||||
* Gets a string representation of this Trace that can be used for debug purposes.
|
||||
*/
|
||||
string toString() {
|
||||
this = Nil() and result = "Nil()"
|
||||
or
|
||||
exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, Trace t | this = Step(s1, s2, s3, t) |
|
||||
result = "Step(" + s1 + ", " + s2 + ", " + s3 + ", " + t + ")"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there exists a transition from `r` to `q` in the product automaton.
|
||||
* Notice that the arguments are flipped, and thus the direction is backwards.
|
||||
*/
|
||||
pragma[noinline]
|
||||
predicate tupleDeltaBackwards(StateTuple q, StateTuple r) { step(r, _, _, _, q) }
|
||||
|
||||
/**
|
||||
* Holds if `tuple` is an end state in our search.
|
||||
* That means there exists a pair of loops `(pivot, succ)` such that `tuple = (pivot, succ, succ)`.
|
||||
*/
|
||||
predicate isEndTuple(StateTuple tuple) { tuple = getAnEndTuple(_, _) }
|
||||
|
||||
/**
|
||||
* Gets the minimum length of a path from `r` to some end state `end`.
|
||||
*
|
||||
* The implementation searches backwards from the end-tuple.
|
||||
* This approach was chosen because it is way more efficient if the first predicate given to `shortestDistances` is small.
|
||||
* The `end` argument must always be an end state.
|
||||
*/
|
||||
int distBackFromEnd(StateTuple r, StateTuple end) =
|
||||
shortestDistances(isEndTuple/1, tupleDeltaBackwards/2)(end, r, result)
|
||||
|
||||
/**
|
||||
* Holds if there exists a pair of repetitions `(pivot, succ)` in the regular expression such that:
|
||||
* `tuple` is reachable from `(pivot, pivot, succ)` in the product automaton,
|
||||
* and there is a distance of `dist` from `tuple` to the nearest end-tuple `(pivot, succ, succ)`,
|
||||
* and a path from a start-state to `tuple` follows the transitions in `trace`.
|
||||
*/
|
||||
predicate isReachableFromStartTuple(State pivot, State succ, StateTuple tuple, Trace trace, int dist) {
|
||||
// base case. The first step is inlined to start the search after all possible 1-steps, and not just the ones with the shortest path.
|
||||
exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, State q1, State q2, State q3 |
|
||||
isStartLoops(pivot, succ) and
|
||||
step(MkStateTuple(pivot, pivot, succ), s1, s2, s3, tuple) and
|
||||
tuple = MkStateTuple(q1, q2, q3) and
|
||||
trace = Step(s1, s2, s3, Nil()) and
|
||||
dist = distBackFromEnd(tuple, MkStateTuple(pivot, succ, succ))
|
||||
)
|
||||
or
|
||||
// recursive case
|
||||
exists(StateTuple p, Trace v, InputSymbol s1, InputSymbol s2, InputSymbol s3 |
|
||||
isReachableFromStartTuple(pivot, succ, p, v, dist + 1) and
|
||||
dist = isReachableFromStartTupleHelper(pivot, succ, tuple, p, s1, s2, s3) and
|
||||
trace = Step(s1, s2, s3, v)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper predicate for the recursive case in `isReachableFromStartTuple`.
|
||||
*/
|
||||
pragma[noinline]
|
||||
private int isReachableFromStartTupleHelper(
|
||||
State pivot, State succ, StateTuple r, StateTuple p, InputSymbol s1, InputSymbol s2,
|
||||
InputSymbol s3
|
||||
) {
|
||||
result = distBackFromEnd(r, MkStateTuple(pivot, succ, succ)) and
|
||||
step(p, s1, s2, s3, r)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the tuple `(pivot, succ, succ)` from the product automaton.
|
||||
*/
|
||||
StateTuple getAnEndTuple(State pivot, State succ) {
|
||||
isStartLoops(pivot, succ) and
|
||||
result = MkStateTuple(pivot, succ, succ)
|
||||
}
|
||||
|
||||
private predicate hasSuffix(Trace suffix, Trace t, int i) {
|
||||
// Declaring `t` to be a `RelevantTrace` currently causes a redundant check in the
|
||||
// recursive case, so instead we check it explicitly here.
|
||||
t instanceof RelevantTrace and
|
||||
i = 0 and
|
||||
suffix = t
|
||||
or
|
||||
hasSuffix(Step(_, _, _, suffix), t, i - 1)
|
||||
}
|
||||
|
||||
pragma[noinline]
|
||||
private predicate hasTuple(InputSymbol s1, InputSymbol s2, InputSymbol s3, Trace t, int i) {
|
||||
hasSuffix(Step(s1, s2, s3, _), t, i)
|
||||
}
|
||||
|
||||
private class RelevantTrace extends Trace, Step {
|
||||
RelevantTrace() {
|
||||
exists(State pivot, State succ, StateTuple q |
|
||||
isReachableFromStartTuple(pivot, succ, q, this, _) and
|
||||
q = getAnEndTuple(pivot, succ)
|
||||
)
|
||||
}
|
||||
|
||||
pragma[noinline]
|
||||
private string getAThreewayIntersect(int i) {
|
||||
exists(InputSymbol s1, InputSymbol s2, InputSymbol s3 |
|
||||
hasTuple(s1, s2, s3, this, i) and
|
||||
result = getAThreewayIntersect(s1, s2, s3)
|
||||
)
|
||||
}
|
||||
|
||||
/** Gets a string corresponding to this trace. */
|
||||
// the pragma is needed for the case where `getAThreewayIntersect(s1, s2, s3)` has multiple values,
|
||||
// not for recursion
|
||||
language[monotonicAggregates]
|
||||
string concretise() {
|
||||
result =
|
||||
strictconcat(int i |
|
||||
hasTuple(_, _, _, this, i)
|
||||
|
|
||||
this.getAThreewayIntersect(i) order by i desc
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if matching repetitions of `pump` can:
|
||||
* 1) Transition from `pivot` back to `pivot`.
|
||||
* 2) Transition from `pivot` to `succ`.
|
||||
* 3) Transition from `succ` to `succ`.
|
||||
*
|
||||
* From theorem 3 in the paper linked at the top of this file, we can therefore conclude that
|
||||
* the regular expression has polynomial backtracking - if a rejecting suffix exists.
|
||||
*
|
||||
* This predicate is used by `SuperLinearReDoSConfiguration`, and the final results are
|
||||
* available in the `hasReDoSResult` predicate.
|
||||
*/
|
||||
predicate isPumpable(State pivot, State succ, string pump) {
|
||||
exists(StateTuple q, RelevantTrace t |
|
||||
isReachableFromStartTuple(pivot, succ, q, t, _) and
|
||||
q = getAnEndTuple(pivot, succ) and
|
||||
pump = t.concretise()
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if repetitions of `pump` at `t` will cause polynomial backtracking.
|
||||
*/
|
||||
predicate polynimalReDoS(RegExpTerm t, string pump, string prefixMsg, RegExpTerm prev) {
|
||||
exists(State s, State pivot |
|
||||
hasReDoSResult(t, pump, s, prefixMsg) and
|
||||
isPumpable(pivot, s, _) and
|
||||
prev = pivot.getRepr()
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a message for why `term` can cause polynomial backtracking.
|
||||
*/
|
||||
string getReasonString(RegExpTerm term, string pump, string prefixMsg, RegExpTerm prev) {
|
||||
polynimalReDoS(term, pump, prefixMsg, prev) and
|
||||
result =
|
||||
"Strings " + prefixMsg + "with many repetitions of '" + pump +
|
||||
"' can start matching anywhere after the start of the preceeding " + prev
|
||||
}
|
||||
|
||||
/**
|
||||
* A term that may cause a regular expression engine to perform a
|
||||
* polynomial number of match attempts, relative to the input length.
|
||||
*/
|
||||
class PolynomialBackTrackingTerm extends InfiniteRepetitionQuantifier {
|
||||
string reason;
|
||||
string pump;
|
||||
string prefixMsg;
|
||||
RegExpTerm prev;
|
||||
|
||||
PolynomialBackTrackingTerm() {
|
||||
reason = getReasonString(this, pump, prefixMsg, prev) and
|
||||
// there might be many reasons for this term to have polynomial backtracking - we pick the shortest one.
|
||||
reason = min(string msg | msg = getReasonString(this, _, _, _) | msg order by msg.length(), msg)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if all non-empty successors to the polynomial backtracking term match the end of the line.
|
||||
*/
|
||||
predicate isAtEndLine() {
|
||||
forall(RegExpTerm succ | this.getSuccessor+() = succ and not matchesEpsilon(succ) |
|
||||
succ instanceof RegExpDollar
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the string that should be repeated to cause this regular expression to perform polynomially.
|
||||
*/
|
||||
string getPumpString() { result = pump }
|
||||
|
||||
/**
|
||||
* Gets a message for which prefix a matching string must start with for this term to cause polynomial backtracking.
|
||||
*/
|
||||
string getPrefixMessage() { result = prefixMsg }
|
||||
|
||||
/**
|
||||
* Gets a predecessor to `this`, which also loops on the pump string, and thereby causes polynomial backtracking.
|
||||
*/
|
||||
RegExpTerm getPreviousLoop() { result = prev }
|
||||
|
||||
/**
|
||||
* Gets the reason for the number of match attempts.
|
||||
*/
|
||||
string getReason() { result = reason }
|
||||
}
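// A small consumer sketch, as a separate `.ql` file: list each polynomial backtracking
// term together with its explanation (the trailing period is just message formatting):
import codeql.ruby.security.regexp.SuperlinearBackTracking

from PolynomialBackTrackingTerm t
select t, t.getReason() + "."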
|
||||
deprecated import codeql.ruby.security.regexp.SuperlinearBackTracking as Dep
|
||||
import Dep
|
||||
|
||||
@@ -0,0 +1,344 @@
|
||||
/**
|
||||
* This library implements the analysis described in the following two papers:
|
||||
*
|
||||
* James Kirrage, Asiri Rathnayake, Hayo Thielecke: Static Analysis for
|
||||
* Regular Expression Denial-of-Service Attacks. NSS 2013.
|
||||
* (http://www.cs.bham.ac.uk/~hxt/research/reg-exp-sec.pdf)
|
||||
* Asiri Rathnayake, Hayo Thielecke: Static Analysis for Regular Expression
|
||||
* Exponential Runtime via Substructural Logics. 2014.
|
||||
* (https://www.cs.bham.ac.uk/~hxt/research/redos_full.pdf)
|
||||
*
|
||||
* The basic idea is to search for overlapping cycles in the NFA, that is,
|
||||
* states `q` such that there are two distinct paths from `q` to itself
|
||||
* that consume the same word `w`.
|
||||
*
|
||||
* For any such state `q`, an attack string can be constructed as follows:
|
||||
* concatenate a prefix `v` that takes the NFA to `q` with `n` copies of
|
||||
* the word `w` that leads back to `q` along two different paths, followed
|
||||
* by a suffix `x` that is _not_ accepted in state `q`. A backtracking
|
||||
* implementation will need to explore at least 2^n different ways of going
|
||||
* from `q` back to itself while trying to match the `n` copies of `w`
|
||||
* before finally giving up.
|
||||
*
|
||||
* Now in order to identify overlapping cycles, all we have to do is find
|
||||
* pumpable forks, that is, states `q` that can transition to two different
|
||||
* states `r1` and `r2` on the same input symbol `c`, such that there are
|
||||
* paths from both `r1` and `r2` to `q` that consume the same word. The latter
|
||||
* condition is equivalent to saying that `(q, q)` is reachable from `(r1, r2)`
|
||||
* in the product NFA.
|
||||
*
|
||||
* This is what the library does. It makes a simple attempt to construct a
|
||||
* prefix `v` leading into `q`, but only to improve the alert message.
|
||||
* And the library tries to prove the existence of a suffix that ensures
|
||||
* rejection. This check might fail, which can cause false positives.
|
||||
*
|
||||
* Finally, sometimes it depends on the translation whether the NFA generated
|
||||
* for a regular expression has a pumpable fork or not. We implement one
|
||||
* particular translation, which may result in false positives or negatives
|
||||
* relative to some particular JavaScript engine.
|
||||
*
|
||||
* More precisely, the library constructs an NFA from a regular expression `r`
|
||||
* as follows:
|
||||
*
|
||||
* * Every sub-term `t` gives rise to an NFA state `Match(t,i)`, representing
|
||||
* the state of the automaton before attempting to match the `i`th character in `t`.
|
||||
* * There is one accepting state `Accept(r)`.
|
||||
* * There is a special `AcceptAnySuffix(r)` state, which accepts any suffix string
|
||||
* by using an epsilon transition to `Accept(r)` and an any transition to itself.
|
||||
* * Transitions between states may be labelled with epsilon, or an abstract
|
||||
* input symbol.
|
||||
* * Each abstract input symbol represents a set of concrete input characters:
|
||||
* either a single character, a set of characters represented by a
|
||||
* character class, or the set of all characters.
|
||||
* * The product automaton is constructed lazily, starting with pair states
|
||||
* `(q, q)` where `q` is a fork, and proceeding along an over-approximate
|
||||
* step relation.
|
||||
* * The over-approximate step relation allows transitions along pairs of
|
||||
* abstract input symbols where the symbols have overlap in the characters they accept.
|
||||
* * Once a trace of pairs of abstract input symbols that leads from a fork
|
||||
* back to itself has been identified, we attempt to construct a concrete
|
||||
* string corresponding to it, which may fail.
|
||||
* * Lastly we ensure that any state reached by repeating `n` copies of `w` has
|
||||
* a suffix `x` (possibly empty) that is most likely __not__ accepted.
|
||||
*/
|
||||
|
||||
import NfaUtils
|
||||
|
||||
/**
|
||||
* Holds if state `s` might be inside a backtracking repetition.
|
||||
*/
|
||||
pragma[noinline]
|
||||
private predicate stateInsideBacktracking(State s) {
|
||||
s.getRepr().getParent*() instanceof MaybeBacktrackingRepetition
|
||||
}
|
||||
|
||||
/**
|
||||
* An infinitely repeating quantifier that might backtrack.
|
||||
*/
|
||||
private class MaybeBacktrackingRepetition extends InfiniteRepetitionQuantifier {
|
||||
MaybeBacktrackingRepetition() {
|
||||
exists(RegExpTerm child |
|
||||
child instanceof RegExpAlt or
|
||||
child instanceof RegExpQuantifier
|
||||
|
|
||||
child.getParent+() = this
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A state in the product automaton.
|
||||
*/
|
||||
private newtype TStatePair =
|
||||
/**
|
||||
* We lazily only construct those states that we are actually
|
||||
* going to need: `(q, q)` for every fork state `q`, and any
|
||||
* pair of states that can be reached from a pair that we have
|
||||
* already constructed. To cut down on the number of states,
|
||||
* we only represent states `(q1, q2)` where `q1` is lexicographically
|
||||
* no bigger than `q2`.
|
||||
*
|
||||
* States are only constructed if both states in the pair are
|
||||
* inside a repetition that might backtrack.
|
||||
*/
|
||||
MkStatePair(State q1, State q2) {
|
||||
isFork(q1, _, _, _, _) and q2 = q1
|
||||
or
|
||||
(step(_, _, _, q1, q2) or step(_, _, _, q2, q1)) and
|
||||
rankState(q1) <= rankState(q2)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a unique number for a `state`.
|
||||
* Is used to create an ordering of states, where states with the same `toString()` will be ordered differently.
|
||||
*/
|
||||
private int rankState(State state) {
|
||||
state =
|
||||
rank[result](State s, Location l |
|
||||
l = s.getRepr().getLocation()
|
||||
|
|
||||
s order by l.getStartLine(), l.getStartColumn(), s.toString()
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* A state in the product automaton.
|
||||
*/
|
||||
private class StatePair extends TStatePair {
|
||||
State q1;
|
||||
State q2;
|
||||
|
||||
StatePair() { this = MkStatePair(q1, q2) }
|
||||
|
||||
/** Gets a textual representation of this element. */
|
||||
string toString() { result = "(" + q1 + ", " + q2 + ")" }
|
||||
|
||||
/** Gets the first component of the state pair. */
|
||||
State getLeft() { result = q1 }
|
||||
|
||||
/** Gets the second component of the state pair. */
|
||||
State getRight() { result = q2 }
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds for `(fork, fork)` state pairs when `isFork(fork, _, _, _, _)` holds.
|
||||
*
|
||||
* Used in `statePairDistToFork`
|
||||
*/
|
||||
private predicate isStatePairFork(StatePair p) {
|
||||
exists(State fork | p = MkStatePair(fork, fork) and isFork(fork, _, _, _, _))
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to the corresponding
|
||||
* components of `r`.
|
||||
*
|
||||
* Used in `statePairDistToFork`
|
||||
*/
|
||||
private predicate reverseStep(StatePair r, StatePair q) { step(q, _, _, r) }
|
||||
|
||||
/**
|
||||
* Gets the minimum length of a path from `q` to `r` in the
|
||||
* product automaton.
|
||||
*/
|
||||
private int statePairDistToFork(StatePair q, StatePair r) =
|
||||
shortestDistances(isStatePairFork/1, reverseStep/2)(r, q, result)
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from `q` to `r1` and from `q` to `r2`
|
||||
* labelled with `s1` and `s2`, respectively, where `s1` and `s2` do not
|
||||
* trivially have an empty intersection.
|
||||
*
|
||||
* This predicate only holds for states associated with regular expressions
|
||||
* that have at least one repetition quantifier in them (otherwise the
|
||||
* expression cannot be vulnerable to ReDoS attacks anyway).
|
||||
*/
|
||||
pragma[noopt]
|
||||
private predicate isFork(State q, InputSymbol s1, InputSymbol s2, State r1, State r2) {
|
||||
stateInsideBacktracking(q) and
|
||||
exists(State q1, State q2 |
|
||||
q1 = epsilonSucc*(q) and
|
||||
delta(q1, s1, r1) and
|
||||
q2 = epsilonSucc*(q) and
|
||||
delta(q2, s2, r2) and
|
||||
// Use pragma[noopt] to prevent intersect(s1,s2) from being the starting point of the join.
|
||||
// From (s1,s2) it would find a huge number of intermediate state pairs (q1,q2) originating from different literals,
|
||||
// and discover at the end that no `q` can reach both `q1` and `q2` by epsilon transitions.
|
||||
exists(intersect(s1, s2))
|
||||
|
|
||||
s1 != s2
|
||||
or
|
||||
r1 != r2
|
||||
or
|
||||
r1 = r2 and q1 != q2
|
||||
or
|
||||
// If q can reach itself by epsilon transitions, then there are two distinct paths to the q1/q2 state:
|
||||
// one that uses the loop and one that doesn't. The engine will separately attempt to match with each path,
|
||||
// despite ending in the same state. The "fork" thus arises from the choice of whether to use the loop or not.
|
||||
// To avoid every state in the loop becoming a fork state,
|
||||
// we arbitrarily pick the InfiniteRepetitionQuantifier state as the canonical fork state for the loop
|
||||
// (every epsilon-loop must contain such a state).
|
||||
//
|
||||
// We additionally require that there exists another InfiniteRepetitionQuantifier `mid` on the path from `q` to itself.
|
||||
// This is done to avoid flagging regular expressions such as `/(a?)*b/` - that only has polynomial runtime, and is detected by `js/polynomial-redos`.
|
||||
// The below code is therefore a heuristic that only flags regular expressions such as `/(a*)*b/`,
// and does not flag regular expressions such as `/(a?b?)*c/`, but the latter pattern is not used frequently.
|
||||
r1 = r2 and
|
||||
q1 = q2 and
|
||||
epsilonSucc+(q) = q and
|
||||
exists(RegExpTerm term | term = q.getRepr() | term instanceof InfiniteRepetitionQuantifier) and
|
||||
// One of the mid states is an infinite quantifier itself
|
||||
exists(State mid, RegExpTerm term |
|
||||
mid = epsilonSucc+(q) and
|
||||
term = mid.getRepr() and
|
||||
term instanceof InfiniteRepetitionQuantifier and
|
||||
q = epsilonSucc+(mid) and
|
||||
not mid = q
|
||||
)
|
||||
) and
|
||||
stateInsideBacktracking(r1) and
|
||||
stateInsideBacktracking(r2)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the state pair `(q1, q2)` or `(q2, q1)`; note that only
|
||||
* one or the other is defined.
|
||||
*/
|
||||
private StatePair mkStatePair(State q1, State q2) {
|
||||
result = MkStatePair(q1, q2) or result = MkStatePair(q2, q1)
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to the corresponding
|
||||
* components of `r` labelled with `s1` and `s2`, respectively.
|
||||
*/
|
||||
private predicate step(StatePair q, InputSymbol s1, InputSymbol s2, StatePair r) {
|
||||
exists(State r1, State r2 | step(q, s1, s2, r1, r2) and r = mkStatePair(r1, r2))
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if there are transitions from the components of `q` to `r1` and `r2`
|
||||
* labelled with `s1` and `s2`, respectively.
|
||||
*
|
||||
* We only consider transitions where the resulting states `(r1, r2)` are both
|
||||
* inside a repetition that might backtrack.
|
||||
*/
|
||||
pragma[noopt]
|
||||
private predicate step(StatePair q, InputSymbol s1, InputSymbol s2, State r1, State r2) {
|
||||
exists(State q1, State q2 | q.getLeft() = q1 and q.getRight() = q2 |
|
||||
deltaClosed(q1, s1, r1) and
|
||||
deltaClosed(q2, s2, r2) and
|
||||
// use noopt to force the join on `intersect` to happen last.
|
||||
exists(intersect(s1, s2))
|
||||
) and
|
||||
stateInsideBacktracking(r1) and
|
||||
stateInsideBacktracking(r2)
|
||||
}
|
||||
|
||||
private newtype TTrace =
|
||||
Nil() or
|
||||
Step(InputSymbol s1, InputSymbol s2, TTrace t) { isReachableFromFork(_, _, s1, s2, t, _) }
|
||||
|
||||
/**
|
||||
* A list of pairs of input symbols that describe a path in the product automaton
|
||||
* starting from some fork state.
|
||||
*/
|
||||
private class Trace extends TTrace {
|
||||
/** Gets a textual representation of this element. */
|
||||
string toString() {
|
||||
this = Nil() and result = "Nil()"
|
||||
or
|
||||
exists(InputSymbol s1, InputSymbol s2, Trace t | this = Step(s1, s2, t) |
|
||||
result = "Step(" + s1 + ", " + s2 + ", " + t + ")"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `r` is reachable from `(fork, fork)` under input `w`, and there is
|
||||
* a path from `r` back to `(fork, fork)` with `rem` steps.
|
||||
*/
|
||||
private predicate isReachableFromFork(State fork, StatePair r, Trace w, int rem) {
|
||||
exists(InputSymbol s1, InputSymbol s2, Trace v |
|
||||
isReachableFromFork(fork, r, s1, s2, v, rem) and
|
||||
w = Step(s1, s2, v)
|
||||
)
|
||||
}
|
||||
|
||||
private predicate isReachableFromFork(
|
||||
State fork, StatePair r, InputSymbol s1, InputSymbol s2, Trace v, int rem
|
||||
) {
|
||||
// base case
|
||||
exists(State q1, State q2 |
|
||||
isFork(fork, s1, s2, q1, q2) and
|
||||
r = MkStatePair(q1, q2) and
|
||||
v = Nil() and
|
||||
rem = statePairDistToFork(r, MkStatePair(fork, fork))
|
||||
)
|
||||
or
|
||||
// recursive case
|
||||
exists(StatePair p |
|
||||
isReachableFromFork(fork, p, v, rem + 1) and
|
||||
step(p, s1, s2, r) and
|
||||
rem = statePairDistToFork(r, MkStatePair(fork, fork))
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a state in the product automaton from which `(fork, fork)` is
|
||||
* reachable in zero or more epsilon transitions.
|
||||
*/
|
||||
private StatePair getAForkPair(State fork) {
|
||||
isFork(fork, _, _, _, _) and
|
||||
result = MkStatePair(epsilonPred*(fork), epsilonPred*(fork))
|
||||
}
|
||||
|
||||
/** An implementation of a chain containing chars for use by `Concretizer`. */
|
||||
private module CharTreeImpl implements CharTree {
|
||||
class CharNode = Trace;
|
||||
|
||||
CharNode getPrev(CharNode t) { t = Step(_, _, result) }
|
||||
|
||||
/** Holds if `n` is a trace that is used by `concretize` in `isPumpable`. */
|
||||
predicate isARelevantEnd(CharNode n) {
|
||||
exists(State f | isReachableFromFork(f, getAForkPair(f), n, _))
|
||||
}
|
||||
|
||||
string getChar(CharNode t) {
|
||||
exists(InputSymbol s1, InputSymbol s2 | t = Step(s1, s2, _) | result = intersect(s1, s2))
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Holds if `fork` is a pumpable fork with word `w`.
|
||||
*/
|
||||
private predicate isPumpable(State fork, string w) {
|
||||
exists(StatePair q, Trace t |
|
||||
isReachableFromFork(fork, q, t, _) and
|
||||
q = getAForkPair(fork) and
|
||||
w = Concretizer<CharTreeImpl>::concretize(t)
|
||||
)
|
||||
}
|
||||
|
||||
/** Holds if `state` has exponential ReDoS */
|
||||
predicate hasReDoSResult = ReDoSPruning<isPumpable/2>::hasReDoSResult/4;
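// A minimal query sketch over this predicate, as a separate `.ql` file (the message text
// is illustrative, not the shipped `rb/redos` query):
import codeql.ruby.security.regexp.ExponentialBackTracking

from RegExpTerm t, string pump, State s, string prefixMsg
where hasReDoSResult(t, pump, s, prefixMsg)
select t,
  "Strings " + prefixMsg + "with many repetitions of '" + pump +
    "' may cause exponential backtracking."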
|
||||
Some files were not shown because too many files have changed in this diff