diff --git a/config/identical-files.json b/config/identical-files.json index 582e4c7b6dc..b3be90b75b2 100644 --- a/config/identical-files.json +++ b/config/identical-files.json @@ -448,5 +448,17 @@ "SensitiveDataHeuristics Python/JS": [ "javascript/ql/src/semmle/javascript/security/internal/SensitiveDataHeuristics.qll", "python/ql/src/semmle/python/security/internal/SensitiveDataHeuristics.qll" + ], + "ReDoS Util Python/JS": [ + "javascript/ql/src/semmle/javascript/security/performance/ReDoSUtil.qll", + "python/ql/src/semmle/python/regex/ReDoSUtil.qll" + ], + "ReDoS Exponential Python/JS": [ + "javascript/ql/src/semmle/javascript/security/performance/ExponentialBackTracking.qll", + "python/ql/src/semmle/python/regex/ExponentialBackTracking.qll" + ], + "ReDoS Polynomial Python/JS": [ + "javascript/ql/src/semmle/javascript/security/performance/SuperlinearBackTracking.qll", + "python/ql/src/semmle/python/regex/SuperlinearBackTracking.qll" ] } diff --git a/python/ql/src/Security/CWE-730/PolynomialBackTracking.ql b/python/ql/src/Security/CWE-730/PolynomialBackTracking.ql new file mode 100644 index 00000000000..a98d4eefa7e --- /dev/null +++ b/python/ql/src/Security/CWE-730/PolynomialBackTracking.ql @@ -0,0 +1,6 @@ +import python +import semmle.python.regex.SuperlinearBackTracking + +from PolynomialBackTrackingTerm t +where t.getLocation().getFile().getBaseName() = "KnownCVEs.py" +select t.getRegex(), t, t.getReason() diff --git a/python/ql/src/Security/CWE-730/PolynomialReDoS.ql b/python/ql/src/Security/CWE-730/PolynomialReDoS.ql new file mode 100644 index 00000000000..b948c1601a8 --- /dev/null +++ b/python/ql/src/Security/CWE-730/PolynomialReDoS.ql @@ -0,0 +1,33 @@ +/** + * @name Polynomial regular expression used on uncontrolled data + * @description A regular expression that can require polynomial time + * to match may be vulnerable to denial-of-service attacks. + * @kind path-problem + * @problem.severity warning + * @precision high + * @id py/polynomial-redos + * @tags security + * external/cwe/cwe-730 + * external/cwe/cwe-400 + */ + +import python +import semmle.python.regex.SuperlinearBackTracking +import semmle.python.security.dataflow.PolynomialReDoS +import DataFlow::PathGraph + +from + PolynomialReDoSConfiguration config, DataFlow::PathNode source, DataFlow::PathNode sink, + PolynomialReDoSSink sinkNode, PolynomialBackTrackingTerm regexp +where + config.hasFlowPath(source, sink) and + sinkNode = sink.getNode() and + regexp.getRootTerm() = sinkNode.getRegExp() +// not ( +// source.getNode().(Source).getKind() = "url" and +// regexp.isAtEndLine() +// ) +select sinkNode.getHighlight(), source, sink, + "This $@ that depends on $@ may run slow on strings " + regexp.getPrefixMessage() + + "with many repetitions of '" + regexp.getPumpString() + "'.", regexp, "regular expression", + source.getNode(), "a user-provided value" diff --git a/python/ql/src/Security/CWE-730/ReDoS.ql b/python/ql/src/Security/CWE-730/ReDoS.ql new file mode 100644 index 00000000000..aebc2f81cff --- /dev/null +++ b/python/ql/src/Security/CWE-730/ReDoS.ql @@ -0,0 +1,25 @@ +/** + * @name Inefficient regular expression + * @description A regular expression that requires exponential time to match certain inputs + * can be a performance bottleneck, and may be vulnerable to denial-of-service + * attacks. 
+ * @kind problem + * @problem.severity error + * @precision high + * @id py/redos + * @tags security + * external/cwe/cwe-730 + * external/cwe/cwe-400 + */ + +import python +import semmle.python.regex.ExponentialBackTracking + +from RegExpTerm t, string pump, State s, string prefixMsg +where + hasReDoSResult(t, pump, s, prefixMsg) and + // exclude verbose mode regexes for now + not t.getRegex().getAMode() = "VERBOSE" +select t, + "This part of the regular expression may cause exponential backtracking on strings " + prefixMsg + + "containing many repetitions of '" + pump + "'." diff --git a/python/ql/src/semmle/python/regex/ExponentialBackTracking.qll b/python/ql/src/semmle/python/regex/ExponentialBackTracking.qll new file mode 100644 index 00000000000..61707b1f392 --- /dev/null +++ b/python/ql/src/semmle/python/regex/ExponentialBackTracking.qll @@ -0,0 +1,342 @@ +/** + * This library implements the analysis described in the following two papers: + * + * James Kirrage, Asiri Rathnayake, Hayo Thielecke: Static Analysis for + * Regular Expression Denial-of-Service Attacks. NSS 2013. + * (http://www.cs.bham.ac.uk/~hxt/research/reg-exp-sec.pdf) + * Asiri Rathnayake, Hayo Thielecke: Static Analysis for Regular Expression + * Exponential Runtime via Substructural Logics. 2014. + * (https://www.cs.bham.ac.uk/~hxt/research/redos_full.pdf) + * + * The basic idea is to search for overlapping cycles in the NFA, that is, + * states `q` such that there are two distinct paths from `q` to itself + * that consume the same word `w`. + * + * For any such state `q`, an attack string can be constructed as follows: + * concatenate a prefix `v` that takes the NFA to `q` with `n` copies of + * the word `w` that leads back to `q` along two different paths, followed + * by a suffix `x` that is _not_ accepted in state `q`. A backtracking + * implementation will need to explore at least 2^n different ways of going + * from `q` back to itself while trying to match the `n` copies of `w` + * before finally giving up. + * + * Now in order to identify overlapping cycles, all we have to do is find + * pumpable forks, that is, states `q` that can transition to two different + * states `r1` and `r2` on the same input symbol `c`, such that there are + * paths from both `r1` and `r2` to `q` that consume the same word. The latter + * condition is equivalent to saying that `(q, q)` is reachable from `(r1, r2)` + * in the product NFA. + * + * This is what the library does. It makes a simple attempt to construct a + * prefix `v` leading into `q`, but only to improve the alert message. + * And the library tries to prove the existence of a suffix that ensures + * rejection. This check might fail, which can cause false positives. + * + * Finally, sometimes it depends on the translation whether the NFA generated + * for a regular expression has a pumpable fork or not. We implement one + * particular translation, which may result in false positives or negatives + * relative to some particular JavaScript engine. + * + * More precisely, the library constructs an NFA from a regular expression `r` + * as follows: + * + * * Every sub-term `t` gives rise to an NFA state `Match(t,i)`, representing + * the state of the automaton before attempting to match the `i`th character in `t`. + * * There is one accepting state `Accept(r)`. + * * There is a special `AcceptAnySuffix(r)` state, which accepts any suffix string + * by using an epsilon transition to `Accept(r)` and an any transition to itself. 
+ *  * Transitions between states may be labelled with epsilon, or an abstract
+ *    input symbol.
+ *  * Each abstract input symbol represents a set of concrete input characters:
+ *    either a single character, a set of characters represented by a
+ *    character class, or the set of all characters.
+ *  * The product automaton is constructed lazily, starting with pair states
+ *    `(q, q)` where `q` is a fork, and proceeding along an over-approximate
+ *    step relation.
+ *  * The over-approximate step relation allows transitions along pairs of
+ *    abstract input symbols where the symbols have overlap in the characters they accept.
+ *  * Once a trace of pairs of abstract input symbols that leads from a fork
+ *    back to itself has been identified, we attempt to construct a concrete
+ *    string corresponding to it, which may fail.
+ *  * Lastly we ensure that any state reached by repeating `n` copies of `w` has
+ *    a suffix `x` (possibly empty) that is most likely __not__ accepted.
+ */
+
+import ReDoSUtil
+
+/**
+ * Holds if state `s` might be inside a backtracking repetition.
+ */
+pragma[noinline]
+predicate stateInsideBacktracking(State s) {
+  s.getRepr().getParent*() instanceof MaybeBacktrackingRepetition
+}
+
+/**
+ * An infinitely repeating quantifier that might backtrack.
+ */
+class MaybeBacktrackingRepetition extends InfiniteRepetitionQuantifier {
+  MaybeBacktrackingRepetition() {
+    exists(RegExpTerm child |
+      child instanceof RegExpAlt or
+      child instanceof RegExpQuantifier
+    |
+      child.getParent+() = this
+    )
+  }
+}
+
+/**
+ * A state in the product automaton.
+ */
+newtype TStatePair =
+  /**
+   * We lazily only construct those states that we are actually
+   * going to need: `(q, q)` for every fork state `q`, and any
+   * pair of states that can be reached from a pair that we have
+   * already constructed. To cut down on the number of states,
+   * we only represent states `(q1, q2)` where `q1` is lexicographically
+   * no bigger than `q2`.
+   *
+   * States are only constructed if both states in the pair are
+   * inside a repetition that might backtrack.
+   */
+  MkStatePair(State q1, State q2) {
+    isFork(q1, _, _, _, _) and q2 = q1
+    or
+    (step(_, _, _, q1, q2) or step(_, _, _, q2, q1)) and
+    rankState(q1) <= rankState(q2)
+  }
+
+/**
+ * Gets a unique number for a `state`.
+ * It is used to create an ordering of states, in which states with the same `toString()` are ordered differently.
+ */
+int rankState(State state) {
+  state =
+    rank[result](State s, Location l |
+      l = s.getRepr().getLocation()
+    |
+      s order by l.getStartLine(), l.getStartColumn(), s.toString()
+    )
+}
+
+/**
+ * A state in the product automaton.
+ */
+class StatePair extends TStatePair {
+  State q1;
+  State q2;
+
+  StatePair() { this = MkStatePair(q1, q2) }
+
+  /** Gets a textual representation of this element. */
+  string toString() { result = "(" + q1 + ", " + q2 + ")" }
+
+  /** Gets the first component of the state pair. */
+  State getLeft() { result = q1 }
+
+  /** Gets the second component of the state pair. */
+  State getRight() { result = q2 }
+}
+
+/**
+ * Holds for all constructed state pairs.
+ *
+ * Used in `statePairDist`
+ */
+predicate isStatePair(StatePair p) { any() }
+
+/**
+ * Holds if there are transitions from the components of `q` to the corresponding
+ * components of `r`.
+ *
+ * Used in `statePairDist`
+ */
+predicate delta2(StatePair q, StatePair r) { step(q, _, _, r) }
+
+/**
+ * Gets the minimum length of a path from `q` to `r` in the
+ * product automaton.
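As an illustrative aside, the attack-string construction described in the library comment above (a prefix `v`, `n` copies of `w`, and a rejecting suffix `x`) can be reproduced on any backtracking engine. A minimal Python sketch; the regex and input sizes are chosen only for illustration:

import re
import time

# '(a+)+b' has a pumpable fork: each 'a' can be consumed by either the inner or the outer loop.
pattern = re.compile(r'^(a+)+b$')

for n in range(16, 23):
    attack = 'a' * n + 'c'  # empty prefix, n copies of the pump 'a', rejecting suffix 'c'
    start = time.perf_counter()
    pattern.match(attack)   # never matches, but explores roughly 2^n ways of splitting the 'a's
    print(n, round(time.perf_counter() - start, 3))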
+ */
+int statePairDist(StatePair q, StatePair r) =
+  shortestDistances(isStatePair/1, delta2/2)(q, r, result)
+
+/**
+ * Holds if there are transitions from `q` to `r1` and from `q` to `r2`
+ * labelled with `s1` and `s2`, respectively, where `s1` and `s2` do not
+ * trivially have an empty intersection.
+ *
+ * This predicate only holds for states associated with regular expressions
+ * that have at least one repetition quantifier in them (otherwise the
+ * expression cannot be vulnerable to ReDoS attacks anyway).
+ */
+pragma[noopt]
+predicate isFork(State q, InputSymbol s1, InputSymbol s2, State r1, State r2) {
+  stateInsideBacktracking(q) and
+  exists(State q1, State q2 |
+    q1 = epsilonSucc*(q) and
+    delta(q1, s1, r1) and
+    q2 = epsilonSucc*(q) and
+    delta(q2, s2, r2) and
+    // Use pragma[noopt] to prevent intersect(s1,s2) from being the starting point of the join.
+    // From (s1,s2) it would find a huge number of intermediate state pairs (q1,q2) originating from different literals,
+    // and discover at the end that no `q` can reach both `q1` and `q2` by epsilon transitions.
+    exists(intersect(s1, s2))
+  |
+    s1 != s2
+    or
+    r1 != r2
+    or
+    r1 = r2 and q1 != q2
+    or
+    // If q can reach itself by epsilon transitions, then there are two distinct paths to the q1/q2 state:
+    // one that uses the loop and one that doesn't. The engine will separately attempt to match with each path,
+    // despite ending in the same state. The "fork" thus arises from the choice of whether to use the loop or not.
+    // To avoid every state in the loop becoming a fork state,
+    // we arbitrarily pick the InfiniteRepetitionQuantifier state as the canonical fork state for the loop
+    // (every epsilon-loop must contain such a state).
+    //
+    // We additionally require that there exists another InfiniteRepetitionQuantifier `mid` on the path from `q` to itself.
+    // This is done to avoid flagging regular expressions such as `/(a?)*b/`, which only has polynomial runtime and is detected by `js/polynomial-redos`.
+    // The code below is therefore a heuristic that only flags regular expressions such as `/(a*)*b/`,
+    // and does not flag regular expressions such as `/(a?b?)*c/`, but the latter pattern is not used frequently.
+    r1 = r2 and
+    q1 = q2 and
+    epsilonSucc+(q) = q and
+    exists(RegExpTerm term | term = q.getRepr() | term instanceof InfiniteRepetitionQuantifier) and
+    // One of the mid states is an infinite quantifier itself
+    exists(State mid, RegExpTerm term |
+      mid = epsilonSucc+(q) and
+      term = mid.getRepr() and
+      term instanceof InfiniteRepetitionQuantifier and
+      q = epsilonSucc+(mid) and
+      not mid = q
+    )
+  ) and
+  stateInsideBacktracking(r1) and
+  stateInsideBacktracking(r2)
+}
+
+/**
+ * Gets the state pair `(q1, q2)` or `(q2, q1)`; note that only
+ * one or the other is defined.
+ */
+StatePair mkStatePair(State q1, State q2) {
+  result = MkStatePair(q1, q2) or result = MkStatePair(q2, q1)
+}
+
+/**
+ * Holds if there are transitions from the components of `q` to the corresponding
+ * components of `r` labelled with `s1` and `s2`, respectively.
+ */
+predicate step(StatePair q, InputSymbol s1, InputSymbol s2, StatePair r) {
+  exists(State r1, State r2 | step(q, s1, s2, r1, r2) and r = mkStatePair(r1, r2))
+}
+
+/**
+ * Holds if there are transitions from the components of `q` to `r1` and `r2`
+ * labelled with `s1` and `s2`, respectively.
+ *
+ * We only consider transitions where the resulting states `(r1, r2)` are both
+ * inside a repetition that might backtrack.
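The heuristic in `isFork` above distinguishes `/(a*)*b/` (flagged here) from `/(a?)*b/` (left to the polynomial-ReDoS query). A rough Python comparison; timings will of course vary by engine and machine:

import re
import time

def match_time(regex, text):
    start = time.perf_counter()
    re.match(regex, text)
    return round(time.perf_counter() - start, 3)

subject = 'a' * 20 + 'c'
print('(a*)*b :', match_time(r'(a*)*b', subject))  # exponential backtracking
print('(a?)*b :', match_time(r'(a?)*b', subject))  # only polynomial behaviour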
+ */ +pragma[noopt] +predicate step(StatePair q, InputSymbol s1, InputSymbol s2, State r1, State r2) { + exists(State q1, State q2 | q.getLeft() = q1 and q.getRight() = q2 | + deltaClosed(q1, s1, r1) and + deltaClosed(q2, s2, r2) and + // use noopt to force the join on `intersect` to happen last. + exists(intersect(s1, s2)) + ) and + stateInsideBacktracking(r1) and + stateInsideBacktracking(r2) +} + +private newtype TTrace = + Nil() or + Step(InputSymbol s1, InputSymbol s2, TTrace t) { + exists(StatePair p | + isReachableFromFork(_, p, t, _) and + step(p, s1, s2, _) + ) + or + t = Nil() and isFork(_, s1, s2, _, _) + } + +/** + * A list of pairs of input symbols that describe a path in the product automaton + * starting from some fork state. + */ +class Trace extends TTrace { + /** Gets a textual representation of this element. */ + string toString() { + this = Nil() and result = "Nil()" + or + exists(InputSymbol s1, InputSymbol s2, Trace t | this = Step(s1, s2, t) | + result = "Step(" + s1 + ", " + s2 + ", " + t + ")" + ) + } +} + +/** + * Gets a string corresponding to the trace `t`. + */ +string concretise(Trace t) { + t = Nil() and result = "" + or + exists(InputSymbol s1, InputSymbol s2, Trace rest | t = Step(s1, s2, rest) | + result = concretise(rest) + intersect(s1, s2) + ) +} + +/** + * Holds if `r` is reachable from `(fork, fork)` under input `w`, and there is + * a path from `r` back to `(fork, fork)` with `rem` steps. + */ +predicate isReachableFromFork(State fork, StatePair r, Trace w, int rem) { + // base case + exists(InputSymbol s1, InputSymbol s2, State q1, State q2 | + isFork(fork, s1, s2, q1, q2) and + r = MkStatePair(q1, q2) and + w = Step(s1, s2, Nil()) and + rem = statePairDist(r, MkStatePair(fork, fork)) + ) + or + // recursive case + exists(StatePair p, Trace v, InputSymbol s1, InputSymbol s2 | + isReachableFromFork(fork, p, v, rem + 1) and + step(p, s1, s2, r) and + w = Step(s1, s2, v) and + rem >= statePairDist(r, MkStatePair(fork, fork)) + ) +} + +/** + * Gets a state in the product automaton from which `(fork, fork)` is + * reachable in zero or more epsilon transitions. + */ +StatePair getAForkPair(State fork) { + isFork(fork, _, _, _, _) and + result = MkStatePair(epsilonPred*(fork), epsilonPred*(fork)) +} + +/** + * Holds if `fork` is a pumpable fork with word `w`. + */ +predicate isPumpable(State fork, string w) { + exists(StatePair q, Trace t | + isReachableFromFork(fork, q, t, _) and + q = getAForkPair(fork) and + w = concretise(t) + ) +} + +/** + * An instantiation of `ReDoSConfiguration` for exponential backtracking. + */ +class ExponentialReDoSConfiguration extends ReDoSConfiguration { + ExponentialReDoSConfiguration() { this = "ExponentialReDoSConfiguration" } + + override predicate isReDoSCandidate(State state, string pump) { isPumpable(state, pump) } +} diff --git a/python/ql/src/semmle/python/regex/ReDoSUtil.qll b/python/ql/src/semmle/python/regex/ReDoSUtil.qll new file mode 100644 index 00000000000..40b05825bc6 --- /dev/null +++ b/python/ql/src/semmle/python/regex/ReDoSUtil.qll @@ -0,0 +1,1034 @@ +/** + * Provides classes for working with regular expressions that can + * perform backtracking in superlinear/exponential time. + * + * This module contains a number of utility predicates for compiling a regular expression into a NFA and reasoning about this NFA. + * + * The `ReDoSConfiguration` contains a `isReDoSCandidate` predicate that is used to + * to determine which states the prefix/suffix search should happen on. 
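A reported result consists of a fork state and a pump string; turning that into a concrete attack input is mechanical. A small sketch, where the prefix, pump, and suffix values are invented for illustration and are not output of the query itself:

prefix, pump, rejecting_suffix = '', 'a', '!'

def attack_input(n):
    # prefix that reaches the fork, n copies of the pump, then a suffix that forces rejection
    return prefix + pump * n + rejecting_suffix

print(attack_input(5))  # 'aaaaa!'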
+ * There is only meant to exist one `ReDoSConfiguration` at a time. + * + * The predicate `hasReDoSResult` outputs a de-duplicated set of + * states that will cause backtracking (a rejecting suffix exists). + */ + +import RegExpTreeView + +/** + * A configuration for which parts of a regular expression should be considered relevant for + * the different predicates in `ReDoS.qll`. + * Used to adjust the computations for either superlinear or exponential backtracking. + */ +abstract class ReDoSConfiguration extends string { + bindingset[this] + ReDoSConfiguration() { any() } + + /** + * Holds if `state` with the pump string `pump` is a candidate for a + * ReDoS vulnerable state. + * This is used to determine which states are considered for the prefix/suffix construction. + */ + abstract predicate isReDoSCandidate(State state, string pump); +} + +/** + * Holds if repeating `pump' starting at `state` is a candidate for causing backtracking. + * No check whether a rejected suffix exists has been made. + */ +private predicate isReDoSCandidate(State state, string pump) { + any(ReDoSConfiguration conf).isReDoSCandidate(state, pump) and + ( + not any(ReDoSConfiguration conf).isReDoSCandidate(epsilonSucc+(state), _) + or + epsilonSucc+(state) = state and + state = + max(State s, Location l | + s = epsilonSucc+(state) and + l = s.getRepr().getLocation() and + any(ReDoSConfiguration conf).isReDoSCandidate(s, _) and + s.getRepr() instanceof InfiniteRepetitionQuantifier + | + s order by l.getStartLine(), l.getStartColumn(), l.getEndColumn(), l.getEndLine() + ) + ) +} + +/** + * Gets the char after `c` (from a simplified ASCII table). + */ +private string nextChar(string c) { exists(int code | code = ascii(c) | code + 1 = ascii(result)) } + +/** + * Gets an approximation for the ASCII code for `char`. + * Only the easily printable chars are included (so no newline, tab, null, etc). + */ +private int ascii(string char) { + char = + rank[result](string c | + c = + "! \"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~" + .charAt(_) + ) +} + +/** + * A branch in a disjunction that is the root node in a literal, or a literal + * whose root node is not a disjunction. + */ +class RegExpRoot extends RegExpTerm { + RegExpParent parent; + + RegExpRoot() { + exists(RegExpAlt alt | + alt.isRootTerm() and + this = alt.getAChild() and + parent = alt.getParent() + ) + or + this.isRootTerm() and + not this instanceof RegExpAlt and + parent = this.getParent() + } + + /** + * Holds if this root term is relevant to the ReDoS analysis. + */ + predicate isRelevant() { + // there is at least one repetition + getRoot(any(InfiniteRepetitionQuantifier q)) = this and + // there are no lookbehinds + not exists(RegExpLookbehind lbh | getRoot(lbh) = this) and + // is actually used as a RegExp + isUsedAsRegExp() and + // not excluded for library specific reasons + not isExcluded(getRootTerm().getParent()) + } +} + +/** + * A constant in a regular expression that represents valid Unicode character(s). + */ +private class RegexpCharacterConstant extends RegExpConstant { + RegexpCharacterConstant() { this.isCharacter() } +} + +/** + * Holds if `term` is the chosen canonical representative for all terms with string representation `str`. + * + * Using canonical representatives gives a huge performance boost when working with tuples containing multiple `InputSymbol`s. + * The number of `InputSymbol`s is decreased by 3 orders of magnitude or more in some larger benchmarks. 
+ */ +private predicate isCanonicalTerm(RegExpTerm term, string str) { + term = + rank[1](RegExpTerm t, Location loc, File file | + loc = t.getLocation() and + file = t.getFile() and + str = t.getRawValue() + | + t order by t.getFile().getRelativePath(), loc.getStartLine(), loc.getStartColumn() + ) +} + +/** + * An abstract input symbol, representing a set of concrete characters. + */ +private newtype TInputSymbol = + /** An input symbol corresponding to character `c`. */ + Char(string c) { + c = any(RegexpCharacterConstant cc | getRoot(cc).isRelevant()).getValue().charAt(_) + } or + /** + * An input symbol representing all characters matched by + * a (non-universal) character class that has string representation `charClassString`. + */ + CharClass(string charClassString) { + exists(RegExpTerm term | term.getRawValue() = charClassString | getRoot(term).isRelevant()) and + exists(RegExpTerm recc | isCanonicalTerm(recc, charClassString) | + recc instanceof RegExpCharacterClass and + not recc.(RegExpCharacterClass).isUniversalClass() + or + recc instanceof RegExpCharacterClassEscape + ) + } or + /** An input symbol representing all characters matched by `.`. */ + Dot() or + /** An input symbol representing all characters. */ + Any() or + /** An epsilon transition in the automaton. */ + Epsilon() + +/** + * Gets the canonical CharClass for `term`. + */ +CharClass getCanonicalCharClass(RegExpTerm term) { + exists(string str | isCanonicalTerm(term, str) | result = CharClass(str)) +} + +/** + * Holds if `a` and `b` are input symbols from the same regexp. + */ +private predicate sharesRoot(TInputSymbol a, TInputSymbol b) { + exists(RegExpRoot root | + belongsTo(a, root) and + belongsTo(b, root) + ) +} + +/** + * Holds if the `a` is an input symbol from a regexp that has root `root`. + */ +private predicate belongsTo(TInputSymbol a, RegExpRoot root) { + exists(State s | getRoot(s.getRepr()) = root | + delta(s, a, _) + or + delta(_, a, s) + ) +} + +/** + * An abstract input symbol, representing a set of concrete characters. + */ +class InputSymbol extends TInputSymbol { + InputSymbol() { not this instanceof Epsilon } + + /** + * Gets a string representation of this input symbol. + */ + string toString() { + this = Char(result) + or + this = CharClass(result) + or + this = Dot() and result = "." + or + this = Any() and result = "[^]" + } +} + +/** + * An abstract input symbol that represents a character class. + */ +abstract private class CharacterClass extends InputSymbol { + /** + * Gets a character that is relevant for intersection-tests involving this + * character class. + * + * Specifically, this is any of the characters mentioned explicitly in the + * character class, offset by one if it is inverted. For character class escapes, + * the result is as if the class had been written out as a series of intervals. + * + * This set is large enough to ensure that for any two intersecting character + * classes, one contains a relevant character from the other. + */ + abstract string getARelevantChar(); + + /** + * Holds if this character class matches `char`. + */ + bindingset[char] + abstract predicate matches(string char); + + /** + * Gets a character matched by this character class. + */ + string choose() { result = getARelevantChar() and matches(result) } +} + +/** + * Provides implementations for `CharacterClass`. + */ +private module CharacterClasses { + /** + * Holds if the character class `cc` has a child (constant or range) that matches `char`. 
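The `getARelevantChar` contract above (two character classes intersect iff one matches a relevant character of the other) can be mimicked outside QL. A hedged Python sketch in which classes are modelled as plain predicates plus a small witness set:

class CharClass:
    def __init__(self, relevant, matches):
        self.relevant = relevant  # small set of witness characters
        self.matches = matches    # membership test

def intersects(c, d):
    # mirrors the idea behind `intersect`: probe each class with the other's relevant chars
    return any(d.matches(ch) for ch in c.relevant) or any(c.matches(ch) for ch in d.relevant)

digits = CharClass(set('09'), str.isdigit)
word = CharClass(set('aZ_09'), lambda ch: ch.isalnum() or ch == '_')
print(intersects(digits, word))  # True: '0' is matched by both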
+ */ + pragma[noinline] + predicate hasChildThatMatches(RegExpCharacterClass cc, string char) { + exists(getCanonicalCharClass(cc)) and + exists(RegExpTerm child | child = cc.getAChild() | + char = child.(RegexpCharacterConstant).getValue() + or + rangeMatchesOnLetterOrDigits(child, char) + or + not rangeMatchesOnLetterOrDigits(child, _) and + char = getARelevantChar() and + exists(string lo, string hi | child.(RegExpCharacterRange).isRange(lo, hi) | + lo <= char and + char <= hi + ) + or + exists(RegExpCharacterClassEscape escape | escape = child | + escape.getValue() = escape.getValue().toLowerCase() and + classEscapeMatches(escape.getValue(), char) + or + char = getARelevantChar() and + escape.getValue() = escape.getValue().toUpperCase() and + not classEscapeMatches(escape.getValue().toLowerCase(), char) + ) + ) + } + + /** + * Holds if `range` is a range on lower-case, upper-case, or digits, and matches `char`. + * This predicate is used to restrict the searchspace for ranges by only joining `getAnyPossiblyMatchedChar` + * on a few ranges. + */ + private predicate rangeMatchesOnLetterOrDigits(RegExpCharacterRange range, string char) { + exists(string lo, string hi | + range.isRange(lo, hi) and lo = lowercaseLetter() and hi = lowercaseLetter() + | + lo <= char and + char <= hi and + char = lowercaseLetter() + ) + or + exists(string lo, string hi | + range.isRange(lo, hi) and lo = upperCaseLetter() and hi = upperCaseLetter() + | + lo <= char and + char <= hi and + char = upperCaseLetter() + ) + or + exists(string lo, string hi | range.isRange(lo, hi) and lo = digit() and hi = digit() | + lo <= char and + char <= hi and + char = digit() + ) + } + + private string lowercaseLetter() { result = "abdcefghijklmnopqrstuvwxyz".charAt(_) } + + private string upperCaseLetter() { result = "ABCDEFGHIJKLMNOPQRSTUVWXYZ".charAt(_) } + + private string digit() { result = [0 .. 9].toString() } + + /** + * Gets a char that could be matched by a regular expression. + * Includes all printable ascii chars, all constants mentioned in a regexp, and all chars matches by the regexp `/\s|\d|\w/`. + */ + string getARelevantChar() { + exists(ascii(result)) + or + exists(RegexpCharacterConstant c | result = c.getValue().charAt(_)) + or + classEscapeMatches(_, result) + } + + /** + * Gets a char that is mentioned in the character class `c`. + */ + private string getAMentionedChar(RegExpCharacterClass c) { + exists(RegExpTerm child | child = c.getAChild() | + result = child.(RegexpCharacterConstant).getValue() + or + child.(RegExpCharacterRange).isRange(result, _) + or + child.(RegExpCharacterRange).isRange(_, result) + or + exists(RegExpCharacterClassEscape escape | child = escape | + result = min(string s | classEscapeMatches(escape.getValue().toLowerCase(), s)) + or + result = max(string s | classEscapeMatches(escape.getValue().toLowerCase(), s)) + ) + ) + } + + /** + * An implementation of `CharacterClass` for positive (non inverted) character classes. + */ + private class PositiveCharacterClass extends CharacterClass { + RegExpCharacterClass cc; + + PositiveCharacterClass() { this = getCanonicalCharClass(cc) and not cc.isInverted() } + + override string getARelevantChar() { result = getAMentionedChar(cc) } + + override predicate matches(string char) { hasChildThatMatches(cc, char) } + } + + /** + * An implementation of `CharacterClass` for inverted character classes. 
+ */ + private class InvertedCharacterClass extends CharacterClass { + RegExpCharacterClass cc; + + InvertedCharacterClass() { this = getCanonicalCharClass(cc) and cc.isInverted() } + + override string getARelevantChar() { + result = nextChar(getAMentionedChar(cc)) or + nextChar(result) = getAMentionedChar(cc) + } + + bindingset[char] + override predicate matches(string char) { not hasChildThatMatches(cc, char) } + } + + /** + * Holds if the character class escape `clazz` (\d, \s, or \w) matches `char`. + */ + pragma[noinline] + private predicate classEscapeMatches(string clazz, string char) { + clazz = "d" and + char = "0123456789".charAt(_) + or + clazz = "s" and + ( + char = [" ", "\t", "\r", "\n"] + or + char = getARelevantChar() and + char.regexpMatch("\\u000b|\\u000c") // \v|\f (vertical tab | form feed) + ) + or + clazz = "w" and + char = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_".charAt(_) + } + + /** + * An implementation of `CharacterClass` for \d, \s, and \w. + */ + private class PositiveCharacterClassEscape extends CharacterClass { + RegExpCharacterClassEscape cc; + + PositiveCharacterClassEscape() { + this = getCanonicalCharClass(cc) and cc.getValue() = ["d", "s", "w"] + } + + override string getARelevantChar() { + cc.getValue() = "d" and + result = ["0", "9"] + or + cc.getValue() = "s" and + result = [" "] + or + cc.getValue() = "w" and + result = ["a", "Z", "_", "0", "9"] + } + + override predicate matches(string char) { classEscapeMatches(cc.getValue(), char) } + + override string choose() { + cc.getValue() = "d" and + result = "9" + or + cc.getValue() = "s" and + result = [" "] + or + cc.getValue() = "w" and + result = "a" + } + } + + /** + * An implementation of `CharacterClass` for \D, \S, and \W. + */ + private class NegativeCharacterClassEscape extends CharacterClass { + RegExpCharacterClassEscape cc; + + NegativeCharacterClassEscape() { + this = getCanonicalCharClass(cc) and cc.getValue() = ["D", "S", "W"] + } + + override string getARelevantChar() { + cc.getValue() = "D" and + result = ["a", "Z", "!"] + or + cc.getValue() = "S" and + result = ["a", "9", "!"] + or + cc.getValue() = "W" and + result = [" ", "!"] + } + + bindingset[char] + override predicate matches(string char) { + not classEscapeMatches(cc.getValue().toLowerCase(), char) + } + } +} + +private class EdgeLabel extends TInputSymbol { + string toString() { + this = Epsilon() and result = "" + or + exists(InputSymbol s | this = s and result = s.toString()) + } +} + +/** + * Gets the state before matching `t`. + */ +pragma[inline] +private State before(RegExpTerm t) { result = Match(t, 0) } + +/** + * Gets a state the NFA may be in after matching `t`. + */ +private State after(RegExpTerm t) { + exists(RegExpAlt alt | t = alt.getAChild() | result = after(alt)) + or + exists(RegExpSequence seq, int i | t = seq.getChild(i) | + result = before(seq.getChild(i + 1)) + or + i + 1 = seq.getNumChild() and result = after(seq) + ) + or + exists(RegExpGroup grp | t = grp.getAChild() | result = after(grp)) + or + exists(RegExpStar star | t = star.getAChild() | result = before(star)) + or + exists(RegExpPlus plus | t = plus.getAChild() | + result = before(plus) or + result = after(plus) + ) + or + exists(RegExpOpt opt | t = opt.getAChild() | result = after(opt)) + or + exists(RegExpRoot root | t = root | result = AcceptAnySuffix(root)) +} + +/** + * Holds if the NFA has a transition from `q1` to `q2` labelled with `lbl`. 
+ */ +predicate delta(State q1, EdgeLabel lbl, State q2) { + exists(RegexpCharacterConstant s, int i | + q1 = Match(s, i) and + lbl = Char(s.getValue().charAt(i)) and + ( + q2 = Match(s, i + 1) + or + s.getValue().length() = i + 1 and + q2 = after(s) + ) + ) + or + exists(RegExpDot dot | q1 = before(dot) and q2 = after(dot) | + if dot.getLiteral().isDotAll() then lbl = Any() else lbl = Dot() + ) + or + exists(RegExpCharacterClass cc | + cc.isUniversalClass() and q1 = before(cc) and lbl = Any() and q2 = after(cc) + or + q1 = before(cc) and + lbl = CharClass(cc.getRawValue()) and + q2 = after(cc) + ) + or + exists(RegExpCharacterClassEscape cc | + q1 = before(cc) and + lbl = CharClass(cc.getRawValue()) and + q2 = after(cc) + ) + or + exists(RegExpAlt alt | lbl = Epsilon() | q1 = before(alt) and q2 = before(alt.getAChild())) + or + exists(RegExpSequence seq | lbl = Epsilon() | q1 = before(seq) and q2 = before(seq.getChild(0))) + or + exists(RegExpGroup grp | lbl = Epsilon() | q1 = before(grp) and q2 = before(grp.getChild(0))) + or + exists(RegExpStar star | lbl = Epsilon() | + q1 = before(star) and q2 = before(star.getChild(0)) + or + q1 = before(star) and q2 = after(star) + ) + or + exists(RegExpPlus plus | lbl = Epsilon() | q1 = before(plus) and q2 = before(plus.getChild(0))) + or + exists(RegExpOpt opt | lbl = Epsilon() | + q1 = before(opt) and q2 = before(opt.getChild(0)) + or + q1 = before(opt) and q2 = after(opt) + ) + or + exists(RegExpRoot root | q1 = AcceptAnySuffix(root) | + lbl = Any() and q2 = q1 + or + lbl = Epsilon() and q2 = Accept(root) + ) + or + exists(RegExpRoot root | q1 = Match(root, 0) | lbl = Any() and q2 = q1) + or + exists(RegExpDollar dollar | q1 = before(dollar) | + lbl = Epsilon() and q2 = Accept(getRoot(dollar)) + ) +} + +/** + * Gets a state that `q` has an epsilon transition to. + */ +State epsilonSucc(State q) { delta(q, Epsilon(), result) } + +/** + * Gets a state that has an epsilon transition to `q`. + */ +State epsilonPred(State q) { q = epsilonSucc(result) } + +/** + * Holds if there is a state `q` that can be reached from `q1` + * along epsilon edges, such that there is a transition from + * `q` to `q2` that consumes symbol `s`. + */ +predicate deltaClosed(State q1, InputSymbol s, State q2) { delta(epsilonSucc*(q1), s, q2) } + +/** + * Gets the root containing the given term, that is, the root of the literal, + * or a branch of the root disjunction. + */ +RegExpRoot getRoot(RegExpTerm term) { + result = term or + result = getRoot(term.getParent()) +} + +private newtype TState = + Match(RegExpTerm t, int i) { + getRoot(t).isRelevant() and + ( + i = 0 + or + exists(t.(RegexpCharacterConstant).getValue().charAt(i)) + ) + } or + Accept(RegExpRoot l) { l.isRelevant() } or + AcceptAnySuffix(RegExpRoot l) { l.isRelevant() } + +/** + * Gets a state that is about to match the regular expression `t`. + */ +State mkMatch(RegExpTerm t) { result = Match(t, 0) } + +/** + * A state in the NFA corresponding to a regular expression. + * + * Each regular expression literal `l` has one accepting state + * `Accept(l)`, one state that accepts all suffixes `AcceptAnySuffix(l)`, + * and a state `Match(t, i)` for every subterm `t`, + * which represents the state of the NFA before starting to + * match `t`, or the `i`th character in `t` if `t` is a constant. 
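`deltaClosed` composes the epsilon closure with a single consuming transition. The same computation over a toy, hand-written transition table (state names invented for illustration) looks like this in Python:

# Toy NFA for 'a*': edges maps state -> list of (label, target); None stands for an epsilon label.
edges = {
    'before(a*)': [(None, 'before(a)'), (None, 'accept')],
    'before(a)': [('a', 'before(a*)')],
    'accept': [],
}

def epsilon_closure(state):
    seen, todo = {state}, [state]
    while todo:
        for label, target in edges[todo.pop()]:
            if label is None and target not in seen:
                seen.add(target)
                todo.append(target)
    return seen

def delta_closed(state, char):
    return {t for s in epsilon_closure(state) for label, t in edges[s] if label == char}

print(delta_closed('before(a*)', 'a'))  # {'before(a*)'}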
+ */ +class State extends TState { + RegExpTerm repr; + + State() { + this = Match(repr, _) or + this = Accept(repr) or + this = AcceptAnySuffix(repr) + } + + /** + * Gets a string representation for this state in a regular expression. + */ + string toString() { + exists(int i | this = Match(repr, i) | result = "Match(" + repr + "," + i + ")") + or + this instanceof Accept and + result = "Accept(" + repr + ")" + or + this instanceof AcceptAnySuffix and + result = "AcceptAny(" + repr + ")" + } + + /** + * Gets the location for this state. + */ + Location getLocation() { result = repr.getLocation() } + + /** + * Gets the term represented by this state. + */ + RegExpTerm getRepr() { result = repr } +} + +/** + * Gets the minimum char that is matched by both the character classes `c` and `d`. + */ +private string getMinOverlapBetweenCharacterClasses(CharacterClass c, CharacterClass d) { + result = min(getAOverlapBetweenCharacterClasses(c, d)) +} + +/** + * Gets a char that is matched by both the character classes `c` and `d`. + * And `c` and `d` is not the same character class. + */ +private string getAOverlapBetweenCharacterClasses(CharacterClass c, CharacterClass d) { + sharesRoot(c, d) and + result = [c.getARelevantChar(), d.getARelevantChar()] and + c.matches(result) and + d.matches(result) and + not c = d +} + +/** + * Gets a character that is represented by both `c` and `d`. + */ +string intersect(InputSymbol c, InputSymbol d) { + (sharesRoot(c, d) or [c, d] = Any()) and + ( + c = Char(result) and + d = getAnInputSymbolMatching(result) + or + result = getMinOverlapBetweenCharacterClasses(c, d) + or + result = c.(CharacterClass).choose() and + ( + d = c + or + d = Dot() and + not (result = "\n" or result = "\r") + or + d = Any() + ) + or + (c = Dot() or c = Any()) and + (d = Dot() or d = Any()) and + result = "a" + ) + or + result = intersect(d, c) +} + +/** + * Gets a symbol that matches `char`. + */ +bindingset[char] +InputSymbol getAnInputSymbolMatching(string char) { + result = Char(char) + or + result.(CharacterClass).matches(char) + or + result = Dot() and + not (char = "\n" or char = "\r") + or + result = Any() +} + +/** + * Predicates for constructing a prefix string that leads to a given state. + */ +private module PrefixConstruction { + /** + * Holds if `state` starts the string matched by the regular expression. + */ + private predicate isStartState(State state) { + state instanceof StateInPumpableRegexp and + ( + state = Match(any(RegExpRoot r), _) + or + exists(RegExpCaret car | state = after(car)) + ) + } + + /** + * Holds if `state` is the textually last start state for the regular expression. + */ + private predicate lastStartState(State state) { + exists(RegExpRoot root | + state = + max(State s, Location l | + isStartState(s) and getRoot(s.getRepr()) = root and l = s.getRepr().getLocation() + | + s + order by + l.getStartLine(), l.getStartColumn(), s.getRepr().toString(), l.getEndColumn(), + l.getEndLine() + ) + ) + } + + /** + * Holds if there exists any transition (Epsilon() or other) from `a` to `b`. + */ + private predicate existsTransition(State a, State b) { delta(a, _, b) } + + /** + * Gets the minimum number of transitions it takes to reach `state` from the `start` state. + */ + int prefixLength(State start, State state) = + shortestDistances(lastStartState/1, existsTransition/2)(start, state, result) + + /** + * Gets the minimum number of transitions it takes to reach `state` from the start state. 
+ */ + private int lengthFromStart(State state) { result = prefixLength(_, state) } + + /** + * Gets a string for which the regular expression will reach `state`. + * + * Has at most one result for any given `state`. + * This predicate will not always have a result even if there is a ReDoS issue in + * the regular expression. + */ + string prefix(State state) { + lastStartState(state) and + result = "" + or + // the search stops past the last redos candidate state. + lengthFromStart(state) <= max(lengthFromStart(any(State s | isReDoSCandidate(s, _)))) and + exists(State prev | + // select a unique predecessor (by an arbitrary measure) + prev = + min(State s, Location loc | + lengthFromStart(s) = lengthFromStart(state) - 1 and + loc = s.getRepr().getLocation() and + delta(s, _, state) + | + s + order by + loc.getStartLine(), loc.getStartColumn(), loc.getEndLine(), loc.getEndColumn(), + s.getRepr().toString() + ) + | + // greedy search for the shortest prefix + result = prefix(prev) and delta(prev, Epsilon(), state) + or + not delta(prev, Epsilon(), state) and + result = prefix(prev) + getCanonicalEdgeChar(prev, state) + ) + } + + /** + * Gets a canonical char for which there exists a transition from `prev` to `next` in the NFA. + */ + private string getCanonicalEdgeChar(State prev, State next) { + result = + min(string c | delta(prev, any(InputSymbol symbol | c = intersect(Any(), symbol)), next)) + } + + /** + * A state within a regular expression that has a pumpable state. + */ + class StateInPumpableRegexp extends State { + pragma[noinline] + StateInPumpableRegexp() { + exists(State s | isReDoSCandidate(s, _) | getRoot(s.getRepr()) = getRoot(this.getRepr())) + } + } +} + +/** + * Predicates for testing the presence of a rejecting suffix. + * + * These predicates are used to ensure that the all states reached from the fork + * by repeating `w` have a rejecting suffix. + * + * For example, a regexp like `/^(a+)+/` will accept any string as long the prefix is + * some number of `"a"`s, and it is therefore not possible to construct a rejecting suffix. + * + * A regexp like `/(a+)+$/` or `/(a+)+b/` trivially has a rejecting suffix, + * as the suffix "X" will cause both the regular expressions to be rejected. + * + * The string `w` is repeated any number of times because it needs to be + * infinitely repeatedable for the attack to work. + * For the regular expression `/((ab)+)*abab/` the accepting state is not reachable from the fork + * using epsilon transitions. But any attempt at repeating `w` will end in a state that accepts all suffixes. + */ +private module SuffixConstruction { + import PrefixConstruction + + /** + * Holds if all states reachable from `fork` by repeating `w` + * are likely rejectable by appending some suffix. + */ + predicate reachesOnlyRejectableSuffixes(State fork, string w) { + isReDoSCandidate(fork, w) and + forex(State next | next = process(fork, w, w.length() - 1) | isLikelyRejectable(next)) + } + + /** + * Holds if there likely exists a suffix starting from `s` that leads to the regular expression being rejected. + * This predicate might find impossible suffixes when searching for suffixes of length > 1, which can cause FPs. + */ + pragma[noinline] + private predicate isLikelyRejectable(StateInPumpableRegexp s) { + // exists a reject edge with some char. 
+    hasRejectEdge(s)
+    or
+    hasEdgeToLikelyRejectable(s)
+    or
+    // stopping here is rejection
+    isRejectState(s)
+  }
+
+  /**
+   * Holds if `s` is not an accept state, and there is no epsilon transition to an accept state.
+   */
+  predicate isRejectState(StateInPumpableRegexp s) { not epsilonSucc*(s) = Accept(_) }
+
+  /**
+   * Holds if there is likely a non-empty suffix leading to rejection starting in `s`.
+   */
+  pragma[noopt]
+  predicate hasEdgeToLikelyRejectable(StateInPumpableRegexp s) {
+    // all edges (at least one) labelled with some char lead to another state that is rejectable.
+    // the `next` states might not share a common suffix, which can cause FPs.
+    exists(string char | char = hasEdgeToLikelyRejectableHelper(s) |
+      // noopt to force `hasEdgeToLikelyRejectableHelper` to be first in the join-order.
+      exists(State next | deltaClosedChar(s, char, next) | isLikelyRejectable(next)) and
+      forall(State next | deltaClosedChar(s, char, next) | isLikelyRejectable(next))
+    )
+  }
+
+  /**
+   * Gets a char for which there exists a transition away from `s`,
+   * where `s` has not already been found to be rejectable by `hasRejectEdge` or `isRejectState`.
+   */
+  pragma[noinline]
+  private string hasEdgeToLikelyRejectableHelper(StateInPumpableRegexp s) {
+    not hasRejectEdge(s) and
+    not isRejectState(s) and
+    deltaClosedChar(s, result, _)
+  }
+
+  /**
+   * Holds if there is a state `next` that can be reached from `prev`
+   * along epsilon edges, such that there is a transition from
+   * `prev` to `next` that matches the character `char`.
+   */
+  predicate deltaClosedChar(StateInPumpableRegexp prev, string char, StateInPumpableRegexp next) {
+    deltaClosed(prev, getAnInputSymbolMatchingRelevant(char), next)
+  }
+
+  pragma[noinline]
+  InputSymbol getAnInputSymbolMatchingRelevant(string char) {
+    char = relevant(_) and
+    result = getAnInputSymbolMatching(char)
+  }
+
+  /**
+   * Gets a char used for finding possible suffixes inside `root`.
+   */
+  pragma[noinline]
+  private string relevant(RegExpRoot root) {
+    exists(ascii(result))
+    or
+    exists(InputSymbol s | belongsTo(s, root) | result = intersect(s, _))
+    or
+    // The characters from `hasSimpleRejectEdge`. Only `\n` is really needed (as `\n` is not in the `ascii` relation).
+    // The three chars must be kept in sync with `hasSimpleRejectEdge`.
+    result = ["|", "\n", "Z"]
+  }
+
+  /**
+   * Holds if there exists a `char` such that there is no edge from `s` labelled `char` in our NFA.
+   * The NFA does not model reject states, so the above is the same as saying there is a reject edge.
+   */
+  private predicate hasRejectEdge(State s) {
+    hasSimpleRejectEdge(s)
+    or
+    not hasSimpleRejectEdge(s) and
+    exists(string char | char = relevant(getRoot(s.getRepr())) | not deltaClosedChar(s, char, _))
+  }
+
+  /**
+   * Holds if there is no edge from `s` labelled with "|", "\n", or "Z" in our NFA.
+   * This predicate is used as a cheap pre-processing step to speed up `hasRejectEdge`.
+   */
+  private predicate hasSimpleRejectEdge(State s) {
+    // The three chars were chosen arbitrarily. The three chars must be kept in sync with `relevant`.
+    exists(string char | char = ["|", "\n", "Z"] | not deltaClosedChar(s, char, _))
+  }
+
+  /**
+   * Gets a state that can be reached from pumpable `fork` consuming all
+   * chars in `w` any number of times followed by the first `i+1` characters of `w`.
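The module comment above contrasts `/^(a+)+/` (no rejecting suffix can exist) with `/(a+)+$/` (a rejecting suffix exists). The difference is easy to observe; sizes below are illustrative:

import re
import time

subject = 'a' * 20 + '!'

start = time.perf_counter()
re.search(r'^(a+)+', subject)  # matches the 'a'-prefix immediately; nothing to reject
print('^(a+)+ :', round(time.perf_counter() - start, 3))

start = time.perf_counter()
re.search(r'(a+)+$', subject)  # the '!' rejects every attempt, so the engine backtracks
print('(a+)+$ :', round(time.perf_counter() - start, 3))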
+ */ + pragma[noopt] + private State process(State fork, string w, int i) { + exists(State prev | prev = getProcessPrevious(fork, i, w) | + exists(string char, InputSymbol sym | + char = w.charAt(i) and + deltaClosed(prev, sym, result) and + // noopt to prevent joining `prev` with all possible `chars` that could transition away from `prev`. + // Instead only join with the set of `chars` where a relevant `InputSymbol` has already been found. + sym = getAProcessInputSymbol(char) + ) + ) + } + + /** + * Gets a state that can be reached from pumpable `fork` consuming all + * chars in `w` any number of times followed by the first `i` characters of `w`. + */ + private State getProcessPrevious(State fork, int i, string w) { + isReDoSCandidate(fork, w) and + ( + i = 0 and result = fork + or + result = process(fork, w, i - 1) + or + // repeat until fixpoint + i = 0 and + result = process(fork, w, w.length() - 1) + ) + } + + /** + * Gets an InputSymbol that matches `char`. + * The predicate is specialized to only have a result for the `char`s that are relevant for the `process` predicate. + */ + private InputSymbol getAProcessInputSymbol(string char) { + char = getAProcessChar() and + result = getAnInputSymbolMatching(char) + } + + /** + * Gets a `char` that occurs in a `pump` string. + */ + private string getAProcessChar() { result = any(string s | isReDoSCandidate(_, s)).charAt(_) } +} + +/** + * Gets the result of backslash-escaping newlines, carriage-returns and + * backslashes in `s`. + */ +bindingset[s] +private string escape(string s) { + result = + s.replaceAll("\\", "\\\\") + .replaceAll("\n", "\\n") + .replaceAll("\r", "\\r") + .replaceAll("\t", "\\t") +} + +/** + * Gets `str` with the last `i` characters moved to the front. + * + * We use this to adjust the pump string to match with the beginning of + * a RegExpTerm, so it doesn't start in the middle of a constant. + */ +bindingset[str, i] +private string rotate(string str, int i) { + result = str.suffix(str.length() - i) + str.prefix(str.length() - i) +} + +/** + * Holds if `term` may cause superlinear backtracking on strings containing many repetitions of `pump`. + * Gets the shortest string that causes superlinear backtracking. + */ +private predicate isReDoSAttackable(RegExpTerm term, string pump, State s) { + exists(int i, string c | s = Match(term, i) | + c = + min(string w | + any(ReDoSConfiguration conf).isReDoSCandidate(s, w) and + SuffixConstruction::reachesOnlyRejectableSuffixes(s, w) + | + w order by w.length(), w + ) and + pump = escape(rotate(c, i)) + ) +} + +/** + * Holds if the state `s` (represented by the term `t`) can have backtracking with repetitions of `pump`. + * + * `prefixMsg` contains a friendly message for a prefix that reaches `s` (or `prefixMsg` is the empty string if the prefix is empty or if no prefix could be found). 
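For reference, the `rotate` and `escape` helpers above correspond roughly to the following Python, with made-up example values:

def rotate(s, i):
    # move the last i characters to the front, aligning a pump string with the start of its term
    return s[len(s) - i:] + s[:len(s) - i]

def escape(s):
    return s.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r').replace('\t', '\\t')

print(rotate('bca', 1))  # abc
print(escape('a\nb'))    # a\nb, with the newline spelled out as \n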
+ */
+predicate hasReDoSResult(RegExpTerm t, string pump, State s, string prefixMsg) {
+  isReDoSAttackable(t, pump, s) and
+  (
+    prefixMsg = "starting with '" + escape(PrefixConstruction::prefix(s)) + "' and " and
+    not PrefixConstruction::prefix(s) = ""
+    or
+    PrefixConstruction::prefix(s) = "" and prefixMsg = ""
+    or
+    not exists(PrefixConstruction::prefix(s)) and prefixMsg = ""
+  )
+}
diff --git a/python/ql/src/semmle/python/regex/RegExpTreeView.qll b/python/ql/src/semmle/python/regex/RegExpTreeView.qll
new file mode 100644
index 00000000000..addd192cafa
--- /dev/null
+++ b/python/ql/src/semmle/python/regex/RegExpTreeView.qll
@@ -0,0 +1,15 @@
+/**
+ * This module should provide a class hierarchy corresponding to a parse tree of regular expressions.
+ */
+
+import python
+import semmle.python.RegexTreeView
+
+/**
+ * Holds if the regular expression should not be considered.
+ *
+ * We make the pragmatic performance optimization to ignore files we did not extract.
+ */
+predicate isExcluded(RegExpParent parent) {
+  not exists(parent.getRegex().getLocation().getFile().getRelativePath())
+}
diff --git a/python/ql/src/semmle/python/regex/SuperlinearBackTracking.qll b/python/ql/src/semmle/python/regex/SuperlinearBackTracking.qll
new file mode 100644
index 00000000000..0bbff12b49d
--- /dev/null
+++ b/python/ql/src/semmle/python/regex/SuperlinearBackTracking.qll
@@ -0,0 +1,449 @@
+/**
+ * Provides classes for working with regular expressions that can
+ * perform backtracking in superlinear time.
+ */
+
+import ReDoSUtil
+
+/*
+ * This module implements the analysis described in the paper:
+ * Valentin Wustholz, Oswaldo Olivo, Marijn J. H. Heule, and Isil Dillig:
+ * Static Detection of DoS Vulnerabilities in
+ * Programs that use Regular Expressions
+ * (Extended Version).
+ * (https://arxiv.org/pdf/1701.04045.pdf)
+ *
+ * Theorem 3 from the paper describes the basic idea.
+ *
+ * The following explains the idea using the variable and predicate names that are used in the implementation:
+ * We consider a pair of repetitions, which we will call `pivot` and `succ`.
+ *
+ * We create a product automaton of 3-tuples of states (see `StateTuple`).
+ * There exists a transition `(a,b,c) -> (d,e,f)` in the product automaton
+ * iff there exist three transitions in the NFA `a->d, b->e, c->f` where those three
+ * transitions all match a shared character `char` (see `getAThreewayIntersect`).
+ *
+ * We start a search in the product automaton at `(pivot, pivot, succ)`,
+ * and search for a series of transitions (a `Trace`), such that we end
+ * at `(pivot, succ, succ)` (see `isReachableFromStartTuple`).
+ *
+ * For example, consider the regular expression `/^\d*5\w*$/`.
+ * The search will start at the tuple `(\d*, \d*, \w*)` and search
+ * for a path to `(\d*, \w*, \w*)`.
+ * This path exists, and consists of a single transition in the product automaton,
+ * where the three corresponding NFA edges all match the character `"5"`.
+ *
+ * The start state in the NFA has an any-transition to itself; this allows us to
+ * flag regular expressions such as `/a*$/`, which does not have a start anchor
+ * and can thus start matching anywhere.
+ *
+ * The implementation is not perfect.
+ * It has the same suffix detection issue as the `js/redos` query, which can cause false positives.
+ * It also doesn't find all transitions in the product automaton, which can cause false negatives.
+ */
+
+/**
+ * An instantiation of `ReDoSConfiguration` for superlinear ReDoS.
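Tying the `/^\d*5\w*$/` example above to observable behaviour: repeating the pump character `5` and appending a rejecting suffix yields roughly quadratic matching time on a backtracking engine. Sizes and timings below are illustrative only:

import re
import time

pattern = re.compile(r'^\d*5\w*$')

for n in (2000, 4000, 8000):
    attack = '5' * n + '!'  # '!' is rejected, forcing \d* and \w* to re-split the '5's
    start = time.perf_counter()
    pattern.match(attack)
    print(n, round(time.perf_counter() - start, 2))  # time grows about 4x per doubling of n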
+ */ +class SuperLinearReDoSConfiguration extends ReDoSConfiguration { + SuperLinearReDoSConfiguration() { this = "SuperLinearReDoSConfiguration" } + + override predicate isReDoSCandidate(State state, string pump) { isPumpable(_, state, pump) } +} + +/** + * Gets any root (start) state of a regular expression. + */ +private State getRootState() { result = mkMatch(any(RegExpRoot r)) } + +private newtype TStateTuple = + MkStateTuple(State q1, State q2, State q3) { + // starts at (pivot, pivot, succ) + isStartLoops(q1, q3) and q1 = q2 + or + step(_, _, _, _, q1, q2, q3) and FeasibleTuple::isFeasibleTuple(q1, q2, q3) + } + +/** + * A state in the product automaton. + * The product automaton contains 3-tuples of states. + * + * We lazily only construct those states that we are actually + * going to need. + * Either a start state `(pivot, pivot, succ)`, or a state + * where there exists a transition from an already existing state. + * + * The exponential variant of this query (`js/redos`) uses an optimization + * trick where `q1 <= q2`. This trick cannot be used here as the order + * of the elements matter. + */ +class StateTuple extends TStateTuple { + State q1; + State q2; + State q3; + + StateTuple() { this = MkStateTuple(q1, q2, q3) } + + /** + * Gest a string repesentation of this tuple. + */ + string toString() { result = "(" + q1 + ", " + q2 + ", " + q3 + ")" } + + /** + * Holds if this tuple is `(r1, r2, r3)`. + */ + pragma[noinline] + predicate isTuple(State r1, State r2, State r3) { r1 = q1 and r2 = q2 and r3 = q3 } +} + +/** + * A module for determining feasible tuples for the product automaton. + * + * The implementation is split into many predicates for performance reasons. + */ +private module FeasibleTuple { + /** + * Holds if the tuple `(r1, r2, r3)` might be on path from a start-state to an end-state in the product automaton. + */ + pragma[inline] + predicate isFeasibleTuple(State r1, State r2, State r3) { + // The first element is either inside a repetition (or the start state itself) + isRepetitionOrStart(r1) and + // The last element is inside a repetition + stateInsideRepetition(r3) and + // The states are reachable in the NFA in the order r1 -> r2 -> r3 + delta+(r1) = r2 and + delta+(r2) = r3 and + // The first element can reach a beginning (the "pivot" state in a `(pivot, succ)` pair). + canReachABeginning(r1) and + // The last element can reach a target (the "succ" state in a `(pivot, succ)` pair). + canReachATarget(r3) + } + + /** + * Holds if `s` is either inside a repetition, or is the start state (which is a repetition). + */ + pragma[noinline] + private predicate isRepetitionOrStart(State s) { stateInsideRepetition(s) or s = getRootState() } + + /** + * Holds if state `s` might be inside a backtracking repetition. + */ + pragma[noinline] + private predicate stateInsideRepetition(State s) { + s.getRepr().getParent*() instanceof InfiniteRepetitionQuantifier + } + + /** + * Holds if there exists a path in the NFA from `s` to a "pivot" state + * (from a `(pivot, succ)` pair that starts the search). + */ + pragma[noinline] + private predicate canReachABeginning(State s) { + delta+(s) = any(State pivot | isStartLoops(pivot, _)) + } + + /** + * Holds if there exists a path in the NFA from `s` to a "succ" state + * (from a `(pivot, succ)` pair that starts the search). 
+ */ + pragma[noinline] + private predicate canReachATarget(State s) { delta+(s) = any(State succ | isStartLoops(_, succ)) } +} + +/** + * Holds if `pivot` and `succ` are a pair of loops that could be the beginning of a quadratic blowup. + * + * There is a slight implementation difference compared to the paper: this predicate requires that `pivot != succ`. + * The case where `pivot = succ` causes exponential backtracking and is handled by the `js/redos` query. + */ +predicate isStartLoops(State pivot, State succ) { + pivot != succ and + succ.getRepr() instanceof InfiniteRepetitionQuantifier and + delta+(pivot) = succ and + ( + pivot.getRepr() instanceof InfiniteRepetitionQuantifier + or + pivot = mkMatch(any(RegExpRoot root)) + ) +} + +/** + * Gets a state for which there exists a transition in the NFA from `s'. + */ +State delta(State s) { delta(s, _, result) } + +/** + * Holds if there are transitions from the components of `q` to the corresponding + * components of `r` labelled with `s1`, `s2`, and `s3`, respectively. + */ +pragma[noinline] +predicate step(StateTuple q, InputSymbol s1, InputSymbol s2, InputSymbol s3, StateTuple r) { + exists(State r1, State r2, State r3 | + step(q, s1, s2, s3, r1, r2, r3) and r = MkStateTuple(r1, r2, r3) + ) +} + +/** + * Holds if there are transitions from the components of `q` to `r1`, `r2`, and `r3 + * labelled with `s1`, `s2`, and `s3`, respectively. + */ +pragma[noopt] +predicate step( + StateTuple q, InputSymbol s1, InputSymbol s2, InputSymbol s3, State r1, State r2, State r3 +) { + exists(State q1, State q2, State q3 | q.isTuple(q1, q2, q3) | + deltaClosed(q1, s1, r1) and + deltaClosed(q2, s2, r2) and + deltaClosed(q3, s3, r3) and + // use noopt to force the join on `getAThreewayIntersect` to happen last. + exists(getAThreewayIntersect(s1, s2, s3)) + ) +} + +/** + * Gets a char that is matched by all the edges `s1`, `s2`, and `s3`. + * + * The result is not complete, and might miss some combination of edges that share some character. + */ +pragma[noinline] +string getAThreewayIntersect(InputSymbol s1, InputSymbol s2, InputSymbol s3) { + result = minAndMaxIntersect(s1, s2) and result = [intersect(s2, s3), intersect(s1, s3)] + or + result = minAndMaxIntersect(s1, s3) and result = [intersect(s2, s3), intersect(s1, s2)] + or + result = minAndMaxIntersect(s2, s3) and result = [intersect(s1, s2), intersect(s1, s3)] +} + +/** + * Gets the minimum and maximum characters that intersect between `a` and `b`. + * This predicate is used to limit the size of `getAThreewayIntersect`. + */ +pragma[noinline] +string minAndMaxIntersect(InputSymbol a, InputSymbol b) { + result = [min(intersect(a, b)), max(intersect(a, b))] +} + +private newtype TTrace = + Nil() or + Step(InputSymbol s1, InputSymbol s2, InputSymbol s3, TTrace t) { + exists(StateTuple p | + isReachableFromStartTuple(_, _, p, t, _) and + step(p, s1, s2, s3, _) + ) + or + exists(State pivot, State succ | isStartLoops(pivot, succ) | + t = Nil() and step(MkStateTuple(pivot, pivot, succ), s1, s2, s3, _) + ) + } + +/** + * A list of tuples of input symbols that describe a path in the product automaton + * starting from some start state. + */ +class Trace extends TTrace { + /** + * Gets a string representation of this Trace that can be used for debug purposes. 
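`getAThreewayIntersect` and `minAndMaxIntersect` approximate a three-way character intersection from pairwise ones. A rough Python analogue of that approximation, modelling input symbols as plain character sets:

def min_and_max_intersect(a, b):
    common = sorted(a & b)
    return {common[0], common[-1]} if common else set()

def threeway_intersect(a, b, c):
    # a char qualifies if it is the min/max of one pairwise intersection
    # and also lies in an intersection involving the remaining symbol
    return (min_and_max_intersect(a, b) & ((b & c) | (a & c))) | \
           (min_and_max_intersect(a, c) & ((b & c) | (a & b))) | \
           (min_and_max_intersect(b, c) & ((a & b) | (a & c)))

digits, word, five = set('0123456789'), set('abc059'), {'5'}
print(threeway_intersect(digits, word, five))  # {'5'}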
+
+private newtype TTrace =
+  Nil() or
+  Step(InputSymbol s1, InputSymbol s2, InputSymbol s3, TTrace t) {
+    exists(StateTuple p |
+      isReachableFromStartTuple(_, _, p, t, _) and
+      step(p, s1, s2, s3, _)
+    )
+    or
+    exists(State pivot, State succ | isStartLoops(pivot, succ) |
+      t = Nil() and step(MkStateTuple(pivot, pivot, succ), s1, s2, s3, _)
+    )
+  }
+
+/**
+ * A list of tuples of input symbols that describe a path in the product automaton
+ * starting from some start state.
+ */
+class Trace extends TTrace {
+  /**
+   * Gets a string representation of this Trace that can be used for debug purposes.
+   */
+  string toString() {
+    this = Nil() and result = "Nil()"
+    or
+    exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, Trace t | this = Step(s1, s2, s3, t) |
+      result = "Step(" + s1 + ", " + s2 + ", " + s3 + ", " + t + ")"
+    )
+  }
+}
+
+/**
+ * Gets a string corresponding to the trace `t`.
+ */
+string concretise(Trace t) {
+  t = Nil() and result = ""
+  or
+  exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, Trace rest | t = Step(s1, s2, s3, rest) |
+    result = concretise(rest) + getAThreewayIntersect(s1, s2, s3)
+  )
+}
+
+/**
+ * Holds if there exists a transition from `r` to `q` in the product automaton.
+ * Notice that the arguments are flipped, and thus the direction is backwards.
+ */
+pragma[noinline]
+predicate tupleDeltaBackwards(StateTuple q, StateTuple r) { step(r, _, _, _, q) }
+
+/**
+ * Holds if `tuple` is an end state in our search.
+ * That means there exists a pair of loops `(pivot, succ)` such that `tuple = (pivot, succ, succ)`.
+ */
+predicate isEndTuple(StateTuple tuple) { tuple = getAnEndTuple(_, _) }
+
+/**
+ * Gets the minimum length of a path from `r` to an end state `end`.
+ *
+ * The implementation searches backwards from the end-tuple.
+ * This approach was chosen because it is much more efficient when the first predicate given to `shortestDistances` is small.
+ * The `end` argument must always be an end state.
+ */
+int distBackFromEnd(StateTuple r, StateTuple end) =
+  shortestDistances(isEndTuple/1, tupleDeltaBackwards/2)(end, r, result)
+
+/**
+ * Holds if there exists a pair of repetitions `(pivot, succ)` in the regular expression such that:
+ * `tuple` is reachable from `(pivot, pivot, succ)` in the product automaton,
+ * and there is a distance of `dist` from `tuple` to the nearest end-tuple `(pivot, succ, succ)`,
+ * and a path from a start-state to `tuple` follows the transitions in `trace`.
+ */
+predicate isReachableFromStartTuple(State pivot, State succ, StateTuple tuple, Trace trace, int dist) {
+  // Base case. The first step is inlined so that the search starts from all possible 1-steps, and not just the ones on a shortest path.
+  exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, State q1, State q2, State q3 |
+    isStartLoops(pivot, succ) and
+    step(MkStateTuple(pivot, pivot, succ), s1, s2, s3, tuple) and
+    tuple = MkStateTuple(q1, q2, q3) and
+    trace = Step(s1, s2, s3, Nil()) and
+    dist = distBackFromEnd(tuple, MkStateTuple(pivot, succ, succ))
+  )
+  or
+  // Recursive case.
+  exists(StateTuple p, Trace v, InputSymbol s1, InputSymbol s2, InputSymbol s3 |
+    isReachableFromStartTuple(pivot, succ, p, v, dist + 1) and
+    dist = isReachableFromStartTupleHelper(pivot, succ, tuple, p, s1, s2, s3) and
+    trace = Step(s1, s2, s3, v)
+  )
+}
+
+/**
+ * Helper predicate for the recursive case in `isReachableFromStartTuple`.
+ */
+pragma[noinline]
+private int isReachableFromStartTupleHelper(
+  State pivot, State succ, StateTuple r, StateTuple p, InputSymbol s1, InputSymbol s2,
+  InputSymbol s3
+) {
+  result = distBackFromEnd(r, MkStateTuple(pivot, succ, succ)) and
+  step(p, s1, s2, s3, r)
+}
+
+/**
+ * Gets the tuple `(pivot, succ, succ)` from the product automaton.
+ */
+StateTuple getAnEndTuple(State pivot, State succ) {
+  isStartLoops(pivot, succ) and
+  result = MkStateTuple(pivot, succ, succ)
+}
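`distBackFromEnd` above computes distances by searching backwards from the end tuples produced by `getAnEndTuple`, which stays cheap because there are only few such tuples. A small Python sketch of that idea (illustrative only; the product automaton is given as an explicit mapping from a tuple to the tuples that can step to it):

```python
from collections import deque

def dist_back_from_end(end_tuple, backward_edges):
    # Breadth-first search that starts at a single end tuple and follows the
    # transitions of the product automaton in reverse; run once per end tuple.
    dist = {end_tuple: 0}
    queue = deque([end_tuple])
    while queue:
        q = queue.popleft()
        for r in backward_edges.get(q, ()):  # r has a forward transition to q
            if r not in dist:
                dist[r] = dist[q] + 1
                queue.append(r)
    return dist  # maps each reachable tuple to its distance to end_tuple

# Toy product automaton: the start tuple is two steps away from the end tuple.
backward = {
    ("pivot", "succ", "succ"): [("mid", "mid", "succ")],
    ("mid", "mid", "succ"): [("pivot", "pivot", "succ")],
}
print(dist_back_from_end(("pivot", "succ", "succ"), backward))
```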
+
+/**
+ * Holds if matching repetitions of `pump` can:
+ * 1) Transition from `pivot` back to `pivot`.
+ * 2) Transition from `pivot` to `succ`.
+ * 3) Transition from `succ` to `succ`.
+ *
+ * From Theorem 3 in the paper linked at the top of this file we can therefore conclude that
+ * the regular expression has polynomial backtracking, provided that a rejecting suffix exists.
+ *
+ * This predicate is used by `SuperLinearReDoSConfiguration`, and the final results are
+ * available in the `hasReDoSResult` predicate.
+ */
+predicate isPumpable(State pivot, State succ, string pump) {
+  exists(StateTuple q, Trace t |
+    isReachableFromStartTuple(pivot, succ, q, t, _) and
+    q = getAnEndTuple(pivot, succ) and
+    pump = concretise(t)
+  )
+}
+
+/**
+ * Holds if repetitions of `pump` at `t` will cause polynomial backtracking.
+ */
+predicate polynomialReDoS(RegExpTerm t, string pump, string prefixMsg, RegExpTerm prev) {
+  exists(State s, State pivot |
+    hasReDoSResult(t, pump, s, prefixMsg) and
+    isPumpable(pivot, s, _) and
+    prev = pivot.getRepr()
+  )
+}
+
+/**
+ * Holds if `t` matches at least an epsilon symbol.
+ *
+ * That is, this term does not restrict the language of the enclosing regular expression.
+ *
+ * This is implemented as an under-approximation; in particular, it does not hold for sub-patterns.
+ */
+private predicate matchesEpsilon(RegExpTerm t) {
+  t instanceof RegExpStar
+  or
+  t instanceof RegExpOpt
+  or
+  t.(RegExpRange).getLowerBound() = 0
+  or
+  exists(RegExpTerm child |
+    child = t.getAChild() and
+    matchesEpsilon(child)
+  |
+    t instanceof RegExpAlt or
+    t instanceof RegExpGroup or
+    t instanceof RegExpPlus or
+    t instanceof RegExpRange
+  )
+  or
+  matchesEpsilon(t.(RegExpBackRef).getGroup())
+  or
+  forex(RegExpTerm child | child = t.(RegExpSequence).getAChild() | matchesEpsilon(child))
+}
+
+/**
+ * Gets a message explaining why `term` can cause polynomial backtracking.
+ */
+string getReasonString(RegExpTerm term, string pump, string prefixMsg, RegExpTerm prev) {
+  polynomialReDoS(term, pump, prefixMsg, prev) and
+  result =
+    "Strings " + prefixMsg + "with many repetitions of '" + pump +
+      "' can start matching anywhere after the start of the preceding " + prev
+}
+
+/**
+ * A term that may cause a regular expression engine to perform a
+ * polynomial number of match attempts, relative to the input length.
+ */
+class PolynomialBackTrackingTerm extends InfiniteRepetitionQuantifier {
+  string reason;
+  string pump;
+  string prefixMsg;
+  RegExpTerm prev;
+
+  PolynomialBackTrackingTerm() {
+    reason = getReasonString(this, pump, prefixMsg, prev) and
+    // There might be many reasons for this term to have polynomial backtracking; we pick the shortest one.
+    reason = min(string msg | msg = getReasonString(this, _, _, _) | msg order by msg.length(), msg)
+  }
+
+  /**
+   * Holds if all non-empty successors to the polynomial backtracking term match the end of the line.
+   */
+  predicate isAtEndLine() {
+    forall(RegExpTerm succ | this.getSuccessor+() = succ and not matchesEpsilon(succ) |
+      succ instanceof RegExpDollar
+    )
+  }
+
+  /**
+   * Gets the string that should be repeated to cause this regular expression to perform polynomially.
+   */
+  string getPumpString() { result = pump }
+
+  /**
+   * Gets a message describing the prefix a matching string must start with for this term to cause polynomial backtracking.
+   */
+  string getPrefixMessage() { result = prefixMsg }
+
+  /**
+   * Gets a predecessor of `this` that also loops on the pump string, thereby causing polynomial backtracking.
+   */
+  RegExpTerm getPreviousLoop() { result = prev }
+
+  /**
+   * Gets the reason for the number of match attempts.
+   */
+  string getReason() { result = reason }
+}
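For concreteness, `a*a*$` is the kind of pattern this analysis describes: the first `a*` is the pivot loop, the second `a*` is the polynomially backtracking term, the pump string is `a`, and a trailing character other than `a` provides the rejecting suffix. A small, self-contained Python timing sketch (illustrative only, not part of this change):

```python
import re
import time

# Illustrative only: the second 'a*' is a polynomially backtracking term whose
# pump string is 'a' and whose previous loop is the first 'a*'. The trailing
# 'b' is a rejecting suffix, so the engine tries every split of the 'a's
# between the two loops before giving up.
PATTERN = re.compile(r"a*a*$")

for n in (2_500, 5_000, 10_000):
    attack = "a" * n + "b"
    start = time.perf_counter()
    assert PATTERN.match(attack) is None  # fails, but only after ~n^2/2 attempts
    print(f"n = {n:6d}: {time.perf_counter() - start:.2f}s")
# The time roughly quadruples each time the input length doubles.
```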
diff --git a/python/ql/src/semmle/python/security/dataflow/PolynomialReDoS.qll b/python/ql/src/semmle/python/security/dataflow/PolynomialReDoS.qll
new file mode 100644
index 00000000000..eace61442ed
--- /dev/null
+++ b/python/ql/src/semmle/python/security/dataflow/PolynomialReDoS.qll
@@ -0,0 +1,177 @@
+/**
+ * Provides a taint-tracking configuration for detecting polynomial regular expression denial of service (ReDoS)
+ * vulnerabilities.
+ */
+
+import python
+import semmle.python.dataflow.new.DataFlow
+import semmle.python.dataflow.new.DataFlow2
+import semmle.python.dataflow.new.TaintTracking
+import semmle.python.Concepts
+import semmle.python.dataflow.new.RemoteFlowSources
+import semmle.python.dataflow.new.BarrierGuards
+import semmle.python.RegexTreeView
+import semmle.python.ApiGraphs
+
+/** A data flow configuration for tracking compiled regexes from their compilation to the calls that execute them. */
+class RegexDefinitionConfiguration extends DataFlow2::Configuration {
+  RegexDefinitionConfiguration() { this = "RegexDefinitionConfiguration" }
+
+  override predicate isSource(DataFlow::Node source) { source instanceof RegexDefinitionSource }
+
+  override predicate isSink(DataFlow::Node sink) { sink instanceof RegexDefinitionSink }
+}
+
+/** A regex compilation. */
+class RegexDefinitionSource extends DataFlow::CallCfgNode {
+  DataFlow::Node regexNode;
+
+  RegexDefinitionSource() {
+    this = API::moduleImport("re").getMember("compile").getACall() and
+    regexNode in [this.getArg(0), this.getArgByName("pattern")]
+  }
+
+  /** Gets the regex that is being compiled by this node. */
+  RegExpTerm getRegExp() { result.getRegex() = regexNode.asExpr() and result.isRootTerm() }
+
+  /** Gets the data flow node for the regex being compiled by this node. */
+  DataFlow::Node getRegexNode() { result = regexNode }
+}
+
+/** A use of a compiled regex. */
+class RegexDefinitionSink extends DataFlow::Node {
+  RegexExecutionMethod method;
+  DataFlow::CallCfgNode executingCall;
+
+  RegexDefinitionSink() {
+    exists(DataFlow::AttrRead reMethod |
+      executingCall.getFunction() = reMethod and
+      reMethod.getAttributeName() = method and
+      this = reMethod.getObject()
+    )
+  }
+
+  /** Gets the method used to execute the regex. */
+  RegexExecutionMethod getMethod() { result = method }
+
+  /** Gets the data flow node for the executing call. */
+  DataFlow::CallCfgNode getExecutingCall() { result = executingCall }
+}
+
+/**
+ * A taint-tracking configuration for detecting polynomial regular expression denial-of-service vulnerabilities.
+ */
+class PolynomialReDoSConfiguration extends TaintTracking::Configuration {
+  PolynomialReDoSConfiguration() { this = "PolynomialReDoSConfiguration" }
+
+  override predicate isSource(DataFlow::Node source) { source instanceof RemoteFlowSource }
+
+  override predicate isSink(DataFlow::Node sink) { sink instanceof PolynomialReDoSSink }
+}
+
+/** A data flow node executing a regex. */
+abstract class RegexExecution extends DataFlow::Node {
+  /** Gets the data flow node for the regex being executed by this node. */
+  abstract DataFlow::Node getRegexNode();
+
+  /** Gets the data flow node for the string to be searched or matched against. */
+  abstract DataFlow::Node getString();
+}
+
+private class RegexExecutionMethod extends string {
+  RegexExecutionMethod() {
+    this in ["match", "fullmatch", "search", "split", "findall", "finditer", "sub", "subn"]
+  }
+}
+
+/** Gets the index of the argument representing the string to be searched by a regex. */
+int stringArg(RegexExecutionMethod method) {
+  method in ["match", "fullmatch", "search", "split", "findall", "finditer"] and
+  result = 1
+  or
+  method in ["sub", "subn"] and
+  result = 2
+}
+
+/**
+ * A class to find `re` methods that immediately execute a regular expression.
+ *
+ * See `RegexExecutionMethod`.
+ */
+class DirectRegex extends DataFlow::CallCfgNode, RegexExecution {
+  RegexExecutionMethod method;
+
+  DirectRegex() { this = API::moduleImport("re").getMember(method).getACall() }
+
+  override DataFlow::Node getRegexNode() {
+    result in [this.getArg(0), this.getArgByName("pattern")]
+  }
+
+  override DataFlow::Node getString() {
+    result in [this.getArg(stringArg(method)), this.getArgByName("string")]
+  }
+}
+
+/**
+ * A class to find `re` methods that execute a regular expression previously compiled with `re.compile`.
+ *
+ * Given the following example:
+ *
+ * ```py
+ * pattern = re.compile(input)
+ * pattern.match(s)
+ * ```
+ *
+ * This class identifies that `re.compile` compiles `input` and that the resulting
+ * pattern's `match` method is executed afterwards. As a result, `this` refers to
+ * `pattern.match(s)`, and `this.getRegexNode()` returns the node for `input`
+ * (the first argument of `re.compile`).
+ *
+ * See `RegexExecutionMethod`.
+ *
+ * See https://docs.python.org/3/library/re.html#regular-expression-objects
+ */
+private class CompiledRegex extends DataFlow::CallCfgNode, RegexExecution {
+  DataFlow::Node regexNode;
+  RegexExecutionMethod method;
+
+  CompiledRegex() {
+    exists(
+      RegexDefinitionConfiguration conf, RegexDefinitionSource source, RegexDefinitionSink sink
+    |
+      conf.hasFlow(source, sink) and
+      regexNode = source.getRegexNode() and
+      method = sink.getMethod() and
+      this = sink.getExecutingCall()
+    )
+  }
+
+  override DataFlow::Node getRegexNode() { result = regexNode }
+
+  override DataFlow::Node getString() {
+    result in [this.getArg(stringArg(method) - 1), this.getArgByName("string")]
+  }
+}
+
+/**
+ * A data flow sink node for polynomial regular expression denial-of-service vulnerabilities.
+ */
+class PolynomialReDoSSink extends DataFlow::Node {
+  RegExpTerm t;
+
+  PolynomialReDoSSink() {
+    exists(RegexExecution re |
+      re.getRegexNode().asExpr() = t.getRegex() and
+      this = re.getString()
+    ) and
+    t.isRootTerm()
+  }
+
+  /** Gets the regex that is executed against this node. */
+  RegExpTerm getRegExp() { result = t }
+
+  /**
+   * Gets the node to highlight in the alert message.
+   */
+  DataFlow::Node getHighlight() { result = this }
+}
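Putting the pieces together, the configuration reports flows where remote input reaches the string argument of a regex execution whose pattern contains a `PolynomialBackTrackingTerm`. A hypothetical Flask example covering both sink shapes, `DirectRegex` and `CompiledRegex` (illustrative only; it assumes Flask request parameters are modelled as a `RemoteFlowSource`):

```python
import re
from flask import Flask, request

app = Flask(__name__)

# The second '\s*' can start matching anywhere after the first one, so a '#'
# followed by many spaces and then a character that is neither whitespace nor
# '!' backtracks quadratically before the match fails.
BANG_COMMENT = re.compile(r"#\s*\s*!")

@app.route("/direct")
def direct():
    text = request.args.get("text", "")  # remote flow source
    # DirectRegex sink: `text` is the string argument of re.search.
    return "found" if re.search(r"#\s*\s*!", text) else "not found"

@app.route("/compiled")
def compiled():
    text = request.args.get("text", "")  # remote flow source
    # CompiledRegex sink: RegexDefinitionConfiguration tracks the pattern from
    # re.compile above to this .search call; `text` is the searched string.
    return "found" if BANG_COMMENT.search(text) else "not found"
```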