mirror of https://github.com/github/codeql.git, synced 2025-12-16 08:43:11 +01:00
add a shared regex pack
4
shared/regex/change-notes/2022-09-26-initial-version.md
Normal file
@@ -0,0 +1,4 @@
---
category: minorAnalysis
---
* Initial release. Extracted common regex related code, including the ReDoS analysis, into a library pack to share code between languages.
4
shared/regex/codeql-pack.lock.yml
Normal file
@@ -0,0 +1,4 @@
---
dependencies: {}
compiled: false
lockVersion: 1.0.0
300
shared/regex/codeql/regex/OverlyLargeRangeQuery.qll
Normal file
@@ -0,0 +1,300 @@
/**
 * Classes and predicates for working with suspicious character ranges.
 */

private import RegexTreeView

/**
 * Classes and predicates implementing an analysis detecting suspicious character ranges.
 */
module Make<RegexTreeViewSig TreeImpl> {
  private import TreeImpl

  /**
   * Gets a rank for `range` that is unique for ranges in the same file.
   * Prioritizes ranges that match more characters.
   */
  int rankRange(RegExpCharacterRange range) {
    range =
      rank[result](RegExpCharacterRange r, int startline, int startcolumn, int low, int high |
        r.hasLocationInfo(_, startline, startcolumn, _, _) and
        isRange(r, low, high)
      |
        r order by (high - low) desc, startline, startcolumn
      )
  }

  /** Holds if `range` spans from the unicode code points `low` to `high` (both inclusive). */
  predicate isRange(RegExpCharacterRange range, int low, int high) {
    exists(string lowc, string highc |
      range.isRange(lowc, highc) and
      low.toUnicode() = lowc and
      high.toUnicode() = highc
    )
  }

  /** Holds if `char` is an alpha-numeric character. */
  predicate isAlphanumeric(string char) {
    // written like this to avoid having a bindingset for the predicate
    char = [[48 .. 57], [65 .. 90], [97 .. 122]].toUnicode() // 0-9, A-Z, a-z
  }

  /**
   * Holds if the given ranges are from the same character class
   * and there exists at least one character matched by both ranges.
   */
  predicate overlap(RegExpCharacterRange a, RegExpCharacterRange b) {
    exists(RegExpCharacterClass clz |
      a = clz.getAChild() and
      b = clz.getAChild() and
      a != b
    |
      exists(int alow, int ahigh, int blow, int bhigh |
        isRange(a, alow, ahigh) and
        isRange(b, blow, bhigh) and
        alow <= bhigh and
        blow <= ahigh
      )
    )
  }

  /**
   * Holds if `range` overlaps with the char class `escape` from the same character class.
   */
  predicate overlapsWithCharEscape(RegExpCharacterRange range, RegExpCharacterClassEscape escape) {
    exists(RegExpCharacterClass clz, string low, string high |
      range = clz.getAChild() and
      escape = clz.getAChild() and
      range.isRange(low, high)
    |
      escape.getValue() = "w" and
      getInRange(low, high).regexpMatch("\\w")
      or
      escape.getValue() = "d" and
      getInRange(low, high).regexpMatch("\\d")
      or
      escape.getValue() = "s" and
      getInRange(low, high).regexpMatch("\\s")
    )
  }

  /** Gets the unicode code point for a `char`. */
  bindingset[char]
  int toCodePoint(string char) { result.toUnicode() = char }

  /** A character range that appears to be overly wide. */
  class OverlyWideRange instanceof RegExpCharacterRange {
    OverlyWideRange() {
      exists(int low, int high, int numChars |
        isRange(this, low, high) and
        numChars = (1 + high - low) and
        this.getRootTerm().isUsedAsRegExp() and
        numChars >= 10
      |
        // across the Z-a range (which includes backticks)
        toCodePoint("Z") >= low and
        toCodePoint("a") <= high
        or
        // across the 9-A range (which includes e.g. ; and ?)
        toCodePoint("9") >= low and
        toCodePoint("A") <= high
        or
        // a non-alphanumeric char as part of the range boundaries
        exists(int bound | bound = [low, high] | not isAlphanumeric(bound.toUnicode())) and
        // while still being ascii
        low < 128 and
        high < 128
      ) and
      // allowlist for known ranges
      not this = allowedWideRanges()
    }

    /** Gets a string representation of a character class that matches the same chars as this range. */
    string printEquivalent() { result = RangePrinter::printEquivalentCharClass(this) }

    /** Gets a string representation of this range. */
    string toString() { result = super.toString() }

    /** Holds if `lo` is the lower bound of this character range and `hi` the upper bound. */
    predicate isRange(string lo, string hi) { super.isRange(lo, hi) }
  }

  /** Gets a range that should not be reported as an overly wide range. */
  RegExpCharacterRange allowedWideRanges() {
    // ~ is the last printable ASCII character; it is often used as the upper bound of intentionally wide ranges.
    result.isRange(_, "~")
    or
    // the same with " " and "!". " " is the first printable character, and "!" is the first non-white-space printable character.
    result.isRange([" ", "!"], _)
    or
    // the `[@-_]` range is intentional
    result.isRange("@", "_")
    or
    // starting from the zero byte is a good indication that it's purposely matching a large range.
    result.isRange(0.toUnicode(), _)
  }

  /** Gets a char between (and including) `low` and `high`. */
  bindingset[low, high]
  private string getInRange(string low, string high) {
    result = [toCodePoint(low) .. toCodePoint(high)].toUnicode()
  }

  /** A module computing an equivalent character class for an overly wide range. */
  module RangePrinter {
    bindingset[char]
    bindingset[result]
    private string next(string char) {
      exists(int prev, int next |
        prev.toUnicode() = char and
        next.toUnicode() = result and
        next = prev + 1
      )
    }

    /** Gets the points where the parts of the pretty printed range should be cut off. */
    private string cutoffs() { result = ["A", "Z", "a", "z", "0", "9"] }

    /** Gets the char to use in the low end of a range for a given `cut`. */
    private string lowCut(string cut) {
      cut = ["A", "a", "0"] and
      result = cut
      or
      cut = ["Z", "z", "9"] and
      result = next(cut)
    }

    /** Gets the char to use in the high end of a range for a given `cut`. */
    private string highCut(string cut) {
      cut = ["Z", "z", "9"] and
      result = cut
      or
      cut = ["A", "a", "0"] and
      next(result) = cut
    }

    /** Gets the cutoff char used for a given `part` of a range when pretty-printing it. */
    private string cutoff(OverlyWideRange range, int part) {
      exists(int low, int high | isRange(range, low, high) |
        result =
          rank[part + 1](string cut |
              cut = cutoffs() and low < toCodePoint(cut) and toCodePoint(cut) < high
            |
              cut order by toCodePoint(cut)
            )
      )
    }

    /** Gets the number of parts we should print for a given `range`. */
    private int parts(OverlyWideRange range) { result = 1 + count(cutoff(range, _)) }

    /** Holds if the given part of a range should span from `low` to `high`. */
    private predicate part(OverlyWideRange range, int part, string low, string high) {
      // first part.
      part = 0 and
      (
        range.isRange(low, high) and
        parts(range) = 1
        or
        parts(range) >= 2 and
        range.isRange(low, _) and
        high = highCut(cutoff(range, part))
      )
      or
      // middle
      part >= 1 and
      part < parts(range) - 1 and
      low = lowCut(cutoff(range, part - 1)) and
      high = highCut(cutoff(range, part))
      or
      // last.
      part = parts(range) - 1 and
      low = lowCut(cutoff(range, part - 1)) and
      range.isRange(_, high)
    }

    /** Gets an escaped `char` for use in a character class. */
    bindingset[char]
    private string escape(string char) {
      exists(string reg | reg = "(\\[|\\]|\\\\|-|/)" |
        if char.regexpMatch(reg) then result = "\\" + char else result = char
      )
    }

    /** Gets a part of the equivalent range. */
    private string printEquivalentCharClass(OverlyWideRange range, int part) {
      exists(string low, string high | part(range, part, low, high) |
        if
          isAlphanumeric(low) and
          isAlphanumeric(high)
        then result = low + "-" + high
        else
          result =
            strictconcat(string char | char = getInRange(low, high) | escape(char) order by char)
      )
    }

    /** Gets the entire pretty printed equivalent range. */
    string printEquivalentCharClass(OverlyWideRange range) {
      result =
        strictconcat(string r, int part |
            r = "[" and part = -1 and exists(range)
            or
            r = printEquivalentCharClass(range, part)
            or
            r = "]" and part = parts(range)
          |
            r order by part
          )
    }
  }

  /** Gets a char range that is overly large because of `reason`. */
  RegExpCharacterRange getABadRange(string reason, int priority) {
    result instanceof OverlyWideRange and
    priority = 0 and
    exists(string equiv | equiv = result.(OverlyWideRange).printEquivalent() |
      if equiv.length() <= 50
      then reason = "is equivalent to " + equiv
      else reason = "is equivalent to " + equiv.substring(0, 50) + "..."
    )
    or
    priority = 1 and
    exists(RegExpCharacterRange other |
      reason = "overlaps with " + other + " in the same character class" and
      rankRange(result) < rankRange(other) and
      overlap(result, other)
    )
    or
    priority = 2 and
    exists(RegExpCharacterClassEscape escape |
      reason = "overlaps with " + escape + " in the same character class" and
      overlapsWithCharEscape(result, escape)
    )
    or
    reason = "is empty" and
    priority = 3 and
    exists(int low, int high |
      isRange(result, low, high) and
      low > high
    )
  }

  /** Holds if `range` matches suspiciously many characters. */
  predicate problem(RegExpCharacterRange range, string reason) {
    reason =
      strictconcat(string m, int priority |
          range = getABadRange(m, priority)
        |
          m, ", and " order by priority desc
        ) and
    // specifying a range using an escape is usually OK.
    not range.getAChild() instanceof RegExpEscape and
    // Unicode escapes in strings are interpreted before it turns into a regexp,
    // so e.g. [\u0001-\uFFFF] will just turn up as a range between two constants.
    // We therefore exclude these ranges.
    range.getRootTerm().getParent() instanceof RegExpLiteral and
    // is used as regexp (mostly for JS where regular expressions are parsed eagerly)
    range.getRootTerm().isUsedAsRegExp()
  }
}
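To make the "overly wide range" problem above concrete: a range like [A-z] silently accepts the six punctuation characters between Z and a. The following TypeScript sketch is illustrative only (it is not part of this pack) and shows both the extra characters and the kind of equivalent character class that the RangePrinter module pretty-prints:

// Illustrative only: shows why the range A-z is wider than A-Za-z.
const wide = /^[A-z]+$/;        // overly wide range
const intended = /^[A-Za-z]+$/; // what was probably meant

const extras: string[] = [];
for (let c = "Z".charCodeAt(0) + 1; c < "a".charCodeAt(0); c++) {
  const ch = String.fromCharCode(c);
  if (wide.test(ch) && !intended.test(ch)) extras.push(ch);
}
console.log(extras.join(" ")); // prints: [ \ ] ^ _ `
// An equivalent explicit character class is roughly [A-Z\[\\\]^_`a-z],
// which is the kind of class the RangePrinter module above pretty-prints.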
451
shared/regex/codeql/regex/RegexTreeView.qll
Normal file
@@ -0,0 +1,451 @@
/**
 * This file contains a `RegexTreeViewSig` module describing the syntax tree of regular expressions.
 */

/**
 * A signature describing the syntax tree of regular expressions.
 */
signature module RegexTreeViewSig {
  /**
   * An element used in some way as or in a regular expression.
   * This class exists to have a common supertype that all languages can agree on.
   */
  class Top;

  /**
   * An element containing a regular expression term, that is, either
   * a string literal (parsed as a regular expression; the root of the parse tree)
   * or another regular expression term (a descendant of the root).
   */
  class RegExpParent extends Top;

  /**
   * A regular expression literal.
   *
   * Note that this class does not cover regular expressions constructed by calling the built-in
   * `RegExp` function.
   *
   * Example:
   *
   * ```
   * /(?i)ab*c(d|e)$/
   * ```
   */
  class RegExpLiteral extends RegExpParent;

  /**
   * A regular expression term, that is, a syntactic part of a regular expression.
   * These are the tree nodes that form the parse tree of a regular expression literal.
   */
  class RegExpTerm extends Top {
    /** Gets a child term of this term. */
    RegExpTerm getAChild();

    /**
     * Holds if this is the root term of a regular expression.
     */
    predicate isRootTerm();

    /**
     * Gets the parent term of this regular expression term, or the
     * regular expression literal if this is the root term.
     */
    RegExpParent getParent();

    /**
     * Holds if this term is part of a regular expression literal, or a string literal
     * that is interpreted as a regular expression.
     */
    predicate isUsedAsRegExp();

    /** Gets the outermost term of this regular expression. */
    RegExpTerm getRootTerm();

    /** Gets the raw source text of this term. */
    string getRawValue();

    /** Gets the `i`th child term of this term. */
    RegExpTerm getChild(int i);

    /** Gets the number of child terms of this term. */
    int getNumChild();

    /** Gets the regular expression term that is matched (textually) after this one, if any. */
    RegExpTerm getSuccessor();

    /** Gets a textual representation of this term. */
    string toString();

    /** Holds if this term is at the specified location. */
    predicate hasLocationInfo(
      string filepath, int startline, int startcolumn, int endline, int endcolumn
    );
  }

  /**
   * A quantified regular expression term.
   *
   * Example:
   *
   * ```
   * ((ECMA|Java)[sS]cript)*
   * ```
   */
  class RegExpQuantifier extends RegExpTerm;

  /**
   * A star-quantified term.
   *
   * Example:
   *
   * ```
   * \w*
   * ```
   */
  class RegExpStar extends RegExpQuantifier;

  /**
   * An optional term.
   *
   * Example:
   *
   * ```
   * ;?
   * ```
   */
  class RegExpOpt extends RegExpQuantifier;

  /**
   * A plus-quantified term.
   *
   * Example:
   *
   * ```
   * \w+
   * ```
   */
  class RegExpPlus extends RegExpQuantifier;

  /**
   * A range-quantified term.
   *
   * Examples:
   *
   * ```
   * \w{2,4}
   * \w{2,}
   * \w{2}
   * ```
   */
  class RegExpRange extends RegExpQuantifier {
    /** Gets the lower bound of the range. */
    int getLowerBound();

    /**
     * Gets the upper bound of the range, if any.
     *
     * If there is no upper bound, any number of repetitions is allowed.
     * For a term of the form `r{lo}`, both the lower and the upper bound
     * are `lo`.
     */
    int getUpperBound();
  }

  /**
   * An escaped regular expression term, that is, a regular expression
   * term starting with a backslash.
   *
   * Example:
   *
   * ```
   * \.
   * \w
   * ```
   */
  class RegExpEscape extends RegExpTerm;

  /**
   * A character class escape in a regular expression.
   *
   * Examples:
   *
   * ```
   * \w
   * \S
   * ```
   */
  class RegExpCharacterClassEscape extends RegExpEscape {
    /** Gets the name of the character class; for example, `w` for `\w`. */
    string getValue();
  }

  /**
   * An alternative term, that is, a term of the form `a|b`.
   *
   * Example:
   *
   * ```
   * ECMA|Java
   * ```
   */
  class RegExpAlt extends RegExpTerm;

  /**
   * A grouped regular expression.
   *
   * Examples:
   *
   * ```
   * (ECMA|Java)
   * (?:ECMA|Java)
   * (?<quote>['"])
   * ```
   */
  class RegExpGroup extends RegExpTerm {
    /**
     * Gets the index of this capture group within the enclosing regular
     * expression literal.
     *
     * For example, in the regular expression `/((a?).)(?:b)/`, the
     * group `((a?).)` has index 1, the group `(a?)` nested inside it
     * has index 2, and the group `(?:b)` has no index, since it is
     * not a capture group.
     */
    int getNumber();
  }

  /**
   * A back reference, that is, a term of the form `\i` or `\k<name>`
   * in a regular expression.
   *
   * Examples:
   *
   * ```
   * \1
   * \k<quote>
   * ```
   */
  class RegExpBackRef extends RegExpTerm {
    /** Gets the capture group this back reference refers to. */
    RegExpGroup getGroup();
  }

  /**
   * A sequence term.
   *
   * Example:
   *
   * ```
   * (ECMA|Java)Script
   * ```
   *
   * This is a sequence with the elements `(ECMA|Java)` and `Script`.
   */
  class RegExpSequence extends RegExpTerm;

  /**
   * A zero-width lookahead or lookbehind assertion.
   *
   * Examples:
   *
   * ```
   * (?=\w)
   * (?!\n)
   * (?<=\.)
   * (?<!\\)
   * ```
   */
  class RegExpSubPattern extends RegExpTerm {
    /** Gets the term that this assertion applies to. */
    RegExpTerm getOperand();
  }

  /**
   * A zero-width lookahead assertion.
   *
   * Examples:
   *
   * ```
   * (?=\w)
   * (?!\n)
   * ```
   */
  class RegExpLookahead extends RegExpSubPattern;

  /**
   * A positive-lookahead assertion.
   *
   * Examples:
   *
   * ```
   * (?=\w)
   * ```
   */
  class RegExpPositiveLookahead extends RegExpLookahead;

  /**
   * A zero-width lookbehind assertion.
   *
   * Examples:
   *
   * ```
   * (?<=\.)
   * (?<!\\)
   * ```
   */
  class RegExpLookbehind extends RegExpSubPattern;

  /**
   * A positive-lookbehind assertion.
   *
   * Examples:
   *
   * ```
   * (?<=\.)
   * ```
   */
  class RegExpPositiveLookbehind extends RegExpLookbehind;

  /**
   * A constant regular expression term, that is, a regular expression
   * term matching a single string.
   *
   * Example:
   *
   * ```
   * abc
   * ```
   */
  class RegExpConstant extends RegExpTerm {
    /** Gets the string matched by this constant term. */
    string getValue();

    /**
     * Holds if this constant represents a valid Unicode character (as opposed
     * to a surrogate code point that does not correspond to a character by itself).
     */
    predicate isCharacter();
  }

  /**
   * A character class in a regular expression.
   *
   * Examples:
   *
   * ```
   * [a-z_]
   * [^<>&]
   * ```
   */
  class RegExpCharacterClass extends RegExpTerm {
    /**
     * Holds if this character class matches any character.
     */
    predicate isUniversalClass();

    /** Holds if this is an inverted character class, that is, a term of the form `[^...]`. */
    predicate isInverted();
  }

  /**
   * A character range in a character class in a regular expression.
   *
   * Example:
   *
   * ```
   * a-z
   * ```
   */
  class RegExpCharacterRange extends RegExpTerm {
    /** Holds if `lo` is the lower bound of this character range and `hi` the upper bound. */
    predicate isRange(string lo, string hi);
  }

  /**
   * A dot regular expression.
   *
   * Example:
   *
   * ```
   * .
   * ```
   */
  class RegExpDot extends RegExpTerm;

  /**
   * A dollar assertion `$` matching the end of a line.
   *
   * Example:
   *
   * ```
   * $
   * ```
   */
  class RegExpDollar extends RegExpTerm;

  /**
   * A caret assertion `^` matching the beginning of a line.
   *
   * Example:
   *
   * ```
   * ^
   * ```
   */
  class RegExpCaret extends RegExpTerm;

  /**
   * A word boundary assertion.
   *
   * Example:
   *
   * ```
   * \b
   * ```
   */
  class RegExpWordBoundary extends RegExpTerm;

  /**
   * A regular expression term that permits unlimited repetitions.
   */
  class InfiniteRepetitionQuantifier extends RegExpQuantifier;

  /**
   * Holds if the regular expression should not be considered.
   *
   * For JavaScript we make the pragmatic performance optimization to ignore minified files.
   */
  predicate isExcluded(RegExpParent parent);

  /**
   * Holds if `term` is a possessive quantifier.
   * As JavaScript's regexes do not support possessive quantifiers, this never holds, but it is used by the shared library.
   */
  predicate isPossessive(RegExpQuantifier term);

  /**
   * Holds if the regex that `term` is part of is used in a way that ignores any leading prefix of the input it's matched against.
   * Not yet implemented for JavaScript.
   */
  predicate matchesAnyPrefix(RegExpTerm term);

  /**
   * Holds if the regex that `term` is part of is used in a way that ignores any trailing suffix of the input it's matched against.
   * Not yet implemented for JavaScript.
   */
  predicate matchesAnySuffix(RegExpTerm term);

  /**
   * Holds if `term` is an escape class representing e.g. `\d`.
   * `clazz` is which character class it represents, e.g. "d" for `\d`.
   */
  predicate isEscapeClass(RegExpTerm term, string clazz);

  /**
   * Holds if `root` has the `i` flag for case-insensitive matching.
   */
  predicate isIgnoreCase(RegExpTerm root);

  /**
   * Holds if `root` has the `s` ("dotAll") flag, which makes `.` also match line terminators.
   */
  predicate isDotAll(RegExpTerm root);
}
177
shared/regex/codeql/regex/nfa/BadTagFilterQuery.qll
Normal file
@@ -0,0 +1,177 @@
/**
 * Provides predicates for reasoning about bad tag filter vulnerabilities.
 */

private import NfaUtils as NfaUtils
private import RegexpMatching as RM
private import codeql.regex.RegexTreeView

/**
 * Module implementing classes and predicates for reasoning about bad tag filter vulnerabilities.
 */
module Make<RegexTreeViewSig TreeImpl> {
  private import TreeImpl
  import RM::Make<TreeImpl>

  /**
   * Holds if the regexp `root` should be tested against `str`.
   * Implements the `isRegexpMatchingCandidateSig` signature from `RegexpMatching`.
   * `ignorePrefix` toggles whether the regular expression should be treated as accepting any prefix if it's unanchored.
   * `testWithGroups` toggles whether it's tested which groups are filled by a given input string.
   */
  private predicate isBadTagFilterCandidate(
    RootTerm root, string str, boolean ignorePrefix, boolean testWithGroups
  ) {
    // the regexp must mention "<" and ">" explicitly.
    forall(string angleBracket | angleBracket = ["<", ">"] |
      any(RegExpConstant term | term.getValue().matches("%" + angleBracket + "%")).getRootTerm() =
        root
    ) and
    ignorePrefix = true and
    (
      str = ["<!-- foo -->", "<!-- foo --!>", "<!- foo ->", "<foo>", "<script>"] and
      testWithGroups = true
      or
      str =
        [
          "<!-- foo -->", "<!- foo ->", "<!-- foo --!>", "<!-- foo\n -->", "<script>foo</script>",
          "<script \n>foo</script>", "<script >foo\n</script>", "<foo ></foo>", "<foo>",
          "<foo src=\"foo\"></foo>", "<script>", "<script src=\"foo\"></script>",
          "<script src='foo'></script>", "<SCRIPT>foo</SCRIPT>", "<script\tsrc=\"foo\"/>",
          "<script\tsrc='foo'></script>", "<sCrIpT>foo</ScRiPt>",
          "<script src=\"foo\">foo</script >", "<script src=\"foo\">foo</script foo=\"bar\">",
          "<script src=\"foo\">foo</script\t\n bar>"
        ] and
      testWithGroups = false
    )
  }

  /**
   * A regexp that matches some string from the `isBadTagFilterCandidate` predicate.
   */
  class HtmlMatchingRegExp instanceof RootTerm {
    HtmlMatchingRegExp() { RegexpMatching<isBadTagFilterCandidate/4>::matches(this, _) }

    /** Holds if this regexp matched `str`, where `str` is one of the strings from `isBadTagFilterCandidate`. */
    predicate matches(string str) { RegexpMatching<isBadTagFilterCandidate/4>::matches(this, str) }

    /** Holds if this regexp fills capture group `g` when matching `str`, where `str` is one of the strings from `isBadTagFilterCandidate`. */
    predicate fillsCaptureGroup(string str, int g) {
      RegexpMatching<isBadTagFilterCandidate/4>::fillsCaptureGroup(this, str, g)
    }

    /** Gets a string representation of this term. */
    string toString() { result = super.toString() }

    /** Holds if this term has the specified location. */
    predicate hasLocationInfo(
      string filepath, int startline, int startcolumn, int endline, int endcolumn
    ) {
      super.hasLocationInfo(filepath, startline, startcolumn, endline, endcolumn)
    }
  }

  /**
   * Holds if `regexp` matches some HTML tags, but misses some HTML tags that it should match.
   *
   * When adding a new case to this predicate, make sure the test strings used in `matches(..)` calls are present in `HTMLMatchingRegExp::test` / `HTMLMatchingRegExp::testWithGroups`.
   */
  predicate isBadRegexpFilter(HtmlMatchingRegExp regexp, string msg) {
    // CVE-2021-33829 - matching both "<!-- foo -->" and "<!-- foo --!>", but in different capture groups
    regexp.matches("<!-- foo -->") and
    regexp.matches("<!-- foo --!>") and
    exists(int a, int b | a != b |
      regexp.fillsCaptureGroup("<!-- foo -->", a) and
      // <!-- foo --> might be ambiguously parsed (matching both capture groups), and that is ok here.
      regexp.fillsCaptureGroup("<!-- foo --!>", b) and
      not regexp.fillsCaptureGroup("<!-- foo --!>", a) and
      msg =
        "Comments ending with --> are matched differently from comments ending with --!>. The first is matched with capture group "
          + a + " and comments ending with --!> are matched with capture group " +
          strictconcat(int i | regexp.fillsCaptureGroup("<!-- foo --!>", i) | i.toString(), ", ") +
          "."
    )
    or
    // CVE-2020-17480 - matching "<!-- foo -->" and other tags, but not "<!-- foo --!>".
    exists(int group, int other |
      group != other and
      regexp.fillsCaptureGroup("<!-- foo -->", group) and
      regexp.fillsCaptureGroup("<foo>", other) and
      not regexp.matches("<!-- foo --!>") and
      not regexp.fillsCaptureGroup("<!-- foo -->", any(int i | i != group)) and
      not regexp.fillsCaptureGroup("<!- foo ->", group) and
      not regexp.fillsCaptureGroup("<foo>", group) and
      not regexp.fillsCaptureGroup("<script>", group) and
      msg =
        "This regular expression only parses --> (capture group " + group +
          ") and not --!> as an HTML comment end tag."
    )
    or
    regexp.matches("<!-- foo -->") and
    not regexp.matches("<!-- foo\n -->") and
    not regexp.matches("<!- foo ->") and
    not regexp.matches("<foo>") and
    not regexp.matches("<script>") and
    msg = "This regular expression does not match comments containing newlines."
    or
    regexp.matches("<script>foo</script>") and
    regexp.matches("<script src=\"foo\"></script>") and
    not regexp.matches("<foo ></foo>") and
    (
      not regexp.matches("<script \n>foo</script>") and
      msg = "This regular expression matches <script></script>, but not <script \\n></script>"
      or
      not regexp.matches("<script >foo\n</script>") and
      msg = "This regular expression matches <script>...</script>, but not <script >...\\n</script>"
    )
    or
    regexp.matches("<script>foo</script>") and
    regexp.matches("<script src=\"foo\"></script>") and
    not regexp.matches("<script src='foo'></script>") and
    not regexp.matches("<foo>") and
    msg =
      "This regular expression does not match script tags where the attribute uses single-quotes."
    or
    regexp.matches("<script>foo</script>") and
    regexp.matches("<script src='foo'></script>") and
    not regexp.matches("<script src=\"foo\"></script>") and
    not regexp.matches("<foo>") and
    msg =
      "This regular expression does not match script tags where the attribute uses double-quotes."
    or
    regexp.matches("<script>foo</script>") and
    regexp.matches("<script src='foo'></script>") and
    not regexp.matches("<script\tsrc='foo'></script>") and
    not regexp.matches("<foo>") and
    not regexp.matches("<foo src=\"foo\"></foo>") and
    msg =
      "This regular expression does not match script tags where tabs are used between attributes."
    or
    regexp.matches("<script>foo</script>") and
    not isIgnoreCase(regexp) and
    not regexp.matches("<foo>") and
    not regexp.matches("<foo ></foo>") and
    (
      not regexp.matches("<SCRIPT>foo</SCRIPT>") and
      msg = "This regular expression does not match upper case <SCRIPT> tags."
      or
      not regexp.matches("<sCrIpT>foo</ScRiPt>") and
      regexp.matches("<SCRIPT>foo</SCRIPT>") and
      msg = "This regular expression does not match mixed case <sCrIpT> tags."
    )
    or
    regexp.matches("<script src=\"foo\"></script>") and
    not regexp.matches("<foo>") and
    not regexp.matches("<foo ></foo>") and
    (
      not regexp.matches("<script src=\"foo\">foo</script >") and
      msg = "This regular expression does not match script end tags like </script >."
      or
      not regexp.matches("<script src=\"foo\">foo</script foo=\"bar\">") and
      msg = "This regular expression does not match script end tags like </script foo=\"bar\">."
      or
      not regexp.matches("<script src=\"foo\">foo</script\t\n bar>") and
      msg = "This regular expression does not match script end tags like </script\\t\\n bar>."
    )
  }
}
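As an illustration of the kind of gap the isBadRegexpFilter predicate above flags (a filter that handles --> but not the legacy --!> comment terminator), here is a hedged TypeScript sketch; the sanitizer regex is hypothetical and not taken from this pack, and the two inputs are test strings from isBadTagFilterCandidate:

// Illustrative only: a comment-stripping filter that misses the --!> terminator.
const stripComments = (html: string): string => html.replace(/<!--[\s\S]*?-->/g, "");

console.log(stripComments("<!-- foo -->"));  // ""              (comment removed)
console.log(stripComments("<!-- foo --!>")); // "<!-- foo --!>" (left untouched)
// HTML parsers also accept --!> as a comment end, so content hidden behind it
// survives the filter, which is exactly the mismatch the predicate above reports.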
355
shared/regex/codeql/regex/nfa/ExponentialBackTracking.qll
Normal file
@@ -0,0 +1,355 @@
/**
 * This library implements the analysis described in the following two papers:
 *
 * James Kirrage, Asiri Rathnayake, Hayo Thielecke: Static Analysis for
 * Regular Expression Denial-of-Service Attacks. NSS 2013.
 * (http://www.cs.bham.ac.uk/~hxt/research/reg-exp-sec.pdf)
 * Asiri Rathnayake, Hayo Thielecke: Static Analysis for Regular Expression
 * Exponential Runtime via Substructural Logics. 2014.
 * (https://www.cs.bham.ac.uk/~hxt/research/redos_full.pdf)
 *
 * The basic idea is to search for overlapping cycles in the NFA, that is,
 * states `q` such that there are two distinct paths from `q` to itself
 * that consume the same word `w`.
 *
 * For any such state `q`, an attack string can be constructed as follows:
 * concatenate a prefix `v` that takes the NFA to `q` with `n` copies of
 * the word `w` that leads back to `q` along two different paths, followed
 * by a suffix `x` that is _not_ accepted in state `q`. A backtracking
 * implementation will need to explore at least 2^n different ways of going
 * from `q` back to itself while trying to match the `n` copies of `w`
 * before finally giving up.
 *
 * Now in order to identify overlapping cycles, all we have to do is find
 * pumpable forks, that is, states `q` that can transition to two different
 * states `r1` and `r2` on the same input symbol `c`, such that there are
 * paths from both `r1` and `r2` to `q` that consume the same word. The latter
 * condition is equivalent to saying that `(q, q)` is reachable from `(r1, r2)`
 * in the product NFA.
 *
 * This is what the library does. It makes a simple attempt to construct a
 * prefix `v` leading into `q`, but only to improve the alert message.
 * And the library tries to prove the existence of a suffix that ensures
 * rejection. This check might fail, which can cause false positives.
 *
 * Finally, sometimes it depends on the translation whether the NFA generated
 * for a regular expression has a pumpable fork or not. We implement one
 * particular translation, which may result in false positives or negatives
 * relative to some particular JavaScript engine.
 *
 * More precisely, the library constructs an NFA from a regular expression `r`
 * as follows:
 *
 * * Every sub-term `t` gives rise to an NFA state `Match(t,i)`, representing
 *   the state of the automaton before attempting to match the `i`th character in `t`.
 * * There is one accepting state `Accept(r)`.
 * * There is a special `AcceptAnySuffix(r)` state, which accepts any suffix string
 *   by using an epsilon transition to `Accept(r)` and an any transition to itself.
 * * Transitions between states may be labelled with epsilon, or an abstract
 *   input symbol.
 * * Each abstract input symbol represents a set of concrete input characters:
 *   either a single character, a set of characters represented by a
 *   character class, or the set of all characters.
 * * The product automaton is constructed lazily, starting with pair states
 *   `(q, q)` where `q` is a fork, and proceeding along an over-approximate
 *   step relation.
 * * The over-approximate step relation allows transitions along pairs of
 *   abstract input symbols where the symbols have overlap in the characters they accept.
 * * Once a trace of pairs of abstract input symbols that leads from a fork
 *   back to itself has been identified, we attempt to construct a concrete
 *   string corresponding to it, which may fail.
 * * Lastly we ensure that any state reached by repeating `n` copies of `w` has
 *   a suffix `x` (possibly empty) that is most likely __not__ accepted.
 */

private import NfaUtils as NfaUtils
private import codeql.regex.RegexTreeView

/**
 * A parameterized module implementing the analysis described in the above papers.
 */
module Make<RegexTreeViewSig TreeImpl> {
  private import TreeImpl
  import NfaUtils::Make<TreeImpl>

  /**
   * Holds if state `s` might be inside a backtracking repetition.
   */
  pragma[noinline]
  private predicate stateInsideBacktracking(State s) {
    s.getRepr().getParent*() instanceof MaybeBacktrackingRepetition
  }

  /**
   * An infinitely repeating quantifier that might backtrack.
   */
  private class MaybeBacktrackingRepetition instanceof InfiniteRepetitionQuantifier {
    MaybeBacktrackingRepetition() {
      exists(RegExpTerm child |
        child instanceof RegExpAlt or
        child instanceof RegExpQuantifier
      |
        child.getParent+() = this
      )
    }

    /** Gets a textual representation of this element. */
    string toString() { result = this.(InfiniteRepetitionQuantifier).toString() }
  }

  /**
   * A state in the product automaton.
   */
  private newtype TStatePair =
    /**
     * We lazily only construct those states that we are actually
     * going to need: `(q, q)` for every fork state `q`, and any
     * pair of states that can be reached from a pair that we have
     * already constructed. To cut down on the number of states,
     * we only represent states `(q1, q2)` where `q1` is lexicographically
     * no bigger than `q2`.
     *
     * States are only constructed if both states in the pair are
     * inside a repetition that might backtrack.
     */
    MkStatePair(State q1, State q2) {
      isFork(q1, _, _, _, _) and q2 = q1
      or
      (step(_, _, _, q1, q2) or step(_, _, _, q2, q1)) and
      rankState(q1) <= rankState(q2)
    }

  /**
   * Gets a unique number for a `state`.
   * It is used to create an ordering of states, where states with the same `toString()` will be ordered differently.
   */
  private int rankState(State state) {
    state =
      rank[result](State s |
          stateInsideBacktracking(s)
        |
          s order by getTermLocationString(s.getRepr())
        )
  }

  /**
   * A state in the product automaton.
   */
  private class StatePair extends TStatePair {
    State q1;
    State q2;

    StatePair() { this = MkStatePair(q1, q2) }

    /** Gets a textual representation of this element. */
    string toString() { result = "(" + q1 + ", " + q2 + ")" }

    /** Gets the first component of the state pair. */
    State getLeft() { result = q1 }

    /** Gets the second component of the state pair. */
    State getRight() { result = q2 }
  }

  /**
   * Holds for `(fork, fork)` state pairs when `isFork(fork, _, _, _, _)` holds.
   *
   * Used in `statePairDistToFork`.
   */
  private predicate isStatePairFork(StatePair p) {
    exists(State fork | p = MkStatePair(fork, fork) and isFork(fork, _, _, _, _))
  }

  /**
   * Holds if there are transitions from the components of `q` to the corresponding
   * components of `r`.
   *
   * Used in `statePairDistToFork`.
   */
  private predicate reverseStep(StatePair r, StatePair q) { step(q, _, _, r) }

  /**
   * Gets the minimum length of a path from `q` to `r` in the
   * product automaton.
   */
  private int statePairDistToFork(StatePair q, StatePair r) =
    shortestDistances(isStatePairFork/1, reverseStep/2)(r, q, result)

  /**
   * Holds if there are transitions from `q` to `r1` and from `q` to `r2`
   * labelled with `s1` and `s2`, respectively, where `s1` and `s2` do not
   * trivially have an empty intersection.
   *
   * This predicate only holds for states associated with regular expressions
   * that have at least one repetition quantifier in them (otherwise the
   * expression cannot be vulnerable to ReDoS attacks anyway).
   */
  pragma[noopt]
  private predicate isFork(State q, InputSymbol s1, InputSymbol s2, State r1, State r2) {
    stateInsideBacktracking(q) and
    exists(State q1, State q2 |
      q1 = epsilonSucc*(q) and
      delta(q1, s1, r1) and
      q2 = epsilonSucc*(q) and
      delta(q2, s2, r2) and
      // Use pragma[noopt] to prevent intersect(s1,s2) from being the starting point of the join.
      // From (s1,s2) it would find a huge number of intermediate state pairs (q1,q2) originating from different literals,
      // and discover at the end that no `q` can reach both `q1` and `q2` by epsilon transitions.
      exists(intersect(s1, s2))
    |
      s1 != s2
      or
      r1 != r2
      or
      r1 = r2 and q1 != q2
      or
      // If q can reach itself by epsilon transitions, then there are two distinct paths to the q1/q2 state:
      // one that uses the loop and one that doesn't. The engine will separately attempt to match with each path,
      // despite ending in the same state. The "fork" thus arises from the choice of whether to use the loop or not.
      // To avoid every state in the loop becoming a fork state,
      // we arbitrarily pick the InfiniteRepetitionQuantifier state as the canonical fork state for the loop
      // (every epsilon-loop must contain such a state).
      //
      // We additionally require that there exists another InfiniteRepetitionQuantifier `mid` on the path from `q` to itself.
      // This is done to avoid flagging regular expressions such as `/(a?)*b/`, which only has polynomial runtime and is detected by `js/polynomial-redos`.
      // The below code is therefore a heuristic, that only flags regular expressions such as `/(a*)*b/`,
      // and does not flag regular expressions such as `/(a?b?)*c/`, but the latter pattern is not used frequently.
      r1 = r2 and
      q1 = q2 and
      epsilonSucc+(q) = q and
      exists(RegExpTerm term | term = q.getRepr() | term instanceof InfiniteRepetitionQuantifier) and
      // One of the mid states is an infinite quantifier itself
      exists(State mid, RegExpTerm term |
        mid = epsilonSucc+(q) and
        term = mid.getRepr() and
        term instanceof InfiniteRepetitionQuantifier and
        q = epsilonSucc+(mid) and
        not mid = q
      )
    ) and
    stateInsideBacktracking(r1) and
    stateInsideBacktracking(r2)
  }

  /**
   * Gets the state pair `(q1, q2)` or `(q2, q1)`; note that only
   * one or the other is defined.
   */
  private StatePair mkStatePair(State q1, State q2) {
    result = MkStatePair(q1, q2) or result = MkStatePair(q2, q1)
  }

  /**
   * Holds if there are transitions from the components of `q` to the corresponding
   * components of `r` labelled with `s1` and `s2`, respectively.
   */
  private predicate step(StatePair q, InputSymbol s1, InputSymbol s2, StatePair r) {
    exists(State r1, State r2 | step(q, s1, s2, r1, r2) and r = mkStatePair(r1, r2))
  }

  /**
   * Holds if there are transitions from the components of `q` to `r1` and `r2`
   * labelled with `s1` and `s2`, respectively.
   *
   * We only consider transitions where the resulting states `(r1, r2)` are both
   * inside a repetition that might backtrack.
   */
  pragma[noopt]
  private predicate step(StatePair q, InputSymbol s1, InputSymbol s2, State r1, State r2) {
    exists(State q1, State q2 | q.getLeft() = q1 and q.getRight() = q2 |
      deltaClosed(q1, s1, r1) and
      deltaClosed(q2, s2, r2) and
      // use noopt to force the join on `intersect` to happen last.
      exists(intersect(s1, s2))
    ) and
    stateInsideBacktracking(r1) and
    stateInsideBacktracking(r2)
  }

  private newtype TTrace =
    Nil() or
    Step(InputSymbol s1, InputSymbol s2, TTrace t) { isReachableFromFork(_, _, s1, s2, t, _) }

  /**
   * A list of pairs of input symbols that describe a path in the product automaton
   * starting from some fork state.
   */
  private class Trace extends TTrace {
    /** Gets a textual representation of this element. */
    string toString() {
      this = Nil() and result = "Nil()"
      or
      exists(InputSymbol s1, InputSymbol s2, Trace t | this = Step(s1, s2, t) |
        result = "Step(" + s1 + ", " + s2 + ", " + t + ")"
      )
    }
  }

  /**
   * Holds if `r` is reachable from `(fork, fork)` under input `w`, and there is
   * a path from `r` back to `(fork, fork)` with `rem` steps.
   */
  private predicate isReachableFromFork(State fork, StatePair r, Trace w, int rem) {
    exists(InputSymbol s1, InputSymbol s2, Trace v |
      isReachableFromFork(fork, r, s1, s2, v, rem) and
      w = Step(s1, s2, v)
    )
  }

  private predicate isReachableFromFork(
    State fork, StatePair r, InputSymbol s1, InputSymbol s2, Trace v, int rem
  ) {
    // base case
    exists(State q1, State q2 |
      isFork(fork, s1, s2, q1, q2) and
      r = MkStatePair(q1, q2) and
      v = Nil() and
      rem = statePairDistToFork(r, MkStatePair(fork, fork))
    )
    or
    // recursive case
    exists(StatePair p |
      isReachableFromFork(fork, p, v, rem + 1) and
      step(p, s1, s2, r) and
      rem = statePairDistToFork(r, MkStatePair(fork, fork))
    )
  }

  /**
   * Gets a state in the product automaton from which `(fork, fork)` is
   * reachable in zero or more epsilon transitions.
   */
  private StatePair getAForkPair(State fork) {
    isFork(fork, _, _, _, _) and
    result = MkStatePair(epsilonPred*(fork), epsilonPred*(fork))
  }

  /** An implementation of a chain containing chars for use by `Concretizer`. */
  private module CharTreeImpl implements CharTree {
    class CharNode = Trace;

    /** Gets the predecessor trace of `t`. */
    CharNode getPrev(CharNode t) { t = Step(_, _, result) }

    /** Holds if `n` is a trace that is used by `concretize` in `isPumpable`. */
    predicate isARelevantEnd(CharNode n) {
      exists(State f | isReachableFromFork(f, getAForkPair(f), n, _))
    }

    /** Gets a character matched by both input symbols of the step `t`. */
    string getChar(CharNode t) {
      exists(InputSymbol s1, InputSymbol s2 | t = Step(s1, s2, _) | result = intersect(s1, s2))
    }
  }

  /**
   * Holds if `fork` is a pumpable fork with word `w`.
   */
  private predicate isPumpable(State fork, string w) {
    exists(StatePair q, Trace t |
      isReachableFromFork(fork, q, t, _) and
      q = getAForkPair(fork) and
      w = Concretizer<CharTreeImpl>::concretize(t)
    )
  }

  /** Holds if `state` has exponential ReDoS. */
  predicate hasReDoSResult = ReDoSPruning<isPumpable/2>::hasReDoSResult/4;
}
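The header comment above describes building an attack string as a prefix v, n copies of a pump word w, and a rejecting suffix x. The following TypeScript sketch is illustrative only (exact timings depend on the regex engine) and applies that construction to the /(a*)*b/ pattern mentioned in the comments:

// Illustrative only: measures backtracking blow-up for the pumpable regex /^(a*)*b$/.
const redos = /^(a*)*b$/;

for (const n of [18, 20, 22, 24]) {
  const attack = "a".repeat(n) + "!";   // v (empty) + n copies of w ("a") + rejecting suffix x ("!")
  const start = Date.now();
  redos.test(attack);                   // always false, but explores on the order of 2^n paths first
  console.log(`n=${n}: ${Date.now() - start} ms`);
}
// In a backtracking engine each +2 on n roughly quadruples the running time,
// which is the exponential behaviour the isPumpable predicate above detects statically.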
1378
shared/regex/codeql/regex/nfa/NfaUtils.qll
Normal file
File diff suppressed because it is too large
176
shared/regex/codeql/regex/nfa/RegexpMatching.qll
Normal file
@@ -0,0 +1,176 @@
/**
 * Provides predicates for reasoning about which strings are matched by a regular expression,
 * and for testing which capture groups are filled when a particular regexp matches a string.
 */

private import NfaUtils as NfaUtils
private import codeql.regex.RegexTreeView

/**
 * A parameterized module for reasoning about regexp matching.
 */
module Make<RegexTreeViewSig TreeImpl> {
  private import TreeImpl
  import NfaUtils::Make<TreeImpl>

  /** A root term. */
  class RootTerm instanceof RegExpTerm {
    RootTerm() { this.isRootTerm() }

    /** Gets a string representation of this term. */
    string toString() { result = super.toString() }

    /** Holds if this term has the specified location. */
    predicate hasLocationInfo(
      string filepath, int startline, int startcolumn, int endline, int endcolumn
    ) {
      super.hasLocationInfo(filepath, startline, startcolumn, endline, endcolumn)
    }
  }

  /**
   * Holds if it should be tested whether `root` matches `str`.
   *
   * If `ignorePrefix` is true, then a regexp without a start anchor will be treated as if it had a start anchor.
   * E.g. a regular expression `/foo$/` will match any string that ends with "foo",
   * but if `ignorePrefix` is true, it will only match "foo".
   *
   * If `testWithGroups` is true, then the `RegexpMatching::fillsCaptureGroup` predicate can be used to determine which capture
   * groups are filled by a string.
   */
  signature predicate isRegexpMatchingCandidateSig(
    RootTerm root, string str, boolean ignorePrefix, boolean testWithGroups
  );

  /**
   * A module for determining if a regexp matches a given string,
   * and reasoning about which capture groups are filled by a given string.
   *
   * The module parameter `isCandidate` determines which strings should be tested,
   * and the results can be read from the `matches` and `fillsCaptureGroup` predicates.
   */
  module RegexpMatching<isRegexpMatchingCandidateSig/4 isCandidate> {
    /**
     * Gets a state the regular expression `reg` can be in after matching the `i`th char in `str`.
     * The regular expression is modeled as a non-deterministic finite automaton,
     * the regular expression can therefore be in multiple states after matching a character.
     *
     * It's a forward search to all possible states, and there is thus no guarantee that the state is on a path to an accepting state.
     */
    private State getAState(RootTerm reg, int i, string str, boolean ignorePrefix) {
      // start state, the -1 position before any chars have been matched
      i = -1 and
      isCandidate(reg, str, ignorePrefix, _) and
      result.getRepr().getRootTerm() = reg and
      isStartState(result)
      or
      // recursive case
      result = getAStateAfterMatching(reg, _, str, i, _, ignorePrefix)
    }

    /**
     * Gets the next state after the `prev` state from `reg`.
     * `prev` is the state after matching `fromIndex` chars in `str`,
     * and the result is the state after matching `toIndex` chars in `str`.
     *
     * This predicate is used as a step relation in the forwards search (`getAState`),
     * and also as a step relation in the later backwards search (`getAStateThatReachesAccept`).
     */
    private State getAStateAfterMatching(
      RootTerm reg, State prev, string str, int toIndex, int fromIndex, boolean ignorePrefix
    ) {
      // the basic recursive case - outlined into a noopt helper to make performance work out.
      result = getAStateAfterMatchingAux(reg, prev, str, toIndex, fromIndex, ignorePrefix)
      or
      // we can skip past word boundaries if the next char is a non-word char.
      fromIndex = toIndex and
      prev.getRepr() instanceof RegExpWordBoundary and
      prev = getAState(reg, toIndex, str, ignorePrefix) and
      after(prev.getRepr()) = result and
      str.charAt(toIndex + 1).regexpMatch("\\W") // \W matches any non-word char.
    }

    pragma[noopt]
    private State getAStateAfterMatchingAux(
      RootTerm reg, State prev, string str, int toIndex, int fromIndex, boolean ignorePrefix
    ) {
      prev = getAState(reg, fromIndex, str, ignorePrefix) and
      fromIndex = toIndex - 1 and
      exists(string char | char = str.charAt(toIndex) | specializedDeltaClosed(prev, char, result)) and
      not discardedPrefixStep(prev, result, ignorePrefix)
    }

    /** Holds if a step from `prev` to `next` should be discarded when the `ignorePrefix` flag is set. */
    private predicate discardedPrefixStep(State prev, State next, boolean ignorePrefix) {
      prev = mkMatch(any(RegExpRoot r)) and
      ignorePrefix = true and
      next = prev
    }

    // The `deltaClosed` relation specialized to the chars that exist in strings tested by a `MatchedRegExp`.
    private predicate specializedDeltaClosed(State prev, string char, State next) {
      deltaClosed(prev, specializedGetAnInputSymbolMatching(char), next)
    }

    // The `getAnInputSymbolMatching` relation specialized to the chars that exist in strings tested by a `MatchedRegExp`.
    pragma[noinline]
    private InputSymbol specializedGetAnInputSymbolMatching(string char) {
      exists(string s, RootTerm r | isCandidate(r, s, _, _) | char = s.charAt(_)) and
      result = getAnInputSymbolMatching(char)
    }

    /**
     * Gets the `i`th state on a path to the accepting state when `reg` matches `str`.
     * Starts with an accepting state as found by `getAState` and searches backwards
     * to the start state through the reachable states (as found by `getAState`).
     *
     * This predicate satisfies the invariant that the result state can be reached with `i` steps from a start state,
     * and an accepting state can be found after (`str.length() - 1 - i`) steps from the result.
     * The result state is therefore always on a valid path where `reg` accepts `str`.
     *
     * This predicate is only used to find which capture groups a regular expression has filled,
     * and thus the search is only performed for the strings that the candidate predicate marks with `testWithGroups`.
     */
    private State getAStateThatReachesAccept(RootTerm reg, int i, string str, boolean ignorePrefix) {
      // base case, reaches an accepting state from the last state in `getAState(..)`
      isCandidate(reg, str, ignorePrefix, true) and
      i = str.length() - 1 and
      result = getAState(reg, i, str, ignorePrefix) and
      epsilonSucc*(result) = Accept(_)
      or
      // recursive case. `next` is the next state to be matched after matching `prev`.
      // this predicate is doing a backwards search, so `prev` is the result we are looking for.
      exists(State next, State prev, int fromIndex, int toIndex |
        next = getAStateThatReachesAccept(reg, toIndex, str, ignorePrefix) and
        next = getAStateAfterMatching(reg, prev, str, toIndex, fromIndex, ignorePrefix) and
        i = fromIndex and
        result = prev
      )
    }

    /** Gets the capture group number that `term` belongs to. */
    private int group(RegExpTerm term) {
      exists(RegExpGroup grp | grp.getNumber() = result | term.getParent*() = grp)
    }

    /**
     * Holds if `reg` matches `str`, where `str` is in the `isCandidate` predicate.
     */
    predicate matches(RootTerm reg, string str) {
      exists(State state | state = getAState(reg, str.length() - 1, str, _) |
        epsilonSucc*(state) = Accept(_)
      )
    }

    /**
     * Holds if matching `str` against `reg` may fill capture group number `g`.
     * Only holds if `str` is marked with `testWithGroups` by the candidate predicate.
     */
    predicate fillsCaptureGroup(RootTerm reg, string str, int g) {
      exists(State s |
        s = getAStateThatReachesAccept(reg, _, str, _) and
        g = group(s.getRepr())
      )
    }
  }
}
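For intuition about what fillsCaptureGroup computes: when a candidate string is accepted, the capture groups that get bound reveal which branch of the regex matched it. A small TypeScript sketch (the regex is hypothetical, not from this pack) showing the same observation dynamically:

// Illustrative only: which capture group binds tells us which alternative matched.
const filter = /^(?:(<!--[\s\S]*-->)|(<[a-z]+>))$/;

for (const str of ["<!-- foo -->", "<foo>"]) {
  const m = filter.exec(str);
  const groups = m ? [1, 2].filter(g => m[g] !== undefined) : [];
  console.log(`${JSON.stringify(str)} fills group(s): ${groups.join(", ")}`);
}
// "<!-- foo -->" fills group 1; "<foo>" fills group 2.
// The fillsCaptureGroup predicate above derives the same information symbolically,
// without ever executing the regular expression.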
440
shared/regex/codeql/regex/nfa/SuperlinearBackTracking.qll
Normal file
440
shared/regex/codeql/regex/nfa/SuperlinearBackTracking.qll
Normal file
@@ -0,0 +1,440 @@
/*
 * This module implements the analysis described in the paper:
 *   Valentin Wustholz, Oswaldo Olivo, Marijn J. H. Heule, and Isil Dillig:
 *   Static Detection of DoS Vulnerabilities in
 *   Programs that use Regular Expressions
 *   (Extended Version).
 *   (https://arxiv.org/pdf/1701.04045.pdf)
 *
 * Theorem 3 from the paper describes the basic idea.
 *
 * The following explains the idea using the variable and predicate names used in the implementation:
 * We consider a pair of repetitions, which we will call `pivot` and `succ`.
 *
 * We create a product automaton of 3-tuples of states (see `StateTuple`).
 * There exists a transition `(a,b,c) -> (d,e,f)` in the product automaton
 * iff there exist three transitions in the NFA `a->d, b->e, c->f` where those three
 * transitions all match a shared character `char`. (see `getAThreewayIntersect`)
 *
 * We start a search in the product automaton at `(pivot, pivot, succ)`,
 * and search for a series of transitions (a `Trace`), such that we end
 * at `(pivot, succ, succ)` (see `isReachableFromStartTuple`).
 *
 * For example, consider the regular expression `/^\d*5\w*$/`.
 * The search will start at the tuple `(\d*, \d*, \w*)` and search
 * for a path to `(\d*, \w*, \w*)`.
 * This path exists, and consists of a single transition in the product automaton,
 * where the three corresponding NFA edges all match the character `"5"`.
 *
 * The start-state in the NFA has an any-transition to itself; this allows us to
 * flag regular expressions such as `/a*$/` (which does not have a start anchor,
 * and can thus start matching anywhere).
 *
 * The implementation is not perfect.
 * It has the same suffix detection issue as the `js/redos` query, which can cause false positives.
 * It also doesn't find all transitions in the product automaton, which can cause false negatives.
 */

private import NfaUtils as NfaUtils
private import codeql.regex.RegexTreeView

/**
 * A parameterized module implementing the analysis described in the paper above.
 */
module Make<RegexTreeViewSig TreeImpl> {
  private import TreeImpl
  import NfaUtils::Make<TreeImpl>

  /**
   * Gets any root (start) state of a regular expression.
   */
  private State getRootState() { result = mkMatch(any(RegExpRoot r)) }

  private newtype TStateTuple =
    MkStateTuple(State q1, State q2, State q3) {
      // starts at (pivot, pivot, succ)
      isStartLoops(q1, q3) and q1 = q2
      or
      step(_, _, _, _, q1, q2, q3) and FeasibleTuple::isFeasibleTuple(q1, q2, q3)
    }

  /**
   * A state in the product automaton.
   * The product automaton contains 3-tuples of states.
   *
   * We construct states lazily: only those that are actually needed.
   * Either a start state `(pivot, pivot, succ)`, or a state
   * where there exists a transition from an already existing state.
   *
   * The exponential variant of this query (`js/redos`) uses an optimization
   * trick where `q1 <= q2`. This trick cannot be used here as the order
   * of the elements matters.
   */
  class StateTuple extends TStateTuple {
    State q1;
    State q2;
    State q3;

    StateTuple() { this = MkStateTuple(q1, q2, q3) }

    /**
     * Gets a string representation of this tuple.
     */
    string toString() { result = "(" + q1 + ", " + q2 + ", " + q3 + ")" }

    /**
     * Holds if this tuple is `(r1, r2, r3)`.
     */
    pragma[noinline]
    predicate isTuple(State r1, State r2, State r3) { r1 = q1 and r2 = q2 and r3 = q3 }
  }
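
  /**
   * A minimal illustrative sketch (not part of the original file): projects out
   * the `i`th component (1, 2, or 3) of `tuple`, mirroring the `isTuple`
   * predicate above. The name `getTupleComponent` is hypothetical.
   */
  private State getTupleComponent(StateTuple tuple, int i) {
    exists(State q1, State q2, State q3 | tuple.isTuple(q1, q2, q3) |
      i = 1 and result = q1
      or
      i = 2 and result = q2
      or
      i = 3 and result = q3
    )
  }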

  /**
   * A module for determining feasible tuples for the product automaton.
   *
   * The implementation is split into many predicates for performance reasons.
   */
  private module FeasibleTuple {
    /**
     * Holds if the tuple `(r1, r2, r3)` might be on a path from a start-state to an end-state in the product automaton.
     */
    pragma[inline]
    predicate isFeasibleTuple(State r1, State r2, State r3) {
      // The first element is either inside a repetition or the start state itself
      isRepetitionOrStart(r1) and
      // The last element is inside a repetition
      stateInsideRepetition(r3) and
      // The states are reachable in the NFA in the order r1 -> r2 -> r3
      delta+(r1) = r2 and
      delta+(r2) = r3 and
      // The first element can reach a beginning (the "pivot" state in a `(pivot, succ)` pair).
      canReachABeginning(r1) and
      // The last element can reach a target (the "succ" state in a `(pivot, succ)` pair).
      canReachATarget(r3)
    }

    /**
     * Holds if `s` is either inside a repetition, or is the start state (which is a repetition).
     */
    pragma[noinline]
    private predicate isRepetitionOrStart(State s) {
      stateInsideRepetition(s) or s = getRootState()
    }

    /**
     * Holds if state `s` might be inside a backtracking repetition.
     */
    pragma[noinline]
    private predicate stateInsideRepetition(State s) {
      s.getRepr().getParent*() instanceof InfiniteRepetitionQuantifier
    }

    /**
     * Holds if there exists a path in the NFA from `s` to a "pivot" state
     * (from a `(pivot, succ)` pair that starts the search).
     */
    pragma[noinline]
    private predicate canReachABeginning(State s) {
      delta+(s) = any(State pivot | isStartLoops(pivot, _))
    }

    /**
     * Holds if there exists a path in the NFA from `s` to a "succ" state
     * (from a `(pivot, succ)` pair that starts the search).
     */
    pragma[noinline]
    private predicate canReachATarget(State s) {
      delta+(s) = any(State succ | isStartLoops(_, succ))
    }
  }

  /**
   * Holds if `pivot` and `succ` are a pair of loops that could be the beginning of a quadratic blowup.
   *
   * There is a slight implementation difference compared to the paper: this predicate requires that `pivot != succ`.
   * The case where `pivot = succ` causes exponential backtracking and is handled by the `js/redos` query.
   */
  predicate isStartLoops(State pivot, State succ) {
    pivot != succ and
    succ.getRepr() instanceof InfiniteRepetitionQuantifier and
    delta+(pivot) = succ and
    (
      pivot.getRepr() instanceof InfiniteRepetitionQuantifier
      or
      pivot = mkMatch(any(RegExpRoot root))
    )
  }
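
  /**
   * A minimal illustrative sketch (not part of the original file): restricts
   * `isStartLoops` to the unanchored case where the pivot is the implicit
   * start-state loop, as in `/a*$/` from the comment at the top of this file.
   * The name `isStartLoopsFromRoot` is hypothetical.
   */
  private predicate isStartLoopsFromRoot(State pivot, State succ) {
    isStartLoops(pivot, succ) and
    pivot = mkMatch(any(RegExpRoot root))
  }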

  /**
   * Gets a state for which there exists a transition in the NFA from `s`.
   */
  State delta(State s) { delta(s, _, result) }

  /**
   * Holds if there are transitions from the components of `q` to the corresponding
   * components of `r` labelled with `s1`, `s2`, and `s3`, respectively.
   */
  pragma[noinline]
  predicate step(StateTuple q, InputSymbol s1, InputSymbol s2, InputSymbol s3, StateTuple r) {
    exists(State r1, State r2, State r3 |
      step(q, s1, s2, s3, r1, r2, r3) and r = MkStateTuple(r1, r2, r3)
    )
  }

  /**
   * Holds if there are transitions from the components of `q` to `r1`, `r2`, and `r3`
   * labelled with `s1`, `s2`, and `s3`, respectively.
   */
  pragma[noopt]
  predicate step(
    StateTuple q, InputSymbol s1, InputSymbol s2, InputSymbol s3, State r1, State r2, State r3
  ) {
    exists(State q1, State q2, State q3 | q.isTuple(q1, q2, q3) |
      deltaClosed(q1, s1, r1) and
      deltaClosed(q2, s2, r2) and
      deltaClosed(q3, s3, r3) and
      // use noopt to force the join on `getAThreewayIntersect` to happen last.
      exists(getAThreewayIntersect(s1, s2, s3))
    )
  }

  /**
   * Gets a char that is matched by all the edges `s1`, `s2`, and `s3`.
   *
   * The result is not complete, and might miss some combinations of edges that share a character.
   */
  pragma[noinline]
  string getAThreewayIntersect(InputSymbol s1, InputSymbol s2, InputSymbol s3) {
    result = minAndMaxIntersect(s1, s2) and result = [intersect(s2, s3), intersect(s1, s3)]
    or
    result = minAndMaxIntersect(s1, s3) and result = [intersect(s2, s3), intersect(s1, s2)]
    or
    result = minAndMaxIntersect(s2, s3) and result = [intersect(s1, s2), intersect(s1, s3)]
  }

  /**
   * Gets the minimum and maximum characters that intersect between `a` and `b`.
   * This predicate is used to limit the size of `getAThreewayIntersect`.
   */
  pragma[noinline]
  string minAndMaxIntersect(InputSymbol a, InputSymbol b) {
    result = [min(intersect(a, b)), max(intersect(a, b))]
  }
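
  /**
   * A minimal illustrative sketch (not part of the original file): holds if the
   * three edges `s1`, `s2`, and `s3` are known to share at least one character.
   * This is the existence check that the `noopt` `step` predicate above performs
   * on `getAThreewayIntersect`. The name `haveSharedChar` is hypothetical.
   */
  pragma[inline]
  private predicate haveSharedChar(InputSymbol s1, InputSymbol s2, InputSymbol s3) {
    exists(getAThreewayIntersect(s1, s2, s3))
  }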

  private newtype TTrace =
    Nil() or
    Step(InputSymbol s1, InputSymbol s2, InputSymbol s3, TTrace t) {
      isReachableFromStartTuple(_, _, t, s1, s2, s3, _, _)
    }

  /**
   * A list of tuples of input symbols that describe a path in the product automaton
   * starting from some start state.
   */
  class Trace extends TTrace {
    /**
     * Gets a string representation of this Trace that can be used for debugging purposes.
     */
    string toString() {
      this = Nil() and result = "Nil()"
      or
      exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, Trace t | this = Step(s1, s2, s3, t) |
        result = "Step(" + s1 + ", " + s2 + ", " + s3 + ", " + t + ")"
      )
    }
  }
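
  /**
   * A minimal illustrative sketch (not part of the original file): gets the number
   * of steps in the trace `t`, i.e. the length of the path it describes in the
   * product automaton. The name `traceLength` is hypothetical.
   */
  private int traceLength(Trace t) {
    t = Nil() and result = 0
    or
    exists(Trace rest | t = Step(_, _, _, rest) | result = traceLength(rest) + 1)
  }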

  /**
   * Holds if there exists a transition from `r` to `q` in the product automaton.
   * Notice that the arguments are flipped, and thus the direction is backwards.
   */
  pragma[noinline]
  predicate tupleDeltaBackwards(StateTuple q, StateTuple r) { step(r, _, _, _, q) }

  /**
   * Holds if `tuple` is an end state in our search.
   * That means there exists a pair of loops `(pivot, succ)` such that `tuple = (pivot, succ, succ)`.
   */
  predicate isEndTuple(StateTuple tuple) { tuple = getAnEndTuple(_, _) }

  /**
   * Gets the minimum length of a path from `r` to some end state `end`.
   *
   * The implementation searches backwards from the end-tuple.
   * This approach was chosen because it is significantly more efficient when the first predicate given to `shortestDistances` is small.
   * The `end` argument must always be an end state.
   */
  int distBackFromEnd(StateTuple r, StateTuple end) =
    shortestDistances(isEndTuple/1, tupleDeltaBackwards/2)(end, r, result)
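
  /**
   * A minimal illustrative sketch (not part of the original file): gets the length
   * of a shortest path in the product automaton from the start tuple
   * `(pivot, pivot, succ)` to the end tuple `(pivot, succ, succ)`, when such a
   * path exists. The name `shortestPumpLength` is hypothetical.
   */
  private int shortestPumpLength(State pivot, State succ) {
    isStartLoops(pivot, succ) and
    result = distBackFromEnd(MkStateTuple(pivot, pivot, succ), getAnEndTuple(pivot, succ))
  }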

  /**
   * Holds if there exists a pair of repetitions `(pivot, succ)` in the regular expression such that:
   * `tuple` is reachable from `(pivot, pivot, succ)` in the product automaton,
   * and there is a distance of `dist` from `tuple` to the nearest end-tuple `(pivot, succ, succ)`,
   * and a path from a start-state to `tuple` follows the transitions in `trace`.
   */
  private predicate isReachableFromStartTuple(
    State pivot, State succ, StateTuple tuple, Trace trace, int dist
  ) {
    exists(InputSymbol s1, InputSymbol s2, InputSymbol s3, Trace v |
      isReachableFromStartTuple(pivot, succ, v, s1, s2, s3, tuple, dist) and
      trace = Step(s1, s2, s3, v)
    )
  }

  private predicate isReachableFromStartTuple(
    State pivot, State succ, Trace trace, InputSymbol s1, InputSymbol s2, InputSymbol s3,
    StateTuple tuple, int dist
  ) {
    // base case.
    exists(State q1, State q2, State q3 |
      isStartLoops(pivot, succ) and
      step(MkStateTuple(pivot, pivot, succ), s1, s2, s3, tuple) and
      tuple = MkStateTuple(q1, q2, q3) and
      trace = Nil() and
      dist = distBackFromEnd(tuple, MkStateTuple(pivot, succ, succ))
    )
    or
    // recursive case
    exists(StateTuple p |
      isReachableFromStartTuple(pivot, succ, p, trace, dist + 1) and
      dist = distBackFromEnd(tuple, MkStateTuple(pivot, succ, succ)) and
      step(p, s1, s2, s3, tuple)
    )
  }

  /**
   * Gets the tuple `(pivot, succ, succ)` from the product automaton.
   */
  StateTuple getAnEndTuple(State pivot, State succ) {
    isStartLoops(pivot, succ) and
    result = MkStateTuple(pivot, succ, succ)
  }

  /** An implementation of a chain containing chars for use by `Concretizer`. */
  private module CharTreeImpl implements CharTree {
    class CharNode = Trace;

    CharNode getPrev(CharNode t) { t = Step(_, _, _, result) }

    /** Holds if `n` is used in `isPumpable`. */
    predicate isARelevantEnd(CharNode n) {
      exists(State pivot, State succ |
        isReachableFromStartTuple(pivot, succ, getAnEndTuple(pivot, succ), n, _)
      )
    }

    string getChar(CharNode t) {
      exists(InputSymbol s1, InputSymbol s2, InputSymbol s3 | t = Step(s1, s2, s3, _) |
        result = getAThreewayIntersect(s1, s2, s3)
      )
    }
  }

  /**
   * Holds if matching repetitions of `pump` can:
   * 1) Transition from `pivot` back to `pivot`.
   * 2) Transition from `pivot` to `succ`.
   * 3) Transition from `succ` to `succ`.
   *
   * From theorem 3 in the paper linked at the top of this file we can therefore conclude that
   * the regular expression exhibits polynomial backtracking, provided a rejecting suffix exists.
   *
   * This predicate is used by `SuperLinearReDoSConfiguration`, and the final results are
   * available in the `hasReDoSResult` predicate.
   */
  predicate isPumpable(State pivot, State succ, string pump) {
    exists(StateTuple q, Trace t |
      isReachableFromStartTuple(pivot, succ, q, t, _) and
      q = getAnEndTuple(pivot, succ) and
      pump = Concretizer<CharTreeImpl>::concretize(t)
    )
  }

  /**
   * Holds if matching repetitions of `pump` starting in `state` can cause polynomial backtracking.
   */
  predicate isReDoSCandidate(State state, string pump) { isPumpable(_, state, pump) }

  /**
   * Holds if repetitions of `pump` at `t` will cause polynomial backtracking.
   */
  predicate polynomialReDoS(RegExpTerm t, string pump, string prefixMsg, RegExpTerm prev) {
    exists(State s, State pivot |
      ReDoSPruning<isReDoSCandidate/2>::hasReDoSResult(t, pump, s, prefixMsg) and
      isPumpable(pivot, s, _) and
      prev = pivot.getRepr()
    )
  }

  /**
   * Gets a message explaining why `term` can cause polynomial backtracking.
   */
  string getReasonString(RegExpTerm term, string pump, string prefixMsg, RegExpTerm prev) {
    polynomialReDoS(term, pump, prefixMsg, prev) and
    result =
      "Strings " + prefixMsg + "with many repetitions of '" + pump +
        "' can start matching anywhere after the start of the preceding " + prev
  }

  /**
   * A term that may cause a regular expression engine to perform a
   * polynomial number of match attempts, relative to the input length.
   */
  class PolynomialBackTrackingTerm instanceof InfiniteRepetitionQuantifier {
    string reason;
    string pump;
    string prefixMsg;
    RegExpTerm prev;

    PolynomialBackTrackingTerm() {
      reason = getReasonString(this, pump, prefixMsg, prev) and
      // there might be many reasons for this term to have polynomial backtracking - we pick the shortest one.
      reason =
        min(string msg | msg = getReasonString(this, _, _, _) | msg order by msg.length(), msg)
    }

    /**
     * Holds if all non-empty successors of this polynomial backtracking term match the end of the line.
     */
    predicate isAtEndLine() {
      forall(RegExpTerm succ | super.getSuccessor+() = succ and not matchesEpsilon(succ) |
        succ instanceof RegExpDollar
      )
    }

    /**
     * Gets the string that should be repeated to cause this regular expression to backtrack polynomially.
     */
    string getPumpString() { result = pump }

    /**
     * Gets a message describing the prefix a matching string must start with for this term to cause polynomial backtracking.
     */
    string getPrefixMessage() { result = prefixMsg }

    /**
     * Gets a predecessor of `this`, which also loops on the pump string, and thereby causes polynomial backtracking.
     */
    RegExpTerm getPreviousLoop() { result = prev }

    /**
     * Gets the reason for the number of match attempts.
     */
    string getReason() { result = reason }

    /** Gets a string representation of this term. */
    string toString() { result = super.toString() }

    /** Gets the outermost term of this regular expression. */
    RegExpTerm getRootTerm() { result = super.getRootTerm() }

    /** Holds if this term is at the specified location. */
    predicate hasLocationInfo(
      string filepath, int startline, int startcolumn, int endline, int endcolumn
    ) {
      super.hasLocationInfo(filepath, startline, startcolumn, endline, endcolumn)
    }
  }
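
  /**
   * A minimal illustrative sketch (not part of the original file): pairs each
   * `PolynomialBackTrackingTerm` with its pump string and reason, roughly how a
   * language-specific polynomial-ReDoS query would consume the class above.
   * The name `polynomialBackTrackingDiagnostic` is hypothetical.
   */
  predicate polynomialBackTrackingDiagnostic(
    PolynomialBackTrackingTerm t, string pump, string reason
  ) {
    pump = t.getPumpString() and
    reason = t.getReason()
  }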
}
5
shared/regex/qlpack.yml
Normal file
@@ -0,0 +1,5 @@
name: codeql/regex
version: 0.0.1-dev
groups: shared
library: true
dependencies: