From a837d5f4ceddee27006de97bcbaa498756032c81 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Thu, 9 Apr 2020 15:06:59 +0100
Subject: [PATCH 001/157] Add support for extracting Go frontend errors.
---
extractor/dbscheme/tables.go | 28 +++++++++++++++++++
extractor/extractor.go | 52 +++++++++++++++++++++++++++++++-----
ql/src/go.dbscheme | 10 +++++++
3 files changed, 83 insertions(+), 7 deletions(-)
diff --git a/extractor/dbscheme/tables.go b/extractor/dbscheme/tables.go
index c82fa0459b1..df146c8e4e5 100644
--- a/extractor/dbscheme/tables.go
+++ b/extractor/dbscheme/tables.go
@@ -4,6 +4,7 @@ import (
"go/ast"
"go/token"
gotypes "go/types"
+ "golang.org/x/tools/go/packages"
)
var defaultSnippet = AddDefaultSnippet(`
@@ -642,6 +643,20 @@ var ModLParenType = ModExprKind.NewBranch("@modlparen")
// ModRParenType is the type of go.mod line block end AST nodes
var ModRParenType = ModExprKind.NewBranch("@modrparen")
+// ErrorType is the type of frontend errors
+var ErrorType = NewPrimaryKeyType("@error")
+
+// ErrorKind is a case type for distinguishing different kinds of frontend errors
+var ErrorKind = NewCaseType(ErrorType, "kind")
+
+// ErrorTypes is a map from error kinds to the corresponding type
+var ErrorTypes = map[packages.ErrorKind]*BranchType{
+ packages.UnknownError: ErrorKind.NewBranch("@unknownerror"),
+ packages.ListError: ErrorKind.NewBranch("@listerror"),
+ packages.ParseError: ErrorKind.NewBranch("@parseerror"),
+ packages.TypeError: ErrorKind.NewBranch("@typeerror"),
+}
+
// LocationsDefaultTable is the table defining location objects
var LocationsDefaultTable = NewTable("locations_default",
EntityColumn(LocationDefaultType, "id").Key(),
@@ -915,3 +930,16 @@ var ModTokensTable = NewTable("modtokens",
EntityColumn(ModExprType, "parent"),
IntColumn("idx"),
).KeySet("parent", "idx")
+
+// ErrorsTable is the table describing frontend errors
+var ErrorsTable = NewTable("errors",
+ EntityColumn(ErrorType, "id").Key(),
+ IntColumn("kind"),
+ StringColumn("msg"),
+ StringColumn("rawpos"),
+ StringColumn("file"),
+ IntColumn("line"),
+ IntColumn("col"),
+ EntityColumn(PackageType, "package"),
+ IntColumn("idx"),
+).KeySet("package", "idx")
diff --git a/extractor/extractor.go b/extractor/extractor.go
index f59ee9c228c..8fe96147808 100644
--- a/extractor/extractor.go
+++ b/extractor/extractor.go
@@ -57,13 +57,6 @@ func ExtractWithFlags(buildFlags []string, patterns []string) error {
packages.Visit(pkgs, func(pkg *packages.Package) bool {
return true
}, func(pkg *packages.Package) {
- if len(pkg.Errors) != 0 {
- log.Printf("Warning: encountered errors extracting package `%s`:", pkg.PkgPath)
- for _, err := range pkg.Errors {
- log.Printf(" %s", err.Error())
- }
- }
-
tw, err := trap.NewWriter(pkg.PkgPath, pkg)
if err != nil {
log.Fatal(err)
@@ -74,6 +67,14 @@ func ExtractWithFlags(buildFlags []string, patterns []string) error {
tw.ForEachObject(extractObjectType)
lbl := tw.Labeler.GlobalID(pkg.PkgPath + ";pkg")
dbscheme.PackagesTable.Emit(tw, lbl, pkg.Name, pkg.PkgPath, scope)
+
+ if len(pkg.Errors) != 0 {
+ log.Printf("Warning: encountered errors extracting package `%s`:", pkg.PkgPath)
+ for i, err := range pkg.Errors {
+ log.Printf(" %s", err.Error())
+ extractError(tw, err, lbl, i)
+ }
+ }
})
// this sets the number of threads that the Go runtime will spawn; this is separate
@@ -253,6 +254,43 @@ func extractObjectType(tw *trap.Writer, obj types.Object, lbl trap.Label) {
}
}
+// extractError extracts the message and location of a frontend error
+func extractError(tw *trap.Writer, err packages.Error, pkglbl trap.Label, idx int) {
+ var (
+ lbl = tw.Labeler.FreshID()
+ kind = dbscheme.ErrorTypes[err.Kind].Index()
+ pos = err.Pos
+ posComponents = strings.Split(err.Pos, ":")
+ file = ""
+ line = 0
+ col = 0
+ e error
+ )
+ switch len(posComponents) {
+ case 3:
+ // "file:line:col"
+ col, e = strconv.Atoi(posComponents[2])
+ if e != nil {
+ log.Printf("Warning: malformed column number `%s`: %v", posComponents[2], e)
+ }
+ fallthrough
+ case 2:
+ // "file:line"
+ file = posComponents[0]
+ line, e = strconv.Atoi(posComponents[1])
+ if e != nil {
+ log.Printf("Warning: malformed line number `%s`: %v", posComponents[1], e)
+ }
+ default:
+ // "", "-"
+ if pos != "" && pos != "-" {
+ log.Printf("Warning: malformed error position `%s`", pos)
+ }
+ }
+ file = filepath.ToSlash(srcarchive.TransformPath(file))
+ dbscheme.ErrorsTable.Emit(tw, lbl, kind, err.Msg, pos, file, line, col, pkglbl, idx)
+}
+
// extractPackage extracts AST information for all files in the given package
func extractPackage(pkg *packages.Package, wg *sync.WaitGroup,
goroutineSem *semaphore, fdSem *semaphore) {
diff --git a/ql/src/go.dbscheme b/ql/src/go.dbscheme
index f7fb4ff6229..ee5c327face 100644
--- a/ql/src/go.dbscheme
+++ b/ql/src/go.dbscheme
@@ -124,6 +124,10 @@ modexprs(unique int id: @modexpr, int kind: int ref, int parent: @modexprparent
#keyset[parent, idx]
modtokens(string token: string ref, int parent: @modexpr ref, int idx: int ref);
+#keyset[package, idx]
+errors(unique int id: @error, int kind: int ref, string msg: string ref, string rawpos: string ref,
+ string file: string ref, int line: int ref, int col: int ref, int package: @package ref, int idx: int ref);
+
@container = @file | @folder;
@locatable = @node | @localscope;
@@ -418,3 +422,9 @@ case @modexpr.kind of
| 3 = @modlparen
| 4 = @modrparen;
+case @error.kind of
+ 0 = @unknownerror
+| 1 = @listerror
+| 2 = @parseerror
+| 3 = @typeerror;
+
From c38edf77cecec13b506955b6177dae994df63d54 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Thu, 9 Apr 2020 15:07:08 +0100
Subject: [PATCH 002/157] Update stats.
---
ql/src/go.dbscheme.stats | 956 ++++++++++++++++++++++++++++++++++++---
1 file changed, 892 insertions(+), 64 deletions(-)
diff --git a/ql/src/go.dbscheme.stats b/ql/src/go.dbscheme.stats
index 2245ca9508d..c1b9237ef18 100644
--- a/ql/src/go.dbscheme.stats
+++ b/ql/src/go.dbscheme.stats
@@ -1,41 +1,33 @@
-
- @similarity
- 0
-
-
- @file
- 523
-
-
- @folder
- 223
-
@duplication
0
-
- @externalDataElement
- 0
-
@comment_group
12133
+
+ @folder
+ 223
+
+
+ @file
+ 523
+
+
+ @externalDataElement
+ 0
+
+
+ @similarity
+ 0
+
@field
19974
-
- @slashslashcomment
- 24891
-
-
- @slashstarcomment
- 846
-
@importdecl
479
@@ -60,6 +52,14 @@
@baddecl
0
+
+ @slashslashcomment
+ 24891
+
+
+ @slashstarcomment
+ 846
+
@importspec
3468
@@ -84,6 +84,42 @@
@localscope
36428
+
+ @pkgobject
+ 3468
+
+
+ @decltypeobject
+ 3499
+
+
+ @builtintypeobject
+ 20
+
+
+ @declconstobject
+ 8488
+
+
+ @builtinconstobject
+ 4
+
+
+ @declvarobject
+ 50364
+
+
+ @declfunctionobject
+ 17254
+
+
+ @builtinfunctionobject
+ 18
+
+
+ @labelobject
+ 49
+
@invalidtype
1
@@ -236,46 +272,6 @@
@complexliteraltype
0
-
- @pkgobject
- 3468
-
-
- @decltypeobject
- 3499
-
-
- @builtintypeobject
- 20
-
-
- @declconstobject
- 8488
-
-
- @builtinconstobject
- 4
-
-
- @package
- 340
-
-
- @declvarobject
- 50364
-
-
- @declfunctionobject
- 17254
-
-
- @builtinfunctionobject
- 18
-
-
- @labelobject
- 49
-
@declstmt
1454
@@ -424,6 +420,10 @@
@andassignstmt
0
+
+ @package
+ 340
+
@modcommentblock
3
@@ -444,6 +444,22 @@
@modrparen
2
+
+ @unknownerror
+ 0
+
+
+ @listerror
+ 0
+
+
+ @parseerror
+ 0
+
+
+ @typeerror
+ 0
+
@ident
237316
@@ -9789,5 +9805,817 @@
+
+ errors
+ 0
+
+
+ id
+ 0
+
+
+ kind
+ 0
+
+
+ msg
+ 0
+
+
+ rawpos
+ 0
+
+
+ file
+ 0
+
+
+ line
+ 0
+
+
+ col
+ 0
+
+
+ package
+ 0
+
+
+ idx
+ 0
+
+
+
+
+ id
+ kind
+
+
+ 12
+
+
+ 1
+ 2
+ 1
+
+
+
+
+
+
+ id
+ msg
+
+
+ 12
+
+
+ 1
+ 2
+ 1
+
+
+
+
+
+
+ id
+ rawpos
+
+
+ 12
+
+
+ 1
+ 2
+ 1
+
+
+
+
+
+
+ id
+ file
+
+
+ 12
+
+
+ 1
+ 2
+ 1
+
+
+
+
+
+
+ id
+ line
+
+
+ 12
+
+
+ 1
+ 2
+ 1
+
+
+
+
+
+
+ id
+ col
+
+
+ 12
+
+
+ 1
+ 2
+ 1
+
+
+
+
+
+
+ id
+ package
+
+
+ 12
+
+
+ 1
+ 2
+ 1
+
+
+
+
+
+
+ id
+ idx
+
+
+ 12
+
+
+ 1
+ 2
+ 1
+
+
+
+
+
+
+ kind
+ id
+
+
+ 12
+
+
+
+
+
+ kind
+ msg
+
+
+ 12
+
+
+
+
+
+ kind
+ rawpos
+
+
+ 12
+
+
+
+
+
+ kind
+ file
+
+
+ 12
+
+
+
+
+
+ kind
+ line
+
+
+ 12
+
+
+
+
+
+ kind
+ col
+
+
+ 12
+
+
+
+
+
+ kind
+ package
+
+
+ 12
+
+
+
+
+
+ kind
+ idx
+
+
+ 12
+
+
+
+
+
+ msg
+ id
+
+
+ 12
+
+
+
+
+
+ msg
+ kind
+
+
+ 12
+
+
+
+
+
+ msg
+ rawpos
+
+
+ 12
+
+
+
+
+
+ msg
+ file
+
+
+ 12
+
+
+
+
+
+ msg
+ line
+
+
+ 12
+
+
+
+
+
+ msg
+ col
+
+
+ 12
+
+
+
+
+
+ msg
+ package
+
+
+ 12
+
+
+
+
+
+ msg
+ idx
+
+
+ 12
+
+
+
+
+
+ rawpos
+ id
+
+
+ 12
+
+
+
+
+
+ rawpos
+ kind
+
+
+ 12
+
+
+
+
+
+ rawpos
+ msg
+
+
+ 12
+
+
+
+
+
+ rawpos
+ file
+
+
+ 12
+
+
+
+
+
+ rawpos
+ line
+
+
+ 12
+
+
+
+
+
+ rawpos
+ col
+
+
+ 12
+
+
+
+
+
+ rawpos
+ package
+
+
+ 12
+
+
+
+
+
+ rawpos
+ idx
+
+
+ 12
+
+
+
+
+
+ file
+ id
+
+
+ 12
+
+
+
+
+
+ file
+ kind
+
+
+ 12
+
+
+
+
+
+ file
+ msg
+
+
+ 12
+
+
+
+
+
+ file
+ rawpos
+
+
+ 12
+
+
+
+
+
+ file
+ line
+
+
+ 12
+
+
+
+
+
+ file
+ col
+
+
+ 12
+
+
+
+
+
+ file
+ package
+
+
+ 12
+
+
+
+
+
+ file
+ idx
+
+
+ 12
+
+
+
+
+
+ line
+ id
+
+
+ 12
+
+
+
+
+
+ line
+ kind
+
+
+ 12
+
+
+
+
+
+ line
+ msg
+
+
+ 12
+
+
+
+
+
+ line
+ rawpos
+
+
+ 12
+
+
+
+
+
+ line
+ file
+
+
+ 12
+
+
+
+
+
+ line
+ col
+
+
+ 12
+
+
+
+
+
+ line
+ package
+
+
+ 12
+
+
+
+
+
+ line
+ idx
+
+
+ 12
+
+
+
+
+
+ col
+ id
+
+
+ 12
+
+
+
+
+
+ col
+ kind
+
+
+ 12
+
+
+
+
+
+ col
+ msg
+
+
+ 12
+
+
+
+
+
+ col
+ rawpos
+
+
+ 12
+
+
+
+
+
+ col
+ file
+
+
+ 12
+
+
+
+
+
+ col
+ line
+
+
+ 12
+
+
+
+
+
+ col
+ package
+
+
+ 12
+
+
+
+
+
+ col
+ idx
+
+
+ 12
+
+
+
+
+
+ package
+ id
+
+
+ 12
+
+
+
+
+
+ package
+ kind
+
+
+ 12
+
+
+
+
+
+ package
+ msg
+
+
+ 12
+
+
+
+
+
+ package
+ rawpos
+
+
+ 12
+
+
+
+
+
+ package
+ file
+
+
+ 12
+
+
+
+
+
+ package
+ line
+
+
+ 12
+
+
+
+
+
+ package
+ col
+
+
+ 12
+
+
+
+
+
+ package
+ idx
+
+
+ 12
+
+
+
+
+
+ idx
+ id
+
+
+ 12
+
+
+
+
+
+ idx
+ kind
+
+
+ 12
+
+
+
+
+
+ idx
+ msg
+
+
+ 12
+
+
+
+
+
+ idx
+ rawpos
+
+
+ 12
+
+
+
+
+
+ idx
+ file
+
+
+ 12
+
+
+
+
+
+ idx
+ line
+
+
+ 12
+
+
+
+
+
+ idx
+ col
+
+
+ 12
+
+
+
+
+
+ idx
+ package
+
+
+ 12
+
+
+
+
+
+
From f2d11538ceb01f592d67f00e3b5766d9525c9ba2 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Tue, 14 Apr 2020 10:29:29 +0100
Subject: [PATCH 003/157] Add upgrade script.
---
.../go.dbscheme | 430 ++++++++++++++++++
.../old.dbscheme | 420 +++++++++++++++++
.../upgrade.properties | 2 +
3 files changed, 852 insertions(+)
create mode 100644 upgrades/f7fb4ff6229adffa2c2c4238ef72c82359d56be4/go.dbscheme
create mode 100644 upgrades/f7fb4ff6229adffa2c2c4238ef72c82359d56be4/old.dbscheme
create mode 100644 upgrades/f7fb4ff6229adffa2c2c4238ef72c82359d56be4/upgrade.properties
diff --git a/upgrades/f7fb4ff6229adffa2c2c4238ef72c82359d56be4/go.dbscheme b/upgrades/f7fb4ff6229adffa2c2c4238ef72c82359d56be4/go.dbscheme
new file mode 100644
index 00000000000..ee5c327face
--- /dev/null
+++ b/upgrades/f7fb4ff6229adffa2c2c4238ef72c82359d56be4/go.dbscheme
@@ -0,0 +1,430 @@
+/** Auto-generated dbscheme; do not edit. */
+
+
+/** Duplicate code **/
+
+duplicateCode(
+ unique int id : @duplication,
+ varchar(900) relativePath : string ref,
+ int equivClass : int ref);
+
+similarCode(
+ unique int id : @similarity,
+ varchar(900) relativePath : string ref,
+ int equivClass : int ref);
+
+@duplication_or_similarity = @duplication | @similarity;
+
+tokens(
+ int id : @duplication_or_similarity ref,
+ int offset : int ref,
+ int beginLine : int ref,
+ int beginColumn : int ref,
+ int endLine : int ref,
+ int endColumn : int ref);
+
+/** External data **/
+
+externalData(
+ int id : @externalDataElement,
+ varchar(900) path : string ref,
+ int column: int ref,
+ varchar(900) value : string ref
+);
+
+snapshotDate(unique date snapshotDate : date ref);
+
+sourceLocationPrefix(varchar(900) prefix : string ref);
+
+locations_default(unique int id: @location_default, int file: @file ref, int beginLine: int ref, int beginColumn: int ref,
+ int endLine: int ref, int endColumn: int ref);
+
+numlines(int element_id: @sourceline ref, int num_lines: int ref, int num_code: int ref, int num_comment: int ref);
+
+files(unique int id: @file, string name: string ref, string simple: string ref, string ext: string ref, int fromSource: int ref);
+
+folders(unique int id: @folder, string name: string ref, string simple: string ref);
+
+containerparent(int parent: @container ref, unique int child: @container ref);
+
+has_location(unique int locatable: @locatable ref, int location: @location ref);
+
+comment_groups(unique int id: @comment_group);
+
+comments(unique int id: @comment, int kind: int ref, int parent: @comment_group ref, int idx: int ref, string text: string ref);
+
+doc_comments(unique int node: @documentable ref, int comment: @comment_group ref);
+
+#keyset[parent, idx]
+exprs(unique int id: @expr, int kind: int ref, int parent: @exprparent ref, int idx: int ref);
+
+literals(unique int expr: @expr ref, string value: string ref, string raw: string ref);
+
+constvalues(unique int expr: @expr ref, string value: string ref, string exact: string ref);
+
+fields(unique int id: @field, int parent: @fieldparent ref, int idx: int ref);
+
+#keyset[parent, idx]
+stmts(unique int id: @stmt, int kind: int ref, int parent: @stmtparent ref, int idx: int ref);
+
+#keyset[parent, idx]
+decls(unique int id: @decl, int kind: int ref, int parent: @declparent ref, int idx: int ref);
+
+#keyset[parent, idx]
+specs(unique int id: @spec, int kind: int ref, int parent: @gendecl ref, int idx: int ref);
+
+scopes(unique int id: @scope, int kind: int ref);
+
+scopenesting(unique int inner: @scope ref, int outer: @scope ref);
+
+scopenodes(unique int node: @scopenode ref, int scope: @localscope ref);
+
+objects(unique int id: @object, int kind: int ref, string name: string ref);
+
+objectscopes(unique int object: @object ref, int scope: @scope ref);
+
+objecttypes(unique int object: @object ref, int tp: @type ref);
+
+methodreceivers(unique int method: @object ref, int receiver: @object ref);
+
+fieldstructs(unique int field: @object ref, int struct: @structtype ref);
+
+methodhosts(int method: @object ref, int host: @namedtype ref);
+
+defs(int ident: @ident ref, int object: @object ref);
+
+uses(int ident: @ident ref, int object: @object ref);
+
+types(unique int id: @type, int kind: int ref);
+
+type_of(unique int expr: @expr ref, int tp: @type ref);
+
+typename(unique int tp: @type ref, string name: string ref);
+
+key_type(unique int map: @maptype ref, int tp: @type ref);
+
+element_type(unique int container: @containertype ref, int tp: @type ref);
+
+base_type(unique int ptr: @pointertype ref, int tp: @type ref);
+
+underlying_type(unique int named: @namedtype ref, int tp: @type ref);
+
+#keyset[parent, index]
+component_types(int parent: @compositetype ref, int index: int ref, string name: string ref, int tp: @type ref);
+
+array_length(unique int tp: @arraytype ref, string len: string ref);
+
+type_objects(unique int tp: @type ref, int object: @object ref);
+
+packages(unique int id: @package, string name: string ref, string path: string ref, int scope: @packagescope ref);
+
+#keyset[parent, idx]
+modexprs(unique int id: @modexpr, int kind: int ref, int parent: @modexprparent ref, int idx: int ref);
+
+#keyset[parent, idx]
+modtokens(string token: string ref, int parent: @modexpr ref, int idx: int ref);
+
+#keyset[package, idx]
+errors(unique int id: @error, int kind: int ref, string msg: string ref, string rawpos: string ref,
+ string file: string ref, int line: int ref, int col: int ref, int package: @package ref, int idx: int ref);
+
+@container = @file | @folder;
+
+@locatable = @node | @localscope;
+
+@node = @documentable | @exprparent | @modexprparent | @fieldparent | @stmtparent | @declparent | @scopenode
+ | @comment_group | @comment;
+
+@documentable = @file | @field | @spec | @gendecl | @funcdecl | @modexpr;
+
+@exprparent = @funcdef | @file | @expr | @field | @stmt | @decl | @spec;
+
+@modexprparent = @file | @modexpr;
+
+@fieldparent = @decl | @structtypeexpr | @functypeexpr | @interfacetypeexpr;
+
+@stmtparent = @funcdef | @stmt | @decl;
+
+@declparent = @file | @declstmt;
+
+@funcdef = @funclit | @funcdecl;
+
+@scopenode = @file | @functypeexpr | @blockstmt | @ifstmt | @caseclause | @switchstmt | @commclause | @loopstmt;
+
+@location = @location_default;
+
+@sourceline = @locatable;
+
+case @comment.kind of
+ 0 = @slashslashcomment
+| 1 = @slashstarcomment;
+
+case @expr.kind of
+ 0 = @badexpr
+| 1 = @ident
+| 2 = @ellipsis
+| 3 = @intlit
+| 4 = @floatlit
+| 5 = @imaglit
+| 6 = @charlit
+| 7 = @stringlit
+| 8 = @funclit
+| 9 = @compositelit
+| 10 = @parenexpr
+| 11 = @selectorexpr
+| 12 = @indexexpr
+| 13 = @sliceexpr
+| 14 = @typeassertexpr
+| 15 = @callorconversionexpr
+| 16 = @starexpr
+| 17 = @keyvalueexpr
+| 18 = @arraytypeexpr
+| 19 = @structtypeexpr
+| 20 = @functypeexpr
+| 21 = @interfacetypeexpr
+| 22 = @maptypeexpr
+| 23 = @plusexpr
+| 24 = @minusexpr
+| 25 = @notexpr
+| 26 = @complementexpr
+| 27 = @derefexpr
+| 28 = @addressexpr
+| 29 = @arrowexpr
+| 30 = @lorexpr
+| 31 = @landexpr
+| 32 = @eqlexpr
+| 33 = @neqexpr
+| 34 = @lssexpr
+| 35 = @leqexpr
+| 36 = @gtrexpr
+| 37 = @geqexpr
+| 38 = @addexpr
+| 39 = @subexpr
+| 40 = @orexpr
+| 41 = @xorexpr
+| 42 = @mulexpr
+| 43 = @quoexpr
+| 44 = @remexpr
+| 45 = @shlexpr
+| 46 = @shrexpr
+| 47 = @andexpr
+| 48 = @andnotexpr
+| 49 = @sendchantypeexpr
+| 50 = @recvchantypeexpr
+| 51 = @sendrcvchantypeexpr;
+
+@basiclit = @intlit | @floatlit | @imaglit | @charlit | @stringlit;
+
+@operatorexpr = @logicalexpr | @arithmeticexpr | @bitwiseexpr | @unaryexpr | @binaryexpr;
+
+@logicalexpr = @logicalunaryexpr | @logicalbinaryexpr;
+
+@arithmeticexpr = @arithmeticunaryexpr | @arithmeticbinaryexpr;
+
+@bitwiseexpr = @bitwiseunaryexpr | @bitwisebinaryexpr;
+
+@unaryexpr = @logicalunaryexpr | @bitwiseunaryexpr | @arithmeticunaryexpr | @derefexpr | @addressexpr | @arrowexpr;
+
+@logicalunaryexpr = @notexpr;
+
+@bitwiseunaryexpr = @complementexpr;
+
+@arithmeticunaryexpr = @plusexpr | @minusexpr;
+
+@binaryexpr = @logicalbinaryexpr | @bitwisebinaryexpr | @arithmeticbinaryexpr | @comparison;
+
+@logicalbinaryexpr = @lorexpr | @landexpr;
+
+@bitwisebinaryexpr = @shiftexpr | @orexpr | @xorexpr | @andexpr | @andnotexpr;
+
+@arithmeticbinaryexpr = @addexpr | @subexpr | @mulexpr | @quoexpr | @remexpr;
+
+@shiftexpr = @shlexpr | @shrexpr;
+
+@comparison = @equalitytest | @relationalcomparison;
+
+@equalitytest = @eqlexpr | @neqexpr;
+
+@relationalcomparison = @lssexpr | @leqexpr | @gtrexpr | @geqexpr;
+
+@chantypeexpr = @sendchantypeexpr | @recvchantypeexpr | @sendrcvchantypeexpr;
+
+case @stmt.kind of
+ 0 = @badstmt
+| 1 = @declstmt
+| 2 = @emptystmt
+| 3 = @labeledstmt
+| 4 = @exprstmt
+| 5 = @sendstmt
+| 6 = @incstmt
+| 7 = @decstmt
+| 8 = @gostmt
+| 9 = @deferstmt
+| 10 = @returnstmt
+| 11 = @breakstmt
+| 12 = @continuestmt
+| 13 = @gotostmt
+| 14 = @fallthroughstmt
+| 15 = @blockstmt
+| 16 = @ifstmt
+| 17 = @caseclause
+| 18 = @exprswitchstmt
+| 19 = @typeswitchstmt
+| 20 = @commclause
+| 21 = @selectstmt
+| 22 = @forstmt
+| 23 = @rangestmt
+| 24 = @assignstmt
+| 25 = @definestmt
+| 26 = @addassignstmt
+| 27 = @subassignstmt
+| 28 = @mulassignstmt
+| 29 = @quoassignstmt
+| 30 = @remassignstmt
+| 31 = @andassignstmt
+| 32 = @orassignstmt
+| 33 = @xorassignstmt
+| 34 = @shlassignstmt
+| 35 = @shrassignstmt
+| 36 = @andnotassignstmt;
+
+@incdecstmt = @incstmt | @decstmt;
+
+@assignment = @simpleassignstmt | @compoundassignstmt;
+
+@simpleassignstmt = @assignstmt | @definestmt;
+
+@compoundassignstmt = @addassignstmt | @subassignstmt | @mulassignstmt | @quoassignstmt | @remassignstmt
+ | @andassignstmt | @orassignstmt | @xorassignstmt | @shlassignstmt | @shrassignstmt | @andnotassignstmt;
+
+@branchstmt = @breakstmt | @continuestmt | @gotostmt | @fallthroughstmt;
+
+@switchstmt = @exprswitchstmt | @typeswitchstmt;
+
+@loopstmt = @forstmt | @rangestmt;
+
+case @decl.kind of
+ 0 = @baddecl
+| 1 = @importdecl
+| 2 = @constdecl
+| 3 = @typedecl
+| 4 = @vardecl
+| 5 = @funcdecl;
+
+@gendecl = @importdecl | @constdecl | @typedecl | @vardecl;
+
+case @spec.kind of
+ 0 = @importspec
+| 1 = @valuespec
+| 2 = @typespec;
+
+case @object.kind of
+ 0 = @pkgobject
+| 1 = @decltypeobject
+| 2 = @builtintypeobject
+| 3 = @declconstobject
+| 4 = @builtinconstobject
+| 5 = @declvarobject
+| 6 = @declfunctionobject
+| 7 = @builtinfunctionobject
+| 8 = @labelobject;
+
+@declobject = @decltypeobject | @declconstobject | @declvarobject | @declfunctionobject;
+
+@builtinobject = @builtintypeobject | @builtinconstobject | @builtinfunctionobject;
+
+@typeobject = @decltypeobject | @builtintypeobject;
+
+@valueobject = @constobject | @varobject | @functionobject;
+
+@constobject = @declconstobject | @builtinconstobject;
+
+@varobject = @declvarobject;
+
+@functionobject = @declfunctionobject | @builtinfunctionobject;
+
+case @scope.kind of
+ 0 = @universescope
+| 1 = @packagescope
+| 2 = @localscope;
+
+case @type.kind of
+ 0 = @invalidtype
+| 1 = @boolexprtype
+| 2 = @inttype
+| 3 = @int8type
+| 4 = @int16type
+| 5 = @int32type
+| 6 = @int64type
+| 7 = @uinttype
+| 8 = @uint8type
+| 9 = @uint16type
+| 10 = @uint32type
+| 11 = @uint64type
+| 12 = @uintptrtype
+| 13 = @float32type
+| 14 = @float64type
+| 15 = @complex64type
+| 16 = @complex128type
+| 17 = @stringexprtype
+| 18 = @unsafepointertype
+| 19 = @boolliteraltype
+| 20 = @intliteraltype
+| 21 = @runeliteraltype
+| 22 = @floatliteraltype
+| 23 = @complexliteraltype
+| 24 = @stringliteraltype
+| 25 = @nilliteraltype
+| 26 = @arraytype
+| 27 = @slicetype
+| 28 = @structtype
+| 29 = @pointertype
+| 30 = @interfacetype
+| 31 = @tupletype
+| 32 = @signaturetype
+| 33 = @maptype
+| 34 = @sendchantype
+| 35 = @recvchantype
+| 36 = @sendrcvchantype
+| 37 = @namedtype;
+
+@basictype = @booltype | @numerictype | @stringtype | @literaltype | @invalidtype | @unsafepointertype;
+
+@booltype = @boolexprtype | @boolliteraltype;
+
+@numerictype = @integertype | @floattype | @complextype;
+
+@integertype = @signedintegertype | @unsignedintegertype;
+
+@signedintegertype = @inttype | @int8type | @int16type | @int32type | @int64type | @intliteraltype | @runeliteraltype;
+
+@unsignedintegertype = @uinttype | @uint8type | @uint16type | @uint32type | @uint64type | @uintptrtype;
+
+@floattype = @float32type | @float64type | @floatliteraltype;
+
+@complextype = @complex64type | @complex128type | @complexliteraltype;
+
+@stringtype = @stringexprtype | @stringliteraltype;
+
+@literaltype = @boolliteraltype | @intliteraltype | @runeliteraltype | @floatliteraltype | @complexliteraltype
+ | @stringliteraltype | @nilliteraltype;
+
+@compositetype = @containertype | @structtype | @pointertype | @interfacetype | @tupletype | @signaturetype | @namedtype;
+
+@containertype = @arraytype | @slicetype | @maptype | @chantype;
+
+@chantype = @sendchantype | @recvchantype | @sendrcvchantype;
+
+case @modexpr.kind of
+ 0 = @modcommentblock
+| 1 = @modline
+| 2 = @modlineblock
+| 3 = @modlparen
+| 4 = @modrparen;
+
+case @error.kind of
+ 0 = @unknownerror
+| 1 = @listerror
+| 2 = @parseerror
+| 3 = @typeerror;
+
diff --git a/upgrades/f7fb4ff6229adffa2c2c4238ef72c82359d56be4/old.dbscheme b/upgrades/f7fb4ff6229adffa2c2c4238ef72c82359d56be4/old.dbscheme
new file mode 100644
index 00000000000..f7fb4ff6229
--- /dev/null
+++ b/upgrades/f7fb4ff6229adffa2c2c4238ef72c82359d56be4/old.dbscheme
@@ -0,0 +1,420 @@
+/** Auto-generated dbscheme; do not edit. */
+
+
+/** Duplicate code **/
+
+duplicateCode(
+ unique int id : @duplication,
+ varchar(900) relativePath : string ref,
+ int equivClass : int ref);
+
+similarCode(
+ unique int id : @similarity,
+ varchar(900) relativePath : string ref,
+ int equivClass : int ref);
+
+@duplication_or_similarity = @duplication | @similarity;
+
+tokens(
+ int id : @duplication_or_similarity ref,
+ int offset : int ref,
+ int beginLine : int ref,
+ int beginColumn : int ref,
+ int endLine : int ref,
+ int endColumn : int ref);
+
+/** External data **/
+
+externalData(
+ int id : @externalDataElement,
+ varchar(900) path : string ref,
+ int column: int ref,
+ varchar(900) value : string ref
+);
+
+snapshotDate(unique date snapshotDate : date ref);
+
+sourceLocationPrefix(varchar(900) prefix : string ref);
+
+locations_default(unique int id: @location_default, int file: @file ref, int beginLine: int ref, int beginColumn: int ref,
+ int endLine: int ref, int endColumn: int ref);
+
+numlines(int element_id: @sourceline ref, int num_lines: int ref, int num_code: int ref, int num_comment: int ref);
+
+files(unique int id: @file, string name: string ref, string simple: string ref, string ext: string ref, int fromSource: int ref);
+
+folders(unique int id: @folder, string name: string ref, string simple: string ref);
+
+containerparent(int parent: @container ref, unique int child: @container ref);
+
+has_location(unique int locatable: @locatable ref, int location: @location ref);
+
+comment_groups(unique int id: @comment_group);
+
+comments(unique int id: @comment, int kind: int ref, int parent: @comment_group ref, int idx: int ref, string text: string ref);
+
+doc_comments(unique int node: @documentable ref, int comment: @comment_group ref);
+
+#keyset[parent, idx]
+exprs(unique int id: @expr, int kind: int ref, int parent: @exprparent ref, int idx: int ref);
+
+literals(unique int expr: @expr ref, string value: string ref, string raw: string ref);
+
+constvalues(unique int expr: @expr ref, string value: string ref, string exact: string ref);
+
+fields(unique int id: @field, int parent: @fieldparent ref, int idx: int ref);
+
+#keyset[parent, idx]
+stmts(unique int id: @stmt, int kind: int ref, int parent: @stmtparent ref, int idx: int ref);
+
+#keyset[parent, idx]
+decls(unique int id: @decl, int kind: int ref, int parent: @declparent ref, int idx: int ref);
+
+#keyset[parent, idx]
+specs(unique int id: @spec, int kind: int ref, int parent: @gendecl ref, int idx: int ref);
+
+scopes(unique int id: @scope, int kind: int ref);
+
+scopenesting(unique int inner: @scope ref, int outer: @scope ref);
+
+scopenodes(unique int node: @scopenode ref, int scope: @localscope ref);
+
+objects(unique int id: @object, int kind: int ref, string name: string ref);
+
+objectscopes(unique int object: @object ref, int scope: @scope ref);
+
+objecttypes(unique int object: @object ref, int tp: @type ref);
+
+methodreceivers(unique int method: @object ref, int receiver: @object ref);
+
+fieldstructs(unique int field: @object ref, int struct: @structtype ref);
+
+methodhosts(int method: @object ref, int host: @namedtype ref);
+
+defs(int ident: @ident ref, int object: @object ref);
+
+uses(int ident: @ident ref, int object: @object ref);
+
+types(unique int id: @type, int kind: int ref);
+
+type_of(unique int expr: @expr ref, int tp: @type ref);
+
+typename(unique int tp: @type ref, string name: string ref);
+
+key_type(unique int map: @maptype ref, int tp: @type ref);
+
+element_type(unique int container: @containertype ref, int tp: @type ref);
+
+base_type(unique int ptr: @pointertype ref, int tp: @type ref);
+
+underlying_type(unique int named: @namedtype ref, int tp: @type ref);
+
+#keyset[parent, index]
+component_types(int parent: @compositetype ref, int index: int ref, string name: string ref, int tp: @type ref);
+
+array_length(unique int tp: @arraytype ref, string len: string ref);
+
+type_objects(unique int tp: @type ref, int object: @object ref);
+
+packages(unique int id: @package, string name: string ref, string path: string ref, int scope: @packagescope ref);
+
+#keyset[parent, idx]
+modexprs(unique int id: @modexpr, int kind: int ref, int parent: @modexprparent ref, int idx: int ref);
+
+#keyset[parent, idx]
+modtokens(string token: string ref, int parent: @modexpr ref, int idx: int ref);
+
+@container = @file | @folder;
+
+@locatable = @node | @localscope;
+
+@node = @documentable | @exprparent | @modexprparent | @fieldparent | @stmtparent | @declparent | @scopenode
+ | @comment_group | @comment;
+
+@documentable = @file | @field | @spec | @gendecl | @funcdecl | @modexpr;
+
+@exprparent = @funcdef | @file | @expr | @field | @stmt | @decl | @spec;
+
+@modexprparent = @file | @modexpr;
+
+@fieldparent = @decl | @structtypeexpr | @functypeexpr | @interfacetypeexpr;
+
+@stmtparent = @funcdef | @stmt | @decl;
+
+@declparent = @file | @declstmt;
+
+@funcdef = @funclit | @funcdecl;
+
+@scopenode = @file | @functypeexpr | @blockstmt | @ifstmt | @caseclause | @switchstmt | @commclause | @loopstmt;
+
+@location = @location_default;
+
+@sourceline = @locatable;
+
+case @comment.kind of
+ 0 = @slashslashcomment
+| 1 = @slashstarcomment;
+
+case @expr.kind of
+ 0 = @badexpr
+| 1 = @ident
+| 2 = @ellipsis
+| 3 = @intlit
+| 4 = @floatlit
+| 5 = @imaglit
+| 6 = @charlit
+| 7 = @stringlit
+| 8 = @funclit
+| 9 = @compositelit
+| 10 = @parenexpr
+| 11 = @selectorexpr
+| 12 = @indexexpr
+| 13 = @sliceexpr
+| 14 = @typeassertexpr
+| 15 = @callorconversionexpr
+| 16 = @starexpr
+| 17 = @keyvalueexpr
+| 18 = @arraytypeexpr
+| 19 = @structtypeexpr
+| 20 = @functypeexpr
+| 21 = @interfacetypeexpr
+| 22 = @maptypeexpr
+| 23 = @plusexpr
+| 24 = @minusexpr
+| 25 = @notexpr
+| 26 = @complementexpr
+| 27 = @derefexpr
+| 28 = @addressexpr
+| 29 = @arrowexpr
+| 30 = @lorexpr
+| 31 = @landexpr
+| 32 = @eqlexpr
+| 33 = @neqexpr
+| 34 = @lssexpr
+| 35 = @leqexpr
+| 36 = @gtrexpr
+| 37 = @geqexpr
+| 38 = @addexpr
+| 39 = @subexpr
+| 40 = @orexpr
+| 41 = @xorexpr
+| 42 = @mulexpr
+| 43 = @quoexpr
+| 44 = @remexpr
+| 45 = @shlexpr
+| 46 = @shrexpr
+| 47 = @andexpr
+| 48 = @andnotexpr
+| 49 = @sendchantypeexpr
+| 50 = @recvchantypeexpr
+| 51 = @sendrcvchantypeexpr;
+
+@basiclit = @intlit | @floatlit | @imaglit | @charlit | @stringlit;
+
+@operatorexpr = @logicalexpr | @arithmeticexpr | @bitwiseexpr | @unaryexpr | @binaryexpr;
+
+@logicalexpr = @logicalunaryexpr | @logicalbinaryexpr;
+
+@arithmeticexpr = @arithmeticunaryexpr | @arithmeticbinaryexpr;
+
+@bitwiseexpr = @bitwiseunaryexpr | @bitwisebinaryexpr;
+
+@unaryexpr = @logicalunaryexpr | @bitwiseunaryexpr | @arithmeticunaryexpr | @derefexpr | @addressexpr | @arrowexpr;
+
+@logicalunaryexpr = @notexpr;
+
+@bitwiseunaryexpr = @complementexpr;
+
+@arithmeticunaryexpr = @plusexpr | @minusexpr;
+
+@binaryexpr = @logicalbinaryexpr | @bitwisebinaryexpr | @arithmeticbinaryexpr | @comparison;
+
+@logicalbinaryexpr = @lorexpr | @landexpr;
+
+@bitwisebinaryexpr = @shiftexpr | @orexpr | @xorexpr | @andexpr | @andnotexpr;
+
+@arithmeticbinaryexpr = @addexpr | @subexpr | @mulexpr | @quoexpr | @remexpr;
+
+@shiftexpr = @shlexpr | @shrexpr;
+
+@comparison = @equalitytest | @relationalcomparison;
+
+@equalitytest = @eqlexpr | @neqexpr;
+
+@relationalcomparison = @lssexpr | @leqexpr | @gtrexpr | @geqexpr;
+
+@chantypeexpr = @sendchantypeexpr | @recvchantypeexpr | @sendrcvchantypeexpr;
+
+case @stmt.kind of
+ 0 = @badstmt
+| 1 = @declstmt
+| 2 = @emptystmt
+| 3 = @labeledstmt
+| 4 = @exprstmt
+| 5 = @sendstmt
+| 6 = @incstmt
+| 7 = @decstmt
+| 8 = @gostmt
+| 9 = @deferstmt
+| 10 = @returnstmt
+| 11 = @breakstmt
+| 12 = @continuestmt
+| 13 = @gotostmt
+| 14 = @fallthroughstmt
+| 15 = @blockstmt
+| 16 = @ifstmt
+| 17 = @caseclause
+| 18 = @exprswitchstmt
+| 19 = @typeswitchstmt
+| 20 = @commclause
+| 21 = @selectstmt
+| 22 = @forstmt
+| 23 = @rangestmt
+| 24 = @assignstmt
+| 25 = @definestmt
+| 26 = @addassignstmt
+| 27 = @subassignstmt
+| 28 = @mulassignstmt
+| 29 = @quoassignstmt
+| 30 = @remassignstmt
+| 31 = @andassignstmt
+| 32 = @orassignstmt
+| 33 = @xorassignstmt
+| 34 = @shlassignstmt
+| 35 = @shrassignstmt
+| 36 = @andnotassignstmt;
+
+@incdecstmt = @incstmt | @decstmt;
+
+@assignment = @simpleassignstmt | @compoundassignstmt;
+
+@simpleassignstmt = @assignstmt | @definestmt;
+
+@compoundassignstmt = @addassignstmt | @subassignstmt | @mulassignstmt | @quoassignstmt | @remassignstmt
+ | @andassignstmt | @orassignstmt | @xorassignstmt | @shlassignstmt | @shrassignstmt | @andnotassignstmt;
+
+@branchstmt = @breakstmt | @continuestmt | @gotostmt | @fallthroughstmt;
+
+@switchstmt = @exprswitchstmt | @typeswitchstmt;
+
+@loopstmt = @forstmt | @rangestmt;
+
+case @decl.kind of
+ 0 = @baddecl
+| 1 = @importdecl
+| 2 = @constdecl
+| 3 = @typedecl
+| 4 = @vardecl
+| 5 = @funcdecl;
+
+@gendecl = @importdecl | @constdecl | @typedecl | @vardecl;
+
+case @spec.kind of
+ 0 = @importspec
+| 1 = @valuespec
+| 2 = @typespec;
+
+case @object.kind of
+ 0 = @pkgobject
+| 1 = @decltypeobject
+| 2 = @builtintypeobject
+| 3 = @declconstobject
+| 4 = @builtinconstobject
+| 5 = @declvarobject
+| 6 = @declfunctionobject
+| 7 = @builtinfunctionobject
+| 8 = @labelobject;
+
+@declobject = @decltypeobject | @declconstobject | @declvarobject | @declfunctionobject;
+
+@builtinobject = @builtintypeobject | @builtinconstobject | @builtinfunctionobject;
+
+@typeobject = @decltypeobject | @builtintypeobject;
+
+@valueobject = @constobject | @varobject | @functionobject;
+
+@constobject = @declconstobject | @builtinconstobject;
+
+@varobject = @declvarobject;
+
+@functionobject = @declfunctionobject | @builtinfunctionobject;
+
+case @scope.kind of
+ 0 = @universescope
+| 1 = @packagescope
+| 2 = @localscope;
+
+case @type.kind of
+ 0 = @invalidtype
+| 1 = @boolexprtype
+| 2 = @inttype
+| 3 = @int8type
+| 4 = @int16type
+| 5 = @int32type
+| 6 = @int64type
+| 7 = @uinttype
+| 8 = @uint8type
+| 9 = @uint16type
+| 10 = @uint32type
+| 11 = @uint64type
+| 12 = @uintptrtype
+| 13 = @float32type
+| 14 = @float64type
+| 15 = @complex64type
+| 16 = @complex128type
+| 17 = @stringexprtype
+| 18 = @unsafepointertype
+| 19 = @boolliteraltype
+| 20 = @intliteraltype
+| 21 = @runeliteraltype
+| 22 = @floatliteraltype
+| 23 = @complexliteraltype
+| 24 = @stringliteraltype
+| 25 = @nilliteraltype
+| 26 = @arraytype
+| 27 = @slicetype
+| 28 = @structtype
+| 29 = @pointertype
+| 30 = @interfacetype
+| 31 = @tupletype
+| 32 = @signaturetype
+| 33 = @maptype
+| 34 = @sendchantype
+| 35 = @recvchantype
+| 36 = @sendrcvchantype
+| 37 = @namedtype;
+
+@basictype = @booltype | @numerictype | @stringtype | @literaltype | @invalidtype | @unsafepointertype;
+
+@booltype = @boolexprtype | @boolliteraltype;
+
+@numerictype = @integertype | @floattype | @complextype;
+
+@integertype = @signedintegertype | @unsignedintegertype;
+
+@signedintegertype = @inttype | @int8type | @int16type | @int32type | @int64type | @intliteraltype | @runeliteraltype;
+
+@unsignedintegertype = @uinttype | @uint8type | @uint16type | @uint32type | @uint64type | @uintptrtype;
+
+@floattype = @float32type | @float64type | @floatliteraltype;
+
+@complextype = @complex64type | @complex128type | @complexliteraltype;
+
+@stringtype = @stringexprtype | @stringliteraltype;
+
+@literaltype = @boolliteraltype | @intliteraltype | @runeliteraltype | @floatliteraltype | @complexliteraltype
+ | @stringliteraltype | @nilliteraltype;
+
+@compositetype = @containertype | @structtype | @pointertype | @interfacetype | @tupletype | @signaturetype | @namedtype;
+
+@containertype = @arraytype | @slicetype | @maptype | @chantype;
+
+@chantype = @sendchantype | @recvchantype | @sendrcvchantype;
+
+case @modexpr.kind of
+ 0 = @modcommentblock
+| 1 = @modline
+| 2 = @modlineblock
+| 3 = @modlparen
+| 4 = @modrparen;
+
diff --git a/upgrades/f7fb4ff6229adffa2c2c4238ef72c82359d56be4/upgrade.properties b/upgrades/f7fb4ff6229adffa2c2c4238ef72c82359d56be4/upgrade.properties
new file mode 100644
index 00000000000..67fada3ac3b
--- /dev/null
+++ b/upgrades/f7fb4ff6229adffa2c2c4238ef72c82359d56be4/upgrade.properties
@@ -0,0 +1,2 @@
+description: Add tables for extracting frontend errors.
+compatibility: backwards
From d565a26d5b765fb337360b578f070df72a76be7c Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Thu, 9 Apr 2020 16:39:38 +0100
Subject: [PATCH 004/157] Add QL library for working with Go frontend errors.
---
ql/src/go.qll | 1 +
ql/src/semmle/go/Errors.qll | 50 +++++++++++++++++++++++++++++++++++++
2 files changed, 51 insertions(+)
create mode 100644 ql/src/semmle/go/Errors.qll
diff --git a/ql/src/go.qll b/ql/src/go.qll
index 0bce4c7d6b7..f4f459f2e48 100644
--- a/ql/src/go.qll
+++ b/ql/src/go.qll
@@ -7,6 +7,7 @@ import semmle.go.AST
import semmle.go.Comments
import semmle.go.Concepts
import semmle.go.Decls
+import semmle.go.Errors
import semmle.go.Expr
import semmle.go.Files
import semmle.go.GoMod
diff --git a/ql/src/semmle/go/Errors.qll b/ql/src/semmle/go/Errors.qll
new file mode 100644
index 00000000000..8beb6877f2f
--- /dev/null
+++ b/ql/src/semmle/go/Errors.qll
@@ -0,0 +1,50 @@
+/** Provides classes for working with Go frontend errors recorded during extraction. */
+
+import go
+
+/**
+ * An error reported by the Go frontend during extraction.
+ */
+class Error extends @error {
+ /** Gets the message associated with this error. */
+ string getMessage() { errors(this, _, result, _, _, _, _, _, _) }
+
+ /** Gets the raw position reported by the frontend for this error. */
+ string getRawPosition() { errors(this, _, _, result, _, _, _, _, _) }
+
+ /** Gets the package in which this error was reported. */
+ Package getPackage() { errors(this, _, _, _, _, _, _, result, _) }
+
+ /** Gets the index of this error among all errors reported for the same package. */
+ int getIndex() { errors(this, _, _, _, _, _, _, _, result) }
+
+ /**
+ * Holds if this element is at the specified location.
+ * The location spans column `startcolumn` of line `startline` to
+ * column `endcolumn` of line `endline` in file `filepath`.
+ * For more information, see
+ * [LGTM locations](https://lgtm.com/help/ql/locations).
+ */
+ predicate hasLocationInfo(
+ string filepath, int startline, int startcolumn, int endline, int endcolumn
+ ) {
+ errors(this, _, _, _, filepath, startline, startcolumn, _, _) and
+ endline = startline and
+ endcolumn = startcolumn
+ }
+
+ /** Gets a textual representation of this error. */
+ string toString() { result = getMessage() }
+}
+
+/** An error reported by an unknown part of the Go frontend. */
+class UnknownError extends Error, @unknownerror { }
+
+/** An error reported by the Go frontend driver. */
+class ListError extends Error, @listerror { }
+
+/** An error reported by the Go parser. */
+class ParseError extends Error, @parseerror { }
+
+/** An error reported by the Go type checker. */
+class TypeError extends Error, @typeerror { }
From bf42271d14bd8412312b9d3e6afe289f85698b2d Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Thu, 9 Apr 2020 16:39:38 +0100
Subject: [PATCH 005/157] Add convenience predicate to class `Error`.
---
ql/src/semmle/go/Errors.qll | 3 +++
1 file changed, 3 insertions(+)
diff --git a/ql/src/semmle/go/Errors.qll b/ql/src/semmle/go/Errors.qll
index 8beb6877f2f..3c8a8b9adea 100644
--- a/ql/src/semmle/go/Errors.qll
+++ b/ql/src/semmle/go/Errors.qll
@@ -18,6 +18,9 @@ class Error extends @error {
/** Gets the index of this error among all errors reported for the same package. */
int getIndex() { errors(this, _, _, _, _, _, _, _, result) }
+ /** Gets the file in which this error was reported, if it can be determined. */
+ File getFile() { hasLocationInfo(result.getAbsolutePath(), _, _, _, _) }
+
/**
* Holds if this element is at the specified location.
* The location spans column `startcolumn` of line `startline` to
From c6a37fdf1dca8fd8ecbedd4907f4261520745ddb Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Thu, 9 Apr 2020 16:44:03 +0100
Subject: [PATCH 006/157] Add consistency query flagging unexpected frontend
errors.
---
.../UnexpectedFrontendErrors.expected | 0
ql/test/consistency/UnexpectedFrontendErrors.ql | 16 ++++++++++++++++
ql/test/consistency/test.go | 7 +++++++
3 files changed, 23 insertions(+)
create mode 100644 ql/test/consistency/UnexpectedFrontendErrors.expected
create mode 100644 ql/test/consistency/UnexpectedFrontendErrors.ql
create mode 100644 ql/test/consistency/test.go
diff --git a/ql/test/consistency/UnexpectedFrontendErrors.expected b/ql/test/consistency/UnexpectedFrontendErrors.expected
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/ql/test/consistency/UnexpectedFrontendErrors.ql b/ql/test/consistency/UnexpectedFrontendErrors.ql
new file mode 100644
index 00000000000..f75e2c6bf21
--- /dev/null
+++ b/ql/test/consistency/UnexpectedFrontendErrors.ql
@@ -0,0 +1,16 @@
+/**
+ * @name Unexpected frontend error
+ * @description This query produces a list of all errors produced by the Go frontend
+ * during extraction, except for those occurring in files annotated with
+ * "// codeql test: expect frontend errors".
+ * @id go/unexpected-frontend-error
+ */
+
+import go
+
+from Error e
+where
+ not exists(Comment c | c.getFile() = e.getFile() |
+ c.getText().trim() = "codeql test: expect frontend errors"
+ )
+select e
diff --git a/ql/test/consistency/test.go b/ql/test/consistency/test.go
new file mode 100644
index 00000000000..ca40bd0f601
--- /dev/null
+++ b/ql/test/consistency/test.go
@@ -0,0 +1,7 @@
+package main
+
+// Example file with a syntax error to demonstrate use of "expect frontend errors" directive
+
+// codeql test: expect frontend errors
+
+This is not a valid Go program
From 13762bd76c0d3a7c2f71e4842f8db4cdc91d28ec Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Tue, 14 Apr 2020 10:18:27 +0100
Subject: [PATCH 007/157] Mark frontend errors in `Types/unknownFunction.go` as
expected.
---
.../semmle/go/Types/SignatureType_getNumParameter.expected | 2 +-
.../semmle/go/Types/SignatureType_getNumResult.expected | 2 +-
ql/test/library-tests/semmle/go/Types/notype.expected | 6 +++---
ql/test/library-tests/semmle/go/Types/unknownFunction.go | 4 +++-
4 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/ql/test/library-tests/semmle/go/Types/SignatureType_getNumParameter.expected b/ql/test/library-tests/semmle/go/Types/SignatureType_getNumParameter.expected
index 48855348e90..cc10eecd92b 100644
--- a/ql/test/library-tests/semmle/go/Types/SignatureType_getNumParameter.expected
+++ b/ql/test/library-tests/semmle/go/Types/SignatureType_getNumParameter.expected
@@ -8,4 +8,4 @@
| pkg1/tst.go:33:1:35:1 | function declaration | 0 |
| pkg1/tst.go:37:1:37:26 | function declaration | 1 |
| pkg1/tst.go:39:1:57:1 | function declaration | 2 |
-| unknownFunction.go:8:1:12:1 | function declaration | 0 |
+| unknownFunction.go:10:1:14:1 | function declaration | 0 |
diff --git a/ql/test/library-tests/semmle/go/Types/SignatureType_getNumResult.expected b/ql/test/library-tests/semmle/go/Types/SignatureType_getNumResult.expected
index a90412830d7..4a50841a812 100644
--- a/ql/test/library-tests/semmle/go/Types/SignatureType_getNumResult.expected
+++ b/ql/test/library-tests/semmle/go/Types/SignatureType_getNumResult.expected
@@ -8,4 +8,4 @@
| pkg1/tst.go:33:1:35:1 | function declaration | 1 |
| pkg1/tst.go:37:1:37:26 | function declaration | 0 |
| pkg1/tst.go:39:1:57:1 | function declaration | 0 |
-| unknownFunction.go:8:1:12:1 | function declaration | 0 |
+| unknownFunction.go:10:1:14:1 | function declaration | 0 |
diff --git a/ql/test/library-tests/semmle/go/Types/notype.expected b/ql/test/library-tests/semmle/go/Types/notype.expected
index 669165864d4..ded6be7bc59 100644
--- a/ql/test/library-tests/semmle/go/Types/notype.expected
+++ b/ql/test/library-tests/semmle/go/Types/notype.expected
@@ -1,3 +1,3 @@
-| unknownFunction.go:9:7:9:21 | unknownFunction | invalid type |
-| unknownFunction.go:9:7:9:23 | call to unknownFunction | invalid type |
-| unknownFunction.go:10:7:10:15 | ...+... | invalid type |
+| unknownFunction.go:11:7:11:21 | unknownFunction | invalid type |
+| unknownFunction.go:11:7:11:23 | call to unknownFunction | invalid type |
+| unknownFunction.go:12:7:12:15 | ...+... | invalid type |
diff --git a/ql/test/library-tests/semmle/go/Types/unknownFunction.go b/ql/test/library-tests/semmle/go/Types/unknownFunction.go
index 5af16d4b13f..1eda1dba942 100644
--- a/ql/test/library-tests/semmle/go/Types/unknownFunction.go
+++ b/ql/test/library-tests/semmle/go/Types/unknownFunction.go
@@ -1,7 +1,9 @@
package main
// This file tests type inference for expressions referencing undeclared entities.
-// It is therefore expected to produce extractor warnings.
+// It is therefore expected to produce frontend errors.
+
+// codeql test: expect frontend errors
import "fmt"
From ef497afc20da41320e105386466f7acf5923e978 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Tue, 14 Apr 2020 10:21:36 +0100
Subject: [PATCH 008/157] Mark a frontend error in `DeadStoreOfLocal` tests as
expected.
---
.../RedundantCode/DeadStoreOfLocal/DeadStoreOfLocal.expected | 2 +-
ql/test/query-tests/RedundantCode/DeadStoreOfLocal/main.go | 2 ++
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/ql/test/query-tests/RedundantCode/DeadStoreOfLocal/DeadStoreOfLocal.expected b/ql/test/query-tests/RedundantCode/DeadStoreOfLocal/DeadStoreOfLocal.expected
index b646631be51..58fd429b859 100644
--- a/ql/test/query-tests/RedundantCode/DeadStoreOfLocal/DeadStoreOfLocal.expected
+++ b/ql/test/query-tests/RedundantCode/DeadStoreOfLocal/DeadStoreOfLocal.expected
@@ -1,4 +1,4 @@
-| main.go:25:2:25:2 | assignment to x | This definition of x is never used. |
+| main.go:27:2:27:2 | assignment to x | This definition of x is never used. |
| testdata.go:32:2:32:2 | assignment to x | This definition of x is never used. |
| testdata.go:37:2:37:2 | assignment to x | This definition of x is never used. |
| testdata.go:61:2:61:2 | assignment to x | This definition of x is never used. |
diff --git a/ql/test/query-tests/RedundantCode/DeadStoreOfLocal/main.go b/ql/test/query-tests/RedundantCode/DeadStoreOfLocal/main.go
index 31062a18f98..05cad065d61 100644
--- a/ql/test/query-tests/RedundantCode/DeadStoreOfLocal/main.go
+++ b/ql/test/query-tests/RedundantCode/DeadStoreOfLocal/main.go
@@ -2,6 +2,8 @@ package p
import "fmt"
+// codeql test: expect frontend errors
+
func test() {
if false {
x := deadStore() // OK
From c15094ab9e49b5032162ff4b9d9cf909d8cafb0c Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Tue, 14 Apr 2020 10:20:09 +0100
Subject: [PATCH 009/157] Mark frontend errors as expected in
 `ImpossibleInterfaceNilCheck`.
---
.../RedundantCode/ImpossibleInterfaceNilCheck/err.go | 2 ++
1 file changed, 2 insertions(+)
diff --git a/ql/test/query-tests/RedundantCode/ImpossibleInterfaceNilCheck/err.go b/ql/test/query-tests/RedundantCode/ImpossibleInterfaceNilCheck/err.go
index 8801b3b7465..034b34b3d67 100644
--- a/ql/test/query-tests/RedundantCode/ImpossibleInterfaceNilCheck/err.go
+++ b/ql/test/query-tests/RedundantCode/ImpossibleInterfaceNilCheck/err.go
@@ -2,6 +2,8 @@ package main
import "fmt"
+// codeql test: expect frontend errors
+
func errtest() {
x := unknownFunction()
var y interface{} = x
From 97b1d3a57cb897536046dbd828ad0a50f8a2b908 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Mon, 20 Apr 2020 07:58:50 +0100
Subject: [PATCH 010/157] Fix extraction of error positions for paths
containing colon.
---
extractor/extractor.go | 52 ++++++++++++++++++++++++------------------
1 file changed, 30 insertions(+), 22 deletions(-)
diff --git a/extractor/extractor.go b/extractor/extractor.go
index 8fe96147808..65c5501d6d5 100644
--- a/extractor/extractor.go
+++ b/extractor/extractor.go
@@ -11,6 +11,7 @@ import (
"log"
"os"
"path/filepath"
+ "regexp"
"runtime"
"strconv"
"strings"
@@ -254,38 +255,45 @@ func extractObjectType(tw *trap.Writer, obj types.Object, lbl trap.Label) {
}
}
+var (
+ // file:line:col
+ threePartPos = regexp.MustCompile(`^(.+):(\d+):(\d+)$`)
+ // file:line
+ twoPartPos = regexp.MustCompile(`^(.+):(\d+)$`)
+)
+
// extractError extracts the message and location of a frontend error
func extractError(tw *trap.Writer, err packages.Error, pkglbl trap.Label, idx int) {
var (
- lbl = tw.Labeler.FreshID()
- kind = dbscheme.ErrorTypes[err.Kind].Index()
- pos = err.Pos
- posComponents = strings.Split(err.Pos, ":")
- file = ""
- line = 0
- col = 0
- e error
+ lbl = tw.Labeler.FreshID()
+ kind = dbscheme.ErrorTypes[err.Kind].Index()
+ pos = err.Pos
+ file = ""
+ line = 0
+ col = 0
+ e error
)
- switch len(posComponents) {
- case 3:
+
+ if parts := threePartPos.FindStringSubmatch(pos); parts != nil {
// "file:line:col"
- col, e = strconv.Atoi(posComponents[2])
+ col, e = strconv.Atoi(parts[3])
if e != nil {
- log.Printf("Warning: malformed column number `%s`: %v", posComponents[2], e)
+ log.Printf("Warning: malformed column number `%s`: %v", parts[3], e)
}
- fallthrough
- case 2:
+ line, e = strconv.Atoi(parts[2])
+ if e != nil {
+ log.Printf("Warning: malformed line number `%s`: %v", parts[2], e)
+ }
+ file = parts[1]
+ } else if parts := twoPartPos.FindStringSubmatch(pos); parts != nil {
// "file:line"
- file = posComponents[0]
- line, e = strconv.Atoi(posComponents[1])
+ line, e = strconv.Atoi(parts[2])
if e != nil {
- log.Printf("Warning: malformed line number `%s`: %v", posComponents[1], e)
- }
- default:
- // "", "-"
- if pos != "" && pos != "-" {
- log.Printf("Warning: malformed error position `%s`", pos)
+ log.Printf("Warning: malformed line number `%s`: %v", parts[2], e)
}
+ file = parts[1]
+ } else if pos != "" && pos != "-" {
+ log.Printf("Warning: malformed error position `%s`", pos)
}
file = filepath.ToSlash(srcarchive.TransformPath(file))
dbscheme.ErrorsTable.Emit(tw, lbl, kind, err.Msg, pos, file, line, col, pkglbl, idx)
From 78bffa96b3b91b29ed31da2cf94fff1da75ed2b8 Mon Sep 17 00:00:00 2001
From: Sauyon Lee
Date: Thu, 23 Apr 2020 18:41:42 -0700
Subject: [PATCH 011/157] Use CodeQL version 2.1.1
---
.github/workflows/codeqltest.yml | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/.github/workflows/codeqltest.yml b/.github/workflows/codeqltest.yml
index 31ed1503154..723e8b6f2b9 100644
--- a/.github/workflows/codeqltest.yml
+++ b/.github/workflows/codeqltest.yml
@@ -20,7 +20,7 @@ jobs:
echo "Done"
cd $HOME
echo "Downloading CodeQL CLI..."
- curl https://github.com/github/codeql-cli-binaries/releases/download/v2.0.3/codeql.zip -L -o codeql.zip
+ curl https://github.com/github/codeql-cli-binaries/releases/download/v2.1.1/codeql.zip -L -o codeql.zip
echo "Done"
echo "Unpacking CodeQL CLI..."
unzip -q codeql.zip
@@ -53,7 +53,7 @@ jobs:
echo "Done"
cd $HOME
echo "Downloading CodeQL CLI..."
- curl https://github.com/github/codeql-cli-binaries/releases/download/v2.0.3/codeql.zip -L -o codeql.zip
+ curl https://github.com/github/codeql-cli-binaries/releases/download/v2.1.1/codeql.zip -L -o codeql.zip
echo "Done"
echo "Unpacking CodeQL CLI..."
unzip -q codeql.zip
@@ -86,7 +86,7 @@ jobs:
echo "Done"
cd "$HOME"
echo "Downloading CodeQL CLI..."
- Invoke-WebRequest -Uri https://github.com/github/codeql-cli-binaries/releases/download/v2.0.3/codeql.zip -OutFile codeql.zip
+ Invoke-WebRequest -Uri https://github.com/github/codeql-cli-binaries/releases/download/v2.1.1/codeql.zip -OutFile codeql.zip
echo "Done"
echo "Unpacking CodeQL CLI..."
unzip -q codeql.zip
From 38f744ddd6cbd7bfd35104ec2bddc58929350fee Mon Sep 17 00:00:00 2001
From: Sauyon Lee
Date: Thu, 23 Apr 2020 20:04:19 -0700
Subject: [PATCH 012/157] Action: Use expand-archive on Windows
---
.github/workflows/codeqltest.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/codeqltest.yml b/.github/workflows/codeqltest.yml
index 723e8b6f2b9..73b050a46f7 100644
--- a/.github/workflows/codeqltest.yml
+++ b/.github/workflows/codeqltest.yml
@@ -89,7 +89,7 @@ jobs:
Invoke-WebRequest -Uri https://github.com/github/codeql-cli-binaries/releases/download/v2.1.1/codeql.zip -OutFile codeql.zip
echo "Done"
echo "Unpacking CodeQL CLI..."
- unzip -q codeql.zip
+ Expand-Archive codeql.zip
rm -fo codeql.zip
echo "Done"
From b3c363d1c217112901a3890354fd5c4cca9fde1a Mon Sep 17 00:00:00 2001
From: Sauyon Lee
Date: Thu, 23 Apr 2020 22:39:19 -0700
Subject: [PATCH 013/157] Try setting destination for expand-archive
---
.github/workflows/codeqltest.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/codeqltest.yml b/.github/workflows/codeqltest.yml
index 73b050a46f7..5b869c800b8 100644
--- a/.github/workflows/codeqltest.yml
+++ b/.github/workflows/codeqltest.yml
@@ -89,7 +89,7 @@ jobs:
Invoke-WebRequest -Uri https://github.com/github/codeql-cli-binaries/releases/download/v2.1.1/codeql.zip -OutFile codeql.zip
echo "Done"
echo "Unpacking CodeQL CLI..."
- Expand-Archive codeql.zip
+ Expand-Archive codeql.zip -DestinationPath $HOME
rm -fo codeql.zip
echo "Done"
From 6489538623714a7b54d816addb05e8492a810a24 Mon Sep 17 00:00:00 2001
From: Jason Reed
Date: Tue, 28 Apr 2020 12:00:24 -0400
Subject: [PATCH 014/157] Add queries for ide search.
This enables jump-to-definition and find-references in the VS Code
extension, for golang source archives.
---
ql/src/localDefinitions.ql | 19 +++++++++++++++++++
ql/src/localReferences.ql | 19 +++++++++++++++++++
2 files changed, 38 insertions(+)
create mode 100644 ql/src/localDefinitions.ql
create mode 100644 ql/src/localReferences.ql
diff --git a/ql/src/localDefinitions.ql b/ql/src/localDefinitions.ql
new file mode 100644
index 00000000000..9618d8f8637
--- /dev/null
+++ b/ql/src/localDefinitions.ql
@@ -0,0 +1,19 @@
+/**
+ * @name Jump-to-definition links
+ * @description Generates use-definition pairs that provide the data
+ * for jump-to-definition in the code viewer.
+ * @kind definitions
+ * @id go/ide-jump-to-definition
+ * @tags ide-contextual-queries/local-definitions
+ */
+
+import go
+
+external string selectedSourceFile();
+
+cached
+File getEncodedFile(string name) { result.getAbsolutePath().replaceAll(":", "_") = name }
+
+from Ident def, Ident use, Entity e
+where use.uses(e) and def.declares(e) and use.getFile() = getEncodedFile(selectedSourceFile())
+select use, def, "V"
diff --git a/ql/src/localReferences.ql b/ql/src/localReferences.ql
new file mode 100644
index 00000000000..ddd556fc8b3
--- /dev/null
+++ b/ql/src/localReferences.ql
@@ -0,0 +1,19 @@
+/**
+ * @name Find-references links
+ * @description Generates use-definition pairs that provide the data
+ * for find-references in the code viewer.
+ * @kind definitions
+ * @id go/ide-find-references
+ * @tags ide-contextual-queries/local-references
+ */
+
+import go
+
+external string selectedSourceFile();
+
+cached
+File getEncodedFile(string name) { result.getAbsolutePath().replaceAll(":", "_") = name }
+
+from Ident def, Ident use, Entity e
+where use.uses(e) and def.declares(e) and def.getFile() = getEncodedFile(selectedSourceFile())
+select use, def, "V"
From 92576e9c117316b422178968fc447402828c77a5 Mon Sep 17 00:00:00 2001
From: Porcupiney Hairs
Date: Tue, 28 Apr 2020 23:11:48 +0530
Subject: [PATCH 015/157] User-controlled bypass of sensitive action
---
.../CWE-807/SensitiveConditionBypass.qhelp | 29 ++++++++++
.../CWE-807/SensitiveConditionBypass.ql | 35 ++++++++++++
.../CWE-807/SensitiveConditionBypass.qll | 57 +++++++++++++++++++
.../CWE-807/SensitiveConditionBypassBad.go | 6 ++
.../CWE-807/SensitiveConditionBypass.expected | 3 +
.../CWE-807/SensitiveConditionBypass.qlref | 1 +
ql/test/experimental/CWE-807/condition.go | 33 +++++++++++
7 files changed, 164 insertions(+)
create mode 100644 ql/src/experimental/CWE-807/SensitiveConditionBypass.qhelp
create mode 100644 ql/src/experimental/CWE-807/SensitiveConditionBypass.ql
create mode 100644 ql/src/experimental/CWE-807/SensitiveConditionBypass.qll
create mode 100644 ql/src/experimental/CWE-807/SensitiveConditionBypassBad.go
create mode 100644 ql/test/experimental/CWE-807/SensitiveConditionBypass.expected
create mode 100644 ql/test/experimental/CWE-807/SensitiveConditionBypass.qlref
create mode 100644 ql/test/experimental/CWE-807/condition.go
diff --git a/ql/src/experimental/CWE-807/SensitiveConditionBypass.qhelp b/ql/src/experimental/CWE-807/SensitiveConditionBypass.qhelp
new file mode 100644
index 00000000000..67422e53a28
--- /dev/null
+++ b/ql/src/experimental/CWE-807/SensitiveConditionBypass.qhelp
@@ -0,0 +1,29 @@
+
+
+
+
+Testing untrusted user input against a fixed constant results in
+a bypass of the conditional check as the attacker may alter the input to match the constant.
+When an incorrect check of this type is used to guard a potentially sensitive block,
+it results in an attacker gaining access to the sensitive block.
+
+
+
+
+ Never decide whether to authenticate a user based on data that may be controlled by that user.
+ If necessary, ensure that the data is validated extensively when it is input before any
+ authentication checks are performed.
+
+It is still possible to have a system that "remembers" users, thus not requiring
+the user to login on every interaction. For example, personalization settings can be applied
+without authentication because this is not sensitive information. However, users
+should be allowed to take sensitive actions only when they have been fully authenticated.
+
+
+
+The following example shows a comparison where a user-controlled
+expression is used to guard a sensitive method. This should be avoided:
+
+
+
+
diff --git a/ql/src/experimental/CWE-807/SensitiveConditionBypass.ql b/ql/src/experimental/CWE-807/SensitiveConditionBypass.ql
new file mode 100644
index 00000000000..61798836e77
--- /dev/null
+++ b/ql/src/experimental/CWE-807/SensitiveConditionBypass.ql
@@ -0,0 +1,35 @@
+/**
+ * @name User-controlled bypassing of sensitive action
+ * @description This query tests for user-controlled bypassing
+ * of sensitive actions.
+ * @id go/sensitive-condition-bypass
+ * @kind problem
+ * @problem.severity high
+ * @tags external/cwe/cwe-807
+ * external/cwe/cwe-247
+ * external/cwe/cwe-350
+ */
+
+import go
+import SensitiveConditionBypass
+
+from
+ ControlFlow::ConditionGuardNode guard, DataFlow::Node sensitiveSink,
+ SensitiveExpr::Classification classification, Configuration config, DataFlow::PathNode source,
+ DataFlow::PathNode operand, DataFlow::PathNode constOperand, DataFlow::PathNode constSource,
+ ComparisonExpr comp, ConstConfiguration constConfig
+where
+ // there should be a flow between source and the operand sink
+ config.hasFlowPath(source, operand) and
+ // A constant string value should flow to a sink
+ constConfig.hasFlowPath(constSource, constOperand) and
+ // both operands should belong to the same comparison expression
+ operand.getNode().asExpr() = comp.getAnOperand() and
+ constOperand.getNode().asExpr() = comp.getAnOperand() and
+ // get the ConditionGuardNode corresponding to the comparison expr.
+ guard.getCondition() = comp and
+ // the sink `sensitiveSink` should be sensitive,
+ isSensitive(sensitiveSink, classification) and
+ // the guard should control the sink
+ guard.dominates(sensitiveSink.getBasicBlock())
+select comp, "This sensitive comparision check can potentially be bypassed"
diff --git a/ql/src/experimental/CWE-807/SensitiveConditionBypass.qll b/ql/src/experimental/CWE-807/SensitiveConditionBypass.qll
new file mode 100644
index 00000000000..77a503e7dc7
--- /dev/null
+++ b/ql/src/experimental/CWE-807/SensitiveConditionBypass.qll
@@ -0,0 +1,57 @@
+import go
+import semmle.go.security.SensitiveActions
+
+/**
+ * Holds if `sink` is used in a context that suggests it may hold sensitive data of
+ * the given `type`.
+ */
+predicate isSensitive(DataFlow::Node sink, SensitiveExpr::Classification type) {
+ exists(Write write, string name |
+ write.getRhs() = sink and
+ name = write.getLhs().getName() and
+ // whitelist obvious test password variables
+ not name.regexpMatch(HeuristicNames::notSensitive())
+ |
+ name.regexpMatch(HeuristicNames::maybeSensitive(type))
+ )
+ or
+ exists(SensitiveCall a | sink.asExpr() = a and a.getClassification() = type)
+ or
+ exists(SensitiveExpr a | sink.asExpr() = a and a.getClassification() = type)
+ or
+ exists(SensitiveAction a | a = sink and type = SensitiveExpr::secret())
+}
+
+/**
+ * A data-flow configuration for reasoning about
+ * user-controlled bypassing of sensitive actions.
+ */
+class Configuration extends TaintTracking::Configuration {
+ Configuration() { this = "Condtional Expression Check Bypass" }
+
+ override predicate isSource(DataFlow::Node source) {
+ source instanceof UntrustedFlowSource
+ or
+ exists(DataFlow::FieldReadNode f |
+ f.getField().hasQualifiedName("net/http", "Request", "Host")
+ |
+ source = f
+ )
+ }
+
+ override predicate isSink(DataFlow::Node sink) {
+ exists(ComparisonExpr c | c.getAnOperand() = sink.asExpr())
+ }
+}
+
+class ConstConfiguration extends DataFlow::Configuration {
+ ConstConfiguration() { this = "Constant expression flow" }
+
+ override predicate isSource(DataFlow::Node source) {
+ exists(string val | source.getStringValue() = val)
+ }
+
+ override predicate isSink(DataFlow::Node sink) {
+ exists(ComparisonExpr c | c.getAnOperand() = sink.asExpr())
+ }
+}
diff --git a/ql/src/experimental/CWE-807/SensitiveConditionBypassBad.go b/ql/src/experimental/CWE-807/SensitiveConditionBypassBad.go
new file mode 100644
index 00000000000..e2ca02615db
--- /dev/null
+++ b/ql/src/experimental/CWE-807/SensitiveConditionBypassBad.go
@@ -0,0 +1,6 @@
+func ex3(w http.ResponseWriter, r *http.Request) {
+ test2 := "test"
+ if r.Header.Get("X-Password") != test2 {
+ login()
+ }
+}
diff --git a/ql/test/experimental/CWE-807/SensitiveConditionBypass.expected b/ql/test/experimental/CWE-807/SensitiveConditionBypass.expected
new file mode 100644
index 00000000000..dcc3edfce51
--- /dev/null
+++ b/ql/test/experimental/CWE-807/SensitiveConditionBypass.expected
@@ -0,0 +1,3 @@
+| condition.go:14:5:14:34 | ...!=... | This sensitive comparision check can potentially be bypassed |
+| condition.go:22:5:22:35 | ...!=... | This sensitive comparision check can potentially be bypassed |
+| condition.go:30:5:30:35 | ...!=... | This sensitive comparision check can potentially be bypassed |
diff --git a/ql/test/experimental/CWE-807/SensitiveConditionBypass.qlref b/ql/test/experimental/CWE-807/SensitiveConditionBypass.qlref
new file mode 100644
index 00000000000..da2ab35074a
--- /dev/null
+++ b/ql/test/experimental/CWE-807/SensitiveConditionBypass.qlref
@@ -0,0 +1 @@
+experimental/CWE-807/SensitiveConditionBypass.ql
diff --git a/ql/test/experimental/CWE-807/condition.go b/ql/test/experimental/CWE-807/condition.go
new file mode 100644
index 00000000000..d8861b53201
--- /dev/null
+++ b/ql/test/experimental/CWE-807/condition.go
@@ -0,0 +1,33 @@
+package main
+
+import (
+ "io"
+ "net/http"
+)
+
+func use(xs ...interface{}) {}
+
+var test = "localhost"
+
+// bad both are from remote sources
+func ex1(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("Origin") != test {
+ authkey := "randomDatta"
+ io.WriteString(w, authkey)
+ }
+}
+
+func ex2(w http.ResponseWriter, r *http.Request) {
+ test2 := "test"
+ if r.Header.Get("Origin") != test2 {
+ authkey := "randomDatta2"
+ io.WriteString(w, authkey)
+ }
+}
+
+func ex3(w http.ResponseWriter, r *http.Request) {
+ test2 := "test"
+ if r.Header.Get("Origin") != test2 {
+ login()
+ }
+}
From 9948596e2cf542dd6e5665df95d8b4a7cc64f128 Mon Sep 17 00:00:00 2001
From: Porcupiney Hairs
Date: Tue, 28 Apr 2020 23:24:28 +0530
Subject: [PATCH 016/157] User-controlled bypass of a comparison
---
.../CWE-840/ConditionBypass.qhelp | 35 +++++++++++++++
.../experimental/CWE-840/ConditionBypass.ql | 44 +++++++++++++++++++
.../CWE-840/ConditionBypassBad.go | 12 +++++
.../CWE-840/ConditionBypassGood.go | 11 +++++
.../CWE-840/ConditionBypass.expected | 2 +
.../CWE-840/ConditionBypass.qlref | 1 +
ql/test/experimental/CWE-840/condition.go | 26 +++++++++++
7 files changed, 131 insertions(+)
create mode 100644 ql/src/experimental/CWE-840/ConditionBypass.qhelp
create mode 100644 ql/src/experimental/CWE-840/ConditionBypass.ql
create mode 100644 ql/src/experimental/CWE-840/ConditionBypassBad.go
create mode 100644 ql/src/experimental/CWE-840/ConditionBypassGood.go
create mode 100644 ql/test/experimental/CWE-840/ConditionBypass.expected
create mode 100644 ql/test/experimental/CWE-840/ConditionBypass.qlref
create mode 100644 ql/test/experimental/CWE-840/condition.go
diff --git a/ql/src/experimental/CWE-840/ConditionBypass.qhelp b/ql/src/experimental/CWE-840/ConditionBypass.qhelp
new file mode 100644
index 00000000000..42bdc95e0f3
--- /dev/null
+++ b/ql/src/experimental/CWE-840/ConditionBypass.qhelp
@@ -0,0 +1,35 @@
+
+
+
+
+Testing untrusted user input against untrusted user input results in
+a bypass of the conditional check as the attacker may modify parameters to match.
+
+
+
+
+To guard against this, it is advisable to avoid framing a comparison
+where both sides are untrusted user inputs.
+Instead, use a configuration to store and access the values required.
+
+
+
+
+The following example shows a comparison where both sides
+are from attacker-controlled request headers. This should be avoided:
+
+
+
+One way to remedy the problem is to test against a value stored in a configuration:
+
+
+
+
+
+ MITRE:
+
+ CWE-840.
+
+
+
+
\ No newline at end of file
diff --git a/ql/src/experimental/CWE-840/ConditionBypass.ql b/ql/src/experimental/CWE-840/ConditionBypass.ql
new file mode 100644
index 00000000000..8fc7fb3d249
--- /dev/null
+++ b/ql/src/experimental/CWE-840/ConditionBypass.ql
@@ -0,0 +1,44 @@
+/**
+ * @name Comparison Expression Check Bypass
+ * @description This query tests for user-controlled bypassing
+ * of a comparision expression i.e. instances where both the
+ * lhs and rhs of a comparision are user controlled.
+ * @id go/condition-bypass
+ * @kind problem
+ * @problem.severity medium
+ * @tags external/cwe/cwe-840
+ */
+
+import go
+
+/**
+ * A data-flow configuration for reasoning about Condition Bypass.
+ */
+class Configuration extends TaintTracking::Configuration {
+ Configuration() { this = "Comparision Expression Check Bypass" }
+
+ override predicate isSource(DataFlow::Node source) {
+ source instanceof UntrustedFlowSource
+ or
+ exists(string fieldName |
+ source.(DataFlow::FieldReadNode).getField().hasQualifiedName("net/http", "Request", fieldName)
+ |
+ fieldName = "Host"
+ )
+ }
+
+ override predicate isSink(DataFlow::Node sink) {
+ exists(ComparisonExpr c | c.getAnOperand() = sink.asExpr())
+ }
+}
+
+from
+ Configuration config, DataFlow::PathNode lhsSource, DataFlow::PathNode lhs,
+ DataFlow::PathNode rhsSource, DataFlow::PathNode rhs, ComparisonExpr c
+where
+ config.hasFlowPath(rhsSource, rhs) and
+ rhs.getNode().asExpr() = c.getRightOperand() and
+ config.hasFlowPath(lhsSource, lhs) and
+ lhs.getNode().asExpr() = c.getLeftOperand()
+select c, "This comparision is between user controlled operands and "
++ "hence may be bypassed."
diff --git a/ql/src/experimental/CWE-840/ConditionBypassBad.go b/ql/src/experimental/CWE-840/ConditionBypassBad.go
new file mode 100644
index 00000000000..6ec70c694bd
--- /dev/null
+++ b/ql/src/experimental/CWE-840/ConditionBypassBad.go
@@ -0,0 +1,12 @@
+package main
+
+import (
+ "net/http"
+)
+
+// bad the origin and the host headers are user controlled
+func ex1(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("Origin") != "http://"+r.Host {
+ //do something
+ }
+}
diff --git a/ql/src/experimental/CWE-840/ConditionBypassGood.go b/ql/src/experimental/CWE-840/ConditionBypassGood.go
new file mode 100644
index 00000000000..987f97a9cd0
--- /dev/null
+++ b/ql/src/experimental/CWE-840/ConditionBypassGood.go
@@ -0,0 +1,11 @@
+package main
+
+import (
+ "net/http"
+)
+
+func ex1(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("Origin") != config.get("Host") {
+ //do something
+ }
+}
diff --git a/ql/test/experimental/CWE-840/ConditionBypass.expected b/ql/test/experimental/CWE-840/ConditionBypass.expected
new file mode 100644
index 00000000000..73f3b39a0cc
--- /dev/null
+++ b/ql/test/experimental/CWE-840/ConditionBypass.expected
@@ -0,0 +1,2 @@
+| condition.go:9:5:9:46 | ...!=... | This comparision is between user controlled operands and hence may be bypassed. |
+| condition.go:16:5:16:62 | ...!=... | This comparision is between user controlled operands and hence may be bypassed. |
diff --git a/ql/test/experimental/CWE-840/ConditionBypass.qlref b/ql/test/experimental/CWE-840/ConditionBypass.qlref
new file mode 100644
index 00000000000..d107d9110d5
--- /dev/null
+++ b/ql/test/experimental/CWE-840/ConditionBypass.qlref
@@ -0,0 +1 @@
+experimental/CWE-840/ConditionBypass.ql
diff --git a/ql/test/experimental/CWE-840/condition.go b/ql/test/experimental/CWE-840/condition.go
new file mode 100644
index 00000000000..b61d72b94e1
--- /dev/null
+++ b/ql/test/experimental/CWE-840/condition.go
@@ -0,0 +1,26 @@
+package main
+
+import (
+ "net/http"
+)
+
+// bad: taken from https://www.gorillatoolkit.org/pkg/websocket
+func ex1(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("Origin") != "http://"+r.Host {
+ //do something
+ }
+}
+
+// bad: both operands come from remote sources
+func ex2(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("Origin") != "http://"+r.Header.Get("Header") {
+ //do something
+ }
+}
+
+// good
+func ex3(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("Origin") != "http://"+"test" {
+ //do something
+ }
+}
From 0546c527afcb470589d17c597a034b55955c8c10 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Tue, 28 Apr 2020 21:41:29 +0100
Subject: [PATCH 017/157] Fix `getExitNode` for receiver outputs.
---
.../semmle/go/dataflow/FunctionInputsAndOutputs.qll | 5 +++--
.../semmle/go/dataflow/internal/DataFlowPrivate.qll | 11 +++++++++++
ql/src/semmle/go/dataflow/internal/DataFlowUtil.qll | 12 +-----------
3 files changed, 15 insertions(+), 13 deletions(-)
diff --git a/ql/src/semmle/go/dataflow/FunctionInputsAndOutputs.qll b/ql/src/semmle/go/dataflow/FunctionInputsAndOutputs.qll
index 95a4c180b97..1cf73dad3e8 100644
--- a/ql/src/semmle/go/dataflow/FunctionInputsAndOutputs.qll
+++ b/ql/src/semmle/go/dataflow/FunctionInputsAndOutputs.qll
@@ -4,6 +4,7 @@
*/
import go
+private import semmle.go.dataflow.internal.DataFlowPrivate
/**
* An abstract representation of an input to a function, which is either a parameter
@@ -181,8 +182,8 @@ private class OutParameter extends FunctionOutput, TOutParameter {
}
override DataFlow::Node getExitNode(DataFlow::CallNode c) {
- exists(DataFlow::ArgumentNode arg |
- arg.argumentOf(c.asExpr(), index) and
+ exists(DataFlow::Node arg |
+ arg = getArgument(c, index) and
result.(DataFlow::PostUpdateNode).getPreUpdateNode() = arg
)
}
diff --git a/ql/src/semmle/go/dataflow/internal/DataFlowPrivate.qll b/ql/src/semmle/go/dataflow/internal/DataFlowPrivate.qll
index 16b85fa8aef..049bfb2667e 100644
--- a/ql/src/semmle/go/dataflow/internal/DataFlowPrivate.qll
+++ b/ql/src/semmle/go/dataflow/internal/DataFlowPrivate.qll
@@ -272,3 +272,14 @@ predicate isUnreachableInCall(Node n, DataFlowCall call) {
}
int accessPathLimit() { result = 5 }
+
+/**
+ * Gets the `i`th argument of call `c`, where the receiver of a method call
+ * counts as argument -1.
+ */
+Node getArgument(CallNode c, int i) {
+ result = c.getArgument(i)
+ or
+ result = c.(MethodCallNode).getReceiver() and
+ i = -1
+}
diff --git a/ql/src/semmle/go/dataflow/internal/DataFlowUtil.qll b/ql/src/semmle/go/dataflow/internal/DataFlowUtil.qll
index 09e396a9563..3645073cf2f 100644
--- a/ql/src/semmle/go/dataflow/internal/DataFlowUtil.qll
+++ b/ql/src/semmle/go/dataflow/internal/DataFlowUtil.qll
@@ -4,6 +4,7 @@
import go
import semmle.go.dataflow.FunctionInputsAndOutputs
+private import DataFlowPrivate
cached
private newtype TNode =
@@ -423,17 +424,6 @@ class PostUpdateNode extends Node {
Node getPreUpdateNode() { result = preupd }
}
-/**
- * Gets the `i`th argument of call `c`, where the receiver of a method call
- * counts as argument -1.
- */
-private Node getArgument(CallNode c, int i) {
- result = c.getArgument(i)
- or
- result = c.(MethodCallNode).getReceiver() and
- i = -1
-}
-
/**
* A data-flow node that occurs as an argument in a call, including receiver arguments.
*/
From a1222344eb1202650c5bdf66b62dfcc77cfd77b3 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Wed, 29 Apr 2020 07:46:25 +0100
Subject: [PATCH 018/157] Add tests.
---
.../FunctionInput_getEntryNode.expected | 2 ++
.../FunctionInput_getExitNode.expected | 1 +
.../FunctionModelStep.expected | 1 +
.../FunctionInputsAndOutputs/FunctionModelStep.ql | 13 +++++++++++++
.../FunctionOutput_getExitNode.expected | 2 ++
.../go/dataflow/FunctionInputsAndOutputs/tst.go | 11 +++++++++++
6 files changed, 30 insertions(+)
create mode 100644 ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionModelStep.expected
create mode 100644 ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionModelStep.ql
create mode 100644 ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/tst.go
diff --git a/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionInput_getEntryNode.expected b/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionInput_getEntryNode.expected
index 316a5f82f61..2989f4ef314 100644
--- a/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionInput_getEntryNode.expected
+++ b/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionInput_getEntryNode.expected
@@ -2,6 +2,7 @@
| parameter 0 | main.go:53:2:53:22 | call to op2 | main.go:53:6:53:8 | "-" |
| parameter 0 | main.go:55:2:55:27 | call to Printf | main.go:55:13:55:20 | "%d, %d" |
| parameter 0 | main.go:57:2:57:27 | call to Printf | main.go:57:13:57:20 | "%d, %d" |
+| parameter 0 | tst.go:10:2:10:29 | call to ReadFrom | tst.go:10:23:10:28 | reader |
| parameter 1 | main.go:51:2:51:14 | call to op | main.go:51:10:51:10 | 1 |
| parameter 1 | main.go:53:2:53:22 | call to op2 | main.go:53:11:53:11 | 2 |
| parameter 1 | main.go:55:2:55:27 | call to Printf | main.go:55:23:55:23 | x |
@@ -11,3 +12,4 @@
| parameter 2 | main.go:55:2:55:27 | call to Printf | main.go:55:26:55:26 | y |
| parameter 2 | main.go:57:2:57:27 | call to Printf | main.go:57:26:57:26 | y |
| receiver | main.go:53:14:53:21 | call to bump | main.go:53:14:53:14 | c |
+| receiver | tst.go:10:2:10:29 | call to ReadFrom | tst.go:10:2:10:12 | bytesBuffer |
diff --git a/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionInput_getExitNode.expected b/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionInput_getExitNode.expected
index 92e681a7b01..f13c3f16bd1 100644
--- a/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionInput_getExitNode.expected
+++ b/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionInput_getExitNode.expected
@@ -1,6 +1,7 @@
| parameter 0 | main.go:5:1:11:1 | function declaration | main.go:5:9:5:10 | definition of op |
| parameter 0 | main.go:13:1:20:1 | function declaration | main.go:13:10:13:11 | definition of op |
| parameter 0 | main.go:40:1:48:1 | function declaration | main.go:40:12:40:12 | definition of b |
+| parameter 0 | tst.go:8:1:11:1 | function declaration | tst.go:8:12:8:17 | definition of reader |
| parameter 1 | main.go:5:1:11:1 | function declaration | main.go:5:20:5:20 | definition of x |
| parameter 1 | main.go:13:1:20:1 | function declaration | main.go:13:21:13:21 | definition of x |
| parameter 2 | main.go:5:1:11:1 | function declaration | main.go:5:27:5:27 | definition of y |
diff --git a/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionModelStep.expected b/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionModelStep.expected
new file mode 100644
index 00000000000..0c64f6cc4b4
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionModelStep.expected
@@ -0,0 +1 @@
+| file://:0:0:0:0 | ReadFrom | tst.go:10:23:10:28 | reader | tst.go:9:2:9:12 | definition of bytesBuffer |
diff --git a/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionModelStep.ql b/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionModelStep.ql
new file mode 100644
index 00000000000..d21a051038d
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionModelStep.ql
@@ -0,0 +1,13 @@
+import go
+
+class BytesReadFrom extends TaintTracking::FunctionModel, Method {
+ BytesReadFrom() { this.(Method).hasQualifiedName("bytes", "Buffer", "ReadFrom") }
+
+ override predicate hasTaintFlow(FunctionInput inp, FunctionOutput outp) {
+ inp.isParameter(0) and outp.isReceiver()
+ }
+}
+
+from Function fn, DataFlow::Node pred, DataFlow::Node succ
+where TaintTracking::functionModelStep(fn, pred, succ)
+select fn, pred, succ
diff --git a/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionOutput_getExitNode.expected b/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionOutput_getExitNode.expected
index 19c2533a831..f84d4616a8f 100644
--- a/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionOutput_getExitNode.expected
+++ b/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionOutput_getExitNode.expected
@@ -1,6 +1,8 @@
+| receiver | tst.go:10:2:10:29 | call to ReadFrom | tst.go:9:2:9:12 | definition of bytesBuffer |
| result | main.go:51:2:51:14 | call to op | main.go:51:2:51:14 | call to op |
| result | main.go:53:2:53:22 | call to op2 | main.go:53:2:53:22 | call to op2 |
| result | main.go:53:14:53:21 | call to bump | main.go:53:14:53:21 | call to bump |
+| result | tst.go:9:17:9:33 | call to new | tst.go:9:17:9:33 | call to new |
| result 0 | main.go:54:10:54:15 | call to test | main.go:54:2:54:15 | ... := ...[0] |
| result 0 | main.go:56:9:56:15 | call to test2 | main.go:56:2:56:15 | ... = ...[0] |
| result 1 | main.go:54:10:54:15 | call to test | main.go:54:2:54:15 | ... := ...[1] |
diff --git a/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/tst.go b/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/tst.go
new file mode 100644
index 00000000000..e36e3cf15d2
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/tst.go
@@ -0,0 +1,11 @@
+package main
+
+import (
+ "bytes"
+ "io"
+)
+
+func test4(reader io.Reader) {
+ bytesBuffer := new(bytes.Buffer)
+ bytesBuffer.ReadFrom(reader)
+}
From a93477c3019c773c4bc8f418a5ab75624e12b311 Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Wed, 29 Apr 2020 18:31:07 +0300
Subject: [PATCH 019/157] Add syscall functions to SystemCommandExecutors
---
.../go/frameworks/SystemCommandExecutors.qll | 4 +
.../SystemCommandExecutors.expected | 116 +++++++++---------
.../SystemCommandExecutors.go | 9 ++
3 files changed, 74 insertions(+), 55 deletions(-)
diff --git a/ql/src/semmle/go/frameworks/SystemCommandExecutors.qll b/ql/src/semmle/go/frameworks/SystemCommandExecutors.qll
index 940d30f6142..8ef7ce042c2 100644
--- a/ql/src/semmle/go/frameworks/SystemCommandExecutors.qll
+++ b/ql/src/semmle/go/frameworks/SystemCommandExecutors.qll
@@ -29,6 +29,10 @@ private class SystemCommandExecutors extends SystemCommandExecution::Range, Data
pkg = "os/exec" and name = "Command" and cmdArg = 0
or
pkg = "os/exec" and name = "CommandContext" and cmdArg = 1
+ or
+ pkg = "syscall" and
+ (name = "Exec" or name = "ForkExec" or name = "StartProcess") and
+ cmdArg = 0
)
}
diff --git a/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/SystemCommandExecutors.expected b/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/SystemCommandExecutors.expected
index 0aaee3714bf..0544bdd9dcc 100644
--- a/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/SystemCommandExecutors.expected
+++ b/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/SystemCommandExecutors.expected
@@ -1,55 +1,61 @@
-| SystemCommandExecutors.go:29:3:29:36 | call to StartProcess | SystemCommandExecutors.go:29:19:29:24 | source |
-| SystemCommandExecutors.go:33:3:33:47 | call to StartProcess | SystemCommandExecutors.go:33:19:33:23 | shell |
-| SystemCommandExecutors.go:33:3:33:47 | call to StartProcess | SystemCommandExecutors.go:33:26:33:41 | composite literal |
-| SystemCommandExecutors.go:33:3:33:47 | call to StartProcess | SystemCommandExecutors.go:33:44:33:46 | nil |
-| SystemCommandExecutors.go:36:3:36:64 | call to StartProcess | SystemCommandExecutors.go:36:19:36:23 | shell |
-| SystemCommandExecutors.go:36:3:36:64 | call to StartProcess | SystemCommandExecutors.go:36:26:36:58 | call to append |
-| SystemCommandExecutors.go:36:3:36:64 | call to StartProcess | SystemCommandExecutors.go:36:61:36:63 | nil |
-| SystemCommandExecutors.go:39:3:39:61 | call to StartProcess | SystemCommandExecutors.go:39:19:39:23 | shell |
-| SystemCommandExecutors.go:39:3:39:61 | call to StartProcess | SystemCommandExecutors.go:39:26:39:55 | call to append |
-| SystemCommandExecutors.go:39:3:39:61 | call to StartProcess | SystemCommandExecutors.go:39:58:39:60 | nil |
-| SystemCommandExecutors.go:47:3:47:57 | call to StartProcess | SystemCommandExecutors.go:47:19:47:33 | assumedNonShell |
-| SystemCommandExecutors.go:50:3:50:74 | call to StartProcess | SystemCommandExecutors.go:50:19:50:33 | assumedNonShell |
-| SystemCommandExecutors.go:53:3:53:82 | call to StartProcess | SystemCommandExecutors.go:53:19:53:33 | assumedNonShell |
-| SystemCommandExecutors.go:59:3:59:31 | call to Command | SystemCommandExecutors.go:59:16:59:21 | source |
-| SystemCommandExecutors.go:62:3:62:41 | call to Command | SystemCommandExecutors.go:62:16:62:20 | shell |
-| SystemCommandExecutors.go:62:3:62:41 | call to Command | SystemCommandExecutors.go:62:23:62:26 | "a0" |
-| SystemCommandExecutors.go:62:3:62:41 | call to Command | SystemCommandExecutors.go:62:29:62:32 | "a1" |
-| SystemCommandExecutors.go:62:3:62:41 | call to Command | SystemCommandExecutors.go:62:35:62:40 | source |
-| SystemCommandExecutors.go:65:3:65:56 | call to Command | SystemCommandExecutors.go:65:16:65:20 | shell |
-| SystemCommandExecutors.go:65:3:65:56 | call to Command | SystemCommandExecutors.go:65:23:65:52 | call to append |
-| SystemCommandExecutors.go:68:3:68:114 | call to Command | SystemCommandExecutors.go:68:16:68:19 | "sh" |
-| SystemCommandExecutors.go:68:3:68:114 | call to Command | SystemCommandExecutors.go:68:22:68:25 | "-c" |
-| SystemCommandExecutors.go:68:3:68:114 | call to Command | SystemCommandExecutors.go:68:28:68:113 | ...+... |
-| SystemCommandExecutors.go:69:3:69:42 | call to Command | SystemCommandExecutors.go:69:16:69:21 | "sudo" |
-| SystemCommandExecutors.go:69:3:69:42 | call to Command | SystemCommandExecutors.go:69:24:69:27 | "sh" |
-| SystemCommandExecutors.go:69:3:69:42 | call to Command | SystemCommandExecutors.go:69:30:69:33 | "-c" |
-| SystemCommandExecutors.go:69:3:69:42 | call to Command | SystemCommandExecutors.go:69:36:69:41 | source |
-| SystemCommandExecutors.go:72:3:72:68 | call to Command | SystemCommandExecutors.go:72:16:72:21 | "ruby" |
-| SystemCommandExecutors.go:72:3:72:68 | call to Command | SystemCommandExecutors.go:72:24:72:27 | "-e" |
-| SystemCommandExecutors.go:72:3:72:68 | call to Command | SystemCommandExecutors.go:72:30:72:67 | call to Sprintf |
-| SystemCommandExecutors.go:73:3:73:80 | call to Command | SystemCommandExecutors.go:73:16:73:21 | "perl" |
-| SystemCommandExecutors.go:73:3:73:80 | call to Command | SystemCommandExecutors.go:73:24:73:27 | "-e" |
-| SystemCommandExecutors.go:73:3:73:80 | call to Command | SystemCommandExecutors.go:73:30:73:79 | call to Sprintf |
-| SystemCommandExecutors.go:74:3:74:86 | call to Command | SystemCommandExecutors.go:74:16:74:26 | "python2.7" |
-| SystemCommandExecutors.go:74:3:74:86 | call to Command | SystemCommandExecutors.go:74:29:74:32 | "-c" |
-| SystemCommandExecutors.go:74:3:74:86 | call to Command | SystemCommandExecutors.go:74:35:74:85 | call to Sprintf |
-| SystemCommandExecutors.go:75:3:75:87 | call to Command | SystemCommandExecutors.go:75:16:75:27 | "python3.6m" |
-| SystemCommandExecutors.go:75:3:75:87 | call to Command | SystemCommandExecutors.go:75:30:75:33 | "-c" |
-| SystemCommandExecutors.go:75:3:75:87 | call to Command | SystemCommandExecutors.go:75:36:75:86 | call to Sprintf |
-| SystemCommandExecutors.go:77:3:77:56 | call to Command | SystemCommandExecutors.go:77:16:77:33 | "python3.7-config" |
-| SystemCommandExecutors.go:78:3:78:44 | call to Command | SystemCommandExecutors.go:78:16:78:28 | "python3-pbr" |
-| SystemCommandExecutors.go:81:3:81:56 | call to Command | SystemCommandExecutors.go:81:16:81:20 | "ssh" |
-| SystemCommandExecutors.go:81:3:81:56 | call to Command | SystemCommandExecutors.go:81:23:81:26 | "-t" |
-| SystemCommandExecutors.go:81:3:81:56 | call to Command | SystemCommandExecutors.go:81:29:81:39 | "user@host" |
-| SystemCommandExecutors.go:81:3:81:56 | call to Command | SystemCommandExecutors.go:81:42:81:55 | ...+... |
-| SystemCommandExecutors.go:86:3:86:32 | call to CombinedOutput | SystemCommandExecutors.go:86:26:86:31 | source |
-| SystemCommandExecutors.go:87:3:87:24 | call to Output | SystemCommandExecutors.go:87:18:87:23 | source |
-| SystemCommandExecutors.go:88:3:88:21 | call to Run | SystemCommandExecutors.go:88:15:88:20 | source |
-| SystemCommandExecutors.go:89:3:89:23 | call to Start | SystemCommandExecutors.go:89:17:89:22 | source |
-| SystemCommandExecutors.go:93:3:93:83 | call to Command | SystemCommandExecutors.go:93:14:93:18 | shell |
-| SystemCommandExecutors.go:93:3:93:83 | call to Command | SystemCommandExecutors.go:93:21:93:79 | call to toInterfaceArray |
-| SystemCommandExecutors.go:94:3:94:101 | call to Call | SystemCommandExecutors.go:94:32:94:36 | shell |
-| SystemCommandExecutors.go:94:3:94:101 | call to Call | SystemCommandExecutors.go:94:39:94:97 | call to toInterfaceArray |
-| SystemCommandExecutors.go:95:3:95:104 | call to Command | SystemCommandExecutors.go:95:35:95:39 | shell |
-| SystemCommandExecutors.go:95:3:95:104 | call to Command | SystemCommandExecutors.go:95:42:95:100 | call to toInterfaceArray |
+| SystemCommandExecutors.go:30:3:30:36 | call to StartProcess | SystemCommandExecutors.go:30:19:30:24 | source |
+| SystemCommandExecutors.go:34:3:34:47 | call to StartProcess | SystemCommandExecutors.go:34:19:34:23 | shell |
+| SystemCommandExecutors.go:34:3:34:47 | call to StartProcess | SystemCommandExecutors.go:34:26:34:41 | composite literal |
+| SystemCommandExecutors.go:34:3:34:47 | call to StartProcess | SystemCommandExecutors.go:34:44:34:46 | nil |
+| SystemCommandExecutors.go:37:3:37:64 | call to StartProcess | SystemCommandExecutors.go:37:19:37:23 | shell |
+| SystemCommandExecutors.go:37:3:37:64 | call to StartProcess | SystemCommandExecutors.go:37:26:37:58 | call to append |
+| SystemCommandExecutors.go:37:3:37:64 | call to StartProcess | SystemCommandExecutors.go:37:61:37:63 | nil |
+| SystemCommandExecutors.go:40:3:40:61 | call to StartProcess | SystemCommandExecutors.go:40:19:40:23 | shell |
+| SystemCommandExecutors.go:40:3:40:61 | call to StartProcess | SystemCommandExecutors.go:40:26:40:55 | call to append |
+| SystemCommandExecutors.go:40:3:40:61 | call to StartProcess | SystemCommandExecutors.go:40:58:40:60 | nil |
+| SystemCommandExecutors.go:48:3:48:57 | call to StartProcess | SystemCommandExecutors.go:48:19:48:33 | assumedNonShell |
+| SystemCommandExecutors.go:51:3:51:74 | call to StartProcess | SystemCommandExecutors.go:51:19:51:33 | assumedNonShell |
+| SystemCommandExecutors.go:54:3:54:82 | call to StartProcess | SystemCommandExecutors.go:54:19:54:33 | assumedNonShell |
+| SystemCommandExecutors.go:60:3:60:31 | call to Command | SystemCommandExecutors.go:60:16:60:21 | source |
+| SystemCommandExecutors.go:63:3:63:41 | call to Command | SystemCommandExecutors.go:63:16:63:20 | shell |
+| SystemCommandExecutors.go:63:3:63:41 | call to Command | SystemCommandExecutors.go:63:23:63:26 | "a0" |
+| SystemCommandExecutors.go:63:3:63:41 | call to Command | SystemCommandExecutors.go:63:29:63:32 | "a1" |
+| SystemCommandExecutors.go:63:3:63:41 | call to Command | SystemCommandExecutors.go:63:35:63:40 | source |
+| SystemCommandExecutors.go:66:3:66:56 | call to Command | SystemCommandExecutors.go:66:16:66:20 | shell |
+| SystemCommandExecutors.go:66:3:66:56 | call to Command | SystemCommandExecutors.go:66:23:66:52 | call to append |
+| SystemCommandExecutors.go:69:3:69:114 | call to Command | SystemCommandExecutors.go:69:16:69:19 | "sh" |
+| SystemCommandExecutors.go:69:3:69:114 | call to Command | SystemCommandExecutors.go:69:22:69:25 | "-c" |
+| SystemCommandExecutors.go:69:3:69:114 | call to Command | SystemCommandExecutors.go:69:28:69:113 | ...+... |
+| SystemCommandExecutors.go:70:3:70:42 | call to Command | SystemCommandExecutors.go:70:16:70:21 | "sudo" |
+| SystemCommandExecutors.go:70:3:70:42 | call to Command | SystemCommandExecutors.go:70:24:70:27 | "sh" |
+| SystemCommandExecutors.go:70:3:70:42 | call to Command | SystemCommandExecutors.go:70:30:70:33 | "-c" |
+| SystemCommandExecutors.go:70:3:70:42 | call to Command | SystemCommandExecutors.go:70:36:70:41 | source |
+| SystemCommandExecutors.go:73:3:73:68 | call to Command | SystemCommandExecutors.go:73:16:73:21 | "ruby" |
+| SystemCommandExecutors.go:73:3:73:68 | call to Command | SystemCommandExecutors.go:73:24:73:27 | "-e" |
+| SystemCommandExecutors.go:73:3:73:68 | call to Command | SystemCommandExecutors.go:73:30:73:67 | call to Sprintf |
+| SystemCommandExecutors.go:74:3:74:80 | call to Command | SystemCommandExecutors.go:74:16:74:21 | "perl" |
+| SystemCommandExecutors.go:74:3:74:80 | call to Command | SystemCommandExecutors.go:74:24:74:27 | "-e" |
+| SystemCommandExecutors.go:74:3:74:80 | call to Command | SystemCommandExecutors.go:74:30:74:79 | call to Sprintf |
+| SystemCommandExecutors.go:75:3:75:86 | call to Command | SystemCommandExecutors.go:75:16:75:26 | "python2.7" |
+| SystemCommandExecutors.go:75:3:75:86 | call to Command | SystemCommandExecutors.go:75:29:75:32 | "-c" |
+| SystemCommandExecutors.go:75:3:75:86 | call to Command | SystemCommandExecutors.go:75:35:75:85 | call to Sprintf |
+| SystemCommandExecutors.go:76:3:76:87 | call to Command | SystemCommandExecutors.go:76:16:76:27 | "python3.6m" |
+| SystemCommandExecutors.go:76:3:76:87 | call to Command | SystemCommandExecutors.go:76:30:76:33 | "-c" |
+| SystemCommandExecutors.go:76:3:76:87 | call to Command | SystemCommandExecutors.go:76:36:76:86 | call to Sprintf |
+| SystemCommandExecutors.go:78:3:78:56 | call to Command | SystemCommandExecutors.go:78:16:78:33 | "python3.7-config" |
+| SystemCommandExecutors.go:79:3:79:44 | call to Command | SystemCommandExecutors.go:79:16:79:28 | "python3-pbr" |
+| SystemCommandExecutors.go:82:3:82:56 | call to Command | SystemCommandExecutors.go:82:16:82:20 | "ssh" |
+| SystemCommandExecutors.go:82:3:82:56 | call to Command | SystemCommandExecutors.go:82:23:82:26 | "-t" |
+| SystemCommandExecutors.go:82:3:82:56 | call to Command | SystemCommandExecutors.go:82:29:82:39 | "user@host" |
+| SystemCommandExecutors.go:82:3:82:56 | call to Command | SystemCommandExecutors.go:82:42:82:55 | ...+... |
+| SystemCommandExecutors.go:87:3:87:32 | call to CombinedOutput | SystemCommandExecutors.go:87:26:87:31 | source |
+| SystemCommandExecutors.go:88:3:88:24 | call to Output | SystemCommandExecutors.go:88:18:88:23 | source |
+| SystemCommandExecutors.go:89:3:89:21 | call to Run | SystemCommandExecutors.go:89:15:89:20 | source |
+| SystemCommandExecutors.go:90:3:90:23 | call to Start | SystemCommandExecutors.go:90:17:90:22 | source |
+| SystemCommandExecutors.go:94:3:94:83 | call to Command | SystemCommandExecutors.go:94:14:94:18 | shell |
+| SystemCommandExecutors.go:94:3:94:83 | call to Command | SystemCommandExecutors.go:94:21:94:79 | call to toInterfaceArray |
+| SystemCommandExecutors.go:95:3:95:101 | call to Call | SystemCommandExecutors.go:95:32:95:36 | shell |
+| SystemCommandExecutors.go:95:3:95:101 | call to Call | SystemCommandExecutors.go:95:39:95:97 | call to toInterfaceArray |
+| SystemCommandExecutors.go:96:3:96:104 | call to Command | SystemCommandExecutors.go:96:35:96:39 | shell |
+| SystemCommandExecutors.go:96:3:96:104 | call to Command | SystemCommandExecutors.go:96:42:96:100 | call to toInterfaceArray |
+| SystemCommandExecutors.go:100:3:100:60 | call to Exec | SystemCommandExecutors.go:100:16:100:21 | source |
+| SystemCommandExecutors.go:101:3:101:73 | call to ForkExec | SystemCommandExecutors.go:101:20:101:25 | source |
+| SystemCommandExecutors.go:102:3:102:77 | call to StartProcess | SystemCommandExecutors.go:102:24:102:29 | source |
+| SystemCommandExecutors.go:104:3:104:76 | call to StartProcess | SystemCommandExecutors.go:104:24:104:28 | shell |
+| SystemCommandExecutors.go:104:3:104:76 | call to StartProcess | SystemCommandExecutors.go:104:31:104:54 | composite literal |
+| SystemCommandExecutors.go:104:3:104:76 | call to StartProcess | SystemCommandExecutors.go:104:57:104:75 | &... |
diff --git a/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/SystemCommandExecutors.go b/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/SystemCommandExecutors.go
index 94f3748225b..e66a076249c 100644
--- a/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/SystemCommandExecutors.go
+++ b/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/SystemCommandExecutors.go
@@ -8,6 +8,7 @@ import (
"net/http"
"os"
"os/exec"
+ "syscall"
sh "github.com/codeskyblue/go-sh"
"golang.org/x/crypto/ssh"
@@ -94,6 +95,14 @@ func handler(w http.ResponseWriter, req *http.Request) {
sh.InteractiveSession().Call(shell, toInterfaceArray(append([]string{assumedNonShell}, source))...)
sh.InteractiveSession().Command(shell, toInterfaceArray(append([]string{assumedNonShell}, source))...)
}
+ // syscall
+ {
+ syscall.Exec(source, []string{"arg1", "arg2"}, []string{})
+ syscall.ForkExec(source, []string{"arg1", "arg2"}, &syscall.ProcAttr{})
+ syscall.StartProcess(source, []string{"arg1", "arg2"}, &syscall.ProcAttr{})
+
+ syscall.StartProcess(shell, []string{source, "arg2"}, &syscall.ProcAttr{})
+ }
}
func toInterfaceArray(strs []string) []interface{} {
res := make([]interface{}, 0)
From a357121e89b972ed0ae275a2efb7253af1745d87 Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Wed, 29 Apr 2020 19:17:24 +0300
Subject: [PATCH 020/157] Fix test by removing a unix-only func; add
windows-only funcs
---
ql/src/semmle/go/frameworks/SystemCommandExecutors.qll | 8 +++++++-
.../SystemCommandExecutors.expected | 9 ++++-----
.../SystemCommandExecutors/SystemCommandExecutors.go | 1 -
3 files changed, 11 insertions(+), 7 deletions(-)
diff --git a/ql/src/semmle/go/frameworks/SystemCommandExecutors.qll b/ql/src/semmle/go/frameworks/SystemCommandExecutors.qll
index 8ef7ce042c2..654fa9eb6dc 100644
--- a/ql/src/semmle/go/frameworks/SystemCommandExecutors.qll
+++ b/ql/src/semmle/go/frameworks/SystemCommandExecutors.qll
@@ -30,9 +30,15 @@ private class SystemCommandExecutors extends SystemCommandExecution::Range, Data
or
pkg = "os/exec" and name = "CommandContext" and cmdArg = 1
or
+ // NOTE: syscall.ForkExec exists only on unix.
+ // NOTE: syscall.CreateProcess and syscall.CreateProcessAsUser exist only on windows.
pkg = "syscall" and
- (name = "Exec" or name = "ForkExec" or name = "StartProcess") and
+ (name = "Exec" or name = "ForkExec" or name = "StartProcess" or name = "CreateProcess") and
cmdArg = 0
+ or
+ pkg = "syscall" and
+ name = "CreateProcessAsUser" and
+ cmdArg = 1
)
}
diff --git a/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/SystemCommandExecutors.expected b/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/SystemCommandExecutors.expected
index 0544bdd9dcc..619b92eebb8 100644
--- a/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/SystemCommandExecutors.expected
+++ b/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/SystemCommandExecutors.expected
@@ -54,8 +54,7 @@
| SystemCommandExecutors.go:96:3:96:104 | call to Command | SystemCommandExecutors.go:96:35:96:39 | shell |
| SystemCommandExecutors.go:96:3:96:104 | call to Command | SystemCommandExecutors.go:96:42:96:100 | call to toInterfaceArray |
| SystemCommandExecutors.go:100:3:100:60 | call to Exec | SystemCommandExecutors.go:100:16:100:21 | source |
-| SystemCommandExecutors.go:101:3:101:73 | call to ForkExec | SystemCommandExecutors.go:101:20:101:25 | source |
-| SystemCommandExecutors.go:102:3:102:77 | call to StartProcess | SystemCommandExecutors.go:102:24:102:29 | source |
-| SystemCommandExecutors.go:104:3:104:76 | call to StartProcess | SystemCommandExecutors.go:104:24:104:28 | shell |
-| SystemCommandExecutors.go:104:3:104:76 | call to StartProcess | SystemCommandExecutors.go:104:31:104:54 | composite literal |
-| SystemCommandExecutors.go:104:3:104:76 | call to StartProcess | SystemCommandExecutors.go:104:57:104:75 | &... |
+| SystemCommandExecutors.go:101:3:101:77 | call to StartProcess | SystemCommandExecutors.go:101:24:101:29 | source |
+| SystemCommandExecutors.go:103:3:103:76 | call to StartProcess | SystemCommandExecutors.go:103:24:103:28 | shell |
+| SystemCommandExecutors.go:103:3:103:76 | call to StartProcess | SystemCommandExecutors.go:103:31:103:54 | composite literal |
+| SystemCommandExecutors.go:103:3:103:76 | call to StartProcess | SystemCommandExecutors.go:103:57:103:75 | &... |
diff --git a/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/SystemCommandExecutors.go b/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/SystemCommandExecutors.go
index e66a076249c..2c184f3b00f 100644
--- a/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/SystemCommandExecutors.go
+++ b/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/SystemCommandExecutors.go
@@ -98,7 +98,6 @@ func handler(w http.ResponseWriter, req *http.Request) {
// syscall
{
syscall.Exec(source, []string{"arg1", "arg2"}, []string{})
- syscall.ForkExec(source, []string{"arg1", "arg2"}, &syscall.ProcAttr{})
syscall.StartProcess(source, []string{"arg1", "arg2"}, &syscall.ProcAttr{})
syscall.StartProcess(shell, []string{source, "arg2"}, &syscall.ProcAttr{})
From 74481c4bad66c7fd72584c2243eab9c82ca057de Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Mon, 6 Apr 2020 18:03:26 +0300
Subject: [PATCH 021/157] CWE-681: initial commit
---
.../CWE-681/IncorrectNumericConversion.go | 13 ++
.../CWE-681/IncorrectNumericConversion.qhelp | 48 +++++
.../CWE-681/IncorrectNumericConversion.ql | 141 ++++++++++++
.../CWE-681/IncorrectNumericConversionGood.go | 32 +++
.../IncorrectNumericConversion.expected | 43 ++++
.../CWE-681/IncorrectNumericConversion.go | 201 ++++++++++++++++++
.../CWE-681/IncorrectNumericConversion.qlref | 1 +
7 files changed, 479 insertions(+)
create mode 100644 ql/src/Security/CWE-681/IncorrectNumericConversion.go
create mode 100644 ql/src/Security/CWE-681/IncorrectNumericConversion.qhelp
create mode 100644 ql/src/Security/CWE-681/IncorrectNumericConversion.ql
create mode 100644 ql/src/Security/CWE-681/IncorrectNumericConversionGood.go
create mode 100644 ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.expected
create mode 100644 ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.go
create mode 100644 ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.qlref
diff --git a/ql/src/Security/CWE-681/IncorrectNumericConversion.go b/ql/src/Security/CWE-681/IncorrectNumericConversion.go
new file mode 100644
index 00000000000..11c343550ab
--- /dev/null
+++ b/ql/src/Security/CWE-681/IncorrectNumericConversion.go
@@ -0,0 +1,13 @@
+package main
+
+import (
+ "strconv"
+)
+
+func parseAllocate(wanted string) int32 {
+ parsed, err := strconv.Atoi(wanted)
+ if err != nil {
+ panic(err)
+ }
+ return int32(parsed)
+}
diff --git a/ql/src/Security/CWE-681/IncorrectNumericConversion.qhelp b/ql/src/Security/CWE-681/IncorrectNumericConversion.qhelp
new file mode 100644
index 00000000000..77dd3f1cfac
--- /dev/null
+++ b/ql/src/Security/CWE-681/IncorrectNumericConversion.qhelp
@@ -0,0 +1,48 @@
+
+
+
+
+
+If a numeric value string is parsed using strconv.Atoi into an int, and then that int
+is converted into another type of a lower bit size, the result can produce unexpected values.
+
+
+
+
+
+If you need to parse numeric values with specific bit sizes, use the functions specific to each
+type (strconv.ParseFloat, strconv.ParseInt, strconv.ParseUint)
+that also allow you to specify the desired bit size.
+
+
+If this is not possible, then add upper (and lower) bound checks specific to each type and
+bit size (you can find the min and max value for each type in the `math` package).
+
+
+
+
+
+In the following example, assume that an input string is passed to the parseAllocate function,
+parsed by strconv.Atoi, and then converted into an int32 type:
+
+
+
+The bounds are not checked, so this means that if the provided number is greater than max int32,
+the resulting value from the conversion will be different from the provided value.
+
+
+To avoid unexpected values, you should either use the other functions provided by the strconv
+package to parse the specific types and bit sizes; in this case, strconv.ParseInt as you
+can see in the parseAllocateGood2 function; or check bounds as in the parseAllocateGood1
+function.
+
+
+
+
+
+mitre.org: CWE-681: Incorrect Conversion between Numeric Types.
+
+
+
diff --git a/ql/src/Security/CWE-681/IncorrectNumericConversion.ql b/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
new file mode 100644
index 00000000000..90b74791d09
--- /dev/null
+++ b/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
@@ -0,0 +1,141 @@
+/**
+ * @name Incorrect Conversion between Numeric Types
+ * @description Converting the result of strconv.Atoi (which is of type `int`) to
+ * numeric types of lower bit size can produce unexpected values.
+ * @kind path-problem
+ * @id go/incorrect-numeric-conversion
+ * @tags security
+ * external/cwe/cwe-681
+ */
+
+import go
+import DataFlow::PathGraph
+
+class IntParser extends Function {
+ IntParser() { this.hasQualifiedName("strconv", "Atoi") }
+}
+
+class OverflowingConversionExpr extends ConversionExpr {
+ string conversionTypeName;
+
+ OverflowingConversionExpr() {
+ exists(ConversionExpr conv |
+ conversionTypeName = conv.getTypeExpr().getType().getUnderlyingType*().getName() and
+ (
+ // anything lower than int64:
+ conversionTypeName = "int8" or
+ conversionTypeName = "int16" or
+ conversionTypeName = "int32" or
+ // anything lower than uint64:
+ conversionTypeName = "uint8" or
+ conversionTypeName = "uint16" or
+ conversionTypeName = "uint32" or
+ // anything lower than float64:
+ conversionTypeName = "float32"
+ )
+ |
+ this = conv
+ )
+ }
+
+ string getTypeName() { result = conversionTypeName }
+}
+
+class IfRelationalComparison extends IfStmt {
+ IfRelationalComparison() {
+ this.getCond() instanceof RelationalComparisonExpr or this.getCond() instanceof LandExpr
+ }
+
+ RelationalComparisonExpr getComparison() { result = this.getCond().(RelationalComparisonExpr) }
+
+ LandExpr getLandExpr() { result = this.getCond().(LandExpr) }
+}
+
+class FlowConfig extends TaintTracking::Configuration, DataFlow::Configuration {
+ FlowConfig() { this = "FlowConfig" }
+
+ override predicate isSource(DataFlow::Node source) {
+ exists(IntParser atoi | source = atoi.getACall().getResult(0))
+ }
+
+ override predicate isSink(DataFlow::Node sink) {
+ exists(OverflowingConversionExpr conv | sink.asExpr() = conv)
+ }
+
+ override predicate isSanitizerIn(DataFlow::Node node) {
+ // If the conversion is inside an `if` block that compares the
+ // source as `source > 0`, then that sanitizes conversion of int64 to int32;
+ exists(IfRelationalComparison san, OverflowingConversionExpr conv |
+ conv = node.asExpr().(OverflowingConversionExpr) and
+ san.getThen().getAChild*() = conv and
+ (
+ conv.getTypeName() = "int32" and
+ san.getComparison().getLesserOperand().getNumericValue() = 0 and
+ san.getComparison().getGreaterOperand().getGlobalValueNumber() =
+ conv.getOperand().getGlobalValueNumber()
+ or
+ comparisonGreaterOperandIsEqualOrLess("int8", san, conv, getMaxInt8())
+ or
+ comparisonGreaterOperandIsEqualOrLess("int16", san, conv, getMaxInt16())
+ or
+ comparisonGreaterOperandIsEqualOrLess("int32", san, conv, getMaxInt32())
+ or
+ comparisonGreaterOperandIsEqualOrLess("uint8", san, conv, getMaxUint8())
+ or
+ comparisonGreaterOperandIsEqualOrLess("uint16", san, conv, getMaxUint16())
+ )
+ )
+ }
+}
+
+int getMaxInt8() {
+ result = 2.pow(7) - 1
+ // = 1<<7 - 1
+}
+
+int getMaxInt16() {
+ result = 2.pow(15) - 1
+ // = 1<<15 - 1
+}
+
+int getMaxInt32() {
+ result = 2.pow(31) - 1
+ // = 1<<31 - 1
+}
+
+int getMaxUint8() {
+ result = 2.pow(8) - 1
+ // = 1<<8 - 1
+}
+
+int getMaxUint16() {
+ result = 2.pow(16) - 1
+ // = 1<<16 - 1
+}
+
+predicate comparisonGreaterOperandIsEqualOrLess(
+ string typeName, IfRelationalComparison ifExpr, OverflowingConversionExpr conv, int value
+) {
+ conv.getTypeName() = typeName and
+ (
+ // exclude cases like: if parsed < math.MaxInt8 { int8(parsed)}
+ ifExpr.getComparison().getGreaterOperand().getNumericValue() = value and
+ // and lesser is the conversion operand:
+ ifExpr.getComparison().getLesserOperand().getGlobalValueNumber() =
+ conv.getOperand().getGlobalValueNumber()
+ or
+ // exclude cases like: if err == nil && parsed < math.MaxInt8 { int8(parsed)}
+ exists(RelationalComparisonExpr andExpr |
+ andExpr = ifExpr.getLandExpr().getAnOperand().(RelationalComparisonExpr)
+ |
+ andExpr.getGreaterOperand().getNumericValue() = value and
+ // and lesser is the conversion operand:
+ andExpr.getLesserOperand().getGlobalValueNumber() = conv.getOperand().getGlobalValueNumber()
+ )
+ )
+}
+
+from FlowConfig cfg, DataFlow::PathNode source, DataFlow::PathNode sink
+where cfg.hasFlowPath(source, sink)
+select source, source, sink,
+ "Incorrect type conversion of int from strconv.Atoi result to another numeric type"
diff --git a/ql/src/Security/CWE-681/IncorrectNumericConversionGood.go b/ql/src/Security/CWE-681/IncorrectNumericConversionGood.go
new file mode 100644
index 00000000000..003f82cd2a9
--- /dev/null
+++ b/ql/src/Security/CWE-681/IncorrectNumericConversionGood.go
@@ -0,0 +1,32 @@
+package main
+
+import (
+ "math"
+ "strconv"
+)
+
+func main() {
+
+}
+
+const DefaultAllocate int32 = 256
+
+func parseAllocateGood1(desired string) int32 {
+ parsed, err := strconv.Atoi(desired)
+ if err != nil {
+ return DefaultAllocate
+ }
+ // GOOD: check for lower and upper bounds
+ if parsed > 0 && parsed <= math.MaxInt32 {
+ return int32(parsed)
+ }
+ return DefaultAllocate
+}
+func parseAllocateGood2(desired string) int32 {
+ // GOOD: parse specifying the bit size
+ parsed, err := strconv.ParseInt(desired, 10, 32)
+ if err != nil {
+ return DefaultAllocate
+ }
+ return int32(parsed)
+}
diff --git a/ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.expected b/ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.expected
new file mode 100644
index 00000000000..03a09e927fa
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.expected
@@ -0,0 +1,43 @@
+edges
+| IncorrectNumericConversion.go:26:2:26:28 | ... := ...[0] : int | IncorrectNumericConversion.go:35:41:35:50 | type conversion |
+| IncorrectNumericConversion.go:54:3:54:36 | ... := ...[0] : int | IncorrectNumericConversion.go:58:7:58:18 | type conversion |
+| IncorrectNumericConversion.go:61:3:61:36 | ... := ...[0] : int | IncorrectNumericConversion.go:65:7:65:19 | type conversion |
+| IncorrectNumericConversion.go:68:3:68:36 | ... := ...[0] : int | IncorrectNumericConversion.go:72:7:72:19 | type conversion |
+| IncorrectNumericConversion.go:75:3:75:36 | ... := ...[0] : int | IncorrectNumericConversion.go:79:7:79:19 | type conversion |
+| IncorrectNumericConversion.go:82:3:82:36 | ... := ...[0] : int | IncorrectNumericConversion.go:86:7:86:20 | type conversion |
+| IncorrectNumericConversion.go:89:3:89:36 | ... := ...[0] : int | IncorrectNumericConversion.go:93:7:93:20 | type conversion |
+| IncorrectNumericConversion.go:96:3:96:36 | ... := ...[0] : int | IncorrectNumericConversion.go:100:7:100:21 | type conversion |
+| IncorrectNumericConversion.go:103:3:103:36 | ... := ...[0] : int | IncorrectNumericConversion.go:108:7:108:18 | type conversion |
+| IncorrectNumericConversion.go:112:3:112:36 | ... := ...[0] : int | IncorrectNumericConversion.go:116:7:116:23 | type conversion |
+nodes
+| IncorrectNumericConversion.go:26:2:26:28 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:35:41:35:50 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:54:3:54:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:58:7:58:18 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:61:3:61:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:65:7:65:19 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:68:3:68:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:72:7:72:19 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:75:3:75:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:79:7:79:19 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:82:3:82:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:86:7:86:20 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:89:3:89:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:93:7:93:20 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:96:3:96:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:100:7:100:21 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:103:3:103:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:108:7:108:18 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:112:3:112:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:116:7:116:23 | type conversion | semmle.label | type conversion |
+#select
+| IncorrectNumericConversion.go:26:2:26:28 | ... := ...[0] : int | IncorrectNumericConversion.go:26:2:26:28 | ... := ...[0] : int | IncorrectNumericConversion.go:35:41:35:50 | type conversion | Incorrect type conversion of int from strconv.Atoi result to another numeric type |
+| IncorrectNumericConversion.go:54:3:54:36 | ... := ...[0] : int | IncorrectNumericConversion.go:54:3:54:36 | ... := ...[0] : int | IncorrectNumericConversion.go:58:7:58:18 | type conversion | Incorrect type conversion of int from strconv.Atoi result to another numeric type |
+| IncorrectNumericConversion.go:61:3:61:36 | ... := ...[0] : int | IncorrectNumericConversion.go:61:3:61:36 | ... := ...[0] : int | IncorrectNumericConversion.go:65:7:65:19 | type conversion | Incorrect type conversion of int from strconv.Atoi result to another numeric type |
+| IncorrectNumericConversion.go:68:3:68:36 | ... := ...[0] : int | IncorrectNumericConversion.go:68:3:68:36 | ... := ...[0] : int | IncorrectNumericConversion.go:72:7:72:19 | type conversion | Incorrect type conversion of int from strconv.Atoi result to another numeric type |
+| IncorrectNumericConversion.go:75:3:75:36 | ... := ...[0] : int | IncorrectNumericConversion.go:75:3:75:36 | ... := ...[0] : int | IncorrectNumericConversion.go:79:7:79:19 | type conversion | Incorrect type conversion of int from strconv.Atoi result to another numeric type |
+| IncorrectNumericConversion.go:82:3:82:36 | ... := ...[0] : int | IncorrectNumericConversion.go:82:3:82:36 | ... := ...[0] : int | IncorrectNumericConversion.go:86:7:86:20 | type conversion | Incorrect type conversion of int from strconv.Atoi result to another numeric type |
+| IncorrectNumericConversion.go:89:3:89:36 | ... := ...[0] : int | IncorrectNumericConversion.go:89:3:89:36 | ... := ...[0] : int | IncorrectNumericConversion.go:93:7:93:20 | type conversion | Incorrect type conversion of int from strconv.Atoi result to another numeric type |
+| IncorrectNumericConversion.go:96:3:96:36 | ... := ...[0] : int | IncorrectNumericConversion.go:96:3:96:36 | ... := ...[0] : int | IncorrectNumericConversion.go:100:7:100:21 | type conversion | Incorrect type conversion of int from strconv.Atoi result to another numeric type |
+| IncorrectNumericConversion.go:103:3:103:36 | ... := ...[0] : int | IncorrectNumericConversion.go:103:3:103:36 | ... := ...[0] : int | IncorrectNumericConversion.go:108:7:108:18 | type conversion | Incorrect type conversion of int from strconv.Atoi result to another numeric type |
+| IncorrectNumericConversion.go:112:3:112:36 | ... := ...[0] : int | IncorrectNumericConversion.go:112:3:112:36 | ... := ...[0] : int | IncorrectNumericConversion.go:116:7:116:23 | type conversion | Incorrect type conversion of int from strconv.Atoi result to another numeric type |
diff --git a/ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.go b/ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.go
new file mode 100644
index 00000000000..a437bd70924
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.go
@@ -0,0 +1,201 @@
+package main
+
+import (
+ "math"
+ "strconv"
+)
+
+func main() {
+
+}
+
+type Something struct {
+}
+type Config struct {
+}
+type Registry struct {
+}
+
+func LookupTarget(conf *Config, num int32) (int32, error) {
+ return 567, nil
+}
+func LookupNumberByName(reg *Registry, name string) (int32, error) {
+ return 567, nil
+}
+func lab(s string) (*Something, error) {
+ num, err := strconv.Atoi(s)
+
+ if err != nil {
+ number, err := LookupNumberByName(&Registry{}, s)
+ if err != nil {
+ return nil, err
+ }
+ num = int(number)
+ }
+ target, err := LookupTarget(&Config{}, int32(num))
+ if err != nil {
+ return nil, err
+ }
+
+ // convert the resolved target number back to a string
+
+ s = strconv.Itoa(int(target))
+
+ return nil, nil
+}
+
+const CustomMaxInt16 = 1<<15 - 1
+
+type CustomInt int16
+
+// these should be caught:
+func upperBoundIsNOTChecked(input string) {
+ {
+ parsed, err := strconv.Atoi(input)
+ if err != nil {
+ panic(err)
+ }
+ _ = int8(parsed)
+ }
+ {
+ parsed, err := strconv.Atoi(input)
+ if err != nil {
+ panic(err)
+ }
+ _ = int16(parsed)
+ }
+ {
+ parsed, err := strconv.Atoi(input)
+ if err != nil {
+ panic(err)
+ }
+ _ = int32(parsed)
+ }
+ {
+ parsed, err := strconv.Atoi(input)
+ if err != nil {
+ panic(err)
+ }
+ _ = uint8(parsed)
+ }
+ {
+ parsed, err := strconv.Atoi(input)
+ if err != nil {
+ panic(err)
+ }
+ _ = uint16(parsed)
+ }
+ {
+ parsed, err := strconv.Atoi(input)
+ if err != nil {
+ panic(err)
+ }
+ _ = uint32(parsed)
+ }
+ {
+ parsed, err := strconv.Atoi(input)
+ if err != nil {
+ panic(err)
+ }
+ _ = float32(parsed)
+ }
+ {
+ parsed, err := strconv.Atoi(input)
+ if err != nil {
+ panic(err)
+ }
+ // NOTE: byte is uint8
+ _ = byte(parsed)
+ }
+ {
+ // using custom type:
+ parsed, err := strconv.Atoi(input)
+ if err != nil {
+ panic(err)
+ }
+ _ = CustomInt(parsed)
+ }
+
+}
+
+// these should NOT be caught:
+func upperBoundIsChecked(input string) {
+ {
+ parsed, err := strconv.Atoi(input)
+ if err != nil {
+ panic(err)
+ }
+ if parsed < math.MaxInt8 {
+ _ = int8(parsed)
+ }
+ }
+ {
+ parsed, err := strconv.Atoi(input)
+ if err != nil {
+ panic(err)
+ }
+ if parsed < math.MaxInt16 {
+ _ = int16(parsed)
+ }
+ }
+ {
+ parsed, err := strconv.Atoi(input)
+ if err != nil {
+ panic(err)
+ }
+ if parsed > 0 {
+ _ = int32(parsed)
+ }
+ }
+ {
+ parsed, err := strconv.Atoi(input)
+ if err != nil {
+ panic(err)
+ }
+ if parsed < math.MaxInt32 {
+ _ = int32(parsed)
+ }
+ }
+ {
+ parsed, err := strconv.Atoi(input)
+ if err != nil {
+ panic(err)
+ }
+ if parsed < math.MaxUint8 {
+ _ = uint8(parsed)
+ }
+ }
+ {
+ parsed, err := strconv.Atoi(input)
+ if err != nil {
+ panic(err)
+ }
+ if parsed < math.MaxUint16 {
+ _ = uint16(parsed)
+ }
+ }
+ {
+ parsed, err := strconv.Atoi(input)
+ if err != nil {
+ panic(err)
+ }
+ if parsed < math.MaxUint8 {
+ _ = byte(parsed)
+ }
+ }
+ { // multiple `and` conditions
+ parsed, err := strconv.Atoi(input)
+ if err == nil && 1 == 1 && parsed < math.MaxInt8 {
+ _ = int8(parsed)
+ }
+ }
+ { // custom maxInt16
+ parsed, err := strconv.Atoi(input)
+ if err != nil {
+ panic(err)
+ }
+ if parsed < CustomMaxInt16 {
+ _ = int16(parsed)
+ }
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.qlref b/ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.qlref
new file mode 100644
index 00000000000..7e678611eb2
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.qlref
@@ -0,0 +1 @@
+Security/CWE-681/IncorrectNumericConversion.ql
\ No newline at end of file
From 6d2c5be196b2409950cf05be3dd84b50b21a1741 Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Mon, 6 Apr 2020 18:08:52 +0300
Subject: [PATCH 022/157] rename OverflowingConversionExpr to
NumericConversionExpr
---
.../Security/CWE-681/IncorrectNumericConversion.ql | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/ql/src/Security/CWE-681/IncorrectNumericConversion.ql b/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
index 90b74791d09..4e7f714f201 100644
--- a/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
+++ b/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
@@ -15,10 +15,10 @@ class IntParser extends Function {
IntParser() { this.hasQualifiedName("strconv", "Atoi") }
}
-class OverflowingConversionExpr extends ConversionExpr {
+class NumericConversionExpr extends ConversionExpr {
string conversionTypeName;
- OverflowingConversionExpr() {
+ NumericConversionExpr() {
exists(ConversionExpr conv |
conversionTypeName = conv.getTypeExpr().getType().getUnderlyingType*().getName() and
(
@@ -59,14 +59,14 @@ class FlowConfig extends TaintTracking::Configuration, DataFlow::Configuration {
}
override predicate isSink(DataFlow::Node sink) {
- exists(OverflowingConversionExpr conv | sink.asExpr() = conv)
+ exists(NumericConversionExpr conv | sink.asExpr() = conv)
}
override predicate isSanitizerIn(DataFlow::Node node) {
// If the conversion is inside an `if` block that compares the
// source as `source > 0`, then that sanitizes conversion of int64 to int32;
- exists(IfRelationalComparison san, OverflowingConversionExpr conv |
- conv = node.asExpr().(OverflowingConversionExpr) and
+ exists(IfRelationalComparison san, NumericConversionExpr conv |
+ conv = node.asExpr().(NumericConversionExpr) and
san.getThen().getAChild*() = conv and
(
conv.getTypeName() = "int32" and
@@ -114,7 +114,7 @@ int getMaxUint16() {
}
predicate comparisonGreaterOperandIsEqualOrLess(
- string typeName, IfRelationalComparison ifExpr, OverflowingConversionExpr conv, int value
+ string typeName, IfRelationalComparison ifExpr, NumericConversionExpr conv, int value
) {
conv.getTypeName() = typeName and
(
From fd9e3a005e82911c629898a09aa1cc8e15bc75de Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Mon, 6 Apr 2020 21:40:29 +0300
Subject: [PATCH 023/157] fix comments
---
ql/src/Security/CWE-681/IncorrectNumericConversion.ql | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/ql/src/Security/CWE-681/IncorrectNumericConversion.ql b/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
index 4e7f714f201..ffb2e0431b4 100644
--- a/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
+++ b/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
@@ -118,13 +118,13 @@ predicate comparisonGreaterOperandIsEqualOrLess(
) {
conv.getTypeName() = typeName and
(
- // exclude cases like: if parsed < math.MaxInt8 { int8(parsed)}
+ // exclude cases like: if parsed < math.MaxInt8 {return int8(parsed)}
ifExpr.getComparison().getGreaterOperand().getNumericValue() = value and
// and lesser is the conversion operand:
ifExpr.getComparison().getLesserOperand().getGlobalValueNumber() =
conv.getOperand().getGlobalValueNumber()
or
- // exclude cases like: if err == nil && parsed < math.MaxInt8 { int8(parsed)}
+ // exclude cases like: if err == nil && parsed < math.MaxInt8 {return int8(parsed)}
exists(RelationalComparisonExpr andExpr |
andExpr = ifExpr.getLandExpr().getAnOperand().(RelationalComparisonExpr)
|
From 8ecc2b95237e16b1d739bebc147538f69415bec3 Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Mon, 6 Apr 2020 21:48:34 +0300
Subject: [PATCH 024/157] add comments, improve naming, refactor
---
.../CWE-681/IncorrectNumericConversion.ql | 27 ++++++++++---------
1 file changed, 15 insertions(+), 12 deletions(-)
diff --git a/ql/src/Security/CWE-681/IncorrectNumericConversion.ql b/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
index ffb2e0431b4..ba7bb5b76c9 100644
--- a/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
+++ b/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
@@ -63,26 +63,26 @@ class FlowConfig extends TaintTracking::Configuration, DataFlow::Configuration {
}
override predicate isSanitizerIn(DataFlow::Node node) {
- // If the conversion is inside an `if` block that compares the
- // source as `source > 0`, then that sanitizes conversion of int64 to int32;
exists(IfRelationalComparison san, NumericConversionExpr conv |
conv = node.asExpr().(NumericConversionExpr) and
san.getThen().getAChild*() = conv and
(
+ // If the conversion is inside an `if` block that compares the source as
+ // `source > 0` or `source >= 0`, then that sanitizes conversion of int to int32;
conv.getTypeName() = "int32" and
san.getComparison().getLesserOperand().getNumericValue() = 0 and
san.getComparison().getGreaterOperand().getGlobalValueNumber() =
conv.getOperand().getGlobalValueNumber()
or
- comparisonGreaterOperandIsEqualOrLess("int8", san, conv, getMaxInt8())
+ comparisonGreaterOperandValueIsEqualOrLess("int8", san, conv, getMaxInt8())
or
- comparisonGreaterOperandIsEqualOrLess("int16", san, conv, getMaxInt16())
+ comparisonGreaterOperandValueIsEqualOrLess("int16", san, conv, getMaxInt16())
or
- comparisonGreaterOperandIsEqualOrLess("int32", san, conv, getMaxInt32())
+ comparisonGreaterOperandValueIsEqualOrLess("int32", san, conv, getMaxInt32())
or
- comparisonGreaterOperandIsEqualOrLess("uint8", san, conv, getMaxUint8())
+ comparisonGreaterOperandValueIsEqualOrLess("uint8", san, conv, getMaxUint8())
or
- comparisonGreaterOperandIsEqualOrLess("uint16", san, conv, getMaxUint16())
+ comparisonGreaterOperandValueIsEqualOrLess("uint16", san, conv, getMaxUint16())
)
)
}
@@ -113,21 +113,24 @@ int getMaxUint16() {
// = 1<<16 - 1
}
-predicate comparisonGreaterOperandIsEqualOrLess(
+predicate comparisonGreaterOperandValueIsEqualOrLess(
string typeName, IfRelationalComparison ifExpr, NumericConversionExpr conv, int value
) {
conv.getTypeName() = typeName and
(
// exclude cases like: if parsed < math.MaxInt8 {return int8(parsed)}
- ifExpr.getComparison().getGreaterOperand().getNumericValue() = value and
- // and lesser is the conversion operand:
- ifExpr.getComparison().getLesserOperand().getGlobalValueNumber() =
- conv.getOperand().getGlobalValueNumber()
+ exists(RelationalComparisonExpr comp | comp = ifExpr.getComparison() |
+ // greater operand is equal to value:
+ comp.getGreaterOperand().getNumericValue() = value and
+ // and lesser is the conversion operand:
+ comp.getLesserOperand().getGlobalValueNumber() = conv.getOperand().getGlobalValueNumber()
+ )
or
// exclude cases like: if err == nil && parsed < math.MaxInt8 {return int8(parsed)}
exists(RelationalComparisonExpr andExpr |
andExpr = ifExpr.getLandExpr().getAnOperand().(RelationalComparisonExpr)
|
+ // greater operand is equal to value:
andExpr.getGreaterOperand().getNumericValue() = value and
// and lesser is the conversion operand:
andExpr.getLesserOperand().getGlobalValueNumber() = conv.getOperand().getGlobalValueNumber()
From b176c4ad19b1843d57d173b2d6b7df837112ef9b Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Tue, 7 Apr 2020 13:45:13 +0300
Subject: [PATCH 025/157] Add ParseFloat, ParseInt, ParseUint
---
.../CWE-681/IncorrectNumericConversion.ql | 182 ++++++++++++++----
1 file changed, 149 insertions(+), 33 deletions(-)
diff --git a/ql/src/Security/CWE-681/IncorrectNumericConversion.ql b/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
index ba7bb5b76c9..d19bf2332bb 100644
--- a/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
+++ b/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
@@ -11,14 +11,26 @@
import go
import DataFlow::PathGraph
-class IntParser extends Function {
- IntParser() { this.hasQualifiedName("strconv", "Atoi") }
+class Atoi extends Function {
+ Atoi() { this.hasQualifiedName("strconv", "Atoi") }
}
-class NumericConversionExpr extends ConversionExpr {
+class ParseFloat extends Function {
+ ParseFloat() { this.hasQualifiedName("strconv", "ParseFloat") }
+}
+
+class ParseInt extends Function {
+ ParseInt() { this.hasQualifiedName("strconv", "ParseInt") }
+}
+
+class ParseUint extends Function {
+ ParseUint() { this.hasQualifiedName("strconv", "ParseUint") }
+}
+
+class Lte32BitNumericConversionExpr extends ConversionExpr {
string conversionTypeName;
- NumericConversionExpr() {
+ Lte32BitNumericConversionExpr() {
exists(ConversionExpr conv |
conversionTypeName = conv.getTypeExpr().getType().getUnderlyingType*().getName() and
(
@@ -41,6 +53,33 @@ class NumericConversionExpr extends ConversionExpr {
string getTypeName() { result = conversionTypeName }
}
+class Lte16BitNumericConversionExpr extends Lte32BitNumericConversionExpr {
+ Lte16BitNumericConversionExpr() {
+ conversionTypeName = this.getTypeName() and
+ (
+ // anything lower than int32:
+ conversionTypeName = "int8" or
+ conversionTypeName = "int16" or
+ // anything lower than uint32:
+ conversionTypeName = "uint8" or
+ conversionTypeName = "uint16"
+ )
+ }
+}
+
+class Lte8BitNumericConversionExpr extends Lte16BitNumericConversionExpr {
+ Lte8BitNumericConversionExpr() {
+ conversionTypeName = this.getTypeName() and
+ (
+ // anything lower than int16:
+ conversionTypeName = "int8"
+ or
+ // anything lower than uint16:
+ conversionTypeName = "uint8"
+ )
+ }
+}
+
class IfRelationalComparison extends IfStmt {
IfRelationalComparison() {
this.getCond() instanceof RelationalComparisonExpr or this.getCond() instanceof LandExpr
@@ -51,41 +90,115 @@ class IfRelationalComparison extends IfStmt {
LandExpr getLandExpr() { result = this.getCond().(LandExpr) }
}
-class FlowConfig extends TaintTracking::Configuration, DataFlow::Configuration {
- FlowConfig() { this = "FlowConfig" }
+class Lte64FlowConfig extends TaintTracking::Configuration, DataFlow::Configuration {
+ Lte64FlowConfig() { this = "Lte64FlowConfig" }
override predicate isSource(DataFlow::Node source) {
- exists(IntParser atoi | source = atoi.getACall().getResult(0))
+ exists(Atoi atoi | source = atoi.getACall().getResult(0))
+ or
+ exists(ParseFloat parseFloat, DataFlow::CallNode call |
+ call = parseFloat.getACall() and call.getArgument(1).getIntValue() = [64]
+ |
+ source = call.getResult(0)
+ )
+ or
+ exists(ParseInt parseInt, DataFlow::CallNode call |
+ call = parseInt.getACall() and call.getArgument(2).getIntValue() = [0, 64]
+ |
+ source = call.getResult(0)
+ )
+ or
+ exists(ParseUint parseUint, DataFlow::CallNode call |
+ call = parseUint.getACall() and call.getArgument(2).getIntValue() = [0, 64]
+ |
+ source = call.getResult(0)
+ )
}
override predicate isSink(DataFlow::Node sink) {
- exists(NumericConversionExpr conv | sink.asExpr() = conv)
+ exists(Lte32BitNumericConversionExpr conv | sink.asExpr() = conv)
}
- override predicate isSanitizerIn(DataFlow::Node node) {
- exists(IfRelationalComparison san, NumericConversionExpr conv |
- conv = node.asExpr().(NumericConversionExpr) and
- san.getThen().getAChild*() = conv and
- (
- // If the conversion is inside an `if` block that compares the source as
- // `source > 0` or `source >= 0`, then that sanitizes conversion of int to int32;
- conv.getTypeName() = "int32" and
- san.getComparison().getLesserOperand().getNumericValue() = 0 and
- san.getComparison().getGreaterOperand().getGlobalValueNumber() =
- conv.getOperand().getGlobalValueNumber()
- or
- comparisonGreaterOperandValueIsEqualOrLess("int8", san, conv, getMaxInt8())
- or
- comparisonGreaterOperandValueIsEqualOrLess("int16", san, conv, getMaxInt16())
- or
- comparisonGreaterOperandValueIsEqualOrLess("int32", san, conv, getMaxInt32())
- or
- comparisonGreaterOperandValueIsEqualOrLess("uint8", san, conv, getMaxUint8())
- or
- comparisonGreaterOperandValueIsEqualOrLess("uint16", san, conv, getMaxUint16())
- )
+ override predicate isSanitizerIn(DataFlow::Node node) { isSanitizedInsideAnIfBoundCheck(node) }
+}
+
+class Lte32FlowConfig extends TaintTracking::Configuration, DataFlow::Configuration {
+ Lte32FlowConfig() { this = "Lte32FlowConfig" }
+
+ override predicate isSource(DataFlow::Node source) {
+ exists(ParseFloat parseFloat, DataFlow::CallNode call |
+ call = parseFloat.getACall() and call.getArgument(1).getIntValue() = [32]
+ |
+ source = call.getResult(0)
+ )
+ or
+ exists(ParseInt parseInt, DataFlow::CallNode call |
+ call = parseInt.getACall() and call.getArgument(2).getIntValue() = [32]
+ |
+ source = call.getResult(0)
+ )
+ or
+ exists(ParseUint parseUint, DataFlow::CallNode call |
+ call = parseUint.getACall() and call.getArgument(2).getIntValue() = [32]
+ |
+ source = call.getResult(0)
)
}
+
+ override predicate isSink(DataFlow::Node sink) {
+ exists(Lte16BitNumericConversionExpr conv | sink.asExpr() = conv)
+ }
+
+ override predicate isSanitizerIn(DataFlow::Node node) { isSanitizedInsideAnIfBoundCheck(node) }
+}
+
+class Lte16FlowConfig extends TaintTracking::Configuration, DataFlow::Configuration {
+ Lte16FlowConfig() { this = "Lte16FlowConfig" }
+
+ override predicate isSource(DataFlow::Node source) {
+ exists(ParseInt parseInt, DataFlow::CallNode call |
+ call = parseInt.getACall() and call.getArgument(2).getIntValue() = [16]
+ |
+ source = call.getResult(0)
+ )
+ or
+ exists(ParseUint parseUint, DataFlow::CallNode call |
+ call = parseUint.getACall() and call.getArgument(2).getIntValue() = [16]
+ |
+ source = call.getResult(0)
+ )
+ }
+
+ override predicate isSink(DataFlow::Node sink) {
+ exists(Lte8BitNumericConversionExpr conv | sink.asExpr() = conv)
+ }
+
+ override predicate isSanitizerIn(DataFlow::Node node) { isSanitizedInsideAnIfBoundCheck(node) }
+}
+
+predicate isSanitizedInsideAnIfBoundCheck(DataFlow::Node node) {
+ exists(IfRelationalComparison san, Lte32BitNumericConversionExpr conv |
+ conv = node.asExpr().(Lte32BitNumericConversionExpr) and
+ san.getThen().getAChild*() = conv and
+ (
+ // If the conversion is inside an `if` block that compares the source as
+ // `source > 0` or `source >= 0`, then that sanitizes conversion of int to int32;
+ conv.getTypeName() = "int32" and
+ san.getComparison().getLesserOperand().getNumericValue() = 0 and
+ san.getComparison().getGreaterOperand().getGlobalValueNumber() =
+ conv.getOperand().getGlobalValueNumber()
+ or
+ comparisonGreaterOperandValueIsEqualOrLess("int8", san, conv, getMaxInt8())
+ or
+ comparisonGreaterOperandValueIsEqualOrLess("int16", san, conv, getMaxInt16())
+ or
+ comparisonGreaterOperandValueIsEqualOrLess("int32", san, conv, getMaxInt32())
+ or
+ comparisonGreaterOperandValueIsEqualOrLess("uint8", san, conv, getMaxUint8())
+ or
+ comparisonGreaterOperandValueIsEqualOrLess("uint16", san, conv, getMaxUint16())
+ )
+ )
}
int getMaxInt8() {
@@ -114,7 +227,7 @@ int getMaxUint16() {
}
predicate comparisonGreaterOperandValueIsEqualOrLess(
- string typeName, IfRelationalComparison ifExpr, NumericConversionExpr conv, int value
+ string typeName, IfRelationalComparison ifExpr, Lte32BitNumericConversionExpr conv, int value
) {
conv.getTypeName() = typeName and
(
@@ -138,7 +251,10 @@ predicate comparisonGreaterOperandValueIsEqualOrLess(
)
}
-from FlowConfig cfg, DataFlow::PathNode source, DataFlow::PathNode sink
-where cfg.hasFlowPath(source, sink)
+from DataFlow::PathNode source, DataFlow::PathNode sink
+where
+ exists(Lte64FlowConfig cfg | cfg.hasFlowPath(source, sink)) or
+ exists(Lte32FlowConfig cfg | cfg.hasFlowPath(source, sink)) or
+ exists(Lte16FlowConfig cfg | cfg.hasFlowPath(source, sink))
select source, source, sink,
"Incorrect type conversion of int from strconv.Atoi result to another numeric type"
From 05314a19eee9070fb22638213fdfdc1618258c89 Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Tue, 7 Apr 2020 16:01:31 +0300
Subject: [PATCH 026/157] Add comments, improve naming
---
.../CWE-681/IncorrectNumericConversion.ql | 113 ++++++++++--------
1 file changed, 65 insertions(+), 48 deletions(-)
diff --git a/ql/src/Security/CWE-681/IncorrectNumericConversion.ql b/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
index d19bf2332bb..25b7ca43799 100644
--- a/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
+++ b/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
@@ -1,7 +1,7 @@
/**
* @name Incorrect Conversion between Numeric Types
- * @description Converting the result of strconv.Atoi (which is of type `int`) to
- * numeric types of lower bit size can produce unexpected values.
+ * @description Converting the result of strconv.Atoi (and other parsers from strconv package)
+ * to numeric types of lower bit size can produce unexpected values.
* @kind path-problem
* @id go/incorrect-numeric-conversion
* @tags security
@@ -27,6 +27,10 @@ class ParseUint extends Function {
ParseUint() { this.hasQualifiedName("strconv", "ParseUint") }
}
+/**
+ * A type conversion expression towards a numeric type that has
+ * a bit size equal to or lower than 32 bits.
+ */
class Lte32BitNumericConversionExpr extends ConversionExpr {
string conversionTypeName;
@@ -35,13 +39,11 @@ class Lte32BitNumericConversionExpr extends ConversionExpr {
conversionTypeName = conv.getTypeExpr().getType().getUnderlyingType*().getName() and
(
// anything lower than int64:
- conversionTypeName = "int8" or
- conversionTypeName = "int16" or
- conversionTypeName = "int32" or
+ conversionTypeName = ["int8", "int16", "int32"]
+ or
// anything lower than uint64:
- conversionTypeName = "uint8" or
- conversionTypeName = "uint16" or
- conversionTypeName = "uint32" or
+ conversionTypeName = ["uint8", "uint16", "uint32"]
+ or
// anything lower than float64:
conversionTypeName = "float32"
)
@@ -53,20 +55,27 @@ class Lte32BitNumericConversionExpr extends ConversionExpr {
string getTypeName() { result = conversionTypeName }
}
+/**
+ * A type conversion expression towards a numeric type that has
+ * a bit size equal to or lower than 16 bits.
+ */
class Lte16BitNumericConversionExpr extends Lte32BitNumericConversionExpr {
Lte16BitNumericConversionExpr() {
conversionTypeName = this.getTypeName() and
(
// anything lower than int32:
- conversionTypeName = "int8" or
- conversionTypeName = "int16" or
+ conversionTypeName = ["int8", "int16"]
+ or
// anything lower than uint32:
- conversionTypeName = "uint8" or
- conversionTypeName = "uint16"
+ conversionTypeName = ["uint8", "uint16"]
)
}
}
+/**
+ * A type conversion expression towards a numeric type that has
+ * a bit size equal to 8 bits.
+ */
class Lte8BitNumericConversionExpr extends Lte16BitNumericConversionExpr {
Lte8BitNumericConversionExpr() {
conversionTypeName = this.getTypeName() and
@@ -80,6 +89,10 @@ class Lte8BitNumericConversionExpr extends Lte16BitNumericConversionExpr {
}
}
+/**
+ * An `if` statement with the condition being either a relational comparison,
+ * or one or more `&&`.
+ */
class IfRelationalComparison extends IfStmt {
IfRelationalComparison() {
this.getCond() instanceof RelationalComparisonExpr or this.getCond() instanceof LandExpr
@@ -90,6 +103,9 @@ class IfRelationalComparison extends IfStmt {
LandExpr getLandExpr() { result = this.getCond().(LandExpr) }
}
+/**
+ * Flow of result of parsing a 64 bit number, to conversion to lower bit numbers.
+ */
class Lte64FlowConfig extends TaintTracking::Configuration, DataFlow::Configuration {
Lte64FlowConfig() { this = "Lte64FlowConfig" }
@@ -97,7 +113,7 @@ class Lte64FlowConfig extends TaintTracking::Configuration, DataFlow::Configurat
exists(Atoi atoi | source = atoi.getACall().getResult(0))
or
exists(ParseFloat parseFloat, DataFlow::CallNode call |
- call = parseFloat.getACall() and call.getArgument(1).getIntValue() = [64]
+ call = parseFloat.getACall() and call.getArgument(1).getIntValue() = 64
|
source = call.getResult(0)
)
@@ -122,24 +138,27 @@ class Lte64FlowConfig extends TaintTracking::Configuration, DataFlow::Configurat
override predicate isSanitizerIn(DataFlow::Node node) { isSanitizedInsideAnIfBoundCheck(node) }
}
+/**
+ * Flow of result of parsing a 32 bit number, to conversion to lower bit numbers.
+ */
class Lte32FlowConfig extends TaintTracking::Configuration, DataFlow::Configuration {
Lte32FlowConfig() { this = "Lte32FlowConfig" }
override predicate isSource(DataFlow::Node source) {
exists(ParseFloat parseFloat, DataFlow::CallNode call |
- call = parseFloat.getACall() and call.getArgument(1).getIntValue() = [32]
+ call = parseFloat.getACall() and call.getArgument(1).getIntValue() = 32
|
source = call.getResult(0)
)
or
exists(ParseInt parseInt, DataFlow::CallNode call |
- call = parseInt.getACall() and call.getArgument(2).getIntValue() = [32]
+ call = parseInt.getACall() and call.getArgument(2).getIntValue() = 32
|
source = call.getResult(0)
)
or
exists(ParseUint parseUint, DataFlow::CallNode call |
- call = parseUint.getACall() and call.getArgument(2).getIntValue() = [32]
+ call = parseUint.getACall() and call.getArgument(2).getIntValue() = 32
|
source = call.getResult(0)
)
@@ -152,18 +171,21 @@ class Lte32FlowConfig extends TaintTracking::Configuration, DataFlow::Configurat
override predicate isSanitizerIn(DataFlow::Node node) { isSanitizedInsideAnIfBoundCheck(node) }
}
+/**
+ * Flow of result of parsing a 16 bit number, to conversion to lower bit numbers.
+ */
class Lte16FlowConfig extends TaintTracking::Configuration, DataFlow::Configuration {
Lte16FlowConfig() { this = "Lte16FlowConfig" }
override predicate isSource(DataFlow::Node source) {
exists(ParseInt parseInt, DataFlow::CallNode call |
- call = parseInt.getACall() and call.getArgument(2).getIntValue() = [16]
+ call = parseInt.getACall() and call.getArgument(2).getIntValue() = 16
|
source = call.getResult(0)
)
or
exists(ParseUint parseUint, DataFlow::CallNode call |
- call = parseUint.getACall() and call.getArgument(2).getIntValue() = [16]
+ call = parseUint.getACall() and call.getArgument(2).getIntValue() = 16
|
source = call.getResult(0)
)
@@ -176,57 +198,52 @@ class Lte16FlowConfig extends TaintTracking::Configuration, DataFlow::Configurat
override predicate isSanitizerIn(DataFlow::Node node) { isSanitizedInsideAnIfBoundCheck(node) }
}
+/**
+ * Holds if the node is a numeric conversion inside an `if` body, where
+ * the `if` condition contains an upper bound check on the conversion operand.
+ */
predicate isSanitizedInsideAnIfBoundCheck(DataFlow::Node node) {
- exists(IfRelationalComparison san, Lte32BitNumericConversionExpr conv |
+ exists(IfRelationalComparison comp, Lte32BitNumericConversionExpr conv |
+ // NOTE: using Lte32BitNumericConversionExpr because it also catches
+ // any lower bit conversions.
conv = node.asExpr().(Lte32BitNumericConversionExpr) and
- san.getThen().getAChild*() = conv and
+ comp.getThen().getAChild*() = conv and
(
// If the conversion is inside an `if` block that compares the source as
// `source > 0` or `source >= 0`, then that sanitizes conversion of int to int32;
conv.getTypeName() = "int32" and
- san.getComparison().getLesserOperand().getNumericValue() = 0 and
- san.getComparison().getGreaterOperand().getGlobalValueNumber() =
+ comp.getComparison().getLesserOperand().getNumericValue() = 0 and
+ comp.getComparison().getGreaterOperand().getGlobalValueNumber() =
conv.getOperand().getGlobalValueNumber()
or
- comparisonGreaterOperandValueIsEqualOrLess("int8", san, conv, getMaxInt8())
+ comparisonGreaterOperandValueIsEqual("int8", comp, conv, getMaxInt8())
or
- comparisonGreaterOperandValueIsEqualOrLess("int16", san, conv, getMaxInt16())
+ comparisonGreaterOperandValueIsEqual("int16", comp, conv, getMaxInt16())
or
- comparisonGreaterOperandValueIsEqualOrLess("int32", san, conv, getMaxInt32())
+ comparisonGreaterOperandValueIsEqual("int32", comp, conv, getMaxInt32())
or
- comparisonGreaterOperandValueIsEqualOrLess("uint8", san, conv, getMaxUint8())
+ comparisonGreaterOperandValueIsEqual("uint8", comp, conv, getMaxUint8())
or
- comparisonGreaterOperandValueIsEqualOrLess("uint16", san, conv, getMaxUint16())
+ comparisonGreaterOperandValueIsEqual("uint16", comp, conv, getMaxUint16())
)
)
}
-int getMaxInt8() {
- result = 2.pow(7) - 1
- // = 1<<7 - 1
-}
+int getMaxInt8() { result = 2.pow(7) - 1 }
-int getMaxInt16() {
- result = 2.pow(15) - 1
- // = 1<<15 - 1
-}
+int getMaxInt16() { result = 2.pow(15) - 1 }
-int getMaxInt32() {
- result = 2.pow(31) - 1
- // = 1<<31 - 1
-}
+int getMaxInt32() { result = 2.pow(31) - 1 }
-int getMaxUint8() {
- result = 2.pow(8) - 1
- // = 1<<8 - 1
-}
+int getMaxUint8() { result = 2.pow(8) - 1 }
-int getMaxUint16() {
- result = 2.pow(16) - 1
- // = 1<<16 - 1
-}
+int getMaxUint16() { result = 2.pow(16) - 1 }
-predicate comparisonGreaterOperandValueIsEqualOrLess(
+/**
+ * The `if` relational comparison (which can also be inside a `LandExpr`) stating that
+ * the greater operand is equal to `value`, and the lesser operand is the conversion operand.
+ */
+predicate comparisonGreaterOperandValueIsEqual(
string typeName, IfRelationalComparison ifExpr, Lte32BitNumericConversionExpr conv, int value
) {
conv.getTypeName() = typeName and
From fe661b227c128b3e135e6f790710925f97c63a3a Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Tue, 7 Apr 2020 16:45:52 +0300
Subject: [PATCH 027/157] Improve alert message inside select statement
---
.../Security/CWE-681/IncorrectNumericConversion.ql | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/ql/src/Security/CWE-681/IncorrectNumericConversion.ql b/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
index 25b7ca43799..129e6a60c5e 100644
--- a/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
+++ b/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
@@ -268,10 +268,22 @@ predicate comparisonGreaterOperandValueIsEqual(
)
}
+string getParserQualifiedNameFromResultType(string resultTypeName) {
+ resultTypeName = "int" and result = "strconv.Atoi"
+ or
+ resultTypeName = "int64" and result = "strconv.ParseInt"
+ or
+ resultTypeName = "uint64" and result = "strconv.ParseUint"
+ or
+ resultTypeName = "float64" and result = "strconv.ParseFloat"
+}
+
from DataFlow::PathNode source, DataFlow::PathNode sink
where
exists(Lte64FlowConfig cfg | cfg.hasFlowPath(source, sink)) or
exists(Lte32FlowConfig cfg | cfg.hasFlowPath(source, sink)) or
exists(Lte16FlowConfig cfg | cfg.hasFlowPath(source, sink))
select source, source, sink,
- "Incorrect type conversion of int from strconv.Atoi result to another numeric type"
+ "Incorrect type conversion of " + source.getNode().getType() + " from " +
+ getParserQualifiedNameFromResultType(source.getNode().getType().toString()) + " result to " +
+ sink.getNode().asExpr().(Lte32BitNumericConversionExpr).getTypeName()
From 57ac636d60830495ef45c8e8e8dc78bc3a2418db Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Tue, 7 Apr 2020 17:17:49 +0300
Subject: [PATCH 028/157] Change alert message
---
ql/src/Security/CWE-681/IncorrectNumericConversion.ql | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ql/src/Security/CWE-681/IncorrectNumericConversion.ql b/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
index 129e6a60c5e..7d0693c94f2 100644
--- a/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
+++ b/ql/src/Security/CWE-681/IncorrectNumericConversion.ql
@@ -285,5 +285,5 @@ where
exists(Lte16FlowConfig cfg | cfg.hasFlowPath(source, sink))
select source, source, sink,
"Incorrect type conversion of " + source.getNode().getType() + " from " +
- getParserQualifiedNameFromResultType(source.getNode().getType().toString()) + " result to " +
+ getParserQualifiedNameFromResultType(source.getNode().getType().toString()) + " result to a lower bit size type " +
sink.getNode().asExpr().(Lte32BitNumericConversionExpr).getTypeName()
From f093226dab64f281cb6f3861d8bf763ee5375aa0 Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Tue, 7 Apr 2020 17:25:13 +0300
Subject: [PATCH 029/157] Move query to experimental
---
.../CWE-681/IncorrectNumericConversion.go | 0
.../CWE-681/IncorrectNumericConversion.qhelp | 0
.../CWE-681/IncorrectNumericConversion.ql | 0
.../CWE-681/IncorrectNumericConversionGood.go | 0
.../IncorrectNumericConversion.expected | 83 +++++++++++++++++++
.../CWE-681/IncorrectNumericConversion.go | 77 +++++++++++++++++
.../CWE-681/IncorrectNumericConversion.qlref | 1 +
.../IncorrectNumericConversion.expected | 43 ----------
.../CWE-681/IncorrectNumericConversion.qlref | 1 -
9 files changed, 161 insertions(+), 44 deletions(-)
rename ql/src/{Security => experimental}/CWE-681/IncorrectNumericConversion.go (100%)
rename ql/src/{Security => experimental}/CWE-681/IncorrectNumericConversion.qhelp (100%)
rename ql/src/{Security => experimental}/CWE-681/IncorrectNumericConversion.ql (100%)
rename ql/src/{Security => experimental}/CWE-681/IncorrectNumericConversionGood.go (100%)
create mode 100644 ql/test/experimental/CWE-681/IncorrectNumericConversion.expected
rename ql/test/{query-tests/Security => experimental}/CWE-681/IncorrectNumericConversion.go (72%)
create mode 100644 ql/test/experimental/CWE-681/IncorrectNumericConversion.qlref
delete mode 100644 ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.expected
delete mode 100644 ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.qlref
diff --git a/ql/src/Security/CWE-681/IncorrectNumericConversion.go b/ql/src/experimental/CWE-681/IncorrectNumericConversion.go
similarity index 100%
rename from ql/src/Security/CWE-681/IncorrectNumericConversion.go
rename to ql/src/experimental/CWE-681/IncorrectNumericConversion.go
diff --git a/ql/src/Security/CWE-681/IncorrectNumericConversion.qhelp b/ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp
similarity index 100%
rename from ql/src/Security/CWE-681/IncorrectNumericConversion.qhelp
rename to ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp
diff --git a/ql/src/Security/CWE-681/IncorrectNumericConversion.ql b/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
similarity index 100%
rename from ql/src/Security/CWE-681/IncorrectNumericConversion.ql
rename to ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
diff --git a/ql/src/Security/CWE-681/IncorrectNumericConversionGood.go b/ql/src/experimental/CWE-681/IncorrectNumericConversionGood.go
similarity index 100%
rename from ql/src/Security/CWE-681/IncorrectNumericConversionGood.go
rename to ql/src/experimental/CWE-681/IncorrectNumericConversionGood.go
diff --git a/ql/test/experimental/CWE-681/IncorrectNumericConversion.expected b/ql/test/experimental/CWE-681/IncorrectNumericConversion.expected
new file mode 100644
index 00000000000..286b470f51d
--- /dev/null
+++ b/ql/test/experimental/CWE-681/IncorrectNumericConversion.expected
@@ -0,0 +1,83 @@
+edges
+| IncorrectNumericConversion.go:26:2:26:28 | ... := ...[0] : int | IncorrectNumericConversion.go:35:41:35:50 | type conversion |
+| IncorrectNumericConversion.go:53:3:53:47 | ... := ...[0] : float64 | IncorrectNumericConversion.go:57:7:57:19 | type conversion |
+| IncorrectNumericConversion.go:60:3:60:47 | ... := ...[0] : float64 | IncorrectNumericConversion.go:64:7:64:19 | type conversion |
+| IncorrectNumericConversion.go:69:3:69:49 | ... := ...[0] : int64 | IncorrectNumericConversion.go:73:7:73:18 | type conversion |
+| IncorrectNumericConversion.go:76:3:76:49 | ... := ...[0] : int64 | IncorrectNumericConversion.go:80:7:80:19 | type conversion |
+| IncorrectNumericConversion.go:83:3:83:49 | ... := ...[0] : int64 | IncorrectNumericConversion.go:87:7:87:19 | type conversion |
+| IncorrectNumericConversion.go:90:3:90:48 | ... := ...[0] : int64 | IncorrectNumericConversion.go:94:7:94:19 | type conversion |
+| IncorrectNumericConversion.go:99:3:99:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:103:7:103:18 | type conversion |
+| IncorrectNumericConversion.go:106:3:106:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:110:7:110:19 | type conversion |
+| IncorrectNumericConversion.go:113:3:113:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:117:7:117:19 | type conversion |
+| IncorrectNumericConversion.go:120:3:120:49 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:124:7:124:19 | type conversion |
+| IncorrectNumericConversion.go:131:3:131:36 | ... := ...[0] : int | IncorrectNumericConversion.go:135:7:135:18 | type conversion |
+| IncorrectNumericConversion.go:138:3:138:36 | ... := ...[0] : int | IncorrectNumericConversion.go:142:7:142:19 | type conversion |
+| IncorrectNumericConversion.go:145:3:145:36 | ... := ...[0] : int | IncorrectNumericConversion.go:149:7:149:19 | type conversion |
+| IncorrectNumericConversion.go:152:3:152:36 | ... := ...[0] : int | IncorrectNumericConversion.go:156:7:156:19 | type conversion |
+| IncorrectNumericConversion.go:159:3:159:36 | ... := ...[0] : int | IncorrectNumericConversion.go:163:7:163:20 | type conversion |
+| IncorrectNumericConversion.go:166:3:166:36 | ... := ...[0] : int | IncorrectNumericConversion.go:170:7:170:20 | type conversion |
+| IncorrectNumericConversion.go:173:3:173:36 | ... := ...[0] : int | IncorrectNumericConversion.go:177:7:177:21 | type conversion |
+| IncorrectNumericConversion.go:180:3:180:36 | ... := ...[0] : int | IncorrectNumericConversion.go:185:7:185:18 | type conversion |
+| IncorrectNumericConversion.go:189:3:189:36 | ... := ...[0] : int | IncorrectNumericConversion.go:193:7:193:23 | type conversion |
+nodes
+| IncorrectNumericConversion.go:26:2:26:28 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:35:41:35:50 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:53:3:53:47 | ... := ...[0] : float64 | semmle.label | ... := ...[0] : float64 |
+| IncorrectNumericConversion.go:57:7:57:19 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:60:3:60:47 | ... := ...[0] : float64 | semmle.label | ... := ...[0] : float64 |
+| IncorrectNumericConversion.go:64:7:64:19 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:69:3:69:49 | ... := ...[0] : int64 | semmle.label | ... := ...[0] : int64 |
+| IncorrectNumericConversion.go:73:7:73:18 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:76:3:76:49 | ... := ...[0] : int64 | semmle.label | ... := ...[0] : int64 |
+| IncorrectNumericConversion.go:80:7:80:19 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:83:3:83:49 | ... := ...[0] : int64 | semmle.label | ... := ...[0] : int64 |
+| IncorrectNumericConversion.go:87:7:87:19 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:90:3:90:48 | ... := ...[0] : int64 | semmle.label | ... := ...[0] : int64 |
+| IncorrectNumericConversion.go:94:7:94:19 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:99:3:99:50 | ... := ...[0] : uint64 | semmle.label | ... := ...[0] : uint64 |
+| IncorrectNumericConversion.go:103:7:103:18 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:106:3:106:50 | ... := ...[0] : uint64 | semmle.label | ... := ...[0] : uint64 |
+| IncorrectNumericConversion.go:110:7:110:19 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:113:3:113:50 | ... := ...[0] : uint64 | semmle.label | ... := ...[0] : uint64 |
+| IncorrectNumericConversion.go:117:7:117:19 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:120:3:120:49 | ... := ...[0] : uint64 | semmle.label | ... := ...[0] : uint64 |
+| IncorrectNumericConversion.go:124:7:124:19 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:131:3:131:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:135:7:135:18 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:138:3:138:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:142:7:142:19 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:145:3:145:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:149:7:149:19 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:152:3:152:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:156:7:156:19 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:159:3:159:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:163:7:163:20 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:166:3:166:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:170:7:170:20 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:173:3:173:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:177:7:177:21 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:180:3:180:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:185:7:185:18 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:189:3:189:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:193:7:193:23 | type conversion | semmle.label | type conversion |
+#select
+| IncorrectNumericConversion.go:26:2:26:28 | ... := ...[0] : int | IncorrectNumericConversion.go:26:2:26:28 | ... := ...[0] : int | IncorrectNumericConversion.go:35:41:35:50 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type int32 |
+| IncorrectNumericConversion.go:53:3:53:47 | ... := ...[0] : float64 | IncorrectNumericConversion.go:53:3:53:47 | ... := ...[0] : float64 | IncorrectNumericConversion.go:57:7:57:19 | type conversion | Incorrect type conversion of float64 from strconv.ParseFloat result to a lower bit size type int16 |
+| IncorrectNumericConversion.go:60:3:60:47 | ... := ...[0] : float64 | IncorrectNumericConversion.go:60:3:60:47 | ... := ...[0] : float64 | IncorrectNumericConversion.go:64:7:64:19 | type conversion | Incorrect type conversion of float64 from strconv.ParseFloat result to a lower bit size type int32 |
+| IncorrectNumericConversion.go:69:3:69:49 | ... := ...[0] : int64 | IncorrectNumericConversion.go:69:3:69:49 | ... := ...[0] : int64 | IncorrectNumericConversion.go:73:7:73:18 | type conversion | Incorrect type conversion of int64 from strconv.ParseInt result to a lower bit size type int8 |
+| IncorrectNumericConversion.go:76:3:76:49 | ... := ...[0] : int64 | IncorrectNumericConversion.go:76:3:76:49 | ... := ...[0] : int64 | IncorrectNumericConversion.go:80:7:80:19 | type conversion | Incorrect type conversion of int64 from strconv.ParseInt result to a lower bit size type int16 |
+| IncorrectNumericConversion.go:83:3:83:49 | ... := ...[0] : int64 | IncorrectNumericConversion.go:83:3:83:49 | ... := ...[0] : int64 | IncorrectNumericConversion.go:87:7:87:19 | type conversion | Incorrect type conversion of int64 from strconv.ParseInt result to a lower bit size type int32 |
+| IncorrectNumericConversion.go:90:3:90:48 | ... := ...[0] : int64 | IncorrectNumericConversion.go:90:3:90:48 | ... := ...[0] : int64 | IncorrectNumericConversion.go:94:7:94:19 | type conversion | Incorrect type conversion of int64 from strconv.ParseInt result to a lower bit size type int32 |
+| IncorrectNumericConversion.go:99:3:99:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:99:3:99:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:103:7:103:18 | type conversion | Incorrect type conversion of uint64 from strconv.ParseUint result to a lower bit size type int8 |
+| IncorrectNumericConversion.go:106:3:106:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:106:3:106:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:110:7:110:19 | type conversion | Incorrect type conversion of uint64 from strconv.ParseUint result to a lower bit size type int16 |
+| IncorrectNumericConversion.go:113:3:113:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:113:3:113:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:117:7:117:19 | type conversion | Incorrect type conversion of uint64 from strconv.ParseUint result to a lower bit size type int32 |
+| IncorrectNumericConversion.go:120:3:120:49 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:120:3:120:49 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:124:7:124:19 | type conversion | Incorrect type conversion of uint64 from strconv.ParseUint result to a lower bit size type int32 |
+| IncorrectNumericConversion.go:131:3:131:36 | ... := ...[0] : int | IncorrectNumericConversion.go:131:3:131:36 | ... := ...[0] : int | IncorrectNumericConversion.go:135:7:135:18 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type int8 |
+| IncorrectNumericConversion.go:138:3:138:36 | ... := ...[0] : int | IncorrectNumericConversion.go:138:3:138:36 | ... := ...[0] : int | IncorrectNumericConversion.go:142:7:142:19 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type int16 |
+| IncorrectNumericConversion.go:145:3:145:36 | ... := ...[0] : int | IncorrectNumericConversion.go:145:3:145:36 | ... := ...[0] : int | IncorrectNumericConversion.go:149:7:149:19 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type int32 |
+| IncorrectNumericConversion.go:152:3:152:36 | ... := ...[0] : int | IncorrectNumericConversion.go:152:3:152:36 | ... := ...[0] : int | IncorrectNumericConversion.go:156:7:156:19 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type uint8 |
+| IncorrectNumericConversion.go:159:3:159:36 | ... := ...[0] : int | IncorrectNumericConversion.go:159:3:159:36 | ... := ...[0] : int | IncorrectNumericConversion.go:163:7:163:20 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type uint16 |
+| IncorrectNumericConversion.go:166:3:166:36 | ... := ...[0] : int | IncorrectNumericConversion.go:166:3:166:36 | ... := ...[0] : int | IncorrectNumericConversion.go:170:7:170:20 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type uint32 |
+| IncorrectNumericConversion.go:173:3:173:36 | ... := ...[0] : int | IncorrectNumericConversion.go:173:3:173:36 | ... := ...[0] : int | IncorrectNumericConversion.go:177:7:177:21 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type float32 |
+| IncorrectNumericConversion.go:180:3:180:36 | ... := ...[0] : int | IncorrectNumericConversion.go:180:3:180:36 | ... := ...[0] : int | IncorrectNumericConversion.go:185:7:185:18 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type uint8 |
+| IncorrectNumericConversion.go:189:3:189:36 | ... := ...[0] : int | IncorrectNumericConversion.go:189:3:189:36 | ... := ...[0] : int | IncorrectNumericConversion.go:193:7:193:23 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type int16 |
diff --git a/ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.go b/ql/test/experimental/CWE-681/IncorrectNumericConversion.go
similarity index 72%
rename from ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.go
rename to ql/test/experimental/CWE-681/IncorrectNumericConversion.go
index a437bd70924..3aade6f98df 100644
--- a/ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.go
+++ b/ql/test/experimental/CWE-681/IncorrectNumericConversion.go
@@ -48,6 +48,83 @@ const CustomMaxInt16 = 1<<15 - 1
type CustomInt int16
+func badParseFloat() {
+ {
+ parsed, err := strconv.ParseFloat("1.32", 32)
+ if err != nil {
+ panic(err)
+ }
+ _ = int16(parsed)
+ }
+ {
+ parsed, err := strconv.ParseFloat("1.32", 64)
+ if err != nil {
+ panic(err)
+ }
+ _ = int32(parsed)
+ }
+}
+func badParseInt() {
+ {
+ parsed, err := strconv.ParseInt("3456", 10, 16)
+ if err != nil {
+ panic(err)
+ }
+ _ = int8(parsed)
+ }
+ {
+ parsed, err := strconv.ParseInt("3456", 10, 32)
+ if err != nil {
+ panic(err)
+ }
+ _ = int16(parsed)
+ }
+ {
+ parsed, err := strconv.ParseInt("3456", 10, 64)
+ if err != nil {
+ panic(err)
+ }
+ _ = int32(parsed)
+ }
+ {
+ parsed, err := strconv.ParseInt("3456", 10, 0)
+ if err != nil {
+ panic(err)
+ }
+ _ = int32(parsed)
+ }
+}
+func badParseUint() {
+ {
+ parsed, err := strconv.ParseUint("3456", 10, 16)
+ if err != nil {
+ panic(err)
+ }
+ _ = int8(parsed)
+ }
+ {
+ parsed, err := strconv.ParseUint("3456", 10, 32)
+ if err != nil {
+ panic(err)
+ }
+ _ = int16(parsed)
+ }
+ {
+ parsed, err := strconv.ParseUint("3456", 10, 64)
+ if err != nil {
+ panic(err)
+ }
+ _ = int32(parsed)
+ }
+ {
+ parsed, err := strconv.ParseUint("3456", 10, 0)
+ if err != nil {
+ panic(err)
+ }
+ _ = int32(parsed)
+ }
+}
+
// these should be caught:
func upperBoundIsNOTChecked(input string) {
{
diff --git a/ql/test/experimental/CWE-681/IncorrectNumericConversion.qlref b/ql/test/experimental/CWE-681/IncorrectNumericConversion.qlref
new file mode 100644
index 00000000000..81a515ce260
--- /dev/null
+++ b/ql/test/experimental/CWE-681/IncorrectNumericConversion.qlref
@@ -0,0 +1 @@
+experimental/CWE-681/IncorrectNumericConversion.ql
\ No newline at end of file
diff --git a/ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.expected b/ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.expected
deleted file mode 100644
index 03a09e927fa..00000000000
--- a/ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.expected
+++ /dev/null
@@ -1,43 +0,0 @@
-edges
-| IncorrectNumericConversion.go:26:2:26:28 | ... := ...[0] : int | IncorrectNumericConversion.go:35:41:35:50 | type conversion |
-| IncorrectNumericConversion.go:54:3:54:36 | ... := ...[0] : int | IncorrectNumericConversion.go:58:7:58:18 | type conversion |
-| IncorrectNumericConversion.go:61:3:61:36 | ... := ...[0] : int | IncorrectNumericConversion.go:65:7:65:19 | type conversion |
-| IncorrectNumericConversion.go:68:3:68:36 | ... := ...[0] : int | IncorrectNumericConversion.go:72:7:72:19 | type conversion |
-| IncorrectNumericConversion.go:75:3:75:36 | ... := ...[0] : int | IncorrectNumericConversion.go:79:7:79:19 | type conversion |
-| IncorrectNumericConversion.go:82:3:82:36 | ... := ...[0] : int | IncorrectNumericConversion.go:86:7:86:20 | type conversion |
-| IncorrectNumericConversion.go:89:3:89:36 | ... := ...[0] : int | IncorrectNumericConversion.go:93:7:93:20 | type conversion |
-| IncorrectNumericConversion.go:96:3:96:36 | ... := ...[0] : int | IncorrectNumericConversion.go:100:7:100:21 | type conversion |
-| IncorrectNumericConversion.go:103:3:103:36 | ... := ...[0] : int | IncorrectNumericConversion.go:108:7:108:18 | type conversion |
-| IncorrectNumericConversion.go:112:3:112:36 | ... := ...[0] : int | IncorrectNumericConversion.go:116:7:116:23 | type conversion |
-nodes
-| IncorrectNumericConversion.go:26:2:26:28 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
-| IncorrectNumericConversion.go:35:41:35:50 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:54:3:54:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
-| IncorrectNumericConversion.go:58:7:58:18 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:61:3:61:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
-| IncorrectNumericConversion.go:65:7:65:19 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:68:3:68:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
-| IncorrectNumericConversion.go:72:7:72:19 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:75:3:75:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
-| IncorrectNumericConversion.go:79:7:79:19 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:82:3:82:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
-| IncorrectNumericConversion.go:86:7:86:20 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:89:3:89:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
-| IncorrectNumericConversion.go:93:7:93:20 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:96:3:96:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
-| IncorrectNumericConversion.go:100:7:100:21 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:103:3:103:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
-| IncorrectNumericConversion.go:108:7:108:18 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:112:3:112:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
-| IncorrectNumericConversion.go:116:7:116:23 | type conversion | semmle.label | type conversion |
-#select
-| IncorrectNumericConversion.go:26:2:26:28 | ... := ...[0] : int | IncorrectNumericConversion.go:26:2:26:28 | ... := ...[0] : int | IncorrectNumericConversion.go:35:41:35:50 | type conversion | Incorrect type conversion of int from strconv.Atoi result to another numeric type |
-| IncorrectNumericConversion.go:54:3:54:36 | ... := ...[0] : int | IncorrectNumericConversion.go:54:3:54:36 | ... := ...[0] : int | IncorrectNumericConversion.go:58:7:58:18 | type conversion | Incorrect type conversion of int from strconv.Atoi result to another numeric type |
-| IncorrectNumericConversion.go:61:3:61:36 | ... := ...[0] : int | IncorrectNumericConversion.go:61:3:61:36 | ... := ...[0] : int | IncorrectNumericConversion.go:65:7:65:19 | type conversion | Incorrect type conversion of int from strconv.Atoi result to another numeric type |
-| IncorrectNumericConversion.go:68:3:68:36 | ... := ...[0] : int | IncorrectNumericConversion.go:68:3:68:36 | ... := ...[0] : int | IncorrectNumericConversion.go:72:7:72:19 | type conversion | Incorrect type conversion of int from strconv.Atoi result to another numeric type |
-| IncorrectNumericConversion.go:75:3:75:36 | ... := ...[0] : int | IncorrectNumericConversion.go:75:3:75:36 | ... := ...[0] : int | IncorrectNumericConversion.go:79:7:79:19 | type conversion | Incorrect type conversion of int from strconv.Atoi result to another numeric type |
-| IncorrectNumericConversion.go:82:3:82:36 | ... := ...[0] : int | IncorrectNumericConversion.go:82:3:82:36 | ... := ...[0] : int | IncorrectNumericConversion.go:86:7:86:20 | type conversion | Incorrect type conversion of int from strconv.Atoi result to another numeric type |
-| IncorrectNumericConversion.go:89:3:89:36 | ... := ...[0] : int | IncorrectNumericConversion.go:89:3:89:36 | ... := ...[0] : int | IncorrectNumericConversion.go:93:7:93:20 | type conversion | Incorrect type conversion of int from strconv.Atoi result to another numeric type |
-| IncorrectNumericConversion.go:96:3:96:36 | ... := ...[0] : int | IncorrectNumericConversion.go:96:3:96:36 | ... := ...[0] : int | IncorrectNumericConversion.go:100:7:100:21 | type conversion | Incorrect type conversion of int from strconv.Atoi result to another numeric type |
-| IncorrectNumericConversion.go:103:3:103:36 | ... := ...[0] : int | IncorrectNumericConversion.go:103:3:103:36 | ... := ...[0] : int | IncorrectNumericConversion.go:108:7:108:18 | type conversion | Incorrect type conversion of int from strconv.Atoi result to another numeric type |
-| IncorrectNumericConversion.go:112:3:112:36 | ... := ...[0] : int | IncorrectNumericConversion.go:112:3:112:36 | ... := ...[0] : int | IncorrectNumericConversion.go:116:7:116:23 | type conversion | Incorrect type conversion of int from strconv.Atoi result to another numeric type |
diff --git a/ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.qlref b/ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.qlref
deleted file mode 100644
index 7e678611eb2..00000000000
--- a/ql/test/query-tests/Security/CWE-681/IncorrectNumericConversion.qlref
+++ /dev/null
@@ -1 +0,0 @@
-Security/CWE-681/IncorrectNumericConversion.ql
\ No newline at end of file
From 4517d4513fdc5fc042dcc33d3aa3b31c52166fe7 Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Tue, 7 Apr 2020 17:46:16 +0300
Subject: [PATCH 030/157] Update qhelp file and go examples
---
.../CWE-681/IncorrectNumericConversion.go | 9 ++++-
.../CWE-681/IncorrectNumericConversion.qhelp | 39 ++++++++++++++++---
.../CWE-681/IncorrectNumericConversionGood.go | 19 +++++++++
3 files changed, 60 insertions(+), 7 deletions(-)
diff --git a/ql/src/experimental/CWE-681/IncorrectNumericConversion.go b/ql/src/experimental/CWE-681/IncorrectNumericConversion.go
index 11c343550ab..518ca38cee0 100644
--- a/ql/src/experimental/CWE-681/IncorrectNumericConversion.go
+++ b/ql/src/experimental/CWE-681/IncorrectNumericConversion.go
@@ -4,10 +4,17 @@ import (
"strconv"
)
-func parseAllocate(wanted string) int32 {
+func parseAllocateBad1(wanted string) int32 {
parsed, err := strconv.Atoi(wanted)
if err != nil {
panic(err)
}
return int32(parsed)
}
+func parseAllocateBad2(wanted string) int32 {
+ parsed, err := strconv.ParseInt(wanted, 10, 64)
+ if err != nil {
+ panic(err)
+ }
+ return int32(parsed)
+}
diff --git a/ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp b/ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp
index 77dd3f1cfac..61bf10d9400 100644
--- a/ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp
+++ b/ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp
@@ -5,16 +5,25 @@
-If a numeric value string is parsed using strconv.Atoi into an int, and then that int
+If a numeric value string is parsed using strconv.Atoi into an int, and subsequently that int
is converted into another type of a lower bit size, the result can produce unexpected values.
+
+This also applies to the results of strconv.ParseFloat, strconv.ParseInt,
+and strconv.ParseUint when the specified bit size is higher than the bit size of the
+type that number is converted to.
+
-If you need to parse numeric values with specific bit sizes, use the functions specific to each
-type (strconv.ParseFloat, strconv.ParseInt, strconv.ParseUint)
-that also allow to specify the wanted bit size.
+If you need to parse numeric values with specific bit sizes, avoid strconv.Atoi, and, instead,
+use the functions specific to each type (strconv.ParseFloat, strconv.ParseInt,
+strconv.ParseUint) that also allow you to specify the wanted bit size.
+
+
+When using those functions, be careful to not convert the result to another type with a lower bit size than
+the bit size you specified when parsing the number.
If this is not possible, then add upper (and lower) bound checks specific to each type and
@@ -24,13 +33,13 @@ bit size (you can find the min and max value for each type in the `math` package
-In the following example, assume that an input string is passed to parseAllocate function,
+In the first example, assume that an input string is passed to parseAllocateBad1 function,
parsed by strconv.Atoi, and then converted into an int32 type:
The bounds are not checked, so this means that if the provided number is greater than max int32,
-the resulting value from the conversion will be different from the provided value.
+the resulting value from the conversion will be different from the actual provided value.
To avoid unexpected values, you should either use the other functions provided by the strconv
@@ -40,6 +49,24 @@ function.
+
+
+
+In the second example, assume that an input string is passed to parseAllocateBad2 function,
+parsed by strconv.ParseInt with a bit size set to 64, and then converted into an int32 type:
+
+
+
+If the provided number is greater than max int32, the resulting value from the conversion will be
+different from the actual provided value.
+
+
+To avoid unexpected values, you should specify the correct bit size as in parseAllocateGood3;
+or check bounds before making the conversion as in parseAllocateGood4.
+
+
+
+
mitre.org: CWE-681: Incorrect Conversion between Numeric Types.
diff --git a/ql/src/experimental/CWE-681/IncorrectNumericConversionGood.go b/ql/src/experimental/CWE-681/IncorrectNumericConversionGood.go
index 003f82cd2a9..29c111cf54e 100644
--- a/ql/src/experimental/CWE-681/IncorrectNumericConversionGood.go
+++ b/ql/src/experimental/CWE-681/IncorrectNumericConversionGood.go
@@ -30,3 +30,22 @@ func parseAllocateGood2(desired string) int32 {
}
return int32(parsed)
}
+
+func parseAllocateGood3(wanted string) int32 {
+ parsed, err := strconv.ParseInt(wanted, 10, 32)
+ if err != nil {
+ panic(err)
+ }
+ return int32(parsed)
+}
+func parseAllocateGood4(wanted string) int32 {
+ parsed, err := strconv.ParseInt(wanted, 10, 64)
+ if err != nil {
+ panic(err)
+ }
+ // GOOD: check for lower and upper bounds
+ if parsed > 0 && parsed <= math.MaxInt32 {
+ return int32(parsed)
+ }
+ return DefaultAllocate
+}
From d7130873649d3ff3938eccca2f65007d0cf0c719 Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Tue, 7 Apr 2020 18:22:52 +0300
Subject: [PATCH 031/157] Mention in qhelp file: CWE-190: Integer Overflow or
Wraparound
---
ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp | 3 +++
1 file changed, 3 insertions(+)
diff --git a/ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp b/ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp
index 61bf10d9400..2a693d1bc57 100644
--- a/ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp
+++ b/ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp
@@ -71,5 +71,8 @@ or check bounds before making the conversion as in parseAllocateGood4
mitre.org: CWE-681: Incorrect Conversion between Numeric Types.
+
+mitre.org: CWE-190: Integer Overflow or Wraparound.
+
From c9c7e6c0a90ff8409a0b0f7863dd1742e9c9882b Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Tue, 7 Apr 2020 21:39:06 +0300
Subject: [PATCH 032/157] Add more test cases: add negative cases
---
.../IncorrectNumericConversion.expected | 72 ++++++++---------
.../CWE-681/IncorrectNumericConversion.go | 77 +++++++++++++++++++
2 files changed, 113 insertions(+), 36 deletions(-)
diff --git a/ql/test/experimental/CWE-681/IncorrectNumericConversion.expected b/ql/test/experimental/CWE-681/IncorrectNumericConversion.expected
index 286b470f51d..4cc0102be01 100644
--- a/ql/test/experimental/CWE-681/IncorrectNumericConversion.expected
+++ b/ql/test/experimental/CWE-681/IncorrectNumericConversion.expected
@@ -10,15 +10,15 @@ edges
| IncorrectNumericConversion.go:106:3:106:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:110:7:110:19 | type conversion |
| IncorrectNumericConversion.go:113:3:113:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:117:7:117:19 | type conversion |
| IncorrectNumericConversion.go:120:3:120:49 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:124:7:124:19 | type conversion |
-| IncorrectNumericConversion.go:131:3:131:36 | ... := ...[0] : int | IncorrectNumericConversion.go:135:7:135:18 | type conversion |
-| IncorrectNumericConversion.go:138:3:138:36 | ... := ...[0] : int | IncorrectNumericConversion.go:142:7:142:19 | type conversion |
-| IncorrectNumericConversion.go:145:3:145:36 | ... := ...[0] : int | IncorrectNumericConversion.go:149:7:149:19 | type conversion |
-| IncorrectNumericConversion.go:152:3:152:36 | ... := ...[0] : int | IncorrectNumericConversion.go:156:7:156:19 | type conversion |
-| IncorrectNumericConversion.go:159:3:159:36 | ... := ...[0] : int | IncorrectNumericConversion.go:163:7:163:20 | type conversion |
-| IncorrectNumericConversion.go:166:3:166:36 | ... := ...[0] : int | IncorrectNumericConversion.go:170:7:170:20 | type conversion |
-| IncorrectNumericConversion.go:173:3:173:36 | ... := ...[0] : int | IncorrectNumericConversion.go:177:7:177:21 | type conversion |
-| IncorrectNumericConversion.go:180:3:180:36 | ... := ...[0] : int | IncorrectNumericConversion.go:185:7:185:18 | type conversion |
-| IncorrectNumericConversion.go:189:3:189:36 | ... := ...[0] : int | IncorrectNumericConversion.go:193:7:193:23 | type conversion |
+| IncorrectNumericConversion.go:208:3:208:36 | ... := ...[0] : int | IncorrectNumericConversion.go:212:7:212:18 | type conversion |
+| IncorrectNumericConversion.go:215:3:215:36 | ... := ...[0] : int | IncorrectNumericConversion.go:219:7:219:19 | type conversion |
+| IncorrectNumericConversion.go:222:3:222:36 | ... := ...[0] : int | IncorrectNumericConversion.go:226:7:226:19 | type conversion |
+| IncorrectNumericConversion.go:229:3:229:36 | ... := ...[0] : int | IncorrectNumericConversion.go:233:7:233:19 | type conversion |
+| IncorrectNumericConversion.go:236:3:236:36 | ... := ...[0] : int | IncorrectNumericConversion.go:240:7:240:20 | type conversion |
+| IncorrectNumericConversion.go:243:3:243:36 | ... := ...[0] : int | IncorrectNumericConversion.go:247:7:247:20 | type conversion |
+| IncorrectNumericConversion.go:250:3:250:36 | ... := ...[0] : int | IncorrectNumericConversion.go:254:7:254:21 | type conversion |
+| IncorrectNumericConversion.go:257:3:257:36 | ... := ...[0] : int | IncorrectNumericConversion.go:262:7:262:18 | type conversion |
+| IncorrectNumericConversion.go:266:3:266:36 | ... := ...[0] : int | IncorrectNumericConversion.go:270:7:270:23 | type conversion |
nodes
| IncorrectNumericConversion.go:26:2:26:28 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
| IncorrectNumericConversion.go:35:41:35:50 | type conversion | semmle.label | type conversion |
@@ -42,24 +42,24 @@ nodes
| IncorrectNumericConversion.go:117:7:117:19 | type conversion | semmle.label | type conversion |
| IncorrectNumericConversion.go:120:3:120:49 | ... := ...[0] : uint64 | semmle.label | ... := ...[0] : uint64 |
| IncorrectNumericConversion.go:124:7:124:19 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:131:3:131:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
-| IncorrectNumericConversion.go:135:7:135:18 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:138:3:138:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
-| IncorrectNumericConversion.go:142:7:142:19 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:145:3:145:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
-| IncorrectNumericConversion.go:149:7:149:19 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:152:3:152:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
-| IncorrectNumericConversion.go:156:7:156:19 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:159:3:159:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
-| IncorrectNumericConversion.go:163:7:163:20 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:166:3:166:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
-| IncorrectNumericConversion.go:170:7:170:20 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:173:3:173:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
-| IncorrectNumericConversion.go:177:7:177:21 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:180:3:180:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
-| IncorrectNumericConversion.go:185:7:185:18 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:189:3:189:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
-| IncorrectNumericConversion.go:193:7:193:23 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:208:3:208:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:212:7:212:18 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:215:3:215:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:219:7:219:19 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:222:3:222:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:226:7:226:19 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:229:3:229:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:233:7:233:19 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:236:3:236:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:240:7:240:20 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:243:3:243:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:247:7:247:20 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:250:3:250:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:254:7:254:21 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:257:3:257:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:262:7:262:18 | type conversion | semmle.label | type conversion |
+| IncorrectNumericConversion.go:266:3:266:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:270:7:270:23 | type conversion | semmle.label | type conversion |
#select
| IncorrectNumericConversion.go:26:2:26:28 | ... := ...[0] : int | IncorrectNumericConversion.go:26:2:26:28 | ... := ...[0] : int | IncorrectNumericConversion.go:35:41:35:50 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type int32 |
| IncorrectNumericConversion.go:53:3:53:47 | ... := ...[0] : float64 | IncorrectNumericConversion.go:53:3:53:47 | ... := ...[0] : float64 | IncorrectNumericConversion.go:57:7:57:19 | type conversion | Incorrect type conversion of float64 from strconv.ParseFloat result to a lower bit size type int16 |
@@ -72,12 +72,12 @@ nodes
| IncorrectNumericConversion.go:106:3:106:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:106:3:106:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:110:7:110:19 | type conversion | Incorrect type conversion of uint64 from strconv.ParseUint result to a lower bit size type int16 |
| IncorrectNumericConversion.go:113:3:113:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:113:3:113:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:117:7:117:19 | type conversion | Incorrect type conversion of uint64 from strconv.ParseUint result to a lower bit size type int32 |
| IncorrectNumericConversion.go:120:3:120:49 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:120:3:120:49 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:124:7:124:19 | type conversion | Incorrect type conversion of uint64 from strconv.ParseUint result to a lower bit size type int32 |
-| IncorrectNumericConversion.go:131:3:131:36 | ... := ...[0] : int | IncorrectNumericConversion.go:131:3:131:36 | ... := ...[0] : int | IncorrectNumericConversion.go:135:7:135:18 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type int8 |
-| IncorrectNumericConversion.go:138:3:138:36 | ... := ...[0] : int | IncorrectNumericConversion.go:138:3:138:36 | ... := ...[0] : int | IncorrectNumericConversion.go:142:7:142:19 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type int16 |
-| IncorrectNumericConversion.go:145:3:145:36 | ... := ...[0] : int | IncorrectNumericConversion.go:145:3:145:36 | ... := ...[0] : int | IncorrectNumericConversion.go:149:7:149:19 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type int32 |
-| IncorrectNumericConversion.go:152:3:152:36 | ... := ...[0] : int | IncorrectNumericConversion.go:152:3:152:36 | ... := ...[0] : int | IncorrectNumericConversion.go:156:7:156:19 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type uint8 |
-| IncorrectNumericConversion.go:159:3:159:36 | ... := ...[0] : int | IncorrectNumericConversion.go:159:3:159:36 | ... := ...[0] : int | IncorrectNumericConversion.go:163:7:163:20 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type uint16 |
-| IncorrectNumericConversion.go:166:3:166:36 | ... := ...[0] : int | IncorrectNumericConversion.go:166:3:166:36 | ... := ...[0] : int | IncorrectNumericConversion.go:170:7:170:20 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type uint32 |
-| IncorrectNumericConversion.go:173:3:173:36 | ... := ...[0] : int | IncorrectNumericConversion.go:173:3:173:36 | ... := ...[0] : int | IncorrectNumericConversion.go:177:7:177:21 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type float32 |
-| IncorrectNumericConversion.go:180:3:180:36 | ... := ...[0] : int | IncorrectNumericConversion.go:180:3:180:36 | ... := ...[0] : int | IncorrectNumericConversion.go:185:7:185:18 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type uint8 |
-| IncorrectNumericConversion.go:189:3:189:36 | ... := ...[0] : int | IncorrectNumericConversion.go:189:3:189:36 | ... := ...[0] : int | IncorrectNumericConversion.go:193:7:193:23 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type int16 |
+| IncorrectNumericConversion.go:208:3:208:36 | ... := ...[0] : int | IncorrectNumericConversion.go:208:3:208:36 | ... := ...[0] : int | IncorrectNumericConversion.go:212:7:212:18 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type int8 |
+| IncorrectNumericConversion.go:215:3:215:36 | ... := ...[0] : int | IncorrectNumericConversion.go:215:3:215:36 | ... := ...[0] : int | IncorrectNumericConversion.go:219:7:219:19 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type int16 |
+| IncorrectNumericConversion.go:222:3:222:36 | ... := ...[0] : int | IncorrectNumericConversion.go:222:3:222:36 | ... := ...[0] : int | IncorrectNumericConversion.go:226:7:226:19 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type int32 |
+| IncorrectNumericConversion.go:229:3:229:36 | ... := ...[0] : int | IncorrectNumericConversion.go:229:3:229:36 | ... := ...[0] : int | IncorrectNumericConversion.go:233:7:233:19 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type uint8 |
+| IncorrectNumericConversion.go:236:3:236:36 | ... := ...[0] : int | IncorrectNumericConversion.go:236:3:236:36 | ... := ...[0] : int | IncorrectNumericConversion.go:240:7:240:20 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type uint16 |
+| IncorrectNumericConversion.go:243:3:243:36 | ... := ...[0] : int | IncorrectNumericConversion.go:243:3:243:36 | ... := ...[0] : int | IncorrectNumericConversion.go:247:7:247:20 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type uint32 |
+| IncorrectNumericConversion.go:250:3:250:36 | ... := ...[0] : int | IncorrectNumericConversion.go:250:3:250:36 | ... := ...[0] : int | IncorrectNumericConversion.go:254:7:254:21 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type float32 |
+| IncorrectNumericConversion.go:257:3:257:36 | ... := ...[0] : int | IncorrectNumericConversion.go:257:3:257:36 | ... := ...[0] : int | IncorrectNumericConversion.go:262:7:262:18 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type uint8 |
+| IncorrectNumericConversion.go:266:3:266:36 | ... := ...[0] : int | IncorrectNumericConversion.go:266:3:266:36 | ... := ...[0] : int | IncorrectNumericConversion.go:270:7:270:23 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type int16 |
diff --git a/ql/test/experimental/CWE-681/IncorrectNumericConversion.go b/ql/test/experimental/CWE-681/IncorrectNumericConversion.go
index 3aade6f98df..cdabda91a4a 100644
--- a/ql/test/experimental/CWE-681/IncorrectNumericConversion.go
+++ b/ql/test/experimental/CWE-681/IncorrectNumericConversion.go
@@ -125,6 +125,83 @@ func badParseUint() {
}
}
+func goodParseFloat() {
+ {
+ parsed, err := strconv.ParseFloat("1.32", 32)
+ if err != nil {
+ panic(err)
+ }
+ _ = int32(parsed)
+ }
+ {
+ parsed, err := strconv.ParseFloat("1.32", 64)
+ if err != nil {
+ panic(err)
+ }
+ _ = int64(parsed)
+ }
+}
+func goodParseInt() {
+ {
+ parsed, err := strconv.ParseInt("3456", 10, 16)
+ if err != nil {
+ panic(err)
+ }
+ _ = int16(parsed)
+ }
+ {
+ parsed, err := strconv.ParseInt("3456", 10, 32)
+ if err != nil {
+ panic(err)
+ }
+ _ = int32(parsed)
+ }
+ {
+ parsed, err := strconv.ParseInt("3456", 10, 64)
+ if err != nil {
+ panic(err)
+ }
+ _ = int64(parsed)
+ }
+ {
+ parsed, err := strconv.ParseInt("3456", 10, 0)
+ if err != nil {
+ panic(err)
+ }
+ _ = int64(parsed)
+ }
+}
+func goodParseUint() {
+ {
+ parsed, err := strconv.ParseUint("3456", 10, 16)
+ if err != nil {
+ panic(err)
+ }
+ _ = int16(parsed)
+ }
+ {
+ parsed, err := strconv.ParseUint("3456", 10, 32)
+ if err != nil {
+ panic(err)
+ }
+ _ = int32(parsed)
+ }
+ {
+ parsed, err := strconv.ParseUint("3456", 10, 64)
+ if err != nil {
+ panic(err)
+ }
+ _ = int64(parsed)
+ }
+ {
+ parsed, err := strconv.ParseUint("3456", 10, 0)
+ if err != nil {
+ panic(err)
+ }
+ _ = int64(parsed)
+ }
+}
+
// these should be caught:
func upperBoundIsNOTChecked(input string) {
{
From 6f1f60896a2edba9aed3693b62b0388b398ecf66 Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Mon, 27 Apr 2020 11:33:21 +0300
Subject: [PATCH 033/157] autoformat
---
ql/src/experimental/CWE-681/IncorrectNumericConversion.ql | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql b/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
index 7d0693c94f2..d2c9b7f590c 100644
--- a/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
+++ b/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
@@ -285,5 +285,6 @@ where
exists(Lte16FlowConfig cfg | cfg.hasFlowPath(source, sink))
select source, source, sink,
"Incorrect type conversion of " + source.getNode().getType() + " from " +
- getParserQualifiedNameFromResultType(source.getNode().getType().toString()) + " result to a lower bit size type " +
+ getParserQualifiedNameFromResultType(source.getNode().getType().toString()) +
+ " result to a lower bit size type " +
sink.getNode().asExpr().(Lte32BitNumericConversionExpr).getTypeName()
From b9fae2e5d0276926a16d74333a77b156fc9010a0 Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Mon, 27 Apr 2020 13:09:23 +0300
Subject: [PATCH 034/157] Add newline
---
ql/test/experimental/CWE-681/IncorrectNumericConversion.qlref | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ql/test/experimental/CWE-681/IncorrectNumericConversion.qlref b/ql/test/experimental/CWE-681/IncorrectNumericConversion.qlref
index 81a515ce260..884cf918f1c 100644
--- a/ql/test/experimental/CWE-681/IncorrectNumericConversion.qlref
+++ b/ql/test/experimental/CWE-681/IncorrectNumericConversion.qlref
@@ -1 +1 @@
-experimental/CWE-681/IncorrectNumericConversion.ql
\ No newline at end of file
+experimental/CWE-681/IncorrectNumericConversion.ql
From 127cd3d003e9ea1d5b3cd8dbd944038fd2835164 Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Thu, 30 Apr 2020 16:45:34 +0300
Subject: [PATCH 035/157] Refactor query
---
.../CWE-681/IncorrectNumericConversion.ql | 231 ++++++++----------
.../IncorrectNumericConversion.expected | 120 ++++-----
2 files changed, 165 insertions(+), 186 deletions(-)
diff --git a/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql b/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
index d2c9b7f590c..1746f70701a 100644
--- a/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
+++ b/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
@@ -27,66 +27,95 @@ class ParseUint extends Function {
ParseUint() { this.hasQualifiedName("strconv", "ParseUint") }
}
-/**
- * A type conversion expression towards a numeric type that has
- * a bit size equal to or lower than 32 bits.
- */
-class Lte32BitNumericConversionExpr extends ConversionExpr {
- string conversionTypeName;
+module ParserCall {
+ /**
+ * A data-flow call node that parses a number.
+ */
+ abstract class Range extends DataFlow::CallNode {
+ /** Gets the bit size of the result number. */
+ abstract int getTargetBitSize();
- Lte32BitNumericConversionExpr() {
+ /** Gets the name of the parser function. */
+ abstract string getParserName();
+ }
+}
+
+class ParserCall extends DataFlow::CallNode {
+ ParserCall::Range self;
+
+ ParserCall() { this = self }
+
+ int getTargetBitSize() { result = self.getTargetBitSize() }
+
+ string getParserName() { result = self.getParserName() }
+}
+
+int archBasedBitSize() { result = 0 }
+
+class AtoiCall extends DataFlow::CallNode, ParserCall::Range {
+ AtoiCall() { exists(Atoi atoi | this = atoi.getACall()) }
+
+ override int getTargetBitSize() { result = archBasedBitSize() }
+
+ override string getParserName() { result = "strconv.Atoi" }
+}
+
+class ParseIntCall extends DataFlow::CallNode, ParserCall::Range {
+ ParseIntCall() { exists(ParseInt parseInt | this = parseInt.getACall()) }
+
+ override int getTargetBitSize() { result = this.getArgument(2).getIntValue() }
+
+ override string getParserName() { result = "strconv.ParseInt" }
+}
+
+class ParseUintCall extends DataFlow::CallNode, ParserCall::Range {
+ ParseUintCall() { exists(ParseUint parseUint | this = parseUint.getACall()) }
+
+ override int getTargetBitSize() { result = this.getArgument(2).getIntValue() }
+
+ override string getParserName() { result = "strconv.ParseUint" }
+}
+
+class ParseFloatCall extends DataFlow::CallNode, ParserCall::Range {
+ ParseFloatCall() { exists(ParseFloat parseFloat | this = parseFloat.getACall()) }
+
+ override int getTargetBitSize() { result = this.getArgument(1).getIntValue() }
+
+ override string getParserName() { result = "strconv.ParseFloat" }
+}
+
+class NumericConversionExpr extends ConversionExpr {
+ string fullTypeName;
+ int bitSize;
+
+ NumericConversionExpr() {
exists(ConversionExpr conv |
- conversionTypeName = conv.getTypeExpr().getType().getUnderlyingType*().getName() and
+ fullTypeName = conv.getTypeExpr().getType().getUnderlyingType*().getName() and
(
- // anything lower than int64:
- conversionTypeName = ["int8", "int16", "int32"]
+ // 8 bit
+ fullTypeName = ["int8", "uint8"] and
+ bitSize = 8
or
- // anything lower than uint64:
- conversionTypeName = ["uint8", "uint16", "uint32"]
+ // 16 bit
+ fullTypeName = ["int16", "uint16"] and
+ bitSize = 16
or
- // anything lower than float64:
- conversionTypeName = "float32"
+ // 32 bit
+ fullTypeName = ["int32", "uint32", "float32"] and
+ bitSize = 32
+ or
+ // 64 bit
+ fullTypeName = ["int64", "uint64", "float64"] and
+ bitSize = 64
)
|
this = conv
)
}
- string getTypeName() { result = conversionTypeName }
-}
+ string getFullTypeName() { result = fullTypeName }
-/**
- * A type conversion expression towards a numeric type that has
- * a bit size equal to or lower than 16 bits.
- */
-class Lte16BitNumericConversionExpr extends Lte32BitNumericConversionExpr {
- Lte16BitNumericConversionExpr() {
- conversionTypeName = this.getTypeName() and
- (
- // anything lower than int32:
- conversionTypeName = ["int8", "int16"]
- or
- // anything lower than uint32:
- conversionTypeName = ["uint8", "uint16"]
- )
- }
-}
-
-/**
- * A type conversion expression towards a numeric type that has
- * a bit size equal to 8 bits.
- */
-class Lte8BitNumericConversionExpr extends Lte16BitNumericConversionExpr {
- Lte8BitNumericConversionExpr() {
- conversionTypeName = this.getTypeName() and
- (
- // anything lower than int16:
- conversionTypeName = "int8"
- or
- // anything lower than uint16:
- conversionTypeName = "uint8"
- )
- }
+ int getBitSize() { result = bitSize }
}
/**
@@ -106,33 +135,15 @@ class IfRelationalComparison extends IfStmt {
/**
* Flow of result of parsing a 64 bit number, to conversion to lower bit numbers.
*/
-class Lte64FlowConfig extends TaintTracking::Configuration, DataFlow::Configuration {
- Lte64FlowConfig() { this = "Lte64FlowConfig" }
+class Lt64BitFlowConfig extends TaintTracking::Configuration, DataFlow::Configuration {
+ Lt64BitFlowConfig() { this = "Lt64BitFlowConfig" }
override predicate isSource(DataFlow::Node source) {
- exists(Atoi atoi | source = atoi.getACall().getResult(0))
- or
- exists(ParseFloat parseFloat, DataFlow::CallNode call |
- call = parseFloat.getACall() and call.getArgument(1).getIntValue() = 64
- |
- source = call.getResult(0)
- )
- or
- exists(ParseInt parseInt, DataFlow::CallNode call |
- call = parseInt.getACall() and call.getArgument(2).getIntValue() = [0, 64]
- |
- source = call.getResult(0)
- )
- or
- exists(ParseUint parseUint, DataFlow::CallNode call |
- call = parseUint.getACall() and call.getArgument(2).getIntValue() = [0, 64]
- |
- source = call.getResult(0)
- )
+ exists(ParserCall call | call.getTargetBitSize() = [0, 64] | source = call)
}
override predicate isSink(DataFlow::Node sink) {
- exists(Lte32BitNumericConversionExpr conv | sink.asExpr() = conv)
+ exists(NumericConversionExpr conv | conv.getBitSize() = [32, 16, 8] | sink.asExpr() = conv)
}
override predicate isSanitizerIn(DataFlow::Node node) { isSanitizedInsideAnIfBoundCheck(node) }
@@ -141,31 +152,15 @@ class Lte64FlowConfig extends TaintTracking::Configuration, DataFlow::Configurat
/**
* Flow of result of parsing a 32 bit number, to conversion to lower bit numbers.
*/
-class Lte32FlowConfig extends TaintTracking::Configuration, DataFlow::Configuration {
- Lte32FlowConfig() { this = "Lte32FlowConfig" }
+class Lt32BitFlowConfig extends TaintTracking::Configuration, DataFlow::Configuration {
+ Lt32BitFlowConfig() { this = "Lt32BitFlowConfig" }
override predicate isSource(DataFlow::Node source) {
- exists(ParseFloat parseFloat, DataFlow::CallNode call |
- call = parseFloat.getACall() and call.getArgument(1).getIntValue() = 32
- |
- source = call.getResult(0)
- )
- or
- exists(ParseInt parseInt, DataFlow::CallNode call |
- call = parseInt.getACall() and call.getArgument(2).getIntValue() = 32
- |
- source = call.getResult(0)
- )
- or
- exists(ParseUint parseUint, DataFlow::CallNode call |
- call = parseUint.getACall() and call.getArgument(2).getIntValue() = 32
- |
- source = call.getResult(0)
- )
+ exists(ParserCall call | call.getTargetBitSize() = [/*0,*/ 32] | source = call)
}
override predicate isSink(DataFlow::Node sink) {
- exists(Lte16BitNumericConversionExpr conv | sink.asExpr() = conv)
+ exists(NumericConversionExpr conv | conv.getBitSize() = [16, 8] | sink.asExpr() = conv)
}
override predicate isSanitizerIn(DataFlow::Node node) { isSanitizedInsideAnIfBoundCheck(node) }
@@ -174,25 +169,15 @@ class Lte32FlowConfig extends TaintTracking::Configuration, DataFlow::Configurat
/**
* Flow of result of parsing a 16 bit number, to conversion to lower bit numbers.
*/
-class Lte16FlowConfig extends TaintTracking::Configuration, DataFlow::Configuration {
- Lte16FlowConfig() { this = "Lte16FlowConfig" }
+class Lt16BitFlowConfig extends TaintTracking::Configuration, DataFlow::Configuration {
+ Lt16BitFlowConfig() { this = "Lt16BitFlowConfig" }
override predicate isSource(DataFlow::Node source) {
- exists(ParseInt parseInt, DataFlow::CallNode call |
- call = parseInt.getACall() and call.getArgument(2).getIntValue() = 16
- |
- source = call.getResult(0)
- )
- or
- exists(ParseUint parseUint, DataFlow::CallNode call |
- call = parseUint.getACall() and call.getArgument(2).getIntValue() = 16
- |
- source = call.getResult(0)
- )
+ exists(ParserCall call | call.getTargetBitSize() = 16 | source = call)
}
override predicate isSink(DataFlow::Node sink) {
- exists(Lte8BitNumericConversionExpr conv | sink.asExpr() = conv)
+ exists(NumericConversionExpr conv | conv.getBitSize() = 8 | sink.asExpr() = conv)
}
override predicate isSanitizerIn(DataFlow::Node node) { isSanitizedInsideAnIfBoundCheck(node) }
@@ -203,15 +188,14 @@ class Lte16FlowConfig extends TaintTracking::Configuration, DataFlow::Configurat
* the `if` condition contains an upper bound check on the conversion operand.
*/
predicate isSanitizedInsideAnIfBoundCheck(DataFlow::Node node) {
- exists(IfRelationalComparison comp, Lte32BitNumericConversionExpr conv |
- // NOTE: using Lte32BitNumericConversionExpr because it also catches
- // any lower bit conversions.
- conv = node.asExpr().(Lte32BitNumericConversionExpr) and
+ exists(IfRelationalComparison comp, NumericConversionExpr conv |
+ conv = node.asExpr().(NumericConversionExpr) and
+ conv.getBitSize() = [8, 16, 32] and
comp.getThen().getAChild*() = conv and
(
// If the conversion is inside an `if` block that compares the source as
// `source > 0` or `source >= 0`, then that sanitizes conversion of int to int32;
- conv.getTypeName() = "int32" and
+ conv.getFullTypeName() = "int32" and
comp.getComparison().getLesserOperand().getNumericValue() = 0 and
comp.getComparison().getGreaterOperand().getGlobalValueNumber() =
conv.getOperand().getGlobalValueNumber()
@@ -244,9 +228,9 @@ int getMaxUint16() { result = 2.pow(16) - 1 }
 * the greater operand is equal to `value`, and the lesser operand is the conversion operand.
*/
predicate comparisonGreaterOperandValueIsEqual(
- string typeName, IfRelationalComparison ifExpr, Lte32BitNumericConversionExpr conv, int value
+ string typeName, IfRelationalComparison ifExpr, NumericConversionExpr conv, int value
) {
- conv.getTypeName() = typeName and
+ conv.getFullTypeName() = typeName and
(
// exclude cases like: if parsed < math.MaxInt8 {return int8(parsed)}
exists(RelationalComparisonExpr comp | comp = ifExpr.getComparison() |
@@ -268,23 +252,18 @@ predicate comparisonGreaterOperandValueIsEqual(
)
}
-string getParserQualifiedNameFromResultType(string resultTypeName) {
- resultTypeName = "int" and result = "strconv.Atoi"
+string formatBitSize(ParserCall call) {
+ call.getTargetBitSize() = 0 and result = "(arch-dependent)"
or
- resultTypeName = "int64" and result = "strconv.ParseInt"
- or
- resultTypeName = "uint64" and result = "strconv.ParseUint"
- or
- resultTypeName = "float64" and result = "strconv.ParseFloat"
+ call.getTargetBitSize() > 0 and result = call.getTargetBitSize().toString()
}
from DataFlow::PathNode source, DataFlow::PathNode sink
where
- exists(Lte64FlowConfig cfg | cfg.hasFlowPath(source, sink)) or
- exists(Lte32FlowConfig cfg | cfg.hasFlowPath(source, sink)) or
- exists(Lte16FlowConfig cfg | cfg.hasFlowPath(source, sink))
-select source, source, sink,
- "Incorrect type conversion of " + source.getNode().getType() + " from " +
- getParserQualifiedNameFromResultType(source.getNode().getType().toString()) +
- " result to a lower bit size type " +
- sink.getNode().asExpr().(Lte32BitNumericConversionExpr).getTypeName()
+ exists(Lt64BitFlowConfig cfg | cfg.hasFlowPath(source, sink)) or
+ exists(Lt32BitFlowConfig cfg | cfg.hasFlowPath(source, sink)) or
+ exists(Lt16BitFlowConfig cfg | cfg.hasFlowPath(source, sink))
+select source.getNode(), source, sink,
+ "Incorrect conversion of a " + formatBitSize(source.getNode().(ParserCall)) + "-bit number from " +
+ source.getNode().(ParserCall).getParserName() + " result to a lower bit size type " +
+ sink.getNode().asExpr().(NumericConversionExpr).getFullTypeName()
diff --git a/ql/test/experimental/CWE-681/IncorrectNumericConversion.expected b/ql/test/experimental/CWE-681/IncorrectNumericConversion.expected
index 4cc0102be01..64aa194012e 100644
--- a/ql/test/experimental/CWE-681/IncorrectNumericConversion.expected
+++ b/ql/test/experimental/CWE-681/IncorrectNumericConversion.expected
@@ -1,83 +1,83 @@
edges
-| IncorrectNumericConversion.go:26:2:26:28 | ... := ...[0] : int | IncorrectNumericConversion.go:35:41:35:50 | type conversion |
-| IncorrectNumericConversion.go:53:3:53:47 | ... := ...[0] : float64 | IncorrectNumericConversion.go:57:7:57:19 | type conversion |
-| IncorrectNumericConversion.go:60:3:60:47 | ... := ...[0] : float64 | IncorrectNumericConversion.go:64:7:64:19 | type conversion |
-| IncorrectNumericConversion.go:69:3:69:49 | ... := ...[0] : int64 | IncorrectNumericConversion.go:73:7:73:18 | type conversion |
-| IncorrectNumericConversion.go:76:3:76:49 | ... := ...[0] : int64 | IncorrectNumericConversion.go:80:7:80:19 | type conversion |
-| IncorrectNumericConversion.go:83:3:83:49 | ... := ...[0] : int64 | IncorrectNumericConversion.go:87:7:87:19 | type conversion |
-| IncorrectNumericConversion.go:90:3:90:48 | ... := ...[0] : int64 | IncorrectNumericConversion.go:94:7:94:19 | type conversion |
-| IncorrectNumericConversion.go:99:3:99:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:103:7:103:18 | type conversion |
-| IncorrectNumericConversion.go:106:3:106:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:110:7:110:19 | type conversion |
-| IncorrectNumericConversion.go:113:3:113:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:117:7:117:19 | type conversion |
-| IncorrectNumericConversion.go:120:3:120:49 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:124:7:124:19 | type conversion |
-| IncorrectNumericConversion.go:208:3:208:36 | ... := ...[0] : int | IncorrectNumericConversion.go:212:7:212:18 | type conversion |
-| IncorrectNumericConversion.go:215:3:215:36 | ... := ...[0] : int | IncorrectNumericConversion.go:219:7:219:19 | type conversion |
-| IncorrectNumericConversion.go:222:3:222:36 | ... := ...[0] : int | IncorrectNumericConversion.go:226:7:226:19 | type conversion |
-| IncorrectNumericConversion.go:229:3:229:36 | ... := ...[0] : int | IncorrectNumericConversion.go:233:7:233:19 | type conversion |
-| IncorrectNumericConversion.go:236:3:236:36 | ... := ...[0] : int | IncorrectNumericConversion.go:240:7:240:20 | type conversion |
-| IncorrectNumericConversion.go:243:3:243:36 | ... := ...[0] : int | IncorrectNumericConversion.go:247:7:247:20 | type conversion |
-| IncorrectNumericConversion.go:250:3:250:36 | ... := ...[0] : int | IncorrectNumericConversion.go:254:7:254:21 | type conversion |
-| IncorrectNumericConversion.go:257:3:257:36 | ... := ...[0] : int | IncorrectNumericConversion.go:262:7:262:18 | type conversion |
-| IncorrectNumericConversion.go:266:3:266:36 | ... := ...[0] : int | IncorrectNumericConversion.go:270:7:270:23 | type conversion |
+| IncorrectNumericConversion.go:26:14:26:28 | call to Atoi : tuple type | IncorrectNumericConversion.go:35:41:35:50 | type conversion |
+| IncorrectNumericConversion.go:53:18:53:47 | call to ParseFloat : tuple type | IncorrectNumericConversion.go:57:7:57:19 | type conversion |
+| IncorrectNumericConversion.go:60:18:60:47 | call to ParseFloat : tuple type | IncorrectNumericConversion.go:64:7:64:19 | type conversion |
+| IncorrectNumericConversion.go:69:18:69:49 | call to ParseInt : tuple type | IncorrectNumericConversion.go:73:7:73:18 | type conversion |
+| IncorrectNumericConversion.go:76:18:76:49 | call to ParseInt : tuple type | IncorrectNumericConversion.go:80:7:80:19 | type conversion |
+| IncorrectNumericConversion.go:83:18:83:49 | call to ParseInt : tuple type | IncorrectNumericConversion.go:87:7:87:19 | type conversion |
+| IncorrectNumericConversion.go:90:18:90:48 | call to ParseInt : tuple type | IncorrectNumericConversion.go:94:7:94:19 | type conversion |
+| IncorrectNumericConversion.go:99:18:99:50 | call to ParseUint : tuple type | IncorrectNumericConversion.go:103:7:103:18 | type conversion |
+| IncorrectNumericConversion.go:106:18:106:50 | call to ParseUint : tuple type | IncorrectNumericConversion.go:110:7:110:19 | type conversion |
+| IncorrectNumericConversion.go:113:18:113:50 | call to ParseUint : tuple type | IncorrectNumericConversion.go:117:7:117:19 | type conversion |
+| IncorrectNumericConversion.go:120:18:120:49 | call to ParseUint : tuple type | IncorrectNumericConversion.go:124:7:124:19 | type conversion |
+| IncorrectNumericConversion.go:208:18:208:36 | call to Atoi : tuple type | IncorrectNumericConversion.go:212:7:212:18 | type conversion |
+| IncorrectNumericConversion.go:215:18:215:36 | call to Atoi : tuple type | IncorrectNumericConversion.go:219:7:219:19 | type conversion |
+| IncorrectNumericConversion.go:222:18:222:36 | call to Atoi : tuple type | IncorrectNumericConversion.go:226:7:226:19 | type conversion |
+| IncorrectNumericConversion.go:229:18:229:36 | call to Atoi : tuple type | IncorrectNumericConversion.go:233:7:233:19 | type conversion |
+| IncorrectNumericConversion.go:236:18:236:36 | call to Atoi : tuple type | IncorrectNumericConversion.go:240:7:240:20 | type conversion |
+| IncorrectNumericConversion.go:243:18:243:36 | call to Atoi : tuple type | IncorrectNumericConversion.go:247:7:247:20 | type conversion |
+| IncorrectNumericConversion.go:250:18:250:36 | call to Atoi : tuple type | IncorrectNumericConversion.go:254:7:254:21 | type conversion |
+| IncorrectNumericConversion.go:257:18:257:36 | call to Atoi : tuple type | IncorrectNumericConversion.go:262:7:262:18 | type conversion |
+| IncorrectNumericConversion.go:266:18:266:36 | call to Atoi : tuple type | IncorrectNumericConversion.go:270:7:270:23 | type conversion |
nodes
-| IncorrectNumericConversion.go:26:2:26:28 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:26:14:26:28 | call to Atoi : tuple type | semmle.label | call to Atoi : tuple type |
| IncorrectNumericConversion.go:35:41:35:50 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:53:3:53:47 | ... := ...[0] : float64 | semmle.label | ... := ...[0] : float64 |
+| IncorrectNumericConversion.go:53:18:53:47 | call to ParseFloat : tuple type | semmle.label | call to ParseFloat : tuple type |
| IncorrectNumericConversion.go:57:7:57:19 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:60:3:60:47 | ... := ...[0] : float64 | semmle.label | ... := ...[0] : float64 |
+| IncorrectNumericConversion.go:60:18:60:47 | call to ParseFloat : tuple type | semmle.label | call to ParseFloat : tuple type |
| IncorrectNumericConversion.go:64:7:64:19 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:69:3:69:49 | ... := ...[0] : int64 | semmle.label | ... := ...[0] : int64 |
+| IncorrectNumericConversion.go:69:18:69:49 | call to ParseInt : tuple type | semmle.label | call to ParseInt : tuple type |
| IncorrectNumericConversion.go:73:7:73:18 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:76:3:76:49 | ... := ...[0] : int64 | semmle.label | ... := ...[0] : int64 |
+| IncorrectNumericConversion.go:76:18:76:49 | call to ParseInt : tuple type | semmle.label | call to ParseInt : tuple type |
| IncorrectNumericConversion.go:80:7:80:19 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:83:3:83:49 | ... := ...[0] : int64 | semmle.label | ... := ...[0] : int64 |
+| IncorrectNumericConversion.go:83:18:83:49 | call to ParseInt : tuple type | semmle.label | call to ParseInt : tuple type |
| IncorrectNumericConversion.go:87:7:87:19 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:90:3:90:48 | ... := ...[0] : int64 | semmle.label | ... := ...[0] : int64 |
+| IncorrectNumericConversion.go:90:18:90:48 | call to ParseInt : tuple type | semmle.label | call to ParseInt : tuple type |
| IncorrectNumericConversion.go:94:7:94:19 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:99:3:99:50 | ... := ...[0] : uint64 | semmle.label | ... := ...[0] : uint64 |
+| IncorrectNumericConversion.go:99:18:99:50 | call to ParseUint : tuple type | semmle.label | call to ParseUint : tuple type |
| IncorrectNumericConversion.go:103:7:103:18 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:106:3:106:50 | ... := ...[0] : uint64 | semmle.label | ... := ...[0] : uint64 |
+| IncorrectNumericConversion.go:106:18:106:50 | call to ParseUint : tuple type | semmle.label | call to ParseUint : tuple type |
| IncorrectNumericConversion.go:110:7:110:19 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:113:3:113:50 | ... := ...[0] : uint64 | semmle.label | ... := ...[0] : uint64 |
+| IncorrectNumericConversion.go:113:18:113:50 | call to ParseUint : tuple type | semmle.label | call to ParseUint : tuple type |
| IncorrectNumericConversion.go:117:7:117:19 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:120:3:120:49 | ... := ...[0] : uint64 | semmle.label | ... := ...[0] : uint64 |
+| IncorrectNumericConversion.go:120:18:120:49 | call to ParseUint : tuple type | semmle.label | call to ParseUint : tuple type |
| IncorrectNumericConversion.go:124:7:124:19 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:208:3:208:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:208:18:208:36 | call to Atoi : tuple type | semmle.label | call to Atoi : tuple type |
| IncorrectNumericConversion.go:212:7:212:18 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:215:3:215:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:215:18:215:36 | call to Atoi : tuple type | semmle.label | call to Atoi : tuple type |
| IncorrectNumericConversion.go:219:7:219:19 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:222:3:222:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:222:18:222:36 | call to Atoi : tuple type | semmle.label | call to Atoi : tuple type |
| IncorrectNumericConversion.go:226:7:226:19 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:229:3:229:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:229:18:229:36 | call to Atoi : tuple type | semmle.label | call to Atoi : tuple type |
| IncorrectNumericConversion.go:233:7:233:19 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:236:3:236:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:236:18:236:36 | call to Atoi : tuple type | semmle.label | call to Atoi : tuple type |
| IncorrectNumericConversion.go:240:7:240:20 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:243:3:243:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:243:18:243:36 | call to Atoi : tuple type | semmle.label | call to Atoi : tuple type |
| IncorrectNumericConversion.go:247:7:247:20 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:250:3:250:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:250:18:250:36 | call to Atoi : tuple type | semmle.label | call to Atoi : tuple type |
| IncorrectNumericConversion.go:254:7:254:21 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:257:3:257:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:257:18:257:36 | call to Atoi : tuple type | semmle.label | call to Atoi : tuple type |
| IncorrectNumericConversion.go:262:7:262:18 | type conversion | semmle.label | type conversion |
-| IncorrectNumericConversion.go:266:3:266:36 | ... := ...[0] : int | semmle.label | ... := ...[0] : int |
+| IncorrectNumericConversion.go:266:18:266:36 | call to Atoi : tuple type | semmle.label | call to Atoi : tuple type |
| IncorrectNumericConversion.go:270:7:270:23 | type conversion | semmle.label | type conversion |
#select
-| IncorrectNumericConversion.go:26:2:26:28 | ... := ...[0] : int | IncorrectNumericConversion.go:26:2:26:28 | ... := ...[0] : int | IncorrectNumericConversion.go:35:41:35:50 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type int32 |
-| IncorrectNumericConversion.go:53:3:53:47 | ... := ...[0] : float64 | IncorrectNumericConversion.go:53:3:53:47 | ... := ...[0] : float64 | IncorrectNumericConversion.go:57:7:57:19 | type conversion | Incorrect type conversion of float64 from strconv.ParseFloat result to a lower bit size type int16 |
-| IncorrectNumericConversion.go:60:3:60:47 | ... := ...[0] : float64 | IncorrectNumericConversion.go:60:3:60:47 | ... := ...[0] : float64 | IncorrectNumericConversion.go:64:7:64:19 | type conversion | Incorrect type conversion of float64 from strconv.ParseFloat result to a lower bit size type int32 |
-| IncorrectNumericConversion.go:69:3:69:49 | ... := ...[0] : int64 | IncorrectNumericConversion.go:69:3:69:49 | ... := ...[0] : int64 | IncorrectNumericConversion.go:73:7:73:18 | type conversion | Incorrect type conversion of int64 from strconv.ParseInt result to a lower bit size type int8 |
-| IncorrectNumericConversion.go:76:3:76:49 | ... := ...[0] : int64 | IncorrectNumericConversion.go:76:3:76:49 | ... := ...[0] : int64 | IncorrectNumericConversion.go:80:7:80:19 | type conversion | Incorrect type conversion of int64 from strconv.ParseInt result to a lower bit size type int16 |
-| IncorrectNumericConversion.go:83:3:83:49 | ... := ...[0] : int64 | IncorrectNumericConversion.go:83:3:83:49 | ... := ...[0] : int64 | IncorrectNumericConversion.go:87:7:87:19 | type conversion | Incorrect type conversion of int64 from strconv.ParseInt result to a lower bit size type int32 |
-| IncorrectNumericConversion.go:90:3:90:48 | ... := ...[0] : int64 | IncorrectNumericConversion.go:90:3:90:48 | ... := ...[0] : int64 | IncorrectNumericConversion.go:94:7:94:19 | type conversion | Incorrect type conversion of int64 from strconv.ParseInt result to a lower bit size type int32 |
-| IncorrectNumericConversion.go:99:3:99:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:99:3:99:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:103:7:103:18 | type conversion | Incorrect type conversion of uint64 from strconv.ParseUint result to a lower bit size type int8 |
-| IncorrectNumericConversion.go:106:3:106:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:106:3:106:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:110:7:110:19 | type conversion | Incorrect type conversion of uint64 from strconv.ParseUint result to a lower bit size type int16 |
-| IncorrectNumericConversion.go:113:3:113:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:113:3:113:50 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:117:7:117:19 | type conversion | Incorrect type conversion of uint64 from strconv.ParseUint result to a lower bit size type int32 |
-| IncorrectNumericConversion.go:120:3:120:49 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:120:3:120:49 | ... := ...[0] : uint64 | IncorrectNumericConversion.go:124:7:124:19 | type conversion | Incorrect type conversion of uint64 from strconv.ParseUint result to a lower bit size type int32 |
-| IncorrectNumericConversion.go:208:3:208:36 | ... := ...[0] : int | IncorrectNumericConversion.go:208:3:208:36 | ... := ...[0] : int | IncorrectNumericConversion.go:212:7:212:18 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type int8 |
-| IncorrectNumericConversion.go:215:3:215:36 | ... := ...[0] : int | IncorrectNumericConversion.go:215:3:215:36 | ... := ...[0] : int | IncorrectNumericConversion.go:219:7:219:19 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type int16 |
-| IncorrectNumericConversion.go:222:3:222:36 | ... := ...[0] : int | IncorrectNumericConversion.go:222:3:222:36 | ... := ...[0] : int | IncorrectNumericConversion.go:226:7:226:19 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type int32 |
-| IncorrectNumericConversion.go:229:3:229:36 | ... := ...[0] : int | IncorrectNumericConversion.go:229:3:229:36 | ... := ...[0] : int | IncorrectNumericConversion.go:233:7:233:19 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type uint8 |
-| IncorrectNumericConversion.go:236:3:236:36 | ... := ...[0] : int | IncorrectNumericConversion.go:236:3:236:36 | ... := ...[0] : int | IncorrectNumericConversion.go:240:7:240:20 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type uint16 |
-| IncorrectNumericConversion.go:243:3:243:36 | ... := ...[0] : int | IncorrectNumericConversion.go:243:3:243:36 | ... := ...[0] : int | IncorrectNumericConversion.go:247:7:247:20 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type uint32 |
-| IncorrectNumericConversion.go:250:3:250:36 | ... := ...[0] : int | IncorrectNumericConversion.go:250:3:250:36 | ... := ...[0] : int | IncorrectNumericConversion.go:254:7:254:21 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type float32 |
-| IncorrectNumericConversion.go:257:3:257:36 | ... := ...[0] : int | IncorrectNumericConversion.go:257:3:257:36 | ... := ...[0] : int | IncorrectNumericConversion.go:262:7:262:18 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type uint8 |
-| IncorrectNumericConversion.go:266:3:266:36 | ... := ...[0] : int | IncorrectNumericConversion.go:266:3:266:36 | ... := ...[0] : int | IncorrectNumericConversion.go:270:7:270:23 | type conversion | Incorrect type conversion of int from strconv.Atoi result to a lower bit size type int16 |
+| IncorrectNumericConversion.go:26:14:26:28 | call to Atoi | IncorrectNumericConversion.go:26:14:26:28 | call to Atoi : tuple type | IncorrectNumericConversion.go:35:41:35:50 | type conversion | Incorrect conversion of a (arch-dependent)-bit number from strconv.Atoi result to a lower bit size type int32 |
+| IncorrectNumericConversion.go:53:18:53:47 | call to ParseFloat | IncorrectNumericConversion.go:53:18:53:47 | call to ParseFloat : tuple type | IncorrectNumericConversion.go:57:7:57:19 | type conversion | Incorrect conversion of a 32-bit number from strconv.ParseFloat result to a lower bit size type int16 |
+| IncorrectNumericConversion.go:60:18:60:47 | call to ParseFloat | IncorrectNumericConversion.go:60:18:60:47 | call to ParseFloat : tuple type | IncorrectNumericConversion.go:64:7:64:19 | type conversion | Incorrect conversion of a 64-bit number from strconv.ParseFloat result to a lower bit size type int32 |
+| IncorrectNumericConversion.go:69:18:69:49 | call to ParseInt | IncorrectNumericConversion.go:69:18:69:49 | call to ParseInt : tuple type | IncorrectNumericConversion.go:73:7:73:18 | type conversion | Incorrect conversion of a 16-bit number from strconv.ParseInt result to a lower bit size type int8 |
+| IncorrectNumericConversion.go:76:18:76:49 | call to ParseInt | IncorrectNumericConversion.go:76:18:76:49 | call to ParseInt : tuple type | IncorrectNumericConversion.go:80:7:80:19 | type conversion | Incorrect conversion of a 32-bit number from strconv.ParseInt result to a lower bit size type int16 |
+| IncorrectNumericConversion.go:83:18:83:49 | call to ParseInt | IncorrectNumericConversion.go:83:18:83:49 | call to ParseInt : tuple type | IncorrectNumericConversion.go:87:7:87:19 | type conversion | Incorrect conversion of a 64-bit number from strconv.ParseInt result to a lower bit size type int32 |
+| IncorrectNumericConversion.go:90:18:90:48 | call to ParseInt | IncorrectNumericConversion.go:90:18:90:48 | call to ParseInt : tuple type | IncorrectNumericConversion.go:94:7:94:19 | type conversion | Incorrect conversion of a (arch-dependent)-bit number from strconv.ParseInt result to a lower bit size type int32 |
+| IncorrectNumericConversion.go:99:18:99:50 | call to ParseUint | IncorrectNumericConversion.go:99:18:99:50 | call to ParseUint : tuple type | IncorrectNumericConversion.go:103:7:103:18 | type conversion | Incorrect conversion of a 16-bit number from strconv.ParseUint result to a lower bit size type int8 |
+| IncorrectNumericConversion.go:106:18:106:50 | call to ParseUint | IncorrectNumericConversion.go:106:18:106:50 | call to ParseUint : tuple type | IncorrectNumericConversion.go:110:7:110:19 | type conversion | Incorrect conversion of a 32-bit number from strconv.ParseUint result to a lower bit size type int16 |
+| IncorrectNumericConversion.go:113:18:113:50 | call to ParseUint | IncorrectNumericConversion.go:113:18:113:50 | call to ParseUint : tuple type | IncorrectNumericConversion.go:117:7:117:19 | type conversion | Incorrect conversion of a 64-bit number from strconv.ParseUint result to a lower bit size type int32 |
+| IncorrectNumericConversion.go:120:18:120:49 | call to ParseUint | IncorrectNumericConversion.go:120:18:120:49 | call to ParseUint : tuple type | IncorrectNumericConversion.go:124:7:124:19 | type conversion | Incorrect conversion of a (arch-dependent)-bit number from strconv.ParseUint result to a lower bit size type int32 |
+| IncorrectNumericConversion.go:208:18:208:36 | call to Atoi | IncorrectNumericConversion.go:208:18:208:36 | call to Atoi : tuple type | IncorrectNumericConversion.go:212:7:212:18 | type conversion | Incorrect conversion of a (arch-dependent)-bit number from strconv.Atoi result to a lower bit size type int8 |
+| IncorrectNumericConversion.go:215:18:215:36 | call to Atoi | IncorrectNumericConversion.go:215:18:215:36 | call to Atoi : tuple type | IncorrectNumericConversion.go:219:7:219:19 | type conversion | Incorrect conversion of a (arch-dependent)-bit number from strconv.Atoi result to a lower bit size type int16 |
+| IncorrectNumericConversion.go:222:18:222:36 | call to Atoi | IncorrectNumericConversion.go:222:18:222:36 | call to Atoi : tuple type | IncorrectNumericConversion.go:226:7:226:19 | type conversion | Incorrect conversion of a (arch-dependent)-bit number from strconv.Atoi result to a lower bit size type int32 |
+| IncorrectNumericConversion.go:229:18:229:36 | call to Atoi | IncorrectNumericConversion.go:229:18:229:36 | call to Atoi : tuple type | IncorrectNumericConversion.go:233:7:233:19 | type conversion | Incorrect conversion of a (arch-dependent)-bit number from strconv.Atoi result to a lower bit size type uint8 |
+| IncorrectNumericConversion.go:236:18:236:36 | call to Atoi | IncorrectNumericConversion.go:236:18:236:36 | call to Atoi : tuple type | IncorrectNumericConversion.go:240:7:240:20 | type conversion | Incorrect conversion of a (arch-dependent)-bit number from strconv.Atoi result to a lower bit size type uint16 |
+| IncorrectNumericConversion.go:243:18:243:36 | call to Atoi | IncorrectNumericConversion.go:243:18:243:36 | call to Atoi : tuple type | IncorrectNumericConversion.go:247:7:247:20 | type conversion | Incorrect conversion of a (arch-dependent)-bit number from strconv.Atoi result to a lower bit size type uint32 |
+| IncorrectNumericConversion.go:250:18:250:36 | call to Atoi | IncorrectNumericConversion.go:250:18:250:36 | call to Atoi : tuple type | IncorrectNumericConversion.go:254:7:254:21 | type conversion | Incorrect conversion of a (arch-dependent)-bit number from strconv.Atoi result to a lower bit size type float32 |
+| IncorrectNumericConversion.go:257:18:257:36 | call to Atoi | IncorrectNumericConversion.go:257:18:257:36 | call to Atoi : tuple type | IncorrectNumericConversion.go:262:7:262:18 | type conversion | Incorrect conversion of a (arch-dependent)-bit number from strconv.Atoi result to a lower bit size type uint8 |
+| IncorrectNumericConversion.go:266:18:266:36 | call to Atoi | IncorrectNumericConversion.go:266:18:266:36 | call to Atoi : tuple type | IncorrectNumericConversion.go:270:7:270:23 | type conversion | Incorrect conversion of a (arch-dependent)-bit number from strconv.Atoi result to a lower bit size type int16 |
From 836b8965e21db083a9375729df607bd7da5cf192 Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Thu, 30 Apr 2020 16:59:30 +0300
Subject: [PATCH 036/157] Beautify .qhelp file
---
.../CWE-681/IncorrectNumericConversion.qhelp | 67 +++++++++----------
1 file changed, 31 insertions(+), 36 deletions(-)
diff --git a/ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp b/ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp
index 2a693d1bc57..606131ce6d9 100644
--- a/ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp
+++ b/ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp
@@ -1,78 +1,73 @@
+"-//Semmle//qhelp//EN"
+"qhelp.dtd">
-
-If a numeric value string is parsed using strconv.Atoi into an int, and subsequently that int
-is converted into another type of a lower bit size, the result can produce unexpected values.
+ If a numeric value string is parsed using strconv.Atoi into an int, and subsequently that int
+ is converted into another type of a lower bit size, the result can produce unexpected values.
-This also applie to the results of strconv.ParseFloat, strconv.ParseInt,
-and strconv.ParseUint when the specified bit size is higher than the bit size of the
-type that number is converted to.
+ This also applies to the results of strconv.ParseFloat, strconv.ParseInt,
+ and strconv.ParseUint when the specified bit size is higher than the bit size of the
+ type that number is converted to.
-
-If you need to parse numeric values with specific bit sizes, avoid strconv.Atoi, and, instead,
-use the functions specific to each type (strconv.ParseFloat, strconv.ParseInt,
-strconv.ParseUint) that also allow to specify the wanted bit size.
+ If you need to parse numeric values with specific bit sizes, avoid strconv.Atoi, and, instead,
+ use the functions specific to each type (strconv.ParseFloat, strconv.ParseInt,
+ strconv.ParseUint) that also allow to specify the wanted bit size.
-When using those functions, be careful to not convert the result to another type with a lower bit size than
-the bit size you specified when parsing the number.
+ When using those functions, be careful to not convert the result to another type with a lower bit size than
+ the bit size you specified when parsing the number.
-If this is not possible, then add upper (and lower) bound checks specific to each type and
-bit size (you can find the min and max value for each type in the `math` package).
+ If this is not possible, then add upper (and lower) bound checks specific to each type and
+ bit size (you can find the min and max value for each type in the `math` package).
-
-In the first example, assume that an input string is passed to parseAllocateBad1 function,
-parsed by strconv.Atoi, and then converted into an int32 type:
+ In the first example, assume that an input string is passed to parseAllocateBad1 function,
+ parsed by strconv.Atoi, and then converted into an int32 type:
-The bounds are not checked, so this means that if the provided number is greater than max int32,
-the resulting value from the conversion will be different from the actual provided value.
+ The bounds are not checked, so this means that if the provided number is greater than max int32,
+ the resulting value from the conversion will be different from the actual provided value.
-To avoid unexpected values, you should either use the other functions provided by the strconv
-package to parse the specific types and bit sizes; in this case, strconv.ParseInt as you
-can see in parseAllocateGood2 function; or check bounds as in parseAllocateGood1
-function.
+ To avoid unexpected values, you should either use the other functions provided by the strconv
+ package to parse the specific types and bit sizes; in this case, strconv.ParseInt as you
+ can see in parseAllocateGood2 function; or check bounds as in parseAllocateGood1
+ function.
-
-In the second example, assume that an input string is passed to parseAllocateBad2 function,
-parsed by strconv.ParseInt with a bit size set to 64, and then converted into an int32 type:
+ In the second example, assume that an input string is passed to parseAllocateBad2 function,
+ parsed by strconv.ParseInt with a bit size set to 64, and then converted into an int32 type:
-If the provided number is greater than max int32, the resulting value from the conversion will be
-different from the actual provided value.
+ If the provided number is greater than max int32, the resulting value from the conversion will be
+ different from the actual provided value.
-To avoid unexpected values, you should specify the correct bit size as in parseAllocateGood3;
-or check bounds before making the conversion as in parseAllocateGood4.
+ To avoid unexpected values, you should specify the correct bit size as in parseAllocateGood3;
+ or check bounds before making the conversion as in parseAllocateGood4.
-
-mitre.org: CWE-681: Incorrect Conversion between Numeric Types.
+ mitre.org: CWE-681: Incorrect Conversion between Numeric Types.
-mitre.org: CWE-190: Integer Overflow or Wraparound.
+ mitre.org: CWE-190: Integer Overflow or Wraparound.
-
+
\ No newline at end of file
From cd1d699208cb0056f6c759586deb39111d4a62af Mon Sep 17 00:00:00 2001
From: Sauyon Lee
Date: Thu, 30 Apr 2020 23:13:16 -0700
Subject: [PATCH 037/157] Improve BadRedirectCheck query
We now look for a path from the variable being checked to a redirect.
Additionally, several sources of false positives have been eliminated, and a model of relevant parts of the Macaron framework has been added.
---
change-notes/1.25/analysis-go.md | 17 +
change-notes/1.25/extractor-go.md | 5 +
ql/src/Security/CWE-601/BadRedirectCheck.ql | 160 ++++-
ql/src/go.qll | 1 +
ql/src/semmle/go/frameworks/Macaron.qll | 25 +
.../go/frameworks/Macaron/Redirect.expected | 2 +
.../semmle/go/frameworks/Macaron/Redirect.ql | 4 +
.../semmle/go/frameworks/Macaron/go.mod | 5 +
.../semmle/go/frameworks/Macaron/main.go | 23 +
.../vendor/gopkg.in/macaron.v1/LICENSE | 191 ++++++
.../vendor/gopkg.in/macaron.v1/stub.go | 580 ++++++++++++++++++
.../go/frameworks/Macaron/vendor/modules.txt | 3 +
.../BadRedirectCheck.expected | 50 +-
.../BadRedirectCheck/BadRedirectCheckGood.go | 2 +-
.../Security/CWE-601/BadRedirectCheck/cves.go | 16 +-
.../Security/CWE-601/BadRedirectCheck/main.go | 71 ++-
16 files changed, 1128 insertions(+), 27 deletions(-)
create mode 100644 change-notes/1.25/analysis-go.md
create mode 100644 change-notes/1.25/extractor-go.md
create mode 100644 ql/src/semmle/go/frameworks/Macaron.qll
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Macaron/Redirect.expected
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Macaron/Redirect.ql
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Macaron/go.mod
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Macaron/main.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Macaron/vendor/gopkg.in/macaron.v1/LICENSE
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Macaron/vendor/gopkg.in/macaron.v1/stub.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Macaron/vendor/modules.txt
diff --git a/change-notes/1.25/analysis-go.md b/change-notes/1.25/analysis-go.md
new file mode 100644
index 00000000000..18c4a67dc40
--- /dev/null
+++ b/change-notes/1.25/analysis-go.md
@@ -0,0 +1,17 @@
+# Improvements to Go analysis
+
+## General improvements
+
+* A model for the Macaron HTTP library's `Context.Redirect` function was added.
+
+## New queries
+
+| **Query** | **Tags** | **Purpose** |
+|------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+## Changes to existing queries
+
+| **Query** | **Expected impact** | **Change** |
+|----------------------------------------------|-----------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Bad redirect check (`go/bad-redirect-check`) | More accurate results | The query now checks for a use of the value checked by the result in a redirect call, and no longer uses names as a heuristic for whether the checked value is a URL. |
+
diff --git a/change-notes/1.25/extractor-go.md b/change-notes/1.25/extractor-go.md
new file mode 100644
index 00000000000..bb1067715a2
--- /dev/null
+++ b/change-notes/1.25/extractor-go.md
@@ -0,0 +1,5 @@
+[[ condition: enterprise-only ]]
+
+# Improvements to Go analysis
+
+## Changes to code extraction
diff --git a/ql/src/Security/CWE-601/BadRedirectCheck.ql b/ql/src/Security/CWE-601/BadRedirectCheck.ql
index d42605bb86b..968b1ab2c13 100644
--- a/ql/src/Security/CWE-601/BadRedirectCheck.ql
+++ b/ql/src/Security/CWE-601/BadRedirectCheck.ql
@@ -3,8 +3,8 @@
* @description A redirect check that checks for a leading slash but not two
* leading slashes or a leading slash followed by a backslash is
* incomplete.
- * @kind problem
- * @problem.severity warning
+ * @kind path-problem
+ * @problem.severity error
* @id go/bad-redirect-check
* @tags security
* external/cwe/cwe-601
@@ -12,6 +12,8 @@
*/
import go
+import semmle.go.security.OpenUrlRedirectCustomizations
+import DataFlow::PathGraph
StringOps::HasPrefix checkForLeadingSlash(SsaWithFields v) {
exists(DataFlow::Node substr |
@@ -21,43 +23,163 @@ StringOps::HasPrefix checkForLeadingSlash(SsaWithFields v) {
)
}
-DataFlow::Node checkForSecondSlash(SsaWithFields v) {
- exists(StringOps::HasPrefix hp | result = hp and hp.getBaseString() = v.getAUse() |
+predicate isCheckedForSecondSlash(SsaWithFields v) {
+ exists(StringOps::HasPrefix hp | hp.getBaseString() = v.getAUse() |
hp.getSubstring().getStringValue() = "//"
)
or
exists(DataFlow::EqualityTestNode eq, DataFlow::Node slash, DataFlow::ElementReadNode er |
- result = eq
- |
slash.getStringValue() = "/" and
er.getBase() = v.getAUse() and
er.getIndex().getIntValue() = 1 and
eq.eq(_, er, slash)
)
+ or
+ // a call to path.Clean will strip away multiple leading slashes
+ isCleaned(v.getAUse())
}
-DataFlow::Node checkForSecondBackslash(SsaWithFields v) {
- exists(StringOps::HasPrefix hp | result = hp and hp.getBaseString() = v.getAUse() |
+/**
+ * Holds if `nd` is the result of a call to `path.Clean`, or flows into the first argument
+ * of such a call, possibly inter-procedurally.
+ */
+predicate isCleaned(DataFlow::Node nd) {
+ exists(Function clean | clean.hasQualifiedName("path", "Clean") |
+ nd = clean.getACall()
+ or
+ nd = clean.getACall().getArgument(0)
+ )
+ or
+ isCleaned(nd.getAPredecessor())
+ or
+ exists(FuncDef f, FunctionInput inp | nd = inp.getExitNode(f) |
+ forex(DataFlow::CallNode call | call.getACallee() = f | isCleaned(inp.getEntryNode(call)))
+ )
+}
+
+predicate isCheckedForSecondBackslash(SsaWithFields v) {
+ exists(StringOps::HasPrefix hp | hp.getBaseString() = v.getAUse() |
hp.getSubstring().getStringValue() = "/\\"
)
or
exists(DataFlow::EqualityTestNode eq, DataFlow::Node slash, DataFlow::ElementReadNode er |
- result = eq
- |
slash.getStringValue() = "\\" and
er.getBase() = v.getAUse() and
er.getIndex().getIntValue() = 1 and
eq.eq(_, er, slash)
)
+ or
+ // if this variable comes from or is a net/url.URL.Path, backslashes are most likely sanitized,
+ // as the parse functions turn them into "%5C"
+ urlPath(v.getAUse())
}
-from DataFlow::Node node, SsaWithFields v
+/**
+ * Holds if `nd` derives its value from the field `url.URL.Path`, possibly inter-procedurally.
+ */
+predicate urlPath(DataFlow::Node nd) {
+ exists(Field f |
+ f.hasQualifiedName("net/url", "URL", "Path") and
+ nd = f.getARead()
+ )
+ or
+ urlPath(nd.getAPredecessor())
+ or
+ exists(FuncDef f, FunctionInput inp | nd = inp.getExitNode(f) |
+ forex(DataFlow::CallNode call | call.getACallee() = f | urlPath(inp.getEntryNode(call)))
+ )
+}
+
+class Configuration extends TaintTracking::Configuration {
+ Configuration() { this = "BadRedirectCheck" }
+
+ override predicate isSource(DataFlow::Node source) { this.isSource(source, _) }
+
+ /**
+ * Holds if `source` is the first node that flows into a use of a variable that is checked by a
+ * bad redirect check `check`.
+ */
+ predicate isSource(DataFlow::Node source, DataFlow::Node check) {
+ exists(SsaWithFields v |
+ DataFlow::localFlow(source, v.getAUse()) and
+ not exists(source.getAPredecessor()) and
+ isBadRedirectCheckOrWrapper(check, v)
+ )
+ }
+
+ override predicate isAdditionalTaintStep(DataFlow::Node pred, DataFlow::Node succ) {
+ // this is very over-approximate, because most filtering is done by the isSource predicate
+ exists(Write w | w.writesField(succ, _, pred))
+ }
+
+ override predicate isSanitizerOut(DataFlow::Node node) {
+ // assume this value is safe if something is prepended to it.
+ exists(StringOps::Concatenation conc, int i, int j | i < j |
+ node = conc.getOperand(j) and
+ exists(conc.getOperand(i))
+ )
+ or
+ exists(DataFlow::CallNode call, int i | call.getTarget().hasQualifiedName("path", "Join") |
+ i > 0 and node = call.getArgument(i)
+ )
+ }
+
+ override predicate isSink(DataFlow::Node sink) { sink instanceof OpenUrlRedirect::Sink }
+}
+
+/**
+ * Holds if there is a check `check` that is a bad redirect check, and `v` is either
+ * checked directly by `check` or checked by a function that contains `check`.
+ */
+predicate isBadRedirectCheckOrWrapper(DataFlow::Node check, SsaWithFields v) {
+ isBadRedirectCheck(check, v)
+ or
+ exists(DataFlow::CallNode call, FuncDef f, FunctionInput input |
+ call = f.getACall() and
+ input.getEntryNode(call) = v.getAUse() and
+ isBadRedirectCheckWrapper(check, f, input)
+ )
+}
+
+/**
+ * Gets an SSA-with-fields variable that is similar to `v` in the sense that it has the same
+ * root variable and the same sequence of field accesses.
+ */
+SsaWithFields similar(SsaWithFields v) {
+ result.getBaseVariable().getSourceVariable() = v.getBaseVariable().getSourceVariable() and
+ result.getQualifiedName() = v.getQualifiedName()
+}
+
+/**
+ * Holds if `check` checks that `v` has a leading slash, but not whether it has another slash or a
+ * backslash in its second position.
+ */
+predicate isBadRedirectCheck(DataFlow::Node check, SsaWithFields v) {
+ // a check for a leading slash
+ check = checkForLeadingSlash(v) and
+ // where there does not exist a check for both a second slash and a second backslash
+ // (we allow those checks to be on variables that are most likely equivalent to `v`
+ // to rule out false positives due to minor variations in data flow)
+ not (
+ isCheckedForSecondSlash(similar(v)) and
+ isCheckedForSecondBackslash(similar(v))
+ )
+}
+
+/**
+ * Holds if `f` contains a bad redirect check `check`, that checks the parameter `input`.
+ */
+predicate isBadRedirectCheckWrapper(DataFlow::Node check, FuncDef f, FunctionInput input) {
+ exists(SsaWithFields v |
+ v.getAUse().getAPredecessor*() = input.getExitNode(f) and
+ isBadRedirectCheck(check, v)
+ )
+}
+
+from Configuration cfg, DataFlow::PathNode source, DataFlow::PathNode sink, DataFlow::Node check
where
- // there is a check for a leading slash
- node = checkForLeadingSlash(v) and
- // but not a check for both a second slash and a second backslash
- not (exists(checkForSecondSlash(v)) and exists(checkForSecondBackslash(v))) and
- v.getQualifiedName().regexpMatch("(?i).*url.*|.*redir.*|.*target.*")
-select node,
- "This expression checks '$@' for a leading slash but checks do not exist for both '/' and '\\' in the second position.",
- v, v.getQualifiedName()
+ cfg.isSource(source.getNode(), check) and
+ cfg.hasFlowPath(source, sink)
+select check, source, sink,
+ "This is a check that $@, which flows into a $@, has a leading slash, but not that it does not have '/' or '\\' in its second position.",
+ source.getNode(), "this value", sink.getNode(), "redirect"
diff --git a/ql/src/go.qll b/ql/src/go.qll
index f4f459f2e48..261ee7a827f 100644
--- a/ql/src/go.qll
+++ b/ql/src/go.qll
@@ -25,6 +25,7 @@ import semmle.go.dataflow.GlobalValueNumbering
import semmle.go.dataflow.TaintTracking
import semmle.go.dataflow.SSA
import semmle.go.frameworks.HTTP
+import semmle.go.frameworks.Macaron
import semmle.go.frameworks.SystemCommandExecutors
import semmle.go.frameworks.SQL
import semmle.go.frameworks.XPath
diff --git a/ql/src/semmle/go/frameworks/Macaron.qll b/ql/src/semmle/go/frameworks/Macaron.qll
new file mode 100644
index 00000000000..134e7a390bb
--- /dev/null
+++ b/ql/src/semmle/go/frameworks/Macaron.qll
@@ -0,0 +1,25 @@
+/**
+ * Provides classes for working with concepts relating to the Macaron web framework
+ */
+
+import go
+
+private module Macaron {
+ private class Context extends HTTP::ResponseWriter::Range {
+ Context() {
+ exists(Method m | m.hasQualifiedName("gopkg.in/macaron.v1", "Context", "Redirect") |
+ m = this.getType().getMethod("Redirect")
+ )
+ }
+ }
+
+ private class RedirectCall extends HTTP::Redirect::Range, DataFlow::MethodCallNode {
+ RedirectCall() {
+ this.getTarget().hasQualifiedName("gopkg.in/macaron.v1", "Context", "Redirect")
+ }
+
+ override DataFlow::Node getUrl() { result = this.getArgument(0) }
+
+ override HTTP::ResponseWriter getResponseWriter() { result.getARead() = this.getReceiver() }
+ }
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Macaron/Redirect.expected b/ql/test/library-tests/semmle/go/frameworks/Macaron/Redirect.expected
new file mode 100644
index 00000000000..45fd77beda2
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Macaron/Redirect.expected
@@ -0,0 +1,2 @@
+| main.go:15:2:15:25 | call to Redirect | main.go:14:12:14:14 | ctx |
+| main.go:19:2:19:25 | call to Redirect | main.go:18:13:18:15 | ctx |
diff --git a/ql/test/library-tests/semmle/go/frameworks/Macaron/Redirect.ql b/ql/test/library-tests/semmle/go/frameworks/Macaron/Redirect.ql
new file mode 100644
index 00000000000..813ad53f2e0
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Macaron/Redirect.ql
@@ -0,0 +1,4 @@
+import go
+
+from HTTP::Redirect redir
+select redir, redir.getResponseWriter()
diff --git a/ql/test/library-tests/semmle/go/frameworks/Macaron/go.mod b/ql/test/library-tests/semmle/go/frameworks/Macaron/go.mod
new file mode 100644
index 00000000000..157e3b7b1c6
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Macaron/go.mod
@@ -0,0 +1,5 @@
+module codeql-go-tests/frameworks/macaron
+
+go 1.14
+
+require gopkg.in/macaron.v1 v1.3.5
diff --git a/ql/test/library-tests/semmle/go/frameworks/Macaron/main.go b/ql/test/library-tests/semmle/go/frameworks/Macaron/main.go
new file mode 100644
index 00000000000..f47ec3c7074
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Macaron/main.go
@@ -0,0 +1,23 @@
+package main
+
+//go:generate depstubber -vendor gopkg.in/macaron.v1 Context
+
+import (
+ "gopkg.in/macaron.v1"
+)
+
+type EmbeddedContext struct {
+ *macaron.Context
+ foo string
+}
+
+func redir(ctx *macaron.Context) {
+ ctx.Redirect("/example")
+}
+
+func redir1(ctx *EmbeddedContext) {
+ ctx.Redirect("/example")
+}
+
+func main() {
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Macaron/vendor/gopkg.in/macaron.v1/LICENSE b/ql/test/library-tests/semmle/go/frameworks/Macaron/vendor/gopkg.in/macaron.v1/LICENSE
new file mode 100644
index 00000000000..c8a16eb2eb9
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Macaron/vendor/gopkg.in/macaron.v1/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright 2014 The Macaron Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Macaron/vendor/gopkg.in/macaron.v1/stub.go b/ql/test/library-tests/semmle/go/frameworks/Macaron/vendor/gopkg.in/macaron.v1/stub.go
new file mode 100644
index 00000000000..cac6e12385a
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Macaron/vendor/gopkg.in/macaron.v1/stub.go
@@ -0,0 +1,580 @@
+// Code generated by depstubber. DO NOT EDIT.
+// This is a simple stub for gopkg.in/macaron.v1, strictly for use in testing.
+
+// See the LICENSE file for information about the licensing of the original library.
+// Source: gopkg.in/macaron.v1 (exports: Context; functions: )
+
+// Package macaron is a stub of gopkg.in/macaron.v1, generated by depstubber.
+package macaron
+
+import (
+ context "context"
+ io "io"
+ multipart "mime/multipart"
+ http "net/http"
+ reflect "reflect"
+)
+
+type BeforeFunc func(ResponseWriter)
+
+type ComboRouter struct{}
+
+func (_ *ComboRouter) Delete(_ ...Handler) *ComboRouter {
+ return nil
+}
+
+func (_ *ComboRouter) Get(_ ...Handler) *ComboRouter {
+ return nil
+}
+
+func (_ *ComboRouter) Head(_ ...Handler) *ComboRouter {
+ return nil
+}
+
+func (_ *ComboRouter) Name(_ string) {}
+
+func (_ *ComboRouter) Options(_ ...Handler) *ComboRouter {
+ return nil
+}
+
+func (_ *ComboRouter) Patch(_ ...Handler) *ComboRouter {
+ return nil
+}
+
+func (_ *ComboRouter) Post(_ ...Handler) *ComboRouter {
+ return nil
+}
+
+func (_ *ComboRouter) Put(_ ...Handler) *ComboRouter {
+ return nil
+}
+
+type Context struct {
+ Injector interface{}
+ Router *Router
+ Req Request
+ Resp ResponseWriter
+ Render Render
+ Locale Locale
+ Data map[string]interface{}
+}
+
+func (_ Context) Any(_ string, _ ...Handler) *Route {
+ return nil
+}
+
+func (_ Context) Apply(_ interface{}) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ Context) Combo(_ string, _ ...Handler) *ComboRouter {
+ return nil
+}
+
+func (_ Context) Delete(_ string, _ ...Handler) *Route {
+ return nil
+}
+
+func (_ Context) Error(_ int, _ ...string) {}
+
+func (_ Context) Get(_ string, _ ...Handler) *Route {
+ return nil
+}
+
+func (_ Context) GetVal(_ reflect.Type) reflect.Value {
+ return reflect.Value{}
+}
+
+func (_ Context) Group(_ string, _ func(), _ ...Handler) {}
+
+func (_ Context) HTMLBytes(_ string, _ interface{}, _ ...HTMLOptions) ([]uint8, interface {
+ Error() string
+}) {
+ return nil, nil
+}
+
+func (_ Context) HTMLSetBytes(_ string, _ string, _ interface{}, _ ...HTMLOptions) ([]uint8, interface {
+ Error() string
+}) {
+ return nil, nil
+}
+
+func (_ Context) HTMLSetString(_ string, _ string, _ interface{}, _ ...HTMLOptions) (string, interface {
+ Error() string
+}) {
+ return "", nil
+}
+
+func (_ Context) HTMLString(_ string, _ interface{}, _ ...HTMLOptions) (string, interface {
+ Error() string
+}) {
+ return "", nil
+}
+
+func (_ Context) Handle(_ string, _ string, _ []Handler) *Route {
+ return nil
+}
+
+func (_ Context) HasTemplateSet(_ string) bool {
+ return false
+}
+
+func (_ Context) Head(_ string, _ ...Handler) *Route {
+ return nil
+}
+
+func (_ Context) Header() http.Header {
+ return nil
+}
+
+func (_ Context) InternalServerError(_ ...Handler) {}
+
+func (_ Context) Invoke(_ interface{}) ([]reflect.Value, interface {
+ Error() string
+}) {
+ return nil, nil
+}
+
+func (_ Context) JSON(_ int, _ interface{}) {}
+
+func (_ Context) JSONString(_ interface{}) (string, interface {
+ Error() string
+}) {
+ return "", nil
+}
+
+func (_ Context) Language() string {
+ return ""
+}
+
+func (_ Context) Map(_ interface{}) interface{} {
+ return nil
+}
+
+func (_ Context) MapTo(_ interface{}, _ interface{}) interface{} {
+ return nil
+}
+
+func (_ Context) NotFound(_ ...Handler) {}
+
+func (_ Context) Options(_ string, _ ...Handler) *Route {
+ return nil
+}
+
+func (_ Context) Patch(_ string, _ ...Handler) *Route {
+ return nil
+}
+
+func (_ Context) PlainText(_ int, _ []uint8) {}
+
+func (_ Context) Post(_ string, _ ...Handler) *Route {
+ return nil
+}
+
+func (_ Context) Put(_ string, _ ...Handler) *Route {
+ return nil
+}
+
+func (_ Context) RawData(_ int, _ []uint8) {}
+
+func (_ Context) Route(_ string, _ string, _ ...Handler) *Route {
+ return nil
+}
+
+func (_ Context) ServeHTTP(_ http.ResponseWriter, _ *http.Request) {}
+
+func (_ Context) Set(_ reflect.Type, _ reflect.Value) interface{} {
+ return nil
+}
+
+func (_ Context) SetAutoHead(_ bool) {}
+
+func (_ Context) SetHandlerWrapper(_ func(Handler) Handler) {}
+
+func (_ Context) SetParent(_ interface{}) {}
+
+func (_ Context) SetResponseWriter(_ http.ResponseWriter) {}
+
+func (_ Context) SetTemplatePath(_ string, _ string) {}
+
+func (_ Context) Status(_ int) {}
+
+func (_ Context) Tr(_ string, _ ...interface{}) string {
+ return ""
+}
+
+func (_ Context) URLFor(_ string, _ ...string) string {
+ return ""
+}
+
+func (_ Context) Write(_ []uint8) (int, interface {
+ Error() string
+}) {
+ return 0, nil
+}
+
+func (_ Context) WriteHeader(_ int) {}
+
+func (_ Context) XML(_ int, _ interface{}) {}
+
+func (_ *Context) ChangeStaticPath(_ string, _ string) {}
+
+func (_ *Context) GetCookie(_ string) string {
+ return ""
+}
+
+func (_ *Context) GetCookieFloat64(_ string) float64 {
+ return 0
+}
+
+func (_ *Context) GetCookieInt(_ string) int {
+ return 0
+}
+
+func (_ *Context) GetCookieInt64(_ string) int64 {
+ return 0
+}
+
+func (_ *Context) GetFile(_ string) (multipart.File, *multipart.FileHeader, interface {
+ Error() string
+}) {
+ return nil, nil, nil
+}
+
+func (_ *Context) GetSecureCookie(_ string) (string, bool) {
+ return "", false
+}
+
+func (_ *Context) GetSuperSecureCookie(_ string, _ string) (string, bool) {
+ return "", false
+}
+
+func (_ *Context) HTML(_ int, _ string, _ ...interface{}) {}
+
+func (_ *Context) HTMLSet(_ int, _ string, _ string, _ ...interface{}) {}
+
+func (_ *Context) Next() {}
+
+func (_ *Context) Params(_ string) string {
+ return ""
+}
+
+func (_ *Context) ParamsEscape(_ string) string {
+ return ""
+}
+
+func (_ *Context) ParamsFloat64(_ string) float64 {
+ return 0
+}
+
+func (_ *Context) ParamsInt(_ string) int {
+ return 0
+}
+
+func (_ *Context) ParamsInt64(_ string) int64 {
+ return 0
+}
+
+func (_ *Context) Query(_ string) string {
+ return ""
+}
+
+func (_ *Context) QueryBool(_ string) bool {
+ return false
+}
+
+func (_ *Context) QueryEscape(_ string) string {
+ return ""
+}
+
+func (_ *Context) QueryFloat64(_ string) float64 {
+ return 0
+}
+
+func (_ *Context) QueryInt(_ string) int {
+ return 0
+}
+
+func (_ *Context) QueryInt64(_ string) int64 {
+ return 0
+}
+
+func (_ *Context) QueryStrings(_ string) []string {
+ return nil
+}
+
+func (_ *Context) QueryTrim(_ string) string {
+ return ""
+}
+
+func (_ *Context) Redirect(_ string, _ ...int) {}
+
+func (_ *Context) RemoteAddr() string {
+ return ""
+}
+
+func (_ *Context) ReplaceAllParams(_ Params) {}
+
+func (_ *Context) SaveToFile(_ string, _ string) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Context) ServeContent(_ string, _ io.ReadSeeker, _ ...interface{}) {}
+
+func (_ *Context) ServeFile(_ string, _ ...string) {}
+
+func (_ *Context) ServeFileContent(_ string, _ ...string) {}
+
+func (_ *Context) SetCookie(_ string, _ string, _ ...interface{}) {}
+
+func (_ *Context) SetParams(_ string, _ string) {}
+
+func (_ *Context) SetSecureCookie(_ string, _ string, _ ...interface{}) {}
+
+func (_ *Context) SetSuperSecureCookie(_ string, _ string, _ string, _ ...interface{}) {}
+
+func (_ *Context) Written() bool {
+ return false
+}
+
+type HTMLOptions struct {
+ Layout string
+}
+
+type Handler interface{}
+
+type Locale interface {
+ Language() string
+ Tr(_ string, _ ...interface{}) string
+}
+
+type Params map[string]string
+
+type Render interface {
+ Error(_ int, _ ...string)
+ HTML(_ int, _ string, _ interface{}, _ ...HTMLOptions)
+ HTMLBytes(_ string, _ interface{}, _ ...HTMLOptions) ([]uint8, interface {
+ Error() string
+ })
+ HTMLSet(_ int, _ string, _ string, _ interface{}, _ ...HTMLOptions)
+ HTMLSetBytes(_ string, _ string, _ interface{}, _ ...HTMLOptions) ([]uint8, interface {
+ Error() string
+ })
+ HTMLSetString(_ string, _ string, _ interface{}, _ ...HTMLOptions) (string, interface {
+ Error() string
+ })
+ HTMLString(_ string, _ interface{}, _ ...HTMLOptions) (string, interface {
+ Error() string
+ })
+ HasTemplateSet(_ string) bool
+ Header() http.Header
+ JSON(_ int, _ interface{})
+ JSONString(_ interface{}) (string, interface {
+ Error() string
+ })
+ PlainText(_ int, _ []uint8)
+ RawData(_ int, _ []uint8)
+ SetResponseWriter(_ http.ResponseWriter)
+ SetTemplatePath(_ string, _ string)
+ Status(_ int)
+ Write(_ []uint8) (int, interface {
+ Error() string
+ })
+ WriteHeader(_ int)
+ XML(_ int, _ interface{})
+}
+
+type Request struct {
+ Request *http.Request
+}
+
+func (_ Request) AddCookie(_ *http.Cookie) {}
+
+func (_ Request) BasicAuth() (string, string, bool) {
+ return "", "", false
+}
+
+func (_ Request) Clone(_ context.Context) *http.Request {
+ return nil
+}
+
+func (_ Request) Context() context.Context {
+ return nil
+}
+
+func (_ Request) Cookie(_ string) (*http.Cookie, interface {
+ Error() string
+}) {
+ return nil, nil
+}
+
+func (_ Request) Cookies() []*http.Cookie {
+ return nil
+}
+
+func (_ Request) FormFile(_ string) (multipart.File, *multipart.FileHeader, interface {
+ Error() string
+}) {
+ return nil, nil, nil
+}
+
+func (_ Request) FormValue(_ string) string {
+ return ""
+}
+
+func (_ Request) MultipartReader() (*multipart.Reader, interface {
+ Error() string
+}) {
+ return nil, nil
+}
+
+func (_ Request) ParseForm() interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ Request) ParseMultipartForm(_ int64) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ Request) PostFormValue(_ string) string {
+ return ""
+}
+
+func (_ Request) ProtoAtLeast(_ int, _ int) bool {
+ return false
+}
+
+func (_ Request) Referer() string {
+ return ""
+}
+
+func (_ Request) SetBasicAuth(_ string, _ string) {}
+
+func (_ Request) UserAgent() string {
+ return ""
+}
+
+func (_ Request) WithContext(_ context.Context) *http.Request {
+ return nil
+}
+
+func (_ Request) Write(_ io.Writer) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ Request) WriteProxy(_ io.Writer) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Request) Body() *RequestBody {
+ return nil
+}
+
+type RequestBody struct{}
+
+func (_ *RequestBody) Bytes() ([]uint8, interface {
+ Error() string
+}) {
+ return nil, nil
+}
+
+func (_ *RequestBody) ReadCloser() io.ReadCloser {
+ return nil
+}
+
+func (_ *RequestBody) String() (string, interface {
+ Error() string
+}) {
+ return "", nil
+}
+
+type ResponseWriter interface {
+ Before(_ BeforeFunc)
+ Flush()
+ Header() http.Header
+ Size() int
+ Status() int
+ Write(_ []uint8) (int, interface {
+ Error() string
+ })
+ WriteHeader(_ int)
+ Written() bool
+}
+
+type Route struct{}
+
+func (_ *Route) Name(_ string) {}
+
+type Router struct{}
+
+func (_ *Router) Any(_ string, _ ...Handler) *Route {
+ return nil
+}
+
+func (_ *Router) Combo(_ string, _ ...Handler) *ComboRouter {
+ return nil
+}
+
+func (_ *Router) Delete(_ string, _ ...Handler) *Route {
+ return nil
+}
+
+func (_ *Router) Get(_ string, _ ...Handler) *Route {
+ return nil
+}
+
+func (_ *Router) Group(_ string, _ func(), _ ...Handler) {}
+
+func (_ *Router) Handle(_ string, _ string, _ []Handler) *Route {
+ return nil
+}
+
+func (_ *Router) Head(_ string, _ ...Handler) *Route {
+ return nil
+}
+
+func (_ *Router) InternalServerError(_ ...Handler) {}
+
+func (_ *Router) NotFound(_ ...Handler) {}
+
+func (_ *Router) Options(_ string, _ ...Handler) *Route {
+ return nil
+}
+
+func (_ *Router) Patch(_ string, _ ...Handler) *Route {
+ return nil
+}
+
+func (_ *Router) Post(_ string, _ ...Handler) *Route {
+ return nil
+}
+
+func (_ *Router) Put(_ string, _ ...Handler) *Route {
+ return nil
+}
+
+func (_ *Router) Route(_ string, _ string, _ ...Handler) *Route {
+ return nil
+}
+
+func (_ *Router) ServeHTTP(_ http.ResponseWriter, _ *http.Request) {}
+
+func (_ *Router) SetAutoHead(_ bool) {}
+
+func (_ *Router) SetHandlerWrapper(_ func(Handler) Handler) {}
+
+func (_ *Router) URLFor(_ string, _ ...string) string {
+ return ""
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Macaron/vendor/modules.txt b/ql/test/library-tests/semmle/go/frameworks/Macaron/vendor/modules.txt
new file mode 100644
index 00000000000..58e7025ec1a
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Macaron/vendor/modules.txt
@@ -0,0 +1,3 @@
+# gopkg.in/macaron.v1 v1.3.5
+## explicit
+gopkg.in/macaron.v1
diff --git a/ql/test/query-tests/Security/CWE-601/BadRedirectCheck/BadRedirectCheck.expected b/ql/test/query-tests/Security/CWE-601/BadRedirectCheck/BadRedirectCheck.expected
index e2830337608..8b27ffececd 100644
--- a/ql/test/query-tests/Security/CWE-601/BadRedirectCheck/BadRedirectCheck.expected
+++ b/ql/test/query-tests/Security/CWE-601/BadRedirectCheck/BadRedirectCheck.expected
@@ -1,5 +1,45 @@
-| BadRedirectCheck.go:4:23:4:37 | ...==... | This expression checks '$@' for a leading slash but checks do not exist for both '/' and '\\' in the second position. | BadRedirectCheck.go:3:18:3:22 | (def@3:18) | redir |
-| cves.go:11:26:11:38 | ...==... | This expression checks '$@' for a leading slash but checks do not exist for both '/' and '\\' in the second position. | cves.go:10:22:10:24 | (def@10:22) | url |
-| cves.go:22:6:22:37 | call to HasPrefix | This expression checks '$@' for a leading slash but checks do not exist for both '/' and '\\' in the second position. | cves.go:21:2:21:9 | (def@21:2) | redirect |
-| cves.go:29:6:29:37 | call to HasPrefix | This expression checks '$@' for a leading slash but checks do not exist for both '/' and '\\' in the second position. | cves.go:28:2:28:9 | (def@28:2) | redirect |
-| main.go:8:7:8:38 | call to HasPrefix | This expression checks '$@' for a leading slash but checks do not exist for both '/' and '\\' in the second position. | main.go:5:19:5:26 | (def@5:19) | redirect |
+edges
+| BadRedirectCheck.go:3:18:3:22 | argument corresponding to redir : string | BadRedirectCheck.go:5:10:5:14 | redir : string |
+| BadRedirectCheck.go:5:10:5:14 | redir : string | main.go:11:25:11:45 | call to sanitizeUrl |
+| cves.go:14:23:14:25 | argument corresponding to url : string | cves.go:16:26:16:28 | url |
+| cves.go:33:14:33:34 | call to Get : string | cves.go:37:25:37:32 | redirect |
+| cves.go:41:14:41:34 | call to Get : string | cves.go:45:25:45:32 | redirect |
+| main.go:10:18:10:25 | argument corresponding to redirect : string | main.go:11:37:11:44 | redirect : string |
+| main.go:11:37:11:44 | redirect : string | main.go:11:25:11:45 | call to sanitizeUrl |
+| main.go:32:24:32:26 | argument corresponding to url : string | main.go:34:26:34:28 | url |
+| main.go:68:17:68:24 | argument corresponding to redirect : string | main.go:73:9:73:28 | call to Clean : string |
+| main.go:73:9:73:28 | call to Clean : string | main.go:77:25:77:39 | call to getTarget1 |
+| main.go:76:19:76:21 | argument corresponding to url : string | main.go:77:36:77:38 | url : string |
+| main.go:77:36:77:38 | url : string | main.go:77:25:77:39 | call to getTarget1 |
+| main.go:87:9:87:14 | selection of Path : string | main.go:91:25:91:39 | call to getTarget2 |
+nodes
+| BadRedirectCheck.go:3:18:3:22 | argument corresponding to redir : string | semmle.label | argument corresponding to redir : string |
+| BadRedirectCheck.go:5:10:5:14 | redir : string | semmle.label | redir : string |
+| cves.go:14:23:14:25 | argument corresponding to url : string | semmle.label | argument corresponding to url : string |
+| cves.go:16:26:16:28 | url | semmle.label | url |
+| cves.go:33:14:33:34 | call to Get : string | semmle.label | call to Get : string |
+| cves.go:37:25:37:32 | redirect | semmle.label | redirect |
+| cves.go:41:14:41:34 | call to Get : string | semmle.label | call to Get : string |
+| cves.go:45:25:45:32 | redirect | semmle.label | redirect |
+| main.go:10:18:10:25 | argument corresponding to redirect : string | semmle.label | argument corresponding to redirect : string |
+| main.go:11:25:11:45 | call to sanitizeUrl | semmle.label | call to sanitizeUrl |
+| main.go:11:37:11:44 | redirect : string | semmle.label | redirect : string |
+| main.go:32:24:32:26 | argument corresponding to url : string | semmle.label | argument corresponding to url : string |
+| main.go:34:26:34:28 | url | semmle.label | url |
+| main.go:68:17:68:24 | argument corresponding to redirect : string | semmle.label | argument corresponding to redirect : string |
+| main.go:73:9:73:28 | call to Clean : string | semmle.label | call to Clean : string |
+| main.go:76:19:76:21 | argument corresponding to url : string | semmle.label | argument corresponding to url : string |
+| main.go:77:25:77:39 | call to getTarget1 | semmle.label | call to getTarget1 |
+| main.go:77:36:77:38 | url : string | semmle.label | url : string |
+| main.go:87:9:87:14 | selection of Path : string | semmle.label | selection of Path : string |
+| main.go:91:25:91:39 | call to getTarget2 | semmle.label | call to getTarget2 |
+#select
+| BadRedirectCheck.go:4:23:4:37 | ...==... | BadRedirectCheck.go:3:18:3:22 | argument corresponding to redir : string | main.go:11:25:11:45 | call to sanitizeUrl | This is a check that $@, which flows into a $@, has a leading slash, but not that it does not have '/' or '\\' in its second position. | BadRedirectCheck.go:3:18:3:22 | argument corresponding to redir | this value | main.go:11:25:11:45 | call to sanitizeUrl | redirect |
+| BadRedirectCheck.go:4:23:4:37 | ...==... | main.go:10:18:10:25 | argument corresponding to redirect : string | main.go:11:25:11:45 | call to sanitizeUrl | This is a check that $@, which flows into a $@, has a leading slash, but not that it does not have '/' or '\\' in its second position. | main.go:10:18:10:25 | argument corresponding to redirect | this value | main.go:11:25:11:45 | call to sanitizeUrl | redirect |
+| cves.go:11:26:11:38 | ...==... | cves.go:14:23:14:25 | argument corresponding to url : string | cves.go:16:26:16:28 | url | This is a check that $@, which flows into a $@, has a leading slash, but not that it does not have '/' or '\\' in its second position. | cves.go:14:23:14:25 | argument corresponding to url | this value | cves.go:16:26:16:28 | url | redirect |
+| cves.go:34:6:34:37 | call to HasPrefix | cves.go:33:14:33:34 | call to Get : string | cves.go:37:25:37:32 | redirect | This is a check that $@, which flows into a $@, has a leading slash, but not that it does not have '/' or '\\' in its second position. | cves.go:33:14:33:34 | call to Get | this value | cves.go:37:25:37:32 | redirect | redirect |
+| cves.go:42:6:42:37 | call to HasPrefix | cves.go:41:14:41:34 | call to Get : string | cves.go:45:25:45:32 | redirect | This is a check that $@, which flows into a $@, has a leading slash, but not that it does not have '/' or '\\' in its second position. | cves.go:41:14:41:34 | call to Get | this value | cves.go:45:25:45:32 | redirect | redirect |
+| main.go:25:7:25:38 | call to HasPrefix | main.go:32:24:32:26 | argument corresponding to url : string | main.go:34:26:34:28 | url | This is a check that $@, which flows into a $@, has a leading slash, but not that it does not have '/' or '\\' in its second position. | main.go:32:24:32:26 | argument corresponding to url | this value | main.go:34:26:34:28 | url | redirect |
+| main.go:69:5:69:22 | ...!=... | main.go:68:17:68:24 | argument corresponding to redirect : string | main.go:77:25:77:39 | call to getTarget1 | This is a check that $@, which flows into a $@, has a leading slash, but not that it does not have '/' or '\\' in its second position. | main.go:68:17:68:24 | argument corresponding to redirect | this value | main.go:77:25:77:39 | call to getTarget1 | redirect |
+| main.go:69:5:69:22 | ...!=... | main.go:76:19:76:21 | argument corresponding to url : string | main.go:77:25:77:39 | call to getTarget1 | This is a check that $@, which flows into a $@, has a leading slash, but not that it does not have '/' or '\\' in its second position. | main.go:76:19:76:21 | argument corresponding to url | this value | main.go:77:25:77:39 | call to getTarget1 | redirect |
+| main.go:83:5:83:20 | ...!=... | main.go:87:9:87:14 | selection of Path : string | main.go:91:25:91:39 | call to getTarget2 | This is a check that $@, which flows into a $@, has a leading slash, but not that it does not have '/' or '\\' in its second position. | main.go:87:9:87:14 | selection of Path | this value | main.go:91:25:91:39 | call to getTarget2 | redirect |
diff --git a/ql/test/query-tests/Security/CWE-601/BadRedirectCheck/BadRedirectCheckGood.go b/ql/test/query-tests/Security/CWE-601/BadRedirectCheck/BadRedirectCheckGood.go
index 89af5110a29..fe5fcbb2fde 100644
--- a/ql/test/query-tests/Security/CWE-601/BadRedirectCheck/BadRedirectCheckGood.go
+++ b/ql/test/query-tests/Security/CWE-601/BadRedirectCheck/BadRedirectCheckGood.go
@@ -1,6 +1,6 @@
package main
-func sanitizeUrl1(redir string) string {
+func sanitizeUrlGood(redir string) string {
if len(redir) > 1 && redir[0] == '/' && redir[1] != '/' && redir[1] != '\\' {
return redir
}
diff --git a/ql/test/query-tests/Security/CWE-601/BadRedirectCheck/cves.go b/ql/test/query-tests/Security/CWE-601/BadRedirectCheck/cves.go
index cd91df4d4a7..42e8bab3452 100644
--- a/ql/test/query-tests/Security/CWE-601/BadRedirectCheck/cves.go
+++ b/ql/test/query-tests/Security/CWE-601/BadRedirectCheck/cves.go
@@ -11,10 +11,22 @@ func isValidRedirect(url string) bool {
return len(url) >= 2 && url[0] == '/' && url[1] != '/' // NOT OK
}
-func isValidRedirect1(url string) bool {
+func alsoABadRedirect(url string, rw http.ResponseWriter, req *http.Request) {
+ if isValidRedirect(url) {
+ http.Redirect(rw, req, url, 302)
+ }
+}
+
+func isValidRedirectGood(url string) bool {
return len(url) >= 2 && url[0] == '/' && url[1] != '/' && url[1] != '\\' // OK
}
+func alsoAGoodRedirect(url string, rw http.ResponseWriter, req *http.Request) {
+ if isValidRedirectGood(url) {
+ http.Redirect(rw, req, url, 302)
+ }
+}
+
// CVE-2017-1000070 (both vulnerable!)
// Code from github.com/bitly/oauth2_proxy
func OAuthCallback(rw http.ResponseWriter, req *http.Request) {
@@ -22,6 +34,7 @@ func OAuthCallback(rw http.ResponseWriter, req *http.Request) {
if !strings.HasPrefix(redirect, "/") { // NOT OK
redirect = "/"
}
+ http.Redirect(rw, req, redirect, 302)
}
func OAuthCallback1(rw http.ResponseWriter, req *http.Request) {
@@ -29,4 +42,5 @@ func OAuthCallback1(rw http.ResponseWriter, req *http.Request) {
if !strings.HasPrefix(redirect, "/") || strings.HasPrefix(redirect, "//") { // NOT OK
redirect = "/"
}
+ http.Redirect(rw, req, redirect, 302)
}
diff --git a/ql/test/query-tests/Security/CWE-601/BadRedirectCheck/main.go b/ql/test/query-tests/Security/CWE-601/BadRedirectCheck/main.go
index 97b7b9c5f89..c76f9ec09cf 100644
--- a/ql/test/query-tests/Security/CWE-601/BadRedirectCheck/main.go
+++ b/ql/test/query-tests/Security/CWE-601/BadRedirectCheck/main.go
@@ -1,6 +1,23 @@
package main
-import "strings"
+import (
+ "net/http"
+ "net/url"
+ "path"
+ "strings"
+)
+
+func badRedirect(redirect string, rw http.ResponseWriter, req *http.Request) {
+ http.Redirect(rw, req, sanitizeUrl(redirect), 302)
+}
+
+func goodRedirect(redirect string, rw http.ResponseWriter, req *http.Request) {
+ http.Redirect(rw, req, sanitizeUrlGood(redirect), 302)
+}
+
+func goodRedirect2(url string, rw http.ResponseWriter, req *http.Request) {
+ http.Redirect(rw, req, path.Join("/", sanitizeUrl(url)), 302)
+}
func isValidRedir(redirect string) bool {
switch {
@@ -12,6 +29,12 @@ func isValidRedir(redirect string) bool {
}
}
+func alsoABadRedirect1(url string, rw http.ResponseWriter, req *http.Request) {
+ if isValidRedir(url) {
+ http.Redirect(rw, req, url, 302)
+ }
+}
+
func isValidRedir1(redirect string) bool {
switch {
// OK
@@ -21,3 +44,49 @@ func isValidRedir1(redirect string) bool {
return false
}
}
+
+func goodRedirect3(url string, rw http.ResponseWriter, req *http.Request) {
+ if isValidRedirectGood(url) {
+ http.Redirect(rw, req, url, 302)
+ }
+}
+
+func getTarget(redirect string) string {
+ u, _ := url.Parse(redirect)
+
+ if u.Path[0] != "/" {
+ return "/"
+ }
+
+ return path.Clean(u.Path)
+}
+
+func goodRedirect4(url string, rw http.ResponseWriter, req *http.Request) {
+ http.Redirect(rw, req, getTarget(url), 302)
+}
+
+func getTarget1(redirect string) string {
+ if redirect[0] != "/" {
+ return "/"
+ }
+
+ return path.Clean(redirect)
+}
+
+func badRedirect2(url string, rw http.ResponseWriter, req *http.Request) {
+ http.Redirect(rw, req, getTarget1(url), 302)
+}
+
+func getTarget2(redirect string) string {
+ u, _ := url.Parse(redirect)
+
+ if u.Path[0] != "/" {
+ return "/"
+ }
+
+ return u.Path
+}
+
+func badRedirect2(url string, rw http.ResponseWriter, req *http.Request) {
+ http.Redirect(rw, req, getTarget2(url), 302)
+}
From 980241603b00535607d1f7c81173fc792e686fa3 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Fri, 1 May 2020 07:57:13 +0100
Subject: [PATCH 038/157] Switch to new-style change notes.
---
change-notes/1.25/analysis-go.md | 17 -----------------
change-notes/1.25/extractor-go.md | 5 -----
change-notes/2020-05-01-bad-redirect-check.md | 2 ++
change-notes/2020-05-01-macaron-model.md | 2 ++
4 files changed, 4 insertions(+), 22 deletions(-)
delete mode 100644 change-notes/1.25/analysis-go.md
delete mode 100644 change-notes/1.25/extractor-go.md
create mode 100644 change-notes/2020-05-01-bad-redirect-check.md
create mode 100644 change-notes/2020-05-01-macaron-model.md
diff --git a/change-notes/1.25/analysis-go.md b/change-notes/1.25/analysis-go.md
deleted file mode 100644
index 18c4a67dc40..00000000000
--- a/change-notes/1.25/analysis-go.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# Improvements to Go analysis
-
-## General improvements
-
-* A model for the Macaron HTTP library's `Context.Redirect` function was added.
-
-## New queries
-
-| **Query** | **Tags** | **Purpose** |
-|------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-
-## Changes to existing queries
-
-| **Query** | **Expected impact** | **Change** |
-|----------------------------------------------|-----------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Bad redirect check (`go/bad-redirect-check`) | More accurate results | The query now checks for a use of the value checked by the result in a redirect call, and no longer uses names as a heuristic for whether the checked value is a URL. |
-
diff --git a/change-notes/1.25/extractor-go.md b/change-notes/1.25/extractor-go.md
deleted file mode 100644
index bb1067715a2..00000000000
--- a/change-notes/1.25/extractor-go.md
+++ /dev/null
@@ -1,5 +0,0 @@
-[[ condition: enterprise-only ]]
-
-# Improvements to Go analysis
-
-## Changes to code extraction
diff --git a/change-notes/2020-05-01-bad-redirect-check.md b/change-notes/2020-05-01-bad-redirect-check.md
new file mode 100644
index 00000000000..6ee5d0fb235
--- /dev/null
+++ b/change-notes/2020-05-01-bad-redirect-check.md
@@ -0,0 +1,2 @@
+lgtm,codescanning
+* The query "Bad redirect check" (`go/bad-redirect-check`) now requires that the checked variable is actually used in a redirect as opposed to relying on a name-based heuristic. This eliminates some false positive results, and adds more true positive results.
diff --git a/change-notes/2020-05-01-macaron-model.md b/change-notes/2020-05-01-macaron-model.md
new file mode 100644
index 00000000000..056e7d093cc
--- /dev/null
+++ b/change-notes/2020-05-01-macaron-model.md
@@ -0,0 +1,2 @@
+lgtm,codescanning
+* Basic support for the [Macaron](https://go-macaron.com/) HTTP library has been added, which may lead to more results from the security queries.
\ No newline at end of file
From 657108d598e4dc602b1f929d19973b4c40f810ec Mon Sep 17 00:00:00 2001
From: porcupineyhairs <61983466+porcupineyhairs@users.noreply.github.com>
Date: Mon, 4 May 2020 12:24:30 +0530
Subject: [PATCH 039/157] Add Email Content Injection Query (#108)
This adds a query for Email content injection issues.
It models the Golang's net/smtp library as well as
the Sendgrid email library (581 stars).
---
ql/src/experimental/CWE-640/EmailBad.go | 13 +
ql/src/experimental/CWE-640/EmailGood.go | 13 +
.../experimental/CWE-640/EmailInjection.qhelp | 48 +++
ql/src/experimental/CWE-640/EmailInjection.ql | 19 +
.../experimental/CWE-640/EmailInjection.qll | 29 ++
.../CWE-640/EmailInjectionCustomizations.qll | 30 ++
ql/src/go.qll | 1 +
ql/src/semmle/go/frameworks/Email.qll | 110 +++++
.../CWE-640/EmailInjection.expected | 37 ++
.../experimental/CWE-640/EmailInjection.qlref | 1 +
ql/test/experimental/CWE-640/email.go | 115 ++++++
ql/test/experimental/CWE-640/go.mod | 7 +
.../sendgrid/sendgrid-go/helpers/LICENSE | 21 +
.../sendgrid/sendgrid-go/helpers/mail/stub.go | 391 ++++++++++++++++++
.../experimental/CWE-640/vendor/modules.txt | 3 +
.../go/frameworks/Email/MailData.expected | 9 +
.../semmle/go/frameworks/Email/MailData.ql | 4 +
.../semmle/go/frameworks/Email/go.mod | 8 +
.../semmle/go/frameworks/Email/mail.go | 42 ++
.../sendgrid/sendgrid-go/helpers/mail/LICENSE | 21 +
.../sendgrid/sendgrid-go/helpers/mail/stub.go | 391 ++++++++++++++++++
.../go/frameworks/Email/vendor/modules.txt | 6 +
22 files changed, 1319 insertions(+)
create mode 100644 ql/src/experimental/CWE-640/EmailBad.go
create mode 100644 ql/src/experimental/CWE-640/EmailGood.go
create mode 100644 ql/src/experimental/CWE-640/EmailInjection.qhelp
create mode 100644 ql/src/experimental/CWE-640/EmailInjection.ql
create mode 100644 ql/src/experimental/CWE-640/EmailInjection.qll
create mode 100644 ql/src/experimental/CWE-640/EmailInjectionCustomizations.qll
create mode 100644 ql/src/semmle/go/frameworks/Email.qll
create mode 100644 ql/test/experimental/CWE-640/EmailInjection.expected
create mode 100644 ql/test/experimental/CWE-640/EmailInjection.qlref
create mode 100644 ql/test/experimental/CWE-640/email.go
create mode 100644 ql/test/experimental/CWE-640/go.mod
create mode 100644 ql/test/experimental/CWE-640/vendor/github.com/sendgrid/sendgrid-go/helpers/LICENSE
create mode 100644 ql/test/experimental/CWE-640/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/stub.go
create mode 100644 ql/test/experimental/CWE-640/vendor/modules.txt
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Email/MailData.expected
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Email/MailData.ql
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Email/go.mod
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Email/mail.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Email/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/LICENSE
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Email/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/stub.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Email/vendor/modules.txt
diff --git a/ql/src/experimental/CWE-640/EmailBad.go b/ql/src/experimental/CWE-640/EmailBad.go
new file mode 100644
index 00000000000..aab8467b340
--- /dev/null
+++ b/ql/src/experimental/CWE-640/EmailBad.go
@@ -0,0 +1,13 @@
+package main
+
+import (
+ "net/http"
+ "net/smtp"
+)
+
+func mail(w http.ResponseWriter, r *http.Request) {
+ host := r.Header.Get("Host")
+ token := backend.getUserSecretResetToken(email)
+ body := "Click to reset password: " + host + "/" + token
+ smtp.SendMail("test.test", nil, "from@from.com", nil, []byte(body))
+}
diff --git a/ql/src/experimental/CWE-640/EmailGood.go b/ql/src/experimental/CWE-640/EmailGood.go
new file mode 100644
index 00000000000..d0cfc569b88
--- /dev/null
+++ b/ql/src/experimental/CWE-640/EmailGood.go
@@ -0,0 +1,13 @@
+package main
+
+import (
+ "net/http"
+ "net/smtp"
+)
+
+func mailGood(w http.ResponseWriter, r *http.Request) {
+ host := config.Get("Host")
+ token := backend.getUserSecretResetToken(email)
+ body := "Click to reset password: " + host + "/" + token
+ smtp.SendMail("test.test", nil, "from@from.com", nil, []byte(body))
+}
diff --git a/ql/src/experimental/CWE-640/EmailInjection.qhelp b/ql/src/experimental/CWE-640/EmailInjection.qhelp
new file mode 100644
index 00000000000..b1749fbb488
--- /dev/null
+++ b/ql/src/experimental/CWE-640/EmailInjection.qhelp
@@ -0,0 +1,48 @@
+
+
+
+
+ Using untrusted input to construct an email introduces multiple security
+ vulnerabilities. For instance, inclusion of untrusted input in an email body
+ may allow an attacker to conduct Cross-Site Scripting (XSS) attacks, while
+ inclusion of an HTTP header in the email body may allow a full account
+ compromise, as shown in the example below.
+
+
+
+
+ Any data which is passed to an email subject or body must be sanitized before use.
+
+
+
+
+ In the following example snippet, the
+ host
+ field is user-controlled.
+
+
+ A malicious user can send an HTTP request to the targeted web site,
+ but with a Host header that refers to his own web site. This means the
+ emails will be sent out to potential victims, originating from a server
+ they trust, but with links leading to a malicious web site.
+
+
+ If the email contains a password reset link and the victim clicks
+ the link, the secret reset token will be leaked to the attacker. Using the
+ leaked token, the attacker can then construct the real reset link and use it to
+ change the victim's password.
+
+
+
+ One way to prevent this is to load the host name from a trusted configuration file instead.
+
+
+
+
+
+ OWASP
+ Content Spoofing
+ .
+
+
+
\ No newline at end of file
diff --git a/ql/src/experimental/CWE-640/EmailInjection.ql b/ql/src/experimental/CWE-640/EmailInjection.ql
new file mode 100644
index 00000000000..7cf11fa1a3e
--- /dev/null
+++ b/ql/src/experimental/CWE-640/EmailInjection.ql
@@ -0,0 +1,19 @@
+/**
+ * @name Email content injection
+ * @description Incorporating untrusted input directly into an email message can enable
+ * content spoofing, which in turn may lead to information leaks and other
+ * security issues.
+ * @id go/email-injection
+ * @kind path-problem
+ * @problem.severity error
+ * @tags security
+ * external/cwe/cwe-640
+ */
+
+import go
+import DataFlow::PathGraph
+import EmailInjection::EmailInjection
+
+from DataFlow::PathNode source, DataFlow::PathNode sink, Configuration config
+where config.hasFlowPath(source, sink)
+select sink, source, sink, "Email content may contain $@.", source.getNode(), "untrusted input"
diff --git a/ql/src/experimental/CWE-640/EmailInjection.qll b/ql/src/experimental/CWE-640/EmailInjection.qll
new file mode 100644
index 00000000000..4cf8b382c98
--- /dev/null
+++ b/ql/src/experimental/CWE-640/EmailInjection.qll
@@ -0,0 +1,29 @@
+/**
+ * Provides a taint-tracking configuration for reasoning about
+ * server-side email-injection vulnerabilities.
+ *
+ * Note, for performance reasons: only import this file if
+ * `EmailInjection::Configuration` is needed, otherwise
+ * `EmailInjectionCustomizations` should be imported instead.
+ */
+
+import go
+
+/**
+ * Provides a taint-tracking configuration for reasoning about
+ * email-injection vulnerabilities.
+ */
+module EmailInjection {
+ import EmailInjectionCustomizations::EmailInjection
+
+ /**
+ * A taint-tracking configuration for reasoning about email-injection vulnerabilities.
+ */
+ class Configuration extends TaintTracking::Configuration {
+ Configuration() { this = "Email Injection" }
+
+ override predicate isSource(DataFlow::Node source) { source instanceof Source }
+
+ override predicate isSink(DataFlow::Node sink) { sink instanceof Sink }
+ }
+}
diff --git a/ql/src/experimental/CWE-640/EmailInjectionCustomizations.qll b/ql/src/experimental/CWE-640/EmailInjectionCustomizations.qll
new file mode 100644
index 00000000000..104a5111c59
--- /dev/null
+++ b/ql/src/experimental/CWE-640/EmailInjectionCustomizations.qll
@@ -0,0 +1,30 @@
+/** Provides classes for reasoning about email-injection vulnerabilities. */
+
+import go
+
+/**
+ * Provides a library for reasoning about email-injection vulnerabilities.
+ */
+module EmailInjection {
+ /**
+ * A data-flow node that should be considered a source of untrusted data for email-injection vulnerabilities.
+ */
+ abstract class Source extends DataFlow::Node { }
+
+ /**
+ * A data-flow node that should be considered a sink for email-injection vulnerabilities.
+ */
+ abstract class Sink extends DataFlow::Node { }
+
+ /** A source of untrusted data, considered as a taint source for email injection. */
+ class UntrustedFlowSourceAsSource extends Source {
+ UntrustedFlowSourceAsSource() { this instanceof UntrustedFlowSource }
+ }
+
+ /**
+ * A data-flow node that becomes part of an email considered as a taint sink for email injection.
+ */
+ class MailDataAsSink extends Sink {
+ MailDataAsSink() { this instanceof MailData }
+ }
+}
diff --git a/ql/src/go.qll b/ql/src/go.qll
index 261ee7a827f..36c43aa789d 100644
--- a/ql/src/go.qll
+++ b/ql/src/go.qll
@@ -24,6 +24,7 @@ import semmle.go.dataflow.DataFlow
import semmle.go.dataflow.GlobalValueNumbering
import semmle.go.dataflow.TaintTracking
import semmle.go.dataflow.SSA
+import semmle.go.frameworks.Email
import semmle.go.frameworks.HTTP
import semmle.go.frameworks.Macaron
import semmle.go.frameworks.SystemCommandExecutors
diff --git a/ql/src/semmle/go/frameworks/Email.qll b/ql/src/semmle/go/frameworks/Email.qll
new file mode 100644
index 00000000000..7041cf86b57
--- /dev/null
+++ b/ql/src/semmle/go/frameworks/Email.qll
@@ -0,0 +1,110 @@
+/** Provides classes for working with email-related APIs. */
+
+import go
+
+/**
+ * A data-flow node that represents data written to an email.
+ * Data in this case includes the email headers and the mail body
+ *
+ * Extend this class to refine existing API models. If you want to model new APIs,
+ * extend `MailDataCall::Range` instead.
+ */
+class MailData extends DataFlow::Node {
+ MailDataCall::Range self;
+
+ MailData() { this = self.getData() }
+}
+
+/** Provides classes for working with calls which write data to an email. */
+module MailDataCall {
+ /**
+ * A data-flow node that represents a call which writes data to an email.
+ * Data in this case refers to email headers and the mail body
+ *
+ */
+ abstract class Range extends DataFlow::CallNode {
+ /** Gets data written to an email connection. */
+ abstract DataFlow::Node getData();
+ }
+
+ /** Get the package name `github.com/sendgrid/sendgrid-go/helpers/mail`. */
+ bindingset[result]
+ private string sendgridMail() { result = "github.com/sendgrid/sendgrid-go/helpers/mail" }
+
+ /** A Client.Data expression string used in an API function of the net/smtp package. */
+ private class SmtpData extends Range {
+ SmtpData() {
+ // func (c *Client) Data() (io.WriteCloser, error)
+ this.getTarget().(Method).hasQualifiedName("net/smtp", "Client", "Data")
+ }
+
+ override DataFlow::Node getData() {
+ exists(DataFlow::CallNode write, DataFlow::Node writer, int i |
+ this.getResult(0) = writer and
+ (
+ write.getTarget().hasQualifiedName("fmt", "Fprintf")
+ or
+ write.getTarget().hasQualifiedName("io", "WriteString")
+ ) and
+ writer.getASuccessor*() = write.getArgument(0) and
+ i > 0 and
+ write.getArgument(i) = result
+ )
+ }
+ }
+
+ /** A send mail expression string used in an API function of the net/smtp package. */
+ private class SmtpSendMail extends Range {
+ SmtpSendMail() {
+ // func SendMail(addr string, a Auth, from string, to []string, msg []byte) error
+ this.getTarget().hasQualifiedName("net/smtp", "SendMail")
+ }
+
+ override DataFlow::Node getData() { result = this.getArgument(4) }
+ }
+
+ /** A call to `NewSingleEmail` API function of the Sendgrid mail package. */
+ private class SendGridSingleEmail extends Range {
+ SendGridSingleEmail() {
+ // func NewSingleEmail(from *Email, subject string, to *Email, plainTextContent string, htmlContent string) *SGMailV3
+ this.getTarget().hasQualifiedName(sendgridMail(), "NewSingleEmail")
+ }
+
+ override DataFlow::Node getData() { result = this.getArgument([1, 3, 4]) }
+ }
+
+ /* Gets the value of the `i`-th content parameter of the given `call` */
+ private DataFlow::Node getContent(DataFlow::CallNode call, int i) {
+ exists(DataFlow::CallNode cn, DataFlow::Node content |
+ // func NewContent(contentType string, value string) *Content
+ cn.getTarget().hasQualifiedName(sendgridMail(), "NewContent") and
+ cn.getResult() = content and
+ content.getASuccessor*() = call.getArgument(i) and
+ result = cn.getArgument(1)
+ )
+ }
+
+ /** A call to `NewV3MailInit` API function of the Sendgrid mail package. */
+ private class SendGridV3Init extends Range {
+ SendGridV3Init() {
+ // func NewV3MailInit(from *Email, subject string, to *Email, content ...*Content) *SGMailV3
+ this.getTarget().hasQualifiedName(sendgridMail(), "NewV3MailInit")
+ }
+
+ override DataFlow::Node getData() {
+ exists(int i | result = getContent(this, i) and i >= 3)
+ or
+ result = this.getArgument(1)
+ }
+ }
+
+ /** A call to `AddContent` API function of the Sendgrid mail package. */
+ private class SendGridAddContent extends Range {
+ SendGridAddContent() {
+ // func (s *SGMailV3) AddContent(c ...*Content) *SGMailV3
+ this.getTarget().(Method).hasQualifiedName(sendgridMail(), "SGMailV3", "AddContent")
+ }
+
+ override DataFlow::Node getData() { result = getContent(this, _) }
+ }
+}
diff --git a/ql/test/experimental/CWE-640/EmailInjection.expected b/ql/test/experimental/CWE-640/EmailInjection.expected
new file mode 100644
index 00000000000..7d7e2988d09
--- /dev/null
+++ b/ql/test/experimental/CWE-640/EmailInjection.expected
@@ -0,0 +1,37 @@
+edges
+| email.go:24:10:24:17 | selection of Header : Header | email.go:27:56:27:67 | type conversion |
+| email.go:34:21:34:31 | call to Referer : string | email.go:36:57:36:78 | type conversion |
+| email.go:42:21:42:31 | call to Referer : string | email.go:46:25:46:38 | untrustedInput |
+| email.go:51:21:51:31 | call to Referer : string | email.go:57:46:57:59 | untrustedInput |
+| email.go:51:21:51:31 | call to Referer : string | email.go:58:52:58:65 | untrustedInput |
+| email.go:63:21:63:31 | call to Referer : string | email.go:65:47:65:60 | untrustedInput |
+| email.go:73:21:73:31 | call to Referer : string | email.go:79:47:79:60 | untrustedInput |
+| email.go:87:21:87:31 | call to Referer : string | email.go:94:37:94:50 | untrustedInput |
+| email.go:87:21:87:31 | call to Referer : string | email.go:96:48:96:61 | untrustedInput |
+nodes
+| email.go:24:10:24:17 | selection of Header : Header | semmle.label | selection of Header : Header |
+| email.go:27:56:27:67 | type conversion | semmle.label | type conversion |
+| email.go:34:21:34:31 | call to Referer : string | semmle.label | call to Referer : string |
+| email.go:36:57:36:78 | type conversion | semmle.label | type conversion |
+| email.go:42:21:42:31 | call to Referer : string | semmle.label | call to Referer : string |
+| email.go:46:25:46:38 | untrustedInput | semmle.label | untrustedInput |
+| email.go:51:21:51:31 | call to Referer : string | semmle.label | call to Referer : string |
+| email.go:57:46:57:59 | untrustedInput | semmle.label | untrustedInput |
+| email.go:58:52:58:65 | untrustedInput | semmle.label | untrustedInput |
+| email.go:63:21:63:31 | call to Referer : string | semmle.label | call to Referer : string |
+| email.go:65:47:65:60 | untrustedInput | semmle.label | untrustedInput |
+| email.go:73:21:73:31 | call to Referer : string | semmle.label | call to Referer : string |
+| email.go:79:47:79:60 | untrustedInput | semmle.label | untrustedInput |
+| email.go:87:21:87:31 | call to Referer : string | semmle.label | call to Referer : string |
+| email.go:94:37:94:50 | untrustedInput | semmle.label | untrustedInput |
+| email.go:96:48:96:61 | untrustedInput | semmle.label | untrustedInput |
+#select
+| email.go:27:56:27:67 | type conversion | email.go:24:10:24:17 | selection of Header : Header | email.go:27:56:27:67 | type conversion | Email content may contain $@. | email.go:24:10:24:17 | selection of Header | untrusted input |
+| email.go:36:57:36:78 | type conversion | email.go:34:21:34:31 | call to Referer : string | email.go:36:57:36:78 | type conversion | Email content may contain $@. | email.go:34:21:34:31 | call to Referer | untrusted input |
+| email.go:46:25:46:38 | untrustedInput | email.go:42:21:42:31 | call to Referer : string | email.go:46:25:46:38 | untrustedInput | Email content may contain $@. | email.go:42:21:42:31 | call to Referer | untrusted input |
+| email.go:57:46:57:59 | untrustedInput | email.go:51:21:51:31 | call to Referer : string | email.go:57:46:57:59 | untrustedInput | Email content may contain $@. | email.go:51:21:51:31 | call to Referer | untrusted input |
+| email.go:58:52:58:65 | untrustedInput | email.go:51:21:51:31 | call to Referer : string | email.go:58:52:58:65 | untrustedInput | Email content may contain $@. | email.go:51:21:51:31 | call to Referer | untrusted input |
+| email.go:65:47:65:60 | untrustedInput | email.go:63:21:63:31 | call to Referer : string | email.go:65:47:65:60 | untrustedInput | Email content may contain $@. | email.go:63:21:63:31 | call to Referer | untrusted input |
+| email.go:79:47:79:60 | untrustedInput | email.go:73:21:73:31 | call to Referer : string | email.go:79:47:79:60 | untrustedInput | Email content may contain $@. | email.go:73:21:73:31 | call to Referer | untrusted input |
+| email.go:94:37:94:50 | untrustedInput | email.go:87:21:87:31 | call to Referer : string | email.go:94:37:94:50 | untrustedInput | Email content may contain $@. | email.go:87:21:87:31 | call to Referer | untrusted input |
+| email.go:96:48:96:61 | untrustedInput | email.go:87:21:87:31 | call to Referer : string | email.go:96:48:96:61 | untrustedInput | Email content may contain $@. | email.go:87:21:87:31 | call to Referer | untrusted input |
diff --git a/ql/test/experimental/CWE-640/EmailInjection.qlref b/ql/test/experimental/CWE-640/EmailInjection.qlref
new file mode 100644
index 00000000000..a6d8abad1c9
--- /dev/null
+++ b/ql/test/experimental/CWE-640/EmailInjection.qlref
@@ -0,0 +1 @@
+experimental/CWE-640/EmailInjection.ql
\ No newline at end of file
diff --git a/ql/test/experimental/CWE-640/email.go b/ql/test/experimental/CWE-640/email.go
new file mode 100644
index 00000000000..44a54b59fe7
--- /dev/null
+++ b/ql/test/experimental/CWE-640/email.go
@@ -0,0 +1,115 @@
+package main
+
+//go:generate depstubber -vendor github.com/sendgrid/sendgrid-go/helpers/mail "" NewEmail,NewSingleEmail,NewContent,NewV3Mail,NewV3MailInit
+
+import (
+ "io"
+ "log"
+ "net/http"
+ "net/smtp"
+
+ sendgrid "github.com/sendgrid/sendgrid-go/helpers/mail"
+)
+
+// OK
+func mailGood(w http.ResponseWriter, r *http.Request) {
+ host := config["Host"]
+ token := backend.getUserSecretResetToken(email)
+ body := "Click to reset password: " + host + "/" + token
+ smtp.SendMail("test.test", nil, "from@from.com", nil, []byte(body))
+}
+
+// Not OK
+func mail(w http.ResponseWriter, r *http.Request) {
+ host := r.Header.Get("Host")
+ token := backend.getUserSecretResetToken(email)
+ body := "Click to reset password: " + host + "/" + token
+ smtp.SendMail("test.test", nil, "from@from.com", nil, []byte(body))
+}
+
+func main() {
+
+ // Not OK
+ http.HandleFunc("/ex0", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+ smtp.SendMail("test.test", nil, "from@from.com", nil, []byte(untrustedInput))
+
+ })
+
+ // Not OK
+ http.HandleFunc("/ex1", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+ s, _ := smtp.Dial("test.test")
+ write, _ := s.Data()
+ io.WriteString(write, untrustedInput)
+ })
+
+ // Not OK
+ http.HandleFunc("/ex2", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+ from := sendgrid.NewEmail("from", "from@from.com")
+ to := sendgrid.NewEmail("to", "to@to.com")
+ subject := "test"
+ body := "body"
+ sendgrid.NewSingleEmail(from, subject, to, untrustedInput, body)
+ sendgrid.NewSingleEmail(from, subject, to, body, untrustedInput)
+ })
+
+ // Not OK
+ http.HandleFunc("/ex3", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+ content := sendgrid.NewContent("text/html", untrustedInput)
+
+ v := sendgrid.NewV3Mail()
+ v.AddContent(content)
+ })
+
+ // Not OK
+ http.HandleFunc("/ex4", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+ from := sendgrid.NewEmail("from", "from@from.com")
+ to := sendgrid.NewEmail("to", "to@to.com")
+ subject := "test"
+
+ content := sendgrid.NewContent("text/html", untrustedInput)
+
+ v := sendgrid.NewV3MailInit(from, subject, to, content, content)
+ v.AddContent(content)
+ })
+
+ // Not OK
+ http.HandleFunc("/ex5", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+ from := sendgrid.NewEmail("from", "from@from.com")
+ to := sendgrid.NewEmail("to", "to@to.com")
+
+ content := sendgrid.NewContent("text/html", "test")
+
+ v := sendgrid.NewV3MailInit(from, untrustedInput, to, content, content)
+
+ content2 := sendgrid.NewContent("text/html", untrustedInput)
+
+ v.AddContent(content2)
+ })
+
+ log.Println(http.ListenAndServe(":80", nil))
+
+}
+
+// Backend is an empty struct
+type Backend struct{}
+
+func (*Backend) getUserSecretResetToken(email string) string {
+ return ""
+}
+
+var email = "test@test.com"
+
+var config map[string]string
+var backend = &Backend{}
diff --git a/ql/test/experimental/CWE-640/go.mod b/ql/test/experimental/CWE-640/go.mod
new file mode 100644
index 00000000000..979044b6bf2
--- /dev/null
+++ b/ql/test/experimental/CWE-640/go.mod
@@ -0,0 +1,7 @@
+module main
+
+go 1.14
+
+require (
+ github.com/sendgrid/sendgrid-go v3.5.0+incompatible
+)
diff --git a/ql/test/experimental/CWE-640/vendor/github.com/sendgrid/sendgrid-go/helpers/LICENSE b/ql/test/experimental/CWE-640/vendor/github.com/sendgrid/sendgrid-go/helpers/LICENSE
new file mode 100644
index 00000000000..e5441aa6a0a
--- /dev/null
+++ b/ql/test/experimental/CWE-640/vendor/github.com/sendgrid/sendgrid-go/helpers/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013-2019 Twilio SendGrid, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/ql/test/experimental/CWE-640/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/stub.go b/ql/test/experimental/CWE-640/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/stub.go
new file mode 100644
index 00000000000..cd1956cecc0
--- /dev/null
+++ b/ql/test/experimental/CWE-640/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/stub.go
@@ -0,0 +1,391 @@
+// Code generated by depstubber. DO NOT EDIT.
+// This is a simple stub for github.com/sendgrid/sendgrid-go/helpers/mail, strictly for use in testing.
+
+// See the LICENSE file for information about the licensing of the original library.
+// Source: github.com/sendgrid/sendgrid-go/helpers/mail (exports: ; functions: NewEmail,NewSingleEmail,NewContent,NewV3Mail,NewV3MailInit)
+
+// Package mail is a stub of github.com/sendgrid/sendgrid-go/helpers/mail, generated by depstubber.
+package mail
+
+import ()
+
+type Asm struct {
+ GroupID int
+ GroupsToDisplay []int
+}
+
+func (_ *Asm) AddGroupsToDisplay(_ ...int) *Asm {
+ return nil
+}
+
+func (_ *Asm) SetGroupID(_ int) *Asm {
+ return nil
+}
+
+type Attachment struct {
+ Content string
+ Type string
+ Name string
+ Filename string
+ Disposition string
+ ContentID string
+}
+
+func (_ *Attachment) SetContent(_ string) *Attachment {
+ return nil
+}
+
+func (_ *Attachment) SetContentID(_ string) *Attachment {
+ return nil
+}
+
+func (_ *Attachment) SetDisposition(_ string) *Attachment {
+ return nil
+}
+
+func (_ *Attachment) SetFilename(_ string) *Attachment {
+ return nil
+}
+
+func (_ *Attachment) SetType(_ string) *Attachment {
+ return nil
+}
+
+type BccSetting struct {
+ Enable *bool
+ Email string
+}
+
+func (_ *BccSetting) SetEmail(_ string) *BccSetting {
+ return nil
+}
+
+func (_ *BccSetting) SetEnable(_ bool) *BccSetting {
+ return nil
+}
+
+type ClickTrackingSetting struct {
+ Enable *bool
+ EnableText *bool
+}
+
+func (_ *ClickTrackingSetting) SetEnable(_ bool) *ClickTrackingSetting {
+ return nil
+}
+
+func (_ *ClickTrackingSetting) SetEnableText(_ bool) *ClickTrackingSetting {
+ return nil
+}
+
+type Content struct {
+ Type string
+ Value string
+}
+
+type Email struct {
+ Name string
+ Address string
+}
+
+type FooterSetting struct {
+ Enable *bool
+ Text string
+ Html string
+}
+
+func (_ *FooterSetting) SetEnable(_ bool) *FooterSetting {
+ return nil
+}
+
+func (_ *FooterSetting) SetHTML(_ string) *FooterSetting {
+ return nil
+}
+
+func (_ *FooterSetting) SetText(_ string) *FooterSetting {
+ return nil
+}
+
+type GaSetting struct {
+ Enable *bool
+ CampaignSource string
+ CampaignTerm string
+ CampaignContent string
+ CampaignName string
+ CampaignMedium string
+}
+
+func (_ *GaSetting) SetCampaignContent(_ string) *GaSetting {
+ return nil
+}
+
+func (_ *GaSetting) SetCampaignMedium(_ string) *GaSetting {
+ return nil
+}
+
+func (_ *GaSetting) SetCampaignName(_ string) *GaSetting {
+ return nil
+}
+
+func (_ *GaSetting) SetCampaignSource(_ string) *GaSetting {
+ return nil
+}
+
+func (_ *GaSetting) SetCampaignTerm(_ string) *GaSetting {
+ return nil
+}
+
+func (_ *GaSetting) SetEnable(_ bool) *GaSetting {
+ return nil
+}
+
+type MailSettings struct {
+ BCC *BccSetting
+ BypassListManagement *Setting
+ Footer *FooterSetting
+ SandboxMode *Setting
+ SpamCheckSetting *SpamCheckSetting
+}
+
+func (_ *MailSettings) SetBCC(_ *BccSetting) *MailSettings {
+ return nil
+}
+
+func (_ *MailSettings) SetBypassListManagement(_ *Setting) *MailSettings {
+ return nil
+}
+
+func (_ *MailSettings) SetFooter(_ *FooterSetting) *MailSettings {
+ return nil
+}
+
+func (_ *MailSettings) SetSandboxMode(_ *Setting) *MailSettings {
+ return nil
+}
+
+func (_ *MailSettings) SetSpamCheckSettings(_ *SpamCheckSetting) *MailSettings {
+ return nil
+}
+
+func NewContent(_ string, _ string) *Content {
+ return nil
+}
+
+func NewEmail(_ string, _ string) *Email {
+ return nil
+}
+
+func NewSingleEmail(_ *Email, _ string, _ *Email, _ string, _ string) *SGMailV3 {
+ return nil
+}
+
+func NewV3Mail() *SGMailV3 {
+ return nil
+}
+
+func NewV3MailInit(_ *Email, _ string, _ *Email, _ ...*Content) *SGMailV3 {
+ return nil
+}
+
+type OpenTrackingSetting struct {
+ Enable *bool
+ SubstitutionTag string
+}
+
+func (_ *OpenTrackingSetting) SetEnable(_ bool) *OpenTrackingSetting {
+ return nil
+}
+
+func (_ *OpenTrackingSetting) SetSubstitutionTag(_ string) *OpenTrackingSetting {
+ return nil
+}
+
+type Personalization struct {
+ To []*Email
+ CC []*Email
+ BCC []*Email
+ Subject string
+ Headers map[string]string
+ Substitutions map[string]string
+ CustomArgs map[string]string
+ DynamicTemplateData map[string]interface{}
+ Categories []string
+ SendAt int
+}
+
+func (_ *Personalization) AddBCCs(_ ...*Email) {}
+
+func (_ *Personalization) AddCCs(_ ...*Email) {}
+
+func (_ *Personalization) AddTos(_ ...*Email) {}
+
+func (_ *Personalization) SetCustomArg(_ string, _ string) {}
+
+func (_ *Personalization) SetDynamicTemplateData(_ string, _ interface{}) {}
+
+func (_ *Personalization) SetHeader(_ string, _ string) {}
+
+func (_ *Personalization) SetSendAt(_ int) {}
+
+func (_ *Personalization) SetSubstitution(_ string, _ string) {}
+
+type SGMailV3 struct {
+ From *Email
+ Subject string
+ Personalizations []*Personalization
+ Content []*Content
+ Attachments []*Attachment
+ TemplateID string
+ Sections map[string]string
+ Headers map[string]string
+ Categories []string
+ CustomArgs map[string]string
+ SendAt int
+ BatchID string
+ Asm *Asm
+ IPPoolID string
+ MailSettings *MailSettings
+ TrackingSettings *TrackingSettings
+ ReplyTo *Email
+}
+
+func (_ *SGMailV3) AddAttachment(_ ...*Attachment) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) AddCategories(_ ...string) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) AddContent(_ ...*Content) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) AddPersonalizations(_ ...*Personalization) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) AddSection(_ string, _ string) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetASM(_ *Asm) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetBatchID(_ string) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetCustomArg(_ string, _ string) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetFrom(_ *Email) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetHeader(_ string, _ string) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetIPPoolID(_ string) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetMailSettings(_ *MailSettings) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetReplyTo(_ *Email) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetSendAt(_ int) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetTemplateID(_ string) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetTrackingSettings(_ *TrackingSettings) *SGMailV3 {
+ return nil
+}
+
+type SandboxModeSetting struct {
+ Enable *bool
+ ForwardSpam *bool
+ SpamCheck *SpamCheckSetting
+}
+
+type Setting struct {
+ Enable *bool
+}
+
+type SpamCheckSetting struct {
+ Enable *bool
+ SpamThreshold int
+ PostToURL string
+}
+
+func (_ *SpamCheckSetting) SetEnable(_ bool) *SpamCheckSetting {
+ return nil
+}
+
+func (_ *SpamCheckSetting) SetPostToURL(_ string) *SpamCheckSetting {
+ return nil
+}
+
+func (_ *SpamCheckSetting) SetSpamThreshold(_ int) *SpamCheckSetting {
+ return nil
+}
+
+type SubscriptionTrackingSetting struct {
+ Enable *bool
+ Text string
+ Html string
+ SubstitutionTag string
+}
+
+func (_ *SubscriptionTrackingSetting) SetEnable(_ bool) *SubscriptionTrackingSetting {
+ return nil
+}
+
+func (_ *SubscriptionTrackingSetting) SetHTML(_ string) *SubscriptionTrackingSetting {
+ return nil
+}
+
+func (_ *SubscriptionTrackingSetting) SetSubstitutionTag(_ string) *SubscriptionTrackingSetting {
+ return nil
+}
+
+func (_ *SubscriptionTrackingSetting) SetText(_ string) *SubscriptionTrackingSetting {
+ return nil
+}
+
+type TrackingSettings struct {
+ ClickTracking *ClickTrackingSetting
+ OpenTracking *OpenTrackingSetting
+ SubscriptionTracking *SubscriptionTrackingSetting
+ GoogleAnalytics *GaSetting
+ BCC *BccSetting
+ BypassListManagement *Setting
+ Footer *FooterSetting
+ SandboxMode *SandboxModeSetting
+}
+
+func (_ *TrackingSettings) SetClickTracking(_ *ClickTrackingSetting) *TrackingSettings {
+ return nil
+}
+
+func (_ *TrackingSettings) SetGoogleAnalytics(_ *GaSetting) *TrackingSettings {
+ return nil
+}
+
+func (_ *TrackingSettings) SetOpenTracking(_ *OpenTrackingSetting) *TrackingSettings {
+ return nil
+}
+
+func (_ *TrackingSettings) SetSubscriptionTracking(_ *SubscriptionTrackingSetting) *TrackingSettings {
+ return nil
+}
diff --git a/ql/test/experimental/CWE-640/vendor/modules.txt b/ql/test/experimental/CWE-640/vendor/modules.txt
new file mode 100644
index 00000000000..4b7525957df
--- /dev/null
+++ b/ql/test/experimental/CWE-640/vendor/modules.txt
@@ -0,0 +1,3 @@
+# github.com/sendgrid/sendgrid-go v3.5.0+incompatible
+## explicit
+github.com/sendgrid/sendgrid-go
diff --git a/ql/test/library-tests/semmle/go/frameworks/Email/MailData.expected b/ql/test/library-tests/semmle/go/frameworks/Email/MailData.expected
new file mode 100644
index 00000000000..9e9801a2837
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Email/MailData.expected
@@ -0,0 +1,9 @@
+| mail.go:16:56:16:77 | type conversion |
+| mail.go:22:24:22:37 | untrustedInput |
+| mail.go:29:32:29:36 | alert |
+| mail.go:29:43:29:47 | alert |
+| mail.go:29:50:29:54 | alert |
+| mail.go:32:46:32:50 | alert |
+| mail.go:36:47:36:51 | alert |
+| mail.go:37:47:37:51 | alert |
+| mail.go:40:35:40:39 | alert |
diff --git a/ql/test/library-tests/semmle/go/frameworks/Email/MailData.ql b/ql/test/library-tests/semmle/go/frameworks/Email/MailData.ql
new file mode 100644
index 00000000000..4a630d2a96c
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Email/MailData.ql
@@ -0,0 +1,4 @@
+import go
+
+from MailData f
+select f
diff --git a/ql/test/library-tests/semmle/go/frameworks/Email/go.mod b/ql/test/library-tests/semmle/go/frameworks/Email/go.mod
new file mode 100644
index 00000000000..d3b7fdb67f8
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Email/go.mod
@@ -0,0 +1,8 @@
+module main
+
+go 1.14
+
+require (
+ github.com/sendgrid/sendgrid-go v3.5.0+incompatible
+ github.com/stretchr/testify v1.5.1 // indirect
+)
diff --git a/ql/test/library-tests/semmle/go/frameworks/Email/mail.go b/ql/test/library-tests/semmle/go/frameworks/Email/mail.go
new file mode 100644
index 00000000000..823ec92acf6
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Email/mail.go
@@ -0,0 +1,42 @@
+package main
+
+//go:generate depstubber -vendor github.com/sendgrid/sendgrid-go/helpers/mail "" NewEmail,NewSingleEmail,NewContent,NewV3Mail,NewV3MailInit
+
+import (
+ "io"
+ "net/smtp"
+
+ sendgrid "github.com/sendgrid/sendgrid-go/helpers/mail"
+)
+
+func main() {
+ untrustedInput := "test"
+
+ // Not OK - 1 alert
+ smtp.SendMail("test.test", nil, "from@from.com", nil, []byte(untrustedInput))
+
+ s, _ := smtp.Dial("test.test")
+ write, _ := s.Data()
+
+ // Not OK - 1 alert
+ io.WriteString(write, untrustedInput)
+
+ from := sendgrid.NewEmail("from", "from@from.com")
+ to := sendgrid.NewEmail("to", "to@to.com")
+ alert := "sub"
+
+ // Not OK - 3 alerts
+ sendgrid.NewSingleEmail(from, alert, to, alert, alert)
+
+ // Not OK - 1 alert
+ content := sendgrid.NewContent("text/html", alert)
+ v := sendgrid.NewV3Mail()
+ v.AddContent(content)
+
+ content2 := sendgrid.NewContent("text/html", alert)
+ content3 := sendgrid.NewContent("text/html", alert)
+
+ // Not OK - 3 alerts
+ v = sendgrid.NewV3MailInit(from, alert, to, content2, content3)
+
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Email/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/LICENSE b/ql/test/library-tests/semmle/go/frameworks/Email/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/LICENSE
new file mode 100644
index 00000000000..e5441aa6a0a
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Email/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013-2019 Twilio SendGrid, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Email/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/stub.go b/ql/test/library-tests/semmle/go/frameworks/Email/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/stub.go
new file mode 100644
index 00000000000..cd1956cecc0
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Email/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/stub.go
@@ -0,0 +1,391 @@
+// Code generated by depstubber. DO NOT EDIT.
+// This is a simple stub for github.com/sendgrid/sendgrid-go/helpers/mail, strictly for use in testing.
+
+// See the LICENSE file for information about the licensing of the original library.
+// Source: github.com/sendgrid/sendgrid-go/helpers/mail (exports: ; functions: NewEmail,NewSingleEmail,NewContent,NewV3Mail,NewV3MailInit)
+
+// Package mail is a stub of github.com/sendgrid/sendgrid-go/helpers/mail, generated by depstubber.
+package mail
+
+import ()
+
+type Asm struct {
+ GroupID int
+ GroupsToDisplay []int
+}
+
+func (_ *Asm) AddGroupsToDisplay(_ ...int) *Asm {
+ return nil
+}
+
+func (_ *Asm) SetGroupID(_ int) *Asm {
+ return nil
+}
+
+type Attachment struct {
+ Content string
+ Type string
+ Name string
+ Filename string
+ Disposition string
+ ContentID string
+}
+
+func (_ *Attachment) SetContent(_ string) *Attachment {
+ return nil
+}
+
+func (_ *Attachment) SetContentID(_ string) *Attachment {
+ return nil
+}
+
+func (_ *Attachment) SetDisposition(_ string) *Attachment {
+ return nil
+}
+
+func (_ *Attachment) SetFilename(_ string) *Attachment {
+ return nil
+}
+
+func (_ *Attachment) SetType(_ string) *Attachment {
+ return nil
+}
+
+type BccSetting struct {
+ Enable *bool
+ Email string
+}
+
+func (_ *BccSetting) SetEmail(_ string) *BccSetting {
+ return nil
+}
+
+func (_ *BccSetting) SetEnable(_ bool) *BccSetting {
+ return nil
+}
+
+type ClickTrackingSetting struct {
+ Enable *bool
+ EnableText *bool
+}
+
+func (_ *ClickTrackingSetting) SetEnable(_ bool) *ClickTrackingSetting {
+ return nil
+}
+
+func (_ *ClickTrackingSetting) SetEnableText(_ bool) *ClickTrackingSetting {
+ return nil
+}
+
+type Content struct {
+ Type string
+ Value string
+}
+
+type Email struct {
+ Name string
+ Address string
+}
+
+type FooterSetting struct {
+ Enable *bool
+ Text string
+ Html string
+}
+
+func (_ *FooterSetting) SetEnable(_ bool) *FooterSetting {
+ return nil
+}
+
+func (_ *FooterSetting) SetHTML(_ string) *FooterSetting {
+ return nil
+}
+
+func (_ *FooterSetting) SetText(_ string) *FooterSetting {
+ return nil
+}
+
+type GaSetting struct {
+ Enable *bool
+ CampaignSource string
+ CampaignTerm string
+ CampaignContent string
+ CampaignName string
+ CampaignMedium string
+}
+
+func (_ *GaSetting) SetCampaignContent(_ string) *GaSetting {
+ return nil
+}
+
+func (_ *GaSetting) SetCampaignMedium(_ string) *GaSetting {
+ return nil
+}
+
+func (_ *GaSetting) SetCampaignName(_ string) *GaSetting {
+ return nil
+}
+
+func (_ *GaSetting) SetCampaignSource(_ string) *GaSetting {
+ return nil
+}
+
+func (_ *GaSetting) SetCampaignTerm(_ string) *GaSetting {
+ return nil
+}
+
+func (_ *GaSetting) SetEnable(_ bool) *GaSetting {
+ return nil
+}
+
+type MailSettings struct {
+ BCC *BccSetting
+ BypassListManagement *Setting
+ Footer *FooterSetting
+ SandboxMode *Setting
+ SpamCheckSetting *SpamCheckSetting
+}
+
+func (_ *MailSettings) SetBCC(_ *BccSetting) *MailSettings {
+ return nil
+}
+
+func (_ *MailSettings) SetBypassListManagement(_ *Setting) *MailSettings {
+ return nil
+}
+
+func (_ *MailSettings) SetFooter(_ *FooterSetting) *MailSettings {
+ return nil
+}
+
+func (_ *MailSettings) SetSandboxMode(_ *Setting) *MailSettings {
+ return nil
+}
+
+func (_ *MailSettings) SetSpamCheckSettings(_ *SpamCheckSetting) *MailSettings {
+ return nil
+}
+
+func NewContent(_ string, _ string) *Content {
+ return nil
+}
+
+func NewEmail(_ string, _ string) *Email {
+ return nil
+}
+
+func NewSingleEmail(_ *Email, _ string, _ *Email, _ string, _ string) *SGMailV3 {
+ return nil
+}
+
+func NewV3Mail() *SGMailV3 {
+ return nil
+}
+
+func NewV3MailInit(_ *Email, _ string, _ *Email, _ ...*Content) *SGMailV3 {
+ return nil
+}
+
+type OpenTrackingSetting struct {
+ Enable *bool
+ SubstitutionTag string
+}
+
+func (_ *OpenTrackingSetting) SetEnable(_ bool) *OpenTrackingSetting {
+ return nil
+}
+
+func (_ *OpenTrackingSetting) SetSubstitutionTag(_ string) *OpenTrackingSetting {
+ return nil
+}
+
+type Personalization struct {
+ To []*Email
+ CC []*Email
+ BCC []*Email
+ Subject string
+ Headers map[string]string
+ Substitutions map[string]string
+ CustomArgs map[string]string
+ DynamicTemplateData map[string]interface{}
+ Categories []string
+ SendAt int
+}
+
+func (_ *Personalization) AddBCCs(_ ...*Email) {}
+
+func (_ *Personalization) AddCCs(_ ...*Email) {}
+
+func (_ *Personalization) AddTos(_ ...*Email) {}
+
+func (_ *Personalization) SetCustomArg(_ string, _ string) {}
+
+func (_ *Personalization) SetDynamicTemplateData(_ string, _ interface{}) {}
+
+func (_ *Personalization) SetHeader(_ string, _ string) {}
+
+func (_ *Personalization) SetSendAt(_ int) {}
+
+func (_ *Personalization) SetSubstitution(_ string, _ string) {}
+
+type SGMailV3 struct {
+ From *Email
+ Subject string
+ Personalizations []*Personalization
+ Content []*Content
+ Attachments []*Attachment
+ TemplateID string
+ Sections map[string]string
+ Headers map[string]string
+ Categories []string
+ CustomArgs map[string]string
+ SendAt int
+ BatchID string
+ Asm *Asm
+ IPPoolID string
+ MailSettings *MailSettings
+ TrackingSettings *TrackingSettings
+ ReplyTo *Email
+}
+
+func (_ *SGMailV3) AddAttachment(_ ...*Attachment) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) AddCategories(_ ...string) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) AddContent(_ ...*Content) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) AddPersonalizations(_ ...*Personalization) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) AddSection(_ string, _ string) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetASM(_ *Asm) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetBatchID(_ string) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetCustomArg(_ string, _ string) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetFrom(_ *Email) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetHeader(_ string, _ string) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetIPPoolID(_ string) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetMailSettings(_ *MailSettings) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetReplyTo(_ *Email) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetSendAt(_ int) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetTemplateID(_ string) *SGMailV3 {
+ return nil
+}
+
+func (_ *SGMailV3) SetTrackingSettings(_ *TrackingSettings) *SGMailV3 {
+ return nil
+}
+
+type SandboxModeSetting struct {
+ Enable *bool
+ ForwardSpam *bool
+ SpamCheck *SpamCheckSetting
+}
+
+type Setting struct {
+ Enable *bool
+}
+
+type SpamCheckSetting struct {
+ Enable *bool
+ SpamThreshold int
+ PostToURL string
+}
+
+func (_ *SpamCheckSetting) SetEnable(_ bool) *SpamCheckSetting {
+ return nil
+}
+
+func (_ *SpamCheckSetting) SetPostToURL(_ string) *SpamCheckSetting {
+ return nil
+}
+
+func (_ *SpamCheckSetting) SetSpamThreshold(_ int) *SpamCheckSetting {
+ return nil
+}
+
+type SubscriptionTrackingSetting struct {
+ Enable *bool
+ Text string
+ Html string
+ SubstitutionTag string
+}
+
+func (_ *SubscriptionTrackingSetting) SetEnable(_ bool) *SubscriptionTrackingSetting {
+ return nil
+}
+
+func (_ *SubscriptionTrackingSetting) SetHTML(_ string) *SubscriptionTrackingSetting {
+ return nil
+}
+
+func (_ *SubscriptionTrackingSetting) SetSubstitutionTag(_ string) *SubscriptionTrackingSetting {
+ return nil
+}
+
+func (_ *SubscriptionTrackingSetting) SetText(_ string) *SubscriptionTrackingSetting {
+ return nil
+}
+
+type TrackingSettings struct {
+ ClickTracking *ClickTrackingSetting
+ OpenTracking *OpenTrackingSetting
+ SubscriptionTracking *SubscriptionTrackingSetting
+ GoogleAnalytics *GaSetting
+ BCC *BccSetting
+ BypassListManagement *Setting
+ Footer *FooterSetting
+ SandboxMode *SandboxModeSetting
+}
+
+func (_ *TrackingSettings) SetClickTracking(_ *ClickTrackingSetting) *TrackingSettings {
+ return nil
+}
+
+func (_ *TrackingSettings) SetGoogleAnalytics(_ *GaSetting) *TrackingSettings {
+ return nil
+}
+
+func (_ *TrackingSettings) SetOpenTracking(_ *OpenTrackingSetting) *TrackingSettings {
+ return nil
+}
+
+func (_ *TrackingSettings) SetSubscriptionTracking(_ *SubscriptionTrackingSetting) *TrackingSettings {
+ return nil
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Email/vendor/modules.txt b/ql/test/library-tests/semmle/go/frameworks/Email/vendor/modules.txt
new file mode 100644
index 00000000000..d782a4cc242
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Email/vendor/modules.txt
@@ -0,0 +1,6 @@
+# github.com/sendgrid/sendgrid-go v3.5.0+incompatible
+## explicit
+github.com/sendgrid/sendgrid-go
+# github.com/stretchr/testify v1.5.1
+## explicit
+github.com/stretchr/testify
From 04a19b71504a0211584cba331a57e2b23a2f7596 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Mon, 4 May 2020 09:13:23 +0100
Subject: [PATCH 040/157] Clean up `EmailInjection.qll` and related libraries.
---
.../experimental/CWE-640/EmailInjection.qhelp | 26 ++--
.../CWE-640/EmailInjectionCustomizations.qll | 2 +-
ql/src/semmle/go/frameworks/Email.qll | 114 ++++++++----------
.../semmle/go/frameworks/Email/MailData.ql | 2 +-
4 files changed, 63 insertions(+), 81 deletions(-)
diff --git a/ql/src/experimental/CWE-640/EmailInjection.qhelp b/ql/src/experimental/CWE-640/EmailInjection.qhelp
index b1749fbb488..f3ac1dc4bbe 100644
--- a/ql/src/experimental/CWE-640/EmailInjection.qhelp
+++ b/ql/src/experimental/CWE-640/EmailInjection.qhelp
@@ -2,32 +2,30 @@
- Using untrusted input to construct an email induces multiple security
- vulnerabilities. For instance, inclusion of an untrusted input in a email body
- may allow an attacker to conduct Cross Site Scripting (XSS) Attacks. While
- inclusion of an HTTP Header in the email body may allow a full account
- compromise as shown in the example below.
+ Using untrusted input to construct an email can cause multiple security
+ vulnerabilities. For instance, inclusion of an untrusted input in an email body
+ may allow an attacker to conduct Cross Site Scripting (XSS) attacks, while
+ inclusion of an HTTP header may allow a full account compromise as shown in the
+ example below.
- Any data which is passed to an email subject or body must be sanitized before use.
+ Any data which is passed to an email subject or body must be sanitized before use.
- In the following example snippet, the
- host
- field is user controlled.
+ In the following example snippet, the host field is user controlled.
- A malicious user can send an HTTP request to the targeted web site,
- but with a Host header that refers to his own web site. This means the
- emails will be sent out to potential victims, originating from a server
+ A malicious user can send an HTTP request to the targeted web site,
+ but with a Host header that refers to their own web site. This means the
+ emails will be sent out to potential victims, originating from a server
they trust, but with links leading to a malicious web site.
- If the email contains a password reset link, and should the victim click
+ If the email contains a password reset link, and should the victim click
the link, the secret reset token will be leaked to the attacker. Using the
leaked token, the attacker can then construct the real reset link and use it to
change the victim's password.
@@ -45,4 +43,4 @@
.
-
\ No newline at end of file
+
diff --git a/ql/src/experimental/CWE-640/EmailInjectionCustomizations.qll b/ql/src/experimental/CWE-640/EmailInjectionCustomizations.qll
index 104a5111c59..77e3ad97a3b 100644
--- a/ql/src/experimental/CWE-640/EmailInjectionCustomizations.qll
+++ b/ql/src/experimental/CWE-640/EmailInjectionCustomizations.qll
@@ -25,6 +25,6 @@ module EmailInjection {
* A data-flow node that becomes part of an email considered as a taint sink for email injection.
*/
class MailDataAsSink extends Sink {
- MailDataAsSink() { this instanceof MailData }
+ MailDataAsSink() { this instanceof EmailData }
}
}
diff --git a/ql/src/semmle/go/frameworks/Email.qll b/ql/src/semmle/go/frameworks/Email.qll
index 7041cf86b57..90cbb523e19 100644
--- a/ql/src/semmle/go/frameworks/Email.qll
+++ b/ql/src/semmle/go/frameworks/Email.qll
@@ -3,44 +3,36 @@
import go
/**
- * A data-flow node that represents data written to an email.
- * Data in this case includes the email headers and the mail body
+ * A data-flow node that represents data written to an email, either as part
+ * of the headers or as part of the body.
*
* Extend this class to refine existing API models. If you want to model new APIs,
- * extend `MailDataCall::Range` instead.
+ * extend `EmailData::Range` instead.
*/
-class MailData extends DataFlow::Node {
- MailDataCall::Range self;
+class EmailData extends DataFlow::Node {
+ EmailData::Range self;
- MailData() { this = self.getData() }
+ EmailData() { this = self }
}
-/** Provides classes for working with calls which write data to an email. */
-module MailDataCall {
+/** Provides classes for working with data that is incorporated into an email. */
+module EmailData {
/**
- * A data-flow node that represents a call which writes data to an email.
- * Data in this case refers to email headers and the mail body
+ * A data-flow node that represents data which is written to an email, either as part
+ * of the headers or as part of the body.
*
+ * Extend this class to model new APIs. If you want to refine existing API models,
+ * extend `EmailData` instead.
*/
- abstract class Range extends DataFlow::CallNode {
- /** Gets data written to an email connection. */
- abstract DataFlow::Node getData();
- }
+ abstract class Range extends DataFlow::Node { }
- /** Get the package name `github.com/sendgrid/sendgrid-go/helpers/mail`. */
- bindingset[result]
- private string sendgridMail() { result = "github.com/sendgrid/sendgrid-go/helpers/mail" }
-
- /** A Client.Data expression string used in an API function of the net/smtp package. */
+ /** A data-flow node that is written to an email using the net/smtp package. */
private class SmtpData extends Range {
SmtpData() {
// func (c *Client) Data() (io.WriteCloser, error)
- this.getTarget().(Method).hasQualifiedName("net/smtp", "Client", "Data")
- }
-
- override DataFlow::Node getData() {
- exists(DataFlow::CallNode write, DataFlow::Node writer, int i |
- this.getResult(0) = writer and
+ exists(Method data, DataFlow::CallNode write, DataFlow::Node writer, int i |
+ data.hasQualifiedName("net/smtp", "Client", "Data") and
+ writer = data.getACall().getResult(0) and
(
write.getTarget().hasQualifiedName("fmt", "Fprintf")
or
@@ -48,32 +40,22 @@ module MailDataCall {
) and
writer.getASuccessor*() = write.getArgument(0) and
i > 0 and
- write.getArgument(i) = result
+ write.getArgument(i) = this
+ )
+ or
+ // func SendMail(addr string, a Auth, from string, to []string, msg []byte) error
+ exists(Function sendMail |
+ sendMail.hasQualifiedName("net/smtp", "SendMail") and
+ this = sendMail.getACall().getArgument(4)
)
}
}
- /** A send mail expression string used in an API function of the net/smtp package. */
- private class SmtpSendMail extends Range {
- SmtpSendMail() {
- // func SendMail(addr string, a Auth, from string, to []string, msg []byte) error
- this.getTarget().hasQualifiedName("net/smtp", "SendMail")
- }
+ /** Gets the package name `github.com/sendgrid/sendgrid-go/helpers/mail`. */
+ bindingset[result]
+ private string sendgridMail() { result = "github.com/sendgrid/sendgrid-go/helpers/mail" }
- override DataFlow::Node getData() { result = this.getArgument(4) }
- }
-
- /** A call to `NewSingleEmail` API function of the Sendgrid mail package. */
- private class SendGridSingleEmail extends Range {
- SendGridSingleEmail() {
- // func NewSingleEmail(from *Email, subject string, to *Email, plainTextContent string, htmlContent string) *SGMailV3
- this.getTarget().hasQualifiedName(sendgridMail(), "NewSingleEmail")
- }
-
- override DataFlow::Node getData() { result = this.getArgument([1, 3, 4]) }
- }
-
- /* Gets the value of the `i`-th content parameter of the given `call` */
+ /* Gets the value of the `i`th content parameter of the given `call` */
private DataFlow::Node getContent(DataFlow::CallNode call, int i) {
exists(DataFlow::CallNode cn, DataFlow::Node content |
// func NewContent(contentType string, value string) *Content
@@ -84,27 +66,29 @@ module MailDataCall {
)
}
- /** A call to `NewV3MailInit` API function of the Sendgrid mail package. */
- private class SendGridV3Init extends Range {
- SendGridV3Init() {
- // func NewV3MailInit(from *Email, subject string, to *Email, content ...*Content) *SGMailV3
- this.getTarget().hasQualifiedName(sendgridMail(), "NewV3MailInit")
- }
-
- override DataFlow::Node getData() {
- exists(int i | result = getContent(this, i) and i >= 3)
+ /** A data-flow node that is written to an email using the sendgrid/sendgrid-go package. */
+ private class SendGridSingleEmail extends Range {
+ SendGridSingleEmail() {
+ // func NewSingleEmail(from *Email, subject string, to *Email, plainTextContent string, htmlContent string) *SGMailV3
+ exists(Function newSingleEmail |
+ newSingleEmail.hasQualifiedName(sendgridMail(), "NewSingleEmail") and
+ this = newSingleEmail.getACall().getArgument([1, 3, 4])
+ )
+ or
+ // func NewV3MailInit(from *Email, subject string, to *Email, content ...*Content) *SGMailV3
+ exists(Function newv3MailInit |
+ newv3MailInit.hasQualifiedName(sendgridMail(), "NewV3MailInit")
+ |
+ this = getContent(newv3MailInit.getACall(), any(int i | i >= 3))
+ or
+ this = newv3MailInit.getACall().getArgument(1)
+ )
or
- result = this.getArgument(1)
- }
- }
-
- /** A call to `AddContent` API function of the Sendgrid mail package. */
- private class SendGridAddContent extends Range {
- SendGridAddContent() {
// func (s *SGMailV3) AddContent(c ...*Content) *SGMailV3
- this.getTarget().(Method).hasQualifiedName(sendgridMail(), "SGMailV3", "AddContent")
+ exists(Method addContent |
+ addContent.hasQualifiedName(sendgridMail(), "SGMailV3", "AddContent") and
+ this = getContent(addContent.getACall(), _)
+ )
}
-
- override DataFlow::Node getData() { result = getContent(this, _) }
}
}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Email/MailData.ql b/ql/test/library-tests/semmle/go/frameworks/Email/MailData.ql
index 4a630d2a96c..c67ce8753e1 100644
--- a/ql/test/library-tests/semmle/go/frameworks/Email/MailData.ql
+++ b/ql/test/library-tests/semmle/go/frameworks/Email/MailData.ql
@@ -1,4 +1,4 @@
import go
-from MailData f
+from EmailData f
select f
From d0e8d6efda101d1bbae846e4927e04d54f0f0045 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Mon, 4 May 2020 09:47:34 +0100
Subject: [PATCH 041/157] Fix post-update nodes for function arguments.
---
ql/src/semmle/go/dataflow/internal/DataFlowUtil.qll | 2 +-
.../FunctionOutput_getExitNode.expected | 1 +
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/ql/src/semmle/go/dataflow/internal/DataFlowUtil.qll b/ql/src/semmle/go/dataflow/internal/DataFlowUtil.qll
index 3645073cf2f..6ccd6338b76 100644
--- a/ql/src/semmle/go/dataflow/internal/DataFlowUtil.qll
+++ b/ql/src/semmle/go/dataflow/internal/DataFlowUtil.qll
@@ -408,7 +408,7 @@ class PostUpdateNode extends Node {
)
or
preupd instanceof ArgumentNode and
- mutableType(preupd.getType())
+ mutableType(preupd.getType().getUnderlyingType())
) and
(
preupd = this.(SsaNode).getAUse()
diff --git a/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionOutput_getExitNode.expected b/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionOutput_getExitNode.expected
index f84d4616a8f..cf0fe4752b4 100644
--- a/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionOutput_getExitNode.expected
+++ b/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionOutput_getExitNode.expected
@@ -1,3 +1,4 @@
+| parameter 0 | tst.go:10:2:10:29 | call to ReadFrom | tst.go:8:12:8:17 | definition of reader |
| receiver | tst.go:10:2:10:29 | call to ReadFrom | tst.go:9:2:9:12 | definition of bytesBuffer |
| result | main.go:51:2:51:14 | call to op | main.go:51:2:51:14 | call to op |
| result | main.go:53:2:53:22 | call to op2 | main.go:53:2:53:22 | call to op2 |
From 5b0c48e332e6129b3d1258bd74d2303df66b744c Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Mon, 4 May 2020 09:48:41 +0100
Subject: [PATCH 042/157] Add taint models for `fmt.Fprintf` and
`io.WriteString`.
---
ql/src/semmle/go/frameworks/Stdlib.qll | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
diff --git a/ql/src/semmle/go/frameworks/Stdlib.qll b/ql/src/semmle/go/frameworks/Stdlib.qll
index ec496aca063..82c7d51f8d8 100644
--- a/ql/src/semmle/go/frameworks/Stdlib.qll
+++ b/ql/src/semmle/go/frameworks/Stdlib.qll
@@ -93,6 +93,15 @@ module Fmt {
override DataFlow::Node getAMessageComponent() { result = this.getAnArgument() }
}
+
+ private class FprintfModel extends TaintTracking::FunctionModel {
+ FprintfModel() { this.hasQualifiedName("fmt", "Fprintf") }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isParameter(any(int i | i > 0)) and
+ output.isParameter(0)
+ }
+ }
}
/** Provides models of commonly used functions in the `io` package. */
@@ -106,6 +115,15 @@ module Io {
inp.isReceiver() and outp.isParameter(0)
}
}
+
+ private class WriteStringModel extends TaintTracking::FunctionModel {
+ WriteStringModel() { this.hasQualifiedName("io", "WriteString") }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isParameter(1) and
+ output.isParameter(0)
+ }
+ }
}
/** Provides models of commonly used functions in the `io/ioutil` package. */
From 5e8e51993e1469a8db23e4b48ed87a761a3d518c Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Mon, 4 May 2020 09:48:55 +0100
Subject: [PATCH 043/157] Simplify `SmtpData`.
---
ql/src/semmle/go/frameworks/Email.qll | 12 ++----------
1 file changed, 2 insertions(+), 10 deletions(-)
diff --git a/ql/src/semmle/go/frameworks/Email.qll b/ql/src/semmle/go/frameworks/Email.qll
index 90cbb523e19..8bcb90c7df6 100644
--- a/ql/src/semmle/go/frameworks/Email.qll
+++ b/ql/src/semmle/go/frameworks/Email.qll
@@ -30,17 +30,9 @@ module EmailData {
private class SmtpData extends Range {
SmtpData() {
// func (c *Client) Data() (io.WriteCloser, error)
- exists(Method data, DataFlow::CallNode write, DataFlow::Node writer, int i |
+ exists(Method data |
data.hasQualifiedName("net/smtp", "Client", "Data") and
- writer = data.getACall().getResult(0) and
- (
- write.getTarget().hasQualifiedName("fmt", "Fprintf")
- or
- write.getTarget().hasQualifiedName("io", "WriteString")
- ) and
- writer.getASuccessor*() = write.getArgument(0) and
- i > 0 and
- write.getArgument(i) = this
+ this.(DataFlow::SsaNode).getInit() = data.getACall().getResult(0)
)
or
// func SendMail(addr string, a Auth, from string, to []string, msg []byte) error
From e632c75de33e1f196b0428fe30e0c68fbf3ba749 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Mon, 4 May 2020 16:11:23 +0100
Subject: [PATCH 044/157] Add support for taint models involving "backwards"
taint propagation from results to arguments.
---
.../go/dataflow/FunctionInputsAndOutputs.qll | 56 ++++++++++++++++++-
.../FunctionInput_getEntryNode.expected | 1 +
2 files changed, 56 insertions(+), 1 deletion(-)
diff --git a/ql/src/semmle/go/dataflow/FunctionInputsAndOutputs.qll b/ql/src/semmle/go/dataflow/FunctionInputsAndOutputs.qll
index 1cf73dad3e8..922aee5298c 100644
--- a/ql/src/semmle/go/dataflow/FunctionInputsAndOutputs.qll
+++ b/ql/src/semmle/go/dataflow/FunctionInputsAndOutputs.qll
@@ -12,7 +12,14 @@ private import semmle.go.dataflow.internal.DataFlowPrivate
*/
private newtype TFunctionInput =
TInParameter(int i) { exists(SignatureType s | exists(s.getParameterType(i))) } or
- TInReceiver()
+ TInReceiver() or
+ TInResult(int index) {
+ // the one and only result
+ index = -1
+ or
+ // one among several results
+ exists(SignatureType s | exists(s.getResultType(index)))
+ }
/**
* An abstract representation of an input to a function, which is either a parameter
@@ -25,6 +32,12 @@ class FunctionInput extends TFunctionInput {
/** Holds if this represents the receiver of a function. */
predicate isReceiver() { none() }
+ /** Holds if this represents the result of a function. */
+ predicate isResult() { none() }
+
+ /** Holds if this represents the `i`th result of a function. */
+ predicate isResult(int i) { none() }
+
/** Gets the data-flow node corresponding to this input for the call `c`. */
final DataFlow::Node getNode(DataFlow::CallNode c) { result = getEntryNode(c) }
@@ -70,6 +83,47 @@ private class ReceiverInput extends FunctionInput, TInReceiver {
override string toString() { result = "receiver" }
}
+/**
+ * A result position of a function, viewed as an input.
+ *
+ * Results are usually outputs rather than inputs, but for taint tracking it can be useful to
+ * think of taint propagating backwards from a result of a function to its arguments. For instance,
+ * the function `bufio.NewWriter` returns a writer `bw` that buffers write operations to an
+ * underlying writer `w`. If tainted data is written to `bw`, then it makes sense to propagate
+ * that taint back to the underlying writer `w`, which can be modeled by saying that
+ * `bufio.NewWriter` propagates taint from its result to its first argument.
+ */
+private class ResultInput extends FunctionInput, TInResult {
+ int index;
+
+ ResultInput() { this = TInResult(index) }
+
+ override predicate isResult() { index = -1 }
+
+ override predicate isResult(int i) { i = index and i >= 0 }
+
+ override DataFlow::Node getEntryNode(DataFlow::CallNode c) {
+ exists(DataFlow::PostUpdateNode pun, DataFlow::Node init |
+ pun = result and
+ init = pun.(DataFlow::SsaNode).getInit()
+ |
+ index = -1 and
+ init = c.getResult()
+ or
+ index >= 0 and
+ init = c.getResult(index)
+ )
+ }
+
+ override DataFlow::Node getExitNode(FuncDef f) { none() }
+
+ override string toString() {
+ index = -1 and result = "result"
+ or
+ index >= 0 and result = "result " + index
+ }
+}
+
/**
* An abstract representation of an output of a function, which is one of its results.
*/
diff --git a/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionInput_getEntryNode.expected b/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionInput_getEntryNode.expected
index 2989f4ef314..649f5fd6273 100644
--- a/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionInput_getEntryNode.expected
+++ b/ql/test/library-tests/semmle/go/dataflow/FunctionInputsAndOutputs/FunctionInput_getEntryNode.expected
@@ -13,3 +13,4 @@
| parameter 2 | main.go:57:2:57:27 | call to Printf | main.go:57:26:57:26 | y |
| receiver | main.go:53:14:53:21 | call to bump | main.go:53:14:53:14 | c |
| receiver | tst.go:10:2:10:29 | call to ReadFrom | tst.go:10:2:10:12 | bytesBuffer |
+| result | tst.go:9:17:9:33 | call to new | tst.go:9:2:9:12 | definition of bytesBuffer |
From 54f10157b042355db16f71b86386fa4779b2a66f Mon Sep 17 00:00:00 2001
From: Max Schaefer <54907921+max-schaefer@users.noreply.github.com>
Date: Tue, 5 May 2020 11:16:09 +0100
Subject: [PATCH 045/157] Update ql/src/semmle/go/frameworks/Email.qll
Co-authored-by: Sauyon Lee
---
ql/src/semmle/go/frameworks/Email.qll | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/ql/src/semmle/go/frameworks/Email.qll b/ql/src/semmle/go/frameworks/Email.qll
index 90cbb523e19..d55d75ebd89 100644
--- a/ql/src/semmle/go/frameworks/Email.qll
+++ b/ql/src/semmle/go/frameworks/Email.qll
@@ -52,7 +52,6 @@ module EmailData {
}
/** Gets the package name `github.com/sendgrid/sendgrid-go/helpers/mail`. */
- bindingset[result]
private string sendgridMail() { result = "github.com/sendgrid/sendgrid-go/helpers/mail" }
/* Gets the value of the `i`th content parameter of the given `call` */
@@ -67,8 +66,8 @@ module EmailData {
}
/** A data-flow node that is written to an email using the sendgrid/sendgrid-go package. */
- private class SendGridSingleEmail extends Range {
- SendGridSingleEmail() {
+ private class SendGridEmail extends Range {
+ SendGridEmail() {
// func NewSingleEmail(from *Email, subject string, to *Email, plainTextContent string, htmlContent string) *SGMailV3
exists(Function newSingleEmail |
newSingleEmail.hasQualifiedName(sendgridMail(), "NewSingleEmail") and
From a841077cbe84d36574b0885388bb4e9e85589de1 Mon Sep 17 00:00:00 2001
From: Sauyon Lee
Date: Tue, 5 May 2020 03:25:08 -0700
Subject: [PATCH 046/157] Add support for Mux library
---
change-notes/2020-05-05-mux-model.md | 3 ++
ql/src/go.qll | 1 +
ql/src/semmle/go/frameworks/Mux.qll | 15 ++++++++
.../Mux/UntrustedFlowSources.expected | 2 +
.../go/frameworks/Mux/UntrustedFlowSources.ql | 3 ++
.../semmle/go/frameworks/Mux/go.mod | 5 +++
.../semmle/go/frameworks/Mux/mux.go | 37 +++++++++++++++++++
.../Mux/vendor/github.com/gorilla/mux/LICENSE | 27 ++++++++++++++
.../Mux/vendor/github.com/gorilla/mux/stub.go | 16 ++++++++
.../go/frameworks/Mux/vendor/modules.txt | 3 ++
10 files changed, 112 insertions(+)
create mode 100644 change-notes/2020-05-05-mux-model.md
create mode 100644 ql/src/semmle/go/frameworks/Mux.qll
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Mux/UntrustedFlowSources.expected
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Mux/UntrustedFlowSources.ql
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Mux/go.mod
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Mux/mux.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Mux/vendor/github.com/gorilla/mux/LICENSE
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Mux/vendor/github.com/gorilla/mux/stub.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Mux/vendor/modules.txt
diff --git a/change-notes/2020-05-05-mux-model.md b/change-notes/2020-05-05-mux-model.md
new file mode 100644
index 00000000000..4ca9d688dd7
--- /dev/null
+++ b/change-notes/2020-05-05-mux-model.md
@@ -0,0 +1,3 @@
+lgtm,codescanning
+* Basic support for the [Mux](https://github.com/gorilla/mux/) HTTP library has been added, which
+ may lead to more results from the security queries.
diff --git a/ql/src/go.qll b/ql/src/go.qll
index 36c43aa789d..23457315d48 100644
--- a/ql/src/go.qll
+++ b/ql/src/go.qll
@@ -27,6 +27,7 @@ import semmle.go.dataflow.SSA
import semmle.go.frameworks.Email
import semmle.go.frameworks.HTTP
import semmle.go.frameworks.Macaron
+import semmle.go.frameworks.Mux
import semmle.go.frameworks.SystemCommandExecutors
import semmle.go.frameworks.SQL
import semmle.go.frameworks.XPath
diff --git a/ql/src/semmle/go/frameworks/Mux.qll b/ql/src/semmle/go/frameworks/Mux.qll
new file mode 100644
index 00000000000..2c2d783f566
--- /dev/null
+++ b/ql/src/semmle/go/frameworks/Mux.qll
@@ -0,0 +1,15 @@
+/**
+ * Provides classes for working with concepts in the Mux HTTP middleware library.
+ */
+
+import go
+
+/**
+ * Provides classes for working with concepts in the Mux HTTP middleware library.
+ */
+module Mux {
+ /** An access to a Mux middleware variable. */
+ class RequestVars extends DataFlow::UntrustedFlowSource::Range, DataFlow::CallNode {
+ RequestVars() { this.getTarget().hasQualifiedName("github.com/gorilla/mux", "Vars") }
+ }
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Mux/UntrustedFlowSources.expected b/ql/test/library-tests/semmle/go/frameworks/Mux/UntrustedFlowSources.expected
new file mode 100644
index 00000000000..84776eef575
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Mux/UntrustedFlowSources.expected
@@ -0,0 +1,2 @@
+| mux.go:15:10:15:20 | call to Vars |
+| mux.go:21:13:21:23 | call to Vars |
diff --git a/ql/test/library-tests/semmle/go/frameworks/Mux/UntrustedFlowSources.ql b/ql/test/library-tests/semmle/go/frameworks/Mux/UntrustedFlowSources.ql
new file mode 100644
index 00000000000..0715d64f8e2
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Mux/UntrustedFlowSources.ql
@@ -0,0 +1,3 @@
+import go
+
+select any(UntrustedFlowSource ufs)
diff --git a/ql/test/library-tests/semmle/go/frameworks/Mux/go.mod b/ql/test/library-tests/semmle/go/frameworks/Mux/go.mod
new file mode 100644
index 00000000000..c173488c7c7
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Mux/go.mod
@@ -0,0 +1,5 @@
+module codeql-go-tests/frameworks/Mux
+
+go 1.14
+
+require github.com/gorilla/mux v1.7.4
diff --git a/ql/test/library-tests/semmle/go/frameworks/Mux/mux.go b/ql/test/library-tests/semmle/go/frameworks/Mux/mux.go
new file mode 100644
index 00000000000..9b70a2122aa
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Mux/mux.go
@@ -0,0 +1,37 @@
+package main
+
+//go:generate depstubber -vendor github.com/gorilla/mux "" Vars
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "os/exec"
+
+ "github.com/gorilla/mux"
+)
+
+func ArticlesHandler(w http.ResponseWriter, r *http.Request) {
+ vars := mux.Vars(r)
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprintf(w, "Category: %v\n", vars["category"])
+}
+
+func CmdHandler(w http.ResponseWriter, r *http.Request) {
+ cmdName := mux.Vars(r)["cmd"]
+
+ cmd := exec.Command(cmdName)
+ stdoutStderr, err := cmd.CombinedOutput()
+ if err != nil {
+ log.Print(err)
+ }
+ fmt.Fprintf(w, "%s\n", stdoutStderr)
+}
+
+func main() {
+ r := mux.NewRouter()
+ r.HandleFunc("/run/{cmd}", CmdHandler)
+ r.HandleFunc("/articles/{category}", ArticlesHandler)
+ http.Handle("/", r)
+ log.Fatal(http.ListenAndServe(":8090", nil))
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Mux/vendor/github.com/gorilla/mux/LICENSE b/ql/test/library-tests/semmle/go/frameworks/Mux/vendor/github.com/gorilla/mux/LICENSE
new file mode 100644
index 00000000000..6903df6386e
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Mux/vendor/github.com/gorilla/mux/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Mux/vendor/github.com/gorilla/mux/stub.go b/ql/test/library-tests/semmle/go/frameworks/Mux/vendor/github.com/gorilla/mux/stub.go
new file mode 100644
index 00000000000..f5087942ae5
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Mux/vendor/github.com/gorilla/mux/stub.go
@@ -0,0 +1,16 @@
+// Code generated by depstubber. DO NOT EDIT.
+// This is a simple stub for github.com/gorilla/mux, strictly for use in testing.
+
+// See the LICENSE file for information about the licensing of the original library.
+// Source: github.com/gorilla/mux (exports: ; functions: Vars)
+
+// Package mux is a stub of github.com/gorilla/mux, generated by depstubber.
+package mux
+
+import (
+ http "net/http"
+)
+
+func Vars(_ *http.Request) map[string]string {
+ return nil
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Mux/vendor/modules.txt b/ql/test/library-tests/semmle/go/frameworks/Mux/vendor/modules.txt
new file mode 100644
index 00000000000..d96be1fa71b
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Mux/vendor/modules.txt
@@ -0,0 +1,3 @@
+# github.com/gorilla/mux v1.7.4
+## explicit
+github.com/gorilla/mux
From be94f2b9e6e6b5dc593e75f862f606855ae94104 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Mon, 4 May 2020 16:12:23 +0100
Subject: [PATCH 047/157] Improve and extend various standard-library function
models.
---
ql/src/semmle/go/frameworks/Stdlib.qll | 78 ++++++++++++++++++--------
1 file changed, 55 insertions(+), 23 deletions(-)
diff --git a/ql/src/semmle/go/frameworks/Stdlib.qll b/ql/src/semmle/go/frameworks/Stdlib.qll
index 82c7d51f8d8..86c8a3909ea 100644
--- a/ql/src/semmle/go/frameworks/Stdlib.qll
+++ b/ql/src/semmle/go/frameworks/Stdlib.qll
@@ -67,39 +67,55 @@ module PathFilePath {
}
}
+/** Provides models of commonly used functions in the `bytes` package. */
+private module Bytes {
+ private class BufferBytes extends TaintTracking::FunctionModel, Method {
+ BufferBytes() { this.hasQualifiedName("bytes", "Buffer", ["Bytes", "String"]) }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isReceiver() and output.isResult()
+ }
+ }
+}
+
/** Provides models of commonly used functions in the `fmt` package. */
module Fmt {
/** The `Sprint` function or one of its variants. */
class Sprinter extends TaintTracking::FunctionModel {
- Sprinter() {
- exists(string sprint | sprint.matches("Sprint%") | hasQualifiedName("fmt", sprint))
- }
+ Sprinter() { this.hasQualifiedName("fmt", ["Sprint", "Sprintf", "Sprintln"]) }
override predicate hasTaintFlow(DataFlow::FunctionInput inp, DataFlow::FunctionOutput outp) {
inp.isParameter(_) and outp.isResult()
}
}
+ /** The `Print` function or one of its variants. */
+ private class Printer extends Function {
+ Printer() { this.hasQualifiedName("fmt", ["Print", "Printf", "Println"]) }
+ }
+
+ /** A call to `Print`, `Fprint`, or similar. */
private class PrintCall extends LoggerCall::Range, DataFlow::CallNode {
- PrintCall() {
- exists(string fn |
- fn = "Print%"
- or
- fn = "Fprint%"
- |
- this.getTarget().hasQualifiedName("fmt", fn)
- )
- }
+ PrintCall() { this.getTarget() instanceof Printer or this.getTarget() instanceof Fprinter }
override DataFlow::Node getAMessageComponent() { result = this.getAnArgument() }
}
- private class FprintfModel extends TaintTracking::FunctionModel {
- FprintfModel() { this.hasQualifiedName("fmt", "Fprintf") }
+ /** The `Fprint` function or one of its variants. */
+ private class Fprinter extends TaintTracking::FunctionModel {
+ Fprinter() { this.hasQualifiedName("fmt", ["Fprint", "Fprintf", "Fprintln"]) }
override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
- input.isParameter(any(int i | i > 0)) and
- output.isParameter(0)
+ input.isParameter(any(int i | i > 0)) and output.isParameter(0)
+ }
+ }
+
+ /** The `Sscan` function or one of its variants. */
+ private class Sscanner extends TaintTracking::FunctionModel {
+ Sscanner() { this.hasQualifiedName("fmt", ["Sscan", "Sscanf", "Sscanln"]) }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isParameter(0) and output.isParameter(any(int i | i > 0))
}
}
}
@@ -107,21 +123,37 @@ module Fmt {
/** Provides models of commonly used functions in the `io` package. */
module Io {
private class ReaderRead extends TaintTracking::FunctionModel, Method {
- ReaderRead() {
- exists(Method im | im.hasQualifiedName("io", "Reader", "Read") | this.implements(im))
- }
+ ReaderRead() { this.implements("io", "Reader", "Read") }
override predicate hasTaintFlow(FunctionInput inp, FunctionOutput outp) {
inp.isReceiver() and outp.isParameter(0)
}
}
- private class WriteStringModel extends TaintTracking::FunctionModel {
- WriteStringModel() { this.hasQualifiedName("io", "WriteString") }
+ private class WriterWrite extends TaintTracking::FunctionModel, Method {
+ WriterWrite() { this.implements("io", "Writer", "Write") }
override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
- input.isParameter(1) and
- output.isParameter(0)
+ input.isParameter(0) and output.isReceiver()
+ }
+ }
+
+ private class WriteString extends TaintTracking::FunctionModel {
+ WriteString() { this.hasQualifiedName("io", "WriteString") }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isParameter(1) and output.isParameter(0)
+ }
+ }
+}
+
+/** Provides models of commonly used functions in the `bufio` package. */
+module Bufio {
+ private class NewWriter extends TaintTracking::FunctionModel {
+ NewWriter() { this.hasQualifiedName("bufio", "NewWriter") }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isResult() and output.isParameter(0)
}
}
}
From 5a96b0e8acc898dd9c89d2c9f04034f942f6cade Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Mon, 4 May 2020 16:13:53 +0100
Subject: [PATCH 048/157] Add two function models for handling MIME APIs.
---
ql/src/semmle/go/frameworks/Email.qll | 30 +++++++++++++++++++++++++++
1 file changed, 30 insertions(+)
diff --git a/ql/src/semmle/go/frameworks/Email.qll b/ql/src/semmle/go/frameworks/Email.qll
index 8bcb90c7df6..b867dfb9813 100644
--- a/ql/src/semmle/go/frameworks/Email.qll
+++ b/ql/src/semmle/go/frameworks/Email.qll
@@ -84,3 +84,33 @@ module EmailData {
}
}
}
+
+/**
+ * A taint model of the `Writer.CreatePart` method from `mime/multipart`.
+ *
+ * If tainted data is written to the multipart section created by this method, the underlying writer
+ * should be considered tainted as well.
+ */
+private class MultipartWriterCreatePartModel extends TaintTracking::FunctionModel, Method {
+ MultipartWriterCreatePartModel() {
+ this.hasQualifiedName("mime/multipart", "Writer", "CreatePart")
+ }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isResult(0) and output.isReceiver()
+ }
+}
+
+/**
+ * A taint model of the `NewWriter` function from `mime/multipart`.
+ *
+ * If tainted data is written to the writer created by this function, the underlying writer
+ * should be considered tainted as well.
+ */
+private class MultipartNewWriterModel extends TaintTracking::FunctionModel {
+ MultipartNewWriterModel() { this.hasQualifiedName("mime/multipart", "NewWriter") }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isResult() and output.isParameter(0)
+ }
+}
From 60a6c9686310e2a58623feba34a65011402562f1 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Mon, 4 May 2020 16:22:39 +0100
Subject: [PATCH 049/157] Simplify modeling of `NewContent`.
---
ql/src/semmle/go/frameworks/Email.qll | 25 +++++++---------
.../CWE-640/EmailInjection.expected | 30 +++++++++++--------
.../go/frameworks/Email/MailData.expected | 8 ++---
3 files changed, 33 insertions(+), 30 deletions(-)
diff --git a/ql/src/semmle/go/frameworks/Email.qll b/ql/src/semmle/go/frameworks/Email.qll
index b867dfb9813..50eb3462595 100644
--- a/ql/src/semmle/go/frameworks/Email.qll
+++ b/ql/src/semmle/go/frameworks/Email.qll
@@ -47,15 +47,15 @@ module EmailData {
bindingset[result]
private string sendgridMail() { result = "github.com/sendgrid/sendgrid-go/helpers/mail" }
- /* Gets the value of the `i`th content parameter of the given `call` */
- private DataFlow::Node getContent(DataFlow::CallNode call, int i) {
- exists(DataFlow::CallNode cn, DataFlow::Node content |
+ private class NewContent extends TaintTracking::FunctionModel {
+ NewContent() {
// func NewContent(contentType string, value string) *Content
- cn.getTarget().hasQualifiedName(sendgridMail(), "NewContent") and
- cn.getResult() = content and
- content.getASuccessor*() = call.getArgument(i) and
- result = cn.getArgument(1)
- )
+ this.hasQualifiedName(sendgridMail(), "NewContent")
+ }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isParameter(1) and output.isResult()
+ }
}
/** A data-flow node that is written to an email using the sendgrid/sendgrid-go package. */
@@ -69,17 +69,14 @@ module EmailData {
or
// func NewV3MailInit(from *Email, subject string, to *Email, content ...*Content) *SGMailV3
exists(Function newv3MailInit |
- newv3MailInit.hasQualifiedName(sendgridMail(), "NewV3MailInit")
- |
- this = getContent(newv3MailInit.getACall(), any(int i | i >= 3))
- or
- this = newv3MailInit.getACall().getArgument(1)
+ newv3MailInit.hasQualifiedName(sendgridMail(), "NewV3MailInit") and
+ this = newv3MailInit.getACall().getArgument(any(int i | i = 1 or i >= 3))
)
or
// func (s *SGMailV3) AddContent(c ...*Content) *SGMailV3
exists(Method addContent |
addContent.hasQualifiedName(sendgridMail(), "SGMailV3", "AddContent") and
- this = getContent(addContent.getACall(), _)
+ this = addContent.getACall().getAnArgument()
)
}
}
diff --git a/ql/test/experimental/CWE-640/EmailInjection.expected b/ql/test/experimental/CWE-640/EmailInjection.expected
index 7d7e2988d09..37a3af5ae75 100644
--- a/ql/test/experimental/CWE-640/EmailInjection.expected
+++ b/ql/test/experimental/CWE-640/EmailInjection.expected
@@ -1,37 +1,43 @@
edges
| email.go:24:10:24:17 | selection of Header : Header | email.go:27:56:27:67 | type conversion |
| email.go:34:21:34:31 | call to Referer : string | email.go:36:57:36:78 | type conversion |
-| email.go:42:21:42:31 | call to Referer : string | email.go:46:25:46:38 | untrustedInput |
+| email.go:42:21:42:31 | call to Referer : string | email.go:45:3:45:7 | definition of write |
| email.go:51:21:51:31 | call to Referer : string | email.go:57:46:57:59 | untrustedInput |
| email.go:51:21:51:31 | call to Referer : string | email.go:58:52:58:65 | untrustedInput |
-| email.go:63:21:63:31 | call to Referer : string | email.go:65:47:65:60 | untrustedInput |
-| email.go:73:21:73:31 | call to Referer : string | email.go:79:47:79:60 | untrustedInput |
+| email.go:63:21:63:31 | call to Referer : string | email.go:68:16:68:22 | content |
+| email.go:73:21:73:31 | call to Referer : string | email.go:81:50:81:56 | content |
+| email.go:73:21:73:31 | call to Referer : string | email.go:81:59:81:65 | content |
+| email.go:73:21:73:31 | call to Referer : string | email.go:82:16:82:22 | content |
| email.go:87:21:87:31 | call to Referer : string | email.go:94:37:94:50 | untrustedInput |
-| email.go:87:21:87:31 | call to Referer : string | email.go:96:48:96:61 | untrustedInput |
+| email.go:87:21:87:31 | call to Referer : string | email.go:98:16:98:23 | content2 |
nodes
| email.go:24:10:24:17 | selection of Header : Header | semmle.label | selection of Header : Header |
| email.go:27:56:27:67 | type conversion | semmle.label | type conversion |
| email.go:34:21:34:31 | call to Referer : string | semmle.label | call to Referer : string |
| email.go:36:57:36:78 | type conversion | semmle.label | type conversion |
| email.go:42:21:42:31 | call to Referer : string | semmle.label | call to Referer : string |
-| email.go:46:25:46:38 | untrustedInput | semmle.label | untrustedInput |
+| email.go:45:3:45:7 | definition of write | semmle.label | definition of write |
| email.go:51:21:51:31 | call to Referer : string | semmle.label | call to Referer : string |
| email.go:57:46:57:59 | untrustedInput | semmle.label | untrustedInput |
| email.go:58:52:58:65 | untrustedInput | semmle.label | untrustedInput |
| email.go:63:21:63:31 | call to Referer : string | semmle.label | call to Referer : string |
-| email.go:65:47:65:60 | untrustedInput | semmle.label | untrustedInput |
+| email.go:68:16:68:22 | content | semmle.label | content |
| email.go:73:21:73:31 | call to Referer : string | semmle.label | call to Referer : string |
-| email.go:79:47:79:60 | untrustedInput | semmle.label | untrustedInput |
+| email.go:81:50:81:56 | content | semmle.label | content |
+| email.go:81:59:81:65 | content | semmle.label | content |
+| email.go:82:16:82:22 | content | semmle.label | content |
| email.go:87:21:87:31 | call to Referer : string | semmle.label | call to Referer : string |
| email.go:94:37:94:50 | untrustedInput | semmle.label | untrustedInput |
-| email.go:96:48:96:61 | untrustedInput | semmle.label | untrustedInput |
+| email.go:98:16:98:23 | content2 | semmle.label | content2 |
#select
| email.go:27:56:27:67 | type conversion | email.go:24:10:24:17 | selection of Header : Header | email.go:27:56:27:67 | type conversion | Email content may contain $@. | email.go:24:10:24:17 | selection of Header | untrusted input |
| email.go:36:57:36:78 | type conversion | email.go:34:21:34:31 | call to Referer : string | email.go:36:57:36:78 | type conversion | Email content may contain $@. | email.go:34:21:34:31 | call to Referer | untrusted input |
-| email.go:46:25:46:38 | untrustedInput | email.go:42:21:42:31 | call to Referer : string | email.go:46:25:46:38 | untrustedInput | Email content may contain $@. | email.go:42:21:42:31 | call to Referer | untrusted input |
+| email.go:45:3:45:7 | definition of write | email.go:42:21:42:31 | call to Referer : string | email.go:45:3:45:7 | definition of write | Email content may contain $@. | email.go:42:21:42:31 | call to Referer | untrusted input |
| email.go:57:46:57:59 | untrustedInput | email.go:51:21:51:31 | call to Referer : string | email.go:57:46:57:59 | untrustedInput | Email content may contain $@. | email.go:51:21:51:31 | call to Referer | untrusted input |
| email.go:58:52:58:65 | untrustedInput | email.go:51:21:51:31 | call to Referer : string | email.go:58:52:58:65 | untrustedInput | Email content may contain $@. | email.go:51:21:51:31 | call to Referer | untrusted input |
-| email.go:65:47:65:60 | untrustedInput | email.go:63:21:63:31 | call to Referer : string | email.go:65:47:65:60 | untrustedInput | Email content may contain $@. | email.go:63:21:63:31 | call to Referer | untrusted input |
-| email.go:79:47:79:60 | untrustedInput | email.go:73:21:73:31 | call to Referer : string | email.go:79:47:79:60 | untrustedInput | Email content may contain $@. | email.go:73:21:73:31 | call to Referer | untrusted input |
+| email.go:68:16:68:22 | content | email.go:63:21:63:31 | call to Referer : string | email.go:68:16:68:22 | content | Email content may contain $@. | email.go:63:21:63:31 | call to Referer | untrusted input |
+| email.go:81:50:81:56 | content | email.go:73:21:73:31 | call to Referer : string | email.go:81:50:81:56 | content | Email content may contain $@. | email.go:73:21:73:31 | call to Referer | untrusted input |
+| email.go:81:59:81:65 | content | email.go:73:21:73:31 | call to Referer : string | email.go:81:59:81:65 | content | Email content may contain $@. | email.go:73:21:73:31 | call to Referer | untrusted input |
+| email.go:82:16:82:22 | content | email.go:73:21:73:31 | call to Referer : string | email.go:82:16:82:22 | content | Email content may contain $@. | email.go:73:21:73:31 | call to Referer | untrusted input |
| email.go:94:37:94:50 | untrustedInput | email.go:87:21:87:31 | call to Referer : string | email.go:94:37:94:50 | untrustedInput | Email content may contain $@. | email.go:87:21:87:31 | call to Referer | untrusted input |
-| email.go:96:48:96:61 | untrustedInput | email.go:87:21:87:31 | call to Referer : string | email.go:96:48:96:61 | untrustedInput | Email content may contain $@. | email.go:87:21:87:31 | call to Referer | untrusted input |
+| email.go:98:16:98:23 | content2 | email.go:87:21:87:31 | call to Referer : string | email.go:98:16:98:23 | content2 | Email content may contain $@. | email.go:87:21:87:31 | call to Referer | untrusted input |
diff --git a/ql/test/library-tests/semmle/go/frameworks/Email/MailData.expected b/ql/test/library-tests/semmle/go/frameworks/Email/MailData.expected
index 9e9801a2837..160f7a0ead7 100644
--- a/ql/test/library-tests/semmle/go/frameworks/Email/MailData.expected
+++ b/ql/test/library-tests/semmle/go/frameworks/Email/MailData.expected
@@ -1,9 +1,9 @@
| mail.go:16:56:16:77 | type conversion |
-| mail.go:22:24:22:37 | untrustedInput |
+| mail.go:19:2:19:6 | definition of write |
| mail.go:29:32:29:36 | alert |
| mail.go:29:43:29:47 | alert |
| mail.go:29:50:29:54 | alert |
-| mail.go:32:46:32:50 | alert |
-| mail.go:36:47:36:51 | alert |
-| mail.go:37:47:37:51 | alert |
+| mail.go:34:15:34:21 | content |
| mail.go:40:35:40:39 | alert |
+| mail.go:40:46:40:53 | content2 |
+| mail.go:40:56:40:63 | content3 |
From b177d58c88eb4486947692651a090bd572098c54 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Mon, 4 May 2020 17:07:15 +0100
Subject: [PATCH 050/157] Tweak test.
The query under test isn't a `@problem` query, so we should refer to "alerts".
---
.../go/frameworks/Email/EmailData.expected | 9 +++++++
.../Email/{MailData.ql => EmailData.ql} | 0
.../go/frameworks/Email/MailData.expected | 9 -------
.../semmle/go/frameworks/Email/mail.go | 26 ++++++++-----------
4 files changed, 20 insertions(+), 24 deletions(-)
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Email/EmailData.expected
rename ql/test/library-tests/semmle/go/frameworks/Email/{MailData.ql => EmailData.ql} (100%)
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Email/MailData.expected
diff --git a/ql/test/library-tests/semmle/go/frameworks/Email/EmailData.expected b/ql/test/library-tests/semmle/go/frameworks/Email/EmailData.expected
new file mode 100644
index 00000000000..99b33b4a780
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Email/EmailData.expected
@@ -0,0 +1,9 @@
+| mail.go:15:73:15:94 | type conversion |
+| mail.go:18:19:18:23 | definition of write |
+| mail.go:26:49:26:52 | text |
+| mail.go:26:76:26:79 | text |
+| mail.go:27:20:27:23 | text |
+| mail.go:31:33:31:39 | content |
+| mail.go:36:52:36:55 | text |
+| mail.go:36:79:36:86 | content2 |
+| mail.go:37:20:37:27 | content3 |
diff --git a/ql/test/library-tests/semmle/go/frameworks/Email/MailData.ql b/ql/test/library-tests/semmle/go/frameworks/Email/EmailData.ql
similarity index 100%
rename from ql/test/library-tests/semmle/go/frameworks/Email/MailData.ql
rename to ql/test/library-tests/semmle/go/frameworks/Email/EmailData.ql
diff --git a/ql/test/library-tests/semmle/go/frameworks/Email/MailData.expected b/ql/test/library-tests/semmle/go/frameworks/Email/MailData.expected
deleted file mode 100644
index 160f7a0ead7..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Email/MailData.expected
+++ /dev/null
@@ -1,9 +0,0 @@
-| mail.go:16:56:16:77 | type conversion |
-| mail.go:19:2:19:6 | definition of write |
-| mail.go:29:32:29:36 | alert |
-| mail.go:29:43:29:47 | alert |
-| mail.go:29:50:29:54 | alert |
-| mail.go:34:15:34:21 | content |
-| mail.go:40:35:40:39 | alert |
-| mail.go:40:46:40:53 | content2 |
-| mail.go:40:56:40:63 | content3 |
diff --git a/ql/test/library-tests/semmle/go/frameworks/Email/mail.go b/ql/test/library-tests/semmle/go/frameworks/Email/mail.go
index 823ec92acf6..fe5565c2305 100644
--- a/ql/test/library-tests/semmle/go/frameworks/Email/mail.go
+++ b/ql/test/library-tests/semmle/go/frameworks/Email/mail.go
@@ -12,31 +12,27 @@ import (
func main() {
untrustedInput := "test"
- // Not OK - 1 alert
- smtp.SendMail("test.test", nil, "from@from.com", nil, []byte(untrustedInput))
+ smtp.SendMail("test.test", nil, "from@from.com", nil /* email data */, []byte(untrustedInput))
s, _ := smtp.Dial("test.test")
- write, _ := s.Data()
+ /* email data */ write, _ := s.Data()
- // Not OK - 1 alert
io.WriteString(write, untrustedInput)
from := sendgrid.NewEmail("from", "from@from.com")
to := sendgrid.NewEmail("to", "to@to.com")
- alert := "sub"
+ text := "sub"
- // Not OK - 3 alerts
- sendgrid.NewSingleEmail(from, alert, to, alert, alert)
+ sendgrid.NewSingleEmail(from /* email data */, text, to /* email data */, text,
+ /* email data */ text)
- // Not OK - 1 alert
- content := sendgrid.NewContent("text/html", alert)
+ content := sendgrid.NewContent("text/html", text)
v := sendgrid.NewV3Mail()
- v.AddContent(content)
+ v.AddContent( /* email data */ content)
- content2 := sendgrid.NewContent("text/html", alert)
- content3 := sendgrid.NewContent("text/html", alert)
-
- // Not OK - 3 alerts
- v = sendgrid.NewV3MailInit(from, alert, to, content2, content3)
+ content2 := sendgrid.NewContent("text/html", text)
+ content3 := sendgrid.NewContent("text/html", text)
+ v = sendgrid.NewV3MailInit(from /* email data */, text, to /* email data */, content2,
+ /* email data */ content3)
}
From a79f2b4f44b0fea1e9416744c45386c65d690757 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Tue, 5 May 2020 12:05:01 +0100
Subject: [PATCH 051/157] Add change note for `CleartextLogging`.
---
change-notes/2020-05-05-clear-text-logging.md | 2 ++
1 file changed, 2 insertions(+)
create mode 100644 change-notes/2020-05-05-clear-text-logging.md
diff --git a/change-notes/2020-05-05-clear-text-logging.md b/change-notes/2020-05-05-clear-text-logging.md
new file mode 100644
index 00000000000..ad9e974557f
--- /dev/null
+++ b/change-notes/2020-05-05-clear-text-logging.md
@@ -0,0 +1,2 @@
+lgtm,codescanning
+* The query "Clear-text logging of sensitive information" has been improved to recognize more logging APIs, which may lead to more alerts.
\ No newline at end of file
From 5653889a39f961d41dc4393437f4acd5fbef7c03 Mon Sep 17 00:00:00 2001
From: Jason Reed
Date: Tue, 5 May 2020 09:22:44 -0400
Subject: [PATCH 052/157] Exclude IDE queries from query suites.
---
ql/src/codeql-suites/go-lgtm-full.qls | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/ql/src/codeql-suites/go-lgtm-full.qls b/ql/src/codeql-suites/go-lgtm-full.qls
index ee466f62619..6950fccec02 100644
--- a/ql/src/codeql-suites/go-lgtm-full.qls
+++ b/ql/src/codeql-suites/go-lgtm-full.qls
@@ -2,3 +2,8 @@
- qlpack: codeql-go
- apply: lgtm-selectors.yml
from: codeql-suite-helpers
+# These are only for IDE use.
+- exclude:
+ tags contain:
+ - ide-contextual-queries/local-definitions
+ - ide-contextual-queries/local-references
From 08f5451fce22c18b9a06b284e79a7d5c224c17aa Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Wed, 6 May 2020 07:32:15 +0100
Subject: [PATCH 053/157] Address review comments.
---
.../semmle/go/dataflow/internal/DataFlowUtil.qll | 14 ++++++++------
ql/src/semmle/go/frameworks/Stdlib.qll | 3 ++-
2 files changed, 10 insertions(+), 7 deletions(-)
diff --git a/ql/src/semmle/go/dataflow/internal/DataFlowUtil.qll b/ql/src/semmle/go/dataflow/internal/DataFlowUtil.qll
index 6ccd6338b76..d01829a1f5c 100644
--- a/ql/src/semmle/go/dataflow/internal/DataFlowUtil.qll
+++ b/ql/src/semmle/go/dataflow/internal/DataFlowUtil.qll
@@ -408,7 +408,7 @@ class PostUpdateNode extends Node {
)
or
preupd instanceof ArgumentNode and
- mutableType(preupd.getType().getUnderlyingType())
+ mutableType(preupd.getType())
) and
(
preupd = this.(SsaNode).getAUse()
@@ -463,11 +463,13 @@ class ArgumentNode extends Node {
* mutate it or something it points to.
*/
predicate mutableType(Type tp) {
- tp instanceof ArrayType or
- tp instanceof SliceType or
- tp instanceof MapType or
- tp instanceof PointerType or
- tp instanceof InterfaceType
+ exists(Type underlying | underlying = tp.getUnderlyingType() |
+ underlying instanceof ArrayType or
+ underlying instanceof SliceType or
+ underlying instanceof MapType or
+ underlying instanceof PointerType or
+ underlying instanceof InterfaceType
+ )
}
/**
diff --git a/ql/src/semmle/go/frameworks/Stdlib.qll b/ql/src/semmle/go/frameworks/Stdlib.qll
index 86c8a3909ea..36c9516a449 100644
--- a/ql/src/semmle/go/frameworks/Stdlib.qll
+++ b/ql/src/semmle/go/frameworks/Stdlib.qll
@@ -115,7 +115,8 @@ module Fmt {
Sscanner() { this.hasQualifiedName("fmt", ["Sscan", "Sscanf", "Sscanln"]) }
override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
- input.isParameter(0) and output.isParameter(any(int i | i > 0))
+ input.isParameter(0) and
+ exists(int i | if getName() = "Sscanf" then i > 1 else i > 0 | output.isParameter(i))
}
}
}
From d6a5a72c01cdbf54112283961426332084cb06ab Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Wed, 6 May 2020 13:54:28 +0100
Subject: [PATCH 054/157] Fix copy-pasted typo.
---
ql/src/semmle/go/Concepts.qll | 2 +-
ql/src/semmle/go/StringOps.qll | 4 ++--
ql/src/semmle/go/frameworks/SQL.qll | 2 +-
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/ql/src/semmle/go/Concepts.qll b/ql/src/semmle/go/Concepts.qll
index b43d75cf8cb..e17b5f3889a 100644
--- a/ql/src/semmle/go/Concepts.qll
+++ b/ql/src/semmle/go/Concepts.qll
@@ -11,7 +11,7 @@ import semmle.go.dataflow.FunctionInputsAndOutputs
* A data-flow node that executes an operating system command,
* for instance by spawning a new process.
*
- * Extends this class to refine existing API models. If you want to model new APIs,
+ * Extend this class to refine existing API models. If you want to model new APIs,
* extend `SystemCommandExecution::Range` instead.
*/
class SystemCommandExecution extends DataFlow::Node {
diff --git a/ql/src/semmle/go/StringOps.qll b/ql/src/semmle/go/StringOps.qll
index 8fdc4223716..dfc8ceb3c9f 100644
--- a/ql/src/semmle/go/StringOps.qll
+++ b/ql/src/semmle/go/StringOps.qll
@@ -9,7 +9,7 @@ module StringOps {
/**
* An expression that is equivalent to `strings.HasPrefix(A, B)` or `!strings.HasPrefix(A, B)`.
*
- * Extends this class to refine existing API models. If you want to model new APIs,
+ * Extend this class to refine existing API models. If you want to model new APIs,
* extend `StringOps::HasPrefix::Range` instead.
*/
class HasPrefix extends DataFlow::Node {
@@ -43,7 +43,7 @@ module StringOps {
/**
* An expression that is equivalent to `strings.HasPrefix(A, B)` or `!strings.HasPrefix(A, B)`.
*
- * Extends this class to model new APIs. If you want to refine existing API models, extend
+ * Extend this class to model new APIs. If you want to refine existing API models, extend
* `StringOps::HasPrefix` instead.
*/
abstract class Range extends DataFlow::Node {
diff --git a/ql/src/semmle/go/frameworks/SQL.qll b/ql/src/semmle/go/frameworks/SQL.qll
index 49f61a74452..a0b22c0cada 100644
--- a/ql/src/semmle/go/frameworks/SQL.qll
+++ b/ql/src/semmle/go/frameworks/SQL.qll
@@ -9,7 +9,7 @@ module SQL {
/**
* A data-flow node whose string value is interpreted as (part of) a SQL query.
*
- * Extends this class to refine existing API models. If you want to model new APIs,
+ * Extend this class to refine existing API models. If you want to model new APIs,
* extend `SQL::QueryString::Range` instead.
*/
class QueryString extends DataFlow::Node {
From d008d2a6a8514858a335be80cc55fb9f81703583 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Mon, 9 Mar 2020 16:13:07 +0000
Subject: [PATCH 055/157] Fix performance issue in partial paths exploration.
cf https://github.com/Semmle/ql/pull/3021
---
ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll b/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
index 5eb98aefe4e..0150f4e0aa0 100644
--- a/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
+++ b/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
@@ -2949,7 +2949,7 @@ private module FlowExploration {
config = mid.getConfiguration()
}
- pragma[noinline]
+ pragma[nomagic]
private predicate partialPathOutOfCallable1(
PartialPathNodePriv mid, DataFlowCall call, ReturnKindExt kind, CallContext cc,
PartialAccessPath ap, Configuration config
From 8d10a8dd5b9d5f9127fd22d3eb614bff647b7db1 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Mon, 16 Mar 2020 13:37:19 +0000
Subject: [PATCH 056/157] Fix bug in type pruning.
cf https://github.com/Semmle/ql/pull/3020
---
.../go/dataflow/internal/DataFlowImpl.qll | 97 +++++++------------
1 file changed, 34 insertions(+), 63 deletions(-)
diff --git a/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll b/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
index 0150f4e0aa0..e3f1da355ef 100644
--- a/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
+++ b/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
@@ -1307,15 +1307,20 @@ private predicate localFlowExit(Node node, Configuration config) {
*/
pragma[nomagic]
private predicate localFlowStepPlus(
- Node node1, Node node2, boolean preservesValue, Configuration config, LocalCallContext cc
+ Node node1, Node node2, boolean preservesValue, DataFlowType t, Configuration config,
+ LocalCallContext cc
) {
not isUnreachableInCall(node2, cc.(LocalCallContextSpecificCall).getCall()) and
(
localFlowEntry(node1, config) and
(
- localFlowStep(node1, node2, config) and preservesValue = true
+ localFlowStep(node1, node2, config) and
+ preservesValue = true and
+ t = getErasedNodeTypeBound(node1)
or
- additionalLocalFlowStep(node1, node2, config) and preservesValue = false
+ additionalLocalFlowStep(node1, node2, config) and
+ preservesValue = false and
+ t = getErasedNodeTypeBound(node2)
) and
node1 != node2 and
cc.relevantFor(node1.getEnclosingCallable()) and
@@ -1323,17 +1328,18 @@ private predicate localFlowStepPlus(
nodeCand(TNormalNode(node2), unbind(config))
or
exists(Node mid |
- localFlowStepPlus(node1, mid, preservesValue, config, cc) and
+ localFlowStepPlus(node1, mid, preservesValue, t, config, cc) and
localFlowStep(mid, node2, config) and
not mid instanceof CastNode and
nodeCand(TNormalNode(node2), unbind(config))
)
or
exists(Node mid |
- localFlowStepPlus(node1, mid, _, config, cc) and
+ localFlowStepPlus(node1, mid, _, _, config, cc) and
additionalLocalFlowStep(mid, node2, config) and
not mid instanceof CastNode and
preservesValue = false and
+ t = getErasedNodeTypeBound(node2) and
nodeCand(TNormalNode(node2), unbind(config))
)
)
@@ -1345,17 +1351,18 @@ private predicate localFlowStepPlus(
*/
pragma[nomagic]
private predicate localFlowBigStep(
- Node node1, Node node2, boolean preservesValue, Configuration config, LocalCallContext callContext
+ Node node1, Node node2, boolean preservesValue, DataFlowType t, Configuration config,
+ LocalCallContext callContext
) {
- localFlowStepPlus(node1, node2, preservesValue, config, callContext) and
+ localFlowStepPlus(node1, node2, preservesValue, t, config, callContext) and
localFlowExit(node2, config)
}
pragma[nomagic]
private predicate localFlowBigStepExt(
- NodeExt node1, NodeExt node2, boolean preservesValue, Configuration config
+ NodeExt node1, NodeExt node2, boolean preservesValue, AccessPathFrontNil apf, Configuration config
) {
- localFlowBigStep(node1.getNode(), node2.getNode(), preservesValue, config, _)
+ localFlowBigStep(node1.getNode(), node2.getNode(), preservesValue, apf.getType(), config, _)
}
private newtype TAccessPathFront =
@@ -1395,46 +1402,24 @@ private predicate flowCandFwd(
else any()
}
-/**
- * A node that requires an empty access path and should have its tracked type
- * (re-)computed. This is either a source or a node reached through an
- * additional step.
- */
-private class AccessPathFrontNilNode extends NormalNodeExt {
- AccessPathFrontNilNode() {
- nodeCand(this, _) and
- (
- any(Configuration c).isSource(this.getNode())
- or
- localFlowBigStepExt(_, this, false, _)
- or
- additionalJumpStepExt(_, this, _)
- )
- }
-
- /** Gets the `nil` path front for this node. */
- AccessPathFrontNil getApf() { result = TFrontNil(this.getErasedNodeTypeBound()) }
-}
-
private predicate flowCandFwd0(
NodeExt node, boolean fromArg, AccessPathFront apf, Configuration config
) {
nodeCand2(node, _, false, config) and
config.isSource(node.getNode()) and
fromArg = false and
- apf = node.(AccessPathFrontNilNode).getApf()
+ apf = TFrontNil(node.getErasedNodeTypeBound())
or
nodeCand(node, unbind(config)) and
(
exists(NodeExt mid |
flowCandFwd(mid, fromArg, apf, config) and
- localFlowBigStepExt(mid, node, true, config)
+ localFlowBigStepExt(mid, node, true, _, config)
)
or
exists(NodeExt mid, AccessPathFrontNil nil |
flowCandFwd(mid, fromArg, nil, config) and
- localFlowBigStepExt(mid, node, false, config) and
- apf = node.(AccessPathFrontNilNode).getApf()
+ localFlowBigStepExt(mid, node, false, apf, config)
)
or
exists(NodeExt mid |
@@ -1447,7 +1432,7 @@ private predicate flowCandFwd0(
flowCandFwd(mid, _, nil, config) and
additionalJumpStepExt(mid, node, config) and
fromArg = false and
- apf = node.(AccessPathFrontNilNode).getApf()
+ apf = TFrontNil(node.getErasedNodeTypeBound())
)
or
exists(NodeExt mid, boolean allowsFieldFlow |
@@ -1589,13 +1574,13 @@ private predicate flowCand0(
apf instanceof AccessPathFrontNil
or
exists(NodeExt mid |
- localFlowBigStepExt(node, mid, true, config) and
+ localFlowBigStepExt(node, mid, true, _, config) and
flowCand(mid, toReturn, apf, config)
)
or
exists(NodeExt mid, AccessPathFrontNil nil |
flowCandFwd(node, _, apf, config) and
- localFlowBigStepExt(node, mid, false, config) and
+ localFlowBigStepExt(node, mid, false, _, config) and
flowCand(mid, toReturn, nil, config) and
apf instanceof AccessPathFrontNil
)
@@ -1810,18 +1795,6 @@ private predicate popWithFront(AccessPath ap0, Content f, AccessPathFront apf, A
/** Gets the access path obtained by pushing `f` onto `ap`. */
private AccessPath push(Content f, AccessPath ap) { ap = pop(f, result) }
-/**
- * A node that requires an empty access path and should have its tracked type
- * (re-)computed. This is either a source or a node reached through an
- * additional step.
- */
-private class AccessPathNilNode extends NormalNodeExt {
- AccessPathNilNode() { flowCand(this.(AccessPathFrontNilNode), _, _, _) }
-
- /** Gets the `nil` path for this node. */
- AccessPathNil getAp() { result = TNil(this.getErasedNodeTypeBound()) }
-}
-
/**
* Holds if data can flow from a source to `node` with the given `ap`.
*/
@@ -1838,20 +1811,19 @@ private predicate flowFwd0(
flowCand(node, _, _, config) and
config.isSource(node.getNode()) and
fromArg = false and
- ap = node.(AccessPathNilNode).getAp() and
+ ap = TNil(node.getErasedNodeTypeBound()) and
apf = ap.(AccessPathNil).getFront()
or
flowCand(node, _, _, unbind(config)) and
(
exists(NodeExt mid |
flowFwd(mid, fromArg, apf, ap, config) and
- localFlowBigStepExt(mid, node, true, config)
+ localFlowBigStepExt(mid, node, true, _, config)
)
or
exists(NodeExt mid, AccessPathNil nil |
flowFwd(mid, fromArg, _, nil, config) and
- localFlowBigStepExt(mid, node, false, config) and
- ap = node.(AccessPathNilNode).getAp() and
+ localFlowBigStepExt(mid, node, false, apf, config) and
apf = ap.(AccessPathNil).getFront()
)
or
@@ -1865,7 +1837,7 @@ private predicate flowFwd0(
flowFwd(mid, _, _, nil, config) and
additionalJumpStepExt(mid, node, config) and
fromArg = false and
- ap = node.(AccessPathNilNode).getAp() and
+ ap = TNil(node.getErasedNodeTypeBound()) and
apf = ap.(AccessPathNil).getFront()
)
or
@@ -1982,13 +1954,13 @@ private predicate flow0(NodeExt node, boolean toReturn, AccessPath ap, Configura
ap instanceof AccessPathNil
or
exists(NodeExt mid |
- localFlowBigStepExt(node, mid, true, config) and
+ localFlowBigStepExt(node, mid, true, _, config) and
flow(mid, toReturn, ap, config)
)
or
exists(NodeExt mid, AccessPathNil nil |
flowFwd(node, _, _, ap, config) and
- localFlowBigStepExt(node, mid, false, config) and
+ localFlowBigStepExt(node, mid, false, _, config) and
flow(mid, toReturn, nil, config) and
ap instanceof AccessPathNil
)
@@ -2164,7 +2136,7 @@ private newtype TPathNode =
config.isSource(node) and
cc instanceof CallContextAny and
sc instanceof SummaryCtxNone and
- ap = any(AccessPathNilNode nil | nil.getNode() = node).getAp()
+ ap = TNil(getErasedNodeTypeBound(node))
or
// ... or a step from an existing PathNode to another node.
exists(PathNodeMid mid |
@@ -2357,12 +2329,11 @@ private predicate pathStep(PathNodeMid mid, Node node, CallContext cc, SummaryCt
pathIntoLocalStep(mid, midnode, cc, enclosing, sc, ap0, conf) and
localCC = getLocalCallContext(cc, enclosing)
|
- localFlowBigStep(midnode, node, true, conf, localCC) and
+ localFlowBigStep(midnode, node, true, _, conf, localCC) and
ap = ap0
or
- localFlowBigStep(midnode, node, false, conf, localCC) and
- ap0 instanceof AccessPathNil and
- ap = any(AccessPathNilNode nil | nil.getNode() = node).getAp()
+ localFlowBigStep(midnode, node, false, ap.(AccessPathNil).getType(), conf, localCC) and
+ ap0 instanceof AccessPathNil
)
or
jumpStep(mid.getNode(), node, mid.getConfiguration()) and
@@ -2374,7 +2345,7 @@ private predicate pathStep(PathNodeMid mid, Node node, CallContext cc, SummaryCt
cc instanceof CallContextAny and
sc instanceof SummaryCtxNone and
mid.getAp() instanceof AccessPathNil and
- ap = any(AccessPathNilNode nil | nil.getNode() = node).getAp()
+ ap = TNil(getErasedNodeTypeBound(node))
or
exists(Content f, AccessPath ap0 | pathReadStep(mid, node, ap0, f, cc) and ap = pop(f, ap0)) and
sc = mid.getSummaryCtx()
@@ -2397,7 +2368,7 @@ private predicate pathIntoLocalStep(
midnode = mid.getNode() and
cc = mid.getCallContext() and
conf = mid.getConfiguration() and
- localFlowBigStep(midnode, _, _, conf, _) and
+ localFlowBigStep(midnode, _, _, _, conf, _) and
enclosing = midnode.getEnclosingCallable() and
sc = mid.getSummaryCtx() and
ap0 = mid.getAp()
From 96120e1e35ae6183ca3dff2c9530f8b809ad8f8f Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Tue, 17 Mar 2020 08:51:25 +0000
Subject: [PATCH 057/157] Update expected output.
---
ql/test/query-tests/Security/CWE-089/SqlInjection.expected | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/ql/test/query-tests/Security/CWE-089/SqlInjection.expected b/ql/test/query-tests/Security/CWE-089/SqlInjection.expected
index c12da6e5205..e2ff8cbd7ec 100644
--- a/ql/test/query-tests/Security/CWE-089/SqlInjection.expected
+++ b/ql/test/query-tests/Security/CWE-089/SqlInjection.expected
@@ -2,8 +2,8 @@ edges
| SqlInjection.go:11:3:11:9 | selection of URL : pointer type | SqlInjection.go:12:11:12:11 | q |
| issue48.go:17:25:17:32 | selection of Body : ReadCloser | issue48.go:22:11:22:12 | q3 |
| issue48.go:27:26:27:33 | selection of Body : ReadCloser | issue48.go:32:11:32:12 | q4 |
-| issue48.go:37:17:37:50 | type conversion : slice type | issue48.go:41:11:41:12 | q5 |
-| issue48.go:37:24:37:30 | selection of URL : pointer type | issue48.go:37:17:37:50 | type conversion : slice type |
+| issue48.go:37:17:37:50 | type conversion : string | issue48.go:41:11:41:12 | q5 |
+| issue48.go:37:24:37:30 | selection of URL : pointer type | issue48.go:37:17:37:50 | type conversion : string |
| main.go:10:11:10:16 | selection of Form : Values | main.go:10:11:10:28 | index expression |
| main.go:14:63:14:67 | selection of URL : pointer type | main.go:14:11:14:84 | call to Sprintf |
| main.go:15:63:15:70 | selection of Header : Header | main.go:15:11:15:85 | call to Sprintf |
@@ -48,7 +48,7 @@ nodes
| issue48.go:22:11:22:12 | q3 | semmle.label | q3 |
| issue48.go:27:26:27:33 | selection of Body : ReadCloser | semmle.label | selection of Body : ReadCloser |
| issue48.go:32:11:32:12 | q4 | semmle.label | q4 |
-| issue48.go:37:17:37:50 | type conversion : slice type | semmle.label | type conversion : slice type |
+| issue48.go:37:17:37:50 | type conversion : string | semmle.label | type conversion : string |
| issue48.go:37:24:37:30 | selection of URL : pointer type | semmle.label | selection of URL : pointer type |
| issue48.go:41:11:41:12 | q5 | semmle.label | q5 |
| main.go:10:11:10:16 | selection of Form : Values | semmle.label | selection of Form : Values |
From 5cd9168e4d2f7548049be795d74ad244e7a172c0 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Thu, 26 Mar 2020 08:01:01 +0000
Subject: [PATCH 058/157] Data flow: Refactoring + performance improvements
cf https://github.com/Semmle/ql/pull/2903
---
.../go/dataflow/internal/DataFlowImpl.qll | 1018 ++++++++---------
.../dataflow/internal/DataFlowImplCommon.qll | 2 +-
2 files changed, 494 insertions(+), 526 deletions(-)
diff --git a/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll b/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
index e3f1da355ef..f7f9b2a0393 100644
--- a/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
+++ b/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
@@ -573,14 +573,24 @@ private predicate parameterThroughFlowCand(ParameterNode p, Configuration config
)
}
-private predicate store(Node n1, Content f, Node n2) {
- storeDirect(n1, f, n2) or
- argumentValueFlowsThrough(_, n1, TContentNone(), TContentSome(f), n2)
+pragma[nomagic]
+private predicate store(Node n1, Content f, Node n2, Configuration config) {
+ readStoreCand1(f, config) and
+ nodeCand1(n2, unbind(config)) and
+ (
+ storeDirect(n1, f, n2) or
+ argumentValueFlowsThrough(_, n1, TContentNone(), TContentSome(f), n2)
+ )
}
-private predicate read(Node n1, Content f, Node n2) {
- readDirect(n1, f, n2) or
- argumentValueFlowsThrough(_, n1, TContentSome(f), TContentNone(), n2)
+pragma[nomagic]
+private predicate read(Node n1, Content f, Node n2, Configuration config) {
+ readStoreCand1(f, config) and
+ nodeCand1(n2, unbind(config)) and
+ (
+ readDirect(n1, f, n2) or
+ argumentValueFlowsThrough(_, n1, TContentSome(f), TContentNone(), n2)
+ )
}
/**
@@ -619,8 +629,7 @@ private predicate parameterFlow(
// read step
exists(Node mid, Content f, Summary midsum |
parameterFlow(p, mid, _, _, midsum, config) and
- read(mid, f, node) and
- readStoreCand1(f, unbind(config)) and
+ read(mid, f, node, config) and
summary = midsum.readStep(f) and
t1 = f.getType() and
t1 = t2
@@ -629,8 +638,7 @@ private predicate parameterFlow(
// store step
exists(Node mid, Content f, Summary midsum |
parameterFlow(p, mid, t1, /* t1 */ _, midsum, config) and
- store(mid, f, node) and
- readStoreCand1(f, unbind(config)) and
+ store(mid, f, node, config) and
summary = midsum.storeStep(f) and
compatibleTypes(t1, f.getType()) and
t2 = f.getContainerType()
@@ -662,6 +670,7 @@ private predicate viableParamArgCand(
not inBarrier(p, config)
}
+pragma[nomagic]
private predicate parameterFlowReturn(
ParameterNode p, ReturnNodeExt ret, ReturnKindExt kind, DataFlowType t1, DataFlowType t2,
Summary summary, Configuration config
@@ -702,22 +711,34 @@ private predicate argumentFlowsThrough(
)
}
+pragma[noinline]
+private predicate readStoreNode(
+ DataFlowCall call, ArgumentNode arg, Content f1, Configuration config
+) {
+ exists(Content f2, Node out |
+ argumentValueFlowsThrough(call, arg, TContentSome(f1), TContentSome(f2), out) and
+ nodeCand1(out, config) and
+ readStoreCand1(f2, unbind(config))
+ )
+}
+
private newtype TNodeExt =
TNormalNode(Node node) { nodeCand1(node, _) } or
- TReadStoreNode(DataFlowCall call, Node node1, Node node2, Content f1, Content f2) {
- exists(Configuration config |
- nodeCand1(node1, config) and
- argumentValueFlowsThrough(call, node1, TContentSome(f1), TContentSome(f2), node2) and
- nodeCand1(node2, unbind(config)) and
- readStoreCand1(f1, unbind(config)) and
- readStoreCand1(f2, unbind(config))
- )
+ TReadStoreNode(DataFlowCall call, ArgumentNode arg, Content f1, Configuration config) {
+ nodeCand1(arg, config) and
+ readStoreNode(call, arg, f1, config) and
+ readStoreCand1(f1, unbind(config))
+ } or
+ TReadTaintNode(ArgumentNode arg, Content f, Configuration config) {
+ argumentFlowsThrough(arg, _, _, _, TSummaryReadTaint(f), config)
+ } or
+ TTaintStoreNode(ArgumentNode arg, DataFlowType t, Configuration config) {
+ argumentFlowsThrough(arg, _, t, _, TSummaryTaintStore(_), config)
}
/**
* An extended data flow node. Either a normal node, or an intermediate node
- * used to split up a read+store step through a call into first a read step
- * followed by a store step.
+ * used to split up summarized flow steps.
*
* This is purely an internal implementation detail.
*/
@@ -769,46 +790,113 @@ private class NormalCastingNodeExt extends CastingNodeExt, NormalNodeExt {
private class ReadStoreNodeExt extends CastingNodeExt, TReadStoreNode {
private DataFlowCall call;
- private Node node1;
- private Node node2;
+ private ArgumentNode arg;
private Content f1;
- private Content f2;
+ private Configuration config0;
- ReadStoreNodeExt() { this = TReadStoreNode(call, node1, node2, f1, f2) }
+ ReadStoreNodeExt() { this = TReadStoreNode(call, arg, f1, config0) }
override Node getNode() { none() }
override DataFlowType getErasedNodeTypeBound() { result = f1.getType() }
- override DataFlowCallable getEnclosingCallable() { result = node1.getEnclosingCallable() }
+ override DataFlowCallable getEnclosingCallable() { result = arg.getEnclosingCallable() }
- override predicate isCand1(Configuration config) {
- nodeCand1(node1, config) and nodeCand1(node2, config)
- }
+ override predicate isCand1(Configuration config) { config = config0 }
- override string toString() {
- result = "(inside) " + call.toString() + " [" + f1 + " -> " + f2 + "]"
- }
+ override string toString() { result = "(inside) " + call.toString() + " [read " + f1 + "]" }
override predicate hasLocationInfo(
string filepath, int startline, int startcolumn, int endline, int endcolumn
) {
- call.getLocation().hasLocationInfo(filepath, startline, startcolumn, endline, endcolumn)
+ arg.getLocation().hasLocationInfo(filepath, startline, startcolumn, endline, endcolumn)
}
}
-pragma[nomagic]
-private predicate readExt(NodeExt node1, Content f, NodeExt node2) {
- read(node1.getNode(), f, node2.getNode())
+private class ReadTaintNode extends NodeExt, TReadTaintNode {
+ private ArgumentNode arg;
+ private Content f;
+ private Configuration config0;
+
+ ReadTaintNode() { this = TReadTaintNode(arg, f, config0) }
+
+ override Node getNode() { none() }
+
+ override DataFlowType getErasedNodeTypeBound() { result = f.getType() }
+
+ override DataFlowCallable getEnclosingCallable() { result = arg.getEnclosingCallable() }
+
+ override predicate isCand1(Configuration config) { config = config0 }
+
+ override string toString() { result = arg.toString() + " [read taint " + f + "]" }
+
+ override predicate hasLocationInfo(
+ string filepath, int startline, int startcolumn, int endline, int endcolumn
+ ) {
+ arg.getLocation().hasLocationInfo(filepath, startline, startcolumn, endline, endcolumn)
+ }
+}
+
+private class TaintStoreNode extends NodeExt, TTaintStoreNode {
+ private ArgumentNode arg;
+ private DataFlowType t;
+ private Configuration config0;
+
+ TaintStoreNode() { this = TTaintStoreNode(arg, t, config0) }
+
+ override Node getNode() { none() }
+
+ override DataFlowType getErasedNodeTypeBound() { result = t }
+
+ override DataFlowCallable getEnclosingCallable() { result = arg.getEnclosingCallable() }
+
+ override predicate isCand1(Configuration config) { config = config0 }
+
+ override string toString() { result = arg.toString() + " [taint store]" }
+
+ override predicate hasLocationInfo(
+ string filepath, int startline, int startcolumn, int endline, int endcolumn
+ ) {
+ arg.getLocation().hasLocationInfo(filepath, startline, startcolumn, endline, endcolumn)
+ }
+}
+
+private predicate additionalLocalFlowStepExt(
+ NodeExt node1, NodeExt node2, DataFlowType t, Configuration config
+) {
+ exists(ArgumentNode arg, Content f |
+ node1 = TReadTaintNode(arg, f, config) and
+ argumentFlowsThrough(arg, node2.getNode(), _, t, TSummaryReadTaint(f), config)
+ )
or
- node2 = TReadStoreNode(_, node1.getNode(), _, f, _)
+ node2 = TTaintStoreNode(node1.getNode(), t, config)
}
pragma[nomagic]
-private predicate storeExt(NodeExt node1, Content f, NodeExt node2) {
- store(node1.getNode(), f, node2.getNode())
+private predicate readExt(NodeExt node1, Content f, NodeExt node2, Configuration config) {
+ read(node1.getNode(), f, node2.getNode(), config)
or
- node1 = TReadStoreNode(_, _, node2.getNode(), _, f)
+ node2 = TReadStoreNode(_, node1.getNode(), f, config)
+ or
+ node2 = TReadTaintNode(node1.getNode(), f, config)
+}
+
+pragma[nomagic]
+private predicate storeExt(NodeExt node1, Content f, NodeExt node2, Configuration config) {
+ store(node1.getNode(), f, node2.getNode(), config)
+ or
+ exists(DataFlowCall call, ArgumentNode arg, Content f1, Node n2 |
+ node1 = TReadStoreNode(call, arg, f1, config) and
+ n2 = node2.getNode() and
+ argumentValueFlowsThrough(call, arg, TContentSome(f1), TContentSome(f), n2) and
+ nodeCand1(n2, unbind(config)) and
+ readStoreCand1(f, unbind(config))
+ )
+ or
+ exists(ArgumentNode arg, DataFlowType t |
+ node1 = TTaintStoreNode(arg, t, config) and
+ argumentFlowsThrough(arg, node2.getNode(), t, _, TSummaryTaintStore(f), config)
+ )
}
private predicate jumpStepExt(NodeExt node1, NodeExt node2, Configuration config) {
@@ -824,9 +912,9 @@ private predicate argumentValueFlowsThrough(NodeExt node1, NodeExt node2) {
}
private predicate argumentFlowsThrough(
- NodeExt arg, NodeExt out, Summary summary, Configuration config
+ NodeExt arg, NodeExt out, DataFlowType t, Configuration config
) {
- argumentFlowsThrough(arg.getNode(), out.getNode(), _, _, summary, config)
+ argumentFlowsThrough(arg.getNode(), out.getNode(), _, t, TSummaryTaint(), config)
}
/**
@@ -867,6 +955,8 @@ private predicate additionalLocalFlowStepOrFlowThroughCallable(
or
argumentFlowsThrough(n1, n2, _, _, TSummaryTaint(), config)
)
+ or
+ additionalLocalFlowStepExt(node1, node2, _, config)
}
pragma[noinline]
@@ -880,7 +970,7 @@ private ReturnPosition getReturnPosition1(ReturnNodeExt node, Configuration conf
* through a `ReturnNode` or through an argument that has been mutated, and
* that this step is part of a path from a source to a sink.
*/
-private predicate flowOutOfCallable(Node node1, Node node2, Configuration config) {
+private predicate flowOutOfCallableNodeCand1(ReturnNodeExt node1, Node node2, Configuration config) {
nodeCand1(node2, config) and
not outBarrier(node1, config) and
not inBarrier(node2, config) and
@@ -894,7 +984,9 @@ private predicate flowOutOfCallable(Node node1, Node node2, Configuration config
* Holds if data can flow into a callable and that this step is part of a
* path from a source to a sink.
*/
-private predicate flowIntoCallable(ArgumentNode node1, ParameterNode node2, Configuration config) {
+private predicate flowIntoCallableNodeCand1(
+ ArgumentNode node1, ParameterNode node2, Configuration config
+) {
viableParamArgCand(_, node2, node1, config)
}
@@ -904,7 +996,10 @@ private predicate flowIntoCallable(ArgumentNode node1, ParameterNode node2, Conf
* contexts.
*/
private int branch(Node n1, Configuration conf) {
- result = strictcount(Node n | flowOutOfCallable(n1, n, conf) or flowIntoCallable(n1, n, conf))
+ result =
+ strictcount(Node n |
+ flowOutOfCallableNodeCand1(n1, n, conf) or flowIntoCallableNodeCand1(n1, n, conf)
+ )
}
/**
@@ -913,7 +1008,10 @@ private int branch(Node n1, Configuration conf) {
* contexts.
*/
private int join(Node n2, Configuration conf) {
- result = strictcount(Node n | flowOutOfCallable(n, n2, conf) or flowIntoCallable(n, n2, conf))
+ result =
+ strictcount(Node n |
+ flowOutOfCallableNodeCand1(n, n2, conf) or flowIntoCallableNodeCand1(n, n2, conf)
+ )
}
/**
@@ -923,13 +1021,13 @@ private int join(Node n2, Configuration conf) {
* `allowsFieldFlow` flag indicates whether the branching is within the limit
* specified by the configuration.
*/
-private predicate flowOutOfCallable(
+private predicate flowOutOfCallableNodeCand1(
NodeExt node1, NodeExt node2, boolean allowsFieldFlow, Configuration config
) {
- exists(Node n1, Node n2 |
+ exists(ReturnNodeExt n1, Node n2 |
n1 = node1.getNode() and
n2 = node2.getNode() and
- flowOutOfCallable(n1, n2, config) and
+ flowOutOfCallableNodeCand1(n1, n2, config) and
exists(int b, int j |
b = branch(n1, config) and
j = join(n2, config) and
@@ -945,13 +1043,13 @@ private predicate flowOutOfCallable(
* path from a source to a sink. The `allowsFieldFlow` flag indicates whether
* the branching is within the limit specified by the configuration.
*/
-private predicate flowIntoCallable(
+private predicate flowIntoCallableNodeCand1(
NodeExt node1, NodeExt node2, boolean allowsFieldFlow, Configuration config
) {
- exists(Node n1, Node n2 |
+ exists(ArgumentNode n1, ParameterNode n2 |
n1 = node1.getNode() and
n2 = node2.getNode() and
- flowIntoCallable(n1, n2, config) and
+ flowIntoCallableNodeCand1(n1, n2, config) and
exists(int b, int j |
b = branch(n1, config) and
j = join(n2, config) and
@@ -1001,43 +1099,26 @@ private predicate nodeCandFwd2(NodeExt node, boolean fromArg, boolean stored, Co
// store
exists(NodeExt mid, Content f |
nodeCandFwd2(mid, fromArg, _, config) and
- storeExt(mid, f, node) and
- readStoreCand1(f, unbind(config)) and
- stored = true
- )
- or
- // taint store
- exists(NodeExt mid, Content f |
- nodeCandFwd2(mid, fromArg, false, config) and
- argumentFlowsThrough(mid, node, TSummaryTaintStore(f), config) and
- readStoreCand1(f, unbind(config)) and
+ storeExt(mid, f, node, config) and
stored = true
)
or
// read
exists(Content f |
nodeCandFwd2Read(f, node, fromArg, config) and
- storeCandFwd2(f, config) and
- (stored = false or stored = true)
- )
- or
- // read taint
- exists(Content f |
- nodeCandFwd2ReadTaint(f, node, fromArg, config) and
- storeCandFwd2(f, config) and
- stored = false
+ storeCandFwd2(f, stored, config)
)
or
exists(NodeExt mid, boolean allowsFieldFlow |
nodeCandFwd2(mid, _, stored, config) and
- flowIntoCallable(mid, node, allowsFieldFlow, config) and
+ flowIntoCallableNodeCand1(mid, node, allowsFieldFlow, config) and
fromArg = true and
(stored = false or allowsFieldFlow = true)
)
or
exists(NodeExt mid, boolean allowsFieldFlow |
nodeCandFwd2(mid, false, stored, config) and
- flowOutOfCallable(mid, node, allowsFieldFlow, config) and
+ flowOutOfCallableNodeCand1(mid, node, allowsFieldFlow, config) and
fromArg = false and
(stored = false or allowsFieldFlow = true)
)
@@ -1048,13 +1129,12 @@ private predicate nodeCandFwd2(NodeExt node, boolean fromArg, boolean stored, Co
* Holds if `f` is the target of a store in the flow covered by `nodeCandFwd2`.
*/
pragma[noinline]
-private predicate storeCandFwd2(Content f, Configuration config) {
+private predicate storeCandFwd2(Content f, boolean stored, Configuration config) {
exists(NodeExt mid, NodeExt node |
useFieldFlow(config) and
node.isCand1(unbind(config)) and
- nodeCandFwd2(mid, _, _, config) and
- storeExt(mid, f, node) and
- readStoreCand1(f, unbind(config))
+ nodeCandFwd2(mid, _, stored, config) and
+ storeExt(mid, f, node, config)
)
}
@@ -1062,128 +1142,72 @@ pragma[nomagic]
private predicate nodeCandFwd2Read(Content f, NodeExt node, boolean fromArg, Configuration config) {
exists(NodeExt mid |
nodeCandFwd2(mid, fromArg, true, config) and
- readExt(mid, f, node) and
- readStoreCand1(f, unbind(config))
+ readExt(mid, f, node, config)
)
}
-pragma[nomagic]
-private predicate nodeCandFwd2ReadTaint(
- Content f, NodeExt node, boolean fromArg, Configuration config
-) {
- exists(NodeExt mid |
- nodeCandFwd2(mid, fromArg, true, config) and
- argumentFlowsThrough(mid, node, TSummaryReadTaint(f), config) and
- readStoreCand1(f, unbind(config))
- )
-}
-
-private predicate readCandFwd2(Content f, Configuration config) {
- exists(NodeExt node |
- nodeCandFwd2Read(f, node, _, config) or
- nodeCandFwd2ReadTaint(f, node, _, config)
- |
- nodeCandFwd2(node, _, _, config)
- )
-}
-
-private predicate readStoreCandFwd2(Content f, Configuration config) {
- readCandFwd2(f, config) and
- storeCandFwd2(f, config)
-}
-
-private predicate summaryFwd2(Summary s, Configuration config) {
- // No need for the `s = TSummaryTaint()` case as it is not used with `argumentFlowsThroughFwd2`.
- exists(Content f | s = TSummaryReadTaint(f) | readStoreCandFwd2(f, config))
- or
- exists(Content f | s = TSummaryTaintStore(f) | readStoreCandFwd2(f, config))
-}
-
-private predicate argumentFlowsThroughFwd2(NodeExt n1, NodeExt n2, Summary s, Configuration config) {
- argumentFlowsThrough(n1, n2, s, config) and
- nodeCandFwd2(n1, _, _, config) and
- nodeCandFwd2(n2, _, _, unbind(config)) and
- summaryFwd2(s, unbind(config))
-}
-
/**
* Holds if `node` is part of a path from a source to a sink in the given
* configuration taking simple call contexts into consideration.
*/
-private predicate nodeCand2(NodeExt node, boolean toReturn, boolean stored, Configuration config) {
+private predicate nodeCand2(NodeExt node, boolean toReturn, boolean read, Configuration config) {
nodeCandFwd2(node, _, false, config) and
config.isSink(node.getNode()) and
toReturn = false and
- stored = false
+ read = false
or
- nodeCandFwd2(node, _, unbindBool(stored), unbind(config)) and
+ nodeCandFwd2(node, _, unbindBool(read), unbind(config)) and
(
exists(NodeExt mid |
localFlowStepOrFlowThroughCallable(node, mid, config) and
- nodeCand2(mid, toReturn, stored, config)
+ nodeCand2(mid, toReturn, read, config)
)
or
exists(NodeExt mid |
additionalLocalFlowStepOrFlowThroughCallable(node, mid, config) and
- nodeCand2(mid, toReturn, stored, config) and
- stored = false
+ nodeCand2(mid, toReturn, read, config) and
+ read = false
)
or
exists(NodeExt mid |
jumpStepExt(node, mid, config) and
- nodeCand2(mid, _, stored, config) and
+ nodeCand2(mid, _, read, config) and
toReturn = false
)
or
exists(NodeExt mid |
additionalJumpStepExt(node, mid, config) and
- nodeCand2(mid, _, stored, config) and
+ nodeCand2(mid, _, read, config) and
toReturn = false and
- stored = false
+ read = false
)
or
// store
exists(Content f |
- nodeCand2Store(f, node, toReturn, config) and
- readCand2(f, config) and
- (stored = false or stored = true)
- )
- or
- // taint store
- exists(Content f |
- nodeCand2TaintStore(f, node, toReturn, config) and
- readCand2(f, config) and
- stored = false
+ nodeCand2Store(f, node, toReturn, read, config) and
+ readCand2(f, read, config)
)
or
// read
- exists(NodeExt mid, Content f |
- readExt(node, f, mid) and
- storeCandFwd2(f, unbind(config)) and
- nodeCand2(mid, toReturn, _, config) and
- stored = true
- )
- or
- // read taint
- exists(NodeExt mid, Content f |
- argumentFlowsThroughFwd2(node, mid, TSummaryReadTaint(f), config) and
- storeCandFwd2(f, unbind(config)) and
- nodeCand2(mid, toReturn, false, config) and
- stored = true
+ exists(NodeExt mid, Content f, boolean read0 |
+ readExt(node, f, mid, config) and
+ storeCandFwd2(f, unbindBool(read0), unbind(config)) and
+ nodeCand2(mid, toReturn, read0, config) and
+ read = true
)
or
exists(NodeExt mid, boolean allowsFieldFlow |
- flowIntoCallable(node, mid, allowsFieldFlow, config) and
- nodeCand2(mid, false, stored, config) and
+ flowIntoCallableNodeCand1(node, mid, allowsFieldFlow, config) and
+ nodeCand2(mid, false, read, config) and
toReturn = false and
- (stored = false or allowsFieldFlow = true)
+ (read = false or allowsFieldFlow = true)
)
or
exists(NodeExt mid, boolean allowsFieldFlow |
- flowOutOfCallable(node, mid, allowsFieldFlow, config) and
- nodeCand2(mid, _, stored, config) and
+ flowOutOfCallableNodeCand1(node, mid, allowsFieldFlow, config) and
+ nodeCand2(mid, _, read, config) and
toReturn = true and
- (stored = false or allowsFieldFlow = true)
+ (read = false or allowsFieldFlow = true)
)
)
}
@@ -1192,41 +1216,32 @@ private predicate nodeCand2(NodeExt node, boolean toReturn, boolean stored, Conf
* Holds if `f` is the target of a read in the flow covered by `nodeCand2`.
*/
pragma[noinline]
-private predicate readCand2(Content f, Configuration config) {
+private predicate readCand2(Content f, boolean read, Configuration config) {
exists(NodeExt mid, NodeExt node |
useFieldFlow(config) and
nodeCandFwd2(node, _, true, unbind(config)) and
- readExt(node, f, mid) and
- storeCandFwd2(f, unbind(config)) and
- nodeCand2(mid, _, _, config)
+ readExt(node, f, mid, config) and
+ storeCandFwd2(f, unbindBool(read), unbind(config)) and
+ nodeCand2(mid, _, read, config)
)
}
pragma[nomagic]
-private predicate nodeCand2Store(Content f, NodeExt node, boolean toReturn, Configuration config) {
- exists(NodeExt mid |
- storeExt(node, f, mid) and
- nodeCand2(mid, toReturn, true, config)
- )
-}
-
-pragma[nomagic]
-private predicate nodeCand2TaintStore(
- Content f, NodeExt node, boolean toReturn, Configuration config
+private predicate nodeCand2Store(
+ Content f, NodeExt node, boolean toReturn, boolean stored, Configuration config
) {
exists(NodeExt mid |
- argumentFlowsThroughFwd2(node, mid, TSummaryTaintStore(f), config) and
- nodeCand2(mid, toReturn, true, config)
+ storeExt(node, f, mid, config) and
+ nodeCand2(mid, toReturn, true, config) and
+ nodeCandFwd2(node, _, stored, unbind(config))
)
}
pragma[nomagic]
-private predicate storeCand(Content f, Configuration conf) {
+private predicate storeCand2(Content f, boolean stored, Configuration conf) {
exists(NodeExt node |
- nodeCand2Store(f, node, _, conf) or
- nodeCand2TaintStore(f, node, _, conf)
- |
- nodeCand2(node, _, _, conf)
+ nodeCand2Store(f, node, _, stored, conf) and
+ nodeCand2(node, _, stored, conf)
)
}
@@ -1236,133 +1251,162 @@ private predicate storeCand(Content f, Configuration conf) {
*/
pragma[noinline]
private predicate readStoreCand(Content f, Configuration conf) {
- storeCand(f, conf) and
- readCand2(f, conf)
-}
-
-private predicate nodeCand(NodeExt node, Configuration config) { nodeCand2(node, _, _, config) }
-
-private predicate summary2(Summary s, Configuration config) {
- s = TSummaryTaint()
- or
- exists(Content f | s = TSummaryReadTaint(f) | readStoreCand(f, config))
- or
- exists(Content f | s = TSummaryTaintStore(f) | readStoreCand(f, config))
-}
-
-private predicate argumentFlowsThrough2(
- NodeExt n1, NodeExt n2, DataFlowType t1, DataFlowType t2, Summary s, Configuration config
-) {
- argumentFlowsThrough(n1.getNode(), n2.getNode(), t1, t2, s, config) and
- nodeCand(n1, config) and
- nodeCand(n2, unbind(config)) and
- summary2(s, unbind(config))
-}
-
-/**
- * Holds if `node` can be the first node in a maximal subsequence of local
- * flow steps in a dataflow path.
- */
-private predicate localFlowEntry(Node node, Configuration config) {
- nodeCand(TNormalNode(node), config) and
- (
- config.isSource(node) or
- jumpStep(_, node, config) or
- additionalJumpStep(_, node, config) or
- node instanceof ParameterNode or
- node instanceof OutNode or
- node instanceof PostUpdateNode or
- readDirect(_, _, node) or
- node instanceof CastNode
+ exists(boolean apNonEmpty |
+ storeCand2(f, apNonEmpty, conf) and
+ readCand2(f, apNonEmpty, conf)
)
}
-/**
- * Holds if `node` can be the last node in a maximal subsequence of local
- * flow steps in a dataflow path.
- */
-private predicate localFlowExit(Node node, Configuration config) {
- exists(Node next | nodeCand(TNormalNode(next), config) |
- jumpStep(node, next, config) or
- additionalJumpStep(node, next, config) or
- flowIntoCallable(node, next, config) or
- flowOutOfCallable(node, next, config) or
- argumentFlowsThrough2(TNormalNode(node), TNormalNode(next), _, _, _, config) or
- argumentValueFlowsThrough(_, node, TContentNone(), TContentNone(), next) or
- storeDirect(node, _, next) or
- readDirect(node, _, next)
- )
- or
- node instanceof CastNode
- or
- config.isSink(node)
-}
+private predicate nodeCand2(NodeExt node, Configuration config) { nodeCand2(node, _, _, config) }
-/**
- * Holds if the local path from `node1` to `node2` is a prefix of a maximal
- * subsequence of local flow steps in a dataflow path.
- *
- * This is the transitive closure of `[additional]localFlowStep` beginning
- * at `localFlowEntry`.
- */
pragma[nomagic]
-private predicate localFlowStepPlus(
- Node node1, Node node2, boolean preservesValue, DataFlowType t, Configuration config,
- LocalCallContext cc
+private predicate flowOutOfCallableNodeCand2(
+ NodeExt node1, NodeExt node2, boolean allowsFieldFlow, Configuration config
) {
- not isUnreachableInCall(node2, cc.(LocalCallContextSpecificCall).getCall()) and
- (
- localFlowEntry(node1, config) and
+ flowOutOfCallableNodeCand1(node1, node2, allowsFieldFlow, config) and
+ nodeCand2(node2, config) and
+ nodeCand2(node1, unbind(config))
+}
+
+pragma[nomagic]
+private predicate flowIntoCallableNodeCand2(
+ NodeExt node1, NodeExt node2, boolean allowsFieldFlow, Configuration config
+) {
+ flowIntoCallableNodeCand1(node1, node2, allowsFieldFlow, config) and
+ nodeCand2(node2, config) and
+ nodeCand2(node1, unbind(config))
+}
+
+private module LocalFlowBigStep {
+ /**
+ * Holds if `node` can be the first node in a maximal subsequence of local
+ * flow steps in a dataflow path.
+ */
+ private predicate localFlowEntry(Node node, Configuration config) {
+ nodeCand2(TNormalNode(node), config) and
(
- localFlowStep(node1, node2, config) and
- preservesValue = true and
- t = getErasedNodeTypeBound(node1)
+ config.isSource(node) or
+ jumpStep(_, node, config) or
+ additionalJumpStep(_, node, config) or
+ node instanceof ParameterNode or
+ node instanceof OutNode or
+ node instanceof PostUpdateNode or
+ readDirect(_, _, node) or
+ node instanceof CastNode
+ )
+ }
+
+ /**
+ * Holds if `node` can be the last node in a maximal subsequence of local
+ * flow steps in a dataflow path.
+ */
+ private predicate localFlowExit(Node node, Configuration config) {
+ exists(Node next | nodeCand2(TNormalNode(next), config) |
+ jumpStep(node, next, config) or
+ additionalJumpStep(node, next, config) or
+ flowIntoCallableNodeCand1(node, next, config) or
+ flowOutOfCallableNodeCand1(node, next, config) or
+ argumentFlowsThrough(node, next, _, _, _, config) or
+ argumentValueFlowsThrough(_, node, TContentNone(), TContentNone(), next) or
+ storeDirect(node, _, next) or
+ readDirect(node, _, next)
+ )
+ or
+ node instanceof CastNode
+ or
+ config.isSink(node)
+ }
+
+ /**
+ * Holds if the local path from `node1` to `node2` is a prefix of a maximal
+ * subsequence of local flow steps in a dataflow path.
+ *
+ * This is the transitive closure of `[additional]localFlowStep` beginning
+ * at `localFlowEntry`.
+ */
+ pragma[nomagic]
+ private predicate localFlowStepPlus(
+ Node node1, Node node2, boolean preservesValue, DataFlowType t, Configuration config,
+ LocalCallContext cc
+ ) {
+ not isUnreachableInCall(node2, cc.(LocalCallContextSpecificCall).getCall()) and
+ (
+ localFlowEntry(node1, config) and
+ (
+ localFlowStep(node1, node2, config) and
+ preservesValue = true and
+ t = getErasedNodeTypeBound(node1)
+ or
+ additionalLocalFlowStep(node1, node2, config) and
+ preservesValue = false and
+ t = getErasedNodeTypeBound(node2)
+ ) and
+ node1 != node2 and
+ cc.relevantFor(node1.getEnclosingCallable()) and
+ not isUnreachableInCall(node1, cc.(LocalCallContextSpecificCall).getCall()) and
+ nodeCand2(TNormalNode(node2), unbind(config))
or
- additionalLocalFlowStep(node1, node2, config) and
- preservesValue = false and
- t = getErasedNodeTypeBound(node2)
- ) and
- node1 != node2 and
- cc.relevantFor(node1.getEnclosingCallable()) and
- not isUnreachableInCall(node1, cc.(LocalCallContextSpecificCall).getCall()) and
- nodeCand(TNormalNode(node2), unbind(config))
- or
- exists(Node mid |
- localFlowStepPlus(node1, mid, preservesValue, t, config, cc) and
- localFlowStep(mid, node2, config) and
- not mid instanceof CastNode and
- nodeCand(TNormalNode(node2), unbind(config))
+ exists(Node mid |
+ localFlowStepPlus(node1, mid, preservesValue, t, config, cc) and
+ localFlowStep(mid, node2, config) and
+ not mid instanceof CastNode and
+ nodeCand2(TNormalNode(node2), unbind(config))
+ )
+ or
+ exists(Node mid |
+ localFlowStepPlus(node1, mid, _, _, config, cc) and
+ additionalLocalFlowStep(mid, node2, config) and
+ not mid instanceof CastNode and
+ preservesValue = false and
+ t = getErasedNodeTypeBound(node2) and
+ nodeCand2(TNormalNode(node2), unbind(config))
+ )
)
+ }
+
+ /**
+ * Holds if `node1` can step to `node2` in one or more local steps and this
+ * path can occur as a maximal subsequence of local steps in a dataflow path.
+ */
+ pragma[nomagic]
+ predicate localFlowBigStep(
+ Node node1, Node node2, boolean preservesValue, DataFlowType t, Configuration config,
+ LocalCallContext callContext
+ ) {
+ localFlowStepPlus(node1, node2, preservesValue, t, config, callContext) and
+ localFlowExit(node2, config)
+ }
+
+ pragma[nomagic]
+ predicate localFlowBigStepExt(
+ NodeExt node1, NodeExt node2, boolean preservesValue, AccessPathFrontNil apf,
+ Configuration config
+ ) {
+ localFlowBigStep(node1.getNode(), node2.getNode(), preservesValue, apf.getType(), config, _)
or
- exists(Node mid |
- localFlowStepPlus(node1, mid, _, _, config, cc) and
- additionalLocalFlowStep(mid, node2, config) and
- not mid instanceof CastNode and
- preservesValue = false and
- t = getErasedNodeTypeBound(node2) and
- nodeCand(TNormalNode(node2), unbind(config))
- )
- )
+ additionalLocalFlowStepExt(node1, node2, apf.getType(), config) and
+ nodeCand2(node1, config) and
+ nodeCand2(node2, unbind(config)) and
+ preservesValue = false
+ }
}
-/**
- * Holds if `node1` can step to `node2` in one or more local steps and this
- * path can occur as a maximal subsequence of local steps in a dataflow path.
- */
+private import LocalFlowBigStep
+
pragma[nomagic]
-private predicate localFlowBigStep(
- Node node1, Node node2, boolean preservesValue, DataFlowType t, Configuration config,
- LocalCallContext callContext
-) {
- localFlowStepPlus(node1, node2, preservesValue, t, config, callContext) and
- localFlowExit(node2, config)
+private predicate readExtCand2(NodeExt node1, Content f, NodeExt node2, Configuration config) {
+ readExt(node1, f, node2, config) and
+ nodeCand2(node1, _, true, unbind(config)) and
+ nodeCand2(node2, config) and
+ readStoreCand(f, unbind(config))
}
pragma[nomagic]
-private predicate localFlowBigStepExt(
- NodeExt node1, NodeExt node2, boolean preservesValue, AccessPathFrontNil apf, Configuration config
-) {
- localFlowBigStep(node1.getNode(), node2.getNode(), preservesValue, apf.getType(), config, _)
+private predicate storeExtCand2(NodeExt node1, Content f, NodeExt node2, Configuration config) {
+ storeExt(node1, f, node2, config) and
+ nodeCand2(node1, config) and
+ nodeCand2(node2, _, true, unbind(config)) and
+ readStoreCand(f, unbind(config))
}
private newtype TAccessPathFront =
@@ -1372,27 +1416,40 @@ private newtype TAccessPathFront =
/**
* The front of an `AccessPath`. This is either a head or a nil.
*/
-private class AccessPathFront extends TAccessPathFront {
- string toString() {
- exists(DataFlowType t | this = TFrontNil(t) | result = ppReprType(t))
- or
- exists(Content f | this = TFrontHead(f) | result = f.toString())
- }
+abstract private class AccessPathFront extends TAccessPathFront {
+ abstract string toString();
- DataFlowType getType() {
- this = TFrontNil(result)
- or
- exists(Content head | this = TFrontHead(head) | result = head.getContainerType())
- }
+ abstract DataFlowType getType();
+
+ abstract boolean toBoolNonEmpty();
predicate headUsesContent(Content f) { this = TFrontHead(f) }
}
-private class AccessPathFrontNil extends AccessPathFront, TFrontNil { }
+private class AccessPathFrontNil extends AccessPathFront, TFrontNil {
+ override string toString() {
+ exists(DataFlowType t | this = TFrontNil(t) | result = ppReprType(t))
+ }
+
+ override DataFlowType getType() { this = TFrontNil(result) }
+
+ override boolean toBoolNonEmpty() { result = false }
+}
+
+private class AccessPathFrontHead extends AccessPathFront, TFrontHead {
+ override string toString() { exists(Content f | this = TFrontHead(f) | result = f.toString()) }
+
+ override DataFlowType getType() {
+ exists(Content head | this = TFrontHead(head) | result = head.getContainerType())
+ }
+
+ override boolean toBoolNonEmpty() { result = true }
+}
/**
* Holds if data can flow from a source to `node` with the given `apf`.
*/
+pragma[nomagic]
private predicate flowCandFwd(
NodeExt node, boolean fromArg, AccessPathFront apf, Configuration config
) {
@@ -1402,6 +1459,7 @@ private predicate flowCandFwd(
else any()
}
+pragma[nomagic]
private predicate flowCandFwd0(
NodeExt node, boolean fromArg, AccessPathFront apf, Configuration config
) {
@@ -1410,18 +1468,18 @@ private predicate flowCandFwd0(
fromArg = false and
apf = TFrontNil(node.getErasedNodeTypeBound())
or
- nodeCand(node, unbind(config)) and
+ exists(NodeExt mid |
+ flowCandFwd(mid, fromArg, apf, config) and
+ localFlowBigStepExt(mid, node, true, _, config)
+ )
+ or
+ exists(NodeExt mid, AccessPathFrontNil nil |
+ flowCandFwd(mid, fromArg, nil, config) and
+ localFlowBigStepExt(mid, node, false, apf, config)
+ )
+ or
+ nodeCand2(node, unbind(config)) and
(
- exists(NodeExt mid |
- flowCandFwd(mid, fromArg, apf, config) and
- localFlowBigStepExt(mid, node, true, _, config)
- )
- or
- exists(NodeExt mid, AccessPathFrontNil nil |
- flowCandFwd(mid, fromArg, nil, config) and
- localFlowBigStepExt(mid, node, false, apf, config)
- )
- or
exists(NodeExt mid |
flowCandFwd(mid, _, apf, config) and
jumpStepExt(mid, node, config) and
@@ -1437,14 +1495,14 @@ private predicate flowCandFwd0(
or
exists(NodeExt mid, boolean allowsFieldFlow |
flowCandFwd(mid, _, apf, config) and
- flowIntoCallable(mid, node, allowsFieldFlow, config) and
+ flowIntoCallableNodeCand2(mid, node, allowsFieldFlow, config) and
fromArg = true and
(apf instanceof AccessPathFrontNil or allowsFieldFlow = true)
)
or
exists(NodeExt mid, boolean allowsFieldFlow |
flowCandFwd(mid, false, apf, config) and
- flowOutOfCallable(mid, node, allowsFieldFlow, config) and
+ flowOutOfCallableNodeCand2(mid, node, allowsFieldFlow, config) and
fromArg = false and
(apf instanceof AccessPathFrontNil or allowsFieldFlow = true)
)
@@ -1456,103 +1514,41 @@ private predicate flowCandFwd0(
or
exists(NodeExt mid, AccessPathFrontNil nil, DataFlowType t |
flowCandFwd(mid, fromArg, nil, config) and
- argumentFlowsThrough2(mid, node, _, t, TSummaryTaint(), config) and
+ argumentFlowsThrough(mid, node, t, config) and
apf = TFrontNil(t)
)
)
or
exists(NodeExt mid, Content f |
flowCandFwd(mid, fromArg, _, config) and
- storeExt(mid, f, node) and
- nodeCand(node, unbind(config)) and
- readStoreCand(f, unbind(config)) and
- apf.headUsesContent(f)
- )
- or
- exists(NodeExt mid, AccessPathFrontNil nil, Content f |
- flowCandFwd(mid, fromArg, nil, config) and
- argumentFlowsThrough2(mid, node, _, _, TSummaryTaintStore(f), config) and
+ storeExtCand2(mid, f, node, config) and
+ nodeCand2(node, _, true, unbind(config)) and
apf.headUsesContent(f)
)
or
exists(Content f |
flowCandFwdRead(f, node, fromArg, config) and
- consCandFwd(f, apf, config)
- )
- or
- exists(Content f, AccessPathFrontNil nil, DataFlowType t |
- flowCandFwdReadTaint(f, node, fromArg, t, config) and
- consCandFwd(f, nil, config) and
- apf = TFrontNil(t)
+ consCandFwd(f, apf, config) and
+ nodeCand2(node, _, unbindBool(apf.toBoolNonEmpty()), unbind(config))
)
}
-pragma[noinline]
+pragma[nomagic]
private predicate consCandFwd(Content f, AccessPathFront apf, Configuration config) {
exists(NodeExt mid, NodeExt n |
flowCandFwd(mid, _, apf, config) and
- storeExt(mid, f, n) and
- nodeCand(n, unbind(config)) and
- readStoreCand(f, unbind(config)) and
+ storeExtCand2(mid, f, n, config) and
+ nodeCand2(n, _, true, unbind(config)) and
compatibleTypes(apf.getType(), f.getType())
)
}
pragma[nomagic]
private predicate flowCandFwdRead(Content f, NodeExt node, boolean fromArg, Configuration config) {
- exists(NodeExt mid, AccessPathFront apf |
- flowCandFwd(mid, fromArg, apf, config) and
- readExt(mid, f, node) and
- apf.headUsesContent(f) and
- nodeCand(node, unbind(config))
- )
-}
-
-pragma[nomagic]
-private predicate flowCandFwdReadTaint(
- Content f, NodeExt node, boolean fromArg, DataFlowType t, Configuration config
-) {
- exists(NodeExt mid, AccessPathFront apf |
- flowCandFwd(mid, fromArg, apf, config) and
- argumentFlowsThrough2(mid, node, _, t, TSummaryReadTaint(f), config) and
- apf.headUsesContent(f)
- )
-}
-
-pragma[noinline]
-private predicate flowCandFwdEmptyAp(NodeExt node, Configuration config) {
- flowCandFwd(node, _, any(AccessPathFrontNil nil), config)
-}
-
-pragma[noinline]
-private predicate consCandFwdEmptyAp(Content f, Configuration config) {
- consCandFwd(f, any(AccessPathFrontNil nil), config)
-}
-
-private predicate argumentFlowsThrough3(
- NodeExt n1, NodeExt n2, DataFlowType t1, DataFlowType t2, Summary s, Configuration config
-) {
- argumentFlowsThrough2(n1, n2, t1, t2, s, config) and
- flowCandFwdEmptyAp(n1, config) and
- flowCandFwdEmptyAp(n2, unbind(config)) and
- s = TSummaryTaint()
- or
- exists(Content f, AccessPathFront apf |
- argumentFlowsThrough2(n1, n2, t1, t2, s, config) and
- flowCandFwdEmptyAp(n1, config) and
- flowCandFwd(n2, _, apf, unbind(config)) and
- s = TSummaryTaintStore(f) and
- consCandFwdEmptyAp(f, unbind(config)) and
- apf.headUsesContent(f)
- )
- or
- exists(Content f, AccessPathFront apf |
- argumentFlowsThrough2(n1, n2, t1, t2, s, config) and
- flowCandFwd(n1, _, apf, config) and
- flowCandFwdEmptyAp(n2, unbind(config)) and
- s = TSummaryReadTaint(f) and
- consCandFwdEmptyAp(f, unbind(config)) and
- apf.headUsesContent(f)
+ exists(NodeExt mid, AccessPathFrontHead apf0 |
+ flowCandFwd(mid, fromArg, apf0, config) and
+ readExtCand2(mid, f, node, config) and
+ apf0.headUsesContent(f)
)
}
@@ -1560,11 +1556,13 @@ private predicate argumentFlowsThrough3(
* Holds if data can flow from a source to `node` with the given `apf` and
* from there flow to a sink.
*/
+pragma[nomagic]
private predicate flowCand(NodeExt node, boolean toReturn, AccessPathFront apf, Configuration config) {
flowCand0(node, toReturn, apf, config) and
flowCandFwd(node, _, apf, config)
}
+pragma[nomagic]
private predicate flowCand0(
NodeExt node, boolean toReturn, AccessPathFront apf, Configuration config
) {
@@ -1600,14 +1598,14 @@ private predicate flowCand0(
)
or
exists(NodeExt mid, boolean allowsFieldFlow |
- flowIntoCallable(node, mid, allowsFieldFlow, config) and
+ flowIntoCallableNodeCand2(node, mid, allowsFieldFlow, config) and
flowCand(mid, false, apf, config) and
toReturn = false and
(apf instanceof AccessPathFrontNil or allowsFieldFlow = true)
)
or
exists(NodeExt mid, boolean allowsFieldFlow |
- flowOutOfCallable(node, mid, allowsFieldFlow, config) and
+ flowOutOfCallableNodeCand2(node, mid, allowsFieldFlow, config) and
flowCand(mid, _, apf, config) and
toReturn = true and
(apf instanceof AccessPathFrontNil or allowsFieldFlow = true)
@@ -1619,39 +1617,23 @@ private predicate flowCand0(
)
or
exists(NodeExt mid, AccessPathFrontNil nil |
- argumentFlowsThrough3(node, mid, _, _, TSummaryTaint(), config) and
+ argumentFlowsThrough(node, mid, _, config) and
flowCand(mid, toReturn, nil, config) and
apf instanceof AccessPathFrontNil and
flowCandFwd(node, _, apf, config)
)
or
- exists(Content f, AccessPathFront apf0 |
+ exists(Content f, AccessPathFrontHead apf0 |
flowCandStore(node, f, toReturn, apf0, config) and
apf0.headUsesContent(f) and
consCand(f, apf, config)
)
or
- exists(NodeExt mid, Content f, AccessPathFront apf0, AccessPathFrontNil nil |
- flowCandFwd(node, _, apf, config) and
- apf instanceof AccessPathFrontNil and
- argumentFlowsThrough3(node, mid, _, _, TSummaryTaintStore(f), config) and
- flowCand(mid, toReturn, apf0, config) and
- apf0.headUsesContent(f) and
- consCand(f, nil, unbind(config))
- )
- or
exists(Content f, AccessPathFront apf0 |
flowCandRead(node, f, toReturn, apf0, config) and
consCandFwd(f, apf0, config) and
apf.headUsesContent(f)
)
- or
- exists(NodeExt mid, AccessPathFrontNil nil1, AccessPathFrontNil nil2, Content f |
- argumentFlowsThrough3(node, mid, _, _, TSummaryReadTaint(f), config) and
- flowCand(mid, toReturn, nil1, config) and
- consCandFwd(f, nil2, unbind(config)) and
- apf.headUsesContent(f)
- )
}
pragma[nomagic]
@@ -1659,17 +1641,17 @@ private predicate flowCandRead(
NodeExt node, Content f, boolean toReturn, AccessPathFront apf0, Configuration config
) {
exists(NodeExt mid |
- readExt(node, f, mid) and
+ readExtCand2(node, f, mid, config) and
flowCand(mid, toReturn, apf0, config)
)
}
pragma[nomagic]
private predicate flowCandStore(
- NodeExt node, Content f, boolean toReturn, AccessPathFront apf0, Configuration config
+ NodeExt node, Content f, boolean toReturn, AccessPathFrontHead apf0, Configuration config
) {
exists(NodeExt mid |
- storeExt(node, f, mid) and
+ storeExtCand2(node, f, mid, config) and
flowCand(mid, toReturn, apf0, config)
)
}
@@ -1677,7 +1659,7 @@ private predicate flowCandStore(
pragma[nomagic]
private predicate consCand(Content f, AccessPathFront apf, Configuration config) {
consCandFwd(f, apf, config) and
- exists(NodeExt n, AccessPathFront apf0 |
+ exists(NodeExt n, AccessPathFrontHead apf0 |
flowCandFwd(n, _, apf0, config) and
apf0.headUsesContent(f) and
flowCandRead(n, f, _, apf, config)
@@ -1701,25 +1683,11 @@ private newtype TAccessPath =
abstract private class AccessPath extends TAccessPath {
abstract string toString();
- Content getHead() {
- this = TConsNil(result, _)
- or
- this = TConsCons(result, _, _)
- }
+ abstract Content getHead();
- int len() {
- this = TNil(_) and result = 0
- or
- this = TConsNil(_, _) and result = 1
- or
- this = TConsCons(_, _, result)
- }
+ abstract int len();
- DataFlowType getType() {
- this = TNil(result)
- or
- result = this.getHead().getContainerType()
- }
+ abstract DataFlowType getType();
abstract AccessPathFront getFront();
@@ -1730,13 +1698,19 @@ abstract private class AccessPath extends TAccessPath {
}
private class AccessPathNil extends AccessPath, TNil {
- override string toString() {
- exists(DataFlowType t | this = TNil(t) | result = concat(": " + ppReprType(t)))
- }
+ private DataFlowType t;
- override AccessPathFront getFront() {
- exists(DataFlowType t | this = TNil(t) | result = TFrontNil(t))
- }
+ AccessPathNil() { this = TNil(t) }
+
+ override string toString() { result = concat(": " + ppReprType(t)) }
+
+ override Content getHead() { none() }
+
+ override int len() { result = 0 }
+
+ override DataFlowType getType() { result = t }
+
+ override AccessPathFront getFront() { result = TFrontNil(t) }
override predicate pop(Content head, AccessPath tail) { none() }
}
@@ -1744,41 +1718,55 @@ private class AccessPathNil extends AccessPath, TNil {
abstract private class AccessPathCons extends AccessPath { }
private class AccessPathConsNil extends AccessPathCons, TConsNil {
+ private Content f;
+ private DataFlowType t;
+
+ AccessPathConsNil() { this = TConsNil(f, t) }
+
override string toString() {
- exists(Content f, DataFlowType t | this = TConsNil(f, t) |
- // The `concat` becomes "" if `ppReprType` has no result.
- result = "[" + f.toString() + "]" + concat(" : " + ppReprType(t))
- )
+ // The `concat` becomes "" if `ppReprType` has no result.
+ result = "[" + f.toString() + "]" + concat(" : " + ppReprType(t))
}
- override AccessPathFront getFront() {
- exists(Content f | this = TConsNil(f, _) | result = TFrontHead(f))
- }
+ override Content getHead() { result = f }
- override predicate pop(Content head, AccessPath tail) {
- exists(DataFlowType t | this = TConsNil(head, t) and tail = TNil(t))
- }
+ override int len() { result = 1 }
+
+ override DataFlowType getType() { result = f.getContainerType() }
+
+ override AccessPathFront getFront() { result = TFrontHead(f) }
+
+ override predicate pop(Content head, AccessPath tail) { head = f and tail = TNil(t) }
}
private class AccessPathConsCons extends AccessPathCons, TConsCons {
+ private Content f1;
+ private Content f2;
+ private int len;
+
+ AccessPathConsCons() { this = TConsCons(f1, f2, len) }
+
override string toString() {
- exists(Content f1, Content f2, int len | this = TConsCons(f1, f2, len) |
- if len = 2
- then result = "[" + f1.toString() + ", " + f2.toString() + "]"
- else result = "[" + f1.toString() + ", " + f2.toString() + ", ... (" + len.toString() + ")]"
- )
+ if len = 2
+ then result = "[" + f1.toString() + ", " + f2.toString() + "]"
+ else result = "[" + f1.toString() + ", " + f2.toString() + ", ... (" + len.toString() + ")]"
}
- override AccessPathFront getFront() {
- exists(Content f | this = TConsCons(f, _, _) | result = TFrontHead(f))
- }
+ override Content getHead() { result = f1 }
+
+ override int len() { result = len }
+
+ override DataFlowType getType() { result = f1.getContainerType() }
+
+ override AccessPathFront getFront() { result = TFrontHead(f1) }
override predicate pop(Content head, AccessPath tail) {
- exists(int len, Content next | this = TConsCons(head, next, len) |
- tail = TConsCons(next, _, len - 1)
+ head = f1 and
+ (
+ tail = TConsCons(f2, _, len - 1)
or
len = 2 and
- tail = TConsNil(next, _)
+ tail = TConsNil(f2, _)
)
}
}
@@ -1786,12 +1774,6 @@ private class AccessPathConsCons extends AccessPathCons, TConsCons {
/** Gets the access path obtained by popping `f` from `ap`, if any. */
private AccessPath pop(Content f, AccessPath ap) { ap.pop(f, result) }
-/** Holds if `ap0` corresponds to the cons of `f` and `ap` and `apf` is the front of `ap`. */
-pragma[noinline]
-private predicate popWithFront(AccessPath ap0, Content f, AccessPathFront apf, AccessPath ap) {
- ap = pop(f, ap0) and apf = ap.getFront()
-}
-
/** Gets the access path obtained by pushing `f` onto `ap`. */
private AccessPath push(Content f, AccessPath ap) { ap = pop(f, result) }
@@ -1843,14 +1825,14 @@ private predicate flowFwd0(
or
exists(NodeExt mid, boolean allowsFieldFlow |
flowFwd(mid, _, apf, ap, config) and
- flowIntoCallable(mid, node, allowsFieldFlow, config) and
+ flowIntoCallableNodeCand2(mid, node, allowsFieldFlow, config) and
fromArg = true and
(ap instanceof AccessPathNil or allowsFieldFlow = true)
)
or
exists(NodeExt mid, boolean allowsFieldFlow |
flowFwd(mid, false, apf, ap, config) and
- flowOutOfCallable(mid, node, allowsFieldFlow, config) and
+ flowOutOfCallableNodeCand2(mid, node, allowsFieldFlow, config) and
fromArg = false and
(ap instanceof AccessPathNil or allowsFieldFlow = true)
)
@@ -1862,7 +1844,7 @@ private predicate flowFwd0(
or
exists(NodeExt mid, AccessPathNil nil, DataFlowType t |
flowFwd(mid, fromArg, _, nil, config) and
- argumentFlowsThrough3(mid, node, _, t, TSummaryTaint(), config) and
+ argumentFlowsThrough(mid, node, t, config) and
ap = TNil(t) and
apf = ap.(AccessPathNil).getFront()
)
@@ -1873,18 +1855,9 @@ private predicate flowFwd0(
ap = push(f, ap0)
)
or
- exists(Content f, AccessPath ap0 |
- flowFwdRead(node, f, ap0, fromArg, config) and
- popWithFront(ap0, f, apf, ap)
- )
- or
- exists(Content f, NodeExt mid, AccessPathFront apf0, DataFlowType t |
- flowFwd(mid, fromArg, apf0, any(AccessPathConsNil consnil), config) and
- argumentFlowsThrough3(mid, node, _, t, TSummaryReadTaint(f), config) and
- apf0.headUsesContent(f) and
- flowCand(node, _, _, unbind(config)) and
- ap = TNil(t) and
- apf = ap.(AccessPathNil).getFront()
+ exists(Content f |
+ flowFwdRead(node, f, push(f, ap), fromArg, config) and
+ flowConsCandFwd(f, apf, ap, config)
)
}
@@ -1897,31 +1870,23 @@ private predicate flowFwdStore(
flowFwd(mid, fromArg, apf0, ap0, config) and
flowFwdStore1(mid, f, node, apf0, apf, config)
)
- or
- exists(NodeExt mid, DataFlowType t |
- flowFwd(mid, fromArg, _, any(AccessPathNil nil), config) and
- argumentFlowsThrough3(mid, node, t, _, TSummaryTaintStore(f), config) and
- consCand(f, TFrontNil(t), unbind(config)) and
- ap0 = TNil(t) and
- apf.headUsesContent(f) and
- flowCand(node, _, apf, unbind(config))
- )
}
-pragma[noinline]
+pragma[nomagic]
private predicate flowFwdStore0(
NodeExt mid, Content f, NodeExt node, AccessPathFront apf0, Configuration config
) {
- storeExt(mid, f, node) and
- consCand(f, apf0, config)
+ storeExtCand2(mid, f, node, config) and
+ flowCand(mid, _, apf0, config)
}
pragma[noinline]
private predicate flowFwdStore1(
- NodeExt mid, Content f, NodeExt node, AccessPathFront apf0, AccessPathFront apf,
+ NodeExt mid, Content f, NodeExt node, AccessPathFront apf0, AccessPathFrontHead apf,
Configuration config
) {
flowFwdStore0(mid, f, node, apf0, config) and
+ consCand(f, apf0, config) and
apf.headUsesContent(f) and
flowCand(node, _, apf, unbind(config))
}
@@ -1930,14 +1895,24 @@ pragma[nomagic]
private predicate flowFwdRead(
NodeExt node, Content f, AccessPath ap0, boolean fromArg, Configuration config
) {
- exists(NodeExt mid, AccessPathFront apf0 |
+ exists(NodeExt mid, AccessPathFrontHead apf0 |
flowFwd(mid, fromArg, apf0, ap0, config) and
- readExt(mid, f, node) and
+ readExtCand2(mid, f, node, config) and
apf0.headUsesContent(f) and
flowCand(node, _, _, unbind(config))
)
}
+pragma[nomagic]
+private predicate flowConsCandFwd(
+ Content f, AccessPathFront apf, AccessPath ap, Configuration config
+) {
+ exists(NodeExt n |
+ flowFwd(n, _, apf, ap, config) and
+ flowFwdStore1(n, f, _, apf, _, config)
+ )
+}
+
/**
* Holds if data can flow from a source to `node` with the given `ap` and
* from there flow to a sink.
@@ -1980,14 +1955,14 @@ private predicate flow0(NodeExt node, boolean toReturn, AccessPath ap, Configura
)
or
exists(NodeExt mid, boolean allowsFieldFlow |
- flowIntoCallable(node, mid, allowsFieldFlow, config) and
+ flowIntoCallableNodeCand2(node, mid, allowsFieldFlow, config) and
flow(mid, false, ap, config) and
toReturn = false and
(ap instanceof AccessPathNil or allowsFieldFlow = true)
)
or
exists(NodeExt mid, boolean allowsFieldFlow |
- flowOutOfCallable(node, mid, allowsFieldFlow, config) and
+ flowOutOfCallableNodeCand2(node, mid, allowsFieldFlow, config) and
flow(mid, _, ap, config) and
toReturn = true and
(ap instanceof AccessPathNil or allowsFieldFlow = true)
@@ -1998,65 +1973,58 @@ private predicate flow0(NodeExt node, boolean toReturn, AccessPath ap, Configura
flow(mid, toReturn, ap, config)
)
or
- exists(NodeExt mid, AccessPathNil ap0 |
- argumentFlowsThrough3(node, mid, _, _, TSummaryTaint(), config) and
- flow(mid, toReturn, ap0, config) and
+ exists(NodeExt mid, AccessPathNil nil |
+ argumentFlowsThrough(node, mid, _, config) and
+ flow(mid, toReturn, nil, config) and
ap instanceof AccessPathNil and
flowFwd(node, _, _, ap, config)
)
or
- exists(NodeExt mid, AccessPath ap0 |
- storeFwd(node, _, mid, ap, ap0, config) and
- flow(mid, toReturn, ap0, config)
- )
- or
- exists(Content f, AccessPath ap0 |
- flowTaintStore(node, f, toReturn, ap0, config) and
- pop(f, ap0) instanceof AccessPathNil and
- ap instanceof AccessPathNil and
- flowFwd(node, _, _, ap, config)
+ exists(Content f |
+ flowStore(f, node, toReturn, ap, config) and
+ flowConsCand(f, ap, config)
)
or
exists(NodeExt mid, AccessPath ap0 |
- readFwd(node, mid, ap, ap0, config) and
+ readFwd(node, _, mid, ap, ap0, config) and
flow(mid, toReturn, ap0, config)
)
- or
- exists(NodeExt mid, Content f |
- argumentFlowsThrough3(node, mid, _, _, TSummaryReadTaint(f), config) and
- flow(mid, toReturn, any(AccessPathNil nil1), config) and
- ap = push(f, any(AccessPathNil nil2)) and
- flowFwd(node, _, _, ap, config)
- )
}
pragma[nomagic]
private predicate storeFwd(
NodeExt node1, Content f, NodeExt node2, AccessPath ap, AccessPath ap0, Configuration config
) {
- storeExt(node1, f, node2) and
+ storeExtCand2(node1, f, node2, config) and
flowFwdStore(node2, f, ap, _, _, config) and
ap0 = push(f, ap)
}
pragma[nomagic]
-private predicate flowTaintStore(
- NodeExt node, Content f, boolean toReturn, AccessPath ap0, Configuration config
+private predicate flowStore(
+ Content f, NodeExt node, boolean toReturn, AccessPath ap, Configuration config
) {
- exists(NodeExt mid |
- argumentFlowsThrough3(node, mid, _, _, TSummaryTaintStore(f), config) and
+ exists(NodeExt mid, AccessPath ap0 |
+ storeFwd(node, f, mid, ap, ap0, config) and
flow(mid, toReturn, ap0, config)
)
}
pragma[nomagic]
private predicate readFwd(
- NodeExt node1, NodeExt node2, AccessPath ap, AccessPath ap0, Configuration config
+ NodeExt node1, Content f, NodeExt node2, AccessPath ap, AccessPath ap0, Configuration config
) {
- exists(Content f |
- readExt(node1, f, node2) and
- flowFwdRead(node2, f, ap, _, config) and
- ap0 = pop(f, ap)
+ readExtCand2(node1, f, node2, config) and
+ flowFwdRead(node2, f, ap, _, config) and
+ ap0 = pop(f, ap) and
+ flowConsCandFwd(f, _, ap0, unbind(config))
+}
+
+pragma[nomagic]
+private predicate flowConsCand(Content f, AccessPath ap, Configuration config) {
+ exists(NodeExt n, NodeExt mid |
+ flow(mid, _, ap, config) and
+ readFwd(n, f, mid, _, ap, config)
)
}
@@ -2347,10 +2315,10 @@ private predicate pathStep(PathNodeMid mid, Node node, CallContext cc, SummaryCt
mid.getAp() instanceof AccessPathNil and
ap = TNil(getErasedNodeTypeBound(node))
or
- exists(Content f, AccessPath ap0 | pathReadStep(mid, node, ap0, f, cc) and ap = pop(f, ap0)) and
+ exists(Content f | pathReadStep(mid, node, push(f, ap), f, cc)) and
sc = mid.getSummaryCtx()
or
- exists(Content f, AccessPath ap0 | pathStoreStep(mid, node, ap0, f, cc) and ap = push(f, ap0)) and
+ exists(Content f | pathStoreStep(mid, node, pop(f, ap), f, cc)) and
sc = mid.getSummaryCtx()
or
pathIntoCallable(mid, node, _, cc, sc, _) and ap = mid.getAp()
diff --git a/ql/src/semmle/go/dataflow/internal/DataFlowImplCommon.qll b/ql/src/semmle/go/dataflow/internal/DataFlowImplCommon.qll
index ffbeeb94efc..783ac641e6e 100644
--- a/ql/src/semmle/go/dataflow/internal/DataFlowImplCommon.qll
+++ b/ql/src/semmle/go/dataflow/internal/DataFlowImplCommon.qll
@@ -243,7 +243,7 @@ private module Cached {
* - Types are checked using the `compatibleTypes()` relation.
*/
cached
- module Final {
+ private module Final {
/**
* Holds if `p` can flow to `node` in the same callable using only
* value-preserving steps, not taking call contexts into account.
From c9ba6dd6724064e705dd8a367423c5c7d5eab03f Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Thu, 26 Mar 2020 08:01:47 +0000
Subject: [PATCH 059/157] Fix up `hasLocationInfo` predicate.
---
ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll b/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
index f7f9b2a0393..ff7b9b2f0b1 100644
--- a/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
+++ b/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
@@ -809,7 +809,7 @@ private class ReadStoreNodeExt extends CastingNodeExt, TReadStoreNode {
override predicate hasLocationInfo(
string filepath, int startline, int startcolumn, int endline, int endcolumn
) {
- arg.getLocation().hasLocationInfo(filepath, startline, startcolumn, endline, endcolumn)
+ arg.hasLocationInfo(filepath, startline, startcolumn, endline, endcolumn)
}
}
@@ -833,7 +833,7 @@ private class ReadTaintNode extends NodeExt, TReadTaintNode {
override predicate hasLocationInfo(
string filepath, int startline, int startcolumn, int endline, int endcolumn
) {
- arg.getLocation().hasLocationInfo(filepath, startline, startcolumn, endline, endcolumn)
+ arg.hasLocationInfo(filepath, startline, startcolumn, endline, endcolumn)
}
}
@@ -857,7 +857,7 @@ private class TaintStoreNode extends NodeExt, TTaintStoreNode {
override predicate hasLocationInfo(
string filepath, int startline, int startcolumn, int endline, int endcolumn
) {
- arg.getLocation().hasLocationInfo(filepath, startline, startcolumn, endline, endcolumn)
+ arg.hasLocationInfo(filepath, startline, startcolumn, endline, endcolumn)
}
}
From aabe2f2f820019883b716a2aacfda7560aae3067 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Mon, 30 Mar 2020 09:22:03 +0100
Subject: [PATCH 060/157] Data flow: No magic in returnFlowCallableCand.
cf https://github.com/Semmle/ql/pull/3142
---
ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll | 1 +
1 file changed, 1 insertion(+)
diff --git a/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll b/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
index ff7b9b2f0b1..ebbaab507a6 100644
--- a/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
+++ b/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
@@ -548,6 +548,7 @@ private predicate throughFlowNodeCand(Node node, Configuration config) {
}
/** Holds if flow may return from `callable`. */
+pragma[nomagic]
private predicate returnFlowCallableCand(
DataFlowCallable callable, ReturnKindExt kind, Configuration config
) {
From f2b43f65f9b66050006730e46a8fe42f7ac0feca Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Wed, 1 Apr 2020 09:40:24 +0100
Subject: [PATCH 061/157] Data flow: Exclude param-param flow through identical
params.
cf https://github.com/Semmle/ql/pull/3060
---
ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll b/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
index ebbaab507a6..50a5e284dfd 100644
--- a/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
+++ b/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
@@ -2089,6 +2089,8 @@ private class SummaryCtxSome extends SummaryCtx, TSummaryCtxSome {
SummaryCtxSome() { this = TSummaryCtxSome(p, ap) }
+ int getParameterPos() { p.isParameterOf(_, result) }
+
override string toString() { result = p + ": " + ap }
predicate hasLocationInfo(
@@ -2482,13 +2484,15 @@ pragma[nomagic]
private predicate paramFlowsThrough(
ReturnKindExt kind, CallContextCall cc, SummaryCtxSome sc, AccessPath ap, Configuration config
) {
- exists(PathNodeMid mid, ReturnNodeExt ret |
+ exists(PathNodeMid mid, ReturnNodeExt ret, int pos |
mid.getNode() = ret and
kind = ret.getKind() and
cc = mid.getCallContext() and
sc = mid.getSummaryCtx() and
config = mid.getConfiguration() and
- ap = mid.getAp()
+ ap = mid.getAp() and
+ pos = sc.getParameterPos() and
+ not kind.(ParamUpdateReturnKind).getPosition() = pos
)
}
From 968d4d9cdd647b83e5f0c32cfcff80536dfaddc3 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Wed, 29 Apr 2020 14:37:46 +0100
Subject: [PATCH 062/157] Revert the join order fix from
https://github.com/github/codeql/pull/2872.
cf https://github.com/github/codeql/pull/3202
---
.../go/dataflow/internal/DataFlowImpl.qll | 27 +++++--------------
1 file changed, 7 insertions(+), 20 deletions(-)
diff --git a/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll b/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
index 50a5e284dfd..a305dc25a36 100644
--- a/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
+++ b/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
@@ -2293,12 +2293,13 @@ private class PathNodeSink extends PathNodeImpl, TPathNodeSink {
* a callable is recorded by `cc`.
*/
private predicate pathStep(PathNodeMid mid, Node node, CallContext cc, SummaryCtx sc, AccessPath ap) {
- exists(
- AccessPath ap0, Node midnode, Configuration conf, DataFlowCallable enclosing,
- LocalCallContext localCC
- |
- pathIntoLocalStep(mid, midnode, cc, enclosing, sc, ap0, conf) and
- localCC = getLocalCallContext(cc, enclosing)
+ exists(AccessPath ap0, Node midnode, Configuration conf, LocalCallContext localCC |
+ midnode = mid.getNode() and
+ conf = mid.getConfiguration() and
+ cc = mid.getCallContext() and
+ sc = mid.getSummaryCtx() and
+ localCC = getLocalCallContext(cc, midnode.getEnclosingCallable()) and
+ ap0 = mid.getAp()
|
localFlowBigStep(midnode, node, true, _, conf, localCC) and
ap = ap0
@@ -2331,20 +2332,6 @@ private predicate pathStep(PathNodeMid mid, Node node, CallContext cc, SummaryCt
pathThroughCallable(mid, node, cc, ap) and sc = mid.getSummaryCtx()
}
-pragma[nomagic]
-private predicate pathIntoLocalStep(
- PathNodeMid mid, Node midnode, CallContext cc, DataFlowCallable enclosing, SummaryCtx sc,
- AccessPath ap0, Configuration conf
-) {
- midnode = mid.getNode() and
- cc = mid.getCallContext() and
- conf = mid.getConfiguration() and
- localFlowBigStep(midnode, _, _, _, conf, _) and
- enclosing = midnode.getEnclosingCallable() and
- sc = mid.getSummaryCtx() and
- ap0 = mid.getAp()
-}
-
pragma[nomagic]
private predicate readCand(Node node1, Content f, Node node2, Configuration config) {
readDirect(node1, f, node2) and
From fd2e618be28802de0e9795b41339ff9bed8a0fb0 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Wed, 6 May 2020 09:32:56 +0100
Subject: [PATCH 063/157] Data flow: No more summaries
cf https://github.com/github/codeql/pull/3110
---
.../go/dataflow/internal/DataFlowImpl.qll | 1986 ++++++++---------
.../dataflow/internal/DataFlowImplCommon.qll | 459 ++--
2 files changed, 1123 insertions(+), 1322 deletions(-)
diff --git a/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll b/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
index a305dc25a36..db0fbcf7130 100644
--- a/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
+++ b/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
@@ -251,15 +251,11 @@ private predicate additionalJumpStep(Node node1, Node node2, Configuration confi
*/
private predicate useFieldFlow(Configuration config) { config.fieldFlowBranchLimit() >= 1 }
-pragma[noinline]
-private ReturnPosition viableReturnPos(DataFlowCall call, ReturnKindExt kind) {
- viableCallable(call) = result.getCallable() and
- kind = result.getKind()
-}
-
/**
- * Holds if `node` is reachable from a source in the given configuration
- * taking simple call contexts into consideration.
+ * Holds if `node` is reachable from a source in the configuration `config`.
+ *
+ * The Boolean `fromArg` records whether the node is reached through an
+ * argument in a call.
*/
private predicate nodeCandFwd1(Node node, boolean fromArg, Configuration config) {
not fullBarrier(node, config) and
@@ -293,14 +289,14 @@ private predicate nodeCandFwd1(Node node, boolean fromArg, Configuration config)
exists(Node mid |
useFieldFlow(config) and
nodeCandFwd1(mid, fromArg, config) and
- storeDirect(mid, _, node) and
+ store(mid, _, node) and
not outBarrier(mid, config)
)
or
// read
exists(Content f |
nodeCandFwd1Read(f, node, fromArg, config) and
- storeCandFwd1(f, config) and
+ nodeCandFwd1IsStored(f, config) and
not inBarrier(node, config)
)
or
@@ -317,13 +313,34 @@ private predicate nodeCandFwd1(Node node, boolean fromArg, Configuration config)
fromArg = false
or
nodeCandFwd1OutFromArg(call, node, config) and
- flowOutCandFwd1(call, fromArg, config)
+ nodeCandFwd1IsEntered(call, fromArg, config)
)
)
}
private predicate nodeCandFwd1(Node node, Configuration config) { nodeCandFwd1(node, _, config) }
+pragma[nomagic]
+private predicate nodeCandFwd1Read(Content f, Node node, boolean fromArg, Configuration config) {
+ exists(Node mid |
+ nodeCandFwd1(mid, fromArg, config) and
+ read(mid, f, node)
+ )
+}
+
+/**
+ * Holds if `f` is the target of a store in the flow covered by `nodeCandFwd1`.
+ */
+pragma[nomagic]
+private predicate nodeCandFwd1IsStored(Content f, Configuration config) {
+ exists(Node mid, Node node |
+ not fullBarrier(node, config) and
+ useFieldFlow(config) and
+ nodeCandFwd1(mid, config) and
+ store(mid, f, node)
+ )
+}
+
pragma[nomagic]
private predicate nodeCandFwd1ReturnPosition(
ReturnPosition pos, boolean fromArg, Configuration config
@@ -335,43 +352,10 @@ private predicate nodeCandFwd1ReturnPosition(
}
pragma[nomagic]
-private predicate nodeCandFwd1Read(Content f, Node node, boolean fromArg, Configuration config) {
- exists(Node mid |
- nodeCandFwd1(mid, fromArg, config) and
- readDirect(mid, f, node)
- )
-}
-
-/**
- * Holds if `f` is the target of a store in the flow covered by `nodeCandFwd1`.
- */
-pragma[nomagic]
-private predicate storeCandFwd1(Content f, Configuration config) {
- exists(Node mid, Node node |
- not fullBarrier(node, config) and
- useFieldFlow(config) and
- nodeCandFwd1(mid, config) and
- storeDirect(mid, f, node)
- )
-}
-
-pragma[nomagic]
-private predicate nodeCandFwd1ReturnKind(
- DataFlowCall call, ReturnKindExt kind, boolean fromArg, Configuration config
-) {
+private predicate nodeCandFwd1Out(DataFlowCall call, Node out, boolean fromArg, Configuration config) {
exists(ReturnPosition pos |
nodeCandFwd1ReturnPosition(pos, fromArg, config) and
- pos = viableReturnPos(call, kind)
- )
-}
-
-pragma[nomagic]
-private predicate nodeCandFwd1Out(
- DataFlowCall call, Node node, boolean fromArg, Configuration config
-) {
- exists(ReturnKindExt kind |
- nodeCandFwd1ReturnKind(call, kind, fromArg, config) and
- node = kind.getAnOutNode(call)
+ viableReturnPosOut(call, pos, out)
)
}
@@ -384,7 +368,7 @@ private predicate nodeCandFwd1OutFromArg(DataFlowCall call, Node node, Configura
* Holds if an argument to `call` is reached in the flow covered by `nodeCandFwd1`.
*/
pragma[nomagic]
-private predicate flowOutCandFwd1(DataFlowCall call, boolean fromArg, Configuration config) {
+private predicate nodeCandFwd1IsEntered(DataFlowCall call, boolean fromArg, Configuration config) {
exists(ArgumentNode arg |
nodeCandFwd1(arg, fromArg, config) and
viableParamArg(call, _, arg)
@@ -395,8 +379,11 @@ bindingset[result, b]
private boolean unbindBool(boolean b) { result != b.booleanNot() }
/**
- * Holds if `node` is part of a path from a source to a sink in the given
- * configuration taking simple call contexts into consideration.
+ * Holds if `node` is part of a path from a source to a sink in the
+ * configuration `config`.
+ *
+ * The Boolean `toReturn` records whether the node must be returned from
+ * the enclosing callable in order to reach a sink.
*/
pragma[nomagic]
private predicate nodeCand1(Node node, boolean toReturn, Configuration config) {
@@ -435,55 +422,43 @@ private predicate nodeCand1_0(Node node, boolean toReturn, Configuration config)
// store
exists(Content f |
nodeCand1Store(f, node, toReturn, config) and
- readCand1(f, config)
+ nodeCand1IsRead(f, config)
)
or
// read
exists(Node mid, Content f |
- readDirect(node, f, mid) and
- storeCandFwd1(f, unbind(config)) and
+ read(node, f, mid) and
+ nodeCandFwd1IsStored(f, unbind(config)) and
nodeCand1(mid, toReturn, config)
)
or
// flow into a callable
exists(DataFlowCall call |
- nodeCand1Arg(call, node, false, config) and
+ nodeCand1In(call, node, false, config) and
toReturn = false
or
- nodeCand1ArgToReturn(call, node, config) and
- flowInCand1(call, toReturn, config)
+ nodeCand1InToReturn(call, node, config) and
+ nodeCand1IsReturned(call, toReturn, config)
)
or
// flow out of a callable
exists(ReturnPosition pos |
- nodeCand1ReturnPosition(pos, config) and
+ nodeCand1Out(pos, config) and
getReturnPosition(node) = pos and
toReturn = true
)
}
-pragma[nomagic]
-private predicate nodeCand1(Node node, Configuration config) { nodeCand1(node, _, config) }
-
-pragma[nomagic]
-private predicate nodeCand1ReturnPosition(ReturnPosition pos, Configuration config) {
- exists(DataFlowCall call, ReturnKindExt kind, Node out |
- nodeCand1(out, _, config) and
- pos = viableReturnPos(call, kind) and
- out = kind.getAnOutNode(call)
- )
-}
-
/**
* Holds if `f` is the target of a read in the flow covered by `nodeCand1`.
*/
pragma[nomagic]
-private predicate readCand1(Content f, Configuration config) {
+private predicate nodeCand1IsRead(Content f, Configuration config) {
exists(Node mid, Node node |
useFieldFlow(config) and
nodeCandFwd1(node, unbind(config)) and
- readDirect(node, f, mid) and
- storeCandFwd1(f, unbind(config)) and
+ read(node, f, mid) and
+ nodeCandFwd1IsStored(f, unbind(config)) and
nodeCand1(mid, _, config)
)
}
@@ -492,8 +467,8 @@ pragma[nomagic]
private predicate nodeCand1Store(Content f, Node node, boolean toReturn, Configuration config) {
exists(Node mid |
nodeCand1(mid, toReturn, config) and
- storeCandFwd1(f, unbind(config)) and
- storeDirect(node, f, mid)
+ nodeCandFwd1IsStored(f, unbind(config)) and
+ store(node, f, mid)
)
}
@@ -501,13 +476,29 @@ private predicate nodeCand1Store(Content f, Node node, boolean toReturn, Configu
* Holds if `f` is the target of both a read and a store in the flow covered
* by `nodeCand1`.
*/
-private predicate readStoreCand1(Content f, Configuration conf) {
- readCand1(f, conf) and
+private predicate nodeCand1IsReadAndStored(Content f, Configuration conf) {
+ nodeCand1IsRead(f, conf) and
nodeCand1Store(f, _, _, conf)
}
pragma[nomagic]
-private predicate viableParamArgCandFwd1(
+private predicate viableReturnPosOutNodeCandFwd1(
+ DataFlowCall call, ReturnPosition pos, Node out, Configuration config
+) {
+ nodeCandFwd1ReturnPosition(pos, _, config) and
+ viableReturnPosOut(call, pos, out)
+}
+
+pragma[nomagic]
+private predicate nodeCand1Out(ReturnPosition pos, Configuration config) {
+ exists(DataFlowCall call, Node out |
+ nodeCand1(out, _, config) and
+ viableReturnPosOutNodeCandFwd1(call, pos, out, config)
+ )
+}
+
+pragma[nomagic]
+private predicate viableParamArgNodeCandFwd1(
DataFlowCall call, ParameterNode p, ArgumentNode arg, Configuration config
) {
viableParamArg(call, p, arg) and
@@ -515,32 +506,35 @@ private predicate viableParamArgCandFwd1(
}
pragma[nomagic]
-private predicate nodeCand1Arg(
+private predicate nodeCand1In(
DataFlowCall call, ArgumentNode arg, boolean toReturn, Configuration config
) {
exists(ParameterNode p |
nodeCand1(p, toReturn, config) and
- viableParamArgCandFwd1(call, p, arg, config)
+ viableParamArgNodeCandFwd1(call, p, arg, config)
)
}
pragma[nomagic]
-private predicate nodeCand1ArgToReturn(DataFlowCall call, ArgumentNode arg, Configuration config) {
- nodeCand1Arg(call, arg, true, config)
+private predicate nodeCand1InToReturn(DataFlowCall call, ArgumentNode arg, Configuration config) {
+ nodeCand1In(call, arg, true, config)
}
/**
* Holds if an output from `call` is reached in the flow covered by `nodeCand1`.
*/
pragma[nomagic]
-private predicate flowInCand1(DataFlowCall call, boolean toReturn, Configuration config) {
+private predicate nodeCand1IsReturned(DataFlowCall call, boolean toReturn, Configuration config) {
exists(Node out |
nodeCand1(out, toReturn, config) and
nodeCandFwd1OutFromArg(call, out, config)
)
}
-private predicate throughFlowNodeCand(Node node, Configuration config) {
+pragma[nomagic]
+private predicate nodeCand1(Node node, Configuration config) { nodeCand1(node, _, config) }
+
+private predicate throughFlowNodeCand1(Node node, Configuration config) {
nodeCand1(node, true, config) and
not fullBarrier(node, config) and
not inBarrier(node, config) and
@@ -549,11 +543,11 @@ private predicate throughFlowNodeCand(Node node, Configuration config) {
/** Holds if flow may return from `callable`. */
pragma[nomagic]
-private predicate returnFlowCallableCand(
+private predicate returnFlowCallableNodeCand1(
DataFlowCallable callable, ReturnKindExt kind, Configuration config
) {
exists(ReturnNodeExt ret |
- throughFlowNodeCand(ret, config) and
+ throughFlowNodeCand1(ret, config) and
callable = ret.getEnclosingCallable() and
kind = ret.getKind()
)
@@ -563,10 +557,10 @@ private predicate returnFlowCallableCand(
* Holds if flow may enter through `p` and reach a return node making `p` a
* candidate for the origin of a summary.
*/
-private predicate parameterThroughFlowCand(ParameterNode p, Configuration config) {
+private predicate parameterThroughFlowNodeCand1(ParameterNode p, Configuration config) {
exists(ReturnKindExt kind |
- throughFlowNodeCand(p, config) and
- returnFlowCallableCand(p.getEnclosingCallable(), kind, config) and
+ throughFlowNodeCand1(p, config) and
+ returnFlowCallableNodeCand1(p.getEnclosingCallable(), kind, config) and
// we don't expect a parameter to return stored in itself
not exists(int pos |
kind.(ParamUpdateReturnKind).getPosition() = pos and p.isParameterOf(_, pos)
@@ -576,419 +570,73 @@ private predicate parameterThroughFlowCand(ParameterNode p, Configuration config
pragma[nomagic]
private predicate store(Node n1, Content f, Node n2, Configuration config) {
- readStoreCand1(f, config) and
+ nodeCand1IsReadAndStored(f, config) and
nodeCand1(n2, unbind(config)) and
- (
- storeDirect(n1, f, n2) or
- argumentValueFlowsThrough(_, n1, TContentNone(), TContentSome(f), n2)
- )
+ store(n1, f, n2)
}
pragma[nomagic]
private predicate read(Node n1, Content f, Node n2, Configuration config) {
- readStoreCand1(f, config) and
+ nodeCand1IsReadAndStored(f, config) and
nodeCand1(n2, unbind(config)) and
- (
- readDirect(n1, f, n2) or
- argumentValueFlowsThrough(_, n1, TContentSome(f), TContentNone(), n2)
- )
-}
-
-/**
- * Holds if `p` can flow to `node` in the same callable with `summary`
- * representing the flow path. The type of the tracked object is `t2`, and if
- * the summary includes a store step, `t1` is the tracked type just prior to the
- * store, that is, the type of the stored object, otherwise `t1` is equal to `t2`.
- */
-pragma[nomagic]
-private predicate parameterFlow(
- ParameterNode p, Node node, DataFlowType t1, DataFlowType t2, Summary summary,
- Configuration config
-) {
- parameterThroughFlowCand(p, config) and
- p = node and
- t1 = getErasedNodeTypeBound(node) and
- t1 = t2 and
- summary = TSummaryVal()
- or
- throughFlowNodeCand(node, unbind(config)) and
- (
- exists(Node mid |
- parameterFlow(p, mid, t1, t2, summary, config) and
- localFlowStep(mid, node, config) and
- compatibleTypes(t2, getErasedNodeTypeBound(node))
- )
- or
- exists(Node mid, Summary midsum |
- parameterFlow(p, mid, _, _, midsum, config) and
- additionalLocalFlowStep(mid, node, config) and
- t1 = getErasedNodeTypeBound(node) and
- t1 = t2 and
- summary = midsum.additionalStep()
- )
- or
- // read step
- exists(Node mid, Content f, Summary midsum |
- parameterFlow(p, mid, _, _, midsum, config) and
- read(mid, f, node, config) and
- summary = midsum.readStep(f) and
- t1 = f.getType() and
- t1 = t2
- )
- or
- // store step
- exists(Node mid, Content f, Summary midsum |
- parameterFlow(p, mid, t1, /* t1 */ _, midsum, config) and
- store(mid, f, node, config) and
- summary = midsum.storeStep(f) and
- compatibleTypes(t1, f.getType()) and
- t2 = f.getContainerType()
- )
- or
- // value flow through a callable
- exists(Node arg |
- parameterFlow(p, arg, t1, t2, summary, config) and
- argumentValueFlowsThrough(_, arg, TContentNone(), TContentNone(), node) and
- compatibleTypes(t2, getErasedNodeTypeBound(node))
- )
- or
- // flow through a callable
- exists(Node arg, Summary s1, Summary s2 |
- parameterFlow(p, arg, _, _, s1, config) and
- argumentFlowsThrough(arg, node, t1, t2, s2, config) and
- summary = s1.compose(s2)
- )
- )
-}
-
-private predicate viableParamArgCand(
- DataFlowCall call, ParameterNode p, ArgumentNode arg, Configuration config
-) {
- viableParamArg(call, p, arg) and
- nodeCand1(arg, unbind(config)) and
- nodeCand1(p, config) and
- not outBarrier(arg, config) and
- not inBarrier(p, config)
-}
-
-pragma[nomagic]
-private predicate parameterFlowReturn(
- ParameterNode p, ReturnNodeExt ret, ReturnKindExt kind, DataFlowType t1, DataFlowType t2,
- Summary summary, Configuration config
-) {
- parameterFlow(p, ret, t1, t2, summary, config) and
- kind = ret.getKind() and
- not summary.isPartial() and
- not exists(int pos | kind.(ParamUpdateReturnKind).getPosition() = pos and p.isParameterOf(_, pos))
-}
-
-pragma[nomagic]
-private predicate argumentFlowsThrough0(
- DataFlowCall call, ArgumentNode arg, ReturnKindExt kind, DataFlowType t1, DataFlowType t2,
- Summary summary, Configuration config
-) {
- exists(ParameterNode p |
- viableParamArgCand(call, p, arg, config) and
- parameterFlowReturn(p, _, kind, t1, t2, summary, config)
- )
-}
-
-/**
- * Holds if data can flow from `arg` to `out` through a call with `summary`
- * representing the flow path. The type of the tracked object is `t2`, and if
- * the summary includes a store step, `t1` is the tracked type just prior to the
- * store, that is, the type of the stored object, otherwise `t1` is equal to `t2`.
- */
-private predicate argumentFlowsThrough(
- ArgumentNode arg, Node out, DataFlowType t1, DataFlowType t2, Summary summary,
- Configuration config
-) {
- nodeCand1(out, unbind(config)) and
- not inBarrier(out, config) and
- compatibleTypes(t2, getErasedNodeTypeBound(out)) and
- exists(DataFlowCall call, ReturnKindExt kind |
- argumentFlowsThrough0(call, arg, kind, t1, t2, summary, config) and
- out = kind.getAnOutNode(call)
- )
+ read(n1, f, n2)
}
pragma[noinline]
-private predicate readStoreNode(
- DataFlowCall call, ArgumentNode arg, Content f1, Configuration config
-) {
- exists(Content f2, Node out |
- argumentValueFlowsThrough(call, arg, TContentSome(f1), TContentSome(f2), out) and
- nodeCand1(out, config) and
- readStoreCand1(f2, unbind(config))
- )
+private predicate localFlowStepNodeCand1(Node node1, Node node2, Configuration config) {
+ nodeCand1(node1, config) and
+ localFlowStep(node1, node2, config)
}
-private newtype TNodeExt =
- TNormalNode(Node node) { nodeCand1(node, _) } or
- TReadStoreNode(DataFlowCall call, ArgumentNode arg, Content f1, Configuration config) {
- nodeCand1(arg, config) and
- readStoreNode(call, arg, f1, config) and
- readStoreCand1(f1, unbind(config))
- } or
- TReadTaintNode(ArgumentNode arg, Content f, Configuration config) {
- argumentFlowsThrough(arg, _, _, _, TSummaryReadTaint(f), config)
- } or
- TTaintStoreNode(ArgumentNode arg, DataFlowType t, Configuration config) {
- argumentFlowsThrough(arg, _, t, _, TSummaryTaintStore(_), config)
- }
-
-/**
- * An extended data flow node. Either a normal node, or an intermediate node
- * used to split up a summarized flow steps.
- *
- * This is purely an internal implementation detail.
- */
-abstract private class NodeExt extends TNodeExt {
- /** Gets the underlying (normal) node, if any. */
- abstract Node getNode();
-
- abstract DataFlowType getErasedNodeTypeBound();
-
- abstract DataFlowCallable getEnclosingCallable();
-
- abstract predicate isCand1(Configuration config);
-
- abstract string toString();
-
- abstract predicate hasLocationInfo(
- string filepath, int startline, int startcolumn, int endline, int endcolumn
- );
-}
-
-/** A `Node` at which a cast can occur such that the type should be checked. */
-abstract private class CastingNodeExt extends NodeExt { }
-
-private class NormalNodeExt extends NodeExt, TNormalNode {
- override Node getNode() { this = TNormalNode(result) }
-
- override DataFlowType getErasedNodeTypeBound() {
- result = getErasedRepr(this.getNode().getTypeBound())
- }
-
- override DataFlowCallable getEnclosingCallable() {
- result = this.getNode().getEnclosingCallable()
- }
-
- override predicate isCand1(Configuration config) { nodeCand1(this.getNode(), config) }
-
- override string toString() { result = this.getNode().toString() }
-
- override predicate hasLocationInfo(
- string filepath, int startline, int startcolumn, int endline, int endcolumn
- ) {
- this.getNode().hasLocationInfo(filepath, startline, startcolumn, endline, endcolumn)
- }
-}
-
-private class NormalCastingNodeExt extends CastingNodeExt, NormalNodeExt {
- NormalCastingNodeExt() { this.getNode() instanceof CastingNode }
-}
-
-private class ReadStoreNodeExt extends CastingNodeExt, TReadStoreNode {
- private DataFlowCall call;
- private ArgumentNode arg;
- private Content f1;
- private Configuration config0;
-
- ReadStoreNodeExt() { this = TReadStoreNode(call, arg, f1, config0) }
-
- override Node getNode() { none() }
-
- override DataFlowType getErasedNodeTypeBound() { result = f1.getType() }
-
- override DataFlowCallable getEnclosingCallable() { result = arg.getEnclosingCallable() }
-
- override predicate isCand1(Configuration config) { config = config0 }
-
- override string toString() { result = "(inside) " + call.toString() + " [read " + f1 + "]" }
-
- override predicate hasLocationInfo(
- string filepath, int startline, int startcolumn, int endline, int endcolumn
- ) {
- arg.hasLocationInfo(filepath, startline, startcolumn, endline, endcolumn)
- }
-}
-
-private class ReadTaintNode extends NodeExt, TReadTaintNode {
- private ArgumentNode arg;
- private Content f;
- private Configuration config0;
-
- ReadTaintNode() { this = TReadTaintNode(arg, f, config0) }
-
- override Node getNode() { none() }
-
- override DataFlowType getErasedNodeTypeBound() { result = f.getType() }
-
- override DataFlowCallable getEnclosingCallable() { result = arg.getEnclosingCallable() }
-
- override predicate isCand1(Configuration config) { config = config0 }
-
- override string toString() { result = arg.toString() + " [read taint " + f + "]" }
-
- override predicate hasLocationInfo(
- string filepath, int startline, int startcolumn, int endline, int endcolumn
- ) {
- arg.hasLocationInfo(filepath, startline, startcolumn, endline, endcolumn)
- }
-}
-
-private class TaintStoreNode extends NodeExt, TTaintStoreNode {
- private ArgumentNode arg;
- private DataFlowType t;
- private Configuration config0;
-
- TaintStoreNode() { this = TTaintStoreNode(arg, t, config0) }
-
- override Node getNode() { none() }
-
- override DataFlowType getErasedNodeTypeBound() { result = t }
-
- override DataFlowCallable getEnclosingCallable() { result = arg.getEnclosingCallable() }
-
- override predicate isCand1(Configuration config) { config = config0 }
-
- override string toString() { result = arg.toString() + " [taint store]" }
-
- override predicate hasLocationInfo(
- string filepath, int startline, int startcolumn, int endline, int endcolumn
- ) {
- arg.hasLocationInfo(filepath, startline, startcolumn, endline, endcolumn)
- }
-}
-
-private predicate additionalLocalFlowStepExt(
- NodeExt node1, NodeExt node2, DataFlowType t, Configuration config
-) {
- exists(ArgumentNode arg, Content f |
- node1 = TReadTaintNode(arg, f, config) and
- argumentFlowsThrough(arg, node2.getNode(), _, t, TSummaryReadTaint(f), config)
- )
- or
- node2 = TTaintStoreNode(node1.getNode(), t, config)
+pragma[noinline]
+private predicate additionalLocalFlowStepNodeCand1(Node node1, Node node2, Configuration config) {
+ nodeCand1(node1, config) and
+ additionalLocalFlowStep(node1, node2, config)
}
pragma[nomagic]
-private predicate readExt(NodeExt node1, Content f, NodeExt node2, Configuration config) {
- read(node1.getNode(), f, node2.getNode(), config)
- or
- node2 = TReadStoreNode(_, node1.getNode(), f, config)
- or
- node2 = TReadTaintNode(node1.getNode(), f, config)
-}
-
-pragma[nomagic]
-private predicate storeExt(NodeExt node1, Content f, NodeExt node2, Configuration config) {
- store(node1.getNode(), f, node2.getNode(), config)
- or
- exists(DataFlowCall call, ArgumentNode arg, Content f1, Node n2 |
- node1 = TReadStoreNode(call, arg, f1, config) and
- n2 = node2.getNode() and
- argumentValueFlowsThrough(call, arg, TContentSome(f1), TContentSome(f), n2) and
- nodeCand1(n2, unbind(config)) and
- readStoreCand1(f, unbind(config))
- )
- or
- exists(ArgumentNode arg, DataFlowType t |
- node1 = TTaintStoreNode(arg, t, config) and
- argumentFlowsThrough(arg, node2.getNode(), t, _, TSummaryTaintStore(f), config)
- )
-}
-
-private predicate jumpStepExt(NodeExt node1, NodeExt node2, Configuration config) {
- jumpStep(node1.getNode(), node2.getNode(), config)
-}
-
-private predicate additionalJumpStepExt(NodeExt node1, NodeExt node2, Configuration config) {
- additionalJumpStep(node1.getNode(), node2.getNode(), config)
-}
-
-private predicate argumentValueFlowsThrough(NodeExt node1, NodeExt node2) {
- argumentValueFlowsThrough(_, node1.getNode(), TContentNone(), TContentNone(), node2.getNode())
-}
-
-private predicate argumentFlowsThrough(
- NodeExt arg, NodeExt out, DataFlowType t, Configuration config
+private predicate viableReturnPosOutNodeCand1(
+ DataFlowCall call, ReturnPosition pos, Node out, Configuration config
) {
- argumentFlowsThrough(arg.getNode(), out.getNode(), _, t, TSummaryTaint(), config)
+ nodeCand1(out, _, config) and
+ viableReturnPosOutNodeCandFwd1(call, pos, out, config)
}
/**
- * Holds if data can flow from `node1` to `node2` in one local step or a step
- * through a callable.
- */
-pragma[noinline]
-private predicate localFlowStepOrFlowThroughCallable(
- NodeExt node1, NodeExt node2, Configuration config
-) {
- exists(Node n1, Node n2 |
- n1 = node1.getNode() and
- n2 = node2.getNode()
- |
- nodeCand1(n1, config) and
- localFlowStep(n1, n2, config)
- or
- nodeCand1(n1, config) and
- argumentValueFlowsThrough(_, n1, TContentNone(), TContentNone(), n2)
- )
-}
-
-/**
- * Holds if data can flow from `node1` to `node2` in one local step or a step
- * through a callable, in both cases using an additional flow step from the
- * configuration.
- */
-pragma[noinline]
-private predicate additionalLocalFlowStepOrFlowThroughCallable(
- NodeExt node1, NodeExt node2, Configuration config
-) {
- exists(Node n1, Node n2 |
- n1 = node1.getNode() and
- n2 = node2.getNode()
- |
- nodeCand1(n1, config) and
- additionalLocalFlowStep(n1, n2, config)
- or
- argumentFlowsThrough(n1, n2, _, _, TSummaryTaint(), config)
- )
- or
- additionalLocalFlowStepExt(node1, node2, _, config)
-}
-
-pragma[noinline]
-private ReturnPosition getReturnPosition1(ReturnNodeExt node, Configuration config) {
- result = getReturnPosition(node) and
- nodeCand1(node, config)
-}
-
-/**
- * Holds if data can flow out of a callable from `node1` to `node2`, either
+ * Holds if data can flow out of `call` from `ret` to `out`, either
* through a `ReturnNode` or through an argument that has been mutated, and
* that this step is part of a path from a source to a sink.
*/
-private predicate flowOutOfCallableNodeCand1(ReturnNodeExt node1, Node node2, Configuration config) {
- nodeCand1(node2, config) and
- not outBarrier(node1, config) and
- not inBarrier(node2, config) and
- exists(DataFlowCall call, ReturnKindExt kind |
- getReturnPosition1(node1, unbind(config)) = viableReturnPos(call, kind) and
- node2 = kind.getAnOutNode(call)
- )
+pragma[nomagic]
+private predicate flowOutOfCallNodeCand1(
+ DataFlowCall call, ReturnNodeExt ret, Node out, Configuration config
+) {
+ viableReturnPosOutNodeCand1(call, getReturnPosition(ret), out, config) and
+ nodeCand1(ret, config) and
+ not outBarrier(ret, config) and
+ not inBarrier(out, config)
+}
+
+pragma[nomagic]
+private predicate viableParamArgNodeCand1(
+ DataFlowCall call, ParameterNode p, ArgumentNode arg, Configuration config
+) {
+ viableParamArgNodeCandFwd1(call, p, arg, config) and
+ nodeCand1(arg, config)
}
/**
- * Holds if data can flow into a callable and that this step is part of a
+ * Holds if data can flow into `call` and that this step is part of a
* path from a source to a sink.
*/
-private predicate flowIntoCallableNodeCand1(
- ArgumentNode node1, ParameterNode node2, Configuration config
+pragma[nomagic]
+private predicate flowIntoCallNodeCand1(
+ DataFlowCall call, ArgumentNode arg, ParameterNode p, Configuration config
) {
- viableParamArgCand(_, node2, node1, config)
+ viableParamArgNodeCand1(call, p, arg, config) and
+ nodeCand1(p, config) and
+ not outBarrier(arg, config) and
+ not inBarrier(p, config)
}
/**
@@ -999,7 +647,7 @@ private predicate flowIntoCallableNodeCand1(
private int branch(Node n1, Configuration conf) {
result =
strictcount(Node n |
- flowOutOfCallableNodeCand1(n1, n, conf) or flowIntoCallableNodeCand1(n1, n, conf)
+ flowOutOfCallNodeCand1(_, n1, n, conf) or flowIntoCallNodeCand1(_, n1, n, conf)
)
}
@@ -1011,117 +659,126 @@ private int branch(Node n1, Configuration conf) {
private int join(Node n2, Configuration conf) {
result =
strictcount(Node n |
- flowOutOfCallableNodeCand1(n, n2, conf) or flowIntoCallableNodeCand1(n, n2, conf)
+ flowOutOfCallNodeCand1(_, n, n2, conf) or flowIntoCallNodeCand1(_, n, n2, conf)
)
}
/**
- * Holds if data can flow out of a callable from `node1` to `node2`, either
+ * Holds if data can flow out of `call` from `ret` to `out`, either
* through a `ReturnNode` or through an argument that has been mutated, and
* that this step is part of a path from a source to a sink. The
* `allowsFieldFlow` flag indicates whether the branching is within the limit
* specified by the configuration.
*/
-private predicate flowOutOfCallableNodeCand1(
- NodeExt node1, NodeExt node2, boolean allowsFieldFlow, Configuration config
+pragma[nomagic]
+private predicate flowOutOfCallNodeCand1(
+ DataFlowCall call, ReturnNodeExt ret, Node out, boolean allowsFieldFlow, Configuration config
) {
- exists(ReturnNodeExt n1, Node n2 |
- n1 = node1.getNode() and
- n2 = node2.getNode() and
- flowOutOfCallableNodeCand1(n1, n2, config) and
- exists(int b, int j |
- b = branch(n1, config) and
- j = join(n2, config) and
- if b.minimum(j) <= config.fieldFlowBranchLimit()
- then allowsFieldFlow = true
- else allowsFieldFlow = false
- )
+ flowOutOfCallNodeCand1(call, ret, out, config) and
+ exists(int b, int j |
+ b = branch(ret, config) and
+ j = join(out, config) and
+ if b.minimum(j) <= config.fieldFlowBranchLimit()
+ then allowsFieldFlow = true
+ else allowsFieldFlow = false
)
}
/**
- * Holds if data can flow into a callable and that this step is part of a
+ * Holds if data can flow into `call` and that this step is part of a
* path from a source to a sink. The `allowsFieldFlow` flag indicates whether
* the branching is within the limit specified by the configuration.
*/
-private predicate flowIntoCallableNodeCand1(
- NodeExt node1, NodeExt node2, boolean allowsFieldFlow, Configuration config
+pragma[nomagic]
+private predicate flowIntoCallNodeCand1(
+ DataFlowCall call, ArgumentNode arg, ParameterNode p, boolean allowsFieldFlow,
+ Configuration config
) {
- exists(ArgumentNode n1, ParameterNode n2 |
- n1 = node1.getNode() and
- n2 = node2.getNode() and
- flowIntoCallableNodeCand1(n1, n2, config) and
- exists(int b, int j |
- b = branch(n1, config) and
- j = join(n2, config) and
- if b.minimum(j) <= config.fieldFlowBranchLimit()
- then allowsFieldFlow = true
- else allowsFieldFlow = false
- )
+ flowIntoCallNodeCand1(call, arg, p, config) and
+ exists(int b, int j |
+ b = branch(arg, config) and
+ j = join(p, config) and
+ if b.minimum(j) <= config.fieldFlowBranchLimit()
+ then allowsFieldFlow = true
+ else allowsFieldFlow = false
)
}
/**
- * Holds if `node` is part of a path from a source to a sink in the given
- * configuration taking simple call contexts into consideration.
+ * Holds if `node` is reachable from a source in the configuration `config`.
+ * The Boolean `stored` records whether the tracked value is stored into a
+ * field of `node`.
+ *
+ * The Boolean `fromArg` records whether the node is reached through an
+ * argument in a call, and if so, `argStored` records whether the tracked
+ * value was stored into a field of the argument.
*/
-private predicate nodeCandFwd2(NodeExt node, boolean fromArg, boolean stored, Configuration config) {
- nodeCand1(node.getNode(), config) and
- config.isSource(node.getNode()) and
+private predicate nodeCandFwd2(
+ Node node, boolean fromArg, BooleanOption argStored, boolean stored, Configuration config
+) {
+ nodeCand1(node, config) and
+ config.isSource(node) and
fromArg = false and
+ argStored = TBooleanNone() and
stored = false
or
- node.isCand1(unbind(config)) and
+ nodeCand1(node, unbind(config)) and
(
- exists(NodeExt mid |
- nodeCandFwd2(mid, fromArg, stored, config) and
- localFlowStepOrFlowThroughCallable(mid, node, config)
+ exists(Node mid |
+ nodeCandFwd2(mid, fromArg, argStored, stored, config) and
+ localFlowStepNodeCand1(mid, node, config)
)
or
- exists(NodeExt mid |
- nodeCandFwd2(mid, fromArg, stored, config) and
- additionalLocalFlowStepOrFlowThroughCallable(mid, node, config) and
+ exists(Node mid |
+ nodeCandFwd2(mid, fromArg, argStored, stored, config) and
+ additionalLocalFlowStepNodeCand1(mid, node, config) and
stored = false
)
or
- exists(NodeExt mid |
- nodeCandFwd2(mid, _, stored, config) and
- jumpStepExt(mid, node, config) and
- fromArg = false
+ exists(Node mid |
+ nodeCandFwd2(mid, _, _, stored, config) and
+ jumpStep(mid, node, config) and
+ fromArg = false and
+ argStored = TBooleanNone()
)
or
- exists(NodeExt mid |
- nodeCandFwd2(mid, _, stored, config) and
- additionalJumpStepExt(mid, node, config) and
+ exists(Node mid |
+ nodeCandFwd2(mid, _, _, stored, config) and
+ additionalJumpStep(mid, node, config) and
fromArg = false and
+ argStored = TBooleanNone() and
stored = false
)
or
// store
- exists(NodeExt mid, Content f |
- nodeCandFwd2(mid, fromArg, _, config) and
- storeExt(mid, f, node, config) and
+ exists(Node mid, Content f |
+ nodeCandFwd2(mid, fromArg, argStored, _, config) and
+ store(mid, f, node, config) and
stored = true
)
or
// read
exists(Content f |
- nodeCandFwd2Read(f, node, fromArg, config) and
- storeCandFwd2(f, stored, config)
+ nodeCandFwd2Read(f, node, fromArg, argStored, config) and
+ nodeCandFwd2IsStored(f, stored, config)
)
or
- exists(NodeExt mid, boolean allowsFieldFlow |
- nodeCandFwd2(mid, _, stored, config) and
- flowIntoCallableNodeCand1(mid, node, allowsFieldFlow, config) and
- fromArg = true and
- (stored = false or allowsFieldFlow = true)
- )
+ // flow into a callable
+ nodeCandFwd2In(_, node, _, _, stored, config) and
+ fromArg = true and
+ if parameterThroughFlowNodeCand1(node, config)
+ then argStored = TBooleanSome(stored)
+ else argStored = TBooleanNone()
or
- exists(NodeExt mid, boolean allowsFieldFlow |
- nodeCandFwd2(mid, false, stored, config) and
- flowOutOfCallableNodeCand1(mid, node, allowsFieldFlow, config) and
- fromArg = false and
- (stored = false or allowsFieldFlow = true)
+ // flow out of a callable
+ exists(DataFlowCall call |
+ nodeCandFwd2Out(call, node, fromArg, argStored, stored, config) and
+ fromArg = false
+ or
+ exists(boolean argStored0 |
+ nodeCandFwd2OutFromArg(call, node, argStored0, stored, config) and
+ nodeCandFwd2IsEntered(call, fromArg, argStored, argStored0, config)
+ )
)
)
}
@@ -1130,86 +787,148 @@ private predicate nodeCandFwd2(NodeExt node, boolean fromArg, boolean stored, Co
* Holds if `f` is the target of a store in the flow covered by `nodeCandFwd2`.
*/
pragma[noinline]
-private predicate storeCandFwd2(Content f, boolean stored, Configuration config) {
- exists(NodeExt mid, NodeExt node |
+private predicate nodeCandFwd2IsStored(Content f, boolean stored, Configuration config) {
+ exists(Node mid, Node node |
useFieldFlow(config) and
- node.isCand1(unbind(config)) and
- nodeCandFwd2(mid, _, stored, config) and
- storeExt(mid, f, node, config)
+ nodeCand1(node, unbind(config)) and
+ nodeCandFwd2(mid, _, _, stored, config) and
+ store(mid, f, node, config)
)
}
pragma[nomagic]
-private predicate nodeCandFwd2Read(Content f, NodeExt node, boolean fromArg, Configuration config) {
- exists(NodeExt mid |
- nodeCandFwd2(mid, fromArg, true, config) and
- readExt(mid, f, node, config)
+private predicate nodeCandFwd2Read(
+ Content f, Node node, boolean fromArg, BooleanOption argStored, Configuration config
+) {
+ exists(Node mid |
+ nodeCandFwd2(mid, fromArg, argStored, true, config) and
+ read(mid, f, node, config)
+ )
+}
+
+pragma[nomagic]
+private predicate nodeCandFwd2In(
+ DataFlowCall call, ParameterNode p, boolean fromArg, BooleanOption argStored, boolean stored,
+ Configuration config
+) {
+ exists(ArgumentNode arg, boolean allowsFieldFlow |
+ nodeCandFwd2(arg, fromArg, argStored, stored, config) and
+ flowIntoCallNodeCand1(call, arg, p, allowsFieldFlow, config)
+ |
+ stored = false or allowsFieldFlow = true
+ )
+}
+
+pragma[nomagic]
+private predicate nodeCandFwd2Out(
+ DataFlowCall call, Node out, boolean fromArg, BooleanOption argStored, boolean stored,
+ Configuration config
+) {
+ exists(ReturnNodeExt ret, boolean allowsFieldFlow |
+ nodeCandFwd2(ret, fromArg, argStored, stored, config) and
+ flowOutOfCallNodeCand1(call, ret, out, allowsFieldFlow, config)
+ |
+ stored = false or allowsFieldFlow = true
+ )
+}
+
+pragma[nomagic]
+private predicate nodeCandFwd2OutFromArg(
+ DataFlowCall call, Node out, boolean argStored, boolean stored, Configuration config
+) {
+ nodeCandFwd2Out(call, out, true, TBooleanSome(argStored), stored, config)
+}
+
+/**
+ * Holds if an argument to `call` is reached in the flow covered by `nodeCandFwd2`.
+ */
+pragma[nomagic]
+private predicate nodeCandFwd2IsEntered(
+ DataFlowCall call, boolean fromArg, BooleanOption argStored, boolean stored, Configuration config
+) {
+ exists(ParameterNode p |
+ nodeCandFwd2In(call, p, fromArg, argStored, stored, config) and
+ parameterThroughFlowNodeCand1(p, config)
)
}
/**
- * Holds if `node` is part of a path from a source to a sink in the given
- * configuration taking simple call contexts into consideration.
+ * Holds if `node` is part of a path from a source to a sink in the
+ * configuration `config`. The Boolean `read` records whether the tracked
+ * value must be read from a field of `node` in order to reach a sink.
+ *
+ * The Boolean `toReturn` records whether the node must be returned from
+ * the enclosing callable in order to reach a sink, and if so, `returnRead`
+ * records whether a field must be read from the returned value.
*/
-private predicate nodeCand2(NodeExt node, boolean toReturn, boolean read, Configuration config) {
- nodeCandFwd2(node, _, false, config) and
- config.isSink(node.getNode()) and
+private predicate nodeCand2(
+ Node node, boolean toReturn, BooleanOption returnRead, boolean read, Configuration config
+) {
+ nodeCandFwd2(node, _, _, false, config) and
+ config.isSink(node) and
toReturn = false and
+ returnRead = TBooleanNone() and
read = false
or
- nodeCandFwd2(node, _, unbindBool(read), unbind(config)) and
+ nodeCandFwd2(node, _, _, unbindBool(read), unbind(config)) and
(
- exists(NodeExt mid |
- localFlowStepOrFlowThroughCallable(node, mid, config) and
- nodeCand2(mid, toReturn, read, config)
+ exists(Node mid |
+ localFlowStepNodeCand1(node, mid, config) and
+ nodeCand2(mid, toReturn, returnRead, read, config)
)
or
- exists(NodeExt mid |
- additionalLocalFlowStepOrFlowThroughCallable(node, mid, config) and
- nodeCand2(mid, toReturn, read, config) and
+ exists(Node mid |
+ additionalLocalFlowStepNodeCand1(node, mid, config) and
+ nodeCand2(mid, toReturn, returnRead, read, config) and
read = false
)
or
- exists(NodeExt mid |
- jumpStepExt(node, mid, config) and
- nodeCand2(mid, _, read, config) and
- toReturn = false
+ exists(Node mid |
+ jumpStep(node, mid, config) and
+ nodeCand2(mid, _, _, read, config) and
+ toReturn = false and
+ returnRead = TBooleanNone()
)
or
- exists(NodeExt mid |
- additionalJumpStepExt(node, mid, config) and
- nodeCand2(mid, _, read, config) and
+ exists(Node mid |
+ additionalJumpStep(node, mid, config) and
+ nodeCand2(mid, _, _, read, config) and
toReturn = false and
+ returnRead = TBooleanNone() and
read = false
)
or
// store
exists(Content f |
- nodeCand2Store(f, node, toReturn, read, config) and
- readCand2(f, read, config)
+ nodeCand2Store(f, node, toReturn, returnRead, read, config) and
+ nodeCand2IsRead(f, read, config)
)
or
// read
- exists(NodeExt mid, Content f, boolean read0 |
- readExt(node, f, mid, config) and
- storeCandFwd2(f, unbindBool(read0), unbind(config)) and
- nodeCand2(mid, toReturn, read0, config) and
+ exists(Node mid, Content f, boolean read0 |
+ read(node, f, mid, config) and
+ nodeCandFwd2IsStored(f, unbindBool(read0), unbind(config)) and
+ nodeCand2(mid, toReturn, returnRead, read0, config) and
read = true
)
or
- exists(NodeExt mid, boolean allowsFieldFlow |
- flowIntoCallableNodeCand1(node, mid, allowsFieldFlow, config) and
- nodeCand2(mid, false, read, config) and
- toReturn = false and
- (read = false or allowsFieldFlow = true)
+ // flow into a callable
+ exists(DataFlowCall call |
+ nodeCand2In(call, node, toReturn, returnRead, read, config) and
+ toReturn = false
+ or
+ exists(boolean returnRead0 |
+ nodeCand2InToReturn(call, node, returnRead0, read, config) and
+ nodeCand2IsReturned(call, toReturn, returnRead, returnRead0, config)
+ )
)
or
- exists(NodeExt mid, boolean allowsFieldFlow |
- flowOutOfCallableNodeCand1(node, mid, allowsFieldFlow, config) and
- nodeCand2(mid, _, read, config) and
- toReturn = true and
- (read = false or allowsFieldFlow = true)
- )
+ // flow out of a callable
+ nodeCand2Out(_, node, _, _, read, config) and
+ toReturn = true and
+ if nodeCandFwd2(node, true, TBooleanSome(_), unbindBool(read), config)
+ then returnRead = TBooleanSome(read)
+ else returnRead = TBooleanNone()
)
}
@@ -1217,32 +936,36 @@ private predicate nodeCand2(NodeExt node, boolean toReturn, boolean read, Config
* Holds if `f` is the target of a read in the flow covered by `nodeCand2`.
*/
pragma[noinline]
-private predicate readCand2(Content f, boolean read, Configuration config) {
- exists(NodeExt mid, NodeExt node |
+private predicate nodeCand2IsRead(Content f, boolean read, Configuration config) {
+ exists(Node mid, Node node |
useFieldFlow(config) and
- nodeCandFwd2(node, _, true, unbind(config)) and
- readExt(node, f, mid, config) and
- storeCandFwd2(f, unbindBool(read), unbind(config)) and
- nodeCand2(mid, _, read, config)
+ nodeCandFwd2(node, _, _, true, unbind(config)) and
+ read(node, f, mid, config) and
+ nodeCandFwd2IsStored(f, unbindBool(read), unbind(config)) and
+ nodeCand2(mid, _, _, read, config)
)
}
pragma[nomagic]
private predicate nodeCand2Store(
- Content f, NodeExt node, boolean toReturn, boolean stored, Configuration config
+ Content f, Node node, boolean toReturn, BooleanOption returnRead, boolean stored,
+ Configuration config
) {
- exists(NodeExt mid |
- storeExt(node, f, mid, config) and
- nodeCand2(mid, toReturn, true, config) and
- nodeCandFwd2(node, _, stored, unbind(config))
+ exists(Node mid |
+ store(node, f, mid, config) and
+ nodeCand2(mid, toReturn, returnRead, true, config) and
+ nodeCandFwd2(node, _, _, stored, unbind(config))
)
}
+/**
+ * Holds if `f` is the target of a store in the flow covered by `nodeCand2`.
+ */
pragma[nomagic]
-private predicate storeCand2(Content f, boolean stored, Configuration conf) {
- exists(NodeExt node |
- nodeCand2Store(f, node, _, stored, conf) and
- nodeCand2(node, _, stored, conf)
+private predicate nodeCand2IsStored(Content f, boolean stored, Configuration conf) {
+ exists(Node node |
+ nodeCand2Store(f, node, _, _, stored, conf) and
+ nodeCand2(node, _, _, stored, conf)
)
}
@@ -1251,29 +974,76 @@ private predicate storeCand2(Content f, boolean stored, Configuration conf) {
* covered by `nodeCand2`.
*/
pragma[noinline]
-private predicate readStoreCand(Content f, Configuration conf) {
+private predicate nodeCand2IsReadAndStored(Content f, Configuration conf) {
exists(boolean apNonEmpty |
- storeCand2(f, apNonEmpty, conf) and
- readCand2(f, apNonEmpty, conf)
+ nodeCand2IsStored(f, apNonEmpty, conf) and
+ nodeCand2IsRead(f, apNonEmpty, conf)
)
}
-private predicate nodeCand2(NodeExt node, Configuration config) { nodeCand2(node, _, _, config) }
+pragma[nomagic]
+private predicate nodeCand2Out(
+ DataFlowCall call, ReturnNodeExt ret, boolean toReturn, BooleanOption returnRead, boolean read,
+ Configuration config
+) {
+ exists(Node out, boolean allowsFieldFlow |
+ nodeCand2(out, toReturn, returnRead, read, config) and
+ flowOutOfCallNodeCand1(call, ret, out, allowsFieldFlow, config)
+ |
+ read = false or allowsFieldFlow = true
+ )
+}
pragma[nomagic]
-private predicate flowOutOfCallableNodeCand2(
- NodeExt node1, NodeExt node2, boolean allowsFieldFlow, Configuration config
+private predicate nodeCand2In(
+ DataFlowCall call, ArgumentNode arg, boolean toReturn, BooleanOption returnRead, boolean read,
+ Configuration config
) {
- flowOutOfCallableNodeCand1(node1, node2, allowsFieldFlow, config) and
+ exists(ParameterNode p, boolean allowsFieldFlow |
+ nodeCand2(p, toReturn, returnRead, read, config) and
+ flowIntoCallNodeCand1(call, arg, p, allowsFieldFlow, config)
+ |
+ read = false or allowsFieldFlow = true
+ )
+}
+
+pragma[nomagic]
+private predicate nodeCand2InToReturn(
+ DataFlowCall call, ArgumentNode arg, boolean returnRead, boolean read, Configuration config
+) {
+ nodeCand2In(call, arg, true, TBooleanSome(returnRead), read, config)
+}
+
+/**
+ * Holds if an output from `call` is reached in the flow covered by `nodeCand2`.
+ */
+pragma[nomagic]
+private predicate nodeCand2IsReturned(
+ DataFlowCall call, boolean toReturn, BooleanOption returnRead, boolean read, Configuration config
+) {
+ exists(ReturnNodeExt ret |
+ nodeCand2Out(call, ret, toReturn, returnRead, read, config) and
+ nodeCandFwd2(ret, true, TBooleanSome(_), read, config)
+ )
+}
+
+private predicate nodeCand2(Node node, Configuration config) { nodeCand2(node, _, _, _, config) }
+
+pragma[nomagic]
+private predicate flowOutOfCallNodeCand2(
+ DataFlowCall call, ReturnNodeExt node1, Node node2, boolean allowsFieldFlow, Configuration config
+) {
+ flowOutOfCallNodeCand1(call, node1, node2, allowsFieldFlow, config) and
nodeCand2(node2, config) and
nodeCand2(node1, unbind(config))
}
pragma[nomagic]
-private predicate flowIntoCallableNodeCand2(
- NodeExt node1, NodeExt node2, boolean allowsFieldFlow, Configuration config
+private predicate flowIntoCallNodeCand2(
+ DataFlowCall call, ArgumentNode node1, ParameterNode node2, boolean allowsFieldFlow,
+ Configuration config
) {
- flowIntoCallableNodeCand1(node1, node2, allowsFieldFlow, config) and
+ flowIntoCallNodeCand1(call, node1, node2, allowsFieldFlow, config) and
nodeCand2(node2, config) and
nodeCand2(node1, unbind(config))
}
@@ -1284,7 +1054,7 @@ private module LocalFlowBigStep {
* flow steps in a dataflow path.
*/
private predicate localFlowEntry(Node node, Configuration config) {
- nodeCand2(TNormalNode(node), config) and
+ nodeCand2(node, config) and
(
config.isSource(node) or
jumpStep(_, node, config) or
@@ -1292,7 +1062,7 @@ private module LocalFlowBigStep {
node instanceof ParameterNode or
node instanceof OutNode or
node instanceof PostUpdateNode or
- readDirect(_, _, node) or
+ read(_, _, node) or
node instanceof CastNode
)
}
@@ -1302,15 +1072,13 @@ private module LocalFlowBigStep {
* flow steps in a dataflow path.
*/
private predicate localFlowExit(Node node, Configuration config) {
- exists(Node next | nodeCand2(TNormalNode(next), config) |
+ exists(Node next | nodeCand2(next, config) |
jumpStep(node, next, config) or
additionalJumpStep(node, next, config) or
- flowIntoCallableNodeCand1(node, next, config) or
- flowOutOfCallableNodeCand1(node, next, config) or
- argumentFlowsThrough(node, next, _, _, _, config) or
- argumentValueFlowsThrough(_, node, TContentNone(), TContentNone(), next) or
- storeDirect(node, _, next) or
- readDirect(node, _, next)
+ flowIntoCallNodeCand1(_, node, next, config) or
+ flowOutOfCallNodeCand1(_, node, next, config) or
+ store(node, _, next) or
+ read(node, _, next)
)
or
node instanceof CastNode
@@ -1318,6 +1086,13 @@ private module LocalFlowBigStep {
config.isSink(node)
}
+ pragma[noinline]
+ private predicate additionalLocalFlowStepNodeCand2(Node node1, Node node2, Configuration config) {
+ additionalLocalFlowStepNodeCand1(node1, node2, config) and
+ nodeCand2(node1, _, _, false, config) and
+ nodeCand2(node2, _, _, false, unbind(config))
+ }
+
/**
* Holds if the local path from `node1` to `node2` is a prefix of a maximal
* subsequence of local flow steps in a dataflow path.
@@ -1334,33 +1109,33 @@ private module LocalFlowBigStep {
(
localFlowEntry(node1, config) and
(
- localFlowStep(node1, node2, config) and
+ localFlowStepNodeCand1(node1, node2, config) and
preservesValue = true and
t = getErasedNodeTypeBound(node1)
or
- additionalLocalFlowStep(node1, node2, config) and
+ additionalLocalFlowStepNodeCand2(node1, node2, config) and
preservesValue = false and
t = getErasedNodeTypeBound(node2)
) and
node1 != node2 and
cc.relevantFor(node1.getEnclosingCallable()) and
not isUnreachableInCall(node1, cc.(LocalCallContextSpecificCall).getCall()) and
- nodeCand2(TNormalNode(node2), unbind(config))
+ nodeCand2(node2, unbind(config))
or
exists(Node mid |
localFlowStepPlus(node1, mid, preservesValue, t, config, cc) and
- localFlowStep(mid, node2, config) and
+ localFlowStepNodeCand1(mid, node2, config) and
not mid instanceof CastNode and
- nodeCand2(TNormalNode(node2), unbind(config))
+ nodeCand2(node2, unbind(config))
)
or
exists(Node mid |
localFlowStepPlus(node1, mid, _, _, config, cc) and
- additionalLocalFlowStep(mid, node2, config) and
+ additionalLocalFlowStepNodeCand2(mid, node2, config) and
not mid instanceof CastNode and
preservesValue = false and
t = getErasedNodeTypeBound(node2) and
- nodeCand2(TNormalNode(node2), unbind(config))
+ nodeCand2(node2, unbind(config))
)
)
}
@@ -1371,307 +1146,365 @@ private module LocalFlowBigStep {
*/
pragma[nomagic]
predicate localFlowBigStep(
- Node node1, Node node2, boolean preservesValue, DataFlowType t, Configuration config,
+ Node node1, Node node2, boolean preservesValue, AccessPathFrontNil apf, Configuration config,
LocalCallContext callContext
) {
- localFlowStepPlus(node1, node2, preservesValue, t, config, callContext) and
+ localFlowStepPlus(node1, node2, preservesValue, apf.getType(), config, callContext) and
localFlowExit(node2, config)
}
-
- pragma[nomagic]
- predicate localFlowBigStepExt(
- NodeExt node1, NodeExt node2, boolean preservesValue, AccessPathFrontNil apf,
- Configuration config
- ) {
- localFlowBigStep(node1.getNode(), node2.getNode(), preservesValue, apf.getType(), config, _)
- or
- additionalLocalFlowStepExt(node1, node2, apf.getType(), config) and
- nodeCand2(node1, config) and
- nodeCand2(node2, unbind(config)) and
- preservesValue = false
- }
}
private import LocalFlowBigStep
pragma[nomagic]
-private predicate readExtCand2(NodeExt node1, Content f, NodeExt node2, Configuration config) {
- readExt(node1, f, node2, config) and
- nodeCand2(node1, _, true, unbind(config)) and
+private predicate readCand2(Node node1, Content f, Node node2, Configuration config) {
+ read(node1, f, node2, config) and
+ nodeCand2(node1, _, _, true, unbind(config)) and
nodeCand2(node2, config) and
- readStoreCand(f, unbind(config))
+ nodeCand2IsReadAndStored(f, unbind(config))
}
pragma[nomagic]
-private predicate storeExtCand2(NodeExt node1, Content f, NodeExt node2, Configuration config) {
- storeExt(node1, f, node2, config) and
+private predicate storeCand2(Node node1, Content f, Node node2, Configuration config) {
+ store(node1, f, node2, config) and
nodeCand2(node1, config) and
- nodeCand2(node2, _, true, unbind(config)) and
- readStoreCand(f, unbind(config))
-}
-
-private newtype TAccessPathFront =
- TFrontNil(DataFlowType t) or
- TFrontHead(Content f)
-
-/**
- * The front of an `AccessPath`. This is either a head or a nil.
- */
-abstract private class AccessPathFront extends TAccessPathFront {
- abstract string toString();
-
- abstract DataFlowType getType();
-
- abstract boolean toBoolNonEmpty();
-
- predicate headUsesContent(Content f) { this = TFrontHead(f) }
-}
-
-private class AccessPathFrontNil extends AccessPathFront, TFrontNil {
- override string toString() {
- exists(DataFlowType t | this = TFrontNil(t) | result = ppReprType(t))
- }
-
- override DataFlowType getType() { this = TFrontNil(result) }
-
- override boolean toBoolNonEmpty() { result = false }
-}
-
-private class AccessPathFrontHead extends AccessPathFront, TFrontHead {
- override string toString() { exists(Content f | this = TFrontHead(f) | result = f.toString()) }
-
- override DataFlowType getType() {
- exists(Content head | this = TFrontHead(head) | result = head.getContainerType())
- }
-
- override boolean toBoolNonEmpty() { result = true }
+ nodeCand2(node2, _, _, true, unbind(config)) and
+ nodeCand2IsReadAndStored(f, unbind(config))
}
/**
- * Holds if data can flow from a source to `node` with the given `apf`.
+ * Holds if `node` is reachable with access path front `apf` from a
+ * source in the configuration `config`.
+ *
+ * The Boolean `fromArg` records whether the node is reached through an
+ * argument in a call, and if so, `argApf` records the front of the
+ * access path of that argument.
*/
pragma[nomagic]
private predicate flowCandFwd(
- NodeExt node, boolean fromArg, AccessPathFront apf, Configuration config
+ Node node, boolean fromArg, AccessPathFrontOption argApf, AccessPathFront apf,
+ Configuration config
) {
- flowCandFwd0(node, fromArg, apf, config) and
- if node instanceof CastingNodeExt
- then compatibleTypes(node.getErasedNodeTypeBound(), apf.getType())
+ flowCandFwd0(node, fromArg, argApf, apf, config) and
+ if node instanceof CastingNode
+ then compatibleTypes(getErasedNodeTypeBound(node), apf.getType())
else any()
}
pragma[nomagic]
private predicate flowCandFwd0(
- NodeExt node, boolean fromArg, AccessPathFront apf, Configuration config
+ Node node, boolean fromArg, AccessPathFrontOption argApf, AccessPathFront apf,
+ Configuration config
) {
- nodeCand2(node, _, false, config) and
- config.isSource(node.getNode()) and
+ nodeCand2(node, _, _, false, config) and
+ config.isSource(node) and
fromArg = false and
- apf = TFrontNil(node.getErasedNodeTypeBound())
+ argApf = TAccessPathFrontNone() and
+ apf = TFrontNil(getErasedNodeTypeBound(node))
or
- exists(NodeExt mid |
- flowCandFwd(mid, fromArg, apf, config) and
- localFlowBigStepExt(mid, node, true, _, config)
+ exists(Node mid |
+ flowCandFwd(mid, fromArg, argApf, apf, config) and
+ localFlowBigStep(mid, node, true, _, config, _)
)
or
- exists(NodeExt mid, AccessPathFrontNil nil |
- flowCandFwd(mid, fromArg, nil, config) and
- localFlowBigStepExt(mid, node, false, apf, config)
+ exists(Node mid, AccessPathFrontNil nil |
+ flowCandFwd(mid, fromArg, argApf, nil, config) and
+ localFlowBigStep(mid, node, false, apf, config, _)
)
or
- nodeCand2(node, unbind(config)) and
- (
- exists(NodeExt mid |
- flowCandFwd(mid, _, apf, config) and
- jumpStepExt(mid, node, config) and
- fromArg = false
- )
- or
- exists(NodeExt mid, AccessPathFrontNil nil |
- flowCandFwd(mid, _, nil, config) and
- additionalJumpStepExt(mid, node, config) and
- fromArg = false and
- apf = TFrontNil(node.getErasedNodeTypeBound())
- )
- or
- exists(NodeExt mid, boolean allowsFieldFlow |
- flowCandFwd(mid, _, apf, config) and
- flowIntoCallableNodeCand2(mid, node, allowsFieldFlow, config) and
- fromArg = true and
- (apf instanceof AccessPathFrontNil or allowsFieldFlow = true)
- )
- or
- exists(NodeExt mid, boolean allowsFieldFlow |
- flowCandFwd(mid, false, apf, config) and
- flowOutOfCallableNodeCand2(mid, node, allowsFieldFlow, config) and
- fromArg = false and
- (apf instanceof AccessPathFrontNil or allowsFieldFlow = true)
- )
- or
- exists(NodeExt mid |
- flowCandFwd(mid, fromArg, apf, config) and
- argumentValueFlowsThrough(mid, node)
- )
- or
- exists(NodeExt mid, AccessPathFrontNil nil, DataFlowType t |
- flowCandFwd(mid, fromArg, nil, config) and
- argumentFlowsThrough(mid, node, t, config) and
- apf = TFrontNil(t)
- )
+ exists(Node mid |
+ flowCandFwd(mid, _, _, apf, config) and
+ nodeCand2(node, unbind(config)) and
+ jumpStep(mid, node, config) and
+ fromArg = false and
+ argApf = TAccessPathFrontNone()
)
or
- exists(NodeExt mid, Content f |
- flowCandFwd(mid, fromArg, _, config) and
- storeExtCand2(mid, f, node, config) and
- nodeCand2(node, _, true, unbind(config)) and
+ exists(Node mid, AccessPathFrontNil nil |
+ flowCandFwd(mid, _, _, nil, config) and
+ nodeCand2(node, unbind(config)) and
+ additionalJumpStep(mid, node, config) and
+ fromArg = false and
+ argApf = TAccessPathFrontNone() and
+ apf = TFrontNil(getErasedNodeTypeBound(node))
+ )
+ or
+ // store
+ exists(Node mid, Content f |
+ flowCandFwd(mid, fromArg, argApf, _, config) and
+ storeCand2(mid, f, node, config) and
+ nodeCand2(node, _, _, true, unbind(config)) and
apf.headUsesContent(f)
)
or
+ // read
exists(Content f |
- flowCandFwdRead(f, node, fromArg, config) and
- consCandFwd(f, apf, config) and
- nodeCand2(node, _, unbindBool(apf.toBoolNonEmpty()), unbind(config))
+ flowCandFwdRead(f, node, fromArg, argApf, config) and
+ flowCandFwdConsCand(f, apf, config) and
+ nodeCand2(node, _, _, unbindBool(apf.toBoolNonEmpty()), unbind(config))
+ )
+ or
+ // flow into a callable
+ flowCandFwdIn(_, node, _, _, apf, config) and
+ fromArg = true and
+ if nodeCand2(node, true, _, unbindBool(apf.toBoolNonEmpty()), config)
+ then argApf = TAccessPathFrontSome(apf)
+ else argApf = TAccessPathFrontNone()
+ or
+ // flow out of a callable
+ exists(DataFlowCall call |
+ flowCandFwdOut(call, node, fromArg, argApf, apf, config) and
+ fromArg = false
+ or
+ exists(AccessPathFront argApf0 |
+ flowCandFwdOutFromArg(call, node, argApf0, apf, config) and
+ flowCandFwdIsEntered(call, fromArg, argApf, argApf0, config)
+ )
)
}
pragma[nomagic]
-private predicate consCandFwd(Content f, AccessPathFront apf, Configuration config) {
- exists(NodeExt mid, NodeExt n |
- flowCandFwd(mid, _, apf, config) and
- storeExtCand2(mid, f, n, config) and
- nodeCand2(n, _, true, unbind(config)) and
+private predicate flowCandFwdConsCand(Content f, AccessPathFront apf, Configuration config) {
+ exists(Node mid, Node n |
+ flowCandFwd(mid, _, _, apf, config) and
+ storeCand2(mid, f, n, config) and
+ nodeCand2(n, _, _, true, unbind(config)) and
compatibleTypes(apf.getType(), f.getType())
)
}
pragma[nomagic]
-private predicate flowCandFwdRead(Content f, NodeExt node, boolean fromArg, Configuration config) {
- exists(NodeExt mid, AccessPathFrontHead apf0 |
- flowCandFwd(mid, fromArg, apf0, config) and
- readExtCand2(mid, f, node, config) and
+private predicate flowCandFwdRead(
+ Content f, Node node, boolean fromArg, AccessPathFrontOption argApf, Configuration config
+) {
+ exists(Node mid, AccessPathFrontHead apf0 |
+ flowCandFwd(mid, fromArg, argApf, apf0, config) and
+ readCand2(mid, f, node, config) and
apf0.headUsesContent(f)
)
}
+pragma[nomagic]
+private predicate flowCandFwdIn(
+ DataFlowCall call, ParameterNode p, boolean fromArg, AccessPathFrontOption argApf,
+ AccessPathFront apf, Configuration config
+) {
+ exists(ArgumentNode arg, boolean allowsFieldFlow |
+ flowCandFwd(arg, fromArg, argApf, apf, config) and
+ flowIntoCallNodeCand2(call, arg, p, allowsFieldFlow, config)
+ |
+ apf instanceof AccessPathFrontNil or allowsFieldFlow = true
+ )
+}
+
+pragma[nomagic]
+private predicate flowCandFwdOut(
+ DataFlowCall call, Node node, boolean fromArg, AccessPathFrontOption argApf, AccessPathFront apf,
+ Configuration config
+) {
+ exists(ReturnNodeExt ret, boolean allowsFieldFlow |
+ flowCandFwd(ret, fromArg, argApf, apf, config) and
+ flowOutOfCallNodeCand2(call, ret, node, allowsFieldFlow, config)
+ |
+ apf instanceof AccessPathFrontNil or allowsFieldFlow = true
+ )
+}
+
+pragma[nomagic]
+private predicate flowCandFwdOutFromArg(
+ DataFlowCall call, Node node, AccessPathFront argApf, AccessPathFront apf, Configuration config
+) {
+ flowCandFwdOut(call, node, true, TAccessPathFrontSome(argApf), apf, config)
+}
+
/**
- * Holds if data can flow from a source to `node` with the given `apf` and
- * from there flow to a sink.
+ * Holds if an argument to `call` is reached in the flow covered by `flowCandFwd`.
*/
pragma[nomagic]
-private predicate flowCand(NodeExt node, boolean toReturn, AccessPathFront apf, Configuration config) {
- flowCand0(node, toReturn, apf, config) and
- flowCandFwd(node, _, apf, config)
+private predicate flowCandFwdIsEntered(
+ DataFlowCall call, boolean fromArg, AccessPathFrontOption argApf, AccessPathFront apf,
+ Configuration config
+) {
+ exists(ParameterNode p |
+ flowCandFwdIn(call, p, fromArg, argApf, apf, config) and
+ nodeCand2(p, true, TBooleanSome(_), unbindBool(apf.toBoolNonEmpty()), config)
+ )
+}
+
+/**
+ * Holds if `node` with access path front `apf` is part of a path from a
+ * source to a sink in the configuration `config`.
+ *
+ * The Boolean `toReturn` records whether the node must be returned from
+ * the enclosing callable in order to reach a sink, and if so, `returnApf`
+ * records the front of the access path of the returned value.
+ */
+pragma[nomagic]
+private predicate flowCand(
+ Node node, boolean toReturn, AccessPathFrontOption returnApf, AccessPathFront apf,
+ Configuration config
+) {
+ flowCand0(node, toReturn, returnApf, apf, config) and
+ flowCandFwd(node, _, _, apf, config)
}
pragma[nomagic]
private predicate flowCand0(
- NodeExt node, boolean toReturn, AccessPathFront apf, Configuration config
+ Node node, boolean toReturn, AccessPathFrontOption returnApf, AccessPathFront apf,
+ Configuration config
) {
- flowCandFwd(node, _, apf, config) and
- config.isSink(node.getNode()) and
+ flowCandFwd(node, _, _, apf, config) and
+ config.isSink(node) and
toReturn = false and
+ returnApf = TAccessPathFrontNone() and
apf instanceof AccessPathFrontNil
or
- exists(NodeExt mid |
- localFlowBigStepExt(node, mid, true, _, config) and
- flowCand(mid, toReturn, apf, config)
+ exists(Node mid |
+ localFlowBigStep(node, mid, true, _, config, _) and
+ flowCand(mid, toReturn, returnApf, apf, config)
)
or
- exists(NodeExt mid, AccessPathFrontNil nil |
- flowCandFwd(node, _, apf, config) and
- localFlowBigStepExt(node, mid, false, _, config) and
- flowCand(mid, toReturn, nil, config) and
+ exists(Node mid, AccessPathFrontNil nil |
+ flowCandFwd(node, _, _, apf, config) and
+ localFlowBigStep(node, mid, false, _, config, _) and
+ flowCand(mid, toReturn, returnApf, nil, config) and
apf instanceof AccessPathFrontNil
)
or
- exists(NodeExt mid |
- jumpStepExt(node, mid, config) and
- flowCand(mid, _, apf, config) and
- toReturn = false
+ exists(Node mid |
+ jumpStep(node, mid, config) and
+ flowCand(mid, _, _, apf, config) and
+ toReturn = false and
+ returnApf = TAccessPathFrontNone()
)
or
- exists(NodeExt mid, AccessPathFrontNil nil |
- flowCandFwd(node, _, apf, config) and
- additionalJumpStepExt(node, mid, config) and
- flowCand(mid, _, nil, config) and
+ exists(Node mid, AccessPathFrontNil nil |
+ flowCandFwd(node, _, _, apf, config) and
+ additionalJumpStep(node, mid, config) and
+ flowCand(mid, _, _, nil, config) and
toReturn = false and
+ returnApf = TAccessPathFrontNone() and
apf instanceof AccessPathFrontNil
)
or
- exists(NodeExt mid, boolean allowsFieldFlow |
- flowIntoCallableNodeCand2(node, mid, allowsFieldFlow, config) and
- flowCand(mid, false, apf, config) and
- toReturn = false and
- (apf instanceof AccessPathFrontNil or allowsFieldFlow = true)
- )
- or
- exists(NodeExt mid, boolean allowsFieldFlow |
- flowOutOfCallableNodeCand2(node, mid, allowsFieldFlow, config) and
- flowCand(mid, _, apf, config) and
- toReturn = true and
- (apf instanceof AccessPathFrontNil or allowsFieldFlow = true)
- )
- or
- exists(NodeExt mid |
- argumentValueFlowsThrough(node, mid) and
- flowCand(mid, toReturn, apf, config)
- )
- or
- exists(NodeExt mid, AccessPathFrontNil nil |
- argumentFlowsThrough(node, mid, _, config) and
- flowCand(mid, toReturn, nil, config) and
- apf instanceof AccessPathFrontNil and
- flowCandFwd(node, _, apf, config)
- )
- or
+ // store
exists(Content f, AccessPathFrontHead apf0 |
- flowCandStore(node, f, toReturn, apf0, config) and
+ flowCandStore(node, f, toReturn, returnApf, apf0, config) and
apf0.headUsesContent(f) and
- consCand(f, apf, config)
+ flowCandConsCand(f, apf, config)
)
or
+ // read
exists(Content f, AccessPathFront apf0 |
- flowCandRead(node, f, toReturn, apf0, config) and
- consCandFwd(f, apf0, config) and
+ flowCandRead(node, f, toReturn, returnApf, apf0, config) and
+ flowCandFwdConsCand(f, apf0, config) and
apf.headUsesContent(f)
)
+ or
+ // flow into a callable
+ exists(DataFlowCall call |
+ flowCandIn(call, node, toReturn, returnApf, apf, config) and
+ toReturn = false
+ or
+ exists(AccessPathFront returnApf0 |
+ flowCandInToReturn(call, node, returnApf0, apf, config) and
+ flowCandIsReturned(call, toReturn, returnApf, returnApf0, config)
+ )
+ )
+ or
+ // flow out of a callable
+ flowCandOut(_, node, _, _, apf, config) and
+ toReturn = true and
+ if flowCandFwd(node, true, _, apf, config)
+ then returnApf = TAccessPathFrontSome(apf)
+ else returnApf = TAccessPathFrontNone()
}
pragma[nomagic]
private predicate flowCandRead(
- NodeExt node, Content f, boolean toReturn, AccessPathFront apf0, Configuration config
+ Node node, Content f, boolean toReturn, AccessPathFrontOption returnApf, AccessPathFront apf0,
+ Configuration config
) {
- exists(NodeExt mid |
- readExtCand2(node, f, mid, config) and
- flowCand(mid, toReturn, apf0, config)
+ exists(Node mid |
+ readCand2(node, f, mid, config) and
+ flowCand(mid, toReturn, returnApf, apf0, config)
)
}
pragma[nomagic]
private predicate flowCandStore(
- NodeExt node, Content f, boolean toReturn, AccessPathFrontHead apf0, Configuration config
+ Node node, Content f, boolean toReturn, AccessPathFrontOption returnApf, AccessPathFrontHead apf0,
+ Configuration config
) {
- exists(NodeExt mid |
- storeExtCand2(node, f, mid, config) and
- flowCand(mid, toReturn, apf0, config)
+ exists(Node mid |
+ storeCand2(node, f, mid, config) and
+ flowCand(mid, toReturn, returnApf, apf0, config)
)
}
pragma[nomagic]
-private predicate consCand(Content f, AccessPathFront apf, Configuration config) {
- consCandFwd(f, apf, config) and
- exists(NodeExt n, AccessPathFrontHead apf0 |
- flowCandFwd(n, _, apf0, config) and
+private predicate flowCandConsCand(Content f, AccessPathFront apf, Configuration config) {
+ flowCandFwdConsCand(f, apf, config) and
+ exists(Node n, AccessPathFrontHead apf0 |
+ flowCandFwd(n, _, _, apf0, config) and
apf0.headUsesContent(f) and
- flowCandRead(n, f, _, apf, config)
+ flowCandRead(n, f, _, _, apf, config)
+ )
+}
+
+pragma[nomagic]
+private predicate flowCandOut(
+ DataFlowCall call, ReturnNodeExt ret, boolean toReturn, AccessPathFrontOption returnApf,
+ AccessPathFront apf, Configuration config
+) {
+ exists(Node out, boolean allowsFieldFlow |
+ flowCand(out, toReturn, returnApf, apf, config) and
+ flowOutOfCallNodeCand2(call, ret, out, allowsFieldFlow, config)
+ |
+ apf instanceof AccessPathFrontNil or allowsFieldFlow = true
+ )
+}
+
+pragma[nomagic]
+private predicate flowCandIn(
+ DataFlowCall call, ArgumentNode arg, boolean toReturn, AccessPathFrontOption returnApf,
+ AccessPathFront apf, Configuration config
+) {
+ exists(ParameterNode p, boolean allowsFieldFlow |
+ flowCand(p, toReturn, returnApf, apf, config) and
+ flowIntoCallNodeCand2(call, arg, p, allowsFieldFlow, config)
+ |
+ apf instanceof AccessPathFrontNil or allowsFieldFlow = true
+ )
+}
+
+pragma[nomagic]
+private predicate flowCandInToReturn(
+ DataFlowCall call, ArgumentNode arg, AccessPathFront returnApf, AccessPathFront apf,
+ Configuration config
+) {
+ flowCandIn(call, arg, true, TAccessPathFrontSome(returnApf), apf, config)
+}
+
+/**
+ * Holds if an output from `call` is reached in the flow covered by `flowCand`.
+ */
+pragma[nomagic]
+private predicate flowCandIsReturned(
+ DataFlowCall call, boolean toReturn, AccessPathFrontOption returnApf, AccessPathFront apf,
+ Configuration config
+) {
+ exists(ReturnNodeExt ret |
+ flowCandOut(call, ret, toReturn, returnApf, apf, config) and
+ flowCandFwd(ret, true, TAccessPathFrontSome(_), apf, config)
)
}
private newtype TAccessPath =
TNil(DataFlowType t) or
- TConsNil(Content f, DataFlowType t) { consCand(f, TFrontNil(t), _) } or
+ TConsNil(Content f, DataFlowType t) { flowCandConsCand(f, TFrontNil(t), _) } or
TConsCons(Content f1, Content f2, int len) {
- consCand(f1, TFrontHead(f2), _) and len in [2 .. accessPathLimit()]
+ flowCandConsCand(f1, TFrontHead(f2), _) and len in [2 .. accessPathLimit()]
}
/**
@@ -1778,292 +1611,396 @@ private AccessPath pop(Content f, AccessPath ap) { ap.pop(f, result) }
/** Gets the access path obtained by pushing `f` onto `ap`. */
private AccessPath push(Content f, AccessPath ap) { ap = pop(f, result) }
+private newtype TAccessPathOption =
+ TAccessPathNone() or
+ TAccessPathSome(AccessPath ap)
+
+private class AccessPathOption extends TAccessPathOption {
+ string toString() {
+ this = TAccessPathNone() and result = ""
+ or
+ this = TAccessPathSome(any(AccessPath ap | result = ap.toString()))
+ }
+}
+
/**
- * Holds if data can flow from a source to `node` with the given `ap`.
+ * Holds if `node` is reachable with access path `ap` from a source in
+ * the configuration `config`.
+ *
+ * The Boolean `fromArg` records whether the node is reached through an
+ * argument in a call, and if so, `argAp` records the access path of that
+ * argument.
*/
private predicate flowFwd(
- NodeExt node, boolean fromArg, AccessPathFront apf, AccessPath ap, Configuration config
+ Node node, boolean fromArg, AccessPathOption argAp, AccessPathFront apf, AccessPath ap,
+ Configuration config
) {
- flowFwd0(node, fromArg, apf, ap, config) and
- flowCand(node, _, apf, config)
+ flowFwd0(node, fromArg, argAp, apf, ap, config) and
+ flowCand(node, _, _, apf, config)
}
private predicate flowFwd0(
- NodeExt node, boolean fromArg, AccessPathFront apf, AccessPath ap, Configuration config
+ Node node, boolean fromArg, AccessPathOption argAp, AccessPathFront apf, AccessPath ap,
+ Configuration config
) {
- flowCand(node, _, _, config) and
- config.isSource(node.getNode()) and
+ flowCand(node, _, _, _, config) and
+ config.isSource(node) and
fromArg = false and
- ap = TNil(node.getErasedNodeTypeBound()) and
+ argAp = TAccessPathNone() and
+ ap = TNil(getErasedNodeTypeBound(node)) and
apf = ap.(AccessPathNil).getFront()
or
- flowCand(node, _, _, unbind(config)) and
+ flowCand(node, _, _, _, unbind(config)) and
(
- exists(NodeExt mid |
- flowFwd(mid, fromArg, apf, ap, config) and
- localFlowBigStepExt(mid, node, true, _, config)
+ exists(Node mid |
+ flowFwd(mid, fromArg, argAp, apf, ap, config) and
+ localFlowBigStep(mid, node, true, _, config, _)
)
or
- exists(NodeExt mid, AccessPathNil nil |
- flowFwd(mid, fromArg, _, nil, config) and
- localFlowBigStepExt(mid, node, false, apf, config) and
+ exists(Node mid, AccessPathNil nil |
+ flowFwd(mid, fromArg, argAp, _, nil, config) and
+ localFlowBigStep(mid, node, false, apf, config, _) and
apf = ap.(AccessPathNil).getFront()
)
or
- exists(NodeExt mid |
- flowFwd(mid, _, apf, ap, config) and
- jumpStepExt(mid, node, config) and
- fromArg = false
- )
- or
- exists(NodeExt mid, AccessPathNil nil |
- flowFwd(mid, _, _, nil, config) and
- additionalJumpStepExt(mid, node, config) and
+ exists(Node mid |
+ flowFwd(mid, _, _, apf, ap, config) and
+ jumpStep(mid, node, config) and
fromArg = false and
- ap = TNil(node.getErasedNodeTypeBound()) and
- apf = ap.(AccessPathNil).getFront()
+ argAp = TAccessPathNone()
)
or
- exists(NodeExt mid, boolean allowsFieldFlow |
- flowFwd(mid, _, apf, ap, config) and
- flowIntoCallableNodeCand2(mid, node, allowsFieldFlow, config) and
- fromArg = true and
- (ap instanceof AccessPathNil or allowsFieldFlow = true)
- )
- or
- exists(NodeExt mid, boolean allowsFieldFlow |
- flowFwd(mid, false, apf, ap, config) and
- flowOutOfCallableNodeCand2(mid, node, allowsFieldFlow, config) and
+ exists(Node mid, AccessPathNil nil |
+ flowFwd(mid, _, _, _, nil, config) and
+ additionalJumpStep(mid, node, config) and
fromArg = false and
- (ap instanceof AccessPathNil or allowsFieldFlow = true)
- )
- or
- exists(NodeExt mid |
- flowFwd(mid, fromArg, apf, ap, config) and
- argumentValueFlowsThrough(mid, node)
- )
- or
- exists(NodeExt mid, AccessPathNil nil, DataFlowType t |
- flowFwd(mid, fromArg, _, nil, config) and
- argumentFlowsThrough(mid, node, t, config) and
- ap = TNil(t) and
+ argAp = TAccessPathNone() and
+ ap = TNil(getErasedNodeTypeBound(node)) and
apf = ap.(AccessPathNil).getFront()
)
)
or
+ // store
exists(Content f, AccessPath ap0 |
- flowFwdStore(node, f, ap0, apf, fromArg, config) and
+ flowFwdStore(node, f, ap0, apf, fromArg, argAp, config) and
ap = push(f, ap0)
)
or
+ // read
exists(Content f |
- flowFwdRead(node, f, push(f, ap), fromArg, config) and
- flowConsCandFwd(f, apf, ap, config)
+ flowFwdRead(node, f, push(f, ap), fromArg, argAp, config) and
+ flowFwdConsCand(f, apf, ap, config)
+ )
+ or
+ // flow into a callable
+ flowFwdIn(_, node, _, _, apf, ap, config) and
+ fromArg = true and
+ if flowCand(node, true, _, apf, config)
+ then argAp = TAccessPathSome(ap)
+ else argAp = TAccessPathNone()
+ or
+ // flow out of a callable
+ exists(DataFlowCall call |
+ flowFwdOut(call, node, fromArg, argAp, apf, ap, config) and
+ fromArg = false
+ or
+ exists(AccessPath argAp0 |
+ flowFwdOutFromArg(call, node, argAp0, apf, ap, config) and
+ flowFwdIsEntered(call, fromArg, argAp, argAp0, config)
+ )
)
}
pragma[nomagic]
private predicate flowFwdStore(
- NodeExt node, Content f, AccessPath ap0, AccessPathFront apf, boolean fromArg,
- Configuration config
+ Node node, Content f, AccessPath ap0, AccessPathFront apf, boolean fromArg,
+ AccessPathOption argAp, Configuration config
) {
- exists(NodeExt mid, AccessPathFront apf0 |
- flowFwd(mid, fromArg, apf0, ap0, config) and
+ exists(Node mid, AccessPathFront apf0 |
+ flowFwd(mid, fromArg, argAp, apf0, ap0, config) and
flowFwdStore1(mid, f, node, apf0, apf, config)
)
}
pragma[nomagic]
private predicate flowFwdStore0(
- NodeExt mid, Content f, NodeExt node, AccessPathFront apf0, Configuration config
+ Node mid, Content f, Node node, AccessPathFront apf0, Configuration config
) {
- storeExtCand2(mid, f, node, config) and
- flowCand(mid, _, apf0, config)
+ storeCand2(mid, f, node, config) and
+ flowCand(mid, _, _, apf0, config)
}
pragma[noinline]
private predicate flowFwdStore1(
- NodeExt mid, Content f, NodeExt node, AccessPathFront apf0, AccessPathFrontHead apf,
+ Node mid, Content f, Node node, AccessPathFront apf0, AccessPathFrontHead apf,
Configuration config
) {
flowFwdStore0(mid, f, node, apf0, config) and
- consCand(f, apf0, config) and
+ flowCandConsCand(f, apf0, config) and
apf.headUsesContent(f) and
- flowCand(node, _, apf, unbind(config))
+ flowCand(node, _, _, apf, unbind(config))
}
pragma[nomagic]
private predicate flowFwdRead(
- NodeExt node, Content f, AccessPath ap0, boolean fromArg, Configuration config
+ Node node, Content f, AccessPath ap0, boolean fromArg, AccessPathOption argAp,
+ Configuration config
) {
- exists(NodeExt mid, AccessPathFrontHead apf0 |
- flowFwd(mid, fromArg, apf0, ap0, config) and
- readExtCand2(mid, f, node, config) and
+ exists(Node mid, AccessPathFrontHead apf0 |
+ flowFwd(mid, fromArg, argAp, apf0, ap0, config) and
+ readCand2(mid, f, node, config) and
apf0.headUsesContent(f) and
- flowCand(node, _, _, unbind(config))
+ flowCand(node, _, _, _, unbind(config))
)
}
pragma[nomagic]
-private predicate flowConsCandFwd(
+private predicate flowFwdConsCand(
Content f, AccessPathFront apf, AccessPath ap, Configuration config
) {
- exists(NodeExt n |
- flowFwd(n, _, apf, ap, config) and
+ exists(Node n |
+ flowFwd(n, _, _, apf, ap, config) and
flowFwdStore1(n, f, _, apf, _, config)
)
}
-/**
- * Holds if data can flow from a source to `node` with the given `ap` and
- * from there flow to a sink.
- */
-private predicate flow(NodeExt node, boolean toReturn, AccessPath ap, Configuration config) {
- flow0(node, toReturn, ap, config) and
- flowFwd(node, _, _, ap, config)
-}
-
-private predicate flow0(NodeExt node, boolean toReturn, AccessPath ap, Configuration config) {
- flowFwd(node, _, _, ap, config) and
- config.isSink(node.getNode()) and
- toReturn = false and
- ap instanceof AccessPathNil
- or
- exists(NodeExt mid |
- localFlowBigStepExt(node, mid, true, _, config) and
- flow(mid, toReturn, ap, config)
- )
- or
- exists(NodeExt mid, AccessPathNil nil |
- flowFwd(node, _, _, ap, config) and
- localFlowBigStepExt(node, mid, false, _, config) and
- flow(mid, toReturn, nil, config) and
- ap instanceof AccessPathNil
- )
- or
- exists(NodeExt mid |
- jumpStepExt(node, mid, config) and
- flow(mid, _, ap, config) and
- toReturn = false
- )
- or
- exists(NodeExt mid, AccessPathNil nil |
- flowFwd(node, _, _, ap, config) and
- additionalJumpStepExt(node, mid, config) and
- flow(mid, _, nil, config) and
- toReturn = false and
- ap instanceof AccessPathNil
- )
- or
- exists(NodeExt mid, boolean allowsFieldFlow |
- flowIntoCallableNodeCand2(node, mid, allowsFieldFlow, config) and
- flow(mid, false, ap, config) and
- toReturn = false and
- (ap instanceof AccessPathNil or allowsFieldFlow = true)
- )
- or
- exists(NodeExt mid, boolean allowsFieldFlow |
- flowOutOfCallableNodeCand2(node, mid, allowsFieldFlow, config) and
- flow(mid, _, ap, config) and
- toReturn = true and
- (ap instanceof AccessPathNil or allowsFieldFlow = true)
- )
- or
- exists(NodeExt mid |
- argumentValueFlowsThrough(node, mid) and
- flow(mid, toReturn, ap, config)
- )
- or
- exists(NodeExt mid, AccessPathNil nil |
- argumentFlowsThrough(node, mid, _, config) and
- flow(mid, toReturn, nil, config) and
- ap instanceof AccessPathNil and
- flowFwd(node, _, _, ap, config)
- )
- or
- exists(Content f |
- flowStore(f, node, toReturn, ap, config) and
- flowConsCand(f, ap, config)
- )
- or
- exists(NodeExt mid, AccessPath ap0 |
- readFwd(node, _, mid, ap, ap0, config) and
- flow(mid, toReturn, ap0, config)
+pragma[nomagic]
+private predicate flowFwdIn(
+ DataFlowCall call, ParameterNode p, boolean fromArg, AccessPathOption argAp, AccessPathFront apf,
+ AccessPath ap, Configuration config
+) {
+ exists(ArgumentNode arg, boolean allowsFieldFlow |
+ flowFwd(arg, fromArg, argAp, apf, ap, config) and
+ flowIntoCallNodeCand2(call, arg, p, allowsFieldFlow, config) and
+ flowCand(p, _, _, _, unbind(config))
+ |
+ ap instanceof AccessPathNil or allowsFieldFlow = true
)
}
pragma[nomagic]
-private predicate storeFwd(
- NodeExt node1, Content f, NodeExt node2, AccessPath ap, AccessPath ap0, Configuration config
+private predicate flowFwdOut(
+ DataFlowCall call, Node node, boolean fromArg, AccessPathOption argAp, AccessPathFront apf,
+ AccessPath ap, Configuration config
) {
- storeExtCand2(node1, f, node2, config) and
- flowFwdStore(node2, f, ap, _, _, config) and
+ exists(ReturnNodeExt ret, boolean allowsFieldFlow |
+ flowFwd(ret, fromArg, argAp, apf, ap, config) and
+ flowOutOfCallNodeCand2(call, ret, node, allowsFieldFlow, config) and
+ flowCand(node, _, _, _, unbind(config))
+ |
+ ap instanceof AccessPathNil or allowsFieldFlow = true
+ )
+}
+
+pragma[nomagic]
+private predicate flowFwdOutFromArg(
+ DataFlowCall call, Node node, AccessPath argAp, AccessPathFront apf, AccessPath ap,
+ Configuration config
+) {
+ flowFwdOut(call, node, true, TAccessPathSome(argAp), apf, ap, config)
+}
+
+/**
+ * Holds if an argument to `call` is reached in the flow covered by `flowFwd`.
+ */
+pragma[nomagic]
+private predicate flowFwdIsEntered(
+ DataFlowCall call, boolean fromArg, AccessPathOption argAp, AccessPath ap, Configuration config
+) {
+ exists(ParameterNode p, AccessPathFront apf |
+ flowFwdIn(call, p, fromArg, argAp, apf, ap, config) and
+ flowCand(p, true, TAccessPathFrontSome(_), apf, config)
+ )
+}
+
+/**
+ * Holds if `node` with access path `ap` is part of a path from a source to
+ * a sink in the configuration `config`.
+ *
+ * The Boolean `toReturn` records whether the node must be returned from
+ * the enclosing callable in order to reach a sink, and if so, `returnAp`
+ * records the access path of the returned value.
+ */
+private predicate flow(
+ Node node, boolean toReturn, AccessPathOption returnAp, AccessPath ap, Configuration config
+) {
+ flow0(node, toReturn, returnAp, ap, config) and
+ flowFwd(node, _, _, _, ap, config)
+}
+
+private predicate flow0(
+ Node node, boolean toReturn, AccessPathOption returnAp, AccessPath ap, Configuration config
+) {
+ flowFwd(node, _, _, _, ap, config) and
+ config.isSink(node) and
+ toReturn = false and
+ returnAp = TAccessPathNone() and
+ ap instanceof AccessPathNil
+ or
+ exists(Node mid |
+ localFlowBigStep(node, mid, true, _, config, _) and
+ flow(mid, toReturn, returnAp, ap, config)
+ )
+ or
+ exists(Node mid, AccessPathNil nil |
+ flowFwd(node, _, _, _, ap, config) and
+ localFlowBigStep(node, mid, false, _, config, _) and
+ flow(mid, toReturn, returnAp, nil, config) and
+ ap instanceof AccessPathNil
+ )
+ or
+ exists(Node mid |
+ jumpStep(node, mid, config) and
+ flow(mid, _, _, ap, config) and
+ toReturn = false and
+ returnAp = TAccessPathNone()
+ )
+ or
+ exists(Node mid, AccessPathNil nil |
+ flowFwd(node, _, _, _, ap, config) and
+ additionalJumpStep(node, mid, config) and
+ flow(mid, _, _, nil, config) and
+ toReturn = false and
+ returnAp = TAccessPathNone() and
+ ap instanceof AccessPathNil
+ )
+ or
+ // store
+ exists(Content f |
+ flowStore(f, node, toReturn, returnAp, ap, config) and
+ flowConsCand(f, ap, config)
+ )
+ or
+ // read
+ exists(Node mid, AccessPath ap0 |
+ readFlowFwd(node, _, mid, ap, ap0, config) and
+ flow(mid, toReturn, returnAp, ap0, config)
+ )
+ or
+ // flow into a callable
+ exists(DataFlowCall call |
+ flowIn(call, node, toReturn, returnAp, ap, config) and
+ toReturn = false
+ or
+ exists(AccessPath returnAp0 |
+ flowInToReturn(call, node, returnAp0, ap, config) and
+ flowIsReturned(call, toReturn, returnAp, returnAp0, config)
+ )
+ )
+ or
+ // flow out of a callable
+ flowOut(_, node, _, _, ap, config) and
+ toReturn = true and
+ if flowFwd(node, true, TAccessPathSome(_), _, ap, config)
+ then returnAp = TAccessPathSome(ap)
+ else returnAp = TAccessPathNone()
+}
+
+pragma[nomagic]
+private predicate storeFlowFwd(
+ Node node1, Content f, Node node2, AccessPath ap, AccessPath ap0, Configuration config
+) {
+ storeCand2(node1, f, node2, config) and
+ flowFwdStore(node2, f, ap, _, _, _, config) and
ap0 = push(f, ap)
}
pragma[nomagic]
private predicate flowStore(
- Content f, NodeExt node, boolean toReturn, AccessPath ap, Configuration config
+ Content f, Node node, boolean toReturn, AccessPathOption returnAp, AccessPath ap,
+ Configuration config
) {
- exists(NodeExt mid, AccessPath ap0 |
- storeFwd(node, f, mid, ap, ap0, config) and
- flow(mid, toReturn, ap0, config)
+ exists(Node mid, AccessPath ap0 |
+ storeFlowFwd(node, f, mid, ap, ap0, config) and
+ flow(mid, toReturn, returnAp, ap0, config)
)
}
pragma[nomagic]
-private predicate readFwd(
- NodeExt node1, Content f, NodeExt node2, AccessPath ap, AccessPath ap0, Configuration config
+private predicate readFlowFwd(
+ Node node1, Content f, Node node2, AccessPath ap, AccessPath ap0, Configuration config
) {
- readExtCand2(node1, f, node2, config) and
- flowFwdRead(node2, f, ap, _, config) and
+ readCand2(node1, f, node2, config) and
+ flowFwdRead(node2, f, ap, _, _, config) and
ap0 = pop(f, ap) and
- flowConsCandFwd(f, _, ap0, unbind(config))
+ flowFwdConsCand(f, _, ap0, unbind(config))
}
pragma[nomagic]
private predicate flowConsCand(Content f, AccessPath ap, Configuration config) {
- exists(NodeExt n, NodeExt mid |
- flow(mid, _, ap, config) and
- readFwd(n, f, mid, _, ap, config)
+ exists(Node n, Node mid |
+ flow(mid, _, _, ap, config) and
+ readFlowFwd(n, f, mid, _, ap, config)
+ )
+}
+
+pragma[nomagic]
+private predicate flowOut(
+ DataFlowCall call, ReturnNodeExt ret, boolean toReturn, AccessPathOption returnAp, AccessPath ap,
+ Configuration config
+) {
+ exists(Node out, boolean allowsFieldFlow |
+ flow(out, toReturn, returnAp, ap, config) and
+ flowOutOfCallNodeCand2(call, ret, out, allowsFieldFlow, config)
+ |
+ ap instanceof AccessPathNil or allowsFieldFlow = true
+ )
+}
+
+pragma[nomagic]
+private predicate flowIn(
+ DataFlowCall call, ArgumentNode arg, boolean toReturn, AccessPathOption returnAp, AccessPath ap,
+ Configuration config
+) {
+ exists(ParameterNode p, boolean allowsFieldFlow |
+ flow(p, toReturn, returnAp, ap, config) and
+ flowIntoCallNodeCand2(call, arg, p, allowsFieldFlow, config)
+ |
+ ap instanceof AccessPathNil or allowsFieldFlow = true
+ )
+}
+
+pragma[nomagic]
+private predicate flowInToReturn(
+ DataFlowCall call, ArgumentNode arg, AccessPath returnAp, AccessPath ap, Configuration config
+) {
+ flowIn(call, arg, true, TAccessPathSome(returnAp), ap, config)
+}
+
+/**
+ * Holds if an output from `call` is reached in the flow covered by `flow`.
+ */
+pragma[nomagic]
+private predicate flowIsReturned(
+ DataFlowCall call, boolean toReturn, AccessPathOption returnAp, AccessPath ap,
+ Configuration config
+) {
+ exists(ReturnNodeExt ret |
+ flowOut(call, ret, toReturn, returnAp, ap, config) and
+ flowFwd(ret, true, TAccessPathSome(_), _, ap, config)
)
}
bindingset[conf, result]
private Configuration unbind(Configuration conf) { result >= conf and result <= conf }
-private predicate flow(Node n, Configuration config) { flow(TNormalNode(n), _, _, config) }
+private predicate flow(Node n, Configuration config) { flow(n, _, _, _, config) }
+
+pragma[noinline]
+private predicate parameterFlow(
+ ParameterNode p, AccessPath ap, DataFlowCallable c, Configuration config
+) {
+ flow(p, true, _, ap, config) and
+ c = p.getEnclosingCallable()
+}
private newtype TSummaryCtx =
TSummaryCtxNone() or
TSummaryCtxSome(ParameterNode p, AccessPath ap) {
- exists(ReturnNodeExt ret, Configuration config | flow(TNormalNode(p), true, ap, config) |
- exists(Summary summary |
- parameterFlowReturn(p, ret, _, _, _, summary, config) and
- flow(ret, unbind(config))
- |
- // taint through
- summary = TSummaryTaint() and
- ap instanceof AccessPathNil
- or
- // taint setter
- summary = TSummaryTaintStore(_) and
- ap instanceof AccessPathNil
- or
- // taint getter
- summary = TSummaryReadTaint(ap.(AccessPathConsNil).getHead())
- )
- or
- exists(ContentOption contentIn |
- parameterValueFlowReturn(p, ret, _, contentIn, _) and
- flow(ret, unbind(config))
- |
- // value through/setter
- contentIn = TContentNone()
- or
- // value getter (+ setter)
- contentIn = TContentSome(ap.getHead())
- )
+ exists(ReturnNodeExt ret, Configuration config, AccessPath ap0 |
+ parameterFlow(p, ap, ret.getEnclosingCallable(), config) and
+ flow(ret, true, TAccessPathSome(_), ap0, config) and
+ flowFwd(ret, true, TAccessPathSome(ap), _, ap0, config)
)
}
@@ -2113,7 +2050,7 @@ private newtype TPathNode =
exists(PathNodeMid mid |
pathStep(mid, node, cc, sc, ap) and
config = mid.getConfiguration() and
- flow(TNormalNode(node), _, ap, unbind(config))
+ flow(node, _, _, ap, unbind(config))
)
} or
TPathNodeSink(Node node, Configuration config) {
@@ -2304,7 +2241,7 @@ private predicate pathStep(PathNodeMid mid, Node node, CallContext cc, SummaryCt
localFlowBigStep(midnode, node, true, _, conf, localCC) and
ap = ap0
or
- localFlowBigStep(midnode, node, false, ap.(AccessPathNil).getType(), conf, localCC) and
+ localFlowBigStep(midnode, node, false, ap.getFront(), conf, localCC) and
ap0 instanceof AccessPathNil
)
or
@@ -2319,10 +2256,10 @@ private predicate pathStep(PathNodeMid mid, Node node, CallContext cc, SummaryCt
mid.getAp() instanceof AccessPathNil and
ap = TNil(getErasedNodeTypeBound(node))
or
- exists(Content f | pathReadStep(mid, node, push(f, ap), f, cc)) and
+ exists(Content f | pathStoreStep(mid, node, pop(f, ap), f, cc)) and
sc = mid.getSummaryCtx()
or
- exists(Content f | pathStoreStep(mid, node, pop(f, ap), f, cc)) and
+ exists(Content f | pathReadStep(mid, node, push(f, ap), f, cc)) and
sc = mid.getSummaryCtx()
or
pathIntoCallable(mid, node, _, cc, sc, _) and ap = mid.getAp()
@@ -2334,7 +2271,7 @@ private predicate pathStep(PathNodeMid mid, Node node, CallContext cc, SummaryCt
pragma[nomagic]
private predicate readCand(Node node1, Content f, Node node2, Configuration config) {
- readDirect(node1, f, node2) and
+ read(node1, f, node2) and
flow(node2, config)
}
@@ -2347,7 +2284,7 @@ private predicate pathReadStep(PathNodeMid mid, Node node, AccessPath ap0, Conte
pragma[nomagic]
private predicate storeCand(Node node1, Content f, Node node2, Configuration config) {
- storeDirect(node1, f, node2) and
+ store(node1, f, node2) and
flow(node2, config)
}
@@ -2386,11 +2323,11 @@ private predicate pathOutOfCallable1(
}
pragma[noinline]
-private Node getAnOutNodeCand(
+private Node getAnOutNodeFlow(
ReturnKindExt kind, DataFlowCall call, AccessPath ap, Configuration config
) {
result = kind.getAnOutNode(call) and
- flow(TNormalNode(result), _, ap, config)
+ flow(result, _, _, ap, config)
}
/**
@@ -2402,7 +2339,7 @@ private predicate pathOutOfCallable(PathNodeMid mid, Node out, CallContext cc) {
exists(ReturnKindExt kind, DataFlowCall call, AccessPath ap, Configuration config |
pathOutOfCallable1(mid, call, kind, cc, ap, config)
|
- out = getAnOutNodeCand(kind, call, ap, config)
+ out = getAnOutNodeFlow(kind, call, ap, config)
)
}
@@ -2426,7 +2363,7 @@ private predicate parameterCand(
DataFlowCallable callable, int i, AccessPath ap, Configuration config
) {
exists(ParameterNode p |
- flow(TNormalNode(p), _, ap, config) and
+ flow(p, _, _, ap, config) and
p.isParameterOf(callable, i)
)
}
@@ -2501,7 +2438,7 @@ pragma[noinline]
private predicate pathThroughCallable(PathNodeMid mid, Node out, CallContext cc, AccessPath ap) {
exists(DataFlowCall call, ReturnKindExt kind |
pathThroughCallable0(call, mid, kind, cc, ap) and
- out = getAnOutNodeCand(kind, call, ap, mid.getConfiguration())
+ out = getAnOutNodeFlow(kind, call, ap, mid.getConfiguration())
)
}
@@ -2542,10 +2479,7 @@ private module FlowExploration {
viableParamArg(_, node2, node1)
or
// flow out of a callable
- exists(DataFlowCall call, ReturnKindExt kind |
- getReturnPosition(node1) = viableReturnPos(call, kind) and
- node2 = kind.getAnOutNode(call)
- )
+ viableReturnPosOut(_, getReturnPosition(node1), node2)
|
c1 = node1.getEnclosingCallable() and
c2 = node2.getEnclosingCallable() and
@@ -2841,7 +2775,7 @@ private module FlowExploration {
PartialPathNodePriv mid, PartialAccessPath ap1, Content f, Node node, PartialAccessPath ap2
) {
ap1 = mid.getAp() and
- storeDirect(mid.getNode(), f, node) and
+ store(mid.getNode(), f, node) and
ap2.getHead() = f and
ap2.len() = unbindInt(ap1.len() + 1) and
compatibleTypes(ap1.getType(), f.getType())
diff --git a/ql/src/semmle/go/dataflow/internal/DataFlowImplCommon.qll b/ql/src/semmle/go/dataflow/internal/DataFlowImplCommon.qll
index 783ac641e6e..b241a574c97 100644
--- a/ql/src/semmle/go/dataflow/internal/DataFlowImplCommon.qll
+++ b/ql/src/semmle/go/dataflow/internal/DataFlowImplCommon.qll
@@ -26,13 +26,30 @@ private module Cached {
)
}
- /** Provides predicates for calculating flow-through summaries. */
+ pragma[nomagic]
+ private ReturnPosition viableReturnPos(DataFlowCall call, ReturnKindExt kind) {
+ viableCallable(call) = result.getCallable() and
+ kind = result.getKind()
+ }
+
+ /**
+ * Holds if a value at return position `pos` can be returned to `out` via `call`,
+ * taking virtual dispatch into account.
+ */
cached
+ predicate viableReturnPosOut(DataFlowCall call, ReturnPosition pos, Node out) {
+ exists(ReturnKindExt kind |
+ pos = viableReturnPos(call, kind) and
+ out = kind.getAnOutNode(call)
+ )
+ }
+
+ /** Provides predicates for calculating flow-through summaries. */
private module FlowThrough {
/**
* The first flow-through approximation:
*
- * - Input/output access paths are abstracted with a Boolean parameter
+ * - Input access paths are abstracted with a Boolean parameter
* that indicates (non-)emptiness.
*/
private module Cand {
@@ -40,83 +57,47 @@ private module Cached {
* Holds if `p` can flow to `node` in the same callable using only
* value-preserving steps.
*
- * `read` indicates whether it is contents of `p` that can flow to `node`,
- * and `stored` indicates whether it flows to contents of `node`.
+ * `read` indicates whether it is contents of `p` that can flow to `node`.
*/
pragma[nomagic]
- private predicate parameterValueFlowCand(
- ParameterNode p, Node node, boolean read, boolean stored
- ) {
+ private predicate parameterValueFlowCand(ParameterNode p, Node node, boolean read) {
p = node and
- read = false and
- stored = false
+ read = false
or
// local flow
exists(Node mid |
- parameterValueFlowCand(p, mid, read, stored) and
+ parameterValueFlowCand(p, mid, read) and
simpleLocalFlowStep(mid, node)
)
or
// read
- exists(Node mid, boolean readMid, boolean storedMid |
- parameterValueFlowCand(p, mid, readMid, storedMid) and
- readStep(mid, _, node) and
- stored = false
- |
- // value neither read nor stored prior to read
- readMid = false and
- storedMid = false and
- read = true
- or
- // value (possibly read and then) stored prior to read (same content)
- read = readMid and
- storedMid = true
- )
- or
- // store
exists(Node mid |
- parameterValueFlowCand(p, mid, read, false) and
- storeStep(mid, _, node) and
- stored = true
+ parameterValueFlowCand(p, mid, false) and
+ readStep(mid, _, node) and
+ read = true
)
or
- // flow through: no prior read or store
+ // flow through: no prior read
exists(ArgumentNode arg |
- parameterValueFlowArgCand(p, arg, false, false) and
- argumentValueFlowsThroughCand(arg, node, read, stored)
+ parameterValueFlowArgCand(p, arg, false) and
+ argumentValueFlowsThroughCand(arg, node, read)
)
or
- // flow through: no read or store inside method
+ // flow through: no read inside method
exists(ArgumentNode arg |
- parameterValueFlowArgCand(p, arg, read, stored) and
- argumentValueFlowsThroughCand(arg, node, false, false)
- )
- or
- // flow through: possible prior read and prior store with compatible
- // flow-through method
- exists(ArgumentNode arg, boolean mid |
- parameterValueFlowArgCand(p, arg, read, mid) and
- argumentValueFlowsThroughCand(arg, node, mid, stored)
+ parameterValueFlowArgCand(p, arg, read) and
+ argumentValueFlowsThroughCand(arg, node, false)
)
}
pragma[nomagic]
- private predicate parameterValueFlowArgCand(
- ParameterNode p, ArgumentNode arg, boolean read, boolean stored
- ) {
- parameterValueFlowCand(p, arg, read, stored)
+ private predicate parameterValueFlowArgCand(ParameterNode p, ArgumentNode arg, boolean read) {
+ parameterValueFlowCand(p, arg, read)
}
pragma[nomagic]
predicate parameterValueFlowsToPreUpdateCand(ParameterNode p, PostUpdateNode n) {
- parameterValueFlowCand(p, n.getPreUpdateNode(), false, false)
- }
-
- pragma[nomagic]
- private predicate parameterValueFlowsToPostUpdateCand(
- ParameterNode p, PostUpdateNode n, boolean read
- ) {
- parameterValueFlowCand(p, n, read, true)
+ parameterValueFlowCand(p, n.getPreUpdateNode(), false)
}
/**
@@ -125,33 +106,21 @@ private module Cached {
* into account.
*
* `read` indicates whether it is contents of `p` that can flow to the return
- * node, and `stored` indicates whether it flows to contents of the return
* node.
*/
- predicate parameterValueFlowReturnCand(
- ParameterNode p, ReturnKindExt kind, boolean read, boolean stored
- ) {
+ predicate parameterValueFlowReturnCand(ParameterNode p, ReturnKind kind, boolean read) {
exists(ReturnNode ret |
- parameterValueFlowCand(p, ret, read, stored) and
- kind = TValueReturn(ret.getKind())
- )
- or
- exists(ParameterNode p2, int pos2, PostUpdateNode n |
- parameterValueFlowsToPostUpdateCand(p, n, read) and
- parameterValueFlowsToPreUpdateCand(p2, n) and
- p2.isParameterOf(_, pos2) and
- kind = TParamUpdate(pos2) and
- p != p2 and
- stored = true
+ parameterValueFlowCand(p, ret, read) and
+ kind = ret.getKind()
)
}
pragma[nomagic]
private predicate argumentValueFlowsThroughCand0(
- DataFlowCall call, ArgumentNode arg, ReturnKindExt kind, boolean read, boolean stored
+ DataFlowCall call, ArgumentNode arg, ReturnKind kind, boolean read
) {
exists(ParameterNode param | viableParamArg(call, param, arg) |
- parameterValueFlowReturnCand(param, kind, read, stored)
+ parameterValueFlowReturnCand(param, kind, read)
)
}
@@ -159,22 +128,19 @@ private module Cached {
* Holds if `arg` flows to `out` through a call using only value-preserving steps,
* not taking call contexts into account.
*
- * `read` indicates whether it is contents of `arg` that can flow to `out`, and
- * `stored` indicates whether it flows to contents of `out`.
+ * `read` indicates whether it is contents of `arg` that can flow to `out`.
*/
- predicate argumentValueFlowsThroughCand(
- ArgumentNode arg, Node out, boolean read, boolean stored
- ) {
- exists(DataFlowCall call, ReturnKindExt kind |
- argumentValueFlowsThroughCand0(call, arg, kind, read, stored) and
- out = kind.getAnOutNode(call)
+ predicate argumentValueFlowsThroughCand(ArgumentNode arg, Node out, boolean read) {
+ exists(DataFlowCall call, ReturnKind kind |
+ argumentValueFlowsThroughCand0(call, arg, kind, read) and
+ out = getAnOutNode(call, kind)
)
}
predicate cand(ParameterNode p, Node n) {
- parameterValueFlowCand(p, n, _, _) and
+ parameterValueFlowCand(p, n, _) and
(
- parameterValueFlowReturnCand(p, _, _, _)
+ parameterValueFlowReturnCand(p, _, _)
or
parameterValueFlowsToPreUpdateCand(p, _)
)
@@ -187,7 +153,6 @@ private module Cached {
(
n instanceof ParameterNode or
n instanceof OutNode or
- n instanceof PostUpdateNode or
readStep(_, _, n) or
n instanceof CastNode
)
@@ -200,10 +165,6 @@ private module Cached {
or
n instanceof ReturnNode
or
- Cand::parameterValueFlowsToPreUpdateCand(_, n)
- or
- storeStep(n, _, _)
- or
readStep(n, _, _)
or
n instanceof CastNode
@@ -237,230 +198,140 @@ private module Cached {
/**
* The final flow-through calculation:
*
- * - Input/output access paths are abstracted with a `ContentOption` parameter
+ * - Input access paths are abstracted with a `ContentOption` parameter
* that represents the head of the access path. `TContentNone()` means that
* the access path is unrestricted.
* - Types are checked using the `compatibleTypes()` relation.
*/
- cached
private module Final {
/**
* Holds if `p` can flow to `node` in the same callable using only
* value-preserving steps, not taking call contexts into account.
*
* `contentIn` describes the content of `p` that can flow to `node`
- * (if any), and `contentOut` describes the content of `node` that
- * it flows to (if any).
+ * (if any).
*/
- private predicate parameterValueFlow(
- ParameterNode p, Node node, ContentOption contentIn, ContentOption contentOut
- ) {
- parameterValueFlow0(p, node, contentIn, contentOut) and
+ predicate parameterValueFlow(ParameterNode p, Node node, ContentOption contentIn) {
+ parameterValueFlow0(p, node, contentIn) and
if node instanceof CastingNode
then
// normal flow through
contentIn = TContentNone() and
- contentOut = TContentNone() and
compatibleTypes(getErasedNodeTypeBound(p), getErasedNodeTypeBound(node))
or
// getter
exists(Content fIn |
contentIn.getContent() = fIn and
- contentOut = TContentNone() and
compatibleTypes(fIn.getType(), getErasedNodeTypeBound(node))
)
- or
- // (getter+)setter
- exists(Content fOut |
- contentOut.getContent() = fOut and
- compatibleTypes(fOut.getContainerType(), getErasedNodeTypeBound(node))
- )
else any()
}
pragma[nomagic]
- private predicate parameterValueFlow0(
- ParameterNode p, Node node, ContentOption contentIn, ContentOption contentOut
- ) {
+ private predicate parameterValueFlow0(ParameterNode p, Node node, ContentOption contentIn) {
p = node and
Cand::cand(p, _) and
- contentIn = TContentNone() and
- contentOut = TContentNone()
+ contentIn = TContentNone()
or
// local flow
exists(Node mid |
- parameterValueFlow(p, mid, contentIn, contentOut) and
+ parameterValueFlow(p, mid, contentIn) and
LocalFlowBigStep::localFlowBigStep(mid, node)
)
or
// read
- exists(Node mid, Content f, ContentOption contentInMid, ContentOption contentOutMid |
- parameterValueFlow(p, mid, contentInMid, contentOutMid) and
- readStep(mid, f, node)
- |
- // value neither read nor stored prior to read
- contentInMid = TContentNone() and
- contentOutMid = TContentNone() and
- contentIn.getContent() = f and
- contentOut = TContentNone() and
- Cand::parameterValueFlowReturnCand(p, _, true, _) and
- compatibleTypes(getErasedNodeTypeBound(p), f.getContainerType())
- or
- // value (possibly read and then) stored prior to read (same content)
- contentIn = contentInMid and
- contentOutMid.getContent() = f and
- contentOut = TContentNone()
- )
- or
- // store
exists(Node mid, Content f |
- parameterValueFlow(p, mid, contentIn, TContentNone()) and
- storeStep(mid, f, node) and
- contentOut.getContent() = f
- |
- contentIn = TContentNone() and
- compatibleTypes(getErasedNodeTypeBound(p), f.getType())
- or
- compatibleTypes(contentIn.getContent().getType(), f.getType())
+ parameterValueFlow(p, mid, TContentNone()) and
+ readStep(mid, f, node) and
+ contentIn.getContent() = f and
+ Cand::parameterValueFlowReturnCand(p, _, true) and
+ compatibleTypes(getErasedNodeTypeBound(p), f.getContainerType())
)
or
- // flow through: no prior read or store
+ // flow through: no prior read
exists(ArgumentNode arg |
- parameterValueFlowArg(p, arg, TContentNone(), TContentNone()) and
- argumentValueFlowsThrough(_, arg, contentIn, contentOut, node)
+ parameterValueFlowArg(p, arg, TContentNone()) and
+ argumentValueFlowsThrough(arg, contentIn, node)
)
or
- // flow through: no read or store inside method
+ // flow through: no read inside method
exists(ArgumentNode arg |
- parameterValueFlowArg(p, arg, contentIn, contentOut) and
- argumentValueFlowsThrough(_, arg, TContentNone(), TContentNone(), node)
- )
- or
- // flow through: possible prior read and prior store with compatible
- // flow-through method
- exists(ArgumentNode arg, ContentOption contentMid |
- parameterValueFlowArg(p, arg, contentIn, contentMid) and
- argumentValueFlowsThrough(_, arg, contentMid, contentOut, node)
+ parameterValueFlowArg(p, arg, contentIn) and
+ argumentValueFlowsThrough(arg, TContentNone(), node)
)
}
pragma[nomagic]
private predicate parameterValueFlowArg(
- ParameterNode p, ArgumentNode arg, ContentOption contentIn, ContentOption contentOut
+ ParameterNode p, ArgumentNode arg, ContentOption contentIn
) {
- parameterValueFlow(p, arg, contentIn, contentOut) and
- Cand::argumentValueFlowsThroughCand(arg, _, _, _)
+ parameterValueFlow(p, arg, contentIn) and
+ Cand::argumentValueFlowsThroughCand(arg, _, _)
}
pragma[nomagic]
private predicate argumentValueFlowsThrough0(
- DataFlowCall call, ArgumentNode arg, ReturnKindExt kind, ContentOption contentIn,
- ContentOption contentOut
+ DataFlowCall call, ArgumentNode arg, ReturnKind kind, ContentOption contentIn
) {
exists(ParameterNode param | viableParamArg(call, param, arg) |
- parameterValueFlowReturn(param, _, kind, contentIn, contentOut)
+ parameterValueFlowReturn(param, kind, contentIn)
)
}
/**
- * Holds if `arg` flows to `out` through `call` using only value-preserving steps,
+ * Holds if `arg` flows to `out` through a call using only value-preserving steps,
* not taking call contexts into account.
*
- * `contentIn` describes the content of `arg` that can flow to `out` (if any), and
- * `contentOut` describes the content of `out` that it flows to (if any).
+ * `contentIn` describes the content of `arg` that can flow to `out` (if any).
*/
- cached
- predicate argumentValueFlowsThrough(
- DataFlowCall call, ArgumentNode arg, ContentOption contentIn, ContentOption contentOut,
- Node out
- ) {
- exists(ReturnKindExt kind |
- argumentValueFlowsThrough0(call, arg, kind, contentIn, contentOut) and
- out = kind.getAnOutNode(call)
+ pragma[nomagic]
+ predicate argumentValueFlowsThrough(ArgumentNode arg, ContentOption contentIn, Node out) {
+ exists(DataFlowCall call, ReturnKind kind |
+ argumentValueFlowsThrough0(call, arg, kind, contentIn) and
+ out = getAnOutNode(call, kind)
|
// normal flow through
contentIn = TContentNone() and
- contentOut = TContentNone() and
compatibleTypes(getErasedNodeTypeBound(arg), getErasedNodeTypeBound(out))
or
// getter
exists(Content fIn |
contentIn.getContent() = fIn and
- contentOut = TContentNone() and
compatibleTypes(getErasedNodeTypeBound(arg), fIn.getContainerType()) and
compatibleTypes(fIn.getType(), getErasedNodeTypeBound(out))
)
- or
- // setter
- exists(Content fOut |
- contentIn = TContentNone() and
- contentOut.getContent() = fOut and
- compatibleTypes(getErasedNodeTypeBound(arg), fOut.getType()) and
- compatibleTypes(fOut.getContainerType(), getErasedNodeTypeBound(out))
- )
- or
- // getter+setter
- exists(Content fIn, Content fOut |
- contentIn.getContent() = fIn and
- contentOut.getContent() = fOut and
- compatibleTypes(getErasedNodeTypeBound(arg), fIn.getContainerType()) and
- compatibleTypes(fOut.getContainerType(), getErasedNodeTypeBound(out))
- )
)
}
- /**
- * Holds if `p` can flow to the pre-update node associated with post-update
- * node `n`, in the same callable, using only value-preserving steps.
- */
- cached
- predicate parameterValueFlowsToPreUpdate(ParameterNode p, PostUpdateNode n) {
- parameterValueFlow(p, n.getPreUpdateNode(), TContentNone(), TContentNone())
- }
-
- pragma[nomagic]
- private predicate parameterValueFlowsToPostUpdate(
- ParameterNode p, PostUpdateNode n, ContentOption contentIn, ContentOption contentOut
- ) {
- parameterValueFlow(p, n, contentIn, contentOut) and
- contentOut.hasContent()
- }
-
/**
* Holds if `p` can flow to a return node of kind `kind` in the same
* callable using only value-preserving steps.
*
* `contentIn` describes the content of `p` that can flow to the return
- * node (if any), and `contentOut` describes the content of the return
- * node that it flows to (if any).
+ * node (if any).
*/
- cached
- predicate parameterValueFlowReturn(
- ParameterNode p, Node ret, ReturnKindExt kind, ContentOption contentIn,
- ContentOption contentOut
+ private predicate parameterValueFlowReturn(
+ ParameterNode p, ReturnKind kind, ContentOption contentIn
) {
- ret =
- any(ReturnNode n |
- parameterValueFlow(p, n, contentIn, contentOut) and
- kind = TValueReturn(n.getKind())
- )
- or
- ret =
- any(PostUpdateNode n |
- exists(ParameterNode p2, int pos2 |
- parameterValueFlowsToPostUpdate(p, n, contentIn, contentOut) and
- parameterValueFlowsToPreUpdate(p2, n) and
- p2.isParameterOf(_, pos2) and
- kind = TParamUpdate(pos2) and
- p != p2
- )
- )
+ exists(ReturnNode ret |
+ parameterValueFlow(p, ret, contentIn) and
+ kind = ret.getKind()
+ )
}
}
import Final
}
+ /**
+ * Holds if `p` can flow to the pre-update node associated with post-update
+ * node `n`, in the same callable, using only value-preserving steps.
+ */
+ cached
+ predicate parameterValueFlowsToPreUpdate(ParameterNode p, PostUpdateNode n) {
+ parameterValueFlow(p, n.getPreUpdateNode(), TContentNone())
+ }
+
/**
* Holds if data can flow from `node1` to `node2` via a direct assignment to
* `f`.
@@ -469,14 +340,14 @@ private module Cached {
* been stored into, in order to handle cases like `x.f1.f2 = y`.
*/
cached
- predicate storeDirect(Node node1, Content f, Node node2) {
+ predicate store(Node node1, Content f, Node node2) {
storeStep(node1, f, node2) and readStep(_, f, _)
or
exists(Node n1, Node n2 |
n1 = node1.(PostUpdateNode).getPreUpdateNode() and
n2 = node2.(PostUpdateNode).getPreUpdateNode()
|
- argumentValueFlowsThrough(_, n2, TContentSome(f), TContentNone(), n1)
+ argumentValueFlowsThrough(n2, TContentSome(f), n1)
or
readStep(n2, f, n1)
)
@@ -520,6 +391,21 @@ private module Cached {
newtype TReturnKindExt =
TValueReturn(ReturnKind kind) or
TParamUpdate(int pos) { exists(ParameterNode p | p.isParameterOf(_, pos)) }
+
+ cached
+ newtype TBooleanOption =
+ TBooleanNone() or
+ TBooleanSome(boolean b) { b = true or b = false }
+
+ cached
+ newtype TAccessPathFront =
+ TFrontNil(DataFlowType t) or
+ TFrontHead(Content f)
+
+ cached
+ newtype TAccessPathFrontOption =
+ TAccessPathFrontNone() or
+ TAccessPathFrontSome(AccessPathFront apf)
}
/**
@@ -538,7 +424,7 @@ newtype TContentOption =
TContentNone() or
TContentSome(Content f)
-class ContentOption extends TContentOption {
+private class ContentOption extends TContentOption {
Content getContent() { this = TContentSome(result) }
predicate hasContent() { exists(this.getContent()) }
@@ -779,77 +665,58 @@ DataFlowCallable resolveCall(DataFlowCall call, CallContext cc) {
result = viableCallable(call) and cc instanceof CallContextReturn
}
-newtype TSummary =
- TSummaryVal() or
- TSummaryTaint() or
- TSummaryReadVal(Content f) or
- TSummaryReadTaint(Content f) or
- TSummaryTaintStore(Content f)
-
-/**
- * A summary of flow through a callable. This can either be value-preserving
- * if no additional steps are used, taint-flow if at least one additional step
- * is used, or any one of those combined with a store or a read. Summaries
- * recorded at a return node are restricted to include at least one additional
- * step, as the value-based summaries are calculated independent of the
- * configuration.
- */
-class Summary extends TSummary {
- string toString() {
- result = "Val" and this = TSummaryVal()
- or
- result = "Taint" and this = TSummaryTaint()
- or
- exists(Content f |
- result = "ReadVal " + f.toString() and this = TSummaryReadVal(f)
- or
- result = "ReadTaint " + f.toString() and this = TSummaryReadTaint(f)
- or
- result = "TaintStore " + f.toString() and this = TSummaryTaintStore(f)
- )
- }
-
- /** Gets the summary that results from extending this with an additional step. */
- Summary additionalStep() {
- this = TSummaryVal() and result = TSummaryTaint()
- or
- this = TSummaryTaint() and result = TSummaryTaint()
- or
- exists(Content f | this = TSummaryReadVal(f) and result = TSummaryReadTaint(f))
- or
- exists(Content f | this = TSummaryReadTaint(f) and result = TSummaryReadTaint(f))
- }
-
- /** Gets the summary that results from extending this with a read. */
- Summary readStep(Content f) { this = TSummaryVal() and result = TSummaryReadVal(f) }
-
- /** Gets the summary that results from extending this with a store. */
- Summary storeStep(Content f) { this = TSummaryTaint() and result = TSummaryTaintStore(f) }
-
- /** Gets the summary that results from extending this with `step`. */
- bindingset[this, step]
- Summary compose(Summary step) {
- this = TSummaryVal() and result = step
- or
- this = TSummaryTaint() and
- (step = TSummaryTaint() or step = TSummaryTaintStore(_)) and
- result = step
- or
- exists(Content f |
- this = TSummaryReadVal(f) and step = TSummaryTaint() and result = TSummaryReadTaint(f)
- )
- or
- this = TSummaryReadTaint(_) and step = TSummaryTaint() and result = this
- }
-
- /** Holds if this summary does not include any taint steps. */
- predicate isPartial() {
- this = TSummaryVal() or
- this = TSummaryReadVal(_)
- }
-}
-
pragma[noinline]
DataFlowType getErasedNodeTypeBound(Node n) { result = getErasedRepr(n.getTypeBound()) }
-predicate readDirect = readStep/3;
+predicate read = readStep/3;
+
+/** An optional Boolean value. */
+class BooleanOption extends TBooleanOption {
+ string toString() {
+ this = TBooleanNone() and result = ""
+ or
+ this = TBooleanSome(any(boolean b | result = b.toString()))
+ }
+}
+
+/**
+ * The front of an access path. This is either a head or a nil.
+ */
+abstract class AccessPathFront extends TAccessPathFront {
+ abstract string toString();
+
+ abstract DataFlowType getType();
+
+ abstract boolean toBoolNonEmpty();
+
+ predicate headUsesContent(Content f) { this = TFrontHead(f) }
+}
+
+class AccessPathFrontNil extends AccessPathFront, TFrontNil {
+ override string toString() {
+ exists(DataFlowType t | this = TFrontNil(t) | result = ppReprType(t))
+ }
+
+ override DataFlowType getType() { this = TFrontNil(result) }
+
+ override boolean toBoolNonEmpty() { result = false }
+}
+
+class AccessPathFrontHead extends AccessPathFront, TFrontHead {
+ override string toString() { exists(Content f | this = TFrontHead(f) | result = f.toString()) }
+
+ override DataFlowType getType() {
+ exists(Content head | this = TFrontHead(head) | result = head.getContainerType())
+ }
+
+ override boolean toBoolNonEmpty() { result = true }
+}
+
+/** An optional access path front. */
+class AccessPathFrontOption extends TAccessPathFrontOption {
+ string toString() {
+ this = TAccessPathFrontNone() and result = ""
+ or
+ this = TAccessPathFrontSome(any(AccessPathFront apf | result = apf.toString()))
+ }
+}
From 70f87b59d2fed87ee2ccfff4347a46069f4b839f Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Wed, 6 May 2020 09:34:23 +0100
Subject: [PATCH 064/157] Data flow: Support stores into nodes that are not
`PostUpdateNode`s.
cf https://github.com/github/codeql/pull/3312
---
.../go/dataflow/internal/DataFlowImpl.qll | 4 +--
.../dataflow/internal/DataFlowImplCommon.qll | 25 ++++++++++++++-----
2 files changed, 21 insertions(+), 8 deletions(-)
diff --git a/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll b/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
index db0fbcf7130..9587ea5f274 100644
--- a/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
+++ b/ql/src/semmle/go/dataflow/internal/DataFlowImpl.qll
@@ -1060,8 +1060,8 @@ private module LocalFlowBigStep {
jumpStep(_, node, config) or
additionalJumpStep(_, node, config) or
node instanceof ParameterNode or
- node instanceof OutNode or
- node instanceof PostUpdateNode or
+ node instanceof OutNodeExt or
+ store(_, _, node) or
read(_, _, node) or
node instanceof CastNode
)
diff --git a/ql/src/semmle/go/dataflow/internal/DataFlowImplCommon.qll b/ql/src/semmle/go/dataflow/internal/DataFlowImplCommon.qll
index b241a574c97..852f54974e2 100644
--- a/ql/src/semmle/go/dataflow/internal/DataFlowImplCommon.qll
+++ b/ql/src/semmle/go/dataflow/internal/DataFlowImplCommon.qll
@@ -415,8 +415,7 @@ class CastingNode extends Node {
CastingNode() {
this instanceof ParameterNode or
this instanceof CastNode or
- this instanceof OutNode or
- this.(PostUpdateNode).getPreUpdateNode() instanceof ArgumentNode
+ this instanceof OutNodeExt
}
}
@@ -564,6 +563,18 @@ class ReturnNodeExt extends Node {
}
}
+/**
+ * A node to which data can flow from a call. Either an ordinary out node
+ * or a post-update node associated with a call argument.
+ */
+class OutNodeExt extends Node {
+ OutNodeExt() {
+ this instanceof OutNode
+ or
+ this.(PostUpdateNode).getPreUpdateNode() instanceof ArgumentNode
+ }
+}
+
/**
* An extended return kind. A return kind describes how data can be returned
* from a callable. This can either be through a returned value or an updated
@@ -574,7 +585,7 @@ abstract class ReturnKindExt extends TReturnKindExt {
abstract string toString();
/** Gets a node corresponding to data flow out of `call`. */
- abstract Node getAnOutNode(DataFlowCall call);
+ abstract OutNodeExt getAnOutNode(DataFlowCall call);
}
class ValueReturnKind extends ReturnKindExt, TValueReturn {
@@ -586,7 +597,9 @@ class ValueReturnKind extends ReturnKindExt, TValueReturn {
override string toString() { result = kind.toString() }
- override Node getAnOutNode(DataFlowCall call) { result = getAnOutNode(call, this.getKind()) }
+ override OutNodeExt getAnOutNode(DataFlowCall call) {
+ result = getAnOutNode(call, this.getKind())
+ }
}
class ParamUpdateReturnKind extends ReturnKindExt, TParamUpdate {
@@ -598,9 +611,9 @@ class ParamUpdateReturnKind extends ReturnKindExt, TParamUpdate {
override string toString() { result = "param update " + pos }
- override PostUpdateNode getAnOutNode(DataFlowCall call) {
+ override OutNodeExt getAnOutNode(DataFlowCall call) {
exists(ArgumentNode arg |
- result.getPreUpdateNode() = arg and
+ result.(PostUpdateNode).getPreUpdateNode() = arg and
arg.argumentOf(call, this.getPosition())
)
}
From 994536e93bd1b8f33d961631794abe0259d314eb Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Thu, 7 May 2020 11:46:31 +0100
Subject: [PATCH 065/157] Add change note.
---
change-notes/2020-05-07-update-data-flow.md | 3 +++
1 file changed, 3 insertions(+)
create mode 100644 change-notes/2020-05-07-update-data-flow.md
diff --git a/change-notes/2020-05-07-update-data-flow.md b/change-notes/2020-05-07-update-data-flow.md
new file mode 100644
index 00000000000..6d67c59b041
--- /dev/null
+++ b/change-notes/2020-05-07-update-data-flow.md
@@ -0,0 +1,3 @@
+lgtm,codescanning
+* The data-flow library has been improved, which affects and improves most security queries. In particular,
+ flow through functions involving nested field reads and writes is now modeled more fully.
From 3d10ec7e51544b7865c3d9ca3ae518c58dc9e0ec Mon Sep 17 00:00:00 2001
From: Porcupiney Hairs
Date: Mon, 11 May 2020 03:13:01 +0530
Subject: [PATCH 066/157] remove some obvious false positives and include
changes from review
---
.../CWE-807/SensitiveConditionBypass.qhelp | 5 +-
.../CWE-807/SensitiveConditionBypass.ql | 31 ++++------
.../CWE-807/SensitiveConditionBypass.qll | 37 ++++++++----
.../CWE-807/SensitiveConditionBypass.expected | 3 -
ql/test/experimental/CWE-807/condition.go | 60 ++++++++++++++++++-
5 files changed, 98 insertions(+), 38 deletions(-)
delete mode 100644 ql/test/experimental/CWE-807/SensitiveConditionBypass.expected
diff --git a/ql/src/experimental/CWE-807/SensitiveConditionBypass.qhelp b/ql/src/experimental/CWE-807/SensitiveConditionBypass.qhelp
index 67422e53a28..c75a1b71c0c 100644
--- a/ql/src/experimental/CWE-807/SensitiveConditionBypass.qhelp
+++ b/ql/src/experimental/CWE-807/SensitiveConditionBypass.qhelp
@@ -13,7 +13,8 @@ it results an attacker gaining access to the sensitive block.
Never decide whether to authenticate a user based on data that may be controlled by that user.
If necessary, ensure that the data is validated extensively when it is input before any
authentication checks are performed.
-
+
+
It is still possible to have a system that "remembers" users, thus not requiring
the user to login on every interaction. For example, personalization settings can be applied
without authentication because this is not sensitive information. However, users
@@ -21,7 +22,7 @@ should be allowed to take sensitive actions only when they have been fully authe
-The following example shows a comparision where an user controlled
+The following example shows a comparison where a user-controlled
expression is used to guard a sensitive method. This should be avoided.:
diff --git a/ql/src/experimental/CWE-807/SensitiveConditionBypass.ql b/ql/src/experimental/CWE-807/SensitiveConditionBypass.ql
index 61798836e77..119d5f9961f 100644
--- a/ql/src/experimental/CWE-807/SensitiveConditionBypass.ql
+++ b/ql/src/experimental/CWE-807/SensitiveConditionBypass.ql
@@ -1,35 +1,30 @@
-/**
- * @name User-controlled bypassing of sensitive action
- * @description This query tests for user-controlled bypassing
- * of sensitive actions.
- * @id go/sensitive-condition-bypass
- * @kind problem
- * @problem.severity high
- * @tags external/cwe/cwe-807
- * external/cwe/cwe-247
- * external/cwe/cwe-350
- */
-
+// /**
+// * @name User-controlled bypassing of sensitive action
+// * @description This query tests for user-controlled bypassing
+// * of sensitive actions.
+// * @id go/sensitive-condition-bypass
+// * @kind problem
+// * @problem.severity warning
+// * @tags external/cwe/cwe-807
+// * external/cwe/cwe-247
+// * external/cwe/cwe-350
+// */
import go
import SensitiveConditionBypass
from
ControlFlow::ConditionGuardNode guard, DataFlow::Node sensitiveSink,
SensitiveExpr::Classification classification, Configuration config, DataFlow::PathNode source,
- DataFlow::PathNode operand, DataFlow::PathNode constOperand, DataFlow::PathNode constSource,
- ComparisonExpr comp, ConstConfiguration constConfig
+ DataFlow::PathNode operand, ComparisonExpr comp
where
// there should be a flow between source and the operand sink
config.hasFlowPath(source, operand) and
- // A constant string value should flow to a sink
- constConfig.hasFlowPath(constSource, constOperand) and
// both the operand should belong to the same comparision expression
operand.getNode().asExpr() = comp.getAnOperand() and
- constOperand.getNode().asExpr() = comp.getAnOperand() and
// get the ConditionGuardNode corresponding to the comparision expr.
guard.getCondition() = comp and
// the sink `sensitiveSink` should be sensitive,
isSensitive(sensitiveSink, classification) and
// the guard should control the sink
guard.dominates(sensitiveSink.getBasicBlock())
-select comp, "This sensitive comparision check can potentially be bypassed"
+select comp, "This sensitive comparision check can potentially be bypassed."
diff --git a/ql/src/experimental/CWE-807/SensitiveConditionBypass.qll b/ql/src/experimental/CWE-807/SensitiveConditionBypass.qll
index 77a503e7dc7..065282b0531 100644
--- a/ql/src/experimental/CWE-807/SensitiveConditionBypass.qll
+++ b/ql/src/experimental/CWE-807/SensitiveConditionBypass.qll
@@ -22,6 +22,26 @@ predicate isSensitive(DataFlow::Node sink, SensitiveExpr::Classification type) {
exists(SensitiveAction a | a = sink and type = SensitiveExpr::secret())
}
+private class ConstComparisonExpr extends ComparisonExpr {
+ string constString;
+
+ ConstComparisonExpr() {
+ exists(DataFlow::Node n |
+ n.getASuccessor*() = DataFlow::exprNode(this.getAnOperand()) and
+ constString = n.getStringValue()
+ )
+ }
+
+ predicate isPotentialFalsePositive() {
+ // if its an empty string
+ constString.length() = 0 or
+ // // if it is uri path
+ constString.matches("/%") or
+ constString.matches("%/") or
+ constString.matches("%/%")
+ }
+}
+
/**
* A data-flow configuration for reasoning about
* user-controlled bypassing of sensitive actions.
@@ -40,18 +60,9 @@ class Configuration extends TaintTracking::Configuration {
}
override predicate isSink(DataFlow::Node sink) {
- exists(ComparisonExpr c | c.getAnOperand() = sink.asExpr())
- }
-}
-
-class ConstConfiguration extends DataFlow::Configuration {
- ConstConfiguration() { this = "Constant expression flow" }
-
- override predicate isSource(DataFlow::Node source) {
- exists(string val | source.getStringValue() = val)
- }
-
- override predicate isSink(DataFlow::Node sink) {
- exists(ComparisonExpr c | c.getAnOperand() = sink.asExpr())
+ exists(ConstComparisonExpr c |
+ c.getAnOperand() = sink.asExpr() and
+ not c.isPotentialFalsePositive()
+ )
}
}
diff --git a/ql/test/experimental/CWE-807/SensitiveConditionBypass.expected b/ql/test/experimental/CWE-807/SensitiveConditionBypass.expected
deleted file mode 100644
index dcc3edfce51..00000000000
--- a/ql/test/experimental/CWE-807/SensitiveConditionBypass.expected
+++ /dev/null
@@ -1,3 +0,0 @@
-| condition.go:14:5:14:34 | ...!=... | This sensitive comparision check can potentially be bypassed |
-| condition.go:22:5:22:35 | ...!=... | This sensitive comparision check can potentially be bypassed |
-| condition.go:30:5:30:35 | ...!=... | This sensitive comparision check can potentially be bypassed |
diff --git a/ql/test/experimental/CWE-807/condition.go b/ql/test/experimental/CWE-807/condition.go
index d8861b53201..ff12dfbea0f 100644
--- a/ql/test/experimental/CWE-807/condition.go
+++ b/ql/test/experimental/CWE-807/condition.go
@@ -5,11 +5,13 @@ import (
"net/http"
)
-func use(xs ...interface{}) {}
+func use(xs ...interface{}) {}
+func t(xs ...interface{}) string { return "sadsad" }
+func login(xs ...interface{}) {}
var test = "localhost"
-// bad both are from remote sources
+// Should alert as authkey is sensitive
func ex1(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Origin") != test {
authkey := "randomDatta"
@@ -17,6 +19,7 @@ func ex1(w http.ResponseWriter, r *http.Request) {
}
}
+// Should alert as authkey is sensitive
func ex2(w http.ResponseWriter, r *http.Request) {
test2 := "test"
if r.Header.Get("Origin") != test2 {
@@ -25,9 +28,62 @@ func ex2(w http.ResponseWriter, r *http.Request) {
}
}
+// Should alert as login() is sensitive
func ex3(w http.ResponseWriter, r *http.Request) {
test2 := "test"
if r.Header.Get("Origin") != test2 {
login()
}
}
+
+// no alert as we can't say if the rhs resolves to a fixed pattern every time.
+func ex4(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("Origin") != t()+r.Header.Get("Origin") {
+ login()
+ }
+}
+
+// No alert as use is not sensitive
+func ex5(w http.ResponseWriter, r *http.Request) {
+ test2 := "test"
+ if r.Header.Get("Origin") != test2 {
+ use()
+ }
+}
+
+// Should not alert as test is against empty string
+func ex6(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("Origin") != "" {
+ login()
+ }
+}
+
+// Should not alert as test is against uri path
+func ex7(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("Origin") != "/asd/" {
+ login()
+ }
+}
+
+// Should not alert as test is against uri path
+func ex8(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("Origin") != "/asd/a" {
+ login()
+ }
+}
+
+// Should not alert as test is against uri path
+func ex9(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("Origin") != "/asd" {
+ login()
+ }
+}
+
+// Should not alert as test is against uri path
+func ex10(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("Origin") != "asd/" {
+ login()
+ }
+}
+
+func main() {}
From 4aba80b0bdc1ff30479851ca59cfb3c877dfd4c3 Mon Sep 17 00:00:00 2001
From: Porcupiney Hairs
Date: Mon, 11 May 2020 04:05:41 +0530
Subject: [PATCH 067/157] include changes from review
---
.../experimental/CWE-840/ConditionBypass.qhelp | 14 +++-----------
ql/src/experimental/CWE-840/ConditionBypass.ql | 17 ++++++-----------
.../experimental/CWE-840/ConditionBypassBad.go | 2 +-
3 files changed, 10 insertions(+), 23 deletions(-)
diff --git a/ql/src/experimental/CWE-840/ConditionBypass.qhelp b/ql/src/experimental/CWE-840/ConditionBypass.qhelp
index 42bdc95e0f3..70525e1b0d9 100644
--- a/ql/src/experimental/CWE-840/ConditionBypass.qhelp
+++ b/ql/src/experimental/CWE-840/ConditionBypass.qhelp
@@ -8,15 +8,15 @@ a bypass of the conditional check as the attacker may modify parameters to match
-To guard against this, it is advisable to avoid framing a comparision
+To guard against this, it is advisable to avoid framing a comparison
where both sides are untrusted user inputs.
Instead, use a configuration to store and access the values required.
-The following example shows a comparision where both the sides
-are from attacker controlled request headers. This should be avoided.:
+The following example shows a comparison where both the sides
+are from attacker-controlled request headers. This should be avoided.:
@@ -24,12 +24,4 @@ One way to remedy the problem is to test against a value stored in a configurati
-
-
- MITRE:
-
- CWE-840.
-
-
-
\ No newline at end of file
diff --git a/ql/src/experimental/CWE-840/ConditionBypass.ql b/ql/src/experimental/CWE-840/ConditionBypass.ql
index 8fc7fb3d249..8f4ddcbf5a6 100644
--- a/ql/src/experimental/CWE-840/ConditionBypass.ql
+++ b/ql/src/experimental/CWE-840/ConditionBypass.ql
@@ -1,11 +1,10 @@
/**
* @name Comparision Expression Check Bypass
- * @description This query tests for user-controlled bypassing
- * of a comparision expression i.e. instances where both the
- * lhs and rhs of a comparision are user controlled.
+ * @description Comparing two user controlled inputs may
+ * lead to an effective bypass of the comparison check.
* @id go/condition-bypass
* @kind problem
- * @problem.severity medium
+ * @problem.severity warning
* @tags external/cwe/cwe-840
*/
@@ -20,11 +19,7 @@ class Configuration extends TaintTracking::Configuration {
override predicate isSource(DataFlow::Node source) {
source instanceof UntrustedFlowSource
or
- exists(string fieldName |
- source.(DataFlow::FieldReadNode).getField().hasQualifiedName("net/http", "Request", fieldName)
- |
- fieldName = "Host"
- )
+ source = any(Field f | f.hasQualifiedName("net/http", "Request", "Host")).getARead()
}
override predicate isSink(DataFlow::Node sink) {
@@ -40,5 +35,5 @@ where
rhs.getNode().asExpr() = c.getRightOperand() and
config.hasFlowPath(lhsSource, lhs) and
lhs.getNode().asExpr() = c.getLeftOperand()
-select c, "This comparision is between user controlled operands and "
-+ "hence may be bypassed."
+select c, "This comparision is between user controlled operands derived from $@", lhsSource,
+ " and $@", rhsSource, "hence may be bypassed."
diff --git a/ql/src/experimental/CWE-840/ConditionBypassBad.go b/ql/src/experimental/CWE-840/ConditionBypassBad.go
index 6ec70c694bd..3174d8f8012 100644
--- a/ql/src/experimental/CWE-840/ConditionBypassBad.go
+++ b/ql/src/experimental/CWE-840/ConditionBypassBad.go
@@ -4,8 +4,8 @@ import (
"net/http"
)
-// bad the origin and the host headers are user controlled
func ex1(w http.ResponseWriter, r *http.Request) {
+ // bad the origin and the host headers are user controlled
if r.Header.Get("Origin") != "http://"+r.Host {
//do something
}
From b32ac2a47fd5ac65bcfac13c98f82faa8cedb1c7 Mon Sep 17 00:00:00 2001
From: Porcupiney Hairs
Date: Mon, 11 May 2020 04:51:17 +0530
Subject: [PATCH 068/157] fix tests
---
ql/test/experimental/CWE-840/ConditionBypass.expected | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/ql/test/experimental/CWE-840/ConditionBypass.expected b/ql/test/experimental/CWE-840/ConditionBypass.expected
index 73f3b39a0cc..86e96b2f6b8 100644
--- a/ql/test/experimental/CWE-840/ConditionBypass.expected
+++ b/ql/test/experimental/CWE-840/ConditionBypass.expected
@@ -1,2 +1,2 @@
-| condition.go:9:5:9:46 | ...!=... | This comparision is between user controlled operands and hence may be bypassed. |
-| condition.go:16:5:16:62 | ...!=... | This comparision is between user controlled operands and hence may be bypassed. |
+| condition.go:9:5:9:46 | ...!=... | This comparision is between user controlled operands derived from $@ | condition.go:9:5:9:12 | selection of Header : Header | and $@ | condition.go:9:41:9:46 | selection of Host : string | hence may be bypassed. |
+| condition.go:16:5:16:62 | ...!=... | This comparision is between user controlled operands derived from $@ | condition.go:16:5:16:12 | selection of Header : Header | and $@ | condition.go:16:41:16:48 | selection of Header : Header | hence may be bypassed. |
From 181c03ebf3f3407797a81c4356fc3519af2cd7fa Mon Sep 17 00:00:00 2001
From: Sauyon Lee
Date: Thu, 7 May 2020 06:26:18 -0700
Subject: [PATCH 069/157] Add support for ioutil TempFile and TempDir
---
ql/src/semmle/go/frameworks/Stdlib.qll | 4 +++-
.../query-tests/Security/CWE-022/TaintedPath.expected | 8 ++++++++
ql/test/query-tests/Security/CWE-022/ZipSlip.expected | 8 ++++----
ql/test/query-tests/Security/CWE-022/tst.go | 9 +++++++++
4 files changed, 24 insertions(+), 5 deletions(-)
diff --git a/ql/src/semmle/go/frameworks/Stdlib.qll b/ql/src/semmle/go/frameworks/Stdlib.qll
index 36c9516a449..142e592b31e 100644
--- a/ql/src/semmle/go/frameworks/Stdlib.qll
+++ b/ql/src/semmle/go/frameworks/Stdlib.qll
@@ -166,11 +166,13 @@ module IoUtil {
exists(string fn | getTarget().hasQualifiedName("io/ioutil", fn) |
fn = "ReadDir" or
fn = "ReadFile" or
+ fn = "TempDir" or
+ fn = "TempFile" or
fn = "WriteFile"
)
}
- override DataFlow::Node getAPathArgument() { result = getArgument(0) }
+ override DataFlow::Node getAPathArgument() { result = getAnArgument() }
}
/**
diff --git a/ql/test/query-tests/Security/CWE-022/TaintedPath.expected b/ql/test/query-tests/Security/CWE-022/TaintedPath.expected
index 04e3459926c..39e4380b136 100644
--- a/ql/test/query-tests/Security/CWE-022/TaintedPath.expected
+++ b/ql/test/query-tests/Security/CWE-022/TaintedPath.expected
@@ -1,10 +1,18 @@
edges
| TaintedPath.go:10:10:10:14 | selection of URL : pointer type | TaintedPath.go:13:29:13:32 | path |
| TaintedPath.go:10:10:10:14 | selection of URL : pointer type | TaintedPath.go:17:28:17:61 | call to Join |
+| tst.go:14:22:14:39 | call to FormFile : tuple type | tst.go:17:41:17:47 | implicit dereference : FileHeader |
+| tst.go:14:22:14:39 | call to FormFile : tuple type | tst.go:17:41:17:56 | selection of Filename |
+| tst.go:17:41:17:47 | implicit dereference : FileHeader | tst.go:17:41:17:47 | implicit dereference : FileHeader |
+| tst.go:17:41:17:47 | implicit dereference : FileHeader | tst.go:17:41:17:56 | selection of Filename |
nodes
| TaintedPath.go:10:10:10:14 | selection of URL : pointer type | semmle.label | selection of URL : pointer type |
| TaintedPath.go:13:29:13:32 | path | semmle.label | path |
| TaintedPath.go:17:28:17:61 | call to Join | semmle.label | call to Join |
+| tst.go:14:22:14:39 | call to FormFile : tuple type | semmle.label | call to FormFile : tuple type |
+| tst.go:17:41:17:47 | implicit dereference : FileHeader | semmle.label | implicit dereference : FileHeader |
+| tst.go:17:41:17:56 | selection of Filename | semmle.label | selection of Filename |
#select
| TaintedPath.go:13:29:13:32 | path | TaintedPath.go:10:10:10:14 | selection of URL : pointer type | TaintedPath.go:13:29:13:32 | path | This path depends on $@. | TaintedPath.go:10:10:10:14 | selection of URL | a user-provided value |
| TaintedPath.go:17:28:17:61 | call to Join | TaintedPath.go:10:10:10:14 | selection of URL : pointer type | TaintedPath.go:17:28:17:61 | call to Join | This path depends on $@. | TaintedPath.go:10:10:10:14 | selection of URL | a user-provided value |
+| tst.go:17:41:17:56 | selection of Filename | tst.go:14:22:14:39 | call to FormFile : tuple type | tst.go:17:41:17:56 | selection of Filename | This path depends on $@. | tst.go:14:22:14:39 | call to FormFile | a user-provided value |
diff --git a/ql/test/query-tests/Security/CWE-022/ZipSlip.expected b/ql/test/query-tests/Security/CWE-022/ZipSlip.expected
index 212eb53924d..d26767d52bf 100644
--- a/ql/test/query-tests/Security/CWE-022/ZipSlip.expected
+++ b/ql/test/query-tests/Security/CWE-022/ZipSlip.expected
@@ -1,15 +1,15 @@
edges
| ZipSlip.go:12:24:12:29 | selection of Name : string | ZipSlip.go:14:20:14:20 | p |
| tarslip.go:14:23:14:33 | selection of Name : string | tarslip.go:14:14:14:34 | call to Dir |
-| tst.go:15:11:15:16 | selection of Name : string | tst.go:20:20:20:23 | path |
+| tst.go:24:11:24:16 | selection of Name : string | tst.go:29:20:29:23 | path |
nodes
| ZipSlip.go:12:24:12:29 | selection of Name : string | semmle.label | selection of Name : string |
| ZipSlip.go:14:20:14:20 | p | semmle.label | p |
| tarslip.go:14:14:14:34 | call to Dir | semmle.label | call to Dir |
| tarslip.go:14:23:14:33 | selection of Name : string | semmle.label | selection of Name : string |
-| tst.go:15:11:15:16 | selection of Name : string | semmle.label | selection of Name : string |
-| tst.go:20:20:20:23 | path | semmle.label | path |
+| tst.go:24:11:24:16 | selection of Name : string | semmle.label | selection of Name : string |
+| tst.go:29:20:29:23 | path | semmle.label | path |
#select
| ZipSlip.go:12:24:12:29 | selection of Name | ZipSlip.go:12:24:12:29 | selection of Name : string | ZipSlip.go:14:20:14:20 | p | Unsanitized archive entry, which may contain '..', is used in a $@. | ZipSlip.go:14:20:14:20 | p | file system operation |
| tarslip.go:14:23:14:33 | selection of Name | tarslip.go:14:23:14:33 | selection of Name : string | tarslip.go:14:14:14:34 | call to Dir | Unsanitized archive entry, which may contain '..', is used in a $@. | tarslip.go:14:14:14:34 | call to Dir | file system operation |
-| tst.go:15:11:15:16 | selection of Name | tst.go:15:11:15:16 | selection of Name : string | tst.go:20:20:20:23 | path | Unsanitized archive entry, which may contain '..', is used in a $@. | tst.go:20:20:20:23 | path | file system operation |
+| tst.go:24:11:24:16 | selection of Name | tst.go:24:11:24:16 | selection of Name : string | tst.go:29:20:29:23 | path | Unsanitized archive entry, which may contain '..', is used in a $@. | tst.go:29:20:29:23 | path | file system operation |
diff --git a/ql/test/query-tests/Security/CWE-022/tst.go b/ql/test/query-tests/Security/CWE-022/tst.go
index 766f37029a7..8cb4fe5ee56 100644
--- a/ql/test/query-tests/Security/CWE-022/tst.go
+++ b/ql/test/query-tests/Security/CWE-022/tst.go
@@ -3,12 +3,21 @@ package main
import (
"archive/zip"
"io/ioutil"
+ "net/http"
"os"
"path/filepath"
"regexp"
"strings"
)
+func uploadFile(w http.ResponseWriter, r *http.Request) {
+ file, handler, _ := r.FormFile("file")
+ // err handling
+ defer file.Close()
+ tempFile, _ := ioutil.TempFile("/tmp", handler.Filename) // NOT OK
+ // do stuff with tempFile
+}
+
func unzip2(f string, root string) {
r, _ := zip.OpenReader(f)
for _, f := range r.File {
From 5df81d32103fbf6203dc6e643cd241d4994eedbc Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Mon, 11 May 2020 12:37:14 +0300
Subject: [PATCH 070/157] Apply suggestions from code review
Co-authored-by: Max Schaefer <54907921+max-schaefer@users.noreply.github.com>
---
.../CWE-681/IncorrectNumericConversion.qhelp | 22 +++++++++----------
.../CWE-681/IncorrectNumericConversion.ql | 5 +++--
2 files changed, 14 insertions(+), 13 deletions(-)
diff --git a/ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp b/ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp
index 606131ce6d9..4174e70cd62 100644
--- a/ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp
+++ b/ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp
@@ -5,27 +5,27 @@
If a numeric value string is parsed using strconv.Atoi into an int, and subsequently that int
- is converted into another type of a lower bit size, the result can produce unexpected values.
+ is converted into another type of a smaller size, the result can produce unexpected values.
- This also applie to the results of strconv.ParseFloat, strconv.ParseInt,
- and strconv.ParseUint when the specified bit size is higher than the bit size of the
+ This also applies to the results of strconv.ParseFloat, strconv.ParseInt,
+ and strconv.ParseUint when the specified size is larger than the size of the
type that number is converted to.
- If you need to parse numeric values with specific bit sizes, avoid strconv.Atoi, and, instead,
+ If you need to parse numeric values with specific bit sizes, avoid strconv.Atoi, and instead
use the functions specific to each type (strconv.ParseFloat, strconv.ParseInt,
strconv.ParseUint) that also allow to specify the wanted bit size.
- When using those functions, be careful to not convert the result to another type with a lower bit size than
+ When using those functions, be careful to not convert the result to another type with a smaller bit size than
the bit size you specified when parsing the number.
If this is not possible, then add upper (and lower) bound checks specific to each type and
- bit size (you can find the min and max value for each type in the `math` package).
+ bit size (you can find the minimum and maximum value for each type in the `math` package).
@@ -35,13 +35,13 @@
- The bounds are not checked, so this means that if the provided number is greater than max int32,
+ The bounds are not checked, so this means that if the provided number is greater than the maximum value of type int32,
the resulting value from the conversion will be different from the actual provided value.
To avoid unexpected values, you should either use the other functions provided by the strconv
- package to parse the specific types and bit sizes; in this case, strconv.ParseInt as you
- can see in parseAllocateGood2 function; or check bounds as in parseAllocateGood1
+ package to parse the specific types and bit sizes as shown in the
+ parseAllocateGood2 function; or check bounds as in the parseAllocateGood1
function.
@@ -53,7 +53,7 @@
- If the provided number is greater than max int32, the resulting value from the conversion will be
+ If the provided number is greater than the maximum value of type int32, the resulting value from the conversion will be
different from the actual provided value.
@@ -70,4 +70,4 @@
mitre.org: CWE-190: Integer Overflow or Wraparound.
-
\ No newline at end of file
+
diff --git a/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql b/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
index 1746f70701a..e9fa0ad8d95 100644
--- a/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
+++ b/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
@@ -1,10 +1,11 @@
/**
- * @name Incorrect Conversion between Numeric Types
+ * @name Incorrect conversion between numeric types
* @description Converting the result of strconv.Atoi (and other parsers from strconv package)
- * to numeric types of lower bit size can produce unexpected values.
+ * to numeric types of smaller bit size can produce unexpected values.
* @kind path-problem
* @id go/incorrect-numeric-conversion
* @tags security
+ * external/cwe/cwe-190
* external/cwe/cwe-681
*/
From 78201a2c5fb05b219c0fc855af5651f38bd4b9d6 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Mon, 11 May 2020 10:47:00 +0100
Subject: [PATCH 071/157] Rename `ConditionBypass*` to `ConditionalBypass*` for
consistency with other languages.
---
.../{ConditionBypass.qhelp => ConditionalBypass.qhelp} | 4 ++--
.../CWE-840/{ConditionBypass.ql => ConditionalBypass.ql} | 0
.../{ConditionBypassBad.go => ConditionalBypassBad.go} | 0
.../{ConditionBypassGood.go => ConditionalBypassGood.go} | 0
ql/test/experimental/CWE-840/ConditionBypass.qlref | 1 -
.../{ConditionBypass.expected => ConditionalBypass.expected} | 0
ql/test/experimental/CWE-840/ConditionalBypass.qlref | 1 +
7 files changed, 3 insertions(+), 3 deletions(-)
rename ql/src/experimental/CWE-840/{ConditionBypass.qhelp => ConditionalBypass.qhelp} (88%)
rename ql/src/experimental/CWE-840/{ConditionBypass.ql => ConditionalBypass.ql} (100%)
rename ql/src/experimental/CWE-840/{ConditionBypassBad.go => ConditionalBypassBad.go} (100%)
rename ql/src/experimental/CWE-840/{ConditionBypassGood.go => ConditionalBypassGood.go} (100%)
delete mode 100644 ql/test/experimental/CWE-840/ConditionBypass.qlref
rename ql/test/experimental/CWE-840/{ConditionBypass.expected => ConditionalBypass.expected} (100%)
create mode 100644 ql/test/experimental/CWE-840/ConditionalBypass.qlref
diff --git a/ql/src/experimental/CWE-840/ConditionBypass.qhelp b/ql/src/experimental/CWE-840/ConditionalBypass.qhelp
similarity index 88%
rename from ql/src/experimental/CWE-840/ConditionBypass.qhelp
rename to ql/src/experimental/CWE-840/ConditionalBypass.qhelp
index 70525e1b0d9..5151c402cef 100644
--- a/ql/src/experimental/CWE-840/ConditionBypass.qhelp
+++ b/ql/src/experimental/CWE-840/ConditionalBypass.qhelp
@@ -18,10 +18,10 @@ Instead, use a configuration to store and access the values required.
The following example shows a comparison where both the sides
are from attacker-controlled request headers. This should be avoided.:
-
+
One way to remedy the problem is to test against a value stored in a configuration:
-
+
\ No newline at end of file
diff --git a/ql/src/experimental/CWE-840/ConditionBypass.ql b/ql/src/experimental/CWE-840/ConditionalBypass.ql
similarity index 100%
rename from ql/src/experimental/CWE-840/ConditionBypass.ql
rename to ql/src/experimental/CWE-840/ConditionalBypass.ql
diff --git a/ql/src/experimental/CWE-840/ConditionBypassBad.go b/ql/src/experimental/CWE-840/ConditionalBypassBad.go
similarity index 100%
rename from ql/src/experimental/CWE-840/ConditionBypassBad.go
rename to ql/src/experimental/CWE-840/ConditionalBypassBad.go
diff --git a/ql/src/experimental/CWE-840/ConditionBypassGood.go b/ql/src/experimental/CWE-840/ConditionalBypassGood.go
similarity index 100%
rename from ql/src/experimental/CWE-840/ConditionBypassGood.go
rename to ql/src/experimental/CWE-840/ConditionalBypassGood.go
diff --git a/ql/test/experimental/CWE-840/ConditionBypass.qlref b/ql/test/experimental/CWE-840/ConditionBypass.qlref
deleted file mode 100644
index d107d9110d5..00000000000
--- a/ql/test/experimental/CWE-840/ConditionBypass.qlref
+++ /dev/null
@@ -1 +0,0 @@
-experimental/CWE-840/ConditionBypass.ql
diff --git a/ql/test/experimental/CWE-840/ConditionBypass.expected b/ql/test/experimental/CWE-840/ConditionalBypass.expected
similarity index 100%
rename from ql/test/experimental/CWE-840/ConditionBypass.expected
rename to ql/test/experimental/CWE-840/ConditionalBypass.expected
diff --git a/ql/test/experimental/CWE-840/ConditionalBypass.qlref b/ql/test/experimental/CWE-840/ConditionalBypass.qlref
new file mode 100644
index 00000000000..6d167616055
--- /dev/null
+++ b/ql/test/experimental/CWE-840/ConditionalBypass.qlref
@@ -0,0 +1 @@
+experimental/CWE-840/ConditionalBypass.ql
From 287dda0ab52c4d539abc264a2938c41440578cac Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Mon, 11 May 2020 11:05:40 +0100
Subject: [PATCH 072/157] Minor cleanup in query and tests.
---
.../experimental/CWE-840/ConditionalBypass.ql | 17 +++++++++--------
.../CWE-840/ConditionalBypass.expected | 4 ++--
2 files changed, 11 insertions(+), 10 deletions(-)
diff --git a/ql/src/experimental/CWE-840/ConditionalBypass.ql b/ql/src/experimental/CWE-840/ConditionalBypass.ql
index 8f4ddcbf5a6..621e02fdbe7 100644
--- a/ql/src/experimental/CWE-840/ConditionalBypass.ql
+++ b/ql/src/experimental/CWE-840/ConditionalBypass.ql
@@ -1,8 +1,8 @@
/**
- * @name Comparision Expression Check Bypass
- * @description Comparing two user controlled inputs may
- * lead to an effective bypass of the comparison check.
- * @id go/condition-bypass
+ * @name User-controlled bypass of condition
+ * @description A check that compares two user-controlled inputs with each other can be bypassed
+ * by a malicious user.
+ * @id go/user-controlled-bypass
* @kind problem
* @problem.severity warning
* @tags external/cwe/cwe-840
@@ -11,10 +11,10 @@
import go
/**
- * A data-flow configuration for reasoning about Condition Bypass.
+ * A taint-tracking configuration for reasoning about conditional bypass.
*/
class Configuration extends TaintTracking::Configuration {
- Configuration() { this = "Comparision Expression Check Bypass" }
+  Configuration() { this = "ConditionalBypass" }
override predicate isSource(DataFlow::Node source) {
source instanceof UntrustedFlowSource
@@ -35,5 +35,6 @@ where
rhs.getNode().asExpr() = c.getRightOperand() and
config.hasFlowPath(lhsSource, lhs) and
lhs.getNode().asExpr() = c.getLeftOperand()
-select c, "This comparision is between user controlled operands derived from $@", lhsSource,
- " and $@", rhsSource, "hence may be bypassed."
+select c,
+ "This comparison compares user-controlled values from $@ and $@, and hence can be bypassed.",
+ lhsSource, "here", rhsSource, "here"
diff --git a/ql/test/experimental/CWE-840/ConditionalBypass.expected b/ql/test/experimental/CWE-840/ConditionalBypass.expected
index 86e96b2f6b8..931da422399 100644
--- a/ql/test/experimental/CWE-840/ConditionalBypass.expected
+++ b/ql/test/experimental/CWE-840/ConditionalBypass.expected
@@ -1,2 +1,2 @@
-| condition.go:9:5:9:46 | ...!=... | This comparision is between user controlled operands derived from $@ | condition.go:9:5:9:12 | selection of Header : Header | and $@ | condition.go:9:41:9:46 | selection of Host : string | hence may be bypassed. |
-| condition.go:16:5:16:62 | ...!=... | This comparision is between user controlled operands derived from $@ | condition.go:16:5:16:12 | selection of Header : Header | and $@ | condition.go:16:41:16:48 | selection of Header : Header | hence may be bypassed. |
+| condition.go:9:5:9:46 | ...!=... | This comparison compares user-controlled values from $@ and $@, and hence can be bypassed. | condition.go:9:5:9:12 | selection of Header : Header | here | condition.go:9:41:9:46 | selection of Host : string | here |
+| condition.go:16:5:16:62 | ...!=... | This comparison compares user-controlled values from $@ and $@, and hence can be bypassed. | condition.go:16:5:16:12 | selection of Header : Header | here | condition.go:16:41:16:48 | selection of Header : Header | here |
From df9902512fc07192e4e900898d828403801da7d7 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Mon, 11 May 2020 11:05:58 +0100
Subject: [PATCH 073/157] More cleanup in help and tests.
In particular, I have copied over the examples referenced in the qhelp into the test folder and made sure they compile.
---
.../CWE-840/ConditionalBypass.qhelp | 21 +++++++++----------
.../CWE-840/ConditionalBypassBad.go | 4 ++--
.../CWE-840/ConditionalBypassGood.go | 3 ++-
.../CWE-840/ConditionalBypass.expected | 1 +
.../CWE-840/ConditionalBypassBad.go | 12 +++++++++++
.../CWE-840/ConditionalBypassGood.go | 12 +++++++++++
ql/test/experimental/CWE-840/condition.go | 6 +++---
ql/test/experimental/CWE-840/util.go | 9 ++++++++
8 files changed, 51 insertions(+), 17 deletions(-)
create mode 100644 ql/test/experimental/CWE-840/ConditionalBypassBad.go
create mode 100644 ql/test/experimental/CWE-840/ConditionalBypassGood.go
create mode 100644 ql/test/experimental/CWE-840/util.go
diff --git a/ql/src/experimental/CWE-840/ConditionalBypass.qhelp b/ql/src/experimental/CWE-840/ConditionalBypass.qhelp
index 5151c402cef..3a1e5503de2 100644
--- a/ql/src/experimental/CWE-840/ConditionalBypass.qhelp
+++ b/ql/src/experimental/CWE-840/ConditionalBypass.qhelp
@@ -2,26 +2,25 @@
-Testing untrusted user input against untrusted user input results in
-a bypass of the conditional check as the attacker may modify parameters to match.
-
+Conditional checks that compare two values that are both controlled by an untrusted user against
+each other are easy to bypass and should not be used in security-critical contexts.
+
-To guard against this, it is advisable to avoid framing a comparison
-where both sides are untrusted user inputs.
-Instead, use a configuration to store and access the values required.
-
+To guard against bypass, it is advisable to avoid framing a comparison where both sides are
+untrusted user inputs. Instead, use a configuration to store and access the values required.
+
-The following example shows a comparison where both the sides
-are from attacker-controlled request headers. This should be avoided.:
-
+The following example shows a comparison where both the sides are from attacker-controlled request
+headers. This should be avoided:
+
One way to remedy the problem is to test against a value stored in a configuration:
-
+
\ No newline at end of file
diff --git a/ql/src/experimental/CWE-840/ConditionalBypassBad.go b/ql/src/experimental/CWE-840/ConditionalBypassBad.go
index 3174d8f8012..b788dee2009 100644
--- a/ql/src/experimental/CWE-840/ConditionalBypassBad.go
+++ b/ql/src/experimental/CWE-840/ConditionalBypassBad.go
@@ -4,8 +4,8 @@ import (
"net/http"
)
-func ex1(w http.ResponseWriter, r *http.Request) {
- // bad the origin and the host headers are user controlled
+func exampleHandlerBad(w http.ResponseWriter, r *http.Request) {
+ // BAD: the Origin and Host headers are user controlled
if r.Header.Get("Origin") != "http://"+r.Host {
//do something
}
diff --git a/ql/src/experimental/CWE-840/ConditionalBypassGood.go b/ql/src/experimental/CWE-840/ConditionalBypassGood.go
index 987f97a9cd0..635d16d1f8f 100644
--- a/ql/src/experimental/CWE-840/ConditionalBypassGood.go
+++ b/ql/src/experimental/CWE-840/ConditionalBypassGood.go
@@ -4,7 +4,8 @@ import (
"net/http"
)
-func ex1(w http.ResponseWriter, r *http.Request) {
+func exampleHandlerGood(w http.ResponseWriter, r *http.Request) {
+ // GOOD: the configuration is not user controlled
if r.Header.Get("Origin") != config.get("Host") {
//do something
}
diff --git a/ql/test/experimental/CWE-840/ConditionalBypass.expected b/ql/test/experimental/CWE-840/ConditionalBypass.expected
index 931da422399..c57a25bddff 100644
--- a/ql/test/experimental/CWE-840/ConditionalBypass.expected
+++ b/ql/test/experimental/CWE-840/ConditionalBypass.expected
@@ -1,2 +1,3 @@
+| ConditionalBypassBad.go:9:5:9:46 | ...!=... | This comparison compares user-controlled values from $@ and $@, and hence can be bypassed. | ConditionalBypassBad.go:9:5:9:12 | selection of Header : Header | here | ConditionalBypassBad.go:9:41:9:46 | selection of Host : string | here |
| condition.go:9:5:9:46 | ...!=... | This comparison compares user-controlled values from $@ and $@, and hence can be bypassed. | condition.go:9:5:9:12 | selection of Header : Header | here | condition.go:9:41:9:46 | selection of Host : string | here |
| condition.go:16:5:16:62 | ...!=... | This comparison compares user-controlled values from $@ and $@, and hence can be bypassed. | condition.go:16:5:16:12 | selection of Header : Header | here | condition.go:16:41:16:48 | selection of Header : Header | here |
diff --git a/ql/test/experimental/CWE-840/ConditionalBypassBad.go b/ql/test/experimental/CWE-840/ConditionalBypassBad.go
new file mode 100644
index 00000000000..b788dee2009
--- /dev/null
+++ b/ql/test/experimental/CWE-840/ConditionalBypassBad.go
@@ -0,0 +1,12 @@
+package main
+
+import (
+ "net/http"
+)
+
+func exampleHandlerBad(w http.ResponseWriter, r *http.Request) {
+ // BAD: the Origin and Host headers are user controlled
+ if r.Header.Get("Origin") != "http://"+r.Host {
+ //do something
+ }
+}
diff --git a/ql/test/experimental/CWE-840/ConditionalBypassGood.go b/ql/test/experimental/CWE-840/ConditionalBypassGood.go
new file mode 100644
index 00000000000..635d16d1f8f
--- /dev/null
+++ b/ql/test/experimental/CWE-840/ConditionalBypassGood.go
@@ -0,0 +1,12 @@
+package main
+
+import (
+ "net/http"
+)
+
+func exampleHandlerGood(w http.ResponseWriter, r *http.Request) {
+ // GOOD: the configuration is not user controlled
+ if r.Header.Get("Origin") != config.get("Host") {
+ //do something
+ }
+}
diff --git a/ql/test/experimental/CWE-840/condition.go b/ql/test/experimental/CWE-840/condition.go
index b61d72b94e1..7b7b7480c10 100644
--- a/ql/test/experimental/CWE-840/condition.go
+++ b/ql/test/experimental/CWE-840/condition.go
@@ -4,21 +4,21 @@ import (
"net/http"
)
-// bad : taken from https://www.gorillatoolkit.org/pkg/websocket
+// BAD: taken from https://www.gorillatoolkit.org/pkg/websocket
func ex1(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Origin") != "http://"+r.Host {
//do something
}
}
-// bad both are from remote sources
+// BAD: both operands are from remote sources
func ex2(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Origin") != "http://"+r.Header.Get("Header") {
//do something
}
}
-// good
+// GOOD
func ex3(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Origin") != "http://"+"test" {
//do something
diff --git a/ql/test/experimental/CWE-840/util.go b/ql/test/experimental/CWE-840/util.go
new file mode 100644
index 00000000000..9e7a9a27f20
--- /dev/null
+++ b/ql/test/experimental/CWE-840/util.go
@@ -0,0 +1,9 @@
+package main
+
+type Config struct{}
+
+func (_ Config) get(s string) string {
+ return ""
+}
+
+var config = Config{}
From 17dd99d32663d0a0962499f3f334b4d8df8d157d Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Mon, 11 May 2020 11:45:08 +0100
Subject: [PATCH 074/157] Fix frontend errors in Mux tests.
---
.../semmle/go/frameworks/Mux/mux.go | 2 +-
.../Mux/vendor/github.com/gorilla/mux/stub.go | 238 +++++++++++++++++-
2 files changed, 238 insertions(+), 2 deletions(-)
diff --git a/ql/test/library-tests/semmle/go/frameworks/Mux/mux.go b/ql/test/library-tests/semmle/go/frameworks/Mux/mux.go
index 9b70a2122aa..e12ce28daa3 100644
--- a/ql/test/library-tests/semmle/go/frameworks/Mux/mux.go
+++ b/ql/test/library-tests/semmle/go/frameworks/Mux/mux.go
@@ -1,6 +1,6 @@
package main
-//go:generate depstubber -vendor github.com/gorilla/mux "" Vars
+//go:generate depstubber -vendor github.com/gorilla/mux "" Vars,NewRouter
import (
"fmt"
diff --git a/ql/test/library-tests/semmle/go/frameworks/Mux/vendor/github.com/gorilla/mux/stub.go b/ql/test/library-tests/semmle/go/frameworks/Mux/vendor/github.com/gorilla/mux/stub.go
index f5087942ae5..62510300b2d 100644
--- a/ql/test/library-tests/semmle/go/frameworks/Mux/vendor/github.com/gorilla/mux/stub.go
+++ b/ql/test/library-tests/semmle/go/frameworks/Mux/vendor/github.com/gorilla/mux/stub.go
@@ -2,15 +2,251 @@
// This is a simple stub for github.com/gorilla/mux, strictly for use in testing.
// See the LICENSE file for information about the licensing of the original library.
-// Source: github.com/gorilla/mux (exports: ; functions: Vars)
+// Source: github.com/gorilla/mux (exports: ; functions: Vars,NewRouter)
// Package mux is a stub of github.com/gorilla/mux, generated by depstubber.
package mux
import (
http "net/http"
+ url "net/url"
)
+type BuildVarsFunc func(map[string]string) map[string]string
+
+type MatcherFunc func(*http.Request, *RouteMatch) bool
+
+func (_ MatcherFunc) Match(_ *http.Request, _ *RouteMatch) bool {
+ return false
+}
+
+type MiddlewareFunc func(http.Handler) http.Handler
+
+func (_ MiddlewareFunc) Middleware(_ http.Handler) http.Handler {
+ return nil
+}
+
+func NewRouter() *Router {
+ return nil
+}
+
+type Route struct{}
+
+func (_ *Route) BuildOnly() *Route {
+ return nil
+}
+
+func (_ *Route) BuildVarsFunc(_ BuildVarsFunc) *Route {
+ return nil
+}
+
+func (_ *Route) GetError() error {
+ return nil
+}
+
+func (_ *Route) GetHandler() http.Handler {
+ return nil
+}
+
+func (_ *Route) GetHostTemplate() (string, error) {
+ return "", nil
+}
+
+func (_ *Route) GetMethods() ([]string, error) {
+ return nil, nil
+}
+
+func (_ *Route) GetName() string {
+ return ""
+}
+
+func (_ *Route) GetPathRegexp() (string, error) {
+ return "", nil
+}
+
+func (_ *Route) GetPathTemplate() (string, error) {
+ return "", nil
+}
+
+func (_ *Route) GetQueriesRegexp() ([]string, error) {
+ return nil, nil
+}
+
+func (_ *Route) GetQueriesTemplates() ([]string, error) {
+ return nil, nil
+}
+
+func (_ *Route) Handler(_ http.Handler) *Route {
+ return nil
+}
+
+func (_ *Route) HandlerFunc(_ func(http.ResponseWriter, *http.Request)) *Route {
+ return nil
+}
+
+func (_ *Route) Headers(_ ...string) *Route {
+ return nil
+}
+
+func (_ *Route) HeadersRegexp(_ ...string) *Route {
+ return nil
+}
+
+func (_ *Route) Host(_ string) *Route {
+ return nil
+}
+
+func (_ *Route) Match(_ *http.Request, _ *RouteMatch) bool {
+ return false
+}
+
+func (_ *Route) MatcherFunc(_ MatcherFunc) *Route {
+ return nil
+}
+
+func (_ *Route) Methods(_ ...string) *Route {
+ return nil
+}
+
+func (_ *Route) Name(_ string) *Route {
+ return nil
+}
+
+func (_ *Route) Path(_ string) *Route {
+ return nil
+}
+
+func (_ *Route) PathPrefix(_ string) *Route {
+ return nil
+}
+
+func (_ *Route) Queries(_ ...string) *Route {
+ return nil
+}
+
+func (_ *Route) Schemes(_ ...string) *Route {
+ return nil
+}
+
+func (_ *Route) SkipClean() bool {
+ return false
+}
+
+func (_ *Route) Subrouter() *Router {
+ return nil
+}
+
+func (_ *Route) URL(_ ...string) (*url.URL, error) {
+ return nil, nil
+}
+
+func (_ *Route) URLHost(_ ...string) (*url.URL, error) {
+ return nil, nil
+}
+
+func (_ *Route) URLPath(_ ...string) (*url.URL, error) {
+ return nil, nil
+}
+
+type RouteMatch struct {
+ Route *Route
+ Handler http.Handler
+ Vars map[string]string
+ MatchErr error
+}
+
+type Router struct {
+ NotFoundHandler http.Handler
+ MethodNotAllowedHandler http.Handler
+ KeepContext bool
+}
+
+func (_ *Router) BuildVarsFunc(_ BuildVarsFunc) *Route {
+ return nil
+}
+
+func (_ *Router) Get(_ string) *Route {
+ return nil
+}
+
+func (_ *Router) GetRoute(_ string) *Route {
+ return nil
+}
+
+func (_ *Router) Handle(_ string, _ http.Handler) *Route {
+ return nil
+}
+
+func (_ *Router) HandleFunc(_ string, _ func(http.ResponseWriter, *http.Request)) *Route {
+ return nil
+}
+
+func (_ *Router) Headers(_ ...string) *Route {
+ return nil
+}
+
+func (_ *Router) Host(_ string) *Route {
+ return nil
+}
+
+func (_ *Router) Match(_ *http.Request, _ *RouteMatch) bool {
+ return false
+}
+
+func (_ *Router) MatcherFunc(_ MatcherFunc) *Route {
+ return nil
+}
+
+func (_ *Router) Methods(_ ...string) *Route {
+ return nil
+}
+
+func (_ *Router) Name(_ string) *Route {
+ return nil
+}
+
+func (_ *Router) NewRoute() *Route {
+ return nil
+}
+
+func (_ *Router) Path(_ string) *Route {
+ return nil
+}
+
+func (_ *Router) PathPrefix(_ string) *Route {
+ return nil
+}
+
+func (_ *Router) Queries(_ ...string) *Route {
+ return nil
+}
+
+func (_ *Router) Schemes(_ ...string) *Route {
+ return nil
+}
+
+func (_ *Router) ServeHTTP(_ http.ResponseWriter, _ *http.Request) {}
+
+func (_ *Router) SkipClean(_ bool) *Router {
+ return nil
+}
+
+func (_ *Router) StrictSlash(_ bool) *Router {
+ return nil
+}
+
+func (_ *Router) Use(_ ...MiddlewareFunc) {}
+
+func (_ *Router) UseEncodedPath() *Router {
+ return nil
+}
+
+func (_ *Router) Walk(_ WalkFunc) error {
+ return nil
+}
+
func Vars(_ *http.Request) map[string]string {
return nil
}
+
+type WalkFunc func(*Route, *Router, []*Route) error
From 4a7171d91e35b2f041e8ce05fb3939f1755771ee Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Mon, 11 May 2020 11:45:21 +0100
Subject: [PATCH 075/157] Fix frontend errors in BadRedirectCheck tests.
---
.../query-tests/Security/CWE-601/BadRedirectCheck/main.go | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/ql/test/query-tests/Security/CWE-601/BadRedirectCheck/main.go b/ql/test/query-tests/Security/CWE-601/BadRedirectCheck/main.go
index c76f9ec09cf..beccc9a135d 100644
--- a/ql/test/query-tests/Security/CWE-601/BadRedirectCheck/main.go
+++ b/ql/test/query-tests/Security/CWE-601/BadRedirectCheck/main.go
@@ -54,7 +54,7 @@ func goodRedirect3(url string, rw http.ResponseWriter, req *http.Request) {
func getTarget(redirect string) string {
u, _ := url.Parse(redirect)
- if u.Path[0] != "/" {
+ if u.Path[0] != '/' {
return "/"
}
@@ -66,21 +66,21 @@ func goodRedirect4(url string, rw http.ResponseWriter, req *http.Request) {
}
func getTarget1(redirect string) string {
- if redirect[0] != "/" {
+ if redirect[0] != '/' {
return "/"
}
return path.Clean(redirect)
}
-func badRedirect2(url string, rw http.ResponseWriter, req *http.Request) {
+func badRedirect1(url string, rw http.ResponseWriter, req *http.Request) {
http.Redirect(rw, req, getTarget1(url), 302)
}
func getTarget2(redirect string) string {
u, _ := url.Parse(redirect)
- if u.Path[0] != "/" {
+ if u.Path[0] != '/' {
return "/"
}
From c1856ba260f91a698931d774fcfb9dec8e98340f Mon Sep 17 00:00:00 2001
From: Porcupiney Hairs
Date: Mon, 11 May 2020 19:32:28 +0530
Subject: [PATCH 076/157] fix tests
---
.../CWE-807/SensitiveConditionBypass.expected | 4 ++++
.../CWE-807/SensitiveConditionBypassBad.go | 10 ++++++++++
ql/test/experimental/CWE-807/condition.go | 2 +-
3 files changed, 15 insertions(+), 1 deletion(-)
create mode 100644 ql/test/experimental/CWE-807/SensitiveConditionBypass.expected
create mode 100644 ql/test/experimental/CWE-807/SensitiveConditionBypassBad.go
diff --git a/ql/test/experimental/CWE-807/SensitiveConditionBypass.expected b/ql/test/experimental/CWE-807/SensitiveConditionBypass.expected
new file mode 100644
index 00000000000..a9dbfa8f426
--- /dev/null
+++ b/ql/test/experimental/CWE-807/SensitiveConditionBypass.expected
@@ -0,0 +1,4 @@
+| SensitiveConditionBypassBad.go:7:5:7:39 | ...!=... | This sensitive comparision check can potentially be bypassed. |
+| condition.go:16:5:16:34 | ...!=... | This sensitive comparision check can potentially be bypassed. |
+| condition.go:25:5:25:35 | ...!=... | This sensitive comparision check can potentially be bypassed. |
+| condition.go:34:5:34:35 | ...!=... | This sensitive comparision check can potentially be bypassed. |
diff --git a/ql/test/experimental/CWE-807/SensitiveConditionBypassBad.go b/ql/test/experimental/CWE-807/SensitiveConditionBypassBad.go
new file mode 100644
index 00000000000..bf8e70f88b7
--- /dev/null
+++ b/ql/test/experimental/CWE-807/SensitiveConditionBypassBad.go
@@ -0,0 +1,10 @@
+package main
+
+import "net/http"
+
+func example(w http.ResponseWriter, r *http.Request) {
+ test2 := "test"
+ if r.Header.Get("X-Password") != test2 {
+ login()
+ }
+}
diff --git a/ql/test/experimental/CWE-807/condition.go b/ql/test/experimental/CWE-807/condition.go
index ff12dfbea0f..ecd6b0a9f2a 100644
--- a/ql/test/experimental/CWE-807/condition.go
+++ b/ql/test/experimental/CWE-807/condition.go
@@ -9,7 +9,7 @@ func use(xs ...interface{}) {}
func t(xs ...interface{}) string { return "sadsad" }
func login(xs ...interface{}) {}
-var test = "localhost"
+const test = "localhost"
// Should alert as authkey is sensitive
func ex1(w http.ResponseWriter, r *http.Request) {
From 9b53ad3b3ce9567a4710e7c27c9463430a0259dc Mon Sep 17 00:00:00 2001
From: Porcupiney Hairs
Date: Wed, 6 May 2020 03:39:54 +0530
Subject: [PATCH 077/157] model IO package
---
ql/src/semmle/go/frameworks/Stdlib.qll | 212 +++++++++++++++++-
.../frameworks/TaintSteps/TaintStep.expected | 48 ++++
.../semmle/go/frameworks/TaintSteps/io.go | 117 ++++++++++
3 files changed, 367 insertions(+), 10 deletions(-)
create mode 100644 ql/test/library-tests/semmle/go/frameworks/TaintSteps/io.go
diff --git a/ql/src/semmle/go/frameworks/Stdlib.qll b/ql/src/semmle/go/frameworks/Stdlib.qll
index 36c9516a449..554e8192fd0 100644
--- a/ql/src/semmle/go/frameworks/Stdlib.qll
+++ b/ql/src/semmle/go/frameworks/Stdlib.qll
@@ -122,28 +122,220 @@ module Fmt {
}
/** Provides models of commonly used functions in the `io` package. */
-module Io {
- private class ReaderRead extends TaintTracking::FunctionModel, Method {
- ReaderRead() { this.implements("io", "Reader", "Read") }
+module IO {
+ private class Copy extends TaintTracking::FunctionModel, Function {
+ Copy() {
+ // func Copy(dst Writer, src Reader) (written int64, err error)
+ // func CopyBuffer(dst Writer, src Reader, buf []byte) (written int64, err error)
+ // func CopyN(dst Writer, src Reader, n int64) (written int64, err error)
+ hasQualifiedName("io", "Copy") or
+ hasQualifiedName("io", "CopyBuffer") or
+ hasQualifiedName("io", "CopyN")
+ }
- override predicate hasTaintFlow(FunctionInput inp, FunctionOutput outp) {
- inp.isReceiver() and outp.isParameter(0)
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isParameter(1) and output.isParameter(0)
}
}
- private class WriterWrite extends TaintTracking::FunctionModel, Method {
- WriterWrite() { this.implements("io", "Writer", "Write") }
+ private class Pipe extends TaintTracking::FunctionModel, Function {
+ Pipe() {
+ // func Pipe() (*PipeReader, *PipeWriter)
+ hasQualifiedName("io", "Pipe")
+ }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isResult(0) and output.isResult(1)
+ }
+ }
+
+ private class IORead extends TaintTracking::FunctionModel, Function {
+ IORead() {
+ // func ReadAtLeast(r Reader, buf []byte, min int) (n int, err error)
+ // func ReadFull(r Reader, buf []byte) (n int, err error)
+ hasQualifiedName("io", "ReadAtLeast") or
+ hasQualifiedName("io", "ReadFull")
+ }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isParameter(0) and output.isParameter(1)
+ }
+ }
+
+ private class WriteString extends TaintTracking::FunctionModel {
+ WriteString() {
+ // func WriteString(w Writer, s string) (n int, err error)
+ this.hasQualifiedName("io", "WriteString")
+ }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isParameter(1) and output.isParameter(0)
+ }
+ }
+
+ private class ByteReaderReadByte extends TaintTracking::FunctionModel, Method {
+ ByteReaderReadByte() {
+ // ReadByte() (byte, error)
+ this.implements("io", "ByteReader", "ReadByte")
+ }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isReceiver() and output.isResult(0)
+ }
+ }
+
+ private class ByteWriterWriteByte extends TaintTracking::FunctionModel, Method {
+ ByteWriterWriteByte() {
+ // WriteByte(c byte) error
+ this.implements("io", "ByteWriter", "WriteByte")
+ }
override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
input.isParameter(0) and output.isReceiver()
}
}
- private class WriteString extends TaintTracking::FunctionModel {
- WriteString() { this.hasQualifiedName("io", "WriteString") }
+ private class ReaderRead extends TaintTracking::FunctionModel, Method {
+ ReaderRead() {
+ // Read(p []byte) (n int, err error)
+ this.implements("io", "Reader", "Read")
+ }
override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
- input.isParameter(1) and output.isParameter(0)
+ input.isReceiver() and output.isParameter(0)
+ }
+ }
+
+ private class LimitReader extends TaintTracking::FunctionModel, Function {
+ LimitReader() {
+ // func LimitReader(r Reader, n int64) Reader
+ this.hasQualifiedName("io", "LimitReader")
+ }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isParameter(0) and output.isResult()
+ }
+ }
+
+ private class MultiReader extends TaintTracking::FunctionModel, Function {
+ MultiReader() {
+ // func MultiReader(readers ...Reader) Reader
+ this.hasQualifiedName("io", "MultiReader")
+ }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isParameter(_) and output.isResult()
+ }
+ }
+
+ private class TeeReader extends TaintTracking::FunctionModel, Function {
+ TeeReader() {
+ // func TeeReader(r Reader, w Writer) Reader
+ this.hasQualifiedName("io", "TeeReader")
+ }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isParameter(0) and output.isResult()
+ or
+ input.isParameter(0) and output.isParameter(1)
+ }
+ }
+
+ private class ReaderAtReadAt extends TaintTracking::FunctionModel, Method {
+ ReaderAtReadAt() { this.implements("io", "ReaderAt", "ReadAt") }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ // ReadAt(p []byte, off int64) (n int, err error)
+ input.isReceiver() and output.isParameter(0)
+ }
+ }
+
+ private class ReaderFromReadFrom extends TaintTracking::FunctionModel, Method {
+ ReaderFromReadFrom() {
+ // ReadFrom(r Reader) (n int64, err error)
+ this.implements("io", "ReaderFrom", "ReadFrom")
+ }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isParameter(0) and output.isReceiver()
+ }
+ }
+
+ private class RuneReaderReadRune extends TaintTracking::FunctionModel, Method {
+ RuneReaderReadRune() {
+ // ReadRune() (r rune, size int, err error)
+ this.implements("io", "RuneReader", "ReadRune")
+ }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isReceiver() and output.isResult(0)
+ }
+ }
+
+ private class NewSectionReader extends TaintTracking::FunctionModel, Function {
+ NewSectionReader() {
+ // func NewSectionReader(r ReaderAt, off int64, n int64) *SectionReader
+ this.hasQualifiedName("io", "NewSectionReader")
+ }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isParameter(0) and output.isResult()
+ }
+ }
+
+ // A Taint Model for the stdlib io StringWriter interface
+ private class StringWriterWriteString extends TaintTracking::FunctionModel, Method {
+ StringWriterWriteString() {
+ // WriteString(s string) (n int, err error)
+ this.implements("io", "StringWriter", "WriteString")
+ }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isParameter(0) and output.isReceiver()
+ }
+ }
+
+ private class WriterWrite extends TaintTracking::FunctionModel, Method {
+ WriterWrite() {
+ // Write(p []byte) (n int, err error)
+ this.implements("io", "Writer", "Write")
+ }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isParameter(0) and output.isReceiver()
+ }
+ }
+
+ private class MultiWriter extends TaintTracking::FunctionModel, Function {
+ MultiWriter() {
+ // func MultiWriter(writers ...Writer) Writer
+ hasQualifiedName("io", "MultiWriter")
+ }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isResult() and output.isParameter(_)
+ }
+ }
+
+ private class WriterAtWriteAt extends TaintTracking::FunctionModel, Method {
+ WriterAtWriteAt() {
+ // WriteAt(p []byte, off int64) (n int, err error)
+ this.implements("io", "WriterAt", "WriteAt")
+ }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isParameter(0) and output.isReceiver()
+ }
+ }
+
+ private class WriterToWriteTo extends TaintTracking::FunctionModel, Method {
+ WriterToWriteTo() {
+ // WriteTo(w Writer) (n int64, err error)
+ this.implements("io", "WriterTo", "WriteTo")
+ }
+
+ override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
+ input.isReceiver() and output.isParameter(0)
}
}
}
diff --git a/ql/test/library-tests/semmle/go/frameworks/TaintSteps/TaintStep.expected b/ql/test/library-tests/semmle/go/frameworks/TaintSteps/TaintStep.expected
index d912d745d7a..eeb8e85e203 100644
--- a/ql/test/library-tests/semmle/go/frameworks/TaintSteps/TaintStep.expected
+++ b/ql/test/library-tests/semmle/go/frameworks/TaintSteps/TaintStep.expected
@@ -5,6 +5,54 @@
| crypto.go:11:18:11:57 | call to Open | crypto.go:11:2:11:57 | ... := ...[0] |
| crypto.go:11:18:11:57 | call to Open | crypto.go:11:2:11:57 | ... := ...[1] |
| crypto.go:11:42:11:51 | ciphertext | crypto.go:11:2:11:57 | ... := ...[0] |
+| io.go:15:3:15:3 | definition of w | io.go:15:23:15:27 | &... |
+| io.go:15:3:15:3 | definition of w | io.go:15:30:15:34 | &... |
+| io.go:15:23:15:27 | &... | io.go:14:7:14:10 | definition of buf1 |
+| io.go:15:24:15:27 | buf1 | io.go:15:23:15:27 | &... |
+| io.go:15:30:15:34 | &... | io.go:14:13:14:16 | definition of buf2 |
+| io.go:15:31:15:34 | buf2 | io.go:15:30:15:34 | &... |
+| io.go:17:14:17:19 | reader | io.go:15:3:15:3 | definition of w |
+| io.go:24:19:24:23 | &... | io.go:22:7:22:10 | definition of buf1 |
+| io.go:24:20:24:23 | buf1 | io.go:24:19:24:23 | &... |
+| io.go:26:21:26:26 | reader | io.go:24:3:24:4 | definition of w2 |
+| io.go:32:19:32:23 | &... | io.go:31:7:31:10 | definition of buf1 |
+| io.go:32:20:32:23 | buf1 | io.go:32:19:32:23 | &... |
+| io.go:34:16:34:21 | reader | io.go:32:3:32:4 | definition of w2 |
+| io.go:38:3:38:3 | definition of r | io.go:38:3:38:19 | ... := ...[1] |
+| io.go:38:11:38:19 | call to Pipe | io.go:38:3:38:19 | ... := ...[0] |
+| io.go:38:11:38:19 | call to Pipe | io.go:38:3:38:19 | ... := ...[1] |
+| io.go:39:17:39:31 | "some string\\n" | io.go:38:6:38:6 | definition of w |
+| io.go:42:16:42:16 | r | io.go:41:3:41:5 | definition of buf |
+| io.go:43:13:43:15 | buf | io.go:43:13:43:24 | call to String |
+| io.go:49:18:49:23 | reader | io.go:48:3:48:5 | definition of buf |
+| io.go:55:15:55:20 | reader | io.go:54:3:54:5 | definition of buf |
+| io.go:60:18:60:21 | &... | io.go:59:7:59:9 | definition of buf |
+| io.go:60:19:60:21 | buf | io.go:60:18:60:21 | &... |
+| io.go:61:21:61:26 | "test" | io.go:60:3:60:3 | definition of w |
+| io.go:66:11:66:16 | reader | io.go:66:3:66:27 | ... := ...[0] |
+| io.go:66:11:66:27 | call to ReadByte | io.go:66:3:66:27 | ... := ...[0] |
+| io.go:66:11:66:27 | call to ReadByte | io.go:66:3:66:27 | ... := ...[1] |
+| io.go:68:21:68:21 | t | io.go:67:7:67:13 | definition of bwriter |
+| io.go:74:3:74:8 | reader | io.go:73:3:73:5 | definition of buf |
+| io.go:79:3:79:8 | reader | io.go:78:3:78:5 | definition of buf |
+| io.go:84:24:84:29 | reader | io.go:84:9:84:33 | call to LimitReader |
+| io.go:85:22:85:23 | lr | io.go:85:11:85:19 | selection of Stdout |
+| io.go:92:23:92:24 | r1 | io.go:92:8:92:33 | call to MultiReader |
+| io.go:92:27:92:28 | r2 | io.go:92:8:92:33 | call to MultiReader |
+| io.go:92:31:92:32 | r3 | io.go:92:8:92:33 | call to MultiReader |
+| io.go:93:22:93:22 | r | io.go:93:11:93:19 | selection of Stdout |
+| io.go:98:23:98:23 | r | io.go:98:10:98:30 | call to TeeReader |
+| io.go:98:23:98:23 | r | io.go:98:26:98:29 | &... |
+| io.go:98:26:98:29 | &... | io.go:97:7:97:9 | definition of buf |
+| io.go:98:27:98:29 | buf | io.go:98:26:98:29 | &... |
+| io.go:100:22:100:24 | tee | io.go:100:11:100:19 | selection of Stdout |
+| io.go:104:28:104:28 | r | io.go:104:8:104:36 | call to NewSectionReader |
+| io.go:105:22:105:22 | s | io.go:105:11:105:19 | selection of Stdout |
+| io.go:109:16:109:16 | r | io.go:109:3:109:27 | ... := ...[0] |
+| io.go:109:16:109:27 | call to ReadRune | io.go:109:3:109:27 | ... := ...[0] |
+| io.go:109:16:109:27 | call to ReadRune | io.go:109:3:109:27 | ... := ...[1] |
+| io.go:109:16:109:27 | call to ReadRune | io.go:109:3:109:27 | ... := ...[2] |
+| io.go:114:3:114:3 | r | io.go:114:13:114:21 | selection of Stdout |
| main.go:11:12:11:26 | call to Marshal | main.go:11:2:11:26 | ... := ...[0] |
| main.go:11:12:11:26 | call to Marshal | main.go:11:2:11:26 | ... := ...[1] |
| main.go:11:25:11:25 | v | main.go:11:2:11:26 | ... := ...[0] |
diff --git a/ql/test/library-tests/semmle/go/frameworks/TaintSteps/io.go b/ql/test/library-tests/semmle/go/frameworks/TaintSteps/io.go
new file mode 100644
index 00000000000..82aa4d377c5
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/TaintSteps/io.go
@@ -0,0 +1,117 @@
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+func io2() {
+ {
+ reader := strings.NewReader("some string")
+ var buf1, buf2 bytes.Buffer
+ w := io.MultiWriter(&buf1, &buf2)
+
+ io.Copy(w, reader)
+ }
+
+ {
+ reader := strings.NewReader("some string")
+ var buf1 bytes.Buffer
+ buf := make([]byte, 512)
+ w2 := io.Writer(&buf1)
+
+ io.CopyBuffer(w2, reader, buf)
+ }
+
+ {
+ reader := strings.NewReader("some string")
+ var buf1 bytes.Buffer
+ w2 := io.Writer(&buf1)
+
+ io.CopyN(w2, reader, 512)
+ }
+
+ {
+ r, w := io.Pipe()
+ fmt.Fprint(w, "some string\n")
+
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(r)
+ fmt.Print(buf.String())
+ }
+
+ {
+ reader := strings.NewReader("some string")
+ buf := make([]byte, 512)
+ io.ReadAtLeast(reader, buf, 512)
+ }
+
+ {
+ reader := strings.NewReader("some string")
+ buf := make([]byte, 512)
+ io.ReadFull(reader, buf)
+ }
+
+ {
+ var buf bytes.Buffer
+ w := io.Writer(&buf)
+ io.WriteString(w, "test")
+ }
+
+ {
+ reader := strings.NewReader("some string")
+ t, _ := reader.ReadByte()
+ var bwriter io.ByteWriter
+ bwriter.WriteByte(t)
+ }
+
+ {
+ reader := strings.NewReader("some string")
+ buf := make([]byte, 512)
+ reader.Read(buf)
+ }
+ {
+ reader := strings.NewReader("some string")
+ buf := make([]byte, 512)
+ reader.ReadAt(buf, 10)
+ }
+
+ {
+ reader := strings.NewReader("some string")
+ lr := io.LimitReader(reader, 4)
+ io.Copy(os.Stdout, lr)
+ }
+
+ {
+ r1 := strings.NewReader("reader1 ")
+ r2 := strings.NewReader("reader2 ")
+ r3 := strings.NewReader("reader3")
+ r := io.MultiReader(r1, r2, r3)
+ io.Copy(os.Stdout, r)
+ }
+ {
+ r := strings.NewReader("some string")
+ var buf bytes.Buffer
+ tee := io.TeeReader(r, &buf)
+
+ io.Copy(os.Stdout, tee)
+ }
+ {
+ r := strings.NewReader("some string")
+ s := io.NewSectionReader(r, 5, 17)
+ io.Copy(os.Stdout, s)
+ }
+ {
+ r := strings.NewReader("some string")
+ run, _, _ := r.ReadRune()
+ fmt.Println(run)
+ }
+ {
+ r := strings.NewReader("some string")
+ r.WriteTo(os.Stdout)
+ }
+
+}
From d0061bfd4b8974a7b427477994e112861e0cd7fe Mon Sep 17 00:00:00 2001
From: Porcupiney Hairs
Date: Sun, 3 May 2020 19:57:06 +0530
Subject: [PATCH 078/157] Golang : Add MongoDB injection support
This PR adds support for MongoDB injection to the existing SQL injection query.
It models the official Golang MongoDB driver.
A brief summary of the changes made in this PR:
1. A `NoSQL.qll` file has been created to model a `NoSQLQueryString`.
2. An entry is added in `go.qll` by default, as these changes may be generally useful.
3. Library tests along with their expected outputs are added.
4. Query tests are added. However, I am unable to add the expected output, as qltest
cannot find depstubber; these can be easily added later. I have created a separate
codeql-go database from the same files and ran the query against it; this shows
that 14 correct results should be added by this PR.
---
ql/src/go.qll | 1 +
ql/src/semmle/go/frameworks/NoSQL.qll | 107 +++++
ql/src/semmle/go/security/SqlInjection.qll | 4 +
.../security/SqlInjectionCustomizations.qll | 5 +
.../go/frameworks/NoSQL/QueryString.expected | 14 +
.../semmle/go/frameworks/NoSQL/QueryString.ql | 5 +
.../semmle/go/frameworks/NoSQL/go.mod | 5 +
.../semmle/go/frameworks/NoSQL/main.go | 52 +++
.../go.mongodb.org/mongo-driver/LICENSE | 201 +++++++++
.../mongo-driver/bson/primitive/stub.go | 23 +
.../go.mongodb.org/mongo-driver/bson/stub.go | 5 +
.../go.mongodb.org/mongo-driver/mongo/stub.go | 389 +++++++++++++++++
.../go/frameworks/NoSQL/vendor/modules.txt | 3 +
.../Security/CWE-089/SqlInjection.expected | 43 ++
ql/test/query-tests/Security/CWE-089/go.mod | 5 +-
.../query-tests/Security/CWE-089/mongoDB.go | 83 ++++
.../github.com/Masterminds/squirrel/stub.go | 117 ++----
.../go.mongodb.org/mongo-driver/LICENSE | 201 +++++++++
.../mongo-driver/bson/primitive/stub.go | 23 +
.../go.mongodb.org/mongo-driver/bson/stub.go | 5 +
.../mongo-driver/mongo/options/stub.go | 217 ++++++++++
.../go.mongodb.org/mongo-driver/mongo/stub.go | 393 ++++++++++++++++++
.../Security/CWE-089/vendor/modules.txt | 13 +-
23 files changed, 1812 insertions(+), 102 deletions(-)
create mode 100644 ql/src/semmle/go/frameworks/NoSQL.qll
create mode 100644 ql/test/library-tests/semmle/go/frameworks/NoSQL/QueryString.expected
create mode 100644 ql/test/library-tests/semmle/go/frameworks/NoSQL/QueryString.ql
create mode 100644 ql/test/library-tests/semmle/go/frameworks/NoSQL/go.mod
create mode 100644 ql/test/library-tests/semmle/go/frameworks/NoSQL/main.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/go.mongodb.org/mongo-driver/LICENSE
create mode 100644 ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/go.mongodb.org/mongo-driver/bson/primitive/stub.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/go.mongodb.org/mongo-driver/bson/stub.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/go.mongodb.org/mongo-driver/mongo/stub.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/modules.txt
create mode 100644 ql/test/query-tests/Security/CWE-089/mongoDB.go
create mode 100644 ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/LICENSE
create mode 100644 ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/bson/primitive/stub.go
create mode 100644 ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/bson/stub.go
create mode 100644 ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/mongo/options/stub.go
create mode 100644 ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/mongo/stub.go
diff --git a/ql/src/go.qll b/ql/src/go.qll
index 23457315d48..524717b7544 100644
--- a/ql/src/go.qll
+++ b/ql/src/go.qll
@@ -28,6 +28,7 @@ import semmle.go.frameworks.Email
import semmle.go.frameworks.HTTP
import semmle.go.frameworks.Macaron
import semmle.go.frameworks.Mux
+import semmle.go.frameworks.NoSQL
import semmle.go.frameworks.SystemCommandExecutors
import semmle.go.frameworks.SQL
import semmle.go.frameworks.XPath
diff --git a/ql/src/semmle/go/frameworks/NoSQL.qll b/ql/src/semmle/go/frameworks/NoSQL.qll
new file mode 100644
index 00000000000..a49c3864b62
--- /dev/null
+++ b/ql/src/semmle/go/frameworks/NoSQL.qll
@@ -0,0 +1,107 @@
+/**
+ * Provides classes for working with NoSQL-related concepts such as queries.
+ */
+
+import go
+
+/** Provides classes for working with NoSQL-related APIs. */
+module NoSQL {
+ /**
+ * A data-flow node whose string value is interpreted as (part of) a NoSQL query.
+ *
+ * Extends this class to refine existing API models. If you want to model new APIs,
+ * extend `NoSQL::QueryString::Range` instead.
+ */
+ class NoSQLQueryString extends DataFlow::Node {
+ NoSQLQueryString::Range self;
+
+ NoSQLQueryString() { this = self }
+ }
+
+ //TODO : Replace the following two predicate definitions with a simple call to package()
+ private string mongoDb() { result = "go.mongodb.org/mongo-driver/mongo" }
+
+ private string mongoBsonPrimitive() { result = "go.mongodb.org/mongo-driver/bson/primitive" }
+
+ /** Provides classes for working with SQL query strings. */
+ module NoSQLQueryString {
+ /**
+ * A data-flow node whose string value is interpreted as (part of) a NoSQL query.
+ *
+ * Extend this class to model new APIs. If you want to refine existing API models,
+ * extend `NoSQL::QueryString` instead.
+ */
+ abstract class Range extends DataFlow::Node { }
+
+ /**
+ * Holds if method `name` of `Collection` struct of `go.mongodb.org/mongo-driver/mongo`
+ * package interprets parameter `n` as a query.
+ */
+ private predicate collectionMethods(string name, int n) {
+ // func (coll *Collection) CountDocuments(ctx context.Context, filter interface{}, opts ...*options.CountOptions) (int64, error)
+ name = "CountDocuments" and n = 1
+ or
+ // func (coll *Collection) DeleteMany(ctx context.Context, filter interface{}, opts ...*options.DeleteOptions) (*DeleteResult, error)
+ name = "DeleteMany" and n = 1
+ or
+ // func (coll *Collection) DeleteOne(ctx context.Context, filter interface{}, opts ...*options.DeleteOptions) (*DeleteResult, error)
+ name = "DeleteOne" and n = 1
+ or
+ // func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter interface{}, ...) ([]interface{}, error)
+ name = "Distinct" and n = 2
+ or
+ // func (coll *Collection) Find(ctx context.Context, filter interface{}, opts ...*options.FindOptions) (*Cursor, error)
+ name = "Find" and n = 1
+ or
+ // func (coll *Collection) FindOne(ctx context.Context, filter interface{}, opts ...*options.FindOneOptions) *SingleResult
+ name = "FindOne" and n = 1
+ or
+ // func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{}, ...) *SingleResult
+ name = "FindOneAndDelete" and n = 1
+ or
+ // func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{}, replacement interface{}, ...) *SingleResult
+ name = "FindOneAndReplace" and n = 1
+ or
+ // func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{}, update interface{}, ...) *SingleResult
+ name = "FindOneAndUpdate" and n = 1
+ or
+ // func (coll *Collection) ReplaceOne(ctx context.Context, filter interface{}, replacement interface{}, ...) (*UpdateResult, error)
+ name = "ReplaceOne" and n = 1
+ or
+ // func (coll *Collection) UpdateMany(ctx context.Context, filter interface{}, update interface{}, ...) (*UpdateResult, error)
+ name = "UpdateMany" and n = 1
+ or
+ // func (coll *Collection) UpdateOne(ctx context.Context, filter interface{}, update interface{}, ...) (*UpdateResult, error)
+ name = "UpdateOne" and n = 1
+ or
+ // func (coll *Collection) Watch(ctx context.Context, pipeline interface{}, ...) (*ChangeStream, error)
+ name = "Watch" and n = 1
+ or
+ // func (coll *Collection) Aggregate(ctx context.Context, pipeline interface{}, opts ...*options.AggregateOptions) (*Cursor, error)
+ name = "Aggregate" and n = 1
+ }
+
+ /**
+ * A query string used in an API function acting on a `Collection` struct of
+ * `go.mongodb.org/mongo-driver/mongo` package
+ */
+ private class MongoDbCollectionQueryString extends Range {
+ MongoDbCollectionQueryString() {
+ exists(Method meth, string methodName, int n |
+ collectionMethods(methodName, n) and
+ meth.hasQualifiedName(mongoDb(), "Collection", methodName) and
+ this = meth.getACall().getArgument(n)
+ )
+ }
+ }
+ }
+
+ predicate isAdditionalMongoTaintStep(DataFlow::Node prev, DataFlow::Node succ) {
+ // Taint bson.E if input is tainted
+ exists(Write w, DataFlow::Node base, Field f | w.writesField(base, f, prev) |
+ base = succ.getASuccessor*() and
+ base.getType().hasQualifiedName(mongoBsonPrimitive(), "E") and
+ f.getName() = "Value"
+ )
+ }
+}
diff --git a/ql/src/semmle/go/security/SqlInjection.qll b/ql/src/semmle/go/security/SqlInjection.qll
index 70df0b4822f..0397ee8c665 100644
--- a/ql/src/semmle/go/security/SqlInjection.qll
+++ b/ql/src/semmle/go/security/SqlInjection.qll
@@ -23,6 +23,10 @@ module SqlInjection {
override predicate isSink(DataFlow::Node sink) { sink instanceof Sink }
+ override predicate isAdditionalTaintStep(DataFlow::Node prev, DataFlow::Node succ) {
+ NoSQL::isAdditionalMongoTaintStep(prev, succ)
+ }
+
override predicate isSanitizer(DataFlow::Node node) {
super.isSanitizer(node) or
node instanceof Sanitizer
diff --git a/ql/src/semmle/go/security/SqlInjectionCustomizations.qll b/ql/src/semmle/go/security/SqlInjectionCustomizations.qll
index b34c34954d8..d5b87271546 100644
--- a/ql/src/semmle/go/security/SqlInjectionCustomizations.qll
+++ b/ql/src/semmle/go/security/SqlInjectionCustomizations.qll
@@ -39,4 +39,9 @@ module SqlInjection {
class SqlQueryAsSink extends Sink {
SqlQueryAsSink() { this instanceof SQL::QueryString }
}
+
 + /** A NoSQL query string, considered as a taint sink for SQL injection. */
+ class NoSqlQueryAsSink extends Sink {
+ NoSqlQueryAsSink() { this instanceof NoSQL::NoSQLQueryString }
+ }
}
diff --git a/ql/test/library-tests/semmle/go/frameworks/NoSQL/QueryString.expected b/ql/test/library-tests/semmle/go/frameworks/NoSQL/QueryString.expected
new file mode 100644
index 00000000000..109ad99673b
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/NoSQL/QueryString.expected
@@ -0,0 +1,14 @@
+| main.go:24:22:24:29 | pipeline |
+| main.go:27:27:27:32 | filter |
+| main.go:29:23:29:28 | filter |
+| main.go:30:22:30:27 | filter |
+| main.go:32:32:32:37 | filter |
+| main.go:35:17:35:22 | filter |
+| main.go:36:20:36:25 | filter |
+| main.go:37:29:37:34 | filter |
+| main.go:38:30:38:35 | filter |
+| main.go:39:29:39:34 | filter |
+| main.go:45:23:45:28 | filter |
+| main.go:47:23:47:28 | filter |
+| main.go:48:22:48:27 | filter |
+| main.go:49:18:49:25 | pipeline |
diff --git a/ql/test/library-tests/semmle/go/frameworks/NoSQL/QueryString.ql b/ql/test/library-tests/semmle/go/frameworks/NoSQL/QueryString.ql
new file mode 100644
index 00000000000..2bf41639937
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/NoSQL/QueryString.ql
@@ -0,0 +1,5 @@
+import go
+import semmle.go.frameworks.NoSQL
+
+from NoSQL::NoSQLQueryString qs
+select qs
diff --git a/ql/test/library-tests/semmle/go/frameworks/NoSQL/go.mod b/ql/test/library-tests/semmle/go/frameworks/NoSQL/go.mod
new file mode 100644
index 00000000000..6cd131e192f
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/NoSQL/go.mod
@@ -0,0 +1,5 @@
+module main
+
+go 1.14
+
+require go.mongodb.org/mongo-driver v1.3.2
diff --git a/ql/test/library-tests/semmle/go/frameworks/NoSQL/main.go b/ql/test/library-tests/semmle/go/frameworks/NoSQL/main.go
new file mode 100644
index 00000000000..438763a587e
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/NoSQL/main.go
@@ -0,0 +1,52 @@
+package main
+
+//go:generate depstubber -vendor go.mongodb.org/mongo-driver/bson/primitive D
+//go:generate depstubber -vendor go.mongodb.org/mongo-driver/mongo Collection,Pipeline
+
+import (
+ "context"
+
+ "go.mongodb.org/mongo-driver/bson"
+ "go.mongodb.org/mongo-driver/mongo"
+)
+
+func test(coll *mongo.Collection, filter interface{}, models []WriteModel, ctx context.Context) {
+
+ fieldName := "test"
+ document := filter
+ documents := []interface{}{
+ document,
+ bson.D{{"name", "Bob"}},
+ }
+ matchStage := bson.D{{"$match", filter}}
+ pipeline := mongo.Pipeline{matchStage}
+
+ coll.Aggregate(ctx, pipeline, nil)
+ coll.BulkWrite(ctx, models, nil)
+ coll.Clone(nil)
+ coll.CountDocuments(ctx, filter, nil)
+ coll.Database()
+ coll.DeleteMany(ctx, filter, nil)
+ coll.DeleteOne(ctx, filter, nil)
+
+ coll.Distinct(ctx, fieldName, filter)
+ coll.Drop(ctx)
+ coll.EstimatedDocumentCount(ctx, nil)
+ coll.Find(ctx, filter, nil)
+ coll.FindOne(ctx, filter, nil)
+ coll.FindOneAndDelete(ctx, filter, nil)
+ coll.FindOneAndReplace(ctx, filter, nil)
+ coll.FindOneAndUpdate(ctx, filter, nil)
+ coll.Indexes()
+ coll.InsertMany(ctx, documents)
+ coll.InsertOne(ctx, document, nil)
+ coll.Name()
+ replacement := bson.D{{"location", "NYC"}}
+ coll.ReplaceOne(ctx, filter, replacement)
+ update := bson.D{{"$inc", bson.D{{"age", 1}}}}
+ coll.UpdateMany(ctx, filter, update)
+ coll.UpdateOne(ctx, filter, update)
+ coll.Watch(ctx, pipeline)
+}
+
+func main() {}
diff --git a/ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/go.mongodb.org/mongo-driver/LICENSE b/ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/go.mongodb.org/mongo-driver/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/go.mongodb.org/mongo-driver/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/go.mongodb.org/mongo-driver/bson/primitive/stub.go b/ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/go.mongodb.org/mongo-driver/bson/primitive/stub.go
new file mode 100644
index 00000000000..6f07aaff4ee
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/go.mongodb.org/mongo-driver/bson/primitive/stub.go
@@ -0,0 +1,23 @@
+// Code generated by depstubber. DO NOT EDIT.
+// This is a simple stub for go.mongodb.org/mongo-driver/bson/primitive, strictly for use in testing.
+
+// See the LICENSE file for information about the licensing of the original library.
+// Source: go.mongodb.org/mongo-driver/bson/primitive (exports: D; functions: )
+
+// Package primitive is a stub of go.mongodb.org/mongo-driver/bson/primitive, generated by depstubber.
+package primitive
+
+import ()
+
+type D []E
+
+func (_ D) Map() M {
+ return nil
+}
+
+type E struct {
+ Key string
+ Value interface{}
+}
+
+type M map[string]interface{}
diff --git a/ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/go.mongodb.org/mongo-driver/bson/stub.go b/ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/go.mongodb.org/mongo-driver/bson/stub.go
new file mode 100644
index 00000000000..de80f55501f
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/go.mongodb.org/mongo-driver/bson/stub.go
@@ -0,0 +1,5 @@
+package bson
+
+import "go.mongodb.org/mongo-driver/bson/primitive"
+
+type D = primitive.D
diff --git a/ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/go.mongodb.org/mongo-driver/mongo/stub.go b/ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/go.mongodb.org/mongo-driver/mongo/stub.go
new file mode 100644
index 00000000000..57fda704783
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/go.mongodb.org/mongo-driver/mongo/stub.go
@@ -0,0 +1,389 @@
+// Code generated by depstubber. DO NOT EDIT.
+// This is a simple stub for go.mongodb.org/mongo-driver/mongo, strictly for use in testing.
+
+// See the LICENSE file for information about the licensing of the original library.
+// Source: go.mongodb.org/mongo-driver/mongo (exports: Collection,Pipeline; functions: )
+
+// Package mongo is a stub of go.mongodb.org/mongo-driver/mongo, generated by depstubber.
+package mongo
+
+import (
+ context "context"
+ time "time"
+)
+
+type BulkWriteResult struct {
+ InsertedCount int64
+ MatchedCount int64
+ ModifiedCount int64
+ DeletedCount int64
+ UpsertedCount int64
+ UpsertedIDs map[int64]interface{}
+}
+
+type ChangeStream struct {
+ Current interface{}
+}
+
+func (_ *ChangeStream) Close(_ context.Context) error {
+ return nil
+}
+
+func (_ *ChangeStream) Decode(_ interface{}) error {
+ return nil
+}
+
+func (_ *ChangeStream) Err() error {
+ return nil
+}
+
+func (_ *ChangeStream) ID() int64 {
+ return 0
+}
+
+func (_ *ChangeStream) Next(_ context.Context) bool {
+ return false
+}
+
+func (_ *ChangeStream) ResumeToken() interface{} {
+ return nil
+}
+
+func (_ *ChangeStream) TryNext(_ context.Context) bool {
+ return false
+}
+
+type Client struct{}
+
+func (_ *Client) Connect(_ context.Context) error {
+ return nil
+}
+
+func (_ *Client) Database(_ string, _ ...*interface{}) *Database {
+ return nil
+}
+
+func (_ *Client) Disconnect(_ context.Context) error {
+ return nil
+}
+
+func (_ *Client) ListDatabaseNames(_ context.Context, _ interface{}, _ ...*interface{}) ([]string, error) {
+ return nil, nil
+}
+
+func (_ *Client) ListDatabases(_ context.Context, _ interface{}, _ ...*interface{}) (ListDatabasesResult, error) {
+ return ListDatabasesResult{}, nil
+}
+
+func (_ *Client) NumberSessionsInProgress() int {
+ return 0
+}
+
+func (_ *Client) Ping(_ context.Context, _ *interface{}) error {
+ return nil
+}
+
+func (_ *Client) StartSession(_ ...*interface{}) (Session, error) {
+ return nil, nil
+}
+
+func (_ *Client) UseSession(_ context.Context, _ func(SessionContext) error) error {
+ return nil
+}
+
+func (_ *Client) UseSessionWithOptions(_ context.Context, _ *interface{}, _ func(SessionContext) error) error {
+ return nil
+}
+
+func (_ *Client) Watch(_ context.Context, _ interface{}, _ ...*interface{}) (*ChangeStream, error) {
+ return nil, nil
+}
+
+type Collection struct{}
+
+func (_ *Collection) Aggregate(_ context.Context, _ interface{}, _ ...*interface{}) (*Cursor, error) {
+ return nil, nil
+}
+
+func (_ *Collection) BulkWrite(_ context.Context, _ []WriteModel, _ ...*interface{}) (*BulkWriteResult, error) {
+ return nil, nil
+}
+
+func (_ *Collection) Clone(_ ...*interface{}) (*Collection, error) {
+ return nil, nil
+}
+
+func (_ *Collection) CountDocuments(_ context.Context, _ interface{}, _ ...*interface{}) (int64, error) {
+ return 0, nil
+}
+
+func (_ *Collection) Database() *Database {
+ return nil
+}
+
+func (_ *Collection) DeleteMany(_ context.Context, _ interface{}, _ ...*interface{}) (*DeleteResult, error) {
+ return nil, nil
+}
+
+func (_ *Collection) DeleteOne(_ context.Context, _ interface{}, _ ...*interface{}) (*DeleteResult, error) {
+ return nil, nil
+}
+
+func (_ *Collection) Distinct(_ context.Context, _ string, _ interface{}, _ ...*interface{}) ([]interface{}, error) {
+ return nil, nil
+}
+
+func (_ *Collection) Drop(_ context.Context) error {
+ return nil
+}
+
+func (_ *Collection) EstimatedDocumentCount(_ context.Context, _ ...*interface{}) (int64, error) {
+ return 0, nil
+}
+
+func (_ *Collection) Find(_ context.Context, _ interface{}, _ ...*interface{}) (*Cursor, error) {
+ return nil, nil
+}
+
+func (_ *Collection) FindOne(_ context.Context, _ interface{}, _ ...*interface{}) *SingleResult {
+ return nil
+}
+
+func (_ *Collection) FindOneAndDelete(_ context.Context, _ interface{}, _ ...*interface{}) *SingleResult {
+ return nil
+}
+
+func (_ *Collection) FindOneAndReplace(_ context.Context, _ interface{}, _ interface{}, _ ...*interface{}) *SingleResult {
+ return nil
+}
+
+func (_ *Collection) FindOneAndUpdate(_ context.Context, _ interface{}, _ interface{}, _ ...*interface{}) *SingleResult {
+ return nil
+}
+
+func (_ *Collection) Indexes() IndexView {
+ return IndexView{}
+}
+
+func (_ *Collection) InsertMany(_ context.Context, _ []interface{}, _ ...*interface{}) (*InsertManyResult, error) {
+ return nil, nil
+}
+
+func (_ *Collection) InsertOne(_ context.Context, _ interface{}, _ ...*interface{}) (*InsertOneResult, error) {
+ return nil, nil
+}
+
+func (_ *Collection) Name() string {
+ return ""
+}
+
+func (_ *Collection) ReplaceOne(_ context.Context, _ interface{}, _ interface{}, _ ...*interface{}) (*UpdateResult, error) {
+ return nil, nil
+}
+
+func (_ *Collection) UpdateMany(_ context.Context, _ interface{}, _ interface{}, _ ...*interface{}) (*UpdateResult, error) {
+ return nil, nil
+}
+
+func (_ *Collection) UpdateOne(_ context.Context, _ interface{}, _ interface{}, _ ...*interface{}) (*UpdateResult, error) {
+ return nil, nil
+}
+
+func (_ *Collection) Watch(_ context.Context, _ interface{}, _ ...*interface{}) (*ChangeStream, error) {
+ return nil, nil
+}
+
+type Cursor struct {
+ Current interface{}
+}
+
+func (_ *Cursor) All(_ context.Context, _ interface{}) error {
+ return nil
+}
+
+func (_ *Cursor) Close(_ context.Context) error {
+ return nil
+}
+
+func (_ *Cursor) Decode(_ interface{}) error {
+ return nil
+}
+
+func (_ *Cursor) Err() error {
+ return nil
+}
+
+func (_ *Cursor) ID() int64 {
+ return 0
+}
+
+func (_ *Cursor) Next(_ context.Context) bool {
+ return false
+}
+
+func (_ *Cursor) TryNext(_ context.Context) bool {
+ return false
+}
+
+type Database struct{}
+
+func (_ *Database) Aggregate(_ context.Context, _ interface{}, _ ...*interface{}) (*Cursor, error) {
+ return nil, nil
+}
+
+func (_ *Database) Client() *Client {
+ return nil
+}
+
+func (_ *Database) Collection(_ string, _ ...*interface{}) *Collection {
+ return nil
+}
+
+func (_ *Database) Drop(_ context.Context) error {
+ return nil
+}
+
+func (_ *Database) ListCollectionNames(_ context.Context, _ interface{}, _ ...*interface{}) ([]string, error) {
+ return nil, nil
+}
+
+func (_ *Database) ListCollections(_ context.Context, _ interface{}, _ ...*interface{}) (*Cursor, error) {
+ return nil, nil
+}
+
+func (_ *Database) Name() string {
+ return ""
+}
+
+func (_ *Database) ReadConcern() *interface{} {
+ return nil
+}
+
+func (_ *Database) ReadPreference() *interface{} {
+ return nil
+}
+
+func (_ *Database) RunCommand(_ context.Context, _ interface{}, _ ...*interface{}) *SingleResult {
+ return nil
+}
+
+func (_ *Database) RunCommandCursor(_ context.Context, _ interface{}, _ ...*interface{}) (*Cursor, error) {
+ return nil, nil
+}
+
+func (_ *Database) Watch(_ context.Context, _ interface{}, _ ...*interface{}) (*ChangeStream, error) {
+ return nil, nil
+}
+
+func (_ *Database) WriteConcern() *interface{} {
+ return nil
+}
+
+type DatabaseSpecification struct {
+ Name string
+ SizeOnDisk int64
+ Empty bool
+}
+
+type DeleteResult struct {
+ DeletedCount int64
+}
+
+type IndexModel struct {
+ Keys interface{}
+ Options *interface{}
+}
+
+type IndexView struct{}
+
+func (_ IndexView) CreateMany(_ context.Context, _ []IndexModel, _ ...*interface{}) ([]string, error) {
+ return nil, nil
+}
+
+func (_ IndexView) CreateOne(_ context.Context, _ IndexModel, _ ...*interface{}) (string, error) {
+ return "", nil
+}
+
+func (_ IndexView) DropAll(_ context.Context, _ ...*interface{}) (interface{}, error) {
+ return nil, nil
+}
+
+func (_ IndexView) DropOne(_ context.Context, _ string, _ ...*interface{}) (interface{}, error) {
+ return nil, nil
+}
+
+func (_ IndexView) List(_ context.Context, _ ...*interface{}) (*Cursor, error) {
+ return nil, nil
+}
+
+type InsertManyResult struct {
+ InsertedIDs []interface{}
+}
+
+type InsertOneResult struct {
+ InsertedID interface{}
+}
+
+type ListDatabasesResult struct {
+ Databases []DatabaseSpecification
+ TotalSize int64
+}
+
+type Pipeline []interface{}
+
+type Session interface {
+ AbortTransaction(_ context.Context) error
+ AdvanceClusterTime(_ interface{}) error
+ AdvanceOperationTime(_ *interface{}) error
+ Client() *Client
+ ClusterTime() interface{}
+ CommitTransaction(_ context.Context) error
+ EndSession(_ context.Context)
+ OperationTime() *interface{}
+ StartTransaction(_ ...*interface{}) error
+ WithTransaction(_ context.Context, _ func(SessionContext) (interface{}, error), _ ...*interface{}) (interface{}, error)
+}
+
+type SessionContext interface {
+ AbortTransaction(_ context.Context) error
+ AdvanceClusterTime(_ interface{}) error
+ AdvanceOperationTime(_ *interface{}) error
+ Client() *Client
+ ClusterTime() interface{}
+ CommitTransaction(_ context.Context) error
+ Deadline() (time.Time, bool)
+ Done() <-chan struct{}
+ EndSession(_ context.Context)
+ Err() error
+ OperationTime() *interface{}
+ StartTransaction(_ ...*interface{}) error
+ Value(_ interface{}) interface{}
+ WithTransaction(_ context.Context, _ func(SessionContext) (interface{}, error), _ ...*interface{}) (interface{}, error)
+}
+
+type SingleResult struct{}
+
+func (_ *SingleResult) Decode(_ interface{}) error {
+ return nil
+}
+
+func (_ *SingleResult) DecodeBytes() (interface{}, error) {
+ return nil, nil
+}
+
+func (_ *SingleResult) Err() error {
+ return nil
+}
+
+type UpdateResult struct {
+ MatchedCount int64
+ ModifiedCount int64
+ UpsertedCount int64
+ UpsertedID interface{}
+}
+
+func (_ *UpdateResult) UnmarshalBSON(_ []byte) error {
+ return nil
+}
+
+type WriteModel interface{}
diff --git a/ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/modules.txt b/ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/modules.txt
new file mode 100644
index 00000000000..bcea2f371ea
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/modules.txt
@@ -0,0 +1,3 @@
+# go.mongodb.org/mongo-driver v1.3.2
+## explicit
+go.mongodb.org/mongo-driver
diff --git a/ql/test/query-tests/Security/CWE-089/SqlInjection.expected b/ql/test/query-tests/Security/CWE-089/SqlInjection.expected
index e2ff8cbd7ec..89f2978af2f 100644
--- a/ql/test/query-tests/Security/CWE-089/SqlInjection.expected
+++ b/ql/test/query-tests/Security/CWE-089/SqlInjection.expected
@@ -41,6 +41,20 @@ edges
| main.go:60:3:60:25 | selection of Category : slice type | main.go:61:11:61:11 | q |
| main.go:60:4:60:15 | star expression [Category] : slice type | main.go:60:3:60:25 | selection of Category : slice type |
| main.go:60:5:60:15 | RequestData [pointer, Category] | main.go:60:4:60:15 | star expression [Category] : slice type |
+| mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:57:22:57:29 | pipeline |
+| mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:61:27:61:32 | filter |
+| mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:63:23:63:28 | filter |
+| mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:64:22:64:27 | filter |
+| mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:66:32:66:37 | filter |
+| mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:69:17:69:22 | filter |
+| mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:70:20:70:25 | filter |
+| mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:71:29:71:34 | filter |
+| mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:72:30:72:35 | filter |
+| mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:73:29:73:34 | filter |
+| mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:78:23:78:28 | filter |
+| mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:79:23:79:28 | filter |
+| mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:80:22:80:27 | filter |
+| mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:81:18:81:25 | pipeline |
nodes
| SqlInjection.go:11:3:11:9 | selection of URL : pointer type | semmle.label | selection of URL : pointer type |
| SqlInjection.go:12:11:12:11 | q | semmle.label | q |
@@ -92,6 +106,21 @@ nodes
| main.go:60:4:60:15 | star expression [Category] : slice type | semmle.label | star expression [Category] : slice type |
| main.go:60:5:60:15 | RequestData [pointer, Category] | semmle.label | RequestData [pointer, Category] |
| main.go:61:11:61:11 | q | semmle.label | q |
+| mongoDB.go:40:20:40:30 | call to Referer : string | semmle.label | call to Referer : string |
+| mongoDB.go:57:22:57:29 | pipeline | semmle.label | pipeline |
+| mongoDB.go:61:27:61:32 | filter | semmle.label | filter |
+| mongoDB.go:63:23:63:28 | filter | semmle.label | filter |
+| mongoDB.go:64:22:64:27 | filter | semmle.label | filter |
+| mongoDB.go:66:32:66:37 | filter | semmle.label | filter |
+| mongoDB.go:69:17:69:22 | filter | semmle.label | filter |
+| mongoDB.go:70:20:70:25 | filter | semmle.label | filter |
+| mongoDB.go:71:29:71:34 | filter | semmle.label | filter |
+| mongoDB.go:72:30:72:35 | filter | semmle.label | filter |
+| mongoDB.go:73:29:73:34 | filter | semmle.label | filter |
+| mongoDB.go:78:23:78:28 | filter | semmle.label | filter |
+| mongoDB.go:79:23:79:28 | filter | semmle.label | filter |
+| mongoDB.go:80:22:80:27 | filter | semmle.label | filter |
+| mongoDB.go:81:18:81:25 | pipeline | semmle.label | pipeline |
#select
| SqlInjection.go:12:11:12:11 | q | SqlInjection.go:11:3:11:9 | selection of URL : pointer type | SqlInjection.go:12:11:12:11 | q | This query depends on $@. | SqlInjection.go:11:3:11:9 | selection of URL | a user-provided value |
| issue48.go:22:11:22:12 | q3 | issue48.go:17:25:17:32 | selection of Body : ReadCloser | issue48.go:22:11:22:12 | q3 | This query depends on $@. | issue48.go:17:25:17:32 | selection of Body | a user-provided value |
@@ -104,3 +133,17 @@ nodes
| main.go:43:11:43:11 | q | main.go:39:25:39:31 | selection of URL : pointer type | main.go:43:11:43:11 | q | This query depends on $@. | main.go:39:25:39:31 | selection of URL | a user-provided value |
| main.go:52:11:52:11 | q | main.go:48:28:48:34 | selection of URL : pointer type | main.go:52:11:52:11 | q | This query depends on $@. | main.go:48:28:48:34 | selection of URL | a user-provided value |
| main.go:61:11:61:11 | q | main.go:57:28:57:34 | selection of URL : pointer type | main.go:61:11:61:11 | q | This query depends on $@. | main.go:57:28:57:34 | selection of URL | a user-provided value |
+| mongoDB.go:57:22:57:29 | pipeline | mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:57:22:57:29 | pipeline | This query depends on $@. | mongoDB.go:40:20:40:30 | call to Referer | a user-provided value |
+| mongoDB.go:61:27:61:32 | filter | mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:61:27:61:32 | filter | This query depends on $@. | mongoDB.go:40:20:40:30 | call to Referer | a user-provided value |
+| mongoDB.go:63:23:63:28 | filter | mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:63:23:63:28 | filter | This query depends on $@. | mongoDB.go:40:20:40:30 | call to Referer | a user-provided value |
+| mongoDB.go:64:22:64:27 | filter | mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:64:22:64:27 | filter | This query depends on $@. | mongoDB.go:40:20:40:30 | call to Referer | a user-provided value |
+| mongoDB.go:66:32:66:37 | filter | mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:66:32:66:37 | filter | This query depends on $@. | mongoDB.go:40:20:40:30 | call to Referer | a user-provided value |
+| mongoDB.go:69:17:69:22 | filter | mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:69:17:69:22 | filter | This query depends on $@. | mongoDB.go:40:20:40:30 | call to Referer | a user-provided value |
+| mongoDB.go:70:20:70:25 | filter | mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:70:20:70:25 | filter | This query depends on $@. | mongoDB.go:40:20:40:30 | call to Referer | a user-provided value |
+| mongoDB.go:71:29:71:34 | filter | mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:71:29:71:34 | filter | This query depends on $@. | mongoDB.go:40:20:40:30 | call to Referer | a user-provided value |
+| mongoDB.go:72:30:72:35 | filter | mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:72:30:72:35 | filter | This query depends on $@. | mongoDB.go:40:20:40:30 | call to Referer | a user-provided value |
+| mongoDB.go:73:29:73:34 | filter | mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:73:29:73:34 | filter | This query depends on $@. | mongoDB.go:40:20:40:30 | call to Referer | a user-provided value |
+| mongoDB.go:78:23:78:28 | filter | mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:78:23:78:28 | filter | This query depends on $@. | mongoDB.go:40:20:40:30 | call to Referer | a user-provided value |
+| mongoDB.go:79:23:79:28 | filter | mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:79:23:79:28 | filter | This query depends on $@. | mongoDB.go:40:20:40:30 | call to Referer | a user-provided value |
+| mongoDB.go:80:22:80:27 | filter | mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:80:22:80:27 | filter | This query depends on $@. | mongoDB.go:40:20:40:30 | call to Referer | a user-provided value |
+| mongoDB.go:81:18:81:25 | pipeline | mongoDB.go:40:20:40:30 | call to Referer : string | mongoDB.go:81:18:81:25 | pipeline | This query depends on $@. | mongoDB.go:40:20:40:30 | call to Referer | a user-provided value |
diff --git a/ql/test/query-tests/Security/CWE-089/go.mod b/ql/test/query-tests/Security/CWE-089/go.mod
index 9c55bcb4ffc..6101c095cbc 100644
--- a/ql/test/query-tests/Security/CWE-089/go.mod
+++ b/ql/test/query-tests/Security/CWE-089/go.mod
@@ -4,8 +4,5 @@ go 1.14
require (
github.com/Masterminds/squirrel v1.1.0
- github.com/github/depstubber v0.0.0-20200414023404-c355b630c381 // indirect
- github.com/go-sql-driver/mysql v1.5.0 // indirect
- github.com/lib/pq v1.3.0 // indirect
- github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect
+ go.mongodb.org/mongo-driver v1.3.3
)
diff --git a/ql/test/query-tests/Security/CWE-089/mongoDB.go b/ql/test/query-tests/Security/CWE-089/mongoDB.go
new file mode 100644
index 00000000000..818f8adb13c
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-089/mongoDB.go
@@ -0,0 +1,83 @@
+package main
+
+//go:generate depstubber -vendor go.mongodb.org/mongo-driver/bson/primitive D
+//go:generate depstubber -vendor go.mongodb.org/mongo-driver/mongo Pipeline Connect
+//go:generate depstubber -vendor go.mongodb.org/mongo-driver/mongo/options "" Client
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "net/http"
+
+ "go.mongodb.org/mongo-driver/bson"
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+)
+
+func mongo2(w http.ResponseWriter, r *http.Request) {
+
+ // Set client options
+ clientOptions := options.Client().ApplyURI("mongodb://test:test@localhost:27017")
+
+ // Connect to MongoDB
+ client, err := mongo.Connect(context.TODO(), clientOptions)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Check the connection
+ err = client.Ping(context.TODO(), nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ fmt.Println("Connected to MongoDB!")
+
+ // Get a handle for your collection
+ db := client.Database("test")
+ coll := db.Collection("collection")
+ untrustedInput := r.Referer()
+
+ filter := bson.D{{"name", untrustedInput}}
+
+ fieldName := "test"
+ document := filter
+ documents := []interface{}{
+ document,
+ bson.D{{"name", "Bob"}},
+ }
+ matchStage := bson.D{{"$match", filter}}
+ pipeline := mongo.Pipeline{matchStage}
+ ctx := context.TODO()
+ replacement := bson.D{{"location", "NYC"}}
+ update := bson.D{{"$inc", bson.D{{"age", 1}}}}
+ // models := nil
+
+ coll.Aggregate(ctx, pipeline, nil)
+ // coll.BulkWrite(ctx, models, nil)
+ coll.BulkWrite(ctx, nil, nil)
+ coll.Clone(nil)
+ coll.CountDocuments(ctx, filter, nil)
+ coll.Database()
+ coll.DeleteMany(ctx, filter, nil)
+ coll.DeleteOne(ctx, filter, nil)
+
+ coll.Distinct(ctx, fieldName, filter)
+ coll.Drop(ctx)
+ coll.EstimatedDocumentCount(ctx, nil)
+ coll.Find(ctx, filter, nil)
+ coll.FindOne(ctx, filter, nil)
+ coll.FindOneAndDelete(ctx, filter, nil)
+ coll.FindOneAndReplace(ctx, filter, nil)
+ coll.FindOneAndUpdate(ctx, filter, nil)
+ coll.Indexes()
+ coll.InsertMany(ctx, documents)
+ coll.InsertOne(ctx, document, nil)
+ coll.Name()
+ coll.ReplaceOne(ctx, filter, replacement)
+ coll.UpdateMany(ctx, filter, update)
+ coll.UpdateOne(ctx, filter, update)
+ coll.Watch(ctx, pipeline)
+
+}
diff --git a/ql/test/query-tests/Security/CWE-089/vendor/github.com/Masterminds/squirrel/stub.go b/ql/test/query-tests/Security/CWE-089/vendor/github.com/Masterminds/squirrel/stub.go
index 925f13e5a8c..0e85e0f5e84 100644
--- a/ql/test/query-tests/Security/CWE-089/vendor/github.com/Masterminds/squirrel/stub.go
+++ b/ql/test/query-tests/Security/CWE-089/vendor/github.com/Masterminds/squirrel/stub.go
@@ -1,6 +1,7 @@
// Code generated by depstubber. DO NOT EDIT.
// This is a simple stub for github.com/Masterminds/squirrel, strictly for use in testing.
+// See the LICENSE file for information about the licensing of the original library.
// Source: github.com/Masterminds/squirrel (exports: ; functions: Expr,StatementBuilder)
// Package squirrel is a stub of github.com/Masterminds/squirrel, generated by depstubber.
@@ -12,25 +13,17 @@ import (
)
type BaseRunner interface {
- Exec(_ string, _ ...interface{}) (sql.Result, interface {
- Error() string
- })
- Query(_ string, _ ...interface{}) (*sql.Rows, interface {
- Error() string
- })
+ Exec(_ string, _ ...interface{}) (sql.Result, error)
+ Query(_ string, _ ...interface{}) (*sql.Rows, error)
}
type DeleteBuilder struct{}
-func (_ DeleteBuilder) Exec() (sql.Result, interface {
- Error() string
-}) {
+func (_ DeleteBuilder) Exec() (sql.Result, error) {
return nil, nil
}
-func (_ DeleteBuilder) ExecContext(_ context.Context) (sql.Result, interface {
- Error() string
-}) {
+func (_ DeleteBuilder) ExecContext(_ context.Context) (sql.Result, error) {
return nil, nil
}
@@ -58,9 +51,7 @@ func (_ DeleteBuilder) Prefix(_ string, _ ...interface{}) DeleteBuilder {
return DeleteBuilder{}
}
-func (_ DeleteBuilder) Query() (*sql.Rows, interface {
- Error() string
-}) {
+func (_ DeleteBuilder) Query() (*sql.Rows, error) {
return nil, nil
}
@@ -72,9 +63,7 @@ func (_ DeleteBuilder) Suffix(_ string, _ ...interface{}) DeleteBuilder {
return DeleteBuilder{}
}
-func (_ DeleteBuilder) ToSql() (string, []interface{}, interface {
- Error() string
-}) {
+func (_ DeleteBuilder) ToSql() (string, []interface{}, error) {
return "", nil, nil
}
@@ -92,15 +81,11 @@ func (_ InsertBuilder) Columns(_ ...string) InsertBuilder {
return InsertBuilder{}
}
-func (_ InsertBuilder) Exec() (sql.Result, interface {
- Error() string
-}) {
+func (_ InsertBuilder) Exec() (sql.Result, error) {
return nil, nil
}
-func (_ InsertBuilder) ExecContext(_ context.Context) (sql.Result, interface {
- Error() string
-}) {
+func (_ InsertBuilder) ExecContext(_ context.Context) (sql.Result, error) {
return nil, nil
}
@@ -120,15 +105,11 @@ func (_ InsertBuilder) Prefix(_ string, _ ...interface{}) InsertBuilder {
return InsertBuilder{}
}
-func (_ InsertBuilder) Query() (*sql.Rows, interface {
- Error() string
-}) {
+func (_ InsertBuilder) Query() (*sql.Rows, error) {
return nil, nil
}
-func (_ InsertBuilder) QueryContext(_ context.Context) (*sql.Rows, interface {
- Error() string
-}) {
+func (_ InsertBuilder) QueryContext(_ context.Context) (*sql.Rows, error) {
return nil, nil
}
@@ -144,15 +125,11 @@ func (_ InsertBuilder) RunWith(_ BaseRunner) InsertBuilder {
return InsertBuilder{}
}
-func (_ InsertBuilder) Scan(_ ...interface{}) interface {
- Error() string
-} {
+func (_ InsertBuilder) Scan(_ ...interface{}) error {
return nil
}
-func (_ InsertBuilder) ScanContext(_ context.Context, _ ...interface{}) interface {
- Error() string
-} {
+func (_ InsertBuilder) ScanContext(_ context.Context, _ ...interface{}) error {
return nil
}
@@ -168,9 +145,7 @@ func (_ InsertBuilder) Suffix(_ string, _ ...interface{}) InsertBuilder {
return InsertBuilder{}
}
-func (_ InsertBuilder) ToSql() (string, []interface{}, interface {
- Error() string
-}) {
+func (_ InsertBuilder) ToSql() (string, []interface{}, error) {
return "", nil, nil
}
@@ -179,15 +154,11 @@ func (_ InsertBuilder) Values(_ ...interface{}) InsertBuilder {
}
type PlaceholderFormat interface {
- ReplacePlaceholders(_ string) (string, interface {
- Error() string
- })
+ ReplacePlaceholders(_ string) (string, error)
}
type RowScanner interface {
- Scan(_ ...interface{}) interface {
- Error() string
- }
+ Scan(_ ...interface{}) error
}
type SelectBuilder struct{}
@@ -204,15 +175,11 @@ func (_ SelectBuilder) Distinct() SelectBuilder {
return SelectBuilder{}
}
-func (_ SelectBuilder) Exec() (sql.Result, interface {
- Error() string
-}) {
+func (_ SelectBuilder) Exec() (sql.Result, error) {
return nil, nil
}
-func (_ SelectBuilder) ExecContext(_ context.Context) (sql.Result, interface {
- Error() string
-}) {
+func (_ SelectBuilder) ExecContext(_ context.Context) (sql.Result, error) {
return nil, nil
}
@@ -272,15 +239,11 @@ func (_ SelectBuilder) Prefix(_ string, _ ...interface{}) SelectBuilder {
return SelectBuilder{}
}
-func (_ SelectBuilder) Query() (*sql.Rows, interface {
- Error() string
-}) {
+func (_ SelectBuilder) Query() (*sql.Rows, error) {
return nil, nil
}
-func (_ SelectBuilder) QueryContext(_ context.Context) (*sql.Rows, interface {
- Error() string
-}) {
+func (_ SelectBuilder) QueryContext(_ context.Context) (*sql.Rows, error) {
return nil, nil
}
@@ -304,15 +267,11 @@ func (_ SelectBuilder) RunWith(_ BaseRunner) SelectBuilder {
return SelectBuilder{}
}
-func (_ SelectBuilder) Scan(_ ...interface{}) interface {
- Error() string
-} {
+func (_ SelectBuilder) Scan(_ ...interface{}) error {
return nil
}
-func (_ SelectBuilder) ScanContext(_ context.Context, _ ...interface{}) interface {
- Error() string
-} {
+func (_ SelectBuilder) ScanContext(_ context.Context, _ ...interface{}) error {
return nil
}
@@ -320,9 +279,7 @@ func (_ SelectBuilder) Suffix(_ string, _ ...interface{}) SelectBuilder {
return SelectBuilder{}
}
-func (_ SelectBuilder) ToSql() (string, []interface{}, interface {
- Error() string
-}) {
+func (_ SelectBuilder) ToSql() (string, []interface{}, error) {
return "", nil, nil
}
@@ -360,15 +317,11 @@ func (_ StatementBuilderType) Update(_ string) UpdateBuilder {
type UpdateBuilder struct{}
-func (_ UpdateBuilder) Exec() (sql.Result, interface {
- Error() string
-}) {
+func (_ UpdateBuilder) Exec() (sql.Result, error) {
return nil, nil
}
-func (_ UpdateBuilder) ExecContext(_ context.Context) (sql.Result, interface {
- Error() string
-}) {
+func (_ UpdateBuilder) ExecContext(_ context.Context) (sql.Result, error) {
return nil, nil
}
@@ -392,15 +345,11 @@ func (_ UpdateBuilder) Prefix(_ string, _ ...interface{}) UpdateBuilder {
return UpdateBuilder{}
}
-func (_ UpdateBuilder) Query() (*sql.Rows, interface {
- Error() string
-}) {
+func (_ UpdateBuilder) Query() (*sql.Rows, error) {
return nil, nil
}
-func (_ UpdateBuilder) QueryContext(_ context.Context) (*sql.Rows, interface {
- Error() string
-}) {
+func (_ UpdateBuilder) QueryContext(_ context.Context) (*sql.Rows, error) {
return nil, nil
}
@@ -416,15 +365,11 @@ func (_ UpdateBuilder) RunWith(_ BaseRunner) UpdateBuilder {
return UpdateBuilder{}
}
-func (_ UpdateBuilder) Scan(_ ...interface{}) interface {
- Error() string
-} {
+func (_ UpdateBuilder) Scan(_ ...interface{}) error {
return nil
}
-func (_ UpdateBuilder) ScanContext(_ context.Context, _ ...interface{}) interface {
- Error() string
-} {
+func (_ UpdateBuilder) ScanContext(_ context.Context, _ ...interface{}) error {
return nil
}
@@ -444,9 +389,7 @@ func (_ UpdateBuilder) Table(_ string) UpdateBuilder {
return UpdateBuilder{}
}
-func (_ UpdateBuilder) ToSql() (string, []interface{}, interface {
- Error() string
-}) {
+func (_ UpdateBuilder) ToSql() (string, []interface{}, error) {
return "", nil, nil
}
diff --git a/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/LICENSE b/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/bson/primitive/stub.go b/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/bson/primitive/stub.go
new file mode 100644
index 00000000000..6f07aaff4ee
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/bson/primitive/stub.go
@@ -0,0 +1,23 @@
+// Code generated by depstubber. DO NOT EDIT.
+// This is a simple stub for go.mongodb.org/mongo-driver/bson/primitive, strictly for use in testing.
+
+// See the LICENSE file for information about the licensing of the original library.
+// Source: go.mongodb.org/mongo-driver/bson/primitive (exports: D; functions: )
+
+// Package primitive is a stub of go.mongodb.org/mongo-driver/bson/primitive, generated by depstubber.
+package primitive
+
+import ()
+
+type D []E
+
+func (_ D) Map() M {
+ return nil
+}
+
+type E struct {
+ Key string
+ Value interface{}
+}
+
+type M map[string]interface{}
diff --git a/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/bson/stub.go b/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/bson/stub.go
new file mode 100644
index 00000000000..de80f55501f
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/bson/stub.go
@@ -0,0 +1,5 @@
+package bson
+
+import "go.mongodb.org/mongo-driver/bson/primitive"
+
+type D = primitive.D
diff --git a/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/mongo/options/stub.go b/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/mongo/options/stub.go
new file mode 100644
index 00000000000..a9275c51610
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/mongo/options/stub.go
@@ -0,0 +1,217 @@
+// Code generated by depstubber. DO NOT EDIT.
+// This is a simple stub for go.mongodb.org/mongo-driver/mongo/options, strictly for use in testing.
+
+// See the LICENSE file for information about the licensing of the original library.
+// Source: go.mongodb.org/mongo-driver/mongo/options (exports: ; functions: Client)
+
+// Package options is a stub of go.mongodb.org/mongo-driver/mongo/options, generated by depstubber.
+package options
+
+import (
+ context "context"
+ tls "crypto/tls"
+ net "net"
+ time "time"
+)
+
+type AutoEncryptionOptions struct {
+ KeyVaultClientOptions *ClientOptions
+ KeyVaultNamespace string
+ KmsProviders map[string]map[string]interface{}
+ SchemaMap map[string]interface{}
+ BypassAutoEncryption *bool
+ ExtraOptions map[string]interface{}
+}
+
+func (_ *AutoEncryptionOptions) SetBypassAutoEncryption(_ bool) *AutoEncryptionOptions {
+ return nil
+}
+
+func (_ *AutoEncryptionOptions) SetExtraOptions(_ map[string]interface{}) *AutoEncryptionOptions {
+ return nil
+}
+
+func (_ *AutoEncryptionOptions) SetKeyVaultClientOptions(_ *ClientOptions) *AutoEncryptionOptions {
+ return nil
+}
+
+func (_ *AutoEncryptionOptions) SetKeyVaultNamespace(_ string) *AutoEncryptionOptions {
+ return nil
+}
+
+func (_ *AutoEncryptionOptions) SetKmsProviders(_ map[string]map[string]interface{}) *AutoEncryptionOptions {
+ return nil
+}
+
+func (_ *AutoEncryptionOptions) SetSchemaMap(_ map[string]interface{}) *AutoEncryptionOptions {
+ return nil
+}
+
+func Client() *ClientOptions {
+ return nil
+}
+
+type ClientOptions struct {
+ AppName *string
+ Auth *Credential
+ ConnectTimeout *time.Duration
+ Compressors []string
+ Dialer ContextDialer
+ HeartbeatInterval *time.Duration
+ Hosts []string
+ LocalThreshold *time.Duration
+ MaxConnIdleTime *time.Duration
+ MaxPoolSize *uint64
+ MinPoolSize *uint64
+ PoolMonitor *interface{}
+ Monitor *interface{}
+ ReadConcern *interface{}
+ ReadPreference *interface{}
+ Registry *interface{}
+ ReplicaSet *string
+ RetryWrites *bool
+ RetryReads *bool
+ ServerSelectionTimeout *time.Duration
+ Direct *bool
+ SocketTimeout *time.Duration
+ TLSConfig *tls.Config
+ WriteConcern *interface{}
+ ZlibLevel *int
+ ZstdLevel *int
+ AutoEncryptionOptions *AutoEncryptionOptions
+ AuthenticateToAnything *bool
+ Deployment interface{}
+}
+
+func (_ *ClientOptions) ApplyURI(_ string) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) GetURI() string {
+ return ""
+}
+
+func (_ *ClientOptions) SetAppName(_ string) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetAuth(_ Credential) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetAutoEncryptionOptions(_ *AutoEncryptionOptions) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetCompressors(_ []string) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetConnectTimeout(_ time.Duration) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetDialer(_ ContextDialer) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetDirect(_ bool) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetHeartbeatInterval(_ time.Duration) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetHosts(_ []string) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetLocalThreshold(_ time.Duration) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetMaxConnIdleTime(_ time.Duration) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetMaxPoolSize(_ uint64) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetMinPoolSize(_ uint64) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetMonitor(_ *interface{}) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetPoolMonitor(_ *interface{}) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetReadConcern(_ *interface{}) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetReadPreference(_ *interface{}) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetRegistry(_ *interface{}) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetReplicaSet(_ string) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetRetryReads(_ bool) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetRetryWrites(_ bool) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetServerSelectionTimeout(_ time.Duration) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetSocketTimeout(_ time.Duration) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetTLSConfig(_ *tls.Config) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetWriteConcern(_ *interface{}) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetZlibLevel(_ int) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) SetZstdLevel(_ int) *ClientOptions {
+ return nil
+}
+
+func (_ *ClientOptions) Validate() error {
+ return nil
+}
+
+type ContextDialer interface {
+ DialContext(_ context.Context, _ string, _ string) (net.Conn, error)
+}
+
+type Credential struct {
+ AuthMechanism string
+ AuthMechanismProperties map[string]string
+ AuthSource string
+ Username string
+ Password string
+ PasswordSet bool
+}
diff --git a/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/mongo/stub.go b/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/mongo/stub.go
new file mode 100644
index 00000000000..1a06732dbb1
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/mongo/stub.go
@@ -0,0 +1,393 @@
+// Code generated by depstubber. DO NOT EDIT.
+// This is a simple stub for go.mongodb.org/mongo-driver/mongo, strictly for use in testing.
+
+// See the LICENSE file for information about the licensing of the original library.
+// Source: go.mongodb.org/mongo-driver/mongo (exports: Pipeline; functions: Connect)
+
+// Package mongo is a stub of go.mongodb.org/mongo-driver/mongo, generated by depstubber.
+package mongo
+
+import (
+ context "context"
+ time "time"
+)
+
+type BulkWriteResult struct {
+ InsertedCount int64
+ MatchedCount int64
+ ModifiedCount int64
+ DeletedCount int64
+ UpsertedCount int64
+ UpsertedIDs map[int64]interface{}
+}
+
+type ChangeStream struct {
+ Current interface{}
+}
+
+func (_ *ChangeStream) Close(_ context.Context) error {
+ return nil
+}
+
+func (_ *ChangeStream) Decode(_ interface{}) error {
+ return nil
+}
+
+func (_ *ChangeStream) Err() error {
+ return nil
+}
+
+func (_ *ChangeStream) ID() int64 {
+ return 0
+}
+
+func (_ *ChangeStream) Next(_ context.Context) bool {
+ return false
+}
+
+func (_ *ChangeStream) ResumeToken() interface{} {
+ return nil
+}
+
+func (_ *ChangeStream) TryNext(_ context.Context) bool {
+ return false
+}
+
+type Client struct{}
+
+func (_ *Client) Connect(_ context.Context) error {
+ return nil
+}
+
+func (_ *Client) Database(_ string, _ ...*interface{}) *Database {
+ return nil
+}
+
+func (_ *Client) Disconnect(_ context.Context) error {
+ return nil
+}
+
+func (_ *Client) ListDatabaseNames(_ context.Context, _ interface{}, _ ...*interface{}) ([]string, error) {
+ return nil, nil
+}
+
+func (_ *Client) ListDatabases(_ context.Context, _ interface{}, _ ...*interface{}) (ListDatabasesResult, error) {
+ return ListDatabasesResult{}, nil
+}
+
+func (_ *Client) NumberSessionsInProgress() int {
+ return 0
+}
+
+func (_ *Client) Ping(_ context.Context, _ *interface{}) error {
+ return nil
+}
+
+func (_ *Client) StartSession(_ ...*interface{}) (Session, error) {
+ return nil, nil
+}
+
+func (_ *Client) UseSession(_ context.Context, _ func(SessionContext) error) error {
+ return nil
+}
+
+func (_ *Client) UseSessionWithOptions(_ context.Context, _ *interface{}, _ func(SessionContext) error) error {
+ return nil
+}
+
+func (_ *Client) Watch(_ context.Context, _ interface{}, _ ...*interface{}) (*ChangeStream, error) {
+ return nil, nil
+}
+
+type Collection struct{}
+
+func (_ *Collection) Aggregate(_ context.Context, _ interface{}, _ ...*interface{}) (*Cursor, error) {
+ return nil, nil
+}
+
+func (_ *Collection) BulkWrite(_ context.Context, _ []WriteModel, _ ...*interface{}) (*BulkWriteResult, error) {
+ return nil, nil
+}
+
+func (_ *Collection) Clone(_ ...*interface{}) (*Collection, error) {
+ return nil, nil
+}
+
+func (_ *Collection) CountDocuments(_ context.Context, _ interface{}, _ ...*interface{}) (int64, error) {
+ return 0, nil
+}
+
+func (_ *Collection) Database() *Database {
+ return nil
+}
+
+func (_ *Collection) DeleteMany(_ context.Context, _ interface{}, _ ...*interface{}) (*DeleteResult, error) {
+ return nil, nil
+}
+
+func (_ *Collection) DeleteOne(_ context.Context, _ interface{}, _ ...*interface{}) (*DeleteResult, error) {
+ return nil, nil
+}
+
+func (_ *Collection) Distinct(_ context.Context, _ string, _ interface{}, _ ...*interface{}) ([]interface{}, error) {
+ return nil, nil
+}
+
+func (_ *Collection) Drop(_ context.Context) error {
+ return nil
+}
+
+func (_ *Collection) EstimatedDocumentCount(_ context.Context, _ ...*interface{}) (int64, error) {
+ return 0, nil
+}
+
+func (_ *Collection) Find(_ context.Context, _ interface{}, _ ...*interface{}) (*Cursor, error) {
+ return nil, nil
+}
+
+func (_ *Collection) FindOne(_ context.Context, _ interface{}, _ ...*interface{}) *SingleResult {
+ return nil
+}
+
+func (_ *Collection) FindOneAndDelete(_ context.Context, _ interface{}, _ ...*interface{}) *SingleResult {
+ return nil
+}
+
+func (_ *Collection) FindOneAndReplace(_ context.Context, _ interface{}, _ interface{}, _ ...*interface{}) *SingleResult {
+ return nil
+}
+
+func (_ *Collection) FindOneAndUpdate(_ context.Context, _ interface{}, _ interface{}, _ ...*interface{}) *SingleResult {
+ return nil
+}
+
+func (_ *Collection) Indexes() IndexView {
+ return IndexView{}
+}
+
+func (_ *Collection) InsertMany(_ context.Context, _ []interface{}, _ ...*interface{}) (*InsertManyResult, error) {
+ return nil, nil
+}
+
+func (_ *Collection) InsertOne(_ context.Context, _ interface{}, _ ...*interface{}) (*InsertOneResult, error) {
+ return nil, nil
+}
+
+func (_ *Collection) Name() string {
+ return ""
+}
+
+func (_ *Collection) ReplaceOne(_ context.Context, _ interface{}, _ interface{}, _ ...*interface{}) (*UpdateResult, error) {
+ return nil, nil
+}
+
+func (_ *Collection) UpdateMany(_ context.Context, _ interface{}, _ interface{}, _ ...*interface{}) (*UpdateResult, error) {
+ return nil, nil
+}
+
+func (_ *Collection) UpdateOne(_ context.Context, _ interface{}, _ interface{}, _ ...*interface{}) (*UpdateResult, error) {
+ return nil, nil
+}
+
+func (_ *Collection) Watch(_ context.Context, _ interface{}, _ ...*interface{}) (*ChangeStream, error) {
+ return nil, nil
+}
+
+func Connect(_ context.Context, _ ...*interface{}) (*Client, error) {
+ return nil, nil
+}
+
+type Cursor struct {
+ Current interface{}
+}
+
+func (_ *Cursor) All(_ context.Context, _ interface{}) error {
+ return nil
+}
+
+func (_ *Cursor) Close(_ context.Context) error {
+ return nil
+}
+
+func (_ *Cursor) Decode(_ interface{}) error {
+ return nil
+}
+
+func (_ *Cursor) Err() error {
+ return nil
+}
+
+func (_ *Cursor) ID() int64 {
+ return 0
+}
+
+func (_ *Cursor) Next(_ context.Context) bool {
+ return false
+}
+
+func (_ *Cursor) TryNext(_ context.Context) bool {
+ return false
+}
+
+type Database struct{}
+
+func (_ *Database) Aggregate(_ context.Context, _ interface{}, _ ...*interface{}) (*Cursor, error) {
+ return nil, nil
+}
+
+func (_ *Database) Client() *Client {
+ return nil
+}
+
+func (_ *Database) Collection(_ string, _ ...*interface{}) *Collection {
+ return nil
+}
+
+func (_ *Database) Drop(_ context.Context) error {
+ return nil
+}
+
+func (_ *Database) ListCollectionNames(_ context.Context, _ interface{}, _ ...*interface{}) ([]string, error) {
+ return nil, nil
+}
+
+func (_ *Database) ListCollections(_ context.Context, _ interface{}, _ ...*interface{}) (*Cursor, error) {
+ return nil, nil
+}
+
+func (_ *Database) Name() string {
+ return ""
+}
+
+func (_ *Database) ReadConcern() *interface{} {
+ return nil
+}
+
+func (_ *Database) ReadPreference() *interface{} {
+ return nil
+}
+
+func (_ *Database) RunCommand(_ context.Context, _ interface{}, _ ...*interface{}) *SingleResult {
+ return nil
+}
+
+func (_ *Database) RunCommandCursor(_ context.Context, _ interface{}, _ ...*interface{}) (*Cursor, error) {
+ return nil, nil
+}
+
+func (_ *Database) Watch(_ context.Context, _ interface{}, _ ...*interface{}) (*ChangeStream, error) {
+ return nil, nil
+}
+
+func (_ *Database) WriteConcern() *interface{} {
+ return nil
+}
+
+type DatabaseSpecification struct {
+ Name string
+ SizeOnDisk int64
+ Empty bool
+}
+
+type DeleteResult struct {
+ DeletedCount int64
+}
+
+type IndexModel struct {
+ Keys interface{}
+ Options *interface{}
+}
+
+type IndexView struct{}
+
+func (_ IndexView) CreateMany(_ context.Context, _ []IndexModel, _ ...*interface{}) ([]string, error) {
+ return nil, nil
+}
+
+func (_ IndexView) CreateOne(_ context.Context, _ IndexModel, _ ...*interface{}) (string, error) {
+ return "", nil
+}
+
+func (_ IndexView) DropAll(_ context.Context, _ ...*interface{}) (interface{}, error) {
+ return nil, nil
+}
+
+func (_ IndexView) DropOne(_ context.Context, _ string, _ ...*interface{}) (interface{}, error) {
+ return nil, nil
+}
+
+func (_ IndexView) List(_ context.Context, _ ...*interface{}) (*Cursor, error) {
+ return nil, nil
+}
+
+type InsertManyResult struct {
+ InsertedIDs []interface{}
+}
+
+type InsertOneResult struct {
+ InsertedID interface{}
+}
+
+type ListDatabasesResult struct {
+ Databases []DatabaseSpecification
+ TotalSize int64
+}
+
+type Pipeline []interface{}
+
+type Session interface {
+ AbortTransaction(_ context.Context) error
+ AdvanceClusterTime(_ interface{}) error
+ AdvanceOperationTime(_ *interface{}) error
+ Client() *Client
+ ClusterTime() interface{}
+ CommitTransaction(_ context.Context) error
+ EndSession(_ context.Context)
+ OperationTime() *interface{}
+ StartTransaction(_ ...*interface{}) error
+ WithTransaction(_ context.Context, _ func(SessionContext) (interface{}, error), _ ...*interface{}) (interface{}, error)
+}
+
+type SessionContext interface {
+ AbortTransaction(_ context.Context) error
+ AdvanceClusterTime(_ interface{}) error
+ AdvanceOperationTime(_ *interface{}) error
+ Client() *Client
+ ClusterTime() interface{}
+ CommitTransaction(_ context.Context) error
+ Deadline() (time.Time, bool)
+ Done() <-chan struct{}
+ EndSession(_ context.Context)
+ Err() error
+ OperationTime() *interface{}
+ StartTransaction(_ ...*interface{}) error
+ Value(_ interface{}) interface{}
+ WithTransaction(_ context.Context, _ func(SessionContext) (interface{}, error), _ ...*interface{}) (interface{}, error)
+}
+
+type SingleResult struct{}
+
+func (_ *SingleResult) Decode(_ interface{}) error {
+ return nil
+}
+
+func (_ *SingleResult) DecodeBytes() (interface{}, error) {
+ return nil, nil
+}
+
+func (_ *SingleResult) Err() error {
+ return nil
+}
+
+type UpdateResult struct {
+ MatchedCount int64
+ ModifiedCount int64
+ UpsertedCount int64
+ UpsertedID interface{}
+}
+
+func (_ *UpdateResult) UnmarshalBSON(_ []byte) error {
+ return nil
+}
+
+type WriteModel interface{}
diff --git a/ql/test/query-tests/Security/CWE-089/vendor/modules.txt b/ql/test/query-tests/Security/CWE-089/vendor/modules.txt
index c32aa371c7a..ddbc30953a2 100644
--- a/ql/test/query-tests/Security/CWE-089/vendor/modules.txt
+++ b/ql/test/query-tests/Security/CWE-089/vendor/modules.txt
@@ -1,15 +1,6 @@
# github.com/Masterminds/squirrel v1.1.0
## explicit
github.com/Masterminds/squirrel
-# github.com/github/depstubber v0.0.0-20200414023404-c355b630c381
+# go.mongodb.org/mongo-driver v1.3.3
## explicit
-github.com/github/depstubber
-# github.com/go-sql-driver/mysql v1.5.0
-## explicit
-github.com/go-sql-driver/mysql
-# github.com/lib/pq v1.3.0
-## explicit
-github.com/lib/pq
-# github.com/mattn/go-sqlite3 v2.0.3+incompatible
-## explicit
-github.com/mattn/go-sqlite3
+go.mongodb.org/mongo-driver
From a55c828fe490176703cb9e0d5fe610172b0a7ee1 Mon Sep 17 00:00:00 2001
From: Max Schaefer <54907921+max-schaefer@users.noreply.github.com>
Date: Mon, 11 May 2020 15:26:30 +0100
Subject: [PATCH 079/157] Update
ql/src/experimental/CWE-840/ConditionalBypass.ql
Co-authored-by: porcupineyhairs <61983466+porcupineyhairs@users.noreply.github.com>
---
ql/src/experimental/CWE-840/ConditionalBypass.ql | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ql/src/experimental/CWE-840/ConditionalBypass.ql b/ql/src/experimental/CWE-840/ConditionalBypass.ql
index 621e02fdbe7..09d8aefe3f9 100644
--- a/ql/src/experimental/CWE-840/ConditionalBypass.ql
+++ b/ql/src/experimental/CWE-840/ConditionalBypass.ql
@@ -14,7 +14,7 @@ import go
* A taint-tracking configuration for reasoning about conditional bypass.
*/
class Configuration extends TaintTracking::Configuration {
- Configuration() { this = "ConitionalBypass" }
+ Configuration() { this = "ConditionalBypass" }
override predicate isSource(DataFlow::Node source) {
source instanceof UntrustedFlowSource
From 58e41e9302337151a50ae2c165ce994d07a5803d Mon Sep 17 00:00:00 2001
From: Sauyon Lee
Date: Mon, 11 May 2020 15:49:37 -0700
Subject: [PATCH 080/157] ReflectedXss: More broadly exclude values with a
constant prefix
---
ql/src/semmle/go/frameworks/Stdlib.qll | 2 +-
.../security/ReflectedXssCustomizations.qll | 8 +++++
.../Security/CWE-079/ReflectedXss.expected | 12 ++++---
ql/test/query-tests/Security/CWE-079/tst.go | 35 +++++++++++++++++++
4 files changed, 52 insertions(+), 5 deletions(-)
diff --git a/ql/src/semmle/go/frameworks/Stdlib.qll b/ql/src/semmle/go/frameworks/Stdlib.qll
index 36c9516a449..6d1228d62de 100644
--- a/ql/src/semmle/go/frameworks/Stdlib.qll
+++ b/ql/src/semmle/go/frameworks/Stdlib.qll
@@ -596,7 +596,7 @@ module Log {
/** Provides models of some functions in the `encoding/json` package. */
module EncodingJson {
- private class MarshalFunction extends TaintTracking::FunctionModel, MarshalingFunction::Range {
+ class MarshalFunction extends TaintTracking::FunctionModel, MarshalingFunction::Range {
MarshalFunction() {
this.hasQualifiedName("encoding/json", "Marshal") or
this.hasQualifiedName("encoding/json", "MarshalIndent")
diff --git a/ql/src/semmle/go/security/ReflectedXssCustomizations.qll b/ql/src/semmle/go/security/ReflectedXssCustomizations.qll
index 631c18780a7..826feb4a326 100644
--- a/ql/src/semmle/go/security/ReflectedXssCustomizations.qll
+++ b/ql/src/semmle/go/security/ReflectedXssCustomizations.qll
@@ -69,6 +69,14 @@ module ReflectedXss {
// - '%', which could be a format string.
call.getArgument(1).getStringValue().regexpMatch("^[^<%].*")
)
+ or
+ exists(DataFlow::Node pred | body = pred.getASuccessor*() |
+ // data starting with `<` cannot cause an HTML content type to be detected.
+ pred.getStringValue().regexpMatch("^[^<].*")
+ or
+ // json data cannot begin with `<`
+ pred = any(EncodingJson::MarshalFunction mf).getOutput().getExitNode(_)
+ )
)
}
diff --git a/ql/test/query-tests/Security/CWE-079/ReflectedXss.expected b/ql/test/query-tests/Security/CWE-079/ReflectedXss.expected
index a4dcddef714..c32d23ceb7b 100644
--- a/ql/test/query-tests/Security/CWE-079/ReflectedXss.expected
+++ b/ql/test/query-tests/Security/CWE-079/ReflectedXss.expected
@@ -2,7 +2,8 @@ edges
| ReflectedXss.go:11:15:11:20 | selection of Form : Values | ReflectedXss.go:14:44:14:51 | username |
| contenttype.go:11:11:11:16 | selection of Form : Values | contenttype.go:17:11:17:22 | type conversion |
| contenttype.go:49:11:49:16 | selection of Form : Values | contenttype.go:53:34:53:37 | data |
-| tst.go:11:15:11:20 | selection of Form : Values | tst.go:15:12:15:39 | type conversion |
+| tst.go:13:15:13:20 | selection of Form : Values | tst.go:17:12:17:39 | type conversion |
+| tst.go:47:14:47:19 | selection of Form : Values | tst.go:52:12:52:26 | type conversion |
nodes
| ReflectedXss.go:11:15:11:20 | selection of Form : Values | semmle.label | selection of Form : Values |
| ReflectedXss.go:14:44:14:51 | username | semmle.label | username |
@@ -10,10 +11,13 @@ nodes
| contenttype.go:17:11:17:22 | type conversion | semmle.label | type conversion |
| contenttype.go:49:11:49:16 | selection of Form : Values | semmle.label | selection of Form : Values |
| contenttype.go:53:34:53:37 | data | semmle.label | data |
-| tst.go:11:15:11:20 | selection of Form : Values | semmle.label | selection of Form : Values |
-| tst.go:15:12:15:39 | type conversion | semmle.label | type conversion |
+| tst.go:13:15:13:20 | selection of Form : Values | semmle.label | selection of Form : Values |
+| tst.go:17:12:17:39 | type conversion | semmle.label | type conversion |
+| tst.go:47:14:47:19 | selection of Form : Values | semmle.label | selection of Form : Values |
+| tst.go:52:12:52:26 | type conversion | semmle.label | type conversion |
#select
| ReflectedXss.go:14:44:14:51 | username | ReflectedXss.go:11:15:11:20 | selection of Form : Values | ReflectedXss.go:14:44:14:51 | username | Cross-site scripting vulnerability due to $@. | ReflectedXss.go:11:15:11:20 | selection of Form | user-provided value |
| contenttype.go:17:11:17:22 | type conversion | contenttype.go:11:11:11:16 | selection of Form : Values | contenttype.go:17:11:17:22 | type conversion | Cross-site scripting vulnerability due to $@. | contenttype.go:11:11:11:16 | selection of Form | user-provided value |
| contenttype.go:53:34:53:37 | data | contenttype.go:49:11:49:16 | selection of Form : Values | contenttype.go:53:34:53:37 | data | Cross-site scripting vulnerability due to $@. | contenttype.go:49:11:49:16 | selection of Form | user-provided value |
-| tst.go:15:12:15:39 | type conversion | tst.go:11:15:11:20 | selection of Form : Values | tst.go:15:12:15:39 | type conversion | Cross-site scripting vulnerability due to $@. | tst.go:11:15:11:20 | selection of Form | user-provided value |
+| tst.go:17:12:17:39 | type conversion | tst.go:13:15:13:20 | selection of Form : Values | tst.go:17:12:17:39 | type conversion | Cross-site scripting vulnerability due to $@. | tst.go:13:15:13:20 | selection of Form | user-provided value |
+| tst.go:52:12:52:26 | type conversion | tst.go:47:14:47:19 | selection of Form : Values | tst.go:52:12:52:26 | type conversion | Cross-site scripting vulnerability due to $@. | tst.go:47:14:47:19 | selection of Form | user-provided value |
diff --git a/ql/test/query-tests/Security/CWE-079/tst.go b/ql/test/query-tests/Security/CWE-079/tst.go
index 20e2edd4935..f4a154c94cb 100644
--- a/ql/test/query-tests/Security/CWE-079/tst.go
+++ b/ql/test/query-tests/Security/CWE-079/tst.go
@@ -1,6 +1,8 @@
package main
import (
+ "encoding/json"
+ "fmt"
"net/http"
"strings"
)
@@ -19,3 +21,36 @@ func serve6() {
})
http.ListenAndServe(":80", nil)
}
+
+type User struct {
+ name string
+}
+
+func serve7() {
+ http.HandleFunc("/user", func(w http.ResponseWriter, r *http.Request) {
+ r.ParseForm()
+ username := r.Form.Get("username")
+ if !isValidUsername(username) {
+ // OK: json data cannot cause an HTML content type to be detected
+ a, _ := json.Marshal(User{username})
+ w.Write(a)
+ } else {
+ // TODO: do something exciting
+ }
+ })
+ http.ListenAndServe(":80", nil)
+}
+
+func serve8() {
+ http.HandleFunc("/user", func(w http.ResponseWriter, r *http.Request) {
+ r.ParseForm()
+ service := r.Form.Get("service")
+ if service != "service1" && service != "service2" {
+ fmt.Fprintln(w, "Service not found")
+ } else {
+ // OK: json data cannot cause an HTML content type to be detected
+ w.Write([]byte(service))
+ }
+ })
+ http.ListenAndServe(":80", nil)
+}
From 67a7294d10c5430115802cdf2826089bebb0e9e4 Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Tue, 12 May 2020 12:51:13 +0300
Subject: [PATCH 081/157] Simplify and remove deprecated; add severity
---
.../CWE-681/IncorrectNumericConversion.ql | 31 +++++--------------
1 file changed, 7 insertions(+), 24 deletions(-)
diff --git a/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql b/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
index e9fa0ad8d95..82f2244f081 100644
--- a/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
+++ b/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
@@ -3,6 +3,7 @@
* @description Converting the result of strconv.Atoi (and other parsers from strconv package)
* to numeric types of smaller bit size can produce unexpected values.
* @kind path-problem
+ * @problem.severity warning
* @id go/incorrect-numeric-conversion
* @tags security
* external/cwe/cwe-190
@@ -28,6 +29,7 @@ class ParseUint extends Function {
ParseUint() { this.hasQualifiedName("strconv", "ParseUint") }
}
+/** Provides a class for modeling number parser calls. */
module ParserCall {
/**
* A data-flow call node that parses a number.
@@ -51,12 +53,10 @@ class ParserCall extends DataFlow::CallNode {
string getParserName() { result = self.getParserName() }
}
-int archBasedBitSize() { result = 0 }
-
class AtoiCall extends DataFlow::CallNode, ParserCall::Range {
AtoiCall() { exists(Atoi atoi | this = atoi.getACall()) }
- override int getTargetBitSize() { result = archBasedBitSize() }
+ override int getTargetBitSize() { result = 0 }
override string getParserName() { result = "strconv.Atoi" }
}
@@ -90,27 +90,10 @@ class NumericConversionExpr extends ConversionExpr {
int bitSize;
NumericConversionExpr() {
- exists(ConversionExpr conv |
- fullTypeName = conv.getTypeExpr().getType().getUnderlyingType*().getName() and
- (
- // 8 bit
- fullTypeName = ["int8", "uint8"] and
- bitSize = 8
- or
- // 16 bit
- fullTypeName = ["int16", "uint16"] and
- bitSize = 16
- or
- // 32 bit
- fullTypeName = ["int32", "uint32", "float32"] and
- bitSize = 32
- or
- // 64 bit
- fullTypeName = ["int64", "uint64", "float64"] and
- bitSize = 64
- )
- |
- this = conv
+ exists(NumericType conv |
+ conv = getTypeExpr().getType().getUnderlyingType() and
+ fullTypeName = conv.getName() and
+ bitSize = conv.getSize()
)
}
From ea7c38c99c5d86eb3c4bddf1456575ca9c77afa3 Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Tue, 12 May 2020 13:00:27 +0300
Subject: [PATCH 082/157] Remove references section from qhelp file
---
.../experimental/CWE-681/IncorrectNumericConversion.qhelp | 8 --------
1 file changed, 8 deletions(-)
diff --git a/ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp b/ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp
index 4174e70cd62..f978858ed89 100644
--- a/ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp
+++ b/ql/src/experimental/CWE-681/IncorrectNumericConversion.qhelp
@@ -62,12 +62,4 @@
-
-
- mitre.org: CWE-681: Incorrect Conversion between Numeric Types.
-
-
- mitre.org: CWE-190: Integer Overflow or Wraparound.
-
-
From 623d5b3a97e7723b545719bf433e2e8774b00812 Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Tue, 12 May 2020 13:00:50 +0300
Subject: [PATCH 083/157] Add comments
---
ql/src/experimental/CWE-681/IncorrectNumericConversion.ql | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql b/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
index 82f2244f081..0b157a879e9 100644
--- a/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
+++ b/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
@@ -13,23 +13,27 @@
import go
import DataFlow::PathGraph
+/** A function that parses integers. */
class Atoi extends Function {
Atoi() { this.hasQualifiedName("strconv", "Atoi") }
}
+/** A function that parses floating-point numbers. */
class ParseFloat extends Function {
ParseFloat() { this.hasQualifiedName("strconv", "ParseFloat") }
}
+/** A function that parses integers with a specifiable bitSize. */
class ParseInt extends Function {
ParseInt() { this.hasQualifiedName("strconv", "ParseInt") }
}
+/** A function that parses unsigned integers with a specifiable bitSize. */
class ParseUint extends Function {
ParseUint() { this.hasQualifiedName("strconv", "ParseUint") }
}
-/** Provides a class for modeling number parser calls. */
+/** Provides a class for modeling calls to number-parsing functions. */
module ParserCall {
/**
* A data-flow call node that parses a number.
From e5e74f34d7504951f8c4e2ef9ab12e02fdee152f Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Tue, 12 May 2020 13:06:11 +0300
Subject: [PATCH 084/157] Add note on why the zero is commented out in
Lt32BitFlowConfig
---
ql/src/experimental/CWE-681/IncorrectNumericConversion.ql | 1 +
1 file changed, 1 insertion(+)
diff --git a/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql b/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
index 0b157a879e9..60116a65111 100644
--- a/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
+++ b/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
@@ -144,6 +144,7 @@ class Lt32BitFlowConfig extends TaintTracking::Configuration, DataFlow::Configur
Lt32BitFlowConfig() { this = "Lt32BitFlowConfig" }
override predicate isSource(DataFlow::Node source) {
+ // NOTE: target bit size 0 is already addressed in Lt64BitFlowConfig.
exists(ParserCall call | call.getTargetBitSize() = [/*0,*/ 32] | source = call)
}
From 556f527193be81040b272c440ce967dfd95de7b2 Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Tue, 12 May 2020 13:12:47 +0300
Subject: [PATCH 085/157] Exclude results in test files
---
.../CWE-681/IncorrectNumericConversion.ql | 14 +++++++++++---
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql b/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
index 60116a65111..def77b696e9 100644
--- a/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
+++ b/ql/src/experimental/CWE-681/IncorrectNumericConversion.ql
@@ -249,9 +249,17 @@ string formatBitSize(ParserCall call) {
from DataFlow::PathNode source, DataFlow::PathNode sink
where
- exists(Lt64BitFlowConfig cfg | cfg.hasFlowPath(source, sink)) or
- exists(Lt32BitFlowConfig cfg | cfg.hasFlowPath(source, sink)) or
- exists(Lt16BitFlowConfig cfg | cfg.hasFlowPath(source, sink))
+ (
+ exists(Lt64BitFlowConfig cfg | cfg.hasFlowPath(source, sink))
+ or
+ exists(Lt32BitFlowConfig cfg | cfg.hasFlowPath(source, sink))
+ or
+ exists(Lt16BitFlowConfig cfg | cfg.hasFlowPath(source, sink))
+ ) and
+ // Exclude results in test files:
+ exists(File fl | fl = sink.getNode().asExpr().(NumericConversionExpr).getFile() |
+ not fl instanceof TestFile
+ )
select source.getNode(), source, sink,
"Incorrect conversion of a " + formatBitSize(source.getNode().(ParserCall)) + "-bit number from " +
source.getNode().(ParserCall).getParserName() + " result to a lower bit size type " +
From 1ef06e9e40bb3a83d324f649057712a9aa27e790 Mon Sep 17 00:00:00 2001
From: Sauyon Lee
Date: Tue, 12 May 2020 04:52:44 -0700
Subject: [PATCH 086/157] Add getType to SsaWithFields
---
ql/src/semmle/go/dataflow/SSA.qll | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/ql/src/semmle/go/dataflow/SSA.qll b/ql/src/semmle/go/dataflow/SSA.qll
index 6d92b1c926d..15859eb1e60 100644
--- a/ql/src/semmle/go/dataflow/SSA.qll
+++ b/ql/src/semmle/go/dataflow/SSA.qll
@@ -338,6 +338,13 @@ class SsaWithFields extends TSsaWithFields {
/** Gets a use that refers to this SSA variable with fields. */
DataFlow::Node getAUse() { this = accessPath(result.asInstruction()) }
+ /** Gets the type of this SSA variable with fields. */
+ Type getType() {
+ exists(SsaVariable var | this = TRoot(var) | result = var.getType())
+ or
+ exists(Field f | this = TStep(_, f) | result = f.getType())
+ }
+
/** Gets a textual representation of this element. */
string toString() {
exists(SsaVariable var | this = TRoot(var) | result = "(" + var + ")")
From 33e4961c95c4981c40bc8d63dd79812bd3ec5cdb Mon Sep 17 00:00:00 2001
From: Sauyon Lee
Date: Tue, 12 May 2020 04:53:18 -0700
Subject: [PATCH 087/157] ReflectedXss: Add an equality test guard
---
ql/src/semmle/go/security/ReflectedXssCustomizations.qll | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/ql/src/semmle/go/security/ReflectedXssCustomizations.qll b/ql/src/semmle/go/security/ReflectedXssCustomizations.qll
index 826feb4a326..65c660f2112 100644
--- a/ql/src/semmle/go/security/ReflectedXssCustomizations.qll
+++ b/ql/src/semmle/go/security/ReflectedXssCustomizations.qll
@@ -103,4 +103,13 @@ module ReflectedXss {
)
}
}
+
+ /**
+ * A check against a constant value, considered a barrier for reflected XSS.
+ */
+ class EqualityTestGuard extends SanitizerGuard, DataFlow::EqualityTestNode {
+ override predicate checks(Expr e, boolean outcome) {
+ e = this.getAnOperand().asExpr() and this.eq(outcome, _, _)
+ }
+ }
}
From e51bc42bfb7526144f23a8977e64dd8de6ae9ad0 Mon Sep 17 00:00:00 2001
From: Porcupiney Hairs
Date: Tue, 12 May 2020 17:31:24 +0530
Subject: [PATCH 088/157] fix metadata
---
.../CWE-807/SensitiveConditionBypass.ql | 23 ++++++++++---------
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/ql/src/experimental/CWE-807/SensitiveConditionBypass.ql b/ql/src/experimental/CWE-807/SensitiveConditionBypass.ql
index 119d5f9961f..35ba33bd056 100644
--- a/ql/src/experimental/CWE-807/SensitiveConditionBypass.ql
+++ b/ql/src/experimental/CWE-807/SensitiveConditionBypass.ql
@@ -1,14 +1,15 @@
-// /**
-// * @name User-controlled bypassing of sensitive action
-// * @description This query tests for user-controlled bypassing
-// * of sensitive actions.
-// * @id go/sensitive-condition-bypass
-// * @kind problem
-// * @problem.severity warning
-// * @tags external/cwe/cwe-807
-// * external/cwe/cwe-247
-// * external/cwe/cwe-350
-// */
+/**
+ * @name User-controlled bypassing of sensitive action
+ * @description This query tests for user-controlled bypassing
+ * of sensitive actions.
+ * @id go/sensitive-condition-bypass
+ * @kind problem
+ * @problem.severity warning
+ * @tags external/cwe/cwe-807
+ * external/cwe/cwe-247
+ * external/cwe/cwe-350
+ */
+
import go
import SensitiveConditionBypass
From 21bfaec0d3b3624fccbdd6a2f40c746842d3b703 Mon Sep 17 00:00:00 2001
From: Sauyon Lee
Date: Tue, 12 May 2020 05:44:19 -0700
Subject: [PATCH 089/157] TaintedPath: Add change note for tempfiles
---
change-notes/2020-05-12-tainted-path.md | 4 ++++
1 file changed, 4 insertions(+)
create mode 100644 change-notes/2020-05-12-tainted-path.md
diff --git a/change-notes/2020-05-12-tainted-path.md b/change-notes/2020-05-12-tainted-path.md
new file mode 100644
index 00000000000..ccd78ed7d1f
--- /dev/null
+++ b/change-notes/2020-05-12-tainted-path.md
@@ -0,0 +1,4 @@
+lgtm,codescanning
+* The queries "Uncontrolled data used in path expression" and "Arbitrary file write during zip
+ extraction ("zip slip")" have been improved to recognize more file APIs, which may lead to more
+ alerts.
From 84e2a5ddd2344365e0d8f715fd51162fe71c6360 Mon Sep 17 00:00:00 2001
From: Slavomir
Date: Tue, 12 May 2020 16:27:11 +0300
Subject: [PATCH 090/157] Add experimental library: gin web framework (#117)
---
ql/src/experimental/frameworks/Gin.qll | 173 ++++++
.../experimental/frameworks/Gin/Gin.expected | 63 +++
ql/test/experimental/frameworks/Gin/Gin.go | 251 +++++++++
ql/test/experimental/frameworks/Gin/Gin.ql | 4 +
ql/test/experimental/frameworks/Gin/go.mod | 5 +
.../vendor/github.com/gin-gonic/gin/LICENSE | 21 +
.../github.com/gin-gonic/gin/binding/stub.go | 12 +
.../vendor/github.com/gin-gonic/gin/stub.go | 500 ++++++++++++++++++
.../frameworks/Gin/vendor/modules.txt | 3 +
9 files changed, 1032 insertions(+)
create mode 100644 ql/src/experimental/frameworks/Gin.qll
create mode 100644 ql/test/experimental/frameworks/Gin/Gin.expected
create mode 100644 ql/test/experimental/frameworks/Gin/Gin.go
create mode 100644 ql/test/experimental/frameworks/Gin/Gin.ql
create mode 100644 ql/test/experimental/frameworks/Gin/go.mod
create mode 100644 ql/test/experimental/frameworks/Gin/vendor/github.com/gin-gonic/gin/LICENSE
create mode 100644 ql/test/experimental/frameworks/Gin/vendor/github.com/gin-gonic/gin/binding/stub.go
create mode 100644 ql/test/experimental/frameworks/Gin/vendor/github.com/gin-gonic/gin/stub.go
create mode 100644 ql/test/experimental/frameworks/Gin/vendor/modules.txt
diff --git a/ql/src/experimental/frameworks/Gin.qll b/ql/src/experimental/frameworks/Gin.qll
new file mode 100644
index 00000000000..019e4473774
--- /dev/null
+++ b/ql/src/experimental/frameworks/Gin.qll
@@ -0,0 +1,173 @@
+/**
+ * Provides classes for working with untrusted flow sources from the `github.com/gin-gonic/gin` package.
+ */
+
+import go
+
+private module Gin {
+ /**
+ * Data from a `Context` struct, considered as a source of untrusted flow.
+ */
+ private class GithubComGinGonicGinContextSource extends UntrustedFlowSource::Range {
+ GithubComGinGonicGinContextSource() {
+ exists(string packagePath, string typeName |
+ packagePath = "github.com/gin-gonic/gin" and
+ typeName = "Context"
+ |
+ // Method calls:
+ exists(DataFlow::MethodCallNode call, string methodName |
+ call.getTarget().hasQualifiedName(packagePath, typeName, methodName) and
+ (
+ methodName = "FullPath"
+ or
+ methodName = "GetHeader"
+ or
+ methodName = "QueryArray"
+ or
+ methodName = "Query"
+ or
+ methodName = "PostFormArray"
+ or
+ methodName = "PostForm"
+ or
+ methodName = "Param"
+ or
+ methodName = "GetStringSlice"
+ or
+ methodName = "GetString"
+ or
+ methodName = "GetRawData"
+ or
+ methodName = "ClientIP"
+ or
+ methodName = "ContentType"
+ or
+ methodName = "Cookie"
+ or
+ methodName = "GetQueryArray"
+ or
+ methodName = "GetQuery"
+ or
+ methodName = "GetPostFormArray"
+ or
+ methodName = "GetPostForm"
+ or
+ methodName = "DefaultPostForm"
+ or
+ methodName = "DefaultQuery"
+ or
+ methodName = "GetPostFormMap"
+ or
+ methodName = "GetQueryMap"
+ or
+ methodName = "GetStringMap"
+ or
+ methodName = "GetStringMapString"
+ or
+ methodName = "GetStringMapStringSlice"
+ or
+ methodName = "PostFormMap"
+ or
+ methodName = "QueryMap"
+ )
+ |
+ this = call.getResult(0)
+ or
+ this = call.getResult()
+ )
+ or
+ // Field reads:
+ exists(DataFlow::Field fld |
+ fld.hasQualifiedName(packagePath, typeName, "Accepted") and
+ this = fld.getARead()
+ )
+ )
+ }
+ }
+
+ /**
+ * Data from a `Params` slice, considered as a source of untrusted flow.
+ */
+ private class GithubComGinGonicGinParamsSource extends UntrustedFlowSource::Range {
+ GithubComGinGonicGinParamsSource() {
+ exists(string packagePath, string typeName |
+ packagePath = "github.com/gin-gonic/gin" and
+ typeName = "Params"
+ |
+ // Any read of a variable of this type:
+ exists(DataFlow::ReadNode read | read.getType().hasQualifiedName(packagePath, typeName) |
+ this = read
+ )
+ or
+ // Method calls:
+ exists(DataFlow::MethodCallNode call |
+ call.getTarget().hasQualifiedName(packagePath, typeName, ["ByName", "Get"])
+ |
+ this = call.getResult(0)
+ or
+ this = call.getResult()
+ )
+ )
+ }
+ }
+
+ /**
+ * Data from a `Param` struct, considered as a source of untrusted flow.
+ */
+ private class GithubComGinGonicGinParamSource extends UntrustedFlowSource::Range {
+ GithubComGinGonicGinParamSource() {
+ exists(string packagePath, string typeName |
+ packagePath = "github.com/gin-gonic/gin" and
+ typeName = "Param"
+ |
+ // Any read of a variable of this type:
+ exists(DataFlow::ReadNode read | read.getType().hasQualifiedName(packagePath, typeName) |
+ this = read
+ )
+ or
+ // Field reads:
+ exists(DataFlow::Field fld | fld.hasQualifiedName(packagePath, typeName, ["Key", "Value"]) |
+ this = fld.getARead()
+ )
+ )
+ }
+ }
+
+ /**
+ * A call to a method on `Context` struct that unmarshals data into a target.
+ */
+ private class GithubComGinGonicGinContextBindSource extends UntrustedFlowSource::Range {
+ GithubComGinGonicGinContextBindSource() {
+ exists(string packagePath, string typeName |
+ packagePath = "github.com/gin-gonic/gin" and
+ typeName = "Context"
+ |
+ exists(DataFlow::MethodCallNode call, string methodName |
+ call.getTarget().hasQualifiedName(packagePath, typeName, methodName) and
+ (
+ methodName = "BindJSON" or
+ methodName = "BindYAML" or
+ methodName = "BindXML" or
+ methodName = "BindUri" or
+ methodName = "BindQuery" or
+ methodName = "BindWith" or
+ methodName = "BindHeader" or
+ methodName = "MustBindWith" or
+ methodName = "Bind" or
+ methodName = "ShouldBind" or
+ methodName = "ShouldBindBodyWith" or
+ methodName = "ShouldBindJSON" or
+ methodName = "ShouldBindQuery" or
+ methodName = "ShouldBindUri" or
+ methodName = "ShouldBindHeader" or
+ methodName = "ShouldBindWith" or
+ methodName = "ShouldBindXML" or
+ methodName = "ShouldBindYAML"
+ )
+ |
+ this = call.getArgument(0)
+ )
+ )
+ }
+ }
+}
diff --git a/ql/test/experimental/frameworks/Gin/Gin.expected b/ql/test/experimental/frameworks/Gin/Gin.expected
new file mode 100644
index 00000000000..b30b4861d87
--- /dev/null
+++ b/ql/test/experimental/frameworks/Gin/Gin.expected
@@ -0,0 +1,63 @@
+| Gin.go:23:10:23:29 | call to GetHeader |
+| Gin.go:27:10:27:30 | call to QueryArray |
+| Gin.go:31:10:31:25 | call to Query |
+| Gin.go:35:10:35:33 | call to PostFormArray |
+| Gin.go:39:10:39:28 | call to PostForm |
+| Gin.go:43:10:43:25 | call to Param |
+| Gin.go:47:10:47:34 | call to GetStringSlice |
+| Gin.go:51:10:51:29 | call to GetString |
+| Gin.go:55:3:55:28 | ... := ...[0] |
+| Gin.go:59:10:59:23 | call to ClientIP |
+| Gin.go:63:10:63:26 | call to ContentType |
+| Gin.go:67:3:67:29 | ... := ...[0] |
+| Gin.go:71:3:71:36 | ... := ...[0] |
+| Gin.go:75:3:75:31 | ... := ...[0] |
+| Gin.go:79:3:79:39 | ... := ...[0] |
+| Gin.go:83:3:83:34 | ... := ...[0] |
+| Gin.go:87:10:87:52 | call to DefaultPostForm |
+| Gin.go:91:10:91:49 | call to DefaultQuery |
+| Gin.go:95:3:95:37 | ... := ...[0] |
+| Gin.go:99:3:99:34 | ... := ...[0] |
+| Gin.go:103:10:103:32 | call to GetStringMap |
+| Gin.go:107:10:107:38 | call to GetStringMapString |
+| Gin.go:111:10:111:43 | call to GetStringMapStringSlice |
+| Gin.go:115:10:115:31 | call to PostFormMap |
+| Gin.go:119:10:119:28 | call to QueryMap |
+| Gin.go:123:10:123:23 | call to FullPath |
+| Gin.go:129:10:129:21 | selection of Accepted |
+| Gin.go:133:10:133:19 | selection of Params |
+| Gin.go:134:7:134:9 | val |
+| Gin.go:134:7:134:12 | index expression |
+| Gin.go:134:7:134:18 | selection of Value |
+| Gin.go:139:10:139:19 | selection of Params |
+| Gin.go:139:10:139:22 | index expression |
+| Gin.go:140:7:140:9 | val |
+| Gin.go:140:7:140:15 | selection of Value |
+| Gin.go:143:10:143:19 | selection of Params |
+| Gin.go:143:10:143:34 | call to ByName |
+| Gin.go:147:3:147:34 | ... := ...[0] |
+| Gin.go:147:13:147:22 | selection of Params |
+| Gin.go:153:12:153:21 | selection of Params |
+| Gin.go:153:12:153:24 | index expression |
+| Gin.go:154:10:154:14 | param |
+| Gin.go:154:10:154:18 | selection of Key |
+| Gin.go:155:10:155:14 | param |
+| Gin.go:155:10:155:20 | selection of Value |
+| Gin.go:163:16:163:22 | &... |
+| Gin.go:168:15:168:21 | &... |
+| Gin.go:173:16:173:22 | &... |
+| Gin.go:178:15:178:21 | &... |
+| Gin.go:183:17:183:23 | &... |
+| Gin.go:188:20:188:26 | &... |
+| Gin.go:193:16:193:22 | &... |
+| Gin.go:198:12:198:18 | &... |
+| Gin.go:203:18:203:24 | &... |
+| Gin.go:208:26:208:32 | &... |
+| Gin.go:213:22:213:28 | &... |
+| Gin.go:218:23:218:29 | &... |
+| Gin.go:223:21:223:27 | &... |
+| Gin.go:228:22:228:28 | &... |
+| Gin.go:233:21:233:27 | &... |
+| Gin.go:238:22:238:28 | &... |
+| Gin.go:243:18:243:24 | &... |
+| Gin.go:248:24:248:30 | &... |
diff --git a/ql/test/experimental/frameworks/Gin/Gin.go b/ql/test/experimental/frameworks/Gin/Gin.go
new file mode 100644
index 00000000000..774ce47e822
--- /dev/null
+++ b/ql/test/experimental/frameworks/Gin/Gin.go
@@ -0,0 +1,251 @@
+package main
+
+//go:generate depstubber -vendor github.com/gin-gonic/gin Context
+//go:generate depstubber -vendor github.com/gin-gonic/gin/binding "" YAML
+
+import (
+ "github.com/gin-gonic/gin"
+ "github.com/gin-gonic/gin/binding"
+)
+
+func main() {}
+
+type Person struct {
+ Name string `form:"name"`
+ Address string `form:"address"`
+}
+
+func use(val string) {}
+
+// gin
+func ginHandler(ctx *gin.Context) {
+ {
+ val := ctx.GetHeader("key")
+ use(val)
+ }
+ {
+ val := ctx.QueryArray("key")
+ use(val[0])
+ }
+ {
+ val := ctx.Query("key")
+ use(val)
+ }
+ {
+ val := ctx.PostFormArray("key")
+ use(val[0])
+ }
+ {
+ val := ctx.PostForm("key")
+ use(val)
+ }
+ {
+ val := ctx.Param("key")
+ use(val)
+ }
+ {
+ val := ctx.GetStringSlice("key")
+ use(val[0])
+ }
+ {
+ val := ctx.GetString("key")
+ use(val)
+ }
+ {
+ val, _ := ctx.GetRawData()
+ use(string(val))
+ }
+ {
+ val := ctx.ClientIP()
+ use(val)
+ }
+ {
+ val := ctx.ContentType()
+ use(val)
+ }
+ {
+ val, _ := ctx.Cookie("key")
+ use(val)
+ }
+ {
+ val, _ := ctx.GetQueryArray("key")
+ use(val[0])
+ }
+ {
+ val, _ := ctx.GetQuery("key")
+ use(val)
+ }
+ {
+ val, _ := ctx.GetPostFormArray("key")
+ use(val[0])
+ }
+ {
+ val, _ := ctx.GetPostForm("key")
+ use(val)
+ }
+ {
+ val := ctx.DefaultPostForm("key", "default-value")
+ use(val)
+ }
+ {
+ val := ctx.DefaultQuery("key", "default-value")
+ use(val)
+ }
+ {
+ val, _ := ctx.GetPostFormMap("key")
+ use(val["a"])
+ }
+ {
+ val, _ := ctx.GetQueryMap("key")
+ use(val["a"])
+ }
+ {
+ val := ctx.GetStringMap("key")
+ use(val["a"].(string))
+ }
+ {
+ val := ctx.GetStringMapString("key")
+ use(val["a"])
+ }
+ {
+ val := ctx.GetStringMapStringSlice("key")
+ use(val["a"][0])
+ }
+ {
+ val := ctx.PostFormMap("key")
+ use(val["a"])
+ }
+ {
+ val := ctx.QueryMap("key")
+ use(val["a"])
+ }
+ {
+ val := ctx.FullPath()
+ use(val)
+ }
+
+ // fields:
+ {
+ val := ctx.Accepted
+ use(val[0])
+ }
+ {
+ val := ctx.Params
+ use(val[0].Value)
+ }
+
+ // Params:
+ {
+ val := ctx.Params[0]
+ use(val.Value)
+ }
+ {
+ val := ctx.Params.ByName("name")
+ use(val)
+ }
+ {
+ val, _ := ctx.Params.Get("name")
+ use(val)
+ }
+
+ // Param:
+ {
+ param := ctx.Params[0]
+ key := param.Key
+ val := param.Value
+ use(key)
+ use(val)
+ }
+
+ // bind:
+ {
+ var person Person
+ ctx.BindYAML(&person)
+ use(person.Name)
+ }
+ {
+ var person Person
+ ctx.BindXML(&person)
+ use(person.Name)
+ }
+ {
+ var person Person
+ ctx.BindWith(&person, binding.YAML)
+ use(person.Name)
+ }
+ {
+ var person Person
+ ctx.BindUri(&person)
+ use(person.Name)
+ }
+ {
+ var person Person
+ ctx.BindQuery(&person)
+ use(person.Name)
+ }
+ {
+ var person Person
+ ctx.MustBindWith(&person, binding.YAML)
+ use(person.Name)
+ }
+ {
+ var person Person
+ ctx.BindJSON(&person)
+ use(person.Name)
+ }
+ {
+ var person Person
+ ctx.Bind(&person)
+ use(person.Name)
+ }
+ {
+ var person Person
+ ctx.ShouldBind(&person)
+ use(person.Name)
+ }
+ {
+ var person Person
+ ctx.ShouldBindBodyWith(&person, binding.YAML)
+ use(person.Name)
+ }
+ {
+ var person Person
+ ctx.ShouldBindJSON(&person)
+ use(person.Name)
+ }
+ {
+ var person Person
+ ctx.ShouldBindQuery(&person)
+ use(person.Name)
+ }
+ {
+ var person Person
+ ctx.ShouldBindUri(&person)
+ use(person.Name)
+ }
+ {
+ var person Person
+ ctx.ShouldBindWith(&person, binding.YAML)
+ use(person.Name)
+ }
+ {
+ var person Person
+ ctx.ShouldBindXML(&person)
+ use(person.Name)
+ }
+ {
+ var person Person
+ ctx.ShouldBindYAML(&person)
+ use(person.Name)
+ }
+ {
+ var person Person
+ ctx.BindHeader(&person)
+ use(person.Name)
+ }
+ {
+ var person Person
+ ctx.ShouldBindHeader(&person)
+ use(person.Name)
+ }
+}
diff --git a/ql/test/experimental/frameworks/Gin/Gin.ql b/ql/test/experimental/frameworks/Gin/Gin.ql
new file mode 100644
index 00000000000..91f3324f23a
--- /dev/null
+++ b/ql/test/experimental/frameworks/Gin/Gin.ql
@@ -0,0 +1,4 @@
+import go
+import experimental.frameworks.Gin
+
+select any(UntrustedFlowSource src)
diff --git a/ql/test/experimental/frameworks/Gin/go.mod b/ql/test/experimental/frameworks/Gin/go.mod
new file mode 100644
index 00000000000..d6312810ebf
--- /dev/null
+++ b/ql/test/experimental/frameworks/Gin/go.mod
@@ -0,0 +1,5 @@
+module example.com/m
+
+go 1.14
+
+require github.com/gin-gonic/gin v1.6.2
diff --git a/ql/test/experimental/frameworks/Gin/vendor/github.com/gin-gonic/gin/LICENSE b/ql/test/experimental/frameworks/Gin/vendor/github.com/gin-gonic/gin/LICENSE
new file mode 100644
index 00000000000..1ff7f370605
--- /dev/null
+++ b/ql/test/experimental/frameworks/Gin/vendor/github.com/gin-gonic/gin/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Manuel Martínez-Almeida
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/ql/test/experimental/frameworks/Gin/vendor/github.com/gin-gonic/gin/binding/stub.go b/ql/test/experimental/frameworks/Gin/vendor/github.com/gin-gonic/gin/binding/stub.go
new file mode 100644
index 00000000000..43fd634edcd
--- /dev/null
+++ b/ql/test/experimental/frameworks/Gin/vendor/github.com/gin-gonic/gin/binding/stub.go
@@ -0,0 +1,12 @@
+// Code generated by depstubber. DO NOT EDIT.
+// This is a simple stub for github.com/gin-gonic/gin/binding, strictly for use in testing.
+
+// See the LICENSE file for information about the licensing of the original library.
+// Source: github.com/gin-gonic/gin/binding (exports: ; functions: YAML)
+
+// Package binding is a stub of github.com/gin-gonic/gin/binding, generated by depstubber.
+package binding
+
+import ()
+
+var YAML interface{} = nil
diff --git a/ql/test/experimental/frameworks/Gin/vendor/github.com/gin-gonic/gin/stub.go b/ql/test/experimental/frameworks/Gin/vendor/github.com/gin-gonic/gin/stub.go
new file mode 100644
index 00000000000..eb68095e53e
--- /dev/null
+++ b/ql/test/experimental/frameworks/Gin/vendor/github.com/gin-gonic/gin/stub.go
@@ -0,0 +1,500 @@
+// Code generated by depstubber. DO NOT EDIT.
+// This is a simple stub for github.com/gin-gonic/gin, strictly for use in testing.
+
+// See the LICENSE file for information about the licensing of the original library.
+// Source: github.com/gin-gonic/gin (exports: Context; functions: )
+
+// Package gin is a stub of github.com/gin-gonic/gin, generated by depstubber.
+package gin
+
+import (
+ bufio "bufio"
+ io "io"
+ multipart "mime/multipart"
+ net "net"
+ http "net/http"
+ sync "sync"
+ time "time"
+)
+
+type Context struct {
+ Request *http.Request
+ Writer ResponseWriter
+ Params Params
+ KeysMutex *sync.RWMutex
+ Keys map[string]interface{}
+ Errors interface{}
+ Accepted []string
+}
+
+func (_ *Context) Abort() {}
+
+func (_ *Context) AbortWithError(_ int, _ interface {
+ Error() string
+}) *Error {
+ return nil
+}
+
+func (_ *Context) AbortWithStatus(_ int) {}
+
+func (_ *Context) AbortWithStatusJSON(_ int, _ interface{}) {}
+
+func (_ *Context) AsciiJSON(_ int, _ interface{}) {}
+
+func (_ *Context) Bind(_ interface{}) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Context) BindHeader(_ interface{}) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Context) BindJSON(_ interface{}) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Context) BindQuery(_ interface{}) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Context) BindUri(_ interface{}) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Context) BindWith(_ interface{}, _ interface{}) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Context) BindXML(_ interface{}) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Context) BindYAML(_ interface{}) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Context) ClientIP() string {
+ return ""
+}
+
+func (_ *Context) ContentType() string {
+ return ""
+}
+
+func (_ *Context) Cookie(_ string) (string, interface {
+ Error() string
+}) {
+ return "", nil
+}
+
+func (_ *Context) Copy() *Context {
+ return nil
+}
+
+func (_ *Context) Data(_ int, _ string, _ []uint8) {}
+
+func (_ *Context) DataFromReader(_ int, _ int64, _ string, _ io.Reader, _ map[string]string) {}
+
+func (_ *Context) Deadline() (time.Time, bool) {
+ return time.Time{}, false
+}
+
+func (_ *Context) DefaultPostForm(_ string, _ string) string {
+ return ""
+}
+
+func (_ *Context) DefaultQuery(_ string, _ string) string {
+ return ""
+}
+
+func (_ *Context) Done() <-chan struct{} {
+ return nil
+}
+
+func (_ *Context) Err() interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Context) Error(_ interface {
+ Error() string
+}) *Error {
+ return nil
+}
+
+func (_ *Context) File(_ string) {}
+
+func (_ *Context) FileAttachment(_ string, _ string) {}
+
+func (_ *Context) FileFromFS(_ string, _ http.FileSystem) {}
+
+func (_ *Context) FormFile(_ string) (*multipart.FileHeader, interface {
+ Error() string
+}) {
+ return nil, nil
+}
+
+func (_ *Context) FullPath() string {
+ return ""
+}
+
+func (_ *Context) Get(_ string) (interface{}, bool) {
+ return nil, false
+}
+
+func (_ *Context) GetBool(_ string) bool {
+ return false
+}
+
+func (_ *Context) GetDuration(_ string) time.Duration {
+ return 0
+}
+
+func (_ *Context) GetFloat64(_ string) float64 {
+ return 0
+}
+
+func (_ *Context) GetHeader(_ string) string {
+ return ""
+}
+
+func (_ *Context) GetInt(_ string) int {
+ return 0
+}
+
+func (_ *Context) GetInt64(_ string) int64 {
+ return 0
+}
+
+func (_ *Context) GetPostForm(_ string) (string, bool) {
+ return "", false
+}
+
+func (_ *Context) GetPostFormArray(_ string) ([]string, bool) {
+ return nil, false
+}
+
+func (_ *Context) GetPostFormMap(_ string) (map[string]string, bool) {
+ return nil, false
+}
+
+func (_ *Context) GetQuery(_ string) (string, bool) {
+ return "", false
+}
+
+func (_ *Context) GetQueryArray(_ string) ([]string, bool) {
+ return nil, false
+}
+
+func (_ *Context) GetQueryMap(_ string) (map[string]string, bool) {
+ return nil, false
+}
+
+func (_ *Context) GetRawData() ([]uint8, interface {
+ Error() string
+}) {
+ return nil, nil
+}
+
+func (_ *Context) GetString(_ string) string {
+ return ""
+}
+
+func (_ *Context) GetStringMap(_ string) map[string]interface{} {
+ return nil
+}
+
+func (_ *Context) GetStringMapString(_ string) map[string]string {
+ return nil
+}
+
+func (_ *Context) GetStringMapStringSlice(_ string) map[string][]string {
+ return nil
+}
+
+func (_ *Context) GetStringSlice(_ string) []string {
+ return nil
+}
+
+func (_ *Context) GetTime(_ string) time.Time {
+ return time.Time{}
+}
+
+func (_ *Context) HTML(_ int, _ string, _ interface{}) {}
+
+func (_ *Context) Handler() HandlerFunc {
+ return nil
+}
+
+func (_ *Context) HandlerName() string {
+ return ""
+}
+
+func (_ *Context) HandlerNames() []string {
+ return nil
+}
+
+func (_ *Context) Header(_ string, _ string) {}
+
+func (_ *Context) IndentedJSON(_ int, _ interface{}) {}
+
+func (_ *Context) IsAborted() bool {
+ return false
+}
+
+func (_ *Context) IsWebsocket() bool {
+ return false
+}
+
+func (_ *Context) JSON(_ int, _ interface{}) {}
+
+func (_ *Context) JSONP(_ int, _ interface{}) {}
+
+func (_ *Context) MultipartForm() (*multipart.Form, interface {
+ Error() string
+}) {
+ return nil, nil
+}
+
+func (_ *Context) MustBindWith(_ interface{}, _ interface{}) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Context) MustGet(_ string) interface{} {
+ return nil
+}
+
+func (_ *Context) Negotiate(_ int, _ Negotiate) {}
+
+func (_ *Context) NegotiateFormat(_ ...string) string {
+ return ""
+}
+
+func (_ *Context) Next() {}
+
+func (_ *Context) Param(_ string) string {
+ return ""
+}
+
+func (_ *Context) PostForm(_ string) string {
+ return ""
+}
+
+func (_ *Context) PostFormArray(_ string) []string {
+ return nil
+}
+
+func (_ *Context) PostFormMap(_ string) map[string]string {
+ return nil
+}
+
+func (_ *Context) ProtoBuf(_ int, _ interface{}) {}
+
+func (_ *Context) PureJSON(_ int, _ interface{}) {}
+
+func (_ *Context) Query(_ string) string {
+ return ""
+}
+
+func (_ *Context) QueryArray(_ string) []string {
+ return nil
+}
+
+func (_ *Context) QueryMap(_ string) map[string]string {
+ return nil
+}
+
+func (_ *Context) Redirect(_ int, _ string) {}
+
+func (_ *Context) Render(_ int, _ interface{}) {}
+
+func (_ *Context) SSEvent(_ string, _ interface{}) {}
+
+func (_ *Context) SaveUploadedFile(_ *multipart.FileHeader, _ string) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Context) SecureJSON(_ int, _ interface{}) {}
+
+func (_ *Context) Set(_ string, _ interface{}) {}
+
+func (_ *Context) SetAccepted(_ ...string) {}
+
+func (_ *Context) SetCookie(_ string, _ string, _ int, _ string, _ string, _ bool, _ bool) {}
+
+func (_ *Context) SetSameSite(_ http.SameSite) {}
+
+func (_ *Context) ShouldBind(_ interface{}) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Context) ShouldBindBodyWith(_ interface{}, _ interface{}) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Context) ShouldBindHeader(_ interface{}) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Context) ShouldBindJSON(_ interface{}) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Context) ShouldBindQuery(_ interface{}) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Context) ShouldBindUri(_ interface{}) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Context) ShouldBindWith(_ interface{}, _ interface{}) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Context) ShouldBindXML(_ interface{}) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Context) ShouldBindYAML(_ interface{}) interface {
+ Error() string
+} {
+ return nil
+}
+
+func (_ *Context) Status(_ int) {}
+
+func (_ *Context) Stream(_ func(io.Writer) bool) bool {
+ return false
+}
+
+func (_ *Context) String(_ int, _ string, _ ...interface{}) {}
+
+func (_ *Context) Value(_ interface{}) interface{} {
+ return nil
+}
+
+func (_ *Context) XML(_ int, _ interface{}) {}
+
+func (_ *Context) YAML(_ int, _ interface{}) {}
+
+type Error struct {
+ Err interface {
+ Error() string
+ }
+ Type ErrorType
+ Meta interface{}
+}
+
+func (_ Error) Error() string {
+ return ""
+}
+
+func (_ *Error) IsType(_ ErrorType) bool {
+ return false
+}
+
+func (_ *Error) JSON() interface{} {
+ return nil
+}
+
+func (_ *Error) MarshalJSON() ([]uint8, interface {
+ Error() string
+}) {
+ return nil, nil
+}
+
+func (_ *Error) SetMeta(_ interface{}) *Error {
+ return nil
+}
+
+func (_ *Error) SetType(_ ErrorType) *Error {
+ return nil
+}
+
+type ErrorType uint64
+
+type HandlerFunc func(*Context)
+
+type Negotiate struct {
+ Offered []string
+ HTMLName string
+ HTMLData interface{}
+ JSONData interface{}
+ XMLData interface{}
+ YAMLData interface{}
+ Data interface{}
+}
+
+type Param struct {
+ Key string
+ Value string
+}
+
+type Params []Param
+
+func (_ Params) ByName(_ string) string {
+ return ""
+}
+
+func (_ Params) Get(_ string) (string, bool) {
+ return "", false
+}
+
+type ResponseWriter interface {
+ CloseNotify() <-chan bool
+ Flush()
+ Header() http.Header
+ Hijack() (net.Conn, *bufio.ReadWriter, interface {
+ Error() string
+ })
+ Pusher() http.Pusher
+ Size() int
+ Status() int
+ Write(_ []uint8) (int, interface {
+ Error() string
+ })
+ WriteHeader(_ int)
+ WriteHeaderNow()
+ WriteString(_ string) (int, interface {
+ Error() string
+ })
+ Written() bool
+}
diff --git a/ql/test/experimental/frameworks/Gin/vendor/modules.txt b/ql/test/experimental/frameworks/Gin/vendor/modules.txt
new file mode 100644
index 00000000000..5f2816316f4
--- /dev/null
+++ b/ql/test/experimental/frameworks/Gin/vendor/modules.txt
@@ -0,0 +1,3 @@
+# github.com/gin-gonic/gin v1.6.2
+## explicit
+github.com/gin-gonic/gin
From 9e5645fa9df3a5eb6049356f7944321cc249f45d Mon Sep 17 00:00:00 2001
From: Sauyon Lee
Date: Wed, 13 May 2020 03:56:55 -0700
Subject: [PATCH 091/157] Add similar predicate to SsaWithFields
---
ql/src/Security/CWE-601/BadRedirectCheck.ql | 13 ++-----------
ql/src/semmle/go/dataflow/SSA.qll | 9 +++++++++
2 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/ql/src/Security/CWE-601/BadRedirectCheck.ql b/ql/src/Security/CWE-601/BadRedirectCheck.ql
index 968b1ab2c13..168326cb30e 100644
--- a/ql/src/Security/CWE-601/BadRedirectCheck.ql
+++ b/ql/src/Security/CWE-601/BadRedirectCheck.ql
@@ -141,15 +141,6 @@ predicate isBadRedirectCheckOrWrapper(DataFlow::Node check, SsaWithFields v) {
)
}
-/**
- * Gets an SSA-with-fields variable that is similar to `v` in the sense that it has the same
- * root variable and the same sequence of field accesses.
- */
-SsaWithFields similar(SsaWithFields v) {
- result.getBaseVariable().getSourceVariable() = v.getBaseVariable().getSourceVariable() and
- result.getQualifiedName() = v.getQualifiedName()
-}
-
/**
* Holds if `check` checks that `v` has a leading slash, but not whether it has another slash or a
* backslash in its second position.
@@ -161,8 +152,8 @@ predicate isBadRedirectCheck(DataFlow::Node check, SsaWithFields v) {
// (we allow those checks to be on variables that are most likely equivalent to `v`
// to rule out false positives due to minor variations in data flow)
not (
- isCheckedForSecondSlash(similar(v)) and
- isCheckedForSecondBackslash(similar(v))
+ isCheckedForSecondSlash(v.similar()) and
+ isCheckedForSecondBackslash(v.similar())
)
}
diff --git a/ql/src/semmle/go/dataflow/SSA.qll b/ql/src/semmle/go/dataflow/SSA.qll
index 15859eb1e60..4f31c197ef2 100644
--- a/ql/src/semmle/go/dataflow/SSA.qll
+++ b/ql/src/semmle/go/dataflow/SSA.qll
@@ -352,6 +352,15 @@ class SsaWithFields extends TSsaWithFields {
exists(SsaWithFields base, Field f | this = TStep(base, f) | result = base + "." + f.getName())
}
+ /**
+ * Gets an SSA-with-fields variable that is similar to this SSA-with-fields variable in the
+ * sense that it has the same root variable and the same sequence of field accesses.
+ */
+ SsaWithFields similar() {
+ result.getBaseVariable().getSourceVariable() = this.getBaseVariable().getSourceVariable() and
+ result.getQualifiedName() = this.getQualifiedName()
+ }
+
/**
* Gets the qualified name of the source variable or variable and fields that this represents.
*
From 748dd6801ef3adf7f6b72ea112d515ae8422761f Mon Sep 17 00:00:00 2001
From: Sauyon Lee
Date: Wed, 13 May 2020 04:31:07 -0700
Subject: [PATCH 092/157] Handle HTTP response writers that are fields
---
ql/src/semmle/go/Concepts.qll | 6 +++---
ql/src/semmle/go/frameworks/HTTP.qll | 14 +++++++++++---
ql/src/semmle/go/frameworks/Macaron.qll | 9 +++++++--
.../go/security/ReflectedXssCustomizations.qll | 2 +-
4 files changed, 22 insertions(+), 9 deletions(-)
diff --git a/ql/src/semmle/go/Concepts.qll b/ql/src/semmle/go/Concepts.qll
index e17b5f3889a..81cd1b1ff72 100644
--- a/ql/src/semmle/go/Concepts.qll
+++ b/ql/src/semmle/go/Concepts.qll
@@ -366,8 +366,8 @@ module HTTP {
* extend `HTTP::ResponseWriter` instead.
*/
abstract class Range extends Variable {
- /** Gets a data-flow node that represents this response writer. */
- DataFlow::Node getANode() { result = this.getARead().getASuccessor*() }
+ /** Gets a data-flow node that is a use of this response writer. */
+ abstract DataFlow::Node getANode();
}
}
@@ -391,7 +391,7 @@ module HTTP {
/** Gets a redirect that is sent in this HTTP response. */
Redirect getARedirect() { result.getResponseWriter() = this }
- /** Gets a data-flow node that represents this response writer. */
+ /** Gets a data-flow node that is a use of this response writer. */
DataFlow::Node getANode() { result = self.getANode() }
}
diff --git a/ql/src/semmle/go/frameworks/HTTP.qll b/ql/src/semmle/go/frameworks/HTTP.qll
index 44e0cd23533..a159d75db7b 100644
--- a/ql/src/semmle/go/frameworks/HTTP.qll
+++ b/ql/src/semmle/go/frameworks/HTTP.qll
@@ -54,13 +54,21 @@ private module StdlibHttp {
}
}
+ /** The declaration of a variable which either is or has a field that implements the http.ResponseWriter type */
private class StdlibResponseWriter extends HTTP::ResponseWriter::Range {
- StdlibResponseWriter() { this.getType().implements("net/http", "ResponseWriter") }
+ SsaWithFields v;
+
+ StdlibResponseWriter() {
+ this = v.getBaseVariable().getSourceVariable() and
+ exists(Type t | t.implements("net/http", "ResponseWriter") | v.getType() = t)
+ }
+
+ override DataFlow::Node getANode() { result = v.similar().getAUse().getASuccessor*() }
/** Gets a header object that corresponds to this HTTP response. */
DataFlow::MethodCallNode getAHeaderObject() {
- result.getTarget().hasQualifiedName("net/http", _, "Header") and
- this.getARead() = result.getReceiver()
+ result.getTarget().getName() = "Header" and
+ this.getANode() = result.getReceiver()
}
}
diff --git a/ql/src/semmle/go/frameworks/Macaron.qll b/ql/src/semmle/go/frameworks/Macaron.qll
index 134e7a390bb..a38b2b20da0 100644
--- a/ql/src/semmle/go/frameworks/Macaron.qll
+++ b/ql/src/semmle/go/frameworks/Macaron.qll
@@ -6,11 +6,16 @@ import go
private module Macaron {
private class Context extends HTTP::ResponseWriter::Range {
+ SsaWithFields v;
+
Context() {
+ this = v.getBaseVariable().getSourceVariable() and
exists(Method m | m.hasQualifiedName("gopkg.in/macaron.v1", "Context", "Redirect") |
- m = this.getType().getMethod("Redirect")
+ v.getType().getMethod("Redirect") = m
)
}
+
+ override DataFlow::Node getANode() { result = v.similar().getAUse().getASuccessor*() }
}
private class RedirectCall extends HTTP::Redirect::Range, DataFlow::MethodCallNode {
@@ -20,6 +25,6 @@ private module Macaron {
override DataFlow::Node getUrl() { result = this.getArgument(0) }
- override HTTP::ResponseWriter getResponseWriter() { result.getARead() = this.getReceiver() }
+ override HTTP::ResponseWriter getResponseWriter() { result.getANode() = this.getReceiver() }
}
}
diff --git a/ql/src/semmle/go/security/ReflectedXssCustomizations.qll b/ql/src/semmle/go/security/ReflectedXssCustomizations.qll
index 65c660f2112..b6b3757c060 100644
--- a/ql/src/semmle/go/security/ReflectedXssCustomizations.qll
+++ b/ql/src/semmle/go/security/ReflectedXssCustomizations.qll
@@ -59,7 +59,7 @@ module ReflectedXss {
not htmlTypeSpecified(body) and
(
exists(HTTP::HeaderWrite hw | hw = body.getResponseWriter().getAHeaderWrite() |
- hw.definesHeader("content-type", _)
+ hw.getName().getStringValue().toLowerCase() = "content-type"
)
or
exists(DataFlow::CallNode call | call.getTarget().hasQualifiedName("fmt", "Fprintf") |
From 83a3b6336f4060086c5f121f29022c29ad22e273 Mon Sep 17 00:00:00 2001
From: Sauyon Lee
Date: Wed, 13 May 2020 04:31:23 -0700
Subject: [PATCH 093/157] Add change note
---
change-notes/2020-05-11-reflected-xss.md | 3 +++
1 file changed, 3 insertions(+)
create mode 100644 change-notes/2020-05-11-reflected-xss.md
diff --git a/change-notes/2020-05-11-reflected-xss.md b/change-notes/2020-05-11-reflected-xss.md
new file mode 100644
index 00000000000..0d1f703c7e4
--- /dev/null
+++ b/change-notes/2020-05-11-reflected-xss.md
@@ -0,0 +1,3 @@
+lgtm,codescanning
+* The query "Reflected cross-site scripting" has been improved to recognize more cases where the
+ value should be considered to be safe, which should lead to fewer false positive results.
From e034458574aa26d4d548f54d2a92eaeaa14e2937 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Wed, 13 May 2020 15:25:54 +0100
Subject: [PATCH 094/157] Fix MongoDB tests.
---
ql/test/library-tests/semmle/go/frameworks/NoSQL/main.go | 2 +-
.../CWE-089/vendor/go.mongodb.org/mongo-driver/mongo/stub.go | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/ql/test/library-tests/semmle/go/frameworks/NoSQL/main.go b/ql/test/library-tests/semmle/go/frameworks/NoSQL/main.go
index 438763a587e..a437eef781b 100644
--- a/ql/test/library-tests/semmle/go/frameworks/NoSQL/main.go
+++ b/ql/test/library-tests/semmle/go/frameworks/NoSQL/main.go
@@ -10,7 +10,7 @@ import (
"go.mongodb.org/mongo-driver/mongo"
)
-func test(coll *mongo.Collection, filter interface{}, models []WriteModel, ctx context.Context) {
+func test(coll *mongo.Collection, filter interface{}, models []mongo.WriteModel, ctx context.Context) {
fieldName := "test"
document := filter
diff --git a/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/mongo/stub.go b/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/mongo/stub.go
index 1a06732dbb1..013a9e90963 100644
--- a/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/mongo/stub.go
+++ b/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/mongo/stub.go
@@ -193,7 +193,7 @@ func (_ *Collection) Watch(_ context.Context, _ interface{}, _ ...*interface{})
return nil, nil
}
-func Connect(_ context.Context, _ ...*interface{}) (*Client, error) {
+func Connect(_ context.Context, _ ...interface{}) (*Client, error) {
return nil, nil
}
From ac9e39120b362f60b2580d9fcd3825b64b1934ad Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Wed, 13 May 2020 15:26:16 +0100
Subject: [PATCH 095/157] Fix unused variable in test.
---
ql/test/query-tests/Security/CWE-022/tst.go | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/ql/test/query-tests/Security/CWE-022/tst.go b/ql/test/query-tests/Security/CWE-022/tst.go
index 8cb4fe5ee56..599faccf0f1 100644
--- a/ql/test/query-tests/Security/CWE-022/tst.go
+++ b/ql/test/query-tests/Security/CWE-022/tst.go
@@ -15,7 +15,7 @@ func uploadFile(w http.ResponseWriter, r *http.Request) {
// err handling
defer file.Close()
tempFile, _ := ioutil.TempFile("/tmp", handler.Filename) // NOT OK
- // do stuff with tempFile
+ use(tempFile)
}
func unzip2(f string, root string) {
@@ -50,3 +50,5 @@ func containedIn(f string, root string) bool {
}
return false
}
+
+func use(v interface{}) {}
From ec2314310e5d2a304bfed3cbf17cf3060662e4b7 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Wed, 13 May 2020 15:29:30 +0100
Subject: [PATCH 096/157] Fix code example in query.
---
.../experimental/CWE-807/SensitiveConditionBypassBad.go | 6 +++++-
ql/src/semmle/go/frameworks/NoSQL.qll | 9 ++++++---
2 files changed, 11 insertions(+), 4 deletions(-)
diff --git a/ql/src/experimental/CWE-807/SensitiveConditionBypassBad.go b/ql/src/experimental/CWE-807/SensitiveConditionBypassBad.go
index e2ca02615db..bf8e70f88b7 100644
--- a/ql/src/experimental/CWE-807/SensitiveConditionBypassBad.go
+++ b/ql/src/experimental/CWE-807/SensitiveConditionBypassBad.go
@@ -1,4 +1,8 @@
-func ex3(w http.ResponseWriter, r *http.Request) {
+package main
+
+import "net/http"
+
+func example(w http.ResponseWriter, r *http.Request) {
test2 := "test"
if r.Header.Get("X-Password") != test2 {
login()
diff --git a/ql/src/semmle/go/frameworks/NoSQL.qll b/ql/src/semmle/go/frameworks/NoSQL.qll
index a49c3864b62..10444e67451 100644
--- a/ql/src/semmle/go/frameworks/NoSQL.qll
+++ b/ql/src/semmle/go/frameworks/NoSQL.qll
@@ -96,9 +96,12 @@ module NoSQL {
}
}
- predicate isAdditionalMongoTaintStep(DataFlow::Node prev, DataFlow::Node succ) {
- // Taint bson.E if input is tainted
- exists(Write w, DataFlow::Node base, Field f | w.writesField(base, f, prev) |
+ /**
+ * Holds if taint flows from `pred` to `succ` through a MongoDB-specific API.
+ */
+ predicate isAdditionalMongoTaintStep(DataFlow::Node pred, DataFlow::Node succ) {
+ // Taint an entry if the `Value` is tainted
+ exists(Write w, DataFlow::Node base, Field f | w.writesField(base, f, pred) |
base = succ.getASuccessor*() and
base.getType().hasQualifiedName(mongoBsonPrimitive(), "E") and
f.getName() = "Value"
From 6e58524b7885b08f7616b3b013c82a0ec0d44261 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Wed, 13 May 2020 15:40:31 +0100
Subject: [PATCH 097/157] Fix a typo.
---
ql/src/semmle/go/frameworks/NoSQL.qll | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ql/src/semmle/go/frameworks/NoSQL.qll b/ql/src/semmle/go/frameworks/NoSQL.qll
index 10444e67451..46c73d6ad40 100644
--- a/ql/src/semmle/go/frameworks/NoSQL.qll
+++ b/ql/src/semmle/go/frameworks/NoSQL.qll
@@ -9,7 +9,7 @@ module NoSQL {
/**
* A data-flow node whose string value is interpreted as (part of) a NoSQL query.
*
- * Extends this class to refine existing API models. If you want to model new APIs,
+ * Extend this class to refine existing API models. If you want to model new APIs,
* extend `NoSQL::QueryString::Range` instead.
*/
class NoSQLQueryString extends DataFlow::Node {
From 41b5fc17abd494b374a79119a76674f402e3757d Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Wed, 13 May 2020 15:40:36 +0100
Subject: [PATCH 098/157] Inline two single-use predicates.
This fixes a TODO.
---
ql/src/semmle/go/frameworks/NoSQL.qll | 9 ++-------
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/ql/src/semmle/go/frameworks/NoSQL.qll b/ql/src/semmle/go/frameworks/NoSQL.qll
index 46c73d6ad40..d80fe3f20b7 100644
--- a/ql/src/semmle/go/frameworks/NoSQL.qll
+++ b/ql/src/semmle/go/frameworks/NoSQL.qll
@@ -18,11 +18,6 @@ module NoSQL {
NoSQLQueryString() { this = self }
}
- //TODO : Replace the following two predicate definitions with a simple call to package()
- private string mongoDb() { result = "go.mongodb.org/mongo-driver/mongo" }
-
- private string mongoBsonPrimitive() { result = "go.mongodb.org/mongo-driver/bson/primitive" }
-
/** Provides classes for working with SQL query strings. */
module NoSQLQueryString {
/**
@@ -89,7 +84,7 @@ module NoSQL {
MongoDbCollectionQueryString() {
exists(Method meth, string methodName, int n |
collectionMethods(methodName, n) and
- meth.hasQualifiedName(mongoDb(), "Collection", methodName) and
+ meth.hasQualifiedName("go.mongodb.org/mongo-driver/mongo", "Collection", methodName) and
this = meth.getACall().getArgument(n)
)
}
@@ -103,7 +98,7 @@ module NoSQL {
// Taint an entry if the `Value` is tainted
exists(Write w, DataFlow::Node base, Field f | w.writesField(base, f, pred) |
base = succ.getASuccessor*() and
- base.getType().hasQualifiedName(mongoBsonPrimitive(), "E") and
+ base.getType().hasQualifiedName("go.mongodb.org/mongo-driver/bson/primitive", "E") and
f.getName() = "Value"
)
}
From e852caea07013d518411ab29818ac850fa494922 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Wed, 13 May 2020 15:53:29 +0100
Subject: [PATCH 099/157] Cleanup of `Io` module.
- Undid rename from `Io` to `IO`
- Ensured function signatures in comments have leading `func`
- Removed superfluous `extends Function` clauses
- Renamed a few classes to be more consistent.
---
ql/src/semmle/go/frameworks/Stdlib.qll | 45 +++++++++++++-------------
1 file changed, 23 insertions(+), 22 deletions(-)
diff --git a/ql/src/semmle/go/frameworks/Stdlib.qll b/ql/src/semmle/go/frameworks/Stdlib.qll
index d184ab52f5e..271814cce9b 100644
--- a/ql/src/semmle/go/frameworks/Stdlib.qll
+++ b/ql/src/semmle/go/frameworks/Stdlib.qll
@@ -122,8 +122,8 @@ module Fmt {
}
/** Provides models of commonly used functions in the `io` package. */
-module IO {
- private class Copy extends TaintTracking::FunctionModel, Function {
+module Io {
+ private class Copy extends TaintTracking::FunctionModel {
Copy() {
// func Copy(dst Writer, src Reader) (written int64, err error)
// func CopyBuffer(dst Writer, src Reader, buf []byte) (written int64, err error)
@@ -138,7 +138,7 @@ module IO {
}
}
- private class Pipe extends TaintTracking::FunctionModel, Function {
+ private class Pipe extends TaintTracking::FunctionModel {
Pipe() {
// func Pipe() (*PipeReader, *PipeWriter)
hasQualifiedName("io", "Pipe")
@@ -149,8 +149,8 @@ module IO {
}
}
- private class IORead extends TaintTracking::FunctionModel, Function {
- IORead() {
+ private class ReadAtLeast extends TaintTracking::FunctionModel {
+ ReadAtLeast() {
// func ReadAtLeast(r Reader, buf []byte, min int) (n int, err error)
// func ReadFull(r Reader, buf []byte) (n int, err error)
hasQualifiedName("io", "ReadAtLeast") or
@@ -175,7 +175,7 @@ module IO {
private class ByteReaderReadByte extends TaintTracking::FunctionModel, Method {
ByteReaderReadByte() {
- // ReadByte() (byte, error)
+ // func ReadByte() (byte, error)
this.implements("io", "ByteReader", "ReadByte")
}
@@ -186,7 +186,7 @@ module IO {
private class ByteWriterWriteByte extends TaintTracking::FunctionModel, Method {
ByteWriterWriteByte() {
- // WriteByte(c byte) error
+ // func WriteByte(c byte) error
this.implements("io", "ByteWriter", "WriteByte")
}
@@ -197,7 +197,7 @@ module IO {
private class ReaderRead extends TaintTracking::FunctionModel, Method {
ReaderRead() {
- // Read(p []byte) (n int, err error)
+ // func Read(p []byte) (n int, err error)
this.implements("io", "Reader", "Read")
}
@@ -206,7 +206,7 @@ module IO {
}
}
- private class LimitReader extends TaintTracking::FunctionModel, Function {
+ private class LimitReader extends TaintTracking::FunctionModel {
LimitReader() {
// func LimitReader(r Reader, n int64) Reader
this.hasQualifiedName("io", "LimitReader")
@@ -217,7 +217,7 @@ module IO {
}
}
- private class MultiReader extends TaintTracking::FunctionModel, Function {
+ private class MultiReader extends TaintTracking::FunctionModel {
MultiReader() {
// func MultiReader(readers ...Reader) Reader
this.hasQualifiedName("io", "MultiReader")
@@ -228,7 +228,7 @@ module IO {
}
}
- private class TeeReader extends TaintTracking::FunctionModel, Function {
+ private class TeeReader extends TaintTracking::FunctionModel {
TeeReader() {
// func TeeReader(r Reader, w Writer) Reader
this.hasQualifiedName("io", "TeeReader")
@@ -242,17 +242,19 @@ module IO {
}
private class ReaderAtReadAt extends TaintTracking::FunctionModel, Method {
- ReaderAtReadAt() { this.implements("io", "ReaderAt", "ReadAt") }
+ ReaderAtReadAt() {
+ // func ReadAt(p []byte, off int64) (n int, err error)
+ this.implements("io", "ReaderAt", "ReadAt")
+ }
override predicate hasTaintFlow(FunctionInput input, FunctionOutput output) {
- // ReadAt(p []byte, off int64) (n int, err error)
input.isReceiver() and output.isParameter(0)
}
}
private class ReaderFromReadFrom extends TaintTracking::FunctionModel, Method {
ReaderFromReadFrom() {
- // ReadFrom(r Reader) (n int64, err error)
+ // func ReadFrom(r Reader) (n int64, err error)
this.implements("io", "ReaderFrom", "ReadFrom")
}
@@ -263,7 +265,7 @@ module IO {
private class RuneReaderReadRune extends TaintTracking::FunctionModel, Method {
RuneReaderReadRune() {
- // ReadRune() (r rune, size int, err error)
+ // func ReadRune() (r rune, size int, err error)
this.implements("io", "RuneReader", "ReadRune")
}
@@ -272,7 +274,7 @@ module IO {
}
}
- private class NewSectionReader extends TaintTracking::FunctionModel, Function {
+ private class NewSectionReader extends TaintTracking::FunctionModel {
NewSectionReader() {
// func NewSectionReader(r ReaderAt, off int64, n int64) *SectionReader
this.hasQualifiedName("io", "NewSectionReader")
@@ -283,10 +285,9 @@ module IO {
}
}
- // A Taint Model for the stdlib io StringWriter interface
private class StringWriterWriteString extends TaintTracking::FunctionModel, Method {
StringWriterWriteString() {
- // WriteString(s string) (n int, err error)
+ // func WriteString(s string) (n int, err error)
this.implements("io", "StringWriter", "WriteString")
}
@@ -297,7 +298,7 @@ module IO {
private class WriterWrite extends TaintTracking::FunctionModel, Method {
WriterWrite() {
- // Write(p []byte) (n int, err error)
+ // func Write(p []byte) (n int, err error)
this.implements("io", "Writer", "Write")
}
@@ -306,7 +307,7 @@ module IO {
}
}
- private class MultiWriter extends TaintTracking::FunctionModel, Function {
+ private class MultiWriter extends TaintTracking::FunctionModel {
MultiWriter() {
// func MultiWriter(writers ...Writer) Writer
hasQualifiedName("io", "MultiWriter")
@@ -319,7 +320,7 @@ module IO {
private class WriterAtWriteAt extends TaintTracking::FunctionModel, Method {
WriterAtWriteAt() {
- // WriteAt(p []byte, off int64) (n int, err error)
+ // func WriteAt(p []byte, off int64) (n int, err error)
this.implements("io", "WriterAt", "WriteAt")
}
@@ -330,7 +331,7 @@ module IO {
private class WriterToWriteTo extends TaintTracking::FunctionModel, Method {
WriterToWriteTo() {
- // WriteTo(w Writer) (n int64, err error)
+ // func WriteTo(w Writer) (n int64, err error)
this.implements("io", "WriterTo", "WriteTo")
}
From d5fcf28e03d092c5db30b86081e052e75d6e95bc Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Wed, 13 May 2020 15:55:13 +0100
Subject: [PATCH 100/157] Add change note.
While we didn't see any new results in the evaluation, this is a fairly substantial amount of changes, so adding a change note is probably justified.
---
change-notes/2020-05-13-io-model.md | 3 +++
1 file changed, 3 insertions(+)
create mode 100644 change-notes/2020-05-13-io-model.md
diff --git a/change-notes/2020-05-13-io-model.md b/change-notes/2020-05-13-io-model.md
new file mode 100644
index 00000000000..0e3531efedc
--- /dev/null
+++ b/change-notes/2020-05-13-io-model.md
@@ -0,0 +1,3 @@
+lgtm,codescanning
+* Modeling of the standard `io` library has been improved, which may lead to more results from the
+ security queries.
From 97b3ec5cfc35d6076bc355a847a04cc5cea09329 Mon Sep 17 00:00:00 2001
From: Sauyon Lee
Date: Wed, 13 May 2020 10:07:14 -0700
Subject: [PATCH 101/157] Update dependency stubs
---
.../semmle/go/frameworks/Email/go.mod | 5 +-
.../go/frameworks/Email/vendor/modules.txt | 3 -
.../go.mongodb.org/mongo-driver/mongo/stub.go | 100 +--
.../semmle/go/frameworks/SQL/go.mod | 5 -
.../github.com/Masterminds/squirrel/stub.go | 44 +-
.../vendor/github.com/go-pg/pg/orm/stub.go | 298 ++-----
.../SQL/vendor/github.com/go-pg/pg/stub.go | 374 +++------
.../SQL/vendor/github.com/go-pg/pg/v9/stub.go | 396 +++------
.../frameworks/SystemCommandExecutors/go.mod | 1 -
.../SystemCommandExecutors/vendor/modules.txt | 3 -
.../mongo-driver/mongo/options/stub.go | 24 +-
.../go.mongodb.org/mongo-driver/mongo/stub.go | 100 +--
ql/test/query-tests/Security/CWE-312/go.mod | 1 -
.../Security/CWE-312/vendor/modules.txt | 3 -
ql/test/query-tests/Security/CWE-643/go.mod | 6 -
.../github.com/ChrisTrenkamp/goxpath/stub.go | 25 +-
.../ChrisTrenkamp/goxpath/tree/stub.go | 1 +
.../github.com/antchfx/htmlquery/stub.go | 13 +-
.../github.com/antchfx/jsonquery/stub.go | 9 +-
.../github.com/antchfx/xmlquery/stub.go | 9 +-
.../vendor/github.com/antchfx/xpath/stub.go | 5 +-
.../github.com/go-xmlpath/xmlpath/stub.go | 9 +-
.../github.com/jbowtie/gokogiri/xml/stub.go | 783 +++++-------------
.../github.com/jbowtie/gokogiri/xpath/stub.go | 3 +-
.../santhosh-tekuri/xpathparser/stub.go | 5 +-
.../Security/CWE-643/vendor/modules.txt | 18 -
.../query-tests/filters/ClassifyFiles/go.mod | 4 -
.../filters/ClassifyFiles/vendor/modules.txt | 12 -
28 files changed, 666 insertions(+), 1593 deletions(-)
diff --git a/ql/test/library-tests/semmle/go/frameworks/Email/go.mod b/ql/test/library-tests/semmle/go/frameworks/Email/go.mod
index d3b7fdb67f8..eb193e9905e 100644
--- a/ql/test/library-tests/semmle/go/frameworks/Email/go.mod
+++ b/ql/test/library-tests/semmle/go/frameworks/Email/go.mod
@@ -2,7 +2,4 @@ module main
go 1.14
-require (
- github.com/sendgrid/sendgrid-go v3.5.0+incompatible
- github.com/stretchr/testify v1.5.1 // indirect
-)
+require github.com/sendgrid/sendgrid-go v3.5.0+incompatible
diff --git a/ql/test/library-tests/semmle/go/frameworks/Email/vendor/modules.txt b/ql/test/library-tests/semmle/go/frameworks/Email/vendor/modules.txt
index d782a4cc242..4b7525957df 100644
--- a/ql/test/library-tests/semmle/go/frameworks/Email/vendor/modules.txt
+++ b/ql/test/library-tests/semmle/go/frameworks/Email/vendor/modules.txt
@@ -1,6 +1,3 @@
# github.com/sendgrid/sendgrid-go v3.5.0+incompatible
## explicit
github.com/sendgrid/sendgrid-go
-# github.com/stretchr/testify v1.5.1
-## explicit
-github.com/stretchr/testify
diff --git a/ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/go.mongodb.org/mongo-driver/mongo/stub.go b/ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/go.mongodb.org/mongo-driver/mongo/stub.go
index 57fda704783..be1ed14fbaf 100644
--- a/ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/go.mongodb.org/mongo-driver/mongo/stub.go
+++ b/ql/test/library-tests/semmle/go/frameworks/NoSQL/vendor/go.mongodb.org/mongo-driver/mongo/stub.go
@@ -59,7 +59,7 @@ func (_ *Client) Connect(_ context.Context) error {
return nil
}
-func (_ *Client) Database(_ string, _ ...*interface{}) *Database {
+func (_ *Client) Database(_ string, _ ...interface{}) *Database {
return nil
}
@@ -67,11 +67,11 @@ func (_ *Client) Disconnect(_ context.Context) error {
return nil
}
-func (_ *Client) ListDatabaseNames(_ context.Context, _ interface{}, _ ...*interface{}) ([]string, error) {
+func (_ *Client) ListDatabaseNames(_ context.Context, _ interface{}, _ ...interface{}) ([]string, error) {
return nil, nil
}
-func (_ *Client) ListDatabases(_ context.Context, _ interface{}, _ ...*interface{}) (ListDatabasesResult, error) {
+func (_ *Client) ListDatabases(_ context.Context, _ interface{}, _ ...interface{}) (ListDatabasesResult, error) {
return ListDatabasesResult{}, nil
}
@@ -79,11 +79,11 @@ func (_ *Client) NumberSessionsInProgress() int {
return 0
}
-func (_ *Client) Ping(_ context.Context, _ *interface{}) error {
+func (_ *Client) Ping(_ context.Context, _ interface{}) error {
return nil
}
-func (_ *Client) StartSession(_ ...*interface{}) (Session, error) {
+func (_ *Client) StartSession(_ ...interface{}) (Session, error) {
return nil, nil
}
@@ -91,29 +91,29 @@ func (_ *Client) UseSession(_ context.Context, _ func(SessionContext) error) err
return nil
}
-func (_ *Client) UseSessionWithOptions(_ context.Context, _ *interface{}, _ func(SessionContext) error) error {
+func (_ *Client) UseSessionWithOptions(_ context.Context, _ interface{}, _ func(SessionContext) error) error {
return nil
}
-func (_ *Client) Watch(_ context.Context, _ interface{}, _ ...*interface{}) (*ChangeStream, error) {
+func (_ *Client) Watch(_ context.Context, _ interface{}, _ ...interface{}) (*ChangeStream, error) {
return nil, nil
}
type Collection struct{}
-func (_ *Collection) Aggregate(_ context.Context, _ interface{}, _ ...*interface{}) (*Cursor, error) {
+func (_ *Collection) Aggregate(_ context.Context, _ interface{}, _ ...interface{}) (*Cursor, error) {
return nil, nil
}
-func (_ *Collection) BulkWrite(_ context.Context, _ []WriteModel, _ ...*interface{}) (*BulkWriteResult, error) {
+func (_ *Collection) BulkWrite(_ context.Context, _ []WriteModel, _ ...interface{}) (*BulkWriteResult, error) {
return nil, nil
}
-func (_ *Collection) Clone(_ ...*interface{}) (*Collection, error) {
+func (_ *Collection) Clone(_ ...interface{}) (*Collection, error) {
return nil, nil
}
-func (_ *Collection) CountDocuments(_ context.Context, _ interface{}, _ ...*interface{}) (int64, error) {
+func (_ *Collection) CountDocuments(_ context.Context, _ interface{}, _ ...interface{}) (int64, error) {
return 0, nil
}
@@ -121,15 +121,15 @@ func (_ *Collection) Database() *Database {
return nil
}
-func (_ *Collection) DeleteMany(_ context.Context, _ interface{}, _ ...*interface{}) (*DeleteResult, error) {
+func (_ *Collection) DeleteMany(_ context.Context, _ interface{}, _ ...interface{}) (*DeleteResult, error) {
return nil, nil
}
-func (_ *Collection) DeleteOne(_ context.Context, _ interface{}, _ ...*interface{}) (*DeleteResult, error) {
+func (_ *Collection) DeleteOne(_ context.Context, _ interface{}, _ ...interface{}) (*DeleteResult, error) {
return nil, nil
}
-func (_ *Collection) Distinct(_ context.Context, _ string, _ interface{}, _ ...*interface{}) ([]interface{}, error) {
+func (_ *Collection) Distinct(_ context.Context, _ string, _ interface{}, _ ...interface{}) ([]interface{}, error) {
return nil, nil
}
@@ -137,27 +137,27 @@ func (_ *Collection) Drop(_ context.Context) error {
return nil
}
-func (_ *Collection) EstimatedDocumentCount(_ context.Context, _ ...*interface{}) (int64, error) {
+func (_ *Collection) EstimatedDocumentCount(_ context.Context, _ ...interface{}) (int64, error) {
return 0, nil
}
-func (_ *Collection) Find(_ context.Context, _ interface{}, _ ...*interface{}) (*Cursor, error) {
+func (_ *Collection) Find(_ context.Context, _ interface{}, _ ...interface{}) (*Cursor, error) {
return nil, nil
}
-func (_ *Collection) FindOne(_ context.Context, _ interface{}, _ ...*interface{}) *SingleResult {
+func (_ *Collection) FindOne(_ context.Context, _ interface{}, _ ...interface{}) *SingleResult {
return nil
}
-func (_ *Collection) FindOneAndDelete(_ context.Context, _ interface{}, _ ...*interface{}) *SingleResult {
+func (_ *Collection) FindOneAndDelete(_ context.Context, _ interface{}, _ ...interface{}) *SingleResult {
return nil
}
-func (_ *Collection) FindOneAndReplace(_ context.Context, _ interface{}, _ interface{}, _ ...*interface{}) *SingleResult {
+func (_ *Collection) FindOneAndReplace(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) *SingleResult {
return nil
}
-func (_ *Collection) FindOneAndUpdate(_ context.Context, _ interface{}, _ interface{}, _ ...*interface{}) *SingleResult {
+func (_ *Collection) FindOneAndUpdate(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) *SingleResult {
return nil
}
@@ -165,11 +165,11 @@ func (_ *Collection) Indexes() IndexView {
return IndexView{}
}
-func (_ *Collection) InsertMany(_ context.Context, _ []interface{}, _ ...*interface{}) (*InsertManyResult, error) {
+func (_ *Collection) InsertMany(_ context.Context, _ []interface{}, _ ...interface{}) (*InsertManyResult, error) {
return nil, nil
}
-func (_ *Collection) InsertOne(_ context.Context, _ interface{}, _ ...*interface{}) (*InsertOneResult, error) {
+func (_ *Collection) InsertOne(_ context.Context, _ interface{}, _ ...interface{}) (*InsertOneResult, error) {
return nil, nil
}
@@ -177,19 +177,19 @@ func (_ *Collection) Name() string {
return ""
}
-func (_ *Collection) ReplaceOne(_ context.Context, _ interface{}, _ interface{}, _ ...*interface{}) (*UpdateResult, error) {
+func (_ *Collection) ReplaceOne(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (*UpdateResult, error) {
return nil, nil
}
-func (_ *Collection) UpdateMany(_ context.Context, _ interface{}, _ interface{}, _ ...*interface{}) (*UpdateResult, error) {
+func (_ *Collection) UpdateMany(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (*UpdateResult, error) {
return nil, nil
}
-func (_ *Collection) UpdateOne(_ context.Context, _ interface{}, _ interface{}, _ ...*interface{}) (*UpdateResult, error) {
+func (_ *Collection) UpdateOne(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (*UpdateResult, error) {
return nil, nil
}
-func (_ *Collection) Watch(_ context.Context, _ interface{}, _ ...*interface{}) (*ChangeStream, error) {
+func (_ *Collection) Watch(_ context.Context, _ interface{}, _ ...interface{}) (*ChangeStream, error) {
return nil, nil
}
@@ -227,7 +227,7 @@ func (_ *Cursor) TryNext(_ context.Context) bool {
type Database struct{}
-func (_ *Database) Aggregate(_ context.Context, _ interface{}, _ ...*interface{}) (*Cursor, error) {
+func (_ *Database) Aggregate(_ context.Context, _ interface{}, _ ...interface{}) (*Cursor, error) {
return nil, nil
}
@@ -235,7 +235,7 @@ func (_ *Database) Client() *Client {
return nil
}
-func (_ *Database) Collection(_ string, _ ...*interface{}) *Collection {
+func (_ *Database) Collection(_ string, _ ...interface{}) *Collection {
return nil
}
@@ -243,11 +243,11 @@ func (_ *Database) Drop(_ context.Context) error {
return nil
}
-func (_ *Database) ListCollectionNames(_ context.Context, _ interface{}, _ ...*interface{}) ([]string, error) {
+func (_ *Database) ListCollectionNames(_ context.Context, _ interface{}, _ ...interface{}) ([]string, error) {
return nil, nil
}
-func (_ *Database) ListCollections(_ context.Context, _ interface{}, _ ...*interface{}) (*Cursor, error) {
+func (_ *Database) ListCollections(_ context.Context, _ interface{}, _ ...interface{}) (*Cursor, error) {
return nil, nil
}
@@ -255,27 +255,27 @@ func (_ *Database) Name() string {
return ""
}
-func (_ *Database) ReadConcern() *interface{} {
+func (_ *Database) ReadConcern() interface{} {
return nil
}
-func (_ *Database) ReadPreference() *interface{} {
+func (_ *Database) ReadPreference() interface{} {
return nil
}
-func (_ *Database) RunCommand(_ context.Context, _ interface{}, _ ...*interface{}) *SingleResult {
+func (_ *Database) RunCommand(_ context.Context, _ interface{}, _ ...interface{}) *SingleResult {
return nil
}
-func (_ *Database) RunCommandCursor(_ context.Context, _ interface{}, _ ...*interface{}) (*Cursor, error) {
+func (_ *Database) RunCommandCursor(_ context.Context, _ interface{}, _ ...interface{}) (*Cursor, error) {
return nil, nil
}
-func (_ *Database) Watch(_ context.Context, _ interface{}, _ ...*interface{}) (*ChangeStream, error) {
+func (_ *Database) Watch(_ context.Context, _ interface{}, _ ...interface{}) (*ChangeStream, error) {
return nil, nil
}
-func (_ *Database) WriteConcern() *interface{} {
+func (_ *Database) WriteConcern() interface{} {
return nil
}
@@ -291,28 +291,28 @@ type DeleteResult struct {
type IndexModel struct {
Keys interface{}
- Options *interface{}
+ Options interface{}
}
type IndexView struct{}
-func (_ IndexView) CreateMany(_ context.Context, _ []IndexModel, _ ...*interface{}) ([]string, error) {
+func (_ IndexView) CreateMany(_ context.Context, _ []IndexModel, _ ...interface{}) ([]string, error) {
return nil, nil
}
-func (_ IndexView) CreateOne(_ context.Context, _ IndexModel, _ ...*interface{}) (string, error) {
+func (_ IndexView) CreateOne(_ context.Context, _ IndexModel, _ ...interface{}) (string, error) {
return "", nil
}
-func (_ IndexView) DropAll(_ context.Context, _ ...*interface{}) (interface{}, error) {
+func (_ IndexView) DropAll(_ context.Context, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ IndexView) DropOne(_ context.Context, _ string, _ ...*interface{}) (interface{}, error) {
+func (_ IndexView) DropOne(_ context.Context, _ string, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ IndexView) List(_ context.Context, _ ...*interface{}) (*Cursor, error) {
+func (_ IndexView) List(_ context.Context, _ ...interface{}) (*Cursor, error) {
return nil, nil
}
@@ -334,20 +334,20 @@ type Pipeline []interface{}
type Session interface {
AbortTransaction(_ context.Context) error
AdvanceClusterTime(_ interface{}) error
- AdvanceOperationTime(_ *interface{}) error
+ AdvanceOperationTime(_ interface{}) error
Client() *Client
ClusterTime() interface{}
CommitTransaction(_ context.Context) error
EndSession(_ context.Context)
- OperationTime() *interface{}
- StartTransaction(_ ...*interface{}) error
- WithTransaction(_ context.Context, _ func(SessionContext) (interface{}, error), _ ...*interface{}) (interface{}, error)
+ OperationTime() interface{}
+ StartTransaction(_ ...interface{}) error
+ WithTransaction(_ context.Context, _ func(SessionContext) (interface{}, error), _ ...interface{}) (interface{}, error)
}
type SessionContext interface {
AbortTransaction(_ context.Context) error
AdvanceClusterTime(_ interface{}) error
- AdvanceOperationTime(_ *interface{}) error
+ AdvanceOperationTime(_ interface{}) error
Client() *Client
ClusterTime() interface{}
CommitTransaction(_ context.Context) error
@@ -355,10 +355,10 @@ type SessionContext interface {
Done() <-chan struct{}
EndSession(_ context.Context)
Err() error
- OperationTime() *interface{}
- StartTransaction(_ ...*interface{}) error
+ OperationTime() interface{}
+ StartTransaction(_ ...interface{}) error
Value(_ interface{}) interface{}
- WithTransaction(_ context.Context, _ func(SessionContext) (interface{}, error), _ ...*interface{}) (interface{}, error)
+ WithTransaction(_ context.Context, _ func(SessionContext) (interface{}, error), _ ...interface{}) (interface{}, error)
}
type SingleResult struct{}
diff --git a/ql/test/library-tests/semmle/go/frameworks/SQL/go.mod b/ql/test/library-tests/semmle/go/frameworks/SQL/go.mod
index 14ee2e225ee..23bb420b262 100644
--- a/ql/test/library-tests/semmle/go/frameworks/SQL/go.mod
+++ b/ql/test/library-tests/semmle/go/frameworks/SQL/go.mod
@@ -4,11 +4,6 @@ go 1.13
require (
github.com/Masterminds/squirrel v1.1.0
- github.com/github/depstubber v0.0.0-20200414033246-a63ca77a1581 // indirect
github.com/go-pg/pg v8.0.6+incompatible
github.com/go-pg/pg/v9 v9.1.3
- github.com/go-sql-driver/mysql v1.5.0 // indirect
- github.com/lib/pq v1.3.0 // indirect
- github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect
- golang.org/x/net v0.0.0-20200226121028-0de0cce0169b // indirect
)
diff --git a/ql/test/library-tests/semmle/go/frameworks/SQL/vendor/github.com/Masterminds/squirrel/stub.go b/ql/test/library-tests/semmle/go/frameworks/SQL/vendor/github.com/Masterminds/squirrel/stub.go
index 19d94461f3a..fc639e9e209 100644
--- a/ql/test/library-tests/semmle/go/frameworks/SQL/vendor/github.com/Masterminds/squirrel/stub.go
+++ b/ql/test/library-tests/semmle/go/frameworks/SQL/vendor/github.com/Masterminds/squirrel/stub.go
@@ -13,12 +13,8 @@ import (
)
type BaseRunner interface {
- Exec(_ string, _ ...interface{}) (sql.Result, interface {
- Error() string
- })
- Query(_ string, _ ...interface{}) (*sql.Rows, interface {
- Error() string
- })
+ Exec(_ string, _ ...interface{}) (sql.Result, error)
+ Query(_ string, _ ...interface{}) (*sql.Rows, error)
}
func Expr(_ string, _ ...interface{}) interface{} {
@@ -26,15 +22,11 @@ func Expr(_ string, _ ...interface{}) interface{} {
}
type PlaceholderFormat interface {
- ReplacePlaceholders(_ string) (string, interface {
- Error() string
- })
+ ReplacePlaceholders(_ string) (string, error)
}
type RowScanner interface {
- Scan(_ ...interface{}) interface {
- Error() string
- }
+ Scan(_ ...interface{}) error
}
func Select(_ ...string) SelectBuilder {
@@ -55,15 +47,11 @@ func (_ SelectBuilder) Distinct() SelectBuilder {
return SelectBuilder{}
}
-func (_ SelectBuilder) Exec() (sql.Result, interface {
- Error() string
-}) {
+func (_ SelectBuilder) Exec() (sql.Result, error) {
return nil, nil
}
-func (_ SelectBuilder) ExecContext(_ context.Context) (sql.Result, interface {
- Error() string
-}) {
+func (_ SelectBuilder) ExecContext(_ context.Context) (sql.Result, error) {
return nil, nil
}
@@ -123,15 +111,11 @@ func (_ SelectBuilder) Prefix(_ string, _ ...interface{}) SelectBuilder {
return SelectBuilder{}
}
-func (_ SelectBuilder) Query() (*sql.Rows, interface {
- Error() string
-}) {
+func (_ SelectBuilder) Query() (*sql.Rows, error) {
return nil, nil
}
-func (_ SelectBuilder) QueryContext(_ context.Context) (*sql.Rows, interface {
- Error() string
-}) {
+func (_ SelectBuilder) QueryContext(_ context.Context) (*sql.Rows, error) {
return nil, nil
}
@@ -155,15 +139,11 @@ func (_ SelectBuilder) RunWith(_ BaseRunner) SelectBuilder {
return SelectBuilder{}
}
-func (_ SelectBuilder) Scan(_ ...interface{}) interface {
- Error() string
-} {
+func (_ SelectBuilder) Scan(_ ...interface{}) error {
return nil
}
-func (_ SelectBuilder) ScanContext(_ context.Context, _ ...interface{}) interface {
- Error() string
-} {
+func (_ SelectBuilder) ScanContext(_ context.Context, _ ...interface{}) error {
return nil
}
@@ -171,9 +151,7 @@ func (_ SelectBuilder) Suffix(_ string, _ ...interface{}) SelectBuilder {
return SelectBuilder{}
}
-func (_ SelectBuilder) ToSql() (string, []interface{}, interface {
- Error() string
-}) {
+func (_ SelectBuilder) ToSql() (string, []interface{}, error) {
return "", nil, nil
}
diff --git a/ql/test/library-tests/semmle/go/frameworks/SQL/vendor/github.com/go-pg/pg/orm/stub.go b/ql/test/library-tests/semmle/go/frameworks/SQL/vendor/github.com/go-pg/pg/orm/stub.go
index 15d5f583e6a..b4cedc671bf 100644
--- a/ql/test/library-tests/semmle/go/frameworks/SQL/vendor/github.com/go-pg/pg/orm/stub.go
+++ b/ql/test/library-tests/semmle/go/frameworks/SQL/vendor/github.com/go-pg/pg/orm/stub.go
@@ -14,9 +14,7 @@ import (
)
type ColumnScanner interface {
- ScanColumn(_ int, _ string, _ interface{}, _ int) interface {
- Error() string
- }
+ ScanColumn(_ int, _ string, _ interface{}, _ int) error
}
type CreateTableOptions struct {
@@ -28,54 +26,24 @@ type CreateTableOptions struct {
type DB interface {
Context() context.Context
- CopyFrom(_ io.Reader, _ interface{}, _ ...interface{}) (Result, interface {
- Error() string
- })
- CopyTo(_ io.Writer, _ interface{}, _ ...interface{}) (Result, interface {
- Error() string
- })
- Delete(_ interface{}) interface {
- Error() string
- }
- Exec(_ interface{}, _ ...interface{}) (Result, interface {
- Error() string
- })
- ExecContext(_ context.Context, _ interface{}, _ ...interface{}) (Result, interface {
- Error() string
- })
- ExecOne(_ interface{}, _ ...interface{}) (Result, interface {
- Error() string
- })
- ExecOneContext(_ context.Context, _ interface{}, _ ...interface{}) (Result, interface {
- Error() string
- })
- ForceDelete(_ interface{}) interface {
- Error() string
- }
- FormatQuery(_ []uint8, _ string, _ ...interface{}) []uint8
- Insert(_ ...interface{}) interface {
- Error() string
- }
+ CopyFrom(_ io.Reader, _ interface{}, _ ...interface{}) (Result, error)
+ CopyTo(_ io.Writer, _ interface{}, _ ...interface{}) (Result, error)
+ Delete(_ interface{}) error
+ Exec(_ interface{}, _ ...interface{}) (Result, error)
+ ExecContext(_ context.Context, _ interface{}, _ ...interface{}) (Result, error)
+ ExecOne(_ interface{}, _ ...interface{}) (Result, error)
+ ExecOneContext(_ context.Context, _ interface{}, _ ...interface{}) (Result, error)
+ ForceDelete(_ interface{}) error
+ FormatQuery(_ []byte, _ string, _ ...interface{}) []byte
+ Insert(_ ...interface{}) error
Model(_ ...interface{}) *Query
ModelContext(_ context.Context, _ ...interface{}) *Query
- Query(_ interface{}, _ interface{}, _ ...interface{}) (Result, interface {
- Error() string
- })
- QueryContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (Result, interface {
- Error() string
- })
- QueryOne(_ interface{}, _ interface{}, _ ...interface{}) (Result, interface {
- Error() string
- })
- QueryOneContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (Result, interface {
- Error() string
- })
- Select(_ interface{}) interface {
- Error() string
- }
- Update(_ interface{}) interface {
- Error() string
- }
+ Query(_ interface{}, _ interface{}, _ ...interface{}) (Result, error)
+ QueryContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (Result, error)
+ QueryOne(_ interface{}, _ interface{}, _ ...interface{}) (Result, error)
+ QueryOneContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (Result, error)
+ Select(_ interface{}) error
+ Update(_ interface{}) error
}
type DropTableOptions struct {
@@ -96,7 +64,7 @@ type Field struct {
OnUpdate string
}
-func (_ *Field) AppendValue(_ []uint8, _ reflect.Value, _ int) []uint8 {
+func (_ *Field) AppendValue(_ []byte, _ reflect.Value, _ int) []byte {
return nil
}
@@ -104,7 +72,7 @@ func (_ *Field) Copy() *Field {
return nil
}
-func (_ *Field) HasFlag(_ uint8) bool {
+func (_ *Field) HasFlag(_ byte) bool {
return false
}
@@ -116,13 +84,11 @@ func (_ *Field) OmitZero() bool {
return false
}
-func (_ *Field) ScanValue(_ reflect.Value, _ interface{}, _ int) interface {
- Error() string
-} {
+func (_ *Field) ScanValue(_ reflect.Value, _ interface{}, _ int) error {
return nil
}
-func (_ *Field) SetFlag(_ uint8) {}
+func (_ *Field) SetFlag(_ byte) {}
func (_ *Field) Value(_ reflect.Value) reflect.Value {
return reflect.Value{}
@@ -132,7 +98,7 @@ type Method struct {
Index int
}
-func (_ *Method) AppendValue(_ []uint8, _ reflect.Value, _ int) []uint8 {
+func (_ *Method) AppendValue(_ []byte, _ reflect.Value, _ int) []byte {
return nil
}
@@ -145,55 +111,31 @@ func (_ *Method) Value(_ reflect.Value) reflect.Value {
}
type Model interface {
- AddModel(_ ColumnScanner) interface {
- Error() string
- }
- AfterDelete(_ context.Context, _ DB) interface {
- Error() string
- }
- AfterInsert(_ context.Context, _ DB) interface {
- Error() string
- }
- AfterQuery(_ context.Context, _ DB) interface {
- Error() string
- }
- AfterSelect(_ context.Context, _ DB) interface {
- Error() string
- }
- AfterUpdate(_ context.Context, _ DB) interface {
- Error() string
- }
- BeforeDelete(_ context.Context, _ DB) interface {
- Error() string
- }
- BeforeInsert(_ context.Context, _ DB) interface {
- Error() string
- }
- BeforeSelectQuery(_ context.Context, _ DB, _ *Query) (*Query, interface {
- Error() string
- })
- BeforeUpdate(_ context.Context, _ DB) interface {
- Error() string
- }
- Init() interface {
- Error() string
- }
+ AddModel(_ ColumnScanner) error
+ AfterDelete(_ context.Context, _ DB) error
+ AfterInsert(_ context.Context, _ DB) error
+ AfterQuery(_ context.Context, _ DB) error
+ AfterSelect(_ context.Context, _ DB) error
+ AfterUpdate(_ context.Context, _ DB) error
+ BeforeDelete(_ context.Context, _ DB) error
+ BeforeInsert(_ context.Context, _ DB) error
+ BeforeSelectQuery(_ context.Context, _ DB, _ *Query) (*Query, error)
+ BeforeUpdate(_ context.Context, _ DB) error
+ Init() error
NewModel() ColumnScanner
}
-func Q(_ string, _ ...interface{}) *interface{} {
+func Q(_ string, _ ...interface{}) interface{} {
return nil
}
type Query struct{}
-func (_ *Query) AppendFormat(_ []uint8, _ QueryFormatter) []uint8 {
+func (_ *Query) AppendFormat(_ []byte, _ QueryFormatter) []byte {
return nil
}
-func (_ *Query) Apply(_ func(*Query) (*Query, interface {
- Error() string
-})) *Query {
+func (_ *Query) Apply(_ func(*Query) (*Query, error)) *Query {
return nil
}
@@ -213,33 +155,23 @@ func (_ *Query) Copy() *Query {
return nil
}
-func (_ *Query) CopyFrom(_ io.Reader, _ interface{}, _ ...interface{}) (Result, interface {
- Error() string
-}) {
+func (_ *Query) CopyFrom(_ io.Reader, _ interface{}, _ ...interface{}) (Result, error) {
return nil, nil
}
-func (_ *Query) CopyTo(_ io.Writer, _ interface{}, _ ...interface{}) (Result, interface {
- Error() string
-}) {
+func (_ *Query) CopyTo(_ io.Writer, _ interface{}, _ ...interface{}) (Result, error) {
return nil, nil
}
-func (_ *Query) Count() (int, interface {
- Error() string
-}) {
+func (_ *Query) Count() (int, error) {
return 0, nil
}
-func (_ *Query) CountEstimate(_ int) (int, interface {
- Error() string
-}) {
+func (_ *Query) CountEstimate(_ int) (int, error) {
return 0, nil
}
-func (_ *Query) CreateTable(_ *CreateTableOptions) interface {
- Error() string
-} {
+func (_ *Query) CreateTable(_ *CreateTableOptions) error {
return nil
}
@@ -247,9 +179,7 @@ func (_ *Query) DB(_ DB) *Query {
return nil
}
-func (_ *Query) Delete(_ ...interface{}) (Result, interface {
- Error() string
-}) {
+func (_ *Query) Delete(_ ...interface{}) (Result, error) {
return nil, nil
}
@@ -257,9 +187,7 @@ func (_ *Query) Deleted() *Query {
return nil
}
-func (_ *Query) DropTable(_ *DropTableOptions) interface {
- Error() string
-} {
+func (_ *Query) DropTable(_ *DropTableOptions) error {
return nil
}
@@ -267,27 +195,19 @@ func (_ *Query) ExcludeColumn(_ ...string) *Query {
return nil
}
-func (_ *Query) Exec(_ interface{}, _ ...interface{}) (Result, interface {
- Error() string
-}) {
+func (_ *Query) Exec(_ interface{}, _ ...interface{}) (Result, error) {
return nil, nil
}
-func (_ *Query) ExecOne(_ interface{}, _ ...interface{}) (Result, interface {
- Error() string
-}) {
+func (_ *Query) ExecOne(_ interface{}, _ ...interface{}) (Result, error) {
return nil, nil
}
-func (_ *Query) Exists() (bool, interface {
- Error() string
-}) {
+func (_ *Query) Exists() (bool, error) {
return false, nil
}
-func (_ *Query) First() interface {
- Error() string
-} {
+func (_ *Query) First() error {
return nil
}
@@ -295,19 +215,15 @@ func (_ *Query) For(_ string, _ ...interface{}) *Query {
return nil
}
-func (_ *Query) ForEach(_ interface{}) interface {
- Error() string
-} {
+func (_ *Query) ForEach(_ interface{}) error {
return nil
}
-func (_ *Query) ForceDelete(_ ...interface{}) (Result, interface {
- Error() string
-}) {
+func (_ *Query) ForceDelete(_ ...interface{}) (Result, error) {
return nil, nil
}
-func (_ *Query) FormatQuery(_ []uint8, _ string, _ ...interface{}) []uint8 {
+func (_ *Query) FormatQuery(_ []byte, _ string, _ ...interface{}) []byte {
return nil
}
@@ -331,9 +247,7 @@ func (_ *Query) Having(_ string, _ ...interface{}) *Query {
return nil
}
-func (_ *Query) Insert(_ ...interface{}) (Result, interface {
- Error() string
-}) {
+func (_ *Query) Insert(_ ...interface{}) (Result, error) {
return nil, nil
}
@@ -349,9 +263,7 @@ func (_ *Query) JoinOnOr(_ string, _ ...interface{}) *Query {
return nil
}
-func (_ *Query) Last() interface {
- Error() string
-} {
+func (_ *Query) Last() error {
return nil
}
@@ -383,21 +295,15 @@ func (_ *Query) OrderExpr(_ string, _ ...interface{}) *Query {
return nil
}
-func (_ *Query) Query(_ interface{}, _ interface{}, _ ...interface{}) (Result, interface {
- Error() string
-}) {
+func (_ *Query) Query(_ interface{}, _ interface{}, _ ...interface{}) (Result, error) {
return nil, nil
}
-func (_ *Query) QueryOne(_ interface{}, _ interface{}, _ ...interface{}) (Result, interface {
- Error() string
-}) {
+func (_ *Query) QueryOne(_ interface{}, _ interface{}, _ ...interface{}) (Result, error) {
return nil, nil
}
-func (_ *Query) Relation(_ string, _ ...func(*Query) (*Query, interface {
- Error() string
-})) *Query {
+func (_ *Query) Relation(_ string, _ ...func(*Query) (*Query, error)) *Query {
return nil
}
@@ -405,27 +311,19 @@ func (_ *Query) Returning(_ string, _ ...interface{}) *Query {
return nil
}
-func (_ *Query) Select(_ ...interface{}) interface {
- Error() string
-} {
+func (_ *Query) Select(_ ...interface{}) error {
return nil
}
-func (_ *Query) SelectAndCount(_ ...interface{}) (int, interface {
- Error() string
-}) {
+func (_ *Query) SelectAndCount(_ ...interface{}) (int, error) {
return 0, nil
}
-func (_ *Query) SelectAndCountEstimate(_ int, _ ...interface{}) (int, interface {
- Error() string
-}) {
+func (_ *Query) SelectAndCountEstimate(_ int, _ ...interface{}) (int, error) {
return 0, nil
}
-func (_ *Query) SelectOrInsert(_ ...interface{}) (bool, interface {
- Error() string
-}) {
+func (_ *Query) SelectOrInsert(_ ...interface{}) (bool, error) {
return false, nil
}
@@ -441,15 +339,11 @@ func (_ *Query) TableExpr(_ string, _ ...interface{}) *Query {
return nil
}
-func (_ *Query) Update(_ ...interface{}) (Result, interface {
- Error() string
-}) {
+func (_ *Query) Update(_ ...interface{}) (Result, error) {
return nil, nil
}
-func (_ *Query) UpdateNotNull(_ ...interface{}) (Result, interface {
- Error() string
-}) {
+func (_ *Query) UpdateNotNull(_ ...interface{}) (Result, error) {
return nil, nil
}
@@ -461,9 +355,7 @@ func (_ *Query) Where(_ string, _ ...interface{}) *Query {
return nil
}
-func (_ *Query) WhereGroup(_ func(*Query) (*Query, interface {
- Error() string
-})) *Query {
+func (_ *Query) WhereGroup(_ func(*Query) (*Query, error)) *Query {
return nil
}
@@ -479,9 +371,7 @@ func (_ *Query) WhereOr(_ string, _ ...interface{}) *Query {
return nil
}
-func (_ *Query) WhereOrGroup(_ func(*Query) (*Query, interface {
- Error() string
-})) *Query {
+func (_ *Query) WhereOrGroup(_ func(*Query) (*Query, error)) *Query {
return nil
}
@@ -502,7 +392,7 @@ func (_ *Query) WrapWith(_ string) *Query {
}
type QueryFormatter interface {
- FormatQuery(_ []uint8, _ string, _ ...interface{}) []uint8
+ FormatQuery(_ []byte, _ string, _ ...interface{}) []byte
}
type Relation struct {
@@ -549,13 +439,11 @@ type Table struct {
func (_ *Table) AddField(_ *Field) {}
-func (_ *Table) AppendParam(_ []uint8, _ reflect.Value, _ string) ([]uint8, bool) {
+func (_ *Table) AppendParam(_ []byte, _ reflect.Value, _ string) ([]byte, bool) {
return nil, false
}
-func (_ *Table) GetField(_ string) (*Field, interface {
- Error() string
-}) {
+func (_ *Table) GetField(_ string) (*Field, error) {
return nil, nil
}
@@ -576,48 +464,24 @@ func (_ *Table) String() string {
}
type TableModel interface {
- AddJoin(_ interface{}) *interface{}
- AddModel(_ ColumnScanner) interface {
- Error() string
- }
- AfterDelete(_ context.Context, _ DB) interface {
- Error() string
- }
- AfterInsert(_ context.Context, _ DB) interface {
- Error() string
- }
- AfterQuery(_ context.Context, _ DB) interface {
- Error() string
- }
- AfterSelect(_ context.Context, _ DB) interface {
- Error() string
- }
- AfterUpdate(_ context.Context, _ DB) interface {
- Error() string
- }
- AppendParam(_ []uint8, _ QueryFormatter, _ string) ([]uint8, bool)
- BeforeDelete(_ context.Context, _ DB) interface {
- Error() string
- }
- BeforeInsert(_ context.Context, _ DB) interface {
- Error() string
- }
- BeforeSelectQuery(_ context.Context, _ DB, _ *Query) (*Query, interface {
- Error() string
- })
- BeforeUpdate(_ context.Context, _ DB) interface {
- Error() string
- }
- GetJoin(_ string) *interface{}
+ AddJoin(_ interface{}) interface{}
+ AddModel(_ ColumnScanner) error
+ AfterDelete(_ context.Context, _ DB) error
+ AfterInsert(_ context.Context, _ DB) error
+ AfterQuery(_ context.Context, _ DB) error
+ AfterSelect(_ context.Context, _ DB) error
+ AfterUpdate(_ context.Context, _ DB) error
+ AppendParam(_ []byte, _ QueryFormatter, _ string) ([]byte, bool)
+ BeforeDelete(_ context.Context, _ DB) error
+ BeforeInsert(_ context.Context, _ DB) error
+ BeforeSelectQuery(_ context.Context, _ DB, _ *Query) (*Query, error)
+ BeforeUpdate(_ context.Context, _ DB) error
+ GetJoin(_ string) interface{}
GetJoins() []interface{}
Index() []int
- Init() interface {
- Error() string
- }
+ Init() error
IsNil() bool
- Join(_ string, _ func(*Query) (*Query, interface {
- Error() string
- })) *interface{}
+ Join(_ string, _ func(*Query) (*Query, error)) interface{}
Kind() reflect.Kind
Mount(_ reflect.Value)
NewModel() ColumnScanner
diff --git a/ql/test/library-tests/semmle/go/frameworks/SQL/vendor/github.com/go-pg/pg/stub.go b/ql/test/library-tests/semmle/go/frameworks/SQL/vendor/github.com/go-pg/pg/stub.go
index 0832aac36c4..d92b1486ef0 100644
--- a/ql/test/library-tests/semmle/go/frameworks/SQL/vendor/github.com/go-pg/pg/stub.go
+++ b/ql/test/library-tests/semmle/go/frameworks/SQL/vendor/github.com/go-pg/pg/stub.go
@@ -19,105 +19,75 @@ type Conn struct{}
func (_ Conn) AddQueryHook(_ QueryHook) {}
-func (_ Conn) Begin() (*Tx, interface {
- Error() string
-}) {
+func (_ Conn) Begin() (*Tx, error) {
return nil, nil
}
-func (_ Conn) Close() interface {
- Error() string
-} {
+func (_ Conn) Close() error {
return nil
}
-func (_ Conn) CopyFrom(_ io.Reader, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ Conn) CopyFrom(_ io.Reader, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ Conn) CopyTo(_ io.Writer, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ Conn) CopyTo(_ io.Writer, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ Conn) CreateComposite(_ interface{}, _ *interface{}) interface {
- Error() string
-} {
+func (_ Conn) CreateComposite(_ interface{}, _ interface{}) error {
return nil
}
-func (_ Conn) CreateTable(_ interface{}, _ *interface{}) interface {
- Error() string
-} {
+func (_ Conn) CreateTable(_ interface{}, _ interface{}) error {
return nil
}
-func (_ Conn) Delete(_ interface{}) interface {
- Error() string
-} {
+func (_ Conn) Delete(_ interface{}) error {
return nil
}
-func (_ Conn) DropComposite(_ interface{}, _ *interface{}) interface {
- Error() string
-} {
+func (_ Conn) DropComposite(_ interface{}, _ interface{}) error {
return nil
}
-func (_ Conn) DropTable(_ interface{}, _ *interface{}) interface {
- Error() string
-} {
+func (_ Conn) DropTable(_ interface{}, _ interface{}) error {
return nil
}
-func (_ Conn) Exec(_ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ Conn) Exec(_ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ Conn) ExecContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ Conn) ExecContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ Conn) ExecOne(_ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ Conn) ExecOne(_ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ Conn) ExecOneContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ Conn) ExecOneContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ Conn) ForceDelete(_ interface{}) interface {
- Error() string
-} {
+func (_ Conn) ForceDelete(_ interface{}) error {
return nil
}
-func (_ Conn) FormatQuery(_ []uint8, _ string, _ ...interface{}) []uint8 {
+func (_ Conn) FormatQuery(_ []byte, _ string, _ ...interface{}) []byte {
return nil
}
-func (_ Conn) Insert(_ ...interface{}) interface {
- Error() string
-} {
+func (_ Conn) Insert(_ ...interface{}) error {
return nil
}
-func (_ Conn) Model(_ ...interface{}) *interface{} {
+func (_ Conn) Model(_ ...interface{}) interface{} {
return nil
}
-func (_ Conn) ModelContext(_ context.Context, _ ...interface{}) *interface{} {
+func (_ Conn) ModelContext(_ context.Context, _ ...interface{}) interface{} {
return nil
}
@@ -129,53 +99,35 @@ func (_ Conn) PoolStats() *PoolStats {
return nil
}
-func (_ Conn) Prepare(_ string) (*Stmt, interface {
- Error() string
-}) {
+func (_ Conn) Prepare(_ string) (*Stmt, error) {
return nil, nil
}
-func (_ Conn) Query(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ Conn) Query(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ Conn) QueryContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ Conn) QueryContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ Conn) QueryOne(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ Conn) QueryOne(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ Conn) QueryOneContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ Conn) QueryOneContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ Conn) RunInTransaction(_ func(*Tx) interface {
- Error() string
-}) interface {
- Error() string
-} {
+func (_ Conn) RunInTransaction(_ func(*Tx) error) error {
return nil
}
-func (_ Conn) Select(_ interface{}) interface {
- Error() string
-} {
+func (_ Conn) Select(_ interface{}) error {
return nil
}
-func (_ Conn) Update(_ interface{}) interface {
- Error() string
-} {
+func (_ Conn) Update(_ interface{}) error {
return nil
}
@@ -199,105 +151,75 @@ type DB struct{}
func (_ DB) AddQueryHook(_ QueryHook) {}
-func (_ DB) Begin() (*Tx, interface {
- Error() string
-}) {
+func (_ DB) Begin() (*Tx, error) {
return nil, nil
}
-func (_ DB) Close() interface {
- Error() string
-} {
+func (_ DB) Close() error {
return nil
}
-func (_ DB) CopyFrom(_ io.Reader, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ DB) CopyFrom(_ io.Reader, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ DB) CopyTo(_ io.Writer, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ DB) CopyTo(_ io.Writer, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ DB) CreateComposite(_ interface{}, _ *interface{}) interface {
- Error() string
-} {
+func (_ DB) CreateComposite(_ interface{}, _ interface{}) error {
return nil
}
-func (_ DB) CreateTable(_ interface{}, _ *interface{}) interface {
- Error() string
-} {
+func (_ DB) CreateTable(_ interface{}, _ interface{}) error {
return nil
}
-func (_ DB) Delete(_ interface{}) interface {
- Error() string
-} {
+func (_ DB) Delete(_ interface{}) error {
return nil
}
-func (_ DB) DropComposite(_ interface{}, _ *interface{}) interface {
- Error() string
-} {
+func (_ DB) DropComposite(_ interface{}, _ interface{}) error {
return nil
}
-func (_ DB) DropTable(_ interface{}, _ *interface{}) interface {
- Error() string
-} {
+func (_ DB) DropTable(_ interface{}, _ interface{}) error {
return nil
}
-func (_ DB) Exec(_ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ DB) Exec(_ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ DB) ExecContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ DB) ExecContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ DB) ExecOne(_ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ DB) ExecOne(_ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ DB) ExecOneContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ DB) ExecOneContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ DB) ForceDelete(_ interface{}) interface {
- Error() string
-} {
+func (_ DB) ForceDelete(_ interface{}) error {
return nil
}
-func (_ DB) FormatQuery(_ []uint8, _ string, _ ...interface{}) []uint8 {
+func (_ DB) FormatQuery(_ []byte, _ string, _ ...interface{}) []byte {
return nil
}
-func (_ DB) Insert(_ ...interface{}) interface {
- Error() string
-} {
+func (_ DB) Insert(_ ...interface{}) error {
return nil
}
-func (_ DB) Model(_ ...interface{}) *interface{} {
+func (_ DB) Model(_ ...interface{}) interface{} {
return nil
}
-func (_ DB) ModelContext(_ context.Context, _ ...interface{}) *interface{} {
+func (_ DB) ModelContext(_ context.Context, _ ...interface{}) interface{} {
return nil
}
@@ -309,53 +231,35 @@ func (_ DB) PoolStats() *PoolStats {
return nil
}
-func (_ DB) Prepare(_ string) (*Stmt, interface {
- Error() string
-}) {
+func (_ DB) Prepare(_ string) (*Stmt, error) {
return nil, nil
}
-func (_ DB) Query(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ DB) Query(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ DB) QueryContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ DB) QueryContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ DB) QueryOne(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ DB) QueryOne(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ DB) QueryOneContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ DB) QueryOneContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ DB) RunInTransaction(_ func(*Tx) interface {
- Error() string
-}) interface {
- Error() string
-} {
+func (_ DB) RunInTransaction(_ func(*Tx) error) error {
return nil
}
-func (_ DB) Select(_ interface{}) interface {
- Error() string
-} {
+func (_ DB) Select(_ interface{}) error {
return nil
}
-func (_ DB) Update(_ interface{}) interface {
- Error() string
-} {
+func (_ DB) Update(_ interface{}) error {
return nil
}
@@ -401,27 +305,19 @@ func (_ *Listener) ChannelSize(_ int) <-chan *Notification {
return nil
}
-func (_ *Listener) Close() interface {
- Error() string
-} {
+func (_ *Listener) Close() error {
return nil
}
-func (_ *Listener) Listen(_ ...string) interface {
- Error() string
-} {
+func (_ *Listener) Listen(_ ...string) error {
return nil
}
-func (_ *Listener) Receive() (string, string, interface {
- Error() string
-}) {
+func (_ *Listener) Receive() (string, string, error) {
return "", "", nil
}
-func (_ *Listener) ReceiveTimeout(_ time.Duration) (string, string, interface {
- Error() string
-}) {
+func (_ *Listener) ReceiveTimeout(_ time.Duration) (string, string, error) {
return "", "", nil
}
@@ -435,14 +331,10 @@ type Notification struct {
}
type Options struct {
- Network string
- Addr string
- Dialer func(string, string) (net.Conn, interface {
- Error() string
- })
- OnConnect func(*Conn) interface {
- Error() string
- }
+ Network string
+ Addr string
+ Dialer func(string, string) (net.Conn, error)
+ OnConnect func(*Conn) error
User string
Password string
Database string
@@ -483,21 +375,15 @@ type QueryEvent struct {
Params []interface{}
Attempt int
Result interface{}
- Error interface {
- Error() string
- }
- Data map[interface{}]interface{}
+ Error error
+ Data map[interface{}]interface{}
}
-func (_ *QueryEvent) FormattedQuery() (string, interface {
- Error() string
-}) {
+func (_ *QueryEvent) FormattedQuery() (string, error) {
return "", nil
}
-func (_ *QueryEvent) UnformattedQuery() (string, interface {
- Error() string
-}) {
+func (_ *QueryEvent) UnformattedQuery() (string, error) {
return "", nil
}
@@ -508,71 +394,49 @@ type QueryHook interface {
type Stmt struct{}
-func (_ *Stmt) Close() interface {
- Error() string
-} {
+func (_ *Stmt) Close() error {
return nil
}
-func (_ *Stmt) Exec(_ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Stmt) Exec(_ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Stmt) ExecContext(_ context.Context, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Stmt) ExecContext(_ context.Context, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Stmt) ExecOne(_ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Stmt) ExecOne(_ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Stmt) ExecOneContext(_ context.Context, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Stmt) ExecOneContext(_ context.Context, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Stmt) Query(_ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Stmt) Query(_ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Stmt) QueryContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Stmt) QueryContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Stmt) QueryOne(_ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Stmt) QueryOne(_ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Stmt) QueryOneContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Stmt) QueryOneContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
type Tx struct{}
-func (_ *Tx) Begin() (*Tx, interface {
- Error() string
-}) {
+func (_ *Tx) Begin() (*Tx, error) {
return nil, nil
}
-func (_ *Tx) Commit() interface {
- Error() string
-} {
+func (_ *Tx) Commit() error {
return nil
}
@@ -580,131 +444,91 @@ func (_ *Tx) Context() context.Context {
return nil
}
-func (_ *Tx) CopyFrom(_ io.Reader, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Tx) CopyFrom(_ io.Reader, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Tx) CopyTo(_ io.Writer, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Tx) CopyTo(_ io.Writer, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Tx) CreateTable(_ interface{}, _ *interface{}) interface {
- Error() string
-} {
+func (_ *Tx) CreateTable(_ interface{}, _ interface{}) error {
return nil
}
-func (_ *Tx) Delete(_ interface{}) interface {
- Error() string
-} {
+func (_ *Tx) Delete(_ interface{}) error {
return nil
}
-func (_ *Tx) DropTable(_ interface{}, _ *interface{}) interface {
- Error() string
-} {
+func (_ *Tx) DropTable(_ interface{}, _ interface{}) error {
return nil
}
-func (_ *Tx) Exec(_ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Tx) Exec(_ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Tx) ExecContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Tx) ExecContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Tx) ExecOne(_ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Tx) ExecOne(_ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Tx) ExecOneContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Tx) ExecOneContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Tx) ForceDelete(_ interface{}) interface {
- Error() string
-} {
+func (_ *Tx) ForceDelete(_ interface{}) error {
return nil
}
-func (_ *Tx) FormatQuery(_ []uint8, _ string, _ ...interface{}) []uint8 {
+func (_ *Tx) FormatQuery(_ []byte, _ string, _ ...interface{}) []byte {
return nil
}
-func (_ *Tx) Insert(_ ...interface{}) interface {
- Error() string
-} {
+func (_ *Tx) Insert(_ ...interface{}) error {
return nil
}
-func (_ *Tx) Model(_ ...interface{}) *interface{} {
+func (_ *Tx) Model(_ ...interface{}) interface{} {
return nil
}
-func (_ *Tx) ModelContext(_ context.Context, _ ...interface{}) *interface{} {
+func (_ *Tx) ModelContext(_ context.Context, _ ...interface{}) interface{} {
return nil
}
-func (_ *Tx) Prepare(_ string) (*Stmt, interface {
- Error() string
-}) {
+func (_ *Tx) Prepare(_ string) (*Stmt, error) {
return nil, nil
}
-func (_ *Tx) Query(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Tx) Query(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Tx) QueryContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Tx) QueryContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Tx) QueryOne(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Tx) QueryOne(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Tx) QueryOneContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Tx) QueryOneContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Tx) Rollback() interface {
- Error() string
-} {
+func (_ *Tx) Rollback() error {
return nil
}
-func (_ *Tx) RunInTransaction(_ func(*Tx) interface {
- Error() string
-}) interface {
- Error() string
-} {
+func (_ *Tx) RunInTransaction(_ func(*Tx) error) error {
return nil
}
-func (_ *Tx) Select(_ interface{}) interface {
- Error() string
-} {
+func (_ *Tx) Select(_ interface{}) error {
return nil
}
@@ -712,8 +536,6 @@ func (_ *Tx) Stmt(_ *Stmt) *Stmt {
return nil
}
-func (_ *Tx) Update(_ interface{}) interface {
- Error() string
-} {
+func (_ *Tx) Update(_ interface{}) error {
return nil
}
diff --git a/ql/test/library-tests/semmle/go/frameworks/SQL/vendor/github.com/go-pg/pg/v9/stub.go b/ql/test/library-tests/semmle/go/frameworks/SQL/vendor/github.com/go-pg/pg/v9/stub.go
index 3ddcee60a45..58a8442cc7c 100644
--- a/ql/test/library-tests/semmle/go/frameworks/SQL/vendor/github.com/go-pg/pg/v9/stub.go
+++ b/ql/test/library-tests/semmle/go/frameworks/SQL/vendor/github.com/go-pg/pg/v9/stub.go
@@ -19,87 +19,59 @@ type Conn struct{}
func (_ Conn) AddQueryHook(_ QueryHook) {}
-func (_ Conn) Begin() (*Tx, interface {
- Error() string
-}) {
+func (_ Conn) Begin() (*Tx, error) {
return nil, nil
}
-func (_ Conn) Close() interface {
- Error() string
-} {
+func (_ Conn) Close() error {
return nil
}
-func (_ Conn) CopyFrom(_ io.Reader, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ Conn) CopyFrom(_ io.Reader, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ Conn) CopyTo(_ io.Writer, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ Conn) CopyTo(_ io.Writer, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ Conn) CreateComposite(_ interface{}, _ *interface{}) interface {
- Error() string
-} {
+func (_ Conn) CreateComposite(_ interface{}, _ interface{}) error {
return nil
}
-func (_ Conn) CreateTable(_ interface{}, _ *interface{}) interface {
- Error() string
-} {
+func (_ Conn) CreateTable(_ interface{}, _ interface{}) error {
return nil
}
-func (_ Conn) Delete(_ interface{}) interface {
- Error() string
-} {
+func (_ Conn) Delete(_ interface{}) error {
return nil
}
-func (_ Conn) DropComposite(_ interface{}, _ *interface{}) interface {
- Error() string
-} {
+func (_ Conn) DropComposite(_ interface{}, _ interface{}) error {
return nil
}
-func (_ Conn) DropTable(_ interface{}, _ *interface{}) interface {
- Error() string
-} {
+func (_ Conn) DropTable(_ interface{}, _ interface{}) error {
return nil
}
-func (_ Conn) Exec(_ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ Conn) Exec(_ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ Conn) ExecContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ Conn) ExecContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ Conn) ExecOne(_ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ Conn) ExecOne(_ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ Conn) ExecOneContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ Conn) ExecOneContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ Conn) ForceDelete(_ interface{}) interface {
- Error() string
-} {
+func (_ Conn) ForceDelete(_ interface{}) error {
return nil
}
@@ -107,17 +79,15 @@ func (_ Conn) Formatter() interface{} {
return nil
}
-func (_ Conn) Insert(_ ...interface{}) interface {
- Error() string
-} {
+func (_ Conn) Insert(_ ...interface{}) error {
return nil
}
-func (_ Conn) Model(_ ...interface{}) *interface{} {
+func (_ Conn) Model(_ ...interface{}) interface{} {
return nil
}
-func (_ Conn) ModelContext(_ context.Context, _ ...interface{}) *interface{} {
+func (_ Conn) ModelContext(_ context.Context, _ ...interface{}) interface{} {
return nil
}
@@ -129,53 +99,35 @@ func (_ Conn) PoolStats() *PoolStats {
return nil
}
-func (_ Conn) Prepare(_ string) (*Stmt, interface {
- Error() string
-}) {
+func (_ Conn) Prepare(_ string) (*Stmt, error) {
return nil, nil
}
-func (_ Conn) Query(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ Conn) Query(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ Conn) QueryContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ Conn) QueryContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ Conn) QueryOne(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ Conn) QueryOne(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ Conn) QueryOneContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ Conn) QueryOneContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ Conn) RunInTransaction(_ func(*Tx) interface {
- Error() string
-}) interface {
- Error() string
-} {
+func (_ Conn) RunInTransaction(_ func(*Tx) error) error {
return nil
}
-func (_ Conn) Select(_ interface{}) interface {
- Error() string
-} {
+func (_ Conn) Select(_ interface{}) error {
return nil
}
-func (_ Conn) Update(_ interface{}) interface {
- Error() string
-} {
+func (_ Conn) Update(_ interface{}) error {
return nil
}
@@ -199,87 +151,59 @@ type DB struct{}
func (_ DB) AddQueryHook(_ QueryHook) {}
-func (_ DB) Begin() (*Tx, interface {
- Error() string
-}) {
+func (_ DB) Begin() (*Tx, error) {
return nil, nil
}
-func (_ DB) Close() interface {
- Error() string
-} {
+func (_ DB) Close() error {
return nil
}
-func (_ DB) CopyFrom(_ io.Reader, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ DB) CopyFrom(_ io.Reader, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ DB) CopyTo(_ io.Writer, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ DB) CopyTo(_ io.Writer, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ DB) CreateComposite(_ interface{}, _ *interface{}) interface {
- Error() string
-} {
+func (_ DB) CreateComposite(_ interface{}, _ interface{}) error {
return nil
}
-func (_ DB) CreateTable(_ interface{}, _ *interface{}) interface {
- Error() string
-} {
+func (_ DB) CreateTable(_ interface{}, _ interface{}) error {
return nil
}
-func (_ DB) Delete(_ interface{}) interface {
- Error() string
-} {
+func (_ DB) Delete(_ interface{}) error {
return nil
}
-func (_ DB) DropComposite(_ interface{}, _ *interface{}) interface {
- Error() string
-} {
+func (_ DB) DropComposite(_ interface{}, _ interface{}) error {
return nil
}
-func (_ DB) DropTable(_ interface{}, _ *interface{}) interface {
- Error() string
-} {
+func (_ DB) DropTable(_ interface{}, _ interface{}) error {
return nil
}
-func (_ DB) Exec(_ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ DB) Exec(_ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ DB) ExecContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ DB) ExecContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ DB) ExecOne(_ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ DB) ExecOne(_ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ DB) ExecOneContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ DB) ExecOneContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ DB) ForceDelete(_ interface{}) interface {
- Error() string
-} {
+func (_ DB) ForceDelete(_ interface{}) error {
return nil
}
@@ -287,17 +211,15 @@ func (_ DB) Formatter() interface{} {
return nil
}
-func (_ DB) Insert(_ ...interface{}) interface {
- Error() string
-} {
+func (_ DB) Insert(_ ...interface{}) error {
return nil
}
-func (_ DB) Model(_ ...interface{}) *interface{} {
+func (_ DB) Model(_ ...interface{}) interface{} {
return nil
}
-func (_ DB) ModelContext(_ context.Context, _ ...interface{}) *interface{} {
+func (_ DB) ModelContext(_ context.Context, _ ...interface{}) interface{} {
return nil
}
@@ -309,53 +231,35 @@ func (_ DB) PoolStats() *PoolStats {
return nil
}
-func (_ DB) Prepare(_ string) (*Stmt, interface {
- Error() string
-}) {
+func (_ DB) Prepare(_ string) (*Stmt, error) {
return nil, nil
}
-func (_ DB) Query(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ DB) Query(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ DB) QueryContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ DB) QueryContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ DB) QueryOne(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ DB) QueryOne(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ DB) QueryOneContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ DB) QueryOneContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ DB) RunInTransaction(_ func(*Tx) interface {
- Error() string
-}) interface {
- Error() string
-} {
+func (_ DB) RunInTransaction(_ func(*Tx) error) error {
return nil
}
-func (_ DB) Select(_ interface{}) interface {
- Error() string
-} {
+func (_ DB) Select(_ interface{}) error {
return nil
}
-func (_ DB) Update(_ interface{}) interface {
- Error() string
-} {
+func (_ DB) Update(_ interface{}) error {
return nil
}
@@ -401,27 +305,19 @@ func (_ *Listener) ChannelSize(_ int) <-chan *Notification {
return nil
}
-func (_ *Listener) Close() interface {
- Error() string
-} {
+func (_ *Listener) Close() error {
return nil
}
-func (_ *Listener) Listen(_ ...string) interface {
- Error() string
-} {
+func (_ *Listener) Listen(_ ...string) error {
return nil
}
-func (_ *Listener) Receive() (string, string, interface {
- Error() string
-}) {
+func (_ *Listener) Receive() (string, string, error) {
return "", "", nil
}
-func (_ *Listener) ReceiveTimeout(_ time.Duration) (string, string, interface {
- Error() string
-}) {
+func (_ *Listener) ReceiveTimeout(_ time.Duration) (string, string, error) {
return "", "", nil
}
@@ -435,22 +331,18 @@ type Notification struct {
}
type Options struct {
- Network string
- Addr string
- Dialer func(context.Context, string, string) (net.Conn, interface {
- Error() string
- })
- User string
- Password string
- Database string
- ApplicationName string
- TLSConfig *tls.Config
- DialTimeout time.Duration
- ReadTimeout time.Duration
- WriteTimeout time.Duration
- OnConnect func(*Conn) interface {
- Error() string
- }
+ Network string
+ Addr string
+ Dialer func(context.Context, string, string) (net.Conn, error)
+ User string
+ Password string
+ Database string
+ ApplicationName string
+ TLSConfig *tls.Config
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ OnConnect func(*Conn) error
MaxRetries int
RetryStatementTimeout bool
MinRetryBackoff time.Duration
@@ -483,106 +375,72 @@ type QueryEvent struct {
Query interface{}
Params []interface{}
Result interface{}
- Err interface {
- Error() string
- }
- Stash map[interface{}]interface{}
+ Err error
+ Stash map[interface{}]interface{}
}
-func (_ *QueryEvent) FormattedQuery() (string, interface {
- Error() string
-}) {
+func (_ *QueryEvent) FormattedQuery() (string, error) {
return "", nil
}
-func (_ *QueryEvent) UnformattedQuery() (string, interface {
- Error() string
-}) {
+func (_ *QueryEvent) UnformattedQuery() (string, error) {
return "", nil
}
type QueryHook interface {
- AfterQuery(_ context.Context, _ *QueryEvent) interface {
- Error() string
- }
- BeforeQuery(_ context.Context, _ *QueryEvent) (context.Context, interface {
- Error() string
- })
+ AfterQuery(_ context.Context, _ *QueryEvent) error
+ BeforeQuery(_ context.Context, _ *QueryEvent) (context.Context, error)
}
type Stmt struct{}
-func (_ *Stmt) Close() interface {
- Error() string
-} {
+func (_ *Stmt) Close() error {
return nil
}
-func (_ *Stmt) Exec(_ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Stmt) Exec(_ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Stmt) ExecContext(_ context.Context, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Stmt) ExecContext(_ context.Context, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Stmt) ExecOne(_ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Stmt) ExecOne(_ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Stmt) ExecOneContext(_ context.Context, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Stmt) ExecOneContext(_ context.Context, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Stmt) Query(_ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Stmt) Query(_ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Stmt) QueryContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Stmt) QueryContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Stmt) QueryOne(_ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Stmt) QueryOne(_ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Stmt) QueryOneContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Stmt) QueryOneContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
type Tx struct{}
-func (_ *Tx) Begin() (*Tx, interface {
- Error() string
-}) {
+func (_ *Tx) Begin() (*Tx, error) {
return nil, nil
}
-func (_ *Tx) Close() interface {
- Error() string
-} {
+func (_ *Tx) Close() error {
return nil
}
-func (_ *Tx) Commit() interface {
- Error() string
-} {
+func (_ *Tx) Commit() error {
return nil
}
@@ -590,63 +448,43 @@ func (_ *Tx) Context() context.Context {
return nil
}
-func (_ *Tx) CopyFrom(_ io.Reader, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Tx) CopyFrom(_ io.Reader, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Tx) CopyTo(_ io.Writer, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Tx) CopyTo(_ io.Writer, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Tx) CreateTable(_ interface{}, _ *interface{}) interface {
- Error() string
-} {
+func (_ *Tx) CreateTable(_ interface{}, _ interface{}) error {
return nil
}
-func (_ *Tx) Delete(_ interface{}) interface {
- Error() string
-} {
+func (_ *Tx) Delete(_ interface{}) error {
return nil
}
-func (_ *Tx) DropTable(_ interface{}, _ *interface{}) interface {
- Error() string
-} {
+func (_ *Tx) DropTable(_ interface{}, _ interface{}) error {
return nil
}
-func (_ *Tx) Exec(_ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Tx) Exec(_ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Tx) ExecContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Tx) ExecContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Tx) ExecOne(_ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Tx) ExecOne(_ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Tx) ExecOneContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Tx) ExecOneContext(_ context.Context, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Tx) ForceDelete(_ interface{}) interface {
- Error() string
-} {
+func (_ *Tx) ForceDelete(_ interface{}) error {
return nil
}
@@ -654,67 +492,47 @@ func (_ *Tx) Formatter() interface{} {
return nil
}
-func (_ *Tx) Insert(_ ...interface{}) interface {
- Error() string
-} {
+func (_ *Tx) Insert(_ ...interface{}) error {
return nil
}
-func (_ *Tx) Model(_ ...interface{}) *interface{} {
+func (_ *Tx) Model(_ ...interface{}) interface{} {
return nil
}
-func (_ *Tx) ModelContext(_ context.Context, _ ...interface{}) *interface{} {
+func (_ *Tx) ModelContext(_ context.Context, _ ...interface{}) interface{} {
return nil
}
-func (_ *Tx) Prepare(_ string) (*Stmt, interface {
- Error() string
-}) {
+func (_ *Tx) Prepare(_ string) (*Stmt, error) {
return nil, nil
}
-func (_ *Tx) Query(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Tx) Query(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Tx) QueryContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Tx) QueryContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Tx) QueryOne(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Tx) QueryOne(_ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Tx) QueryOneContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *Tx) QueryOneContext(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ *Tx) Rollback() interface {
- Error() string
-} {
+func (_ *Tx) Rollback() error {
return nil
}
-func (_ *Tx) RunInTransaction(_ func(*Tx) interface {
- Error() string
-}) interface {
- Error() string
-} {
+func (_ *Tx) RunInTransaction(_ func(*Tx) error) error {
return nil
}
-func (_ *Tx) Select(_ interface{}) interface {
- Error() string
-} {
+func (_ *Tx) Select(_ interface{}) error {
return nil
}
@@ -722,8 +540,6 @@ func (_ *Tx) Stmt(_ *Stmt) *Stmt {
return nil
}
-func (_ *Tx) Update(_ interface{}) interface {
- Error() string
-} {
+func (_ *Tx) Update(_ interface{}) error {
return nil
}
diff --git a/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/go.mod b/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/go.mod
index 7e5ad05ed01..a38d4a4518b 100644
--- a/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/go.mod
+++ b/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/go.mod
@@ -4,6 +4,5 @@ go 1.14
require (
github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27
- github.com/github/depstubber v0.0.0-20200414025517-59c2db8e9405 // indirect
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59
)
diff --git a/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/vendor/modules.txt b/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/vendor/modules.txt
index 768df0566ee..2fdb2ae4eae 100644
--- a/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/vendor/modules.txt
+++ b/ql/test/library-tests/semmle/go/frameworks/SystemCommandExecutors/vendor/modules.txt
@@ -1,9 +1,6 @@
# github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27
## explicit
github.com/codeskyblue/go-sh
-# github.com/github/depstubber v0.0.0-20200414025517-59c2db8e9405
-## explicit
-github.com/github/depstubber
# golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59
## explicit
golang.org/x/crypto
diff --git a/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/mongo/options/stub.go b/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/mongo/options/stub.go
index a9275c51610..f435b8e112f 100644
--- a/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/mongo/options/stub.go
+++ b/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/mongo/options/stub.go
@@ -63,11 +63,11 @@ type ClientOptions struct {
MaxConnIdleTime *time.Duration
MaxPoolSize *uint64
MinPoolSize *uint64
- PoolMonitor *interface{}
- Monitor *interface{}
- ReadConcern *interface{}
- ReadPreference *interface{}
- Registry *interface{}
+ PoolMonitor interface{}
+ Monitor interface{}
+ ReadConcern interface{}
+ ReadPreference interface{}
+ Registry interface{}
ReplicaSet *string
RetryWrites *bool
RetryReads *bool
@@ -75,7 +75,7 @@ type ClientOptions struct {
Direct *bool
SocketTimeout *time.Duration
TLSConfig *tls.Config
- WriteConcern *interface{}
+ WriteConcern interface{}
ZlibLevel *int
ZstdLevel *int
AutoEncryptionOptions *AutoEncryptionOptions
@@ -143,23 +143,23 @@ func (_ *ClientOptions) SetMinPoolSize(_ uint64) *ClientOptions {
return nil
}
-func (_ *ClientOptions) SetMonitor(_ *interface{}) *ClientOptions {
+func (_ *ClientOptions) SetMonitor(_ interface{}) *ClientOptions {
return nil
}
-func (_ *ClientOptions) SetPoolMonitor(_ *interface{}) *ClientOptions {
+func (_ *ClientOptions) SetPoolMonitor(_ interface{}) *ClientOptions {
return nil
}
-func (_ *ClientOptions) SetReadConcern(_ *interface{}) *ClientOptions {
+func (_ *ClientOptions) SetReadConcern(_ interface{}) *ClientOptions {
return nil
}
-func (_ *ClientOptions) SetReadPreference(_ *interface{}) *ClientOptions {
+func (_ *ClientOptions) SetReadPreference(_ interface{}) *ClientOptions {
return nil
}
-func (_ *ClientOptions) SetRegistry(_ *interface{}) *ClientOptions {
+func (_ *ClientOptions) SetRegistry(_ interface{}) *ClientOptions {
return nil
}
@@ -187,7 +187,7 @@ func (_ *ClientOptions) SetTLSConfig(_ *tls.Config) *ClientOptions {
return nil
}
-func (_ *ClientOptions) SetWriteConcern(_ *interface{}) *ClientOptions {
+func (_ *ClientOptions) SetWriteConcern(_ interface{}) *ClientOptions {
return nil
}
diff --git a/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/mongo/stub.go b/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/mongo/stub.go
index 013a9e90963..8cdf40dc69e 100644
--- a/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/mongo/stub.go
+++ b/ql/test/query-tests/Security/CWE-089/vendor/go.mongodb.org/mongo-driver/mongo/stub.go
@@ -59,7 +59,7 @@ func (_ *Client) Connect(_ context.Context) error {
return nil
}
-func (_ *Client) Database(_ string, _ ...*interface{}) *Database {
+func (_ *Client) Database(_ string, _ ...interface{}) *Database {
return nil
}
@@ -67,11 +67,11 @@ func (_ *Client) Disconnect(_ context.Context) error {
return nil
}
-func (_ *Client) ListDatabaseNames(_ context.Context, _ interface{}, _ ...*interface{}) ([]string, error) {
+func (_ *Client) ListDatabaseNames(_ context.Context, _ interface{}, _ ...interface{}) ([]string, error) {
return nil, nil
}
-func (_ *Client) ListDatabases(_ context.Context, _ interface{}, _ ...*interface{}) (ListDatabasesResult, error) {
+func (_ *Client) ListDatabases(_ context.Context, _ interface{}, _ ...interface{}) (ListDatabasesResult, error) {
return ListDatabasesResult{}, nil
}
@@ -79,11 +79,11 @@ func (_ *Client) NumberSessionsInProgress() int {
return 0
}
-func (_ *Client) Ping(_ context.Context, _ *interface{}) error {
+func (_ *Client) Ping(_ context.Context, _ interface{}) error {
return nil
}
-func (_ *Client) StartSession(_ ...*interface{}) (Session, error) {
+func (_ *Client) StartSession(_ ...interface{}) (Session, error) {
return nil, nil
}
@@ -91,29 +91,29 @@ func (_ *Client) UseSession(_ context.Context, _ func(SessionContext) error) err
return nil
}
-func (_ *Client) UseSessionWithOptions(_ context.Context, _ *interface{}, _ func(SessionContext) error) error {
+func (_ *Client) UseSessionWithOptions(_ context.Context, _ interface{}, _ func(SessionContext) error) error {
return nil
}
-func (_ *Client) Watch(_ context.Context, _ interface{}, _ ...*interface{}) (*ChangeStream, error) {
+func (_ *Client) Watch(_ context.Context, _ interface{}, _ ...interface{}) (*ChangeStream, error) {
return nil, nil
}
type Collection struct{}
-func (_ *Collection) Aggregate(_ context.Context, _ interface{}, _ ...*interface{}) (*Cursor, error) {
+func (_ *Collection) Aggregate(_ context.Context, _ interface{}, _ ...interface{}) (*Cursor, error) {
return nil, nil
}
-func (_ *Collection) BulkWrite(_ context.Context, _ []WriteModel, _ ...*interface{}) (*BulkWriteResult, error) {
+func (_ *Collection) BulkWrite(_ context.Context, _ []WriteModel, _ ...interface{}) (*BulkWriteResult, error) {
return nil, nil
}
-func (_ *Collection) Clone(_ ...*interface{}) (*Collection, error) {
+func (_ *Collection) Clone(_ ...interface{}) (*Collection, error) {
return nil, nil
}
-func (_ *Collection) CountDocuments(_ context.Context, _ interface{}, _ ...*interface{}) (int64, error) {
+func (_ *Collection) CountDocuments(_ context.Context, _ interface{}, _ ...interface{}) (int64, error) {
return 0, nil
}
@@ -121,15 +121,15 @@ func (_ *Collection) Database() *Database {
return nil
}
-func (_ *Collection) DeleteMany(_ context.Context, _ interface{}, _ ...*interface{}) (*DeleteResult, error) {
+func (_ *Collection) DeleteMany(_ context.Context, _ interface{}, _ ...interface{}) (*DeleteResult, error) {
return nil, nil
}
-func (_ *Collection) DeleteOne(_ context.Context, _ interface{}, _ ...*interface{}) (*DeleteResult, error) {
+func (_ *Collection) DeleteOne(_ context.Context, _ interface{}, _ ...interface{}) (*DeleteResult, error) {
return nil, nil
}
-func (_ *Collection) Distinct(_ context.Context, _ string, _ interface{}, _ ...*interface{}) ([]interface{}, error) {
+func (_ *Collection) Distinct(_ context.Context, _ string, _ interface{}, _ ...interface{}) ([]interface{}, error) {
return nil, nil
}
@@ -137,27 +137,27 @@ func (_ *Collection) Drop(_ context.Context) error {
return nil
}
-func (_ *Collection) EstimatedDocumentCount(_ context.Context, _ ...*interface{}) (int64, error) {
+func (_ *Collection) EstimatedDocumentCount(_ context.Context, _ ...interface{}) (int64, error) {
return 0, nil
}
-func (_ *Collection) Find(_ context.Context, _ interface{}, _ ...*interface{}) (*Cursor, error) {
+func (_ *Collection) Find(_ context.Context, _ interface{}, _ ...interface{}) (*Cursor, error) {
return nil, nil
}
-func (_ *Collection) FindOne(_ context.Context, _ interface{}, _ ...*interface{}) *SingleResult {
+func (_ *Collection) FindOne(_ context.Context, _ interface{}, _ ...interface{}) *SingleResult {
return nil
}
-func (_ *Collection) FindOneAndDelete(_ context.Context, _ interface{}, _ ...*interface{}) *SingleResult {
+func (_ *Collection) FindOneAndDelete(_ context.Context, _ interface{}, _ ...interface{}) *SingleResult {
return nil
}
-func (_ *Collection) FindOneAndReplace(_ context.Context, _ interface{}, _ interface{}, _ ...*interface{}) *SingleResult {
+func (_ *Collection) FindOneAndReplace(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) *SingleResult {
return nil
}
-func (_ *Collection) FindOneAndUpdate(_ context.Context, _ interface{}, _ interface{}, _ ...*interface{}) *SingleResult {
+func (_ *Collection) FindOneAndUpdate(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) *SingleResult {
return nil
}
@@ -165,11 +165,11 @@ func (_ *Collection) Indexes() IndexView {
return IndexView{}
}
-func (_ *Collection) InsertMany(_ context.Context, _ []interface{}, _ ...*interface{}) (*InsertManyResult, error) {
+func (_ *Collection) InsertMany(_ context.Context, _ []interface{}, _ ...interface{}) (*InsertManyResult, error) {
return nil, nil
}
-func (_ *Collection) InsertOne(_ context.Context, _ interface{}, _ ...*interface{}) (*InsertOneResult, error) {
+func (_ *Collection) InsertOne(_ context.Context, _ interface{}, _ ...interface{}) (*InsertOneResult, error) {
return nil, nil
}
@@ -177,19 +177,19 @@ func (_ *Collection) Name() string {
return ""
}
-func (_ *Collection) ReplaceOne(_ context.Context, _ interface{}, _ interface{}, _ ...*interface{}) (*UpdateResult, error) {
+func (_ *Collection) ReplaceOne(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (*UpdateResult, error) {
return nil, nil
}
-func (_ *Collection) UpdateMany(_ context.Context, _ interface{}, _ interface{}, _ ...*interface{}) (*UpdateResult, error) {
+func (_ *Collection) UpdateMany(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (*UpdateResult, error) {
return nil, nil
}
-func (_ *Collection) UpdateOne(_ context.Context, _ interface{}, _ interface{}, _ ...*interface{}) (*UpdateResult, error) {
+func (_ *Collection) UpdateOne(_ context.Context, _ interface{}, _ interface{}, _ ...interface{}) (*UpdateResult, error) {
return nil, nil
}
-func (_ *Collection) Watch(_ context.Context, _ interface{}, _ ...*interface{}) (*ChangeStream, error) {
+func (_ *Collection) Watch(_ context.Context, _ interface{}, _ ...interface{}) (*ChangeStream, error) {
return nil, nil
}
@@ -231,7 +231,7 @@ func (_ *Cursor) TryNext(_ context.Context) bool {
type Database struct{}
-func (_ *Database) Aggregate(_ context.Context, _ interface{}, _ ...*interface{}) (*Cursor, error) {
+func (_ *Database) Aggregate(_ context.Context, _ interface{}, _ ...interface{}) (*Cursor, error) {
return nil, nil
}
@@ -239,7 +239,7 @@ func (_ *Database) Client() *Client {
return nil
}
-func (_ *Database) Collection(_ string, _ ...*interface{}) *Collection {
+func (_ *Database) Collection(_ string, _ ...interface{}) *Collection {
return nil
}
@@ -247,11 +247,11 @@ func (_ *Database) Drop(_ context.Context) error {
return nil
}
-func (_ *Database) ListCollectionNames(_ context.Context, _ interface{}, _ ...*interface{}) ([]string, error) {
+func (_ *Database) ListCollectionNames(_ context.Context, _ interface{}, _ ...interface{}) ([]string, error) {
return nil, nil
}
-func (_ *Database) ListCollections(_ context.Context, _ interface{}, _ ...*interface{}) (*Cursor, error) {
+func (_ *Database) ListCollections(_ context.Context, _ interface{}, _ ...interface{}) (*Cursor, error) {
return nil, nil
}
@@ -259,27 +259,27 @@ func (_ *Database) Name() string {
return ""
}
-func (_ *Database) ReadConcern() *interface{} {
+func (_ *Database) ReadConcern() interface{} {
return nil
}
-func (_ *Database) ReadPreference() *interface{} {
+func (_ *Database) ReadPreference() interface{} {
return nil
}
-func (_ *Database) RunCommand(_ context.Context, _ interface{}, _ ...*interface{}) *SingleResult {
+func (_ *Database) RunCommand(_ context.Context, _ interface{}, _ ...interface{}) *SingleResult {
return nil
}
-func (_ *Database) RunCommandCursor(_ context.Context, _ interface{}, _ ...*interface{}) (*Cursor, error) {
+func (_ *Database) RunCommandCursor(_ context.Context, _ interface{}, _ ...interface{}) (*Cursor, error) {
return nil, nil
}
-func (_ *Database) Watch(_ context.Context, _ interface{}, _ ...*interface{}) (*ChangeStream, error) {
+func (_ *Database) Watch(_ context.Context, _ interface{}, _ ...interface{}) (*ChangeStream, error) {
return nil, nil
}
-func (_ *Database) WriteConcern() *interface{} {
+func (_ *Database) WriteConcern() interface{} {
return nil
}
@@ -295,28 +295,28 @@ type DeleteResult struct {
type IndexModel struct {
Keys interface{}
- Options *interface{}
+ Options interface{}
}
type IndexView struct{}
-func (_ IndexView) CreateMany(_ context.Context, _ []IndexModel, _ ...*interface{}) ([]string, error) {
+func (_ IndexView) CreateMany(_ context.Context, _ []IndexModel, _ ...interface{}) ([]string, error) {
return nil, nil
}
-func (_ IndexView) CreateOne(_ context.Context, _ IndexModel, _ ...*interface{}) (string, error) {
+func (_ IndexView) CreateOne(_ context.Context, _ IndexModel, _ ...interface{}) (string, error) {
return "", nil
}
-func (_ IndexView) DropAll(_ context.Context, _ ...*interface{}) (interface{}, error) {
+func (_ IndexView) DropAll(_ context.Context, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ IndexView) DropOne(_ context.Context, _ string, _ ...*interface{}) (interface{}, error) {
+func (_ IndexView) DropOne(_ context.Context, _ string, _ ...interface{}) (interface{}, error) {
return nil, nil
}
-func (_ IndexView) List(_ context.Context, _ ...*interface{}) (*Cursor, error) {
+func (_ IndexView) List(_ context.Context, _ ...interface{}) (*Cursor, error) {
return nil, nil
}
@@ -338,20 +338,20 @@ type Pipeline []interface{}
type Session interface {
AbortTransaction(_ context.Context) error
AdvanceClusterTime(_ interface{}) error
- AdvanceOperationTime(_ *interface{}) error
+ AdvanceOperationTime(_ interface{}) error
Client() *Client
ClusterTime() interface{}
CommitTransaction(_ context.Context) error
EndSession(_ context.Context)
- OperationTime() *interface{}
- StartTransaction(_ ...*interface{}) error
- WithTransaction(_ context.Context, _ func(SessionContext) (interface{}, error), _ ...*interface{}) (interface{}, error)
+ OperationTime() interface{}
+ StartTransaction(_ ...interface{}) error
+ WithTransaction(_ context.Context, _ func(SessionContext) (interface{}, error), _ ...interface{}) (interface{}, error)
}
type SessionContext interface {
AbortTransaction(_ context.Context) error
AdvanceClusterTime(_ interface{}) error
- AdvanceOperationTime(_ *interface{}) error
+ AdvanceOperationTime(_ interface{}) error
Client() *Client
ClusterTime() interface{}
CommitTransaction(_ context.Context) error
@@ -359,10 +359,10 @@ type SessionContext interface {
Done() <-chan struct{}
EndSession(_ context.Context)
Err() error
- OperationTime() *interface{}
- StartTransaction(_ ...*interface{}) error
+ OperationTime() interface{}
+ StartTransaction(_ ...interface{}) error
Value(_ interface{}) interface{}
- WithTransaction(_ context.Context, _ func(SessionContext) (interface{}, error), _ ...*interface{}) (interface{}, error)
+ WithTransaction(_ context.Context, _ func(SessionContext) (interface{}, error), _ ...interface{}) (interface{}, error)
}
type SingleResult struct{}
diff --git a/ql/test/query-tests/Security/CWE-312/go.mod b/ql/test/query-tests/Security/CWE-312/go.mod
index 358ac75f5d0..d56c33ffc85 100644
--- a/ql/test/query-tests/Security/CWE-312/go.mod
+++ b/ql/test/query-tests/Security/CWE-312/go.mod
@@ -3,7 +3,6 @@ module main
go 1.14
require (
- github.com/github/depstubber v0.0.0-20200414023404-c355b630c381 // indirect
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
github.com/sirupsen/logrus v1.5.0
)
diff --git a/ql/test/query-tests/Security/CWE-312/vendor/modules.txt b/ql/test/query-tests/Security/CWE-312/vendor/modules.txt
index e6b6e8f0401..b0149947f41 100644
--- a/ql/test/query-tests/Security/CWE-312/vendor/modules.txt
+++ b/ql/test/query-tests/Security/CWE-312/vendor/modules.txt
@@ -1,6 +1,3 @@
-# github.com/github/depstubber v0.0.0-20200414023404-c355b630c381
-## explicit
-github.com/github/depstubber
# github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
## explicit
github.com/golang/glog
diff --git a/ql/test/query-tests/Security/CWE-643/go.mod b/ql/test/query-tests/Security/CWE-643/go.mod
index 92c3e25f94c..fc7016cc9f6 100644
--- a/ql/test/query-tests/Security/CWE-643/go.mod
+++ b/ql/test/query-tests/Security/CWE-643/go.mod
@@ -8,13 +8,7 @@ require (
github.com/antchfx/jsonquery v1.1.2
github.com/antchfx/xmlquery v1.2.3
github.com/antchfx/xpath v1.1.5
- github.com/github/depstubber v0.0.0-20200413231600-392d5a70208e // indirect
github.com/go-xmlpath/xmlpath v0.0.0-20150820204837-860cbeca3ebc
- github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
github.com/jbowtie/gokogiri v0.0.0-20190301021639-37f655d3078f
- github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/santhosh-tekuri/xpathparser v1.0.0
- golang.org/x/net v0.0.0-20200226121028-0de0cce0169b // indirect
- gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
- gopkg.in/xmlpath.v2 v2.0.0-20150820204837-860cbeca3ebc // indirect
)
diff --git a/ql/test/query-tests/Security/CWE-643/vendor/github.com/ChrisTrenkamp/goxpath/stub.go b/ql/test/query-tests/Security/CWE-643/vendor/github.com/ChrisTrenkamp/goxpath/stub.go
index 9fc8651cf61..4b1e74cc192 100644
--- a/ql/test/query-tests/Security/CWE-643/vendor/github.com/ChrisTrenkamp/goxpath/stub.go
+++ b/ql/test/query-tests/Security/CWE-643/vendor/github.com/ChrisTrenkamp/goxpath/stub.go
@@ -1,6 +1,7 @@
// Code generated by depstubber. DO NOT EDIT.
// This is a simple stub for github.com/ChrisTrenkamp/goxpath, strictly for use in testing.
+// See the LICENSE file for information about the licensing of the original library.
// Source: github.com/ChrisTrenkamp/goxpath (exports: Opts,FuncOpts,XPathExec; functions: Parse,MustParse,ParseExec)
// Package goxpath is a stub of github.com/ChrisTrenkamp/goxpath, generated by depstubber.
@@ -22,41 +23,29 @@ type Opts struct {
Vars map[string]interface{}
}
-func Parse(_ string) (XPathExec, interface {
- Error() string
-}) {
+func Parse(_ string) (XPathExec, error) {
return XPathExec{}, nil
}
-func ParseExec(_ string, _ interface{}, _ ...FuncOpts) (interface{}, interface {
- Error() string
-}) {
+func ParseExec(_ string, _ interface{}, _ ...FuncOpts) (interface{}, error) {
return nil, nil
}
type XPathExec struct{}
-func (_ XPathExec) Exec(_ interface{}, _ ...FuncOpts) (interface{}, interface {
- Error() string
-}) {
+func (_ XPathExec) Exec(_ interface{}, _ ...FuncOpts) (interface{}, error) {
return nil, nil
}
-func (_ XPathExec) ExecBool(_ interface{}, _ ...FuncOpts) (bool, interface {
- Error() string
-}) {
+func (_ XPathExec) ExecBool(_ interface{}, _ ...FuncOpts) (bool, error) {
return false, nil
}
-func (_ XPathExec) ExecNode(_ interface{}, _ ...FuncOpts) (interface{}, interface {
- Error() string
-}) {
+func (_ XPathExec) ExecNode(_ interface{}, _ ...FuncOpts) (interface{}, error) {
return nil, nil
}
-func (_ XPathExec) ExecNum(_ interface{}, _ ...FuncOpts) (float64, interface {
- Error() string
-}) {
+func (_ XPathExec) ExecNum(_ interface{}, _ ...FuncOpts) (float64, error) {
return 0, nil
}
diff --git a/ql/test/query-tests/Security/CWE-643/vendor/github.com/ChrisTrenkamp/goxpath/tree/stub.go b/ql/test/query-tests/Security/CWE-643/vendor/github.com/ChrisTrenkamp/goxpath/tree/stub.go
index 9c538842b2a..7cc3b01c626 100644
--- a/ql/test/query-tests/Security/CWE-643/vendor/github.com/ChrisTrenkamp/goxpath/tree/stub.go
+++ b/ql/test/query-tests/Security/CWE-643/vendor/github.com/ChrisTrenkamp/goxpath/tree/stub.go
@@ -1,6 +1,7 @@
// Code generated by depstubber. DO NOT EDIT.
// This is a simple stub for github.com/ChrisTrenkamp/goxpath/tree, strictly for use in testing.
+// See the LICENSE file for information about the licensing of the original library.
// Source: github.com/ChrisTrenkamp/goxpath/tree (exports: Node,String; functions: )
// Package tree is a stub of github.com/ChrisTrenkamp/goxpath/tree, generated by depstubber.
diff --git a/ql/test/query-tests/Security/CWE-643/vendor/github.com/antchfx/htmlquery/stub.go b/ql/test/query-tests/Security/CWE-643/vendor/github.com/antchfx/htmlquery/stub.go
index 0f7f7095aa8..0bac0acfe55 100644
--- a/ql/test/query-tests/Security/CWE-643/vendor/github.com/antchfx/htmlquery/stub.go
+++ b/ql/test/query-tests/Security/CWE-643/vendor/github.com/antchfx/htmlquery/stub.go
@@ -1,6 +1,7 @@
// Code generated by depstubber. DO NOT EDIT.
// This is a simple stub for github.com/antchfx/htmlquery, strictly for use in testing.
+// See the LICENSE file for information about the licensing of the original library.
// Source: github.com/antchfx/htmlquery (exports: ; functions: Find,FindOne,QueryAll,Query)
// Package htmlquery is a stub of github.com/antchfx/htmlquery, generated by depstubber.
@@ -8,22 +9,18 @@ package htmlquery
import ()
-func Find(_ *interface{}, _ string) []*interface{} {
+func Find(_ interface{}, _ string) []interface{} {
return nil
}
-func FindOne(_ *interface{}, _ string) *interface{} {
+func FindOne(_ interface{}, _ string) interface{} {
return nil
}
-func Query(_ *interface{}, _ string) (*interface{}, interface {
- Error() string
-}) {
+func Query(_ interface{}, _ string) (interface{}, error) {
return nil, nil
}
-func QueryAll(_ *interface{}, _ string) ([]*interface{}, interface {
- Error() string
-}) {
+func QueryAll(_ interface{}, _ string) ([]interface{}, error) {
return nil, nil
}
diff --git a/ql/test/query-tests/Security/CWE-643/vendor/github.com/antchfx/jsonquery/stub.go b/ql/test/query-tests/Security/CWE-643/vendor/github.com/antchfx/jsonquery/stub.go
index 8aca2f88266..2948daae031 100644
--- a/ql/test/query-tests/Security/CWE-643/vendor/github.com/antchfx/jsonquery/stub.go
+++ b/ql/test/query-tests/Security/CWE-643/vendor/github.com/antchfx/jsonquery/stub.go
@@ -1,6 +1,7 @@
// Code generated by depstubber. DO NOT EDIT.
// This is a simple stub for github.com/antchfx/jsonquery, strictly for use in testing.
+// See the LICENSE file for information about the licensing of the original library.
// Source: github.com/antchfx/jsonquery (exports: Node; functions: Find,FindOne,QueryAll,Query)
// Package jsonquery is a stub of github.com/antchfx/jsonquery, generated by depstubber.
@@ -40,14 +41,10 @@ func (_ *Node) SelectElement(_ string) *Node {
type NodeType uint
-func Query(_ *Node, _ string) (*Node, interface {
- Error() string
-}) {
+func Query(_ *Node, _ string) (*Node, error) {
return nil, nil
}
-func QueryAll(_ *Node, _ string) ([]*Node, interface {
- Error() string
-}) {
+func QueryAll(_ *Node, _ string) ([]*Node, error) {
return nil, nil
}
diff --git a/ql/test/query-tests/Security/CWE-643/vendor/github.com/antchfx/xmlquery/stub.go b/ql/test/query-tests/Security/CWE-643/vendor/github.com/antchfx/xmlquery/stub.go
index 771d87641fc..a4c5d8cc806 100644
--- a/ql/test/query-tests/Security/CWE-643/vendor/github.com/antchfx/xmlquery/stub.go
+++ b/ql/test/query-tests/Security/CWE-643/vendor/github.com/antchfx/xmlquery/stub.go
@@ -1,6 +1,7 @@
// Code generated by depstubber. DO NOT EDIT.
// This is a simple stub for github.com/antchfx/xmlquery, strictly for use in testing.
+// See the LICENSE file for information about the licensing of the original library.
// Source: github.com/antchfx/xmlquery (exports: Node; functions: Find,FindOne,FindEach,FindEachWithBreak,QueryAll,Query)
// Package xmlquery is a stub of github.com/antchfx/xmlquery, generated by depstubber.
@@ -57,14 +58,10 @@ func (_ *Node) SelectElements(_ string) []*Node {
type NodeType uint
-func Query(_ *Node, _ string) (*Node, interface {
- Error() string
-}) {
+func Query(_ *Node, _ string) (*Node, error) {
return nil, nil
}
-func QueryAll(_ *Node, _ string) ([]*Node, interface {
- Error() string
-}) {
+func QueryAll(_ *Node, _ string) ([]*Node, error) {
return nil, nil
}
diff --git a/ql/test/query-tests/Security/CWE-643/vendor/github.com/antchfx/xpath/stub.go b/ql/test/query-tests/Security/CWE-643/vendor/github.com/antchfx/xpath/stub.go
index cb611c75edd..d470abbed9d 100644
--- a/ql/test/query-tests/Security/CWE-643/vendor/github.com/antchfx/xpath/stub.go
+++ b/ql/test/query-tests/Security/CWE-643/vendor/github.com/antchfx/xpath/stub.go
@@ -1,6 +1,7 @@
// Code generated by depstubber. DO NOT EDIT.
// This is a simple stub for github.com/antchfx/xpath, strictly for use in testing.
+// See the LICENSE file for information about the licensing of the original library.
// Source: github.com/antchfx/xpath (exports: ; functions: Compile,MustCompile,Select)
// Package xpath is a stub of github.com/antchfx/xpath, generated by depstubber.
@@ -8,9 +9,7 @@ package xpath
import ()
-func Compile(_ string) (*Expr, interface {
- Error() string
-}) {
+func Compile(_ string) (*Expr, error) {
return nil, nil
}
diff --git a/ql/test/query-tests/Security/CWE-643/vendor/github.com/go-xmlpath/xmlpath/stub.go b/ql/test/query-tests/Security/CWE-643/vendor/github.com/go-xmlpath/xmlpath/stub.go
index 7df7a981cdd..2ec18b739df 100644
--- a/ql/test/query-tests/Security/CWE-643/vendor/github.com/go-xmlpath/xmlpath/stub.go
+++ b/ql/test/query-tests/Security/CWE-643/vendor/github.com/go-xmlpath/xmlpath/stub.go
@@ -1,6 +1,7 @@
// Code generated by depstubber. DO NOT EDIT.
// This is a simple stub for github.com/go-xmlpath/xmlpath, strictly for use in testing.
+// See the LICENSE file for information about the licensing of the original library.
// Source: github.com/go-xmlpath/xmlpath (exports: ; functions: Compile,MustCompile)
// Package xmlpath is a stub of github.com/go-xmlpath/xmlpath, generated by depstubber.
@@ -8,9 +9,7 @@ package xmlpath
import ()
-func Compile(_ string) (*Path, interface {
- Error() string
-}) {
+func Compile(_ string) (*Path, error) {
return nil, nil
}
@@ -30,7 +29,7 @@ func MustCompile(_ string) *Path {
type Node struct{}
-func (_ *Node) Bytes() []uint8 {
+func (_ *Node) Bytes() []byte {
return nil
}
@@ -40,7 +39,7 @@ func (_ *Node) String() string {
type Path struct{}
-func (_ *Path) Bytes(_ *Node) ([]uint8, bool) {
+func (_ *Path) Bytes(_ *Node) ([]byte, bool) {
return nil, false
}
diff --git a/ql/test/query-tests/Security/CWE-643/vendor/github.com/jbowtie/gokogiri/xml/stub.go b/ql/test/query-tests/Security/CWE-643/vendor/github.com/jbowtie/gokogiri/xml/stub.go
index f0ac31d47cb..26db7472cdf 100644
--- a/ql/test/query-tests/Security/CWE-643/vendor/github.com/jbowtie/gokogiri/xml/stub.go
+++ b/ql/test/query-tests/Security/CWE-643/vendor/github.com/jbowtie/gokogiri/xml/stub.go
@@ -1,6 +1,7 @@
// Code generated by depstubber. DO NOT EDIT.
// This is a simple stub for github.com/jbowtie/gokogiri/xml, strictly for use in testing.
+// See the LICENSE file for information about the licensing of the original library.
// Source: github.com/jbowtie/gokogiri/xml (exports: Node; functions: )
// Package xml is a stub of github.com/jbowtie/gokogiri/xml, generated by depstubber.
@@ -12,21 +13,15 @@ type AttributeNode struct {
XmlNode *XmlNode
}
-func (_ AttributeNode) AddChild(_ interface{}) interface {
- Error() string
-} {
+func (_ AttributeNode) AddChild(_ interface{}) error {
return nil
}
-func (_ AttributeNode) AddNextSibling(_ interface{}) interface {
- Error() string
-} {
+func (_ AttributeNode) AddNextSibling(_ interface{}) error {
return nil
}
-func (_ AttributeNode) AddPreviousSibling(_ interface{}) interface {
- Error() string
-} {
+func (_ AttributeNode) AddPreviousSibling(_ interface{}) error {
return nil
}
@@ -50,9 +45,7 @@ func (_ AttributeNode) Attributes() map[string]*AttributeNode {
func (_ AttributeNode) BookkeepFragment(_ *DocumentFragment) {}
-func (_ AttributeNode) Coerce(_ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ AttributeNode) Coerce(_ interface{}) ([]Node, error) {
return nil, nil
}
@@ -102,7 +95,7 @@ func (_ AttributeNode) DocType() NodeType {
return 0
}
-func (_ AttributeNode) DocXPathCtx() *interface{} {
+func (_ AttributeNode) DocXPathCtx() interface{} {
return nil
}
@@ -114,9 +107,7 @@ func (_ AttributeNode) DuplicateTo(_ Document, _ int) Node {
return nil
}
-func (_ AttributeNode) EvalXPath(_ interface{}, _ interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ AttributeNode) EvalXPath(_ interface{}, _ interface{}) (interface{}, error) {
return nil, nil
}
@@ -134,31 +125,23 @@ func (_ AttributeNode) InnerHtml() string {
return ""
}
-func (_ AttributeNode) InputEncoding() []uint8 {
+func (_ AttributeNode) InputEncoding() []byte {
return nil
}
-func (_ AttributeNode) InsertAfter(_ interface{}) interface {
- Error() string
-} {
+func (_ AttributeNode) InsertAfter(_ interface{}) error {
return nil
}
-func (_ AttributeNode) InsertBefore(_ interface{}) interface {
- Error() string
-} {
+func (_ AttributeNode) InsertBefore(_ interface{}) error {
return nil
}
-func (_ AttributeNode) InsertBegin(_ interface{}) interface {
- Error() string
-} {
+func (_ AttributeNode) InsertBegin(_ interface{}) error {
return nil
}
-func (_ AttributeNode) InsertEnd(_ interface{}) interface {
- Error() string
-} {
+func (_ AttributeNode) InsertEnd(_ interface{}) error {
return nil
}
@@ -202,7 +185,7 @@ func (_ AttributeNode) NodeType() NodeType {
return 0
}
-func (_ AttributeNode) OutputEncoding() []uint8 {
+func (_ AttributeNode) OutputEncoding() []byte {
return nil
}
@@ -210,9 +193,7 @@ func (_ AttributeNode) Parent() Node {
return nil
}
-func (_ AttributeNode) ParseFragment(_ []uint8, _ []uint8, _ ParseOption) (*DocumentFragment, interface {
- Error() string
-}) {
+func (_ AttributeNode) ParseFragment(_ []byte, _ []byte, _ ParseOption) (*DocumentFragment, error) {
return nil, nil
}
@@ -224,9 +205,7 @@ func (_ AttributeNode) PreviousSibling() Node {
return nil
}
-func (_ AttributeNode) RecursivelyRemoveNamespaces() interface {
- Error() string
-} {
+func (_ AttributeNode) RecursivelyRemoveNamespaces() error {
return nil
}
@@ -238,9 +217,7 @@ func (_ AttributeNode) RemoveUnlinkedNode(_ interface{}) bool {
return false
}
-func (_ AttributeNode) Replace(_ interface{}) interface {
- Error() string
-} {
+func (_ AttributeNode) Replace(_ interface{}) error {
return nil
}
@@ -252,19 +229,15 @@ func (_ AttributeNode) Root() *ElementNode {
return nil
}
-func (_ AttributeNode) Search(_ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ AttributeNode) Search(_ interface{}) ([]Node, error) {
return nil, nil
}
-func (_ AttributeNode) SearchWithVariables(_ interface{}, _ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ AttributeNode) SearchWithVariables(_ interface{}, _ interface{}) ([]Node, error) {
return nil, nil
}
-func (_ AttributeNode) SerializeWithFormat(_ SerializationOption, _ []uint8, _ []uint8) ([]uint8, int) {
+func (_ AttributeNode) SerializeWithFormat(_ SerializationOption, _ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -272,21 +245,15 @@ func (_ AttributeNode) SetAttr(_ string, _ string) string {
return ""
}
-func (_ AttributeNode) SetChildren(_ interface{}) interface {
- Error() string
-} {
+func (_ AttributeNode) SetChildren(_ interface{}) error {
return nil
}
-func (_ AttributeNode) SetContent(_ interface{}) interface {
- Error() string
-} {
+func (_ AttributeNode) SetContent(_ interface{}) error {
return nil
}
-func (_ AttributeNode) SetInnerHtml(_ interface{}) interface {
- Error() string
-} {
+func (_ AttributeNode) SetInnerHtml(_ interface{}) error {
return nil
}
@@ -298,11 +265,11 @@ func (_ AttributeNode) SetNsAttr(_ string, _ string, _ string) string {
return ""
}
-func (_ AttributeNode) ToBuffer(_ []uint8) []uint8 {
+func (_ AttributeNode) ToBuffer(_ []byte) []byte {
return nil
}
-func (_ AttributeNode) ToHtml(_ []uint8, _ []uint8) ([]uint8, int) {
+func (_ AttributeNode) ToHtml(_ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -310,7 +277,7 @@ func (_ AttributeNode) ToUnformattedXml() string {
return ""
}
-func (_ AttributeNode) ToXml(_ []uint8, _ []uint8) ([]uint8, int) {
+func (_ AttributeNode) ToXml(_ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -324,9 +291,7 @@ func (_ AttributeNode) Uri() string {
return ""
}
-func (_ AttributeNode) Wrap(_ string) interface {
- Error() string
-} {
+func (_ AttributeNode) Wrap(_ string) error {
return nil
}
@@ -344,21 +309,15 @@ type CDataNode struct {
XmlNode *XmlNode
}
-func (_ CDataNode) AddChild(_ interface{}) interface {
- Error() string
-} {
+func (_ CDataNode) AddChild(_ interface{}) error {
return nil
}
-func (_ CDataNode) AddNextSibling(_ interface{}) interface {
- Error() string
-} {
+func (_ CDataNode) AddNextSibling(_ interface{}) error {
return nil
}
-func (_ CDataNode) AddPreviousSibling(_ interface{}) interface {
- Error() string
-} {
+func (_ CDataNode) AddPreviousSibling(_ interface{}) error {
return nil
}
@@ -382,9 +341,7 @@ func (_ CDataNode) Attributes() map[string]*AttributeNode {
func (_ CDataNode) BookkeepFragment(_ *DocumentFragment) {}
-func (_ CDataNode) Coerce(_ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ CDataNode) Coerce(_ interface{}) ([]Node, error) {
return nil, nil
}
@@ -434,7 +391,7 @@ func (_ CDataNode) DocType() NodeType {
return 0
}
-func (_ CDataNode) DocXPathCtx() *interface{} {
+func (_ CDataNode) DocXPathCtx() interface{} {
return nil
}
@@ -446,9 +403,7 @@ func (_ CDataNode) DuplicateTo(_ Document, _ int) Node {
return nil
}
-func (_ CDataNode) EvalXPath(_ interface{}, _ interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ CDataNode) EvalXPath(_ interface{}, _ interface{}) (interface{}, error) {
return nil, nil
}
@@ -466,31 +421,23 @@ func (_ CDataNode) InnerHtml() string {
return ""
}
-func (_ CDataNode) InputEncoding() []uint8 {
+func (_ CDataNode) InputEncoding() []byte {
return nil
}
-func (_ CDataNode) InsertAfter(_ interface{}) interface {
- Error() string
-} {
+func (_ CDataNode) InsertAfter(_ interface{}) error {
return nil
}
-func (_ CDataNode) InsertBefore(_ interface{}) interface {
- Error() string
-} {
+func (_ CDataNode) InsertBefore(_ interface{}) error {
return nil
}
-func (_ CDataNode) InsertBegin(_ interface{}) interface {
- Error() string
-} {
+func (_ CDataNode) InsertBegin(_ interface{}) error {
return nil
}
-func (_ CDataNode) InsertEnd(_ interface{}) interface {
- Error() string
-} {
+func (_ CDataNode) InsertEnd(_ interface{}) error {
return nil
}
@@ -534,7 +481,7 @@ func (_ CDataNode) NodeType() NodeType {
return 0
}
-func (_ CDataNode) OutputEncoding() []uint8 {
+func (_ CDataNode) OutputEncoding() []byte {
return nil
}
@@ -542,9 +489,7 @@ func (_ CDataNode) Parent() Node {
return nil
}
-func (_ CDataNode) ParseFragment(_ []uint8, _ []uint8, _ ParseOption) (*DocumentFragment, interface {
- Error() string
-}) {
+func (_ CDataNode) ParseFragment(_ []byte, _ []byte, _ ParseOption) (*DocumentFragment, error) {
return nil, nil
}
@@ -556,9 +501,7 @@ func (_ CDataNode) PreviousSibling() Node {
return nil
}
-func (_ CDataNode) RecursivelyRemoveNamespaces() interface {
- Error() string
-} {
+func (_ CDataNode) RecursivelyRemoveNamespaces() error {
return nil
}
@@ -570,9 +513,7 @@ func (_ CDataNode) RemoveUnlinkedNode(_ interface{}) bool {
return false
}
-func (_ CDataNode) Replace(_ interface{}) interface {
- Error() string
-} {
+func (_ CDataNode) Replace(_ interface{}) error {
return nil
}
@@ -584,19 +525,15 @@ func (_ CDataNode) Root() *ElementNode {
return nil
}
-func (_ CDataNode) Search(_ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ CDataNode) Search(_ interface{}) ([]Node, error) {
return nil, nil
}
-func (_ CDataNode) SearchWithVariables(_ interface{}, _ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ CDataNode) SearchWithVariables(_ interface{}, _ interface{}) ([]Node, error) {
return nil, nil
}
-func (_ CDataNode) SerializeWithFormat(_ SerializationOption, _ []uint8, _ []uint8) ([]uint8, int) {
+func (_ CDataNode) SerializeWithFormat(_ SerializationOption, _ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -604,21 +541,15 @@ func (_ CDataNode) SetAttr(_ string, _ string) string {
return ""
}
-func (_ CDataNode) SetChildren(_ interface{}) interface {
- Error() string
-} {
+func (_ CDataNode) SetChildren(_ interface{}) error {
return nil
}
-func (_ CDataNode) SetContent(_ interface{}) interface {
- Error() string
-} {
+func (_ CDataNode) SetContent(_ interface{}) error {
return nil
}
-func (_ CDataNode) SetInnerHtml(_ interface{}) interface {
- Error() string
-} {
+func (_ CDataNode) SetInnerHtml(_ interface{}) error {
return nil
}
@@ -634,11 +565,11 @@ func (_ CDataNode) String() string {
return ""
}
-func (_ CDataNode) ToBuffer(_ []uint8) []uint8 {
+func (_ CDataNode) ToBuffer(_ []byte) []byte {
return nil
}
-func (_ CDataNode) ToHtml(_ []uint8, _ []uint8) ([]uint8, int) {
+func (_ CDataNode) ToHtml(_ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -646,7 +577,7 @@ func (_ CDataNode) ToUnformattedXml() string {
return ""
}
-func (_ CDataNode) ToXml(_ []uint8, _ []uint8) ([]uint8, int) {
+func (_ CDataNode) ToXml(_ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -660,9 +591,7 @@ func (_ CDataNode) Uri() string {
return ""
}
-func (_ CDataNode) Wrap(_ string) interface {
- Error() string
-} {
+func (_ CDataNode) Wrap(_ string) error {
return nil
}
@@ -670,21 +599,15 @@ type CommentNode struct {
XmlNode *XmlNode
}
-func (_ CommentNode) AddChild(_ interface{}) interface {
- Error() string
-} {
+func (_ CommentNode) AddChild(_ interface{}) error {
return nil
}
-func (_ CommentNode) AddNextSibling(_ interface{}) interface {
- Error() string
-} {
+func (_ CommentNode) AddNextSibling(_ interface{}) error {
return nil
}
-func (_ CommentNode) AddPreviousSibling(_ interface{}) interface {
- Error() string
-} {
+func (_ CommentNode) AddPreviousSibling(_ interface{}) error {
return nil
}
@@ -708,9 +631,7 @@ func (_ CommentNode) Attributes() map[string]*AttributeNode {
func (_ CommentNode) BookkeepFragment(_ *DocumentFragment) {}
-func (_ CommentNode) Coerce(_ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ CommentNode) Coerce(_ interface{}) ([]Node, error) {
return nil, nil
}
@@ -760,7 +681,7 @@ func (_ CommentNode) DocType() NodeType {
return 0
}
-func (_ CommentNode) DocXPathCtx() *interface{} {
+func (_ CommentNode) DocXPathCtx() interface{} {
return nil
}
@@ -772,9 +693,7 @@ func (_ CommentNode) DuplicateTo(_ Document, _ int) Node {
return nil
}
-func (_ CommentNode) EvalXPath(_ interface{}, _ interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ CommentNode) EvalXPath(_ interface{}, _ interface{}) (interface{}, error) {
return nil, nil
}
@@ -792,31 +711,23 @@ func (_ CommentNode) InnerHtml() string {
return ""
}
-func (_ CommentNode) InputEncoding() []uint8 {
+func (_ CommentNode) InputEncoding() []byte {
return nil
}
-func (_ CommentNode) InsertAfter(_ interface{}) interface {
- Error() string
-} {
+func (_ CommentNode) InsertAfter(_ interface{}) error {
return nil
}
-func (_ CommentNode) InsertBefore(_ interface{}) interface {
- Error() string
-} {
+func (_ CommentNode) InsertBefore(_ interface{}) error {
return nil
}
-func (_ CommentNode) InsertBegin(_ interface{}) interface {
- Error() string
-} {
+func (_ CommentNode) InsertBegin(_ interface{}) error {
return nil
}
-func (_ CommentNode) InsertEnd(_ interface{}) interface {
- Error() string
-} {
+func (_ CommentNode) InsertEnd(_ interface{}) error {
return nil
}
@@ -860,7 +771,7 @@ func (_ CommentNode) NodeType() NodeType {
return 0
}
-func (_ CommentNode) OutputEncoding() []uint8 {
+func (_ CommentNode) OutputEncoding() []byte {
return nil
}
@@ -868,9 +779,7 @@ func (_ CommentNode) Parent() Node {
return nil
}
-func (_ CommentNode) ParseFragment(_ []uint8, _ []uint8, _ ParseOption) (*DocumentFragment, interface {
- Error() string
-}) {
+func (_ CommentNode) ParseFragment(_ []byte, _ []byte, _ ParseOption) (*DocumentFragment, error) {
return nil, nil
}
@@ -882,9 +791,7 @@ func (_ CommentNode) PreviousSibling() Node {
return nil
}
-func (_ CommentNode) RecursivelyRemoveNamespaces() interface {
- Error() string
-} {
+func (_ CommentNode) RecursivelyRemoveNamespaces() error {
return nil
}
@@ -896,9 +803,7 @@ func (_ CommentNode) RemoveUnlinkedNode(_ interface{}) bool {
return false
}
-func (_ CommentNode) Replace(_ interface{}) interface {
- Error() string
-} {
+func (_ CommentNode) Replace(_ interface{}) error {
return nil
}
@@ -910,19 +815,15 @@ func (_ CommentNode) Root() *ElementNode {
return nil
}
-func (_ CommentNode) Search(_ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ CommentNode) Search(_ interface{}) ([]Node, error) {
return nil, nil
}
-func (_ CommentNode) SearchWithVariables(_ interface{}, _ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ CommentNode) SearchWithVariables(_ interface{}, _ interface{}) ([]Node, error) {
return nil, nil
}
-func (_ CommentNode) SerializeWithFormat(_ SerializationOption, _ []uint8, _ []uint8) ([]uint8, int) {
+func (_ CommentNode) SerializeWithFormat(_ SerializationOption, _ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -930,21 +831,15 @@ func (_ CommentNode) SetAttr(_ string, _ string) string {
return ""
}
-func (_ CommentNode) SetChildren(_ interface{}) interface {
- Error() string
-} {
+func (_ CommentNode) SetChildren(_ interface{}) error {
return nil
}
-func (_ CommentNode) SetContent(_ interface{}) interface {
- Error() string
-} {
+func (_ CommentNode) SetContent(_ interface{}) error {
return nil
}
-func (_ CommentNode) SetInnerHtml(_ interface{}) interface {
- Error() string
-} {
+func (_ CommentNode) SetInnerHtml(_ interface{}) error {
return nil
}
@@ -960,11 +855,11 @@ func (_ CommentNode) String() string {
return ""
}
-func (_ CommentNode) ToBuffer(_ []uint8) []uint8 {
+func (_ CommentNode) ToBuffer(_ []byte) []byte {
return nil
}
-func (_ CommentNode) ToHtml(_ []uint8, _ []uint8) ([]uint8, int) {
+func (_ CommentNode) ToHtml(_ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -972,7 +867,7 @@ func (_ CommentNode) ToUnformattedXml() string {
return ""
}
-func (_ CommentNode) ToXml(_ []uint8, _ []uint8) ([]uint8, int) {
+func (_ CommentNode) ToXml(_ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -986,9 +881,7 @@ func (_ CommentNode) Uri() string {
return ""
}
-func (_ CommentNode) Wrap(_ string) interface {
- Error() string
-} {
+func (_ CommentNode) Wrap(_ string) error {
return nil
}
@@ -1003,17 +896,13 @@ type Document interface {
DocPtr() interface{}
DocRef() Document
DocType() NodeType
- DocXPathCtx() *interface{}
+ DocXPathCtx() interface{}
Free()
- InputEncoding() []uint8
+ InputEncoding() []byte
NodeById(_ string) *ElementNode
- OutputEncoding() []uint8
- ParseFragment(_ []uint8, _ []uint8, _ ParseOption) (*DocumentFragment, interface {
- Error() string
- })
- RecursivelyRemoveNamespaces() interface {
- Error() string
- }
+ OutputEncoding() []byte
+ ParseFragment(_ []byte, _ []byte, _ ParseOption) (*DocumentFragment, error)
+ RecursivelyRemoveNamespaces() error
RemoveUnlinkedNode(_ interface{}) bool
Root() *ElementNode
String() string
@@ -1023,25 +912,19 @@ type Document interface {
type DocumentFragment struct {
Node Node
- InEncoding []uint8
- OutEncoding []uint8
+ InEncoding []byte
+ OutEncoding []byte
}
-func (_ DocumentFragment) AddChild(_ interface{}) interface {
- Error() string
-} {
+func (_ DocumentFragment) AddChild(_ interface{}) error {
return nil
}
-func (_ DocumentFragment) AddNextSibling(_ interface{}) interface {
- Error() string
-} {
+func (_ DocumentFragment) AddNextSibling(_ interface{}) error {
return nil
}
-func (_ DocumentFragment) AddPreviousSibling(_ interface{}) interface {
- Error() string
-} {
+func (_ DocumentFragment) AddPreviousSibling(_ interface{}) error {
return nil
}
@@ -1061,9 +944,7 @@ func (_ DocumentFragment) Attributes() map[string]*AttributeNode {
return nil
}
-func (_ DocumentFragment) Coerce(_ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ DocumentFragment) Coerce(_ interface{}) ([]Node, error) {
return nil, nil
}
@@ -1089,9 +970,7 @@ func (_ DocumentFragment) DuplicateTo(_ Document, _ int) Node {
return nil
}
-func (_ DocumentFragment) EvalXPath(_ interface{}, _ interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ DocumentFragment) EvalXPath(_ interface{}, _ interface{}) (interface{}, error) {
return nil, nil
}
@@ -1107,27 +986,19 @@ func (_ DocumentFragment) InnerHtml() string {
return ""
}
-func (_ DocumentFragment) InsertAfter(_ interface{}) interface {
- Error() string
-} {
+func (_ DocumentFragment) InsertAfter(_ interface{}) error {
return nil
}
-func (_ DocumentFragment) InsertBefore(_ interface{}) interface {
- Error() string
-} {
+func (_ DocumentFragment) InsertBefore(_ interface{}) error {
return nil
}
-func (_ DocumentFragment) InsertBegin(_ interface{}) interface {
- Error() string
-} {
+func (_ DocumentFragment) InsertBegin(_ interface{}) error {
return nil
}
-func (_ DocumentFragment) InsertEnd(_ interface{}) interface {
- Error() string
-} {
+func (_ DocumentFragment) InsertEnd(_ interface{}) error {
return nil
}
@@ -1171,9 +1042,7 @@ func (_ DocumentFragment) Parent() Node {
return nil
}
-func (_ DocumentFragment) ParseFragment(_ []uint8, _ []uint8, _ ParseOption) (*DocumentFragment, interface {
- Error() string
-}) {
+func (_ DocumentFragment) ParseFragment(_ []byte, _ []byte, _ ParseOption) (*DocumentFragment, error) {
return nil, nil
}
@@ -1185,17 +1054,13 @@ func (_ DocumentFragment) PreviousSibling() Node {
return nil
}
-func (_ DocumentFragment) RecursivelyRemoveNamespaces() interface {
- Error() string
-} {
+func (_ DocumentFragment) RecursivelyRemoveNamespaces() error {
return nil
}
func (_ DocumentFragment) RemoveDefaultNamespace() {}
-func (_ DocumentFragment) Replace(_ interface{}) interface {
- Error() string
-} {
+func (_ DocumentFragment) Replace(_ interface{}) error {
return nil
}
@@ -1203,19 +1068,15 @@ func (_ DocumentFragment) ResetChildren() {}
func (_ DocumentFragment) ResetNodePtr() {}
-func (_ DocumentFragment) Search(_ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ DocumentFragment) Search(_ interface{}) ([]Node, error) {
return nil, nil
}
-func (_ DocumentFragment) SearchWithVariables(_ interface{}, _ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ DocumentFragment) SearchWithVariables(_ interface{}, _ interface{}) ([]Node, error) {
return nil, nil
}
-func (_ DocumentFragment) SerializeWithFormat(_ SerializationOption, _ []uint8, _ []uint8) ([]uint8, int) {
+func (_ DocumentFragment) SerializeWithFormat(_ SerializationOption, _ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -1223,21 +1084,15 @@ func (_ DocumentFragment) SetAttr(_ string, _ string) string {
return ""
}
-func (_ DocumentFragment) SetChildren(_ interface{}) interface {
- Error() string
-} {
+func (_ DocumentFragment) SetChildren(_ interface{}) error {
return nil
}
-func (_ DocumentFragment) SetContent(_ interface{}) interface {
- Error() string
-} {
+func (_ DocumentFragment) SetContent(_ interface{}) error {
return nil
}
-func (_ DocumentFragment) SetInnerHtml(_ interface{}) interface {
- Error() string
-} {
+func (_ DocumentFragment) SetInnerHtml(_ interface{}) error {
return nil
}
@@ -1249,7 +1104,7 @@ func (_ DocumentFragment) SetNsAttr(_ string, _ string, _ string) string {
return ""
}
-func (_ DocumentFragment) ToHtml(_ []uint8, _ []uint8) ([]uint8, int) {
+func (_ DocumentFragment) ToHtml(_ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -1257,15 +1112,13 @@ func (_ DocumentFragment) ToUnformattedXml() string {
return ""
}
-func (_ DocumentFragment) ToXml(_ []uint8, _ []uint8) ([]uint8, int) {
+func (_ DocumentFragment) ToXml(_ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
func (_ DocumentFragment) Unlink() {}
-func (_ DocumentFragment) Wrap(_ string) interface {
- Error() string
-} {
+func (_ DocumentFragment) Wrap(_ string) error {
return nil
}
@@ -1279,7 +1132,7 @@ func (_ *DocumentFragment) String() string {
return ""
}
-func (_ *DocumentFragment) ToBuffer(_ []uint8) []uint8 {
+func (_ *DocumentFragment) ToBuffer(_ []byte) []byte {
return nil
}
@@ -1287,21 +1140,15 @@ type ElementNode struct {
XmlNode *XmlNode
}
-func (_ ElementNode) AddChild(_ interface{}) interface {
- Error() string
-} {
+func (_ ElementNode) AddChild(_ interface{}) error {
return nil
}
-func (_ ElementNode) AddNextSibling(_ interface{}) interface {
- Error() string
-} {
+func (_ ElementNode) AddNextSibling(_ interface{}) error {
return nil
}
-func (_ ElementNode) AddPreviousSibling(_ interface{}) interface {
- Error() string
-} {
+func (_ ElementNode) AddPreviousSibling(_ interface{}) error {
return nil
}
@@ -1325,9 +1172,7 @@ func (_ ElementNode) Attributes() map[string]*AttributeNode {
func (_ ElementNode) BookkeepFragment(_ *DocumentFragment) {}
-func (_ ElementNode) Coerce(_ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ ElementNode) Coerce(_ interface{}) ([]Node, error) {
return nil, nil
}
@@ -1377,7 +1222,7 @@ func (_ ElementNode) DocType() NodeType {
return 0
}
-func (_ ElementNode) DocXPathCtx() *interface{} {
+func (_ ElementNode) DocXPathCtx() interface{} {
return nil
}
@@ -1389,9 +1234,7 @@ func (_ ElementNode) DuplicateTo(_ Document, _ int) Node {
return nil
}
-func (_ ElementNode) EvalXPath(_ interface{}, _ interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ ElementNode) EvalXPath(_ interface{}, _ interface{}) (interface{}, error) {
return nil, nil
}
@@ -1409,31 +1252,23 @@ func (_ ElementNode) InnerHtml() string {
return ""
}
-func (_ ElementNode) InputEncoding() []uint8 {
+func (_ ElementNode) InputEncoding() []byte {
return nil
}
-func (_ ElementNode) InsertAfter(_ interface{}) interface {
- Error() string
-} {
+func (_ ElementNode) InsertAfter(_ interface{}) error {
return nil
}
-func (_ ElementNode) InsertBefore(_ interface{}) interface {
- Error() string
-} {
+func (_ ElementNode) InsertBefore(_ interface{}) error {
return nil
}
-func (_ ElementNode) InsertBegin(_ interface{}) interface {
- Error() string
-} {
+func (_ ElementNode) InsertBegin(_ interface{}) error {
return nil
}
-func (_ ElementNode) InsertEnd(_ interface{}) interface {
- Error() string
-} {
+func (_ ElementNode) InsertEnd(_ interface{}) error {
return nil
}
@@ -1477,7 +1312,7 @@ func (_ ElementNode) NodeType() NodeType {
return 0
}
-func (_ ElementNode) OutputEncoding() []uint8 {
+func (_ ElementNode) OutputEncoding() []byte {
return nil
}
@@ -1485,9 +1320,7 @@ func (_ ElementNode) Parent() Node {
return nil
}
-func (_ ElementNode) ParseFragment(_ []uint8, _ []uint8, _ ParseOption) (*DocumentFragment, interface {
- Error() string
-}) {
+func (_ ElementNode) ParseFragment(_ []byte, _ []byte, _ ParseOption) (*DocumentFragment, error) {
return nil, nil
}
@@ -1499,9 +1332,7 @@ func (_ ElementNode) PreviousSibling() Node {
return nil
}
-func (_ ElementNode) RecursivelyRemoveNamespaces() interface {
- Error() string
-} {
+func (_ ElementNode) RecursivelyRemoveNamespaces() error {
return nil
}
@@ -1513,9 +1344,7 @@ func (_ ElementNode) RemoveUnlinkedNode(_ interface{}) bool {
return false
}
-func (_ ElementNode) Replace(_ interface{}) interface {
- Error() string
-} {
+func (_ ElementNode) Replace(_ interface{}) error {
return nil
}
@@ -1527,19 +1356,15 @@ func (_ ElementNode) Root() *ElementNode {
return nil
}
-func (_ ElementNode) Search(_ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ ElementNode) Search(_ interface{}) ([]Node, error) {
return nil, nil
}
-func (_ ElementNode) SearchWithVariables(_ interface{}, _ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ ElementNode) SearchWithVariables(_ interface{}, _ interface{}) ([]Node, error) {
return nil, nil
}
-func (_ ElementNode) SerializeWithFormat(_ SerializationOption, _ []uint8, _ []uint8) ([]uint8, int) {
+func (_ ElementNode) SerializeWithFormat(_ SerializationOption, _ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -1547,21 +1372,15 @@ func (_ ElementNode) SetAttr(_ string, _ string) string {
return ""
}
-func (_ ElementNode) SetChildren(_ interface{}) interface {
- Error() string
-} {
+func (_ ElementNode) SetChildren(_ interface{}) error {
return nil
}
-func (_ ElementNode) SetContent(_ interface{}) interface {
- Error() string
-} {
+func (_ ElementNode) SetContent(_ interface{}) error {
return nil
}
-func (_ ElementNode) SetInnerHtml(_ interface{}) interface {
- Error() string
-} {
+func (_ ElementNode) SetInnerHtml(_ interface{}) error {
return nil
}
@@ -1577,11 +1396,11 @@ func (_ ElementNode) String() string {
return ""
}
-func (_ ElementNode) ToBuffer(_ []uint8) []uint8 {
+func (_ ElementNode) ToBuffer(_ []byte) []byte {
return nil
}
-func (_ ElementNode) ToHtml(_ []uint8, _ []uint8) ([]uint8, int) {
+func (_ ElementNode) ToHtml(_ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -1589,7 +1408,7 @@ func (_ ElementNode) ToUnformattedXml() string {
return ""
}
-func (_ ElementNode) ToXml(_ []uint8, _ []uint8) ([]uint8, int) {
+func (_ ElementNode) ToXml(_ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -1603,9 +1422,7 @@ func (_ ElementNode) Uri() string {
return ""
}
-func (_ ElementNode) Wrap(_ string) interface {
- Error() string
-} {
+func (_ ElementNode) Wrap(_ string) error {
return nil
}
@@ -1615,46 +1432,28 @@ type NamespaceDeclaration struct {
}
type Node interface {
- AddChild(_ interface{}) interface {
- Error() string
- }
- AddNextSibling(_ interface{}) interface {
- Error() string
- }
- AddPreviousSibling(_ interface{}) interface {
- Error() string
- }
+ AddChild(_ interface{}) error
+ AddNextSibling(_ interface{}) error
+ AddPreviousSibling(_ interface{}) error
Attr(_ string) string
Attribute(_ string) *AttributeNode
AttributeList() []*AttributeNode
Attributes() map[string]*AttributeNode
- Coerce(_ interface{}) ([]Node, interface {
- Error() string
- })
+ Coerce(_ interface{}) ([]Node, error)
Content() string
CountChildren() int
DeclareNamespace(_ string, _ string)
DeclaredNamespaces() []NamespaceDeclaration
Duplicate(_ int) Node
DuplicateTo(_ Document, _ int) Node
- EvalXPath(_ interface{}, _ interface{}) (interface{}, interface {
- Error() string
- })
+ EvalXPath(_ interface{}, _ interface{}) (interface{}, error)
EvalXPathAsBoolean(_ interface{}, _ interface{}) bool
FirstChild() Node
InnerHtml() string
- InsertAfter(_ interface{}) interface {
- Error() string
- }
- InsertBefore(_ interface{}) interface {
- Error() string
- }
- InsertBegin(_ interface{}) interface {
- Error() string
- }
- InsertEnd(_ interface{}) interface {
- Error() string
- }
+ InsertAfter(_ interface{}) error
+ InsertBefore(_ interface{}) error
+ InsertBegin(_ interface{}) error
+ InsertEnd(_ interface{}) error
IsValid() bool
LastChild() Node
LineNumber() int
@@ -1665,50 +1464,32 @@ type Node interface {
NodePtr() interface{}
NodeType() NodeType
Parent() Node
- ParseFragment(_ []uint8, _ []uint8, _ ParseOption) (*DocumentFragment, interface {
- Error() string
- })
+ ParseFragment(_ []byte, _ []byte, _ ParseOption) (*DocumentFragment, error)
Path() string
PreviousSibling() Node
- RecursivelyRemoveNamespaces() interface {
- Error() string
- }
+ RecursivelyRemoveNamespaces() error
Remove()
RemoveDefaultNamespace()
- Replace(_ interface{}) interface {
- Error() string
- }
+ Replace(_ interface{}) error
ResetChildren()
ResetNodePtr()
- Search(_ interface{}) ([]Node, interface {
- Error() string
- })
- SearchWithVariables(_ interface{}, _ interface{}) ([]Node, interface {
- Error() string
- })
- SerializeWithFormat(_ SerializationOption, _ []uint8, _ []uint8) ([]uint8, int)
+ Search(_ interface{}) ([]Node, error)
+ SearchWithVariables(_ interface{}, _ interface{}) ([]Node, error)
+ SerializeWithFormat(_ SerializationOption, _ []byte, _ []byte) ([]byte, int)
SetAttr(_ string, _ string) string
- SetChildren(_ interface{}) interface {
- Error() string
- }
- SetContent(_ interface{}) interface {
- Error() string
- }
- SetInnerHtml(_ interface{}) interface {
- Error() string
- }
+ SetChildren(_ interface{}) error
+ SetContent(_ interface{}) error
+ SetInnerHtml(_ interface{}) error
SetName(_ string)
SetNamespace(_ string, _ string)
SetNsAttr(_ string, _ string, _ string) string
String() string
- ToBuffer(_ []uint8) []uint8
- ToHtml(_ []uint8, _ []uint8) ([]uint8, int)
+ ToBuffer(_ []byte) []byte
+ ToHtml(_ []byte, _ []byte) ([]byte, int)
ToUnformattedXml() string
- ToXml(_ []uint8, _ []uint8) ([]uint8, int)
+ ToXml(_ []byte, _ []byte) ([]byte, int)
Unlink()
- Wrap(_ string) interface {
- Error() string
- }
+ Wrap(_ string) error
}
type NodeType int
@@ -1719,21 +1500,15 @@ type ProcessingInstructionNode struct {
XmlNode *XmlNode
}
-func (_ ProcessingInstructionNode) AddChild(_ interface{}) interface {
- Error() string
-} {
+func (_ ProcessingInstructionNode) AddChild(_ interface{}) error {
return nil
}
-func (_ ProcessingInstructionNode) AddNextSibling(_ interface{}) interface {
- Error() string
-} {
+func (_ ProcessingInstructionNode) AddNextSibling(_ interface{}) error {
return nil
}
-func (_ ProcessingInstructionNode) AddPreviousSibling(_ interface{}) interface {
- Error() string
-} {
+func (_ ProcessingInstructionNode) AddPreviousSibling(_ interface{}) error {
return nil
}
@@ -1757,9 +1532,7 @@ func (_ ProcessingInstructionNode) Attributes() map[string]*AttributeNode {
func (_ ProcessingInstructionNode) BookkeepFragment(_ *DocumentFragment) {}
-func (_ ProcessingInstructionNode) Coerce(_ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ ProcessingInstructionNode) Coerce(_ interface{}) ([]Node, error) {
return nil, nil
}
@@ -1809,7 +1582,7 @@ func (_ ProcessingInstructionNode) DocType() NodeType {
return 0
}
-func (_ ProcessingInstructionNode) DocXPathCtx() *interface{} {
+func (_ ProcessingInstructionNode) DocXPathCtx() interface{} {
return nil
}
@@ -1821,9 +1594,7 @@ func (_ ProcessingInstructionNode) DuplicateTo(_ Document, _ int) Node {
return nil
}
-func (_ ProcessingInstructionNode) EvalXPath(_ interface{}, _ interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ ProcessingInstructionNode) EvalXPath(_ interface{}, _ interface{}) (interface{}, error) {
return nil, nil
}
@@ -1841,31 +1612,23 @@ func (_ ProcessingInstructionNode) InnerHtml() string {
return ""
}
-func (_ ProcessingInstructionNode) InputEncoding() []uint8 {
+func (_ ProcessingInstructionNode) InputEncoding() []byte {
return nil
}
-func (_ ProcessingInstructionNode) InsertAfter(_ interface{}) interface {
- Error() string
-} {
+func (_ ProcessingInstructionNode) InsertAfter(_ interface{}) error {
return nil
}
-func (_ ProcessingInstructionNode) InsertBefore(_ interface{}) interface {
- Error() string
-} {
+func (_ ProcessingInstructionNode) InsertBefore(_ interface{}) error {
return nil
}
-func (_ ProcessingInstructionNode) InsertBegin(_ interface{}) interface {
- Error() string
-} {
+func (_ ProcessingInstructionNode) InsertBegin(_ interface{}) error {
return nil
}
-func (_ ProcessingInstructionNode) InsertEnd(_ interface{}) interface {
- Error() string
-} {
+func (_ ProcessingInstructionNode) InsertEnd(_ interface{}) error {
return nil
}
@@ -1909,7 +1672,7 @@ func (_ ProcessingInstructionNode) NodeType() NodeType {
return 0
}
-func (_ ProcessingInstructionNode) OutputEncoding() []uint8 {
+func (_ ProcessingInstructionNode) OutputEncoding() []byte {
return nil
}
@@ -1917,9 +1680,7 @@ func (_ ProcessingInstructionNode) Parent() Node {
return nil
}
-func (_ ProcessingInstructionNode) ParseFragment(_ []uint8, _ []uint8, _ ParseOption) (*DocumentFragment, interface {
- Error() string
-}) {
+func (_ ProcessingInstructionNode) ParseFragment(_ []byte, _ []byte, _ ParseOption) (*DocumentFragment, error) {
return nil, nil
}
@@ -1931,9 +1692,7 @@ func (_ ProcessingInstructionNode) PreviousSibling() Node {
return nil
}
-func (_ ProcessingInstructionNode) RecursivelyRemoveNamespaces() interface {
- Error() string
-} {
+func (_ ProcessingInstructionNode) RecursivelyRemoveNamespaces() error {
return nil
}
@@ -1945,9 +1704,7 @@ func (_ ProcessingInstructionNode) RemoveUnlinkedNode(_ interface{}) bool {
return false
}
-func (_ ProcessingInstructionNode) Replace(_ interface{}) interface {
- Error() string
-} {
+func (_ ProcessingInstructionNode) Replace(_ interface{}) error {
return nil
}
@@ -1959,19 +1716,15 @@ func (_ ProcessingInstructionNode) Root() *ElementNode {
return nil
}
-func (_ ProcessingInstructionNode) Search(_ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ ProcessingInstructionNode) Search(_ interface{}) ([]Node, error) {
return nil, nil
}
-func (_ ProcessingInstructionNode) SearchWithVariables(_ interface{}, _ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ ProcessingInstructionNode) SearchWithVariables(_ interface{}, _ interface{}) ([]Node, error) {
return nil, nil
}
-func (_ ProcessingInstructionNode) SerializeWithFormat(_ SerializationOption, _ []uint8, _ []uint8) ([]uint8, int) {
+func (_ ProcessingInstructionNode) SerializeWithFormat(_ SerializationOption, _ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -1979,21 +1732,15 @@ func (_ ProcessingInstructionNode) SetAttr(_ string, _ string) string {
return ""
}
-func (_ ProcessingInstructionNode) SetChildren(_ interface{}) interface {
- Error() string
-} {
+func (_ ProcessingInstructionNode) SetChildren(_ interface{}) error {
return nil
}
-func (_ ProcessingInstructionNode) SetContent(_ interface{}) interface {
- Error() string
-} {
+func (_ ProcessingInstructionNode) SetContent(_ interface{}) error {
return nil
}
-func (_ ProcessingInstructionNode) SetInnerHtml(_ interface{}) interface {
- Error() string
-} {
+func (_ ProcessingInstructionNode) SetInnerHtml(_ interface{}) error {
return nil
}
@@ -2009,11 +1756,11 @@ func (_ ProcessingInstructionNode) String() string {
return ""
}
-func (_ ProcessingInstructionNode) ToBuffer(_ []uint8) []uint8 {
+func (_ ProcessingInstructionNode) ToBuffer(_ []byte) []byte {
return nil
}
-func (_ ProcessingInstructionNode) ToHtml(_ []uint8, _ []uint8) ([]uint8, int) {
+func (_ ProcessingInstructionNode) ToHtml(_ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -2021,7 +1768,7 @@ func (_ ProcessingInstructionNode) ToUnformattedXml() string {
return ""
}
-func (_ ProcessingInstructionNode) ToXml(_ []uint8, _ []uint8) ([]uint8, int) {
+func (_ ProcessingInstructionNode) ToXml(_ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -2035,9 +1782,7 @@ func (_ ProcessingInstructionNode) Uri() string {
return ""
}
-func (_ ProcessingInstructionNode) Wrap(_ string) interface {
- Error() string
-} {
+func (_ ProcessingInstructionNode) Wrap(_ string) error {
return nil
}
@@ -2047,21 +1792,15 @@ type TextNode struct {
XmlNode *XmlNode
}
-func (_ TextNode) AddChild(_ interface{}) interface {
- Error() string
-} {
+func (_ TextNode) AddChild(_ interface{}) error {
return nil
}
-func (_ TextNode) AddNextSibling(_ interface{}) interface {
- Error() string
-} {
+func (_ TextNode) AddNextSibling(_ interface{}) error {
return nil
}
-func (_ TextNode) AddPreviousSibling(_ interface{}) interface {
- Error() string
-} {
+func (_ TextNode) AddPreviousSibling(_ interface{}) error {
return nil
}
@@ -2085,9 +1824,7 @@ func (_ TextNode) Attributes() map[string]*AttributeNode {
func (_ TextNode) BookkeepFragment(_ *DocumentFragment) {}
-func (_ TextNode) Coerce(_ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ TextNode) Coerce(_ interface{}) ([]Node, error) {
return nil, nil
}
@@ -2137,7 +1874,7 @@ func (_ TextNode) DocType() NodeType {
return 0
}
-func (_ TextNode) DocXPathCtx() *interface{} {
+func (_ TextNode) DocXPathCtx() interface{} {
return nil
}
@@ -2149,9 +1886,7 @@ func (_ TextNode) DuplicateTo(_ Document, _ int) Node {
return nil
}
-func (_ TextNode) EvalXPath(_ interface{}, _ interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ TextNode) EvalXPath(_ interface{}, _ interface{}) (interface{}, error) {
return nil, nil
}
@@ -2169,31 +1904,23 @@ func (_ TextNode) InnerHtml() string {
return ""
}
-func (_ TextNode) InputEncoding() []uint8 {
+func (_ TextNode) InputEncoding() []byte {
return nil
}
-func (_ TextNode) InsertAfter(_ interface{}) interface {
- Error() string
-} {
+func (_ TextNode) InsertAfter(_ interface{}) error {
return nil
}
-func (_ TextNode) InsertBefore(_ interface{}) interface {
- Error() string
-} {
+func (_ TextNode) InsertBefore(_ interface{}) error {
return nil
}
-func (_ TextNode) InsertBegin(_ interface{}) interface {
- Error() string
-} {
+func (_ TextNode) InsertBegin(_ interface{}) error {
return nil
}
-func (_ TextNode) InsertEnd(_ interface{}) interface {
- Error() string
-} {
+func (_ TextNode) InsertEnd(_ interface{}) error {
return nil
}
@@ -2237,7 +1964,7 @@ func (_ TextNode) NodeType() NodeType {
return 0
}
-func (_ TextNode) OutputEncoding() []uint8 {
+func (_ TextNode) OutputEncoding() []byte {
return nil
}
@@ -2245,9 +1972,7 @@ func (_ TextNode) Parent() Node {
return nil
}
-func (_ TextNode) ParseFragment(_ []uint8, _ []uint8, _ ParseOption) (*DocumentFragment, interface {
- Error() string
-}) {
+func (_ TextNode) ParseFragment(_ []byte, _ []byte, _ ParseOption) (*DocumentFragment, error) {
return nil, nil
}
@@ -2259,9 +1984,7 @@ func (_ TextNode) PreviousSibling() Node {
return nil
}
-func (_ TextNode) RecursivelyRemoveNamespaces() interface {
- Error() string
-} {
+func (_ TextNode) RecursivelyRemoveNamespaces() error {
return nil
}
@@ -2273,9 +1996,7 @@ func (_ TextNode) RemoveUnlinkedNode(_ interface{}) bool {
return false
}
-func (_ TextNode) Replace(_ interface{}) interface {
- Error() string
-} {
+func (_ TextNode) Replace(_ interface{}) error {
return nil
}
@@ -2287,19 +2008,15 @@ func (_ TextNode) Root() *ElementNode {
return nil
}
-func (_ TextNode) Search(_ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ TextNode) Search(_ interface{}) ([]Node, error) {
return nil, nil
}
-func (_ TextNode) SearchWithVariables(_ interface{}, _ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ TextNode) SearchWithVariables(_ interface{}, _ interface{}) ([]Node, error) {
return nil, nil
}
-func (_ TextNode) SerializeWithFormat(_ SerializationOption, _ []uint8, _ []uint8) ([]uint8, int) {
+func (_ TextNode) SerializeWithFormat(_ SerializationOption, _ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -2307,21 +2024,15 @@ func (_ TextNode) SetAttr(_ string, _ string) string {
return ""
}
-func (_ TextNode) SetChildren(_ interface{}) interface {
- Error() string
-} {
+func (_ TextNode) SetChildren(_ interface{}) error {
return nil
}
-func (_ TextNode) SetContent(_ interface{}) interface {
- Error() string
-} {
+func (_ TextNode) SetContent(_ interface{}) error {
return nil
}
-func (_ TextNode) SetInnerHtml(_ interface{}) interface {
- Error() string
-} {
+func (_ TextNode) SetInnerHtml(_ interface{}) error {
return nil
}
@@ -2337,11 +2048,11 @@ func (_ TextNode) String() string {
return ""
}
-func (_ TextNode) ToBuffer(_ []uint8) []uint8 {
+func (_ TextNode) ToBuffer(_ []byte) []byte {
return nil
}
-func (_ TextNode) ToHtml(_ []uint8, _ []uint8) ([]uint8, int) {
+func (_ TextNode) ToHtml(_ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -2349,7 +2060,7 @@ func (_ TextNode) ToUnformattedXml() string {
return ""
}
-func (_ TextNode) ToXml(_ []uint8, _ []uint8) ([]uint8, int) {
+func (_ TextNode) ToXml(_ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -2363,16 +2074,14 @@ func (_ TextNode) Uri() string {
return ""
}
-func (_ TextNode) Wrap(_ string) interface {
- Error() string
-} {
+func (_ TextNode) Wrap(_ string) error {
return nil
}
func (_ *TextNode) DisableOutputEscaping() {}
type XmlNode struct {
- Ptr *interface{}
+ Ptr interface{}
Document Document
}
@@ -2412,13 +2121,13 @@ func (_ XmlNode) DocType() NodeType {
return 0
}
-func (_ XmlNode) DocXPathCtx() *interface{} {
+func (_ XmlNode) DocXPathCtx() interface{} {
return nil
}
func (_ XmlNode) Free() {}
-func (_ XmlNode) InputEncoding() []uint8 {
+func (_ XmlNode) InputEncoding() []byte {
return nil
}
@@ -2426,7 +2135,7 @@ func (_ XmlNode) NodeById(_ string) *ElementNode {
return nil
}
-func (_ XmlNode) OutputEncoding() []uint8 {
+func (_ XmlNode) OutputEncoding() []byte {
return nil
}
@@ -2446,21 +2155,15 @@ func (_ XmlNode) Uri() string {
return ""
}
-func (_ *XmlNode) AddChild(_ interface{}) interface {
- Error() string
-} {
+func (_ *XmlNode) AddChild(_ interface{}) error {
return nil
}
-func (_ *XmlNode) AddNextSibling(_ interface{}) interface {
- Error() string
-} {
+func (_ *XmlNode) AddNextSibling(_ interface{}) error {
return nil
}
-func (_ *XmlNode) AddPreviousSibling(_ interface{}) interface {
- Error() string
-} {
+func (_ *XmlNode) AddPreviousSibling(_ interface{}) error {
return nil
}
@@ -2480,9 +2183,7 @@ func (_ *XmlNode) Attributes() map[string]*AttributeNode {
return nil
}
-func (_ *XmlNode) Coerce(_ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ *XmlNode) Coerce(_ interface{}) ([]Node, error) {
return nil, nil
}
@@ -2508,9 +2209,7 @@ func (_ *XmlNode) DuplicateTo(_ Document, _ int) Node {
return nil
}
-func (_ *XmlNode) EvalXPath(_ interface{}, _ interface{}) (interface{}, interface {
- Error() string
-}) {
+func (_ *XmlNode) EvalXPath(_ interface{}, _ interface{}) (interface{}, error) {
return nil, nil
}
@@ -2526,27 +2225,19 @@ func (_ *XmlNode) InnerHtml() string {
return ""
}
-func (_ *XmlNode) InsertAfter(_ interface{}) interface {
- Error() string
-} {
+func (_ *XmlNode) InsertAfter(_ interface{}) error {
return nil
}
-func (_ *XmlNode) InsertBefore(_ interface{}) interface {
- Error() string
-} {
+func (_ *XmlNode) InsertBefore(_ interface{}) error {
return nil
}
-func (_ *XmlNode) InsertBegin(_ interface{}) interface {
- Error() string
-} {
+func (_ *XmlNode) InsertBegin(_ interface{}) error {
return nil
}
-func (_ *XmlNode) InsertEnd(_ interface{}) interface {
- Error() string
-} {
+func (_ *XmlNode) InsertEnd(_ interface{}) error {
return nil
}
@@ -2590,9 +2281,7 @@ func (_ *XmlNode) Parent() Node {
return nil
}
-func (_ *XmlNode) ParseFragment(_ []uint8, _ []uint8, _ ParseOption) (*DocumentFragment, interface {
- Error() string
-}) {
+func (_ *XmlNode) ParseFragment(_ []byte, _ []byte, _ ParseOption) (*DocumentFragment, error) {
return nil, nil
}
@@ -2604,9 +2293,7 @@ func (_ *XmlNode) PreviousSibling() Node {
return nil
}
-func (_ *XmlNode) RecursivelyRemoveNamespaces() interface {
- Error() string
-} {
+func (_ *XmlNode) RecursivelyRemoveNamespaces() error {
return nil
}
@@ -2614,9 +2301,7 @@ func (_ *XmlNode) Remove() {}
func (_ *XmlNode) RemoveDefaultNamespace() {}
-func (_ *XmlNode) Replace(_ interface{}) interface {
- Error() string
-} {
+func (_ *XmlNode) Replace(_ interface{}) error {
return nil
}
@@ -2624,19 +2309,15 @@ func (_ *XmlNode) ResetChildren() {}
func (_ *XmlNode) ResetNodePtr() {}
-func (_ *XmlNode) Search(_ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ *XmlNode) Search(_ interface{}) ([]Node, error) {
return nil, nil
}
-func (_ *XmlNode) SearchWithVariables(_ interface{}, _ interface{}) ([]Node, interface {
- Error() string
-}) {
+func (_ *XmlNode) SearchWithVariables(_ interface{}, _ interface{}) ([]Node, error) {
return nil, nil
}
-func (_ *XmlNode) SerializeWithFormat(_ SerializationOption, _ []uint8, _ []uint8) ([]uint8, int) {
+func (_ *XmlNode) SerializeWithFormat(_ SerializationOption, _ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -2644,21 +2325,15 @@ func (_ *XmlNode) SetAttr(_ string, _ string) string {
return ""
}
-func (_ *XmlNode) SetChildren(_ interface{}) interface {
- Error() string
-} {
+func (_ *XmlNode) SetChildren(_ interface{}) error {
return nil
}
-func (_ *XmlNode) SetContent(_ interface{}) interface {
- Error() string
-} {
+func (_ *XmlNode) SetContent(_ interface{}) error {
return nil
}
-func (_ *XmlNode) SetInnerHtml(_ interface{}) interface {
- Error() string
-} {
+func (_ *XmlNode) SetInnerHtml(_ interface{}) error {
return nil
}
@@ -2674,11 +2349,11 @@ func (_ *XmlNode) String() string {
return ""
}
-func (_ *XmlNode) ToBuffer(_ []uint8) []uint8 {
+func (_ *XmlNode) ToBuffer(_ []byte) []byte {
return nil
}
-func (_ *XmlNode) ToHtml(_ []uint8, _ []uint8) ([]uint8, int) {
+func (_ *XmlNode) ToHtml(_ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
@@ -2686,14 +2361,12 @@ func (_ *XmlNode) ToUnformattedXml() string {
return ""
}
-func (_ *XmlNode) ToXml(_ []uint8, _ []uint8) ([]uint8, int) {
+func (_ *XmlNode) ToXml(_ []byte, _ []byte) ([]byte, int) {
return nil, 0
}
func (_ *XmlNode) Unlink() {}
-func (_ *XmlNode) Wrap(_ string) interface {
- Error() string
-} {
+func (_ *XmlNode) Wrap(_ string) error {
return nil
}
diff --git a/ql/test/query-tests/Security/CWE-643/vendor/github.com/jbowtie/gokogiri/xpath/stub.go b/ql/test/query-tests/Security/CWE-643/vendor/github.com/jbowtie/gokogiri/xpath/stub.go
index 54f664cf2a1..c1e32cba5f2 100644
--- a/ql/test/query-tests/Security/CWE-643/vendor/github.com/jbowtie/gokogiri/xpath/stub.go
+++ b/ql/test/query-tests/Security/CWE-643/vendor/github.com/jbowtie/gokogiri/xpath/stub.go
@@ -1,6 +1,7 @@
// Code generated by depstubber. DO NOT EDIT.
// This is a simple stub for github.com/jbowtie/gokogiri/xpath, strictly for use in testing.
+// See the LICENSE file for information about the licensing of the original library.
// Source: github.com/jbowtie/gokogiri/xpath (exports: ; functions: Compile)
// Package xpath is a stub of github.com/jbowtie/gokogiri/xpath, generated by depstubber.
@@ -13,7 +14,7 @@ func Compile(_ string) *Expression {
}
type Expression struct {
- Ptr *interface{}
+ Ptr interface{}
}
func (_ *Expression) Free() {}
diff --git a/ql/test/query-tests/Security/CWE-643/vendor/github.com/santhosh-tekuri/xpathparser/stub.go b/ql/test/query-tests/Security/CWE-643/vendor/github.com/santhosh-tekuri/xpathparser/stub.go
index 8f49df46c65..8bb9a6ced33 100644
--- a/ql/test/query-tests/Security/CWE-643/vendor/github.com/santhosh-tekuri/xpathparser/stub.go
+++ b/ql/test/query-tests/Security/CWE-643/vendor/github.com/santhosh-tekuri/xpathparser/stub.go
@@ -1,6 +1,7 @@
// Code generated by depstubber. DO NOT EDIT.
// This is a simple stub for github.com/santhosh-tekuri/xpathparser, strictly for use in testing.
+// See the LICENSE file for information about the licensing of the original library.
// Source: github.com/santhosh-tekuri/xpathparser (exports: ; functions: Parse,MustParse)
// Package xpathparser is a stub of github.com/santhosh-tekuri/xpathparser, generated by depstubber.
@@ -14,8 +15,6 @@ func MustParse(_ string) Expr {
return nil
}
-func Parse(_ string) (Expr, interface {
- Error() string
-}) {
+func Parse(_ string) (Expr, error) {
return nil, nil
}
diff --git a/ql/test/query-tests/Security/CWE-643/vendor/modules.txt b/ql/test/query-tests/Security/CWE-643/vendor/modules.txt
index c51bc8971fe..ce1e1188bf6 100644
--- a/ql/test/query-tests/Security/CWE-643/vendor/modules.txt
+++ b/ql/test/query-tests/Security/CWE-643/vendor/modules.txt
@@ -13,30 +13,12 @@ github.com/antchfx/xmlquery
# github.com/antchfx/xpath v1.1.5
## explicit
github.com/antchfx/xpath
-# github.com/github/depstubber v0.0.0-20200413231600-392d5a70208e
-## explicit
-github.com/github/depstubber
# github.com/go-xmlpath/xmlpath v0.0.0-20150820204837-860cbeca3ebc
## explicit
github.com/go-xmlpath/xmlpath
-# github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e
-## explicit
-github.com/golang/groupcache
# github.com/jbowtie/gokogiri v0.0.0-20190301021639-37f655d3078f
## explicit
github.com/jbowtie/gokogiri
-# github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e
-## explicit
-github.com/niemeyer/pretty
# github.com/santhosh-tekuri/xpathparser v1.0.0
## explicit
github.com/santhosh-tekuri/xpathparser
-# golang.org/x/net v0.0.0-20200226121028-0de0cce0169b
-## explicit
-golang.org/x/net
-# gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f
-## explicit
-gopkg.in/check.v1
-# gopkg.in/xmlpath.v2 v2.0.0-20150820204837-860cbeca3ebc
-## explicit
-gopkg.in/xmlpath.v2
diff --git a/ql/test/query-tests/filters/ClassifyFiles/go.mod b/ql/test/query-tests/filters/ClassifyFiles/go.mod
index 62ce60299b7..aa05800e200 100644
--- a/ql/test/query-tests/filters/ClassifyFiles/go.mod
+++ b/ql/test/query-tests/filters/ClassifyFiles/go.mod
@@ -3,10 +3,6 @@ module filters.ClassifyFiles
go 1.14
require (
- github.com/github/depstubber v0.0.0-20200414033246-a63ca77a1581 // indirect
github.com/onsi/ginkgo v1.12.0
github.com/onsi/gomega v1.9.0
- golang.org/x/net v0.0.0-20200226121028-0de0cce0169b // indirect
- golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e // indirect
- golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 // indirect
)
diff --git a/ql/test/query-tests/filters/ClassifyFiles/vendor/modules.txt b/ql/test/query-tests/filters/ClassifyFiles/vendor/modules.txt
index b17b7a8df2c..b7e13eca83b 100644
--- a/ql/test/query-tests/filters/ClassifyFiles/vendor/modules.txt
+++ b/ql/test/query-tests/filters/ClassifyFiles/vendor/modules.txt
@@ -1,18 +1,6 @@
-# github.com/github/depstubber v0.0.0-20200414033246-a63ca77a1581
-## explicit
-github.com/github/depstubber
# github.com/onsi/ginkgo v1.12.0
## explicit
github.com/onsi/ginkgo
# github.com/onsi/gomega v1.9.0
## explicit
github.com/onsi/gomega
-# golang.org/x/net v0.0.0-20200226121028-0de0cce0169b
-## explicit
-golang.org/x/net
-# golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
-## explicit
-golang.org/x/sync
-# golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
-## explicit
-golang.org/x/xerrors
From 804165c9ef45371305ebc3457e6a22f56401a12f Mon Sep 17 00:00:00 2001
From: Sauyon Lee
Date: Thu, 14 May 2020 02:28:15 -0700
Subject: [PATCH 102/157] Fix comment in ReflectedXss nonhtmlcontenttype
Co-authored-by: Max Schaefer <54907921+max-schaefer@users.noreply.github.com>
---
ql/src/semmle/go/security/ReflectedXssCustomizations.qll | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ql/src/semmle/go/security/ReflectedXssCustomizations.qll b/ql/src/semmle/go/security/ReflectedXssCustomizations.qll
index b6b3757c060..8940df93b12 100644
--- a/ql/src/semmle/go/security/ReflectedXssCustomizations.qll
+++ b/ql/src/semmle/go/security/ReflectedXssCustomizations.qll
@@ -71,7 +71,7 @@ module ReflectedXss {
)
or
exists(DataFlow::Node pred | body = pred.getASuccessor*() |
- // data starting with `<` cannot cause an HTML content type to be detected.
+ // data starting with a character other than `<` cannot cause an HTML content type to be detected.
pred.getStringValue().regexpMatch("^[^<].*")
or
// json data cannot begin with `<`
From ee0f3c9fbaf986bc25c8785a3222d6eb407e1409 Mon Sep 17 00:00:00 2001
From: Sauyon Lee
Date: Thu, 14 May 2020 02:29:26 -0700
Subject: [PATCH 103/157] Address review comments
---
ql/src/semmle/go/security/ReflectedXssCustomizations.qll | 6 +++++-
ql/test/query-tests/Security/CWE-079/tst.go | 2 +-
2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/ql/src/semmle/go/security/ReflectedXssCustomizations.qll b/ql/src/semmle/go/security/ReflectedXssCustomizations.qll
index 8940df93b12..93439ace5eb 100644
--- a/ql/src/semmle/go/security/ReflectedXssCustomizations.qll
+++ b/ql/src/semmle/go/security/ReflectedXssCustomizations.qll
@@ -109,7 +109,11 @@ module ReflectedXss {
*/
class EqualityTestGuard extends SanitizerGuard, DataFlow::EqualityTestNode {
override predicate checks(Expr e, boolean outcome) {
- e = this.getAnOperand().asExpr() and this.eq(outcome, _, _)
+ exists(DataFlow::Node const | const.isConst() |
+ const = this.getAnOperand() and
+ e = this.getAnOperand().asExpr() and
+ this.eq(outcome, _, _)
+ )
}
}
}
diff --git a/ql/test/query-tests/Security/CWE-079/tst.go b/ql/test/query-tests/Security/CWE-079/tst.go
index f4a154c94cb..e25c79c74e1 100644
--- a/ql/test/query-tests/Security/CWE-079/tst.go
+++ b/ql/test/query-tests/Security/CWE-079/tst.go
@@ -48,7 +48,7 @@ func serve8() {
if service != "service1" && service != "service2" {
fmt.Fprintln(w, "Service not found")
} else {
- // OK: json data cannot cause an HTML content type to be detected
+ // OK, but caught: service is known to be either "service1" or "service2" here
w.Write([]byte(service))
}
})
From 5e2b973ac490b68ba860ef89c9bbf69923767990 Mon Sep 17 00:00:00 2001
From: Sauyon Lee
Date: Thu, 14 May 2020 14:35:08 -0700
Subject: [PATCH 104/157] Update comment in ReflectedXss test
Co-authored-by: Max Schaefer <54907921+max-schaefer@users.noreply.github.com>
---
ql/test/query-tests/Security/CWE-079/tst.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ql/test/query-tests/Security/CWE-079/tst.go b/ql/test/query-tests/Security/CWE-079/tst.go
index e25c79c74e1..b76e087e26e 100644
--- a/ql/test/query-tests/Security/CWE-079/tst.go
+++ b/ql/test/query-tests/Security/CWE-079/tst.go
@@ -48,7 +48,7 @@ func serve8() {
if service != "service1" && service != "service2" {
fmt.Fprintln(w, "Service not found")
} else {
- // OK, but caught: service is known to be either "service1" or "service2" here
+ // OK (service is known to be either "service1" or "service2" here), but currently flagged
w.Write([]byte(service))
}
})
From 5e633b2c740d30d9e9a03f2741c8a2fe2a3a824b Mon Sep 17 00:00:00 2001
From: Sauyon Lee
Date: Thu, 14 May 2020 14:38:03 -0700
Subject: [PATCH 105/157] Add EqualityTestNode.getPolarity
---
ql/src/semmle/go/dataflow/internal/DataFlowUtil.qll | 3 +++
.../semmle/go/security/OpenUrlRedirectCustomizations.qll | 2 +-
ql/src/semmle/go/security/ReflectedXssCustomizations.qll | 8 +++-----
3 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/ql/src/semmle/go/dataflow/internal/DataFlowUtil.qll b/ql/src/semmle/go/dataflow/internal/DataFlowUtil.qll
index d01829a1f5c..d3d9fd928f9 100644
--- a/ql/src/semmle/go/dataflow/internal/DataFlowUtil.qll
+++ b/ql/src/semmle/go/dataflow/internal/DataFlowUtil.qll
@@ -758,6 +758,9 @@ class EqualityTestNode extends BinaryOperationNode, ExprNode {
outcome = expr.getPolarity() and
expr.hasOperands(lhs.asExpr(), rhs.asExpr())
}
+
+ /** Gets the polarity of this equality test, that is, `true` for `==` and `false` for `!=`. */
+ boolean getPolarity() { result = expr.getPolarity() }
}
/**
diff --git a/ql/src/semmle/go/security/OpenUrlRedirectCustomizations.qll b/ql/src/semmle/go/security/OpenUrlRedirectCustomizations.qll
index 7c4b0854daf..ba88143eb80 100644
--- a/ql/src/semmle/go/security/OpenUrlRedirectCustomizations.qll
+++ b/ql/src/semmle/go/security/OpenUrlRedirectCustomizations.qll
@@ -137,7 +137,7 @@ module OpenUrlRedirect {
}
override predicate checks(Expr e, boolean outcome) {
- e = url.asExpr() and this.eq(outcome, _, _)
+ e = url.asExpr() and outcome = this.getPolarity()
}
}
diff --git a/ql/src/semmle/go/security/ReflectedXssCustomizations.qll b/ql/src/semmle/go/security/ReflectedXssCustomizations.qll
index 93439ace5eb..80b21da6efa 100644
--- a/ql/src/semmle/go/security/ReflectedXssCustomizations.qll
+++ b/ql/src/semmle/go/security/ReflectedXssCustomizations.qll
@@ -109,11 +109,9 @@ module ReflectedXss {
*/
class EqualityTestGuard extends SanitizerGuard, DataFlow::EqualityTestNode {
override predicate checks(Expr e, boolean outcome) {
- exists(DataFlow::Node const | const.isConst() |
- const = this.getAnOperand() and
- e = this.getAnOperand().asExpr() and
- this.eq(outcome, _, _)
- )
+ this.getAnOperand().isConst() and
+ e = this.getAnOperand().asExpr() and
+ outcome = this.getPolarity()
}
}
}
From 24d8c7ea174d3b93f208c2274699b3026d5b0300 Mon Sep 17 00:00:00 2001
From: Max Schaefer <54907921+max-schaefer@users.noreply.github.com>
Date: Fri, 15 May 2020 07:31:51 +0100
Subject: [PATCH 106/157] Clarify which types have a qualified name.
---
ql/src/semmle/go/Types.qll | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/ql/src/semmle/go/Types.qll b/ql/src/semmle/go/Types.qll
index cee40d31410..1b246e3e904 100644
--- a/ql/src/semmle/go/Types.qll
+++ b/ql/src/semmle/go/Types.qll
@@ -25,11 +25,17 @@ class Type extends @type {
/**
* Gets the qualified name of this type, if any.
+ *
+ * Only (defined) named types like `io.Writer` have a qualified name. Basic types like `int`,
+ * pointer types like `*io.Writer`, and other composite types do not have a qualified name.
*/
string getQualifiedName() { result = getEntity().getQualifiedName() }
/**
* Holds if this type is declared in a package with path `pkg` and has name `name`.
+ *
+ * Only (defined) named types like `io.Writer` have a qualified name. Basic types like `int`,
+ * pointer types like `*io.Writer`, and other composite types do not have a qualified name.
*/
predicate hasQualifiedName(string pkg, string name) { getEntity().hasQualifiedName(pkg, name) }
From d300ec6324e7c7f0671185cfae427963d46ed677 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Thu, 14 May 2020 11:28:44 +0100
Subject: [PATCH 107/157] Refine `Method.implements` so that interface methods
only implement themselves.
Without this restriction, the two `m`s in the following example are considered to implement each other, even though they aren't logically related:
```go
type I interface {
m()
}
type J interface {
m()
}
type K struct {
I
J
}
```
Previously, interface methods would sometimes implement themselves and sometimes not (see changes to test output for examples).
---
ql/src/semmle/go/Scopes.qll | 9 +++++++++
.../semmle/go/Scopes/DeclaredEntity.expected | 2 ++
.../semmle/go/Scopes/EntityReference.expected | 4 ++++
.../semmle/go/Scopes/EntityType.expected | 2 ++
.../semmle/go/Scopes/MethodImplements.expected | 10 ++++++++--
.../go/Scopes/MethodImplementsName.expected | 11 ++++++++---
.../semmle/go/Scopes/Methods.expected | 1 +
.../semmle/go/Scopes/TypeImplements.expected | 5 +++++
ql/test/library-tests/semmle/go/Scopes/types.go | 5 +++++
.../Security/CWE-079/ReflectedXss.expected | 16 ++++++++--------
ql/test/query-tests/Security/CWE-079/tst.go | 15 +++++++++++++++
11 files changed, 67 insertions(+), 13 deletions(-)
diff --git a/ql/src/semmle/go/Scopes.qll b/ql/src/semmle/go/Scopes.qll
index 74af0d196e1..c9bf60e3ac4 100644
--- a/ql/src/semmle/go/Scopes.qll
+++ b/ql/src/semmle/go/Scopes.qll
@@ -406,6 +406,9 @@ class Method extends Function {
result = this.getReceiverType().getPackage()
}
+ /** Holds if this method is declared in an interface. */
+ predicate isInterfaceMethod() { getReceiverType().getUnderlyingType() instanceof InterfaceType }
+
/** Gets the receiver variable of this method. */
Variable getReceiver() { result = receiver }
@@ -464,8 +467,14 @@ class Method extends Function {
* Holds if this method implements the method `m`, that is, if `m` is a method
* on an interface, and this is a method with the same name on a type that
* implements that interface.
+ *
+ * Note that all methods implement themselves, and interface methods _only_
+ * implement themselves.
*/
predicate implements(Method m) {
+ this = m
+ or
+ not isInterfaceMethod() and
exists(Type t |
this = t.getMethod(m.getName()) and
t.implements(m.getReceiverType().getUnderlyingType())
diff --git a/ql/test/library-tests/semmle/go/Scopes/DeclaredEntity.expected b/ql/test/library-tests/semmle/go/Scopes/DeclaredEntity.expected
index aa235d0b23f..8fa90233151 100644
--- a/ql/test/library-tests/semmle/go/Scopes/DeclaredEntity.expected
+++ b/ql/test/library-tests/semmle/go/Scopes/DeclaredEntity.expected
@@ -23,3 +23,5 @@
| types.go:33:16:33:20 | meth1 | types.go:33:16:33:20 | meth1 |
| types.go:33:22:33:22 | a | types.go:33:22:33:22 | a |
| types.go:37:16:37:20 | meth2 | types.go:37:16:37:20 | meth2 |
+| types.go:41:6:41:27 | iHaveARedeclaredMethod | types.go:41:6:41:27 | iHaveARedeclaredMethod |
+| types.go:43:2:43:5 | meth | types.go:43:2:43:5 | meth |
diff --git a/ql/test/library-tests/semmle/go/Scopes/EntityReference.expected b/ql/test/library-tests/semmle/go/Scopes/EntityReference.expected
index 97fa0236baf..d78211a4c66 100644
--- a/ql/test/library-tests/semmle/go/Scopes/EntityReference.expected
+++ b/ql/test/library-tests/semmle/go/Scopes/EntityReference.expected
@@ -13,6 +13,7 @@
| file://:0:0:0:0 | int | | types.go:27:25:27:27 | int |
| file://:0:0:0:0 | int | | types.go:33:24:33:26 | int |
| file://:0:0:0:0 | int | | types.go:37:24:37:26 | int |
+| file://:0:0:0:0 | int | | types.go:43:9:43:11 | int |
| main.go:5:6:5:6 | t | main.go@5:6:5:6 | main.go:5:6:5:6 | t |
| main.go:5:6:5:6 | t | main.go@5:6:5:6 | main.go:13:13:13:13 | t |
| main.go:5:6:5:6 | t | main.go@5:6:5:6 | main.go:17:29:17:29 | t |
@@ -42,6 +43,7 @@
| main.go:23:16:23:19 | bump | main.go@23:16:23:19 | main.go:23:16:23:19 | bump |
| types.go:3:6:3:17 | iHaveAMethod | types.go@3:6:3:17 | main.go:17:12:17:23 | iHaveAMethod |
| types.go:3:6:3:17 | iHaveAMethod | types.go@3:6:3:17 | types.go:3:6:3:17 | iHaveAMethod |
+| types.go:3:6:3:17 | iHaveAMethod | types.go@3:6:3:17 | types.go:42:2:42:13 | iHaveAMethod |
| types.go:4:2:4:5 | meth | types.go@4:2:4:5 | main.go:18:2:18:7 | selection of meth |
| types.go:4:2:4:5 | meth | types.go@4:2:4:5 | main.go:18:4:18:7 | meth |
| types.go:4:2:4:5 | meth | types.go@4:2:4:5 | types.go:4:2:4:5 | meth |
@@ -65,3 +67,5 @@
| types.go:33:22:33:22 | a | types.go@33:22:33:22 | types.go:33:22:33:22 | a |
| types.go:33:22:33:22 | a | types.go@33:22:33:22 | types.go:34:9:34:9 | a |
| types.go:37:16:37:20 | meth2 | types.go@37:16:37:20 | types.go:37:16:37:20 | meth2 |
+| types.go:41:6:41:27 | iHaveARedeclaredMethod | types.go@41:6:41:27 | types.go:41:6:41:27 | iHaveARedeclaredMethod |
+| types.go:43:2:43:5 | meth | types.go@43:2:43:5 | types.go:43:2:43:5 | meth |
diff --git a/ql/test/library-tests/semmle/go/Scopes/EntityType.expected b/ql/test/library-tests/semmle/go/Scopes/EntityType.expected
index 8c7eac514b1..d61da59604e 100644
--- a/ql/test/library-tests/semmle/go/Scopes/EntityType.expected
+++ b/ql/test/library-tests/semmle/go/Scopes/EntityType.expected
@@ -23,3 +23,5 @@
| types.go:33:16:33:20 | meth1 | func(int) bool |
| types.go:33:22:33:22 | a | int |
| types.go:37:16:37:20 | meth2 | func() int |
+| types.go:41:6:41:27 | iHaveARedeclaredMethod | iHaveARedeclaredMethod |
+| types.go:43:2:43:5 | meth | func() int |
diff --git a/ql/test/library-tests/semmle/go/Scopes/MethodImplements.expected b/ql/test/library-tests/semmle/go/Scopes/MethodImplements.expected
index b23488d0571..acafedf6428 100644
--- a/ql/test/library-tests/semmle/go/Scopes/MethodImplements.expected
+++ b/ql/test/library-tests/semmle/go/Scopes/MethodImplements.expected
@@ -1,10 +1,16 @@
| iHaveAMethod | meth | iHaveAMethod | meth |
+| iHaveARedeclaredMethod | meth | iHaveARedeclaredMethod | meth |
| meth1Iface | meth1 | meth1Iface | meth1 |
-| meth1Iface | meth1 | twoMethods | meth1 |
+| notImpl | meth1 | notImpl | meth1 |
+| notImpl | meth2 | notImpl | meth2 |
+| pointer type | bump | pointer type | bump |
| pointer type | meth | iHaveAMethod | meth |
+| pointer type | meth | iHaveARedeclaredMethod | meth |
+| pointer type | meth | pointer type | meth |
| pointer type | meth1 | meth1Iface | meth1 |
+| pointer type | meth1 | pointer type | meth1 |
| pointer type | meth1 | twoMethods | meth1 |
+| starImpl | meth2 | starImpl | meth2 |
| starImpl | meth2 | twoMethods | meth2 |
-| twoMethods | meth1 | meth1Iface | meth1 |
| twoMethods | meth1 | twoMethods | meth1 |
| twoMethods | meth2 | twoMethods | meth2 |
diff --git a/ql/test/library-tests/semmle/go/Scopes/MethodImplementsName.expected b/ql/test/library-tests/semmle/go/Scopes/MethodImplementsName.expected
index 2d5931a28a0..05b655129ae 100644
--- a/ql/test/library-tests/semmle/go/Scopes/MethodImplementsName.expected
+++ b/ql/test/library-tests/semmle/go/Scopes/MethodImplementsName.expected
@@ -1,15 +1,20 @@
| iHaveAMethod | meth | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | iHaveAMethod | meth |
+| iHaveARedeclaredMethod | meth | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | iHaveARedeclaredMethod | meth |
| meth1Iface | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | meth1Iface | meth1 |
-| meth1Iface | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | twoMethods | meth1 |
| meth1Iface | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | twoMethodsEmbedded | meth1 |
+| notImpl | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | notImpl | meth1 |
+| notImpl | meth2 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | notImpl | meth2 |
+| pointer type | bump | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | t | bump |
| pointer type | meth | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | iHaveAMethod | meth |
+| pointer type | meth | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | iHaveARedeclaredMethod | meth |
+| pointer type | meth | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | t | meth |
| pointer type | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | meth1Iface | meth1 |
+| pointer type | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | starImpl | meth1 |
| pointer type | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | twoMethods | meth1 |
| pointer type | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | twoMethodsEmbedded | meth1 |
+| starImpl | meth2 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | starImpl | meth2 |
| starImpl | meth2 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | twoMethods | meth2 |
| starImpl | meth2 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | twoMethodsEmbedded | meth2 |
-| twoMethods | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | meth1Iface | meth1 |
| twoMethods | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | twoMethods | meth1 |
-| twoMethods | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | twoMethodsEmbedded | meth1 |
| twoMethods | meth2 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | twoMethods | meth2 |
| twoMethods | meth2 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | twoMethodsEmbedded | meth2 |
diff --git a/ql/test/library-tests/semmle/go/Scopes/Methods.expected b/ql/test/library-tests/semmle/go/Scopes/Methods.expected
index 5afe9843562..215d6308509 100644
--- a/ql/test/library-tests/semmle/go/Scopes/Methods.expected
+++ b/ql/test/library-tests/semmle/go/Scopes/Methods.expected
@@ -10,3 +10,4 @@
| types.go:27:17:27:21 | meth2 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes.starImpl.meth2 | file://:0:0:0:0 | | starImpl |
| types.go:33:16:33:20 | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes.notImpl.meth1 | file://:0:0:0:0 | | notImpl |
| types.go:37:16:37:20 | meth2 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes.notImpl.meth2 | file://:0:0:0:0 | | notImpl |
+| types.go:43:2:43:5 | meth | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes.iHaveARedeclaredMethod.meth | file://:0:0:0:0 | | iHaveARedeclaredMethod |
diff --git a/ql/test/library-tests/semmle/go/Scopes/TypeImplements.expected b/ql/test/library-tests/semmle/go/Scopes/TypeImplements.expected
index d58feba2c03..d6e1ee64252 100644
--- a/ql/test/library-tests/semmle/go/Scopes/TypeImplements.expected
+++ b/ql/test/library-tests/semmle/go/Scopes/TypeImplements.expected
@@ -2,12 +2,17 @@
| * starImpl | twoMethods |
| * starImpl | twoMethodsEmbedded |
| * t | iHaveAMethod |
+| * t | iHaveARedeclaredMethod |
| iHaveAMethod | iHaveAMethod |
+| iHaveAMethod | iHaveARedeclaredMethod |
+| iHaveARedeclaredMethod | iHaveAMethod |
+| iHaveARedeclaredMethod | iHaveARedeclaredMethod |
| interface { meth1 func() bool } | meth1Iface |
| interface { meth1 func() bool; meth2 func() int } | meth1Iface |
| interface { meth1 func() bool; meth2 func() int } | twoMethods |
| interface { meth1 func() bool; meth2 func() int } | twoMethodsEmbedded |
| interface { meth func() int } | iHaveAMethod |
+| interface { meth func() int } | iHaveARedeclaredMethod |
| meth1Iface | meth1Iface |
| twoMethods | meth1Iface |
| twoMethods | twoMethods |
diff --git a/ql/test/library-tests/semmle/go/Scopes/types.go b/ql/test/library-tests/semmle/go/Scopes/types.go
index 5486d0faa74..49f640da6bb 100644
--- a/ql/test/library-tests/semmle/go/Scopes/types.go
+++ b/ql/test/library-tests/semmle/go/Scopes/types.go
@@ -37,3 +37,8 @@ func (notImpl) meth1(a int) bool {
func (notImpl) meth2() int {
return -42
}
+
+type iHaveARedeclaredMethod interface {
+ iHaveAMethod
+ meth() int
+}
diff --git a/ql/test/query-tests/Security/CWE-079/ReflectedXss.expected b/ql/test/query-tests/Security/CWE-079/ReflectedXss.expected
index c32d23ceb7b..24a074cd269 100644
--- a/ql/test/query-tests/Security/CWE-079/ReflectedXss.expected
+++ b/ql/test/query-tests/Security/CWE-079/ReflectedXss.expected
@@ -2,8 +2,8 @@ edges
| ReflectedXss.go:11:15:11:20 | selection of Form : Values | ReflectedXss.go:14:44:14:51 | username |
| contenttype.go:11:11:11:16 | selection of Form : Values | contenttype.go:17:11:17:22 | type conversion |
| contenttype.go:49:11:49:16 | selection of Form : Values | contenttype.go:53:34:53:37 | data |
-| tst.go:13:15:13:20 | selection of Form : Values | tst.go:17:12:17:39 | type conversion |
-| tst.go:47:14:47:19 | selection of Form : Values | tst.go:52:12:52:26 | type conversion |
+| tst.go:14:15:14:20 | selection of Form : Values | tst.go:18:12:18:39 | type conversion |
+| tst.go:48:14:48:19 | selection of Form : Values | tst.go:53:12:53:26 | type conversion |
nodes
| ReflectedXss.go:11:15:11:20 | selection of Form : Values | semmle.label | selection of Form : Values |
| ReflectedXss.go:14:44:14:51 | username | semmle.label | username |
@@ -11,13 +11,13 @@ nodes
| contenttype.go:17:11:17:22 | type conversion | semmle.label | type conversion |
| contenttype.go:49:11:49:16 | selection of Form : Values | semmle.label | selection of Form : Values |
| contenttype.go:53:34:53:37 | data | semmle.label | data |
-| tst.go:13:15:13:20 | selection of Form : Values | semmle.label | selection of Form : Values |
-| tst.go:17:12:17:39 | type conversion | semmle.label | type conversion |
-| tst.go:47:14:47:19 | selection of Form : Values | semmle.label | selection of Form : Values |
-| tst.go:52:12:52:26 | type conversion | semmle.label | type conversion |
+| tst.go:14:15:14:20 | selection of Form : Values | semmle.label | selection of Form : Values |
+| tst.go:18:12:18:39 | type conversion | semmle.label | type conversion |
+| tst.go:48:14:48:19 | selection of Form : Values | semmle.label | selection of Form : Values |
+| tst.go:53:12:53:26 | type conversion | semmle.label | type conversion |
#select
| ReflectedXss.go:14:44:14:51 | username | ReflectedXss.go:11:15:11:20 | selection of Form : Values | ReflectedXss.go:14:44:14:51 | username | Cross-site scripting vulnerability due to $@. | ReflectedXss.go:11:15:11:20 | selection of Form | user-provided value |
| contenttype.go:17:11:17:22 | type conversion | contenttype.go:11:11:11:16 | selection of Form : Values | contenttype.go:17:11:17:22 | type conversion | Cross-site scripting vulnerability due to $@. | contenttype.go:11:11:11:16 | selection of Form | user-provided value |
| contenttype.go:53:34:53:37 | data | contenttype.go:49:11:49:16 | selection of Form : Values | contenttype.go:53:34:53:37 | data | Cross-site scripting vulnerability due to $@. | contenttype.go:49:11:49:16 | selection of Form | user-provided value |
-| tst.go:17:12:17:39 | type conversion | tst.go:13:15:13:20 | selection of Form : Values | tst.go:17:12:17:39 | type conversion | Cross-site scripting vulnerability due to $@. | tst.go:13:15:13:20 | selection of Form | user-provided value |
-| tst.go:52:12:52:26 | type conversion | tst.go:47:14:47:19 | selection of Form : Values | tst.go:52:12:52:26 | type conversion | Cross-site scripting vulnerability due to $@. | tst.go:47:14:47:19 | selection of Form | user-provided value |
+| tst.go:18:12:18:39 | type conversion | tst.go:14:15:14:20 | selection of Form : Values | tst.go:18:12:18:39 | type conversion | Cross-site scripting vulnerability due to $@. | tst.go:14:15:14:20 | selection of Form | user-provided value |
+| tst.go:53:12:53:26 | type conversion | tst.go:48:14:48:19 | selection of Form : Values | tst.go:53:12:53:26 | type conversion | Cross-site scripting vulnerability due to $@. | tst.go:48:14:48:19 | selection of Form | user-provided value |
diff --git a/ql/test/query-tests/Security/CWE-079/tst.go b/ql/test/query-tests/Security/CWE-079/tst.go
index b76e087e26e..76bb3d8fe4c 100644
--- a/ql/test/query-tests/Security/CWE-079/tst.go
+++ b/ql/test/query-tests/Security/CWE-079/tst.go
@@ -3,6 +3,7 @@ package main
import (
"encoding/json"
"fmt"
+ "io"
"net/http"
"strings"
)
@@ -52,5 +53,19 @@ func serve8() {
w.Write([]byte(service))
}
})
+}
+
+type mix struct {
+ io.Writer
+ http.ResponseWriter
+}
+
+func serve9(log io.Writer) {
+ http.HandleFunc("/user", func(w http.ResponseWriter, r *http.Request) {
+ r.ParseForm()
+ username := r.Form.Get("username")
+ // OK: not a ResponseWriter
+ log.Write(username)
+ })
http.ListenAndServe(":80", nil)
}
From 9c7e46386fd3bc9bd8a1bda0360717ef38bd46fe Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Thu, 14 May 2020 15:22:15 +0100
Subject: [PATCH 108/157] Simplify logic in AllocationSizeOverflow query.
---
.../AllocationSizeOverflowCustomizations.qll | 27 +++++++------------
1 file changed, 9 insertions(+), 18 deletions(-)
diff --git a/ql/src/semmle/go/security/AllocationSizeOverflowCustomizations.qll b/ql/src/semmle/go/security/AllocationSizeOverflowCustomizations.qll
index fe2572ac5d0..dd4502e059a 100644
--- a/ql/src/semmle/go/security/AllocationSizeOverflowCustomizations.qll
+++ b/ql/src/semmle/go/security/AllocationSizeOverflowCustomizations.qll
@@ -51,8 +51,8 @@ module AllocationSizeOverflow {
exists(MarshalingFunction marshal, DataFlow::CallNode call |
call = marshal.getACall() and
// rule out cases where we can tell that the result will always be small
- not forall(FunctionInput inp | inp = marshal.getAnInput() |
- isSmall(inp.getNode(call).asExpr())
+ exists(FunctionInput inp | inp = marshal.getAnInput() |
+ isBig(inp.getNode(call).asExpr())
) and
this = marshal.getOutput().getNode(call)
)
@@ -152,26 +152,17 @@ module AllocationSizeOverflow {
exists(StructType st | st = t | forall(Field f | f = st.getField(_) | isSmallType(f.getType())))
}
- /** Holds if `e` is an expression whose values are likely to marshal to relatively small blobs. */
- private predicate isSmall(Expr e) {
- isSmallType(e.getType())
- or
- e.isConst()
+ /** Holds if `e` is an expression whose values might marshal to relatively large blobs. */
+ private predicate isBig(Expr e) {
+ not isSmallType(e.getType()) and
+ not e.isConst()
or
exists(KeyValueExpr kv | kv = e |
- isSmall(kv.getKey()) and
- isSmall(kv.getValue())
+ isBig(kv.getKey()) or
+ isBig(kv.getValue())
)
or
- isSmallCompositeLit(e, 0)
- }
-
- /** Holds if elements `n` and above of `lit` are small. */
- private predicate isSmallCompositeLit(CompositeLit lit, int n) {
- n = lit.getNumElement()
- or
- isSmall(lit.getElement(n)) and
- isSmallCompositeLit(lit, n + 1)
+ isBig(e.(CompositeLit).getAnElement())
}
/**
From 27cb92fb86a5306d6950ec4ce6f753a5ee540b29 Mon Sep 17 00:00:00 2001
From: Max Schaefer
Date: Fri, 15 May 2020 15:54:23 +0100
Subject: [PATCH 109/157] Use `.pp()` in a few tests selecting types.
---
.../semmle/go/Scopes/MethodImplements.expected | 14 +++++++-------
.../semmle/go/Scopes/MethodImplements.ql | 2 +-
.../go/Scopes/MethodImplementsName.expected | 16 ++++++++--------
.../semmle/go/Scopes/MethodImplementsName.ql | 2 +-
4 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/ql/test/library-tests/semmle/go/Scopes/MethodImplements.expected b/ql/test/library-tests/semmle/go/Scopes/MethodImplements.expected
index acafedf6428..8edc2f2a211 100644
--- a/ql/test/library-tests/semmle/go/Scopes/MethodImplements.expected
+++ b/ql/test/library-tests/semmle/go/Scopes/MethodImplements.expected
@@ -1,15 +1,15 @@
+| * starImpl | meth1 | * starImpl | meth1 |
+| * starImpl | meth1 | meth1Iface | meth1 |
+| * starImpl | meth1 | twoMethods | meth1 |
+| * t | bump | * t | bump |
+| * t | meth | * t | meth |
+| * t | meth | iHaveAMethod | meth |
+| * t | meth | iHaveARedeclaredMethod | meth |
| iHaveAMethod | meth | iHaveAMethod | meth |
| iHaveARedeclaredMethod | meth | iHaveARedeclaredMethod | meth |
| meth1Iface | meth1 | meth1Iface | meth1 |
| notImpl | meth1 | notImpl | meth1 |
| notImpl | meth2 | notImpl | meth2 |
-| pointer type | bump | pointer type | bump |
-| pointer type | meth | iHaveAMethod | meth |
-| pointer type | meth | iHaveARedeclaredMethod | meth |
-| pointer type | meth | pointer type | meth |
-| pointer type | meth1 | meth1Iface | meth1 |
-| pointer type | meth1 | pointer type | meth1 |
-| pointer type | meth1 | twoMethods | meth1 |
| starImpl | meth2 | starImpl | meth2 |
| starImpl | meth2 | twoMethods | meth2 |
| twoMethods | meth1 | twoMethods | meth1 |
diff --git a/ql/test/library-tests/semmle/go/Scopes/MethodImplements.ql b/ql/test/library-tests/semmle/go/Scopes/MethodImplements.ql
index 7def17705e0..eefc289dde5 100644
--- a/ql/test/library-tests/semmle/go/Scopes/MethodImplements.ql
+++ b/ql/test/library-tests/semmle/go/Scopes/MethodImplements.ql
@@ -2,4 +2,4 @@ import go
from Method m, Method im
where m.implements(im) and m.getPackage().getName() = "main"
-select m.getReceiverType(), m.getName(), im.getReceiverType(), im.getName()
+select m.getReceiverType().pp(), m.getName(), im.getReceiverType().pp(), im.getName()
diff --git a/ql/test/library-tests/semmle/go/Scopes/MethodImplementsName.expected b/ql/test/library-tests/semmle/go/Scopes/MethodImplementsName.expected
index 05b655129ae..021111a44f3 100644
--- a/ql/test/library-tests/semmle/go/Scopes/MethodImplementsName.expected
+++ b/ql/test/library-tests/semmle/go/Scopes/MethodImplementsName.expected
@@ -1,17 +1,17 @@
+| * starImpl | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | meth1Iface | meth1 |
+| * starImpl | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | starImpl | meth1 |
+| * starImpl | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | twoMethods | meth1 |
+| * starImpl | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | twoMethodsEmbedded | meth1 |
+| * t | bump | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | t | bump |
+| * t | meth | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | iHaveAMethod | meth |
+| * t | meth | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | iHaveARedeclaredMethod | meth |
+| * t | meth | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | t | meth |
| iHaveAMethod | meth | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | iHaveAMethod | meth |
| iHaveARedeclaredMethod | meth | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | iHaveARedeclaredMethod | meth |
| meth1Iface | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | meth1Iface | meth1 |
| meth1Iface | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | twoMethodsEmbedded | meth1 |
| notImpl | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | notImpl | meth1 |
| notImpl | meth2 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | notImpl | meth2 |
-| pointer type | bump | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | t | bump |
-| pointer type | meth | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | iHaveAMethod | meth |
-| pointer type | meth | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | iHaveARedeclaredMethod | meth |
-| pointer type | meth | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | t | meth |
-| pointer type | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | meth1Iface | meth1 |
-| pointer type | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | starImpl | meth1 |
-| pointer type | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | twoMethods | meth1 |
-| pointer type | meth1 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | twoMethodsEmbedded | meth1 |
| starImpl | meth2 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | starImpl | meth2 |
| starImpl | meth2 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | twoMethods | meth2 |
| starImpl | meth2 | github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes | twoMethodsEmbedded | meth2 |
diff --git a/ql/test/library-tests/semmle/go/Scopes/MethodImplementsName.ql b/ql/test/library-tests/semmle/go/Scopes/MethodImplementsName.ql
index 6b688522c2d..d8ad9ee4122 100644
--- a/ql/test/library-tests/semmle/go/Scopes/MethodImplementsName.ql
+++ b/ql/test/library-tests/semmle/go/Scopes/MethodImplementsName.ql
@@ -4,4 +4,4 @@ from Method m, string pkg, string tp, string name
where
m.implements(pkg, tp, name) and
m.hasQualifiedName("github.com/github/codeql-go/ql/test/library-tests/semmle/go/Scopes", _, _)
-select m.getReceiverType(), m.getName(), pkg, tp, name
+select m.getReceiverType().pp(), m.getName(), pkg, tp, name
From fbee7fe9836dfbe4e3caaf80a59bff2dc27c5500 Mon Sep 17 00:00:00 2001
From: Owen Mansel-Chan
Date: Wed, 13 May 2020 09:43:50 +0100
Subject: [PATCH 110/157] Add new query for redundant calls to recover
---
change-notes/2020-05-18-redundant-recover.md | 2 +
ql/src/RedundantCode/RedundantRecover.qhelp | 53 +++++++++++++++++++
ql/src/RedundantCode/RedundantRecover.ql | 33 ++++++++++++
ql/src/RedundantCode/RedundantRecover1.go | 16 ++++++
ql/src/RedundantCode/RedundantRecover1Good.go | 14 +++++
ql/src/RedundantCode/RedundantRecover2.go | 6 +++
ql/src/RedundantCode/RedundantRecover2Good.go | 6 +++
.../RedundantRecover.expected | 3 ++
.../RedundantRecover/RedundantRecover.qlref | 1 +
.../RedundantRecover/RedundantRecover1.go | 16 ++++++
.../RedundantRecover/RedundantRecover1Good.go | 14 +++++
.../RedundantRecover/RedundantRecover2.go | 6 +++
.../RedundantRecover/RedundantRecover2Good.go | 6 +++
.../RedundantCode/RedundantRecover/tst.go | 49 +++++++++++++++++
14 files changed, 225 insertions(+)
create mode 100644 change-notes/2020-05-18-redundant-recover.md
create mode 100644 ql/src/RedundantCode/RedundantRecover.qhelp
create mode 100644 ql/src/RedundantCode/RedundantRecover.ql
create mode 100644 ql/src/RedundantCode/RedundantRecover1.go
create mode 100644 ql/src/RedundantCode/RedundantRecover1Good.go
create mode 100644 ql/src/RedundantCode/RedundantRecover2.go
create mode 100644 ql/src/RedundantCode/RedundantRecover2Good.go
create mode 100644 ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover.expected
create mode 100644 ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover.qlref
create mode 100644 ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover1.go
create mode 100644 ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover1Good.go
create mode 100644 ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover2.go
create mode 100644 ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover2Good.go
create mode 100644 ql/test/query-tests/RedundantCode/RedundantRecover/tst.go
diff --git a/change-notes/2020-05-18-redundant-recover.md b/change-notes/2020-05-18-redundant-recover.md
new file mode 100644
index 00000000000..5b8151be60b
--- /dev/null
+++ b/change-notes/2020-05-18-redundant-recover.md
@@ -0,0 +1,2 @@
+lgtm,codescanning
+* A new query go/redundant-recover has been added to detect redundant calls to recover.
diff --git a/ql/src/RedundantCode/RedundantRecover.qhelp b/ql/src/RedundantCode/RedundantRecover.qhelp
new file mode 100644
index 00000000000..0033b640df2
--- /dev/null
+++ b/ql/src/RedundantCode/RedundantRecover.qhelp
@@ -0,0 +1,53 @@
+
+
+
+
+
+The built-in recover function is only useful inside deferred
+functions. Calling it in a function that is never deferred means that it will
+always return nil and it will never regain control of a panicking
+goroutine. The same is true of calling recover directly in a defer
+statement.
+
+
+
+
+
+Carefully inspect the code to determine whether it is a mistake that should be
+fixed.
+
+
+
+
+
+In the example below, the function fun1 is intended to recover
+from the panic. However, the function that is deferred calls another function,
+which then calls recover:
+
+
+
+This problem can be deferring the call to the function which callsrecover:
+
+
+
+
+In the following example, recover is called directly in a defer
+statement, which has no effect, so the panic is not caught.
+
+
+
+We can fix this by instead deferring an anonymous function which calls
+recover.
+
+
+
+
+
+
+ Defer, Panic, and Recover - The Go Blog.
+
+
+
+
diff --git a/ql/src/RedundantCode/RedundantRecover.ql b/ql/src/RedundantCode/RedundantRecover.ql
new file mode 100644
index 00000000000..58ab5fb1149
--- /dev/null
+++ b/ql/src/RedundantCode/RedundantRecover.ql
@@ -0,0 +1,33 @@
+/**
+ * @name Redundant call to recover
+ * @description Calling 'recover' in a function which isn't called using a defer
+ * statement has no effect. Also, putting 'recover' directly in a
+ * defer statement has no effect.
+ * @kind problem
+ * @problem.severity warning
+ * @id go/redundant-recover
+ * @tags maintainability
+ * correctness
+ * @precision high
+ */
+
+import go
+
+predicate isDeferred(DataFlow::CallNode call) {
+ exists(DeferStmt defer | defer.getCall() = call.asExpr())
+}
+
+from DataFlow::CallNode recoverCall, FuncDef f, string msg
+where
+ recoverCall.getTarget() = Builtin::recover() and
+ f = recoverCall.getEnclosingCallable() and
+ (
+ isDeferred(recoverCall) and
+ msg = "Deferred calls to 'recover' have no effect"
+ or
+ not isDeferred(recoverCall) and
+ exists(f.getACall()) and
+ not isDeferred(f.getACall()) and
+ msg = "This call to 'recover' has no effect because $@ is never called using a defer statement."
+ )
+select recoverCall, msg, f, f.getName()
diff --git a/ql/src/RedundantCode/RedundantRecover1.go b/ql/src/RedundantCode/RedundantRecover1.go
new file mode 100644
index 00000000000..d058dd0dfde
--- /dev/null
+++ b/ql/src/RedundantCode/RedundantRecover1.go
@@ -0,0 +1,16 @@
+package main
+
+import "fmt"
+
+func callRecover1() {
+ if recover() != nil {
+ fmt.Printf("recovered")
+ }
+}
+
+func fun1() {
+ defer func() {
+ callRecover1()
+ }()
+ panic("1")
+}
diff --git a/ql/src/RedundantCode/RedundantRecover1Good.go b/ql/src/RedundantCode/RedundantRecover1Good.go
new file mode 100644
index 00000000000..b017e050dc4
--- /dev/null
+++ b/ql/src/RedundantCode/RedundantRecover1Good.go
@@ -0,0 +1,14 @@
+package main
+
+import "fmt"
+
+func callRecover1Good() {
+ if recover() != nil {
+ fmt.Printf("recovered")
+ }
+}
+
+func fun1Good() {
+ defer callRecover1Good()
+ panic("1")
+}
diff --git a/ql/src/RedundantCode/RedundantRecover2.go b/ql/src/RedundantCode/RedundantRecover2.go
new file mode 100644
index 00000000000..4365cb7c9fe
--- /dev/null
+++ b/ql/src/RedundantCode/RedundantRecover2.go
@@ -0,0 +1,6 @@
+package main
+
+func fun2() {
+ defer recover()
+ panic("2")
+}
diff --git a/ql/src/RedundantCode/RedundantRecover2Good.go b/ql/src/RedundantCode/RedundantRecover2Good.go
new file mode 100644
index 00000000000..d34e5c82b63
--- /dev/null
+++ b/ql/src/RedundantCode/RedundantRecover2Good.go
@@ -0,0 +1,6 @@
+package main
+
+func fun2Good() {
+ defer func() { recover() }()
+ panic("2")
+}
diff --git a/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover.expected b/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover.expected
new file mode 100644
index 00000000000..88545b5a503
--- /dev/null
+++ b/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover.expected
@@ -0,0 +1,3 @@
+| RedundantRecover1.go:6:5:6:13 | call to recover | This call to 'recover' has no effect because $@ is never called using a defer statement. | RedundantRecover1.go:5:1:9:1 | function declaration | callRecover1 |
+| RedundantRecover2.go:4:8:4:16 | call to recover | Deferred calls to 'recover' have no effect | RedundantRecover2.go:3:1:6:1 | function declaration | fun2 |
+| tst.go:8:5:8:13 | call to recover | This call to 'recover' has no effect because $@ is never called using a defer statement. | tst.go:5:1:11:1 | function declaration | callRecover3 |
diff --git a/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover.qlref b/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover.qlref
new file mode 100644
index 00000000000..c8997068734
--- /dev/null
+++ b/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover.qlref
@@ -0,0 +1 @@
+RedundantCode/RedundantRecover.ql
diff --git a/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover1.go b/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover1.go
new file mode 100644
index 00000000000..d058dd0dfde
--- /dev/null
+++ b/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover1.go
@@ -0,0 +1,16 @@
+package main
+
+import "fmt"
+
+func callRecover1() {
+ if recover() != nil {
+ fmt.Printf("recovered")
+ }
+}
+
+func fun1() {
+ defer func() {
+ callRecover1()
+ }()
+ panic("1")
+}
diff --git a/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover1Good.go b/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover1Good.go
new file mode 100644
index 00000000000..b017e050dc4
--- /dev/null
+++ b/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover1Good.go
@@ -0,0 +1,14 @@
+package main
+
+import "fmt"
+
+func callRecover1Good() {
+ if recover() != nil {
+ fmt.Printf("recovered")
+ }
+}
+
+func fun1Good() {
+ defer callRecover1Good()
+ panic("1")
+}
diff --git a/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover2.go b/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover2.go
new file mode 100644
index 00000000000..4365cb7c9fe
--- /dev/null
+++ b/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover2.go
@@ -0,0 +1,6 @@
+package main
+
+func fun2() {
+ defer recover()
+ panic("2")
+}
diff --git a/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover2Good.go b/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover2Good.go
new file mode 100644
index 00000000000..d34e5c82b63
--- /dev/null
+++ b/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover2Good.go
@@ -0,0 +1,6 @@
+package main
+
+func fun2Good() {
+ defer func() { recover() }()
+ panic("2")
+}
diff --git a/ql/test/query-tests/RedundantCode/RedundantRecover/tst.go b/ql/test/query-tests/RedundantCode/RedundantRecover/tst.go
new file mode 100644
index 00000000000..0533a060931
--- /dev/null
+++ b/ql/test/query-tests/RedundantCode/RedundantRecover/tst.go
@@ -0,0 +1,49 @@
+package main
+
+import "fmt"
+
+func callRecover3() {
+ // This will have no effect because panics do not propagate down the stack,
+ // only back up the stack
+ if recover() != nil {
+ fmt.Printf("recovered")
+ }
+}
+
+func fun3() {
+ panic("3")
+ callRecover3()
+}
+
+func callRecover4() {
+ // This is not flagged because callRecover4 is called in a defer statement
+ // at least once
+ if recover() != nil {
+ fmt.Printf("recovered")
+ }
+}
+
+func fun4a() {
+ panic("4")
+ callRecover4()
+}
+
+func fun4b() {
+ defer callRecover4()
+ panic("4")
+}
+
+func neverCalled() {
+ // This will not be flagged because it is not called from anywhere
+ if recover() != nil {
+ fmt.Printf("recovered")
+ }
+}
+
+func main() {
+ fun1()
+ fun2()
+ fun3()
+ fun4a()
+ fun4b()
+}
From 23a7db5d4d22294a15ab998cf7bbe2f0e8f46858 Mon Sep 17 00:00:00 2001
From: Owen Mansel-Chan
Date: Mon, 18 May 2020 17:05:49 +0100
Subject: [PATCH 111/157] Minor textual corrections
---
ql/src/RedundantCode/RedundantRecover.qhelp | 3 ++-
ql/src/RedundantCode/RedundantRecover.ql | 4 ++--
.../RedundantRecover/RedundantRecover.expected | 6 +++---
3 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/ql/src/RedundantCode/RedundantRecover.qhelp b/ql/src/RedundantCode/RedundantRecover.qhelp
index 0033b640df2..7470b63c898 100644
--- a/ql/src/RedundantCode/RedundantRecover.qhelp
+++ b/ql/src/RedundantCode/RedundantRecover.qhelp
@@ -28,7 +28,8 @@ which then calls recover:
-This problem can be deferring the call to the function which callsrecover:
+This problem can be fixed by deferring the call to the function which calls
+recover:
diff --git a/ql/src/RedundantCode/RedundantRecover.ql b/ql/src/RedundantCode/RedundantRecover.ql
index 58ab5fb1149..7e509bc4862 100644
--- a/ql/src/RedundantCode/RedundantRecover.ql
+++ b/ql/src/RedundantCode/RedundantRecover.ql
@@ -23,11 +23,11 @@ where
f = recoverCall.getEnclosingCallable() and
(
isDeferred(recoverCall) and
- msg = "Deferred calls to 'recover' have no effect"
+ msg = "Deferred calls to 'recover' have no effect."
or
not isDeferred(recoverCall) and
exists(f.getACall()) and
not isDeferred(f.getACall()) and
msg = "This call to 'recover' has no effect because $@ is never called using a defer statement."
)
-select recoverCall, msg, f, f.getName()
+select recoverCall, msg, f, "the enclosing function"
diff --git a/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover.expected b/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover.expected
index 88545b5a503..fdc175c8cf0 100644
--- a/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover.expected
+++ b/ql/test/query-tests/RedundantCode/RedundantRecover/RedundantRecover.expected
@@ -1,3 +1,3 @@
-| RedundantRecover1.go:6:5:6:13 | call to recover | This call to 'recover' has no effect because $@ is never called using a defer statement. | RedundantRecover1.go:5:1:9:1 | function declaration | callRecover1 |
-| RedundantRecover2.go:4:8:4:16 | call to recover | Deferred calls to 'recover' have no effect | RedundantRecover2.go:3:1:6:1 | function declaration | fun2 |
-| tst.go:8:5:8:13 | call to recover | This call to 'recover' has no effect because $@ is never called using a defer statement. | tst.go:5:1:11:1 | function declaration | callRecover3 |
+| RedundantRecover1.go:6:5:6:13 | call to recover | This call to 'recover' has no effect because $@ is never called using a defer statement. | RedundantRecover1.go:5:1:9:1 | function declaration | the enclosing function |
+| RedundantRecover2.go:4:8:4:16 | call to recover | Deferred calls to 'recover' have no effect. | RedundantRecover2.go:3:1:6:1 | function declaration | the enclosing function |
+| tst.go:8:5:8:13 | call to recover | This call to 'recover' has no effect because $@ is never called using a defer statement. | tst.go:5:1:11:1 | function declaration | the enclosing function |
From 275be36e4aa7c0697d0bc1b9a5e2fdf639bdb915 Mon Sep 17 00:00:00 2001
From: Owen Mansel-Chan <62447351+owen-mc@users.noreply.github.com>
Date: Tue, 19 May 2020 06:31:47 +0100
Subject: [PATCH 112/157] Update change-notes/2020-05-18-redundant-recover.md
Co-authored-by: Shati Patel <42641846+shati-patel@users.noreply.github.com>
---
change-notes/2020-05-18-redundant-recover.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/change-notes/2020-05-18-redundant-recover.md b/change-notes/2020-05-18-redundant-recover.md
index 5b8151be60b..cca5e8fe490 100644
--- a/change-notes/2020-05-18-redundant-recover.md
+++ b/change-notes/2020-05-18-redundant-recover.md
@@ -1,2 +1,2 @@
lgtm,codescanning
-* A new query go/redundant-recover has been added to detect redundant calls to recover.
+* A new query "Redundant call to recover" (`go/redundant-recover`) has been added. The query detects calls to `recover` that have no effect.
From 2b5989cff2153d5aa2313f21dad8e74ae64a63c5 Mon Sep 17 00:00:00 2001
From: Porcupiney Hairs
Date: Tue, 21 Apr 2020 03:43:08 +0530
Subject: [PATCH 113/157] Add improvements for codeql-go SSRF query
---
ql/src/go.qll | 1 +
ql/src/semmle/go/Packages.qll | 9 +
.../semmle/go/dataflow/BarrierGuardUtil.qll | 69 +
ql/src/semmle/go/frameworks/Websocket.qll | 134 ++
.../OpenUrlRedirectCustomizations.qll | 59 +-
.../security/RequestForgeryCustomizations.qll | 37 +
.../semmle/go/Packages/packagePredicate.go | 22 +
.../semmle/go/Packages/predicate.expected | 2 +
.../semmle/go/Packages/predicate.ql | 8 +
.../Websocket/DialFunction.expected | 9 +
.../go/frameworks/Websocket/DialFunction.go | 39 +
.../go/frameworks/Websocket/DialFunction.ql | 4 +
.../semmle/go/frameworks/Websocket/go.mod | 12 +
.../vendor/github.com/gobwas/httphead/LICENSE | 21 +
.../github.com/gobwas/httphead/README.md | 63 +
.../github.com/gobwas/httphead/cookie.go | 200 +++
.../vendor/github.com/gobwas/httphead/head.go | 275 ++++
.../github.com/gobwas/httphead/httphead.go | 331 +++++
.../github.com/gobwas/httphead/lexer.go | 360 +++++
.../github.com/gobwas/httphead/octet.go | 83 ++
.../github.com/gobwas/httphead/option.go | 187 +++
.../github.com/gobwas/httphead/writer.go | 101 ++
.../vendor/github.com/gobwas/pool/README.md | 107 ++
.../vendor/github.com/gobwas/pool/generic.go | 87 ++
.../gobwas/pool/internal/pmath/pmath.go | 65 +
.../vendor/github.com/gobwas/pool/option.go | 43 +
.../github.com/gobwas/pool/pbufio/pbufio.go | 106 ++
.../gobwas/pool/pbufio/pbufio_go110.go | 13 +
.../gobwas/pool/pbufio/pbufio_go19.go | 27 +
.../vendor/github.com/gobwas/pool/pool.go | 25 +
.../vendor/github.com/gobwas/ws/.gitignore | 5 +
.../vendor/github.com/gobwas/ws/.travis.yml | 25 +
.../vendor/github.com/gobwas/ws/LICENSE | 21 +
.../vendor/github.com/gobwas/ws/Makefile | 47 +
.../vendor/github.com/gobwas/ws/README.md | 360 +++++
.../vendor/github.com/gobwas/ws/check.go | 145 ++
.../vendor/github.com/gobwas/ws/cipher.go | 59 +
.../vendor/github.com/gobwas/ws/dialer.go | 556 ++++++++
.../github.com/gobwas/ws/dialer_tls_go17.go | 35 +
.../github.com/gobwas/ws/dialer_tls_go18.go | 9 +
.../vendor/github.com/gobwas/ws/doc.go | 81 ++
.../vendor/github.com/gobwas/ws/errors.go | 54 +
.../vendor/github.com/gobwas/ws/frame.go | 389 ++++++
.../vendor/github.com/gobwas/ws/http.go | 468 +++++++
.../vendor/github.com/gobwas/ws/nonce.go | 80 ++
.../vendor/github.com/gobwas/ws/read.go | 147 ++
.../vendor/github.com/gobwas/ws/server.go | 607 +++++++++
.../vendor/github.com/gobwas/ws/server_test.s | 0
.../vendor/github.com/gobwas/ws/util.go | 214 +++
.../vendor/github.com/gobwas/ws/write.go | 104 ++
.../github.com/gorilla/websocket/.gitignore | 25 +
.../github.com/gorilla/websocket/AUTHORS | 9 +
.../github.com/gorilla/websocket/LICENSE | 22 +
.../github.com/gorilla/websocket/README.md | 64 +
.../github.com/gorilla/websocket/client.go | 395 ++++++
.../gorilla/websocket/client_clone.go | 16 +
.../gorilla/websocket/client_clone_legacy.go | 38 +
.../gorilla/websocket/compression.go | 148 ++
.../github.com/gorilla/websocket/conn.go | 1201 +++++++++++++++++
.../gorilla/websocket/conn_write.go | 15 +
.../gorilla/websocket/conn_write_legacy.go | 18 +
.../github.com/gorilla/websocket/doc.go | 227 ++++
.../github.com/gorilla/websocket/go.mod | 3 +
.../github.com/gorilla/websocket/join.go | 42 +
.../github.com/gorilla/websocket/json.go | 60 +
.../github.com/gorilla/websocket/mask.go | 54 +
.../github.com/gorilla/websocket/mask_safe.go | 15 +
.../github.com/gorilla/websocket/prepared.go | 102 ++
.../github.com/gorilla/websocket/proxy.go | 77 ++
.../github.com/gorilla/websocket/server.go | 363 +++++
.../github.com/gorilla/websocket/trace.go | 19 +
.../github.com/gorilla/websocket/trace_17.go | 12 +
.../github.com/gorilla/websocket/util.go | 283 ++++
.../gorilla/websocket/x_net_proxy.go | 473 +++++++
.../github.com/klauspost/compress/LICENSE | 28 +
.../klauspost/compress/flate/deflate.go | 819 +++++++++++
.../klauspost/compress/flate/dict_decoder.go | 184 +++
.../klauspost/compress/flate/fast_encoder.go | 254 ++++
.../klauspost/compress/flate/gen_inflate.go | 274 ++++
.../compress/flate/huffman_bit_writer.go | 911 +++++++++++++
.../klauspost/compress/flate/huffman_code.go | 363 +++++
.../compress/flate/huffman_sortByFreq.go | 178 +++
.../compress/flate/huffman_sortByLiteral.go | 201 +++
.../klauspost/compress/flate/inflate.go | 1000 ++++++++++++++
.../klauspost/compress/flate/inflate_gen.go | 922 +++++++++++++
.../klauspost/compress/flate/level1.go | 179 +++
.../klauspost/compress/flate/level2.go | 205 +++
.../klauspost/compress/flate/level3.go | 229 ++++
.../klauspost/compress/flate/level4.go | 212 +++
.../klauspost/compress/flate/level5.go | 279 ++++
.../klauspost/compress/flate/level6.go | 282 ++++
.../klauspost/compress/flate/stateless.go | 297 ++++
.../klauspost/compress/flate/token.go | 375 +++++
.../github.com/sacOO7/go-logger/.gitignore | 12 +
.../github.com/sacOO7/go-logger/LICENSE | 21 +
.../github.com/sacOO7/go-logger/logging.go | 82 ++
.../github.com/sacOO7/go-logger/loggingL.go | 13 +
.../sacOO7/go-logger/logginglevel_string.go | 16 +
.../github.com/sacOO7/gowebsocket/.gitignore | 21 +
.../github.com/sacOO7/gowebsocket/LICENSE | 201 +++
.../github.com/sacOO7/gowebsocket/README.md | 157 +++
.../sacOO7/gowebsocket/gowebsocket.go | 186 +++
.../github.com/sacOO7/gowebsocket/utils.go | 15 +
.../Websocket/vendor/golang.org/x/net/AUTHORS | 3 +
.../vendor/golang.org/x/net/CONTRIBUTORS | 3 +
.../Websocket/vendor/golang.org/x/net/LICENSE | 27 +
.../Websocket/vendor/golang.org/x/net/PATENTS | 22 +
.../golang.org/x/net/websocket/client.go | 106 ++
.../vendor/golang.org/x/net/websocket/dial.go | 24 +
.../vendor/golang.org/x/net/websocket/hybi.go | 583 ++++++++
.../golang.org/x/net/websocket/server.go | 113 ++
.../golang.org/x/net/websocket/websocket.go | 451 +++++++
.../frameworks/Websocket/vendor/modules.txt | 30 +
.../vendor/nhooyr.io/websocket/.gitignore | 1 +
.../vendor/nhooyr.io/websocket/.travis.yml | 40 +
.../vendor/nhooyr.io/websocket/LICENSE.txt | 21 +
.../vendor/nhooyr.io/websocket/Makefile | 7 +
.../vendor/nhooyr.io/websocket/README.md | 132 ++
.../vendor/nhooyr.io/websocket/accept.go | 365 +++++
.../vendor/nhooyr.io/websocket/accept_js.go | 20 +
.../vendor/nhooyr.io/websocket/close.go | 76 ++
.../vendor/nhooyr.io/websocket/close_notjs.go | 211 +++
.../vendor/nhooyr.io/websocket/compress.go | 39 +
.../nhooyr.io/websocket/compress_notjs.go | 181 +++
.../vendor/nhooyr.io/websocket/conn.go | 13 +
.../vendor/nhooyr.io/websocket/conn_notjs.go | 265 ++++
.../vendor/nhooyr.io/websocket/dial.go | 287 ++++
.../vendor/nhooyr.io/websocket/doc.go | 32 +
.../vendor/nhooyr.io/websocket/frame.go | 294 ++++
.../vendor/nhooyr.io/websocket/go.mod | 14 +
.../websocket/internal/bpool/bpool.go | 24 +
.../nhooyr.io/websocket/internal/errd/wrap.go | 14 +
.../websocket/internal/wsjs/wsjs_js.go | 170 +++
.../nhooyr.io/websocket/internal/xsync/go.go | 25 +
.../websocket/internal/xsync/int64.go | 23 +
.../vendor/nhooyr.io/websocket/netconn.go | 166 +++
.../vendor/nhooyr.io/websocket/read.go | 471 +++++++
.../vendor/nhooyr.io/websocket/stringer.go | 91 ++
.../vendor/nhooyr.io/websocket/write.go | 386 ++++++
.../vendor/nhooyr.io/websocket/ws_js.go | 379 ++++++
.../Security/CWE-918/RequestForgery.expected | 36 +
ql/test/query-tests/Security/CWE-918/go.mod | 12 +
.../vendor/github.com/gobwas/httphead/LICENSE | 21 +
.../github.com/gobwas/httphead/README.md | 63 +
.../github.com/gobwas/httphead/cookie.go | 200 +++
.../vendor/github.com/gobwas/httphead/head.go | 275 ++++
.../github.com/gobwas/httphead/httphead.go | 331 +++++
.../github.com/gobwas/httphead/lexer.go | 360 +++++
.../github.com/gobwas/httphead/octet.go | 83 ++
.../github.com/gobwas/httphead/option.go | 187 +++
.../github.com/gobwas/httphead/writer.go | 101 ++
.../vendor/github.com/gobwas/pool/README.md | 107 ++
.../vendor/github.com/gobwas/pool/generic.go | 87 ++
.../gobwas/pool/internal/pmath/pmath.go | 65 +
.../vendor/github.com/gobwas/pool/option.go | 43 +
.../github.com/gobwas/pool/pbufio/pbufio.go | 106 ++
.../gobwas/pool/pbufio/pbufio_go110.go | 13 +
.../gobwas/pool/pbufio/pbufio_go19.go | 27 +
.../vendor/github.com/gobwas/pool/pool.go | 25 +
.../vendor/github.com/gobwas/ws/.gitignore | 5 +
.../vendor/github.com/gobwas/ws/.travis.yml | 25 +
.../vendor/github.com/gobwas/ws/LICENSE | 21 +
.../vendor/github.com/gobwas/ws/Makefile | 47 +
.../vendor/github.com/gobwas/ws/README.md | 360 +++++
.../vendor/github.com/gobwas/ws/check.go | 145 ++
.../vendor/github.com/gobwas/ws/cipher.go | 59 +
.../vendor/github.com/gobwas/ws/dialer.go | 556 ++++++++
.../github.com/gobwas/ws/dialer_tls_go17.go | 35 +
.../github.com/gobwas/ws/dialer_tls_go18.go | 9 +
.../vendor/github.com/gobwas/ws/doc.go | 81 ++
.../vendor/github.com/gobwas/ws/errors.go | 54 +
.../vendor/github.com/gobwas/ws/frame.go | 389 ++++++
.../vendor/github.com/gobwas/ws/http.go | 468 +++++++
.../vendor/github.com/gobwas/ws/nonce.go | 80 ++
.../vendor/github.com/gobwas/ws/read.go | 147 ++
.../vendor/github.com/gobwas/ws/server.go | 607 +++++++++
.../vendor/github.com/gobwas/ws/server_test.s | 0
.../vendor/github.com/gobwas/ws/util.go | 214 +++
.../vendor/github.com/gobwas/ws/write.go | 104 ++
.../github.com/gorilla/websocket/.gitignore | 25 +
.../github.com/gorilla/websocket/AUTHORS | 9 +
.../github.com/gorilla/websocket/LICENSE | 22 +
.../github.com/gorilla/websocket/README.md | 64 +
.../github.com/gorilla/websocket/client.go | 395 ++++++
.../gorilla/websocket/client_clone.go | 16 +
.../gorilla/websocket/client_clone_legacy.go | 38 +
.../gorilla/websocket/compression.go | 148 ++
.../github.com/gorilla/websocket/conn.go | 1201 +++++++++++++++++
.../gorilla/websocket/conn_write.go | 15 +
.../gorilla/websocket/conn_write_legacy.go | 18 +
.../github.com/gorilla/websocket/doc.go | 227 ++++
.../github.com/gorilla/websocket/go.mod | 3 +
.../github.com/gorilla/websocket/join.go | 42 +
.../github.com/gorilla/websocket/json.go | 60 +
.../github.com/gorilla/websocket/mask.go | 54 +
.../github.com/gorilla/websocket/mask_safe.go | 15 +
.../github.com/gorilla/websocket/prepared.go | 102 ++
.../github.com/gorilla/websocket/proxy.go | 77 ++
.../github.com/gorilla/websocket/server.go | 363 +++++
.../github.com/gorilla/websocket/trace.go | 19 +
.../github.com/gorilla/websocket/trace_17.go | 12 +
.../github.com/gorilla/websocket/util.go | 283 ++++
.../gorilla/websocket/x_net_proxy.go | 473 +++++++
.../github.com/klauspost/compress/LICENSE | 28 +
.../klauspost/compress/flate/deflate.go | 819 +++++++++++
.../klauspost/compress/flate/dict_decoder.go | 184 +++
.../klauspost/compress/flate/fast_encoder.go | 254 ++++
.../klauspost/compress/flate/gen_inflate.go | 274 ++++
.../compress/flate/huffman_bit_writer.go | 911 +++++++++++++
.../klauspost/compress/flate/huffman_code.go | 363 +++++
.../compress/flate/huffman_sortByFreq.go | 178 +++
.../compress/flate/huffman_sortByLiteral.go | 201 +++
.../klauspost/compress/flate/inflate.go | 1000 ++++++++++++++
.../klauspost/compress/flate/inflate_gen.go | 922 +++++++++++++
.../klauspost/compress/flate/level1.go | 179 +++
.../klauspost/compress/flate/level2.go | 205 +++
.../klauspost/compress/flate/level3.go | 229 ++++
.../klauspost/compress/flate/level4.go | 212 +++
.../klauspost/compress/flate/level5.go | 279 ++++
.../klauspost/compress/flate/level6.go | 282 ++++
.../klauspost/compress/flate/stateless.go | 297 ++++
.../klauspost/compress/flate/token.go | 375 +++++
.../github.com/sacOO7/go-logger/.gitignore | 12 +
.../github.com/sacOO7/go-logger/LICENSE | 21 +
.../github.com/sacOO7/go-logger/logging.go | 82 ++
.../github.com/sacOO7/go-logger/loggingL.go | 13 +
.../sacOO7/go-logger/logginglevel_string.go | 16 +
.../github.com/sacOO7/gowebsocket/.gitignore | 21 +
.../github.com/sacOO7/gowebsocket/LICENSE | 201 +++
.../github.com/sacOO7/gowebsocket/README.md | 157 +++
.../sacOO7/gowebsocket/gowebsocket.go | 186 +++
.../github.com/sacOO7/gowebsocket/utils.go | 15 +
.../CWE-918/vendor/golang.org/x/net/AUTHORS | 3 +
.../vendor/golang.org/x/net/CONTRIBUTORS | 3 +
.../CWE-918/vendor/golang.org/x/net/LICENSE | 27 +
.../CWE-918/vendor/golang.org/x/net/PATENTS | 22 +
.../golang.org/x/net/websocket/client.go | 106 ++
.../vendor/golang.org/x/net/websocket/dial.go | 24 +
.../vendor/golang.org/x/net/websocket/hybi.go | 583 ++++++++
.../golang.org/x/net/websocket/server.go | 113 ++
.../golang.org/x/net/websocket/websocket.go | 451 +++++++
.../Security/CWE-918/vendor/modules.txt | 30 +
.../vendor/nhooyr.io/websocket/.gitignore | 1 +
.../vendor/nhooyr.io/websocket/.travis.yml | 40 +
.../vendor/nhooyr.io/websocket/LICENSE.txt | 21 +
.../vendor/nhooyr.io/websocket/Makefile | 7 +
.../vendor/nhooyr.io/websocket/README.md | 132 ++
.../vendor/nhooyr.io/websocket/accept.go | 365 +++++
.../vendor/nhooyr.io/websocket/accept_js.go | 20 +
.../vendor/nhooyr.io/websocket/close.go | 76 ++
.../vendor/nhooyr.io/websocket/close_notjs.go | 211 +++
.../vendor/nhooyr.io/websocket/compress.go | 39 +
.../nhooyr.io/websocket/compress_notjs.go | 181 +++
.../vendor/nhooyr.io/websocket/conn.go | 13 +
.../vendor/nhooyr.io/websocket/conn_notjs.go | 265 ++++
.../vendor/nhooyr.io/websocket/dial.go | 287 ++++
.../CWE-918/vendor/nhooyr.io/websocket/doc.go | 32 +
.../vendor/nhooyr.io/websocket/frame.go | 294 ++++
.../CWE-918/vendor/nhooyr.io/websocket/go.mod | 14 +
.../websocket/internal/bpool/bpool.go | 24 +
.../nhooyr.io/websocket/internal/errd/wrap.go | 14 +
.../websocket/internal/wsjs/wsjs_js.go | 170 +++
.../nhooyr.io/websocket/internal/xsync/go.go | 25 +
.../websocket/internal/xsync/int64.go | 23 +
.../vendor/nhooyr.io/websocket/netconn.go | 166 +++
.../vendor/nhooyr.io/websocket/read.go | 471 +++++++
.../vendor/nhooyr.io/websocket/stringer.go | 91 ++
.../vendor/nhooyr.io/websocket/write.go | 386 ++++++
.../vendor/nhooyr.io/websocket/ws_js.go | 379 ++++++
.../query-tests/Security/CWE-918/websocket.go | 203 +++
270 files changed, 45019 insertions(+), 49 deletions(-)
create mode 100644 ql/src/semmle/go/dataflow/BarrierGuardUtil.qll
create mode 100644 ql/src/semmle/go/frameworks/Websocket.qll
create mode 100644 ql/test/library-tests/semmle/go/Packages/packagePredicate.go
create mode 100644 ql/test/library-tests/semmle/go/Packages/predicate.expected
create mode 100644 ql/test/library-tests/semmle/go/Packages/predicate.ql
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/DialFunction.expected
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/DialFunction.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/DialFunction.ql
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/go.mod
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/LICENSE
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/README.md
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/cookie.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/head.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/httphead.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/lexer.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/octet.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/option.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/writer.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/README.md
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/generic.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/internal/pmath/pmath.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/option.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio_go110.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio_go19.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pool.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/.gitignore
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/.travis.yml
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/LICENSE
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/Makefile
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/README.md
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/check.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/cipher.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer_tls_go17.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer_tls_go18.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/doc.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/errors.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/frame.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/http.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/nonce.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/read.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/server.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/server_test.s
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/util.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/write.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/.gitignore
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/AUTHORS
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/LICENSE
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/README.md
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client_clone.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client_clone_legacy.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/compression.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn_write.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn_write_legacy.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/doc.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/go.mod
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/join.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/json.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/mask.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/mask_safe.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/prepared.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/proxy.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/server.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/trace.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/trace_17.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/util.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/x_net_proxy.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/LICENSE
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/deflate.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/dict_decoder.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/fast_encoder.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/gen_inflate.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_code.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/inflate.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/inflate_gen.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level1.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level2.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level3.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level4.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level5.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level6.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/stateless.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/token.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/.gitignore
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/LICENSE
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/logging.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/loggingL.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/logginglevel_string.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/.gitignore
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/LICENSE
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/README.md
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/gowebsocket.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/utils.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/AUTHORS
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/CONTRIBUTORS
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/LICENSE
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/PATENTS
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/client.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/dial.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/hybi.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/server.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/websocket.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/modules.txt
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/.gitignore
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/.travis.yml
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/LICENSE.txt
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/Makefile
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/README.md
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/accept.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/accept_js.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/close.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/close_notjs.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/compress.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/compress_notjs.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/conn.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/conn_notjs.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/dial.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/doc.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/frame.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/go.mod
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/bpool/bpool.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/errd/wrap.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/xsync/go.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/xsync/int64.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/netconn.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/read.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/stringer.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/write.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/ws_js.go
create mode 100644 ql/test/query-tests/Security/CWE-918/go.mod
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/LICENSE
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/README.md
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/cookie.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/head.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/httphead.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/lexer.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/octet.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/option.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/writer.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/README.md
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/generic.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/internal/pmath/pmath.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/option.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/pbufio/pbufio.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/pbufio/pbufio_go110.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/pbufio/pbufio_go19.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/pool.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/.gitignore
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/.travis.yml
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/LICENSE
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/Makefile
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/README.md
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/check.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/cipher.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/dialer.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/dialer_tls_go17.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/dialer_tls_go18.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/doc.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/errors.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/frame.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/http.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/nonce.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/read.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/server.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/server_test.s
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/util.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/write.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/.gitignore
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/AUTHORS
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/LICENSE
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/README.md
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/client.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/client_clone.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/client_clone_legacy.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/compression.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/conn.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/conn_write.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/conn_write_legacy.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/doc.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/go.mod
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/join.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/json.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/mask.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/mask_safe.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/prepared.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/proxy.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/server.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/trace.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/trace_17.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/util.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/x_net_proxy.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/LICENSE
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/deflate.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/dict_decoder.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/fast_encoder.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/gen_inflate.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/huffman_code.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/inflate.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/inflate_gen.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level1.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level2.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level3.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level4.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level5.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level6.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/stateless.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/token.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/.gitignore
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/LICENSE
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/logging.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/loggingL.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/logginglevel_string.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/.gitignore
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/LICENSE
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/README.md
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/gowebsocket.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/utils.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/AUTHORS
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/CONTRIBUTORS
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/LICENSE
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/PATENTS
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/client.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/dial.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/hybi.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/server.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/websocket.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/modules.txt
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/.gitignore
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/.travis.yml
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/LICENSE.txt
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/Makefile
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/README.md
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/accept.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/accept_js.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/close.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/close_notjs.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/compress.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/compress_notjs.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/conn.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/conn_notjs.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/dial.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/doc.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/frame.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/go.mod
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/bpool/bpool.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/errd/wrap.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/xsync/go.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/xsync/int64.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/netconn.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/read.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/stringer.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/write.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/ws_js.go
create mode 100644 ql/test/query-tests/Security/CWE-918/websocket.go
diff --git a/ql/src/go.qll b/ql/src/go.qll
index 524717b7544..b234154bc8e 100644
--- a/ql/src/go.qll
+++ b/ql/src/go.qll
@@ -34,5 +34,6 @@ import semmle.go.frameworks.SQL
import semmle.go.frameworks.XPath
import semmle.go.frameworks.Stdlib
import semmle.go.frameworks.Testing
+import semmle.go.frameworks.Websocket
import semmle.go.security.FlowSources
import semmle.go.Util
diff --git a/ql/src/semmle/go/Packages.qll b/ql/src/semmle/go/Packages.qll
index 6b24a3cc6ab..2187bcf47c2 100644
--- a/ql/src/semmle/go/Packages.qll
+++ b/ql/src/semmle/go/Packages.qll
@@ -24,3 +24,12 @@ class Package extends @package {
/** Gets a textual representation of this element. */
string toString() { result = "package " + getPath() }
}
+
+/**
+ * Gets the Go import string that may identify a package in module `mod` with the given path,
+ * possibly modulo semantic import versioning.
+ */
+bindingset[result, mod, path]
+string package(string mod, string path) {
+ result.regexpMatch("\\Q" + mod + "\\E([/.]v[^/]+)?/\\Q" + path + "\\E")
+}
diff --git a/ql/src/semmle/go/dataflow/BarrierGuardUtil.qll b/ql/src/semmle/go/dataflow/BarrierGuardUtil.qll
new file mode 100644
index 00000000000..c94307dd1ae
--- /dev/null
+++ b/ql/src/semmle/go/dataflow/BarrierGuardUtil.qll
@@ -0,0 +1,69 @@
+/**
+ * Contains implementations of some commonly used barrier
+ * guards for sanitizing untrusted URLs.
+ */
+
+import go
+
+/**
+ * A call to a function called `isLocalUrl`, `isValidRedirect`, or similar, which is
+ * considered a barrier guard for sanitizing untrusted URLs.
+ */
+class RedirectCheckBarrierGuard extends DataFlow::BarrierGuard, DataFlow::CallNode {
+ RedirectCheckBarrierGuard() {
+ this.getCalleeName().regexpMatch("(?i)(is_?)?(local_?url|valid_?redir(ect)?)")
+ }
+
+ override predicate checks(Expr e, boolean outcome) {
+ // `isLocalUrl(e)` is a barrier for `e` if it evaluates to `true`
+ getAnArgument().asExpr() = e and
+ outcome = true
+ }
+}
+
+/**
+ * An equality check comparing a data-flow node against a constant string, considered as
+ * a barrier guard for sanitizing untrusted URLs.
+ *
+ * Additionally, a check comparing `url.Hostname()` against a constant string is also
+ * considered a barrier guard for `url`.
+ */
+class UrlCheck extends DataFlow::BarrierGuard, DataFlow::EqualityTestNode {
+ DataFlow::Node url;
+
+ UrlCheck() {
+ exists(this.getAnOperand().getStringValue()) and
+ (
+ url = this.getAnOperand()
+ or
+ exists(DataFlow::MethodCallNode mc | mc = this.getAnOperand() |
+ mc.getTarget().getName() = "Hostname" and
+ url = mc.getReceiver()
+ )
+ )
+ }
+
+ override predicate checks(Expr e, boolean outcome) {
+ e = url.asExpr() and outcome = this.getPolarity()
+ }
+}
+
+/**
+ * A call to a regexp match function, considered as a barrier guard for sanitizing untrusted URLs.
+ *
+ * This is overapproximate: we do not attempt to reason about the correctness of the regexp.
+ */
+class RegexpCheck extends DataFlow::BarrierGuard {
+ RegexpMatchFunction matchfn;
+ DataFlow::CallNode call;
+
+ RegexpCheck() {
+ matchfn.getACall() = call and
+ this = matchfn.getResult().getNode(call).getASuccessor*()
+ }
+
+ override predicate checks(Expr e, boolean branch) {
+ e = matchfn.getValue().getNode(call).asExpr() and
+ (branch = false or branch = true)
+ }
+}
diff --git a/ql/src/semmle/go/frameworks/Websocket.qll b/ql/src/semmle/go/frameworks/Websocket.qll
new file mode 100644
index 00000000000..c16555da39f
--- /dev/null
+++ b/ql/src/semmle/go/frameworks/Websocket.qll
@@ -0,0 +1,134 @@
+/** Provides classes for working with WebSocket-related APIs. */
+
+import go
+
+/**
+ * A data-flow node that establishes a new WebSocket connection.
+ *
+ * Extend this class to refine existing API models. If you want to model new APIs,
+ * extend `WebSocketRequestCall::Range` instead.
+ */
+class WebSocketRequestCall extends DataFlow::CallNode {
+ WebSocketRequestCall::Range self;
+
+ WebSocketRequestCall() { this = self }
+
+ /** Gets the URL of the request. */
+ DataFlow::Node getRequestUrl() { result = self.getRequestUrl() }
+}
+
+/** Provides classes for working with WebSocket request functions. */
+module WebSocketRequestCall {
+ /**
+ * A data-flow node that establishes a new WebSocket connection.
+ *
+ * Extend this class to model new APIs. If you want to refine existing
+ * API models, extend `WebSocketRequestCall` instead.
+ */
+ abstract class Range extends DataFlow::CallNode {
+ /** Gets the URL of the request. */
+ abstract DataFlow::Node getRequestUrl();
+ }
+
+ /**
+ * A WebSocket request expression string used in an API function of the
+ * `golang.org/x/net/websocket` package.
+ */
+ private class GolangXNetDialFunc extends Range {
+ GolangXNetDialFunc() {
+ // func Dial(url_, protocol, origin string) (ws *Conn, err error)
+ this.getTarget().hasQualifiedName(package("golang.org/x/net", "websocket"), "Dial")
+ }
+
+ override DataFlow::Node getRequestUrl() { result = this.getArgument(0) }
+ }
+
+ /**
+ * A WebSocket DialConfig expression string used in an API function
+ * of the `golang.org/x/net/websocket` package.
+ */
+ private class GolangXNetDialConfigFunc extends Range {
+ GolangXNetDialConfigFunc() {
+ // func DialConfig(config *Config) (ws *Conn, err error)
+ this.getTarget().hasQualifiedName(package("golang.org/x/net", "websocket"), "DialConfig")
+ }
+
+ override DataFlow::Node getRequestUrl() {
+ exists(DataFlow::CallNode cn |
+ // func NewConfig(server, origin string) (config *Config, err error)
+ cn.getTarget().hasQualifiedName(package("golang.org/x/net", "websocket"), "NewConfig") and
+ this.getArgument(0) = cn.getResult(0).getASuccessor*() and
+ result = cn.getArgument(0)
+ )
+ }
+ }
+
+ /**
+ * A WebSocket request expression string used in an API function
+ * of the `github.com/gorilla/websocket` package.
+ */
+ private class GorillaWebsocketDialFunc extends Range {
+ DataFlow::Node url;
+
+ GorillaWebsocketDialFunc() {
+ // func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error)
+ // func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error)
+ exists(string name, Method f |
+ f = this.getTarget() and
+ f.hasQualifiedName(package("github.com/gorilla", "websocket"), "Dialer", name)
+ |
+ name = "Dial" and this.getArgument(0) = url
+ or
+ name = "DialContext" and this.getArgument(1) = url
+ )
+ }
+
+ override DataFlow::Node getRequestUrl() { result = url }
+ }
+
+ /**
+ * A WebSocket request expression string used in an API function
+ * of the `github.com/gobwas/ws` package.
+ */
+ private class GobwasWsDialFunc extends Range {
+ GobwasWsDialFunc() {
+ // func (d Dialer) Dial(ctx context.Context, urlstr string) (conn net.Conn, br *bufio.Reader, hs Handshake, err error)
+ exists(Method m |
+ m.hasQualifiedName(package("github.com/gobwas", "ws"), "Dialer", "Dial") and
+ m = this.getTarget()
+ )
+ or
+ // func Dial(ctx context.Context, urlstr string) (net.Conn, *bufio.Reader, Handshake, error)
+ this.getTarget().hasQualifiedName(package("github.com/gobwas", "ws"), "Dial")
+ }
+
+ override DataFlow::Node getRequestUrl() { result = this.getArgument(1) }
+ }
+
+ /**
+ * A WebSocket request expression string used in an API function
+ * of the `nhooyr.io/websocket` package.
+ */
+ private class NhooyrWebsocketDialFunc extends Range {
+ NhooyrWebsocketDialFunc() {
+ // func Dial(ctx context.Context, u string, opts *DialOptions) (*Conn, *http.Response, error)
+ this.getTarget().hasQualifiedName(package("nhooyr.io", "websocket"), "Dial")
+ }
+
+ override DataFlow::Node getRequestUrl() { result = this.getArgument(1) }
+ }
+
+ /**
+ * A WebSocket request expression string used in an API function
+ * of the `github.com/sacOO7/gowebsocket` package.
+ */
+ private class SacOO7DialFunc extends Range {
+ SacOO7DialFunc() {
+ // func BuildProxy(Url string) func(*http.Request) (*url.URL, error)
+ // func New(url string) Socket
+ this.getTarget().hasQualifiedName("github.com/sacOO7/gowebsocket", ["New", "BuildProxy"])
+ }
+
+ override DataFlow::Node getRequestUrl() { result = this.getArgument(0) }
+ }
+}
diff --git a/ql/src/semmle/go/security/OpenUrlRedirectCustomizations.qll b/ql/src/semmle/go/security/OpenUrlRedirectCustomizations.qll
index ba88143eb80..f32efb143cd 100644
--- a/ql/src/semmle/go/security/OpenUrlRedirectCustomizations.qll
+++ b/ql/src/semmle/go/security/OpenUrlRedirectCustomizations.qll
@@ -7,6 +7,7 @@
import go
import UrlConcatenation
import SafeUrlFlowCustomizations
+import semmle.go.dataflow.BarrierGuardUtil
/**
* Provides extension points for customizing the taint-tracking configuration for reasoning about
@@ -104,62 +105,22 @@ module OpenUrlRedirect {
/**
* A call to a function called `isLocalUrl`, `isValidRedirect`, or similar, which is
- * considered a barrier for purposes of URL redirection.
+ * considered a barrier guard for sanitizing untrusted URLs.
*/
- class RedirectCheckBarrierGuard extends BarrierGuard, DataFlow::CallNode {
- RedirectCheckBarrierGuard() {
- this.getCalleeName().regexpMatch("(?i)(is_?)?(local_?url|valid_?redir(ect)?)")
- }
-
- override predicate checks(Expr e, boolean outcome) {
- // `isLocalUrl(e)` is a barrier for `e` if it evaluates to `true`
- getAnArgument().asExpr() = e and
- outcome = true
- }
- }
+ class RedirectCheckBarrierGuardAsBarrierGuard extends RedirectCheckBarrierGuard, BarrierGuard { }
/**
- * A check against a constant value, considered a barrier for redirection.
- */
- class EqualityTestGuard extends BarrierGuard, DataFlow::EqualityTestNode {
- DataFlow::Node url;
-
- EqualityTestGuard() {
- exists(this.getAnOperand().getStringValue()) and
- (
- url = this.getAnOperand()
- or
- exists(DataFlow::MethodCallNode mc | mc = this.getAnOperand() |
- mc.getTarget().getName() = "Hostname" and
- url = mc.getReceiver()
- )
- )
- }
-
- override predicate checks(Expr e, boolean outcome) {
- e = url.asExpr() and outcome = this.getPolarity()
- }
- }
-
- /**
- * A call to a regexp match function, considered as a barrier guard for unvalidated URLs.
+ * A call to a regexp match function, considered as a barrier guard for sanitizing untrusted URLs.
*
* This is overapproximate: we do not attempt to reason about the correctness of the regexp.
*/
- class RegexpCheck extends BarrierGuard {
- RegexpMatchFunction matchfn;
- DataFlow::CallNode call;
+ class RegexpCheckAsBarrierGuard extends RegexpCheck, BarrierGuard { }
- RegexpCheck() {
- matchfn.getACall() = call and
- this = matchfn.getResult().getNode(call).getASuccessor*()
- }
-
- override predicate checks(Expr e, boolean branch) {
- e = matchfn.getValue().getNode(call).asExpr() and
- (branch = false or branch = true)
- }
- }
+ /**
+ * A check against a constant value or the `Hostname` function,
+ * considered a barrier guard for url flow.
+ */
+ class UrlCheckAsBarrierGuard extends UrlCheck, BarrierGuard { }
}
/** A sink for an open redirect, considered as a sink for safe URL flow. */
diff --git a/ql/src/semmle/go/security/RequestForgeryCustomizations.qll b/ql/src/semmle/go/security/RequestForgeryCustomizations.qll
index aecd5e077fb..19a0852d2a7 100644
--- a/ql/src/semmle/go/security/RequestForgeryCustomizations.qll
+++ b/ql/src/semmle/go/security/RequestForgeryCustomizations.qll
@@ -5,6 +5,7 @@
import go
import UrlConcatenation
import SafeUrlFlowCustomizations
+import semmle.go.dataflow.BarrierGuardUtil
/** Provides classes and predicates for the request forgery query. */
module RequestForgery {
@@ -52,6 +53,19 @@ module RequestForgery {
override string getKind() { result = "URL" }
}
+ /**
+ * The URL of a WebSocket request, viewed as a sink for request forgery.
+ */
+ class WebSocketCallAsSink extends Sink {
+ WebSocketRequestCall request;
+
+ WebSocketCallAsSink() { this = request.getRequestUrl() }
+
+ override DataFlow::Node getARequest() { result = request }
+
+ override string getKind() { result = "WebSocket URL" }
+ }
+
/**
* A value that is the result of prepending a string that prevents any value from controlling the
* host of a URL.
@@ -59,6 +73,29 @@ module RequestForgery {
private class HostnameSanitizer extends SanitizerEdge {
HostnameSanitizer() { hostnameSanitizingPrefixEdge(this, _) }
}
+
+ /**
+ * A call to a function called `isLocalUrl`, `isValidRedirect`, or similar, which is
+ * considered a barrier guard.
+ */
+ class RedirectCheckBarrierGuardAsBarrierGuard extends RedirectCheckBarrierGuard, SanitizerGuard {
+ }
+
+ /**
+ * A call to a regexp match function, considered as a barrier guard for sanitizing untrusted URLs.
+ *
+ * This is overapproximate: we do not attempt to reason about the correctness of the regexp.
+ */
+ class RegexpCheckAsBarrierGuard extends RegexpCheck, SanitizerGuard { }
+
+ /**
+ * An equality check comparing a data-flow node against a constant string, considered as
+ * a barrier guard for sanitizing untrusted URLs.
+ *
+ * Additionally, a check comparing `url.Hostname()` against a constant string is also
+ * considered a barrier guard for `url`.
+ */
+ class UrlCheckAsBarrierGuard extends UrlCheck, SanitizerGuard { }
}
/** A sink for request forgery, considered as a sink for safe URL flow. */
diff --git a/ql/test/library-tests/semmle/go/Packages/packagePredicate.go b/ql/test/library-tests/semmle/go/Packages/packagePredicate.go
new file mode 100644
index 00000000000..dc0500dd1a3
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/Packages/packagePredicate.go
@@ -0,0 +1,22 @@
+package main
+
+import (
+ "fmt"
+
+ _ "PackageName//v//test" // Not OK
+ _ "PackageName//v/test" // Not OK
+ _ "PackageName/test" // OK
+ _ "PackageName/v//test" // Not OK
+ _ "PackageName/v/asd/v2/test" // Not OK
+ _ "PackageName/v/test" // Not OK
+
+ _ "PackageName//v2//test" // Not OK
+ _ "PackageName//v2/test" // Not OK
+ _ "PackageName/v2//test" // Not OK
+ _ "PackageName/v2/test" //OK
+)
+
+func main() {
+ pkg.Foo()
+ fmt.Println("")
+}
diff --git a/ql/test/library-tests/semmle/go/Packages/predicate.expected b/ql/test/library-tests/semmle/go/Packages/predicate.expected
new file mode 100644
index 00000000000..fb9cf20d4d2
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/Packages/predicate.expected
@@ -0,0 +1,2 @@
+| package PackageName/test | PackageName/test |
+| package PackageName/v2/test | PackageName/v2/test |
diff --git a/ql/test/library-tests/semmle/go/Packages/predicate.ql b/ql/test/library-tests/semmle/go/Packages/predicate.ql
new file mode 100644
index 00000000000..9a4cdd003ce
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/Packages/predicate.ql
@@ -0,0 +1,8 @@
+import go
+
+from Package pkg, string mod, string path
+where
+ packages(pkg, _, package(mod, path), _) and
+ mod = "PackageName" and
+ path = "test"
+select pkg, pkg.getPath()
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/DialFunction.expected b/ql/test/library-tests/semmle/go/frameworks/Websocket/DialFunction.expected
new file mode 100644
index 00000000000..80e2b9058de
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/DialFunction.expected
@@ -0,0 +1,9 @@
+| DialFunction.go:19:11:19:52 | call to Dial | DialFunction.go:19:26:19:39 | untrustedInput |
+| DialFunction.go:22:12:22:39 | call to DialConfig | DialFunction.go:21:35:21:48 | untrustedInput |
+| DialFunction.go:24:2:24:49 | call to Dial | DialFunction.go:24:30:24:43 | untrustedInput |
+| DialFunction.go:27:2:27:38 | call to Dial | DialFunction.go:27:14:27:27 | untrustedInput |
+| DialFunction.go:29:2:29:61 | call to DialContext | DialFunction.go:29:37:29:50 | untrustedInput |
+| DialFunction.go:31:2:31:44 | call to Dial | DialFunction.go:31:30:31:43 | untrustedInput |
+| DialFunction.go:34:2:34:45 | call to Dial | DialFunction.go:34:31:34:44 | untrustedInput |
+| DialFunction.go:36:2:36:31 | call to BuildProxy | DialFunction.go:36:17:36:30 | untrustedInput |
+| DialFunction.go:37:2:37:24 | call to New | DialFunction.go:37:10:37:23 | untrustedInput |
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/DialFunction.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/DialFunction.go
new file mode 100644
index 00000000000..ed59e3a82cd
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/DialFunction.go
@@ -0,0 +1,39 @@
+package main
+
+import (
+ "context"
+
+ gobwas "github.com/gobwas/ws"
+ gorilla "github.com/gorilla/websocket"
+ sac "github.com/sacOO7/gowebsocket"
+ "golang.org/x/net/websocket"
+ nhooyr "nhooyr.io/websocket"
+)
+
+func main() {
+ untrustedInput := r.Referer()
+
+ origin := "http://localhost/"
+
+ // bad as input is directly passed to dial function
+ ws, _ := websocket.Dial(untrustedInput, "", origin)
+
+ config, _ := websocket.NewConfig(untrustedInput, origin) // good
+ ws2, _ := websocket.DialConfig(config)
+
+ nhooyr.Dial(context.TODO(), untrustedInput, nil)
+
+ dialer := gorilla.Dialer{}
+ dialer.Dial(untrustedInput, r.Header)
+
+ dialer.DialContext(context.TODO(), untrustedInput, r.Header)
+
+ gobwas.Dial(context.TODO(), untrustedInput)
+
+ dialer2 := gobwas.Dialer{}
+ dialer2.Dial(context.TODO(), untrustedInput)
+
+ sac.BuildProxy(untrustedInput)
+ sac.New(untrustedInput)
+
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/DialFunction.ql b/ql/test/library-tests/semmle/go/frameworks/Websocket/DialFunction.ql
new file mode 100644
index 00000000000..d6545ca87e6
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/DialFunction.ql
@@ -0,0 +1,4 @@
+import go
+
+from WebSocketRequestCall::Range r
+select r, r.getRequestUrl()
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/go.mod b/ql/test/library-tests/semmle/go/frameworks/Websocket/go.mod
new file mode 100644
index 00000000000..5f614a3d1d3
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/go.mod
@@ -0,0 +1,12 @@
+module main
+
+go 1.14
+
+require (
+ github.com/gobwas/ws v1.0.3
+ github.com/gorilla/websocket v1.4.2
+ github.com/sacOO7/go-logger v0.0.0-20180719173527-9ac9add5a50d // indirect
+ github.com/sacOO7/gowebsocket v0.0.0-20180719182212-1436bb906a4e
+ golang.org/x/net v0.0.0-20200421231249-e086a090c8fd
+ nhooyr.io/websocket v1.8.5
+)
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/LICENSE b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/LICENSE
new file mode 100644
index 00000000000..274431766fa
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017 Sergey Kamardin
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/README.md b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/README.md
new file mode 100644
index 00000000000..67a97fdbe92
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/README.md
@@ -0,0 +1,63 @@
+# httphead.[go](https://golang.org)
+
+[![GoDoc][godoc-image]][godoc-url]
+
+> Tiny HTTP header value parsing library in go.
+
+## Overview
+
+This library contains low-level functions for scanning HTTP RFC2616 compatible header value grammars.
+
+## Install
+
+```shell
+ go get github.com/gobwas/httphead
+```
+
+## Example
+
+The example below shows how a multiple-choice HTTP header value could be parsed with this library:
+
+```go
+ options, ok := httphead.ParseOptions([]byte(`foo;bar=1,baz`), nil)
+ fmt.Println(options, ok)
+ // Output: [{foo map[bar:1]} {baz map[]}] true
+```
+
+The low-level example below shows how to optimize keys skipping and selection
+of some key:
+
+```go
+ // The right part of full header line like:
+ // X-My-Header: key;foo=bar;baz,key;baz
+ header := []byte(`foo;a=0,foo;a=1,foo;a=2,foo;a=3`)
+
+ // We want to search key "foo" with an "a" parameter that equal to "2".
+ var (
+ foo = []byte(`foo`)
+ a = []byte(`a`)
+ v = []byte(`2`)
+ )
+ var found bool
+ httphead.ScanOptions(header, func(i int, key, param, value []byte) Control {
+ if !bytes.Equal(key, foo) {
+ return ControlSkip
+ }
+ if !bytes.Equal(param, a) {
+ if bytes.Equal(value, v) {
+ // Found it!
+ found = true
+ return ControlBreak
+ }
+ return ControlSkip
+ }
+ return ControlContinue
+ })
+```
+
+For more usage examples please see [docs][godoc-url] or package tests.
+
+[godoc-image]: https://godoc.org/github.com/gobwas/httphead?status.svg
+[godoc-url]: https://godoc.org/github.com/gobwas/httphead
+[travis-image]: https://travis-ci.org/gobwas/httphead.svg?branch=master
+[travis-url]: https://travis-ci.org/gobwas/httphead
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/cookie.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/cookie.go
new file mode 100644
index 00000000000..05c9a1fb6a1
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/cookie.go
@@ -0,0 +1,200 @@
+package httphead
+
+import (
+ "bytes"
+)
+
+// ScanCookie scans cookie pairs from data using DefaultCookieScanner.Scan()
+// method.
+func ScanCookie(data []byte, it func(key, value []byte) bool) bool {
+ return DefaultCookieScanner.Scan(data, it)
+}
+
+// DefaultCookieScanner is a CookieScanner which is used by ScanCookie().
+// Note that it is intended to have the same behavior as http.Request.Cookies()
+// has.
+var DefaultCookieScanner = CookieScanner{}
+
+// CookieScanner contains options for scanning cookie pairs.
+// See https://tools.ietf.org/html/rfc6265#section-4.1.1
+type CookieScanner struct {
+ // DisableNameValidation disables name validation of a cookie. If false,
+ // only RFC2616 "tokens" are accepted.
+ DisableNameValidation bool
+
+ // DisableValueValidation disables value validation of a cookie. If false,
+ // only RFC6265 "cookie-octet" characters are accepted.
+ //
+ // Note that Strict option also affects validation of a value.
+ //
+ // If Strict is false, then scanner begins to allow space and comma
+ // characters inside the value for better compatibility with non standard
+ // cookies implementations.
+ DisableValueValidation bool
+
+ // BreakOnPairError sets scanner to immediately return after first pair syntax
+ // validation error.
+ // If false, scanner will try to skip invalid pair bytes and go ahead.
+ BreakOnPairError bool
+
+ // Strict enables strict RFC6265 mode scanning. It affects name and value
+ // validation, as also some other rules.
+ // If false, it is intended to bring the same behavior as
+ // http.Request.Cookies().
+ Strict bool
+}
+
+// Scan maps data to name and value pairs. Usually data represents value of the
+// Cookie header.
+func (c CookieScanner) Scan(data []byte, it func(name, value []byte) bool) bool {
+ lexer := &Scanner{data: data}
+
+ const (
+ statePair = iota
+ stateBefore
+ )
+
+ state := statePair
+
+ for lexer.Buffered() > 0 {
+ switch state {
+ case stateBefore:
+ // Pairs separated by ";" and space, according to the RFC6265:
+ // cookie-pair *( ";" SP cookie-pair )
+ //
+ // Cookie pairs MUST be separated by (";" SP). So our only option
+ // here is to fail as syntax error.
+ a, b := lexer.Peek2()
+ if a != ';' {
+ return false
+ }
+
+ state = statePair
+
+ advance := 1
+ if b == ' ' {
+ advance++
+ } else if c.Strict {
+ return false
+ }
+
+ lexer.Advance(advance)
+
+ case statePair:
+ if !lexer.FetchUntil(';') {
+ return false
+ }
+
+ var value []byte
+ name := lexer.Bytes()
+ if i := bytes.IndexByte(name, '='); i != -1 {
+ value = name[i+1:]
+ name = name[:i]
+ } else if c.Strict {
+ if !c.BreakOnPairError {
+ goto nextPair
+ }
+ return false
+ }
+
+ if !c.Strict {
+ trimLeft(name)
+ }
+ if !c.DisableNameValidation && !ValidCookieName(name) {
+ if !c.BreakOnPairError {
+ goto nextPair
+ }
+ return false
+ }
+
+ if !c.Strict {
+ value = trimRight(value)
+ }
+ value = stripQuotes(value)
+ if !c.DisableValueValidation && !ValidCookieValue(value, c.Strict) {
+ if !c.BreakOnPairError {
+ goto nextPair
+ }
+ return false
+ }
+
+ if !it(name, value) {
+ return true
+ }
+
+ nextPair:
+ state = stateBefore
+ }
+ }
+
+ return true
+}
+
+// ValidCookieValue reports whether given value is a valid RFC6265
+// "cookie-octet" bytes.
+//
+// cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E
+// ; US-ASCII characters excluding CTLs,
+// ; whitespace DQUOTE, comma, semicolon,
+// ; and backslash
+//
+// Note that the false strict parameter disables errors on space 0x20 and comma
+// 0x2c. This could be useful to bring some compatibility with non-compliant
+// clients/servers in the real world.
+// It acts the same as standard library cookie parser if strict is false.
+func ValidCookieValue(value []byte, strict bool) bool {
+ if len(value) == 0 {
+ return true
+ }
+ for _, c := range value {
+ switch c {
+ case '"', ';', '\\':
+ return false
+ case ',', ' ':
+ if strict {
+ return false
+ }
+ default:
+ if c <= 0x20 {
+ return false
+ }
+ if c >= 0x7f {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// ValidCookieName reports whether the given bytes are a valid RFC2616 "token".
+func ValidCookieName(name []byte) bool {
+ for _, c := range name {
+ if !OctetTypes[c].IsToken() {
+ return false
+ }
+ }
+ return true
+}
+
+func stripQuotes(bts []byte) []byte {
+ if last := len(bts) - 1; last > 0 && bts[0] == '"' && bts[last] == '"' {
+ return bts[1:last]
+ }
+ return bts
+}
+
+func trimLeft(p []byte) []byte {
+ var i int
+ for i < len(p) && OctetTypes[p[i]].IsSpace() {
+ i++
+ }
+ return p[i:]
+}
+
+func trimRight(p []byte) []byte {
+ j := len(p)
+ for j > 0 && OctetTypes[p[j-1]].IsSpace() {
+ j--
+ }
+ return p[:j]
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/head.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/head.go
new file mode 100644
index 00000000000..a50e907dd18
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/head.go
@@ -0,0 +1,275 @@
+package httphead
+
+import (
+ "bufio"
+ "bytes"
+)
+
+// Version contains protocol major and minor version.
+type Version struct {
+ Major int
+ Minor int
+}
+
+// RequestLine contains parameters parsed from the first request line.
+type RequestLine struct {
+ Method []byte
+ URI []byte
+ Version Version
+}
+
+// ResponseLine contains parameters parsed from the first response line.
+type ResponseLine struct {
+ Version Version
+ Status int
+ Reason []byte
+}
+
+// SplitRequestLine splits given slice of bytes into three chunks without
+// parsing.
+func SplitRequestLine(line []byte) (method, uri, version []byte) {
+ return split3(line, ' ')
+}
+
+// ParseRequestLine parses http request line like "GET / HTTP/1.0".
+func ParseRequestLine(line []byte) (r RequestLine, ok bool) {
+ var i int
+ for i = 0; i < len(line); i++ {
+ c := line[i]
+ if !OctetTypes[c].IsToken() {
+ if i > 0 && c == ' ' {
+ break
+ }
+ return
+ }
+ }
+ if i == len(line) {
+ return
+ }
+
+ var proto []byte
+ r.Method = line[:i]
+ r.URI, proto = split2(line[i+1:], ' ')
+ if len(r.URI) == 0 {
+ return
+ }
+ if major, minor, ok := ParseVersion(proto); ok {
+ r.Version.Major = major
+ r.Version.Minor = minor
+ return r, true
+ }
+
+ return r, false
+}
+
+// SplitResponseLine splits given slice of bytes into three chunks without
+// parsing.
+func SplitResponseLine(line []byte) (version, status, reason []byte) {
+ return split3(line, ' ')
+}
+
+// ParseResponseLine parses first response line into ResponseLine struct.
+func ParseResponseLine(line []byte) (r ResponseLine, ok bool) {
+ var (
+ proto []byte
+ status []byte
+ )
+ proto, status, r.Reason = split3(line, ' ')
+ if major, minor, ok := ParseVersion(proto); ok {
+ r.Version.Major = major
+ r.Version.Minor = minor
+ } else {
+ return r, false
+ }
+ if n, ok := IntFromASCII(status); ok {
+ r.Status = n
+ } else {
+ return r, false
+ }
+	// TODO(gobwas): parse r.Reason here for the TEXT rule:
+ // TEXT =
+ return r, true
+}
+
+var (
+ httpVersion10 = []byte("HTTP/1.0")
+ httpVersion11 = []byte("HTTP/1.1")
+ httpVersionPrefix = []byte("HTTP/")
+)
+
+// ParseVersion parses major and minor version of HTTP protocol.
+// It returns parsed values and true if parse is ok.
+func ParseVersion(bts []byte) (major, minor int, ok bool) {
+ switch {
+ case bytes.Equal(bts, httpVersion11):
+ return 1, 1, true
+ case bytes.Equal(bts, httpVersion10):
+ return 1, 0, true
+ case len(bts) < 8:
+ return
+ case !bytes.Equal(bts[:5], httpVersionPrefix):
+ return
+ }
+
+ bts = bts[5:]
+
+ dot := bytes.IndexByte(bts, '.')
+ if dot == -1 {
+ return
+ }
+ major, ok = IntFromASCII(bts[:dot])
+ if !ok {
+ return
+ }
+ minor, ok = IntFromASCII(bts[dot+1:])
+ if !ok {
+ return
+ }
+
+ return major, minor, true
+}
+
+// ReadLine reads line from br. It reads until '\n' and returns bytes without
+// '\n' or '\r\n' at the end.
+// It returns err if and only if line does not end in '\n'. Note that read
+// bytes returned in any case of error.
+//
+// It is much like the textproto/Reader.ReadLine() except the thing that it
+// returns raw bytes, instead of string. That is, it avoids copying bytes read
+// from br.
+//
+// textproto/Reader.ReadLineBytes() also makes a copy of the resulting bytes to be
+// safe with future I/O operations on br.
+//
+// We could control I/O operations on br and do not need to make additional
+// copy for safety.
+func ReadLine(br *bufio.Reader) ([]byte, error) {
+ var line []byte
+ for {
+ bts, err := br.ReadSlice('\n')
+ if err == bufio.ErrBufferFull {
+ // Copy bytes because next read will discard them.
+ line = append(line, bts...)
+ continue
+ }
+ // Avoid copy of single read.
+ if line == nil {
+ line = bts
+ } else {
+ line = append(line, bts...)
+ }
+ if err != nil {
+ return line, err
+ }
+ // Size of line is at least 1.
+ // In other case bufio.ReadSlice() returns error.
+ n := len(line)
+ // Cut '\n' or '\r\n'.
+ if n > 1 && line[n-2] == '\r' {
+ line = line[:n-2]
+ } else {
+ line = line[:n-1]
+ }
+ return line, nil
+ }
+}
+
+// ParseHeaderLine parses HTTP header as key-value pair. It returns parsed
+// values and true if parse is ok.
+func ParseHeaderLine(line []byte) (k, v []byte, ok bool) {
+ colon := bytes.IndexByte(line, ':')
+ if colon == -1 {
+ return
+ }
+ k = trim(line[:colon])
+ for _, c := range k {
+ if !OctetTypes[c].IsToken() {
+ return nil, nil, false
+ }
+ }
+ v = trim(line[colon+1:])
+ return k, v, true
+}
+
+// IntFromASCII converts ascii encoded decimal numeric value from HTTP entities
+// to an integer.
+func IntFromASCII(bts []byte) (ret int, ok bool) {
+ // ASCII numbers all start with the high-order bits 0011.
+ // If you see that, and the next bits are 0-9 (0000 - 1001) you can grab those
+ // bits and interpret them directly as an integer.
+ var n int
+ if n = len(bts); n < 1 {
+ return 0, false
+ }
+ for i := 0; i < n; i++ {
+ if bts[i]&0xf0 != 0x30 {
+ return 0, false
+ }
+ ret += int(bts[i]&0xf) * pow(10, n-i-1)
+ }
+ return ret, true
+}
+
+const (
+ toLower = 'a' - 'A' // for use with OR.
+ toUpper = ^byte(toLower) // for use with AND.
+)
+
+// CanonicalizeHeaderKey is like standard textproto/CanonicalMIMEHeaderKey,
+// except that it operates with slice of bytes and modifies it inplace without
+// copying.
+func CanonicalizeHeaderKey(k []byte) {
+ upper := true
+ for i, c := range k {
+ if upper && 'a' <= c && c <= 'z' {
+ k[i] &= toUpper
+ } else if !upper && 'A' <= c && c <= 'Z' {
+ k[i] |= toLower
+ }
+ upper = c == '-'
+ }
+}
+
+// pow for integers implementation.
+// See Donald Knuth, The Art of Computer Programming, Volume 2, Section 4.6.3
+func pow(a, b int) int {
+ p := 1
+ for b > 0 {
+ if b&1 != 0 {
+ p *= a
+ }
+ b >>= 1
+ a *= a
+ }
+ return p
+}
+
+func split3(p []byte, sep byte) (p1, p2, p3 []byte) {
+ a := bytes.IndexByte(p, sep)
+ b := bytes.IndexByte(p[a+1:], sep)
+ if a == -1 || b == -1 {
+ return p, nil, nil
+ }
+ b += a + 1
+ return p[:a], p[a+1 : b], p[b+1:]
+}
+
+func split2(p []byte, sep byte) (p1, p2 []byte) {
+ i := bytes.IndexByte(p, sep)
+ if i == -1 {
+ return p, nil
+ }
+ return p[:i], p[i+1:]
+}
+
+func trim(p []byte) []byte {
+ var i, j int
+ for i = 0; i < len(p) && (p[i] == ' ' || p[i] == '\t'); {
+ i++
+ }
+ for j = len(p); j > i && (p[j-1] == ' ' || p[j-1] == '\t'); {
+ j--
+ }
+ return p[i:j]
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/httphead.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/httphead.go
new file mode 100644
index 00000000000..2387e8033c9
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/httphead.go
@@ -0,0 +1,331 @@
+// Package httphead contains utils for parsing HTTP and HTTP-grammar compatible
+// text protocols headers.
+//
+// That is, this package first aim is to bring ability to easily parse
+// constructions, described here https://tools.ietf.org/html/rfc2616#section-2
+package httphead
+
+import (
+ "bytes"
+ "strings"
+)
+
+// ScanTokens parses data in this form:
+//
+// list = 1#token
+//
+// It returns false if data is malformed.
+func ScanTokens(data []byte, it func([]byte) bool) bool {
+ lexer := &Scanner{data: data}
+
+ var ok bool
+ for lexer.Next() {
+ switch lexer.Type() {
+ case ItemToken:
+ ok = true
+ if !it(lexer.Bytes()) {
+ return true
+ }
+ case ItemSeparator:
+ if !isComma(lexer.Bytes()) {
+ return false
+ }
+ default:
+ return false
+ }
+ }
+
+ return ok && !lexer.err
+}
+
+// ParseOptions parses all header options and appends it to given slice of
+// Option. It returns a flag reporting successful (well-formed input) parsing.
+//
+// Note that appended options are all consist of subslices of data. That is,
+// mutation of data will mutate appended options.
+func ParseOptions(data []byte, options []Option) ([]Option, bool) {
+ var i int
+ index := -1
+ return options, ScanOptions(data, func(idx int, name, attr, val []byte) Control {
+ if idx != index {
+ index = idx
+ i = len(options)
+ options = append(options, Option{Name: name})
+ }
+ if attr != nil {
+ options[i].Parameters.Set(attr, val)
+ }
+ return ControlContinue
+ })
+}
+
+// SelectFlag encodes way of options selection.
+type SelectFlag byte
+
+// String represents the flag as a string.
+func (f SelectFlag) String() string {
+ var flags [2]string
+ var n int
+ if f&SelectCopy != 0 {
+ flags[n] = "copy"
+ n++
+ }
+ if f&SelectUnique != 0 {
+ flags[n] = "unique"
+ n++
+ }
+ return "[" + strings.Join(flags[:n], "|") + "]"
+}
+
+const (
+ // SelectCopy causes selector to copy selected option before appending it
+ // to resulting slice.
+ // If SelectCopy flag is not passed to selector, then appended options will
+ // contain sub-slices of the initial data.
+ SelectCopy SelectFlag = 1 << iota
+
+ // SelectUnique causes selector to append only not yet existing option to
+ // resulting slice. Unique is checked by comparing option names.
+ SelectUnique
+)
+
+// OptionSelector contains configuration for selecting Options from header value.
+type OptionSelector struct {
+ // Check is a filter function that applied to every Option that possibly
+ // could be selected.
+ // If Check is nil all options will be selected.
+ Check func(Option) bool
+
+ // Flags contains flags for options selection.
+ Flags SelectFlag
+
+ // Alloc used to allocate slice of bytes when selector is configured with
+ // SelectCopy flag. It will be called with number of bytes needed for copy
+ // of single Option.
+ // If Alloc is nil make is used.
+ Alloc func(n int) []byte
+}
+
+// Select parses header data and appends it to given slice of Option.
+// It also returns flag of successful (wellformed input) parsing.
+func (s OptionSelector) Select(data []byte, options []Option) ([]Option, bool) {
+ var current Option
+ var has bool
+ index := -1
+
+ alloc := s.Alloc
+ if alloc == nil {
+ alloc = defaultAlloc
+ }
+ check := s.Check
+ if check == nil {
+ check = defaultCheck
+ }
+
+ ok := ScanOptions(data, func(idx int, name, attr, val []byte) Control {
+ if idx != index {
+ if has && check(current) {
+ if s.Flags&SelectCopy != 0 {
+ current = current.Copy(alloc(current.Size()))
+ }
+ options = append(options, current)
+ has = false
+ }
+ if s.Flags&SelectUnique != 0 {
+ for i := len(options) - 1; i >= 0; i-- {
+ if bytes.Equal(options[i].Name, name) {
+ return ControlSkip
+ }
+ }
+ }
+ index = idx
+ current = Option{Name: name}
+ has = true
+ }
+ if attr != nil {
+ current.Parameters.Set(attr, val)
+ }
+
+ return ControlContinue
+ })
+ if has && check(current) {
+ if s.Flags&SelectCopy != 0 {
+ current = current.Copy(alloc(current.Size()))
+ }
+ options = append(options, current)
+ }
+
+ return options, ok
+}
+
+func defaultAlloc(n int) []byte { return make([]byte, n) }
+func defaultCheck(Option) bool { return true }
+
+// Control represents operation that scanner should perform.
+type Control byte
+
+const (
+ // ControlContinue causes scanner to continue scan tokens.
+ ControlContinue Control = iota
+ // ControlBreak causes scanner to stop scan tokens.
+ ControlBreak
+ // ControlSkip causes scanner to skip current entity.
+ ControlSkip
+)
+
+// ScanOptions parses data in this form:
+//
+// values = 1#value
+// value = token *( ";" param )
+// param = token [ "=" (token | quoted-string) ]
+//
+// It calls given callback with the index of the option, option itself and its
+// parameter (attribute and its value, both could be nil). Index is useful when
+// header contains multiple choices for the same named option.
+//
+// Given callback should return one of the defined Control* values.
+// ControlSkip means that passed key is not in caller's interest. That is, all
+// parameters of that key will be skipped.
+// ControlBreak means that no more keys and parameters should be parsed. That
+// is, it must break parsing immediately.
+// ControlContinue means that caller want to receive next parameter and its
+// value or the next key.
+//
+// It returns false if data is malformed.
+func ScanOptions(data []byte, it func(index int, option, attribute, value []byte) Control) bool {
+ lexer := &Scanner{data: data}
+
+ var ok bool
+ var state int
+ const (
+ stateKey = iota
+ stateParamBeforeName
+ stateParamName
+ stateParamBeforeValue
+ stateParamValue
+ )
+
+ var (
+ index int
+ key, param, value []byte
+ mustCall bool
+ )
+ for lexer.Next() {
+ var (
+ call bool
+ growIndex int
+ )
+
+ t := lexer.Type()
+ v := lexer.Bytes()
+
+ switch t {
+ case ItemToken:
+ switch state {
+ case stateKey, stateParamBeforeName:
+ key = v
+ state = stateParamBeforeName
+ mustCall = true
+ case stateParamName:
+ param = v
+ state = stateParamBeforeValue
+ mustCall = true
+ case stateParamValue:
+ value = v
+ state = stateParamBeforeName
+ call = true
+ default:
+ return false
+ }
+
+ case ItemString:
+ if state != stateParamValue {
+ return false
+ }
+ value = v
+ state = stateParamBeforeName
+ call = true
+
+ case ItemSeparator:
+ switch {
+ case isComma(v) && state == stateKey:
+ // Nothing to do.
+
+ case isComma(v) && state == stateParamBeforeName:
+ state = stateKey
+ // Make call only if we have not called this key yet.
+ call = mustCall
+ if !call {
+ // If we have already called callback with the key
+ // that just ended.
+ index++
+ } else {
+ // Else grow the index after calling callback.
+ growIndex = 1
+ }
+
+ case isComma(v) && state == stateParamBeforeValue:
+ state = stateKey
+ growIndex = 1
+ call = true
+
+ case isSemicolon(v) && state == stateParamBeforeName:
+ state = stateParamName
+
+ case isSemicolon(v) && state == stateParamBeforeValue:
+ state = stateParamName
+ call = true
+
+ case isEquality(v) && state == stateParamBeforeValue:
+ state = stateParamValue
+
+ default:
+ return false
+ }
+
+ default:
+ return false
+ }
+
+ if call {
+ switch it(index, key, param, value) {
+ case ControlBreak:
+ // User want to stop to parsing parameters.
+ return true
+
+ case ControlSkip:
+ // User want to skip current param.
+ state = stateKey
+ lexer.SkipEscaped(',')
+
+ case ControlContinue:
+ // User is interested in rest of parameters.
+ // Nothing to do.
+
+ default:
+ panic("unexpected control value")
+ }
+ ok = true
+ param = nil
+ value = nil
+ mustCall = false
+ index += growIndex
+ }
+ }
+ if mustCall {
+ ok = true
+ it(index, key, param, value)
+ }
+
+ return ok && !lexer.err
+}
+
+func isComma(b []byte) bool {
+ return len(b) == 1 && b[0] == ','
+}
+func isSemicolon(b []byte) bool {
+ return len(b) == 1 && b[0] == ';'
+}
+func isEquality(b []byte) bool {
+ return len(b) == 1 && b[0] == '='
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/lexer.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/lexer.go
new file mode 100644
index 00000000000..729855ed0d3
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/lexer.go
@@ -0,0 +1,360 @@
+package httphead
+
+import (
+ "bytes"
+)
+
+// ItemType encodes type of the lexing token.
+type ItemType int
+
+const (
+ // ItemUndef reports that token is undefined.
+ ItemUndef ItemType = iota
+ // ItemToken reports that token is RFC2616 token.
+ ItemToken
+ // ItemSeparator reports that token is RFC2616 separator.
+ ItemSeparator
+ // ItemString reports that token is RFC2616 quoted string.
+ ItemString
+ // ItemComment reports that token is RFC2616 comment.
+ ItemComment
+ // ItemOctet reports that token is octet slice.
+ ItemOctet
+)
+
+// Scanner represents header tokens scanner.
+// See https://tools.ietf.org/html/rfc2616#section-2
+type Scanner struct {
+ data []byte
+ pos int
+
+ itemType ItemType
+ itemBytes []byte
+
+ err bool
+}
+
+// NewScanner creates new RFC2616 data scanner.
+func NewScanner(data []byte) *Scanner {
+ return &Scanner{data: data}
+}
+
+// Next scans for next token. It returns true on successful scanning, and false
+// on error or EOF.
+func (l *Scanner) Next() bool {
+ c, ok := l.nextChar()
+ if !ok {
+ return false
+ }
+ switch c {
+ case '"': // quoted-string;
+ return l.fetchQuotedString()
+
+ case '(': // comment;
+ return l.fetchComment()
+
+ case '\\', ')': // unexpected chars;
+ l.err = true
+ return false
+
+ default:
+ return l.fetchToken()
+ }
+}
+
+// FetchUntil fetches ItemOctet from current scanner position to first
+// occurrence of c or to the end of the underlying data.
+func (l *Scanner) FetchUntil(c byte) bool {
+ l.resetItem()
+ if l.pos == len(l.data) {
+ return false
+ }
+ return l.fetchOctet(c)
+}
+
+// Peek reads byte at current position without advancing it. On end of data it
+// returns 0.
+func (l *Scanner) Peek() byte {
+ if l.pos == len(l.data) {
+ return 0
+ }
+ return l.data[l.pos]
+}
+
+// Peek2 reads two first bytes at current position without advancing it.
+// If there is not enough data it returns 0.
+func (l *Scanner) Peek2() (a, b byte) {
+ if l.pos == len(l.data) {
+ return 0, 0
+ }
+ if l.pos+1 == len(l.data) {
+ return l.data[l.pos], 0
+ }
+ return l.data[l.pos], l.data[l.pos+1]
+}
+
+// Buffered reports how many bytes there are left to scan.
+func (l *Scanner) Buffered() int {
+ return len(l.data) - l.pos
+}
+
+// Advance moves current position index at n bytes. It returns true on
+// successful move.
+func (l *Scanner) Advance(n int) bool {
+ l.pos += n
+ if l.pos > len(l.data) {
+ l.pos = len(l.data)
+ return false
+ }
+ return true
+}
+
+// Skip skips all bytes until the first occurrence of c.
+func (l *Scanner) Skip(c byte) {
+ if l.err {
+ return
+ }
+ // Reset scanner state.
+ l.resetItem()
+
+ if i := bytes.IndexByte(l.data[l.pos:], c); i == -1 {
+ // Reached the end of data.
+ l.pos = len(l.data)
+ } else {
+ l.pos += i + 1
+ }
+}
+
+// SkipEscaped skips all bytes until the first occurrence of non-escaped c.
+func (l *Scanner) SkipEscaped(c byte) {
+ if l.err {
+ return
+ }
+ // Reset scanner state.
+ l.resetItem()
+
+ if i := ScanUntil(l.data[l.pos:], c); i == -1 {
+ // Reached the end of data.
+ l.pos = len(l.data)
+ } else {
+ l.pos += i + 1
+ }
+}
+
+// Type reports current token type.
+func (l *Scanner) Type() ItemType {
+ return l.itemType
+}
+
+// Bytes returns current token bytes.
+func (l *Scanner) Bytes() []byte {
+ return l.itemBytes
+}
+
+func (l *Scanner) nextChar() (byte, bool) {
+ // Reset scanner state.
+ l.resetItem()
+
+ if l.err {
+ return 0, false
+ }
+ l.pos += SkipSpace(l.data[l.pos:])
+ if l.pos == len(l.data) {
+ return 0, false
+ }
+ return l.data[l.pos], true
+}
+
+func (l *Scanner) resetItem() {
+ l.itemType = ItemUndef
+ l.itemBytes = nil
+}
+
+func (l *Scanner) fetchOctet(c byte) bool {
+ i := l.pos
+ if j := bytes.IndexByte(l.data[l.pos:], c); j == -1 {
+ // Reached the end of data.
+ l.pos = len(l.data)
+ } else {
+ l.pos += j
+ }
+
+ l.itemType = ItemOctet
+ l.itemBytes = l.data[i:l.pos]
+
+ return true
+}
+
+func (l *Scanner) fetchToken() bool {
+ n, t := ScanToken(l.data[l.pos:])
+ if n == -1 {
+ l.err = true
+ return false
+ }
+
+ l.itemType = t
+ l.itemBytes = l.data[l.pos : l.pos+n]
+ l.pos += n
+
+ return true
+}
+
+func (l *Scanner) fetchQuotedString() (ok bool) {
+ l.pos++
+
+ n := ScanUntil(l.data[l.pos:], '"')
+ if n == -1 {
+ l.err = true
+ return false
+ }
+
+ l.itemType = ItemString
+ l.itemBytes = RemoveByte(l.data[l.pos:l.pos+n], '\\')
+ l.pos += n + 1
+
+ return true
+}
+
+func (l *Scanner) fetchComment() (ok bool) {
+ l.pos++
+
+ n := ScanPairGreedy(l.data[l.pos:], '(', ')')
+ if n == -1 {
+ l.err = true
+ return false
+ }
+
+ l.itemType = ItemComment
+ l.itemBytes = RemoveByte(l.data[l.pos:l.pos+n], '\\')
+ l.pos += n + 1
+
+ return true
+}
+
+// ScanUntil scans for first non-escaped character c in given data.
+// It returns index of matched c and -1 if c is not found.
+func ScanUntil(data []byte, c byte) (n int) {
+ for {
+ i := bytes.IndexByte(data[n:], c)
+ if i == -1 {
+ return -1
+ }
+ n += i
+ if n == 0 || data[n-1] != '\\' {
+ break
+ }
+ n++
+ }
+ return
+}
+
+// ScanPairGreedy scans for complete pair of opening and closing chars in greedy manner.
+// Note that first opening byte must not be present in data.
+func ScanPairGreedy(data []byte, open, close byte) (n int) {
+ var m int
+ opened := 1
+ for {
+ i := bytes.IndexByte(data[n:], close)
+ if i == -1 {
+ return -1
+ }
+ n += i
+ // If found index is not escaped then it is the end.
+ if n == 0 || data[n-1] != '\\' {
+ opened--
+ }
+
+ for m < i {
+ j := bytes.IndexByte(data[m:i], open)
+ if j == -1 {
+ break
+ }
+ m += j + 1
+ opened++
+ }
+
+ if opened == 0 {
+ break
+ }
+
+ n++
+ m = n
+ }
+ return
+}
+
+// RemoveByte returns data without c. If c is not present in data it returns
+// the same slice. Otherwise, it copies data without c.
+func RemoveByte(data []byte, c byte) []byte {
+ j := bytes.IndexByte(data, c)
+ if j == -1 {
+ return data
+ }
+
+ n := len(data) - 1
+
+ // If character is present, then allocate slice with n-1 capacity. That is,
+ // resulting bytes could be at most n-1 length.
+ result := make([]byte, n)
+ k := copy(result, data[:j])
+
+ for i := j + 1; i < n; {
+ j = bytes.IndexByte(data[i:], c)
+ if j != -1 {
+ k += copy(result[k:], data[i:i+j])
+ i = i + j + 1
+ } else {
+ k += copy(result[k:], data[i:])
+ break
+ }
+ }
+
+ return result[:k]
+}
+
+// SkipSpace skips spaces and lws-sequences from p.
+// It returns the number of bytes skipped.
+func SkipSpace(p []byte) (n int) {
+ for len(p) > 0 {
+ switch {
+ case len(p) >= 3 &&
+ p[0] == '\r' &&
+ p[1] == '\n' &&
+ OctetTypes[p[2]].IsSpace():
+ p = p[3:]
+ n += 3
+ case OctetTypes[p[0]].IsSpace():
+ p = p[1:]
+ n++
+ default:
+ return
+ }
+ }
+ return
+}
+
+// ScanToken scan for next token in p. It returns length of the token and its
+// type. It does not trim p.
+func ScanToken(p []byte) (n int, t ItemType) {
+ if len(p) == 0 {
+ return 0, ItemUndef
+ }
+
+ c := p[0]
+ switch {
+ case OctetTypes[c].IsSeparator():
+ return 1, ItemSeparator
+
+ case OctetTypes[c].IsToken():
+ for n = 1; n < len(p); n++ {
+ c := p[n]
+ if !OctetTypes[c].IsToken() {
+ break
+ }
+ }
+ return n, ItemToken
+
+ default:
+ return -1, ItemUndef
+ }
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/octet.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/octet.go
new file mode 100644
index 00000000000..2a04cdd0909
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/octet.go
@@ -0,0 +1,83 @@
+package httphead
+
+// OctetType describes character type.
+//
+// From the "Basic Rules" chapter of RFC2616
+// See https://tools.ietf.org/html/rfc2616#section-2.2
+//
+// OCTET = <any 8-bit sequence of data>
+// CHAR = <any US-ASCII character (octets 0 - 127)>
+// UPALPHA = <any US-ASCII uppercase letter "A".."Z">
+// LOALPHA = <any US-ASCII lowercase letter "a".."z">
+// ALPHA = UPALPHA | LOALPHA
+// DIGIT = <any US-ASCII digit "0".."9">
+// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+// CR = <US-ASCII CR, carriage return (13)>
+// LF = <US-ASCII LF, linefeed (10)>
+// SP = <US-ASCII SP, space (32)>
+// HT = <US-ASCII HT, horizontal-tab (9)>
+// <"> = <US-ASCII double-quote mark (34)>
+// CRLF = CR LF
+// LWS = [CRLF] 1*( SP | HT )
+//
+// Many HTTP/1.1 header field values consist of words separated by LWS
+// or special characters. These special characters MUST be in a quoted
+// string to be used within a parameter value (as defined in section
+// 3.6).
+//
+// token = 1*<any CHAR except CTLs or separators>
+// separators = "(" | ")" | "<" | ">" | "@"
+// | "," | ";" | ":" | "\" | <">
+// | "/" | "[" | "]" | "?" | "="
+// | "{" | "}" | SP | HT
+type OctetType byte
+
+// IsChar reports whether octet is CHAR.
+func (t OctetType) IsChar() bool { return t&octetChar != 0 }
+
+// IsControl reports whether octet is CTL.
+func (t OctetType) IsControl() bool { return t&octetControl != 0 }
+
+// IsSeparator reports whether octet is separator.
+func (t OctetType) IsSeparator() bool { return t&octetSeparator != 0 }
+
+// IsSpace reports whether octet is space (SP or HT).
+func (t OctetType) IsSpace() bool { return t&octetSpace != 0 }
+
+// IsToken reports whether octet is token.
+func (t OctetType) IsToken() bool { return t&octetToken != 0 }
+
+const (
+ octetChar OctetType = 1 << iota
+ octetControl
+ octetSpace
+ octetSeparator
+ octetToken
+)
+
+// OctetTypes is a table of octets.
+var OctetTypes [256]OctetType
+
+func init() {
+ for c := 32; c < 256; c++ {
+ var t OctetType
+ if c <= 127 {
+ t |= octetChar
+ }
+ if 0 <= c && c <= 31 || c == 127 {
+ t |= octetControl
+ }
+ switch c {
+ case '(', ')', '<', '>', '@', ',', ';', ':', '"', '/', '[', ']', '?', '=', '{', '}', '\\':
+ t |= octetSeparator
+ case ' ', '\t':
+ t |= octetSpace | octetSeparator
+ }
+
+ if t.IsChar() && !t.IsControl() && !t.IsSeparator() && !t.IsSpace() {
+ t |= octetToken
+ }
+
+ OctetTypes[c] = t
+ }
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/option.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/option.go
new file mode 100644
index 00000000000..243be08c9a0
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/option.go
@@ -0,0 +1,187 @@
+package httphead
+
+import (
+ "bytes"
+ "sort"
+)
+
+// Option represents a header option.
+type Option struct {
+ Name []byte
+ Parameters Parameters
+}
+
+// Size returns number of bytes need to be allocated for use in opt.Copy.
+func (opt Option) Size() int {
+ return len(opt.Name) + opt.Parameters.bytes
+}
+
+// Copy copies all underlying []byte slices into p and returns new Option.
+// Note that p must be at least of opt.Size() length.
+func (opt Option) Copy(p []byte) Option {
+ n := copy(p, opt.Name)
+ opt.Name = p[:n]
+ opt.Parameters, p = opt.Parameters.Copy(p[n:])
+ return opt
+}
+
+// String represents option as a string.
+func (opt Option) String() string {
+ return "{" + string(opt.Name) + " " + opt.Parameters.String() + "}"
+}
+
+// NewOption creates named option with given parameters.
+func NewOption(name string, params map[string]string) Option {
+ p := Parameters{}
+ for k, v := range params {
+ p.Set([]byte(k), []byte(v))
+ }
+ return Option{
+ Name: []byte(name),
+ Parameters: p,
+ }
+}
+
+// Equal reports whether option is equal to b.
+func (opt Option) Equal(b Option) bool {
+ if bytes.Equal(opt.Name, b.Name) {
+ return opt.Parameters.Equal(b.Parameters)
+ }
+ return false
+}
+
+// Parameters represents option's parameters.
+type Parameters struct {
+ pos int
+ bytes int
+ arr [8]pair
+ dyn []pair
+}
+
+// Equal reports whether a equal to b.
+func (p Parameters) Equal(b Parameters) bool {
+ switch {
+ case p.dyn == nil && b.dyn == nil:
+ case p.dyn != nil && b.dyn != nil:
+ default:
+ return false
+ }
+
+ ad, bd := p.data(), b.data()
+ if len(ad) != len(bd) {
+ return false
+ }
+
+ sort.Sort(pairs(ad))
+ sort.Sort(pairs(bd))
+
+ for i := 0; i < len(ad); i++ {
+ av, bv := ad[i], bd[i]
+ if !bytes.Equal(av.key, bv.key) || !bytes.Equal(av.value, bv.value) {
+ return false
+ }
+ }
+ return true
+}
+
+// Size returns number of bytes that needed to copy p.
+func (p *Parameters) Size() int {
+ return p.bytes
+}
+
+// Copy copies all underlying []byte slices into dst and returns new
+// Parameters.
+// Note that dst must be at least of p.Size() length.
+func (p *Parameters) Copy(dst []byte) (Parameters, []byte) {
+ ret := Parameters{
+ pos: p.pos,
+ bytes: p.bytes,
+ }
+ if p.dyn != nil {
+ ret.dyn = make([]pair, len(p.dyn))
+ for i, v := range p.dyn {
+ ret.dyn[i], dst = v.copy(dst)
+ }
+ } else {
+ for i, p := range p.arr {
+ ret.arr[i], dst = p.copy(dst)
+ }
+ }
+ return ret, dst
+}
+
+// Get returns value by key and flag about existence such value.
+func (p *Parameters) Get(key string) (value []byte, ok bool) {
+ for _, v := range p.data() {
+ if string(v.key) == key {
+ return v.value, true
+ }
+ }
+ return nil, false
+}
+
+// Set sets value by key.
+func (p *Parameters) Set(key, value []byte) {
+ p.bytes += len(key) + len(value)
+
+ if p.pos < len(p.arr) {
+ p.arr[p.pos] = pair{key, value}
+ p.pos++
+ return
+ }
+
+ if p.dyn == nil {
+ p.dyn = make([]pair, len(p.arr), len(p.arr)+1)
+ copy(p.dyn, p.arr[:])
+ }
+ p.dyn = append(p.dyn, pair{key, value})
+}
+
+// ForEach iterates over parameters key-value pairs and calls cb for each one.
+func (p *Parameters) ForEach(cb func(k, v []byte) bool) {
+ for _, v := range p.data() {
+ if !cb(v.key, v.value) {
+ break
+ }
+ }
+}
+
+// String represents parameters as a string.
+func (p *Parameters) String() (ret string) {
+ ret = "["
+ for i, v := range p.data() {
+ if i > 0 {
+ ret += " "
+ }
+ ret += string(v.key) + ":" + string(v.value)
+ }
+ return ret + "]"
+}
+
+func (p *Parameters) data() []pair {
+ if p.dyn != nil {
+ return p.dyn
+ }
+ return p.arr[:p.pos]
+}
+
+type pair struct {
+ key, value []byte
+}
+
+func (p pair) copy(dst []byte) (pair, []byte) {
+ n := copy(dst, p.key)
+ p.key = dst[:n]
+ m := n + copy(dst[n:], p.value)
+ p.value = dst[n:m]
+
+ dst = dst[m:]
+
+ return p, dst
+}
+
+type pairs []pair
+
+func (p pairs) Len() int { return len(p) }
+func (p pairs) Less(a, b int) bool { return bytes.Compare(p[a].key, p[b].key) == -1 }
+func (p pairs) Swap(a, b int) { p[a], p[b] = p[b], p[a] }
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/writer.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/writer.go
new file mode 100644
index 00000000000..e5df3ddf404
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/writer.go
@@ -0,0 +1,101 @@
+package httphead
+
+import "io"
+
+var (
+ comma = []byte{','}
+ equality = []byte{'='}
+ semicolon = []byte{';'}
+ quote = []byte{'"'}
+ escape = []byte{'\\'}
+)
+
+// WriteOptions write options list to the dest.
+// It uses the same form as {Scan,Parse}Options functions:
+// values = 1#value
+// value = token *( ";" param )
+// param = token [ "=" (token | quoted-string) ]
+//
+// It wraps values into the quoted-string sequence if they contain any
+// non-token characters.
+func WriteOptions(dest io.Writer, options []Option) (n int, err error) {
+ w := writer{w: dest}
+ for i, opt := range options {
+ if i > 0 {
+ w.write(comma)
+ }
+
+ writeTokenSanitized(&w, opt.Name)
+
+ for _, p := range opt.Parameters.data() {
+ w.write(semicolon)
+ writeTokenSanitized(&w, p.key)
+ if len(p.value) != 0 {
+ w.write(equality)
+ writeTokenSanitized(&w, p.value)
+ }
+ }
+ }
+ return w.result()
+}
+
+// writeTokenSanitized writes token as is or as quoted string if it contains
+// non-token characters.
+//
+// Note that it does not expect LWS sequences to be in s, because LWS is used only as
+// header field continuation:
+// "A CRLF is allowed in the definition of TEXT only as part of a header field
+// continuation. It is expected that the folding LWS will be replaced with a
+// single SP before interpretation of the TEXT value."
+// See https://tools.ietf.org/html/rfc2616#section-2
+//
+// That is we sanitizing s for writing, so there could not be any header field
+// continuation.
+// That is, any CRLF will be escaped like any other control characters not allowed in TEXT.
+func writeTokenSanitized(bw *writer, bts []byte) {
+ var qt bool
+ var pos int
+ for i := 0; i < len(bts); i++ {
+ c := bts[i]
+ if !OctetTypes[c].IsToken() && !qt {
+ qt = true
+ bw.write(quote)
+ }
+ if OctetTypes[c].IsControl() || c == '"' {
+ if !qt {
+ qt = true
+ bw.write(quote)
+ }
+ bw.write(bts[pos:i])
+ bw.write(escape)
+ bw.write(bts[i : i+1])
+ pos = i + 1
+ }
+ }
+ if !qt {
+ bw.write(bts)
+ } else {
+ bw.write(bts[pos:])
+ bw.write(quote)
+ }
+}
+
+type writer struct {
+ w io.Writer
+ n int
+ err error
+}
+
+func (w *writer) write(p []byte) {
+ if w.err != nil {
+ return
+ }
+ var n int
+ n, w.err = w.w.Write(p)
+ w.n += n
+ return
+}
+
+func (w *writer) result() (int, error) {
+ return w.n, w.err
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/README.md b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/README.md
new file mode 100644
index 00000000000..45685581dae
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/README.md
@@ -0,0 +1,107 @@
+# pool
+
+[![GoDoc][godoc-image]][godoc-url]
+
+> Tiny memory reuse helpers for Go.
+
+## generic
+
+Without use of subpackages, `pool` allows reusing any struct distinguishable
+by size in generic way:
+
+```go
+package main
+
+import "github.com/gobwas/pool"
+
+func main() {
+ x, n := pool.Get(100) // Returns object with size 128 or nil.
+ if x == nil {
+ // Create x somehow with knowledge that n is 128.
+ }
+ defer pool.Put(x, n)
+
+ // Work with x.
+}
+```
+
+Pool allows you to pass specific options for constructing custom pool:
+
+```go
+package main
+
+import "github.com/gobwas/pool"
+
+func main() {
+ p := pool.Custom(
+ pool.WithLogSizeMapping(), // Will ceil size n passed to Get(n) to nearest power of two.
+ pool.WithLogSizeRange(64, 512), // Will reuse objects in logarithmic range [64, 512].
+ pool.WithSize(65536), // Will reuse object with size 65536.
+ )
+ x, n := p.Get(1000) // Returns nil and 1000 because mapped size 1000 => 1024 is not reusing by the pool.
+ defer pool.Put(x, n) // Will not reuse x.
+
+ // Work with x.
+}
+```
+
+Note that there are few non-generic pooling implementations inside subpackages.
+
+## pbytes
+
+Subpackage `pbytes` is intended for `[]byte` reuse.
+
+```go
+package main
+
+import "github.com/gobwas/pool/pbytes"
+
+func main() {
+ bts := pbytes.GetCap(100) // Returns make([]byte, 0, 128).
+ defer pbytes.Put(bts)
+
+ // Work with bts.
+}
+```
+
+You can also create your own range for pooling:
+
+```go
+package main
+
+import "github.com/gobwas/pool/pbytes"
+
+func main() {
+ // Reuse only slices whose capacity is 128, 256, 512 or 1024.
+ pool := pbytes.New(128, 1024)
+
+ bts := pool.GetCap(100) // Returns make([]byte, 0, 128).
+ defer pool.Put(bts)
+
+ // Work with bts.
+}
+```
+
+## pbufio
+
+Subpackage `pbufio` is intended for `*bufio.{Reader, Writer}` reuse.
+
+```go
+package main
+
+import "github.com/gobwas/pool/pbufio"
+
+func main() {
+ bw := pbufio.GetWriter(os.Stdout, 100) // Returns bufio.NewWriterSize(128).
+ defer pbufio.PutWriter(bw)
+
+ // Work with bw.
+}
+```
+
+Like with `pbytes`, you can also create pool with custom reuse bounds.
+
+
+
+[godoc-image]: https://godoc.org/github.com/gobwas/pool?status.svg
+[godoc-url]: https://godoc.org/github.com/gobwas/pool
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/generic.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/generic.go
new file mode 100644
index 00000000000..d40b362458b
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/generic.go
@@ -0,0 +1,87 @@
+package pool
+
+import (
+ "sync"
+
+ "github.com/gobwas/pool/internal/pmath"
+)
+
+var DefaultPool = New(128, 65536)
+
+// Get pulls object whose generic size is at least of given size. It also
+// returns a real size of x for further pass to Put(). It returns -1 as real
+// size for nil x. Size >-1 does not mean that x is non-nil, so checks must be
+// done.
+//
+// Note that size could be ceiled to the next power of two.
+//
+// Get is a wrapper around DefaultPool.Get().
+func Get(size int) (interface{}, int) { return DefaultPool.Get(size) }
+
+// Put takes x and its size for future reuse.
+// Put is a wrapper around DefaultPool.Put().
+func Put(x interface{}, size int) { DefaultPool.Put(x, size) }
+
+// Pool contains logic of reusing objects distinguishable by size in generic
+// way.
+type Pool struct {
+ pool map[int]*sync.Pool
+ size func(int) int
+}
+
+// New creates new Pool that reuses objects which size is in logarithmic range
+// [min, max].
+//
+// Note that it is a shortcut for Custom() constructor with Options provided by
+// WithLogSizeMapping() and WithLogSizeRange(min, max) calls.
+func New(min, max int) *Pool {
+ return Custom(
+ WithLogSizeMapping(),
+ WithLogSizeRange(min, max),
+ )
+}
+
+// Custom creates new Pool with given options.
+func Custom(opts ...Option) *Pool {
+ p := &Pool{
+ pool: make(map[int]*sync.Pool),
+ size: pmath.Identity,
+ }
+
+ c := (*poolConfig)(p)
+ for _, opt := range opts {
+ opt(c)
+ }
+
+ return p
+}
+
+// Get pulls object whose generic size is at least of given size.
+// It also returns a real size of x for further pass to Put() even if x is nil.
+// Note that size could be ceiled to the next power of two.
+func (p *Pool) Get(size int) (interface{}, int) {
+ n := p.size(size)
+ if pool := p.pool[n]; pool != nil {
+ return pool.Get(), n
+ }
+ return nil, size
+}
+
+// Put takes x and its size for future reuse.
+func (p *Pool) Put(x interface{}, size int) {
+ if pool := p.pool[size]; pool != nil {
+ pool.Put(x)
+ }
+}
+
+type poolConfig Pool
+
+// AddSize adds size n to the map.
+func (p *poolConfig) AddSize(n int) {
+ p.pool[n] = new(sync.Pool)
+}
+
+// SetSizeMapping sets up incoming size mapping function.
+func (p *poolConfig) SetSizeMapping(size func(int) int) {
+ p.size = size
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/internal/pmath/pmath.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/internal/pmath/pmath.go
new file mode 100644
index 00000000000..df152ed12a5
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/internal/pmath/pmath.go
@@ -0,0 +1,65 @@
+package pmath
+
+const (
+ bitsize = 32 << (^uint(0) >> 63)
+ maxint = int(1<<(bitsize-1) - 1)
+ maxintHeadBit = 1 << (bitsize - 2)
+)
+
+// LogarithmicRange iterates from ceiled to power of two min to max,
+// calling cb on each iteration.
+func LogarithmicRange(min, max int, cb func(int)) {
+ if min == 0 {
+ min = 1
+ }
+ for n := CeilToPowerOfTwo(min); n <= max; n <<= 1 {
+ cb(n)
+ }
+}
+
+// IsPowerOfTwo reports whether given integer is a power of two.
+func IsPowerOfTwo(n int) bool {
+ return n&(n-1) == 0
+}
+
+// Identity is identity.
+func Identity(n int) int {
+ return n
+}
+
+// CeilToPowerOfTwo returns the least power of two integer value greater than
+// or equal to n.
+func CeilToPowerOfTwo(n int) int {
+ if n&maxintHeadBit != 0 && n > maxintHeadBit {
+ panic("argument is too large")
+ }
+ if n <= 2 {
+ return n
+ }
+ n--
+ n = fillBits(n)
+ n++
+ return n
+}
+
+// FloorToPowerOfTwo returns the greatest power of two integer value less than
+// or equal to n.
+func FloorToPowerOfTwo(n int) int {
+ if n <= 2 {
+ return n
+ }
+ n = fillBits(n)
+ n >>= 1
+ n++
+ return n
+}
+
+func fillBits(n int) int {
+ n |= n >> 1
+ n |= n >> 2
+ n |= n >> 4
+ n |= n >> 8
+ n |= n >> 16
+ n |= n >> 32
+ return n
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/option.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/option.go
new file mode 100644
index 00000000000..d6e42b70055
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/option.go
@@ -0,0 +1,43 @@
+package pool
+
+import "github.com/gobwas/pool/internal/pmath"
+
+// Option configures pool.
+type Option func(Config)
+
+// Config describes generic pool configuration.
+type Config interface {
+ AddSize(n int)
+ SetSizeMapping(func(int) int)
+}
+
+// WithSizeLogRange returns an Option that will add logarithmic range of
+// pooling sizes containing [min, max] values.
+func WithLogSizeRange(min, max int) Option {
+ return func(c Config) {
+ pmath.LogarithmicRange(min, max, func(n int) {
+ c.AddSize(n)
+ })
+ }
+}
+
+// WithSize returns an Option that will add given pooling size to the pool.
+func WithSize(n int) Option {
+ return func(c Config) {
+ c.AddSize(n)
+ }
+}
+
+func WithSizeMapping(sz func(int) int) Option {
+ return func(c Config) {
+ c.SetSizeMapping(sz)
+ }
+}
+
+func WithLogSizeMapping() Option {
+ return WithSizeMapping(pmath.CeilToPowerOfTwo)
+}
+
+func WithIdentitySizeMapping() Option {
+ return WithSizeMapping(pmath.Identity)
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio.go
new file mode 100644
index 00000000000..d526bd80da8
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio.go
@@ -0,0 +1,106 @@
+// Package pbufio contains tools for pooling bufio.Reader and bufio.Writers.
+package pbufio
+
+import (
+ "bufio"
+ "io"
+
+ "github.com/gobwas/pool"
+)
+
+var (
+ DefaultWriterPool = NewWriterPool(256, 65536)
+ DefaultReaderPool = NewReaderPool(256, 65536)
+)
+
+// GetWriter returns bufio.Writer whose buffer has at least size bytes.
+// Note that size could be ceiled to the next power of two.
+// GetWriter is a wrapper around DefaultWriterPool.Get().
+func GetWriter(w io.Writer, size int) *bufio.Writer { return DefaultWriterPool.Get(w, size) }
+
+// PutWriter takes bufio.Writer for future reuse.
+// It does not reuse bufio.Writer whose underlying buffer size is not a power of two.
+// PutWriter is a wrapper around DefaultWriterPool.Put().
+func PutWriter(bw *bufio.Writer) { DefaultWriterPool.Put(bw) }
+
+// GetReader returns bufio.Reader whose buffer has at least size bytes. It returns
+// its capacity for further pass to Put().
+// Note that size could be ceiled to the next power of two.
+// GetReader is a wrapper around DefaultReaderPool.Get().
+func GetReader(w io.Reader, size int) *bufio.Reader { return DefaultReaderPool.Get(w, size) }
+
+// PutReader takes bufio.Reader and its size for future reuse.
+// It does not reuse bufio.Reader if size is not power of two or is out of pool
+// min/max range.
+// PutReader is a wrapper around DefaultReaderPool.Put().
+func PutReader(bw *bufio.Reader) { DefaultReaderPool.Put(bw) }
+
+// WriterPool contains logic of *bufio.Writer reuse with various size.
+type WriterPool struct {
+ pool *pool.Pool
+}
+
+// NewWriterPool creates new WriterPool that reuses writers which size is in
+// logarithmic range [min, max].
+func NewWriterPool(min, max int) *WriterPool {
+ return &WriterPool{pool.New(min, max)}
+}
+
+// CustomWriterPool creates new WriterPool with given options.
+func CustomWriterPool(opts ...pool.Option) *WriterPool {
+ return &WriterPool{pool.Custom(opts...)}
+}
+
+// Get returns bufio.Writer whose buffer has at least size bytes.
+func (wp *WriterPool) Get(w io.Writer, size int) *bufio.Writer {
+ v, n := wp.pool.Get(size)
+ if v != nil {
+ bw := v.(*bufio.Writer)
+ bw.Reset(w)
+ return bw
+ }
+ return bufio.NewWriterSize(w, n)
+}
+
+// Put takes ownership of bufio.Writer for further reuse.
+func (wp *WriterPool) Put(bw *bufio.Writer) {
+ // Should reset even if we do Reset() inside Get().
+ // This is done to prevent locking underlying io.Writer from GC.
+ bw.Reset(nil)
+ wp.pool.Put(bw, writerSize(bw))
+}
+
+// ReaderPool contains logic of *bufio.Reader reuse with various size.
+type ReaderPool struct {
+ pool *pool.Pool
+}
+
+// NewReaderPool creates new ReaderPool that reuses writers which size is in
+// logarithmic range [min, max].
+func NewReaderPool(min, max int) *ReaderPool {
+ return &ReaderPool{pool.New(min, max)}
+}
+
+// CustomReaderPool creates new ReaderPool with given options.
+func CustomReaderPool(opts ...pool.Option) *ReaderPool {
+ return &ReaderPool{pool.Custom(opts...)}
+}
+
+// Get returns bufio.Reader whose buffer has at least size bytes.
+func (rp *ReaderPool) Get(r io.Reader, size int) *bufio.Reader {
+ v, n := rp.pool.Get(size)
+ if v != nil {
+ br := v.(*bufio.Reader)
+ br.Reset(r)
+ return br
+ }
+ return bufio.NewReaderSize(r, n)
+}
+
+// Put takes ownership of bufio.Reader for further reuse.
+func (rp *ReaderPool) Put(br *bufio.Reader) {
+ // Should reset even if we do Reset() inside Get().
+ // This is done to prevent locking underlying io.Reader from GC.
+ br.Reset(nil)
+ rp.pool.Put(br, readerSize(br))
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio_go110.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio_go110.go
new file mode 100644
index 00000000000..c736ae56e11
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio_go110.go
@@ -0,0 +1,13 @@
+// +build go1.10
+
+package pbufio
+
+import "bufio"
+
+func writerSize(bw *bufio.Writer) int {
+ return bw.Size()
+}
+
+func readerSize(br *bufio.Reader) int {
+ return br.Size()
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio_go19.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio_go19.go
new file mode 100644
index 00000000000..e71dd447d2a
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio_go19.go
@@ -0,0 +1,27 @@
+// +build !go1.10
+
+package pbufio
+
+import "bufio"
+
+func writerSize(bw *bufio.Writer) int {
+ return bw.Available() + bw.Buffered()
+}
+
+// readerSize returns buffer size of the given buffered reader.
+// NOTE: current workaround implementation resets underlying io.Reader.
+func readerSize(br *bufio.Reader) int {
+ br.Reset(sizeReader)
+ br.ReadByte()
+ n := br.Buffered() + 1
+ br.Reset(nil)
+ return n
+}
+
+var sizeReader optimisticReader
+
+type optimisticReader struct{}
+
+func (optimisticReader) Read(p []byte) (int, error) {
+ return len(p), nil
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pool.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pool.go
new file mode 100644
index 00000000000..1fe9e602fc5
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pool.go
@@ -0,0 +1,25 @@
+// Package pool contains helpers for pooling structures distinguishable by
+// size.
+//
+// Quick example:
+//
+// import "github.com/gobwas/pool"
+//
+// func main() {
+// // Reuse objects in logarithmic range from 0 to 64 (0,1,2,4,6,8,16,32,64).
+// p := pool.New(0, 64)
+//
+// buf, n := p.Get(10) // Returns buffer with 16 capacity.
+// if buf == nil {
+// buf = bytes.NewBuffer(make([]byte, n))
+// }
+// defer p.Put(buf, n)
+//
+// // Work with buf.
+// }
+//
+// There are non-generic implementations for pooling:
+// - pool/pbytes for []byte reuse;
+// - pool/pbufio for *bufio.Reader and *bufio.Writer reuse;
+//
+package pool
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/.gitignore b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/.gitignore
new file mode 100644
index 00000000000..e3e2b1080d0
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/.gitignore
@@ -0,0 +1,5 @@
+bin/
+reports/
+cpu.out
+mem.out
+ws.test
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/.travis.yml b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/.travis.yml
new file mode 100644
index 00000000000..cf74f1bee3c
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/.travis.yml
@@ -0,0 +1,25 @@
+sudo: required
+
+language: go
+
+services:
+ - docker
+
+os:
+ - linux
+ - windows
+
+go:
+ - 1.8.x
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
+ - 1.x
+
+install:
+ - go get github.com/gobwas/pool
+ - go get github.com/gobwas/httphead
+
+script:
+ - if [ "$TRAVIS_OS_NAME" = "windows" ]; then go test ./...; fi
+ - if [ "$TRAVIS_OS_NAME" = "linux" ]; then make test autobahn; fi
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/LICENSE b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/LICENSE
new file mode 100644
index 00000000000..d2611fddf55
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017-2018 Sergey Kamardin
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/Makefile b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/Makefile
new file mode 100644
index 00000000000..075e83c74bc
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/Makefile
@@ -0,0 +1,47 @@
+BENCH ?=.
+BENCH_BASE?=master
+
+clean:
+ rm -f bin/reporter
+ rm -fr autobahn/report/*
+
+bin/reporter:
+ go build -o bin/reporter ./autobahn
+
+bin/gocovmerge:
+ go build -o bin/gocovmerge github.com/wadey/gocovmerge
+
+.PHONY: autobahn
+autobahn: clean bin/reporter
+ ./autobahn/script/test.sh --build
+ bin/reporter $(PWD)/autobahn/report/index.json
+
+test:
+ go test -coverprofile=ws.coverage .
+ go test -coverprofile=wsutil.coverage ./wsutil
+
+cover: bin/gocovmerge test autobahn
+ bin/gocovmerge ws.coverage wsutil.coverage autobahn/report/server.coverage > total.coverage
+
+benchcmp: BENCH_BRANCH=$(shell git rev-parse --abbrev-ref HEAD)
+benchcmp: BENCH_OLD:=$(shell mktemp -t old.XXXX)
+benchcmp: BENCH_NEW:=$(shell mktemp -t new.XXXX)
+benchcmp:
+ if [ ! -z "$(shell git status -s)" ]; then\
+ echo "could not compare with $(BENCH_BASE) – found unstaged changes";\
+ exit 1;\
+ fi;\
+ if [ "$(BENCH_BRANCH)" == "$(BENCH_BASE)" ]; then\
+ echo "comparing the same branches";\
+ exit 1;\
+ fi;\
+ echo "benchmarking $(BENCH_BRANCH)...";\
+ go test -run=none -bench=$(BENCH) -benchmem > $(BENCH_NEW);\
+ echo "benchmarking $(BENCH_BASE)...";\
+ git checkout -q $(BENCH_BASE);\
+ go test -run=none -bench=$(BENCH) -benchmem > $(BENCH_OLD);\
+ git checkout -q $(BENCH_BRANCH);\
+ echo "\nresults:";\
+ echo "========\n";\
+ benchcmp $(BENCH_OLD) $(BENCH_NEW);\
+
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/README.md b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/README.md
new file mode 100644
index 00000000000..74acd78bd08
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/README.md
@@ -0,0 +1,360 @@
+# ws
+
+[![GoDoc][godoc-image]][godoc-url]
+[![Travis][travis-image]][travis-url]
+
+> [RFC6455][rfc-url] WebSocket implementation in Go.
+
+# Features
+
+- Zero-copy upgrade
+- No intermediate allocations during I/O
+- Low-level API which allows to build your own logic of packet handling and
+ buffers reuse
+- High-level wrappers and helpers around API in `wsutil` package, which allow
+ to start fast without digging the protocol internals
+
+# Documentation
+
+[GoDoc][godoc-url].
+
+# Why
+
+Existing WebSocket implementations do not allow users to reuse I/O buffers
+between connections in clear way. This library aims to export efficient
+low-level interface for working with the protocol without forcing only one way
+it could be used.
+
+By the way, if you want get the higher-level tools, you can use `wsutil`
+package.
+
+# Status
+
+Library is tagged as `v1*` so its API must not be broken during some
+improvements or refactoring.
+
+This implementation of RFC6455 passes [Autobahn Test
+Suite](https://github.com/crossbario/autobahn-testsuite) and currently has
+about 78% coverage.
+
+# Examples
+
+Example applications using `ws` are developed in separate repository
+[ws-examples](https://github.com/gobwas/ws-examples).
+
+# Usage
+
+The higher-level example of WebSocket echo server:
+
+```go
+package main
+
+import (
+ "net/http"
+
+ "github.com/gobwas/ws"
+ "github.com/gobwas/ws/wsutil"
+)
+
+func main() {
+ http.ListenAndServe(":8080", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ conn, _, _, err := ws.UpgradeHTTP(r, w)
+ if err != nil {
+ // handle error
+ }
+ go func() {
+ defer conn.Close()
+
+ for {
+ msg, op, err := wsutil.ReadClientData(conn)
+ if err != nil {
+ // handle error
+ }
+ err = wsutil.WriteServerMessage(conn, op, msg)
+ if err != nil {
+ // handle error
+ }
+ }
+ }()
+ }))
+}
+```
+
+Lower-level, but still high-level example:
+
+
+```go
+import (
+ "net/http"
+ "io"
+
+ "github.com/gobwas/ws"
+ "github.com/gobwas/ws/wsutil"
+)
+
+func main() {
+ http.ListenAndServe(":8080", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ conn, _, _, err := ws.UpgradeHTTP(r, w)
+ if err != nil {
+ // handle error
+ }
+ go func() {
+ defer conn.Close()
+
+ var (
+ state = ws.StateServerSide
+ reader = wsutil.NewReader(conn, state)
+ writer = wsutil.NewWriter(conn, state, ws.OpText)
+ )
+ for {
+ header, err := reader.NextFrame()
+ if err != nil {
+ // handle error
+ }
+
+ // Reset writer to write frame with right operation code.
+ writer.Reset(conn, state, header.OpCode)
+
+ if _, err = io.Copy(writer, reader); err != nil {
+ // handle error
+ }
+ if err = writer.Flush(); err != nil {
+ // handle error
+ }
+ }
+ }()
+ }))
+}
+```
+
+We can apply the same pattern to read and write structured responses through a JSON encoder and decoder.:
+
+```go
+ ...
+ var (
+ r = wsutil.NewReader(conn, ws.StateServerSide)
+ w = wsutil.NewWriter(conn, ws.StateServerSide, ws.OpText)
+ decoder = json.NewDecoder(r)
+ encoder = json.NewEncoder(w)
+ )
+ for {
+ hdr, err = r.NextFrame()
+ if err != nil {
+ return err
+ }
+ if hdr.OpCode == ws.OpClose {
+ return io.EOF
+ }
+ var req Request
+ if err := decoder.Decode(&req); err != nil {
+ return err
+ }
+ var resp Response
+ if err := encoder.Encode(&resp); err != nil {
+ return err
+ }
+ if err = w.Flush(); err != nil {
+ return err
+ }
+ }
+ ...
+```
+
+The lower-level example without `wsutil`:
+
+```go
+package main
+
+import (
+ "net"
+ "io"
+
+ "github.com/gobwas/ws"
+)
+
+func main() {
+ ln, err := net.Listen("tcp", "localhost:8080")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ for {
+ conn, err := ln.Accept()
+ if err != nil {
+ // handle error
+ }
+ _, err = ws.Upgrade(conn)
+ if err != nil {
+ // handle error
+ }
+
+ go func() {
+ defer conn.Close()
+
+ for {
+ header, err := ws.ReadHeader(conn)
+ if err != nil {
+ // handle error
+ }
+
+ payload := make([]byte, header.Length)
+ _, err = io.ReadFull(conn, payload)
+ if err != nil {
+ // handle error
+ }
+ if header.Masked {
+ ws.Cipher(payload, header.Mask, 0)
+ }
+
+ // Reset the Masked flag, server frames must not be masked as
+ // RFC6455 says.
+ header.Masked = false
+
+ if err := ws.WriteHeader(conn, header); err != nil {
+ // handle error
+ }
+ if _, err := conn.Write(payload); err != nil {
+ // handle error
+ }
+
+ if header.OpCode == ws.OpClose {
+ return
+ }
+ }
+ }()
+ }
+}
+```
+
+# Zero-copy upgrade
+
+Zero-copy upgrade helps to avoid unnecessary allocations and copying while
+handling HTTP Upgrade request.
+
+Processing of all non-websocket headers is made in place with use of registered
+user callbacks whose arguments are only valid until callback returns.
+
+The simple example looks like this:
+
+```go
+package main
+
+import (
+ "net"
+ "log"
+
+ "github.com/gobwas/ws"
+)
+
+func main() {
+ ln, err := net.Listen("tcp", "localhost:8080")
+ if err != nil {
+ log.Fatal(err)
+ }
+ u := ws.Upgrader{
+ OnHeader: func(key, value []byte) (err error) {
+ log.Printf("non-websocket header: %q=%q", key, value)
+ return
+ },
+ }
+ for {
+ conn, err := ln.Accept()
+ if err != nil {
+ // handle error
+ }
+
+ _, err = u.Upgrade(conn)
+ if err != nil {
+ // handle error
+ }
+ }
+}
+```
+
+Usage of `ws.Upgrader` here brings ability to control incoming connections on
+tcp level and simply not to accept them by some logic.
+
+Zero-copy upgrade is for high-load services which have to control many
+resources such as connections buffers.
+
+The real life example could be like this:
+
+```go
+package main
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "runtime"
+
+ "github.com/gobwas/httphead"
+ "github.com/gobwas/ws"
+)
+
+func main() {
+ ln, err := net.Listen("tcp", "localhost:8080")
+ if err != nil {
+ // handle error
+ }
+
+ // Prepare handshake header writer from http.Header mapping.
+ header := ws.HandshakeHeaderHTTP(http.Header{
+ "X-Go-Version": []string{runtime.Version()},
+ })
+
+ u := ws.Upgrader{
+ OnHost: func(host []byte) error {
+ if string(host) == "github.com" {
+ return nil
+ }
+ return ws.RejectConnectionError(
+ ws.RejectionStatus(403),
+ ws.RejectionHeader(ws.HandshakeHeaderString(
+ "X-Want-Host: github.com\r\n",
+ )),
+ )
+ },
+ OnHeader: func(key, value []byte) error {
+ if string(key) != "Cookie" {
+ return nil
+ }
+ ok := httphead.ScanCookie(value, func(key, value []byte) bool {
+ // Check session here or do some other stuff with cookies.
+ // Maybe copy some values for future use.
+ return true
+ })
+ if ok {
+ return nil
+ }
+ return ws.RejectConnectionError(
+ ws.RejectionReason("bad cookie"),
+ ws.RejectionStatus(400),
+ )
+ },
+ OnBeforeUpgrade: func() (ws.HandshakeHeader, error) {
+ return header, nil
+ },
+ }
+ for {
+ conn, err := ln.Accept()
+ if err != nil {
+ log.Fatal(err)
+ }
+ _, err = u.Upgrade(conn)
+ if err != nil {
+ log.Printf("upgrade error: %s", err)
+ }
+ }
+}
+```
+
+
+
+[rfc-url]: https://tools.ietf.org/html/rfc6455
+[godoc-image]: https://godoc.org/github.com/gobwas/ws?status.svg
+[godoc-url]: https://godoc.org/github.com/gobwas/ws
+[travis-image]: https://travis-ci.org/gobwas/ws.svg?branch=master
+[travis-url]: https://travis-ci.org/gobwas/ws
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/check.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/check.go
new file mode 100644
index 00000000000..8aa0df8cc28
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/check.go
@@ -0,0 +1,145 @@
+package ws
+
+import "unicode/utf8"
+
+// State represents state of websocket endpoint.
+// It used by some functions to be more strict when checking compatibility with RFC6455.
+type State uint8
+
+const (
+ // StateServerSide means that endpoint (caller) is a server.
+ StateServerSide State = 0x1 << iota
+ // StateClientSide means that endpoint (caller) is a client.
+ StateClientSide
+ // StateExtended means that extension was negotiated during handshake.
+ StateExtended
+ // StateFragmented means that endpoint (caller) has received fragmented
+ // frame and waits for continuation parts.
+ StateFragmented
+)
+
+// Is checks whether the s has v enabled.
+func (s State) Is(v State) bool {
+ return uint8(s)&uint8(v) != 0
+}
+
+// Set enables v state on s.
+func (s State) Set(v State) State {
+ return s | v
+}
+
+// Clear disables v state on s.
+func (s State) Clear(v State) State {
+ return s & (^v)
+}
+
+// ServerSide reports whether states represents server side.
+func (s State) ServerSide() bool { return s.Is(StateServerSide) }
+
+// ClientSide reports whether state represents client side.
+func (s State) ClientSide() bool { return s.Is(StateClientSide) }
+
+// Extended reports whether state is extended.
+func (s State) Extended() bool { return s.Is(StateExtended) }
+
+// Fragmented reports whether state is fragmented.
+func (s State) Fragmented() bool { return s.Is(StateFragmented) }
+
+// ProtocolError describes error during checking/parsing websocket frames or
+// headers.
+type ProtocolError string
+
+// Error implements error interface.
+func (p ProtocolError) Error() string { return string(p) }
+
+// Errors used by the protocol checkers.
+var (
+ ErrProtocolOpCodeReserved = ProtocolError("use of reserved op code")
+ ErrProtocolControlPayloadOverflow = ProtocolError("control frame payload limit exceeded")
+ ErrProtocolControlNotFinal = ProtocolError("control frame is not final")
+ ErrProtocolNonZeroRsv = ProtocolError("non-zero rsv bits with no extension negotiated")
+ ErrProtocolMaskRequired = ProtocolError("frames from client to server must be masked")
+ ErrProtocolMaskUnexpected = ProtocolError("frames from server to client must be not masked")
+ ErrProtocolContinuationExpected = ProtocolError("unexpected non-continuation data frame")
+ ErrProtocolContinuationUnexpected = ProtocolError("unexpected continuation data frame")
+ ErrProtocolStatusCodeNotInUse = ProtocolError("status code is not in use")
+ ErrProtocolStatusCodeApplicationLevel = ProtocolError("status code is only application level")
+ ErrProtocolStatusCodeNoMeaning = ProtocolError("status code has no meaning yet")
+ ErrProtocolStatusCodeUnknown = ProtocolError("status code is not defined in spec")
+ ErrProtocolInvalidUTF8 = ProtocolError("invalid utf8 sequence in close reason")
+)
+
+// CheckHeader checks h to contain valid header data for given state s.
+//
+// Note that zero state (0) means that state is clean,
+// neither server or client side, nor fragmented, nor extended.
+func CheckHeader(h Header, s State) error {
+ if h.OpCode.IsReserved() {
+ return ErrProtocolOpCodeReserved
+ }
+ if h.OpCode.IsControl() {
+ if h.Length > MaxControlFramePayloadSize {
+ return ErrProtocolControlPayloadOverflow
+ }
+ if !h.Fin {
+ return ErrProtocolControlNotFinal
+ }
+ }
+
+ switch {
+ // [RFC6455]: MUST be 0 unless an extension is negotiated that defines meanings for
+ // non-zero values. If a nonzero value is received and none of the
+ // negotiated extensions defines the meaning of such a nonzero value, the
+ // receiving endpoint MUST _Fail the WebSocket Connection_.
+ case h.Rsv != 0 && !s.Extended():
+ return ErrProtocolNonZeroRsv
+
+ // [RFC6455]: The server MUST close the connection upon receiving a frame that is not masked.
+ // In this case, a server MAY send a Close frame with a status code of 1002 (protocol error)
+ // as defined in Section 7.4.1. A server MUST NOT mask any frames that it sends to the client.
+ // A client MUST close a connection if it detects a masked frame. In this case, it MAY use the
+ // status code 1002 (protocol error) as defined in Section 7.4.1.
+ case s.ServerSide() && !h.Masked:
+ return ErrProtocolMaskRequired
+ case s.ClientSide() && h.Masked:
+ return ErrProtocolMaskUnexpected
+
+ // [RFC6455]: See detailed explanation in 5.4 section.
+ case s.Fragmented() && !h.OpCode.IsControl() && h.OpCode != OpContinuation:
+ return ErrProtocolContinuationExpected
+ case !s.Fragmented() && h.OpCode == OpContinuation:
+ return ErrProtocolContinuationUnexpected
+
+ default:
+ return nil
+ }
+}
+
+// CheckCloseFrameData checks received close information
+// to be valid RFC6455 compatible close info.
+//
+// Note that code.Empty() or code.IsAppLevel() will raise error.
+//
+// If endpoint sends close frame without status code (with frame.Length = 0),
+// application should not check its payload.
+func CheckCloseFrameData(code StatusCode, reason string) error {
+ switch {
+ case code.IsNotUsed():
+ return ErrProtocolStatusCodeNotInUse
+
+ case code.IsProtocolReserved():
+ return ErrProtocolStatusCodeApplicationLevel
+
+ case code == StatusNoMeaningYet:
+ return ErrProtocolStatusCodeNoMeaning
+
+ case code.IsProtocolSpec() && !code.IsProtocolDefined():
+ return ErrProtocolStatusCodeUnknown
+
+ case !utf8.ValidString(reason):
+ return ErrProtocolInvalidUTF8
+
+ default:
+ return nil
+ }
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/cipher.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/cipher.go
new file mode 100644
index 00000000000..11a2af99bfc
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/cipher.go
@@ -0,0 +1,59 @@
+package ws
+
+import (
+ "encoding/binary"
+ "unsafe"
+)
+
+// Cipher applies XOR cipher to the payload using mask.
+// Offset is used to cipher chunked data (e.g. in io.Reader implementations).
+//
+// To convert masked data into unmasked data, or vice versa, the following
+// algorithm is applied. The same algorithm applies regardless of the
+// direction of the translation, e.g., the same steps are applied to
+// mask the data as to unmask the data.
+func Cipher(payload []byte, mask [4]byte, offset int) {
+ n := len(payload)
+ if n < 8 {
+ for i := 0; i < n; i++ {
+ payload[i] ^= mask[(offset+i)%4]
+ }
+ return
+ }
+
+ // Calculate position in mask due to previously processed bytes number.
+ mpos := offset % 4
+ // Count number of bytes will processed one by one from the beginning of payload.
+ ln := remain[mpos]
+ // Count number of bytes will processed one by one from the end of payload.
+ // This is done to process payload by 8 bytes in each iteration of main loop.
+ rn := (n - ln) % 8
+
+ for i := 0; i < ln; i++ {
+ payload[i] ^= mask[(mpos+i)%4]
+ }
+ for i := n - rn; i < n; i++ {
+ payload[i] ^= mask[(mpos+i)%4]
+ }
+
+ // We should cast mask to uint32 with unsafe instead of encoding.BigEndian
+ // to avoid care of os dependent byte order. That is, on any endianess mask
+ // and payload will be presented with the same order. In other words, we
+ // could not use encoding.BigEndian on xoring payload as uint64.
+ m := *(*uint32)(unsafe.Pointer(&mask))
+ m2 := uint64(m)<<32 | uint64(m)
+
+ // Skip already processed right part.
+ // Get number of uint64 parts remaining to process.
+ n = (n - ln - rn) >> 3
+ for i := 0; i < n; i++ {
+ idx := ln + (i << 3)
+ p := binary.LittleEndian.Uint64(payload[idx : idx+8])
+ p = p ^ m2
+ binary.LittleEndian.PutUint64(payload[idx:idx+8], p)
+ }
+}
+
+// remain maps position in masking key [0,4) to number
+// of bytes that need to be processed manually inside Cipher().
+var remain = [4]int{0, 3, 2, 1}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer.go
new file mode 100644
index 00000000000..4357be2142b
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer.go
@@ -0,0 +1,556 @@
+package ws
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "crypto/tls"
+ "fmt"
+ "io"
+ "net"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/gobwas/httphead"
+ "github.com/gobwas/pool/pbufio"
+)
+
+// Constants used by Dialer.
+const (
+ DefaultClientReadBufferSize = 4096
+ DefaultClientWriteBufferSize = 4096
+)
+
+// Handshake represents handshake result.
+type Handshake struct {
+ // Protocol is the subprotocol selected during handshake.
+ Protocol string
+
+ // Extensions is the list of negotiated extensions.
+ Extensions []httphead.Option
+}
+
+// Errors used by the websocket client.
+var (
+ ErrHandshakeBadStatus = fmt.Errorf("unexpected http status")
+ ErrHandshakeBadSubProtocol = fmt.Errorf("unexpected protocol in %q header", headerSecProtocol)
+ ErrHandshakeBadExtensions = fmt.Errorf("unexpected extensions in %q header", headerSecProtocol)
+)
+
+// DefaultDialer is dialer that holds no options and is used by Dial function.
+var DefaultDialer Dialer
+
+// Dial is like Dialer{}.Dial().
+func Dial(ctx context.Context, urlstr string) (net.Conn, *bufio.Reader, Handshake, error) {
+ return DefaultDialer.Dial(ctx, urlstr)
+}
+
+// Dialer contains options for establishing websocket connection to an url.
+type Dialer struct {
+ // ReadBufferSize and WriteBufferSize is an I/O buffer sizes.
+ // They used to read and write http data while upgrading to WebSocket.
+ // Allocated buffers are pooled with sync.Pool to avoid extra allocations.
+ //
+ // If a size is zero then default value is used.
+ ReadBufferSize, WriteBufferSize int
+
+ // Timeout is the maximum amount of time a Dial() will wait for a connect
+ // and an handshake to complete.
+ //
+ // The default is no timeout.
+ Timeout time.Duration
+
+ // Protocols is the list of subprotocols that the client wants to speak,
+ // ordered by preference.
+ //
+ // See https://tools.ietf.org/html/rfc6455#section-4.1
+ Protocols []string
+
+ // Extensions is the list of extensions that client wants to speak.
+ //
+ // Note that if server decides to use some of this extensions, Dial() will
+ // return Handshake struct containing a slice of items, which are the
+ // shallow copies of the items from this list. That is, internals of
+ // Extensions items are shared during Dial().
+ //
+ // See https://tools.ietf.org/html/rfc6455#section-4.1
+ // See https://tools.ietf.org/html/rfc6455#section-9.1
+ Extensions []httphead.Option
+
+ // Header is an optional HandshakeHeader instance that could be used to
+ // write additional headers to the handshake request.
+ //
+ // It used instead of any key-value mappings to avoid allocations in user
+ // land.
+ Header HandshakeHeader
+
+ // OnStatusError is the callback that will be called after receiving non
+ // "101 Continue" HTTP response status. It receives an io.Reader object
+ // representing server response bytes. That is, it gives ability to parse
+ // HTTP response somehow (probably with http.ReadResponse call) and make a
+ // decision of further logic.
+ //
+ // The arguments are only valid until the callback returns.
+ OnStatusError func(status int, reason []byte, resp io.Reader)
+
+ // OnHeader is the callback that will be called after successful parsing of
+ // header, that is not used during WebSocket handshake procedure. That is,
+ // it will be called with non-websocket headers, which could be relevant
+ // for application-level logic.
+ //
+ // The arguments are only valid until the callback returns.
+ //
+ // Returned value could be used to prevent processing response.
+ OnHeader func(key, value []byte) (err error)
+
+ // NetDial is the function that is used to get plain tcp connection.
+ // If it is not nil, then it is used instead of net.Dialer.
+ NetDial func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ // TLSClient is the callback that will be called after successful dial with
+ // received connection and its remote host name. If it is nil, then the
+ // default tls.Client() will be used.
+ // If it is not nil, then TLSConfig field is ignored.
+ TLSClient func(conn net.Conn, hostname string) net.Conn
+
+ // TLSConfig is passed to tls.Client() to start TLS over established
+ // connection. If TLSClient is not nil, then it is ignored. If TLSConfig is
+ // non-nil and its ServerName is empty, then for every Dial() it will be
+ // cloned and appropriate ServerName will be set.
+ TLSConfig *tls.Config
+
+ // WrapConn is the optional callback that will be called when connection is
+ // ready for an i/o. That is, it will be called after successful dial and
+ // TLS initialization (for "wss" schemes). It may be helpful for different
+ // user land purposes such as end to end encryption.
+ //
+ // Note that for debugging purposes of an http handshake (e.g. sent request
+ // and received response), there is an wsutil.DebugDialer struct.
+ WrapConn func(conn net.Conn) net.Conn
+}
+
+// Dial connects to the url host and upgrades connection to WebSocket.
+//
+// If server has sent frames right after successful handshake then returned
+// buffer will be non-nil. In other cases buffer is always nil. For better
+// memory efficiency received non-nil bufio.Reader should be returned to the
+// inner pool with PutReader() function after use.
+//
+// Note that Dialer does not implement IDNA (RFC5895) logic as net/http does.
+// If you want to dial non-ascii host name, take care of its name serialization
+// avoiding bad request issues. For more info see net/http Request.Write()
+// implementation, especially cleanHost() function.
+func (d Dialer) Dial(ctx context.Context, urlstr string) (conn net.Conn, br *bufio.Reader, hs Handshake, err error) {
+ u, err := url.ParseRequestURI(urlstr)
+ if err != nil {
+ return
+ }
+
+ // Prepare context to dial with. Initially it is the same as original, but
+ // if d.Timeout is non-zero and points to time that is before ctx.Deadline,
+ // we use more shorter context for dial.
+ dialctx := ctx
+
+ var deadline time.Time
+ if t := d.Timeout; t != 0 {
+ deadline = time.Now().Add(t)
+ if d, ok := ctx.Deadline(); !ok || deadline.Before(d) {
+ var cancel context.CancelFunc
+ dialctx, cancel = context.WithDeadline(ctx, deadline)
+ defer cancel()
+ }
+ }
+ if conn, err = d.dial(dialctx, u); err != nil {
+ return
+ }
+ defer func() {
+ if err != nil {
+ conn.Close()
+ }
+ }()
+ if ctx == context.Background() {
+ // No need to start I/O interrupter goroutine which is not zero-cost.
+ conn.SetDeadline(deadline)
+ defer conn.SetDeadline(noDeadline)
+ } else {
+ // Context could be canceled or its deadline could be exceeded.
+ // Start the interrupter goroutine to handle context cancelation.
+ done := setupContextDeadliner(ctx, conn)
+ defer func() {
+ // Map Upgrade() error to a possible context expiration error. That
+ // is, even if Upgrade() err is nil, context could be already
+ // expired and connection be "poisoned" by SetDeadline() call.
+ // In that case we must not return ctx.Err() error.
+ done(&err)
+ }()
+ }
+
+ br, hs, err = d.Upgrade(conn, u)
+
+ return
+}
+
+var (
+ // netEmptyDialer is a net.Dialer without options, used in Dialer.dial() if
+ // Dialer.NetDial is not provided.
+ netEmptyDialer net.Dialer
+ // tlsEmptyConfig is an empty tls.Config used as default one.
+ tlsEmptyConfig tls.Config
+)
+
+func tlsDefaultConfig() *tls.Config {
+ return &tlsEmptyConfig
+}
+
+func hostport(host string, defaultPort string) (hostname, addr string) {
+ var (
+ colon = strings.LastIndexByte(host, ':')
+ bracket = strings.IndexByte(host, ']')
+ )
+ if colon > bracket {
+ return host[:colon], host
+ }
+ return host, host + defaultPort
+}
+
+func (d Dialer) dial(ctx context.Context, u *url.URL) (conn net.Conn, err error) {
+ dial := d.NetDial
+ if dial == nil {
+ dial = netEmptyDialer.DialContext
+ }
+ switch u.Scheme {
+ case "ws":
+ _, addr := hostport(u.Host, ":80")
+ conn, err = dial(ctx, "tcp", addr)
+ case "wss":
+ hostname, addr := hostport(u.Host, ":443")
+ conn, err = dial(ctx, "tcp", addr)
+ if err != nil {
+ return
+ }
+ tlsClient := d.TLSClient
+ if tlsClient == nil {
+ tlsClient = d.tlsClient
+ }
+ conn = tlsClient(conn, hostname)
+ default:
+ return nil, fmt.Errorf("unexpected websocket scheme: %q", u.Scheme)
+ }
+ if wrap := d.WrapConn; wrap != nil {
+ conn = wrap(conn)
+ }
+ return
+}
+
+func (d Dialer) tlsClient(conn net.Conn, hostname string) net.Conn {
+ config := d.TLSConfig
+ if config == nil {
+ config = tlsDefaultConfig()
+ }
+ if config.ServerName == "" {
+ config = tlsCloneConfig(config)
+ config.ServerName = hostname
+ }
+ // Do not make conn.Handshake() here because downstairs we will prepare
+ // i/o on this conn with proper context's timeout handling.
+ return tls.Client(conn, config)
+}
+
+var (
+ // This variables are set like in net/net.go.
+ // noDeadline is just zero value for readability.
+ noDeadline = time.Time{}
+ // aLongTimeAgo is a non-zero time, far in the past, used for immediate
+ // cancelation of dials.
+ aLongTimeAgo = time.Unix(42, 0)
+)
+
+// Upgrade writes an upgrade request to the given io.ReadWriter conn at given
+// url u and reads a response from it.
+//
+// It is a caller responsibility to manage I/O deadlines on conn.
+//
+// It returns handshake info and some bytes which could be written by the peer
+// right after response and be caught by us during buffered read.
+func (d Dialer) Upgrade(conn io.ReadWriter, u *url.URL) (br *bufio.Reader, hs Handshake, err error) {
+ // headerSeen constants helps to report whether or not some header was seen
+ // during reading request bytes.
+ const (
+ headerSeenUpgrade = 1 << iota
+ headerSeenConnection
+ headerSeenSecAccept
+
+ // headerSeenAll is the value that we expect to receive at the end of
+ // headers read/parse loop.
+ headerSeenAll = 0 |
+ headerSeenUpgrade |
+ headerSeenConnection |
+ headerSeenSecAccept
+ )
+
+ br = pbufio.GetReader(conn,
+ nonZero(d.ReadBufferSize, DefaultClientReadBufferSize),
+ )
+ bw := pbufio.GetWriter(conn,
+ nonZero(d.WriteBufferSize, DefaultClientWriteBufferSize),
+ )
+ defer func() {
+ pbufio.PutWriter(bw)
+ if br.Buffered() == 0 || err != nil {
+ // Server does not wrote additional bytes to the connection or
+ // error occurred. That is, no reason to return buffer.
+ pbufio.PutReader(br)
+ br = nil
+ }
+ }()
+
+ nonce := make([]byte, nonceSize)
+ initNonce(nonce)
+
+ httpWriteUpgradeRequest(bw, u, nonce, d.Protocols, d.Extensions, d.Header)
+ if err = bw.Flush(); err != nil {
+ return
+ }
+
+ // Read HTTP status line like "HTTP/1.1 101 Switching Protocols".
+ sl, err := readLine(br)
+ if err != nil {
+ return
+ }
+ // Begin validation of the response.
+ // See https://tools.ietf.org/html/rfc6455#section-4.2.2
+ // Parse request line data like HTTP version, uri and method.
+ resp, err := httpParseResponseLine(sl)
+ if err != nil {
+ return
+ }
+ // Even if RFC says "1.1 or higher" without mentioning the part of the
+ // version, we apply it only to minor part.
+ if resp.major != 1 || resp.minor < 1 {
+ err = ErrHandshakeBadProtocol
+ return
+ }
+ if resp.status != 101 {
+ err = StatusError(resp.status)
+ if onStatusError := d.OnStatusError; onStatusError != nil {
+ // Invoke callback with multireader of status-line bytes br.
+ onStatusError(resp.status, resp.reason,
+ io.MultiReader(
+ bytes.NewReader(sl),
+ strings.NewReader(crlf),
+ br,
+ ),
+ )
+ }
+ return
+ }
+ // If response status is 101 then we expect all technical headers to be
+ // valid. If not, then we stop processing response without giving user
+ // ability to read non-technical headers. That is, we do not distinguish
+ // technical errors (such as parsing error) and protocol errors.
+ var headerSeen byte
+ for {
+ line, e := readLine(br)
+ if e != nil {
+ err = e
+ return
+ }
+ if len(line) == 0 {
+ // Blank line, no more lines to read.
+ break
+ }
+
+ k, v, ok := httpParseHeaderLine(line)
+ if !ok {
+ err = ErrMalformedResponse
+ return
+ }
+
+ switch btsToString(k) {
+ case headerUpgradeCanonical:
+ headerSeen |= headerSeenUpgrade
+ if !bytes.Equal(v, specHeaderValueUpgrade) && !bytes.EqualFold(v, specHeaderValueUpgrade) {
+ err = ErrHandshakeBadUpgrade
+ return
+ }
+
+ case headerConnectionCanonical:
+ headerSeen |= headerSeenConnection
+ // Note that as RFC6455 says:
+ // > A |Connection| header field with value "Upgrade".
+ // That is, in server side, "Connection" header could contain
+ // multiple token. But in response it must contains exactly one.
+ if !bytes.Equal(v, specHeaderValueConnection) && !bytes.EqualFold(v, specHeaderValueConnection) {
+ err = ErrHandshakeBadConnection
+ return
+ }
+
+ case headerSecAcceptCanonical:
+ headerSeen |= headerSeenSecAccept
+ if !checkAcceptFromNonce(v, nonce) {
+ err = ErrHandshakeBadSecAccept
+ return
+ }
+
+ case headerSecProtocolCanonical:
+ // RFC6455 1.3:
+ // "The server selects one or none of the acceptable protocols
+ // and echoes that value in its handshake to indicate that it has
+ // selected that protocol."
+ for _, want := range d.Protocols {
+ if string(v) == want {
+ hs.Protocol = want
+ break
+ }
+ }
+ if hs.Protocol == "" {
+ // Server echoed subprotocol that is not present in client
+ // requested protocols.
+ err = ErrHandshakeBadSubProtocol
+ return
+ }
+
+ case headerSecExtensionsCanonical:
+ hs.Extensions, err = matchSelectedExtensions(v, d.Extensions, hs.Extensions)
+ if err != nil {
+ return
+ }
+
+ default:
+ if onHeader := d.OnHeader; onHeader != nil {
+ if e := onHeader(k, v); e != nil {
+ err = e
+ return
+ }
+ }
+ }
+ }
+ if err == nil && headerSeen != headerSeenAll {
+ switch {
+ case headerSeen&headerSeenUpgrade == 0:
+ err = ErrHandshakeBadUpgrade
+ case headerSeen&headerSeenConnection == 0:
+ err = ErrHandshakeBadConnection
+ case headerSeen&headerSeenSecAccept == 0:
+ err = ErrHandshakeBadSecAccept
+ default:
+ panic("unknown headers state")
+ }
+ }
+ return
+}
+
+// PutReader returns bufio.Reader instance to the inner reuse pool.
+// It is useful in rare cases when Dialer.Dial() returns a non-nil buffer that
+// contains unprocessed buffered data, sent by the server quickly
+// right after the handshake.
+func PutReader(br *bufio.Reader) {
+ pbufio.PutReader(br)
+}
+
+// StatusError contains an unexpected status-line code from the server.
+type StatusError int
+
+func (s StatusError) Error() string {
+ return "unexpected HTTP response status: " + strconv.Itoa(int(s))
+}
+
+func isTimeoutError(err error) bool {
+ t, ok := err.(net.Error)
+ return ok && t.Timeout()
+}
+
+func matchSelectedExtensions(selected []byte, wanted, received []httphead.Option) ([]httphead.Option, error) {
+ if len(selected) == 0 {
+ return received, nil
+ }
+ var (
+ index int
+ option httphead.Option
+ err error
+ )
+ index = -1
+ match := func() (ok bool) {
+ for _, want := range wanted {
+ if option.Equal(want) {
+ // Check parsed extension to be present in client
+ // requested extensions. We move matched extension
+ // from client list to avoid allocation.
+ received = append(received, want)
+ return true
+ }
+ }
+ return false
+ }
+ ok := httphead.ScanOptions(selected, func(i int, name, attr, val []byte) httphead.Control {
+ if i != index {
+ // Met next option.
+ index = i
+ if i != 0 && !match() {
+ // Server returned non-requested extension.
+ err = ErrHandshakeBadExtensions
+ return httphead.ControlBreak
+ }
+ option = httphead.Option{Name: name}
+ }
+ if attr != nil {
+ option.Parameters.Set(attr, val)
+ }
+ return httphead.ControlContinue
+ })
+ if !ok {
+ err = ErrMalformedResponse
+ return received, err
+ }
+ if !match() {
+ return received, ErrHandshakeBadExtensions
+ }
+ return received, err
+}
+
+// setupContextDeadliner is a helper function that starts connection I/O
+// interrupter goroutine.
+//
+// The started goroutine calls SetDeadline() with a long-ago value when the
+// context expires, to make any I/O operations fail. It returns a done function
+// that stops the started goroutine and maps errors received from conn I/O methods
+// to possible context expiration error.
+//
+// In concern with possible SetDeadline() call inside interrupter goroutine,
+// caller passes pointer to its I/O error (even if it is nil) to done(&err).
+// That is, even if I/O error is nil, context could be already expired and
+// connection "poisoned" by SetDeadline() call. In that case done(&err) will
+// store the ctx.Err() result at *err. If err is not caused by a timeout, it
+// will be left untouched.
+func setupContextDeadliner(ctx context.Context, conn net.Conn) (done func(*error)) {
+ var (
+ quit = make(chan struct{})
+ interrupt = make(chan error, 1)
+ )
+ go func() {
+ select {
+ case <-quit:
+ interrupt <- nil
+ case <-ctx.Done():
+ // Cancel i/o immediately.
+ conn.SetDeadline(aLongTimeAgo)
+ interrupt <- ctx.Err()
+ }
+ }()
+ return func(err *error) {
+ close(quit)
+ // If ctx.Err() is non-nil and the original err is net.Error with
+ // Timeout() == true, then it means that I/O was canceled by us by
+ // SetDeadline(aLongTimeAgo) call, or by somebody else previously
+ // by conn.SetDeadline(x).
+ //
+ // Even on race condition when both deadlines are expired
+ // (SetDeadline() made not by us and context's), we prefer ctx.Err() to
+ // be returned.
+ if ctxErr := <-interrupt; ctxErr != nil && (*err == nil || isTimeoutError(*err)) {
+ *err = ctxErr
+ }
+ }
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer_tls_go17.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer_tls_go17.go
new file mode 100644
index 00000000000..b606e0ad909
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer_tls_go17.go
@@ -0,0 +1,35 @@
+// +build !go1.8
+
+package ws
+
+import "crypto/tls"
+
+func tlsCloneConfig(c *tls.Config) *tls.Config {
+	// NOTE: we copy SessionTicketsDisabled and SessionTicketKey here
+	// without calling the inner c.initOnceServer because we can only get
+ // here from the ws.Dialer code, which is obviously a client and makes
+ // tls.Client() when it gets new net.Conn.
+ return &tls.Config{
+ Rand: c.Rand,
+ Time: c.Time,
+ Certificates: c.Certificates,
+ NameToCertificate: c.NameToCertificate,
+ GetCertificate: c.GetCertificate,
+ RootCAs: c.RootCAs,
+ NextProtos: c.NextProtos,
+ ServerName: c.ServerName,
+ ClientAuth: c.ClientAuth,
+ ClientCAs: c.ClientCAs,
+ InsecureSkipVerify: c.InsecureSkipVerify,
+ CipherSuites: c.CipherSuites,
+ PreferServerCipherSuites: c.PreferServerCipherSuites,
+ SessionTicketsDisabled: c.SessionTicketsDisabled,
+ SessionTicketKey: c.SessionTicketKey,
+ ClientSessionCache: c.ClientSessionCache,
+ MinVersion: c.MinVersion,
+ MaxVersion: c.MaxVersion,
+ CurvePreferences: c.CurvePreferences,
+ DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
+ Renegotiation: c.Renegotiation,
+ }
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer_tls_go18.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer_tls_go18.go
new file mode 100644
index 00000000000..a6704d5173a
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer_tls_go18.go
@@ -0,0 +1,9 @@
+// +build go1.8
+
+package ws
+
+import "crypto/tls"
+
+func tlsCloneConfig(c *tls.Config) *tls.Config {
+ return c.Clone()
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/doc.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/doc.go
new file mode 100644
index 00000000000..c9d5791570c
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/doc.go
@@ -0,0 +1,81 @@
+/*
+Package ws implements a client and server for the WebSocket protocol as
+specified in RFC 6455.
+
+The main purpose of this package is to provide a simple low-level API for
+efficient work with the protocol.
+
+Overview.
+
+Upgrade to WebSocket (or WebSocket handshake) can be done in two ways.
+
+The first way is to use `net/http` server:
+
+ http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ conn, _, _, err := ws.UpgradeHTTP(r, w)
+ })
+
+The second and much more efficient way is the so-called "zero-copy upgrade". It
+avoids redundant allocations and copying of unused headers or other request
+data. The user decides which data should be copied.
+
+ ln, err := net.Listen("tcp", ":8080")
+ if err != nil {
+ // handle error
+ }
+
+ conn, err := ln.Accept()
+ if err != nil {
+ // handle error
+ }
+
+ handshake, err := ws.Upgrade(conn)
+ if err != nil {
+ // handle error
+ }
+
+For customization details see `ws.Upgrader` documentation.
+
+After the WebSocket handshake you can work with the connection in multiple ways.
+That is, `ws` does not force only one way of working with WebSocket:
+
+ header, err := ws.ReadHeader(conn)
+ if err != nil {
+ // handle err
+ }
+
+ buf := make([]byte, header.Length)
+ _, err := io.ReadFull(conn, buf)
+ if err != nil {
+ // handle err
+ }
+
+	frame := ws.NewBinaryFrame([]byte("hello, world!"))
+	if err := ws.WriteFrame(conn, frame); err != nil {
+ // handle err
+ }
+
+As you can see, it is stream friendly:
+
+ const N = 42
+
+ ws.WriteHeader(ws.Header{
+ Fin: true,
+ Length: N,
+ OpCode: ws.OpBinary,
+ })
+
+ io.CopyN(conn, rand.Reader, N)
+
+Or:
+
+ header, err := ws.ReadHeader(conn)
+ if err != nil {
+ // handle err
+ }
+
+ io.CopyN(ioutil.Discard, conn, header.Length)
+
+For more info see the documentation.
+*/
+package ws
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/errors.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/errors.go
new file mode 100644
index 00000000000..48fce3b72c1
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/errors.go
@@ -0,0 +1,54 @@
+package ws
+
+// RejectOption represents an option used to control the way connection is
+// rejected.
+type RejectOption func(*rejectConnectionError)
+
+// RejectionReason returns an option that makes connection to be rejected with
+// given reason.
+func RejectionReason(reason string) RejectOption {
+ return func(err *rejectConnectionError) {
+ err.reason = reason
+ }
+}
+
+// RejectionStatus returns an option that makes connection to be rejected with
+// given HTTP status code.
+func RejectionStatus(code int) RejectOption {
+ return func(err *rejectConnectionError) {
+ err.code = code
+ }
+}
+
+// RejectionHeader returns an option that makes connection to be rejected with
+// given HTTP headers.
+func RejectionHeader(h HandshakeHeader) RejectOption {
+ return func(err *rejectConnectionError) {
+ err.header = h
+ }
+}
+
+// RejectConnectionError constructs an error that could be used to control the way
+// handshake is rejected by Upgrader.
+func RejectConnectionError(options ...RejectOption) error {
+ err := new(rejectConnectionError)
+ for _, opt := range options {
+ opt(err)
+ }
+ return err
+}
+
+// rejectConnectionError represents a rejection of upgrade error.
+//
+// It can be returned by Upgrader's On* hooks to control the way WebSocket
+// handshake is rejected.
+type rejectConnectionError struct {
+ reason string
+ code int
+ header HandshakeHeader
+}
+
+// Error implements error interface.
+func (r *rejectConnectionError) Error() string {
+ return r.reason
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/frame.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/frame.go
new file mode 100644
index 00000000000..f157ee3e9ff
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/frame.go
@@ -0,0 +1,389 @@
+package ws
+
+import (
+ "bytes"
+ "encoding/binary"
+ "math/rand"
+)
+
+// Constants defined by specification.
+const (
+ // All control frames MUST have a payload length of 125 bytes or less and MUST NOT be fragmented.
+ MaxControlFramePayloadSize = 125
+)
+
+// OpCode represents operation code.
+type OpCode byte
+
+// Operation codes defined by specification.
+// See https://tools.ietf.org/html/rfc6455#section-5.2
+const (
+ OpContinuation OpCode = 0x0
+ OpText OpCode = 0x1
+ OpBinary OpCode = 0x2
+ OpClose OpCode = 0x8
+ OpPing OpCode = 0x9
+ OpPong OpCode = 0xa
+)
+
+// IsControl checks whether the c is control operation code.
+// See https://tools.ietf.org/html/rfc6455#section-5.5
+func (c OpCode) IsControl() bool {
+ // RFC6455: Control frames are identified by opcodes where
+ // the most significant bit of the opcode is 1.
+ //
+ // Note that OpCode is only 4 bit length.
+ return c&0x8 != 0
+}
+
+// IsData checks whether the c is data operation code.
+// See https://tools.ietf.org/html/rfc6455#section-5.6
+func (c OpCode) IsData() bool {
+ // RFC6455: Data frames (e.g., non-control frames) are identified by opcodes
+ // where the most significant bit of the opcode is 0.
+ //
+ // Note that OpCode is only 4 bit length.
+ return c&0x8 == 0
+}
+
+// IsReserved checks whether the c is reserved operation code.
+// See https://tools.ietf.org/html/rfc6455#section-5.2
+func (c OpCode) IsReserved() bool {
+ // RFC6455:
+ // %x3-7 are reserved for further non-control frames
+ // %xB-F are reserved for further control frames
+ return (0x3 <= c && c <= 0x7) || (0xb <= c && c <= 0xf)
+}
+
+// StatusCode represents the encoded reason for closure of websocket connection.
+//
+// There are a few helper methods on StatusCode that help to determine the range
+// in which a given code lies, according to ranges defined in the specification.
+//
+// See https://tools.ietf.org/html/rfc6455#section-7.4
+type StatusCode uint16
+
+// StatusCodeRange describes range of StatusCode values.
+type StatusCodeRange struct {
+ Min, Max StatusCode
+}
+
+// Status code ranges defined by specification.
+// See https://tools.ietf.org/html/rfc6455#section-7.4.2
+var (
+ StatusRangeNotInUse = StatusCodeRange{0, 999}
+ StatusRangeProtocol = StatusCodeRange{1000, 2999}
+ StatusRangeApplication = StatusCodeRange{3000, 3999}
+ StatusRangePrivate = StatusCodeRange{4000, 4999}
+)
+
+// Status codes defined by specification.
+// See https://tools.ietf.org/html/rfc6455#section-7.4.1
+const (
+ StatusNormalClosure StatusCode = 1000
+ StatusGoingAway StatusCode = 1001
+ StatusProtocolError StatusCode = 1002
+ StatusUnsupportedData StatusCode = 1003
+ StatusNoMeaningYet StatusCode = 1004
+ StatusInvalidFramePayloadData StatusCode = 1007
+ StatusPolicyViolation StatusCode = 1008
+ StatusMessageTooBig StatusCode = 1009
+ StatusMandatoryExt StatusCode = 1010
+ StatusInternalServerError StatusCode = 1011
+ StatusTLSHandshake StatusCode = 1015
+
+ // StatusAbnormalClosure is a special code designated for use in
+ // applications.
+ StatusAbnormalClosure StatusCode = 1006
+
+ // StatusNoStatusRcvd is a special code designated for use in applications.
+ StatusNoStatusRcvd StatusCode = 1005
+)
+
+// In reports whether the code is defined in given range.
+func (s StatusCode) In(r StatusCodeRange) bool {
+ return r.Min <= s && s <= r.Max
+}
+
+// Empty reports whether the code is empty.
+// An empty code has no meaning, neither as an app-level code nor otherwise.
+// This method is useful just to check that the code is the Go default value 0.
+func (s StatusCode) Empty() bool {
+ return s == 0
+}
+
+// IsNotUsed reports whether the code is predefined in not used range.
+func (s StatusCode) IsNotUsed() bool {
+ return s.In(StatusRangeNotInUse)
+}
+
+// IsApplicationSpec reports whether the code should be defined by
+// application, framework or libraries specification.
+func (s StatusCode) IsApplicationSpec() bool {
+ return s.In(StatusRangeApplication)
+}
+
+// IsPrivateSpec reports whether the code should be defined privately.
+func (s StatusCode) IsPrivateSpec() bool {
+ return s.In(StatusRangePrivate)
+}
+
+// IsProtocolSpec reports whether the code should be defined by protocol specification.
+func (s StatusCode) IsProtocolSpec() bool {
+ return s.In(StatusRangeProtocol)
+}
+
+// IsProtocolDefined reports whether the code is already defined by protocol specification.
+func (s StatusCode) IsProtocolDefined() bool {
+ switch s {
+ case StatusNormalClosure,
+ StatusGoingAway,
+ StatusProtocolError,
+ StatusUnsupportedData,
+ StatusInvalidFramePayloadData,
+ StatusPolicyViolation,
+ StatusMessageTooBig,
+ StatusMandatoryExt,
+ StatusInternalServerError,
+ StatusNoStatusRcvd,
+ StatusAbnormalClosure,
+ StatusTLSHandshake:
+ return true
+ }
+ return false
+}
+
+// IsProtocolReserved reports whether the code is defined by protocol specification
+// to be reserved only for application usage purpose.
+func (s StatusCode) IsProtocolReserved() bool {
+ switch s {
+ // [RFC6455]: {1005,1006,1015} is a reserved value and MUST NOT be set as a status code in a
+ // Close control frame by an endpoint.
+ case StatusNoStatusRcvd, StatusAbnormalClosure, StatusTLSHandshake:
+ return true
+ default:
+ return false
+ }
+}
+
+// Compiled control frames for common use cases.
+// For construct-serialize optimizations.
+var (
+ CompiledPing = MustCompileFrame(NewPingFrame(nil))
+ CompiledPong = MustCompileFrame(NewPongFrame(nil))
+ CompiledClose = MustCompileFrame(NewCloseFrame(nil))
+
+ CompiledCloseNormalClosure = MustCompileFrame(closeFrameNormalClosure)
+ CompiledCloseGoingAway = MustCompileFrame(closeFrameGoingAway)
+ CompiledCloseProtocolError = MustCompileFrame(closeFrameProtocolError)
+ CompiledCloseUnsupportedData = MustCompileFrame(closeFrameUnsupportedData)
+ CompiledCloseNoMeaningYet = MustCompileFrame(closeFrameNoMeaningYet)
+ CompiledCloseInvalidFramePayloadData = MustCompileFrame(closeFrameInvalidFramePayloadData)
+ CompiledClosePolicyViolation = MustCompileFrame(closeFramePolicyViolation)
+ CompiledCloseMessageTooBig = MustCompileFrame(closeFrameMessageTooBig)
+ CompiledCloseMandatoryExt = MustCompileFrame(closeFrameMandatoryExt)
+ CompiledCloseInternalServerError = MustCompileFrame(closeFrameInternalServerError)
+ CompiledCloseTLSHandshake = MustCompileFrame(closeFrameTLSHandshake)
+)
+
+// Header represents websocket frame header.
+// See https://tools.ietf.org/html/rfc6455#section-5.2
+type Header struct {
+ Fin bool
+ Rsv byte
+ OpCode OpCode
+ Masked bool
+ Mask [4]byte
+ Length int64
+}
+
+// Rsv1 reports whether the header has first rsv bit set.
+func (h Header) Rsv1() bool { return h.Rsv&bit5 != 0 }
+
+// Rsv2 reports whether the header has second rsv bit set.
+func (h Header) Rsv2() bool { return h.Rsv&bit6 != 0 }
+
+// Rsv3 reports whether the header has third rsv bit set.
+func (h Header) Rsv3() bool { return h.Rsv&bit7 != 0 }
+
+// Frame represents websocket frame.
+// See https://tools.ietf.org/html/rfc6455#section-5.2
+type Frame struct {
+ Header Header
+ Payload []byte
+}
+
+// NewFrame creates frame with given operation code,
+// flag of completeness and payload bytes.
+func NewFrame(op OpCode, fin bool, p []byte) Frame {
+ return Frame{
+ Header: Header{
+ Fin: fin,
+ OpCode: op,
+ Length: int64(len(p)),
+ },
+ Payload: p,
+ }
+}
+
+// NewTextFrame creates text frame with p as payload.
+// Note that p is not copied.
+func NewTextFrame(p []byte) Frame {
+ return NewFrame(OpText, true, p)
+}
+
+// NewBinaryFrame creates binary frame with p as payload.
+// Note that p is not copied.
+func NewBinaryFrame(p []byte) Frame {
+ return NewFrame(OpBinary, true, p)
+}
+
+// NewPingFrame creates ping frame with p as payload.
+// Note that p is not copied.
+// Note that p must have length of MaxControlFramePayloadSize bytes or less due
+// to RFC.
+func NewPingFrame(p []byte) Frame {
+ return NewFrame(OpPing, true, p)
+}
+
+// NewPongFrame creates pong frame with p as payload.
+// Note that p is not copied.
+// Note that p must have length of MaxControlFramePayloadSize bytes or less due
+// to RFC.
+func NewPongFrame(p []byte) Frame {
+ return NewFrame(OpPong, true, p)
+}
+
+// NewCloseFrame creates close frame with given close body.
+// Note that p is not copied.
+// Note that p must have length of MaxControlFramePayloadSize bytes or less due
+// to RFC.
+func NewCloseFrame(p []byte) Frame {
+ return NewFrame(OpClose, true, p)
+}
+
+// NewCloseFrameBody encodes a closure code and a reason into a binary
+// representation.
+//
+// It returns slice which is at most MaxControlFramePayloadSize bytes length.
+// If the reason is too big it will be cropped to fit the limit defined by the
+// spec.
+//
+// See https://tools.ietf.org/html/rfc6455#section-5.5
+func NewCloseFrameBody(code StatusCode, reason string) []byte {
+ n := min(2+len(reason), MaxControlFramePayloadSize)
+ p := make([]byte, n)
+
+ crop := min(MaxControlFramePayloadSize-2, len(reason))
+ PutCloseFrameBody(p, code, reason[:crop])
+
+ return p
+}
+
+// PutCloseFrameBody encodes code and reason into buf.
+//
+// It will panic if the buffer is too small to accommodate a code or a reason.
+//
+// PutCloseFrameBody does not check buffer to be RFC compliant, but note that
+// by RFC it must be at most MaxControlFramePayloadSize.
+func PutCloseFrameBody(p []byte, code StatusCode, reason string) {
+ _ = p[1+len(reason)]
+ binary.BigEndian.PutUint16(p, uint16(code))
+ copy(p[2:], reason)
+}
+
+// MaskFrame masks frame and returns frame with masked payload and Mask header's field set.
+// Note that it copies f payload to prevent collisions.
+// For less allocations you could use MaskFrameInPlace or construct frame manually.
+func MaskFrame(f Frame) Frame {
+ return MaskFrameWith(f, NewMask())
+}
+
+// MaskFrameWith masks frame with given mask and returns frame
+// with masked payload and Mask header's field set.
+// Note that it copies f payload to prevent collisions.
+// For less allocations you could use MaskFrameInPlaceWith or construct frame manually.
+func MaskFrameWith(f Frame, mask [4]byte) Frame {
+ // TODO(gobwas): check CopyCipher ws copy() Cipher().
+ p := make([]byte, len(f.Payload))
+ copy(p, f.Payload)
+ f.Payload = p
+ return MaskFrameInPlaceWith(f, mask)
+}
+
+// MaskFrameInPlace masks frame and returns frame with masked payload and Mask
+// header's field set.
+// Note that it applies xor cipher to f.Payload without copying, that is, it
+// modifies f.Payload inplace.
+func MaskFrameInPlace(f Frame) Frame {
+ return MaskFrameInPlaceWith(f, NewMask())
+}
+
+// MaskFrameInPlaceWith masks frame with given mask and returns frame
+// with masked payload and Mask header's field set.
+// Note that it applies xor cipher to f.Payload without copying, that is, it
+// modifies f.Payload inplace.
+func MaskFrameInPlaceWith(f Frame, m [4]byte) Frame {
+ f.Header.Masked = true
+ f.Header.Mask = m
+ Cipher(f.Payload, m, 0)
+ return f
+}
+
+// NewMask creates new random mask.
+func NewMask() (ret [4]byte) {
+ binary.BigEndian.PutUint32(ret[:], rand.Uint32())
+ return
+}
+
+// CompileFrame returns byte representation of given frame.
+// In terms of memory consumption it is useful to precompile static frames
+// which are often used.
+func CompileFrame(f Frame) (bts []byte, err error) {
+ buf := bytes.NewBuffer(make([]byte, 0, 16))
+ err = WriteFrame(buf, f)
+ bts = buf.Bytes()
+ return
+}
+
+// MustCompileFrame is like CompileFrame but panics if frame can not be
+// encoded.
+func MustCompileFrame(f Frame) []byte {
+ bts, err := CompileFrame(f)
+ if err != nil {
+ panic(err)
+ }
+ return bts
+}
+
+// Rsv creates rsv byte representation.
+func Rsv(r1, r2, r3 bool) (rsv byte) {
+ if r1 {
+ rsv |= bit5
+ }
+ if r2 {
+ rsv |= bit6
+ }
+ if r3 {
+ rsv |= bit7
+ }
+ return rsv
+}
+
+func makeCloseFrame(code StatusCode) Frame {
+ return NewCloseFrame(NewCloseFrameBody(code, ""))
+}
+
+var (
+ closeFrameNormalClosure = makeCloseFrame(StatusNormalClosure)
+ closeFrameGoingAway = makeCloseFrame(StatusGoingAway)
+ closeFrameProtocolError = makeCloseFrame(StatusProtocolError)
+ closeFrameUnsupportedData = makeCloseFrame(StatusUnsupportedData)
+ closeFrameNoMeaningYet = makeCloseFrame(StatusNoMeaningYet)
+ closeFrameInvalidFramePayloadData = makeCloseFrame(StatusInvalidFramePayloadData)
+ closeFramePolicyViolation = makeCloseFrame(StatusPolicyViolation)
+ closeFrameMessageTooBig = makeCloseFrame(StatusMessageTooBig)
+ closeFrameMandatoryExt = makeCloseFrame(StatusMandatoryExt)
+ closeFrameInternalServerError = makeCloseFrame(StatusInternalServerError)
+ closeFrameTLSHandshake = makeCloseFrame(StatusTLSHandshake)
+)
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/http.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/http.go
new file mode 100644
index 00000000000..e18df441b47
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/http.go
@@ -0,0 +1,468 @@
+package ws
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "strconv"
+
+ "github.com/gobwas/httphead"
+)
+
+const (
+ crlf = "\r\n"
+ colonAndSpace = ": "
+ commaAndSpace = ", "
+)
+
+const (
+ textHeadUpgrade = "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\n"
+)
+
+var (
+ textHeadBadRequest = statusText(http.StatusBadRequest)
+ textHeadInternalServerError = statusText(http.StatusInternalServerError)
+ textHeadUpgradeRequired = statusText(http.StatusUpgradeRequired)
+
+ textTailErrHandshakeBadProtocol = errorText(ErrHandshakeBadProtocol)
+ textTailErrHandshakeBadMethod = errorText(ErrHandshakeBadMethod)
+ textTailErrHandshakeBadHost = errorText(ErrHandshakeBadHost)
+ textTailErrHandshakeBadUpgrade = errorText(ErrHandshakeBadUpgrade)
+ textTailErrHandshakeBadConnection = errorText(ErrHandshakeBadConnection)
+ textTailErrHandshakeBadSecAccept = errorText(ErrHandshakeBadSecAccept)
+ textTailErrHandshakeBadSecKey = errorText(ErrHandshakeBadSecKey)
+ textTailErrHandshakeBadSecVersion = errorText(ErrHandshakeBadSecVersion)
+ textTailErrUpgradeRequired = errorText(ErrHandshakeUpgradeRequired)
+)
+
+var (
+ headerHost = "Host"
+ headerUpgrade = "Upgrade"
+ headerConnection = "Connection"
+ headerSecVersion = "Sec-WebSocket-Version"
+ headerSecProtocol = "Sec-WebSocket-Protocol"
+ headerSecExtensions = "Sec-WebSocket-Extensions"
+ headerSecKey = "Sec-WebSocket-Key"
+ headerSecAccept = "Sec-WebSocket-Accept"
+
+ headerHostCanonical = textproto.CanonicalMIMEHeaderKey(headerHost)
+ headerUpgradeCanonical = textproto.CanonicalMIMEHeaderKey(headerUpgrade)
+ headerConnectionCanonical = textproto.CanonicalMIMEHeaderKey(headerConnection)
+ headerSecVersionCanonical = textproto.CanonicalMIMEHeaderKey(headerSecVersion)
+ headerSecProtocolCanonical = textproto.CanonicalMIMEHeaderKey(headerSecProtocol)
+ headerSecExtensionsCanonical = textproto.CanonicalMIMEHeaderKey(headerSecExtensions)
+ headerSecKeyCanonical = textproto.CanonicalMIMEHeaderKey(headerSecKey)
+ headerSecAcceptCanonical = textproto.CanonicalMIMEHeaderKey(headerSecAccept)
+)
+
+var (
+ specHeaderValueUpgrade = []byte("websocket")
+ specHeaderValueConnection = []byte("Upgrade")
+ specHeaderValueConnectionLower = []byte("upgrade")
+ specHeaderValueSecVersion = []byte("13")
+)
+
+var (
+ httpVersion1_0 = []byte("HTTP/1.0")
+ httpVersion1_1 = []byte("HTTP/1.1")
+ httpVersionPrefix = []byte("HTTP/")
+)
+
+type httpRequestLine struct {
+ method, uri []byte
+ major, minor int
+}
+
+type httpResponseLine struct {
+ major, minor int
+ status int
+ reason []byte
+}
+
+// httpParseRequestLine parses http request line like "GET / HTTP/1.0".
+func httpParseRequestLine(line []byte) (req httpRequestLine, err error) {
+ var proto []byte
+ req.method, req.uri, proto = bsplit3(line, ' ')
+
+ var ok bool
+ req.major, req.minor, ok = httpParseVersion(proto)
+ if !ok {
+ err = ErrMalformedRequest
+ return
+ }
+
+ return
+}
+
+func httpParseResponseLine(line []byte) (resp httpResponseLine, err error) {
+ var (
+ proto []byte
+ status []byte
+ )
+ proto, status, resp.reason = bsplit3(line, ' ')
+
+ var ok bool
+ resp.major, resp.minor, ok = httpParseVersion(proto)
+ if !ok {
+ return resp, ErrMalformedResponse
+ }
+
+ var convErr error
+ resp.status, convErr = asciiToInt(status)
+ if convErr != nil {
+ return resp, ErrMalformedResponse
+ }
+
+ return resp, nil
+}
+
+// httpParseVersion parses major and minor version of HTTP protocol. It returns
+// parsed values and true if parse is ok.
+func httpParseVersion(bts []byte) (major, minor int, ok bool) {
+ switch {
+ case bytes.Equal(bts, httpVersion1_0):
+ return 1, 0, true
+ case bytes.Equal(bts, httpVersion1_1):
+ return 1, 1, true
+ case len(bts) < 8:
+ return
+ case !bytes.Equal(bts[:5], httpVersionPrefix):
+ return
+ }
+
+ bts = bts[5:]
+
+ dot := bytes.IndexByte(bts, '.')
+ if dot == -1 {
+ return
+ }
+ var err error
+ major, err = asciiToInt(bts[:dot])
+ if err != nil {
+ return
+ }
+ minor, err = asciiToInt(bts[dot+1:])
+ if err != nil {
+ return
+ }
+
+ return major, minor, true
+}
+
+// httpParseHeaderLine parses an HTTP header as a key-value pair. It returns
+// the parsed values and true if parsing succeeded.
+func httpParseHeaderLine(line []byte) (k, v []byte, ok bool) {
+ colon := bytes.IndexByte(line, ':')
+ if colon == -1 {
+ return
+ }
+
+ k = btrim(line[:colon])
+ // TODO(gobwas): maybe use just lower here?
+ canonicalizeHeaderKey(k)
+
+ v = btrim(line[colon+1:])
+
+ return k, v, true
+}
+
+// httpGetHeader is the same as textproto.MIMEHeader.Get, except that the key
+// is already canonical. This helps to increase performance.
+func httpGetHeader(h http.Header, key string) string {
+ if h == nil {
+ return ""
+ }
+ v := h[key]
+ if len(v) == 0 {
+ return ""
+ }
+ return v[0]
+}
+
+// The request MAY include a header field with the name
+// |Sec-WebSocket-Protocol|. If present, this value indicates one or more
+// comma-separated subprotocol the client wishes to speak, ordered by
+// preference. The elements that comprise this value MUST be non-empty strings
+// with characters in the range U+0021 to U+007E not including separator
+// characters as defined in [RFC2616] and MUST all be unique strings. The ABNF
+// for the value of this header field is 1#token, where the definitions of
+// constructs and rules are as given in [RFC2616].
+func strSelectProtocol(h string, check func(string) bool) (ret string, ok bool) {
+ ok = httphead.ScanTokens(strToBytes(h), func(v []byte) bool {
+ if check(btsToString(v)) {
+ ret = string(v)
+ return false
+ }
+ return true
+ })
+ return
+}
+func btsSelectProtocol(h []byte, check func([]byte) bool) (ret string, ok bool) {
+ var selected []byte
+ ok = httphead.ScanTokens(h, func(v []byte) bool {
+ if check(v) {
+ selected = v
+ return false
+ }
+ return true
+ })
+ if ok && selected != nil {
+ return string(selected), true
+ }
+ return
+}
+
+func strSelectExtensions(h string, selected []httphead.Option, check func(httphead.Option) bool) ([]httphead.Option, bool) {
+ return btsSelectExtensions(strToBytes(h), selected, check)
+}
+
+func btsSelectExtensions(h []byte, selected []httphead.Option, check func(httphead.Option) bool) ([]httphead.Option, bool) {
+ s := httphead.OptionSelector{
+ Flags: httphead.SelectUnique | httphead.SelectCopy,
+ Check: check,
+ }
+ return s.Select(h, selected)
+}
+
+func httpWriteHeader(bw *bufio.Writer, key, value string) {
+ httpWriteHeaderKey(bw, key)
+ bw.WriteString(value)
+ bw.WriteString(crlf)
+}
+
+func httpWriteHeaderBts(bw *bufio.Writer, key string, value []byte) {
+ httpWriteHeaderKey(bw, key)
+ bw.Write(value)
+ bw.WriteString(crlf)
+}
+
+func httpWriteHeaderKey(bw *bufio.Writer, key string) {
+ bw.WriteString(key)
+ bw.WriteString(colonAndSpace)
+}
+
// httpWriteUpgradeRequest writes a complete WebSocket upgrade request to bw:
// the GET request line, the mandatory Host/Upgrade/Connection/Sec-WebSocket-*
// headers, optional subprotocols and extensions, any caller-supplied header,
// and the terminating blank line. It does not flush bw.
func httpWriteUpgradeRequest(
	bw *bufio.Writer,
	u *url.URL,
	nonce []byte,
	protocols []string,
	extensions []httphead.Option,
	header HandshakeHeader,
) {
	bw.WriteString("GET ")
	bw.WriteString(u.RequestURI())
	bw.WriteString(" HTTP/1.1\r\n")

	httpWriteHeader(bw, headerHost, u.Host)

	httpWriteHeaderBts(bw, headerUpgrade, specHeaderValueUpgrade)
	httpWriteHeaderBts(bw, headerConnection, specHeaderValueConnection)
	httpWriteHeaderBts(bw, headerSecVersion, specHeaderValueSecVersion)

	// NOTE: write nonce bytes as a string to prevent heap allocation –
	// WriteString() copy given string into its inner buffer, unlike Write()
	// which may write p directly to the underlying io.Writer – which in turn
	// will lead to p escape.
	httpWriteHeader(bw, headerSecKey, btsToString(nonce))

	if len(protocols) > 0 {
		// Emit all requested subprotocols as a single comma-separated header.
		httpWriteHeaderKey(bw, headerSecProtocol)
		for i, p := range protocols {
			if i > 0 {
				bw.WriteString(commaAndSpace)
			}
			bw.WriteString(p)
		}
		bw.WriteString(crlf)
	}

	if len(extensions) > 0 {
		httpWriteHeaderKey(bw, headerSecExtensions)
		httphead.WriteOptions(bw, extensions)
		bw.WriteString(crlf)
	}

	// Caller-provided extra headers, if any.
	if header != nil {
		header.WriteTo(bw)
	}

	// Blank line terminating the header block.
	bw.WriteString(crlf)
}
+
// httpWriteResponseUpgrade writes a complete 101 Switching Protocols response
// to bw: the fixed status head (textHeadUpgrade), the Sec-WebSocket-Accept
// value computed from nonce, the negotiated protocol/extensions from hs, any
// custom header callback output, and the terminating blank line. It does not
// flush bw.
func httpWriteResponseUpgrade(bw *bufio.Writer, nonce []byte, hs Handshake, header HandshakeHeaderFunc) {
	bw.WriteString(textHeadUpgrade)

	httpWriteHeaderKey(bw, headerSecAccept)
	writeAccept(bw, nonce)
	bw.WriteString(crlf)

	if hs.Protocol != "" {
		httpWriteHeader(bw, headerSecProtocol, hs.Protocol)
	}
	if len(hs.Extensions) > 0 {
		httpWriteHeaderKey(bw, headerSecExtensions)
		httphead.WriteOptions(bw, hs.Extensions)
		bw.WriteString(crlf)
	}
	if header != nil {
		header(bw)
	}

	// Blank line terminating the header block.
	bw.WriteString(crlf)
}
+
// httpWriteResponseError writes an HTTP error response for a failed
// handshake: a status head selected by code, optional custom headers, and a
// body selected by err. Known handshake errors map to precomputed head+body
// constants; a nil err produces no body; any other error is serialized via
// writeErrorText. It does not flush bw.
func httpWriteResponseError(bw *bufio.Writer, err error, code int, header HandshakeHeaderFunc) {
	// Common status lines are precomputed constants; anything else is
	// rendered on the fly.
	switch code {
	case http.StatusBadRequest:
		bw.WriteString(textHeadBadRequest)
	case http.StatusInternalServerError:
		bw.WriteString(textHeadInternalServerError)
	case http.StatusUpgradeRequired:
		bw.WriteString(textHeadUpgradeRequired)
	default:
		writeStatusText(bw, code)
	}

	// Write custom headers.
	if header != nil {
		header(bw)
	}

	// Well-known handshake errors have precomputed tails (headers + body).
	switch err {
	case ErrHandshakeBadProtocol:
		bw.WriteString(textTailErrHandshakeBadProtocol)
	case ErrHandshakeBadMethod:
		bw.WriteString(textTailErrHandshakeBadMethod)
	case ErrHandshakeBadHost:
		bw.WriteString(textTailErrHandshakeBadHost)
	case ErrHandshakeBadUpgrade:
		bw.WriteString(textTailErrHandshakeBadUpgrade)
	case ErrHandshakeBadConnection:
		bw.WriteString(textTailErrHandshakeBadConnection)
	case ErrHandshakeBadSecAccept:
		bw.WriteString(textTailErrHandshakeBadSecAccept)
	case ErrHandshakeBadSecKey:
		bw.WriteString(textTailErrHandshakeBadSecKey)
	case ErrHandshakeBadSecVersion:
		bw.WriteString(textTailErrHandshakeBadSecVersion)
	case ErrHandshakeUpgradeRequired:
		bw.WriteString(textTailErrUpgradeRequired)
	case nil:
		// No body: just terminate the header block.
		bw.WriteString(crlf)
	default:
		writeErrorText(bw, err)
	}
}
+
+func writeStatusText(bw *bufio.Writer, code int) {
+ bw.WriteString("HTTP/1.1 ")
+ bw.WriteString(strconv.Itoa(code))
+ bw.WriteByte(' ')
+ bw.WriteString(http.StatusText(code))
+ bw.WriteString(crlf)
+ bw.WriteString("Content-Type: text/plain; charset=utf-8")
+ bw.WriteString(crlf)
+}
+
+func writeErrorText(bw *bufio.Writer, err error) {
+ body := err.Error()
+ bw.WriteString("Content-Length: ")
+ bw.WriteString(strconv.Itoa(len(body)))
+ bw.WriteString(crlf)
+ bw.WriteString(crlf)
+ bw.WriteString(body)
+}
+
+// httpError is like the http.Error with WebSocket context exception.
+func httpError(w http.ResponseWriter, body string, code int) {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.Header().Set("Content-Length", strconv.Itoa(len(body)))
+ w.WriteHeader(code)
+ w.Write([]byte(body))
+}
+
// statusText renders the response head produced by writeStatusText for code
// as a string. It is a non-performant status text generator.
// NOTE: Used only to generate constants; not intended for request paths.
func statusText(code int) string {
	var buf bytes.Buffer
	bw := bufio.NewWriter(&buf)
	writeStatusText(bw, code)
	bw.Flush()
	return buf.String()
}
+
// errorText renders the response tail produced by writeErrorText for err as
// a string. It is a non-performant error text generator.
// NOTE: Used only to generate constants; not intended for request paths.
func errorText(err error) string {
	var buf bytes.Buffer
	bw := bufio.NewWriter(&buf)
	writeErrorText(bw, err)
	bw.Flush()
	return buf.String()
}
+
// HandshakeHeader is the interface that writes both upgrade request or
// response headers into a given io.Writer. Implementations must emit
// complete, CRLF-terminated header lines.
type HandshakeHeader interface {
	io.WriterTo
}
+
+// HandshakeHeaderString is an adapter to allow the use of headers represented
+// by ordinary string as HandshakeHeader.
+type HandshakeHeaderString string
+
+// WriteTo implements HandshakeHeader (and io.WriterTo) interface.
+func (s HandshakeHeaderString) WriteTo(w io.Writer) (int64, error) {
+ n, err := io.WriteString(w, string(s))
+ return int64(n), err
+}
+
+// HandshakeHeaderBytes is an adapter to allow the use of headers represented
+// by ordinary slice of bytes as HandshakeHeader.
+type HandshakeHeaderBytes []byte
+
+// WriteTo implements HandshakeHeader (and io.WriterTo) interface.
+func (b HandshakeHeaderBytes) WriteTo(w io.Writer) (int64, error) {
+ n, err := w.Write(b)
+ return int64(n), err
+}
+
// HandshakeHeaderFunc is an adapter to allow the use of headers represented by
// ordinary function as HandshakeHeader. The function receives the connection
// writer and must report the number of bytes it wrote.
type HandshakeHeaderFunc func(io.Writer) (int64, error)

// WriteTo implements HandshakeHeader (and io.WriterTo) interface.
func (f HandshakeHeaderFunc) WriteTo(w io.Writer) (int64, error) {
	return f(w)
}
+
// HandshakeHeaderHTTP is an adapter to allow the use of http.Header as
// HandshakeHeader.
type HandshakeHeaderHTTP http.Header

// WriteTo implements HandshakeHeader (and io.WriterTo) interface.
// http.Header.Write does not report a byte count, so w is wrapped in the
// counting writer type below to recover it.
func (h HandshakeHeaderHTTP) WriteTo(w io.Writer) (int64, error) {
	wr := writer{w: w}
	err := http.Header(h).Write(&wr)
	return wr.n, err
}
+
+type writer struct {
+ n int64
+ w io.Writer
+}
+
+func (w *writer) WriteString(s string) (int, error) {
+ n, err := io.WriteString(w.w, s)
+ w.n += int64(n)
+ return n, err
+}
+
+func (w *writer) Write(p []byte) (int, error) {
+ n, err := w.w.Write(p)
+ w.n += int64(n)
+ return n, err
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/nonce.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/nonce.go
new file mode 100644
index 00000000000..e694da7c308
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/nonce.go
@@ -0,0 +1,80 @@
+package ws
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/sha1"
+ "encoding/base64"
+ "fmt"
+ "math/rand"
+)
+
// Sizes (in base64-encoded bytes) of the Sec-WebSocket-Key nonce and the
// Sec-WebSocket-Accept value.
const (
	// RFC6455: The value of this header field MUST be a nonce consisting of a
	// randomly selected 16-byte value that has been base64-encoded (see
	// Section 4 of [RFC4648]). The nonce MUST be selected randomly for each
	// connection.
	nonceKeySize = 16
	nonceSize    = 24 // base64.StdEncoding.EncodedLen(nonceKeySize)

	// RFC6455: The value of this header field is constructed by concatenating
	// /key/, defined above in step 4 in Section 4.2.2, with the string
	// "258EAFA5- E914-47DA-95CA-C5AB0DC85B11", taking the SHA-1 hash of this
	// concatenated value to obtain a 20-byte value and base64- encoding (see
	// Section 4 of [RFC4648]) this 20-byte hash.
	acceptSize = 28 // base64.StdEncoding.EncodedLen(sha1.Size)
)
+
// initNonce fills given slice with random base64-encoded nonce bytes.
// dst must be at least nonceSize (24) bytes long.
//
// NOTE(review): this uses math/rand, not crypto/rand — acceptable here since
// RFC 6455 requires the nonce only to be unpredictable for cache busting,
// not cryptographically secure; confirm that is the intent.
func initNonce(dst []byte) {
	// NOTE: bts does not escape.
	bts := make([]byte, nonceKeySize)
	if _, err := rand.Read(bts); err != nil {
		panic(fmt.Sprintf("rand read error: %s", err))
	}
	base64.StdEncoding.Encode(dst, bts)
}
+
// checkAcceptFromNonce reports whether given accept bytes are valid for given
// nonce bytes, by recomputing the expected Sec-WebSocket-Accept value and
// comparing.
func checkAcceptFromNonce(accept, nonce []byte) bool {
	if len(accept) != acceptSize {
		return false
	}
	// NOTE: expect does not escape.
	expect := make([]byte, acceptSize)
	initAcceptFromNonce(expect, nonce)
	return bytes.Equal(expect, accept)
}
+
+// initAcceptFromNonce fills given slice with accept bytes generated from given
+// nonce bytes. Given buffer should be exactly acceptSize bytes.
+func initAcceptFromNonce(accept, nonce []byte) {
+ const magic = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
+
+ if len(accept) != acceptSize {
+ panic("accept buffer is invalid")
+ }
+ if len(nonce) != nonceSize {
+ panic("nonce is invalid")
+ }
+
+ p := make([]byte, nonceSize+len(magic))
+ copy(p[:nonceSize], nonce)
+ copy(p[nonceSize:], magic)
+
+ sum := sha1.Sum(p)
+ base64.StdEncoding.Encode(accept, sum[:])
+
+ return
+}
+
// writeAccept computes the Sec-WebSocket-Accept value for nonce and writes it
// to bw, returning the number of bytes written and any write error.
func writeAccept(bw *bufio.Writer, nonce []byte) (int, error) {
	accept := make([]byte, acceptSize)
	initAcceptFromNonce(accept, nonce)
	// NOTE: write accept bytes as a string to prevent heap allocation –
	// WriteString() copy given string into its inner buffer, unlike Write()
	// which may write p directly to the underlying io.Writer – which in turn
	// will lead to p escape.
	return bw.WriteString(btsToString(accept))
}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/read.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/read.go
new file mode 100644
index 00000000000..bc653e4690f
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/read.go
@@ -0,0 +1,147 @@
+package ws
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+)
+
+// Errors used by frame reader.
+var (
+ ErrHeaderLengthMSB = fmt.Errorf("header error: the most significant bit must be 0")
+ ErrHeaderLengthUnexpected = fmt.Errorf("header error: unexpected payload length bits")
+)
+
// ReadHeader reads a frame header from r.
//
// It performs at most two reads: one for the fixed 2-byte prefix and, when
// needed, one for the variable part (extended payload length and/or mask).
func ReadHeader(r io.Reader) (h Header, err error) {
	// Make slice of bytes with capacity 12 that could hold any header.
	//
	// The maximum header size is 14, but due to the 2 hop reads,
	// after first hop that reads first 2 constant bytes, we could reuse 2 bytes.
	// So 14 - 2 = 12.
	bts := make([]byte, 2, MaxHeaderSize-2)

	// Prepare to hold first 2 bytes to choose size of next read.
	_, err = io.ReadFull(r, bts)
	if err != nil {
		return
	}

	// First byte: FIN flag (high bit), RSV1-3 bits, and opcode (low nibble).
	h.Fin = bts[0]&bit0 != 0
	h.Rsv = (bts[0] & 0x70) >> 4
	h.OpCode = OpCode(bts[0] & 0x0f)

	// extra counts how many more bytes the variable header part occupies.
	var extra int

	if bts[1]&bit0 != 0 {
		h.Masked = true
		extra += 4
	}

	// Low 7 bits of the second byte encode either the payload length itself
	// (< 126) or a marker (126/127) selecting a 16- or 64-bit extended field.
	length := bts[1] & 0x7f
	switch {
	case length < 126:
		h.Length = int64(length)

	case length == 126:
		extra += 2

	case length == 127:
		extra += 8

	default:
		// Unreachable for a 7-bit value (the cases above cover 0-127);
		// kept as a defensive safety net.
		err = ErrHeaderLengthUnexpected
		return
	}

	if extra == 0 {
		return
	}

	// Increase len of bts to extra bytes need to read.
	// Overwrite first 2 bytes that was read before.
	bts = bts[:extra]
	_, err = io.ReadFull(r, bts)
	if err != nil {
		return
	}

	switch {
	case length == 126:
		h.Length = int64(binary.BigEndian.Uint16(bts[:2]))
		bts = bts[2:]

	case length == 127:
		// RFC6455 requires the most significant bit of the 64-bit length to
		// be zero; this check also guarantees the value fits into int64.
		if bts[0]&0x80 != 0 {
			err = ErrHeaderLengthMSB
			return
		}
		h.Length = int64(binary.BigEndian.Uint64(bts[:8]))
		bts = bts[8:]
	}

	// Whatever remains of the variable part is the 4-byte masking key.
	if h.Masked {
		copy(h.Mask[:], bts)
	}

	return
}
+
// ReadFrame reads a frame from r.
// It is not designed for high optimized use case cause it makes allocation
// for frame.Header.Length size inside to read frame payload into.
//
// Note that ReadFrame does not unmask payload.
func ReadFrame(r io.Reader) (f Frame, err error) {
	f.Header, err = ReadHeader(r)
	if err != nil {
		return
	}

	if f.Header.Length > 0 {
		// int(f.Header.Length) is safe here cause we have
		// checked it for overflow above in ReadHeader.
		//
		// NOTE(review): the MSB check in ReadHeader only guarantees the
		// value fits int64; on 32-bit platforms int is 32 bits, so a length
		// above 2GiB would truncate here — confirm whether that is a concern.
		f.Payload = make([]byte, int(f.Header.Length))
		_, err = io.ReadFull(r, f.Payload)
	}

	return
}
+
// MustReadFrame is like ReadFrame but panics if frame can not be read.
// Intended for tests and short programs where an I/O failure is fatal.
func MustReadFrame(r io.Reader) Frame {
	f, err := ReadFrame(r)
	if err != nil {
		panic(err)
	}
	return f
}
+
+// ParseCloseFrameData parses close frame status code and closure reason if any provided.
+// If there is no status code in the payload
+// the empty status code is returned (code.Empty()) with empty string as a reason.
+func ParseCloseFrameData(payload []byte) (code StatusCode, reason string) {
+ if len(payload) < 2 {
+ // We returning empty StatusCode here, preventing the situation
+ // when endpoint really sent code 1005 and we should return ProtocolError on that.
+ //
+ // In other words, we ignoring this rule [RFC6455:7.1.5]:
+ // If this Close control frame contains no status code, _The WebSocket
+ // Connection Close Code_ is considered to be 1005.
+ return
+ }
+ code = StatusCode(binary.BigEndian.Uint16(payload))
+ reason = string(payload[2:])
+ return
+}
+
// ParseCloseFrameDataUnsafe is like ParseCloseFrameData except the thing
// that it does not copies payload bytes into reason, but prepares unsafe cast.
// The returned reason aliases payload's memory: the caller must not modify or
// reuse payload while reason is still in use.
func ParseCloseFrameDataUnsafe(payload []byte) (code StatusCode, reason string) {
	if len(payload) < 2 {
		return
	}
	code = StatusCode(binary.BigEndian.Uint16(payload))
	reason = btsToString(payload[2:])
	return
}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/server.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/server.go
new file mode 100644
index 00000000000..48059aded49
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/server.go
@@ -0,0 +1,607 @@
+package ws
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/gobwas/httphead"
+ "github.com/gobwas/pool/pbufio"
+)
+
// Constants used by ConnUpgrader.
const (
	// Default sizes for the pooled bufio read/write buffers used during the
	// server-side handshake when ReadBufferSize/WriteBufferSize are zero.
	DefaultServerReadBufferSize  = 4096
	DefaultServerWriteBufferSize = 512
)
+
+// Errors used by both client and server when preparing WebSocket handshake.
+var (
+ ErrHandshakeBadProtocol = RejectConnectionError(
+ RejectionStatus(http.StatusHTTPVersionNotSupported),
+ RejectionReason(fmt.Sprintf("handshake error: bad HTTP protocol version")),
+ )
+ ErrHandshakeBadMethod = RejectConnectionError(
+ RejectionStatus(http.StatusMethodNotAllowed),
+ RejectionReason(fmt.Sprintf("handshake error: bad HTTP request method")),
+ )
+ ErrHandshakeBadHost = RejectConnectionError(
+ RejectionStatus(http.StatusBadRequest),
+ RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerHost)),
+ )
+ ErrHandshakeBadUpgrade = RejectConnectionError(
+ RejectionStatus(http.StatusBadRequest),
+ RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerUpgrade)),
+ )
+ ErrHandshakeBadConnection = RejectConnectionError(
+ RejectionStatus(http.StatusBadRequest),
+ RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerConnection)),
+ )
+ ErrHandshakeBadSecAccept = RejectConnectionError(
+ RejectionStatus(http.StatusBadRequest),
+ RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerSecAccept)),
+ )
+ ErrHandshakeBadSecKey = RejectConnectionError(
+ RejectionStatus(http.StatusBadRequest),
+ RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerSecKey)),
+ )
+ ErrHandshakeBadSecVersion = RejectConnectionError(
+ RejectionStatus(http.StatusBadRequest),
+ RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerSecVersion)),
+ )
+)
+
// ErrMalformedResponse is returned by Dialer to indicate that server response
// can not be parsed.
var ErrMalformedResponse = fmt.Errorf("malformed HTTP response")

// ErrMalformedRequest is returned when HTTP request can not be parsed.
// It rejects the connection with 400 Bad Request.
var ErrMalformedRequest = RejectConnectionError(
	RejectionStatus(http.StatusBadRequest),
	RejectionReason("malformed HTTP request"),
)

// ErrHandshakeUpgradeRequired is returned by Upgrader to indicate that
// connection is rejected because given WebSocket version is malformed.
//
// According to RFC6455:
//
//	If this version does not match a version understood by the server, the
//	server MUST abort the WebSocket handshake described in this section and
//	instead send an appropriate HTTP error code (such as 426 Upgrade Required)
//	and a |Sec-WebSocket-Version| header field indicating the version(s) the
//	server is capable of understanding.
//
// The rejection therefore includes a Sec-WebSocket-Version: 13 header.
var ErrHandshakeUpgradeRequired = RejectConnectionError(
	RejectionStatus(http.StatusUpgradeRequired),
	RejectionHeader(HandshakeHeaderString(headerSecVersion+": 13\r\n")),
	RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerSecVersion)),
)

// ErrNotHijacker is an error returned when http.ResponseWriter does not
// implement http.Hijacker interface.
var ErrNotHijacker = RejectConnectionError(
	RejectionStatus(http.StatusInternalServerError),
	RejectionReason("given http.ResponseWriter is not a http.Hijacker"),
)
+
// DefaultHTTPUpgrader is an HTTPUpgrader that holds no options and is used by
// UpgradeHTTP function.
var DefaultHTTPUpgrader HTTPUpgrader

// UpgradeHTTP is like HTTPUpgrader{}.Upgrade(): it upgrades the connection
// behind r/w with all options left at their defaults.
func UpgradeHTTP(r *http.Request, w http.ResponseWriter) (net.Conn, *bufio.ReadWriter, Handshake, error) {
	return DefaultHTTPUpgrader.Upgrade(r, w)
}
+
// DefaultUpgrader is an Upgrader that holds no options and is used by Upgrade
// function.
var DefaultUpgrader Upgrader

// Upgrade is like Upgrader{}.Upgrade(): it performs the zero-copy server-side
// handshake on conn with all options left at their defaults.
func Upgrade(conn io.ReadWriter) (Handshake, error) {
	return DefaultUpgrader.Upgrade(conn)
}
+
// HTTPUpgrader contains options for upgrading connection to websocket from
// net/http Handler arguments.
type HTTPUpgrader struct {
	// Timeout is the maximum amount of time an Upgrade() will spend while
	// writing handshake response.
	//
	// The default is no timeout.
	Timeout time.Duration

	// Header is an optional http.Header mapping that could be used to
	// write additional headers to the handshake response.
	//
	// Note that if present, it will be written in any result of handshake.
	Header http.Header

	// Protocol is the select function that is used to select subprotocol from
	// list requested by client. If this field is set, then the first matched
	// protocol is sent to a client as negotiated.
	Protocol func(string) bool

	// Extension is the select function that is used to select extensions from
	// list requested by client. If this field is set, then all matched
	// extensions are sent to a client as negotiated.
	Extension func(httphead.Option) bool
}
+
// Upgrade upgrades http connection to the websocket connection.
//
// It hijacks net.Conn from w and returns received net.Conn and
// bufio.ReadWriter. On successful handshake it returns Handshake struct
// describing handshake info.
//
// After a successful hijack all responses (including rejections) are written
// directly to the hijacked connection, not through w.
func (u HTTPUpgrader) Upgrade(r *http.Request, w http.ResponseWriter) (conn net.Conn, rw *bufio.ReadWriter, hs Handshake, err error) {
	// Hijack connection first to get the ability to write rejection errors the
	// same way as in Upgrader.
	hj, ok := w.(http.Hijacker)
	if ok {
		conn, rw, err = hj.Hijack()
	} else {
		err = ErrNotHijacker
	}
	if err != nil {
		// Hijack failed, so w is still usable for a plain HTTP error.
		httpError(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// See https://tools.ietf.org/html/rfc6455#section-4.1
	// The method of the request MUST be GET, and the HTTP version MUST be at least 1.1.
	// Each check below sets err and short-circuits the rest of the chain.
	var nonce string
	if r.Method != http.MethodGet {
		err = ErrHandshakeBadMethod
	} else if r.ProtoMajor < 1 || (r.ProtoMajor == 1 && r.ProtoMinor < 1) {
		err = ErrHandshakeBadProtocol
	} else if r.Host == "" {
		err = ErrHandshakeBadHost
	} else if u := httpGetHeader(r.Header, headerUpgradeCanonical); u != "websocket" && !strings.EqualFold(u, "websocket") {
		err = ErrHandshakeBadUpgrade
	} else if c := httpGetHeader(r.Header, headerConnectionCanonical); c != "Upgrade" && !strHasToken(c, "upgrade") {
		err = ErrHandshakeBadConnection
	} else if nonce = httpGetHeader(r.Header, headerSecKeyCanonical); len(nonce) != nonceSize {
		err = ErrHandshakeBadSecKey
	} else if v := httpGetHeader(r.Header, headerSecVersionCanonical); v != "13" {
		// According to RFC6455:
		//
		// If this version does not match a version understood by the server,
		// the server MUST abort the WebSocket handshake described in this
		// section and instead send an appropriate HTTP error code (such as 426
		// Upgrade Required) and a |Sec-WebSocket-Version| header field
		// indicating the version(s) the server is capable of understanding.
		//
		// So we branching here cause empty or not present version does not
		// meet the ABNF rules of RFC6455:
		//
		// version = DIGIT | (NZDIGIT DIGIT) |
		// ("1" DIGIT DIGIT) | ("2" DIGIT DIGIT)
		// ; Limited to 0-255 range, with no leading zeros
		//
		// That is, if version is really invalid – we sent 426 status, if it
		// not present or empty – it is 400.
		if v != "" {
			err = ErrHandshakeUpgradeRequired
		} else {
			err = ErrHandshakeBadSecVersion
		}
	}
	// Negotiate subprotocol: first header value accepted by u.Protocol wins.
	if check := u.Protocol; err == nil && check != nil {
		ps := r.Header[headerSecProtocolCanonical]
		for i := 0; i < len(ps) && err == nil && hs.Protocol == ""; i++ {
			var ok bool
			hs.Protocol, ok = strSelectProtocol(ps[i], check)
			if !ok {
				err = ErrMalformedRequest
			}
		}
	}
	// Negotiate extensions: all accepted options across header values are kept.
	if check := u.Extension; err == nil && check != nil {
		xs := r.Header[headerSecExtensionsCanonical]
		for i := 0; i < len(xs) && err == nil; i++ {
			var ok bool
			hs.Extensions, ok = strSelectExtensions(xs[i], hs.Extensions, check)
			if !ok {
				err = ErrMalformedRequest
			}
		}
	}

	// Clear deadlines set by server.
	conn.SetDeadline(noDeadline)
	if t := u.Timeout; t != 0 {
		conn.SetWriteDeadline(time.Now().Add(t))
		defer conn.SetWriteDeadline(noDeadline)
	}

	// Slot 0 carries the user-configured headers; slot 1 is reserved for
	// rejection-specific headers below.
	var header handshakeHeader
	if h := u.Header; h != nil {
		header[0] = HandshakeHeaderHTTP(h)
	}
	if err == nil {
		httpWriteResponseUpgrade(rw.Writer, strToBytes(nonce), hs, header.WriteTo)
		err = rw.Writer.Flush()
	} else {
		var code int
		if rej, ok := err.(*rejectConnectionError); ok {
			code = rej.code
			header[1] = rej.header
		}
		if code == 0 {
			code = http.StatusInternalServerError
		}
		httpWriteResponseError(rw.Writer, err, code, header.WriteTo)
		// Do not store Flush() error to not override already existing one.
		rw.Writer.Flush()
	}
	return
}
+
// Upgrader contains options for upgrading connection to websocket.
type Upgrader struct {
	// ReadBufferSize and WriteBufferSize is an I/O buffer sizes.
	// They used to read and write http data while upgrading to WebSocket.
	// Allocated buffers are pooled with sync.Pool to avoid extra allocations.
	//
	// If a size is zero then default value is used.
	//
	// Usually it is useful to set read buffer size bigger than write buffer
	// size because incoming request could contain long header values, such as
	// Cookie. Response, in other way, could be big only if user write multiple
	// custom headers. Usually response takes less than 256 bytes.
	ReadBufferSize, WriteBufferSize int

	// Protocol is a select function that is used to select subprotocol
	// from list requested by client. If this field is set, then the first matched
	// protocol is sent to a client as negotiated.
	//
	// The argument is only valid until the callback returns.
	Protocol func([]byte) bool

	// ProtocolCustom allows the user to parse the Sec-WebSocket-Protocol
	// header manually.
	// Note that returned bytes must be valid until Upgrade returns.
	// If ProtocolCustom is set, it is used instead of Protocol function.
	ProtocolCustom func([]byte) (string, bool)

	// Extension is a select function that is used to select extensions
	// from list requested by client. If this field is set, then all matched
	// extensions are sent to a client as negotiated.
	//
	// The argument is only valid until the callback returns.
	//
	// According to the RFC6455 order of extensions passed by a client is
	// significant. That is, returning true from this function means that no
	// other extension with the same name should be checked because server
	// accepted the most preferable extension right now:
	// "Note that the order of extensions is significant. Any interactions between
	// multiple extensions MAY be defined in the documents defining the extensions.
	// In the absence of such definitions, the interpretation is that the header
	// fields listed by the client in its request represent a preference of the
	// header fields it wishes to use, with the first options listed being most
	// preferable."
	Extension func(httphead.Option) bool

	// ExtensionCustom allows the user to parse the Sec-WebSocket-Extensions
	// header manually.
	// Note that returned options should be valid until Upgrade returns.
	// If ExtensionCustom is set, it is used instead of Extension function.
	ExtensionCustom func([]byte, []httphead.Option) ([]httphead.Option, bool)

	// Header is an optional HandshakeHeader instance that could be used to
	// write additional headers to the handshake response.
	//
	// It used instead of any key-value mappings to avoid allocations in user
	// land.
	//
	// Note that if present, it will be written in any result of handshake.
	Header HandshakeHeader

	// OnRequest is a callback that will be called after request line
	// successful parsing.
	//
	// The arguments are only valid until the callback returns.
	//
	// If returned error is non-nil then connection is rejected and response is
	// sent with appropriate HTTP error code and body set to error message.
	//
	// RejectConnectionError could be used to get more control on response.
	OnRequest func(uri []byte) error

	// OnHost is a callback that will be called after "Host" header successful
	// parsing.
	//
	// It is separated from OnHeader callback because the Host header must be
	// present in each request since HTTP/1.1. Thus Host header is non-optional
	// and required for every WebSocket handshake.
	//
	// The arguments are only valid until the callback returns.
	//
	// If returned error is non-nil then connection is rejected and response is
	// sent with appropriate HTTP error code and body set to error message.
	//
	// RejectConnectionError could be used to get more control on response.
	OnHost func(host []byte) error

	// OnHeader is a callback that will be called after successful parsing of
	// header, that is not used during WebSocket handshake procedure. That is,
	// it will be called with non-websocket headers, which could be relevant
	// for application-level logic.
	//
	// The arguments are only valid until the callback returns.
	//
	// If returned error is non-nil then connection is rejected and response is
	// sent with appropriate HTTP error code and body set to error message.
	//
	// RejectConnectionError could be used to get more control on response.
	OnHeader func(key, value []byte) error

	// OnBeforeUpgrade is a callback that will be called before sending
	// successful upgrade response.
	//
	// Setting OnBeforeUpgrade allows user to make final application-level
	// checks and decide whether this connection is allowed to successfully
	// upgrade to WebSocket.
	//
	// It must return non-nil either HandshakeHeader or error and never both.
	//
	// If returned error is non-nil then connection is rejected and response is
	// sent with appropriate HTTP error code and body set to error message.
	//
	// RejectConnectionError could be used to get more control on response.
	OnBeforeUpgrade func() (header HandshakeHeader, err error)
}
+
// Upgrade zero-copy upgrades connection to WebSocket. It interprets given conn
// as connection with incoming HTTP Upgrade request.
//
// It is a caller responsibility to manage i/o timeouts on conn.
//
// Non-nil error means that request for the WebSocket upgrade is invalid or
// malformed and usually connection should be closed.
// Even when error is non-nil Upgrade will write appropriate response into
// connection in compliance with RFC.
func (u Upgrader) Upgrade(conn io.ReadWriter) (hs Handshake, err error) {
	// headerSeen constants helps to report whether or not some header was seen
	// during reading request bytes.
	const (
		headerSeenHost = 1 << iota
		headerSeenUpgrade
		headerSeenConnection
		headerSeenSecVersion
		headerSeenSecKey

		// headerSeenAll is the value that we expect to receive at the end of
		// headers read/parse loop.
		headerSeenAll = 0 |
			headerSeenHost |
			headerSeenUpgrade |
			headerSeenConnection |
			headerSeenSecVersion |
			headerSeenSecKey
	)

	// Prepare I/O buffers.
	// TODO(gobwas): make it configurable.
	br := pbufio.GetReader(conn,
		nonZero(u.ReadBufferSize, DefaultServerReadBufferSize),
	)
	bw := pbufio.GetWriter(conn,
		nonZero(u.WriteBufferSize, DefaultServerWriteBufferSize),
	)
	defer func() {
		pbufio.PutReader(br)
		pbufio.PutWriter(bw)
	}()

	// Read HTTP request line like "GET /ws HTTP/1.1".
	rl, err := readLine(br)
	if err != nil {
		return
	}
	// Parse request line data like HTTP version, uri and method.
	req, err := httpParseRequestLine(rl)
	if err != nil {
		return
	}

	// Prepare stack-based handshake header list. Slot 0 carries the
	// user-configured headers; slot 1 is filled later for rejections or by
	// OnBeforeUpgrade.
	header := handshakeHeader{
		0: u.Header,
	}

	// Parse and check HTTP request.
	// As RFC6455 says:
	// The client's opening handshake consists of the following parts. If the
	// server, while reading the handshake, finds that the client did not
	// send a handshake that matches the description below (note that as per
	// [RFC2616], the order of the header fields is not important), including
	// but not limited to any violations of the ABNF grammar specified for
	// the components of the handshake, the server MUST stop processing the
	// client's handshake and return an HTTP response with an appropriate
	// error code (such as 400 Bad Request).
	//
	// See https://tools.ietf.org/html/rfc6455#section-4.2.1

	// An HTTP/1.1 or higher GET request, including a "Request-URI".
	//
	// Even if RFC says "1.1 or higher" without mentioning the part of the
	// version, we apply it only to minor part.
	switch {
	case req.major != 1 || req.minor < 1:
		// Abort processing the whole request because we do not even know how
		// to actually parse it.
		err = ErrHandshakeBadProtocol

	case btsToString(req.method) != http.MethodGet:
		err = ErrHandshakeBadMethod

	default:
		if onRequest := u.OnRequest; onRequest != nil {
			err = onRequest(req.uri)
		}
	}
	// Start headers read/parse loop. The loop stops at the first error or at
	// the blank line ending the header block.
	var (
		// headerSeen reports which header was seen by setting corresponding
		// bit on.
		headerSeen byte

		nonce = make([]byte, nonceSize)
	)
	for err == nil {
		line, e := readLine(br)
		if e != nil {
			// Transport error: bail out without writing a response.
			return hs, e
		}
		if len(line) == 0 {
			// Blank line, no more lines to read.
			break
		}

		k, v, ok := httpParseHeaderLine(line)
		if !ok {
			err = ErrMalformedRequest
			break
		}

		switch btsToString(k) {
		case headerHostCanonical:
			headerSeen |= headerSeenHost
			if onHost := u.OnHost; onHost != nil {
				err = onHost(v)
			}

		case headerUpgradeCanonical:
			headerSeen |= headerSeenUpgrade
			if !bytes.Equal(v, specHeaderValueUpgrade) && !bytes.EqualFold(v, specHeaderValueUpgrade) {
				err = ErrHandshakeBadUpgrade
			}

		case headerConnectionCanonical:
			headerSeen |= headerSeenConnection
			if !bytes.Equal(v, specHeaderValueConnection) && !btsHasToken(v, specHeaderValueConnectionLower) {
				err = ErrHandshakeBadConnection
			}

		case headerSecVersionCanonical:
			headerSeen |= headerSeenSecVersion
			if !bytes.Equal(v, specHeaderValueSecVersion) {
				err = ErrHandshakeUpgradeRequired
			}

		case headerSecKeyCanonical:
			headerSeen |= headerSeenSecKey
			if len(v) != nonceSize {
				err = ErrHandshakeBadSecKey
			} else {
				// Copy the nonce out of the read buffer: v only lives until
				// the next readLine call.
				copy(nonce[:], v)
			}

		case headerSecProtocolCanonical:
			if custom, check := u.ProtocolCustom, u.Protocol; hs.Protocol == "" && (custom != nil || check != nil) {
				var ok bool
				if custom != nil {
					hs.Protocol, ok = custom(v)
				} else {
					hs.Protocol, ok = btsSelectProtocol(v, check)
				}
				if !ok {
					err = ErrMalformedRequest
				}
			}

		case headerSecExtensionsCanonical:
			if custom, check := u.ExtensionCustom, u.Extension; custom != nil || check != nil {
				var ok bool
				if custom != nil {
					hs.Extensions, ok = custom(v, hs.Extensions)
				} else {
					hs.Extensions, ok = btsSelectExtensions(v, hs.Extensions, check)
				}
				if !ok {
					err = ErrMalformedRequest
				}
			}

		default:
			if onHeader := u.OnHeader; onHeader != nil {
				err = onHeader(k, v)
			}
		}
	}
	// Map any missing mandatory header to its specific handshake error.
	switch {
	case err == nil && headerSeen != headerSeenAll:
		switch {
		case headerSeen&headerSeenHost == 0:
			// As RFC2616 says:
			// A client MUST include a Host header field in all HTTP/1.1
			// request messages. If the requested URI does not include an
			// Internet host name for the service being requested, then the
			// Host header field MUST be given with an empty value. An
			// HTTP/1.1 proxy MUST ensure that any request message it
			// forwards does contain an appropriate Host header field that
			// identifies the service being requested by the proxy. All
			// Internet-based HTTP/1.1 servers MUST respond with a 400 (Bad
			// Request) status code to any HTTP/1.1 request message which
			// lacks a Host header field.
			err = ErrHandshakeBadHost
		case headerSeen&headerSeenUpgrade == 0:
			err = ErrHandshakeBadUpgrade
		case headerSeen&headerSeenConnection == 0:
			err = ErrHandshakeBadConnection
		case headerSeen&headerSeenSecVersion == 0:
			// In case of empty or not present version we do not send 426 status,
			// because it does not meet the ABNF rules of RFC6455:
			//
			// version = DIGIT | (NZDIGIT DIGIT) |
			// ("1" DIGIT DIGIT) | ("2" DIGIT DIGIT)
			// ; Limited to 0-255 range, with no leading zeros
			//
			// That is, if version is really invalid – we sent 426 status as above, if it
			// not present – it is 400.
			err = ErrHandshakeBadSecVersion
		case headerSeen&headerSeenSecKey == 0:
			err = ErrHandshakeBadSecKey
		default:
			panic("unknown headers state")
		}

	case err == nil && u.OnBeforeUpgrade != nil:
		header[1], err = u.OnBeforeUpgrade()
	}
	if err != nil {
		var code int
		if rej, ok := err.(*rejectConnectionError); ok {
			code = rej.code
			header[1] = rej.header
		}
		if code == 0 {
			code = http.StatusInternalServerError
		}
		httpWriteResponseError(bw, err, code, header.WriteTo)
		// Do not store Flush() error to not override already existing one.
		bw.Flush()
		return
	}

	httpWriteResponseUpgrade(bw, nonce, hs, header.WriteTo)
	err = bw.Flush()

	return
}
+
+type handshakeHeader [2]HandshakeHeader
+
+func (hs handshakeHeader) WriteTo(w io.Writer) (n int64, err error) {
+ for i := 0; i < len(hs) && err == nil; i++ {
+ if h := hs[i]; h != nil {
+ var m int64
+ m, err = h.WriteTo(w)
+ n += m
+ }
+ }
+ return n, err
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/server_test.s b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/server_test.s
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/util.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/util.go
new file mode 100644
index 00000000000..67ad906e5d2
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/util.go
@@ -0,0 +1,214 @@
+package ws
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "reflect"
+ "unsafe"
+
+ "github.com/gobwas/httphead"
+)
+
+// SelectFromSlice creates accept function that could be used as Protocol/Extension
+// select during upgrade.
+func SelectFromSlice(accept []string) func(string) bool {
+ if len(accept) > 16 {
+ mp := make(map[string]struct{}, len(accept))
+ for _, p := range accept {
+ mp[p] = struct{}{}
+ }
+ return func(p string) bool {
+ _, ok := mp[p]
+ return ok
+ }
+ }
+ return func(p string) bool {
+ for _, ok := range accept {
+ if p == ok {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+// SelectEqual creates accept function that could be used as Protocol/Extension
+// select during upgrade.
+func SelectEqual(v string) func(string) bool {
+ return func(p string) bool {
+ return v == p
+ }
+}
+
+func strToBytes(str string) (bts []byte) {
+ s := (*reflect.StringHeader)(unsafe.Pointer(&str))
+ b := (*reflect.SliceHeader)(unsafe.Pointer(&bts))
+ b.Data = s.Data
+ b.Len = s.Len
+ b.Cap = s.Len
+ return
+}
+
+func btsToString(bts []byte) (str string) {
+ return *(*string)(unsafe.Pointer(&bts))
+}
+
+// asciiToInt converts bytes to int.
+func asciiToInt(bts []byte) (ret int, err error) {
+ // ASCII numbers all start with the high-order bits 0011.
+ // If you see that, and the next bits are 0-9 (0000 - 1001) you can grab those
+ // bits and interpret them directly as an integer.
+ var n int
+ if n = len(bts); n < 1 {
+ return 0, fmt.Errorf("converting empty bytes to int")
+ }
+ for i := 0; i < n; i++ {
+ if bts[i]&0xf0 != 0x30 {
+ return 0, fmt.Errorf("%s is not a numeric character", string(bts[i]))
+ }
+ ret += int(bts[i]&0xf) * pow(10, n-i-1)
+ }
+ return ret, nil
+}
+
+// pow for integers implementation.
+// See Donald Knuth, The Art of Computer Programming, Volume 2, Section 4.6.3
+func pow(a, b int) int {
+ p := 1
+ for b > 0 {
+ if b&1 != 0 {
+ p *= a
+ }
+ b >>= 1
+ a *= a
+ }
+ return p
+}
+
+func bsplit3(bts []byte, sep byte) (b1, b2, b3 []byte) {
+ a := bytes.IndexByte(bts, sep)
+ b := bytes.IndexByte(bts[a+1:], sep)
+ if a == -1 || b == -1 {
+ return bts, nil, nil
+ }
+ b += a + 1
+ return bts[:a], bts[a+1 : b], bts[b+1:]
+}
+
+func btrim(bts []byte) []byte {
+ var i, j int
+ for i = 0; i < len(bts) && (bts[i] == ' ' || bts[i] == '\t'); {
+ i++
+ }
+ for j = len(bts); j > i && (bts[j-1] == ' ' || bts[j-1] == '\t'); {
+ j--
+ }
+ return bts[i:j]
+}
+
+func strHasToken(header, token string) (has bool) {
+ return btsHasToken(strToBytes(header), strToBytes(token))
+}
+
+func btsHasToken(header, token []byte) (has bool) {
+ httphead.ScanTokens(header, func(v []byte) bool {
+ has = bytes.EqualFold(v, token)
+ return !has
+ })
+ return
+}
+
+const (
+ toLower = 'a' - 'A' // for use with OR.
+ toUpper = ^byte(toLower) // for use with AND.
+ toLower8 = uint64(toLower) |
+ uint64(toLower)<<8 |
+ uint64(toLower)<<16 |
+ uint64(toLower)<<24 |
+ uint64(toLower)<<32 |
+ uint64(toLower)<<40 |
+ uint64(toLower)<<48 |
+ uint64(toLower)<<56
+)
+
+// Algorithm below is like standard textproto/CanonicalMIMEHeaderKey, except
+// that it operates on a slice of bytes and modifies it in place without copying.
+func canonicalizeHeaderKey(k []byte) {
+ upper := true
+ for i, c := range k {
+ if upper && 'a' <= c && c <= 'z' {
+ k[i] &= toUpper
+ } else if !upper && 'A' <= c && c <= 'Z' {
+ k[i] |= toLower
+ }
+ upper = c == '-'
+ }
+}
+
+// readLine reads line from br. It reads until '\n' and returns bytes without
+// '\n' or '\r\n' at the end.
+// It returns err if and only if the line does not end in '\n'. Note that the
+// read bytes are returned in any case of error.
+//
+// It is much like textproto/Reader.ReadLine() except that it returns raw
+// bytes instead of a string. That is, it avoids copying bytes read
+// from br.
+//
+// textproto/Reader.ReadLineBytes() also makes a copy of the resulting bytes to be
+// safe with future I/O operations on br.
+//
+// Since we control I/O operations on br, we do not need to make an
+// additional copy for safety.
+//
+// NOTE: it may return copied flag to notify that returned buffer is safe to
+// use.
+func readLine(br *bufio.Reader) ([]byte, error) {
+ var line []byte
+ for {
+ bts, err := br.ReadSlice('\n')
+ if err == bufio.ErrBufferFull {
+ // Copy bytes because next read will discard them.
+ line = append(line, bts...)
+ continue
+ }
+
+ // Avoid copy of single read.
+ if line == nil {
+ line = bts
+ } else {
+ line = append(line, bts...)
+ }
+
+ if err != nil {
+ return line, err
+ }
+
+ // Size of line is at least 1.
+ // In other case bufio.ReadSlice() returns error.
+ n := len(line)
+
+ // Cut '\n' or '\r\n'.
+ if n > 1 && line[n-2] == '\r' {
+ line = line[:n-2]
+ } else {
+ line = line[:n-1]
+ }
+
+ return line, nil
+ }
+}
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func nonZero(a, b int) int {
+ if a != 0 {
+ return a
+ }
+ return b
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/write.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/write.go
new file mode 100644
index 00000000000..94557c69639
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/write.go
@@ -0,0 +1,104 @@
+package ws
+
+import (
+ "encoding/binary"
+ "io"
+)
+
+// Header size length bounds in bytes.
+const (
+ MaxHeaderSize = 14
+ MinHeaderSize = 2
+)
+
+const (
+ bit0 = 0x80
+ bit1 = 0x40
+ bit2 = 0x20
+ bit3 = 0x10
+ bit4 = 0x08
+ bit5 = 0x04
+ bit6 = 0x02
+ bit7 = 0x01
+
+ len7 = int64(125)
+ len16 = int64(^(uint16(0)))
+ len64 = int64(^(uint64(0)) >> 1)
+)
+
+// HeaderSize returns number of bytes that are needed to encode given header.
+// It returns -1 if header is malformed.
+func HeaderSize(h Header) (n int) {
+ switch {
+ case h.Length < 126:
+ n = 2
+ case h.Length <= len16:
+ n = 4
+ case h.Length <= len64:
+ n = 10
+ default:
+ return -1
+ }
+ if h.Masked {
+ n += len(h.Mask)
+ }
+ return n
+}
+
+// WriteHeader writes header binary representation into w.
+func WriteHeader(w io.Writer, h Header) error {
+ // Make slice of bytes with capacity 14 that could hold any header.
+ bts := make([]byte, MaxHeaderSize)
+
+ if h.Fin {
+ bts[0] |= bit0
+ }
+ bts[0] |= h.Rsv << 4
+ bts[0] |= byte(h.OpCode)
+
+ var n int
+ switch {
+ case h.Length <= len7:
+ bts[1] = byte(h.Length)
+ n = 2
+
+ case h.Length <= len16:
+ bts[1] = 126
+ binary.BigEndian.PutUint16(bts[2:4], uint16(h.Length))
+ n = 4
+
+ case h.Length <= len64:
+ bts[1] = 127
+ binary.BigEndian.PutUint64(bts[2:10], uint64(h.Length))
+ n = 10
+
+ default:
+ return ErrHeaderLengthUnexpected
+ }
+
+ if h.Masked {
+ bts[1] |= bit0
+ n += copy(bts[n:], h.Mask[:])
+ }
+
+ _, err := w.Write(bts[:n])
+
+ return err
+}
+
+// WriteFrame writes frame binary representation into w.
+func WriteFrame(w io.Writer, f Frame) error {
+ err := WriteHeader(w, f.Header)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(f.Payload)
+ return err
+}
+
+// MustWriteFrame is like WriteFrame but panics if the frame cannot be written.
+func MustWriteFrame(w io.Writer, f Frame) {
+ if err := WriteFrame(w, f); err != nil {
+ panic(err)
+ }
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/.gitignore b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/.gitignore
new file mode 100644
index 00000000000..cd3fcd1ef72
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+.idea/
+*.iml
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/AUTHORS b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/AUTHORS
new file mode 100644
index 00000000000..1931f400682
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/AUTHORS
@@ -0,0 +1,9 @@
+# This is the official list of Gorilla WebSocket authors for copyright
+# purposes.
+#
+# Please keep the list sorted.
+
+Gary Burd
+Google LLC (https://opensource.google.com/)
+Joachim Bauch
+
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/LICENSE b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/LICENSE
new file mode 100644
index 00000000000..9171c972252
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/README.md b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/README.md
new file mode 100644
index 00000000000..19aa2e75c82
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/README.md
@@ -0,0 +1,64 @@
+# Gorilla WebSocket
+
+[](https://godoc.org/github.com/gorilla/websocket)
+[](https://circleci.com/gh/gorilla/websocket)
+
+Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
+[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
+
+### Documentation
+
+* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc)
+* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
+* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
+* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
+* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
+
+### Status
+
+The Gorilla WebSocket package provides a complete and tested implementation of
+the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
+package API is stable.
+
+### Installation
+
+ go get github.com/gorilla/websocket
+
+### Protocol Compliance
+
+The Gorilla WebSocket package passes the server tests in the [Autobahn Test
+Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn
+subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
+
+### Gorilla WebSocket compared with other packages
+
+
+
+Notes:
+
+1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
+2. The application can get the type of a received data message by implementing
+ a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal)
+ function.
+3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries.
+ Read returns when the input buffer is full or a frame boundary is
+ encountered. Each call to Write sends a single frame message. The Gorilla
+ io.Reader and io.WriteCloser operate on a single WebSocket message.
+
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client.go
new file mode 100644
index 00000000000..962c06a391c
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client.go
@@ -0,0 +1,395 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "errors"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/http/httptrace"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// ErrBadHandshake is returned when the server response to opening handshake is
+// invalid.
+var ErrBadHandshake = errors.New("websocket: bad handshake")
+
+var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
+
+// NewClient creates a new client connection using the given net connection.
+// The URL u specifies the host and request URI. Use requestHeader to specify
+// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
+// (Cookie). Use the response.Header to get the selected subprotocol
+// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
+//
+// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
+// non-nil *http.Response so that callers can handle redirects, authentication,
+// etc.
+//
+// Deprecated: Use Dialer instead.
+func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
+ d := Dialer{
+ ReadBufferSize: readBufSize,
+ WriteBufferSize: writeBufSize,
+ NetDial: func(net, addr string) (net.Conn, error) {
+ return netConn, nil
+ },
+ }
+ return d.Dial(u.String(), requestHeader)
+}
+
+// A Dialer contains options for connecting to WebSocket server.
+type Dialer struct {
+ // NetDial specifies the dial function for creating TCP connections. If
+ // NetDial is nil, net.Dial is used.
+ NetDial func(network, addr string) (net.Conn, error)
+
+ // NetDialContext specifies the dial function for creating TCP connections. If
+ // NetDialContext is nil, net.DialContext is used.
+ NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ // Proxy specifies a function to return a proxy for a given
+ // Request. If the function returns a non-nil error, the
+ // request is aborted with the provided error.
+ // If Proxy is nil or returns a nil *URL, no proxy is used.
+ Proxy func(*http.Request) (*url.URL, error)
+
+ // TLSClientConfig specifies the TLS configuration to use with tls.Client.
+ // If nil, the default configuration is used.
+ TLSClientConfig *tls.Config
+
+ // HandshakeTimeout specifies the duration for the handshake to complete.
+ HandshakeTimeout time.Duration
+
+ // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
+ // size is zero, then a useful default size is used. The I/O buffer sizes
+ // do not limit the size of the messages that can be sent or received.
+ ReadBufferSize, WriteBufferSize int
+
+ // WriteBufferPool is a pool of buffers for write operations. If the value
+ // is not set, then write buffers are allocated to the connection for the
+ // lifetime of the connection.
+ //
+ // A pool is most useful when the application has a modest volume of writes
+ // across a large number of connections.
+ //
+ // Applications should use a single pool for each unique value of
+ // WriteBufferSize.
+ WriteBufferPool BufferPool
+
+ // Subprotocols specifies the client's requested subprotocols.
+ Subprotocols []string
+
+ // EnableCompression specifies if the client should attempt to negotiate
+ // per message compression (RFC 7692). Setting this value to true does not
+ // guarantee that compression will be supported. Currently only "no context
+ // takeover" modes are supported.
+ EnableCompression bool
+
+ // Jar specifies the cookie jar.
+ // If Jar is nil, cookies are not sent in requests and ignored
+ // in responses.
+ Jar http.CookieJar
+}
+
+// Dial creates a new client connection by calling DialContext with a background context.
+func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
+ return d.DialContext(context.Background(), urlStr, requestHeader)
+}
+
+var errMalformedURL = errors.New("malformed ws or wss URL")
+
+func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
+ hostPort = u.Host
+ hostNoPort = u.Host
+ if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
+ hostNoPort = hostNoPort[:i]
+ } else {
+ switch u.Scheme {
+ case "wss":
+ hostPort += ":443"
+ case "https":
+ hostPort += ":443"
+ default:
+ hostPort += ":80"
+ }
+ }
+ return hostPort, hostNoPort
+}
+
+// DefaultDialer is a dialer with all fields set to the default values.
+var DefaultDialer = &Dialer{
+ Proxy: http.ProxyFromEnvironment,
+ HandshakeTimeout: 45 * time.Second,
+}
+
+// nilDialer is dialer to use when receiver is nil.
+var nilDialer = *DefaultDialer
+
+// DialContext creates a new client connection. Use requestHeader to specify the
+// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
+// Use the response.Header to get the selected subprotocol
+// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
+//
+// The context will be used in the request and in the Dialer.
+//
+// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
+// non-nil *http.Response so that callers can handle redirects, authentication,
+// etcetera. The response body may not contain the entire response and does not
+// need to be closed by the application.
+func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
+ if d == nil {
+ d = &nilDialer
+ }
+
+ challengeKey, err := generateChallengeKey()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ u, err := url.Parse(urlStr)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ switch u.Scheme {
+ case "ws":
+ u.Scheme = "http"
+ case "wss":
+ u.Scheme = "https"
+ default:
+ return nil, nil, errMalformedURL
+ }
+
+ if u.User != nil {
+ // User name and password are not allowed in websocket URIs.
+ return nil, nil, errMalformedURL
+ }
+
+ req := &http.Request{
+ Method: "GET",
+ URL: u,
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: make(http.Header),
+ Host: u.Host,
+ }
+ req = req.WithContext(ctx)
+
+ // Set the cookies present in the cookie jar of the dialer
+ if d.Jar != nil {
+ for _, cookie := range d.Jar.Cookies(u) {
+ req.AddCookie(cookie)
+ }
+ }
+
+ // Set the request headers using the capitalization for names and values in
+ // RFC examples. Although the capitalization shouldn't matter, there are
+ // servers that depend on it. The Header.Set method is not used because the
+ // method canonicalizes the header names.
+ req.Header["Upgrade"] = []string{"websocket"}
+ req.Header["Connection"] = []string{"Upgrade"}
+ req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
+ req.Header["Sec-WebSocket-Version"] = []string{"13"}
+ if len(d.Subprotocols) > 0 {
+ req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
+ }
+ for k, vs := range requestHeader {
+ switch {
+ case k == "Host":
+ if len(vs) > 0 {
+ req.Host = vs[0]
+ }
+ case k == "Upgrade" ||
+ k == "Connection" ||
+ k == "Sec-Websocket-Key" ||
+ k == "Sec-Websocket-Version" ||
+ k == "Sec-Websocket-Extensions" ||
+ (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
+ return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
+ case k == "Sec-Websocket-Protocol":
+ req.Header["Sec-WebSocket-Protocol"] = vs
+ default:
+ req.Header[k] = vs
+ }
+ }
+
+ if d.EnableCompression {
+ req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"}
+ }
+
+ if d.HandshakeTimeout != 0 {
+ var cancel func()
+ ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout)
+ defer cancel()
+ }
+
+ // Get network dial function.
+ var netDial func(network, add string) (net.Conn, error)
+
+ if d.NetDialContext != nil {
+ netDial = func(network, addr string) (net.Conn, error) {
+ return d.NetDialContext(ctx, network, addr)
+ }
+ } else if d.NetDial != nil {
+ netDial = d.NetDial
+ } else {
+ netDialer := &net.Dialer{}
+ netDial = func(network, addr string) (net.Conn, error) {
+ return netDialer.DialContext(ctx, network, addr)
+ }
+ }
+
+ // If needed, wrap the dial function to set the connection deadline.
+ if deadline, ok := ctx.Deadline(); ok {
+ forwardDial := netDial
+ netDial = func(network, addr string) (net.Conn, error) {
+ c, err := forwardDial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+ err = c.SetDeadline(deadline)
+ if err != nil {
+ c.Close()
+ return nil, err
+ }
+ return c, nil
+ }
+ }
+
+ // If needed, wrap the dial function to connect through a proxy.
+ if d.Proxy != nil {
+ proxyURL, err := d.Proxy(req)
+ if err != nil {
+ return nil, nil, err
+ }
+ if proxyURL != nil {
+ dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
+ if err != nil {
+ return nil, nil, err
+ }
+ netDial = dialer.Dial
+ }
+ }
+
+ hostPort, hostNoPort := hostPortNoPort(u)
+ trace := httptrace.ContextClientTrace(ctx)
+ if trace != nil && trace.GetConn != nil {
+ trace.GetConn(hostPort)
+ }
+
+ netConn, err := netDial("tcp", hostPort)
+ if trace != nil && trace.GotConn != nil {
+ trace.GotConn(httptrace.GotConnInfo{
+ Conn: netConn,
+ })
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+
+ defer func() {
+ if netConn != nil {
+ netConn.Close()
+ }
+ }()
+
+ if u.Scheme == "https" {
+ cfg := cloneTLSConfig(d.TLSClientConfig)
+ if cfg.ServerName == "" {
+ cfg.ServerName = hostNoPort
+ }
+ tlsConn := tls.Client(netConn, cfg)
+ netConn = tlsConn
+
+ var err error
+ if trace != nil {
+ err = doHandshakeWithTrace(trace, tlsConn, cfg)
+ } else {
+ err = doHandshake(tlsConn, cfg)
+ }
+
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)
+
+ if err := req.Write(netConn); err != nil {
+ return nil, nil, err
+ }
+
+ if trace != nil && trace.GotFirstResponseByte != nil {
+ if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 {
+ trace.GotFirstResponseByte()
+ }
+ }
+
+ resp, err := http.ReadResponse(conn.br, req)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if d.Jar != nil {
+ if rc := resp.Cookies(); len(rc) > 0 {
+ d.Jar.SetCookies(u, rc)
+ }
+ }
+
+ if resp.StatusCode != 101 ||
+ !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
+ !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
+ resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
+ // Before closing the network connection on return from this
+ // function, slurp up some of the response to aid application
+ // debugging.
+ buf := make([]byte, 1024)
+ n, _ := io.ReadFull(resp.Body, buf)
+ resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
+ return nil, resp, ErrBadHandshake
+ }
+
+ for _, ext := range parseExtensions(resp.Header) {
+ if ext[""] != "permessage-deflate" {
+ continue
+ }
+ _, snct := ext["server_no_context_takeover"]
+ _, cnct := ext["client_no_context_takeover"]
+ if !snct || !cnct {
+ return nil, resp, errInvalidCompression
+ }
+ conn.newCompressionWriter = compressNoContextTakeover
+ conn.newDecompressionReader = decompressNoContextTakeover
+ break
+ }
+
+ resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
+ conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
+
+ netConn.SetDeadline(time.Time{})
+ netConn = nil // to avoid close in defer.
+ return conn, resp, nil
+}
+
+func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error {
+ if err := tlsConn.Handshake(); err != nil {
+ return err
+ }
+ if !cfg.InsecureSkipVerify {
+ if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client_clone.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client_clone.go
new file mode 100644
index 00000000000..4f0d943723a
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client_clone.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package websocket
+
+import "crypto/tls"
+
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ return &tls.Config{}
+ }
+ return cfg.Clone()
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client_clone_legacy.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client_clone_legacy.go
new file mode 100644
index 00000000000..babb007fb41
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client_clone_legacy.go
@@ -0,0 +1,38 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8
+
+package websocket
+
+import "crypto/tls"
+
+// cloneTLSConfig clones all public fields except the fields
+// SessionTicketsDisabled and SessionTicketKey. This avoids copying the
+// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a
+// config in active use.
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ return &tls.Config{}
+ }
+ return &tls.Config{
+ Rand: cfg.Rand,
+ Time: cfg.Time,
+ Certificates: cfg.Certificates,
+ NameToCertificate: cfg.NameToCertificate,
+ GetCertificate: cfg.GetCertificate,
+ RootCAs: cfg.RootCAs,
+ NextProtos: cfg.NextProtos,
+ ServerName: cfg.ServerName,
+ ClientAuth: cfg.ClientAuth,
+ ClientCAs: cfg.ClientCAs,
+ InsecureSkipVerify: cfg.InsecureSkipVerify,
+ CipherSuites: cfg.CipherSuites,
+ PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+ ClientSessionCache: cfg.ClientSessionCache,
+ MinVersion: cfg.MinVersion,
+ MaxVersion: cfg.MaxVersion,
+ CurvePreferences: cfg.CurvePreferences,
+ }
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/compression.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/compression.go
new file mode 100644
index 00000000000..813ffb1e843
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/compression.go
@@ -0,0 +1,148 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "compress/flate"
+ "errors"
+ "io"
+ "strings"
+ "sync"
+)
+
+const (
+ minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6
+ maxCompressionLevel = flate.BestCompression
+ defaultCompressionLevel = 1
+)
+
+var (
+ flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
+ flateReaderPool = sync.Pool{New: func() interface{} {
+ return flate.NewReader(nil)
+ }}
+)
+
+func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
+ const tail =
+ // Add four bytes as specified in RFC
+ "\x00\x00\xff\xff" +
+ // Add final block to squelch unexpected EOF error from flate reader.
+ "\x01\x00\x00\xff\xff"
+
+ fr, _ := flateReaderPool.Get().(io.ReadCloser)
+ fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
+ return &flateReadWrapper{fr}
+}
+
+func isValidCompressionLevel(level int) bool {
+ return minCompressionLevel <= level && level <= maxCompressionLevel
+}
+
+func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
+ p := &flateWriterPools[level-minCompressionLevel]
+ tw := &truncWriter{w: w}
+ fw, _ := p.Get().(*flate.Writer)
+ if fw == nil {
+ fw, _ = flate.NewWriter(tw, level)
+ } else {
+ fw.Reset(tw)
+ }
+ return &flateWriteWrapper{fw: fw, tw: tw, p: p}
+}
+
+// truncWriter is an io.Writer that writes all but the last four bytes of the
+// stream to another io.Writer.
+type truncWriter struct {
+ w io.WriteCloser
+ n int
+ p [4]byte
+}
+
+func (w *truncWriter) Write(p []byte) (int, error) {
+ n := 0
+
+ // fill buffer first for simplicity.
+ if w.n < len(w.p) {
+ n = copy(w.p[w.n:], p)
+ p = p[n:]
+ w.n += n
+ if len(p) == 0 {
+ return n, nil
+ }
+ }
+
+ m := len(p)
+ if m > len(w.p) {
+ m = len(w.p)
+ }
+
+ if nn, err := w.w.Write(w.p[:m]); err != nil {
+ return n + nn, err
+ }
+
+ copy(w.p[:], w.p[m:])
+ copy(w.p[len(w.p)-m:], p[len(p)-m:])
+ nn, err := w.w.Write(p[:len(p)-m])
+ return n + nn, err
+}
+
+type flateWriteWrapper struct {
+ fw *flate.Writer
+ tw *truncWriter
+ p *sync.Pool
+}
+
+func (w *flateWriteWrapper) Write(p []byte) (int, error) {
+ if w.fw == nil {
+ return 0, errWriteClosed
+ }
+ return w.fw.Write(p)
+}
+
+func (w *flateWriteWrapper) Close() error {
+ if w.fw == nil {
+ return errWriteClosed
+ }
+ err1 := w.fw.Flush()
+ w.p.Put(w.fw)
+ w.fw = nil
+ if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
+ return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
+ }
+ err2 := w.tw.w.Close()
+ if err1 != nil {
+ return err1
+ }
+ return err2
+}
+
+type flateReadWrapper struct {
+ fr io.ReadCloser
+}
+
+func (r *flateReadWrapper) Read(p []byte) (int, error) {
+ if r.fr == nil {
+ return 0, io.ErrClosedPipe
+ }
+ n, err := r.fr.Read(p)
+ if err == io.EOF {
+ // Preemptively place the reader back in the pool. This helps with
+ // scenarios where the application does not call NextReader() soon after
+ // this final read.
+ r.Close()
+ }
+ return n, err
+}
+
+func (r *flateReadWrapper) Close() error {
+ if r.fr == nil {
+ return io.ErrClosedPipe
+ }
+ err := r.fr.Close()
+ flateReaderPool.Put(r.fr)
+ r.fr = nil
+ return err
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn.go
new file mode 100644
index 00000000000..ca46d2f793c
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn.go
@@ -0,0 +1,1201 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "encoding/binary"
+ "errors"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "strconv"
+ "sync"
+ "time"
+ "unicode/utf8"
+)
+
+const (
+ // Frame header byte 0 bits from Section 5.2 of RFC 6455
+ finalBit = 1 << 7
+ rsv1Bit = 1 << 6
+ rsv2Bit = 1 << 5
+ rsv3Bit = 1 << 4
+
+ // Frame header byte 1 bits from Section 5.2 of RFC 6455
+ maskBit = 1 << 7
+
+ maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask
+ maxControlFramePayloadSize = 125
+
+ writeWait = time.Second
+
+ defaultReadBufferSize = 4096
+ defaultWriteBufferSize = 4096
+
+ continuationFrame = 0
+ noFrame = -1
+)
+
+// Close codes defined in RFC 6455, section 11.7.
+const (
+ CloseNormalClosure = 1000
+ CloseGoingAway = 1001
+ CloseProtocolError = 1002
+ CloseUnsupportedData = 1003
+ CloseNoStatusReceived = 1005
+ CloseAbnormalClosure = 1006
+ CloseInvalidFramePayloadData = 1007
+ ClosePolicyViolation = 1008
+ CloseMessageTooBig = 1009
+ CloseMandatoryExtension = 1010
+ CloseInternalServerErr = 1011
+ CloseServiceRestart = 1012
+ CloseTryAgainLater = 1013
+ CloseTLSHandshake = 1015
+)
+
+// The message types are defined in RFC 6455, section 11.8.
+const (
+ // TextMessage denotes a text data message. The text message payload is
+ // interpreted as UTF-8 encoded text data.
+ TextMessage = 1
+
+ // BinaryMessage denotes a binary data message.
+ BinaryMessage = 2
+
+ // CloseMessage denotes a close control message. The optional message
+ // payload contains a numeric code and text. Use the FormatCloseMessage
+ // function to format a close message payload.
+ CloseMessage = 8
+
+ // PingMessage denotes a ping control message. The optional message payload
+ // is UTF-8 encoded text.
+ PingMessage = 9
+
+ // PongMessage denotes a pong control message. The optional message payload
+ // is UTF-8 encoded text.
+ PongMessage = 10
+)
+
+// ErrCloseSent is returned when the application writes a message to the
+// connection after sending a close message.
+var ErrCloseSent = errors.New("websocket: close sent")
+
+// ErrReadLimit is returned when reading a message that is larger than the
+// read limit set for the connection.
+var ErrReadLimit = errors.New("websocket: read limit exceeded")
+
+// netError satisfies the net Error interface.
+type netError struct {
+ msg string
+ temporary bool
+ timeout bool
+}
+
+func (e *netError) Error() string { return e.msg }
+func (e *netError) Temporary() bool { return e.temporary }
+func (e *netError) Timeout() bool { return e.timeout }
+
+// CloseError represents a close message.
+type CloseError struct {
+ // Code is defined in RFC 6455, section 11.7.
+ Code int
+
+ // Text is the optional text payload.
+ Text string
+}
+
+func (e *CloseError) Error() string {
+ s := []byte("websocket: close ")
+ s = strconv.AppendInt(s, int64(e.Code), 10)
+ switch e.Code {
+ case CloseNormalClosure:
+ s = append(s, " (normal)"...)
+ case CloseGoingAway:
+ s = append(s, " (going away)"...)
+ case CloseProtocolError:
+ s = append(s, " (protocol error)"...)
+ case CloseUnsupportedData:
+ s = append(s, " (unsupported data)"...)
+ case CloseNoStatusReceived:
+ s = append(s, " (no status)"...)
+ case CloseAbnormalClosure:
+ s = append(s, " (abnormal closure)"...)
+ case CloseInvalidFramePayloadData:
+ s = append(s, " (invalid payload data)"...)
+ case ClosePolicyViolation:
+ s = append(s, " (policy violation)"...)
+ case CloseMessageTooBig:
+ s = append(s, " (message too big)"...)
+ case CloseMandatoryExtension:
+ s = append(s, " (mandatory extension missing)"...)
+ case CloseInternalServerErr:
+ s = append(s, " (internal server error)"...)
+ case CloseTLSHandshake:
+ s = append(s, " (TLS handshake error)"...)
+ }
+ if e.Text != "" {
+ s = append(s, ": "...)
+ s = append(s, e.Text...)
+ }
+ return string(s)
+}
+
+// IsCloseError returns boolean indicating whether the error is a *CloseError
+// with one of the specified codes.
+func IsCloseError(err error, codes ...int) bool {
+ if e, ok := err.(*CloseError); ok {
+ for _, code := range codes {
+ if e.Code == code {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// IsUnexpectedCloseError returns boolean indicating whether the error is a
+// *CloseError with a code not in the list of expected codes.
+func IsUnexpectedCloseError(err error, expectedCodes ...int) bool {
+ if e, ok := err.(*CloseError); ok {
+ for _, code := range expectedCodes {
+ if e.Code == code {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+var (
+ errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true}
+ errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()}
+ errBadWriteOpCode = errors.New("websocket: bad write message type")
+ errWriteClosed = errors.New("websocket: write closed")
+ errInvalidControlFrame = errors.New("websocket: invalid control frame")
+)
+
+// newMaskKey returns a random 4-byte masking key for client-to-server
+// frames (RFC 6455 section 5.2).
+// NOTE(review): uses math/rand, not crypto/rand — confirm a
+// non-cryptographic source is intended for the mask key.
+func newMaskKey() [4]byte {
+	n := rand.Uint32()
+	return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}
+}
+
+// hideTempErr rewraps a temporary net.Error as a netError whose Temporary()
+// reports false (the temporary field is left at its zero value), keeping
+// the message and timeout flag. Connection errors are treated as permanent
+// by the read/write paths.
+func hideTempErr(err error) error {
+	if e, ok := err.(net.Error); ok && e.Temporary() {
+		err = &netError{msg: e.Error(), timeout: e.Timeout()}
+	}
+	return err
+}
+
+func isControl(frameType int) bool {
+ return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage
+}
+
+func isData(frameType int) bool {
+ return frameType == TextMessage || frameType == BinaryMessage
+}
+
+var validReceivedCloseCodes = map[int]bool{
+ // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
+
+ CloseNormalClosure: true,
+ CloseGoingAway: true,
+ CloseProtocolError: true,
+ CloseUnsupportedData: true,
+ CloseNoStatusReceived: false,
+ CloseAbnormalClosure: false,
+ CloseInvalidFramePayloadData: true,
+ ClosePolicyViolation: true,
+ CloseMessageTooBig: true,
+ CloseMandatoryExtension: true,
+ CloseInternalServerErr: true,
+ CloseServiceRestart: true,
+ CloseTryAgainLater: true,
+ CloseTLSHandshake: false,
+}
+
+func isValidReceivedCloseCode(code int) bool {
+ return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999)
+}
+
+// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this
+// interface. The type of the value stored in a pool is not specified.
+type BufferPool interface {
+ // Get gets a value from the pool or returns nil if the pool is empty.
+ Get() interface{}
+ // Put adds a value to the pool.
+ Put(interface{})
+}
+
+// writePoolData is the type added to the write buffer pool. This wrapper is
+// used to prevent applications from peeking at and depending on the values
+// added to the pool.
+type writePoolData struct{ buf []byte }
+
+// The Conn type represents a WebSocket connection.
+type Conn struct {
+ conn net.Conn
+ isServer bool
+ subprotocol string
+
+ // Write fields
+ mu chan struct{} // used as mutex to protect write to conn
+ writeBuf []byte // frame is constructed in this buffer.
+ writePool BufferPool
+ writeBufSize int
+ writeDeadline time.Time
+ writer io.WriteCloser // the current writer returned to the application
+ isWriting bool // for best-effort concurrent write detection
+
+ writeErrMu sync.Mutex
+ writeErr error
+
+ enableWriteCompression bool
+ compressionLevel int
+ newCompressionWriter func(io.WriteCloser, int) io.WriteCloser
+
+ // Read fields
+ reader io.ReadCloser // the current reader returned to the application
+ readErr error
+ br *bufio.Reader
+ // bytes remaining in current frame.
+ // set setReadRemaining to safely update this value and prevent overflow
+ readRemaining int64
+ readFinal bool // true the current message has more frames.
+ readLength int64 // Message size.
+ readLimit int64 // Maximum message size.
+ readMaskPos int
+ readMaskKey [4]byte
+ handlePong func(string) error
+ handlePing func(string) error
+ handleClose func(int, string) error
+ readErrCount int
+ messageReader *messageReader // the current low-level reader
+
+ readDecompress bool // whether last read frame had RSV1 set
+ newDecompressionReader func(io.Reader) io.ReadCloser
+}
+
+func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn {
+
+ if br == nil {
+ if readBufferSize == 0 {
+ readBufferSize = defaultReadBufferSize
+ } else if readBufferSize < maxControlFramePayloadSize {
+ // must be large enough for control frame
+ readBufferSize = maxControlFramePayloadSize
+ }
+ br = bufio.NewReaderSize(conn, readBufferSize)
+ }
+
+ if writeBufferSize <= 0 {
+ writeBufferSize = defaultWriteBufferSize
+ }
+ writeBufferSize += maxFrameHeaderSize
+
+ if writeBuf == nil && writeBufferPool == nil {
+ writeBuf = make([]byte, writeBufferSize)
+ }
+
+ mu := make(chan struct{}, 1)
+ mu <- struct{}{}
+ c := &Conn{
+ isServer: isServer,
+ br: br,
+ conn: conn,
+ mu: mu,
+ readFinal: true,
+ writeBuf: writeBuf,
+ writePool: writeBufferPool,
+ writeBufSize: writeBufferSize,
+ enableWriteCompression: true,
+ compressionLevel: defaultCompressionLevel,
+ }
+ c.SetCloseHandler(nil)
+ c.SetPingHandler(nil)
+ c.SetPongHandler(nil)
+ return c
+}
+
+// setReadRemaining tracks the number of bytes remaining on the connection. If n
+// overflows, an ErrReadLimit is returned.
+func (c *Conn) setReadRemaining(n int64) error {
+ if n < 0 {
+ return ErrReadLimit
+ }
+
+ c.readRemaining = n
+ return nil
+}
+
+// Subprotocol returns the negotiated protocol for the connection.
+func (c *Conn) Subprotocol() string {
+ return c.subprotocol
+}
+
+// Close closes the underlying network connection without sending or waiting
+// for a close message.
+func (c *Conn) Close() error {
+ return c.conn.Close()
+}
+
+// LocalAddr returns the local network address.
+func (c *Conn) LocalAddr() net.Addr {
+ return c.conn.LocalAddr()
+}
+
+// RemoteAddr returns the remote network address.
+func (c *Conn) RemoteAddr() net.Addr {
+ return c.conn.RemoteAddr()
+}
+
+// Write methods
+
+// writeFatal records err as the connection's permanent write error and
+// returns it. Only the first fatal error is kept; all subsequent write
+// attempts fail with that stored error.
+func (c *Conn) writeFatal(err error) error {
+	err = hideTempErr(err)
+	c.writeErrMu.Lock()
+	if c.writeErr == nil {
+		c.writeErr = err
+	}
+	c.writeErrMu.Unlock()
+	return err
+}
+
+// read returns the next n bytes from the buffered reader, mapping io.EOF to
+// errUnexpectedEOF since EOF in the middle of a frame is an abnormal
+// closure. The returned slice aliases the bufio.Reader's internal buffer
+// and is only valid until the next read on the connection.
+func (c *Conn) read(n int) ([]byte, error) {
+	p, err := c.br.Peek(n)
+	if err == io.EOF {
+		err = errUnexpectedEOF
+	}
+	c.br.Discard(len(p))
+	return p, err
+}
+
+// write sends one or two buffers as a single frame while holding the
+// connection's write mutex (the c.mu channel). A previously recorded fatal
+// write error is returned without touching the connection; I/O failures are
+// promoted to fatal via writeFatal. Once a close frame has been written,
+// ErrCloseSent is recorded so later writes fail.
+func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error {
+	<-c.mu
+	defer func() { c.mu <- struct{}{} }()
+
+	c.writeErrMu.Lock()
+	err := c.writeErr
+	c.writeErrMu.Unlock()
+	if err != nil {
+		return err
+	}
+
+	c.conn.SetWriteDeadline(deadline)
+	if len(buf1) == 0 {
+		_, err = c.conn.Write(buf0)
+	} else {
+		err = c.writeBufs(buf0, buf1)
+	}
+	if err != nil {
+		return c.writeFatal(err)
+	}
+	if frameType == CloseMessage {
+		c.writeFatal(ErrCloseSent)
+	}
+	return nil
+}
+
+// WriteControl writes a control message with the given deadline. The allowed
+// message types are CloseMessage, PingMessage and PongMessage.
+func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error {
+ if !isControl(messageType) {
+ return errBadWriteOpCode
+ }
+ if len(data) > maxControlFramePayloadSize {
+ return errInvalidControlFrame
+ }
+
+ b0 := byte(messageType) | finalBit
+ b1 := byte(len(data))
+ if !c.isServer {
+ b1 |= maskBit
+ }
+
+ buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize)
+ buf = append(buf, b0, b1)
+
+ if c.isServer {
+ buf = append(buf, data...)
+ } else {
+ key := newMaskKey()
+ buf = append(buf, key[:]...)
+ buf = append(buf, data...)
+ maskBytes(key, 0, buf[6:])
+ }
+
+ d := 1000 * time.Hour
+ if !deadline.IsZero() {
+ d = deadline.Sub(time.Now())
+ if d < 0 {
+ return errWriteTimeout
+ }
+ }
+
+ timer := time.NewTimer(d)
+ select {
+ case <-c.mu:
+ timer.Stop()
+ case <-timer.C:
+ return errWriteTimeout
+ }
+ defer func() { c.mu <- struct{}{} }()
+
+ c.writeErrMu.Lock()
+ err := c.writeErr
+ c.writeErrMu.Unlock()
+ if err != nil {
+ return err
+ }
+
+ c.conn.SetWriteDeadline(deadline)
+ _, err = c.conn.Write(buf)
+ if err != nil {
+ return c.writeFatal(err)
+ }
+ if messageType == CloseMessage {
+ c.writeFatal(ErrCloseSent)
+ }
+ return err
+}
+
+// beginMessage prepares a connection and message writer for a new message.
+// It validates messageType, propagates any recorded fatal write error, and
+// initializes mw with the write position just past the maximum frame
+// header. When the connection uses a BufferPool, the write buffer is
+// obtained lazily here (from the pool if possible, otherwise allocated).
+func (c *Conn) beginMessage(mw *messageWriter, messageType int) error {
+	// Close previous writer if not already closed by the application. It's
+	// probably better to return an error in this situation, but we cannot
+	// change this without breaking existing applications.
+	if c.writer != nil {
+		c.writer.Close()
+		c.writer = nil
+	}
+
+	if !isControl(messageType) && !isData(messageType) {
+		return errBadWriteOpCode
+	}
+
+	c.writeErrMu.Lock()
+	err := c.writeErr
+	c.writeErrMu.Unlock()
+	if err != nil {
+		return err
+	}
+
+	mw.c = c
+	mw.frameType = messageType
+	mw.pos = maxFrameHeaderSize
+
+	if c.writeBuf == nil {
+		wpd, ok := c.writePool.Get().(writePoolData)
+		if ok {
+			c.writeBuf = wpd.buf
+		} else {
+			c.writeBuf = make([]byte, c.writeBufSize)
+		}
+	}
+	return nil
+}
+
+// NextWriter returns a writer for the next message to send. The writer's Close
+// method flushes the complete message to the network.
+//
+// There can be at most one open writer on a connection. NextWriter closes the
+// previous writer if the application has not already done so.
+//
+// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and
+// PongMessage) are supported.
+func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
+ var mw messageWriter
+ if err := c.beginMessage(&mw, messageType); err != nil {
+ return nil, err
+ }
+ c.writer = &mw
+ if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) {
+ w := c.newCompressionWriter(c.writer, c.compressionLevel)
+ mw.compress = true
+ c.writer = w
+ }
+ return c.writer, nil
+}
+
+type messageWriter struct {
+ c *Conn
+ compress bool // whether next call to flushFrame should set RSV1
+ pos int // end of data in writeBuf.
+ frameType int // type of the current frame.
+ err error
+}
+
+// endMessage terminates the message with err: it records err on the
+// writer, detaches the writer from the connection, and hands the write
+// buffer back to the pool when one is in use. If the writer already holds
+// an error, nothing is changed and the *argument* err is returned as-is
+// (not the stored one). The passed err is always the return value.
+func (w *messageWriter) endMessage(err error) error {
+	if w.err != nil {
+		return err
+	}
+	c := w.c
+	w.err = err
+	c.writer = nil
+	if c.writePool != nil {
+		c.writePool.Put(writePoolData{buf: c.writeBuf})
+		c.writeBuf = nil
+	}
+	return err
+}
+
+// flushFrame writes buffered data and extra as a frame to the network. The
+// final argument indicates that this is the last frame in the message.
+func (w *messageWriter) flushFrame(final bool, extra []byte) error {
+ c := w.c
+ length := w.pos - maxFrameHeaderSize + len(extra)
+
+ // Check for invalid control frames.
+ if isControl(w.frameType) &&
+ (!final || length > maxControlFramePayloadSize) {
+ return w.endMessage(errInvalidControlFrame)
+ }
+
+ b0 := byte(w.frameType)
+ if final {
+ b0 |= finalBit
+ }
+ if w.compress {
+ b0 |= rsv1Bit
+ }
+ w.compress = false
+
+ b1 := byte(0)
+ if !c.isServer {
+ b1 |= maskBit
+ }
+
+ // Assume that the frame starts at beginning of c.writeBuf.
+ framePos := 0
+ if c.isServer {
+ // Adjust up if mask not included in the header.
+ framePos = 4
+ }
+
+ switch {
+ case length >= 65536:
+ c.writeBuf[framePos] = b0
+ c.writeBuf[framePos+1] = b1 | 127
+ binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length))
+ case length > 125:
+ framePos += 6
+ c.writeBuf[framePos] = b0
+ c.writeBuf[framePos+1] = b1 | 126
+ binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length))
+ default:
+ framePos += 8
+ c.writeBuf[framePos] = b0
+ c.writeBuf[framePos+1] = b1 | byte(length)
+ }
+
+ if !c.isServer {
+ key := newMaskKey()
+ copy(c.writeBuf[maxFrameHeaderSize-4:], key[:])
+ maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos])
+ if len(extra) > 0 {
+ return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode")))
+ }
+ }
+
+ // Write the buffers to the connection with best-effort detection of
+ // concurrent writes. See the concurrency section in the package
+ // documentation for more info.
+
+ if c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = true
+
+ err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra)
+
+ if !c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = false
+
+ if err != nil {
+ return w.endMessage(err)
+ }
+
+ if final {
+ w.endMessage(errWriteClosed)
+ return nil
+ }
+
+ // Setup for next frame.
+ w.pos = maxFrameHeaderSize
+ w.frameType = continuationFrame
+ return nil
+}
+
+// ncopy returns how many bytes, at most max, can currently be copied into
+// the write buffer at w.pos. If the buffer is full it first flushes a
+// non-final frame to make room.
+func (w *messageWriter) ncopy(max int) (int, error) {
+	n := len(w.c.writeBuf) - w.pos
+	if n <= 0 {
+		if err := w.flushFrame(false, nil); err != nil {
+			return 0, err
+		}
+		n = len(w.c.writeBuf) - w.pos
+	}
+	if n > max {
+		n = max
+	}
+	return n, nil
+}
+
+func (w *messageWriter) Write(p []byte) (int, error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+
+ if len(p) > 2*len(w.c.writeBuf) && w.c.isServer {
+ // Don't buffer large messages.
+ err := w.flushFrame(false, p)
+ if err != nil {
+ return 0, err
+ }
+ return len(p), nil
+ }
+
+ nn := len(p)
+ for len(p) > 0 {
+ n, err := w.ncopy(len(p))
+ if err != nil {
+ return 0, err
+ }
+ copy(w.c.writeBuf[w.pos:], p[:n])
+ w.pos += n
+ p = p[n:]
+ }
+ return nn, nil
+}
+
+func (w *messageWriter) WriteString(p string) (int, error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+
+ nn := len(p)
+ for len(p) > 0 {
+ n, err := w.ncopy(len(p))
+ if err != nil {
+ return 0, err
+ }
+ copy(w.c.writeBuf[w.pos:], p[:n])
+ w.pos += n
+ p = p[n:]
+ }
+ return nn, nil
+}
+
+func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+ for {
+ if w.pos == len(w.c.writeBuf) {
+ err = w.flushFrame(false, nil)
+ if err != nil {
+ break
+ }
+ }
+ var n int
+ n, err = r.Read(w.c.writeBuf[w.pos:])
+ w.pos += n
+ nn += int64(n)
+ if err != nil {
+ if err == io.EOF {
+ err = nil
+ }
+ break
+ }
+ }
+ return nn, err
+}
+
+func (w *messageWriter) Close() error {
+ if w.err != nil {
+ return w.err
+ }
+ return w.flushFrame(true, nil)
+}
+
+// WritePreparedMessage writes prepared message into connection.
+func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error {
+ frameType, frameData, err := pm.frame(prepareKey{
+ isServer: c.isServer,
+ compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType),
+ compressionLevel: c.compressionLevel,
+ })
+ if err != nil {
+ return err
+ }
+ if c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = true
+ err = c.write(frameType, c.writeDeadline, frameData, nil)
+ if !c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = false
+ return err
+}
+
+// WriteMessage is a helper method for getting a writer using NextWriter,
+// writing the message and closing the writer.
+func (c *Conn) WriteMessage(messageType int, data []byte) error {
+
+ if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) {
+ // Fast path with no allocations and single frame.
+
+ var mw messageWriter
+ if err := c.beginMessage(&mw, messageType); err != nil {
+ return err
+ }
+ n := copy(c.writeBuf[mw.pos:], data)
+ mw.pos += n
+ data = data[n:]
+ return mw.flushFrame(true, data)
+ }
+
+ w, err := c.NextWriter(messageType)
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(data); err != nil {
+ return err
+ }
+ return w.Close()
+}
+
+// SetWriteDeadline sets the write deadline on the underlying network
+// connection. After a write has timed out, the websocket state is corrupt and
+// all future writes will return an error. A zero value for t means writes will
+// not time out.
+func (c *Conn) SetWriteDeadline(t time.Time) error {
+ c.writeDeadline = t
+ return nil
+}
+
+// Read methods
+
+func (c *Conn) advanceFrame() (int, error) {
+ // 1. Skip remainder of previous frame.
+
+ if c.readRemaining > 0 {
+ if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil {
+ return noFrame, err
+ }
+ }
+
+ // 2. Read and parse first two bytes of frame header.
+
+ p, err := c.read(2)
+ if err != nil {
+ return noFrame, err
+ }
+
+ final := p[0]&finalBit != 0
+ frameType := int(p[0] & 0xf)
+ mask := p[1]&maskBit != 0
+ c.setReadRemaining(int64(p[1] & 0x7f))
+
+ c.readDecompress = false
+ if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 {
+ c.readDecompress = true
+ p[0] &^= rsv1Bit
+ }
+
+ if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 {
+ return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16))
+ }
+
+ switch frameType {
+ case CloseMessage, PingMessage, PongMessage:
+ if c.readRemaining > maxControlFramePayloadSize {
+ return noFrame, c.handleProtocolError("control frame length > 125")
+ }
+ if !final {
+ return noFrame, c.handleProtocolError("control frame not final")
+ }
+ case TextMessage, BinaryMessage:
+ if !c.readFinal {
+ return noFrame, c.handleProtocolError("message start before final message frame")
+ }
+ c.readFinal = final
+ case continuationFrame:
+ if c.readFinal {
+ return noFrame, c.handleProtocolError("continuation after final message frame")
+ }
+ c.readFinal = final
+ default:
+ return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType))
+ }
+
+ // 3. Read and parse frame length as per
+ // https://tools.ietf.org/html/rfc6455#section-5.2
+ //
+ // The length of the "Payload data", in bytes: if 0-125, that is the payload
+ // length.
+ // - If 126, the following 2 bytes interpreted as a 16-bit unsigned
+ // integer are the payload length.
+ // - If 127, the following 8 bytes interpreted as
+ // a 64-bit unsigned integer (the most significant bit MUST be 0) are the
+ // payload length. Multibyte length quantities are expressed in network byte
+ // order.
+
+ switch c.readRemaining {
+ case 126:
+ p, err := c.read(2)
+ if err != nil {
+ return noFrame, err
+ }
+
+ if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil {
+ return noFrame, err
+ }
+ case 127:
+ p, err := c.read(8)
+ if err != nil {
+ return noFrame, err
+ }
+
+ if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil {
+ return noFrame, err
+ }
+ }
+
+ // 4. Handle frame masking.
+
+ if mask != c.isServer {
+ return noFrame, c.handleProtocolError("incorrect mask flag")
+ }
+
+ if mask {
+ c.readMaskPos = 0
+ p, err := c.read(len(c.readMaskKey))
+ if err != nil {
+ return noFrame, err
+ }
+ copy(c.readMaskKey[:], p)
+ }
+
+ // 5. For text and binary messages, enforce read limit and return.
+
+ if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage {
+
+ c.readLength += c.readRemaining
+ // Don't allow readLength to overflow in the presence of a large readRemaining
+ // counter.
+ if c.readLength < 0 {
+ return noFrame, ErrReadLimit
+ }
+
+ if c.readLimit > 0 && c.readLength > c.readLimit {
+ c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait))
+ return noFrame, ErrReadLimit
+ }
+
+ return frameType, nil
+ }
+
+ // 6. Read control frame payload.
+
+ var payload []byte
+ if c.readRemaining > 0 {
+ payload, err = c.read(int(c.readRemaining))
+ c.setReadRemaining(0)
+ if err != nil {
+ return noFrame, err
+ }
+ if c.isServer {
+ maskBytes(c.readMaskKey, 0, payload)
+ }
+ }
+
+ // 7. Process control frame payload.
+
+ switch frameType {
+ case PongMessage:
+ if err := c.handlePong(string(payload)); err != nil {
+ return noFrame, err
+ }
+ case PingMessage:
+ if err := c.handlePing(string(payload)); err != nil {
+ return noFrame, err
+ }
+ case CloseMessage:
+ closeCode := CloseNoStatusReceived
+ closeText := ""
+ if len(payload) >= 2 {
+ closeCode = int(binary.BigEndian.Uint16(payload))
+ if !isValidReceivedCloseCode(closeCode) {
+ return noFrame, c.handleProtocolError("invalid close code")
+ }
+ closeText = string(payload[2:])
+ if !utf8.ValidString(closeText) {
+ return noFrame, c.handleProtocolError("invalid utf8 payload in close frame")
+ }
+ }
+ if err := c.handleClose(closeCode, closeText); err != nil {
+ return noFrame, err
+ }
+ return noFrame, &CloseError{Code: closeCode, Text: closeText}
+ }
+
+ return frameType, nil
+}
+
+// handleProtocolError best-effort sends a close frame with code
+// CloseProtocolError carrying message, then returns an error with the same
+// message for the read path to surface to the application.
+func (c *Conn) handleProtocolError(message string) error {
+	c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait))
+	return errors.New("websocket: " + message)
+}
+
+// NextReader returns the next data message received from the peer. The
+// returned messageType is either TextMessage or BinaryMessage.
+//
+// There can be at most one open reader on a connection. NextReader discards
+// the previous message if the application has not already consumed it.
+//
+// Applications must break out of the application's read loop when this method
+// returns a non-nil error value. Errors returned from this method are
+// permanent. Once this method returns a non-nil error, all subsequent calls to
+// this method return the same error.
+func (c *Conn) NextReader() (messageType int, r io.Reader, err error) {
+ // Close previous reader, only relevant for decompression.
+ if c.reader != nil {
+ c.reader.Close()
+ c.reader = nil
+ }
+
+ c.messageReader = nil
+ c.readLength = 0
+
+ for c.readErr == nil {
+ frameType, err := c.advanceFrame()
+ if err != nil {
+ c.readErr = hideTempErr(err)
+ break
+ }
+
+ if frameType == TextMessage || frameType == BinaryMessage {
+ c.messageReader = &messageReader{c}
+ c.reader = c.messageReader
+ if c.readDecompress {
+ c.reader = c.newDecompressionReader(c.reader)
+ }
+ return frameType, c.reader, nil
+ }
+ }
+
+ // Applications that do handle the error returned from this method spin in
+ // tight loop on connection failure. To help application developers detect
+ // this error, panic on repeated reads to the failed connection.
+ c.readErrCount++
+ if c.readErrCount >= 1000 {
+ panic("repeated read on failed websocket connection")
+ }
+
+ return noFrame, nil, c.readErr
+}
+
+type messageReader struct{ c *Conn }
+
+func (r *messageReader) Read(b []byte) (int, error) {
+ c := r.c
+ if c.messageReader != r {
+ return 0, io.EOF
+ }
+
+ for c.readErr == nil {
+
+ if c.readRemaining > 0 {
+ if int64(len(b)) > c.readRemaining {
+ b = b[:c.readRemaining]
+ }
+ n, err := c.br.Read(b)
+ c.readErr = hideTempErr(err)
+ if c.isServer {
+ c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n])
+ }
+ rem := c.readRemaining
+ rem -= int64(n)
+ c.setReadRemaining(rem)
+ if c.readRemaining > 0 && c.readErr == io.EOF {
+ c.readErr = errUnexpectedEOF
+ }
+ return n, c.readErr
+ }
+
+ if c.readFinal {
+ c.messageReader = nil
+ return 0, io.EOF
+ }
+
+ frameType, err := c.advanceFrame()
+ switch {
+ case err != nil:
+ c.readErr = hideTempErr(err)
+ case frameType == TextMessage || frameType == BinaryMessage:
+ c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader")
+ }
+ }
+
+ err := c.readErr
+ if err == io.EOF && c.messageReader == r {
+ err = errUnexpectedEOF
+ }
+ return 0, err
+}
+
+func (r *messageReader) Close() error {
+ return nil
+}
+
+// ReadMessage is a helper method for getting a reader using NextReader and
+// reading from that reader to a buffer.
+func (c *Conn) ReadMessage() (messageType int, p []byte, err error) {
+ var r io.Reader
+ messageType, r, err = c.NextReader()
+ if err != nil {
+ return messageType, nil, err
+ }
+ p, err = ioutil.ReadAll(r)
+ return messageType, p, err
+}
+
+// SetReadDeadline sets the read deadline on the underlying network connection.
+// After a read has timed out, the websocket connection state is corrupt and
+// all future reads will return an error. A zero value for t means reads will
+// not time out.
+func (c *Conn) SetReadDeadline(t time.Time) error {
+ return c.conn.SetReadDeadline(t)
+}
+
+// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a
+// message exceeds the limit, the connection sends a close message to the peer
+// and returns ErrReadLimit to the application.
+func (c *Conn) SetReadLimit(limit int64) {
+ c.readLimit = limit
+}
+
+// CloseHandler returns the current close handler
+func (c *Conn) CloseHandler() func(code int, text string) error {
+ return c.handleClose
+}
+
+// SetCloseHandler sets the handler for close messages received from the peer.
+// The code argument to h is the received close code or CloseNoStatusReceived
+// if the close message is empty. The default close handler sends a close
+// message back to the peer.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// close messages as described in the section on Control Messages above.
+//
+// The connection read methods return a CloseError when a close message is
+// received. Most applications should handle close messages as part of their
+// normal error handling. Applications should only set a close handler when the
+// application must perform some action before sending a close message back to
+// the peer.
+func (c *Conn) SetCloseHandler(h func(code int, text string) error) {
+ if h == nil {
+ h = func(code int, text string) error {
+ message := FormatCloseMessage(code, "")
+ c.WriteControl(CloseMessage, message, time.Now().Add(writeWait))
+ return nil
+ }
+ }
+ c.handleClose = h
+}
+
+// PingHandler returns the current ping handler
+func (c *Conn) PingHandler() func(appData string) error {
+ return c.handlePing
+}
+
+// SetPingHandler sets the handler for ping messages received from the peer.
+// The appData argument to h is the PING message application data. The default
+// ping handler sends a pong to the peer.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// ping messages as described in the section on Control Messages above.
+func (c *Conn) SetPingHandler(h func(appData string) error) {
+ if h == nil {
+ h = func(message string) error {
+ err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait))
+ if err == ErrCloseSent {
+ return nil
+ } else if e, ok := err.(net.Error); ok && e.Temporary() {
+ return nil
+ }
+ return err
+ }
+ }
+ c.handlePing = h
+}
+
+// PongHandler returns the current pong handler
+func (c *Conn) PongHandler() func(appData string) error {
+ return c.handlePong
+}
+
+// SetPongHandler sets the handler for pong messages received from the peer.
+// The appData argument to h is the PONG message application data. The default
+// pong handler does nothing.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// pong messages as described in the section on Control Messages above.
+func (c *Conn) SetPongHandler(h func(appData string) error) {
+ if h == nil {
+ h = func(string) error { return nil }
+ }
+ c.handlePong = h
+}
+
+// UnderlyingConn returns the internal net.Conn. This can be used to further
+// modifications to connection specific flags.
+func (c *Conn) UnderlyingConn() net.Conn {
+ return c.conn
+}
+
+// EnableWriteCompression enables and disables write compression of
+// subsequent text and binary messages. This function is a noop if
+// compression was not negotiated with the peer.
+func (c *Conn) EnableWriteCompression(enable bool) {
+ c.enableWriteCompression = enable
+}
+
+// SetCompressionLevel sets the flate compression level for subsequent text and
+// binary messages. This function is a noop if compression was not negotiated
+// with the peer. See the compress/flate package for a description of
+// compression levels.
+func (c *Conn) SetCompressionLevel(level int) error {
+ if !isValidCompressionLevel(level) {
+ return errors.New("websocket: invalid compression level")
+ }
+ c.compressionLevel = level
+ return nil
+}
+
+// FormatCloseMessage formats closeCode and text as a WebSocket close message.
+// An empty message is returned for code CloseNoStatusReceived.
+func FormatCloseMessage(closeCode int, text string) []byte {
+ if closeCode == CloseNoStatusReceived {
+ // Return empty message because it's illegal to send
+ // CloseNoStatusReceived. Return non-nil value in case application
+ // checks for nil.
+ return []byte{}
+ }
+ buf := make([]byte, 2+len(text))
+ binary.BigEndian.PutUint16(buf, uint16(closeCode))
+ copy(buf[2:], text)
+ return buf
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn_write.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn_write.go
new file mode 100644
index 00000000000..a509a21f87a
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn_write.go
@@ -0,0 +1,15 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package websocket
+
+import "net"
+
+func (c *Conn) writeBufs(bufs ...[]byte) error {
+ b := net.Buffers(bufs)
+ _, err := b.WriteTo(c.conn)
+ return err
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn_write_legacy.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn_write_legacy.go
new file mode 100644
index 00000000000..37edaff5a57
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn_write_legacy.go
@@ -0,0 +1,18 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8
+
+package websocket
+
+func (c *Conn) writeBufs(bufs ...[]byte) error {
+ for _, buf := range bufs {
+ if len(buf) > 0 {
+ if _, err := c.conn.Write(buf); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/doc.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/doc.go
new file mode 100644
index 00000000000..8db0cef95a2
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/doc.go
@@ -0,0 +1,227 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package websocket implements the WebSocket protocol defined in RFC 6455.
+//
+// Overview
+//
+// The Conn type represents a WebSocket connection. A server application calls
+// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
+//
+// var upgrader = websocket.Upgrader{
+// ReadBufferSize: 1024,
+// WriteBufferSize: 1024,
+// }
+//
+// func handler(w http.ResponseWriter, r *http.Request) {
+// conn, err := upgrader.Upgrade(w, r, nil)
+// if err != nil {
+// log.Println(err)
+// return
+// }
+// ... Use conn to send and receive messages.
+// }
+//
+// Call the connection's WriteMessage and ReadMessage methods to send and
+// receive messages as a slice of bytes. This snippet of code shows how to echo
+// messages using these methods:
+//
+// for {
+// messageType, p, err := conn.ReadMessage()
+// if err != nil {
+// log.Println(err)
+// return
+// }
+// if err := conn.WriteMessage(messageType, p); err != nil {
+// log.Println(err)
+// return
+// }
+// }
+//
+// In the above snippet of code, p is a []byte and messageType is an int with
+// value websocket.BinaryMessage or websocket.TextMessage.
+//
+// An application can also send and receive messages using the io.WriteCloser
+// and io.Reader interfaces. To send a message, call the connection NextWriter
+// method to get an io.WriteCloser, write the message to the writer and close
+// the writer when done. To receive a message, call the connection NextReader
+// method to get an io.Reader and read until io.EOF is returned. This snippet
+// shows how to echo messages using the NextWriter and NextReader methods:
+//
+// for {
+// messageType, r, err := conn.NextReader()
+// if err != nil {
+// return
+// }
+// w, err := conn.NextWriter(messageType)
+// if err != nil {
+// return err
+// }
+// if _, err := io.Copy(w, r); err != nil {
+// return err
+// }
+// if err := w.Close(); err != nil {
+// return err
+// }
+// }
+//
+// Data Messages
+//
+// The WebSocket protocol distinguishes between text and binary data messages.
+// Text messages are interpreted as UTF-8 encoded text. The interpretation of
+// binary messages is left to the application.
+//
+// This package uses the TextMessage and BinaryMessage integer constants to
+// identify the two data message types. The ReadMessage and NextReader methods
+// return the type of the received message. The messageType argument to the
+// WriteMessage and NextWriter methods specifies the type of a sent message.
+//
+// It is the application's responsibility to ensure that text messages are
+// valid UTF-8 encoded text.
+//
+// Control Messages
+//
+// The WebSocket protocol defines three types of control messages: close, ping
+// and pong. Call the connection WriteControl, WriteMessage or NextWriter
+// methods to send a control message to the peer.
+//
+// Connections handle received close messages by calling the handler function
+// set with the SetCloseHandler method and by returning a *CloseError from the
+// NextReader, ReadMessage or the message Read method. The default close
+// handler sends a close message to the peer.
+//
+// Connections handle received ping messages by calling the handler function
+// set with the SetPingHandler method. The default ping handler sends a pong
+// message to the peer.
+//
+// Connections handle received pong messages by calling the handler function
+// set with the SetPongHandler method. The default pong handler does nothing.
+// If an application sends ping messages, then the application should set a
+// pong handler to receive the corresponding pong.
+//
+// The control message handler functions are called from the NextReader,
+// ReadMessage and message reader Read methods. The default close and ping
+// handlers can block these methods for a short time when the handler writes to
+// the connection.
+//
+// The application must read the connection to process close, ping and pong
+// messages sent from the peer. If the application is not otherwise interested
+// in messages from the peer, then the application should start a goroutine to
+// read and discard messages from the peer. A simple example is:
+//
+// func readLoop(c *websocket.Conn) {
+// for {
+// if _, _, err := c.NextReader(); err != nil {
+// c.Close()
+// break
+// }
+// }
+// }
+//
+// Concurrency
+//
+// Connections support one concurrent reader and one concurrent writer.
+//
+// Applications are responsible for ensuring that no more than one goroutine
+// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
+// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
+// that no more than one goroutine calls the read methods (NextReader,
+// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
+// concurrently.
+//
+// The Close and WriteControl methods can be called concurrently with all other
+// methods.
+//
+// Origin Considerations
+//
+// Web browsers allow Javascript applications to open a WebSocket connection to
+// any host. It's up to the server to enforce an origin policy using the Origin
+// request header sent by the browser.
+//
+// The Upgrader calls the function specified in the CheckOrigin field to check
+// the origin. If the CheckOrigin function returns false, then the Upgrade
+// method fails the WebSocket handshake with HTTP status 403.
+//
+// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
+// the handshake if the Origin request header is present and the Origin host is
+// not equal to the Host request header.
+//
+// The deprecated package-level Upgrade function does not perform origin
+// checking. The application is responsible for checking the Origin header
+// before calling the Upgrade function.
+//
+// Buffers
+//
+// Connections buffer network input and output to reduce the number
+// of system calls when reading or writing messages.
+//
+// Write buffers are also used for constructing WebSocket frames. See RFC 6455,
+// Section 5 for a discussion of message framing. A WebSocket frame header is
+// written to the network each time a write buffer is flushed to the network.
+// Decreasing the size of the write buffer can increase the amount of framing
+// overhead on the connection.
+//
+// The buffer sizes in bytes are specified by the ReadBufferSize and
+// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default
+// size of 4096 when a buffer size field is set to zero. The Upgrader reuses
+// buffers created by the HTTP server when a buffer size field is set to zero.
+// The HTTP server buffers have a size of 4096 at the time of this writing.
+//
+// The buffer sizes do not limit the size of a message that can be read or
+// written by a connection.
+//
+// Buffers are held for the lifetime of the connection by default. If the
+// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the
+// write buffer only when writing a message.
+//
+// Applications should tune the buffer sizes to balance memory use and
+// performance. Increasing the buffer size uses more memory, but can reduce the
+// number of system calls to read or write the network. In the case of writing,
+// increasing the buffer size can reduce the number of frame headers written to
+// the network.
+//
+// Some guidelines for setting buffer parameters are:
+//
+// Limit the buffer sizes to the maximum expected message size. Buffers larger
+// than the largest message do not provide any benefit.
+//
+// Depending on the distribution of message sizes, setting the buffer size to
+// a value less than the maximum expected message size can greatly reduce memory
+// use with a small impact on performance. Here's an example: If 99% of the
+// messages are smaller than 256 bytes and the maximum message size is 512
+// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls
+// than a buffer size of 512 bytes. The memory savings is 50%.
+//
+// A write buffer pool is useful when the application has a modest number of
+// writes over a large number of connections. When buffers are pooled, a larger
+// buffer size has a reduced impact on total memory use and has the benefit of
+// reducing system calls and frame overhead.
+//
+// Compression EXPERIMENTAL
+//
+// Per message compression extensions (RFC 7692) are experimentally supported
+// by this package in a limited capacity. Setting the EnableCompression option
+// to true in Dialer or Upgrader will attempt to negotiate per message deflate
+// support.
+//
+// var upgrader = websocket.Upgrader{
+// EnableCompression: true,
+// }
+//
+// If compression was successfully negotiated with the connection's peer, any
+// message received in compressed form will be automatically decompressed.
+// All Read methods will return uncompressed bytes.
+//
+// Per message compression of messages written to a connection can be enabled
+// or disabled by calling the corresponding Conn method:
+//
+// conn.EnableWriteCompression(false)
+//
+// Currently this package does not support compression with "context takeover".
+// This means that messages must be compressed and decompressed in isolation,
+// without retaining sliding window or dictionary state across messages. For
+// more details refer to RFC 7692.
+//
+// Use of compression is experimental and may result in decreased performance.
+package websocket
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/go.mod b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/go.mod
new file mode 100644
index 00000000000..1a7afd5028a
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/go.mod
@@ -0,0 +1,3 @@
+module github.com/gorilla/websocket
+
+go 1.12
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/join.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/join.go
new file mode 100644
index 00000000000..c64f8c82901
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/join.go
@@ -0,0 +1,42 @@
+// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "io"
+ "strings"
+)
+
+// JoinMessages concatenates received messages to create a single io.Reader.
+// The string term is appended to each message. The returned reader does not
+// support concurrent calls to the Read method.
+func JoinMessages(c *Conn, term string) io.Reader {
+ return &joinReader{c: c, term: term}
+}
+
+type joinReader struct {
+ c *Conn
+ term string
+ r io.Reader
+}
+
+func (r *joinReader) Read(p []byte) (int, error) {
+ if r.r == nil {
+ var err error
+ _, r.r, err = r.c.NextReader()
+ if err != nil {
+ return 0, err
+ }
+ if r.term != "" {
+ r.r = io.MultiReader(r.r, strings.NewReader(r.term))
+ }
+ }
+ n, err := r.r.Read(p)
+ if err == io.EOF {
+ err = nil
+ r.r = nil
+ }
+ return n, err
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/json.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/json.go
new file mode 100644
index 00000000000..dc2c1f6415f
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/json.go
@@ -0,0 +1,60 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "encoding/json"
+ "io"
+)
+
+// WriteJSON writes the JSON encoding of v as a message.
+//
+// Deprecated: Use c.WriteJSON instead.
+func WriteJSON(c *Conn, v interface{}) error {
+ return c.WriteJSON(v)
+}
+
+// WriteJSON writes the JSON encoding of v as a message.
+//
+// See the documentation for encoding/json Marshal for details about the
+// conversion of Go values to JSON.
+func (c *Conn) WriteJSON(v interface{}) error {
+ w, err := c.NextWriter(TextMessage)
+ if err != nil {
+ return err
+ }
+ err1 := json.NewEncoder(w).Encode(v)
+ err2 := w.Close()
+ if err1 != nil {
+ return err1
+ }
+ return err2
+}
+
+// ReadJSON reads the next JSON-encoded message from the connection and stores
+// it in the value pointed to by v.
+//
+// Deprecated: Use c.ReadJSON instead.
+func ReadJSON(c *Conn, v interface{}) error {
+ return c.ReadJSON(v)
+}
+
+// ReadJSON reads the next JSON-encoded message from the connection and stores
+// it in the value pointed to by v.
+//
+// See the documentation for the encoding/json Unmarshal function for details
+// about the conversion of JSON to a Go value.
+func (c *Conn) ReadJSON(v interface{}) error {
+ _, r, err := c.NextReader()
+ if err != nil {
+ return err
+ }
+ err = json.NewDecoder(r).Decode(v)
+ if err == io.EOF {
+ // One value is expected in the message.
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/mask.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/mask.go
new file mode 100644
index 00000000000..577fce9efd7
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/mask.go
@@ -0,0 +1,54 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
+// this source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+// +build !appengine
+
+package websocket
+
+import "unsafe"
+
+const wordSize = int(unsafe.Sizeof(uintptr(0)))
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+ // Mask one byte at a time for small buffers.
+ if len(b) < 2*wordSize {
+ for i := range b {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+ return pos & 3
+ }
+
+ // Mask one byte at a time to word boundary.
+ if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
+ n = wordSize - n
+ for i := range b[:n] {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+ b = b[n:]
+ }
+
+ // Create aligned word size key.
+ var k [wordSize]byte
+ for i := range k {
+ k[i] = key[(pos+i)&3]
+ }
+ kw := *(*uintptr)(unsafe.Pointer(&k))
+
+ // Mask one word at a time.
+ n := (len(b) / wordSize) * wordSize
+ for i := 0; i < n; i += wordSize {
+ *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
+ }
+
+ // Mask one byte at a time for remaining bytes.
+ b = b[n:]
+ for i := range b {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+
+ return pos & 3
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/mask_safe.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/mask_safe.go
new file mode 100644
index 00000000000..2aac060e52e
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/mask_safe.go
@@ -0,0 +1,15 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
+// this source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+// +build appengine
+
+package websocket
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+ for i := range b {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+ return pos & 3
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/prepared.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/prepared.go
new file mode 100644
index 00000000000..c854225e967
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/prepared.go
@@ -0,0 +1,102 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bytes"
+ "net"
+ "sync"
+ "time"
+)
+
+// PreparedMessage caches on the wire representations of a message payload.
+// Use PreparedMessage to efficiently send a message payload to multiple
+// connections. PreparedMessage is especially useful when compression is used
+// because the CPU and memory expensive compression operation can be executed
+// once for a given set of compression options.
+type PreparedMessage struct {
+ messageType int
+ data []byte
+ mu sync.Mutex
+ frames map[prepareKey]*preparedFrame
+}
+
+// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
+type prepareKey struct {
+ isServer bool
+ compress bool
+ compressionLevel int
+}
+
+// preparedFrame contains data in wire representation.
+type preparedFrame struct {
+ once sync.Once
+ data []byte
+}
+
+// NewPreparedMessage returns an initialized PreparedMessage. You can then send
+// it to connection using WritePreparedMessage method. Valid wire
+// representation will be calculated lazily only once for a set of current
+// connection options.
+func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
+ pm := &PreparedMessage{
+ messageType: messageType,
+ frames: make(map[prepareKey]*preparedFrame),
+ data: data,
+ }
+
+ // Prepare a plain server frame.
+ _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
+ if err != nil {
+ return nil, err
+ }
+
+ // To protect against caller modifying the data argument, remember the data
+ // copied to the plain server frame.
+ pm.data = frameData[len(frameData)-len(data):]
+ return pm, nil
+}
+
+func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
+ pm.mu.Lock()
+ frame, ok := pm.frames[key]
+ if !ok {
+ frame = &preparedFrame{}
+ pm.frames[key] = frame
+ }
+ pm.mu.Unlock()
+
+ var err error
+ frame.once.Do(func() {
+ // Prepare a frame using a 'fake' connection.
+ // TODO: Refactor code in conn.go to allow more direct construction of
+ // the frame.
+ mu := make(chan struct{}, 1)
+ mu <- struct{}{}
+ var nc prepareConn
+ c := &Conn{
+ conn: &nc,
+ mu: mu,
+ isServer: key.isServer,
+ compressionLevel: key.compressionLevel,
+ enableWriteCompression: true,
+ writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
+ }
+ if key.compress {
+ c.newCompressionWriter = compressNoContextTakeover
+ }
+ err = c.WriteMessage(pm.messageType, pm.data)
+ frame.data = nc.buf.Bytes()
+ })
+ return pm.messageType, frame.data, err
+}
+
+type prepareConn struct {
+ buf bytes.Buffer
+ net.Conn
+}
+
+func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) }
+func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/proxy.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/proxy.go
new file mode 100644
index 00000000000..e87a8c9f0c9
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/proxy.go
@@ -0,0 +1,77 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "encoding/base64"
+ "errors"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+type netDialerFunc func(network, addr string) (net.Conn, error)
+
+func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
+ return fn(network, addr)
+}
+
+func init() {
+ proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
+ return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil
+ })
+}
+
+type httpProxyDialer struct {
+ proxyURL *url.URL
+ forwardDial func(network, addr string) (net.Conn, error)
+}
+
+func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
+ hostPort, _ := hostPortNoPort(hpd.proxyURL)
+ conn, err := hpd.forwardDial(network, hostPort)
+ if err != nil {
+ return nil, err
+ }
+
+ connectHeader := make(http.Header)
+ if user := hpd.proxyURL.User; user != nil {
+ proxyUser := user.Username()
+ if proxyPassword, passwordSet := user.Password(); passwordSet {
+ credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
+ connectHeader.Set("Proxy-Authorization", "Basic "+credential)
+ }
+ }
+
+ connectReq := &http.Request{
+ Method: "CONNECT",
+ URL: &url.URL{Opaque: addr},
+ Host: addr,
+ Header: connectHeader,
+ }
+
+ if err := connectReq.Write(conn); err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+	// Read response. It's OK to use and discard buffered reader here because
+ // the remote server does not speak until spoken to.
+ br := bufio.NewReader(conn)
+ resp, err := http.ReadResponse(br, connectReq)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ if resp.StatusCode != 200 {
+ conn.Close()
+ f := strings.SplitN(resp.Status, " ", 2)
+ return nil, errors.New(f[1])
+ }
+ return conn, nil
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/server.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/server.go
new file mode 100644
index 00000000000..887d558918c
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/server.go
@@ -0,0 +1,363 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "errors"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// HandshakeError describes an error with the handshake from the peer.
+type HandshakeError struct {
+ message string
+}
+
+func (e HandshakeError) Error() string { return e.message }
+
+// Upgrader specifies parameters for upgrading an HTTP connection to a
+// WebSocket connection.
+type Upgrader struct {
+ // HandshakeTimeout specifies the duration for the handshake to complete.
+ HandshakeTimeout time.Duration
+
+ // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
+ // size is zero, then buffers allocated by the HTTP server are used. The
+ // I/O buffer sizes do not limit the size of the messages that can be sent
+ // or received.
+ ReadBufferSize, WriteBufferSize int
+
+ // WriteBufferPool is a pool of buffers for write operations. If the value
+ // is not set, then write buffers are allocated to the connection for the
+ // lifetime of the connection.
+ //
+ // A pool is most useful when the application has a modest volume of writes
+ // across a large number of connections.
+ //
+ // Applications should use a single pool for each unique value of
+ // WriteBufferSize.
+ WriteBufferPool BufferPool
+
+ // Subprotocols specifies the server's supported protocols in order of
+ // preference. If this field is not nil, then the Upgrade method negotiates a
+ // subprotocol by selecting the first match in this list with a protocol
+ // requested by the client. If there's no match, then no protocol is
+ // negotiated (the Sec-Websocket-Protocol header is not included in the
+ // handshake response).
+ Subprotocols []string
+
+ // Error specifies the function for generating HTTP error responses. If Error
+ // is nil, then http.Error is used to generate the HTTP response.
+ Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
+
+ // CheckOrigin returns true if the request Origin header is acceptable. If
+ // CheckOrigin is nil, then a safe default is used: return false if the
+ // Origin request header is present and the origin host is not equal to
+ // request Host header.
+ //
+ // A CheckOrigin function should carefully validate the request origin to
+ // prevent cross-site request forgery.
+ CheckOrigin func(r *http.Request) bool
+
+ // EnableCompression specify if the server should attempt to negotiate per
+ // message compression (RFC 7692). Setting this value to true does not
+ // guarantee that compression will be supported. Currently only "no context
+ // takeover" modes are supported.
+ EnableCompression bool
+}
+
+func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
+ err := HandshakeError{reason}
+ if u.Error != nil {
+ u.Error(w, r, status, err)
+ } else {
+ w.Header().Set("Sec-Websocket-Version", "13")
+ http.Error(w, http.StatusText(status), status)
+ }
+ return nil, err
+}
+
+// checkSameOrigin returns true if the origin is not set or is equal to the request host.
+func checkSameOrigin(r *http.Request) bool {
+ origin := r.Header["Origin"]
+ if len(origin) == 0 {
+ return true
+ }
+ u, err := url.Parse(origin[0])
+ if err != nil {
+ return false
+ }
+ return equalASCIIFold(u.Host, r.Host)
+}
+
+func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
+ if u.Subprotocols != nil {
+ clientProtocols := Subprotocols(r)
+ for _, serverProtocol := range u.Subprotocols {
+ for _, clientProtocol := range clientProtocols {
+ if clientProtocol == serverProtocol {
+ return clientProtocol
+ }
+ }
+ }
+ } else if responseHeader != nil {
+ return responseHeader.Get("Sec-Websocket-Protocol")
+ }
+ return ""
+}
+
+// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
+//
+// The responseHeader is included in the response to the client's upgrade
+// request. Use the responseHeader to specify cookies (Set-Cookie) and the
+// application negotiated subprotocol (Sec-WebSocket-Protocol).
+//
+// If the upgrade fails, then Upgrade replies to the client with an HTTP error
+// response.
+func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
+ const badHandshake = "websocket: the client is not using the websocket protocol: "
+
+ if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
+ return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
+ }
+
+ if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
+ return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
+ }
+
+ if r.Method != "GET" {
+ return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
+ }
+
+ if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
+ return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
+ }
+
+ if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
+ return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported")
+ }
+
+ checkOrigin := u.CheckOrigin
+ if checkOrigin == nil {
+ checkOrigin = checkSameOrigin
+ }
+ if !checkOrigin(r) {
+ return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
+ }
+
+ challengeKey := r.Header.Get("Sec-Websocket-Key")
+ if challengeKey == "" {
+ return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank")
+ }
+
+ subprotocol := u.selectSubprotocol(r, responseHeader)
+
+ // Negotiate PMCE
+ var compress bool
+ if u.EnableCompression {
+ for _, ext := range parseExtensions(r.Header) {
+ if ext[""] != "permessage-deflate" {
+ continue
+ }
+ compress = true
+ break
+ }
+ }
+
+ h, ok := w.(http.Hijacker)
+ if !ok {
+ return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
+ }
+ var brw *bufio.ReadWriter
+ netConn, brw, err := h.Hijack()
+ if err != nil {
+ return u.returnError(w, r, http.StatusInternalServerError, err.Error())
+ }
+
+ if brw.Reader.Buffered() > 0 {
+ netConn.Close()
+ return nil, errors.New("websocket: client sent data before handshake is complete")
+ }
+
+ var br *bufio.Reader
+ if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
+ // Reuse hijacked buffered reader as connection reader.
+ br = brw.Reader
+ }
+
+ buf := bufioWriterBuffer(netConn, brw.Writer)
+
+ var writeBuf []byte
+ if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
+ // Reuse hijacked write buffer as connection buffer.
+ writeBuf = buf
+ }
+
+ c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
+ c.subprotocol = subprotocol
+
+ if compress {
+ c.newCompressionWriter = compressNoContextTakeover
+ c.newDecompressionReader = decompressNoContextTakeover
+ }
+
+ // Use larger of hijacked buffer and connection write buffer for header.
+ p := buf
+ if len(c.writeBuf) > len(p) {
+ p = c.writeBuf
+ }
+ p = p[:0]
+
+ p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
+ p = append(p, computeAcceptKey(challengeKey)...)
+ p = append(p, "\r\n"...)
+ if c.subprotocol != "" {
+ p = append(p, "Sec-WebSocket-Protocol: "...)
+ p = append(p, c.subprotocol...)
+ p = append(p, "\r\n"...)
+ }
+ if compress {
+ p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
+ }
+ for k, vs := range responseHeader {
+ if k == "Sec-Websocket-Protocol" {
+ continue
+ }
+ for _, v := range vs {
+ p = append(p, k...)
+ p = append(p, ": "...)
+ for i := 0; i < len(v); i++ {
+ b := v[i]
+ if b <= 31 {
+ // prevent response splitting.
+ b = ' '
+ }
+ p = append(p, b)
+ }
+ p = append(p, "\r\n"...)
+ }
+ }
+ p = append(p, "\r\n"...)
+
+ // Clear deadlines set by HTTP server.
+ netConn.SetDeadline(time.Time{})
+
+ if u.HandshakeTimeout > 0 {
+ netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
+ }
+ if _, err = netConn.Write(p); err != nil {
+ netConn.Close()
+ return nil, err
+ }
+ if u.HandshakeTimeout > 0 {
+ netConn.SetWriteDeadline(time.Time{})
+ }
+
+ return c, nil
+}
+
+// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
+//
+// Deprecated: Use websocket.Upgrader instead.
+//
+// Upgrade does not perform origin checking. The application is responsible for
+// checking the Origin header before calling Upgrade. An example implementation
+// of the same origin policy check is:
+//
+// if req.Header.Get("Origin") != "http://"+req.Host {
+// http.Error(w, "Origin not allowed", http.StatusForbidden)
+// return
+// }
+//
+// If the endpoint supports subprotocols, then the application is responsible
+// for negotiating the protocol used on the connection. Use the Subprotocols()
+// function to get the subprotocols requested by the client. Use the
+// Sec-Websocket-Protocol response header to specify the subprotocol selected
+// by the application.
+//
+// The responseHeader is included in the response to the client's upgrade
+// request. Use the responseHeader to specify cookies (Set-Cookie) and the
+// negotiated subprotocol (Sec-Websocket-Protocol).
+//
+// The connection buffers IO to the underlying network connection. The
+// readBufSize and writeBufSize parameters specify the size of the buffers to
+// use. Messages can be larger than the buffers.
+//
+// If the request is not a valid WebSocket handshake, then Upgrade returns an
+// error of type HandshakeError. Applications should handle this error by
+// replying to the client with an HTTP error response.
+func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
+ u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
+ u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
+ // don't return errors to maintain backwards compatibility
+ }
+ u.CheckOrigin = func(r *http.Request) bool {
+ // allow all connections by default
+ return true
+ }
+ return u.Upgrade(w, r, responseHeader)
+}
+
+// Subprotocols returns the subprotocols requested by the client in the
+// Sec-Websocket-Protocol header.
+func Subprotocols(r *http.Request) []string {
+ h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
+ if h == "" {
+ return nil
+ }
+ protocols := strings.Split(h, ",")
+ for i := range protocols {
+ protocols[i] = strings.TrimSpace(protocols[i])
+ }
+ return protocols
+}
+
+// IsWebSocketUpgrade returns true if the client requested upgrade to the
+// WebSocket protocol.
+func IsWebSocketUpgrade(r *http.Request) bool {
+ return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
+ tokenListContainsValue(r.Header, "Upgrade", "websocket")
+}
+
+// bufioReaderSize returns the size of a bufio.Reader.
+func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
+ // This code assumes that peek on a reset reader returns
+ // bufio.Reader.buf[:0].
+ // TODO: Use bufio.Reader.Size() after Go 1.10
+ br.Reset(originalReader)
+ if p, err := br.Peek(0); err == nil {
+ return cap(p)
+ }
+ return 0
+}
+
+// writeHook is an io.Writer that records the last slice passed to it via
+// io.Writer.Write.
+type writeHook struct {
+ p []byte
+}
+
+func (wh *writeHook) Write(p []byte) (int, error) {
+ wh.p = p
+ return len(p), nil
+}
+
+// bufioWriterBuffer grabs the buffer from a bufio.Writer.
+func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
+ // This code assumes that bufio.Writer.buf[:1] is passed to the
+ // bufio.Writer's underlying writer.
+ var wh writeHook
+ bw.Reset(&wh)
+ bw.WriteByte(0)
+ bw.Flush()
+
+ bw.Reset(originalWriter)
+
+ return wh.p[:cap(wh.p)]
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/trace.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/trace.go
new file mode 100644
index 00000000000..834f122a00d
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/trace.go
@@ -0,0 +1,19 @@
+// +build go1.8
+
+package websocket
+
+import (
+ "crypto/tls"
+ "net/http/httptrace"
+)
+
+func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
+ if trace.TLSHandshakeStart != nil {
+ trace.TLSHandshakeStart()
+ }
+ err := doHandshake(tlsConn, cfg)
+ if trace.TLSHandshakeDone != nil {
+ trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
+ }
+ return err
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/trace_17.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/trace_17.go
new file mode 100644
index 00000000000..77d05a0b574
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/trace_17.go
@@ -0,0 +1,12 @@
+// +build !go1.8
+
+package websocket
+
+import (
+ "crypto/tls"
+ "net/http/httptrace"
+)
+
+func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
+ return doHandshake(tlsConn, cfg)
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/util.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/util.go
new file mode 100644
index 00000000000..7bf2f66c674
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/util.go
@@ -0,0 +1,283 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/base64"
+ "io"
+ "net/http"
+ "strings"
+ "unicode/utf8"
+)
+
+var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
+
+func computeAcceptKey(challengeKey string) string {
+ h := sha1.New()
+ h.Write([]byte(challengeKey))
+ h.Write(keyGUID)
+ return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
+
+func generateChallengeKey() (string, error) {
+ p := make([]byte, 16)
+ if _, err := io.ReadFull(rand.Reader, p); err != nil {
+ return "", err
+ }
+ return base64.StdEncoding.EncodeToString(p), nil
+}
+
+// Token octets per RFC 2616.
+var isTokenOctet = [256]bool{
+ '!': true,
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '*': true,
+ '+': true,
+ '-': true,
+ '.': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+ 'W': true,
+ 'V': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+ '|': true,
+ '~': true,
+}
+
+// skipSpace returns a slice of the string s with all leading RFC 2616 linear
+// whitespace removed.
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if b := s[i]; b != ' ' && b != '\t' {
+ break
+ }
+ }
+ return s[i:]
+}
+
+// nextToken returns the leading RFC 2616 token of s and the string following
+// the token.
+func nextToken(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if !isTokenOctet[s[i]] {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616
+// and the string following the token or quoted string.
+func nextTokenOrQuoted(s string) (value string, rest string) {
+ if !strings.HasPrefix(s, "\"") {
+ return nextToken(s)
+ }
+ s = s[1:]
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '"':
+ return s[:i], s[i+1:]
+ case '\\':
+ p := make([]byte, len(s)-1)
+ j := copy(p, s[:i])
+ escape := true
+ for i = i + 1; i < len(s); i++ {
+ b := s[i]
+ switch {
+ case escape:
+ escape = false
+ p[j] = b
+ j++
+ case b == '\\':
+ escape = true
+ case b == '"':
+ return string(p[:j]), s[i+1:]
+ default:
+ p[j] = b
+ j++
+ }
+ }
+ return "", ""
+ }
+ }
+ return "", ""
+}
+
+// equalASCIIFold returns true if s is equal to t with ASCII case folding as
+// defined in RFC 4790.
+func equalASCIIFold(s, t string) bool {
+ for s != "" && t != "" {
+ sr, size := utf8.DecodeRuneInString(s)
+ s = s[size:]
+ tr, size := utf8.DecodeRuneInString(t)
+ t = t[size:]
+ if sr == tr {
+ continue
+ }
+ if 'A' <= sr && sr <= 'Z' {
+ sr = sr + 'a' - 'A'
+ }
+ if 'A' <= tr && tr <= 'Z' {
+ tr = tr + 'a' - 'A'
+ }
+ if sr != tr {
+ return false
+ }
+ }
+ return s == t
+}
+
+// tokenListContainsValue returns true if the 1#token header with the given
+// name contains a token equal to value with ASCII case folding.
+func tokenListContainsValue(header http.Header, name string, value string) bool {
+headers:
+ for _, s := range header[name] {
+ for {
+ var t string
+ t, s = nextToken(skipSpace(s))
+ if t == "" {
+ continue headers
+ }
+ s = skipSpace(s)
+ if s != "" && s[0] != ',' {
+ continue headers
+ }
+ if equalASCIIFold(t, value) {
+ return true
+ }
+ if s == "" {
+ continue headers
+ }
+ s = s[1:]
+ }
+ }
+ return false
+}
+
+// parseExtensions parses WebSocket extensions from a header.
+func parseExtensions(header http.Header) []map[string]string {
+ // From RFC 6455:
+ //
+ // Sec-WebSocket-Extensions = extension-list
+ // extension-list = 1#extension
+ // extension = extension-token *( ";" extension-param )
+ // extension-token = registered-token
+ // registered-token = token
+ // extension-param = token [ "=" (token | quoted-string) ]
+ // ;When using the quoted-string syntax variant, the value
+ // ;after quoted-string unescaping MUST conform to the
+ // ;'token' ABNF.
+
+ var result []map[string]string
+headers:
+ for _, s := range header["Sec-Websocket-Extensions"] {
+ for {
+ var t string
+ t, s = nextToken(skipSpace(s))
+ if t == "" {
+ continue headers
+ }
+ ext := map[string]string{"": t}
+ for {
+ s = skipSpace(s)
+ if !strings.HasPrefix(s, ";") {
+ break
+ }
+ var k string
+ k, s = nextToken(skipSpace(s[1:]))
+ if k == "" {
+ continue headers
+ }
+ s = skipSpace(s)
+ var v string
+ if strings.HasPrefix(s, "=") {
+ v, s = nextTokenOrQuoted(skipSpace(s[1:]))
+ s = skipSpace(s)
+ }
+ if s != "" && s[0] != ',' && s[0] != ';' {
+ continue headers
+ }
+ ext[k] = v
+ }
+ if s != "" && s[0] != ',' {
+ continue headers
+ }
+ result = append(result, ext)
+ if s == "" {
+ continue headers
+ }
+ s = s[1:]
+ }
+ }
+ return result
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/x_net_proxy.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/x_net_proxy.go
new file mode 100644
index 00000000000..2e668f6b882
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/x_net_proxy.go
@@ -0,0 +1,473 @@
+// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
+//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
+
+// Package proxy provides support for a variety of protocols to proxy network
+// data.
+//
+
+package websocket
+
+import (
+ "errors"
+ "io"
+ "net"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+type proxy_direct struct{}
+
+// Direct is a direct proxy: one that makes network connections directly.
+var proxy_Direct = proxy_direct{}
+
+func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
+ return net.Dial(network, addr)
+}
+
+// A PerHost directs connections to a default Dialer unless the host name
+// requested matches one of a number of exceptions.
+type proxy_PerHost struct {
+ def, bypass proxy_Dialer
+
+ bypassNetworks []*net.IPNet
+ bypassIPs []net.IP
+ bypassZones []string
+ bypassHosts []string
+}
+
+// NewPerHost returns a PerHost Dialer that directs connections to either
+// defaultDialer or bypass, depending on whether the connection matches one of
+// the configured rules.
+func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
+ return &proxy_PerHost{
+ def: defaultDialer,
+ bypass: bypass,
+ }
+}
+
+// Dial connects to the address addr on the given network through either
+// defaultDialer or bypass.
+func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ return p.dialerForRequest(host).Dial(network, addr)
+}
+
+func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
+ if ip := net.ParseIP(host); ip != nil {
+ for _, net := range p.bypassNetworks {
+ if net.Contains(ip) {
+ return p.bypass
+ }
+ }
+ for _, bypassIP := range p.bypassIPs {
+ if bypassIP.Equal(ip) {
+ return p.bypass
+ }
+ }
+ return p.def
+ }
+
+ for _, zone := range p.bypassZones {
+ if strings.HasSuffix(host, zone) {
+ return p.bypass
+ }
+ if host == zone[1:] {
+ // For a zone ".example.com", we match "example.com"
+ // too.
+ return p.bypass
+ }
+ }
+ for _, bypassHost := range p.bypassHosts {
+ if bypassHost == host {
+ return p.bypass
+ }
+ }
+ return p.def
+}
+
+// AddFromString parses a string that contains comma-separated values
+// specifying hosts that should use the bypass proxy. Each value is either an
+// IP address, a CIDR range, a zone (*.example.com) or a host name
+// (localhost). A best effort is made to parse the string and errors are
+// ignored.
+func (p *proxy_PerHost) AddFromString(s string) {
+ hosts := strings.Split(s, ",")
+ for _, host := range hosts {
+ host = strings.TrimSpace(host)
+ if len(host) == 0 {
+ continue
+ }
+ if strings.Contains(host, "/") {
+ // We assume that it's a CIDR address like 127.0.0.0/8
+ if _, net, err := net.ParseCIDR(host); err == nil {
+ p.AddNetwork(net)
+ }
+ continue
+ }
+ if ip := net.ParseIP(host); ip != nil {
+ p.AddIP(ip)
+ continue
+ }
+ if strings.HasPrefix(host, "*.") {
+ p.AddZone(host[1:])
+ continue
+ }
+ p.AddHost(host)
+ }
+}
+
+// AddIP specifies an IP address that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match an IP.
+func (p *proxy_PerHost) AddIP(ip net.IP) {
+ p.bypassIPs = append(p.bypassIPs, ip)
+}
+
+// AddNetwork specifies an IP range that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match.
+func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
+ p.bypassNetworks = append(p.bypassNetworks, net)
+}
+
+// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
+// "example.com" matches "example.com" and all of its subdomains.
+func (p *proxy_PerHost) AddZone(zone string) {
+ if strings.HasSuffix(zone, ".") {
+ zone = zone[:len(zone)-1]
+ }
+ if !strings.HasPrefix(zone, ".") {
+ zone = "." + zone
+ }
+ p.bypassZones = append(p.bypassZones, zone)
+}
+
+// AddHost specifies a host name that will use the bypass proxy.
+func (p *proxy_PerHost) AddHost(host string) {
+ if strings.HasSuffix(host, ".") {
+ host = host[:len(host)-1]
+ }
+ p.bypassHosts = append(p.bypassHosts, host)
+}
+
+// A Dialer is a means to establish a connection.
+type proxy_Dialer interface {
+ // Dial connects to the given address via the proxy.
+ Dial(network, addr string) (c net.Conn, err error)
+}
+
+// Auth contains authentication parameters that specific Dialers may require.
+type proxy_Auth struct {
+ User, Password string
+}
+
+// FromEnvironment returns the dialer specified by the proxy related variables in
+// the environment.
+func proxy_FromEnvironment() proxy_Dialer {
+ allProxy := proxy_allProxyEnv.Get()
+ if len(allProxy) == 0 {
+ return proxy_Direct
+ }
+
+ proxyURL, err := url.Parse(allProxy)
+ if err != nil {
+ return proxy_Direct
+ }
+ proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
+ if err != nil {
+ return proxy_Direct
+ }
+
+ noProxy := proxy_noProxyEnv.Get()
+ if len(noProxy) == 0 {
+ return proxy
+ }
+
+ perHost := proxy_NewPerHost(proxy, proxy_Direct)
+ perHost.AddFromString(noProxy)
+ return perHost
+}
+
+// proxySchemes is a map from URL schemes to a function that creates a Dialer
+// from a URL with such a scheme.
+var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)
+
+// RegisterDialerType takes a URL scheme and a function to generate Dialers from
+// a URL with that scheme and a forwarding Dialer. Registered schemes are used
+// by FromURL.
+func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
+ if proxy_proxySchemes == nil {
+ proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
+ }
+ proxy_proxySchemes[scheme] = f
+}
+
+// FromURL returns a Dialer given a URL specification and an underlying
+// Dialer for it to make network requests.
+func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
+ var auth *proxy_Auth
+ if u.User != nil {
+ auth = new(proxy_Auth)
+ auth.User = u.User.Username()
+ if p, ok := u.User.Password(); ok {
+ auth.Password = p
+ }
+ }
+
+ switch u.Scheme {
+ case "socks5":
+ return proxy_SOCKS5("tcp", u.Host, auth, forward)
+ }
+
+ // If the scheme doesn't match any of the built-in schemes, see if it
+ // was registered by another package.
+ if proxy_proxySchemes != nil {
+ if f, ok := proxy_proxySchemes[u.Scheme]; ok {
+ return f(u, forward)
+ }
+ }
+
+ return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
+}
+
+var (
+ proxy_allProxyEnv = &proxy_envOnce{
+ names: []string{"ALL_PROXY", "all_proxy"},
+ }
+ proxy_noProxyEnv = &proxy_envOnce{
+ names: []string{"NO_PROXY", "no_proxy"},
+ }
+)
+
+// envOnce looks up an environment variable (optionally by multiple
+// names) once. It mitigates expensive lookups on some platforms
+// (e.g. Windows).
+// (Borrowed from net/http/transport.go)
+type proxy_envOnce struct {
+ names []string
+ once sync.Once
+ val string
+}
+
+func (e *proxy_envOnce) Get() string {
+ e.once.Do(e.init)
+ return e.val
+}
+
+func (e *proxy_envOnce) init() {
+ for _, n := range e.names {
+ e.val = os.Getenv(n)
+ if e.val != "" {
+ return
+ }
+ }
+}
+
+// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
+// with an optional username and password. See RFC 1928 and RFC 1929.
+func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
+ s := &proxy_socks5{
+ network: network,
+ addr: addr,
+ forward: forward,
+ }
+ if auth != nil {
+ s.user = auth.User
+ s.password = auth.Password
+ }
+
+ return s, nil
+}
+
+type proxy_socks5 struct {
+ user, password string
+ network, addr string
+ forward proxy_Dialer
+}
+
+const proxy_socks5Version = 5
+
+const (
+ proxy_socks5AuthNone = 0
+ proxy_socks5AuthPassword = 2
+)
+
+const proxy_socks5Connect = 1
+
+const (
+ proxy_socks5IP4 = 1
+ proxy_socks5Domain = 3
+ proxy_socks5IP6 = 4
+)
+
+var proxy_socks5Errors = []string{
+ "",
+ "general failure",
+ "connection forbidden",
+ "network unreachable",
+ "host unreachable",
+ "connection refused",
+ "TTL expired",
+ "command not supported",
+ "address type not supported",
+}
+
+// Dial connects to the address addr on the given network via the SOCKS5 proxy.
+func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
+ switch network {
+ case "tcp", "tcp6", "tcp4":
+ default:
+ return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
+ }
+
+ conn, err := s.forward.Dial(s.network, s.addr)
+ if err != nil {
+ return nil, err
+ }
+ if err := s.connect(conn, addr); err != nil {
+ conn.Close()
+ return nil, err
+ }
+ return conn, nil
+}
+
+// connect takes an existing connection to a socks5 proxy server,
+// and commands the server to extend that connection to target,
+// which must be a canonical address with a host and port.
+func (s *proxy_socks5) connect(conn net.Conn, target string) error {
+ host, portStr, err := net.SplitHostPort(target)
+ if err != nil {
+ return err
+ }
+
+ port, err := strconv.Atoi(portStr)
+ if err != nil {
+ return errors.New("proxy: failed to parse port number: " + portStr)
+ }
+ if port < 1 || port > 0xffff {
+ return errors.New("proxy: port number out of range: " + portStr)
+ }
+
+ // the size here is just an estimate
+ buf := make([]byte, 0, 6+len(host))
+
+ buf = append(buf, proxy_socks5Version)
+ if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
+ buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
+ } else {
+ buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
+ }
+
+ if _, err := conn.Write(buf); err != nil {
+ return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+ if buf[0] != 5 {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
+ }
+ if buf[1] == 0xff {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
+ }
+
+ // See RFC 1929
+ if buf[1] == proxy_socks5AuthPassword {
+ buf = buf[:0]
+ buf = append(buf, 1 /* password protocol version */)
+ buf = append(buf, uint8(len(s.user)))
+ buf = append(buf, s.user...)
+ buf = append(buf, uint8(len(s.password)))
+ buf = append(buf, s.password...)
+
+ if _, err := conn.Write(buf); err != nil {
+ return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if buf[1] != 0 {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
+ }
+ }
+
+ buf = buf[:0]
+ buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)
+
+ if ip := net.ParseIP(host); ip != nil {
+ if ip4 := ip.To4(); ip4 != nil {
+ buf = append(buf, proxy_socks5IP4)
+ ip = ip4
+ } else {
+ buf = append(buf, proxy_socks5IP6)
+ }
+ buf = append(buf, ip...)
+ } else {
+ if len(host) > 255 {
+ return errors.New("proxy: destination host name too long: " + host)
+ }
+ buf = append(buf, proxy_socks5Domain)
+ buf = append(buf, byte(len(host)))
+ buf = append(buf, host...)
+ }
+ buf = append(buf, byte(port>>8), byte(port))
+
+ if _, err := conn.Write(buf); err != nil {
+ return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:4]); err != nil {
+ return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ failure := "unknown error"
+ if int(buf[1]) < len(proxy_socks5Errors) {
+ failure = proxy_socks5Errors[buf[1]]
+ }
+
+ if len(failure) > 0 {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
+ }
+
+ bytesToDiscard := 0
+ switch buf[3] {
+ case proxy_socks5IP4:
+ bytesToDiscard = net.IPv4len
+ case proxy_socks5IP6:
+ bytesToDiscard = net.IPv6len
+ case proxy_socks5Domain:
+ _, err := io.ReadFull(conn, buf[:1])
+ if err != nil {
+ return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+ bytesToDiscard = int(buf[0])
+ default:
+ return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
+ }
+
+ if cap(buf) < bytesToDiscard {
+ buf = make([]byte, bytesToDiscard)
+ } else {
+ buf = buf[:bytesToDiscard]
+ }
+ if _, err := io.ReadFull(conn, buf); err != nil {
+ return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ // Also need to discard the port number
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ return nil
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/LICENSE b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/LICENSE
new file mode 100644
index 00000000000..1eb75ef68e4
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2019 Klaus Post. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/deflate.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/deflate.go
new file mode 100644
index 00000000000..2b101d26b25
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -0,0 +1,819 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright (c) 2015 Klaus Post
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "fmt"
+ "io"
+ "math"
+)
+
+const (
+ NoCompression = 0
+ BestSpeed = 1
+ BestCompression = 9
+ DefaultCompression = -1
+
+ // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
+ // entropy encoding. This mode is useful in compressing data that has
+ // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
+ // that lacks an entropy encoder. Compression gains are achieved when
+ // certain bytes in the input stream occur more frequently than others.
+ //
+ // Note that HuffmanOnly produces a compressed output that is
+ // RFC 1951 compliant. That is, any valid DEFLATE decompressor will
+ // continue to be able to decompress this output.
+ HuffmanOnly = -2
+ ConstantCompression = HuffmanOnly // compatibility alias.
+
+ logWindowSize = 15
+ windowSize = 1 << logWindowSize
+ windowMask = windowSize - 1
+ logMaxOffsetSize = 15 // Standard DEFLATE
+ minMatchLength = 4 // The smallest match that the compressor looks for
+ maxMatchLength = 258 // The longest match for the compressor
+ minOffsetSize = 1 // The shortest offset that makes any sense
+
+ // The maximum number of tokens we put into a single flate block, just to
+ // stop things from getting too large.
+ maxFlateBlockTokens = 1 << 14
+ maxStoreBlockSize = 65535
+ hashBits = 17 // After 17 performance degrades
+ hashSize = 1 << hashBits
+ hashMask = (1 << hashBits) - 1
+ hashShift = (hashBits + minMatchLength - 1) / minMatchLength
+ maxHashOffset = 1 << 24
+
+ skipNever = math.MaxInt32
+
+ debugDeflate = false
+)
+
+type compressionLevel struct {
+ good, lazy, nice, chain, fastSkipHashing, level int
+}
+
+// Compression levels have been rebalanced from zlib deflate defaults
+// to give a bigger spread in speed and compression.
+// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
+var levels = []compressionLevel{
+ {}, // 0
+ // Level 1-6 uses specialized algorithm - values not used
+ {0, 0, 0, 0, 0, 1},
+ {0, 0, 0, 0, 0, 2},
+ {0, 0, 0, 0, 0, 3},
+ {0, 0, 0, 0, 0, 4},
+ {0, 0, 0, 0, 0, 5},
+ {0, 0, 0, 0, 0, 6},
+ // Levels 7-9 use increasingly more lazy matching
+ // and increasingly stringent conditions for "good enough".
+ {8, 8, 24, 16, skipNever, 7},
+ {10, 16, 24, 64, skipNever, 8},
+ {32, 258, 258, 4096, skipNever, 9},
+}
+
+// advancedState contains state for the advanced levels, with bigger hash tables, etc.
+type advancedState struct {
+ // deflate state
+ length int
+ offset int
+ hash uint32
+ maxInsertIndex int
+ ii uint16 // position of last match, intended to overflow to reset.
+
+ // Input hash chains
+ // hashHead[hashValue] contains the largest inputIndex with the specified hash value
+ // If hashHead[hashValue] is within the current window, then
+ // hashPrev[hashHead[hashValue] & windowMask] contains the previous index
+ // with the same hash value.
+ chainHead int
+ hashHead [hashSize]uint32
+ hashPrev [windowSize]uint32
+ hashOffset int
+
+ // input window: unprocessed data is window[index:windowEnd]
+ index int
+ hashMatch [maxMatchLength + minMatchLength]uint32
+}
+
+type compressor struct {
+ compressionLevel
+
+ w *huffmanBitWriter
+
+ // compression algorithm
+ fill func(*compressor, []byte) int // copy data to window
+ step func(*compressor) // process window
+ sync bool // requesting flush
+
+ window []byte
+ windowEnd int
+ blockStart int // window index where current tokens start
+ byteAvailable bool // if true, still need to process window[index-1].
+ err error
+
+ // queued output tokens
+ tokens tokens
+ fast fastEnc
+ state *advancedState
+}
+
+func (d *compressor) fillDeflate(b []byte) int {
+ s := d.state
+ if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
+ // shift the window by windowSize
+ copy(d.window[:], d.window[windowSize:2*windowSize])
+ s.index -= windowSize
+ d.windowEnd -= windowSize
+ if d.blockStart >= windowSize {
+ d.blockStart -= windowSize
+ } else {
+ d.blockStart = math.MaxInt32
+ }
+ s.hashOffset += windowSize
+ if s.hashOffset > maxHashOffset {
+ delta := s.hashOffset - 1
+ s.hashOffset -= delta
+ s.chainHead -= delta
+ // Iterate over slices instead of arrays to avoid copying
+ // the entire table onto the stack (Issue #18625).
+ for i, v := range s.hashPrev[:] {
+ if int(v) > delta {
+ s.hashPrev[i] = uint32(int(v) - delta)
+ } else {
+ s.hashPrev[i] = 0
+ }
+ }
+ for i, v := range s.hashHead[:] {
+ if int(v) > delta {
+ s.hashHead[i] = uint32(int(v) - delta)
+ } else {
+ s.hashHead[i] = 0
+ }
+ }
+ }
+ }
+ n := copy(d.window[d.windowEnd:], b)
+ d.windowEnd += n
+ return n
+}
+
+func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
+ if index > 0 || eof {
+ var window []byte
+ if d.blockStart <= index {
+ window = d.window[d.blockStart:index]
+ }
+ d.blockStart = index
+ d.w.writeBlock(tok, eof, window)
+ return d.w.err
+ }
+ return nil
+}
+
+// writeBlockSkip writes the current block and uses the number of tokens
+// to determine if the block should be stored on no matches, or
+// only huffman encoded.
+func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
+ if index > 0 || eof {
+ if d.blockStart <= index {
+ window := d.window[d.blockStart:index]
+ // If we removed less than a 64th of all literals
+ // we huffman compress the block.
+ if int(tok.n) > len(window)-int(tok.n>>6) {
+ d.w.writeBlockHuff(eof, window, d.sync)
+ } else {
+ // Write a dynamic huffman block.
+ d.w.writeBlockDynamic(tok, eof, window, d.sync)
+ }
+ } else {
+ d.w.writeBlock(tok, eof, nil)
+ }
+ d.blockStart = index
+ return d.w.err
+ }
+ return nil
+}
+
+// fillWindow will fill the current window with the supplied
+// dictionary and calculate all hashes.
+// This is much faster than doing a full encode.
+// Should only be used after a start/reset.
+func (d *compressor) fillWindow(b []byte) {
+ // Do not fill window if we are in store-only or huffman mode.
+ if d.level <= 0 {
+ return
+ }
+ if d.fast != nil {
+ // encode the last data, but discard the result
+ if len(b) > maxMatchOffset {
+ b = b[len(b)-maxMatchOffset:]
+ }
+ d.fast.Encode(&d.tokens, b)
+ d.tokens.Reset()
+ return
+ }
+ s := d.state
+ // If we are given too much, cut it.
+ if len(b) > windowSize {
+ b = b[len(b)-windowSize:]
+ }
+ // Add all to window.
+ n := copy(d.window[d.windowEnd:], b)
+
+ // Calculate 256 hashes at the time (more L1 cache hits)
+ loops := (n + 256 - minMatchLength) / 256
+ for j := 0; j < loops; j++ {
+ startindex := j * 256
+ end := startindex + 256 + minMatchLength - 1
+ if end > n {
+ end = n
+ }
+ tocheck := d.window[startindex:end]
+ dstSize := len(tocheck) - minMatchLength + 1
+
+ if dstSize <= 0 {
+ continue
+ }
+
+ dst := s.hashMatch[:dstSize]
+ bulkHash4(tocheck, dst)
+ var newH uint32
+ for i, val := range dst {
+ di := i + startindex
+ newH = val & hashMask
+ // Get previous value with the same hash.
+ // Our chain should point to the previous value.
+ s.hashPrev[di&windowMask] = s.hashHead[newH]
+ // Set the head of the hash chain to us.
+ s.hashHead[newH] = uint32(di + s.hashOffset)
+ }
+ s.hash = newH
+ }
+ // Update window information.
+ d.windowEnd += n
+ s.index = n
+}
+
+// Try to find a match starting at index whose length is greater than prevSize.
+// We only look at chainCount possibilities before giving up.
+// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
+func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
+ minMatchLook := maxMatchLength
+ if lookahead < minMatchLook {
+ minMatchLook = lookahead
+ }
+
+ win := d.window[0 : pos+minMatchLook]
+
+ // We quit when we get a match that's at least nice long
+ nice := len(win) - pos
+ if d.nice < nice {
+ nice = d.nice
+ }
+
+ // If we've got a match that's good enough, only look in 1/4 the chain.
+ tries := d.chain
+ length = prevLength
+ if length >= d.good {
+ tries >>= 2
+ }
+
+ wEnd := win[pos+length]
+ wPos := win[pos:]
+ minIndex := pos - windowSize
+
+ for i := prevHead; tries > 0; tries-- {
+ if wEnd == win[i+length] {
+ n := matchLen(win[i:i+minMatchLook], wPos)
+
+ if n > length && (n > minMatchLength || pos-i <= 4096) {
+ length = n
+ offset = pos - i
+ ok = true
+ if n >= nice {
+ // The match is good enough that we don't try to find a better one.
+ break
+ }
+ wEnd = win[pos+n]
+ }
+ }
+ if i == minIndex {
+ // hashPrev[i & windowMask] has already been overwritten, so stop now.
+ break
+ }
+ i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
+ if i < minIndex || i < 0 {
+ break
+ }
+ }
+ return
+}
+
+func (d *compressor) writeStoredBlock(buf []byte) error {
+ if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
+ return d.w.err
+ }
+ d.w.writeBytes(buf)
+ return d.w.err
+}
+
+// hash4 returns a hash representation of the first 4 bytes
+// of the supplied slice.
+// The caller must ensure that len(b) >= 4.
+func hash4(b []byte) uint32 {
+ b = b[:4]
+ return hash4u(uint32(b[3])|uint32(b[2])<<8|uint32(b[1])<<16|uint32(b[0])<<24, hashBits)
+}
+
+// bulkHash4 will compute hashes using the same
+// algorithm as hash4
+func bulkHash4(b []byte, dst []uint32) {
+ if len(b) < 4 {
+ return
+ }
+ hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+ dst[0] = hash4u(hb, hashBits)
+ end := len(b) - 4 + 1
+ for i := 1; i < end; i++ {
+ hb = (hb << 8) | uint32(b[i+3])
+ dst[i] = hash4u(hb, hashBits)
+ }
+}
+
+func (d *compressor) initDeflate() {
+ d.window = make([]byte, 2*windowSize)
+ d.byteAvailable = false
+ d.err = nil
+ if d.state == nil {
+ return
+ }
+ s := d.state
+ s.index = 0
+ s.hashOffset = 1
+ s.length = minMatchLength - 1
+ s.offset = 0
+ s.hash = 0
+ s.chainHead = -1
+}
+
+// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
+// meaning it always has lazy matching on.
+func (d *compressor) deflateLazy() {
+ s := d.state
+ // Sanity enables additional runtime tests.
+ // It's intended to be used during development
+ // to supplement the currently ad-hoc unit tests.
+ const sanity = debugDeflate
+
+ if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
+ return
+ }
+
+ s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
+ if s.index < s.maxInsertIndex {
+ s.hash = hash4(d.window[s.index : s.index+minMatchLength])
+ }
+
+ for {
+ if sanity && s.index > d.windowEnd {
+ panic("index > windowEnd")
+ }
+ lookahead := d.windowEnd - s.index
+ if lookahead < minMatchLength+maxMatchLength {
+ if !d.sync {
+ return
+ }
+ if sanity && s.index > d.windowEnd {
+ panic("index > windowEnd")
+ }
+ if lookahead == 0 {
+ // Flush current output block if any.
+ if d.byteAvailable {
+ // There is still one pending token that needs to be flushed
+ d.tokens.AddLiteral(d.window[s.index-1])
+ d.byteAvailable = false
+ }
+ if d.tokens.n > 0 {
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ return
+ }
+ }
+ if s.index < s.maxInsertIndex {
+ // Update the hash
+ s.hash = hash4(d.window[s.index : s.index+minMatchLength])
+ ch := s.hashHead[s.hash&hashMask]
+ s.chainHead = int(ch)
+ s.hashPrev[s.index&windowMask] = ch
+ s.hashHead[s.hash&hashMask] = uint32(s.index + s.hashOffset)
+ }
+ prevLength := s.length
+ prevOffset := s.offset
+ s.length = minMatchLength - 1
+ s.offset = 0
+ minIndex := s.index - windowSize
+ if minIndex < 0 {
+ minIndex = 0
+ }
+
+ if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
+ if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
+ s.length = newLength
+ s.offset = newOffset
+ }
+ }
+ if prevLength >= minMatchLength && s.length <= prevLength {
+ // There was a match at the previous step, and the current match is
+ // not better. Output the previous match.
+ d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
+
+ // Insert in the hash table all strings up to the end of the match.
+ // index and index-1 are already inserted. If there is not enough
+ // lookahead, the last two strings are not inserted into the hash
+ // table.
+ var newIndex int
+ newIndex = s.index + prevLength - 1
+ // Calculate missing hashes
+ end := newIndex
+ if end > s.maxInsertIndex {
+ end = s.maxInsertIndex
+ }
+ end += minMatchLength - 1
+ startindex := s.index + 1
+ if startindex > s.maxInsertIndex {
+ startindex = s.maxInsertIndex
+ }
+ tocheck := d.window[startindex:end]
+ dstSize := len(tocheck) - minMatchLength + 1
+ if dstSize > 0 {
+ dst := s.hashMatch[:dstSize]
+ bulkHash4(tocheck, dst)
+ var newH uint32
+ for i, val := range dst {
+ di := i + startindex
+ newH = val & hashMask
+ // Get previous value with the same hash.
+ // Our chain should point to the previous value.
+ s.hashPrev[di&windowMask] = s.hashHead[newH]
+ // Set the head of the hash chain to us.
+ s.hashHead[newH] = uint32(di + s.hashOffset)
+ }
+ s.hash = newH
+ }
+
+ s.index = newIndex
+ d.byteAvailable = false
+ s.length = minMatchLength - 1
+ if d.tokens.n == maxFlateBlockTokens {
+ // The block includes the current character
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ } else {
+ // Reset, if we got a match this run.
+ if s.length >= minMatchLength {
+ s.ii = 0
+ }
+ // We have a byte waiting. Emit it.
+ if d.byteAvailable {
+ s.ii++
+ d.tokens.AddLiteral(d.window[s.index-1])
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ s.index++
+
+ // If we have a long run of no matches, skip additional bytes
+ // Resets when s.ii overflows after 64KB.
+ if s.ii > 31 {
+ n := int(s.ii >> 5)
+ for j := 0; j < n; j++ {
+ if s.index >= d.windowEnd-1 {
+ break
+ }
+
+ d.tokens.AddLiteral(d.window[s.index-1])
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ s.index++
+ }
+ // Flush last byte
+ d.tokens.AddLiteral(d.window[s.index-1])
+ d.byteAvailable = false
+ // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ }
+ } else {
+ s.index++
+ d.byteAvailable = true
+ }
+ }
+ }
+}
+
+func (d *compressor) store() {
+ if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
+ d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ d.windowEnd = 0
+ }
+}
+
+// fillWindow will fill the buffer with data for huffman-only compression.
+// The number of bytes copied is returned.
+func (d *compressor) fillBlock(b []byte) int {
+ n := copy(d.window[d.windowEnd:], b)
+ d.windowEnd += n
+ return n
+}
+
+// storeHuff will compress and store the currently added data,
+// if enough has been accumulated or we at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeHuff() {
+ if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
+ return
+ }
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
+ d.err = d.w.err
+ d.windowEnd = 0
+}
+
+// storeFast will compress and store the currently added data,
+// if enough has been accumulated or we at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeFast() {
+ // We only compress if we have maxStoreBlockSize.
+ if d.windowEnd < len(d.window) {
+ if !d.sync {
+ return
+ }
+ // Handle extremely small sizes.
+ if d.windowEnd < 128 {
+ if d.windowEnd == 0 {
+ return
+ }
+ if d.windowEnd <= 32 {
+ d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ } else {
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], true)
+ d.err = d.w.err
+ }
+ d.tokens.Reset()
+ d.windowEnd = 0
+ d.fast.Reset()
+ return
+ }
+ }
+
+ d.fast.Encode(&d.tokens, d.window[:d.windowEnd])
+ // If we made zero matches, store the block as is.
+ if d.tokens.n == 0 {
+ d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ // If we removed less than 1/16th, huffman compress the block.
+ } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
+ d.err = d.w.err
+ } else {
+ d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync)
+ d.err = d.w.err
+ }
+ d.tokens.Reset()
+ d.windowEnd = 0
+}
+
+// write will add input byte to the stream.
+// Unless an error occurs all bytes will be consumed.
+func (d *compressor) write(b []byte) (n int, err error) {
+ if d.err != nil {
+ return 0, d.err
+ }
+ n = len(b)
+ for len(b) > 0 {
+ d.step(d)
+ b = b[d.fill(d, b):]
+ if d.err != nil {
+ return 0, d.err
+ }
+ }
+ return n, d.err
+}
+
+func (d *compressor) syncFlush() error {
+ d.sync = true
+ if d.err != nil {
+ return d.err
+ }
+ d.step(d)
+ if d.err == nil {
+ d.w.writeStoredHeader(0, false)
+ d.w.flush()
+ d.err = d.w.err
+ }
+ d.sync = false
+ return d.err
+}
+
+func (d *compressor) init(w io.Writer, level int) (err error) {
+ d.w = newHuffmanBitWriter(w)
+
+ switch {
+ case level == NoCompression:
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).store
+ case level == ConstantCompression:
+ d.w.logNewTablePenalty = 4
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).storeHuff
+ case level == DefaultCompression:
+ level = 5
+ fallthrough
+ case level >= 1 && level <= 6:
+ d.w.logNewTablePenalty = 6
+ d.fast = newFastEnc(level)
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).storeFast
+ case 7 <= level && level <= 9:
+ d.w.logNewTablePenalty = 10
+ d.state = &advancedState{}
+ d.compressionLevel = levels[level]
+ d.initDeflate()
+ d.fill = (*compressor).fillDeflate
+ d.step = (*compressor).deflateLazy
+ default:
+ return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
+ }
+ d.level = level
+ return nil
+}
+
+// reset the state of the compressor.
+func (d *compressor) reset(w io.Writer) {
+ d.w.reset(w)
+ d.sync = false
+ d.err = nil
+ // We only need to reset a few things for Snappy.
+ if d.fast != nil {
+ d.fast.Reset()
+ d.windowEnd = 0
+ d.tokens.Reset()
+ return
+ }
+ switch d.compressionLevel.chain {
+ case 0:
+ // level was NoCompression or ConstantCompresssion.
+ d.windowEnd = 0
+ default:
+ s := d.state
+ s.chainHead = -1
+ for i := range s.hashHead {
+ s.hashHead[i] = 0
+ }
+ for i := range s.hashPrev {
+ s.hashPrev[i] = 0
+ }
+ s.hashOffset = 1
+ s.index, d.windowEnd = 0, 0
+ d.blockStart, d.byteAvailable = 0, false
+ d.tokens.Reset()
+ s.length = minMatchLength - 1
+ s.offset = 0
+ s.hash = 0
+ s.ii = 0
+ s.maxInsertIndex = 0
+ }
+}
+
+func (d *compressor) close() error {
+ if d.err != nil {
+ return d.err
+ }
+ d.sync = true
+ d.step(d)
+ if d.err != nil {
+ return d.err
+ }
+ if d.w.writeStoredHeader(0, true); d.w.err != nil {
+ return d.w.err
+ }
+ d.w.flush()
+ d.w.reset(nil)
+ return d.w.err
+}
+
+// NewWriter returns a new Writer compressing data at the given level.
+// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
+// higher levels typically run slower but compress more.
+// Level 0 (NoCompression) does not attempt any compression; it only adds the
+// necessary DEFLATE framing.
+// Level -1 (DefaultCompression) uses the default compression level.
+// Level -2 (ConstantCompression) will use Huffman compression only, giving
+// a very fast compression for all types of input, but sacrificing considerable
+// compression efficiency.
+//
+// If level is in the range [-2, 9] then the error returned will be nil.
+// Otherwise the error returned will be non-nil.
+func NewWriter(w io.Writer, level int) (*Writer, error) {
+ var dw Writer
+ if err := dw.d.init(w, level); err != nil {
+ return nil, err
+ }
+ return &dw, nil
+}
+
+// NewWriterDict is like NewWriter but initializes the new
+// Writer with a preset dictionary. The returned Writer behaves
+// as if the dictionary had been written to it without producing
+// any compressed output. The compressed data written to w
+// can only be decompressed by a Reader initialized with the
+// same dictionary.
+func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
+ zw, err := NewWriter(w, level)
+ if err != nil {
+ return nil, err
+ }
+ zw.d.fillWindow(dict)
+ zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
+ return zw, err
+}
+
+// A Writer takes data written to it and writes the compressed
+// form of that data to an underlying writer (see NewWriter).
+type Writer struct {
+ d compressor
+ dict []byte
+}
+
+// Write writes data to w, which will eventually write the
+// compressed form of data to its underlying writer.
+func (w *Writer) Write(data []byte) (n int, err error) {
+ return w.d.write(data)
+}
+
+// Flush flushes any pending data to the underlying writer.
+// It is useful mainly in compressed network protocols, to ensure that
+// a remote reader has enough data to reconstruct a packet.
+// Flush does not return until the data has been written.
+// Calling Flush when there is no pending data still causes the Writer
+// to emit a sync marker of at least 4 bytes.
+// If the underlying writer returns an error, Flush returns that error.
+//
+// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
+func (w *Writer) Flush() error {
+ // For more about flushing:
+ // http://www.bolet.org/~pornin/deflate-flush.html
+ return w.d.syncFlush()
+}
+
+// Close flushes and closes the writer.
+func (w *Writer) Close() error {
+ return w.d.close()
+}
+
+// Reset discards the writer's state and makes it equivalent to
+// the result of NewWriter or NewWriterDict called with dst
+// and w's level and dictionary.
+func (w *Writer) Reset(dst io.Writer) {
+ if len(w.dict) > 0 {
+ // w was created with NewWriterDict
+ w.d.reset(dst)
+ if dst != nil {
+ w.d.fillWindow(w.dict)
+ }
+ } else {
+ // w was created with NewWriter
+ w.d.reset(dst)
+ }
+}
+
+// ResetDict discards the writer's state and makes it equivalent to
+// the result of NewWriter or NewWriterDict called with dst
+// and w's level, but sets a specific dictionary.
+func (w *Writer) ResetDict(dst io.Writer, dict []byte) {
+ w.dict = dict
+ w.d.reset(dst)
+ w.d.fillWindow(w.dict)
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/dict_decoder.go
new file mode 100644
index 00000000000..71c75a065ea
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/dict_decoder.go
@@ -0,0 +1,184 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
+// LZ77 decompresses data through sequences of two forms of commands:
+//
+// * Literal insertions: Runs of one or more symbols are inserted into the data
+// stream as is. This is accomplished through the writeByte method for a
+// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
+// Any valid stream must start with a literal insertion if no preset dictionary
+// is used.
+//
+// * Backward copies: Runs of one or more symbols are copied from previously
+// emitted data. Backward copies come as the tuple (dist, length) where dist
+// determines how far back in the stream to copy from and length determines how
+// many bytes to copy. Note that it is valid for the length to be greater than
+// the distance. Since LZ77 uses forward copies, that situation is used to
+// perform a form of run-length encoding on repeated runs of symbols.
+// The writeCopy and tryWriteCopy are used to implement this command.
+//
+// For performance reasons, this implementation performs little to no sanity
+// checks about the arguments. As such, the invariants documented for each
+// method call must be respected.
+type dictDecoder struct {
+ hist []byte // Sliding window history
+
+ // Invariant: 0 <= rdPos <= wrPos <= len(hist)
+ wrPos int // Current output position in buffer
+ rdPos int // Have emitted hist[:rdPos] already
+ full bool // Has a full window length been written yet?
+}
+
+// init initializes dictDecoder to have a sliding window dictionary of the given
+// size. If a preset dict is provided, it will initialize the dictionary with
+// the contents of dict.
+func (dd *dictDecoder) init(size int, dict []byte) {
+ *dd = dictDecoder{hist: dd.hist}
+
+ if cap(dd.hist) < size {
+ dd.hist = make([]byte, size)
+ }
+ dd.hist = dd.hist[:size]
+
+ if len(dict) > len(dd.hist) {
+ dict = dict[len(dict)-len(dd.hist):]
+ }
+ dd.wrPos = copy(dd.hist, dict)
+ if dd.wrPos == len(dd.hist) {
+ dd.wrPos = 0
+ dd.full = true
+ }
+ dd.rdPos = dd.wrPos
+}
+
+// histSize reports the total amount of historical data in the dictionary.
+func (dd *dictDecoder) histSize() int {
+ if dd.full {
+ return len(dd.hist)
+ }
+ return dd.wrPos
+}
+
+// availRead reports the number of bytes that can be flushed by readFlush.
+func (dd *dictDecoder) availRead() int {
+ return dd.wrPos - dd.rdPos
+}
+
+// availWrite reports the available amount of output buffer space.
+func (dd *dictDecoder) availWrite() int {
+ return len(dd.hist) - dd.wrPos
+}
+
+// writeSlice returns a slice of the available buffer to write data to.
+//
+// This invariant will be kept: len(s) <= availWrite()
+func (dd *dictDecoder) writeSlice() []byte {
+ return dd.hist[dd.wrPos:]
+}
+
+// writeMark advances the writer pointer by cnt.
+//
+// This invariant must be kept: 0 <= cnt <= availWrite()
+func (dd *dictDecoder) writeMark(cnt int) {
+ dd.wrPos += cnt
+}
+
+// writeByte writes a single byte to the dictionary.
+//
+// This invariant must be kept: 0 < availWrite()
+func (dd *dictDecoder) writeByte(c byte) {
+ dd.hist[dd.wrPos] = c
+ dd.wrPos++
+}
+
+// writeCopy copies a string at a given (dist, length) to the output.
+// This returns the number of bytes copied and may be less than the requested
+// length if the available space in the output buffer is too small.
+//
+// This invariant must be kept: 0 < dist <= histSize()
+func (dd *dictDecoder) writeCopy(dist, length int) int {
+ dstBase := dd.wrPos
+ dstPos := dstBase
+ srcPos := dstPos - dist
+ endPos := dstPos + length
+ if endPos > len(dd.hist) {
+ endPos = len(dd.hist)
+ }
+
+ // Copy non-overlapping section after destination position.
+ //
+ // This section is non-overlapping in that the copy length for this section
+ // is always less than or equal to the backwards distance. This can occur
+ // if a distance refers to data that wraps-around in the buffer.
+ // Thus, a backwards copy is performed here; that is, the exact bytes in
+ // the source prior to the copy is placed in the destination.
+ if srcPos < 0 {
+ srcPos += len(dd.hist)
+ dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
+ srcPos = 0
+ }
+
+ // Copy possibly overlapping section before destination position.
+ //
+ // This section can overlap if the copy length for this section is larger
+ // than the backwards distance. This is allowed by LZ77 so that repeated
+ // strings can be succinctly represented using (dist, length) pairs.
+ // Thus, a forwards copy is performed here; that is, the bytes copied is
+ // possibly dependent on the resulting bytes in the destination as the copy
+ // progresses along. This is functionally equivalent to the following:
+ //
+ // for i := 0; i < endPos-dstPos; i++ {
+ // dd.hist[dstPos+i] = dd.hist[srcPos+i]
+ // }
+ // dstPos = endPos
+ //
+ for dstPos < endPos {
+ dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
+ }
+
+ dd.wrPos = dstPos
+ return dstPos - dstBase
+}
+
+// tryWriteCopy tries to copy a string at a given (distance, length) to the
+// output. This specialized version is optimized for short distances.
+//
+// This method is designed to be inlined for performance reasons.
+//
+// This invariant must be kept: 0 < dist <= histSize()
+func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
+ dstPos := dd.wrPos
+ endPos := dstPos + length
+ if dstPos < dist || endPos > len(dd.hist) {
+ return 0
+ }
+ dstBase := dstPos
+ srcPos := dstPos - dist
+
+ // Copy possibly overlapping section before destination position.
+loop:
+ dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
+ if dstPos < endPos {
+ goto loop // Avoid for-loop so that this function can be inlined
+ }
+
+ dd.wrPos = dstPos
+ return dstPos - dstBase
+}
+
+// readFlush returns a slice of the historical buffer that is ready to be
+// emitted to the user. The data returned by readFlush must be fully consumed
+// before calling any other dictDecoder methods.
+func (dd *dictDecoder) readFlush() []byte {
+ toRead := dd.hist[dd.rdPos:dd.wrPos]
+ dd.rdPos = dd.wrPos
+ if dd.wrPos == len(dd.hist) {
+ dd.wrPos, dd.rdPos = 0, 0
+ dd.full = true
+ }
+ return toRead
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/fast_encoder.go
new file mode 100644
index 00000000000..6d4c1e98bc5
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -0,0 +1,254 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Modified for deflate by Klaus Post (c) 2015.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "fmt"
+ "math/bits"
+)
+
+type fastEnc interface {
+ Encode(dst *tokens, src []byte)
+ Reset()
+}
+
+func newFastEnc(level int) fastEnc {
+ switch level {
+ case 1:
+ return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 2:
+ return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 3:
+ return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 4:
+ return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 5:
+ return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 6:
+ return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}}
+ default:
+ panic("invalid level specified")
+ }
+}
+
+const (
+ tableBits = 15 // Bits used in the table
+ tableSize = 1 << tableBits // Size of the table
+ tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
+ baseMatchOffset = 1 // The smallest match offset
+ baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
+ maxMatchOffset = 1 << 15 // The largest match offset
+
+ bTableBits = 17 // Bits used in the big tables
+ bTableSize = 1 << bTableBits // Size of the table
+ allocHistory = maxStoreBlockSize * 10 // Size to preallocate for history.
+ bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
+)
+
+const (
+ prime3bytes = 506832829
+ prime4bytes = 2654435761
+ prime5bytes = 889523592379
+ prime6bytes = 227718039650203
+ prime7bytes = 58295818150454627
+ prime8bytes = 0xcf1bbcdcb7a56463
+)
+
+func load32(b []byte, i int) uint32 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:4]
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:8]
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func load3232(b []byte, i int32) uint32 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:4]
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load6432(b []byte, i int32) uint64 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:8]
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func hash(u uint32) uint32 {
+ return (u * 0x1e35a7bd) >> tableShift
+}
+
+type tableEntry struct {
+ offset int32
+}
+
+// fastGen maintains the table for matches,
+// and the previous byte block for level 2.
+// This is the generic implementation.
+type fastGen struct {
+ hist []byte
+ cur int32
+}
+
+func (e *fastGen) addBlock(src []byte) int32 {
+ // check if we have space already
+ if len(e.hist)+len(src) > cap(e.hist) {
+ if cap(e.hist) == 0 {
+ e.hist = make([]byte, 0, allocHistory)
+ } else {
+ if cap(e.hist) < maxMatchOffset*2 {
+ panic("unexpected buffer size")
+ }
+ // Move down
+ offset := int32(len(e.hist)) - maxMatchOffset
+ copy(e.hist[0:maxMatchOffset], e.hist[offset:])
+ e.cur += offset
+ e.hist = e.hist[:maxMatchOffset]
+ }
+ }
+ s := int32(len(e.hist))
+ e.hist = append(e.hist, src...)
+ return s
+}
+
+// hash4 returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4u(u uint32, h uint8) uint32 {
+ return (u * prime4bytes) >> ((32 - h) & 31)
+}
+
+type tableEntryPrev struct {
+ Cur tableEntry
+ Prev tableEntry
+}
+
+// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4x64(u uint64, h uint8) uint32 {
+ return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
+}
+
+// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash7(u uint64, h uint8) uint32 {
+ return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
+}
+
+// hash8 returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash8(u uint64, h uint8) uint32 {
+ return uint32((u * prime8bytes) >> ((64 - h) & 63))
+}
+
+// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash6(u uint64, h uint8) uint32 {
+ return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
+}
+
+// matchlen will return the match length between offsets and t in src.
+// The maximum length returned is maxMatchLength - 4.
+// It is assumed that s > t, that t >=0 and s < len(src).
+func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
+ if debugDecode {
+ if t >= s {
+ panic(fmt.Sprint("t >=s:", t, s))
+ }
+ if int(s) >= len(src) {
+ panic(fmt.Sprint("s >= len(src):", s, len(src)))
+ }
+ if t < 0 {
+ panic(fmt.Sprint("t < 0:", t))
+ }
+ if s-t > maxMatchOffset {
+ panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
+ }
+ }
+ s1 := int(s) + maxMatchLength - 4
+ if s1 > len(src) {
+ s1 = len(src)
+ }
+
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:s1], src[t:]))
+}
+
+// matchlenLong will return the match length between offsets and t in src.
+// It is assumed that s > t, that t >=0 and s < len(src).
+func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
+ if debugDecode {
+ if t >= s {
+ panic(fmt.Sprint("t >=s:", t, s))
+ }
+ if int(s) >= len(src) {
+ panic(fmt.Sprint("s >= len(src):", s, len(src)))
+ }
+ if t < 0 {
+ panic(fmt.Sprint("t < 0:", t))
+ }
+ if s-t > maxMatchOffset {
+ panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
+ }
+ }
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:], src[t:]))
+}
+
+// Reset the encoding table.
+func (e *fastGen) Reset() {
+ if cap(e.hist) < allocHistory {
+ e.hist = make([]byte, 0, allocHistory)
+ }
+ // We offset current position so everything will be out of reach.
+ // If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
+ if e.cur <= bufferReset {
+ e.cur += maxMatchOffset + int32(len(e.hist))
+ }
+ e.hist = e.hist[:0]
+}
+
+// matchLen returns the maximum length.
+// 'a' must be the shortest of the two.
+func matchLen(a, b []byte) int {
+ b = b[:len(a)]
+ var checked int
+ if len(a) > 4 {
+ // Try 4 bytes first
+ if diff := load32(a, 0) ^ load32(b, 0); diff != 0 {
+ return bits.TrailingZeros32(diff) >> 3
+ }
+ // Switch to 8 byte matching.
+ checked = 4
+ a = a[4:]
+ b = b[4:]
+ for len(a) >= 8 {
+ b = b[:len(a)]
+ if diff := load64(a, 0) ^ load64(b, 0); diff != 0 {
+ return checked + (bits.TrailingZeros64(diff) >> 3)
+ }
+ checked += 8
+ a = a[8:]
+ b = b[8:]
+ }
+ }
+ b = b[:len(a)]
+ for i := range a {
+ if a[i] != b[i] {
+ return int(i) + checked
+ }
+ }
+ return len(a) + checked
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/gen_inflate.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/gen_inflate.go
new file mode 100644
index 00000000000..c74a95fe7f6
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/gen_inflate.go
@@ -0,0 +1,274 @@
+// +build generate
+
+//go:generate go run $GOFILE && gofmt -w inflate_gen.go
+
+package main
+
+import (
+ "os"
+ "strings"
+)
+
+func main() {
+ f, err := os.Create("inflate_gen.go")
+ if err != nil {
+ panic(err)
+ }
+ defer f.Close()
+ types := []string{"*bytes.Buffer", "*bytes.Reader", "*bufio.Reader", "*strings.Reader"}
+ names := []string{"BytesBuffer", "BytesReader", "BufioReader", "StringsReader"}
+ imports := []string{"bytes", "bufio", "io", "strings", "math/bits"}
+ f.WriteString(`// Code generated by go generate gen_inflate.go. DO NOT EDIT.
+
+package flate
+
+import (
+`)
+
+ for _, imp := range imports {
+ f.WriteString("\t\"" + imp + "\"\n")
+ }
+ f.WriteString(")\n\n")
+
+ template := `
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) $FUNCNAME$() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.($TYPE$)
+ moreBits := func() error {
+ c, err := fr.ReadByte()
+ if err != nil {
+ return noEOF(err)
+ }
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
+ return nil
+ }
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & 31)
+ nb += 8
+ }
+ chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ f.b = b >> (n & 31)
+ f.nb = nb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var n uint // number of bits extra
+ var length int
+ var err error
+ switch {
+ case v < 256:
+ f.dict.writeByte(byte(v))
+ if f.dict.availWrite() == 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).$FUNCNAME$
+ f.stepState = stateInit
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ n = 0
+ case v < 269:
+ length = v*2 - (265*2 - 11)
+ n = 1
+ case v < 273:
+ length = v*4 - (269*4 - 19)
+ n = 2
+ case v < 277:
+ length = v*8 - (273*8 - 35)
+ n = 3
+ case v < 281:
+ length = v*16 - (277*16 - 67)
+ n = 4
+ case v < 285:
+ length = v*32 - (281*32 - 131)
+ n = 5
+ case v < maxNumLit:
+ length = 258
+ n = 0
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ if n > 0 {
+ for f.nb < n {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ length += int(f.b & uint32(1<>= n
+ f.nb -= n
+ }
+
+ var dist int
+ if f.hd == nil {
+ for f.nb < 5 {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
+ f.b >>= 5
+ f.nb -= 5
+ } else {
+ if dist, err = f.huffSym(f.hd); err != nil {
+ if debugDecode {
+ fmt.Println("huffsym:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << nb
+ for f.nb < nb {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb>= nb
+ f.nb -= nb
+ dist = 1<<(nb+1) + 1 + extra
+ default:
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > f.dict.histSize() {
+ if debugDecode {
+ fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, dist
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if f.dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).$FUNCNAME$ // We need to continue this work
+ f.stepState = stateDict
+ return
+ }
+ goto readLiteral
+ }
+}
+
+`
+ for i, t := range types {
+ s := strings.Replace(template, "$FUNCNAME$", "huffman"+names[i], -1)
+ s = strings.Replace(s, "$TYPE$", t, -1)
+ f.WriteString(s)
+ }
+ f.WriteString("func (f *decompressor) huffmanBlockDecoder() func() {\n")
+ f.WriteString("\tswitch f.r.(type) {\n")
+ for i, t := range types {
+ f.WriteString("\t\tcase " + t + ":\n")
+ f.WriteString("\t\t\treturn f.huffman" + names[i] + "\n")
+ }
+ f.WriteString("\t\tdefault:\n")
+ f.WriteString("\t\t\treturn f.huffmanBlockGeneric")
+ f.WriteString("\t}\n}\n")
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
new file mode 100644
index 00000000000..53fe1d06e25
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -0,0 +1,911 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "io"
+)
+
+const (
+ // The largest offset code.
+ offsetCodeCount = 30
+
+ // The special code used to mark the end of a block.
+ endBlockMarker = 256
+
+ // The first length code.
+ lengthCodesStart = 257
+
+ // The number of codegen codes.
+ codegenCodeCount = 19
+ badCode = 255
+
+ // bufferFlushSize indicates the buffer size
+ // after which bytes are flushed to the writer.
+ // Should preferably be a multiple of 6, since
+ // we accumulate 6 bytes between writes to the buffer.
+ bufferFlushSize = 240
+
+ // bufferSize is the actual output byte buffer size.
+ // It must have additional headroom for a flush
+ // which can contain up to 8 bytes.
+ bufferSize = bufferFlushSize + 8
+)
+
+// The number of extra bits needed by length code X - LENGTH_CODES_START.
+var lengthExtraBits = [32]int8{
+ /* 257 */ 0, 0, 0,
+ /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
+ /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
+ /* 280 */ 4, 5, 5, 5, 5, 0,
+}
+
+// The length indicated by length code X - LENGTH_CODES_START.
+var lengthBase = [32]uint8{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
+ 12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
+ 64, 80, 96, 112, 128, 160, 192, 224, 255,
+}
+
+// offset code word extra bits.
+var offsetExtraBits = [64]int8{
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
+ 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
+ /* extended window */
+ 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20,
+}
+
+var offsetBase = [64]uint32{
+ /* normal deflate */
+ 0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
+ 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
+ 0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
+ 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
+ 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
+ 0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
+
+ /* extended window */
+ 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000,
+ 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000,
+ 0x100000, 0x180000, 0x200000, 0x300000,
+}
+
+// The odd order in which the codegen code sizes are written.
+var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
+
+type huffmanBitWriter struct {
+ // writer is the underlying writer.
+ // Do not use it directly; use the write method, which ensures
+ // that Write errors are sticky.
+ writer io.Writer
+
+ // Data waiting to be written is bytes[0:nbytes]
+ // and then the low nbits of bits.
+ bits uint64
+ nbits uint16
+ nbytes uint8
+ literalEncoding *huffmanEncoder
+ offsetEncoding *huffmanEncoder
+ codegenEncoding *huffmanEncoder
+ err error
+ lastHeader int
+ // Set between 0 (reused block can be up to 2x the size)
+ logNewTablePenalty uint
+ lastHuffMan bool
+ bytes [256]byte
+ literalFreq [lengthCodesStart + 32]uint16
+ offsetFreq [32]uint16
+ codegenFreq [codegenCodeCount]uint16
+
+ // codegen must have an extra space for the final symbol.
+ codegen [literalCount + offsetCodeCount + 1]uint8
+}
+
+// Huffman reuse.
+//
+// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections.
+//
+// This is controlled by several variables:
+//
+// If lastHeader is non-zero the Huffman table can be reused.
+// This also indicates that a Huffman table has been generated that can output all
+// possible symbols.
+// It also indicates that an EOB has not yet been emitted, so if a new table is generated
+// an EOB with the previous table must be written.
+//
+// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid.
+//
+// An incoming block estimates the output size of a fresh table by calculating the
+// optimal size and adding a penalty in 'logNewTablePenalty'.
+// A Huffman table is not optimal, which is why we add a penalty, and generating a new table
+// is slower both for compression and decompression.
+
+func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
+ return &huffmanBitWriter{
+ writer: w,
+ literalEncoding: newHuffmanEncoder(literalCount),
+ codegenEncoding: newHuffmanEncoder(codegenCodeCount),
+ offsetEncoding: newHuffmanEncoder(offsetCodeCount),
+ }
+}
+
+func (w *huffmanBitWriter) reset(writer io.Writer) {
+ w.writer = writer
+ w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
+ w.lastHeader = 0
+ w.lastHuffMan = false
+}
+
+func (w *huffmanBitWriter) canReuse(t *tokens) (offsets, lits bool) {
+ offsets, lits = true, true
+ a := t.offHist[:offsetCodeCount]
+ b := w.offsetFreq[:len(a)]
+ for i := range a {
+ if b[i] == 0 && a[i] != 0 {
+ offsets = false
+ break
+ }
+ }
+
+ a = t.extraHist[:literalCount-256]
+ b = w.literalFreq[256:literalCount]
+ b = b[:len(a)]
+ for i := range a {
+ if b[i] == 0 && a[i] != 0 {
+ lits = false
+ break
+ }
+ }
+ if lits {
+ a = t.litHist[:]
+ b = w.literalFreq[:len(a)]
+ for i := range a {
+ if b[i] == 0 && a[i] != 0 {
+ lits = false
+ break
+ }
+ }
+ }
+ return
+}
+
+func (w *huffmanBitWriter) flush() {
+ if w.err != nil {
+ w.nbits = 0
+ return
+ }
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+ n := w.nbytes
+ for w.nbits != 0 {
+ w.bytes[n] = byte(w.bits)
+ w.bits >>= 8
+ if w.nbits > 8 { // Avoid underflow
+ w.nbits -= 8
+ } else {
+ w.nbits = 0
+ }
+ n++
+ }
+ w.bits = 0
+ w.write(w.bytes[:n])
+ w.nbytes = 0
+}
+
+func (w *huffmanBitWriter) write(b []byte) {
+ if w.err != nil {
+ return
+ }
+ _, w.err = w.writer.Write(b)
+}
+
+func (w *huffmanBitWriter) writeBits(b int32, nb uint16) {
+ w.bits |= uint64(b) << (w.nbits & 63)
+ w.nbits += nb
+ if w.nbits >= 48 {
+ w.writeOutBits()
+ }
+}
+
+func (w *huffmanBitWriter) writeBytes(bytes []byte) {
+ if w.err != nil {
+ return
+ }
+ n := w.nbytes
+ if w.nbits&7 != 0 {
+ w.err = InternalError("writeBytes with unfinished bits")
+ return
+ }
+ for w.nbits != 0 {
+ w.bytes[n] = byte(w.bits)
+ w.bits >>= 8
+ w.nbits -= 8
+ n++
+ }
+ if n != 0 {
+ w.write(w.bytes[:n])
+ }
+ w.nbytes = 0
+ w.write(bytes)
+}
+
+// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
+// the literal and offset lengths arrays (which are concatenated into a single
+// array). This method generates that run-length encoding.
+//
+// The result is written into the codegen array, and the frequencies
+// of each code is written into the codegenFreq array.
+// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
+// information. Code badCode is an end marker
+//
+// numLiterals The number of literals in literalEncoding
+// numOffsets The number of offsets in offsetEncoding
+// litenc, offenc The literal and offset encoder to use
+func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
+ for i := range w.codegenFreq {
+ w.codegenFreq[i] = 0
+ }
+ // Note that we are using codegen both as a temporary variable for holding
+ // a copy of the frequencies, and as the place where we put the result.
+ // This is fine because the output is always shorter than the input used
+ // so far.
+ codegen := w.codegen[:] // cache
+ // Copy the concatenated code sizes to codegen. Put a marker at the end.
+ cgnl := codegen[:numLiterals]
+ for i := range cgnl {
+ cgnl[i] = uint8(litEnc.codes[i].len)
+ }
+
+ cgnl = codegen[numLiterals : numLiterals+numOffsets]
+ for i := range cgnl {
+ cgnl[i] = uint8(offEnc.codes[i].len)
+ }
+ codegen[numLiterals+numOffsets] = badCode
+
+ size := codegen[0]
+ count := 1
+ outIndex := 0
+ for inIndex := 1; size != badCode; inIndex++ {
+ // INVARIANT: We have seen "count" copies of size that have not yet
+ // had output generated for them.
+ nextSize := codegen[inIndex]
+ if nextSize == size {
+ count++
+ continue
+ }
+ // We need to generate codegen indicating "count" of size.
+ if size != 0 {
+ codegen[outIndex] = size
+ outIndex++
+ w.codegenFreq[size]++
+ count--
+ for count >= 3 {
+ n := 6
+ if n > count {
+ n = count
+ }
+ codegen[outIndex] = 16
+ outIndex++
+ codegen[outIndex] = uint8(n - 3)
+ outIndex++
+ w.codegenFreq[16]++
+ count -= n
+ }
+ } else {
+ for count >= 11 {
+ n := 138
+ if n > count {
+ n = count
+ }
+ codegen[outIndex] = 18
+ outIndex++
+ codegen[outIndex] = uint8(n - 11)
+ outIndex++
+ w.codegenFreq[18]++
+ count -= n
+ }
+ if count >= 3 {
+ // count >= 3 && count <= 10
+ codegen[outIndex] = 17
+ outIndex++
+ codegen[outIndex] = uint8(count - 3)
+ outIndex++
+ w.codegenFreq[17]++
+ count = 0
+ }
+ }
+ count--
+ for ; count >= 0; count-- {
+ codegen[outIndex] = size
+ outIndex++
+ w.codegenFreq[size]++
+ }
+ // Set up invariant for next time through the loop.
+ size = nextSize
+ count = 1
+ }
+ // Marker indicating the end of the codegen.
+ codegen[outIndex] = badCode
+}
+
+func (w *huffmanBitWriter) codegens() int {
+ numCodegens := len(w.codegenFreq)
+ for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+ numCodegens--
+ }
+ return numCodegens
+}
+
+func (w *huffmanBitWriter) headerSize() (size, numCodegens int) {
+ numCodegens = len(w.codegenFreq)
+ for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+ numCodegens--
+ }
+ return 3 + 5 + 5 + 4 + (3 * numCodegens) +
+ w.codegenEncoding.bitLength(w.codegenFreq[:]) +
+ int(w.codegenFreq[16])*2 +
+ int(w.codegenFreq[17])*3 +
+ int(w.codegenFreq[18])*7, numCodegens
+}
+
+// dynamicSize returns the size of dynamically encoded data in bits.
+func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) {
+ size = litEnc.bitLength(w.literalFreq[:]) +
+ offEnc.bitLength(w.offsetFreq[:])
+ return size
+}
+
+// dynamicSize returns the size of dynamically encoded data in bits.
+func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
+ header, numCodegens := w.headerSize()
+ size = header +
+ litEnc.bitLength(w.literalFreq[:]) +
+ offEnc.bitLength(w.offsetFreq[:]) +
+ extraBits
+ return size, numCodegens
+}
+
+// extraBitSize will return the number of bits that will be written
+// as "extra" bits on matches.
+func (w *huffmanBitWriter) extraBitSize() int {
+ total := 0
+ for i, n := range w.literalFreq[257:literalCount] {
+ total += int(n) * int(lengthExtraBits[i&31])
+ }
+ for i, n := range w.offsetFreq[:offsetCodeCount] {
+ total += int(n) * int(offsetExtraBits[i&31])
+ }
+ return total
+}
+
+// fixedSize returns the size of dynamically encoded data in bits.
+func (w *huffmanBitWriter) fixedSize(extraBits int) int {
+ return 3 +
+ fixedLiteralEncoding.bitLength(w.literalFreq[:]) +
+ fixedOffsetEncoding.bitLength(w.offsetFreq[:]) +
+ extraBits
+}
+
+// storedSize calculates the stored size, including header.
+// The function returns the size in bits and whether the block
+// fits inside a single block.
+func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
+ if in == nil {
+ return 0, false
+ }
+ if len(in) <= maxStoreBlockSize {
+ return (len(in) + 5) * 8, true
+ }
+ return 0, false
+}
+
+func (w *huffmanBitWriter) writeCode(c hcode) {
+ // The function does not get inlined if we "& 63" the shift.
+ w.bits |= uint64(c.code) << w.nbits
+ w.nbits += c.len
+ if w.nbits >= 48 {
+ w.writeOutBits()
+ }
+}
+
+// writeOutBits will write bits to the buffer.
+func (w *huffmanBitWriter) writeOutBits() {
+ bits := w.bits
+ w.bits >>= 48
+ w.nbits -= 48
+ n := w.nbytes
+ w.bytes[n] = byte(bits)
+ w.bytes[n+1] = byte(bits >> 8)
+ w.bytes[n+2] = byte(bits >> 16)
+ w.bytes[n+3] = byte(bits >> 24)
+ w.bytes[n+4] = byte(bits >> 32)
+ w.bytes[n+5] = byte(bits >> 40)
+ n += 6
+ if n >= bufferFlushSize {
+ if w.err != nil {
+ n = 0
+ return
+ }
+ w.write(w.bytes[:n])
+ n = 0
+ }
+ w.nbytes = n
+}
+
+// Write the header of a dynamic Huffman block to the output stream.
+//
+// numLiterals The number of literals specified in codegen
+// numOffsets The number of offsets specified in codegen
+// numCodegens The number of codegens used in codegen
+func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
+ if w.err != nil {
+ return
+ }
+ var firstBits int32 = 4
+ if isEof {
+ firstBits = 5
+ }
+ w.writeBits(firstBits, 3)
+ w.writeBits(int32(numLiterals-257), 5)
+ w.writeBits(int32(numOffsets-1), 5)
+ w.writeBits(int32(numCodegens-4), 4)
+
+ for i := 0; i < numCodegens; i++ {
+ value := uint(w.codegenEncoding.codes[codegenOrder[i]].len)
+ w.writeBits(int32(value), 3)
+ }
+
+ i := 0
+ for {
+ var codeWord = uint32(w.codegen[i])
+ i++
+ if codeWord == badCode {
+ break
+ }
+ w.writeCode(w.codegenEncoding.codes[codeWord])
+
+ switch codeWord {
+ case 16:
+ w.writeBits(int32(w.codegen[i]), 2)
+ i++
+ case 17:
+ w.writeBits(int32(w.codegen[i]), 3)
+ i++
+ case 18:
+ w.writeBits(int32(w.codegen[i]), 7)
+ i++
+ }
+ }
+}
+
+// writeStoredHeader will write a stored header.
+// If the stored block is only used for EOF,
+// it is replaced with a fixed huffman block.
+func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
+ if w.err != nil {
+ return
+ }
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+
+ // To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes.
+ if length == 0 && isEof {
+ w.writeFixedHeader(isEof)
+ // EOB: 7 bits, value: 0
+ w.writeBits(0, 7)
+ w.flush()
+ return
+ }
+
+ var flag int32
+ if isEof {
+ flag = 1
+ }
+ w.writeBits(flag, 3)
+ w.flush()
+ w.writeBits(int32(length), 16)
+ w.writeBits(int32(^uint16(length)), 16)
+}
+
+func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
+ if w.err != nil {
+ return
+ }
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+
+ // Indicate that we are a fixed Huffman block
+ var value int32 = 2
+ if isEof {
+ value = 3
+ }
+ w.writeBits(value, 3)
+}
+
+// writeBlock will write a block of tokens with the smallest encoding.
+// The original input can be supplied, and if the huffman encoded data
+// is larger than the original bytes, the data will be written as a
+// stored block.
+// If the input is nil, the tokens will always be Huffman encoded.
+func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
+ if w.err != nil {
+ return
+ }
+
+ tokens.AddEOB()
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+ numLiterals, numOffsets := w.indexTokens(tokens, false)
+ w.generate(tokens)
+ var extraBits int
+ storedSize, storable := w.storedSize(input)
+ if storable {
+ extraBits = w.extraBitSize()
+ }
+
+ // Figure out smallest code.
+ // Fixed Huffman baseline.
+ var literalEncoding = fixedLiteralEncoding
+ var offsetEncoding = fixedOffsetEncoding
+ var size = w.fixedSize(extraBits)
+
+ // Dynamic Huffman?
+ var numCodegens int
+
+ // Generate codegen and codegenFrequencies, which indicates how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+ dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
+
+ if dynamicSize < size {
+ size = dynamicSize
+ literalEncoding = w.literalEncoding
+ offsetEncoding = w.offsetEncoding
+ }
+
+ // Stored bytes?
+ if storable && storedSize < size {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+
+ // Huffman.
+ if literalEncoding == fixedLiteralEncoding {
+ w.writeFixedHeader(eof)
+ } else {
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ }
+
+ // Write the tokens.
+ w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes)
+}
+
+// writeBlockDynamic encodes a block using a dynamic Huffman table.
+// This should be used if the symbols used have a disproportionate
+// histogram distribution.
+// If input is supplied and the compression savings are below 1/16th of the
+// input size the block is stored.
+func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) {
+ if w.err != nil {
+ return
+ }
+
+ sync = sync || eof
+ if sync {
+ tokens.AddEOB()
+ }
+
+ // We cannot reuse pure huffman table, and must mark as EOF.
+ if (w.lastHuffMan || eof) && w.lastHeader > 0 {
+ // We will not try to reuse.
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ w.lastHuffMan = false
+ }
+ if !sync {
+ tokens.Fill()
+ }
+ numLiterals, numOffsets := w.indexTokens(tokens, !sync)
+
+ var size int
+ // Check if we should reuse.
+ if w.lastHeader > 0 {
+ // Estimate size for using a new table.
+ // Use the previous header size as the best estimate.
+ newSize := w.lastHeader + tokens.EstimatedBits()
+ newSize += newSize >> w.logNewTablePenalty
+
+ // The estimated size is calculated as an optimal table.
+ // We add a penalty to make it more realistic and re-use a bit more.
+ reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + w.extraBitSize()
+
+ // Check if a new table is better.
+ if newSize < reuseSize {
+ // Write the EOB we owe.
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ size = newSize
+ w.lastHeader = 0
+ } else {
+ size = reuseSize
+ }
+ // Check if we get a reasonable size decrease.
+ if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ w.lastHeader = 0
+ return
+ }
+ }
+
+ // We want a new block/table
+ if w.lastHeader == 0 {
+ w.generate(tokens)
+ // Generate codegen and codegenFrequencies, which indicates how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+ var numCodegens int
+ size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, w.extraBitSize())
+ // Store bytes, if we don't get a reasonable improvement.
+ if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ w.lastHeader = 0
+ return
+ }
+
+ // Write Huffman table.
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ w.lastHeader, _ = w.headerSize()
+ w.lastHuffMan = false
+ }
+
+ if sync {
+ w.lastHeader = 0
+ }
+ // Write the tokens.
+ w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes)
+}
+
+// indexTokens indexes a slice of tokens, and updates
+// literalFreq and offsetFreq, and generates literalEncoding
+// and offsetEncoding.
+// The number of literal and offset tokens is returned.
+func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
+ copy(w.literalFreq[:], t.litHist[:])
+ copy(w.literalFreq[256:], t.extraHist[:])
+ copy(w.offsetFreq[:], t.offHist[:offsetCodeCount])
+
+ if t.n == 0 {
+ return
+ }
+ if filled {
+ return maxNumLit, maxNumDist
+ }
+ // get the number of literals
+ numLiterals = len(w.literalFreq)
+ for w.literalFreq[numLiterals-1] == 0 {
+ numLiterals--
+ }
+ // get the number of offsets
+ numOffsets = len(w.offsetFreq)
+ for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
+ numOffsets--
+ }
+ if numOffsets == 0 {
+ // We haven't found a single match. If we want to go with the dynamic encoding,
+ // we should count at least one offset to be sure that the offset huffman tree could be encoded.
+ w.offsetFreq[0] = 1
+ numOffsets = 1
+ }
+ return
+}
+
+func (w *huffmanBitWriter) generate(t *tokens) {
+ w.literalEncoding.generate(w.literalFreq[:literalCount], 15)
+ w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
+}
+
+// writeTokens writes a slice of tokens to the output.
+// codes for literal and offset encoding must be supplied.
+func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
+ if w.err != nil {
+ return
+ }
+ if len(tokens) == 0 {
+ return
+ }
+
+ // Only last token should be endBlockMarker.
+ var deferEOB bool
+ if tokens[len(tokens)-1] == endBlockMarker {
+ tokens = tokens[:len(tokens)-1]
+ deferEOB = true
+ }
+
+ // Create slices up to the next power of two to avoid bounds checks.
+ lits := leCodes[:256]
+ offs := oeCodes[:32]
+ lengths := leCodes[lengthCodesStart:]
+ lengths = lengths[:32]
+ for _, t := range tokens {
+ if t < matchType {
+ w.writeCode(lits[t.literal()])
+ continue
+ }
+
+ // Write the length
+ length := t.length()
+ lengthCode := lengthCode(length)
+ if false {
+ w.writeCode(lengths[lengthCode&31])
+ } else {
+ // inlined
+ c := lengths[lengthCode&31]
+ w.bits |= uint64(c.code) << (w.nbits & 63)
+ w.nbits += c.len
+ if w.nbits >= 48 {
+ w.writeOutBits()
+ }
+ }
+
+ extraLengthBits := uint16(lengthExtraBits[lengthCode&31])
+ if extraLengthBits > 0 {
+ extraLength := int32(length - lengthBase[lengthCode&31])
+ w.writeBits(extraLength, extraLengthBits)
+ }
+ // Write the offset
+ offset := t.offset()
+ offsetCode := offsetCode(offset)
+ if false {
+ w.writeCode(offs[offsetCode&31])
+ } else {
+ // inlined
+ c := offs[offsetCode&31]
+ w.bits |= uint64(c.code) << (w.nbits & 63)
+ w.nbits += c.len
+ if w.nbits >= 48 {
+ w.writeOutBits()
+ }
+ }
+ extraOffsetBits := uint16(offsetExtraBits[offsetCode&63])
+ if extraOffsetBits > 0 {
+ extraOffset := int32(offset - offsetBase[offsetCode&63])
+ w.writeBits(extraOffset, extraOffsetBits)
+ }
+ }
+ if deferEOB {
+ w.writeCode(leCodes[endBlockMarker])
+ }
+}
+
+// huffOffset is a static offset encoder used for huffman only encoding.
+// It can be reused since we will not be encoding offset values.
+var huffOffset *huffmanEncoder
+
+func init() {
+ w := newHuffmanBitWriter(nil)
+ w.offsetFreq[0] = 1
+ huffOffset = newHuffmanEncoder(offsetCodeCount)
+ huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15)
+}
+
+// writeBlockHuff encodes a block of bytes as either
+// Huffman encoded literals or uncompressed bytes if the
+// result gains only very little from compression.
+func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
+ if w.err != nil {
+ return
+ }
+
+ // Clear histogram
+ for i := range w.literalFreq[:] {
+ w.literalFreq[i] = 0
+ }
+ if !w.lastHuffMan {
+ for i := range w.offsetFreq[:] {
+ w.offsetFreq[i] = 0
+ }
+ }
+
+ // Add everything as literals
+ // We have to estimate the header size.
+ // Assume header is around 70 bytes:
+ // https://stackoverflow.com/a/25454430
+ const guessHeaderSizeBits = 70 * 8
+ estBits, estExtra := histogramSize(input, w.literalFreq[:], !eof && !sync)
+ estBits += w.lastHeader + 15
+ if w.lastHeader == 0 {
+ estBits += guessHeaderSizeBits
+ }
+ estBits += estBits >> w.logNewTablePenalty
+
+ // Store bytes, if we don't get a reasonable improvement.
+ ssize, storable := w.storedSize(input)
+ if storable && ssize < estBits {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+
+ if w.lastHeader > 0 {
+ reuseSize := w.literalEncoding.bitLength(w.literalFreq[:256])
+ estBits += estExtra
+
+ if estBits < reuseSize {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+ }
+
+ const numLiterals = endBlockMarker + 1
+ const numOffsets = 1
+ if w.lastHeader == 0 {
+ w.literalFreq[endBlockMarker] = 1
+ w.literalEncoding.generate(w.literalFreq[:numLiterals], 15)
+
+ // Generate codegen and codegenFrequencies, which indicates how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+ numCodegens := w.codegens()
+
+ // Huffman.
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ w.lastHuffMan = true
+ w.lastHeader, _ = w.headerSize()
+ }
+
+ encoding := w.literalEncoding.codes[:257]
+ for _, t := range input {
+ // Bitwriting inlined, ~30% speedup
+ c := encoding[t]
+ w.bits |= uint64(c.code) << ((w.nbits) & 63)
+ w.nbits += c.len
+ if w.nbits >= 48 {
+ bits := w.bits
+ w.bits >>= 48
+ w.nbits -= 48
+ n := w.nbytes
+ w.bytes[n] = byte(bits)
+ w.bytes[n+1] = byte(bits >> 8)
+ w.bytes[n+2] = byte(bits >> 16)
+ w.bytes[n+3] = byte(bits >> 24)
+ w.bytes[n+4] = byte(bits >> 32)
+ w.bytes[n+5] = byte(bits >> 40)
+ n += 6
+ if n >= bufferFlushSize {
+ if w.err != nil {
+ n = 0
+ return
+ }
+ w.write(w.bytes[:n])
+ n = 0
+ }
+ w.nbytes = n
+ }
+ }
+ if eof || sync {
+ w.writeCode(encoding[endBlockMarker])
+ w.lastHeader = 0
+ w.lastHuffMan = false
+ }
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_code.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_code.go
new file mode 100644
index 00000000000..4c39a301871
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -0,0 +1,363 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "math"
+ "math/bits"
+)
+
+const (
+ maxBitsLimit = 16
+ // number of valid literals
+ literalCount = 286
+)
+
+// hcode is a huffman code with a bit code and bit length.
+type hcode struct {
+ code, len uint16
+}
+
+type huffmanEncoder struct {
+ codes []hcode
+ freqcache []literalNode
+ bitCount [17]int32
+}
+
+type literalNode struct {
+ literal uint16
+ freq uint16
+}
+
+// A levelInfo describes the state of the constructed tree for a given depth.
+type levelInfo struct {
+ // Our level. for better printing
+ level int32
+
+ // The frequency of the last node at this level
+ lastFreq int32
+
+ // The frequency of the next character to add to this level
+ nextCharFreq int32
+
+ // The frequency of the next pair (from level below) to add to this level.
+ // Only valid if the "needed" value of the next lower level is 0.
+ nextPairFreq int32
+
+ // The number of chains remaining to generate for this level before moving
+ // up to the next level
+ needed int32
+}
+
+// set sets the code and length of an hcode.
+func (h *hcode) set(code uint16, length uint16) {
+ h.len = length
+ h.code = code
+}
+
+func reverseBits(number uint16, bitLength byte) uint16 {
+ return bits.Reverse16(number << ((16 - bitLength) & 15))
+}
+
+func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} }
+
+func newHuffmanEncoder(size int) *huffmanEncoder {
+ // Make capacity to next power of two.
+ c := uint(bits.Len32(uint32(size - 1)))
+	return &huffmanEncoder{codes: make([]hcode, size, 1<<c)}
+}
+
+func generateFixedLiteralEncoding() *huffmanEncoder {
+	h := newHuffmanEncoder(literalCount)
+	codes := h.codes
+	var ch uint16
+	for ch = 0; ch < literalCount; ch++ {
+		var bits uint16
+		var size uint16
+		switch {
+		case ch < 144:
+			// size 8, 000110000  .. 10111111
+			bits = ch + 48
+			size = 8
+		case ch < 256:
+			// size 9, 110010000 .. 111111111
+			bits = ch + 400 - 144
+			size = 9
+		case ch < 280:
+			// size 7, 0000000 .. 0010111
+			bits = ch - 256
+			size = 7
+		default:
+			// size 8, 11000000 .. 11000111
+			bits = ch + 192 - 280
+			size = 8
+		}
+		codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size}
+	}
+	return h
+}
+
+func generateFixedOffsetEncoding() *huffmanEncoder {
+	h := newHuffmanEncoder(30)
+	codes := h.codes
+	for ch := range codes {
+		codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5}
+	}
+	return h
+}
+
+var fixedLiteralEncoding = generateFixedLiteralEncoding()
+var fixedOffsetEncoding = generateFixedOffsetEncoding()
+
+func (h *huffmanEncoder) bitLength(freq []uint16) int {
+	var total int
+	for i, f := range freq {
+		if f != 0 {
+			total += int(f) * int(h.codes[i].len)
+		}
+	}
+	return total
+}
+
+// Return the number of literals assigned to each bit size in the Huffman encoding
+//
+// This method is only called when list.length >= 3
+// The cases of 0, 1, and 2 literals are handled by special case code.
+//
+// list An array of the literals with non-zero frequencies
+// and their associated frequencies. The array is in order of increasing
+// frequency, and has as its last element a special element with frequency
+// MaxInt32
+// maxBits The maximum number of bits that should be used to encode any literal.
+// Must be less than 16.
+// return An integer array in which array[i] indicates the number of literals
+// that should be encoded in i bits.
+func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
+ if maxBits >= maxBitsLimit {
+ panic("flate: maxBits too large")
+ }
+ n := int32(len(list))
+ list = list[0 : n+1]
+ list[n] = maxNode()
+
+ // The tree can't have greater depth than n - 1, no matter what. This
+ // saves a little bit of work in some small cases
+ if maxBits > n-1 {
+ maxBits = n - 1
+ }
+
+ // Create information about each of the levels.
+ // A bogus "Level 0" whose sole purpose is so that
+ // level1.prev.needed==0. This makes level1.nextPairFreq
+ // be a legitimate value that never gets chosen.
+ var levels [maxBitsLimit]levelInfo
+ // leafCounts[i] counts the number of literals at the left
+ // of ancestors of the rightmost node at level i.
+ // leafCounts[i][j] is the number of literals at the left
+ // of the level j ancestor.
+ var leafCounts [maxBitsLimit][maxBitsLimit]int32
+
+ for level := int32(1); level <= maxBits; level++ {
+ // For every level, the first two items are the first two characters.
+ // We initialize the levels as if we had already figured this out.
+ levels[level] = levelInfo{
+ level: level,
+ lastFreq: int32(list[1].freq),
+ nextCharFreq: int32(list[2].freq),
+ nextPairFreq: int32(list[0].freq) + int32(list[1].freq),
+ }
+ leafCounts[level][level] = 2
+ if level == 1 {
+ levels[level].nextPairFreq = math.MaxInt32
+ }
+ }
+
+ // We need a total of 2*n - 2 items at top level and have already generated 2.
+ levels[maxBits].needed = 2*n - 4
+
+ level := maxBits
+ for {
+ l := &levels[level]
+ if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
+ // We've run out of both leafs and pairs.
+ // End all calculations for this level.
+ // To make sure we never come back to this level or any lower level,
+ // set nextPairFreq impossibly large.
+ l.needed = 0
+ levels[level+1].nextPairFreq = math.MaxInt32
+ level++
+ continue
+ }
+
+ prevFreq := l.lastFreq
+ if l.nextCharFreq < l.nextPairFreq {
+ // The next item on this row is a leaf node.
+ n := leafCounts[level][level] + 1
+ l.lastFreq = l.nextCharFreq
+			// Lower leafCounts are the same as the previous node.
+ leafCounts[level][level] = n
+ e := list[n]
+ if e.literal < math.MaxUint16 {
+ l.nextCharFreq = int32(e.freq)
+ } else {
+ l.nextCharFreq = math.MaxInt32
+ }
+ } else {
+ // The next item on this row is a pair from the previous row.
+ // nextPairFreq isn't valid until we generate two
+ // more values in the level below
+ l.lastFreq = l.nextPairFreq
+ // Take leaf counts from the lower level, except counts[level] remains the same.
+ copy(leafCounts[level][:level], leafCounts[level-1][:level])
+ levels[l.level-1].needed = 2
+ }
+
+ if l.needed--; l.needed == 0 {
+ // We've done everything we need to do for this level.
+ // Continue calculating one level up. Fill in nextPairFreq
+ // of that level with the sum of the two nodes we've just calculated on
+ // this level.
+ if l.level == maxBits {
+ // All done!
+ break
+ }
+ levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
+ level++
+ } else {
+ // If we stole from below, move down temporarily to replenish it.
+ for levels[level-1].needed > 0 {
+ level--
+ }
+ }
+ }
+
+	// Something is wrong if, at the end, the top level is null or hasn't used
+ // all of the leaves.
+ if leafCounts[maxBits][maxBits] != n {
+ panic("leafCounts[maxBits][maxBits] != n")
+ }
+
+ bitCount := h.bitCount[:maxBits+1]
+ bits := 1
+ counts := &leafCounts[maxBits]
+ for level := maxBits; level > 0; level-- {
+ // chain.leafCount gives the number of literals requiring at least "bits"
+ // bits to encode.
+ bitCount[bits] = counts[level] - counts[level-1]
+ bits++
+ }
+ return bitCount
+}
+
+// Look at the leaves and assign them a bit count and an encoding as specified
+// in RFC 1951 3.2.2
+func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
+ code := uint16(0)
+ for n, bits := range bitCount {
+ code <<= 1
+ if n == 0 || bits == 0 {
+ continue
+ }
+ // The literals list[len(list)-bits] .. list[len(list)-bits]
+ // are encoded using "bits" bits, and get the values
+ // code, code + 1, .... The code values are
+ // assigned in literal order (not frequency order).
+ chunk := list[len(list)-int(bits):]
+
+ sortByLiteral(chunk)
+ for _, node := range chunk {
+ h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
+ code++
+ }
+ list = list[0 : len(list)-int(bits)]
+ }
+}
+
+// Update this Huffman Code object to be the minimum code for the specified frequency count.
+//
+// freq An array of frequencies, in which frequency[i] gives the frequency of literal i.
+// maxBits The maximum number of bits to use for any literal.
+func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
+ if h.freqcache == nil {
+ // Allocate a reusable buffer with the longest possible frequency table.
+ // Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
+ // The largest of these is literalCount, so we allocate for that case.
+ h.freqcache = make([]literalNode, literalCount+1)
+ }
+ list := h.freqcache[:len(freq)+1]
+ // Number of non-zero literals
+ count := 0
+ // Set list to be the set of all non-zero literals and their frequencies
+ for i, f := range freq {
+ if f != 0 {
+ list[count] = literalNode{uint16(i), f}
+ count++
+ } else {
+ list[count] = literalNode{}
+ h.codes[i].len = 0
+ }
+ }
+ list[len(freq)] = literalNode{}
+
+ list = list[:count]
+ if count <= 2 {
+ // Handle the small cases here, because they are awkward for the general case code. With
+ // two or fewer literals, everything has bit length 1.
+ for i, node := range list {
+ // "list" is in order of increasing literal value.
+ h.codes[node.literal].set(uint16(i), 1)
+ }
+ return
+ }
+ sortByFreq(list)
+
+ // Get the number of literals for each bit count
+ bitCount := h.bitCounts(list, maxBits)
+ // And do the assignment
+ h.assignEncodingAndSize(bitCount, list)
+}
+
+func atLeastOne(v float32) float32 {
+ if v < 1 {
+ return 1
+ }
+ return v
+}
+
+// histogramSize accumulates a histogram of b in h.
+// An estimated size in bits is returned.
+// Unassigned values are assigned '1' in the histogram.
+// len(h) must be >= 256, and h's elements must be all zeroes.
+func histogramSize(b []byte, h []uint16, fill bool) (int, int) {
+ h = h[:256]
+ for _, t := range b {
+ h[t]++
+ }
+ invTotal := 1.0 / float32(len(b))
+ shannon := float32(0.0)
+ var extra float32
+ if fill {
+ oneBits := atLeastOne(-mFastLog2(invTotal))
+ for i, v := range h[:] {
+ if v > 0 {
+ n := float32(v)
+ shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+ } else {
+ h[i] = 1
+ extra += oneBits
+ }
+ }
+ } else {
+ for _, v := range h[:] {
+ if v > 0 {
+ n := float32(v)
+ shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+ }
+ }
+ }
+
+ return int(shannon + 0.99), int(extra + 0.99)
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
new file mode 100644
index 00000000000..20778029900
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
@@ -0,0 +1,178 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// Sort sorts data.
+// It makes one call to data.Len to determine n, and O(n*log(n)) calls to
+// data.Less and data.Swap. The sort is not guaranteed to be stable.
+func sortByFreq(data []literalNode) {
+ n := len(data)
+ quickSortByFreq(data, 0, n, maxDepth(n))
+}
+
// quickSortByFreq sorts data[a:b] by (freq, literal), recursing at most
// maxDepth levels before falling back to heapsort; small ranges are
// finished with a Shell/insertion pass. Adapted from the standard
// library's sort.quickSort, specialized to literalNode.
func quickSortByFreq(data []literalNode, a, b, maxDepth int) {
	for b-a > 12 { // Use ShellSort for slices <= 12 elements
		if maxDepth == 0 {
			heapSort(data, a, b)
			return
		}
		maxDepth--
		mlo, mhi := doPivotByFreq(data, a, b)
		// Avoiding recursion on the larger subproblem guarantees
		// a stack depth of at most lg(b-a).
		if mlo-a < b-mhi {
			quickSortByFreq(data, a, mlo, maxDepth)
			a = mhi // i.e., quickSortByFreq(data, mhi, b)
		} else {
			quickSortByFreq(data, mhi, b, maxDepth)
			b = mlo // i.e., quickSortByFreq(data, a, mlo)
		}
	}
	if b-a > 1 {
		// Do ShellSort pass with gap 6
		// It could be written in this simplified form cause b-a <= 12
		for i := a + 6; i < b; i++ {
			if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq {
				data[i], data[i-6] = data[i-6], data[i]
			}
		}
		insertionSortByFreq(data, a, b)
	}
}
+
// siftDownByFreq implements the heap property on data[lo, hi).
// first is an offset into the array where the root of the heap lies.
// Ordering is by frequency, with ties broken by literal value.
func siftDownByFreq(data []literalNode, lo, hi, first int) {
	root := lo
	for {
		child := 2*root + 1
		if child >= hi {
			break
		}
		// Pick the larger of the two children under the (freq, literal) order.
		if child+1 < hi && (data[first+child].freq == data[first+child+1].freq && data[first+child].literal < data[first+child+1].literal || data[first+child].freq < data[first+child+1].freq) {
			child++
		}
		// Stop once the root is not smaller than its larger child.
		if data[first+root].freq == data[first+child].freq && data[first+root].literal > data[first+child].literal || data[first+root].freq > data[first+child].freq {
			return
		}
		data[first+root], data[first+child] = data[first+child], data[first+root]
		root = child
	}
}
// doPivotByFreq partitions data[lo:hi] around a pivot chosen by median of
// three (median of nine for ranges longer than 40), ordering by
// (freq, literal). It returns midlo, midhi such that, per the invariants
// below, data[lo:midlo] < pivot, data[midlo:midhi] == pivot and
// data[midhi:hi] > pivot.
func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) {
	m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
	if hi-lo > 40 {
		// Tukey's ``Ninther,'' median of three medians of three.
		s := (hi - lo) / 8
		medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s)
		medianOfThreeSortByFreq(data, m, m-s, m+s)
		medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s)
	}
	medianOfThreeSortByFreq(data, lo, m, hi-1)

	// Invariants are:
	// data[lo] = pivot (set up by ChoosePivot)
	// data[lo < i < a] < pivot
	// data[a <= i < b] <= pivot
	// data[b <= i < c] unexamined
	// data[c <= i < hi-1] > pivot
	// data[hi-1] >= pivot
	pivot := lo
	a, c := lo+1, hi-1

	for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ {
	}
	b := a
	for {
		for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot
		}
		for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot
		}
		if b >= c {
			break
		}
		// data[b] > pivot; data[c-1] <= pivot
		data[b], data[c-1] = data[c-1], data[b]
		b++
		c--
	}
	// If hi-c<3 then there are duplicates (by property of median of nine).
	// Let's be a bit more conservative, and set border to 5.
	protect := hi-c < 5
	if !protect && hi-c < (hi-lo)/4 {
		// Lets test some points for equality to pivot
		dups := 0
		if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot
			data[c], data[hi-1] = data[hi-1], data[c]
			c++
			dups++
		}
		if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot
			b--
			dups++
		}
		// m-lo = (hi-lo)/2 > 6
		// b-lo > (hi-lo)*3/4-1 > 8
		// ==> m < b ==> data[m] <= pivot
		if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot
			data[m], data[b-1] = data[b-1], data[m]
			b--
			dups++
		}
		// if at least 2 points are equal to pivot, assume skewed distribution
		protect = dups > 1
	}
	if protect {
		// Protect against a lot of duplicates
		// Add invariant:
		// data[a <= i < b] unexamined
		// data[b <= i < c] = pivot
		for {
			for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot
			}
			for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot
			}
			if a >= b {
				break
			}
			// data[a] == pivot; data[b-1] < pivot
			data[a], data[b-1] = data[b-1], data[a]
			a++
			b--
		}
	}
	// Swap pivot into middle
	data[pivot], data[b-1] = data[b-1], data[pivot]
	return b - 1, c
}
+
+// Insertion sort
+func insertionSortByFreq(data []literalNode, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
// quickSortByFreq, loosely following Bentley and McIlroy,
// ``Engineering a Sort Function,'' SP&E November 1993.

// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
// Note the unusual parameter order: the destination index m1 comes first.
// Ordering is by frequency with ties broken by literal value.
func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) {
	// sort 3 elements
	if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
		data[m1], data[m0] = data[m0], data[m1]
	}
	// data[m0] <= data[m1]
	if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq {
		data[m2], data[m1] = data[m1], data[m2]
		// data[m0] <= data[m2] && data[m1] < data[m2]
		if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
			data[m1], data[m0] = data[m0], data[m1]
		}
	}
	// now data[m0] <= data[m1] <= data[m2]
}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
new file mode 100644
index 00000000000..93f1aea109e
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
@@ -0,0 +1,201 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// Sort sorts data.
+// It makes one call to data.Len to determine n, and O(n*log(n)) calls to
+// data.Less and data.Swap. The sort is not guaranteed to be stable.
+func sortByLiteral(data []literalNode) {
+ n := len(data)
+ quickSort(data, 0, n, maxDepth(n))
+}
+
// quickSort sorts data[a:b] by literal value, recursing at most maxDepth
// levels before falling back to heapsort; small ranges are finished with a
// Shell/insertion pass. Adapted from the standard library's sort.quickSort.
func quickSort(data []literalNode, a, b, maxDepth int) {
	for b-a > 12 { // Use ShellSort for slices <= 12 elements
		if maxDepth == 0 {
			heapSort(data, a, b)
			return
		}
		maxDepth--
		mlo, mhi := doPivot(data, a, b)
		// Avoiding recursion on the larger subproblem guarantees
		// a stack depth of at most lg(b-a).
		if mlo-a < b-mhi {
			quickSort(data, a, mlo, maxDepth)
			a = mhi // i.e., quickSort(data, mhi, b)
		} else {
			quickSort(data, mhi, b, maxDepth)
			b = mlo // i.e., quickSort(data, a, mlo)
		}
	}
	if b-a > 1 {
		// Do ShellSort pass with gap 6
		// It could be written in this simplified form cause b-a <= 12
		for i := a + 6; i < b; i++ {
			if data[i].literal < data[i-6].literal {
				data[i], data[i-6] = data[i-6], data[i]
			}
		}
		insertionSort(data, a, b)
	}
}
// heapSort sorts data[a:b] with a max-heap, comparing by literal value
// (via siftDown). It is the depth-limit fallback for quickSort.
// NOTE(review): quickSortByFreq also falls back to this literal-ordered
// heapsort; this mirrors the upstream vendored code — confirm intentional.
func heapSort(data []literalNode, a, b int) {
	first := a
	lo := 0
	hi := b - a

	// Build heap with greatest element at top.
	for i := (hi - 1) / 2; i >= 0; i-- {
		siftDown(data, i, hi, first)
	}

	// Pop elements, largest first, into end of data.
	for i := hi - 1; i >= 0; i-- {
		data[first], data[first+i] = data[first+i], data[first]
		siftDown(data, lo, i, first)
	}
}
+
// siftDown implements the heap property on data[lo, hi).
// first is an offset into the array where the root of the heap lies.
// Ordering is by literal value only.
func siftDown(data []literalNode, lo, hi, first int) {
	root := lo
	for {
		child := 2*root + 1
		if child >= hi {
			break
		}
		// Pick the larger of the two children.
		if child+1 < hi && data[first+child].literal < data[first+child+1].literal {
			child++
		}
		// Stop once the root is not smaller than its larger child.
		if data[first+root].literal > data[first+child].literal {
			return
		}
		data[first+root], data[first+child] = data[first+child], data[first+root]
		root = child
	}
}
// doPivot partitions data[lo:hi] around a pivot chosen by median of three
// (median of nine for ranges longer than 40), ordering by literal value.
// It returns midlo, midhi such that, per the invariants below,
// data[lo:midlo] < pivot, data[midlo:midhi] == pivot and
// data[midhi:hi] > pivot.
func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) {
	m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
	if hi-lo > 40 {
		// Tukey's ``Ninther,'' median of three medians of three.
		s := (hi - lo) / 8
		medianOfThree(data, lo, lo+s, lo+2*s)
		medianOfThree(data, m, m-s, m+s)
		medianOfThree(data, hi-1, hi-1-s, hi-1-2*s)
	}
	medianOfThree(data, lo, m, hi-1)

	// Invariants are:
	// data[lo] = pivot (set up by ChoosePivot)
	// data[lo < i < a] < pivot
	// data[a <= i < b] <= pivot
	// data[b <= i < c] unexamined
	// data[c <= i < hi-1] > pivot
	// data[hi-1] >= pivot
	pivot := lo
	a, c := lo+1, hi-1

	for ; a < c && data[a].literal < data[pivot].literal; a++ {
	}
	b := a
	for {
		for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot
		}
		for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot
		}
		if b >= c {
			break
		}
		// data[b] > pivot; data[c-1] <= pivot
		data[b], data[c-1] = data[c-1], data[b]
		b++
		c--
	}
	// If hi-c<3 then there are duplicates (by property of median of nine).
	// Let's be a bit more conservative, and set border to 5.
	protect := hi-c < 5
	if !protect && hi-c < (hi-lo)/4 {
		// Lets test some points for equality to pivot
		dups := 0
		if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot
			data[c], data[hi-1] = data[hi-1], data[c]
			c++
			dups++
		}
		if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot
			b--
			dups++
		}
		// m-lo = (hi-lo)/2 > 6
		// b-lo > (hi-lo)*3/4-1 > 8
		// ==> m < b ==> data[m] <= pivot
		if data[m].literal > data[pivot].literal { // data[m] = pivot
			data[m], data[b-1] = data[b-1], data[m]
			b--
			dups++
		}
		// if at least 2 points are equal to pivot, assume skewed distribution
		protect = dups > 1
	}
	if protect {
		// Protect against a lot of duplicates
		// Add invariant:
		// data[a <= i < b] unexamined
		// data[b <= i < c] = pivot
		for {
			for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot
			}
			for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot
			}
			if a >= b {
				break
			}
			// data[a] == pivot; data[b-1] < pivot
			data[a], data[b-1] = data[b-1], data[a]
			a++
			b--
		}
	}
	// Swap pivot into middle
	data[pivot], data[b-1] = data[b-1], data[pivot]
	return b - 1, c
}
+
+// Insertion sort
+func insertionSort(data []literalNode, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && data[j].literal < data[j-1].literal; j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
// maxDepth returns a threshold at which quicksort should switch
// to heapsort. It returns 2*ceil(lg(n+1)).
func maxDepth(n int) int {
	d := 0
	for v := n; v > 0; v >>= 1 {
		d++
	}
	return 2 * d
}
+
+// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
+func medianOfThree(data []literalNode, m1, m0, m2 int) {
+ // sort 3 elements
+ if data[m1].literal < data[m0].literal {
+ data[m1], data[m0] = data[m0], data[m1]
+ }
+ // data[m0] <= data[m1]
+ if data[m2].literal < data[m1].literal {
+ data[m2], data[m1] = data[m1], data[m2]
+ // data[m0] <= data[m2] && data[m1] < data[m2]
+ if data[m1].literal < data[m0].literal {
+ data[m1], data[m0] = data[m0], data[m1]
+ }
+ }
+ // now data[m0] <= data[m1] <= data[m2]
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/inflate.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/inflate.go
new file mode 100644
index 00000000000..7f175a4ec26
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/inflate.go
@@ -0,0 +1,1000 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package flate implements the DEFLATE compressed data format, described in
+// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file
+// formats.
+package flate
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "math/bits"
+ "strconv"
+ "sync"
+)
+
const (
	maxCodeLen     = 16 // max length of Huffman code
	maxCodeLenMask = 15 // mask for max length of Huffman code
	// The next three numbers come from the RFC section 3.2.7, with the
	// additional proviso in section 3.2.5 which implies that distance codes
	// 30 and 31 should never occur in compressed data.
	maxNumLit  = 286
	maxNumDist = 30
	numCodes   = 19 // number of codes in Huffman meta-code

	// debugDecode enables extra diagnostic prints when corrupt input is hit.
	debugDecode = false
)
+
// fixedOnce guards the one-time initialization of fixedHuffmanDecoder
// upon first use.
var fixedOnce sync.Once
var fixedHuffmanDecoder huffmanDecoder
+
// A CorruptInputError reports the presence of corrupt input at a given offset.
type CorruptInputError int64

// Error implements the error interface.
func (e CorruptInputError) Error() string {
	offset := strconv.FormatInt(int64(e), 10)
	return "flate: corrupt input before offset " + offset
}
+
// An InternalError reports an error in the flate code itself.
type InternalError string

// Error implements the error interface.
func (e InternalError) Error() string {
	msg := string(e)
	return "flate: internal error: " + msg
}
+
+// A ReadError reports an error encountered while reading input.
+//
+// Deprecated: No longer returned.
+type ReadError struct {
+ Offset int64 // byte offset where error occurred
+ Err error // error returned by underlying Read
+}
+
+func (e *ReadError) Error() string {
+ return "flate: read error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
+}
+
+// A WriteError reports an error encountered while writing output.
+//
+// Deprecated: No longer returned.
+type WriteError struct {
+ Offset int64 // byte offset where error occurred
+ Err error // error returned by underlying Write
+}
+
+func (e *WriteError) Error() string {
+ return "flate: write error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
+}
+
// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
// to switch to a new underlying Reader. This permits reusing a ReadCloser
// instead of allocating a new one.
type Resetter interface {
	// Reset discards any buffered data and resets the Resetter as if it was
	// newly initialized with the given reader.
	Reset(r io.Reader, dict []byte) error
}
+
+// The data structure for decoding Huffman tables is based on that of
+// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits),
+// For codes smaller than the table width, there are multiple entries
+// (each combination of trailing bits has the same value). For codes
+// larger than the table width, the table contains a link to an overflow
+// table. The width of each entry in the link table is the maximum code
+// size minus the chunk width.
+//
+// Note that you can do a lookup in the table even without all bits
+// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
+// have the property that shorter codes come before longer ones, the
+// bit length estimate in the result is a lower bound on the actual
+// number of bits.
+//
+// See the following:
+// http://www.gzip.org/algorithm.txt
+
+// chunk & 15 is number of bits
+// chunk >> 4 is value, including table link
+
const (
	huffmanChunkBits  = 9                     // log2 of the primary decode table size
	huffmanNumChunks  = 1 << huffmanChunkBits // number of entries in the primary table
	huffmanCountMask  = 15                    // low bits of a chunk hold the code length
	huffmanValueShift = 4                     // the decoded value starts above the count bits
)
+
// huffmanDecoder is a decoding table for one Huffman code: a primary chunk
// table indexed by the low huffmanChunkBits input bits, with overflow link
// tables for longer codes (see the format notes above).
type huffmanDecoder struct {
	maxRead  int                       // the maximum number of bits we can read and not overread
	chunks   *[huffmanNumChunks]uint16 // chunks as described above
	links    [][]uint16                // overflow links
	linkMask uint32                    // mask the width of the link table
}
+
+// Initialize Huffman decoding tables from array of code lengths.
+// Following this function, h is guaranteed to be initialized into a complete
+// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
+// degenerate case where the tree has only a single symbol with length 1. Empty
+// trees are permitted.
+func (h *huffmanDecoder) init(lengths []int) bool {
+ // Sanity enables additional runtime tests during Huffman
+ // table construction. It's intended to be used during
+ // development to supplement the currently ad-hoc unit tests.
+ const sanity = false
+
+ if h.chunks == nil {
+ h.chunks = &[huffmanNumChunks]uint16{}
+ }
+ if h.maxRead != 0 {
+ *h = huffmanDecoder{chunks: h.chunks, links: h.links}
+ }
+
+ // Count number of codes of each length,
+ // compute maxRead and max length.
+ var count [maxCodeLen]int
+ var min, max int
+ for _, n := range lengths {
+ if n == 0 {
+ continue
+ }
+ if min == 0 || n < min {
+ min = n
+ }
+ if n > max {
+ max = n
+ }
+ count[n&maxCodeLenMask]++
+ }
+
+ // Empty tree. The decompressor.huffSym function will fail later if the tree
+ // is used. Technically, an empty tree is only valid for the HDIST tree and
+ // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
+ // is guaranteed to fail since it will attempt to use the tree to decode the
+ // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
+ // guaranteed to fail later since the compressed data section must be
+ // composed of at least one symbol (the end-of-block marker).
+ if max == 0 {
+ return true
+ }
+
+ code := 0
+ var nextcode [maxCodeLen]int
+ for i := min; i <= max; i++ {
+ code <<= 1
+ nextcode[i&maxCodeLenMask] = code
+ code += count[i&maxCodeLenMask]
+ }
+
+ // Check that the coding is complete (i.e., that we've
+ // assigned all 2-to-the-max possible bit sequences).
+ // Exception: To be compatible with zlib, we also need to
+ // accept degenerate single-code codings. See also
+ // TestDegenerateHuffmanCoding.
+ if code != 1< huffmanChunkBits {
+ numLinks := 1 << (uint(max) - huffmanChunkBits)
+ h.linkMask = uint32(numLinks - 1)
+
+ // create link tables
+ link := nextcode[huffmanChunkBits+1] >> 1
+ if cap(h.links) < huffmanNumChunks-link {
+ h.links = make([][]uint16, huffmanNumChunks-link)
+ } else {
+ h.links = h.links[:huffmanNumChunks-link]
+ }
+ for j := uint(link); j < huffmanNumChunks; j++ {
+ reverse := int(bits.Reverse16(uint16(j)))
+ reverse >>= uint(16 - huffmanChunkBits)
+ off := j - uint(link)
+ if sanity && h.chunks[reverse] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ h.chunks[reverse] = uint16(off<>= uint(16 - n)
+ if n <= huffmanChunkBits {
+ for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+ // We should never need to overwrite
+ // an existing chunk. Also, 0 is
+ // never a valid chunk, because the
+ // lower 4 "count" bits should be
+ // between 1 and 15.
+ if sanity && h.chunks[off] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ h.chunks[off] = chunk
+ }
+ } else {
+ j := reverse & (huffmanNumChunks - 1)
+ if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
+ // Longer codes should have been
+ // associated with a link table above.
+ panic("impossible: not an indirect chunk")
+ }
+ value := h.chunks[j] >> huffmanValueShift
+ linktab := h.links[value]
+ reverse >>= huffmanChunkBits
+ for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
+ if sanity && linktab[off] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ linktab[off] = chunk
+ }
+ }
+ }
+
+ if sanity {
+ // Above we've sanity checked that we never overwrote
+ // an existing entry. Here we additionally check that
+ // we filled the tables completely.
+ for i, chunk := range h.chunks {
+ if chunk == 0 {
+ // As an exception, in the degenerate
+ // single-code case, we allow odd
+ // chunks to be missing.
+ if code == 1 && i%2 == 1 {
+ continue
+ }
+ panic("impossible: missing chunk")
+ }
+ }
+ for _, linktab := range h.links {
+ for _, chunk := range linktab {
+ if chunk == 0 {
+ panic("impossible: missing chunk")
+ }
+ }
+ }
+ }
+
+ return true
+}
+
// Reader is the actual read interface needed by NewReader.
// If the passed in io.Reader does not also have ReadByte,
// NewReader will introduce its own buffering.
type Reader interface {
	io.Reader
	io.ByteReader
}
+
// Decompress state. A decompressor is a resumable state machine: f.step
// holds the next phase of decoding and is invoked repeatedly by Read and
// WriteTo until output is available or an error is recorded in f.err.
type decompressor struct {
	// Input source.
	r       Reader
	roffset int64 // bytes consumed from r so far

	// Input bits, in top of b.
	b  uint32
	nb uint // number of valid bits in b

	// Huffman decoders for literal/length, distance.
	h1, h2 huffmanDecoder

	// Length arrays used to define Huffman codes.
	bits     *[maxNumLit + maxNumDist]int
	codebits *[numCodes]int

	// Output history, buffer.
	dict dictDecoder

	// Temporary buffer (avoids repeated allocation).
	buf [4]byte

	// Next step in the decompression,
	// and decompression state.
	step     func(*decompressor)
	stepState int
	final    bool
	err      error
	toRead   []byte // decompressed bytes pending delivery to the caller
	hl, hd   *huffmanDecoder
	copyLen  int
	copyDist int
}
+
// nextBlock reads the 3-bit block header (final flag plus 2-bit type) and
// dispatches to the stored, fixed-Huffman, or dynamic-Huffman block decoder.
func (f *decompressor) nextBlock() {
	for f.nb < 1+2 {
		if f.err = f.moreBits(); f.err != nil {
			return
		}
	}
	f.final = f.b&1 == 1 // set on the last block of the stream
	f.b >>= 1
	typ := f.b & 3
	f.b >>= 2
	f.nb -= 1 + 2
	switch typ {
	case 0:
		// stored (uncompressed) block
		f.dataBlock()
	case 1:
		// compressed, fixed Huffman tables
		f.hl = &fixedHuffmanDecoder
		f.hd = nil
		f.huffmanBlockDecoder()()
	case 2:
		// compressed, dynamic Huffman tables
		if f.err = f.readHuffman(); f.err != nil {
			break
		}
		f.hl = &f.h1
		f.hd = &f.h2
		f.huffmanBlockDecoder()()
	default:
		// 3 is reserved.
		if debugDecode {
			fmt.Println("reserved data block encountered")
		}
		f.err = CorruptInputError(f.roffset)
	}
}
+
// Read implements io.Reader. It first drains any pending decompressed
// bytes in f.toRead, then advances the decode state machine one step at a
// time. A sticky error (including io.EOF) is surfaced together with the
// last buffered bytes, or alone once the buffer is empty.
func (f *decompressor) Read(b []byte) (int, error) {
	for {
		if len(f.toRead) > 0 {
			n := copy(b, f.toRead)
			f.toRead = f.toRead[n:]
			if len(f.toRead) == 0 {
				return n, f.err
			}
			return n, nil
		}
		if f.err != nil {
			return 0, f.err
		}
		f.step(f)
		if f.err != nil && len(f.toRead) == 0 {
			f.toRead = f.dict.readFlush() // Flush what's left in case of error
		}
	}
}
+
// Support the io.WriteTo interface for io.Copy and friends.
// WriteTo repeatedly steps the decoder and writes all decompressed output
// to w, returning the total number of bytes written. io.EOF from the
// decoder is reported as success (nil error).
func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
	total := int64(0)
	flushed := false
	for {
		if len(f.toRead) > 0 {
			n, err := w.Write(f.toRead)
			total += int64(n)
			if err != nil {
				f.err = err
				return total, err
			}
			if n != len(f.toRead) {
				return total, io.ErrShortWrite
			}
			f.toRead = f.toRead[:0]
		}
		if f.err != nil && flushed {
			if f.err == io.EOF {
				return total, nil
			}
			return total, f.err
		}
		if f.err == nil {
			f.step(f)
		}
		if len(f.toRead) == 0 && f.err != nil && !flushed {
			f.toRead = f.dict.readFlush() // Flush what's left in case of error
			flushed = true
		}
	}
}
+
+func (f *decompressor) Close() error {
+ if f.err == io.EOF {
+ return nil
+ }
+ return f.err
+}
+
// RFC 1951 section 3.2.7.
// Compression with dynamic Huffman codes

// codeOrder is the order in which the code-length code lengths are stored
// in the stream.
var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
+
+func (f *decompressor) readHuffman() error {
+ // HLIT[5], HDIST[5], HCLEN[4].
+ for f.nb < 5+5+4 {
+ if err := f.moreBits(); err != nil {
+ return err
+ }
+ }
+ nlit := int(f.b&0x1F) + 257
+ if nlit > maxNumLit {
+ if debugDecode {
+ fmt.Println("nlit > maxNumLit", nlit)
+ }
+ return CorruptInputError(f.roffset)
+ }
+ f.b >>= 5
+ ndist := int(f.b&0x1F) + 1
+ if ndist > maxNumDist {
+ if debugDecode {
+ fmt.Println("ndist > maxNumDist", ndist)
+ }
+ return CorruptInputError(f.roffset)
+ }
+ f.b >>= 5
+ nclen := int(f.b&0xF) + 4
+ // numCodes is 19, so nclen is always valid.
+ f.b >>= 4
+ f.nb -= 5 + 5 + 4
+
+ // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
+ for i := 0; i < nclen; i++ {
+ for f.nb < 3 {
+ if err := f.moreBits(); err != nil {
+ return err
+ }
+ }
+ f.codebits[codeOrder[i]] = int(f.b & 0x7)
+ f.b >>= 3
+ f.nb -= 3
+ }
+ for i := nclen; i < len(codeOrder); i++ {
+ f.codebits[codeOrder[i]] = 0
+ }
+ if !f.h1.init(f.codebits[0:]) {
+ if debugDecode {
+ fmt.Println("init codebits failed")
+ }
+ return CorruptInputError(f.roffset)
+ }
+
+ // HLIT + 257 code lengths, HDIST + 1 code lengths,
+ // using the code length Huffman code.
+ for i, n := 0, nlit+ndist; i < n; {
+ x, err := f.huffSym(&f.h1)
+ if err != nil {
+ return err
+ }
+ if x < 16 {
+ // Actual length.
+ f.bits[i] = x
+ i++
+ continue
+ }
+ // Repeat previous length or zero.
+ var rep int
+ var nb uint
+ var b int
+ switch x {
+ default:
+ return InternalError("unexpected length code")
+ case 16:
+ rep = 3
+ nb = 2
+ if i == 0 {
+ if debugDecode {
+ fmt.Println("i==0")
+ }
+ return CorruptInputError(f.roffset)
+ }
+ b = f.bits[i-1]
+ case 17:
+ rep = 3
+ nb = 3
+ b = 0
+ case 18:
+ rep = 11
+ nb = 7
+ b = 0
+ }
+ for f.nb < nb {
+ if err := f.moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits:", err)
+ }
+ return err
+ }
+ }
+ rep += int(f.b & uint32(1<>= nb
+ f.nb -= nb
+ if i+rep > n {
+ if debugDecode {
+ fmt.Println("i+rep > n", i, rep, n)
+ }
+ return CorruptInputError(f.roffset)
+ }
+ for j := 0; j < rep; j++ {
+ f.bits[i] = b
+ i++
+ }
+ }
+
+ if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
+ if debugDecode {
+ fmt.Println("init2 failed")
+ }
+ return CorruptInputError(f.roffset)
+ }
+
+ // As an optimization, we can initialize the maxRead bits to read at a time
+ // for the HLIT tree to the length of the EOB marker since we know that
+ // every block must terminate with one. This preserves the property that
+ // we never read any extra bytes after the end of the DEFLATE stream.
+ if f.h1.maxRead < f.bits[endBlockMarker] {
+ f.h1.maxRead = f.bits[endBlockMarker]
+ }
+ if !f.final {
+ // If not the final block, the smallest block possible is
+ // a predefined table, BTYPE=01, with a single EOB marker.
+ // This will take up 3 + 7 bits.
+ f.h1.maxRead += 10
+ }
+
+ return nil
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBlockGeneric() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := f.r.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & 31)
+ nb += 8
+ }
+ chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ f.b = b >> (n & 31)
+ f.nb = nb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var n uint // number of bits extra
+ var length int
+ var err error
+ switch {
+ case v < 256:
+ f.dict.writeByte(byte(v))
+ if f.dict.availWrite() == 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanBlockGeneric
+ f.stepState = stateInit
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ n = 0
+ case v < 269:
+ length = v*2 - (265*2 - 11)
+ n = 1
+ case v < 273:
+ length = v*4 - (269*4 - 19)
+ n = 2
+ case v < 277:
+ length = v*8 - (273*8 - 35)
+ n = 3
+ case v < 281:
+ length = v*16 - (277*16 - 67)
+ n = 4
+ case v < 285:
+ length = v*32 - (281*32 - 131)
+ n = 5
+ case v < maxNumLit:
+ length = 258
+ n = 0
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ if n > 0 {
+ for f.nb < n {
+ if err = f.moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ length += int(f.b & uint32(1<>= n
+ f.nb -= n
+ }
+
+ var dist int
+ if f.hd == nil {
+ for f.nb < 5 {
+ if err = f.moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
+ f.b >>= 5
+ f.nb -= 5
+ } else {
+ if dist, err = f.huffSym(f.hd); err != nil {
+ if debugDecode {
+ fmt.Println("huffsym:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << nb
+ for f.nb < nb {
+ if err = f.moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb>= nb
+ f.nb -= nb
+ dist = 1<<(nb+1) + 1 + extra
+ default:
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > f.dict.histSize() {
+ if debugDecode {
+ fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, dist
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if f.dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanBlockGeneric // We need to continue this work
+ f.stepState = stateDict
+ return
+ }
+ goto readLiteral
+ }
+}
+
// Copy a single uncompressed data block from input to output.
// Stored blocks are byte-aligned: a 16-bit LEN and its ones' complement
// are read first, then LEN raw bytes are copied via copyData.
func (f *decompressor) dataBlock() {
	// Uncompressed.
	// Discard current half-byte.
	left := (f.nb) & 7
	f.nb -= left
	f.b >>= left

	offBytes := f.nb >> 3
	// Unfilled values will be overwritten.
	f.buf[0] = uint8(f.b)
	f.buf[1] = uint8(f.b >> 8)
	f.buf[2] = uint8(f.b >> 16)
	f.buf[3] = uint8(f.b >> 24)

	f.roffset += int64(offBytes)
	f.nb, f.b = 0, 0

	// Length then ones-complement of length.
	nr, err := io.ReadFull(f.r, f.buf[offBytes:4])
	f.roffset += int64(nr)
	if err != nil {
		f.err = noEOF(err)
		return
	}
	n := uint16(f.buf[0]) | uint16(f.buf[1])<<8
	nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8
	if nn != ^n {
		if debugDecode {
			ncomp := ^n
			fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp)
		}
		f.err = CorruptInputError(f.roffset)
		return
	}

	if n == 0 {
		// Zero-length stored block: flush and finish immediately.
		f.toRead = f.dict.readFlush()
		f.finishBlock()
		return
	}

	f.copyLen = int(n)
	f.copyData()
}
+
+// copyData copies f.copyLen bytes from the underlying reader into f.hist.
+// It pauses for reads when f.hist is full.
+func (f *decompressor) copyData() {
+ buf := f.dict.writeSlice()
+ if len(buf) > f.copyLen {
+ buf = buf[:f.copyLen]
+ }
+
+ cnt, err := io.ReadFull(f.r, buf)
+ f.roffset += int64(cnt)
+ f.copyLen -= cnt
+ f.dict.writeMark(cnt)
+ if err != nil {
+ f.err = noEOF(err)
+ return
+ }
+
+ if f.dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).copyData
+ return
+ }
+ f.finishBlock()
+}
+
+func (f *decompressor) finishBlock() {
+ if f.final {
+ if f.dict.availRead() > 0 {
+ f.toRead = f.dict.readFlush()
+ }
+ f.err = io.EOF
+ }
+ f.step = (*decompressor).nextBlock
+}
+
+// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
+func noEOF(e error) error {
+ if e == io.EOF {
+ return io.ErrUnexpectedEOF
+ }
+ return e
+}
+
+func (f *decompressor) moreBits() error {
+ c, err := f.r.ReadByte()
+ if err != nil {
+ return noEOF(err)
+ }
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
+ return nil
+}
+
+// Read the next Huffman-encoded symbol from f according to h.
+func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(h.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := f.r.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ return 0, noEOF(err)
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & 31)
+ nb += 8
+ }
+ chunk := h.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return 0, f.err
+ }
+ f.b = b >> (n & 31)
+ f.nb = nb - n
+ return int(chunk >> huffmanValueShift), nil
+ }
+ }
+}
+
+func makeReader(r io.Reader) Reader {
+ if rr, ok := r.(Reader); ok {
+ return rr
+ }
+ return bufio.NewReader(r)
+}
+
+func fixedHuffmanDecoderInit() {
+ fixedOnce.Do(func() {
+ // These come from the RFC section 3.2.6.
+ var bits [288]int
+ for i := 0; i < 144; i++ {
+ bits[i] = 8
+ }
+ for i := 144; i < 256; i++ {
+ bits[i] = 9
+ }
+ for i := 256; i < 280; i++ {
+ bits[i] = 7
+ }
+ for i := 280; i < 288; i++ {
+ bits[i] = 8
+ }
+ fixedHuffmanDecoder.init(bits[:])
+ })
+}
+
+func (f *decompressor) Reset(r io.Reader, dict []byte) error {
+ *f = decompressor{
+ r: makeReader(r),
+ bits: f.bits,
+ codebits: f.codebits,
+ h1: f.h1,
+ h2: f.h2,
+ dict: f.dict,
+ step: (*decompressor).nextBlock,
+ }
+ f.dict.init(maxMatchOffset, dict)
+ return nil
+}
+
+// NewReader returns a new ReadCloser that can be used
+// to read the uncompressed version of r.
+// If r does not also implement io.ByteReader,
+// the decompressor may read more data than necessary from r.
+// It is the caller's responsibility to call Close on the ReadCloser
+// when finished reading.
+//
+// The ReadCloser returned by NewReader also implements Resetter.
+func NewReader(r io.Reader) io.ReadCloser {
+ fixedHuffmanDecoderInit()
+
+ var f decompressor
+ f.r = makeReader(r)
+ f.bits = new([maxNumLit + maxNumDist]int)
+ f.codebits = new([numCodes]int)
+ f.step = (*decompressor).nextBlock
+ f.dict.init(maxMatchOffset, nil)
+ return &f
+}
+
+// NewReaderDict is like NewReader but initializes the reader
+// with a preset dictionary. The returned Reader behaves as if
+// the uncompressed data stream started with the given dictionary,
+// which has already been read. NewReaderDict is typically used
+// to read data compressed by NewWriterDict.
+//
+// The ReadCloser returned by NewReader also implements Resetter.
+func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
+ fixedHuffmanDecoderInit()
+
+ var f decompressor
+ f.r = makeReader(r)
+ f.bits = new([maxNumLit + maxNumDist]int)
+ f.codebits = new([numCodes]int)
+ f.step = (*decompressor).nextBlock
+ f.dict.init(maxMatchOffset, dict)
+ return &f
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/inflate_gen.go
new file mode 100644
index 00000000000..397dc1b1a13
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/inflate_gen.go
@@ -0,0 +1,922 @@
+// Code generated by go generate gen_inflate.go. DO NOT EDIT.
+
+package flate
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "math/bits"
+ "strings"
+)
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBytesBuffer() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(*bytes.Buffer)
+ moreBits := func() error {
+ c, err := fr.ReadByte()
+ if err != nil {
+ return noEOF(err)
+ }
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
+ return nil
+ }
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & 31)
+ nb += 8
+ }
+ chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ f.b = b >> (n & 31)
+ f.nb = nb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var n uint // number of bits extra
+ var length int
+ var err error
+ switch {
+ case v < 256:
+ f.dict.writeByte(byte(v))
+ if f.dict.availWrite() == 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanBytesBuffer
+ f.stepState = stateInit
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ n = 0
+ case v < 269:
+ length = v*2 - (265*2 - 11)
+ n = 1
+ case v < 273:
+ length = v*4 - (269*4 - 19)
+ n = 2
+ case v < 277:
+ length = v*8 - (273*8 - 35)
+ n = 3
+ case v < 281:
+ length = v*16 - (277*16 - 67)
+ n = 4
+ case v < 285:
+ length = v*32 - (281*32 - 131)
+ n = 5
+ case v < maxNumLit:
+ length = 258
+ n = 0
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ if n > 0 {
+ for f.nb < n {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ length += int(f.b & uint32(1<>= n
+ f.nb -= n
+ }
+
+ var dist int
+ if f.hd == nil {
+ for f.nb < 5 {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
+ f.b >>= 5
+ f.nb -= 5
+ } else {
+ if dist, err = f.huffSym(f.hd); err != nil {
+ if debugDecode {
+ fmt.Println("huffsym:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << nb
+ for f.nb < nb {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb>= nb
+ f.nb -= nb
+ dist = 1<<(nb+1) + 1 + extra
+ default:
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > f.dict.histSize() {
+ if debugDecode {
+ fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, dist
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if f.dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work
+ f.stepState = stateDict
+ return
+ }
+ goto readLiteral
+ }
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBytesReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(*bytes.Reader)
+ moreBits := func() error {
+ c, err := fr.ReadByte()
+ if err != nil {
+ return noEOF(err)
+ }
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
+ return nil
+ }
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & 31)
+ nb += 8
+ }
+ chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ f.b = b >> (n & 31)
+ f.nb = nb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var n uint // number of bits extra
+ var length int
+ var err error
+ switch {
+ case v < 256:
+ f.dict.writeByte(byte(v))
+ if f.dict.availWrite() == 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanBytesReader
+ f.stepState = stateInit
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ n = 0
+ case v < 269:
+ length = v*2 - (265*2 - 11)
+ n = 1
+ case v < 273:
+ length = v*4 - (269*4 - 19)
+ n = 2
+ case v < 277:
+ length = v*8 - (273*8 - 35)
+ n = 3
+ case v < 281:
+ length = v*16 - (277*16 - 67)
+ n = 4
+ case v < 285:
+ length = v*32 - (281*32 - 131)
+ n = 5
+ case v < maxNumLit:
+ length = 258
+ n = 0
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ if n > 0 {
+ for f.nb < n {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ length += int(f.b & uint32(1<>= n
+ f.nb -= n
+ }
+
+ var dist int
+ if f.hd == nil {
+ for f.nb < 5 {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
+ f.b >>= 5
+ f.nb -= 5
+ } else {
+ if dist, err = f.huffSym(f.hd); err != nil {
+ if debugDecode {
+ fmt.Println("huffsym:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << nb
+ for f.nb < nb {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb>= nb
+ f.nb -= nb
+ dist = 1<<(nb+1) + 1 + extra
+ default:
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > f.dict.histSize() {
+ if debugDecode {
+ fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, dist
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if f.dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanBytesReader // We need to continue this work
+ f.stepState = stateDict
+ return
+ }
+ goto readLiteral
+ }
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBufioReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(*bufio.Reader)
+ moreBits := func() error {
+ c, err := fr.ReadByte()
+ if err != nil {
+ return noEOF(err)
+ }
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
+ return nil
+ }
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & 31)
+ nb += 8
+ }
+ chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ f.b = b >> (n & 31)
+ f.nb = nb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var n uint // number of bits extra
+ var length int
+ var err error
+ switch {
+ case v < 256:
+ f.dict.writeByte(byte(v))
+ if f.dict.availWrite() == 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanBufioReader
+ f.stepState = stateInit
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ n = 0
+ case v < 269:
+ length = v*2 - (265*2 - 11)
+ n = 1
+ case v < 273:
+ length = v*4 - (269*4 - 19)
+ n = 2
+ case v < 277:
+ length = v*8 - (273*8 - 35)
+ n = 3
+ case v < 281:
+ length = v*16 - (277*16 - 67)
+ n = 4
+ case v < 285:
+ length = v*32 - (281*32 - 131)
+ n = 5
+ case v < maxNumLit:
+ length = 258
+ n = 0
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ if n > 0 {
+ for f.nb < n {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ length += int(f.b & uint32(1<>= n
+ f.nb -= n
+ }
+
+ var dist int
+ if f.hd == nil {
+ for f.nb < 5 {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
+ f.b >>= 5
+ f.nb -= 5
+ } else {
+ if dist, err = f.huffSym(f.hd); err != nil {
+ if debugDecode {
+ fmt.Println("huffsym:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << nb
+ for f.nb < nb {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb>= nb
+ f.nb -= nb
+ dist = 1<<(nb+1) + 1 + extra
+ default:
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > f.dict.histSize() {
+ if debugDecode {
+ fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, dist
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if f.dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanBufioReader // We need to continue this work
+ f.stepState = stateDict
+ return
+ }
+ goto readLiteral
+ }
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanStringsReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(*strings.Reader)
+ moreBits := func() error {
+ c, err := fr.ReadByte()
+ if err != nil {
+ return noEOF(err)
+ }
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
+ return nil
+ }
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & 31)
+ nb += 8
+ }
+ chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ f.b = b >> (n & 31)
+ f.nb = nb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var n uint // number of bits extra
+ var length int
+ var err error
+ switch {
+ case v < 256:
+ f.dict.writeByte(byte(v))
+ if f.dict.availWrite() == 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanStringsReader
+ f.stepState = stateInit
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ n = 0
+ case v < 269:
+ length = v*2 - (265*2 - 11)
+ n = 1
+ case v < 273:
+ length = v*4 - (269*4 - 19)
+ n = 2
+ case v < 277:
+ length = v*8 - (273*8 - 35)
+ n = 3
+ case v < 281:
+ length = v*16 - (277*16 - 67)
+ n = 4
+ case v < 285:
+ length = v*32 - (281*32 - 131)
+ n = 5
+ case v < maxNumLit:
+ length = 258
+ n = 0
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ if n > 0 {
+ for f.nb < n {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ length += int(f.b & uint32(1<>= n
+ f.nb -= n
+ }
+
+ var dist int
+ if f.hd == nil {
+ for f.nb < 5 {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
+ f.b >>= 5
+ f.nb -= 5
+ } else {
+ if dist, err = f.huffSym(f.hd); err != nil {
+ if debugDecode {
+ fmt.Println("huffsym:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << nb
+ for f.nb < nb {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb>= nb
+ f.nb -= nb
+ dist = 1<<(nb+1) + 1 + extra
+ default:
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > f.dict.histSize() {
+ if debugDecode {
+ fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, dist
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if f.dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanStringsReader // We need to continue this work
+ f.stepState = stateDict
+ return
+ }
+ goto readLiteral
+ }
+}
+
+func (f *decompressor) huffmanBlockDecoder() func() {
+ switch f.r.(type) {
+ case *bytes.Buffer:
+ return f.huffmanBytesBuffer
+ case *bytes.Reader:
+ return f.huffmanBytesReader
+ case *bufio.Reader:
+ return f.huffmanBufioReader
+ case *strings.Reader:
+ return f.huffmanStringsReader
+ default:
+ return f.huffmanBlockGeneric
+ }
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level1.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level1.go
new file mode 100644
index 00000000000..1e5eea3968a
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level1.go
@@ -0,0 +1,179 @@
+package flate
+
+import "fmt"
+
+// fastGen maintains the table for matches,
+// and the previous byte block for level 2.
+// This is the generic implementation.
+type fastEncL1 struct {
+ fastGen
+ table [tableSize]tableEntry
+}
+
+// EncodeL1 uses a similar algorithm to level 1
+func (e *fastEncL1) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load3232(src, s)
+
+ for {
+ const skipLog = 5
+ const doEvery = 2
+
+ nextS := s
+ var candidate tableEntry
+ for {
+ nextHash := hash(cv)
+ candidate = e.table[nextHash]
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+
+ now := load6432(src, nextS)
+ e.table[nextHash] = tableEntry{offset: s + e.cur}
+ nextHash = hash(uint32(now))
+
+ offset := s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
+ e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+ break
+ }
+
+ // Do one right away...
+ cv = uint32(now)
+ s = nextS
+ nextS++
+ candidate = e.table[nextHash]
+ now >>= 8
+ e.table[nextHash] = tableEntry{offset: s + e.cur}
+
+ offset = s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
+ e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+ break
+ }
+ cv = uint32(now)
+ s = nextS
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ t := candidate.offset - e.cur
+ l := e.matchlenLong(s+4, t+4, src) + 4
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ emitLiteral(dst, src[nextEmit:s])
+ }
+
+ // Save the match found
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+ if s >= sLimit {
+ // Index first pair after match end.
+ if int(s+l+4) < len(src) {
+ cv := load3232(src, s)
+ e.table[hash(cv)] = tableEntry{offset: s + e.cur}
+ }
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load6432(src, s-2)
+ o := e.cur + s - 2
+ prevHash := hash(uint32(x))
+ e.table[prevHash] = tableEntry{offset: o}
+ x >>= 16
+ currHash := hash(uint32(x))
+ candidate = e.table[currHash]
+ e.table[currHash] = tableEntry{offset: o + 2}
+
+ offset := s - (candidate.offset - e.cur)
+ if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) {
+ cv = uint32(x >> 8)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level2.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level2.go
new file mode 100644
index 00000000000..5b986a1944e
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level2.go
@@ -0,0 +1,205 @@
+package flate
+
+import "fmt"
+
+// fastGen maintains the table for matches,
+// and the previous byte block for level 2.
+// This is the generic implementation.
+type fastEncL2 struct {
+ fastGen
+ table [bTableSize]tableEntry
+}
+
+// Encode uses a similar algorithm to level 1, but is capable
+// of matching across blocks giving better compression at a small slowdown.
+func (e *fastEncL2) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load3232(src, s)
+ for {
+ // When should we start skipping if we haven't found matches in a long while.
+ const skipLog = 5
+ const doEvery = 2
+
+ nextS := s
+ var candidate tableEntry
+ for {
+ nextHash := hash4u(cv, bTableBits)
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidate = e.table[nextHash]
+ now := load6432(src, nextS)
+ e.table[nextHash] = tableEntry{offset: s + e.cur}
+ nextHash = hash4u(uint32(now), bTableBits)
+
+ offset := s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
+ e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+ break
+ }
+
+ // Do one right away...
+ cv = uint32(now)
+ s = nextS
+ nextS++
+ candidate = e.table[nextHash]
+ now >>= 8
+ e.table[nextHash] = tableEntry{offset: s + e.cur}
+
+ offset = s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
+ break
+ }
+ cv = uint32(now)
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ t := candidate.offset - e.cur
+ l := e.matchlenLong(s+4, t+4, src) + 4
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ emitLiteral(dst, src[nextEmit:s])
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ // Index first pair after match end.
+ if int(s+l+4) < len(src) {
+ cv := load3232(src, s)
+ e.table[hash4u(cv, bTableBits)] = tableEntry{offset: s + e.cur}
+ }
+ goto emitRemainder
+ }
+
+ // Store every second hash in-between, but offset by 1.
+ for i := s - l + 2; i < s-5; i += 7 {
+ x := load6432(src, int32(i))
+ nextHash := hash4u(uint32(x), bTableBits)
+ e.table[nextHash] = tableEntry{offset: e.cur + i}
+ // Skip one
+ x >>= 16
+ nextHash = hash4u(uint32(x), bTableBits)
+ e.table[nextHash] = tableEntry{offset: e.cur + i + 2}
+ // Skip one
+ x >>= 16
+ nextHash = hash4u(uint32(x), bTableBits)
+ e.table[nextHash] = tableEntry{offset: e.cur + i + 4}
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 to s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load6432(src, s-2)
+ o := e.cur + s - 2
+ prevHash := hash4u(uint32(x), bTableBits)
+ prevHash2 := hash4u(uint32(x>>8), bTableBits)
+ e.table[prevHash] = tableEntry{offset: o}
+ e.table[prevHash2] = tableEntry{offset: o + 1}
+ currHash := hash4u(uint32(x>>16), bTableBits)
+ candidate = e.table[currHash]
+ e.table[currHash] = tableEntry{offset: o + 2}
+
+ offset := s - (candidate.offset - e.cur)
+ if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) {
+ cv = uint32(x >> 24)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level3.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level3.go
new file mode 100644
index 00000000000..c22b4244a5c
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level3.go
@@ -0,0 +1,229 @@
+package flate
+
+import "fmt"
+
+// fastEncL3 is the level 3 encoder; it keeps two candidates per hash slot.
+type fastEncL3 struct {
+ fastGen
+ table [tableSize]tableEntryPrev
+}
+
+// Encode uses a similar algorithm to level 2, will check up to two candidates.
+func (e *fastEncL3) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 8 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntryPrev{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i]
+ if v.Cur.offset <= minOff {
+ v.Cur.offset = 0
+ } else {
+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+ }
+ if v.Prev.offset <= minOff {
+ v.Prev.offset = 0
+ } else {
+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+ }
+ e.table[i] = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // Skip if too small.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load3232(src, s)
+ for {
+ const skipLog = 6
+ nextS := s
+ var candidate tableEntry
+ for {
+ nextHash := hash(cv)
+ s = nextS
+ nextS = s + 1 + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidates := e.table[nextHash]
+ now := load3232(src, nextS)
+
+ // Safe offset distance until s + 4...
+ minOffset := e.cur + s - (maxMatchOffset - 4)
+ e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}}
+
+ // Check both candidates
+ candidate = candidates.Cur
+ if candidate.offset < minOffset {
+ cv = now
+ // Previous will also be invalid, we have nothing.
+ continue
+ }
+
+ if cv == load3232(src, candidate.offset-e.cur) {
+ if candidates.Prev.offset < minOffset || cv != load3232(src, candidates.Prev.offset-e.cur) {
+ break
+ }
+ // Both match and are valid, pick longest.
+ offset := s - (candidate.offset - e.cur)
+ o2 := s - (candidates.Prev.offset - e.cur)
+ l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:])
+ if l2 > l1 {
+ candidate = candidates.Prev
+ }
+ break
+ } else {
+ // We only check if value mismatches.
+ // Offset will always be invalid in other cases.
+ candidate = candidates.Prev
+ if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) {
+ break
+ }
+ }
+ cv = now
+ }
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ //
+ t := candidate.offset - e.cur
+ l := e.matchlenLong(s+4, t+4, src) + 4
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ emitLiteral(dst, src[nextEmit:s])
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ t += l
+ // Index first pair after match end.
+ if int(t+4) < len(src) && t > 0 {
+ cv := load3232(src, t)
+ nextHash := hash(cv)
+ e.table[nextHash] = tableEntryPrev{
+ Prev: e.table[nextHash].Cur,
+ Cur: tableEntry{offset: e.cur + t},
+ }
+ }
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-3 to s.
+ x := load6432(src, s-3)
+ prevHash := hash(uint32(x))
+ e.table[prevHash] = tableEntryPrev{
+ Prev: e.table[prevHash].Cur,
+ Cur: tableEntry{offset: e.cur + s - 3},
+ }
+ x >>= 8
+ prevHash = hash(uint32(x))
+
+ e.table[prevHash] = tableEntryPrev{
+ Prev: e.table[prevHash].Cur,
+ Cur: tableEntry{offset: e.cur + s - 2},
+ }
+ x >>= 8
+ prevHash = hash(uint32(x))
+
+ e.table[prevHash] = tableEntryPrev{
+ Prev: e.table[prevHash].Cur,
+ Cur: tableEntry{offset: e.cur + s - 1},
+ }
+ x >>= 8
+ currHash := hash(uint32(x))
+ candidates := e.table[currHash]
+ cv = uint32(x)
+ e.table[currHash] = tableEntryPrev{
+ Prev: candidates.Cur,
+ Cur: tableEntry{offset: s + e.cur},
+ }
+
+ // Check both candidates
+ candidate = candidates.Cur
+ minOffset := e.cur + s - (maxMatchOffset - 4)
+
+ if candidate.offset > minOffset && cv != load3232(src, candidate.offset-e.cur) {
+ // We only check if value mismatches.
+ // Offset will always be invalid in other cases.
+ candidate = candidates.Prev
+ if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) {
+ offset := s - (candidate.offset - e.cur)
+ if offset <= maxMatchOffset {
+ continue
+ }
+ }
+ }
+ cv = uint32(x >> 8)
+ s++
+ break
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level4.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level4.go
new file mode 100644
index 00000000000..e62f0c02b1e
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level4.go
@@ -0,0 +1,212 @@
+package flate
+
+import "fmt"
+
+type fastEncL4 struct {
+ fastGen
+ table [tableSize]tableEntry
+ bTable [tableSize]tableEntry
+}
+
+func (e *fastEncL4) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.bTable[:] {
+ e.bTable[i] = tableEntry{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.bTable[:] {
+ v := e.bTable[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.bTable[i].offset = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ for {
+ const skipLog = 6
+ const doEvery = 1
+
+ nextS := s
+ var t int32
+ for {
+ nextHashS := hash4x64(cv, tableBits)
+ nextHashL := hash7(cv, tableBits)
+
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ // Fetch a short+long candidate
+ sCandidate := e.table[nextHashS]
+ lCandidate := e.bTable[nextHashL]
+ next := load6432(src, nextS)
+ entry := tableEntry{offset: s + e.cur}
+ e.table[nextHashS] = entry
+ e.bTable[nextHashL] = entry
+
+ t = lCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) {
+ // We got a long match. Use that.
+ break
+ }
+
+ t = sCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
+ // Found a 4 match...
+ lCandidate = e.bTable[hash7(next, tableBits)]
+
+ // If the next long is a candidate, check if we should use that instead...
+ lOff := nextS - (lCandidate.offset - e.cur)
+ if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) {
+ l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:])
+ if l2 > l1 {
+ s = nextS
+ t = lCandidate.offset - e.cur
+ }
+ }
+ break
+ }
+ cv = next
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ // Extend the 4-byte match as long as possible.
+ l := e.matchlenLong(s+4, t+4, src) + 4
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ emitLiteral(dst, src[nextEmit:s])
+ }
+ if debugDeflate {
+ if t >= s {
+ panic("s-t")
+ }
+ if (s - t) > maxMatchOffset {
+ panic(fmt.Sprintln("mmo", t))
+ }
+ if l < baseMatchLength {
+ panic("bml")
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ // Index first pair after match end.
+ if int(s+8) < len(src) {
+ cv := load6432(src, s)
+ e.table[hash4x64(cv, tableBits)] = tableEntry{offset: s + e.cur}
+ e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur}
+ }
+ goto emitRemainder
+ }
+
+ // Store every 3rd hash in-between
+ if true {
+ i := nextS
+ if i < s-1 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ t2 := tableEntry{offset: t.offset + 1}
+ e.bTable[hash7(cv, tableBits)] = t
+ e.bTable[hash7(cv>>8, tableBits)] = t2
+ e.table[hash4u(uint32(cv>>8), tableBits)] = t2
+
+ i += 3
+ for ; i < s-1; i += 3 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ t2 := tableEntry{offset: t.offset + 1}
+ e.bTable[hash7(cv, tableBits)] = t
+ e.bTable[hash7(cv>>8, tableBits)] = t2
+ e.table[hash4u(uint32(cv>>8), tableBits)] = t2
+ }
+ }
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s.
+ x := load6432(src, s-1)
+ o := e.cur + s - 1
+ prevHashS := hash4x64(x, tableBits)
+ prevHashL := hash7(x, tableBits)
+ e.table[prevHashS] = tableEntry{offset: o}
+ e.bTable[prevHashL] = tableEntry{offset: o}
+ cv = x >> 8
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level5.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level5.go
new file mode 100644
index 00000000000..d513f1ffd37
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level5.go
@@ -0,0 +1,279 @@
+package flate
+
+import "fmt"
+
+type fastEncL5 struct {
+ fastGen
+ table [tableSize]tableEntry
+ bTable [tableSize]tableEntryPrev
+}
+
+func (e *fastEncL5) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.bTable[:] {
+ e.bTable[i] = tableEntryPrev{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.bTable[:] {
+ v := e.bTable[i]
+ if v.Cur.offset <= minOff {
+ v.Cur.offset = 0
+ v.Prev.offset = 0
+ } else {
+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+ if v.Prev.offset <= minOff {
+ v.Prev.offset = 0
+ } else {
+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+ }
+ }
+ e.bTable[i] = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ for {
+ const skipLog = 6
+ const doEvery = 1
+
+ nextS := s
+ var l int32
+ var t int32
+ for {
+ nextHashS := hash4x64(cv, tableBits)
+ nextHashL := hash7(cv, tableBits)
+
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ // Fetch a short+long candidate
+ sCandidate := e.table[nextHashS]
+ lCandidate := e.bTable[nextHashL]
+ next := load6432(src, nextS)
+ entry := tableEntry{offset: s + e.cur}
+ e.table[nextHashS] = entry
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = entry, eLong.Cur
+
+ nextHashS = hash4x64(next, tableBits)
+ nextHashL = hash7(next, tableBits)
+
+ t = lCandidate.Cur.offset - e.cur
+ if s-t < maxMatchOffset {
+ if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ t2 := lCandidate.Prev.offset - e.cur
+ if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
+ l = e.matchlen(s+4, t+4, src) + 4
+ ml1 := e.matchlen(s+4, t2+4, src) + 4
+ if ml1 > l {
+ t = t2
+ l = ml1
+ break
+ }
+ }
+ break
+ }
+ t = lCandidate.Prev.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+ break
+ }
+ }
+
+ t = sCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
+ // Found a 4 match...
+ l = e.matchlen(s+4, t+4, src) + 4
+ lCandidate = e.bTable[nextHashL]
+ // Store the next match
+
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ // If the next long is a candidate, use that...
+ t2 := lCandidate.Cur.offset - e.cur
+ if nextS-t2 < maxMatchOffset {
+ if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ break
+ }
+ }
+ // If the previous long is a candidate, use that...
+ t2 = lCandidate.Prev.offset - e.cur
+ if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ break
+ }
+ }
+ }
+ break
+ }
+ cv = next
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ // Extend the 4-byte match as long as possible.
+ if l == 0 {
+ l = e.matchlenLong(s+4, t+4, src) + 4
+ } else if l == maxMatchLength {
+ l += e.matchlenLong(s+l, t+l, src)
+ }
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ emitLiteral(dst, src[nextEmit:s])
+ }
+ if debugDeflate {
+ if t >= s {
+ panic(fmt.Sprintln("s-t", s, t))
+ }
+ if (s - t) > maxMatchOffset {
+ panic(fmt.Sprintln("mmo", s-t))
+ }
+ if l < baseMatchLength {
+ panic("bml")
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // Store every 3rd hash in-between.
+ if true {
+ const hashEvery = 3
+ i := s - l + 1
+ if i < s-1 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ e.table[hash4x64(cv, tableBits)] = t
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+
+ // Do a long at i+1
+ cv >>= 8
+ t = tableEntry{offset: t.offset + 1}
+ eLong = &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+
+ // We only have enough bits for a short entry at i+2
+ cv >>= 8
+ t = tableEntry{offset: t.offset + 1}
+ e.table[hash4x64(cv, tableBits)] = t
+
+ // Skip one - otherwise we risk hitting 's'
+ i += 4
+ for ; i < s-1; i += hashEvery {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ t2 := tableEntry{offset: t.offset + 1}
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+ e.table[hash4u(uint32(cv>>8), tableBits)] = t2
+ }
+ }
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s.
+ x := load6432(src, s-1)
+ o := e.cur + s - 1
+ prevHashS := hash4x64(x, tableBits)
+ prevHashL := hash7(x, tableBits)
+ e.table[prevHashS] = tableEntry{offset: o}
+ eLong := &e.bTable[prevHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
+ cv = x >> 8
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level6.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level6.go
new file mode 100644
index 00000000000..a52c80ea456
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level6.go
@@ -0,0 +1,282 @@
+package flate
+
+import "fmt"
+
+type fastEncL6 struct {
+ fastGen
+ table [tableSize]tableEntry
+ bTable [tableSize]tableEntryPrev
+}
+
+func (e *fastEncL6) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.bTable[:] {
+ e.bTable[i] = tableEntryPrev{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.bTable[:] {
+ v := e.bTable[i]
+ if v.Cur.offset <= minOff {
+ v.Cur.offset = 0
+ v.Prev.offset = 0
+ } else {
+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+ if v.Prev.offset <= minOff {
+ v.Prev.offset = 0
+ } else {
+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+ }
+ }
+ e.bTable[i] = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ // Repeat MUST be > 1 and within range
+ repeat := int32(1)
+ for {
+ const skipLog = 7
+ const doEvery = 1
+
+ nextS := s
+ var l int32
+ var t int32
+ for {
+ nextHashS := hash4x64(cv, tableBits)
+ nextHashL := hash7(cv, tableBits)
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ // Fetch a short+long candidate
+ sCandidate := e.table[nextHashS]
+ lCandidate := e.bTable[nextHashL]
+ next := load6432(src, nextS)
+ entry := tableEntry{offset: s + e.cur}
+ e.table[nextHashS] = entry
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = entry, eLong.Cur
+
+ // Calculate hashes of 'next'
+ nextHashS = hash4x64(next, tableBits)
+ nextHashL = hash7(next, tableBits)
+
+ t = lCandidate.Cur.offset - e.cur
+ if s-t < maxMatchOffset {
+ if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
+ // Long candidate matches at least 4 bytes.
+
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ // Check the previous long candidate as well.
+ t2 := lCandidate.Prev.offset - e.cur
+ if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
+ l = e.matchlen(s+4, t+4, src) + 4
+ ml1 := e.matchlen(s+4, t2+4, src) + 4
+ if ml1 > l {
+ t = t2
+ l = ml1
+ break
+ }
+ }
+ break
+ }
+ // Current value did not match, but check if previous long value does.
+ t = lCandidate.Prev.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+ break
+ }
+ }
+
+ t = sCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
+ // Found a 4 match...
+ l = e.matchlen(s+4, t+4, src) + 4
+
+ // Look up next long candidate (at nextS)
+ lCandidate = e.bTable[nextHashL]
+
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ // Check repeat at s + repOff
+ const repOff = 1
+ t2 := s - repeat + repOff
+ if load3232(src, t2) == uint32(cv>>(8*repOff)) {
+ ml := e.matchlen(s+4+repOff, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ l = ml
+ s += repOff
+ // Not worth checking more.
+ break
+ }
+ }
+
+ // If the next long is a candidate, use that...
+ t2 = lCandidate.Cur.offset - e.cur
+ if nextS-t2 < maxMatchOffset {
+ if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ // This is ok, but check previous as well.
+ }
+ }
+ // If the previous long is a candidate, use that...
+ t2 = lCandidate.Prev.offset - e.cur
+ if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ break
+ }
+ }
+ }
+ break
+ }
+ cv = next
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ // Extend the 4-byte match as long as possible.
+ if l == 0 {
+ l = e.matchlenLong(s+4, t+4, src) + 4
+ } else if l == maxMatchLength {
+ l += e.matchlenLong(s+l, t+l, src)
+ }
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ emitLiteral(dst, src[nextEmit:s])
+ }
+ if false {
+ if t >= s {
+ panic(fmt.Sprintln("s-t", s, t))
+ }
+ if (s - t) > maxMatchOffset {
+ panic(fmt.Sprintln("mmo", s-t))
+ }
+ if l < baseMatchLength {
+ panic("bml")
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ repeat = s - t
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ // Index after match end.
+ for i := nextS + 1; i < int32(len(src))-8; i += 2 {
+ cv := load6432(src, i)
+ e.table[hash4x64(cv, tableBits)] = tableEntry{offset: i + e.cur}
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur
+ }
+ goto emitRemainder
+ }
+
+ // Store every long hash in-between and every second short.
+ if true {
+ for i := nextS + 1; i < s-1; i += 2 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ t2 := tableEntry{offset: t.offset + 1}
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong2 := &e.bTable[hash7(cv>>8, tableBits)]
+ e.table[hash4x64(cv, tableBits)] = t
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+ eLong2.Cur, eLong2.Prev = t2, eLong2.Cur
+ }
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s.
+ cv = load6432(src, s)
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/stateless.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/stateless.go
new file mode 100644
index 00000000000..53e89912463
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/stateless.go
@@ -0,0 +1,297 @@
+package flate
+
+import (
+ "io"
+ "math"
+ "sync"
+)
+
+const (
+ maxStatelessBlock = math.MaxInt16
+ // dictionary will be taken from maxStatelessBlock, so limit it.
+ maxStatelessDict = 8 << 10
+
+ slTableBits = 13
+ slTableSize = 1 << slTableBits
+ slTableShift = 32 - slTableBits
+)
+
+type statelessWriter struct {
+ dst io.Writer
+ closed bool
+}
+
+func (s *statelessWriter) Close() error {
+ if s.closed {
+ return nil
+ }
+ s.closed = true
+ // Emit EOF block
+ return StatelessDeflate(s.dst, nil, true, nil)
+}
+
+func (s *statelessWriter) Write(p []byte) (n int, err error) {
+ err = StatelessDeflate(s.dst, p, false, nil)
+ if err != nil {
+ return 0, err
+ }
+ return len(p), nil
+}
+
+func (s *statelessWriter) Reset(w io.Writer) {
+ s.dst = w
+ s.closed = false
+}
+
+// NewStatelessWriter will do compression but without maintaining any state
+// between Write calls.
+// There will be no memory kept between Write calls,
+// but compression and speed will be suboptimal.
+// Because of this, the size of actual Write calls will affect output size.
+func NewStatelessWriter(dst io.Writer) io.WriteCloser {
+ return &statelessWriter{dst: dst}
+}
+
+// bitWriterPool contains bit writers that can be reused.
+var bitWriterPool = sync.Pool{
+ New: func() interface{} {
+ return newHuffmanBitWriter(nil)
+ },
+}
+
+// StatelessDeflate allows to compress directly to a Writer without retaining state.
+// When returning everything will be flushed.
+// Up to 8KB of an optional dictionary can be given which is presumed to precede the block.
+// Longer dictionaries will be truncated and will still produce valid output.
+// Sending nil dictionary is perfectly fine.
+func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
+ var dst tokens
+ bw := bitWriterPool.Get().(*huffmanBitWriter)
+ bw.reset(out)
+ defer func() {
+ // don't keep a reference to our output
+ bw.reset(nil)
+ bitWriterPool.Put(bw)
+ }()
+ if eof && len(in) == 0 {
+ // Just write an EOF block.
+ // Could be faster...
+ bw.writeStoredHeader(0, true)
+ bw.flush()
+ return bw.err
+ }
+
+ // Truncate dict
+ if len(dict) > maxStatelessDict {
+ dict = dict[len(dict)-maxStatelessDict:]
+ }
+
+ for len(in) > 0 {
+ todo := in
+ if len(todo) > maxStatelessBlock-len(dict) {
+ todo = todo[:maxStatelessBlock-len(dict)]
+ }
+ in = in[len(todo):]
+ uncompressed := todo
+ if len(dict) > 0 {
+ // combine dict and source
+ bufLen := len(todo) + len(dict)
+ combined := make([]byte, bufLen)
+ copy(combined, dict)
+ copy(combined[len(dict):], todo)
+ todo = combined
+ }
+ // Compress
+ statelessEnc(&dst, todo, int16(len(dict)))
+ isEof := eof && len(in) == 0
+
+ if dst.n == 0 {
+ bw.writeStoredHeader(len(uncompressed), isEof)
+ if bw.err != nil {
+ return bw.err
+ }
+ bw.writeBytes(uncompressed)
+ } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 {
+ // If we removed less than 1/16th, huffman compress the block.
+ bw.writeBlockHuff(isEof, uncompressed, len(in) == 0)
+ } else {
+ bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0)
+ }
+ if len(in) > 0 {
+ // Retain a dict if we have more
+ dict = todo[len(todo)-maxStatelessDict:]
+ dst.Reset()
+ }
+ if bw.err != nil {
+ return bw.err
+ }
+ }
+ if !eof {
+ // Align, only a stored block can do that.
+ bw.writeStoredHeader(0, false)
+ }
+ bw.flush()
+ return bw.err
+}
+
+func hashSL(u uint32) uint32 {
+ return (u * 0x1e35a7bd) >> slTableShift
+}
+
+func load3216(b []byte, i int16) uint32 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:4]
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load6416(b []byte, i int16) uint64 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:8]
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func statelessEnc(dst *tokens, src []byte, startAt int16) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ type tableEntry struct {
+ offset int16
+ }
+
+ var table [slTableSize]tableEntry
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src)-int(startAt) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = 0
+ return
+ }
+ // Index until startAt
+ if startAt > 0 {
+ cv := load3232(src, 0)
+ for i := int16(0); i < startAt; i++ {
+ table[hashSL(cv)] = tableEntry{offset: i}
+ cv = (cv >> 8) | (uint32(src[i+4]) << 24)
+ }
+ }
+
+ s := startAt + 1
+ nextEmit := startAt
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int16(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load3216(src, s)
+
+ for {
+ const skipLog = 5
+ const doEvery = 2
+
+ nextS := s
+ var candidate tableEntry
+ for {
+ nextHash := hashSL(cv)
+ candidate = table[nextHash]
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit || nextS <= 0 {
+ goto emitRemainder
+ }
+
+ now := load6416(src, nextS)
+ table[nextHash] = tableEntry{offset: s}
+ nextHash = hashSL(uint32(now))
+
+ if cv == load3216(src, candidate.offset) {
+ table[nextHash] = tableEntry{offset: nextS}
+ break
+ }
+
+ // Do one right away...
+ cv = uint32(now)
+ s = nextS
+ nextS++
+ candidate = table[nextHash]
+ now >>= 8
+ table[nextHash] = tableEntry{offset: s}
+
+ if cv == load3216(src, candidate.offset) {
+ table[nextHash] = tableEntry{offset: nextS}
+ break
+ }
+ cv = uint32(now)
+ s = nextS
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ t := candidate.offset
+ l := int16(matchLen(src[s+4:], src[t+4:]) + 4)
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ emitLiteral(dst, src[nextEmit:s])
+ }
+
+ // Save the match found
+ dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load6416(src, s-2)
+ o := s - 2
+ prevHash := hashSL(uint32(x))
+ table[prevHash] = tableEntry{offset: o}
+ x >>= 16
+ currHash := hashSL(uint32(x))
+ candidate = table[currHash]
+ table[currHash] = tableEntry{offset: o + 2}
+
+ if uint32(x) != load3216(src, candidate.offset) {
+ cv = uint32(x >> 8)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/token.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/token.go
new file mode 100644
index 00000000000..f9abf606d67
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/token.go
@@ -0,0 +1,375 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+)
+
+const (
+ // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused
+ // 8 bits: xlength = length - MIN_MATCH_LENGTH
+ // 22 bits xoffset = offset - MIN_OFFSET_SIZE, or literal
+ lengthShift = 22
+ offsetMask = 1<maxnumlit
+ offHist [32]uint16 // offset codes
+ litHist [256]uint16 // codes 0->255
+ n uint16 // Must be able to contain maxStoreBlockSize
+ tokens [maxStoreBlockSize + 1]token
+}
+
+func (t *tokens) Reset() {
+ if t.n == 0 {
+ return
+ }
+ t.n = 0
+ t.nLits = 0
+ for i := range t.litHist[:] {
+ t.litHist[i] = 0
+ }
+ for i := range t.extraHist[:] {
+ t.extraHist[i] = 0
+ }
+ for i := range t.offHist[:] {
+ t.offHist[i] = 0
+ }
+}
+
+func (t *tokens) Fill() {
+ if t.n == 0 {
+ return
+ }
+ for i, v := range t.litHist[:] {
+ if v == 0 {
+ t.litHist[i] = 1
+ t.nLits++
+ }
+ }
+ for i, v := range t.extraHist[:literalCount-256] {
+ if v == 0 {
+ t.nLits++
+ t.extraHist[i] = 1
+ }
+ }
+ for i, v := range t.offHist[:offsetCodeCount] {
+ if v == 0 {
+ t.offHist[i] = 1
+ }
+ }
+}
+
+func indexTokens(in []token) tokens {
+ var t tokens
+ t.indexTokens(in)
+ return t
+}
+
+func (t *tokens) indexTokens(in []token) {
+ t.Reset()
+ for _, tok := range in {
+ if tok < matchType {
+ t.AddLiteral(tok.literal())
+ continue
+ }
+ t.AddMatch(uint32(tok.length()), tok.offset())
+ }
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+func emitLiteral(dst *tokens, lit []byte) {
+ ol := int(dst.n)
+ for i, v := range lit {
+ dst.tokens[(i+ol)&maxStoreBlockSize] = token(v)
+ dst.litHist[v]++
+ }
+ dst.n += uint16(len(lit))
+ dst.nLits += len(lit)
+}
+
+func (t *tokens) AddLiteral(lit byte) {
+ t.tokens[t.n] = token(lit)
+ t.litHist[lit]++
+ t.n++
+ t.nLits++
+}
+
+// from https://stackoverflow.com/a/28730362
+func mFastLog2(val float32) float32 {
+ ux := int32(math.Float32bits(val))
+ log2 := (float32)(((ux >> 23) & 255) - 128)
+ ux &= -0x7f800001
+ ux += 127 << 23
+ uval := math.Float32frombits(uint32(ux))
+ log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759
+ return log2
+}
+
+// EstimatedBits will return an minimum size estimated by an *optimal*
+// compression of the block.
+// The size of the block
+func (t *tokens) EstimatedBits() int {
+ shannon := float32(0)
+ bits := int(0)
+ nMatches := 0
+ if t.nLits > 0 {
+ invTotal := 1.0 / float32(t.nLits)
+ for _, v := range t.litHist[:] {
+ if v > 0 {
+ n := float32(v)
+ shannon += -mFastLog2(n*invTotal) * n
+ }
+ }
+ // Just add 15 for EOB
+ shannon += 15
+ for i, v := range t.extraHist[1 : literalCount-256] {
+ if v > 0 {
+ n := float32(v)
+ shannon += -mFastLog2(n*invTotal) * n
+ bits += int(lengthExtraBits[i&31]) * int(v)
+ nMatches += int(v)
+ }
+ }
+ }
+ if nMatches > 0 {
+ invTotal := 1.0 / float32(nMatches)
+ for i, v := range t.offHist[:offsetCodeCount] {
+ if v > 0 {
+ n := float32(v)
+ shannon += -mFastLog2(n*invTotal) * n
+ bits += int(offsetExtraBits[i&31]) * int(v)
+ }
+ }
+ }
+ return int(shannon) + bits
+}
+
+// AddMatch adds a match to the tokens.
+// This function is very sensitive to inlining and right on the border.
+func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
+ if debugDeflate {
+ if xlength >= maxMatchLength+baseMatchLength {
+ panic(fmt.Errorf("invalid length: %v", xlength))
+ }
+ if xoffset >= maxMatchOffset+baseMatchOffset {
+ panic(fmt.Errorf("invalid offset: %v", xoffset))
+ }
+ }
+ t.nLits++
+ lengthCode := lengthCodes1[uint8(xlength)] & 31
+ t.tokens[t.n] = token(matchType | xlength<= maxMatchOffset+baseMatchOffset {
+ panic(fmt.Errorf("invalid offset: %v", xoffset))
+ }
+ }
+ oc := offsetCode(xoffset) & 31
+ for xlength > 0 {
+ xl := xlength
+ if xl > 258 {
+ // We need to have at least baseMatchLength left over for next loop.
+ xl = 258 - baseMatchLength
+ }
+ xlength -= xl
+ xl -= 3
+ t.nLits++
+ lengthCode := lengthCodes1[uint8(xl)] & 31
+ t.tokens[t.n] = token(matchType | uint32(xl)<> lengthShift) }
+
+// The code is never more than 8 bits, but is returned as uint32 for convenience.
+func lengthCode(len uint8) uint32 { return uint32(lengthCodes[len]) }
+
+// Returns the offset code corresponding to a specific offset
+func offsetCode(off uint32) uint32 {
+ if false {
+ if off < uint32(len(offsetCodes)) {
+ return offsetCodes[off&255]
+ } else if off>>7 < uint32(len(offsetCodes)) {
+ return offsetCodes[(off>>7)&255] + 14
+ } else {
+ return offsetCodes[(off>>14)&255] + 28
+ }
+ }
+ if off < uint32(len(offsetCodes)) {
+ return offsetCodes[uint8(off)]
+ }
+ return offsetCodes14[uint8(off>>7)]
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/.gitignore b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/.gitignore
new file mode 100644
index 00000000000..f1c181ec9c5
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/.gitignore
@@ -0,0 +1,12 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/LICENSE b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/LICENSE
new file mode 100644
index 00000000000..7364c76bad1
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 sachin shinde
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/logging.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/logging.go
new file mode 100644
index 00000000000..12d377d8056
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/logging.go
@@ -0,0 +1,82 @@
+package logging
+
+import (
+ "log"
+ "io"
+ "io/ioutil"
+ "os"
+)
+
+type Logger struct {
+ Name string
+ Trace *log.Logger
+ Info *log.Logger
+ Warning *log.Logger
+ Error *log.Logger
+ level LoggingLevel
+}
+
+var loggers = make(map[string]Logger)
+
+func GetLogger(name string) Logger {
+ return New(name, os.Stdout, os.Stdout, os.Stdout, os.Stderr)
+}
+
+func (logger Logger) SetLevel(level LoggingLevel) Logger{
+ switch level {
+ case TRACE:
+ logger.Trace.SetOutput(os.Stdout);
+ logger.Info.SetOutput(os.Stdout);
+ logger.Warning.SetOutput(os.Stdout);
+ logger.Error.SetOutput(os.Stderr);
+ case INFO:
+ logger.Trace.SetOutput(ioutil.Discard);
+ logger.Info.SetOutput(os.Stdout);
+ logger.Warning.SetOutput(os.Stdout);
+ logger.Error.SetOutput(os.Stderr);
+ case WARNING:
+ logger.Trace.SetOutput(ioutil.Discard);
+ logger.Info.SetOutput(ioutil.Discard);
+ logger.Warning.SetOutput(os.Stdout);
+ logger.Error.SetOutput(os.Stderr);
+ case ERROR:
+ logger.Trace.SetOutput(ioutil.Discard);
+ logger.Info.SetOutput(ioutil.Discard);
+ logger.Warning.SetOutput(ioutil.Discard);
+ logger.Error.SetOutput(os.Stderr);
+ case OFF:
+ logger.Trace.SetOutput(ioutil.Discard);
+ logger.Info.SetOutput(ioutil.Discard);
+ logger.Warning.SetOutput(ioutil.Discard);
+ logger.Error.SetOutput(ioutil.Discard);
+ }
+ return logger;
+}
+
+func (logger Logger) GetLevel() LoggingLevel {
+ return logger.level;
+}
+
+func New(
+ name string,
+ traceHandle io.Writer,
+ infoHandle io.Writer,
+ warningHandle io.Writer,
+ errorHandle io.Writer) Logger {
+ loggers[name] = Logger{
+ Name: name,
+ Trace: log.New(traceHandle,
+ "TRACE: ",
+ log.Ldate|log.Ltime|log.Lshortfile),
+ Info: log.New(infoHandle,
+ "INFO: ",
+ log.Ldate|log.Ltime|log.Lshortfile),
+ Warning: log.New(warningHandle,
+ "WARNING: ",
+ log.Ldate|log.Ltime|log.Lshortfile),
+ Error: log.New(errorHandle,
+ "ERROR: ",
+ log.Ldate|log.Ltime|log.Lshortfile),
+ }
+ return loggers[name]
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/loggingL.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/loggingL.go
new file mode 100644
index 00000000000..aab5a8567af
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/loggingL.go
@@ -0,0 +1,13 @@
+package logging
+
+type LoggingLevel int
+
+//go:generate stringer -type=LoggingLevel
+
+const (
+ TRACE LoggingLevel = iota
+ INFO
+ WARNING
+ ERROR
+ OFF
+)
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/logginglevel_string.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/logginglevel_string.go
new file mode 100644
index 00000000000..9f24f0acbfe
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/logginglevel_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=LoggingLevel"; DO NOT EDIT.
+
+package logging
+
+import "strconv"
+
+const _LoggingLevel_name = "TRACEINFOWARNINGERROROFF"
+
+var _LoggingLevel_index = [...]uint8{0, 5, 9, 16, 21, 24}
+
+func (i LoggingLevel) String() string {
+ if i < 0 || i >= LoggingLevel(len(_LoggingLevel_index)-1) {
+ return "LoggingLevel(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _LoggingLevel_name[_LoggingLevel_index[i]:_LoggingLevel_index[i+1]]
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/.gitignore b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/.gitignore
new file mode 100644
index 00000000000..9a289397844
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/.gitignore
@@ -0,0 +1,21 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+# ignore build under build directory
+build/
+bin/
+
+#ignore any IDE based files
+.idea/**
\ No newline at end of file
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/LICENSE b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/LICENSE
new file mode 100644
index 00000000000..95ab2c9a687
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2018 Sachin Shinde
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/README.md b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/README.md
new file mode 100644
index 00000000000..2439bc6a7e1
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/README.md
@@ -0,0 +1,157 @@
+# GoWebsocket
+Gorilla websocket based simplified client implementation in GO.
+
+Overview
+--------
+This client provides following easy to implement functionality
+- Support for emitting and receiving text and binary data
+- Data compression
+- Concurrency control
+- Proxy support
+- Setting request headers
+- Subprotocols support
+- SSL verification enable/disable
+
+To install use
+
+```markdown
+ go get github.com/sacOO7/gowebsocket
+```
+
+Description
+-----------
+
+Create instance of `Websocket` by passing url of websocket-server end-point
+
+```go
+ //Create a client instance
+ socket := gowebsocket.New("ws://echo.websocket.org/")
+
+```
+
+**Important Note** : url to websocket server must be specified with either **ws** or **wss**.
+
+#### Connecting to server
+- For connecting to server:
+
+```go
+ //This will send websocket handshake request to socketcluster-server
+ socket.Connect()
+```
+
+#### Registering All Listeners
+```go
+ package main
+
+ import (
+ "log"
+ "github.com/sacOO7/gowebsocket"
+ "os"
+ "os/signal"
+ )
+
+ func main() {
+
+ interrupt := make(chan os.Signal, 1)
+ signal.Notify(interrupt, os.Interrupt)
+
+ socket := gowebsocket.New("ws://echo.websocket.org/");
+
+ socket.OnConnected = func(socket gowebsocket.Socket) {
+ log.Println("Connected to server");
+ };
+
+ socket.OnConnectError = func(err error, socket gowebsocket.Socket) {
+ log.Println("Recieved connect error ", err)
+ };
+
+ socket.OnTextMessage = func(message string, socket gowebsocket.Socket) {
+ log.Println("Recieved message " + message)
+ };
+
+ socket.OnBinaryMessage = func(data [] byte, socket gowebsocket.Socket) {
+ log.Println("Recieved binary data ", data)
+ };
+
+ socket.OnPingReceived = func(data string, socket gowebsocket.Socket) {
+ log.Println("Recieved ping " + data)
+ };
+
+ socket.OnPongReceived = func(data string, socket gowebsocket.Socket) {
+ log.Println("Recieved pong " + data)
+ };
+
+ socket.OnDisconnected = func(err error, socket gowebsocket.Socket) {
+ log.Println("Disconnected from server ")
+ return
+ };
+
+ socket.Connect()
+
+ for {
+ select {
+ case <-interrupt:
+ log.Println("interrupt")
+ socket.Close()
+ return
+ }
+ }
+ }
+
+```
+
+#### Sending Text message
+
+```go
+ socket.SendText("Hi there, this is my sample test message")
+```
+
+#### Sending Binary data
+```go
+ token := make([]byte, 4)
+ // rand.Read(token) putting some random value in token
+ socket.SendBinary(token)
+```
+
+#### Closing the connection with server
+```go
+ socket.Close()
+```
+
+#### Setting request headers
+```go
+ socket.RequestHeader.Set("Accept-Encoding","gzip, deflate, sdch")
+ socket.RequestHeader.Set("Accept-Language","en-US,en;q=0.8")
+ socket.RequestHeader.Set("Pragma","no-cache")
+ socket.RequestHeader.Set("User-Agent","Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36")
+
+```
+
+#### Setting proxy server
+- It can be set using connectionOptions by providing url to proxy server
+
+```go
+ socket.ConnectionOptions = gowebsocket.ConnectionOptions {
+ Proxy: gowebsocket.BuildProxy("http://example.com"),
+ }
+```
+
+#### Setting data compression, ssl verification and subprotocols
+
+- It can be set using connectionOptions inside socket
+
+```go
+ socket.ConnectionOptions = gowebsocket.ConnectionOptions {
+ UseSSL:true,
+ UseCompression:true,
+ Subprotocols: [] string{"chat","superchat"},
+ }
+```
+
+- ConnectionOptions needs to be applied before connecting to server
+- Please checkout [**examples/gowebsocket**](!https://github.com/sacOO7/GoWebsocket/tree/master/examples/gowebsocket) directory for detailed code..
+
+License
+-------
+Apache License, Version 2.0
+
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/gowebsocket.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/gowebsocket.go
new file mode 100644
index 00000000000..1ea2b0d7a71
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/gowebsocket.go
@@ -0,0 +1,186 @@
+package gowebsocket
+
+import (
+ "github.com/gorilla/websocket"
+ "net/http"
+ "errors"
+ "crypto/tls"
+ "net/url"
+ "sync"
+ "github.com/sacOO7/go-logger"
+ "reflect"
+)
+
+type Empty struct {
+}
+
+var logger = logging.GetLogger(reflect.TypeOf(Empty{}).PkgPath()).SetLevel(logging.OFF)
+
+func (socket Socket) EnableLogging() {
+ logger.SetLevel(logging.TRACE)
+}
+
+func (socket Socket) GetLogger() logging.Logger {
+ return logger;
+}
+
+type Socket struct {
+ Conn *websocket.Conn
+ WebsocketDialer *websocket.Dialer
+ Url string
+ ConnectionOptions ConnectionOptions
+ RequestHeader http.Header
+ OnConnected func(socket Socket)
+ OnTextMessage func(message string, socket Socket)
+ OnBinaryMessage func(data [] byte, socket Socket)
+ OnConnectError func(err error, socket Socket)
+ OnDisconnected func(err error, socket Socket)
+ OnPingReceived func(data string, socket Socket)
+ OnPongReceived func(data string, socket Socket)
+ IsConnected bool
+ sendMu *sync.Mutex // Prevent "concurrent write to websocket connection"
+ receiveMu *sync.Mutex
+}
+
+type ConnectionOptions struct {
+ UseCompression bool
+ UseSSL bool
+ Proxy func(*http.Request) (*url.URL, error)
+ Subprotocols [] string
+}
+
+// todo Yet to be done
+type ReconnectionOptions struct {
+}
+
+func New(url string) Socket {
+ return Socket{
+ Url: url,
+ RequestHeader: http.Header{},
+ ConnectionOptions: ConnectionOptions{
+ UseCompression: false,
+ UseSSL: true,
+ },
+ WebsocketDialer: &websocket.Dialer{},
+ sendMu: &sync.Mutex{},
+ receiveMu: &sync.Mutex{},
+ }
+}
+
+func (socket *Socket) setConnectionOptions() {
+ socket.WebsocketDialer.EnableCompression = socket.ConnectionOptions.UseCompression
+ socket.WebsocketDialer.TLSClientConfig = &tls.Config{InsecureSkipVerify: socket.ConnectionOptions.UseSSL}
+ socket.WebsocketDialer.Proxy = socket.ConnectionOptions.Proxy
+ socket.WebsocketDialer.Subprotocols = socket.ConnectionOptions.Subprotocols
+}
+
+func (socket *Socket) Connect() {
+ var err error;
+ socket.setConnectionOptions()
+
+ socket.Conn, _, err = socket.WebsocketDialer.Dial(socket.Url, socket.RequestHeader)
+
+ if err != nil {
+ logger.Error.Println("Error while connecting to server ", err)
+ socket.IsConnected = false
+ if socket.OnConnectError != nil {
+ socket.OnConnectError(err, *socket)
+ }
+ return
+ }
+
+ logger.Info.Println("Connected to server")
+
+ if socket.OnConnected != nil {
+ socket.IsConnected = true
+ socket.OnConnected(*socket)
+ }
+
+ defaultPingHandler := socket.Conn.PingHandler()
+ socket.Conn.SetPingHandler(func(appData string) error {
+ logger.Trace.Println("Received PING from server")
+ if socket.OnPingReceived != nil {
+ socket.OnPingReceived(appData, *socket)
+ }
+ return defaultPingHandler(appData)
+ })
+
+ defaultPongHandler := socket.Conn.PongHandler()
+ socket.Conn.SetPongHandler(func(appData string) error {
+ logger.Trace.Println("Received PONG from server")
+ if socket.OnPongReceived != nil {
+ socket.OnPongReceived(appData, *socket)
+ }
+ return defaultPongHandler(appData)
+ })
+
+ defaultCloseHandler := socket.Conn.CloseHandler()
+ socket.Conn.SetCloseHandler(func(code int, text string) error {
+ result := defaultCloseHandler(code, text)
+ logger.Warning.Println("Disconnected from server ", result)
+ if socket.OnDisconnected != nil {
+ socket.IsConnected = false
+ socket.OnDisconnected(errors.New(text), *socket)
+ }
+ return result
+ })
+
+ go func() {
+ for {
+ socket.receiveMu.Lock()
+ messageType, message, err := socket.Conn.ReadMessage()
+ socket.receiveMu.Unlock()
+ if err != nil {
+ logger.Error.Println("read:", err)
+ return
+ }
+ logger.Info.Println("recv: %s", message)
+
+ switch messageType {
+ case websocket.TextMessage:
+ if socket.OnTextMessage != nil {
+ socket.OnTextMessage(string(message), *socket)
+ }
+ case websocket.BinaryMessage:
+ if socket.OnBinaryMessage != nil {
+ socket.OnBinaryMessage(message, *socket)
+ }
+ }
+ }
+ }()
+}
+
+func (socket *Socket) SendText(message string) {
+ err := socket.send(websocket.TextMessage, [] byte (message))
+ if err != nil {
+ logger.Error.Println("write:", err)
+ return
+ }
+}
+
+func (socket *Socket) SendBinary(data [] byte) {
+ err := socket.send(websocket.BinaryMessage, data)
+ if err != nil {
+ logger.Error.Println("write:", err)
+ return
+ }
+}
+
+func (socket *Socket) send(messageType int, data [] byte) error {
+ socket.sendMu.Lock()
+ err := socket.Conn.WriteMessage(messageType, data)
+ socket.sendMu.Unlock()
+ return err
+}
+
+func (socket *Socket) Close() {
+ err := socket.send(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
+ if err != nil {
+ logger.Error.Println("write close:", err)
+ }
+ socket.Conn.Close()
+ if socket.OnDisconnected != nil {
+ socket.IsConnected = false
+ socket.OnDisconnected(err, *socket)
+ }
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/utils.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/utils.go
new file mode 100644
index 00000000000..d8702ebb6df
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/utils.go
@@ -0,0 +1,15 @@
+package gowebsocket
+
+import (
+ "net/http"
+ "net/url"
+ "log"
+)
+
+func BuildProxy(Url string) func(*http.Request) (*url.URL, error) {
+ uProxy, err := url.Parse(Url)
+ if err != nil {
+ log.Fatal("Error while parsing url ", err)
+ }
+ return http.ProxyURL(uProxy)
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/AUTHORS b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/AUTHORS
new file mode 100644
index 00000000000..15167cd746c
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/CONTRIBUTORS b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/CONTRIBUTORS
new file mode 100644
index 00000000000..1c4577e9680
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/LICENSE b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/LICENSE
new file mode 100644
index 00000000000..6a66aea5eaf
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/PATENTS b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/PATENTS
new file mode 100644
index 00000000000..733099041f8
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/client.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/client.go
new file mode 100644
index 00000000000..69a4ac7eefe
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/client.go
@@ -0,0 +1,106 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+)
+
+// DialError is an error that occurs while dialling a websocket server.
+type DialError struct {
+ *Config
+ Err error
+}
+
+func (e *DialError) Error() string {
+ return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error()
+}
+
+// NewConfig creates a new WebSocket config for client connection.
+func NewConfig(server, origin string) (config *Config, err error) {
+ config = new(Config)
+ config.Version = ProtocolVersionHybi13
+ config.Location, err = url.ParseRequestURI(server)
+ if err != nil {
+ return
+ }
+ config.Origin, err = url.ParseRequestURI(origin)
+ if err != nil {
+ return
+ }
+ config.Header = http.Header(make(map[string][]string))
+ return
+}
+
+// NewClient creates a new WebSocket client connection over rwc.
+func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) {
+ br := bufio.NewReader(rwc)
+ bw := bufio.NewWriter(rwc)
+ err = hybiClientHandshake(config, br, bw)
+ if err != nil {
+ return
+ }
+ buf := bufio.NewReadWriter(br, bw)
+ ws = newHybiClientConn(config, buf, rwc)
+ return
+}
+
+// Dial opens a new client connection to a WebSocket.
+func Dial(url_, protocol, origin string) (ws *Conn, err error) {
+ config, err := NewConfig(url_, origin)
+ if err != nil {
+ return nil, err
+ }
+ if protocol != "" {
+ config.Protocol = []string{protocol}
+ }
+ return DialConfig(config)
+}
+
+var portMap = map[string]string{
+ "ws": "80",
+ "wss": "443",
+}
+
+func parseAuthority(location *url.URL) string {
+ if _, ok := portMap[location.Scheme]; ok {
+ if _, _, err := net.SplitHostPort(location.Host); err != nil {
+ return net.JoinHostPort(location.Host, portMap[location.Scheme])
+ }
+ }
+ return location.Host
+}
+
+// DialConfig opens a new client connection to a WebSocket with a config.
+func DialConfig(config *Config) (ws *Conn, err error) {
+ var client net.Conn
+ if config.Location == nil {
+ return nil, &DialError{config, ErrBadWebSocketLocation}
+ }
+ if config.Origin == nil {
+ return nil, &DialError{config, ErrBadWebSocketOrigin}
+ }
+ dialer := config.Dialer
+ if dialer == nil {
+ dialer = &net.Dialer{}
+ }
+ client, err = dialWithDialer(dialer, config)
+ if err != nil {
+ goto Error
+ }
+ ws, err = NewClient(config, client)
+ if err != nil {
+ client.Close()
+ goto Error
+ }
+ return
+
+Error:
+ return nil, &DialError{config, err}
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/dial.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/dial.go
new file mode 100644
index 00000000000..2dab943a489
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/dial.go
@@ -0,0 +1,24 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "crypto/tls"
+ "net"
+)
+
+func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) {
+ switch config.Location.Scheme {
+ case "ws":
+ conn, err = dialer.Dial("tcp", parseAuthority(config.Location))
+
+ case "wss":
+ conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig)
+
+ default:
+ err = ErrBadScheme
+ }
+ return
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/hybi.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/hybi.go
new file mode 100644
index 00000000000..8cffdd16c91
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/hybi.go
@@ -0,0 +1,583 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+// This file implements a protocol of hybi draft.
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/base64"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+const (
+ websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
+
+ closeStatusNormal = 1000
+ closeStatusGoingAway = 1001
+ closeStatusProtocolError = 1002
+ closeStatusUnsupportedData = 1003
+ closeStatusFrameTooLarge = 1004
+ closeStatusNoStatusRcvd = 1005
+ closeStatusAbnormalClosure = 1006
+ closeStatusBadMessageData = 1007
+ closeStatusPolicyViolation = 1008
+ closeStatusTooBigData = 1009
+ closeStatusExtensionMismatch = 1010
+
+ maxControlFramePayloadLength = 125
+)
+
+var (
+ ErrBadMaskingKey = &ProtocolError{"bad masking key"}
+ ErrBadPongMessage = &ProtocolError{"bad pong message"}
+ ErrBadClosingStatus = &ProtocolError{"bad closing status"}
+ ErrUnsupportedExtensions = &ProtocolError{"unsupported extensions"}
+ ErrNotImplemented = &ProtocolError{"not implemented"}
+
+ handshakeHeader = map[string]bool{
+ "Host": true,
+ "Upgrade": true,
+ "Connection": true,
+ "Sec-Websocket-Key": true,
+ "Sec-Websocket-Origin": true,
+ "Sec-Websocket-Version": true,
+ "Sec-Websocket-Protocol": true,
+ "Sec-Websocket-Accept": true,
+ }
+)
+
+// A hybiFrameHeader is a frame header as defined in hybi draft.
+type hybiFrameHeader struct {
+ Fin bool
+ Rsv [3]bool
+ OpCode byte
+ Length int64
+ MaskingKey []byte
+
+ data *bytes.Buffer
+}
+
+// A hybiFrameReader is a reader for hybi frame.
+type hybiFrameReader struct {
+ reader io.Reader
+
+ header hybiFrameHeader
+ pos int64
+ length int
+}
+
+func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) {
+ n, err = frame.reader.Read(msg)
+ if frame.header.MaskingKey != nil {
+ for i := 0; i < n; i++ {
+ msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4]
+ frame.pos++
+ }
+ }
+ return n, err
+}
+
+func (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode }
+
+func (frame *hybiFrameReader) HeaderReader() io.Reader {
+ if frame.header.data == nil {
+ return nil
+ }
+ if frame.header.data.Len() == 0 {
+ return nil
+ }
+ return frame.header.data
+}
+
+func (frame *hybiFrameReader) TrailerReader() io.Reader { return nil }
+
+func (frame *hybiFrameReader) Len() (n int) { return frame.length }
+
+// A hybiFrameReaderFactory creates new frame reader based on its frame type.
+type hybiFrameReaderFactory struct {
+ *bufio.Reader
+}
+
+// NewFrameReader reads a frame header from the connection, and creates new reader for the frame.
+// See Section 5.2 Base Framing protocol for detail.
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2
+func (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) {
+ hybiFrame := new(hybiFrameReader)
+ frame = hybiFrame
+ var header []byte
+ var b byte
+ // First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits)
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ header = append(header, b)
+ hybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0
+ for i := 0; i < 3; i++ {
+ j := uint(6 - i)
+ hybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0
+ }
+ hybiFrame.header.OpCode = header[0] & 0x0f
+
+ // Second byte. Mask/Payload len(7bits)
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ header = append(header, b)
+ mask := (b & 0x80) != 0
+ b &= 0x7f
+ lengthFields := 0
+ switch {
+ case b <= 125: // Payload length 7bits.
+ hybiFrame.header.Length = int64(b)
+ case b == 126: // Payload length 7+16bits
+ lengthFields = 2
+ case b == 127: // Payload length 7+64bits
+ lengthFields = 8
+ }
+ for i := 0; i < lengthFields; i++ {
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ if lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits
+ b &= 0x7f
+ }
+ header = append(header, b)
+ hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b)
+ }
+ if mask {
+ // Masking key. 4 bytes.
+ for i := 0; i < 4; i++ {
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ header = append(header, b)
+ hybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b)
+ }
+ }
+ hybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length)
+ hybiFrame.header.data = bytes.NewBuffer(header)
+ hybiFrame.length = len(header) + int(hybiFrame.header.Length)
+ return
+}
+
+// A HybiFrameWriter is a writer for hybi frame.
+type hybiFrameWriter struct {
+ writer *bufio.Writer
+
+ header *hybiFrameHeader
+}
+
+func (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) {
+ var header []byte
+ var b byte
+ if frame.header.Fin {
+ b |= 0x80
+ }
+ for i := 0; i < 3; i++ {
+ if frame.header.Rsv[i] {
+ j := uint(6 - i)
+ b |= 1 << j
+ }
+ }
+ b |= frame.header.OpCode
+ header = append(header, b)
+ if frame.header.MaskingKey != nil {
+ b = 0x80
+ } else {
+ b = 0
+ }
+ lengthFields := 0
+ length := len(msg)
+ switch {
+ case length <= 125:
+ b |= byte(length)
+ case length < 65536:
+ b |= 126
+ lengthFields = 2
+ default:
+ b |= 127
+ lengthFields = 8
+ }
+ header = append(header, b)
+ for i := 0; i < lengthFields; i++ {
+ j := uint((lengthFields - i - 1) * 8)
+ b = byte((length >> j) & 0xff)
+ header = append(header, b)
+ }
+ if frame.header.MaskingKey != nil {
+ if len(frame.header.MaskingKey) != 4 {
+ return 0, ErrBadMaskingKey
+ }
+ header = append(header, frame.header.MaskingKey...)
+ frame.writer.Write(header)
+ data := make([]byte, length)
+ for i := range data {
+ data[i] = msg[i] ^ frame.header.MaskingKey[i%4]
+ }
+ frame.writer.Write(data)
+ err = frame.writer.Flush()
+ return length, err
+ }
+ frame.writer.Write(header)
+ frame.writer.Write(msg)
+ err = frame.writer.Flush()
+ return length, err
+}
+
+func (frame *hybiFrameWriter) Close() error { return nil }
+
+type hybiFrameWriterFactory struct {
+ *bufio.Writer
+ needMaskingKey bool
+}
+
+func (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) {
+ frameHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType}
+ if buf.needMaskingKey {
+ frameHeader.MaskingKey, err = generateMaskingKey()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil
+}
+
+type hybiFrameHandler struct {
+ conn *Conn
+ payloadType byte
+}
+
+func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) {
+ if handler.conn.IsServerConn() {
+ // The client MUST mask all frames sent to the server.
+ if frame.(*hybiFrameReader).header.MaskingKey == nil {
+ handler.WriteClose(closeStatusProtocolError)
+ return nil, io.EOF
+ }
+ } else {
+ // The server MUST NOT mask all frames.
+ if frame.(*hybiFrameReader).header.MaskingKey != nil {
+ handler.WriteClose(closeStatusProtocolError)
+ return nil, io.EOF
+ }
+ }
+ if header := frame.HeaderReader(); header != nil {
+ io.Copy(ioutil.Discard, header)
+ }
+ switch frame.PayloadType() {
+ case ContinuationFrame:
+ frame.(*hybiFrameReader).header.OpCode = handler.payloadType
+ case TextFrame, BinaryFrame:
+ handler.payloadType = frame.PayloadType()
+ case CloseFrame:
+ return nil, io.EOF
+ case PingFrame, PongFrame:
+ b := make([]byte, maxControlFramePayloadLength)
+ n, err := io.ReadFull(frame, b)
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, frame)
+ if frame.PayloadType() == PingFrame {
+ if _, err := handler.WritePong(b[:n]); err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ }
+ return frame, nil
+}
+
+func (handler *hybiFrameHandler) WriteClose(status int) (err error) {
+ handler.conn.wio.Lock()
+ defer handler.conn.wio.Unlock()
+ w, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame)
+ if err != nil {
+ return err
+ }
+ msg := make([]byte, 2)
+ binary.BigEndian.PutUint16(msg, uint16(status))
+ _, err = w.Write(msg)
+ w.Close()
+ return err
+}
+
+func (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) {
+ handler.conn.wio.Lock()
+ defer handler.conn.wio.Unlock()
+ w, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame)
+ if err != nil {
+ return 0, err
+ }
+ n, err = w.Write(msg)
+ w.Close()
+ return n, err
+}
+
+// newHybiConn creates a new WebSocket connection speaking hybi draft protocol.
+func newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+ if buf == nil {
+ br := bufio.NewReader(rwc)
+ bw := bufio.NewWriter(rwc)
+ buf = bufio.NewReadWriter(br, bw)
+ }
+ ws := &Conn{config: config, request: request, buf: buf, rwc: rwc,
+ frameReaderFactory: hybiFrameReaderFactory{buf.Reader},
+ frameWriterFactory: hybiFrameWriterFactory{
+ buf.Writer, request == nil},
+ PayloadType: TextFrame,
+ defaultCloseStatus: closeStatusNormal}
+ ws.frameHandler = &hybiFrameHandler{conn: ws}
+ return ws
+}
+
+// generateMaskingKey generates a masking key for a frame.
+func generateMaskingKey() (maskingKey []byte, err error) {
+ maskingKey = make([]byte, 4)
+ if _, err = io.ReadFull(rand.Reader, maskingKey); err != nil {
+ return
+ }
+ return
+}
+
+// generateNonce generates a nonce consisting of a randomly selected 16-byte
+// value that has been base64-encoded.
+func generateNonce() (nonce []byte) {
+ key := make([]byte, 16)
+ if _, err := io.ReadFull(rand.Reader, key); err != nil {
+ panic(err)
+ }
+ nonce = make([]byte, 24)
+ base64.StdEncoding.Encode(nonce, key)
+ return
+}
+
+// removeZone removes IPv6 zone identifer from host.
+// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080"
+func removeZone(host string) string {
+ if !strings.HasPrefix(host, "[") {
+ return host
+ }
+ i := strings.LastIndex(host, "]")
+ if i < 0 {
+ return host
+ }
+ j := strings.LastIndex(host[:i], "%")
+ if j < 0 {
+ return host
+ }
+ return host[:j] + host[i:]
+}
+
+// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of
+// the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string.
+func getNonceAccept(nonce []byte) (expected []byte, err error) {
+ h := sha1.New()
+ if _, err = h.Write(nonce); err != nil {
+ return
+ }
+ if _, err = h.Write([]byte(websocketGUID)); err != nil {
+ return
+ }
+ expected = make([]byte, 28)
+ base64.StdEncoding.Encode(expected, h.Sum(nil))
+ return
+}
+
+// Client handshake described in draft-ietf-hybi-thewebsocket-protocol-17
+func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) {
+ bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n")
+
+ // According to RFC 6874, an HTTP client, proxy, or other
+ // intermediary must remove any IPv6 zone identifier attached
+ // to an outgoing URI.
+ bw.WriteString("Host: " + removeZone(config.Location.Host) + "\r\n")
+ bw.WriteString("Upgrade: websocket\r\n")
+ bw.WriteString("Connection: Upgrade\r\n")
+ nonce := generateNonce()
+ if config.handshakeData != nil {
+ nonce = []byte(config.handshakeData["key"])
+ }
+ bw.WriteString("Sec-WebSocket-Key: " + string(nonce) + "\r\n")
+ bw.WriteString("Origin: " + strings.ToLower(config.Origin.String()) + "\r\n")
+
+ if config.Version != ProtocolVersionHybi13 {
+ return ErrBadProtocolVersion
+ }
+
+ bw.WriteString("Sec-WebSocket-Version: " + fmt.Sprintf("%d", config.Version) + "\r\n")
+ if len(config.Protocol) > 0 {
+ bw.WriteString("Sec-WebSocket-Protocol: " + strings.Join(config.Protocol, ", ") + "\r\n")
+ }
+ // TODO(ukai): send Sec-WebSocket-Extensions.
+ err = config.Header.WriteSubset(bw, handshakeHeader)
+ if err != nil {
+ return err
+ }
+
+ bw.WriteString("\r\n")
+ if err = bw.Flush(); err != nil {
+ return err
+ }
+
+ resp, err := http.ReadResponse(br, &http.Request{Method: "GET"})
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != 101 {
+ return ErrBadStatus
+ }
+ if strings.ToLower(resp.Header.Get("Upgrade")) != "websocket" ||
+ strings.ToLower(resp.Header.Get("Connection")) != "upgrade" {
+ return ErrBadUpgrade
+ }
+ expectedAccept, err := getNonceAccept(nonce)
+ if err != nil {
+ return err
+ }
+ if resp.Header.Get("Sec-WebSocket-Accept") != string(expectedAccept) {
+ return ErrChallengeResponse
+ }
+ if resp.Header.Get("Sec-WebSocket-Extensions") != "" {
+ return ErrUnsupportedExtensions
+ }
+ offeredProtocol := resp.Header.Get("Sec-WebSocket-Protocol")
+ if offeredProtocol != "" {
+ protocolMatched := false
+ for i := 0; i < len(config.Protocol); i++ {
+ if config.Protocol[i] == offeredProtocol {
+ protocolMatched = true
+ break
+ }
+ }
+ if !protocolMatched {
+ return ErrBadWebSocketProtocol
+ }
+ config.Protocol = []string{offeredProtocol}
+ }
+
+ return nil
+}
+
+// newHybiClientConn creates a client WebSocket connection after handshake.
+func newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn {
+ return newHybiConn(config, buf, rwc, nil)
+}
+
+// A HybiServerHandshaker performs a server handshake using hybi draft protocol.
+type hybiServerHandshaker struct {
+ *Config
+ accept []byte
+}
+
+func (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) {
+ c.Version = ProtocolVersionHybi13
+ if req.Method != "GET" {
+ return http.StatusMethodNotAllowed, ErrBadRequestMethod
+ }
+ // HTTP version can be safely ignored.
+
+ if strings.ToLower(req.Header.Get("Upgrade")) != "websocket" ||
+ !strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") {
+ return http.StatusBadRequest, ErrNotWebSocket
+ }
+
+ key := req.Header.Get("Sec-Websocket-Key")
+ if key == "" {
+ return http.StatusBadRequest, ErrChallengeResponse
+ }
+ version := req.Header.Get("Sec-Websocket-Version")
+ switch version {
+ case "13":
+ c.Version = ProtocolVersionHybi13
+ default:
+ return http.StatusBadRequest, ErrBadWebSocketVersion
+ }
+ var scheme string
+ if req.TLS != nil {
+ scheme = "wss"
+ } else {
+ scheme = "ws"
+ }
+ c.Location, err = url.ParseRequestURI(scheme + "://" + req.Host + req.URL.RequestURI())
+ if err != nil {
+ return http.StatusBadRequest, err
+ }
+ protocol := strings.TrimSpace(req.Header.Get("Sec-Websocket-Protocol"))
+ if protocol != "" {
+ protocols := strings.Split(protocol, ",")
+ for i := 0; i < len(protocols); i++ {
+ c.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i]))
+ }
+ }
+ c.accept, err = getNonceAccept([]byte(key))
+ if err != nil {
+ return http.StatusInternalServerError, err
+ }
+ return http.StatusSwitchingProtocols, nil
+}
+
+// Origin parses the Origin header in req.
+// If the Origin header is not set, it returns nil and nil.
+func Origin(config *Config, req *http.Request) (*url.URL, error) {
+ var origin string
+ switch config.Version {
+ case ProtocolVersionHybi13:
+ origin = req.Header.Get("Origin")
+ }
+ if origin == "" {
+ return nil, nil
+ }
+ return url.ParseRequestURI(origin)
+}
+
+func (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) {
+ if len(c.Protocol) > 0 {
+ if len(c.Protocol) != 1 {
+ // You need choose a Protocol in Handshake func in Server.
+ return ErrBadWebSocketProtocol
+ }
+ }
+ buf.WriteString("HTTP/1.1 101 Switching Protocols\r\n")
+ buf.WriteString("Upgrade: websocket\r\n")
+ buf.WriteString("Connection: Upgrade\r\n")
+ buf.WriteString("Sec-WebSocket-Accept: " + string(c.accept) + "\r\n")
+ if len(c.Protocol) > 0 {
+ buf.WriteString("Sec-WebSocket-Protocol: " + c.Protocol[0] + "\r\n")
+ }
+ // TODO(ukai): send Sec-WebSocket-Extensions.
+ if c.Header != nil {
+ err := c.Header.WriteSubset(buf, handshakeHeader)
+ if err != nil {
+ return err
+ }
+ }
+ buf.WriteString("\r\n")
+ return buf.Flush()
+}
+
+func (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+ return newHybiServerConn(c.Config, buf, rwc, request)
+}
+
+// newHybiServerConn returns a new WebSocket connection speaking hybi draft protocol.
+func newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+ return newHybiConn(config, buf, rwc, request)
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/server.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/server.go
new file mode 100644
index 00000000000..0895dea1905
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/server.go
@@ -0,0 +1,113 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "net/http"
+)
+
// newServerConn completes the server side of the WebSocket opening
// handshake on a hijacked connection: it reads the client's handshake
// from buf, runs the optional user-supplied handshake callback, and
// writes either the "101 Switching Protocols" response or a raw HTTP
// error response. On failure it returns conn == nil together with the
// error, after flushing the error response to buf.
func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) {
	var hs serverHandshaker = &hybiServerHandshaker{Config: config}
	code, err := hs.ReadHandshake(buf.Reader, req)
	if err == ErrBadWebSocketVersion {
		// Advertise the version we do support so a conforming client can retry.
		fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
		fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion)
		buf.WriteString("\r\n")
		buf.WriteString(err.Error())
		buf.Flush()
		return
	}
	if err != nil {
		// Any other handshake failure: report the code chosen by ReadHandshake.
		fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
		buf.WriteString("\r\n")
		buf.WriteString(err.Error())
		buf.Flush()
		return
	}
	if handshake != nil {
		// User callback may reject the request (e.g. origin checks) or pick
		// a subprotocol; rejection is reported as 403 Forbidden.
		err = handshake(config, req)
		if err != nil {
			code = http.StatusForbidden
			fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
			buf.WriteString("\r\n")
			buf.Flush()
			return
		}
	}
	err = hs.AcceptHandshake(buf.Writer)
	if err != nil {
		code = http.StatusBadRequest
		fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
		buf.WriteString("\r\n")
		buf.Flush()
		return
	}
	conn = hs.NewServerConn(buf, rwc, req)
	return
}
+
// Server represents a server of a WebSocket.
type Server struct {
	// Config is a WebSocket configuration for new WebSocket connections.
	Config

	// Handshake is an optional function called during the WebSocket
	// opening handshake. For example, you can check (or deliberately not
	// check) the Origin header here, or select config.Protocol.
	// Returning an error rejects the connection.
	Handshake func(*Config, *http.Request) error

	// Handler handles an established WebSocket connection.
	Handler
}
+
// ServeHTTP implements the http.Handler interface for a WebSocket.
func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	s.serveWebSocket(w, req)
}
+
// serveWebSocket hijacks the underlying HTTP connection, performs the
// WebSocket handshake via newServerConn, and runs s.Handler on the
// resulting connection. The connection is closed when the handler returns.
func (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) {
	// NOTE(review): the unchecked type assertion panics if w does not
	// implement http.Hijacker (e.g. some HTTP/2 response writers).
	rwc, buf, err := w.(http.Hijacker).Hijack()
	if err != nil {
		panic("Hijack failed: " + err.Error())
	}
	// The server should abort the WebSocket connection if it finds
	// the client did not send a handshake that matches with protocol
	// specification.
	defer rwc.Close()
	conn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake)
	if err != nil {
		// newServerConn already wrote an error response; nothing more to do.
		return
	}
	if conn == nil {
		panic("unexpected nil conn")
	}
	s.Handler(conn)
}
+
// Handler is a simple interface to a WebSocket browser client.
// By default it checks that the Origin header is a valid URL.
// You might want to verify websocket.Conn.Config().Origin in the func.
// If you use Server instead of Handler, you could call websocket.Origin and
// check the origin in your Handshake func. So, if you want to accept
// non-browser clients, which do not send an Origin header, set a
// Server.Handshake that does not check the origin.
type Handler func(*Conn)
+
+func checkOrigin(config *Config, req *http.Request) (err error) {
+ config.Origin, err = Origin(config, req)
+ if err == nil && config.Origin == nil {
+ return fmt.Errorf("null origin")
+ }
+ return err
+}
+
// ServeHTTP implements the http.Handler interface for a WebSocket.
// It wraps the handler in a throwaway Server whose Handshake is
// checkOrigin, so clients must present a parseable Origin header.
func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	s := Server{Handler: h, Handshake: checkOrigin}
	s.serveWebSocket(w, req)
}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/websocket.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/websocket.go
new file mode 100644
index 00000000000..6c45c735296
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/websocket.go
@@ -0,0 +1,451 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package websocket implements a client and server for the WebSocket protocol
+// as specified in RFC 6455.
+//
+// This package currently lacks some features found in alternative
+// and more actively maintained WebSocket packages:
+//
+// https://godoc.org/github.com/gorilla/websocket
+// https://godoc.org/nhooyr.io/websocket
+package websocket // import "golang.org/x/net/websocket"
+
+import (
+ "bufio"
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "sync"
+ "time"
+)
+
const (
	// Protocol version constants; only hybi-13 (RFC 6455) is supported.
	ProtocolVersionHybi13    = 13
	ProtocolVersionHybi      = ProtocolVersionHybi13
	SupportedProtocolVersion = "13"

	// Frame opcodes (values match RFC 6455 section 5.2); UnknownFrame is
	// an out-of-band marker for unrecognized payload types.
	ContinuationFrame = 0
	TextFrame         = 1
	BinaryFrame       = 2
	CloseFrame        = 8
	PingFrame         = 9
	PongFrame         = 10
	UnknownFrame      = 255

	DefaultMaxPayloadBytes = 32 << 20 // 32MB
)
+
// ProtocolError represents WebSocket protocol errors.
type ProtocolError struct {
	// ErrorString is the human-readable description of the violation.
	ErrorString string
}

// Error implements the error interface.
func (err *ProtocolError) Error() string {
	return err.ErrorString
}
+
// Predefined protocol errors returned by the handshake and framing code.
var (
	ErrBadProtocolVersion   = &ProtocolError{"bad protocol version"}
	ErrBadScheme            = &ProtocolError{"bad scheme"}
	ErrBadStatus            = &ProtocolError{"bad status"}
	ErrBadUpgrade           = &ProtocolError{"missing or bad upgrade"}
	ErrBadWebSocketOrigin   = &ProtocolError{"missing or bad WebSocket-Origin"}
	ErrBadWebSocketLocation = &ProtocolError{"missing or bad WebSocket-Location"}
	ErrBadWebSocketProtocol = &ProtocolError{"missing or bad WebSocket-Protocol"}
	ErrBadWebSocketVersion  = &ProtocolError{"missing or bad WebSocket Version"}
	ErrChallengeResponse    = &ProtocolError{"mismatch challenge/response"}
	ErrBadFrame             = &ProtocolError{"bad frame"}
	ErrBadFrameBoundary     = &ProtocolError{"not on frame boundary"}
	ErrNotWebSocket         = &ProtocolError{"not websocket protocol"}
	ErrBadRequestMethod     = &ProtocolError{"bad method"}
	ErrNotSupported         = &ProtocolError{"not supported"}
)

// ErrFrameTooLarge is returned by Codec's Receive method if payload size
// exceeds limit set by Conn.MaxPayloadBytes
var ErrFrameTooLarge = errors.New("websocket: frame payload size exceeds limit")
+
// Addr is an implementation of net.Addr for WebSocket.
// It embeds the endpoint URL, whose String method supplies the address text.
type Addr struct {
	*url.URL
}

// Network returns the network type for a WebSocket, "websocket".
func (addr *Addr) Network() string {
	return "websocket"
}
+
// Config is a WebSocket configuration.
type Config struct {
	// A WebSocket server address.
	Location *url.URL

	// A WebSocket client origin.
	Origin *url.URL

	// WebSocket subprotocols.
	Protocol []string

	// WebSocket protocol version.
	Version int

	// TLS config for secure WebSocket (wss).
	TlsConfig *tls.Config

	// Additional header fields to be sent in WebSocket opening handshake.
	Header http.Header

	// Dialer used when opening websocket connections.
	Dialer *net.Dialer

	// handshakeData carries internal key/value state for the handshake.
	handshakeData map[string]string
}
+
// serverHandshaker is an interface to handle the WebSocket server-side
// handshake. The three methods are called in order by newServerConn.
type serverHandshaker interface {
	// ReadHandshake reads the handshake request message from the client.
	// Returns an http response code and an error, if any.
	ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error)

	// AcceptHandshake accepts the client handshake request and sends the
	// handshake response back to the client.
	AcceptHandshake(buf *bufio.Writer) (err error)

	// NewServerConn creates a new WebSocket connection.
	NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn)
}
+
// frameReader is an interface to read a WebSocket frame.
type frameReader interface {
	// Reader is to read the payload of the frame.
	io.Reader

	// PayloadType returns the payload type (TextFrame, BinaryFrame, ...).
	PayloadType() byte

	// HeaderReader returns a reader to read the header of the frame.
	HeaderReader() io.Reader

	// TrailerReader returns a reader to read the trailer of the frame.
	// If it returns nil, there is no trailer in the frame.
	TrailerReader() io.Reader

	// Len returns the total length of the frame, including header and trailer.
	Len() int
}

// frameReaderFactory is an interface to create a new frame reader.
type frameReaderFactory interface {
	NewFrameReader() (r frameReader, err error)
}

// frameWriter is an interface to write a WebSocket frame.
type frameWriter interface {
	// Writer is to write the payload of the frame.
	io.WriteCloser
}

// frameWriterFactory is an interface to create a new frame writer.
type frameWriterFactory interface {
	NewFrameWriter(payloadType byte) (w frameWriter, err error)
}

// frameHandler processes incoming frames and writes close frames.
// HandleFrame may consume a frame entirely and return a nil reader
// (see Conn.Read / Codec.Receive, which loop in that case).
type frameHandler interface {
	HandleFrame(frame frameReader) (r frameReader, err error)
	WriteClose(status int) (err error)
}
+
// Conn represents a WebSocket connection.
//
// Multiple goroutines may invoke methods on a Conn simultaneously.
type Conn struct {
	config  *Config
	request *http.Request // nil on client-side connections (see IsClientConn)

	buf *bufio.ReadWriter
	rwc io.ReadWriteCloser

	// rio serializes readers; Read and Codec.Receive hold it for their
	// full duration.
	rio sync.Mutex
	frameReaderFactory
	frameReader // frame currently being read, or nil between frames

	// wio serializes writers; Write and Codec.Send hold it for their
	// full duration.
	wio sync.Mutex
	frameWriterFactory

	frameHandler
	PayloadType        byte // frame type used by Write
	defaultCloseStatus int  // status code sent by Close

	// MaxPayloadBytes limits the size of frame payload received over Conn
	// by Codec's Receive method. If zero, DefaultMaxPayloadBytes is used.
	MaxPayloadBytes int
}
+
// Read implements the io.Reader interface:
// it reads data of a frame from the WebSocket connection.
// If msg is not large enough for the frame data, it fills msg and the next
// Read will read the rest of the frame data.
// It reads Text frames or Binary frames.
func (ws *Conn) Read(msg []byte) (n int, err error) {
	ws.rio.Lock()
	defer ws.rio.Unlock()
again:
	// No frame in progress: pull the next frame off the wire and let the
	// frame handler process it. The handler may consume a frame entirely
	// (presumably control frames) and return nil, in which case we loop.
	if ws.frameReader == nil {
		frame, err := ws.frameReaderFactory.NewFrameReader()
		if err != nil {
			return 0, err
		}
		ws.frameReader, err = ws.frameHandler.HandleFrame(frame)
		if err != nil {
			return 0, err
		}
		if ws.frameReader == nil {
			goto again
		}
	}
	n, err = ws.frameReader.Read(msg)
	if err == io.EOF {
		// Current frame exhausted: drain its trailer (if any) and move on
		// to the next frame rather than surfacing EOF to the caller.
		if trailer := ws.frameReader.TrailerReader(); trailer != nil {
			io.Copy(ioutil.Discard, trailer)
		}
		ws.frameReader = nil
		goto again
	}
	return n, err
}
+
+// Write implements the io.Writer interface:
+// it writes data as a frame to the WebSocket connection.
+func (ws *Conn) Write(msg []byte) (n int, err error) {
+ ws.wio.Lock()
+ defer ws.wio.Unlock()
+ w, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType)
+ if err != nil {
+ return 0, err
+ }
+ n, err = w.Write(msg)
+ w.Close()
+ return n, err
+}
+
// Close implements the io.Closer interface.
// It sends a close frame carrying the connection's default close status,
// then closes the underlying transport. A close-frame write error takes
// precedence over the transport close error.
func (ws *Conn) Close() error {
	err := ws.frameHandler.WriteClose(ws.defaultCloseStatus)
	err1 := ws.rwc.Close()
	if err != nil {
		return err
	}
	return err1
}
+
// IsClientConn reports whether ws is a client-side connection.
// The distinguishing state is the upgrade request: only server-side
// connections retain one.
func (ws *Conn) IsClientConn() bool { return ws.request == nil }

// IsServerConn reports whether ws is a server-side connection.
func (ws *Conn) IsServerConn() bool { return ws.request != nil }
+
// LocalAddr returns the WebSocket Origin for the connection for a client,
// or the WebSocket location for a server.
func (ws *Conn) LocalAddr() net.Addr {
	if ws.IsClientConn() {
		return &Addr{ws.config.Origin}
	}
	return &Addr{ws.config.Location}
}

// RemoteAddr returns the WebSocket location for the connection for a
// client, or the WebSocket Origin for a server (the mirror of LocalAddr).
func (ws *Conn) RemoteAddr() net.Addr {
	if ws.IsClientConn() {
		return &Addr{ws.config.Location}
	}
	return &Addr{ws.config.Origin}
}
+
// errSetDeadline is returned by the deadline setters when the underlying
// transport is not a net.Conn (and thus has no deadline support).
var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn")

// SetDeadline sets the connection's network read & write deadlines.
func (ws *Conn) SetDeadline(t time.Time) error {
	if conn, ok := ws.rwc.(net.Conn); ok {
		return conn.SetDeadline(t)
	}
	return errSetDeadline
}

// SetReadDeadline sets the connection's network read deadline.
func (ws *Conn) SetReadDeadline(t time.Time) error {
	if conn, ok := ws.rwc.(net.Conn); ok {
		return conn.SetReadDeadline(t)
	}
	return errSetDeadline
}

// SetWriteDeadline sets the connection's network write deadline.
func (ws *Conn) SetWriteDeadline(t time.Time) error {
	if conn, ok := ws.rwc.(net.Conn); ok {
		return conn.SetWriteDeadline(t)
	}
	return errSetDeadline
}
+
// Config returns the WebSocket config.
func (ws *Conn) Config() *Config { return ws.config }

// Request returns the http request upgraded to the WebSocket.
// It is nil for the client side.
func (ws *Conn) Request() *http.Request { return ws.request }
+
// Codec represents a symmetric pair of functions that implement a codec.
type Codec struct {
	// Marshal encodes v into a frame payload plus its payload type.
	Marshal func(v interface{}) (data []byte, payloadType byte, err error)
	// Unmarshal decodes a frame payload of the given type into v.
	Unmarshal func(data []byte, payloadType byte, v interface{}) (err error)
}
+
+// Send sends v marshaled by cd.Marshal as single frame to ws.
+func (cd Codec) Send(ws *Conn, v interface{}) (err error) {
+ data, payloadType, err := cd.Marshal(v)
+ if err != nil {
+ return err
+ }
+ ws.wio.Lock()
+ defer ws.wio.Unlock()
+ w, err := ws.frameWriterFactory.NewFrameWriter(payloadType)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(data)
+ w.Close()
+ return err
+}
+
// Receive receives a single frame from ws, unmarshaled by cd.Unmarshal,
// and stores the result in v. The whole frame payload is read into an
// in-memory buffer; the maximum payload size is ws.MaxPayloadBytes. If
// the frame payload exceeds the limit, ErrFrameTooLarge is returned; in
// that case the frame is not read off the wire completely, and the next
// call to Receive first reads and discards the leftover data of the
// previous oversized frame before processing the next frame.
func (cd Codec) Receive(ws *Conn, v interface{}) (err error) {
	ws.rio.Lock()
	defer ws.rio.Unlock()
	// Drain leftover data from a previously-rejected oversized frame
	// (ws.frameReader was left set by the ErrFrameTooLarge path below).
	if ws.frameReader != nil {
		_, err = io.Copy(ioutil.Discard, ws.frameReader)
		if err != nil {
			return err
		}
		ws.frameReader = nil
	}
again:
	frame, err := ws.frameReaderFactory.NewFrameReader()
	if err != nil {
		return err
	}
	frame, err = ws.frameHandler.HandleFrame(frame)
	if err != nil {
		return err
	}
	// The handler may consume a frame entirely and return nil; keep
	// reading until a data frame arrives.
	if frame == nil {
		goto again
	}
	maxPayloadBytes := ws.MaxPayloadBytes
	if maxPayloadBytes == 0 {
		maxPayloadBytes = DefaultMaxPayloadBytes
	}
	if hf, ok := frame.(*hybiFrameReader); ok && hf.header.Length > int64(maxPayloadBytes) {
		// payload size exceeds limit, no need to call Unmarshal
		//
		// set frameReader to current oversized frame so that
		// the next call to this function can drain leftover
		// data before processing the next frame
		ws.frameReader = frame
		return ErrFrameTooLarge
	}
	payloadType := frame.PayloadType()
	data, err := ioutil.ReadAll(frame)
	if err != nil {
		return err
	}
	return cd.Unmarshal(data, payloadType, v)
}
+
+func marshal(v interface{}) (msg []byte, payloadType byte, err error) {
+ switch data := v.(type) {
+ case string:
+ return []byte(data), TextFrame, nil
+ case []byte:
+ return data, BinaryFrame, nil
+ }
+ return nil, UnknownFrame, ErrNotSupported
+}
+
+func unmarshal(msg []byte, payloadType byte, v interface{}) (err error) {
+ switch data := v.(type) {
+ case *string:
+ *data = string(msg)
+ return nil
+ case *[]byte:
+ *data = msg
+ return nil
+ }
+ return ErrNotSupported
+}
+
/*
Message is a codec to send/receive text/binary data in a frame on a WebSocket connection.
To send/receive a text frame, use the string type.
To send/receive a binary frame, use the []byte type.

Trivial usage:

	import "websocket"

	// receive text frame
	var message string
	websocket.Message.Receive(ws, &message)

	// send text frame
	message = "hello"
	websocket.Message.Send(ws, message)

	// receive binary frame
	var data []byte
	websocket.Message.Receive(ws, &data)

	// send binary frame
	data = []byte{0, 1, 2}
	websocket.Message.Send(ws, data)
*/
var Message = Codec{marshal, unmarshal}
+
+func jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) {
+ msg, err = json.Marshal(v)
+ return msg, TextFrame, err
+}
+
// jsonUnmarshal decodes a JSON-encoded frame payload into v.
// payloadType is deliberately ignored: every payload is treated as JSON.
func jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) {
	err = json.Unmarshal(msg, v)
	return err
}
+
/*
JSON is a codec to send/receive JSON data in a frame from a WebSocket connection.

Trivial usage:

	import "websocket"

	type T struct {
		Msg   string
		Count int
	}

	// receive JSON type T
	var data T
	websocket.JSON.Receive(ws, &data)

	// send JSON type T
	websocket.JSON.Send(ws, data)
*/
var JSON = Codec{jsonMarshal, jsonUnmarshal}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/modules.txt b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/modules.txt
new file mode 100644
index 00000000000..6551554790e
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/modules.txt
@@ -0,0 +1,30 @@
+# github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee
+github.com/gobwas/httphead
+# github.com/gobwas/pool v0.2.0
+github.com/gobwas/pool
+github.com/gobwas/pool/internal/pmath
+github.com/gobwas/pool/pbufio
+# github.com/gobwas/ws v1.0.3
+## explicit
+github.com/gobwas/ws
+# github.com/gorilla/websocket v1.4.2
+## explicit
+github.com/gorilla/websocket
+# github.com/klauspost/compress v1.10.3
+github.com/klauspost/compress/flate
+# github.com/sacOO7/go-logger v0.0.0-20180719173527-9ac9add5a50d
+## explicit
+github.com/sacOO7/go-logger
+# github.com/sacOO7/gowebsocket v0.0.0-20180719182212-1436bb906a4e
+## explicit
+github.com/sacOO7/gowebsocket
+# golang.org/x/net v0.0.0-20200421231249-e086a090c8fd
+## explicit
+golang.org/x/net/websocket
+# nhooyr.io/websocket v1.8.5
+## explicit
+nhooyr.io/websocket
+nhooyr.io/websocket/internal/bpool
+nhooyr.io/websocket/internal/errd
+nhooyr.io/websocket/internal/wsjs
+nhooyr.io/websocket/internal/xsync
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/.gitignore b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/.gitignore
new file mode 100644
index 00000000000..6961e5c894a
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/.gitignore
@@ -0,0 +1 @@
+websocket.test
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/.travis.yml b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/.travis.yml
new file mode 100644
index 00000000000..41d3c201468
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/.travis.yml
@@ -0,0 +1,40 @@
+language: go
+go: 1.x
+dist: bionic
+
+env:
+ global:
+ - SHFMT_URL=https://github.com/mvdan/sh/releases/download/v3.0.1/shfmt_v3.0.1_linux_amd64
+ - GOFLAGS="-mod=readonly"
+
+jobs:
+ include:
+ - name: Format
+ before_script:
+ - sudo apt-get install -y npm
+ - sudo npm install -g prettier
+ - sudo curl -L "$SHFMT_URL" > /usr/local/bin/shfmt && sudo chmod +x /usr/local/bin/shfmt
+ - go get golang.org/x/tools/cmd/stringer
+ - go get golang.org/x/tools/cmd/goimports
+ script: make -j16 fmt
+ - name: Lint
+ before_script:
+ - sudo apt-get install -y shellcheck
+ - go get golang.org/x/lint/golint
+ script: make -j16 lint
+ - name: Test
+ before_script:
+ - sudo apt-get install -y chromium-browser
+ - go get github.com/agnivade/wasmbrowsertest
+ - go get github.com/mattn/goveralls
+ script: make -j16 test
+
+addons:
+ apt:
+ update: true
+
+cache:
+ npm: true
+ directories:
+ - ~/.cache
+ - ~/gopath/pkg
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/LICENSE.txt b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/LICENSE.txt
new file mode 100644
index 00000000000..b5b5fef31f0
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/LICENSE.txt
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 Anmol Sethi
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/Makefile b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/Makefile
new file mode 100644
index 00000000000..f9f31c49f1c
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/Makefile
@@ -0,0 +1,7 @@
+all: fmt lint test
+
+.SILENT:
+
+include ci/fmt.mk
+include ci/lint.mk
+include ci/test.mk
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/README.md b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/README.md
new file mode 100644
index 00000000000..14c392935e1
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/README.md
@@ -0,0 +1,132 @@
+# websocket
+
+[](https://pkg.go.dev/nhooyr.io/websocket)
+
+websocket is a minimal and idiomatic WebSocket library for Go.
+
+## Install
+
+```bash
+go get nhooyr.io/websocket
+```
+
+## Features
+
+- Minimal and idiomatic API
+- First class [context.Context](https://blog.golang.org/context) support
+- Fully passes the WebSocket [autobahn-testsuite](https://github.com/crossbario/autobahn-testsuite)
+- Thorough tests with [90% coverage](https://coveralls.io/github/nhooyr/websocket)
+- [Single dependency](https://pkg.go.dev/nhooyr.io/websocket?tab=imports)
+- JSON and protobuf helpers in the [wsjson](https://pkg.go.dev/nhooyr.io/websocket/wsjson) and [wspb](https://pkg.go.dev/nhooyr.io/websocket/wspb) subpackages
+- Zero alloc reads and writes
+- Concurrent writes
+- [Close handshake](https://pkg.go.dev/nhooyr.io/websocket#Conn.Close)
+- [net.Conn](https://pkg.go.dev/nhooyr.io/websocket#NetConn) wrapper
+- [Ping pong](https://pkg.go.dev/nhooyr.io/websocket#Conn.Ping) API
+- [RFC 7692](https://tools.ietf.org/html/rfc7692) permessage-deflate compression
+- Compile to [Wasm](https://pkg.go.dev/nhooyr.io/websocket#hdr-Wasm)
+
+## Roadmap
+
+- [ ] HTTP/2 [#4](https://github.com/nhooyr/websocket/issues/4)
+
+## Examples
+
+For a production quality example that demonstrates the complete API, see the
+[echo example](./examples/echo).
+
+For a full stack example, see the [chat example](./examples/chat).
+
+### Server
+
+```go
+http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ c, err := websocket.Accept(w, r, nil)
+ if err != nil {
+ // ...
+ }
+ defer c.Close(websocket.StatusInternalError, "the sky is falling")
+
+ ctx, cancel := context.WithTimeout(r.Context(), time.Second*10)
+ defer cancel()
+
+ var v interface{}
+ err = wsjson.Read(ctx, c, &v)
+ if err != nil {
+ // ...
+ }
+
+ log.Printf("received: %v", v)
+
+ c.Close(websocket.StatusNormalClosure, "")
+})
+```
+
+### Client
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+defer cancel()
+
+c, _, err := websocket.Dial(ctx, "ws://localhost:8080", nil)
+if err != nil {
+ // ...
+}
+defer c.Close(websocket.StatusInternalError, "the sky is falling")
+
+err = wsjson.Write(ctx, c, "hi")
+if err != nil {
+ // ...
+}
+
+c.Close(websocket.StatusNormalClosure, "")
+```
+
+## Comparison
+
+### gorilla/websocket
+
+Advantages of [gorilla/websocket](https://github.com/gorilla/websocket):
+
+- Mature and widely used
+- [Prepared writes](https://pkg.go.dev/github.com/gorilla/websocket#PreparedMessage)
+- Configurable [buffer sizes](https://pkg.go.dev/github.com/gorilla/websocket#hdr-Buffers)
+
+Advantages of nhooyr.io/websocket:
+
+- Minimal and idiomatic API
+ - Compare godoc of [nhooyr.io/websocket](https://pkg.go.dev/nhooyr.io/websocket) with [gorilla/websocket](https://pkg.go.dev/github.com/gorilla/websocket) side by side.
+- [net.Conn](https://pkg.go.dev/nhooyr.io/websocket#NetConn) wrapper
+- Zero alloc reads and writes ([gorilla/websocket#535](https://github.com/gorilla/websocket/issues/535))
+- Full [context.Context](https://blog.golang.org/context) support
+- Dial uses [net/http.Client](https://golang.org/pkg/net/http/#Client)
+ - Will enable easy HTTP/2 support in the future
+ - Gorilla writes directly to a net.Conn and so duplicates features of net/http.Client.
+- Concurrent writes
+- Close handshake ([gorilla/websocket#448](https://github.com/gorilla/websocket/issues/448))
+- Idiomatic [ping pong](https://pkg.go.dev/nhooyr.io/websocket#Conn.Ping) API
+ - Gorilla requires registering a pong callback before sending a Ping
+- Can target Wasm ([gorilla/websocket#432](https://github.com/gorilla/websocket/issues/432))
+- Transparent message buffer reuse with [wsjson](https://pkg.go.dev/nhooyr.io/websocket/wsjson) and [wspb](https://pkg.go.dev/nhooyr.io/websocket/wspb) subpackages
+- [1.75x](https://github.com/nhooyr/websocket/releases/tag/v1.7.4) faster WebSocket masking implementation in pure Go
+ - Gorilla's implementation is slower and uses [unsafe](https://golang.org/pkg/unsafe/).
+- Full [permessage-deflate](https://tools.ietf.org/html/rfc7692) compression extension support
+ - Gorilla only supports no context takeover mode
+ - We use a vendored [klauspost/compress](https://github.com/klauspost/compress) for much lower memory usage ([gorilla/websocket#203](https://github.com/gorilla/websocket/issues/203))
+- [CloseRead](https://pkg.go.dev/nhooyr.io/websocket#Conn.CloseRead) helper ([gorilla/websocket#492](https://github.com/gorilla/websocket/issues/492))
+- Actively maintained ([gorilla/websocket#370](https://github.com/gorilla/websocket/issues/370))
+
+### golang.org/x/net/websocket
+
+[golang.org/x/net/websocket](https://pkg.go.dev/golang.org/x/net/websocket) is deprecated.
+See [golang/go/issues/18152](https://github.com/golang/go/issues/18152).
+
+The [net.Conn](https://pkg.go.dev/nhooyr.io/websocket#NetConn) can help in transitioning
+to nhooyr.io/websocket.
+
+### gobwas/ws
+
+[gobwas/ws](https://github.com/gobwas/ws) has an extremely flexible API that allows it to be used
+in an event driven style for performance. See the author's [blog post](https://medium.freecodecamp.org/million-websockets-and-go-cc58418460bb).
+
+However when writing idiomatic Go, nhooyr.io/websocket will be faster and easier to use.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/accept.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/accept.go
new file mode 100644
index 00000000000..6bed54da028
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/accept.go
@@ -0,0 +1,365 @@
+// +build !js
+
+package websocket
+
+import (
+ "bytes"
+ "crypto/sha1"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "path/filepath"
+ "strings"
+
+ "nhooyr.io/websocket/internal/errd"
+)
+
+// AcceptOptions represents Accept's options.
+type AcceptOptions struct {
+ // Subprotocols lists the WebSocket subprotocols that Accept will negotiate with the client.
+ // The empty subprotocol will always be negotiated as per RFC 6455. If you would like to
+ // reject it, close the connection when c.Subprotocol() == "".
+ Subprotocols []string
+
+ // InsecureSkipVerify is used to disable Accept's origin verification behaviour.
+ //
+ // Deprecated: Use OriginPatterns with a match all pattern of * instead to control
+ // origin authorization yourself.
+ InsecureSkipVerify bool
+
+ // OriginPatterns lists the host patterns for authorized origins.
+ // The request host is always authorized.
+ // Use this to enable cross origin WebSockets.
+ //
+	// e.g. JavaScript running on example.com wants to access a WebSocket server at chat.example.com.
+ // In such a case, example.com is the origin and chat.example.com is the request host.
+ // One would set this field to []string{"example.com"} to authorize example.com to connect.
+ //
+ // Each pattern is matched case insensitively against the request origin host
+ // with filepath.Match.
+ // See https://golang.org/pkg/path/filepath/#Match
+ //
+ // Please ensure you understand the ramifications of enabling this.
+ // If used incorrectly your WebSocket server will be open to CSRF attacks.
+ OriginPatterns []string
+
+ // CompressionMode controls the compression mode.
+ // Defaults to CompressionNoContextTakeover.
+ //
+ // See docs on CompressionMode for details.
+ CompressionMode CompressionMode
+
+ // CompressionThreshold controls the minimum size of a message before compression is applied.
+ //
+ // Defaults to 512 bytes for CompressionNoContextTakeover and 128 bytes
+ // for CompressionContextTakeover.
+ CompressionThreshold int
+}
+
+// Accept accepts a WebSocket handshake from a client and upgrades
+// the connection to a WebSocket.
+//
+// Accept will not allow cross origin requests by default.
+// See the InsecureSkipVerify option to allow cross origin requests.
+//
+// Accept will write a response to w on all errors.
+func Accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (*Conn, error) {
+ return accept(w, r, opts)
+}
+
+func accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (_ *Conn, err error) {
+ defer errd.Wrap(&err, "failed to accept WebSocket connection")
+
+ if opts == nil {
+ opts = &AcceptOptions{}
+ }
+ opts = &*opts
+
+ errCode, err := verifyClientRequest(w, r)
+ if err != nil {
+ http.Error(w, err.Error(), errCode)
+ return nil, err
+ }
+
+ if !opts.InsecureSkipVerify {
+ err = authenticateOrigin(r, opts.OriginPatterns)
+ if err != nil {
+ if errors.Is(err, filepath.ErrBadPattern) {
+ log.Printf("websocket: %v", err)
+ err = errors.New(http.StatusText(http.StatusForbidden))
+ }
+ http.Error(w, err.Error(), http.StatusForbidden)
+ return nil, err
+ }
+ }
+
+ hj, ok := w.(http.Hijacker)
+ if !ok {
+ err = errors.New("http.ResponseWriter does not implement http.Hijacker")
+ http.Error(w, http.StatusText(http.StatusNotImplemented), http.StatusNotImplemented)
+ return nil, err
+ }
+
+ w.Header().Set("Upgrade", "websocket")
+ w.Header().Set("Connection", "Upgrade")
+
+ key := r.Header.Get("Sec-WebSocket-Key")
+ w.Header().Set("Sec-WebSocket-Accept", secWebSocketAccept(key))
+
+ subproto := selectSubprotocol(r, opts.Subprotocols)
+ if subproto != "" {
+ w.Header().Set("Sec-WebSocket-Protocol", subproto)
+ }
+
+ copts, err := acceptCompression(r, w, opts.CompressionMode)
+ if err != nil {
+ return nil, err
+ }
+
+ w.WriteHeader(http.StatusSwitchingProtocols)
+
+ netConn, brw, err := hj.Hijack()
+ if err != nil {
+ err = fmt.Errorf("failed to hijack connection: %w", err)
+ http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
+ return nil, err
+ }
+
+ // https://github.com/golang/go/issues/32314
+ b, _ := brw.Reader.Peek(brw.Reader.Buffered())
+ brw.Reader.Reset(io.MultiReader(bytes.NewReader(b), netConn))
+
+ return newConn(connConfig{
+ subprotocol: w.Header().Get("Sec-WebSocket-Protocol"),
+ rwc: netConn,
+ client: false,
+ copts: copts,
+ flateThreshold: opts.CompressionThreshold,
+
+ br: brw.Reader,
+ bw: brw.Writer,
+ }), nil
+}
+
+func verifyClientRequest(w http.ResponseWriter, r *http.Request) (errCode int, _ error) {
+ if !r.ProtoAtLeast(1, 1) {
+ return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: handshake request must be at least HTTP/1.1: %q", r.Proto)
+ }
+
+ if !headerContainsToken(r.Header, "Connection", "Upgrade") {
+ w.Header().Set("Connection", "Upgrade")
+ w.Header().Set("Upgrade", "websocket")
+ return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: Connection header %q does not contain Upgrade", r.Header.Get("Connection"))
+ }
+
+ if !headerContainsToken(r.Header, "Upgrade", "websocket") {
+ w.Header().Set("Connection", "Upgrade")
+ w.Header().Set("Upgrade", "websocket")
+ return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: Upgrade header %q does not contain websocket", r.Header.Get("Upgrade"))
+ }
+
+ if r.Method != "GET" {
+ return http.StatusMethodNotAllowed, fmt.Errorf("WebSocket protocol violation: handshake request method is not GET but %q", r.Method)
+ }
+
+ if r.Header.Get("Sec-WebSocket-Version") != "13" {
+ w.Header().Set("Sec-WebSocket-Version", "13")
+ return http.StatusBadRequest, fmt.Errorf("unsupported WebSocket protocol version (only 13 is supported): %q", r.Header.Get("Sec-WebSocket-Version"))
+ }
+
+ if r.Header.Get("Sec-WebSocket-Key") == "" {
+ return http.StatusBadRequest, errors.New("WebSocket protocol violation: missing Sec-WebSocket-Key")
+ }
+
+ return 0, nil
+}
+
+func authenticateOrigin(r *http.Request, originHosts []string) error {
+ origin := r.Header.Get("Origin")
+ if origin == "" {
+ return nil
+ }
+
+ u, err := url.Parse(origin)
+ if err != nil {
+ return fmt.Errorf("failed to parse Origin header %q: %w", origin, err)
+ }
+
+ if strings.EqualFold(r.Host, u.Host) {
+ return nil
+ }
+
+ for _, hostPattern := range originHosts {
+ matched, err := match(hostPattern, u.Host)
+ if err != nil {
+ return fmt.Errorf("failed to parse filepath pattern %q: %w", hostPattern, err)
+ }
+ if matched {
+ return nil
+ }
+ }
+ return fmt.Errorf("request Origin %q is not authorized for Host %q", origin, r.Host)
+}
+
+func match(pattern, s string) (bool, error) {
+ return filepath.Match(strings.ToLower(pattern), strings.ToLower(s))
+}
+
+func selectSubprotocol(r *http.Request, subprotocols []string) string {
+ cps := headerTokens(r.Header, "Sec-WebSocket-Protocol")
+ for _, sp := range subprotocols {
+ for _, cp := range cps {
+ if strings.EqualFold(sp, cp) {
+ return cp
+ }
+ }
+ }
+ return ""
+}
+
+func acceptCompression(r *http.Request, w http.ResponseWriter, mode CompressionMode) (*compressionOptions, error) {
+ if mode == CompressionDisabled {
+ return nil, nil
+ }
+
+ for _, ext := range websocketExtensions(r.Header) {
+ switch ext.name {
+ case "permessage-deflate":
+ return acceptDeflate(w, ext, mode)
+ // Disabled for now, see https://github.com/nhooyr/websocket/issues/218
+ // case "x-webkit-deflate-frame":
+ // return acceptWebkitDeflate(w, ext, mode)
+ }
+ }
+ return nil, nil
+}
+
+func acceptDeflate(w http.ResponseWriter, ext websocketExtension, mode CompressionMode) (*compressionOptions, error) {
+ copts := mode.opts()
+
+ for _, p := range ext.params {
+ switch p {
+ case "client_no_context_takeover":
+ copts.clientNoContextTakeover = true
+ continue
+ case "server_no_context_takeover":
+ copts.serverNoContextTakeover = true
+ continue
+ }
+
+ if strings.HasPrefix(p, "client_max_window_bits") {
+ // We cannot adjust the read sliding window so cannot make use of this.
+ continue
+ }
+
+ err := fmt.Errorf("unsupported permessage-deflate parameter: %q", p)
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return nil, err
+ }
+
+ copts.setHeader(w.Header())
+
+ return copts, nil
+}
+
+func acceptWebkitDeflate(w http.ResponseWriter, ext websocketExtension, mode CompressionMode) (*compressionOptions, error) {
+ copts := mode.opts()
+ // The peer must explicitly request it.
+ copts.serverNoContextTakeover = false
+
+ for _, p := range ext.params {
+ if p == "no_context_takeover" {
+ copts.serverNoContextTakeover = true
+ continue
+ }
+
+ // We explicitly fail on x-webkit-deflate-frame's max_window_bits parameter instead
+ // of ignoring it as the draft spec is unclear. It says the server can ignore it
+ // but the server has no way of signalling to the client it was ignored as the parameters
+ // are set one way.
+	// Thus, ignoring it would make the client think we understood it, which would cause issues.
+ // See https://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate-06#section-4.1
+ //
+ // Either way, we're only implementing this for webkit which never sends the max_window_bits
+ // parameter so we don't need to worry about it.
+ err := fmt.Errorf("unsupported x-webkit-deflate-frame parameter: %q", p)
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return nil, err
+ }
+
+ s := "x-webkit-deflate-frame"
+ if copts.clientNoContextTakeover {
+ s += "; no_context_takeover"
+ }
+ w.Header().Set("Sec-WebSocket-Extensions", s)
+
+ return copts, nil
+}
+
+func headerContainsToken(h http.Header, key, token string) bool {
+ token = strings.ToLower(token)
+
+ for _, t := range headerTokens(h, key) {
+ if t == token {
+ return true
+ }
+ }
+ return false
+}
+
+type websocketExtension struct {
+ name string
+ params []string
+}
+
+func websocketExtensions(h http.Header) []websocketExtension {
+ var exts []websocketExtension
+ extStrs := headerTokens(h, "Sec-WebSocket-Extensions")
+ for _, extStr := range extStrs {
+ if extStr == "" {
+ continue
+ }
+
+ vals := strings.Split(extStr, ";")
+ for i := range vals {
+ vals[i] = strings.TrimSpace(vals[i])
+ }
+
+ e := websocketExtension{
+ name: vals[0],
+ params: vals[1:],
+ }
+
+ exts = append(exts, e)
+ }
+ return exts
+}
+
+func headerTokens(h http.Header, key string) []string {
+ key = textproto.CanonicalMIMEHeaderKey(key)
+ var tokens []string
+ for _, v := range h[key] {
+ v = strings.TrimSpace(v)
+ for _, t := range strings.Split(v, ",") {
+ t = strings.ToLower(t)
+ t = strings.TrimSpace(t)
+ tokens = append(tokens, t)
+ }
+ }
+ return tokens
+}
+
+var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
+
+func secWebSocketAccept(secWebSocketKey string) string {
+ h := sha1.New()
+ h.Write([]byte(secWebSocketKey))
+ h.Write(keyGUID)
+
+ return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/accept_js.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/accept_js.go
new file mode 100644
index 00000000000..daad4b79fec
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/accept_js.go
@@ -0,0 +1,20 @@
+package websocket
+
+import (
+ "errors"
+ "net/http"
+)
+
+// AcceptOptions represents Accept's options.
+type AcceptOptions struct {
+ Subprotocols []string
+ InsecureSkipVerify bool
+ OriginPatterns []string
+ CompressionMode CompressionMode
+ CompressionThreshold int
+}
+
+// Accept is stubbed out for Wasm.
+func Accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (*Conn, error) {
+ return nil, errors.New("unimplemented")
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/close.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/close.go
new file mode 100644
index 00000000000..7cbc19e9def
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/close.go
@@ -0,0 +1,76 @@
+package websocket
+
+import (
+ "errors"
+ "fmt"
+)
+
+// StatusCode represents a WebSocket status code.
+// https://tools.ietf.org/html/rfc6455#section-7.4
+type StatusCode int
+
+// https://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
+//
+// These are only the status codes defined by the protocol.
+//
+// You can define custom codes in the 3000-4999 range.
+// The 3000-3999 range is reserved for use by libraries, frameworks and applications.
+// The 4000-4999 range is reserved for private use.
+const (
+ StatusNormalClosure StatusCode = 1000
+ StatusGoingAway StatusCode = 1001
+ StatusProtocolError StatusCode = 1002
+ StatusUnsupportedData StatusCode = 1003
+
+ // 1004 is reserved and so unexported.
+ statusReserved StatusCode = 1004
+
+ // StatusNoStatusRcvd cannot be sent in a close message.
+ // It is reserved for when a close message is received without
+ // a status code.
+ StatusNoStatusRcvd StatusCode = 1005
+
+ // StatusAbnormalClosure is exported for use only with Wasm.
+ // In non Wasm Go, the returned error will indicate whether the
+ // connection was closed abnormally.
+ StatusAbnormalClosure StatusCode = 1006
+
+ StatusInvalidFramePayloadData StatusCode = 1007
+ StatusPolicyViolation StatusCode = 1008
+ StatusMessageTooBig StatusCode = 1009
+ StatusMandatoryExtension StatusCode = 1010
+ StatusInternalError StatusCode = 1011
+ StatusServiceRestart StatusCode = 1012
+ StatusTryAgainLater StatusCode = 1013
+ StatusBadGateway StatusCode = 1014
+
+ // StatusTLSHandshake is only exported for use with Wasm.
+ // In non Wasm Go, the returned error will indicate whether there was
+ // a TLS handshake failure.
+ StatusTLSHandshake StatusCode = 1015
+)
+
+// CloseError is returned when the connection is closed with a status and reason.
+//
+// Use Go 1.13's errors.As to check for this error.
+// Also see the CloseStatus helper.
+type CloseError struct {
+ Code StatusCode
+ Reason string
+}
+
+func (ce CloseError) Error() string {
+ return fmt.Sprintf("status = %v and reason = %q", ce.Code, ce.Reason)
+}
+
+// CloseStatus is a convenience wrapper around Go 1.13's errors.As to grab
+// the status code from a CloseError.
+//
+// -1 will be returned if the passed error is nil or not a CloseError.
+func CloseStatus(err error) StatusCode {
+ var ce CloseError
+ if errors.As(err, &ce) {
+ return ce.Code
+ }
+ return -1
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/close_notjs.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/close_notjs.go
new file mode 100644
index 00000000000..4251311d2e6
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/close_notjs.go
@@ -0,0 +1,211 @@
+// +build !js
+
+package websocket
+
+import (
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "log"
+ "time"
+
+ "nhooyr.io/websocket/internal/errd"
+)
+
+// Close performs the WebSocket close handshake with the given status code and reason.
+//
+// It will write a WebSocket close frame with a timeout of 5s and then wait 5s for
+// the peer to send a close frame.
+// All data messages received from the peer during the close handshake will be discarded.
+//
+// The connection can only be closed once. Additional calls to Close
+// are no-ops.
+//
+// The maximum length of reason must be 125 bytes. Avoid
+// sending a dynamic reason.
+//
+// Close will unblock all goroutines interacting with the connection once
+// complete.
+func (c *Conn) Close(code StatusCode, reason string) error {
+ return c.closeHandshake(code, reason)
+}
+
+func (c *Conn) closeHandshake(code StatusCode, reason string) (err error) {
+ defer errd.Wrap(&err, "failed to close WebSocket")
+
+ writeErr := c.writeClose(code, reason)
+ closeHandshakeErr := c.waitCloseHandshake()
+
+ if writeErr != nil {
+ return writeErr
+ }
+
+ if CloseStatus(closeHandshakeErr) == -1 {
+ return closeHandshakeErr
+ }
+
+ return nil
+}
+
+var errAlreadyWroteClose = errors.New("already wrote close")
+
+func (c *Conn) writeClose(code StatusCode, reason string) error {
+ c.closeMu.Lock()
+ wroteClose := c.wroteClose
+ c.wroteClose = true
+ c.closeMu.Unlock()
+ if wroteClose {
+ return errAlreadyWroteClose
+ }
+
+ ce := CloseError{
+ Code: code,
+ Reason: reason,
+ }
+
+ var p []byte
+ var marshalErr error
+ if ce.Code != StatusNoStatusRcvd {
+ p, marshalErr = ce.bytes()
+ if marshalErr != nil {
+ log.Printf("websocket: %v", marshalErr)
+ }
+ }
+
+ writeErr := c.writeControl(context.Background(), opClose, p)
+ if CloseStatus(writeErr) != -1 {
+ // Not a real error if it's due to a close frame being received.
+ writeErr = nil
+ }
+
+ // We do this after in case there was an error writing the close frame.
+ c.setCloseErr(fmt.Errorf("sent close frame: %w", ce))
+
+ if marshalErr != nil {
+ return marshalErr
+ }
+ return writeErr
+}
+
+func (c *Conn) waitCloseHandshake() error {
+ defer c.close(nil)
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ defer cancel()
+
+ err := c.readMu.lock(ctx)
+ if err != nil {
+ return err
+ }
+ defer c.readMu.unlock()
+
+ if c.readCloseFrameErr != nil {
+ return c.readCloseFrameErr
+ }
+
+ for {
+ h, err := c.readLoop(ctx)
+ if err != nil {
+ return err
+ }
+
+ for i := int64(0); i < h.payloadLength; i++ {
+ _, err := c.br.ReadByte()
+ if err != nil {
+ return err
+ }
+ }
+ }
+}
+
+func parseClosePayload(p []byte) (CloseError, error) {
+ if len(p) == 0 {
+ return CloseError{
+ Code: StatusNoStatusRcvd,
+ }, nil
+ }
+
+ if len(p) < 2 {
+ return CloseError{}, fmt.Errorf("close payload %q too small, cannot even contain the 2 byte status code", p)
+ }
+
+ ce := CloseError{
+ Code: StatusCode(binary.BigEndian.Uint16(p)),
+ Reason: string(p[2:]),
+ }
+
+ if !validWireCloseCode(ce.Code) {
+ return CloseError{}, fmt.Errorf("invalid status code %v", ce.Code)
+ }
+
+ return ce, nil
+}
+
+// See http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
+// and https://tools.ietf.org/html/rfc6455#section-7.4.1
+func validWireCloseCode(code StatusCode) bool {
+ switch code {
+ case statusReserved, StatusNoStatusRcvd, StatusAbnormalClosure, StatusTLSHandshake:
+ return false
+ }
+
+ if code >= StatusNormalClosure && code <= StatusBadGateway {
+ return true
+ }
+ if code >= 3000 && code <= 4999 {
+ return true
+ }
+
+ return false
+}
+
+func (ce CloseError) bytes() ([]byte, error) {
+ p, err := ce.bytesErr()
+ if err != nil {
+ err = fmt.Errorf("failed to marshal close frame: %w", err)
+ ce = CloseError{
+ Code: StatusInternalError,
+ }
+ p, _ = ce.bytesErr()
+ }
+ return p, err
+}
+
+const maxCloseReason = maxControlPayload - 2
+
+func (ce CloseError) bytesErr() ([]byte, error) {
+ if len(ce.Reason) > maxCloseReason {
+ return nil, fmt.Errorf("reason string max is %v but got %q with length %v", maxCloseReason, ce.Reason, len(ce.Reason))
+ }
+
+ if !validWireCloseCode(ce.Code) {
+ return nil, fmt.Errorf("status code %v cannot be set", ce.Code)
+ }
+
+ buf := make([]byte, 2+len(ce.Reason))
+ binary.BigEndian.PutUint16(buf, uint16(ce.Code))
+ copy(buf[2:], ce.Reason)
+ return buf, nil
+}
+
+func (c *Conn) setCloseErr(err error) {
+ c.closeMu.Lock()
+ c.setCloseErrLocked(err)
+ c.closeMu.Unlock()
+}
+
+func (c *Conn) setCloseErrLocked(err error) {
+ if c.closeErr == nil {
+ c.closeErr = fmt.Errorf("WebSocket closed: %w", err)
+ }
+}
+
+func (c *Conn) isClosed() bool {
+ select {
+ case <-c.closed:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/compress.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/compress.go
new file mode 100644
index 00000000000..80b46d1c1d3
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/compress.go
@@ -0,0 +1,39 @@
+package websocket
+
+// CompressionMode represents the modes available to the deflate extension.
+// See https://tools.ietf.org/html/rfc7692
+//
+// A compatibility layer is implemented for the older deflate-frame extension used
+// by safari. See https://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate-06
+// It will work the same in every way except that we cannot signal to the peer we
+// want to use no context takeover on our side, we can only signal that they should.
+// It is however currently disabled due to Safari bugs. See https://github.com/nhooyr/websocket/issues/218
+type CompressionMode int
+
+const (
+ // CompressionNoContextTakeover grabs a new flate.Reader and flate.Writer as needed
+ // for every message. This applies to both server and client side.
+ //
+ // This means less efficient compression as the sliding window from previous messages
+ // will not be used but the memory overhead will be lower if the connections
+ // are long lived and seldom used.
+ //
+ // The message will only be compressed if greater than 512 bytes.
+ CompressionNoContextTakeover CompressionMode = iota
+
+ // CompressionContextTakeover uses a flate.Reader and flate.Writer per connection.
+ // This enables reusing the sliding window from previous messages.
+ // As most WebSocket protocols are repetitive, this can be very efficient.
+ // It carries an overhead of 8 kB for every connection compared to CompressionNoContextTakeover.
+ //
+ // If the peer negotiates NoContextTakeover on the client or server side, it will be
+ // used instead as this is required by the RFC.
+ CompressionContextTakeover
+
+ // CompressionDisabled disables the deflate extension.
+ //
+ // Use this if you are using a predominantly binary protocol with very
+ // little duplication in between messages or CPU and memory are more
+ // important than bandwidth.
+ CompressionDisabled
+)
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/compress_notjs.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/compress_notjs.go
new file mode 100644
index 00000000000..809a272c3d1
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/compress_notjs.go
@@ -0,0 +1,181 @@
+// +build !js
+
+package websocket
+
+import (
+ "io"
+ "net/http"
+ "sync"
+
+ "github.com/klauspost/compress/flate"
+)
+
+func (m CompressionMode) opts() *compressionOptions {
+ return &compressionOptions{
+ clientNoContextTakeover: m == CompressionNoContextTakeover,
+ serverNoContextTakeover: m == CompressionNoContextTakeover,
+ }
+}
+
+type compressionOptions struct {
+ clientNoContextTakeover bool
+ serverNoContextTakeover bool
+}
+
+func (copts *compressionOptions) setHeader(h http.Header) {
+ s := "permessage-deflate"
+ if copts.clientNoContextTakeover {
+ s += "; client_no_context_takeover"
+ }
+ if copts.serverNoContextTakeover {
+ s += "; server_no_context_takeover"
+ }
+ h.Set("Sec-WebSocket-Extensions", s)
+}
+
+// These bytes are required to get flate.Reader to return.
+// They are removed when sending to avoid the overhead as
+// WebSocket framing tells when the message has ended but then
+// we need to add them back otherwise flate.Reader keeps
+// trying to return more bytes.
+const deflateMessageTail = "\x00\x00\xff\xff"
+
+type trimLastFourBytesWriter struct {
+ w io.Writer
+ tail []byte
+}
+
+func (tw *trimLastFourBytesWriter) reset() {
+ if tw != nil && tw.tail != nil {
+ tw.tail = tw.tail[:0]
+ }
+}
+
+func (tw *trimLastFourBytesWriter) Write(p []byte) (int, error) {
+ if tw.tail == nil {
+ tw.tail = make([]byte, 0, 4)
+ }
+
+ extra := len(tw.tail) + len(p) - 4
+
+ if extra <= 0 {
+ tw.tail = append(tw.tail, p...)
+ return len(p), nil
+ }
+
+ // Now we need to write as many extra bytes as we can from the previous tail.
+ if extra > len(tw.tail) {
+ extra = len(tw.tail)
+ }
+ if extra > 0 {
+ _, err := tw.w.Write(tw.tail[:extra])
+ if err != nil {
+ return 0, err
+ }
+
+ // Shift remaining bytes in tail over.
+ n := copy(tw.tail, tw.tail[extra:])
+ tw.tail = tw.tail[:n]
+ }
+
+ // If p is less than or equal to 4 bytes,
+// all of it is part of the tail.
+ if len(p) <= 4 {
+ tw.tail = append(tw.tail, p...)
+ return len(p), nil
+ }
+
+ // Otherwise, only the last 4 bytes are.
+ tw.tail = append(tw.tail, p[len(p)-4:]...)
+
+ p = p[:len(p)-4]
+ n, err := tw.w.Write(p)
+ return n + 4, err
+}
+
+var flateReaderPool sync.Pool
+
+func getFlateReader(r io.Reader, dict []byte) io.Reader {
+ fr, ok := flateReaderPool.Get().(io.Reader)
+ if !ok {
+ return flate.NewReaderDict(r, dict)
+ }
+ fr.(flate.Resetter).Reset(r, dict)
+ return fr
+}
+
+func putFlateReader(fr io.Reader) {
+ flateReaderPool.Put(fr)
+}
+
+type slidingWindow struct {
+ buf []byte
+}
+
+var swPoolMu sync.RWMutex
+var swPool = map[int]*sync.Pool{}
+
+func slidingWindowPool(n int) *sync.Pool {
+ swPoolMu.RLock()
+ p, ok := swPool[n]
+ swPoolMu.RUnlock()
+ if ok {
+ return p
+ }
+
+ p = &sync.Pool{}
+
+ swPoolMu.Lock()
+ swPool[n] = p
+ swPoolMu.Unlock()
+
+ return p
+}
+
+func (sw *slidingWindow) init(n int) {
+ if sw.buf != nil {
+ return
+ }
+
+ if n == 0 {
+ n = 32768
+ }
+
+ p := slidingWindowPool(n)
+ buf, ok := p.Get().([]byte)
+ if ok {
+ sw.buf = buf[:0]
+ } else {
+ sw.buf = make([]byte, 0, n)
+ }
+}
+
+func (sw *slidingWindow) close() {
+ if sw.buf == nil {
+ return
+ }
+
+ swPoolMu.Lock()
+ swPool[cap(sw.buf)].Put(sw.buf)
+ swPoolMu.Unlock()
+ sw.buf = nil
+}
+
+func (sw *slidingWindow) write(p []byte) {
+ if len(p) >= cap(sw.buf) {
+ sw.buf = sw.buf[:cap(sw.buf)]
+ p = p[len(p)-cap(sw.buf):]
+ copy(sw.buf, p)
+ return
+ }
+
+ left := cap(sw.buf) - len(sw.buf)
+ if left < len(p) {
+ // We need to shift spaceNeeded bytes from the end to make room for p at the end.
+ spaceNeeded := len(p) - left
+ copy(sw.buf, sw.buf[spaceNeeded:])
+ sw.buf = sw.buf[:len(sw.buf)-spaceNeeded]
+ }
+
+ sw.buf = append(sw.buf, p...)
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/conn.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/conn.go
new file mode 100644
index 00000000000..a41808be3fa
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/conn.go
@@ -0,0 +1,13 @@
+package websocket
+
+// MessageType represents the type of a WebSocket message.
+// See https://tools.ietf.org/html/rfc6455#section-5.6
+type MessageType int
+
+// MessageType constants.
+const (
+ // MessageText is for UTF-8 encoded text messages like JSON.
+ MessageText MessageType = iota + 1
+ // MessageBinary is for binary messages like protobufs.
+ MessageBinary
+)
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/conn_notjs.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/conn_notjs.go
new file mode 100644
index 00000000000..bb2eb22f7db
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/conn_notjs.go
@@ -0,0 +1,265 @@
+// +build !js
+
+package websocket
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "runtime"
+ "strconv"
+ "sync"
+ "sync/atomic"
+)
+
+// Conn represents a WebSocket connection.
+// All methods may be called concurrently except for Reader and Read.
+//
+// You must always read from the connection. Otherwise control
+// frames will not be handled. See Reader and CloseRead.
+//
+// Be sure to call Close on the connection when you
+// are finished with it to release associated resources.
+//
+// On any error from any method, the connection is closed
+// with an appropriate reason.
+type Conn struct {
+ subprotocol string
+ rwc io.ReadWriteCloser
+ client bool
+ copts *compressionOptions
+ flateThreshold int
+ br *bufio.Reader
+ bw *bufio.Writer
+
+ readTimeout chan context.Context
+ writeTimeout chan context.Context
+
+ // Read state.
+ readMu *mu
+ readHeaderBuf [8]byte
+ readControlBuf [maxControlPayload]byte
+ msgReader *msgReader
+ readCloseFrameErr error
+
+ // Write state.
+ msgWriterState *msgWriterState
+ writeFrameMu *mu
+ writeBuf []byte
+ writeHeaderBuf [8]byte
+ writeHeader header
+
+ closed chan struct{}
+ closeMu sync.Mutex
+ closeErr error
+ wroteClose bool
+
+ pingCounter int32
+ activePingsMu sync.Mutex
+ activePings map[string]chan<- struct{}
+}
+
+type connConfig struct {
+ subprotocol string
+ rwc io.ReadWriteCloser
+ client bool
+ copts *compressionOptions
+ flateThreshold int
+
+ br *bufio.Reader
+ bw *bufio.Writer
+}
+
+func newConn(cfg connConfig) *Conn {
+ c := &Conn{
+ subprotocol: cfg.subprotocol,
+ rwc: cfg.rwc,
+ client: cfg.client,
+ copts: cfg.copts,
+ flateThreshold: cfg.flateThreshold,
+
+ br: cfg.br,
+ bw: cfg.bw,
+
+ readTimeout: make(chan context.Context),
+ writeTimeout: make(chan context.Context),
+
+ closed: make(chan struct{}),
+ activePings: make(map[string]chan<- struct{}),
+ }
+
+ c.readMu = newMu(c)
+ c.writeFrameMu = newMu(c)
+
+ c.msgReader = newMsgReader(c)
+
+ c.msgWriterState = newMsgWriterState(c)
+ if c.client {
+ c.writeBuf = extractBufioWriterBuf(c.bw, c.rwc)
+ }
+
+ if c.flate() && c.flateThreshold == 0 {
+ c.flateThreshold = 128
+ if !c.msgWriterState.flateContextTakeover() {
+ c.flateThreshold = 512
+ }
+ }
+
+ runtime.SetFinalizer(c, func(c *Conn) {
+ c.close(errors.New("connection garbage collected"))
+ })
+
+ go c.timeoutLoop()
+
+ return c
+}
+
+// Subprotocol returns the negotiated subprotocol.
+// An empty string means the default protocol.
+func (c *Conn) Subprotocol() string {
+ return c.subprotocol
+}
+
+func (c *Conn) close(err error) {
+ c.closeMu.Lock()
+ defer c.closeMu.Unlock()
+
+ if c.isClosed() {
+ return
+ }
+ c.setCloseErrLocked(err)
+ close(c.closed)
+ runtime.SetFinalizer(c, nil)
+
+ // Have to close after c.closed is closed to ensure any goroutine that wakes up
+ // from the connection being closed also sees that c.closed is closed and returns
+ // closeErr.
+ c.rwc.Close()
+
+ go func() {
+ c.msgWriterState.close()
+
+ c.msgReader.close()
+ }()
+}
+
+func (c *Conn) timeoutLoop() {
+ readCtx := context.Background()
+ writeCtx := context.Background()
+
+ for {
+ select {
+ case <-c.closed:
+ return
+
+ case writeCtx = <-c.writeTimeout:
+ case readCtx = <-c.readTimeout:
+
+ case <-readCtx.Done():
+ c.setCloseErr(fmt.Errorf("read timed out: %w", readCtx.Err()))
+ go c.writeError(StatusPolicyViolation, errors.New("timed out"))
+ case <-writeCtx.Done():
+ c.close(fmt.Errorf("write timed out: %w", writeCtx.Err()))
+ return
+ }
+ }
+}
+
+func (c *Conn) flate() bool {
+ return c.copts != nil
+}
+
+// Ping sends a ping to the peer and waits for a pong.
+// Use this to measure latency or ensure the peer is responsive.
+// Ping must be called concurrently with Reader as it does
+// not read from the connection but instead waits for a Reader call
+// to read the pong.
+//
+// TCP Keepalives should suffice for most use cases.
+func (c *Conn) Ping(ctx context.Context) error {
+ p := atomic.AddInt32(&c.pingCounter, 1)
+
+ err := c.ping(ctx, strconv.Itoa(int(p)))
+ if err != nil {
+ return fmt.Errorf("failed to ping: %w", err)
+ }
+ return nil
+}
+
+func (c *Conn) ping(ctx context.Context, p string) error {
+ pong := make(chan struct{})
+
+ c.activePingsMu.Lock()
+ c.activePings[p] = pong
+ c.activePingsMu.Unlock()
+
+ defer func() {
+ c.activePingsMu.Lock()
+ delete(c.activePings, p)
+ c.activePingsMu.Unlock()
+ }()
+
+ err := c.writeControl(ctx, opPing, []byte(p))
+ if err != nil {
+ return err
+ }
+
+ select {
+ case <-c.closed:
+ return c.closeErr
+ case <-ctx.Done():
+ err := fmt.Errorf("failed to wait for pong: %w", ctx.Err())
+ c.close(err)
+ return err
+ case <-pong:
+ return nil
+ }
+}
+
+type mu struct {
+ c *Conn
+ ch chan struct{}
+}
+
+func newMu(c *Conn) *mu {
+ return &mu{
+ c: c,
+ ch: make(chan struct{}, 1),
+ }
+}
+
+func (m *mu) forceLock() {
+ m.ch <- struct{}{}
+}
+
+func (m *mu) lock(ctx context.Context) error {
+ select {
+ case <-m.c.closed:
+ return m.c.closeErr
+ case <-ctx.Done():
+ err := fmt.Errorf("failed to acquire lock: %w", ctx.Err())
+ m.c.close(err)
+ return err
+ case m.ch <- struct{}{}:
+ // To make sure the connection is certainly alive.
+ // As it's possible the send on m.ch was selected
+ // over the receive on closed.
+ select {
+ case <-m.c.closed:
+ // Make sure to release.
+ m.unlock()
+ return m.c.closeErr
+ default:
+ }
+ return nil
+ }
+}
+
+func (m *mu) unlock() {
+ select {
+ case <-m.ch:
+ default:
+ }
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/dial.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/dial.go
new file mode 100644
index 00000000000..2b25e3517d6
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/dial.go
@@ -0,0 +1,287 @@
+// +build !js
+
+package websocket
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "crypto/rand"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+ "time"
+
+ "nhooyr.io/websocket/internal/errd"
+)
+
+// DialOptions represents Dial's options.
+type DialOptions struct {
+ // HTTPClient is used for the connection.
+ // Its Transport must return writable bodies for WebSocket handshakes.
+ // http.Transport does beginning with Go 1.12.
+ HTTPClient *http.Client
+
+ // HTTPHeader specifies the HTTP headers included in the handshake request.
+ HTTPHeader http.Header
+
+ // Subprotocols lists the WebSocket subprotocols to negotiate with the server.
+ Subprotocols []string
+
+ // CompressionMode controls the compression mode.
+ // Defaults to CompressionNoContextTakeover.
+ //
+ // See docs on CompressionMode for details.
+ CompressionMode CompressionMode
+
+ // CompressionThreshold controls the minimum size of a message before compression is applied.
+ //
+ // Defaults to 512 bytes for CompressionNoContextTakeover and 128 bytes
+ // for CompressionContextTakeover.
+ CompressionThreshold int
+}
+
// Dial performs a WebSocket handshake on url.
//
// The response is the WebSocket handshake response from the server.
// You never need to close resp.Body yourself.
//
// If an error occurs, the returned response may be non-nil.
// However, you can only read the first 1024 bytes of the body.
//
// This function requires at least Go 1.12 as it uses a new feature
// in net/http to perform WebSocket handshakes.
// See docs on the HTTPClient option and https://github.com/golang/go/issues/26937#issuecomment-415855861
//
// URLs with http/https schemes will work and are interpreted as ws/wss.
func Dial(ctx context.Context, u string, opts *DialOptions) (*Conn, *http.Response, error) {
	// A nil rand makes secWebSocketKey fall back to crypto/rand.
	return dial(ctx, u, opts, nil)
}
+
// dial implements Dial. rand overrides the entropy source used for the
// Sec-WebSocket-Key (nil means crypto/rand); it exists for tests.
func dial(ctx context.Context, urls string, opts *DialOptions, rand io.Reader) (_ *Conn, _ *http.Response, err error) {
	defer errd.Wrap(&err, "failed to WebSocket dial")

	if opts == nil {
		opts = &DialOptions{}
	}

	// Shallow copy so the defaulting below does not mutate the caller's options.
	opts = &*opts
	if opts.HTTPClient == nil {
		opts.HTTPClient = http.DefaultClient
	}
	if opts.HTTPHeader == nil {
		opts.HTTPHeader = http.Header{}
	}

	secWebSocketKey, err := secWebSocketKey(rand)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to generate Sec-WebSocket-Key: %w", err)
	}

	var copts *compressionOptions
	if opts.CompressionMode != CompressionDisabled {
		copts = opts.CompressionMode.opts()
	}

	resp, err := handshakeRequest(ctx, urls, opts, copts, secWebSocketKey)
	if err != nil {
		return nil, resp, err
	}
	respBody := resp.Body
	resp.Body = nil
	// On any later failure (err is the named return), replace resp.Body
	// with up to 1024 bytes of the original body so callers can debug.
	defer func() {
		if err != nil {
			// We read a bit of the body for easier debugging.
			r := io.LimitReader(respBody, 1024)

			// Bound how long we wait for those bytes by force-closing
			// the body after 3 seconds.
			timer := time.AfterFunc(time.Second*3, func() {
				respBody.Close()
			})
			defer timer.Stop()

			b, _ := ioutil.ReadAll(r)
			respBody.Close()
			resp.Body = ioutil.NopCloser(bytes.NewReader(b))
		}
	}()

	copts, err = verifyServerResponse(opts, copts, secWebSocketKey, resp)
	if err != nil {
		return nil, resp, err
	}

	// With a successful upgrade the response body doubles as the raw
	// bidirectional connection (Go 1.12+ behavior of http.Transport).
	rwc, ok := respBody.(io.ReadWriteCloser)
	if !ok {
		return nil, resp, fmt.Errorf("response body is not a io.ReadWriteCloser: %T", respBody)
	}

	return newConn(connConfig{
		subprotocol: resp.Header.Get("Sec-WebSocket-Protocol"),
		rwc: rwc,
		client: true,
		copts: copts,
		flateThreshold: opts.CompressionThreshold,
		br: getBufioReader(rwc),
		bw: getBufioWriter(rwc),
	}), resp, nil
}
+
+func handshakeRequest(ctx context.Context, urls string, opts *DialOptions, copts *compressionOptions, secWebSocketKey string) (*http.Response, error) {
+ if opts.HTTPClient.Timeout > 0 {
+ return nil, errors.New("use context for cancellation instead of http.Client.Timeout; see https://github.com/nhooyr/websocket/issues/67")
+ }
+
+ u, err := url.Parse(urls)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse url: %w", err)
+ }
+
+ switch u.Scheme {
+ case "ws":
+ u.Scheme = "http"
+ case "wss":
+ u.Scheme = "https"
+ case "http", "https":
+ default:
+ return nil, fmt.Errorf("unexpected url scheme: %q", u.Scheme)
+ }
+
+ req, _ := http.NewRequestWithContext(ctx, "GET", u.String(), nil)
+ req.Header = opts.HTTPHeader.Clone()
+ req.Header.Set("Connection", "Upgrade")
+ req.Header.Set("Upgrade", "websocket")
+ req.Header.Set("Sec-WebSocket-Version", "13")
+ req.Header.Set("Sec-WebSocket-Key", secWebSocketKey)
+ if len(opts.Subprotocols) > 0 {
+ req.Header.Set("Sec-WebSocket-Protocol", strings.Join(opts.Subprotocols, ","))
+ }
+ if copts != nil {
+ copts.setHeader(req.Header)
+ }
+
+ resp, err := opts.HTTPClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to send handshake request: %w", err)
+ }
+ return resp, nil
+}
+
// secWebSocketKey returns the base64 encoding of 16 bytes read from rr,
// falling back to crypto/rand's reader when rr is nil.
func secWebSocketKey(rr io.Reader) (string, error) {
	if rr == nil {
		rr = rand.Reader
	}
	key := make([]byte, 16)
	if _, err := io.ReadFull(rr, key); err != nil {
		return "", fmt.Errorf("failed to read random data from rand.Reader: %w", err)
	}
	return base64.StdEncoding.EncodeToString(key), nil
}
+
// verifyServerResponse checks that the server's handshake response
// upgrades the connection per RFC 6455 and returns the negotiated
// compression options (nil when compression was not negotiated).
func verifyServerResponse(opts *DialOptions, copts *compressionOptions, secWebSocketKey string, resp *http.Response) (*compressionOptions, error) {
	if resp.StatusCode != http.StatusSwitchingProtocols {
		return nil, fmt.Errorf("expected handshake response status code %v but got %v", http.StatusSwitchingProtocols, resp.StatusCode)
	}

	if !headerContainsToken(resp.Header, "Connection", "Upgrade") {
		return nil, fmt.Errorf("WebSocket protocol violation: Connection header %q does not contain Upgrade", resp.Header.Get("Connection"))
	}

	if !headerContainsToken(resp.Header, "Upgrade", "WebSocket") {
		return nil, fmt.Errorf("WebSocket protocol violation: Upgrade header %q does not contain websocket", resp.Header.Get("Upgrade"))
	}

	// The accept token proves the server actually processed our key.
	if resp.Header.Get("Sec-WebSocket-Accept") != secWebSocketAccept(secWebSocketKey) {
		return nil, fmt.Errorf("WebSocket protocol violation: invalid Sec-WebSocket-Accept %q, key %q",
			resp.Header.Get("Sec-WebSocket-Accept"),
			secWebSocketKey,
		)
	}

	err := verifySubprotocol(opts.Subprotocols, resp)
	if err != nil {
		return nil, err
	}

	return verifyServerExtensions(copts, resp.Header)
}
+
// verifySubprotocol checks that the subprotocol the server selected,
// if any, is one of the subprotocols we offered (case-insensitively).
func verifySubprotocol(subprotos []string, resp *http.Response) error {
	negotiated := resp.Header.Get("Sec-WebSocket-Protocol")
	if negotiated == "" {
		// The server selected no subprotocol; nothing to validate.
		return nil
	}

	for _, offered := range subprotos {
		if strings.EqualFold(offered, negotiated) {
			return nil
		}
	}

	return fmt.Errorf("WebSocket protocol violation: unexpected Sec-WebSocket-Protocol from server: %q", negotiated)
}
+
+func verifyServerExtensions(copts *compressionOptions, h http.Header) (*compressionOptions, error) {
+ exts := websocketExtensions(h)
+ if len(exts) == 0 {
+ return nil, nil
+ }
+
+ ext := exts[0]
+ if ext.name != "permessage-deflate" || len(exts) > 1 || copts == nil {
+ return nil, fmt.Errorf("WebSocket protcol violation: unsupported extensions from server: %+v", exts[1:])
+ }
+
+ copts = &*copts
+
+ for _, p := range ext.params {
+ switch p {
+ case "client_no_context_takeover":
+ copts.clientNoContextTakeover = true
+ continue
+ case "server_no_context_takeover":
+ copts.serverNoContextTakeover = true
+ continue
+ }
+
+ return nil, fmt.Errorf("unsupported permessage-deflate parameter: %q", p)
+ }
+
+ return copts, nil
+}
+
var bufioReaderPool = sync.Pool{
	New: func() interface{} { return bufio.NewReader(nil) },
}

// getBufioReader returns a pooled *bufio.Reader reset to read from r.
func getBufioReader(r io.Reader) *bufio.Reader {
	br := bufioReaderPool.Get().(*bufio.Reader)
	br.Reset(r)
	return br
}

// putBufioReader returns br to the pool for reuse.
func putBufioReader(br *bufio.Reader) {
	bufioReaderPool.Put(br)
}

var bufioWriterPool = sync.Pool{
	New: func() interface{} { return bufio.NewWriter(nil) },
}

// getBufioWriter returns a pooled *bufio.Writer reset to write to w.
func getBufioWriter(w io.Writer) *bufio.Writer {
	bw := bufioWriterPool.Get().(*bufio.Writer)
	bw.Reset(w)
	return bw
}

// putBufioWriter returns bw to the pool for reuse.
func putBufioWriter(bw *bufio.Writer) {
	bufioWriterPool.Put(bw)
}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/doc.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/doc.go
new file mode 100644
index 00000000000..efa920e3b61
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/doc.go
@@ -0,0 +1,32 @@
+// +build !js
+
+// Package websocket implements the RFC 6455 WebSocket protocol.
+//
+// https://tools.ietf.org/html/rfc6455
+//
+// Use Dial to dial a WebSocket server.
+//
+// Use Accept to accept a WebSocket client.
+//
+// Conn represents the resulting WebSocket connection.
+//
+// The examples are the best way to understand how to correctly use the library.
+//
+// The wsjson and wspb subpackages contain helpers for JSON and protobuf messages.
+//
+// More documentation at https://nhooyr.io/websocket.
+//
+// Wasm
+//
+// The client side supports compiling to Wasm.
+// It wraps the WebSocket browser API.
+//
+// See https://developer.mozilla.org/en-US/docs/Web/API/WebSocket
+//
+// Some important caveats to be aware of:
+//
+// - Accept always errors out
+// - Conn.Ping is no-op
+// - HTTPClient, HTTPHeader and CompressionMode in DialOptions are no-op
+// - *http.Response from Dial is &http.Response{} with a 101 status code on success
+package websocket // import "nhooyr.io/websocket"
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/frame.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/frame.go
new file mode 100644
index 00000000000..2a036f944ac
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/frame.go
@@ -0,0 +1,294 @@
+package websocket
+
+import (
+ "bufio"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+ "math/bits"
+
+ "nhooyr.io/websocket/internal/errd"
+)
+
// opcode represents a WebSocket opcode.
type opcode int

// Frame opcodes.
// https://tools.ietf.org/html/rfc6455#section-11.8.
const (
	opContinuation opcode = iota
	opText
	opBinary
	// 3 - 7 are reserved for further non-control frames.
	_
	_
	_
	_
	_
	opClose
	opPing
	opPong
	// 11-16 are reserved for further control frames.
)

// header represents a WebSocket frame header.
// See https://tools.ietf.org/html/rfc6455#section-5.2.
type header struct {
	fin    bool // final fragment of a message
	rsv1   bool // reserved bits; extension-defined per RFC 6455
	rsv2   bool
	rsv3   bool
	opcode opcode

	payloadLength int64

	masked  bool
	maskKey uint32 // stored little-endian; see readFrameHeader and mask
}
+
// readFrameHeader reads a header from the reader.
// See https://tools.ietf.org/html/rfc6455#section-5.2.
//
// readBuf is scratch space for the extended payload length and mask key;
// it is assumed to be at least 8 bytes long — TODO confirm at call sites.
func readFrameHeader(r *bufio.Reader, readBuf []byte) (h header, err error) {
	defer errd.Wrap(&err, "failed to read frame header")

	b, err := r.ReadByte()
	if err != nil {
		return header{}, err
	}

	// First byte: FIN, RSV1-3 flags and the 4-bit opcode.
	h.fin = b&(1<<7) != 0
	h.rsv1 = b&(1<<6) != 0
	h.rsv2 = b&(1<<5) != 0
	h.rsv3 = b&(1<<4) != 0

	h.opcode = opcode(b & 0xf)

	b, err = r.ReadByte()
	if err != nil {
		return header{}, err
	}

	// Second byte: mask flag plus 7-bit payload length.
	h.masked = b&(1<<7) != 0

	payloadLength := b &^ (1 << 7)
	switch {
	case payloadLength < 126:
		h.payloadLength = int64(payloadLength)
	case payloadLength == 126:
		// 126 signals a 16-bit extended payload length.
		_, err = io.ReadFull(r, readBuf[:2])
		h.payloadLength = int64(binary.BigEndian.Uint16(readBuf))
	case payloadLength == 127:
		// 127 signals a 64-bit extended payload length.
		_, err = io.ReadFull(r, readBuf)
		h.payloadLength = int64(binary.BigEndian.Uint64(readBuf))
	}
	if err != nil {
		return header{}, err
	}

	// A 64-bit length with the top bit set becomes negative here.
	if h.payloadLength < 0 {
		return header{}, fmt.Errorf("received negative payload length: %v", h.payloadLength)
	}

	if h.masked {
		_, err = io.ReadFull(r, readBuf[:4])
		if err != nil {
			return header{}, err
		}
		// Read little-endian to match the mask function.
		h.maskKey = binary.LittleEndian.Uint32(readBuf)
	}

	return h, nil
}
+
// maxControlPayload is the maximum length of a control frame payload.
// See https://tools.ietf.org/html/rfc6455#section-5.5.
const maxControlPayload = 125

// writeFrameHeader writes the bytes of the header to w.
// See https://tools.ietf.org/html/rfc6455#section-5.2
//
// buf is scratch space assumed to hold at least 8 bytes — TODO confirm
// at call sites.
func writeFrameHeader(h header, w *bufio.Writer, buf []byte) (err error) {
	defer errd.Wrap(&err, "failed to write frame header")

	// First byte: FIN, RSV1-3 flags and the opcode.
	var b byte
	if h.fin {
		b |= 1 << 7
	}
	if h.rsv1 {
		b |= 1 << 6
	}
	if h.rsv2 {
		b |= 1 << 5
	}
	if h.rsv3 {
		b |= 1 << 4
	}

	b |= byte(h.opcode)

	err = w.WriteByte(b)
	if err != nil {
		return err
	}

	// Second byte: mask flag plus the 7-bit length field.
	lengthByte := byte(0)
	if h.masked {
		lengthByte |= 1 << 7
	}

	// 126 and 127 signal 16-bit and 64-bit extended lengths, written below.
	switch {
	case h.payloadLength > math.MaxUint16:
		lengthByte |= 127
	case h.payloadLength > 125:
		lengthByte |= 126
	case h.payloadLength >= 0:
		lengthByte |= byte(h.payloadLength)
	}
	err = w.WriteByte(lengthByte)
	if err != nil {
		return err
	}

	// Extended payload length, big-endian.
	switch {
	case h.payloadLength > math.MaxUint16:
		binary.BigEndian.PutUint64(buf, uint64(h.payloadLength))
		_, err = w.Write(buf)
	case h.payloadLength > 125:
		binary.BigEndian.PutUint16(buf, uint16(h.payloadLength))
		_, err = w.Write(buf[:2])
	}
	if err != nil {
		return err
	}

	if h.masked {
		// The mask key is written little-endian to match mask().
		binary.LittleEndian.PutUint32(buf, h.maskKey)
		_, err = w.Write(buf[:4])
		if err != nil {
			return err
		}
	}

	return nil
}
+
+// mask applies the WebSocket masking algorithm to p
+// with the given key.
+// See https://tools.ietf.org/html/rfc6455#section-5.3
+//
+// The returned value is the correctly rotated key to
+// to continue to mask/unmask the message.
+//
+// It is optimized for LittleEndian and expects the key
+// to be in little endian.
+//
+// See https://github.com/golang/go/issues/31586
func mask(key uint32, b []byte) uint32 {
	// Duplicate the 32-bit key into a 64-bit word so a full word can be
	// xored at a time.
	key64 := uint64(key)<<32 | uint64(key)

	// xor 8 bytes at a time while possible.
	for len(b) >= 8 {
		v := binary.LittleEndian.Uint64(b)
		binary.LittleEndian.PutUint64(b, v^key64)
		b = b[8:]
	}

	// xor a remaining 4-byte word if present.
	for len(b) >= 4 {
		v := binary.LittleEndian.Uint32(b)
		binary.LittleEndian.PutUint32(b, v^key)
		b = b[4:]
	}

	// xor the final bytes one at a time, rotating the key so the
	// returned value can continue masking the next chunk.
	for i := range b {
		b[i] ^= byte(key)
		key = bits.RotateLeft32(key, -8)
	}

	return key
}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/go.mod b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/go.mod
new file mode 100644
index 00000000000..60377823cba
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/go.mod
@@ -0,0 +1,14 @@
+module nhooyr.io/websocket
+
+go 1.13
+
+require (
+ github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee // indirect
+ github.com/gobwas/pool v0.2.0 // indirect
+ github.com/gobwas/ws v1.0.2
+ github.com/golang/protobuf v1.3.5
+ github.com/google/go-cmp v0.4.0
+ github.com/gorilla/websocket v1.4.1
+ github.com/klauspost/compress v1.10.3
+ golang.org/x/time v0.0.0-20191024005414-555d28b269f0
+)
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/bpool/bpool.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/bpool/bpool.go
new file mode 100644
index 00000000000..aa826fba2b1
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/bpool/bpool.go
@@ -0,0 +1,24 @@
+package bpool
+
+import (
+ "bytes"
+ "sync"
+)
+
var bpool = sync.Pool{
	New: func() interface{} { return &bytes.Buffer{} },
}

// Get returns a buffer from the pool, allocating a fresh empty buffer
// when the pool has none to hand out.
func Get() *bytes.Buffer {
	return bpool.Get().(*bytes.Buffer)
}

// Put resets b and returns it to the pool for reuse.
func Put(b *bytes.Buffer) {
	b.Reset()
	bpool.Put(b)
}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/errd/wrap.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/errd/wrap.go
new file mode 100644
index 00000000000..6e779131af8
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/errd/wrap.go
@@ -0,0 +1,14 @@
+package errd
+
+import (
+ "fmt"
+)
+
// Wrap wraps *err with fmt.Errorf if it is non-nil, appending ": %w" to
// the format so the original error stays unwrappable via errors.Is/As.
// Intended for use with defer and a named error return.
// Inspired by https://github.com/golang/go/issues/32676.
func Wrap(err *error, f string, v ...interface{}) {
	if *err == nil {
		return
	}
	*err = fmt.Errorf(f+": %w", append(v, *err)...)
}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go
new file mode 100644
index 00000000000..26ffb45625b
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go
@@ -0,0 +1,170 @@
+// +build js
+
+// Package wsjs implements typed access to the browser javascript WebSocket API.
+//
+// https://developer.mozilla.org/en-US/docs/Web/API/WebSocket
+package wsjs
+
+import (
+ "syscall/js"
+)
+
// handleJSError is a deferred helper that converts a js.Error panic into
// an ordinary error assigned to *err, invoking onErr (when non-nil) so
// the caller can reset any partial result. Any other panic is re-raised.
func handleJSError(err *error, onErr func()) {
	r := recover()

	if jsErr, ok := r.(js.Error); ok {
		*err = jsErr

		if onErr != nil {
			onErr()
		}
		return
	}

	if r != nil {
		panic(r)
	}
}
+
// New is a wrapper around the javascript WebSocket constructor.
func New(url string, protocols []string) (c WebSocket, err error) {
	// A constructor exception surfaces as a js.Error panic; handleJSError
	// converts it into err and the callback resets c to the zero value.
	defer handleJSError(&err, func() {
		c = WebSocket{}
	})

	// js.Value.New needs []interface{}, not []string.
	jsProtocols := make([]interface{}, len(protocols))
	for i, p := range protocols {
		jsProtocols[i] = p
	}

	c = WebSocket{
		v: js.Global().Get("WebSocket").New(url, jsProtocols),
	}

	// Deliver binary messages as ArrayBuffers (consumed by OnMessage).
	c.setBinaryType("arraybuffer")

	return c, nil
}
+
// WebSocket is a wrapper around a javascript WebSocket object.
type WebSocket struct {
	v js.Value // the underlying javascript WebSocket instance
}

// setBinaryType sets the WebSocket's binaryType property, which controls
// how binary messages are delivered to message handlers.
func (c WebSocket) setBinaryType(typ string) {
	// NOTE(review): string(typ) is a redundant conversion; typ is already a string.
	c.v.Set("binaryType", string(typ))
}

// addEventListener registers fn for eventType and returns a function
// that removes the listener and releases the wrapped js callback.
func (c WebSocket) addEventListener(eventType string, fn func(e js.Value)) func() {
	f := js.FuncOf(func(this js.Value, args []js.Value) interface{} {
		fn(args[0])
		return nil
	})
	c.v.Call("addEventListener", eventType, f)

	return func() {
		c.v.Call("removeEventListener", eventType, f)
		// Release frees the resources held by the js callback.
		f.Release()
	}
}

// CloseEvent is the type passed to a WebSocket close handler.
type CloseEvent struct {
	Code     uint16
	Reason   string
	WasClean bool
}

// OnClose registers a function to be called when the WebSocket is closed.
func (c WebSocket) OnClose(fn func(CloseEvent)) (remove func()) {
	return c.addEventListener("close", func(e js.Value) {
		ce := CloseEvent{
			Code:     uint16(e.Get("code").Int()),
			Reason:   e.Get("reason").String(),
			WasClean: e.Get("wasClean").Bool(),
		}
		fn(ce)
	})
}

// OnError registers a function to be called when there is an error
// with the WebSocket.
func (c WebSocket) OnError(fn func(e js.Value)) (remove func()) {
	return c.addEventListener("error", fn)
}

// MessageEvent is the type passed to a message handler.
type MessageEvent struct {
	// string or []byte.
	Data interface{}

	// There are more fields to the interface but we don't use them.
	// See https://developer.mozilla.org/en-US/docs/Web/API/MessageEvent
}

// OnMessage registers a function to be called when the WebSocket receives a message.
func (c WebSocket) OnMessage(fn func(m MessageEvent)) (remove func()) {
	return c.addEventListener("message", func(e js.Value) {
		var data interface{}

		// Text frames arrive as js strings; binary frames arrive as
		// ArrayBuffers (see setBinaryType in New) and are copied to []byte.
		arrayBuffer := e.Get("data")
		if arrayBuffer.Type() == js.TypeString {
			data = arrayBuffer.String()
		} else {
			data = extractArrayBuffer(arrayBuffer)
		}

		me := MessageEvent{
			Data: data,
		}
		fn(me)

		// NOTE(review): this bare return is redundant.
		return
	})
}

// Subprotocol returns the WebSocket subprotocol in use.
func (c WebSocket) Subprotocol() string {
	return c.v.Get("protocol").String()
}

// OnOpen registers a function to be called when the WebSocket is opened.
func (c WebSocket) OnOpen(fn func(e js.Value)) (remove func()) {
	return c.addEventListener("open", fn)
}

// Close closes the WebSocket with the given code and reason.
// A js exception is converted into err by handleJSError.
func (c WebSocket) Close(code int, reason string) (err error) {
	defer handleJSError(&err, nil)
	c.v.Call("close", code, reason)
	return err
}

// SendText sends the given string as a text message
// on the WebSocket.
func (c WebSocket) SendText(v string) (err error) {
	defer handleJSError(&err, nil)
	c.v.Call("send", v)
	return err
}

// SendBytes sends the given message as a binary message
// on the WebSocket.
func (c WebSocket) SendBytes(v []byte) (err error) {
	defer handleJSError(&err, nil)
	c.v.Call("send", uint8Array(v))
	return err
}

// extractArrayBuffer copies the contents of a js ArrayBuffer into a
// newly allocated Go []byte.
func extractArrayBuffer(arrayBuffer js.Value) []byte {
	uint8Array := js.Global().Get("Uint8Array").New(arrayBuffer)
	dst := make([]byte, uint8Array.Length())
	js.CopyBytesToGo(dst, uint8Array)
	return dst
}

// uint8Array copies src into a newly allocated js Uint8Array.
func uint8Array(src []byte) js.Value {
	uint8Array := js.Global().Get("Uint8Array").New(len(src))
	js.CopyBytesToJS(uint8Array, src)
	return uint8Array
}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/xsync/go.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/xsync/go.go
new file mode 100644
index 00000000000..7a61f27fa2a
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/xsync/go.go
@@ -0,0 +1,25 @@
+package xsync
+
+import (
+ "fmt"
+)
+
// Go runs fn in a new goroutine and returns a 1-buffered channel that
// will receive fn's error, or an error describing a panic inside fn.
func Go(fn func() error) <-chan error {
	out := make(chan error, 1)
	go func() {
		defer func() {
			if r := recover(); r != nil {
				// Non-blocking send in case a result was already delivered.
				select {
				case out <- fmt.Errorf("panic in go fn: %v", r):
				default:
				}
			}
		}()
		out <- fn()
	}()

	return out
}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/xsync/int64.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/xsync/int64.go
new file mode 100644
index 00000000000..a0c40204156
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/xsync/int64.go
@@ -0,0 +1,23 @@
+package xsync
+
+import (
+ "sync/atomic"
+)
+
// Int64 represents an atomic int64.
//
// atomic.Value is used instead of atomic.LoadInt64/StoreInt64 because
// those do not work reliably on 32 bit computers; we need full 64 bit
// integers.
type Int64 struct {
	v atomic.Value
}

// Load returns the stored value, or 0 if Store has never been called.
func (i *Int64) Load() int64 {
	n, _ := i.v.Load().(int64)
	return n
}

// Store atomically replaces the stored value.
func (i *Int64) Store(n int64) {
	i.v.Store(n)
}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/netconn.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/netconn.go
new file mode 100644
index 00000000000..64aadf0b998
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/netconn.go
@@ -0,0 +1,166 @@
+package websocket
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "math"
+ "net"
+ "sync"
+ "time"
+)
+
+// NetConn converts a *websocket.Conn into a net.Conn.
+//
+// It's for tunneling arbitrary protocols over WebSockets.
+// Few users of the library will need this but it's tricky to implement
+// correctly and so provided in the library.
+// See https://github.com/nhooyr/websocket/issues/100.
+//
+// Every Write to the net.Conn will correspond to a message write of
+// the given type on *websocket.Conn.
+//
+// The passed ctx bounds the lifetime of the net.Conn. If cancelled,
+// all reads and writes on the net.Conn will be cancelled.
+//
+// If a message is read that is not of the correct type, the connection
+// will be closed with StatusUnsupportedData and an error will be returned.
+//
+// Close will close the *websocket.Conn with StatusNormalClosure.
+//
+// When a deadline is hit, the connection will be closed. This is
+// different from most net.Conn implementations where only the
+// reading/writing goroutines are interrupted but the connection is kept alive.
+//
+// The Addr methods will return a mock net.Addr that returns "websocket" for Network
+// and "websocket/unknown-addr" for String.
+//
+// A received StatusNormalClosure or StatusGoingAway close frame will be translated to
+// io.EOF when reading.
// NetConn wires each deadline timer to context cancellation: when a
// timer fires it cancels the corresponding read/write context.
func NetConn(ctx context.Context, c *Conn, msgType MessageType) net.Conn {
	nc := &netConn{
		c: c,
		msgType: msgType,
	}

	var cancel context.CancelFunc
	nc.writeContext, cancel = context.WithCancel(ctx)
	// Created with an effectively infinite duration and stopped at once,
	// so the timer only ever fires after SetWriteDeadline resets it.
	nc.writeTimer = time.AfterFunc(math.MaxInt64, cancel)
	if !nc.writeTimer.Stop() {
		<-nc.writeTimer.C
	}

	nc.readContext, cancel = context.WithCancel(ctx)
	nc.readTimer = time.AfterFunc(math.MaxInt64, cancel)
	if !nc.readTimer.Stop() {
		<-nc.readTimer.C
	}

	return nc
}
+
// netConn adapts a *websocket.Conn to the net.Conn interface.
type netConn struct {
	c *Conn
	msgType MessageType // message type used for every Write

	// Deadline timers cancel the matching context when they fire,
	// aborting in-flight operations (see NetConn).
	writeTimer *time.Timer
	writeContext context.Context

	readTimer *time.Timer
	readContext context.Context

	readMu sync.Mutex // guards eofed and reader
	eofed bool // set once a graceful close was translated into io.EOF
	reader io.Reader // in-progress message reader; nil between messages
}

// Compile-time check that netConn implements net.Conn.
var _ net.Conn = &netConn{}
+
// Close closes the underlying WebSocket connection with StatusNormalClosure.
func (c *netConn) Close() error {
	return c.c.Close(StatusNormalClosure, "")
}
+
+func (c *netConn) Write(p []byte) (int, error) {
+ err := c.c.Write(c.writeContext, c.msgType, p)
+ if err != nil {
+ return 0, err
+ }
+ return len(p), nil
+}
+
// Read streams the payload of incoming messages of the expected type;
// message boundaries are not preserved.
func (c *netConn) Read(p []byte) (int, error) {
	c.readMu.Lock()
	defer c.readMu.Unlock()

	// After a graceful close, keep reporting EOF.
	if c.eofed {
		return 0, io.EOF
	}

	// No message in progress: wait for the next one.
	if c.reader == nil {
		typ, r, err := c.c.Reader(c.readContext)
		if err != nil {
			switch CloseStatus(err) {
			case StatusNormalClosure, StatusGoingAway:
				// Translate a graceful close into end-of-stream.
				c.eofed = true
				return 0, io.EOF
			}
			return 0, err
		}
		if typ != c.msgType {
			// A wrong message type is a violation for NetConn streams.
			err := fmt.Errorf("unexpected frame type read (expected %v): %v", c.msgType, typ)
			c.c.Close(StatusUnsupportedData, err.Error())
			return 0, err
		}
		c.reader = r
	}

	n, err := c.reader.Read(p)
	if err == io.EOF {
		// End of this message only; clear the reader and suppress EOF.
		c.reader = nil
		err = nil
	}
	return n, err
}
+
// websocketAddr is the mock net.Addr reported by netConn; the underlying
// WebSocket transport exposes no real endpoint address here.
type websocketAddr struct{}

// Network reports the mock network name.
func (a websocketAddr) Network() string {
	return "websocket"
}

// String reports a placeholder address.
func (a websocketAddr) String() string {
	return "websocket/unknown-addr"
}
+
// RemoteAddr returns a mock address; the real peer address is not exposed.
func (c *netConn) RemoteAddr() net.Addr {
	return websocketAddr{}
}

// LocalAddr returns the same mock address as RemoteAddr.
func (c *netConn) LocalAddr() net.Addr {
	return websocketAddr{}
}
+
// SetDeadline sets both the read and write deadlines.
// Both setters always return nil, so their results are ignored.
func (c *netConn) SetDeadline(t time.Time) error {
	c.SetWriteDeadline(t)
	c.SetReadDeadline(t)
	return nil
}
+
+func (c *netConn) SetWriteDeadline(t time.Time) error {
+ if t.IsZero() {
+ c.writeTimer.Stop()
+ } else {
+ c.writeTimer.Reset(t.Sub(time.Now()))
+ }
+ return nil
+}
+
+func (c *netConn) SetReadDeadline(t time.Time) error {
+ if t.IsZero() {
+ c.readTimer.Stop()
+ } else {
+ c.readTimer.Reset(t.Sub(time.Now()))
+ }
+ return nil
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/read.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/read.go
new file mode 100644
index 00000000000..afd08cc7cde
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/read.go
@@ -0,0 +1,471 @@
+// +build !js
+
+package websocket
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "strings"
+ "time"
+
+ "nhooyr.io/websocket/internal/errd"
+ "nhooyr.io/websocket/internal/xsync"
+)
+
+// Reader reads from the connection until until there is a WebSocket
+// data message to be read. It will handle ping, pong and close frames as appropriate.
+//
+// It returns the type of the message and an io.Reader to read it.
+// The passed context will also bound the reader.
+// Ensure you read to EOF otherwise the connection will hang.
+//
+// Call CloseRead if you do not expect any data messages from the peer.
+//
+// Only one Reader may be open at a time.
+func (c *Conn) Reader(ctx context.Context) (MessageType, io.Reader, error) {
+ return c.reader(ctx)
+}
+
+// Read is a convenience method around Reader to read a single message
+// from the connection.
+func (c *Conn) Read(ctx context.Context) (MessageType, []byte, error) {
+ typ, r, err := c.Reader(ctx)
+ if err != nil {
+ return 0, nil, err
+ }
+
+ b, err := ioutil.ReadAll(r)
+ return typ, b, err
+}
+
+// CloseRead starts a goroutine to read from the connection until it is closed
+// or a data message is received.
+//
+// Once CloseRead is called you cannot read any messages from the connection.
+// The returned context will be cancelled when the connection is closed.
+//
+// If a data message is received, the connection will be closed with StatusPolicyViolation.
+//
+// Call CloseRead when you do not expect to read any more messages.
+// Since it actively reads from the connection, it will ensure that ping, pong and close
+// frames are responded to. This means c.Ping and c.Close will still work as expected.
+func (c *Conn) CloseRead(ctx context.Context) context.Context {
+ ctx, cancel := context.WithCancel(ctx)
+ go func() {
+ defer cancel()
+ c.Reader(ctx)
+ c.Close(StatusPolicyViolation, "unexpected data message")
+ }()
+ return ctx
+}
+
+// SetReadLimit sets the max number of bytes to read for a single message.
+// It applies to the Reader and Read methods.
+//
+// By default, the connection has a message read limit of 32768 bytes.
+//
+// When the limit is hit, the connection will be closed with StatusMessageTooBig.
+func (c *Conn) SetReadLimit(n int64) {
+ // We add read one more byte than the limit in case
+ // there is a fin frame that needs to be read.
+ c.msgReader.limitReader.limit.Store(n + 1)
+}
+
+const defaultReadLimit = 32768
+
+func newMsgReader(c *Conn) *msgReader {
+ mr := &msgReader{
+ c: c,
+ fin: true,
+ }
+ mr.readFunc = mr.read
+
+ mr.limitReader = newLimitReader(c, mr.readFunc, defaultReadLimit+1)
+ return mr
+}
+
+func (mr *msgReader) resetFlate() {
+ if mr.flateContextTakeover() {
+ mr.dict.init(32768)
+ }
+ if mr.flateBufio == nil {
+ mr.flateBufio = getBufioReader(mr.readFunc)
+ }
+
+ mr.flateReader = getFlateReader(mr.flateBufio, mr.dict.buf)
+ mr.limitReader.r = mr.flateReader
+ mr.flateTail.Reset(deflateMessageTail)
+}
+
+func (mr *msgReader) putFlateReader() {
+ if mr.flateReader != nil {
+ putFlateReader(mr.flateReader)
+ mr.flateReader = nil
+ }
+}
+
+func (mr *msgReader) close() {
+ mr.c.readMu.forceLock()
+ mr.putFlateReader()
+ mr.dict.close()
+ if mr.flateBufio != nil {
+ putBufioReader(mr.flateBufio)
+ }
+
+ if mr.c.client {
+ putBufioReader(mr.c.br)
+ mr.c.br = nil
+ }
+}
+
+func (mr *msgReader) flateContextTakeover() bool {
+ if mr.c.client {
+ return !mr.c.copts.serverNoContextTakeover
+ }
+ return !mr.c.copts.clientNoContextTakeover
+}
+
+func (c *Conn) readRSV1Illegal(h header) bool {
+ // If compression is disabled, rsv1 is illegal.
+ if !c.flate() {
+ return true
+ }
+ // rsv1 is only allowed on data frames beginning messages.
+ if h.opcode != opText && h.opcode != opBinary {
+ return true
+ }
+ return false
+}
+
+func (c *Conn) readLoop(ctx context.Context) (header, error) {
+ for {
+ h, err := c.readFrameHeader(ctx)
+ if err != nil {
+ return header{}, err
+ }
+
+ if h.rsv1 && c.readRSV1Illegal(h) || h.rsv2 || h.rsv3 {
+ err := fmt.Errorf("received header with unexpected rsv bits set: %v:%v:%v", h.rsv1, h.rsv2, h.rsv3)
+ c.writeError(StatusProtocolError, err)
+ return header{}, err
+ }
+
+ if !c.client && !h.masked {
+ return header{}, errors.New("received unmasked frame from client")
+ }
+
+ switch h.opcode {
+ case opClose, opPing, opPong:
+ err = c.handleControl(ctx, h)
+ if err != nil {
+ // Pass through CloseErrors when receiving a close frame.
+ if h.opcode == opClose && CloseStatus(err) != -1 {
+ return header{}, err
+ }
+ return header{}, fmt.Errorf("failed to handle control frame %v: %w", h.opcode, err)
+ }
+ case opContinuation, opText, opBinary:
+ return h, nil
+ default:
+ err := fmt.Errorf("received unknown opcode %v", h.opcode)
+ c.writeError(StatusProtocolError, err)
+ return header{}, err
+ }
+ }
+}
+
+func (c *Conn) readFrameHeader(ctx context.Context) (header, error) {
+ select {
+ case <-c.closed:
+ return header{}, c.closeErr
+ case c.readTimeout <- ctx:
+ }
+
+ h, err := readFrameHeader(c.br, c.readHeaderBuf[:])
+ if err != nil {
+ select {
+ case <-c.closed:
+ return header{}, c.closeErr
+ case <-ctx.Done():
+ return header{}, ctx.Err()
+ default:
+ c.close(err)
+ return header{}, err
+ }
+ }
+
+ select {
+ case <-c.closed:
+ return header{}, c.closeErr
+ case c.readTimeout <- context.Background():
+ }
+
+ return h, nil
+}
+
+func (c *Conn) readFramePayload(ctx context.Context, p []byte) (int, error) {
+ select {
+ case <-c.closed:
+ return 0, c.closeErr
+ case c.readTimeout <- ctx:
+ }
+
+ n, err := io.ReadFull(c.br, p)
+ if err != nil {
+ select {
+ case <-c.closed:
+ return n, c.closeErr
+ case <-ctx.Done():
+ return n, ctx.Err()
+ default:
+ err = fmt.Errorf("failed to read frame payload: %w", err)
+ c.close(err)
+ return n, err
+ }
+ }
+
+ select {
+ case <-c.closed:
+ return n, c.closeErr
+ case c.readTimeout <- context.Background():
+ }
+
+ return n, err
+}
+
+func (c *Conn) handleControl(ctx context.Context, h header) (err error) {
+ if h.payloadLength < 0 || h.payloadLength > maxControlPayload {
+ err := fmt.Errorf("received control frame payload with invalid length: %d", h.payloadLength)
+ c.writeError(StatusProtocolError, err)
+ return err
+ }
+
+ if !h.fin {
+ err := errors.New("received fragmented control frame")
+ c.writeError(StatusProtocolError, err)
+ return err
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, time.Second*5)
+ defer cancel()
+
+ b := c.readControlBuf[:h.payloadLength]
+ _, err = c.readFramePayload(ctx, b)
+ if err != nil {
+ return err
+ }
+
+ if h.masked {
+ mask(h.maskKey, b)
+ }
+
+ switch h.opcode {
+ case opPing:
+ return c.writeControl(ctx, opPong, b)
+ case opPong:
+ c.activePingsMu.Lock()
+ pong, ok := c.activePings[string(b)]
+ c.activePingsMu.Unlock()
+ if ok {
+ close(pong)
+ }
+ return nil
+ }
+
+ defer func() {
+ c.readCloseFrameErr = err
+ }()
+
+ ce, err := parseClosePayload(b)
+ if err != nil {
+ err = fmt.Errorf("received invalid close payload: %w", err)
+ c.writeError(StatusProtocolError, err)
+ return err
+ }
+
+ err = fmt.Errorf("received close frame: %w", ce)
+ c.setCloseErr(err)
+ c.writeClose(ce.Code, ce.Reason)
+ c.close(err)
+ return err
+}
+
+func (c *Conn) reader(ctx context.Context) (_ MessageType, _ io.Reader, err error) {
+ defer errd.Wrap(&err, "failed to get reader")
+
+ err = c.readMu.lock(ctx)
+ if err != nil {
+ return 0, nil, err
+ }
+ defer c.readMu.unlock()
+
+ if !c.msgReader.fin {
+ err = errors.New("previous message not read to completion")
+ c.close(fmt.Errorf("failed to get reader: %w", err))
+ return 0, nil, err
+ }
+
+ h, err := c.readLoop(ctx)
+ if err != nil {
+ return 0, nil, err
+ }
+
+ if h.opcode == opContinuation {
+ err := errors.New("received continuation frame without text or binary frame")
+ c.writeError(StatusProtocolError, err)
+ return 0, nil, err
+ }
+
+ c.msgReader.reset(ctx, h)
+
+ return MessageType(h.opcode), c.msgReader, nil
+}
+
+type msgReader struct {
+ c *Conn
+
+ ctx context.Context
+ flate bool
+ flateReader io.Reader
+ flateBufio *bufio.Reader
+ flateTail strings.Reader
+ limitReader *limitReader
+ dict slidingWindow
+
+ fin bool
+ payloadLength int64
+ maskKey uint32
+
+ // readerFunc(mr.Read) to avoid continuous allocations.
+ readFunc readerFunc
+}
+
+func (mr *msgReader) reset(ctx context.Context, h header) {
+ mr.ctx = ctx
+ mr.flate = h.rsv1
+ mr.limitReader.reset(mr.readFunc)
+
+ if mr.flate {
+ mr.resetFlate()
+ }
+
+ mr.setFrame(h)
+}
+
+func (mr *msgReader) setFrame(h header) {
+ mr.fin = h.fin
+ mr.payloadLength = h.payloadLength
+ mr.maskKey = h.maskKey
+}
+
+func (mr *msgReader) Read(p []byte) (n int, err error) {
+ err = mr.c.readMu.lock(mr.ctx)
+ if err != nil {
+ return 0, fmt.Errorf("failed to read: %w", err)
+ }
+ defer mr.c.readMu.unlock()
+
+ n, err = mr.limitReader.Read(p)
+ if mr.flate && mr.flateContextTakeover() {
+ p = p[:n]
+ mr.dict.write(p)
+ }
+ if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) && mr.fin && mr.flate {
+ mr.putFlateReader()
+ return n, io.EOF
+ }
+ if err != nil {
+ err = fmt.Errorf("failed to read: %w", err)
+ mr.c.close(err)
+ }
+ return n, err
+}
+
+func (mr *msgReader) read(p []byte) (int, error) {
+ for {
+ if mr.payloadLength == 0 {
+ if mr.fin {
+ if mr.flate {
+ return mr.flateTail.Read(p)
+ }
+ return 0, io.EOF
+ }
+
+ h, err := mr.c.readLoop(mr.ctx)
+ if err != nil {
+ return 0, err
+ }
+ if h.opcode != opContinuation {
+ err := errors.New("received new data message without finishing the previous message")
+ mr.c.writeError(StatusProtocolError, err)
+ return 0, err
+ }
+ mr.setFrame(h)
+
+ continue
+ }
+
+ if int64(len(p)) > mr.payloadLength {
+ p = p[:mr.payloadLength]
+ }
+
+ n, err := mr.c.readFramePayload(mr.ctx, p)
+ if err != nil {
+ return n, err
+ }
+
+ mr.payloadLength -= int64(n)
+
+ if !mr.c.client {
+ mr.maskKey = mask(mr.maskKey, p)
+ }
+
+ return n, nil
+ }
+}
+
+type limitReader struct {
+ c *Conn
+ r io.Reader
+ limit xsync.Int64
+ n int64
+}
+
+func newLimitReader(c *Conn, r io.Reader, limit int64) *limitReader {
+ lr := &limitReader{
+ c: c,
+ }
+ lr.limit.Store(limit)
+ lr.reset(r)
+ return lr
+}
+
+func (lr *limitReader) reset(r io.Reader) {
+ lr.n = lr.limit.Load()
+ lr.r = r
+}
+
+func (lr *limitReader) Read(p []byte) (int, error) {
+ if lr.n <= 0 {
+ err := fmt.Errorf("read limited at %v bytes", lr.limit.Load())
+ lr.c.writeError(StatusMessageTooBig, err)
+ return 0, err
+ }
+
+ if int64(len(p)) > lr.n {
+ p = p[:lr.n]
+ }
+ n, err := lr.r.Read(p)
+ lr.n -= int64(n)
+ return n, err
+}
+
+type readerFunc func(p []byte) (int, error)
+
+func (f readerFunc) Read(p []byte) (int, error) {
+ return f(p)
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/stringer.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/stringer.go
new file mode 100644
index 00000000000..5a66ba29076
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/stringer.go
@@ -0,0 +1,91 @@
+// Code generated by "stringer -type=opcode,MessageType,StatusCode -output=stringer.go"; DO NOT EDIT.
+
+package websocket
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[opContinuation-0]
+ _ = x[opText-1]
+ _ = x[opBinary-2]
+ _ = x[opClose-8]
+ _ = x[opPing-9]
+ _ = x[opPong-10]
+}
+
+const (
+ _opcode_name_0 = "opContinuationopTextopBinary"
+ _opcode_name_1 = "opCloseopPingopPong"
+)
+
+var (
+ _opcode_index_0 = [...]uint8{0, 14, 20, 28}
+ _opcode_index_1 = [...]uint8{0, 7, 13, 19}
+)
+
+func (i opcode) String() string {
+ switch {
+ case 0 <= i && i <= 2:
+ return _opcode_name_0[_opcode_index_0[i]:_opcode_index_0[i+1]]
+ case 8 <= i && i <= 10:
+ i -= 8
+ return _opcode_name_1[_opcode_index_1[i]:_opcode_index_1[i+1]]
+ default:
+ return "opcode(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[MessageText-1]
+ _ = x[MessageBinary-2]
+}
+
+const _MessageType_name = "MessageTextMessageBinary"
+
+var _MessageType_index = [...]uint8{0, 11, 24}
+
+func (i MessageType) String() string {
+ i -= 1
+ if i < 0 || i >= MessageType(len(_MessageType_index)-1) {
+ return "MessageType(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _MessageType_name[_MessageType_index[i]:_MessageType_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[StatusNormalClosure-1000]
+ _ = x[StatusGoingAway-1001]
+ _ = x[StatusProtocolError-1002]
+ _ = x[StatusUnsupportedData-1003]
+ _ = x[statusReserved-1004]
+ _ = x[StatusNoStatusRcvd-1005]
+ _ = x[StatusAbnormalClosure-1006]
+ _ = x[StatusInvalidFramePayloadData-1007]
+ _ = x[StatusPolicyViolation-1008]
+ _ = x[StatusMessageTooBig-1009]
+ _ = x[StatusMandatoryExtension-1010]
+ _ = x[StatusInternalError-1011]
+ _ = x[StatusServiceRestart-1012]
+ _ = x[StatusTryAgainLater-1013]
+ _ = x[StatusBadGateway-1014]
+ _ = x[StatusTLSHandshake-1015]
+}
+
+const _StatusCode_name = "StatusNormalClosureStatusGoingAwayStatusProtocolErrorStatusUnsupportedDatastatusReservedStatusNoStatusRcvdStatusAbnormalClosureStatusInvalidFramePayloadDataStatusPolicyViolationStatusMessageTooBigStatusMandatoryExtensionStatusInternalErrorStatusServiceRestartStatusTryAgainLaterStatusBadGatewayStatusTLSHandshake"
+
+var _StatusCode_index = [...]uint16{0, 19, 34, 53, 74, 88, 106, 127, 156, 177, 196, 220, 239, 259, 278, 294, 312}
+
+func (i StatusCode) String() string {
+ i -= 1000
+ if i < 0 || i >= StatusCode(len(_StatusCode_index)-1) {
+ return "StatusCode(" + strconv.FormatInt(int64(i+1000), 10) + ")"
+ }
+ return _StatusCode_name[_StatusCode_index[i]:_StatusCode_index[i+1]]
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/write.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/write.go
new file mode 100644
index 00000000000..60a4fba0644
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/write.go
@@ -0,0 +1,386 @@
+// +build !js
+
+package websocket
+
+import (
+ "bufio"
+ "context"
+ "crypto/rand"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/klauspost/compress/flate"
+
+ "nhooyr.io/websocket/internal/errd"
+)
+
+// Writer returns a writer bounded by the context that will write
+// a WebSocket message of type dataType to the connection.
+//
+// You must close the writer once you have written the entire message.
+//
+// Only one writer can be open at a time, multiple calls will block until the previous writer
+// is closed.
+func (c *Conn) Writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) {
+ w, err := c.writer(ctx, typ)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get writer: %w", err)
+ }
+ return w, nil
+}
+
+// Write writes a message to the connection.
+//
+// See the Writer method if you want to stream a message.
+//
+// If compression is disabled or the threshold is not met, then it
+// will write the message in a single frame.
+func (c *Conn) Write(ctx context.Context, typ MessageType, p []byte) error {
+ _, err := c.write(ctx, typ, p)
+ if err != nil {
+ return fmt.Errorf("failed to write msg: %w", err)
+ }
+ return nil
+}
+
+type msgWriter struct {
+ mw *msgWriterState
+ closed bool
+}
+
+func (mw *msgWriter) Write(p []byte) (int, error) {
+ if mw.closed {
+ return 0, errors.New("cannot use closed writer")
+ }
+ return mw.mw.Write(p)
+}
+
+func (mw *msgWriter) Close() error {
+ if mw.closed {
+ return errors.New("cannot use closed writer")
+ }
+ mw.closed = true
+ return mw.mw.Close()
+}
+
+type msgWriterState struct {
+ c *Conn
+
+ mu *mu
+ writeMu *mu
+
+ ctx context.Context
+ opcode opcode
+ flate bool
+
+ trimWriter *trimLastFourBytesWriter
+ dict slidingWindow
+}
+
+func newMsgWriterState(c *Conn) *msgWriterState {
+ mw := &msgWriterState{
+ c: c,
+ mu: newMu(c),
+ writeMu: newMu(c),
+ }
+ return mw
+}
+
+func (mw *msgWriterState) ensureFlate() {
+ if mw.trimWriter == nil {
+ mw.trimWriter = &trimLastFourBytesWriter{
+ w: writerFunc(mw.write),
+ }
+ }
+
+ mw.dict.init(8192)
+ mw.flate = true
+}
+
+func (mw *msgWriterState) flateContextTakeover() bool {
+ if mw.c.client {
+ return !mw.c.copts.clientNoContextTakeover
+ }
+ return !mw.c.copts.serverNoContextTakeover
+}
+
+func (c *Conn) writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) {
+ err := c.msgWriterState.reset(ctx, typ)
+ if err != nil {
+ return nil, err
+ }
+ return &msgWriter{
+ mw: c.msgWriterState,
+ closed: false,
+ }, nil
+}
+
+func (c *Conn) write(ctx context.Context, typ MessageType, p []byte) (int, error) {
+ mw, err := c.writer(ctx, typ)
+ if err != nil {
+ return 0, err
+ }
+
+ if !c.flate() {
+ defer c.msgWriterState.mu.unlock()
+ return c.writeFrame(ctx, true, false, c.msgWriterState.opcode, p)
+ }
+
+ n, err := mw.Write(p)
+ if err != nil {
+ return n, err
+ }
+
+ err = mw.Close()
+ return n, err
+}
+
+func (mw *msgWriterState) reset(ctx context.Context, typ MessageType) error {
+ err := mw.mu.lock(ctx)
+ if err != nil {
+ return err
+ }
+
+ mw.ctx = ctx
+ mw.opcode = opcode(typ)
+ mw.flate = false
+
+ mw.trimWriter.reset()
+
+ return nil
+}
+
+// Write writes the given bytes to the WebSocket connection.
+func (mw *msgWriterState) Write(p []byte) (_ int, err error) {
+ err = mw.writeMu.lock(mw.ctx)
+ if err != nil {
+ return 0, fmt.Errorf("failed to write: %w", err)
+ }
+ defer mw.writeMu.unlock()
+
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("failed to write: %w", err)
+ mw.c.close(err)
+ }
+ }()
+
+ if mw.c.flate() {
+ // Only enables flate if the length crosses the
+ // threshold on the first frame
+ if mw.opcode != opContinuation && len(p) >= mw.c.flateThreshold {
+ mw.ensureFlate()
+ }
+ }
+
+ if mw.flate {
+ err = flate.StatelessDeflate(mw.trimWriter, p, false, mw.dict.buf)
+ if err != nil {
+ return 0, err
+ }
+ mw.dict.write(p)
+ return len(p), nil
+ }
+
+ return mw.write(p)
+}
+
+func (mw *msgWriterState) write(p []byte) (int, error) {
+ n, err := mw.c.writeFrame(mw.ctx, false, mw.flate, mw.opcode, p)
+ if err != nil {
+ return n, fmt.Errorf("failed to write data frame: %w", err)
+ }
+ mw.opcode = opContinuation
+ return n, nil
+}
+
+// Close flushes the frame to the connection.
+func (mw *msgWriterState) Close() (err error) {
+ defer errd.Wrap(&err, "failed to close writer")
+
+ err = mw.writeMu.lock(mw.ctx)
+ if err != nil {
+ return err
+ }
+ defer mw.writeMu.unlock()
+
+ _, err = mw.c.writeFrame(mw.ctx, true, mw.flate, mw.opcode, nil)
+ if err != nil {
+ return fmt.Errorf("failed to write fin frame: %w", err)
+ }
+
+ if mw.flate && !mw.flateContextTakeover() {
+ mw.dict.close()
+ }
+ mw.mu.unlock()
+ return nil
+}
+
+func (mw *msgWriterState) close() {
+ if mw.c.client {
+ mw.c.writeFrameMu.forceLock()
+ putBufioWriter(mw.c.bw)
+ }
+
+ mw.writeMu.forceLock()
+ mw.dict.close()
+}
+
+func (c *Conn) writeControl(ctx context.Context, opcode opcode, p []byte) error {
+ ctx, cancel := context.WithTimeout(ctx, time.Second*5)
+ defer cancel()
+
+ _, err := c.writeFrame(ctx, true, false, opcode, p)
+ if err != nil {
+ return fmt.Errorf("failed to write control frame %v: %w", opcode, err)
+ }
+ return nil
+}
+
+// frame handles all writes to the connection.
+func (c *Conn) writeFrame(ctx context.Context, fin bool, flate bool, opcode opcode, p []byte) (_ int, err error) {
+ err = c.writeFrameMu.lock(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer func() {
+ // We leave it locked when writing the close frame to avoid
+ // any other goroutine writing any other frame.
+ if opcode != opClose {
+ c.writeFrameMu.unlock()
+ }
+ }()
+
+ select {
+ case <-c.closed:
+ return 0, c.closeErr
+ case c.writeTimeout <- ctx:
+ }
+
+ defer func() {
+ if err != nil {
+ select {
+ case <-c.closed:
+ err = c.closeErr
+ case <-ctx.Done():
+ err = ctx.Err()
+ }
+ c.close(err)
+ err = fmt.Errorf("failed to write frame: %w", err)
+ }
+ }()
+
+ c.writeHeader.fin = fin
+ c.writeHeader.opcode = opcode
+ c.writeHeader.payloadLength = int64(len(p))
+
+ if c.client {
+ c.writeHeader.masked = true
+ _, err = io.ReadFull(rand.Reader, c.writeHeaderBuf[:4])
+ if err != nil {
+ return 0, fmt.Errorf("failed to generate masking key: %w", err)
+ }
+ c.writeHeader.maskKey = binary.LittleEndian.Uint32(c.writeHeaderBuf[:])
+ }
+
+ c.writeHeader.rsv1 = false
+ if flate && (opcode == opText || opcode == opBinary) {
+ c.writeHeader.rsv1 = true
+ }
+
+ err = writeFrameHeader(c.writeHeader, c.bw, c.writeHeaderBuf[:])
+ if err != nil {
+ return 0, err
+ }
+
+ n, err := c.writeFramePayload(p)
+ if err != nil {
+ return n, err
+ }
+
+ if c.writeHeader.fin {
+ err = c.bw.Flush()
+ if err != nil {
+ return n, fmt.Errorf("failed to flush: %w", err)
+ }
+ }
+
+ select {
+ case <-c.closed:
+ return n, c.closeErr
+ case c.writeTimeout <- context.Background():
+ }
+
+ return n, nil
+}
+
+func (c *Conn) writeFramePayload(p []byte) (n int, err error) {
+ defer errd.Wrap(&err, "failed to write frame payload")
+
+ if !c.writeHeader.masked {
+ return c.bw.Write(p)
+ }
+
+ maskKey := c.writeHeader.maskKey
+ for len(p) > 0 {
+ // If the buffer is full, we need to flush.
+ if c.bw.Available() == 0 {
+ err = c.bw.Flush()
+ if err != nil {
+ return n, err
+ }
+ }
+
+ // Start of next write in the buffer.
+ i := c.bw.Buffered()
+
+ j := len(p)
+ if j > c.bw.Available() {
+ j = c.bw.Available()
+ }
+
+ _, err := c.bw.Write(p[:j])
+ if err != nil {
+ return n, err
+ }
+
+ maskKey = mask(maskKey, c.writeBuf[i:c.bw.Buffered()])
+
+ p = p[j:]
+ n += j
+ }
+
+ return n, nil
+}
+
+type writerFunc func(p []byte) (int, error)
+
+func (f writerFunc) Write(p []byte) (int, error) {
+ return f(p)
+}
+
+// extractBufioWriterBuf grabs the []byte backing a *bufio.Writer
+// and returns it.
+func extractBufioWriterBuf(bw *bufio.Writer, w io.Writer) []byte {
+ var writeBuf []byte
+ bw.Reset(writerFunc(func(p2 []byte) (int, error) {
+ writeBuf = p2[:cap(p2)]
+ return len(p2), nil
+ }))
+
+ bw.WriteByte(0)
+ bw.Flush()
+
+ bw.Reset(w)
+
+ return writeBuf
+}
+
+func (c *Conn) writeError(code StatusCode, err error) {
+ c.setCloseErr(err)
+ c.writeClose(code, err.Error())
+ c.close(nil)
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/ws_js.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/ws_js.go
new file mode 100644
index 00000000000..b87e32cdafb
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/ws_js.go
@@ -0,0 +1,379 @@
+package websocket // import "nhooyr.io/websocket"
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+ "syscall/js"
+
+ "nhooyr.io/websocket/internal/bpool"
+ "nhooyr.io/websocket/internal/wsjs"
+ "nhooyr.io/websocket/internal/xsync"
+)
+
+// Conn provides a wrapper around the browser WebSocket API.
+type Conn struct {
+ ws wsjs.WebSocket
+
+ // read limit for a message in bytes.
+ msgReadLimit xsync.Int64
+
+ closingMu sync.Mutex
+ isReadClosed xsync.Int64
+ closeOnce sync.Once
+ closed chan struct{}
+ closeErrOnce sync.Once
+ closeErr error
+ closeWasClean bool
+
+ releaseOnClose func()
+ releaseOnMessage func()
+
+ readSignal chan struct{}
+ readBufMu sync.Mutex
+ readBuf []wsjs.MessageEvent
+}
+
+func (c *Conn) close(err error, wasClean bool) {
+ c.closeOnce.Do(func() {
+ runtime.SetFinalizer(c, nil)
+
+ if !wasClean {
+ err = fmt.Errorf("unclean connection close: %w", err)
+ }
+ c.setCloseErr(err)
+ c.closeWasClean = wasClean
+ close(c.closed)
+ })
+}
+
+func (c *Conn) init() {
+ c.closed = make(chan struct{})
+ c.readSignal = make(chan struct{}, 1)
+
+ c.msgReadLimit.Store(32768)
+
+ c.releaseOnClose = c.ws.OnClose(func(e wsjs.CloseEvent) {
+ err := CloseError{
+ Code: StatusCode(e.Code),
+ Reason: e.Reason,
+ }
+ // We do not know if we sent or received this close as
+ // its possible the browser triggered it without us
+ // explicitly sending it.
+ c.close(err, e.WasClean)
+
+ c.releaseOnClose()
+ c.releaseOnMessage()
+ })
+
+ c.releaseOnMessage = c.ws.OnMessage(func(e wsjs.MessageEvent) {
+ c.readBufMu.Lock()
+ defer c.readBufMu.Unlock()
+
+ c.readBuf = append(c.readBuf, e)
+
+ // Lets the read goroutine know there is definitely something in readBuf.
+ select {
+ case c.readSignal <- struct{}{}:
+ default:
+ }
+ })
+
+ runtime.SetFinalizer(c, func(c *Conn) {
+ c.setCloseErr(errors.New("connection garbage collected"))
+ c.closeWithInternal()
+ })
+}
+
+func (c *Conn) closeWithInternal() {
+ c.Close(StatusInternalError, "something went wrong")
+}
+
+// Read attempts to read a message from the connection.
+// The maximum time spent waiting is bounded by the context.
+func (c *Conn) Read(ctx context.Context) (MessageType, []byte, error) {
+ if c.isReadClosed.Load() == 1 {
+ return 0, nil, errors.New("WebSocket connection read closed")
+ }
+
+ typ, p, err := c.read(ctx)
+ if err != nil {
+ return 0, nil, fmt.Errorf("failed to read: %w", err)
+ }
+ if int64(len(p)) > c.msgReadLimit.Load() {
+ err := fmt.Errorf("read limited at %v bytes", c.msgReadLimit.Load())
+ c.Close(StatusMessageTooBig, err.Error())
+ return 0, nil, err
+ }
+ return typ, p, nil
+}
+
+func (c *Conn) read(ctx context.Context) (MessageType, []byte, error) {
+ select {
+ case <-ctx.Done():
+ c.Close(StatusPolicyViolation, "read timed out")
+ return 0, nil, ctx.Err()
+ case <-c.readSignal:
+ case <-c.closed:
+ return 0, nil, c.closeErr
+ }
+
+ c.readBufMu.Lock()
+ defer c.readBufMu.Unlock()
+
+ me := c.readBuf[0]
+ // We copy the messages forward and decrease the size
+ // of the slice to avoid reallocating.
+ copy(c.readBuf, c.readBuf[1:])
+ c.readBuf = c.readBuf[:len(c.readBuf)-1]
+
+ if len(c.readBuf) > 0 {
+ // Next time we read, we'll grab the message.
+ select {
+ case c.readSignal <- struct{}{}:
+ default:
+ }
+ }
+
+ switch p := me.Data.(type) {
+ case string:
+ return MessageText, []byte(p), nil
+ case []byte:
+ return MessageBinary, p, nil
+ default:
+ panic("websocket: unexpected data type from wsjs OnMessage: " + reflect.TypeOf(me.Data).String())
+ }
+}
+
+// Ping is mocked out for Wasm.
+func (c *Conn) Ping(ctx context.Context) error {
+ return nil
+}
+
+// Write writes a message of the given type to the connection.
+// Always non blocking.
+func (c *Conn) Write(ctx context.Context, typ MessageType, p []byte) error {
+ err := c.write(ctx, typ, p)
+ if err != nil {
+ // Have to ensure the WebSocket is closed after a write error
+ // to match the Go API. It can only error if the message type
+ // is unexpected or the passed bytes contain invalid UTF-8 for
+ // MessageText.
+ err := fmt.Errorf("failed to write: %w", err)
+ c.setCloseErr(err)
+ c.closeWithInternal()
+ return err
+ }
+ return nil
+}
+
+func (c *Conn) write(ctx context.Context, typ MessageType, p []byte) error {
+ if c.isClosed() {
+ return c.closeErr
+ }
+ switch typ {
+ case MessageBinary:
+ return c.ws.SendBytes(p)
+ case MessageText:
+ return c.ws.SendText(string(p))
+ default:
+ return fmt.Errorf("unexpected message type: %v", typ)
+ }
+}
+
+// Close closes the WebSocket with the given code and reason.
+// It will wait until the peer responds with a close frame
+// or the connection is closed.
+// It thus performs the full WebSocket close handshake.
+func (c *Conn) Close(code StatusCode, reason string) error {
+ err := c.exportedClose(code, reason)
+ if err != nil {
+ return fmt.Errorf("failed to close WebSocket: %w", err)
+ }
+ return nil
+}
+
+func (c *Conn) exportedClose(code StatusCode, reason string) error {
+ c.closingMu.Lock()
+ defer c.closingMu.Unlock()
+
+ ce := fmt.Errorf("sent close: %w", CloseError{
+ Code: code,
+ Reason: reason,
+ })
+
+ if c.isClosed() {
+ return fmt.Errorf("tried to close with %q but connection already closed: %w", ce, c.closeErr)
+ }
+
+ c.setCloseErr(ce)
+ err := c.ws.Close(int(code), reason)
+ if err != nil {
+ return err
+ }
+
+ <-c.closed
+ if !c.closeWasClean {
+ return c.closeErr
+ }
+ return nil
+}
+
+// Subprotocol returns the negotiated subprotocol.
+// An empty string means the default protocol.
+func (c *Conn) Subprotocol() string {
+ return c.ws.Subprotocol()
+}
+
+// DialOptions represents the options available to pass to Dial.
+type DialOptions struct {
+ // Subprotocols lists the subprotocols to negotiate with the server.
+ Subprotocols []string
+}
+
+// Dial creates a new WebSocket connection to the given url with the given options.
+// The passed context bounds the maximum time spent waiting for the connection to open.
+// The returned *http.Response is always nil or a mock. It's only in the signature
+// to match the core API.
+func Dial(ctx context.Context, url string, opts *DialOptions) (*Conn, *http.Response, error) {
+ c, resp, err := dial(ctx, url, opts)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to WebSocket dial %q: %w", url, err)
+ }
+ return c, resp, nil
+}
+
+func dial(ctx context.Context, url string, opts *DialOptions) (*Conn, *http.Response, error) {
+ if opts == nil {
+ opts = &DialOptions{}
+ }
+
+ url = strings.Replace(url, "http://", "ws://", 1)
+ url = strings.Replace(url, "https://", "wss://", 1)
+
+ ws, err := wsjs.New(url, opts.Subprotocols)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ c := &Conn{
+ ws: ws,
+ }
+ c.init()
+
+ opench := make(chan struct{})
+ releaseOpen := ws.OnOpen(func(e js.Value) {
+ close(opench)
+ })
+ defer releaseOpen()
+
+ select {
+ case <-ctx.Done():
+ c.Close(StatusPolicyViolation, "dial timed out")
+ return nil, nil, ctx.Err()
+ case <-opench:
+ return c, &http.Response{
+ StatusCode: http.StatusSwitchingProtocols,
+ }, nil
+ case <-c.closed:
+ return nil, nil, c.closeErr
+ }
+}
+
+// Reader attempts to read a message from the connection.
+// The maximum time spent waiting is bounded by the context.
+func (c *Conn) Reader(ctx context.Context) (MessageType, io.Reader, error) {
+ typ, p, err := c.Read(ctx)
+ if err != nil {
+ return 0, nil, err
+ }
+ return typ, bytes.NewReader(p), nil
+}
+
+// Writer returns a writer to write a WebSocket data message to the connection.
+// It buffers the entire message in memory and then sends it when the writer
+// is closed.
+func (c *Conn) Writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) {
+ return writer{
+ c: c,
+ ctx: ctx,
+ typ: typ,
+ b: bpool.Get(),
+ }, nil
+}
+
+type writer struct {
+ closed bool
+
+ c *Conn
+ ctx context.Context
+ typ MessageType
+
+ b *bytes.Buffer
+}
+
+func (w writer) Write(p []byte) (int, error) {
+ if w.closed {
+ return 0, errors.New("cannot write to closed writer")
+ }
+ n, err := w.b.Write(p)
+ if err != nil {
+ return n, fmt.Errorf("failed to write message: %w", err)
+ }
+ return n, nil
+}
+
+func (w writer) Close() error {
+ if w.closed {
+ return errors.New("cannot close closed writer")
+ }
+ w.closed = true
+ defer bpool.Put(w.b)
+
+ err := w.c.Write(w.ctx, w.typ, w.b.Bytes())
+ if err != nil {
+ return fmt.Errorf("failed to close writer: %w", err)
+ }
+ return nil
+}
+
+// CloseRead implements *Conn.CloseRead for wasm.
+func (c *Conn) CloseRead(ctx context.Context) context.Context {
+ c.isReadClosed.Store(1)
+
+ ctx, cancel := context.WithCancel(ctx)
+ go func() {
+ defer cancel()
+ c.read(ctx)
+ c.Close(StatusPolicyViolation, "unexpected data message")
+ }()
+ return ctx
+}
+
+// SetReadLimit implements *Conn.SetReadLimit for wasm.
+func (c *Conn) SetReadLimit(n int64) {
+ c.msgReadLimit.Store(n)
+}
+
+func (c *Conn) setCloseErr(err error) {
+ c.closeErrOnce.Do(func() {
+ c.closeErr = fmt.Errorf("WebSocket closed: %w", err)
+ })
+}
+
+func (c *Conn) isClosed() bool {
+ select {
+ case <-c.closed:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/RequestForgery.expected b/ql/test/query-tests/Security/CWE-918/RequestForgery.expected
index 5b8fb08dce1..2707f133c5c 100644
--- a/ql/test/query-tests/Security/CWE-918/RequestForgery.expected
+++ b/ql/test/query-tests/Security/CWE-918/RequestForgery.expected
@@ -13,6 +13,15 @@ edges
| tst.go:36:2:36:2 | implicit dereference : URL | tst.go:36:2:36:2 | implicit dereference : URL |
| tst.go:36:2:36:2 | implicit dereference : URL | tst.go:37:11:37:20 | call to String |
| tst.go:36:2:36:2 | u [pointer] : URL | tst.go:36:2:36:2 | implicit dereference : URL |
+| websocket.go:54:21:54:31 | call to Referer : string | websocket.go:59:27:59:40 | untrustedInput |
+| websocket.go:68:21:68:31 | call to Referer : string | websocket.go:72:36:72:49 | untrustedInput |
+| websocket.go:82:21:82:31 | call to Referer : string | websocket.go:85:31:85:44 | untrustedInput |
+| websocket.go:101:21:101:31 | call to Referer : string | websocket.go:104:15:104:28 | untrustedInput |
+| websocket.go:120:21:120:31 | call to Referer : string | websocket.go:123:38:123:51 | untrustedInput |
+| websocket.go:148:21:148:31 | call to Referer : string | websocket.go:149:31:149:44 | untrustedInput |
+| websocket.go:154:21:154:31 | call to Referer : string | websocket.go:156:31:156:44 | untrustedInput |
+| websocket.go:189:21:189:31 | call to Referer : string | websocket.go:191:18:191:31 | untrustedInput |
+| websocket.go:196:21:196:31 | call to Referer : string | websocket.go:198:11:198:24 | untrustedInput |
nodes
| RequestForgery.go:8:12:8:34 | call to FormValue : string | semmle.label | call to FormValue : string |
| RequestForgery.go:11:24:11:65 | ...+... | semmle.label | ...+... |
@@ -27,6 +36,24 @@ nodes
| tst.go:36:2:36:2 | implicit dereference : URL | semmle.label | implicit dereference : URL |
| tst.go:36:2:36:2 | u [pointer] : URL | semmle.label | u [pointer] : URL |
| tst.go:37:11:37:20 | call to String | semmle.label | call to String |
+| websocket.go:54:21:54:31 | call to Referer : string | semmle.label | call to Referer : string |
+| websocket.go:59:27:59:40 | untrustedInput | semmle.label | untrustedInput |
+| websocket.go:68:21:68:31 | call to Referer : string | semmle.label | call to Referer : string |
+| websocket.go:72:36:72:49 | untrustedInput | semmle.label | untrustedInput |
+| websocket.go:82:21:82:31 | call to Referer : string | semmle.label | call to Referer : string |
+| websocket.go:85:31:85:44 | untrustedInput | semmle.label | untrustedInput |
+| websocket.go:101:21:101:31 | call to Referer : string | semmle.label | call to Referer : string |
+| websocket.go:104:15:104:28 | untrustedInput | semmle.label | untrustedInput |
+| websocket.go:120:21:120:31 | call to Referer : string | semmle.label | call to Referer : string |
+| websocket.go:123:38:123:51 | untrustedInput | semmle.label | untrustedInput |
+| websocket.go:148:21:148:31 | call to Referer : string | semmle.label | call to Referer : string |
+| websocket.go:149:31:149:44 | untrustedInput | semmle.label | untrustedInput |
+| websocket.go:154:21:154:31 | call to Referer : string | semmle.label | call to Referer : string |
+| websocket.go:156:31:156:44 | untrustedInput | semmle.label | untrustedInput |
+| websocket.go:189:21:189:31 | call to Referer : string | semmle.label | call to Referer : string |
+| websocket.go:191:18:191:31 | untrustedInput | semmle.label | untrustedInput |
+| websocket.go:196:21:196:31 | call to Referer : string | semmle.label | call to Referer : string |
+| websocket.go:198:11:198:24 | untrustedInput | semmle.label | untrustedInput |
#select
| RequestForgery.go:11:15:11:66 | call to Get | RequestForgery.go:8:12:8:34 | call to FormValue : string | RequestForgery.go:11:24:11:65 | ...+... | The $@ of this request depends on $@. | RequestForgery.go:11:24:11:65 | ...+... | URL | RequestForgery.go:8:12:8:34 | call to FormValue : string | a user-provided value |
| tst.go:14:2:14:18 | call to Get | tst.go:10:13:10:35 | call to FormValue : string | tst.go:14:11:14:17 | tainted | The $@ of this request depends on $@. | tst.go:14:11:14:17 | tainted | URL | tst.go:10:13:10:35 | call to FormValue : string | a user-provided value |
@@ -36,3 +63,12 @@ nodes
| tst.go:27:2:27:30 | call to Get | tst.go:10:13:10:35 | call to FormValue : string | tst.go:27:11:27:29 | ...+... | The $@ of this request depends on $@. | tst.go:27:11:27:29 | ...+... | URL | tst.go:10:13:10:35 | call to FormValue : string | a user-provided value |
| tst.go:29:2:29:41 | call to Get | tst.go:10:13:10:35 | call to FormValue : string | tst.go:29:11:29:40 | ...+... | The $@ of this request depends on $@. | tst.go:29:11:29:40 | ...+... | URL | tst.go:10:13:10:35 | call to FormValue : string | a user-provided value |
| tst.go:37:2:37:21 | call to Get | tst.go:10:13:10:35 | call to FormValue : string | tst.go:37:11:37:20 | call to String | The $@ of this request depends on $@. | tst.go:37:11:37:20 | call to String | URL | tst.go:10:13:10:35 | call to FormValue : string | a user-provided value |
+| websocket.go:59:12:59:53 | call to Dial | websocket.go:54:21:54:31 | call to Referer : string | websocket.go:59:27:59:40 | untrustedInput | The $@ of this request depends on $@. | websocket.go:59:27:59:40 | untrustedInput | WebSocket URL | websocket.go:54:21:54:31 | call to Referer : string | a user-provided value |
+| websocket.go:73:13:73:40 | call to DialConfig | websocket.go:68:21:68:31 | call to Referer : string | websocket.go:72:36:72:49 | untrustedInput | The $@ of this request depends on $@. | websocket.go:72:36:72:49 | untrustedInput | WebSocket URL | websocket.go:68:21:68:31 | call to Referer : string | a user-provided value |
+| websocket.go:85:3:85:50 | call to Dial | websocket.go:82:21:82:31 | call to Referer : string | websocket.go:85:31:85:44 | untrustedInput | The $@ of this request depends on $@. | websocket.go:85:31:85:44 | untrustedInput | WebSocket URL | websocket.go:82:21:82:31 | call to Referer : string | a user-provided value |
+| websocket.go:104:3:104:39 | call to Dial | websocket.go:101:21:101:31 | call to Referer : string | websocket.go:104:15:104:28 | untrustedInput | The $@ of this request depends on $@. | websocket.go:104:15:104:28 | untrustedInput | WebSocket URL | websocket.go:101:21:101:31 | call to Referer : string | a user-provided value |
+| websocket.go:123:3:123:62 | call to DialContext | websocket.go:120:21:120:31 | call to Referer : string | websocket.go:123:38:123:51 | untrustedInput | The $@ of this request depends on $@. | websocket.go:123:38:123:51 | untrustedInput | WebSocket URL | websocket.go:120:21:120:31 | call to Referer : string | a user-provided value |
+| websocket.go:149:3:149:45 | call to Dial | websocket.go:148:21:148:31 | call to Referer : string | websocket.go:149:31:149:44 | untrustedInput | The $@ of this request depends on $@. | websocket.go:149:31:149:44 | untrustedInput | WebSocket URL | websocket.go:148:21:148:31 | call to Referer : string | a user-provided value |
+| websocket.go:156:3:156:45 | call to Dial | websocket.go:154:21:154:31 | call to Referer : string | websocket.go:156:31:156:44 | untrustedInput | The $@ of this request depends on $@. | websocket.go:156:31:156:44 | untrustedInput | WebSocket URL | websocket.go:154:21:154:31 | call to Referer : string | a user-provided value |
+| websocket.go:191:3:191:32 | call to BuildProxy | websocket.go:189:21:189:31 | call to Referer : string | websocket.go:191:18:191:31 | untrustedInput | The $@ of this request depends on $@. | websocket.go:191:18:191:31 | untrustedInput | WebSocket URL | websocket.go:189:21:189:31 | call to Referer : string | a user-provided value |
+| websocket.go:198:3:198:25 | call to New | websocket.go:196:21:196:31 | call to Referer : string | websocket.go:198:11:198:24 | untrustedInput | The $@ of this request depends on $@. | websocket.go:198:11:198:24 | untrustedInput | WebSocket URL | websocket.go:196:21:196:31 | call to Referer : string | a user-provided value |
diff --git a/ql/test/query-tests/Security/CWE-918/go.mod b/ql/test/query-tests/Security/CWE-918/go.mod
new file mode 100644
index 00000000000..5f614a3d1d3
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/go.mod
@@ -0,0 +1,12 @@
+module main
+
+go 1.14
+
+require (
+ github.com/gobwas/ws v1.0.3
+ github.com/gorilla/websocket v1.4.2
+ github.com/sacOO7/go-logger v0.0.0-20180719173527-9ac9add5a50d // indirect
+ github.com/sacOO7/gowebsocket v0.0.0-20180719182212-1436bb906a4e
+ golang.org/x/net v0.0.0-20200421231249-e086a090c8fd
+ nhooyr.io/websocket v1.8.5
+)
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/LICENSE b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/LICENSE
new file mode 100644
index 00000000000..274431766fa
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017 Sergey Kamardin
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/README.md b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/README.md
new file mode 100644
index 00000000000..67a97fdbe92
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/README.md
@@ -0,0 +1,63 @@
+# httphead.[go](https://golang.org)
+
+[![GoDoc][godoc-image]][godoc-url]
+
+> Tiny HTTP header value parsing library in go.
+
+## Overview
+
+This library contains low-level functions for scanning HTTP RFC2616 compatible header value grammars.
+
+## Install
+
+```shell
+ go get github.com/gobwas/httphead
+```
+
+## Example
+
+The example below shows how a multiple-choice HTTP header value could be parsed with this library:
+
+```go
+ options, ok := httphead.ParseOptions([]byte(`foo;bar=1,baz`), nil)
+ fmt.Println(options, ok)
+ // Output: [{foo map[bar:1]} {baz map[]}] true
+```
+
+The low-level example below shows how to optimize keys skipping and selection
+of some key:
+
+```go
+ // The right part of full header line like:
+ // X-My-Header: key;foo=bar;baz,key;baz
+ header := []byte(`foo;a=0,foo;a=1,foo;a=2,foo;a=3`)
+
+ // We want to search key "foo" with an "a" parameter that equal to "2".
+ var (
+ foo = []byte(`foo`)
+ a = []byte(`a`)
+ v = []byte(`2`)
+ )
+ var found bool
+ httphead.ScanOptions(header, func(i int, key, param, value []byte) Control {
+ if !bytes.Equal(key, foo) {
+ return ControlSkip
+ }
+ if !bytes.Equal(param, a) {
+ if bytes.Equal(value, v) {
+ // Found it!
+ found = true
+ return ControlBreak
+ }
+ return ControlSkip
+ }
+ return ControlContinue
+ })
+```
+
+For more usage examples please see [docs][godoc-url] or package tests.
+
+[godoc-image]: https://godoc.org/github.com/gobwas/httphead?status.svg
+[godoc-url]: https://godoc.org/github.com/gobwas/httphead
+[travis-image]: https://travis-ci.org/gobwas/httphead.svg?branch=master
+[travis-url]: https://travis-ci.org/gobwas/httphead
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/cookie.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/cookie.go
new file mode 100644
index 00000000000..05c9a1fb6a1
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/cookie.go
@@ -0,0 +1,200 @@
+package httphead
+
+import (
+ "bytes"
+)
+
+// ScanCookie scans cookie pairs from data using DefaultCookieScanner.Scan()
+// method.
+func ScanCookie(data []byte, it func(key, value []byte) bool) bool {
+ return DefaultCookieScanner.Scan(data, it)
+}
+
+// DefaultCookieScanner is a CookieScanner which is used by ScanCookie().
+// Note that it is intended to have the same behavior as http.Request.Cookies()
+// has.
+var DefaultCookieScanner = CookieScanner{}
+
+// CookieScanner contains options for scanning cookie pairs.
+// See https://tools.ietf.org/html/rfc6265#section-4.1.1
+type CookieScanner struct {
+ // DisableNameValidation disables name validation of a cookie. If false,
+ // only RFC2616 "tokens" are accepted.
+ DisableNameValidation bool
+
+ // DisableValueValidation disables value validation of a cookie. If false,
+ // only RFC6265 "cookie-octet" characters are accepted.
+ //
+ // Note that Strict option also affects validation of a value.
+ //
+ // If Strict is false, then scanner begins to allow space and comma
+ // characters inside the value for better compatibility with non standard
+ // cookies implementations.
+ DisableValueValidation bool
+
+ // BreakOnPairError sets scanner to immediately return after first pair syntax
+ // validation error.
+ // If false, scanner will try to skip invalid pair bytes and go ahead.
+ BreakOnPairError bool
+
+ // Strict enables strict RFC6265 mode scanning. It affects name and value
+ // validation, as also some other rules.
+ // If false, it is intended to bring the same behavior as
+ // http.Request.Cookies().
+ Strict bool
+}
+
+// Scan maps data to name and value pairs. Usually data represents value of the
+// Cookie header.
+func (c CookieScanner) Scan(data []byte, it func(name, value []byte) bool) bool {
+ lexer := &Scanner{data: data}
+
+ const (
+ statePair = iota
+ stateBefore
+ )
+
+ state := statePair
+
+ for lexer.Buffered() > 0 {
+ switch state {
+ case stateBefore:
+ // Pairs separated by ";" and space, according to the RFC6265:
+ // cookie-pair *( ";" SP cookie-pair )
+ //
+ // Cookie pairs MUST be separated by (";" SP). So our only option
+ // here is to fail as syntax error.
+ a, b := lexer.Peek2()
+ if a != ';' {
+ return false
+ }
+
+ state = statePair
+
+ advance := 1
+ if b == ' ' {
+ advance++
+ } else if c.Strict {
+ return false
+ }
+
+ lexer.Advance(advance)
+
+ case statePair:
+ if !lexer.FetchUntil(';') {
+ return false
+ }
+
+ var value []byte
+ name := lexer.Bytes()
+ if i := bytes.IndexByte(name, '='); i != -1 {
+ value = name[i+1:]
+ name = name[:i]
+ } else if c.Strict {
+ if !c.BreakOnPairError {
+ goto nextPair
+ }
+ return false
+ }
+
+ if !c.Strict {
+ trimLeft(name)
+ }
+ if !c.DisableNameValidation && !ValidCookieName(name) {
+ if !c.BreakOnPairError {
+ goto nextPair
+ }
+ return false
+ }
+
+ if !c.Strict {
+ value = trimRight(value)
+ }
+ value = stripQuotes(value)
+ if !c.DisableValueValidation && !ValidCookieValue(value, c.Strict) {
+ if !c.BreakOnPairError {
+ goto nextPair
+ }
+ return false
+ }
+
+ if !it(name, value) {
+ return true
+ }
+
+ nextPair:
+ state = stateBefore
+ }
+ }
+
+ return true
+}
+
+// ValidCookieValue reports whether given value is a valid RFC6265
+// "cookie-octet" bytes.
+//
+// cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E
+// ; US-ASCII characters excluding CTLs,
+// ; whitespace DQUOTE, comma, semicolon,
+// ; and backslash
+//
+// Note that the false strict parameter disables errors on space 0x20 and comma
+// 0x2c. This could be useful to bring some compatibility with non-compliant
+// clients/servers in the real world.
+// It acts the same as standard library cookie parser if strict is false.
+func ValidCookieValue(value []byte, strict bool) bool {
+ if len(value) == 0 {
+ return true
+ }
+ for _, c := range value {
+ switch c {
+ case '"', ';', '\\':
+ return false
+ case ',', ' ':
+ if strict {
+ return false
+ }
+ default:
+ if c <= 0x20 {
+ return false
+ }
+ if c >= 0x7f {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// ValidCookieName reports whether given bytes is a valid RFC2616 "token" bytes.
+func ValidCookieName(name []byte) bool {
+ for _, c := range name {
+ if !OctetTypes[c].IsToken() {
+ return false
+ }
+ }
+ return true
+}
+
+func stripQuotes(bts []byte) []byte {
+ if last := len(bts) - 1; last > 0 && bts[0] == '"' && bts[last] == '"' {
+ return bts[1:last]
+ }
+ return bts
+}
+
+func trimLeft(p []byte) []byte {
+ var i int
+ for i < len(p) && OctetTypes[p[i]].IsSpace() {
+ i++
+ }
+ return p[i:]
+}
+
+func trimRight(p []byte) []byte {
+ j := len(p)
+ for j > 0 && OctetTypes[p[j-1]].IsSpace() {
+ j--
+ }
+ return p[:j]
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/head.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/head.go
new file mode 100644
index 00000000000..a50e907dd18
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/head.go
@@ -0,0 +1,275 @@
+package httphead
+
+import (
+ "bufio"
+ "bytes"
+)
+
+// Version contains protocol major and minor version.
+type Version struct {
+ Major int
+ Minor int
+}
+
+// RequestLine contains parameters parsed from the first request line.
+type RequestLine struct {
+ Method []byte
+ URI []byte
+ Version Version
+}
+
+// ResponseLine contains parameters parsed from the first response line.
+type ResponseLine struct {
+ Version Version
+ Status int
+ Reason []byte
+}
+
+// SplitRequestLine splits given slice of bytes into three chunks without
+// parsing.
+func SplitRequestLine(line []byte) (method, uri, version []byte) {
+ return split3(line, ' ')
+}
+
+// ParseRequestLine parses http request line like "GET / HTTP/1.0".
+func ParseRequestLine(line []byte) (r RequestLine, ok bool) {
+ var i int
+ for i = 0; i < len(line); i++ {
+ c := line[i]
+ if !OctetTypes[c].IsToken() {
+ if i > 0 && c == ' ' {
+ break
+ }
+ return
+ }
+ }
+ if i == len(line) {
+ return
+ }
+
+ var proto []byte
+ r.Method = line[:i]
+ r.URI, proto = split2(line[i+1:], ' ')
+ if len(r.URI) == 0 {
+ return
+ }
+ if major, minor, ok := ParseVersion(proto); ok {
+ r.Version.Major = major
+ r.Version.Minor = minor
+ return r, true
+ }
+
+ return r, false
+}
+
+// SplitResponseLine splits given slice of bytes into three chunks without
+// parsing.
+func SplitResponseLine(line []byte) (version, status, reason []byte) {
+ return split3(line, ' ')
+}
+
+// ParseResponseLine parses first response line into ResponseLine struct.
+func ParseResponseLine(line []byte) (r ResponseLine, ok bool) {
+ var (
+ proto []byte
+ status []byte
+ )
+ proto, status, r.Reason = split3(line, ' ')
+ if major, minor, ok := ParseVersion(proto); ok {
+ r.Version.Major = major
+ r.Version.Minor = minor
+ } else {
+ return r, false
+ }
+ if n, ok := IntFromASCII(status); ok {
+ r.Status = n
+ } else {
+ return r, false
+ }
+	// TODO(gobwas): parse here r.Reason for TEXT rule:
+ // TEXT =
+ return r, true
+}
+
+var (
+ httpVersion10 = []byte("HTTP/1.0")
+ httpVersion11 = []byte("HTTP/1.1")
+ httpVersionPrefix = []byte("HTTP/")
+)
+
+// ParseVersion parses major and minor version of HTTP protocol.
+// It returns parsed values and true if parse is ok.
+func ParseVersion(bts []byte) (major, minor int, ok bool) {
+ switch {
+ case bytes.Equal(bts, httpVersion11):
+ return 1, 1, true
+ case bytes.Equal(bts, httpVersion10):
+ return 1, 0, true
+ case len(bts) < 8:
+ return
+ case !bytes.Equal(bts[:5], httpVersionPrefix):
+ return
+ }
+
+ bts = bts[5:]
+
+ dot := bytes.IndexByte(bts, '.')
+ if dot == -1 {
+ return
+ }
+ major, ok = IntFromASCII(bts[:dot])
+ if !ok {
+ return
+ }
+ minor, ok = IntFromASCII(bts[dot+1:])
+ if !ok {
+ return
+ }
+
+ return major, minor, true
+}
+
+// ReadLine reads line from br. It reads until '\n' and returns bytes without
+// '\n' or '\r\n' at the end.
+// It returns err if and only if line does not end in '\n'. Note that read
+// bytes returned in any case of error.
+//
+// It is much like the textproto/Reader.ReadLine() except the thing that it
+// returns raw bytes, instead of string. That is, it avoids copying bytes read
+// from br.
+//
+// textproto/Reader.ReadLineBytes() also makes a copy of the resulting bytes to be
+// safe with future I/O operations on br.
+//
+// We could control I/O operations on br and do not need to make additional
+// copy for safety.
+func ReadLine(br *bufio.Reader) ([]byte, error) {
+ var line []byte
+ for {
+ bts, err := br.ReadSlice('\n')
+ if err == bufio.ErrBufferFull {
+ // Copy bytes because next read will discard them.
+ line = append(line, bts...)
+ continue
+ }
+ // Avoid copy of single read.
+ if line == nil {
+ line = bts
+ } else {
+ line = append(line, bts...)
+ }
+ if err != nil {
+ return line, err
+ }
+ // Size of line is at least 1.
+ // In other case bufio.ReadSlice() returns error.
+ n := len(line)
+ // Cut '\n' or '\r\n'.
+ if n > 1 && line[n-2] == '\r' {
+ line = line[:n-2]
+ } else {
+ line = line[:n-1]
+ }
+ return line, nil
+ }
+}
+
+// ParseHeaderLine parses HTTP header as key-value pair. It returns parsed
+// values and true if parse is ok.
+func ParseHeaderLine(line []byte) (k, v []byte, ok bool) {
+ colon := bytes.IndexByte(line, ':')
+ if colon == -1 {
+ return
+ }
+ k = trim(line[:colon])
+ for _, c := range k {
+ if !OctetTypes[c].IsToken() {
+ return nil, nil, false
+ }
+ }
+ v = trim(line[colon+1:])
+ return k, v, true
+}
+
+// IntFromASCII converts ascii encoded decimal numeric value from HTTP entities
+// to an integer.
+func IntFromASCII(bts []byte) (ret int, ok bool) {
+ // ASCII numbers all start with the high-order bits 0011.
+ // If you see that, and the next bits are 0-9 (0000 - 1001) you can grab those
+ // bits and interpret them directly as an integer.
+ var n int
+ if n = len(bts); n < 1 {
+ return 0, false
+ }
+ for i := 0; i < n; i++ {
+ if bts[i]&0xf0 != 0x30 {
+ return 0, false
+ }
+ ret += int(bts[i]&0xf) * pow(10, n-i-1)
+ }
+ return ret, true
+}
+
+const (
+ toLower = 'a' - 'A' // for use with OR.
+ toUpper = ^byte(toLower) // for use with AND.
+)
+
+// CanonicalizeHeaderKey is like standard textproto/CanonicalMIMEHeaderKey,
+// except that it operates with slice of bytes and modifies it inplace without
+// copying.
+func CanonicalizeHeaderKey(k []byte) {
+ upper := true
+ for i, c := range k {
+ if upper && 'a' <= c && c <= 'z' {
+ k[i] &= toUpper
+ } else if !upper && 'A' <= c && c <= 'Z' {
+ k[i] |= toLower
+ }
+ upper = c == '-'
+ }
+}
+
+// pow for integers implementation.
+// See Donald Knuth, The Art of Computer Programming, Volume 2, Section 4.6.3
+func pow(a, b int) int {
+ p := 1
+ for b > 0 {
+ if b&1 != 0 {
+ p *= a
+ }
+ b >>= 1
+ a *= a
+ }
+ return p
+}
+
+func split3(p []byte, sep byte) (p1, p2, p3 []byte) {
+ a := bytes.IndexByte(p, sep)
+ b := bytes.IndexByte(p[a+1:], sep)
+ if a == -1 || b == -1 {
+ return p, nil, nil
+ }
+ b += a + 1
+ return p[:a], p[a+1 : b], p[b+1:]
+}
+
+func split2(p []byte, sep byte) (p1, p2 []byte) {
+ i := bytes.IndexByte(p, sep)
+ if i == -1 {
+ return p, nil
+ }
+ return p[:i], p[i+1:]
+}
+
+func trim(p []byte) []byte {
+ var i, j int
+ for i = 0; i < len(p) && (p[i] == ' ' || p[i] == '\t'); {
+ i++
+ }
+ for j = len(p); j > i && (p[j-1] == ' ' || p[j-1] == '\t'); {
+ j--
+ }
+ return p[i:j]
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/httphead.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/httphead.go
new file mode 100644
index 00000000000..2387e8033c9
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/httphead.go
@@ -0,0 +1,331 @@
+// Package httphead contains utils for parsing HTTP and HTTP-grammar compatible
+// text protocols headers.
+//
+// That is, this package first aim is to bring ability to easily parse
+// constructions, described here https://tools.ietf.org/html/rfc2616#section-2
+package httphead
+
+import (
+ "bytes"
+ "strings"
+)
+
+// ScanTokens parses data in this form:
+//
+// list = 1#token
+//
+// It returns false if data is malformed.
+func ScanTokens(data []byte, it func([]byte) bool) bool {
+ lexer := &Scanner{data: data}
+
+ var ok bool
+ for lexer.Next() {
+ switch lexer.Type() {
+ case ItemToken:
+ ok = true
+ if !it(lexer.Bytes()) {
+ return true
+ }
+ case ItemSeparator:
+ if !isComma(lexer.Bytes()) {
+ return false
+ }
+ default:
+ return false
+ }
+ }
+
+ return ok && !lexer.err
+}
+
+// ParseOptions parses all header options and appends it to given slice of
+// Option. It returns flag of successful (wellformed input) parsing.
+//
+// Note that appended options are all consist of subslices of data. That is,
+// mutation of data will mutate appended options.
+func ParseOptions(data []byte, options []Option) ([]Option, bool) {
+ var i int
+ index := -1
+ return options, ScanOptions(data, func(idx int, name, attr, val []byte) Control {
+ if idx != index {
+ index = idx
+ i = len(options)
+ options = append(options, Option{Name: name})
+ }
+ if attr != nil {
+ options[i].Parameters.Set(attr, val)
+ }
+ return ControlContinue
+ })
+}
+
+// SelectFlag encodes way of options selection.
+type SelectFlag byte
+
+// String represents the flag as a string.
+func (f SelectFlag) String() string {
+ var flags [2]string
+ var n int
+ if f&SelectCopy != 0 {
+ flags[n] = "copy"
+ n++
+ }
+ if f&SelectUnique != 0 {
+ flags[n] = "unique"
+ n++
+ }
+ return "[" + strings.Join(flags[:n], "|") + "]"
+}
+
+const (
+ // SelectCopy causes selector to copy selected option before appending it
+ // to resulting slice.
+ // If SelectCopy flag is not passed to selector, then appended options will
+ // contain sub-slices of the initial data.
+ SelectCopy SelectFlag = 1 << iota
+
+ // SelectUnique causes selector to append only not yet existing option to
+ // resulting slice. Unique is checked by comparing option names.
+ SelectUnique
+)
+
+// OptionSelector contains configuration for selecting Options from header value.
+type OptionSelector struct {
+ // Check is a filter function that applied to every Option that possibly
+ // could be selected.
+ // If Check is nil all options will be selected.
+ Check func(Option) bool
+
+ // Flags contains flags for options selection.
+ Flags SelectFlag
+
+ // Alloc used to allocate slice of bytes when selector is configured with
+ // SelectCopy flag. It will be called with number of bytes needed for copy
+ // of single Option.
+ // If Alloc is nil make is used.
+ Alloc func(n int) []byte
+}
+
+// Select parses header data and appends it to given slice of Option.
+// It also returns flag of successful (wellformed input) parsing.
+func (s OptionSelector) Select(data []byte, options []Option) ([]Option, bool) {
+ var current Option
+ var has bool
+ index := -1
+
+ alloc := s.Alloc
+ if alloc == nil {
+ alloc = defaultAlloc
+ }
+ check := s.Check
+ if check == nil {
+ check = defaultCheck
+ }
+
+ ok := ScanOptions(data, func(idx int, name, attr, val []byte) Control {
+ if idx != index {
+ if has && check(current) {
+ if s.Flags&SelectCopy != 0 {
+ current = current.Copy(alloc(current.Size()))
+ }
+ options = append(options, current)
+ has = false
+ }
+ if s.Flags&SelectUnique != 0 {
+ for i := len(options) - 1; i >= 0; i-- {
+ if bytes.Equal(options[i].Name, name) {
+ return ControlSkip
+ }
+ }
+ }
+ index = idx
+ current = Option{Name: name}
+ has = true
+ }
+ if attr != nil {
+ current.Parameters.Set(attr, val)
+ }
+
+ return ControlContinue
+ })
+ if has && check(current) {
+ if s.Flags&SelectCopy != 0 {
+ current = current.Copy(alloc(current.Size()))
+ }
+ options = append(options, current)
+ }
+
+ return options, ok
+}
+
+func defaultAlloc(n int) []byte { return make([]byte, n) }
+func defaultCheck(Option) bool { return true }
+
+// Control represents operation that scanner should perform.
+type Control byte
+
+const (
+ // ControlContinue causes scanner to continue scan tokens.
+ ControlContinue Control = iota
+ // ControlBreak causes scanner to stop scan tokens.
+ ControlBreak
+ // ControlSkip causes scanner to skip current entity.
+ ControlSkip
+)
+
+// ScanOptions parses data in this form:
+//
+// values = 1#value
+// value = token *( ";" param )
+// param = token [ "=" (token | quoted-string) ]
+//
+// It calls given callback with the index of the option, option itself and its
+// parameter (attribute and its value, both could be nil). Index is useful when
+// header contains multiple choices for the same named option.
+//
+// Given callback should return one of the defined Control* values.
+// ControlSkip means that passed key is not in caller's interest. That is, all
+// parameters of that key will be skipped.
+// ControlBreak means that no more keys and parameters should be parsed. That
+// is, it must break parsing immediately.
+// ControlContinue means that caller want to receive next parameter and its
+// value or the next key.
+//
+// It returns false if data is malformed.
+func ScanOptions(data []byte, it func(index int, option, attribute, value []byte) Control) bool {
+ lexer := &Scanner{data: data}
+
+ var ok bool
+ var state int
+ const (
+ stateKey = iota
+ stateParamBeforeName
+ stateParamName
+ stateParamBeforeValue
+ stateParamValue
+ )
+
+ var (
+ index int
+ key, param, value []byte
+ mustCall bool
+ )
+ for lexer.Next() {
+ var (
+ call bool
+ growIndex int
+ )
+
+ t := lexer.Type()
+ v := lexer.Bytes()
+
+ switch t {
+ case ItemToken:
+ switch state {
+ case stateKey, stateParamBeforeName:
+ key = v
+ state = stateParamBeforeName
+ mustCall = true
+ case stateParamName:
+ param = v
+ state = stateParamBeforeValue
+ mustCall = true
+ case stateParamValue:
+ value = v
+ state = stateParamBeforeName
+ call = true
+ default:
+ return false
+ }
+
+ case ItemString:
+ if state != stateParamValue {
+ return false
+ }
+ value = v
+ state = stateParamBeforeName
+ call = true
+
+ case ItemSeparator:
+ switch {
+ case isComma(v) && state == stateKey:
+ // Nothing to do.
+
+ case isComma(v) && state == stateParamBeforeName:
+ state = stateKey
+ // Make call only if we have not called this key yet.
+ call = mustCall
+ if !call {
+ // If we have already called callback with the key
+ // that just ended.
+ index++
+ } else {
+ // Else grow the index after calling callback.
+ growIndex = 1
+ }
+
+ case isComma(v) && state == stateParamBeforeValue:
+ state = stateKey
+ growIndex = 1
+ call = true
+
+ case isSemicolon(v) && state == stateParamBeforeName:
+ state = stateParamName
+
+ case isSemicolon(v) && state == stateParamBeforeValue:
+ state = stateParamName
+ call = true
+
+ case isEquality(v) && state == stateParamBeforeValue:
+ state = stateParamValue
+
+ default:
+ return false
+ }
+
+ default:
+ return false
+ }
+
+ if call {
+ switch it(index, key, param, value) {
+ case ControlBreak:
+			// User wants to stop parsing parameters.
+ return true
+
+ case ControlSkip:
+			// User wants to skip the current param.
+ state = stateKey
+ lexer.SkipEscaped(',')
+
+ case ControlContinue:
+ // User is interested in rest of parameters.
+ // Nothing to do.
+
+ default:
+ panic("unexpected control value")
+ }
+ ok = true
+ param = nil
+ value = nil
+ mustCall = false
+ index += growIndex
+ }
+ }
+ if mustCall {
+ ok = true
+ it(index, key, param, value)
+ }
+
+ return ok && !lexer.err
+}
+
+func isComma(b []byte) bool {
+ return len(b) == 1 && b[0] == ','
+}
+func isSemicolon(b []byte) bool {
+ return len(b) == 1 && b[0] == ';'
+}
+func isEquality(b []byte) bool {
+ return len(b) == 1 && b[0] == '='
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/lexer.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/lexer.go
new file mode 100644
index 00000000000..729855ed0d3
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/lexer.go
@@ -0,0 +1,360 @@
+package httphead
+
+import (
+ "bytes"
+)
+
+// ItemType encodes type of the lexing token.
+type ItemType int
+
+const (
+ // ItemUndef reports that token is undefined.
+ ItemUndef ItemType = iota
+ // ItemToken reports that token is RFC2616 token.
+ ItemToken
+ // ItemSeparator reports that token is RFC2616 separator.
+ ItemSeparator
+ // ItemString reports that token is RFC2616 quoted string.
+ ItemString
+ // ItemComment reports that token is RFC2616 comment.
+ ItemComment
+ // ItemOctet reports that token is octet slice.
+ ItemOctet
+)
+
+// Scanner represents header tokens scanner.
+// See https://tools.ietf.org/html/rfc2616#section-2
+type Scanner struct {
+ data []byte
+ pos int
+
+ itemType ItemType
+ itemBytes []byte
+
+ err bool
+}
+
+// NewScanner creates new RFC2616 data scanner.
+func NewScanner(data []byte) *Scanner {
+ return &Scanner{data: data}
+}
+
+// Next scans for next token. It returns true on successful scanning, and false
+// on error or EOF.
+func (l *Scanner) Next() bool {
+ c, ok := l.nextChar()
+ if !ok {
+ return false
+ }
+ switch c {
+ case '"': // quoted-string;
+ return l.fetchQuotedString()
+
+ case '(': // comment;
+ return l.fetchComment()
+
+ case '\\', ')': // unexpected chars;
+ l.err = true
+ return false
+
+ default:
+ return l.fetchToken()
+ }
+}
+
+// FetchUntil fetches ItemOctet from current scanner position to first
+// occurrence of the c or to the end of the underlying data.
+func (l *Scanner) FetchUntil(c byte) bool {
+ l.resetItem()
+ if l.pos == len(l.data) {
+ return false
+ }
+ return l.fetchOctet(c)
+}
+
+// Peek reads byte at current position without advancing it. On end of data it
+// returns 0.
+func (l *Scanner) Peek() byte {
+ if l.pos == len(l.data) {
+ return 0
+ }
+ return l.data[l.pos]
+}
+
+// Peek2 reads two first bytes at current position without advancing it.
+// If there is not enough data it returns 0.
+func (l *Scanner) Peek2() (a, b byte) {
+ if l.pos == len(l.data) {
+ return 0, 0
+ }
+ if l.pos+1 == len(l.data) {
+ return l.data[l.pos], 0
+ }
+ return l.data[l.pos], l.data[l.pos+1]
+}
+
+// Buffered reports how many bytes there are left to scan.
+func (l *Scanner) Buffered() int {
+ return len(l.data) - l.pos
+}
+
+// Advance moves current position index at n bytes. It returns true on
+// successful move.
+func (l *Scanner) Advance(n int) bool {
+ l.pos += n
+ if l.pos > len(l.data) {
+ l.pos = len(l.data)
+ return false
+ }
+ return true
+}
+
+// Skip skips all bytes until first occurrence of c.
+func (l *Scanner) Skip(c byte) {
+ if l.err {
+ return
+ }
+ // Reset scanner state.
+ l.resetItem()
+
+ if i := bytes.IndexByte(l.data[l.pos:], c); i == -1 {
+ // Reached the end of data.
+ l.pos = len(l.data)
+ } else {
+ l.pos += i + 1
+ }
+}
+
+// SkipEscaped skips all bytes until first occurrence of non-escaped c.
+func (l *Scanner) SkipEscaped(c byte) {
+ if l.err {
+ return
+ }
+ // Reset scanner state.
+ l.resetItem()
+
+ if i := ScanUntil(l.data[l.pos:], c); i == -1 {
+ // Reached the end of data.
+ l.pos = len(l.data)
+ } else {
+ l.pos += i + 1
+ }
+}
+
+// Type reports current token type.
+func (l *Scanner) Type() ItemType {
+ return l.itemType
+}
+
+// Bytes returns current token bytes.
+func (l *Scanner) Bytes() []byte {
+ return l.itemBytes
+}
+
+func (l *Scanner) nextChar() (byte, bool) {
+ // Reset scanner state.
+ l.resetItem()
+
+ if l.err {
+ return 0, false
+ }
+ l.pos += SkipSpace(l.data[l.pos:])
+ if l.pos == len(l.data) {
+ return 0, false
+ }
+ return l.data[l.pos], true
+}
+
+func (l *Scanner) resetItem() {
+ l.itemType = ItemUndef
+ l.itemBytes = nil
+}
+
+func (l *Scanner) fetchOctet(c byte) bool {
+ i := l.pos
+ if j := bytes.IndexByte(l.data[l.pos:], c); j == -1 {
+ // Reached the end of data.
+ l.pos = len(l.data)
+ } else {
+ l.pos += j
+ }
+
+ l.itemType = ItemOctet
+ l.itemBytes = l.data[i:l.pos]
+
+ return true
+}
+
+func (l *Scanner) fetchToken() bool {
+ n, t := ScanToken(l.data[l.pos:])
+ if n == -1 {
+ l.err = true
+ return false
+ }
+
+ l.itemType = t
+ l.itemBytes = l.data[l.pos : l.pos+n]
+ l.pos += n
+
+ return true
+}
+
+func (l *Scanner) fetchQuotedString() (ok bool) {
+ l.pos++
+
+ n := ScanUntil(l.data[l.pos:], '"')
+ if n == -1 {
+ l.err = true
+ return false
+ }
+
+ l.itemType = ItemString
+ l.itemBytes = RemoveByte(l.data[l.pos:l.pos+n], '\\')
+ l.pos += n + 1
+
+ return true
+}
+
+func (l *Scanner) fetchComment() (ok bool) {
+ l.pos++
+
+ n := ScanPairGreedy(l.data[l.pos:], '(', ')')
+ if n == -1 {
+ l.err = true
+ return false
+ }
+
+ l.itemType = ItemComment
+ l.itemBytes = RemoveByte(l.data[l.pos:l.pos+n], '\\')
+ l.pos += n + 1
+
+ return true
+}
+
+// ScanUntil scans for first non-escaped character c in given data.
+// It returns index of matched c and -1 if c is not found.
+func ScanUntil(data []byte, c byte) (n int) {
+ for {
+ i := bytes.IndexByte(data[n:], c)
+ if i == -1 {
+ return -1
+ }
+ n += i
+ if n == 0 || data[n-1] != '\\' {
+ break
+ }
+ n++
+ }
+ return
+}
+
+// ScanPairGreedy scans for complete pair of opening and closing chars in greedy manner.
+// Note that first opening byte must not be present in data.
+func ScanPairGreedy(data []byte, open, close byte) (n int) {
+ var m int
+ opened := 1
+ for {
+ i := bytes.IndexByte(data[n:], close)
+ if i == -1 {
+ return -1
+ }
+ n += i
+ // If found index is not escaped then it is the end.
+ if n == 0 || data[n-1] != '\\' {
+ opened--
+ }
+
+ for m < i {
+ j := bytes.IndexByte(data[m:i], open)
+ if j == -1 {
+ break
+ }
+ m += j + 1
+ opened++
+ }
+
+ if opened == 0 {
+ break
+ }
+
+ n++
+ m = n
+ }
+ return
+}
+
+// RemoveByte returns data without c. If c is not present in data it returns
+// the same slice. If not, it copies data without c.
+func RemoveByte(data []byte, c byte) []byte {
+ j := bytes.IndexByte(data, c)
+ if j == -1 {
+ return data
+ }
+
+ n := len(data) - 1
+
+ // If character is present, than allocate slice with n-1 capacity. That is,
+ // resulting bytes could be at most n-1 length.
+ result := make([]byte, n)
+ k := copy(result, data[:j])
+
+ for i := j + 1; i < n; {
+ j = bytes.IndexByte(data[i:], c)
+ if j != -1 {
+ k += copy(result[k:], data[i:i+j])
+ i = i + j + 1
+ } else {
+ k += copy(result[k:], data[i:])
+ break
+ }
+ }
+
+ return result[:k]
+}
+
+// SkipSpace skips spaces and lws-sequences from p.
+// It returns the number of bytes skipped.
+func SkipSpace(p []byte) (n int) {
+ for len(p) > 0 {
+ switch {
+ case len(p) >= 3 &&
+ p[0] == '\r' &&
+ p[1] == '\n' &&
+ OctetTypes[p[2]].IsSpace():
+ p = p[3:]
+ n += 3
+ case OctetTypes[p[0]].IsSpace():
+ p = p[1:]
+ n++
+ default:
+ return
+ }
+ }
+ return
+}
+
+// ScanToken scan for next token in p. It returns length of the token and its
+// type. It does not trim p.
+func ScanToken(p []byte) (n int, t ItemType) {
+ if len(p) == 0 {
+ return 0, ItemUndef
+ }
+
+ c := p[0]
+ switch {
+ case OctetTypes[c].IsSeparator():
+ return 1, ItemSeparator
+
+ case OctetTypes[c].IsToken():
+ for n = 1; n < len(p); n++ {
+ c := p[n]
+ if !OctetTypes[c].IsToken() {
+ break
+ }
+ }
+ return n, ItemToken
+
+ default:
+ return -1, ItemUndef
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/octet.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/octet.go
new file mode 100644
index 00000000000..2a04cdd0909
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/octet.go
@@ -0,0 +1,83 @@
+package httphead
+
+// OctetType describes character type.
+//
+// From the "Basic Rules" chapter of RFC2616
+// See https://tools.ietf.org/html/rfc2616#section-2.2
+//
+// OCTET =
+// CHAR =
+// UPALPHA =
+// LOALPHA =
+// ALPHA = UPALPHA | LOALPHA
+// DIGIT =
+// CTL =
+// CR =
+// LF =
+// SP =
+// HT =
+// <"> =
+// CRLF = CR LF
+// LWS = [CRLF] 1*( SP | HT )
+//
+// Many HTTP/1.1 header field values consist of words separated by LWS
+// or special characters. These special characters MUST be in a quoted
+// string to be used within a parameter value (as defined in section
+// 3.6).
+//
+// token = 1*
+// separators = "(" | ")" | "<" | ">" | "@"
+// | "," | ";" | ":" | "\" | <">
+// | "/" | "[" | "]" | "?" | "="
+// | "{" | "}" | SP | HT
+type OctetType byte
+
+// IsChar reports whether octet is CHAR.
+func (t OctetType) IsChar() bool { return t&octetChar != 0 }
+
+// IsControl reports whether octet is CTL.
+func (t OctetType) IsControl() bool { return t&octetControl != 0 }
+
+// IsSeparator reports whether octet is separator.
+func (t OctetType) IsSeparator() bool { return t&octetSeparator != 0 }
+
+// IsSpace reports whether octet is space (SP or HT).
+func (t OctetType) IsSpace() bool { return t&octetSpace != 0 }
+
+// IsToken reports whether octet is token.
+func (t OctetType) IsToken() bool { return t&octetToken != 0 }
+
+const (
+ octetChar OctetType = 1 << iota
+ octetControl
+ octetSpace
+ octetSeparator
+ octetToken
+)
+
+// OctetTypes is a table of octets.
+var OctetTypes [256]OctetType
+
+func init() {
+ for c := 32; c < 256; c++ {
+ var t OctetType
+ if c <= 127 {
+ t |= octetChar
+ }
+ if 0 <= c && c <= 31 || c == 127 {
+ t |= octetControl
+ }
+ switch c {
+ case '(', ')', '<', '>', '@', ',', ';', ':', '"', '/', '[', ']', '?', '=', '{', '}', '\\':
+ t |= octetSeparator
+ case ' ', '\t':
+ t |= octetSpace | octetSeparator
+ }
+
+ if t.IsChar() && !t.IsControl() && !t.IsSeparator() && !t.IsSpace() {
+ t |= octetToken
+ }
+
+ OctetTypes[c] = t
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/option.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/option.go
new file mode 100644
index 00000000000..243be08c9a0
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/option.go
@@ -0,0 +1,187 @@
+package httphead
+
+import (
+ "bytes"
+ "sort"
+)
+
+// Option represents a header option.
+type Option struct {
+ Name []byte
+ Parameters Parameters
+}
+
+// Size returns number of bytes need to be allocated for use in opt.Copy.
+func (opt Option) Size() int {
+ return len(opt.Name) + opt.Parameters.bytes
+}
+
+// Copy copies all underlying []byte slices into p and returns new Option.
+// Note that p must be at least of opt.Size() length.
+func (opt Option) Copy(p []byte) Option {
+ n := copy(p, opt.Name)
+ opt.Name = p[:n]
+ opt.Parameters, p = opt.Parameters.Copy(p[n:])
+ return opt
+}
+
+// String represents option as a string.
+func (opt Option) String() string {
+ return "{" + string(opt.Name) + " " + opt.Parameters.String() + "}"
+}
+
+// NewOption creates named option with given parameters.
+func NewOption(name string, params map[string]string) Option {
+ p := Parameters{}
+ for k, v := range params {
+ p.Set([]byte(k), []byte(v))
+ }
+ return Option{
+ Name: []byte(name),
+ Parameters: p,
+ }
+}
+
+// Equal reports whether option is equal to b.
+func (opt Option) Equal(b Option) bool {
+ if bytes.Equal(opt.Name, b.Name) {
+ return opt.Parameters.Equal(b.Parameters)
+ }
+ return false
+}
+
+// Parameters represents option's parameters.
+type Parameters struct {
+ pos int
+ bytes int
+ arr [8]pair
+ dyn []pair
+}
+
+// Equal reports whether a equal to b.
+func (p Parameters) Equal(b Parameters) bool {
+ switch {
+ case p.dyn == nil && b.dyn == nil:
+ case p.dyn != nil && b.dyn != nil:
+ default:
+ return false
+ }
+
+ ad, bd := p.data(), b.data()
+ if len(ad) != len(bd) {
+ return false
+ }
+
+ sort.Sort(pairs(ad))
+ sort.Sort(pairs(bd))
+
+ for i := 0; i < len(ad); i++ {
+ av, bv := ad[i], bd[i]
+ if !bytes.Equal(av.key, bv.key) || !bytes.Equal(av.value, bv.value) {
+ return false
+ }
+ }
+ return true
+}
+
+// Size returns number of bytes that needed to copy p.
+func (p *Parameters) Size() int {
+ return p.bytes
+}
+
+// Copy copies all underlying []byte slices into dst and returns new
+// Parameters.
+// Note that dst must be at least of p.Size() length.
+func (p *Parameters) Copy(dst []byte) (Parameters, []byte) {
+ ret := Parameters{
+ pos: p.pos,
+ bytes: p.bytes,
+ }
+ if p.dyn != nil {
+ ret.dyn = make([]pair, len(p.dyn))
+ for i, v := range p.dyn {
+ ret.dyn[i], dst = v.copy(dst)
+ }
+ } else {
+ for i, p := range p.arr {
+ ret.arr[i], dst = p.copy(dst)
+ }
+ }
+ return ret, dst
+}
+
+// Get returns value by key and flag about existence such value.
+func (p *Parameters) Get(key string) (value []byte, ok bool) {
+ for _, v := range p.data() {
+ if string(v.key) == key {
+ return v.value, true
+ }
+ }
+ return nil, false
+}
+
+// Set sets value by key.
+func (p *Parameters) Set(key, value []byte) {
+ p.bytes += len(key) + len(value)
+
+ if p.pos < len(p.arr) {
+ p.arr[p.pos] = pair{key, value}
+ p.pos++
+ return
+ }
+
+ if p.dyn == nil {
+ p.dyn = make([]pair, len(p.arr), len(p.arr)+1)
+ copy(p.dyn, p.arr[:])
+ }
+ p.dyn = append(p.dyn, pair{key, value})
+}
+
+// ForEach iterates over parameters key-value pairs and calls cb for each one.
+func (p *Parameters) ForEach(cb func(k, v []byte) bool) {
+ for _, v := range p.data() {
+ if !cb(v.key, v.value) {
+ break
+ }
+ }
+}
+
+// String represents parameters as a string.
+func (p *Parameters) String() (ret string) {
+ ret = "["
+ for i, v := range p.data() {
+ if i > 0 {
+ ret += " "
+ }
+ ret += string(v.key) + ":" + string(v.value)
+ }
+ return ret + "]"
+}
+
+func (p *Parameters) data() []pair {
+ if p.dyn != nil {
+ return p.dyn
+ }
+ return p.arr[:p.pos]
+}
+
+type pair struct {
+ key, value []byte
+}
+
+func (p pair) copy(dst []byte) (pair, []byte) {
+ n := copy(dst, p.key)
+ p.key = dst[:n]
+ m := n + copy(dst[n:], p.value)
+ p.value = dst[n:m]
+
+ dst = dst[m:]
+
+ return p, dst
+}
+
+type pairs []pair
+
+func (p pairs) Len() int { return len(p) }
+func (p pairs) Less(a, b int) bool { return bytes.Compare(p[a].key, p[b].key) == -1 }
+func (p pairs) Swap(a, b int) { p[a], p[b] = p[b], p[a] }
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/writer.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/writer.go
new file mode 100644
index 00000000000..e5df3ddf404
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/writer.go
@@ -0,0 +1,101 @@
+package httphead
+
+import "io"
+
+var (
+ comma = []byte{','}
+ equality = []byte{'='}
+ semicolon = []byte{';'}
+ quote = []byte{'"'}
+ escape = []byte{'\\'}
+)
+
+// WriteOptions write options list to the dest.
+// It uses the same form as {Scan,Parse}Options functions:
+// values = 1#value
+// value = token *( ";" param )
+// param = token [ "=" (token | quoted-string) ]
+//
+// It wraps values into the quoted-string sequence if they contain any
+// non-token characters.
+func WriteOptions(dest io.Writer, options []Option) (n int, err error) {
+ w := writer{w: dest}
+ for i, opt := range options {
+ if i > 0 {
+ w.write(comma)
+ }
+
+ writeTokenSanitized(&w, opt.Name)
+
+ for _, p := range opt.Parameters.data() {
+ w.write(semicolon)
+ writeTokenSanitized(&w, p.key)
+ if len(p.value) != 0 {
+ w.write(equality)
+ writeTokenSanitized(&w, p.value)
+ }
+ }
+ }
+ return w.result()
+}
+
+// writeTokenSanitized writes token as is or as quoted string if it contains
+// non-token characters.
+//
+// Note that it does not expect LWS sequences to be in s, because LWS is used only as
+// header field continuation:
+// "A CRLF is allowed in the definition of TEXT only as part of a header field
+// continuation. It is expected that the folding LWS will be replaced with a
+// single SP before interpretation of the TEXT value."
+// See https://tools.ietf.org/html/rfc2616#section-2
+//
+// That is we sanitizing s for writing, so there could not be any header field
+// continuation.
+// That is, any CRLF will be escaped like any other control character not allowed in TEXT.
+func writeTokenSanitized(bw *writer, bts []byte) {
+ var qt bool
+ var pos int
+ for i := 0; i < len(bts); i++ {
+ c := bts[i]
+ if !OctetTypes[c].IsToken() && !qt {
+ qt = true
+ bw.write(quote)
+ }
+ if OctetTypes[c].IsControl() || c == '"' {
+ if !qt {
+ qt = true
+ bw.write(quote)
+ }
+ bw.write(bts[pos:i])
+ bw.write(escape)
+ bw.write(bts[i : i+1])
+ pos = i + 1
+ }
+ }
+ if !qt {
+ bw.write(bts)
+ } else {
+ bw.write(bts[pos:])
+ bw.write(quote)
+ }
+}
+
+type writer struct {
+ w io.Writer
+ n int
+ err error
+}
+
+func (w *writer) write(p []byte) {
+ if w.err != nil {
+ return
+ }
+ var n int
+ n, w.err = w.w.Write(p)
+ w.n += n
+ return
+}
+
+func (w *writer) result() (int, error) {
+ return w.n, w.err
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/README.md b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/README.md
new file mode 100644
index 00000000000..45685581dae
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/README.md
@@ -0,0 +1,107 @@
+# pool
+
+[![GoDoc][godoc-image]][godoc-url]
+
+> Tiny memory reuse helpers for Go.
+
+## generic
+
+Without use of subpackages, `pool` allows reusing any struct distinguishable
+by size in generic way:
+
+```go
+package main
+
+import "github.com/gobwas/pool"
+
+func main() {
+ x, n := pool.Get(100) // Returns object with size 128 or nil.
+ if x == nil {
+ // Create x somehow with knowledge that n is 128.
+ }
+ defer pool.Put(x, n)
+
+ // Work with x.
+}
+```
+
+Pool allows you to pass specific options for constructing custom pool:
+
+```go
+package main
+
+import "github.com/gobwas/pool"
+
+func main() {
+ p := pool.Custom(
+ pool.WithLogSizeMapping(), // Will ceil size n passed to Get(n) to nearest power of two.
+ pool.WithLogSizeRange(64, 512), // Will reuse objects in logarithmic range [64, 512].
+ pool.WithSize(65536), // Will reuse object with size 65536.
+ )
+ x, n := p.Get(1000) // Returns nil and 1000 because mapped size 1000 => 1024 is not reusing by the pool.
+ defer pool.Put(x, n) // Will not reuse x.
+
+ // Work with x.
+}
+```
+
+Note that there are few non-generic pooling implementations inside subpackages.
+
+## pbytes
+
+Subpackage `pbytes` is intended for `[]byte` reuse.
+
+```go
+package main
+
+import "github.com/gobwas/pool/pbytes"
+
+func main() {
+ bts := pbytes.GetCap(100) // Returns make([]byte, 0, 128).
+ defer pbytes.Put(bts)
+
+ // Work with bts.
+}
+```
+
+You can also create your own range for pooling:
+
+```go
+package main
+
+import "github.com/gobwas/pool/pbytes"
+
+func main() {
+ // Reuse only slices whose capacity is 128, 256, 512 or 1024.
+ pool := pbytes.New(128, 1024)
+
+ bts := pool.GetCap(100) // Returns make([]byte, 0, 128).
+ defer pool.Put(bts)
+
+ // Work with bts.
+}
+```
+
+## pbufio
+
+Subpackage `pbufio` is intended for `*bufio.{Reader, Writer}` reuse.
+
+```go
+package main
+
+import "github.com/gobwas/pool/pbufio"
+
+func main() {
+ bw := pbufio.GetWriter(os.Stdout, 100) // Returns bufio.NewWriterSize(128).
+ defer pbufio.PutWriter(bw)
+
+ // Work with bw.
+}
+```
+
+Like with `pbytes`, you can also create pool with custom reuse bounds.
+
+
+
+[godoc-image]: https://godoc.org/github.com/gobwas/pool?status.svg
+[godoc-url]: https://godoc.org/github.com/gobwas/pool
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/generic.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/generic.go
new file mode 100644
index 00000000000..d40b362458b
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/generic.go
@@ -0,0 +1,87 @@
+package pool
+
+import (
+ "sync"
+
+ "github.com/gobwas/pool/internal/pmath"
+)
+
+var DefaultPool = New(128, 65536)
+
+// Get pulls object whose generic size is at least of given size. It also
+// returns a real size of x for further pass to Put(). It returns -1 as real
+// size for nil x. Size >-1 does not mean that x is non-nil, so checks must be
+// done.
+//
+// Note that size could be ceiled to the next power of two.
+//
+// Get is a wrapper around DefaultPool.Get().
+func Get(size int) (interface{}, int) { return DefaultPool.Get(size) }
+
+// Put takes x and its size for future reuse.
+// Put is a wrapper around DefaultPool.Put().
+func Put(x interface{}, size int) { DefaultPool.Put(x, size) }
+
+// Pool contains logic of reusing objects distinguishable by size in generic
+// way.
+type Pool struct {
+ pool map[int]*sync.Pool
+ size func(int) int
+}
+
+// New creates new Pool that reuses objects which size is in logarithmic range
+// [min, max].
+//
+// Note that it is a shortcut for Custom() constructor with Options provided by
+// WithLogSizeMapping() and WithLogSizeRange(min, max) calls.
+func New(min, max int) *Pool {
+ return Custom(
+ WithLogSizeMapping(),
+ WithLogSizeRange(min, max),
+ )
+}
+
+// Custom creates new Pool with given options.
+func Custom(opts ...Option) *Pool {
+ p := &Pool{
+ pool: make(map[int]*sync.Pool),
+ size: pmath.Identity,
+ }
+
+ c := (*poolConfig)(p)
+ for _, opt := range opts {
+ opt(c)
+ }
+
+ return p
+}
+
+// Get pulls object whose generic size is at least of given size.
+// It also returns a real size of x for further pass to Put() even if x is nil.
+// Note that size could be ceiled to the next power of two.
+func (p *Pool) Get(size int) (interface{}, int) {
+ n := p.size(size)
+ if pool := p.pool[n]; pool != nil {
+ return pool.Get(), n
+ }
+ return nil, size
+}
+
+// Put takes x and its size for future reuse.
+func (p *Pool) Put(x interface{}, size int) {
+ if pool := p.pool[size]; pool != nil {
+ pool.Put(x)
+ }
+}
+
+type poolConfig Pool
+
+// AddSize adds size n to the map.
+func (p *poolConfig) AddSize(n int) {
+ p.pool[n] = new(sync.Pool)
+}
+
+// SetSizeMapping sets up incoming size mapping function.
+func (p *poolConfig) SetSizeMapping(size func(int) int) {
+ p.size = size
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/internal/pmath/pmath.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/internal/pmath/pmath.go
new file mode 100644
index 00000000000..df152ed12a5
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/internal/pmath/pmath.go
@@ -0,0 +1,65 @@
+package pmath
+
+const (
+ bitsize = 32 << (^uint(0) >> 63)
+ maxint = int(1<<(bitsize-1) - 1)
+ maxintHeadBit = 1 << (bitsize - 2)
+)
+
+// LogarithmicRange iterates from ceiled to power of two min to max,
+// calling cb on each iteration.
+func LogarithmicRange(min, max int, cb func(int)) {
+ if min == 0 {
+ min = 1
+ }
+ for n := CeilToPowerOfTwo(min); n <= max; n <<= 1 {
+ cb(n)
+ }
+}
+
+// IsPowerOfTwo reports whether given integer is a power of two.
+func IsPowerOfTwo(n int) bool {
+ return n&(n-1) == 0
+}
+
+// Identity is identity.
+func Identity(n int) int {
+ return n
+}
+
+// CeilToPowerOfTwo returns the least power of two integer value greater than
+// or equal to n.
+func CeilToPowerOfTwo(n int) int {
+ if n&maxintHeadBit != 0 && n > maxintHeadBit {
+ panic("argument is too large")
+ }
+ if n <= 2 {
+ return n
+ }
+ n--
+ n = fillBits(n)
+ n++
+ return n
+}
+
+// FloorToPowerOfTwo returns the greatest power of two integer value less than
+// or equal to n.
+func FloorToPowerOfTwo(n int) int {
+ if n <= 2 {
+ return n
+ }
+ n = fillBits(n)
+ n >>= 1
+ n++
+ return n
+}
+
+func fillBits(n int) int {
+ n |= n >> 1
+ n |= n >> 2
+ n |= n >> 4
+ n |= n >> 8
+ n |= n >> 16
+ n |= n >> 32
+ return n
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/option.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/option.go
new file mode 100644
index 00000000000..d6e42b70055
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/option.go
@@ -0,0 +1,43 @@
+package pool
+
+import "github.com/gobwas/pool/internal/pmath"
+
+// Option configures pool.
+type Option func(Config)
+
+// Config describes generic pool configuration.
+type Config interface {
+ AddSize(n int)
+ SetSizeMapping(func(int) int)
+}
+
+// WithSizeLogRange returns an Option that will add logarithmic range of
+// pooling sizes containing [min, max] values.
+func WithLogSizeRange(min, max int) Option {
+ return func(c Config) {
+ pmath.LogarithmicRange(min, max, func(n int) {
+ c.AddSize(n)
+ })
+ }
+}
+
+// WithSize returns an Option that will add given pooling size to the pool.
+func WithSize(n int) Option {
+ return func(c Config) {
+ c.AddSize(n)
+ }
+}
+
+func WithSizeMapping(sz func(int) int) Option {
+ return func(c Config) {
+ c.SetSizeMapping(sz)
+ }
+}
+
+func WithLogSizeMapping() Option {
+ return WithSizeMapping(pmath.CeilToPowerOfTwo)
+}
+
+func WithIdentitySizeMapping() Option {
+ return WithSizeMapping(pmath.Identity)
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/pbufio/pbufio.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/pbufio/pbufio.go
new file mode 100644
index 00000000000..d526bd80da8
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/pbufio/pbufio.go
@@ -0,0 +1,106 @@
+// Package pbufio contains tools for pooling bufio.Reader and bufio.Writers.
+package pbufio
+
+import (
+ "bufio"
+ "io"
+
+ "github.com/gobwas/pool"
+)
+
+var (
+ DefaultWriterPool = NewWriterPool(256, 65536)
+ DefaultReaderPool = NewReaderPool(256, 65536)
+)
+
+// GetWriter returns bufio.Writer whose buffer has at least size bytes.
+// Note that size could be ceiled to the next power of two.
+// GetWriter is a wrapper around DefaultWriterPool.Get().
+func GetWriter(w io.Writer, size int) *bufio.Writer { return DefaultWriterPool.Get(w, size) }
+
+// PutWriter takes bufio.Writer for future reuse.
+// It does not reuse bufio.Writer whose underlying buffer size is not a power of two or is out of pool min/max range.
+// PutWriter is a wrapper around DefaultWriterPool.Put().
+func PutWriter(bw *bufio.Writer) { DefaultWriterPool.Put(bw) }
+
+// GetReader returns bufio.Reader whose buffer has at least size bytes. It returns
+// its capacity for further pass to Put().
+// Note that size could be ceiled to the next power of two.
+// GetReader is a wrapper around DefaultReaderPool.Get().
+func GetReader(w io.Reader, size int) *bufio.Reader { return DefaultReaderPool.Get(w, size) }
+
+// PutReader takes bufio.Reader and its size for future reuse.
+// It does not reuse bufio.Reader if size is not power of two or is out of pool
+// min/max range.
+// PutReader is a wrapper around DefaultReaderPool.Put().
+func PutReader(bw *bufio.Reader) { DefaultReaderPool.Put(bw) }
+
+// WriterPool contains logic of *bufio.Writer reuse with various size.
+type WriterPool struct {
+ pool *pool.Pool
+}
+
+// NewWriterPool creates new WriterPool that reuses writers which size is in
+// logarithmic range [min, max].
+func NewWriterPool(min, max int) *WriterPool {
+ return &WriterPool{pool.New(min, max)}
+}
+
+// CustomWriterPool creates new WriterPool with given options.
+func CustomWriterPool(opts ...pool.Option) *WriterPool {
+ return &WriterPool{pool.Custom(opts...)}
+}
+
+// Get returns bufio.Writer whose buffer has at least size bytes.
+func (wp *WriterPool) Get(w io.Writer, size int) *bufio.Writer {
+ v, n := wp.pool.Get(size)
+ if v != nil {
+ bw := v.(*bufio.Writer)
+ bw.Reset(w)
+ return bw
+ }
+ return bufio.NewWriterSize(w, n)
+}
+
+// Put takes ownership of bufio.Writer for further reuse.
+func (wp *WriterPool) Put(bw *bufio.Writer) {
+ // Should reset even if we do Reset() inside Get().
+ // This is done to prevent locking underlying io.Writer from GC.
+ bw.Reset(nil)
+ wp.pool.Put(bw, writerSize(bw))
+}
+
+// ReaderPool contains logic of *bufio.Reader reuse with various size.
+type ReaderPool struct {
+ pool *pool.Pool
+}
+
+// NewReaderPool creates new ReaderPool that reuses writers which size is in
+// logarithmic range [min, max].
+func NewReaderPool(min, max int) *ReaderPool {
+ return &ReaderPool{pool.New(min, max)}
+}
+
+// CustomReaderPool creates new ReaderPool with given options.
+func CustomReaderPool(opts ...pool.Option) *ReaderPool {
+ return &ReaderPool{pool.Custom(opts...)}
+}
+
+// Get returns bufio.Reader whose buffer has at least size bytes.
+func (rp *ReaderPool) Get(r io.Reader, size int) *bufio.Reader {
+ v, n := rp.pool.Get(size)
+ if v != nil {
+ br := v.(*bufio.Reader)
+ br.Reset(r)
+ return br
+ }
+ return bufio.NewReaderSize(r, n)
+}
+
+// Put takes ownership of bufio.Reader for further reuse.
+func (rp *ReaderPool) Put(br *bufio.Reader) {
+ // Should reset even if we do Reset() inside Get().
+ // This is done to prevent locking underlying io.Reader from GC.
+ br.Reset(nil)
+ rp.pool.Put(br, readerSize(br))
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/pbufio/pbufio_go110.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/pbufio/pbufio_go110.go
new file mode 100644
index 00000000000..c736ae56e11
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/pbufio/pbufio_go110.go
@@ -0,0 +1,13 @@
+// +build go1.10
+
+package pbufio
+
+import "bufio"
+
+func writerSize(bw *bufio.Writer) int {
+ return bw.Size()
+}
+
+func readerSize(br *bufio.Reader) int {
+ return br.Size()
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/pbufio/pbufio_go19.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/pbufio/pbufio_go19.go
new file mode 100644
index 00000000000..e71dd447d2a
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/pbufio/pbufio_go19.go
@@ -0,0 +1,27 @@
+// +build !go1.10
+
+package pbufio
+
+import "bufio"
+
+func writerSize(bw *bufio.Writer) int {
+ return bw.Available() + bw.Buffered()
+}
+
+// readerSize returns buffer size of the given buffered reader.
+// NOTE: current workaround implementation resets underlying io.Reader.
+func readerSize(br *bufio.Reader) int {
+ br.Reset(sizeReader)
+ br.ReadByte()
+ n := br.Buffered() + 1
+ br.Reset(nil)
+ return n
+}
+
+var sizeReader optimisticReader
+
+type optimisticReader struct{}
+
+func (optimisticReader) Read(p []byte) (int, error) {
+ return len(p), nil
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/pool.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/pool.go
new file mode 100644
index 00000000000..1fe9e602fc5
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/pool.go
@@ -0,0 +1,25 @@
+// Package pool contains helpers for pooling structures distinguishable by
+// size.
+//
+// Quick example:
+//
+// import "github.com/gobwas/pool"
+//
+// func main() {
+// // Reuse objects in logarithmic range from 0 to 64 (0,1,2,4,8,16,32,64).
+// p := pool.New(0, 64)
+//
+// buf, n := p.Get(10) // Returns buffer with 16 capacity.
+// if buf == nil {
+// buf = bytes.NewBuffer(make([]byte, n))
+// }
+// defer p.Put(buf, n)
+//
+// // Work with buf.
+// }
+//
+// There are non-generic implementations for pooling:
+// - pool/pbytes for []byte reuse;
+// - pool/pbufio for *bufio.Reader and *bufio.Writer reuse;
+//
+package pool
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/.gitignore b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/.gitignore
new file mode 100644
index 00000000000..e3e2b1080d0
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/.gitignore
@@ -0,0 +1,5 @@
+bin/
+reports/
+cpu.out
+mem.out
+ws.test
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/.travis.yml b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/.travis.yml
new file mode 100644
index 00000000000..cf74f1bee3c
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/.travis.yml
@@ -0,0 +1,25 @@
+sudo: required
+
+language: go
+
+services:
+ - docker
+
+os:
+ - linux
+ - windows
+
+go:
+ - 1.8.x
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
+ - 1.x
+
+install:
+ - go get github.com/gobwas/pool
+ - go get github.com/gobwas/httphead
+
+script:
+ - if [ "$TRAVIS_OS_NAME" = "windows" ]; then go test ./...; fi
+ - if [ "$TRAVIS_OS_NAME" = "linux" ]; then make test autobahn; fi
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/LICENSE b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/LICENSE
new file mode 100644
index 00000000000..d2611fddf55
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017-2018 Sergey Kamardin
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/Makefile b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/Makefile
new file mode 100644
index 00000000000..075e83c74bc
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/Makefile
@@ -0,0 +1,47 @@
+BENCH ?=.
+BENCH_BASE?=master
+
+clean:
+ rm -f bin/reporter
+ rm -fr autobahn/report/*
+
+bin/reporter:
+ go build -o bin/reporter ./autobahn
+
+bin/gocovmerge:
+ go build -o bin/gocovmerge github.com/wadey/gocovmerge
+
+.PHONY: autobahn
+autobahn: clean bin/reporter
+ ./autobahn/script/test.sh --build
+ bin/reporter $(PWD)/autobahn/report/index.json
+
+test:
+ go test -coverprofile=ws.coverage .
+ go test -coverprofile=wsutil.coverage ./wsutil
+
+cover: bin/gocovmerge test autobahn
+ bin/gocovmerge ws.coverage wsutil.coverage autobahn/report/server.coverage > total.coverage
+
+benchcmp: BENCH_BRANCH=$(shell git rev-parse --abbrev-ref HEAD)
+benchcmp: BENCH_OLD:=$(shell mktemp -t old.XXXX)
+benchcmp: BENCH_NEW:=$(shell mktemp -t new.XXXX)
+benchcmp:
+ if [ ! -z "$(shell git status -s)" ]; then\
+ echo "could not compare with $(BENCH_BASE) – found unstaged changes";\
+ exit 1;\
+ fi;\
+ if [ "$(BENCH_BRANCH)" == "$(BENCH_BASE)" ]; then\
+ echo "comparing the same branches";\
+ exit 1;\
+ fi;\
+ echo "benchmarking $(BENCH_BRANCH)...";\
+ go test -run=none -bench=$(BENCH) -benchmem > $(BENCH_NEW);\
+ echo "benchmarking $(BENCH_BASE)...";\
+ git checkout -q $(BENCH_BASE);\
+ go test -run=none -bench=$(BENCH) -benchmem > $(BENCH_OLD);\
+ git checkout -q $(BENCH_BRANCH);\
+ echo "\nresults:";\
+ echo "========\n";\
+ benchcmp $(BENCH_OLD) $(BENCH_NEW);\
+
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/README.md b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/README.md
new file mode 100644
index 00000000000..74acd78bd08
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/README.md
@@ -0,0 +1,360 @@
+# ws
+
+[![GoDoc][godoc-image]][godoc-url]
+[![Travis][travis-image]][travis-url]
+
+> [RFC6455][rfc-url] WebSocket implementation in Go.
+
+# Features
+
+- Zero-copy upgrade
+- No intermediate allocations during I/O
+- Low-level API which allows you to build your own logic of packet handling and
+ buffers reuse
+- High-level wrappers and helpers around API in `wsutil` package, which allow
+ to start fast without digging the protocol internals
+
+# Documentation
+
+[GoDoc][godoc-url].
+
+# Why
+
+Existing WebSocket implementations do not allow users to reuse I/O buffers
+between connections in clear way. This library aims to export efficient
+low-level interface for working with the protocol without forcing only one way
+it could be used.
+
+By the way, if you want higher-level tools, you can use the `wsutil`
+package.
+
+# Status
+
+Library is tagged as `v1*` so its API must not be broken during some
+improvements or refactoring.
+
+This implementation of RFC6455 passes [Autobahn Test
+Suite](https://github.com/crossbario/autobahn-testsuite) and currently has
+about 78% coverage.
+
+# Examples
+
+Example applications using `ws` are developed in separate repository
+[ws-examples](https://github.com/gobwas/ws-examples).
+
+# Usage
+
+The higher-level example of WebSocket echo server:
+
+```go
+package main
+
+import (
+ "net/http"
+
+ "github.com/gobwas/ws"
+ "github.com/gobwas/ws/wsutil"
+)
+
+func main() {
+ http.ListenAndServe(":8080", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ conn, _, _, err := ws.UpgradeHTTP(r, w)
+ if err != nil {
+ // handle error
+ }
+ go func() {
+ defer conn.Close()
+
+ for {
+ msg, op, err := wsutil.ReadClientData(conn)
+ if err != nil {
+ // handle error
+ }
+ err = wsutil.WriteServerMessage(conn, op, msg)
+ if err != nil {
+ // handle error
+ }
+ }
+ }()
+ }))
+}
+```
+
+Lower-level, but still high-level example:
+
+
+```go
+import (
+ "net/http"
+ "io"
+
+ "github.com/gobwas/ws"
+ "github.com/gobwas/ws/wsutil"
+)
+
+func main() {
+ http.ListenAndServe(":8080", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ conn, _, _, err := ws.UpgradeHTTP(r, w)
+ if err != nil {
+ // handle error
+ }
+ go func() {
+ defer conn.Close()
+
+ var (
+ state = ws.StateServerSide
+ reader = wsutil.NewReader(conn, state)
+ writer = wsutil.NewWriter(conn, state, ws.OpText)
+ )
+ for {
+ header, err := reader.NextFrame()
+ if err != nil {
+ // handle error
+ }
+
+ // Reset writer to write frame with right operation code.
+ writer.Reset(conn, state, header.OpCode)
+
+ if _, err = io.Copy(writer, reader); err != nil {
+ // handle error
+ }
+ if err = writer.Flush(); err != nil {
+ // handle error
+ }
+ }
+ }()
+ }))
+}
+```
+
+We can apply the same pattern to read and write structured responses through a JSON encoder and decoder:
+
+```go
+ ...
+ var (
+ r = wsutil.NewReader(conn, ws.StateServerSide)
+ w = wsutil.NewWriter(conn, ws.StateServerSide, ws.OpText)
+ decoder = json.NewDecoder(r)
+ encoder = json.NewEncoder(w)
+ )
+ for {
+ hdr, err = r.NextFrame()
+ if err != nil {
+ return err
+ }
+ if hdr.OpCode == ws.OpClose {
+ return io.EOF
+ }
+ var req Request
+ if err := decoder.Decode(&req); err != nil {
+ return err
+ }
+ var resp Response
+ if err := encoder.Encode(&resp); err != nil {
+ return err
+ }
+ if err = w.Flush(); err != nil {
+ return err
+ }
+ }
+ ...
+```
+
+The lower-level example without `wsutil`:
+
+```go
+package main
+
+import (
+ "net"
+ "io"
+
+ "github.com/gobwas/ws"
+)
+
+func main() {
+ ln, err := net.Listen("tcp", "localhost:8080")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ for {
+ conn, err := ln.Accept()
+ if err != nil {
+ // handle error
+ }
+ _, err = ws.Upgrade(conn)
+ if err != nil {
+ // handle error
+ }
+
+ go func() {
+ defer conn.Close()
+
+ for {
+ header, err := ws.ReadHeader(conn)
+ if err != nil {
+ // handle error
+ }
+
+ payload := make([]byte, header.Length)
+ _, err = io.ReadFull(conn, payload)
+ if err != nil {
+ // handle error
+ }
+ if header.Masked {
+ ws.Cipher(payload, header.Mask, 0)
+ }
+
+ // Reset the Masked flag, server frames must not be masked as
+ // RFC6455 says.
+ header.Masked = false
+
+ if err := ws.WriteHeader(conn, header); err != nil {
+ // handle error
+ }
+ if _, err := conn.Write(payload); err != nil {
+ // handle error
+ }
+
+ if header.OpCode == ws.OpClose {
+ return
+ }
+ }
+ }()
+ }
+}
+```
+
+# Zero-copy upgrade
+
+Zero-copy upgrade helps to avoid unnecessary allocations and copying while
+handling HTTP Upgrade request.
+
+Processing of all non-websocket headers is made in place with use of registered
+user callbacks whose arguments are only valid until callback returns.
+
+The simple example looks like this:
+
+```go
+package main
+
+import (
+ "net"
+ "log"
+
+ "github.com/gobwas/ws"
+)
+
+func main() {
+ ln, err := net.Listen("tcp", "localhost:8080")
+ if err != nil {
+ log.Fatal(err)
+ }
+ u := ws.Upgrader{
+ OnHeader: func(key, value []byte) (err error) {
+ log.Printf("non-websocket header: %q=%q", key, value)
+ return
+ },
+ }
+ for {
+ conn, err := ln.Accept()
+ if err != nil {
+ // handle error
+ }
+
+ _, err = u.Upgrade(conn)
+ if err != nil {
+ // handle error
+ }
+ }
+}
+```
+
+Usage of `ws.Upgrader` here brings ability to control incoming connections on
+tcp level and simply not to accept them by some logic.
+
+Zero-copy upgrade is for high-load services which have to control many
+resources such as connections buffers.
+
+The real life example could be like this:
+
+```go
+package main
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "runtime"
+
+ "github.com/gobwas/httphead"
+ "github.com/gobwas/ws"
+)
+
+func main() {
+ ln, err := net.Listen("tcp", "localhost:8080")
+ if err != nil {
+ // handle error
+ }
+
+ // Prepare handshake header writer from http.Header mapping.
+ header := ws.HandshakeHeaderHTTP(http.Header{
+ "X-Go-Version": []string{runtime.Version()},
+ })
+
+ u := ws.Upgrader{
+ OnHost: func(host []byte) error {
+ if string(host) == "github.com" {
+ return nil
+ }
+ return ws.RejectConnectionError(
+ ws.RejectionStatus(403),
+ ws.RejectionHeader(ws.HandshakeHeaderString(
+ "X-Want-Host: github.com\r\n",
+ )),
+ )
+ },
+ OnHeader: func(key, value []byte) error {
+ if string(key) != "Cookie" {
+ return nil
+ }
+ ok := httphead.ScanCookie(value, func(key, value []byte) bool {
+ // Check session here or do some other stuff with cookies.
+ // Maybe copy some values for future use.
+ return true
+ })
+ if ok {
+ return nil
+ }
+ return ws.RejectConnectionError(
+ ws.RejectionReason("bad cookie"),
+ ws.RejectionStatus(400),
+ )
+ },
+ OnBeforeUpgrade: func() (ws.HandshakeHeader, error) {
+ return header, nil
+ },
+ }
+ for {
+ conn, err := ln.Accept()
+ if err != nil {
+ log.Fatal(err)
+ }
+ _, err = u.Upgrade(conn)
+ if err != nil {
+ log.Printf("upgrade error: %s", err)
+ }
+ }
+}
+```
+
+
+
+[rfc-url]: https://tools.ietf.org/html/rfc6455
+[godoc-image]: https://godoc.org/github.com/gobwas/ws?status.svg
+[godoc-url]: https://godoc.org/github.com/gobwas/ws
+[travis-image]: https://travis-ci.org/gobwas/ws.svg?branch=master
+[travis-url]: https://travis-ci.org/gobwas/ws
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/check.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/check.go
new file mode 100644
index 00000000000..8aa0df8cc28
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/check.go
@@ -0,0 +1,145 @@
+package ws
+
+import "unicode/utf8"
+
+// State represents state of websocket endpoint.
+// It is used by some functions to be more strict when checking compatibility with RFC6455.
+type State uint8
+
+const (
+ // StateServerSide means that endpoint (caller) is a server.
+ StateServerSide State = 0x1 << iota
+ // StateClientSide means that endpoint (caller) is a client.
+ StateClientSide
+ // StateExtended means that extension was negotiated during handshake.
+ StateExtended
+ // StateFragmented means that endpoint (caller) has received fragmented
+ // frame and waits for continuation parts.
+ StateFragmented
+)
+
+// Is checks whether the s has v enabled.
+func (s State) Is(v State) bool {
+ return uint8(s)&uint8(v) != 0
+}
+
+// Set enables v state on s.
+func (s State) Set(v State) State {
+ return s | v
+}
+
+// Clear disables v state on s.
+func (s State) Clear(v State) State {
+ return s & (^v)
+}
+
+// ServerSide reports whether states represents server side.
+func (s State) ServerSide() bool { return s.Is(StateServerSide) }
+
+// ClientSide reports whether state represents client side.
+func (s State) ClientSide() bool { return s.Is(StateClientSide) }
+
+// Extended reports whether state is extended.
+func (s State) Extended() bool { return s.Is(StateExtended) }
+
+// Fragmented reports whether state is fragmented.
+func (s State) Fragmented() bool { return s.Is(StateFragmented) }
+
+// ProtocolError describes error during checking/parsing websocket frames or
+// headers.
+type ProtocolError string
+
+// Error implements error interface.
+func (p ProtocolError) Error() string { return string(p) }
+
+// Errors used by the protocol checkers.
+var (
+ ErrProtocolOpCodeReserved = ProtocolError("use of reserved op code")
+ ErrProtocolControlPayloadOverflow = ProtocolError("control frame payload limit exceeded")
+ ErrProtocolControlNotFinal = ProtocolError("control frame is not final")
+ ErrProtocolNonZeroRsv = ProtocolError("non-zero rsv bits with no extension negotiated")
+ ErrProtocolMaskRequired = ProtocolError("frames from client to server must be masked")
+ ErrProtocolMaskUnexpected = ProtocolError("frames from server to client must be not masked")
+ ErrProtocolContinuationExpected = ProtocolError("unexpected non-continuation data frame")
+ ErrProtocolContinuationUnexpected = ProtocolError("unexpected continuation data frame")
+ ErrProtocolStatusCodeNotInUse = ProtocolError("status code is not in use")
+ ErrProtocolStatusCodeApplicationLevel = ProtocolError("status code is only application level")
+ ErrProtocolStatusCodeNoMeaning = ProtocolError("status code has no meaning yet")
+ ErrProtocolStatusCodeUnknown = ProtocolError("status code is not defined in spec")
+ ErrProtocolInvalidUTF8 = ProtocolError("invalid utf8 sequence in close reason")
+)
+
+// CheckHeader checks h to contain valid header data for given state s.
+//
+// Note that zero state (0) means that state is clean,
+// neither server nor client side, nor fragmented, nor extended.
+func CheckHeader(h Header, s State) error {
+ if h.OpCode.IsReserved() {
+ return ErrProtocolOpCodeReserved
+ }
+ if h.OpCode.IsControl() {
+ if h.Length > MaxControlFramePayloadSize {
+ return ErrProtocolControlPayloadOverflow
+ }
+ if !h.Fin {
+ return ErrProtocolControlNotFinal
+ }
+ }
+
+ switch {
+ // [RFC6455]: MUST be 0 unless an extension is negotiated that defines meanings for
+ // non-zero values. If a nonzero value is received and none of the
+ // negotiated extensions defines the meaning of such a nonzero value, the
+ // receiving endpoint MUST _Fail the WebSocket Connection_.
+ case h.Rsv != 0 && !s.Extended():
+ return ErrProtocolNonZeroRsv
+
+ // [RFC6455]: The server MUST close the connection upon receiving a frame that is not masked.
+ // In this case, a server MAY send a Close frame with a status code of 1002 (protocol error)
+ // as defined in Section 7.4.1. A server MUST NOT mask any frames that it sends to the client.
+ // A client MUST close a connection if it detects a masked frame. In this case, it MAY use the
+ // status code 1002 (protocol error) as defined in Section 7.4.1.
+ case s.ServerSide() && !h.Masked:
+ return ErrProtocolMaskRequired
+ case s.ClientSide() && h.Masked:
+ return ErrProtocolMaskUnexpected
+
+ // [RFC6455]: See detailed explanation in 5.4 section.
+ case s.Fragmented() && !h.OpCode.IsControl() && h.OpCode != OpContinuation:
+ return ErrProtocolContinuationExpected
+ case !s.Fragmented() && h.OpCode == OpContinuation:
+ return ErrProtocolContinuationUnexpected
+
+ default:
+ return nil
+ }
+}
+
+// CheckCloseFrameData checks received close information
+// to be valid RFC6455 compatible close info.
+//
+// Note that code.Empty() or code.IsAppLevel() will raise error.
+//
+// If endpoint sends close frame without status code (with frame.Length = 0),
+// application should not check its payload.
+func CheckCloseFrameData(code StatusCode, reason string) error {
+ switch {
+ case code.IsNotUsed():
+ return ErrProtocolStatusCodeNotInUse
+
+ case code.IsProtocolReserved():
+ return ErrProtocolStatusCodeApplicationLevel
+
+ case code == StatusNoMeaningYet:
+ return ErrProtocolStatusCodeNoMeaning
+
+ case code.IsProtocolSpec() && !code.IsProtocolDefined():
+ return ErrProtocolStatusCodeUnknown
+
+ case !utf8.ValidString(reason):
+ return ErrProtocolInvalidUTF8
+
+ default:
+ return nil
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/cipher.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/cipher.go
new file mode 100644
index 00000000000..11a2af99bfc
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/cipher.go
@@ -0,0 +1,59 @@
+package ws
+
+import (
+ "encoding/binary"
+ "unsafe"
+)
+
+// Cipher applies XOR cipher to the payload using mask.
+// Offset is used to cipher chunked data (e.g. in io.Reader implementations).
+//
+// To convert masked data into unmasked data, or vice versa, the following
+// algorithm is applied. The same algorithm applies regardless of the
+// direction of the translation, e.g., the same steps are applied to
+// mask the data as to unmask the data.
+func Cipher(payload []byte, mask [4]byte, offset int) {
+ n := len(payload)
+ if n < 8 {
+ for i := 0; i < n; i++ {
+ payload[i] ^= mask[(offset+i)%4]
+ }
+ return
+ }
+
+ // Calculate position in mask due to previously processed bytes number.
+ mpos := offset % 4
+ // Count number of bytes will processed one by one from the beginning of payload.
+ ln := remain[mpos]
+ // Count number of bytes will processed one by one from the end of payload.
+ // This is done to process payload by 8 bytes in each iteration of main loop.
+ rn := (n - ln) % 8
+
+ for i := 0; i < ln; i++ {
+ payload[i] ^= mask[(mpos+i)%4]
+ }
+ for i := n - rn; i < n; i++ {
+ payload[i] ^= mask[(mpos+i)%4]
+ }
+
+ // We should cast mask to uint32 with unsafe instead of encoding.BigEndian
+ // to avoid dealing with OS-dependent byte order. That is, on any endianness, mask
+ // and payload will be presented with the same order. In other words, we
+ // could not use encoding.BigEndian on xoring payload as uint64.
+ m := *(*uint32)(unsafe.Pointer(&mask))
+ m2 := uint64(m)<<32 | uint64(m)
+
+ // Skip already processed right part.
+ // Get number of uint64 parts remaining to process.
+ n = (n - ln - rn) >> 3
+ for i := 0; i < n; i++ {
+ idx := ln + (i << 3)
+ p := binary.LittleEndian.Uint64(payload[idx : idx+8])
+ p = p ^ m2
+ binary.LittleEndian.PutUint64(payload[idx:idx+8], p)
+ }
+}
+
+// remain maps position in masking key [0,4) to number
+// of bytes that need to be processed manually inside Cipher().
+var remain = [4]int{0, 3, 2, 1}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/dialer.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/dialer.go
new file mode 100644
index 00000000000..4357be2142b
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/dialer.go
@@ -0,0 +1,556 @@
+package ws
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "crypto/tls"
+ "fmt"
+ "io"
+ "net"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/gobwas/httphead"
+ "github.com/gobwas/pool/pbufio"
+)
+
+// Constants used by Dialer.
+const (
+ DefaultClientReadBufferSize = 4096
+ DefaultClientWriteBufferSize = 4096
+)
+
+// Handshake represents handshake result.
+type Handshake struct {
+ // Protocol is the subprotocol selected during handshake.
+ Protocol string
+
+ // Extensions is the list of negotiated extensions.
+ Extensions []httphead.Option
+}
+
+// Errors used by the websocket client.
+var (
+ ErrHandshakeBadStatus = fmt.Errorf("unexpected http status")
+ ErrHandshakeBadSubProtocol = fmt.Errorf("unexpected protocol in %q header", headerSecProtocol)
+ ErrHandshakeBadExtensions = fmt.Errorf("unexpected extensions in %q header", headerSecProtocol)
+)
+
+// DefaultDialer is dialer that holds no options and is used by Dial function.
+var DefaultDialer Dialer
+
+// Dial is like Dialer{}.Dial().
+func Dial(ctx context.Context, urlstr string) (net.Conn, *bufio.Reader, Handshake, error) {
+ return DefaultDialer.Dial(ctx, urlstr)
+}
+
+// Dialer contains options for establishing websocket connection to an url.
+type Dialer struct {
+ // ReadBufferSize and WriteBufferSize are the sizes of the I/O buffers.
+ // They are used to read and write HTTP data while upgrading to WebSocket.
+ // Allocated buffers are pooled with sync.Pool to avoid extra allocations.
+ //
+ // If a size is zero then default value is used.
+ ReadBufferSize, WriteBufferSize int
+
+ // Timeout is the maximum amount of time a Dial() will wait for a connect
+ // and a handshake to complete.
+ //
+ // The default is no timeout.
+ Timeout time.Duration
+
+ // Protocols is the list of subprotocols that the client wants to speak,
+ // ordered by preference.
+ //
+ // See https://tools.ietf.org/html/rfc6455#section-4.1
+ Protocols []string
+
+ // Extensions is the list of extensions that client wants to speak.
+ //
+ // Note that if server decides to use some of this extensions, Dial() will
+ // return Handshake struct containing a slice of items, which are the
+ // shallow copies of the items from this list. That is, internals of
+ // Extensions items are shared during Dial().
+ //
+ // See https://tools.ietf.org/html/rfc6455#section-4.1
+ // See https://tools.ietf.org/html/rfc6455#section-9.1
+ Extensions []httphead.Option
+
+ // Header is an optional HandshakeHeader instance that could be used to
+ // write additional headers to the handshake request.
+ //
+ // It used instead of any key-value mappings to avoid allocations in user
+ // land.
+ Header HandshakeHeader
+
+ // OnStatusError is the callback that will be called after receiving non
+ // "101 Continue" HTTP response status. It receives an io.Reader object
+ // representing server response bytes. That is, it gives ability to parse
+ // HTTP response somehow (probably with http.ReadResponse call) and make a
+ // decision of further logic.
+ //
+ // The arguments are only valid until the callback returns.
+ OnStatusError func(status int, reason []byte, resp io.Reader)
+
+ // OnHeader is the callback that will be called after successful parsing of
+ // header, that is not used during WebSocket handshake procedure. That is,
+ // it will be called with non-websocket headers, which could be relevant
+ // for application-level logic.
+ //
+ // The arguments are only valid until the callback returns.
+ //
+ // Returned value could be used to prevent processing response.
+ OnHeader func(key, value []byte) (err error)
+
+ // NetDial is the function that is used to get plain tcp connection.
+ // If it is not nil, then it is used instead of net.Dialer.
+ NetDial func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ // TLSClient is the callback that will be called after successful dial with
+ // received connection and its remote host name. If it is nil, then the
+ // default tls.Client() will be used.
+ // If it is not nil, then TLSConfig field is ignored.
+ TLSClient func(conn net.Conn, hostname string) net.Conn
+
+ // TLSConfig is passed to tls.Client() to start TLS over established
+ // connection. If TLSClient is not nil, then it is ignored. If TLSConfig is
+ // non-nil and its ServerName is empty, then for every Dial() it will be
+ // cloned and appropriate ServerName will be set.
+ TLSConfig *tls.Config
+
+ // WrapConn is the optional callback that will be called when connection is
+ // ready for an i/o. That is, it will be called after successful dial and
+ // TLS initialization (for "wss" schemes). It may be helpful for different
+ // user land purposes such as end to end encryption.
+ //
+ // Note that for debugging purposes of an http handshake (e.g. sent request
+ // and received response), there is an wsutil.DebugDialer struct.
+ WrapConn func(conn net.Conn) net.Conn
+}
+
+// Dial connects to the url host and upgrades connection to WebSocket.
+//
+// If server has sent frames right after successful handshake then returned
+// buffer will be non-nil. In other cases buffer is always nil. For better
+// memory efficiency received non-nil bufio.Reader should be returned to the
+// inner pool with PutReader() function after use.
+//
+// Note that Dialer does not implement IDNA (RFC5895) logic as net/http does.
+// If you want to dial non-ascii host name, take care of its name serialization
+// avoiding bad request issues. For more info see net/http Request.Write()
+// implementation, especially cleanHost() function.
+func (d Dialer) Dial(ctx context.Context, urlstr string) (conn net.Conn, br *bufio.Reader, hs Handshake, err error) {
+ u, err := url.ParseRequestURI(urlstr)
+ if err != nil {
+ return
+ }
+
+ // Prepare context to dial with. Initially it is the same as original, but
+ // if d.Timeout is non-zero and points to time that is before ctx.Deadline,
+ // we use more shorter context for dial.
+ dialctx := ctx
+
+ var deadline time.Time
+ if t := d.Timeout; t != 0 {
+ deadline = time.Now().Add(t)
+ if d, ok := ctx.Deadline(); !ok || deadline.Before(d) {
+ var cancel context.CancelFunc
+ dialctx, cancel = context.WithDeadline(ctx, deadline)
+ defer cancel()
+ }
+ }
+ if conn, err = d.dial(dialctx, u); err != nil {
+ return
+ }
+ defer func() {
+ if err != nil {
+ conn.Close()
+ }
+ }()
+ if ctx == context.Background() {
+ // No need to start I/O interrupter goroutine which is not zero-cost.
+ conn.SetDeadline(deadline)
+ defer conn.SetDeadline(noDeadline)
+ } else {
+ // Context could be canceled or its deadline could be exceeded.
+ // Start the interrupter goroutine to handle context cancelation.
+ done := setupContextDeadliner(ctx, conn)
+ defer func() {
+ // Map Upgrade() error to a possible context expiration error. That
+ // is, even if Upgrade() err is nil, context could be already
+ // expired and connection be "poisoned" by SetDeadline() call.
+ // In that case we must not return ctx.Err() error.
+ done(&err)
+ }()
+ }
+
+ br, hs, err = d.Upgrade(conn, u)
+
+ return
+}
+
+var (
+ // netEmptyDialer is a net.Dialer without options, used in Dialer.dial() if
+ // Dialer.NetDial is not provided.
+ netEmptyDialer net.Dialer
+ // tlsEmptyConfig is an empty tls.Config used as default one.
+ tlsEmptyConfig tls.Config
+)
+
+func tlsDefaultConfig() *tls.Config {
+ return &tlsEmptyConfig
+}
+
+func hostport(host string, defaultPort string) (hostname, addr string) {
+ var (
+ colon = strings.LastIndexByte(host, ':')
+ bracket = strings.IndexByte(host, ']')
+ )
+ if colon > bracket {
+ return host[:colon], host
+ }
+ return host, host + defaultPort
+}
+
+func (d Dialer) dial(ctx context.Context, u *url.URL) (conn net.Conn, err error) {
+ dial := d.NetDial
+ if dial == nil {
+ dial = netEmptyDialer.DialContext
+ }
+ switch u.Scheme {
+ case "ws":
+ _, addr := hostport(u.Host, ":80")
+ conn, err = dial(ctx, "tcp", addr)
+ case "wss":
+ hostname, addr := hostport(u.Host, ":443")
+ conn, err = dial(ctx, "tcp", addr)
+ if err != nil {
+ return
+ }
+ tlsClient := d.TLSClient
+ if tlsClient == nil {
+ tlsClient = d.tlsClient
+ }
+ conn = tlsClient(conn, hostname)
+ default:
+ return nil, fmt.Errorf("unexpected websocket scheme: %q", u.Scheme)
+ }
+ if wrap := d.WrapConn; wrap != nil {
+ conn = wrap(conn)
+ }
+ return
+}
+
+func (d Dialer) tlsClient(conn net.Conn, hostname string) net.Conn {
+ config := d.TLSConfig
+ if config == nil {
+ config = tlsDefaultConfig()
+ }
+ if config.ServerName == "" {
+ config = tlsCloneConfig(config)
+ config.ServerName = hostname
+ }
+ // Do not make conn.Handshake() here because downstairs we will prepare
+ // i/o on this conn with proper context's timeout handling.
+ return tls.Client(conn, config)
+}
+
+var (
+ // This variables are set like in net/net.go.
+ // noDeadline is just zero value for readability.
+ noDeadline = time.Time{}
+ // aLongTimeAgo is a non-zero time, far in the past, used for immediate
+ // cancelation of dials.
+ aLongTimeAgo = time.Unix(42, 0)
+)
+
+// Upgrade writes an upgrade request to the given io.ReadWriter conn at given
+// url u and reads a response from it.
+//
+// It is a caller responsibility to manage I/O deadlines on conn.
+//
+// It returns handshake info and some bytes which could be written by the peer
+// right after response and be caught by us during buffered read.
+func (d Dialer) Upgrade(conn io.ReadWriter, u *url.URL) (br *bufio.Reader, hs Handshake, err error) {
+ // headerSeen constants helps to report whether or not some header was seen
+ // during reading request bytes.
+ const (
+ headerSeenUpgrade = 1 << iota
+ headerSeenConnection
+ headerSeenSecAccept
+
+ // headerSeenAll is the value that we expect to receive at the end of
+ // headers read/parse loop.
+ headerSeenAll = 0 |
+ headerSeenUpgrade |
+ headerSeenConnection |
+ headerSeenSecAccept
+ )
+
+ br = pbufio.GetReader(conn,
+ nonZero(d.ReadBufferSize, DefaultClientReadBufferSize),
+ )
+ bw := pbufio.GetWriter(conn,
+ nonZero(d.WriteBufferSize, DefaultClientWriteBufferSize),
+ )
+ defer func() {
+ pbufio.PutWriter(bw)
+ if br.Buffered() == 0 || err != nil {
+ // The server did not write additional bytes to the connection, or an
+ // error occurred; either way, there is no reason to return the buffer.
+ pbufio.PutReader(br)
+ br = nil
+ }
+ }()
+
+ nonce := make([]byte, nonceSize)
+ initNonce(nonce)
+
+ httpWriteUpgradeRequest(bw, u, nonce, d.Protocols, d.Extensions, d.Header)
+ if err = bw.Flush(); err != nil {
+ return
+ }
+
+ // Read HTTP status line like "HTTP/1.1 101 Switching Protocols".
+ sl, err := readLine(br)
+ if err != nil {
+ return
+ }
+ // Begin validation of the response.
+ // See https://tools.ietf.org/html/rfc6455#section-4.2.2
+ // Parse request line data like HTTP version, uri and method.
+ resp, err := httpParseResponseLine(sl)
+ if err != nil {
+ return
+ }
+ // Even if RFC says "1.1 or higher" without mentioning the part of the
+ // version, we apply it only to minor part.
+ if resp.major != 1 || resp.minor < 1 {
+ err = ErrHandshakeBadProtocol
+ return
+ }
+ if resp.status != 101 {
+ err = StatusError(resp.status)
+ if onStatusError := d.OnStatusError; onStatusError != nil {
+ // Invoke callback with multireader of status-line bytes br.
+ onStatusError(resp.status, resp.reason,
+ io.MultiReader(
+ bytes.NewReader(sl),
+ strings.NewReader(crlf),
+ br,
+ ),
+ )
+ }
+ return
+ }
+ // If response status is 101 then we expect all technical headers to be
+ // valid. If not, then we stop processing response without giving user
+ // ability to read non-technical headers. That is, we do not distinguish
+ // technical errors (such as parsing error) and protocol errors.
+ var headerSeen byte
+ for {
+ line, e := readLine(br)
+ if e != nil {
+ err = e
+ return
+ }
+ if len(line) == 0 {
+ // Blank line, no more lines to read.
+ break
+ }
+
+ k, v, ok := httpParseHeaderLine(line)
+ if !ok {
+ err = ErrMalformedResponse
+ return
+ }
+
+ switch btsToString(k) {
+ case headerUpgradeCanonical:
+ headerSeen |= headerSeenUpgrade
+ if !bytes.Equal(v, specHeaderValueUpgrade) && !bytes.EqualFold(v, specHeaderValueUpgrade) {
+ err = ErrHandshakeBadUpgrade
+ return
+ }
+
+ case headerConnectionCanonical:
+ headerSeen |= headerSeenConnection
+ // Note that as RFC6455 says:
+ // > A |Connection| header field with value "Upgrade".
+ // That is, in server side, "Connection" header could contain
+ // multiple token. But in response it must contains exactly one.
+ if !bytes.Equal(v, specHeaderValueConnection) && !bytes.EqualFold(v, specHeaderValueConnection) {
+ err = ErrHandshakeBadConnection
+ return
+ }
+
+ case headerSecAcceptCanonical:
+ headerSeen |= headerSeenSecAccept
+ if !checkAcceptFromNonce(v, nonce) {
+ err = ErrHandshakeBadSecAccept
+ return
+ }
+
+ case headerSecProtocolCanonical:
+ // RFC6455 1.3:
+ // "The server selects one or none of the acceptable protocols
+ // and echoes that value in its handshake to indicate that it has
+ // selected that protocol."
+ for _, want := range d.Protocols {
+ if string(v) == want {
+ hs.Protocol = want
+ break
+ }
+ }
+ if hs.Protocol == "" {
+ // Server echoed subprotocol that is not present in client
+ // requested protocols.
+ err = ErrHandshakeBadSubProtocol
+ return
+ }
+
+ case headerSecExtensionsCanonical:
+ hs.Extensions, err = matchSelectedExtensions(v, d.Extensions, hs.Extensions)
+ if err != nil {
+ return
+ }
+
+ default:
+ if onHeader := d.OnHeader; onHeader != nil {
+ if e := onHeader(k, v); e != nil {
+ err = e
+ return
+ }
+ }
+ }
+ }
+ if err == nil && headerSeen != headerSeenAll {
+ switch {
+ case headerSeen&headerSeenUpgrade == 0:
+ err = ErrHandshakeBadUpgrade
+ case headerSeen&headerSeenConnection == 0:
+ err = ErrHandshakeBadConnection
+ case headerSeen&headerSeenSecAccept == 0:
+ err = ErrHandshakeBadSecAccept
+ default:
+ panic("unknown headers state")
+ }
+ }
+ return
+}
+
+// PutReader returns bufio.Reader instance to the inner reuse pool.
+// It is useful in rare cases when Dialer.Dial() returns a non-nil buffer
+// that contains unprocessed buffered data, sent by the server right after
+// the handshake.
+func PutReader(br *bufio.Reader) {
+ pbufio.PutReader(br)
+}
+
+// StatusError contains an unexpected status-line code from the server.
+type StatusError int
+
+func (s StatusError) Error() string {
+ return "unexpected HTTP response status: " + strconv.Itoa(int(s))
+}
+
+func isTimeoutError(err error) bool {
+ t, ok := err.(net.Error)
+ return ok && t.Timeout()
+}
+
+func matchSelectedExtensions(selected []byte, wanted, received []httphead.Option) ([]httphead.Option, error) {
+ if len(selected) == 0 {
+ return received, nil
+ }
+ var (
+ index int
+ option httphead.Option
+ err error
+ )
+ index = -1
+ match := func() (ok bool) {
+ for _, want := range wanted {
+ if option.Equal(want) {
+ // Check parsed extension to be present in client
+ // requested extensions. We move matched extension
+ // from client list to avoid allocation.
+ received = append(received, want)
+ return true
+ }
+ }
+ return false
+ }
+ ok := httphead.ScanOptions(selected, func(i int, name, attr, val []byte) httphead.Control {
+ if i != index {
+ // Met next option.
+ index = i
+ if i != 0 && !match() {
+ // Server returned non-requested extension.
+ err = ErrHandshakeBadExtensions
+ return httphead.ControlBreak
+ }
+ option = httphead.Option{Name: name}
+ }
+ if attr != nil {
+ option.Parameters.Set(attr, val)
+ }
+ return httphead.ControlContinue
+ })
+ if !ok {
+ err = ErrMalformedResponse
+ return received, err
+ }
+ if !match() {
+ return received, ErrHandshakeBadExtensions
+ }
+ return received, err
+}
+
+// setupContextDeadliner is a helper function that starts connection I/O
+// interrupter goroutine.
+//
+// Started goroutine calls SetDeadline() with long time ago value when context
+// become expired to make any I/O operations failed. It returns done function
+// that stops started goroutine and maps error received from conn I/O methods
+// to possible context expiration error.
+//
+// In concern with possible SetDeadline() call inside interrupter goroutine,
+// caller passes pointer to its I/O error (even if it is nil) to done(&err).
+// That is, even if I/O error is nil, context could be already expired and
+// connection "poisoned" by SetDeadline() call. In that case done(&err) will
+// store at *err ctx.Err() result. If err is caused not by timeout, it will
+// leaved untouched.
+func setupContextDeadliner(ctx context.Context, conn net.Conn) (done func(*error)) {
+ var (
+ quit = make(chan struct{})
+ interrupt = make(chan error, 1)
+ )
+ go func() {
+ select {
+ case <-quit:
+ interrupt <- nil
+ case <-ctx.Done():
+ // Cancel i/o immediately.
+ conn.SetDeadline(aLongTimeAgo)
+ interrupt <- ctx.Err()
+ }
+ }()
+ return func(err *error) {
+ close(quit)
+ // If ctx.Err() is non-nil and the original err is net.Error with
+ // Timeout() == true, then it means that I/O was canceled by us by
+ // SetDeadline(aLongTimeAgo) call, or by somebody else previously
+ // by conn.SetDeadline(x).
+ //
+ // Even on race condition when both deadlines are expired
+ // (SetDeadline() made not by us and context's), we prefer ctx.Err() to
+ // be returned.
+ if ctxErr := <-interrupt; ctxErr != nil && (*err == nil || isTimeoutError(*err)) {
+ *err = ctxErr
+ }
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/dialer_tls_go17.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/dialer_tls_go17.go
new file mode 100644
index 00000000000..b606e0ad909
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/dialer_tls_go17.go
@@ -0,0 +1,35 @@
+// +build !go1.8
+
+package ws
+
+import "crypto/tls"
+
+func tlsCloneConfig(c *tls.Config) *tls.Config {
+	// NOTE: we copy SessionTicketsDisabled and SessionTicketKey here
+	// without calling the inner c.initOnceServer because we can only get
+	// here from the ws.Dialer code, which is obviously a client and calls
+	// tls.Client() when it gets a new net.Conn.
+ return &tls.Config{
+ Rand: c.Rand,
+ Time: c.Time,
+ Certificates: c.Certificates,
+ NameToCertificate: c.NameToCertificate,
+ GetCertificate: c.GetCertificate,
+ RootCAs: c.RootCAs,
+ NextProtos: c.NextProtos,
+ ServerName: c.ServerName,
+ ClientAuth: c.ClientAuth,
+ ClientCAs: c.ClientCAs,
+ InsecureSkipVerify: c.InsecureSkipVerify,
+ CipherSuites: c.CipherSuites,
+ PreferServerCipherSuites: c.PreferServerCipherSuites,
+ SessionTicketsDisabled: c.SessionTicketsDisabled,
+ SessionTicketKey: c.SessionTicketKey,
+ ClientSessionCache: c.ClientSessionCache,
+ MinVersion: c.MinVersion,
+ MaxVersion: c.MaxVersion,
+ CurvePreferences: c.CurvePreferences,
+ DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
+ Renegotiation: c.Renegotiation,
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/dialer_tls_go18.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/dialer_tls_go18.go
new file mode 100644
index 00000000000..a6704d5173a
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/dialer_tls_go18.go
@@ -0,0 +1,9 @@
+// +build go1.8
+
+package ws
+
+import "crypto/tls"
+
+func tlsCloneConfig(c *tls.Config) *tls.Config {
+ return c.Clone()
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/doc.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/doc.go
new file mode 100644
index 00000000000..c9d5791570c
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/doc.go
@@ -0,0 +1,81 @@
+/*
+Package ws implements a client and server for the WebSocket protocol as
+specified in RFC 6455.
+
+The main purpose of this package is to provide simple low-level API for
+efficient work with protocol.
+
+Overview.
+
+Upgrade to WebSocket (or WebSocket handshake) can be done in two ways.
+
+The first way is to use `net/http` server:
+
+ http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ conn, _, _, err := ws.UpgradeHTTP(r, w)
+ })
+
+The second and much more efficient way is so-called "zero-copy upgrade". It
+avoids redundant allocations and copying of unused headers or other request
+data. The user decides which data should be copied.
+
+ ln, err := net.Listen("tcp", ":8080")
+ if err != nil {
+ // handle error
+ }
+
+ conn, err := ln.Accept()
+ if err != nil {
+ // handle error
+ }
+
+ handshake, err := ws.Upgrade(conn)
+ if err != nil {
+ // handle error
+ }
+
+For customization details see `ws.Upgrader` documentation.
+
+After WebSocket handshake you can work with connection in multiple ways.
+That is, `ws` does not force only one way of working with WebSocket:
+
+ header, err := ws.ReadHeader(conn)
+ if err != nil {
+ // handle err
+ }
+
+ buf := make([]byte, header.Length)
+ _, err := io.ReadFull(conn, buf)
+ if err != nil {
+ // handle err
+ }
+
+	frame := ws.NewBinaryFrame([]byte("hello, world!"))
+ if err := ws.WriteFrame(conn, frame); err != nil {
+ // handle err
+ }
+
+As you can see, it is stream friendly:
+
+ const N = 42
+
+ ws.WriteHeader(ws.Header{
+ Fin: true,
+ Length: N,
+ OpCode: ws.OpBinary,
+ })
+
+ io.CopyN(conn, rand.Reader, N)
+
+Or:
+
+ header, err := ws.ReadHeader(conn)
+ if err != nil {
+ // handle err
+ }
+
+ io.CopyN(ioutil.Discard, conn, header.Length)
+
+For more info see the documentation.
+*/
+package ws
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/errors.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/errors.go
new file mode 100644
index 00000000000..48fce3b72c1
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/errors.go
@@ -0,0 +1,54 @@
+package ws
+
+// RejectOption represents an option used to control the way connection is
+// rejected.
+type RejectOption func(*rejectConnectionError)
+
+// RejectionReason returns an option that makes connection to be rejected with
+// given reason.
+func RejectionReason(reason string) RejectOption {
+ return func(err *rejectConnectionError) {
+ err.reason = reason
+ }
+}
+
+// RejectionStatus returns an option that makes connection to be rejected with
+// given HTTP status code.
+func RejectionStatus(code int) RejectOption {
+ return func(err *rejectConnectionError) {
+ err.code = code
+ }
+}
+
+// RejectionHeader returns an option that makes connection to be rejected with
+// given HTTP headers.
+func RejectionHeader(h HandshakeHeader) RejectOption {
+ return func(err *rejectConnectionError) {
+ err.header = h
+ }
+}
+
+// RejectConnectionError constructs an error that could be used to control the way
+// handshake is rejected by Upgrader.
+func RejectConnectionError(options ...RejectOption) error {
+ err := new(rejectConnectionError)
+ for _, opt := range options {
+ opt(err)
+ }
+ return err
+}
+
+// rejectConnectionError represents a rejection of upgrade error.
+//
+// It can be returned by Upgrader's On* hooks to control the way WebSocket
+// handshake is rejected.
+type rejectConnectionError struct {
+ reason string
+ code int
+ header HandshakeHeader
+}
+
+// Error implements error interface.
+func (r *rejectConnectionError) Error() string {
+ return r.reason
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/frame.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/frame.go
new file mode 100644
index 00000000000..f157ee3e9ff
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/frame.go
@@ -0,0 +1,389 @@
+package ws
+
+import (
+ "bytes"
+ "encoding/binary"
+ "math/rand"
+)
+
+// Constants defined by specification.
+const (
+ // All control frames MUST have a payload length of 125 bytes or less and MUST NOT be fragmented.
+ MaxControlFramePayloadSize = 125
+)
+
+// OpCode represents operation code.
+type OpCode byte
+
+// Operation codes defined by specification.
+// See https://tools.ietf.org/html/rfc6455#section-5.2
+const (
+ OpContinuation OpCode = 0x0
+ OpText OpCode = 0x1
+ OpBinary OpCode = 0x2
+ OpClose OpCode = 0x8
+ OpPing OpCode = 0x9
+ OpPong OpCode = 0xa
+)
+
+// IsControl checks whether the c is control operation code.
+// See https://tools.ietf.org/html/rfc6455#section-5.5
+func (c OpCode) IsControl() bool {
+ // RFC6455: Control frames are identified by opcodes where
+ // the most significant bit of the opcode is 1.
+ //
+ // Note that OpCode is only 4 bit length.
+ return c&0x8 != 0
+}
+
+// IsData checks whether the c is data operation code.
+// See https://tools.ietf.org/html/rfc6455#section-5.6
+func (c OpCode) IsData() bool {
+ // RFC6455: Data frames (e.g., non-control frames) are identified by opcodes
+ // where the most significant bit of the opcode is 0.
+ //
+ // Note that OpCode is only 4 bit length.
+ return c&0x8 == 0
+}
+
+// IsReserved checks whether the c is reserved operation code.
+// See https://tools.ietf.org/html/rfc6455#section-5.2
+func (c OpCode) IsReserved() bool {
+ // RFC6455:
+ // %x3-7 are reserved for further non-control frames
+ // %xB-F are reserved for further control frames
+ return (0x3 <= c && c <= 0x7) || (0xb <= c && c <= 0xf)
+}
+
+// StatusCode represents the encoded reason for closure of websocket connection.
+//
+// There are a few helper methods on StatusCode that help to determine the
+// range in which a given code lies, according to the ranges defined in the specification.
+//
+// See https://tools.ietf.org/html/rfc6455#section-7.4
+type StatusCode uint16
+
+// StatusCodeRange describes range of StatusCode values.
+type StatusCodeRange struct {
+ Min, Max StatusCode
+}
+
+// Status code ranges defined by specification.
+// See https://tools.ietf.org/html/rfc6455#section-7.4.2
+var (
+ StatusRangeNotInUse = StatusCodeRange{0, 999}
+ StatusRangeProtocol = StatusCodeRange{1000, 2999}
+ StatusRangeApplication = StatusCodeRange{3000, 3999}
+ StatusRangePrivate = StatusCodeRange{4000, 4999}
+)
+
+// Status codes defined by specification.
+// See https://tools.ietf.org/html/rfc6455#section-7.4.1
+const (
+ StatusNormalClosure StatusCode = 1000
+ StatusGoingAway StatusCode = 1001
+ StatusProtocolError StatusCode = 1002
+ StatusUnsupportedData StatusCode = 1003
+ StatusNoMeaningYet StatusCode = 1004
+ StatusInvalidFramePayloadData StatusCode = 1007
+ StatusPolicyViolation StatusCode = 1008
+ StatusMessageTooBig StatusCode = 1009
+ StatusMandatoryExt StatusCode = 1010
+ StatusInternalServerError StatusCode = 1011
+ StatusTLSHandshake StatusCode = 1015
+
+ // StatusAbnormalClosure is a special code designated for use in
+ // applications.
+ StatusAbnormalClosure StatusCode = 1006
+
+ // StatusNoStatusRcvd is a special code designated for use in applications.
+ StatusNoStatusRcvd StatusCode = 1005
+)
+
+// In reports whether the code is defined in given range.
+func (s StatusCode) In(r StatusCodeRange) bool {
+ return r.Min <= s && s <= r.Max
+}
+
+// Empty reports whether the code is empty.
+// An empty code has no meaning, neither as an app-level code nor otherwise.
+// This method is useful just to check that code is golang default value 0.
+func (s StatusCode) Empty() bool {
+ return s == 0
+}
+
+// IsNotUsed reports whether the code is predefined in not used range.
+func (s StatusCode) IsNotUsed() bool {
+ return s.In(StatusRangeNotInUse)
+}
+
+// IsApplicationSpec reports whether the code should be defined by
+// application, framework or libraries specification.
+func (s StatusCode) IsApplicationSpec() bool {
+ return s.In(StatusRangeApplication)
+}
+
+// IsPrivateSpec reports whether the code should be defined privately.
+func (s StatusCode) IsPrivateSpec() bool {
+ return s.In(StatusRangePrivate)
+}
+
+// IsProtocolSpec reports whether the code should be defined by protocol specification.
+func (s StatusCode) IsProtocolSpec() bool {
+ return s.In(StatusRangeProtocol)
+}
+
+// IsProtocolDefined reports whether the code is already defined by protocol specification.
+func (s StatusCode) IsProtocolDefined() bool {
+ switch s {
+ case StatusNormalClosure,
+ StatusGoingAway,
+ StatusProtocolError,
+ StatusUnsupportedData,
+ StatusInvalidFramePayloadData,
+ StatusPolicyViolation,
+ StatusMessageTooBig,
+ StatusMandatoryExt,
+ StatusInternalServerError,
+ StatusNoStatusRcvd,
+ StatusAbnormalClosure,
+ StatusTLSHandshake:
+ return true
+ }
+ return false
+}
+
+// IsProtocolReserved reports whether the code is defined by protocol specification
+// to be reserved only for application usage purpose.
+func (s StatusCode) IsProtocolReserved() bool {
+ switch s {
+ // [RFC6455]: {1005,1006,1015} is a reserved value and MUST NOT be set as a status code in a
+ // Close control frame by an endpoint.
+ case StatusNoStatusRcvd, StatusAbnormalClosure, StatusTLSHandshake:
+ return true
+ default:
+ return false
+ }
+}
+
+// Compiled control frames for common use cases.
+// For construct-serialize optimizations.
+var (
+ CompiledPing = MustCompileFrame(NewPingFrame(nil))
+ CompiledPong = MustCompileFrame(NewPongFrame(nil))
+ CompiledClose = MustCompileFrame(NewCloseFrame(nil))
+
+ CompiledCloseNormalClosure = MustCompileFrame(closeFrameNormalClosure)
+ CompiledCloseGoingAway = MustCompileFrame(closeFrameGoingAway)
+ CompiledCloseProtocolError = MustCompileFrame(closeFrameProtocolError)
+ CompiledCloseUnsupportedData = MustCompileFrame(closeFrameUnsupportedData)
+ CompiledCloseNoMeaningYet = MustCompileFrame(closeFrameNoMeaningYet)
+ CompiledCloseInvalidFramePayloadData = MustCompileFrame(closeFrameInvalidFramePayloadData)
+ CompiledClosePolicyViolation = MustCompileFrame(closeFramePolicyViolation)
+ CompiledCloseMessageTooBig = MustCompileFrame(closeFrameMessageTooBig)
+ CompiledCloseMandatoryExt = MustCompileFrame(closeFrameMandatoryExt)
+ CompiledCloseInternalServerError = MustCompileFrame(closeFrameInternalServerError)
+ CompiledCloseTLSHandshake = MustCompileFrame(closeFrameTLSHandshake)
+)
+
+// Header represents websocket frame header.
+// See https://tools.ietf.org/html/rfc6455#section-5.2
+type Header struct {
+ Fin bool
+ Rsv byte
+ OpCode OpCode
+ Masked bool
+ Mask [4]byte
+ Length int64
+}
+
+// Rsv1 reports whether the header has first rsv bit set.
+func (h Header) Rsv1() bool { return h.Rsv&bit5 != 0 }
+
+// Rsv2 reports whether the header has second rsv bit set.
+func (h Header) Rsv2() bool { return h.Rsv&bit6 != 0 }
+
+// Rsv3 reports whether the header has third rsv bit set.
+func (h Header) Rsv3() bool { return h.Rsv&bit7 != 0 }
+
+// Frame represents websocket frame.
+// See https://tools.ietf.org/html/rfc6455#section-5.2
+type Frame struct {
+ Header Header
+ Payload []byte
+}
+
+// NewFrame creates frame with given operation code,
+// flag of completeness and payload bytes.
+func NewFrame(op OpCode, fin bool, p []byte) Frame {
+ return Frame{
+ Header: Header{
+ Fin: fin,
+ OpCode: op,
+ Length: int64(len(p)),
+ },
+ Payload: p,
+ }
+}
+
+// NewTextFrame creates text frame with p as payload.
+// Note that p is not copied.
+func NewTextFrame(p []byte) Frame {
+ return NewFrame(OpText, true, p)
+}
+
+// NewBinaryFrame creates binary frame with p as payload.
+// Note that p is not copied.
+func NewBinaryFrame(p []byte) Frame {
+ return NewFrame(OpBinary, true, p)
+}
+
+// NewPingFrame creates ping frame with p as payload.
+// Note that p is not copied.
+// Note that p must have length of MaxControlFramePayloadSize bytes or less due
+// to RFC.
+func NewPingFrame(p []byte) Frame {
+ return NewFrame(OpPing, true, p)
+}
+
+// NewPongFrame creates pong frame with p as payload.
+// Note that p is not copied.
+// Note that p must have length of MaxControlFramePayloadSize bytes or less due
+// to RFC.
+func NewPongFrame(p []byte) Frame {
+ return NewFrame(OpPong, true, p)
+}
+
+// NewCloseFrame creates close frame with given close body.
+// Note that p is not copied.
+// Note that p must have length of MaxControlFramePayloadSize bytes or less due
+// to RFC.
+func NewCloseFrame(p []byte) Frame {
+ return NewFrame(OpClose, true, p)
+}
+
+// NewCloseFrameBody encodes a closure code and a reason into a binary
+// representation.
+//
+// It returns slice which is at most MaxControlFramePayloadSize bytes length.
+// If the reason is too big it will be cropped to fit the limit defined by the
+// spec.
+//
+// See https://tools.ietf.org/html/rfc6455#section-5.5
+func NewCloseFrameBody(code StatusCode, reason string) []byte {
+ n := min(2+len(reason), MaxControlFramePayloadSize)
+ p := make([]byte, n)
+
+ crop := min(MaxControlFramePayloadSize-2, len(reason))
+ PutCloseFrameBody(p, code, reason[:crop])
+
+ return p
+}
+
+// PutCloseFrameBody encodes code and reason into buf.
+//
+// It will panic if the buffer is too small to accommodate a code or a reason.
+//
+// PutCloseFrameBody does not check buffer to be RFC compliant, but note that
+// by RFC it must be at most MaxControlFramePayloadSize.
+func PutCloseFrameBody(p []byte, code StatusCode, reason string) {
+ _ = p[1+len(reason)]
+ binary.BigEndian.PutUint16(p, uint16(code))
+ copy(p[2:], reason)
+}
+
+// MaskFrame masks frame and returns frame with masked payload and Mask header's field set.
+// Note that it copies f payload to prevent collisions.
+// For less allocations you could use MaskFrameInPlace or construct frame manually.
+func MaskFrame(f Frame) Frame {
+ return MaskFrameWith(f, NewMask())
+}
+
+// MaskFrameWith masks frame with given mask and returns frame
+// with masked payload and Mask header's field set.
+// Note that it copies f payload to prevent collisions.
+// For less allocations you could use MaskFrameInPlaceWith or construct frame manually.
+func MaskFrameWith(f Frame, mask [4]byte) Frame {
+ // TODO(gobwas): check CopyCipher ws copy() Cipher().
+ p := make([]byte, len(f.Payload))
+ copy(p, f.Payload)
+ f.Payload = p
+ return MaskFrameInPlaceWith(f, mask)
+}
+
+// MaskFrameInPlace masks frame and returns frame with masked payload and Mask
+// header's field set.
+// Note that it applies xor cipher to f.Payload without copying, that is, it
+// modifies f.Payload inplace.
+func MaskFrameInPlace(f Frame) Frame {
+ return MaskFrameInPlaceWith(f, NewMask())
+}
+
+// MaskFrameInPlaceWith masks frame with given mask and returns frame
+// with masked payload and Mask header's field set.
+// Note that it applies xor cipher to f.Payload without copying, that is, it
+// modifies f.Payload inplace.
+func MaskFrameInPlaceWith(f Frame, m [4]byte) Frame {
+ f.Header.Masked = true
+ f.Header.Mask = m
+ Cipher(f.Payload, m, 0)
+ return f
+}
+
+// NewMask creates new random mask.
+func NewMask() (ret [4]byte) {
+ binary.BigEndian.PutUint32(ret[:], rand.Uint32())
+ return
+}
+
+// CompileFrame returns byte representation of given frame.
+// In terms of memory consumption it is useful to precompile static frames
+// which are often used.
+func CompileFrame(f Frame) (bts []byte, err error) {
+ buf := bytes.NewBuffer(make([]byte, 0, 16))
+ err = WriteFrame(buf, f)
+ bts = buf.Bytes()
+ return
+}
+
+// MustCompileFrame is like CompileFrame but panics if frame can not be
+// encoded.
+func MustCompileFrame(f Frame) []byte {
+ bts, err := CompileFrame(f)
+ if err != nil {
+ panic(err)
+ }
+ return bts
+}
+
+// Rsv creates rsv byte representation.
+func Rsv(r1, r2, r3 bool) (rsv byte) {
+ if r1 {
+ rsv |= bit5
+ }
+ if r2 {
+ rsv |= bit6
+ }
+ if r3 {
+ rsv |= bit7
+ }
+ return rsv
+}
+
+func makeCloseFrame(code StatusCode) Frame {
+ return NewCloseFrame(NewCloseFrameBody(code, ""))
+}
+
+var (
+ closeFrameNormalClosure = makeCloseFrame(StatusNormalClosure)
+ closeFrameGoingAway = makeCloseFrame(StatusGoingAway)
+ closeFrameProtocolError = makeCloseFrame(StatusProtocolError)
+ closeFrameUnsupportedData = makeCloseFrame(StatusUnsupportedData)
+ closeFrameNoMeaningYet = makeCloseFrame(StatusNoMeaningYet)
+ closeFrameInvalidFramePayloadData = makeCloseFrame(StatusInvalidFramePayloadData)
+ closeFramePolicyViolation = makeCloseFrame(StatusPolicyViolation)
+ closeFrameMessageTooBig = makeCloseFrame(StatusMessageTooBig)
+ closeFrameMandatoryExt = makeCloseFrame(StatusMandatoryExt)
+ closeFrameInternalServerError = makeCloseFrame(StatusInternalServerError)
+ closeFrameTLSHandshake = makeCloseFrame(StatusTLSHandshake)
+)
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/http.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/http.go
new file mode 100644
index 00000000000..e18df441b47
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/http.go
@@ -0,0 +1,468 @@
+package ws
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "strconv"
+
+ "github.com/gobwas/httphead"
+)
+
+const (
+ crlf = "\r\n"
+ colonAndSpace = ": "
+ commaAndSpace = ", "
+)
+
+const (
+ textHeadUpgrade = "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\n"
+)
+
+var (
+ textHeadBadRequest = statusText(http.StatusBadRequest)
+ textHeadInternalServerError = statusText(http.StatusInternalServerError)
+ textHeadUpgradeRequired = statusText(http.StatusUpgradeRequired)
+
+ textTailErrHandshakeBadProtocol = errorText(ErrHandshakeBadProtocol)
+ textTailErrHandshakeBadMethod = errorText(ErrHandshakeBadMethod)
+ textTailErrHandshakeBadHost = errorText(ErrHandshakeBadHost)
+ textTailErrHandshakeBadUpgrade = errorText(ErrHandshakeBadUpgrade)
+ textTailErrHandshakeBadConnection = errorText(ErrHandshakeBadConnection)
+ textTailErrHandshakeBadSecAccept = errorText(ErrHandshakeBadSecAccept)
+ textTailErrHandshakeBadSecKey = errorText(ErrHandshakeBadSecKey)
+ textTailErrHandshakeBadSecVersion = errorText(ErrHandshakeBadSecVersion)
+ textTailErrUpgradeRequired = errorText(ErrHandshakeUpgradeRequired)
+)
+
+var (
+ headerHost = "Host"
+ headerUpgrade = "Upgrade"
+ headerConnection = "Connection"
+ headerSecVersion = "Sec-WebSocket-Version"
+ headerSecProtocol = "Sec-WebSocket-Protocol"
+ headerSecExtensions = "Sec-WebSocket-Extensions"
+ headerSecKey = "Sec-WebSocket-Key"
+ headerSecAccept = "Sec-WebSocket-Accept"
+
+ headerHostCanonical = textproto.CanonicalMIMEHeaderKey(headerHost)
+ headerUpgradeCanonical = textproto.CanonicalMIMEHeaderKey(headerUpgrade)
+ headerConnectionCanonical = textproto.CanonicalMIMEHeaderKey(headerConnection)
+ headerSecVersionCanonical = textproto.CanonicalMIMEHeaderKey(headerSecVersion)
+ headerSecProtocolCanonical = textproto.CanonicalMIMEHeaderKey(headerSecProtocol)
+ headerSecExtensionsCanonical = textproto.CanonicalMIMEHeaderKey(headerSecExtensions)
+ headerSecKeyCanonical = textproto.CanonicalMIMEHeaderKey(headerSecKey)
+ headerSecAcceptCanonical = textproto.CanonicalMIMEHeaderKey(headerSecAccept)
+)
+
+var (
+ specHeaderValueUpgrade = []byte("websocket")
+ specHeaderValueConnection = []byte("Upgrade")
+ specHeaderValueConnectionLower = []byte("upgrade")
+ specHeaderValueSecVersion = []byte("13")
+)
+
+var (
+ httpVersion1_0 = []byte("HTTP/1.0")
+ httpVersion1_1 = []byte("HTTP/1.1")
+ httpVersionPrefix = []byte("HTTP/")
+)
+
+type httpRequestLine struct {
+ method, uri []byte
+ major, minor int
+}
+
+type httpResponseLine struct {
+ major, minor int
+ status int
+ reason []byte
+}
+
+// httpParseRequestLine parses http request line like "GET / HTTP/1.0".
+func httpParseRequestLine(line []byte) (req httpRequestLine, err error) {
+ var proto []byte
+ req.method, req.uri, proto = bsplit3(line, ' ')
+
+ var ok bool
+ req.major, req.minor, ok = httpParseVersion(proto)
+ if !ok {
+ err = ErrMalformedRequest
+ return
+ }
+
+ return
+}
+
+func httpParseResponseLine(line []byte) (resp httpResponseLine, err error) {
+ var (
+ proto []byte
+ status []byte
+ )
+ proto, status, resp.reason = bsplit3(line, ' ')
+
+ var ok bool
+ resp.major, resp.minor, ok = httpParseVersion(proto)
+ if !ok {
+ return resp, ErrMalformedResponse
+ }
+
+ var convErr error
+ resp.status, convErr = asciiToInt(status)
+ if convErr != nil {
+ return resp, ErrMalformedResponse
+ }
+
+ return resp, nil
+}
+
+// httpParseVersion parses major and minor version of HTTP protocol. It returns
+// parsed values and true if parse is ok.
+func httpParseVersion(bts []byte) (major, minor int, ok bool) {
+ switch {
+ case bytes.Equal(bts, httpVersion1_0):
+ return 1, 0, true
+ case bytes.Equal(bts, httpVersion1_1):
+ return 1, 1, true
+ case len(bts) < 8:
+ return
+ case !bytes.Equal(bts[:5], httpVersionPrefix):
+ return
+ }
+
+ bts = bts[5:]
+
+ dot := bytes.IndexByte(bts, '.')
+ if dot == -1 {
+ return
+ }
+ var err error
+ major, err = asciiToInt(bts[:dot])
+ if err != nil {
+ return
+ }
+ minor, err = asciiToInt(bts[dot+1:])
+ if err != nil {
+ return
+ }
+
+ return major, minor, true
+}
+
+// httpParseHeaderLine parses HTTP header as key-value pair. It returns parsed
+// values and true if parse is ok.
+func httpParseHeaderLine(line []byte) (k, v []byte, ok bool) {
+ colon := bytes.IndexByte(line, ':')
+ if colon == -1 {
+ return
+ }
+
+ k = btrim(line[:colon])
+ // TODO(gobwas): maybe use just lower here?
+ canonicalizeHeaderKey(k)
+
+ v = btrim(line[colon+1:])
+
+ return k, v, true
+}
+
+// httpGetHeader is the same as textproto.MIMEHeader.Get, except the thing,
+// that key is already canonical. This helps to increase performance.
+func httpGetHeader(h http.Header, key string) string {
+ if h == nil {
+ return ""
+ }
+ v := h[key]
+ if len(v) == 0 {
+ return ""
+ }
+ return v[0]
+}
+
+// The request MAY include a header field with the name
+// |Sec-WebSocket-Protocol|. If present, this value indicates one or more
+// comma-separated subprotocol the client wishes to speak, ordered by
+// preference. The elements that comprise this value MUST be non-empty strings
+// with characters in the range U+0021 to U+007E not including separator
+// characters as defined in [RFC2616] and MUST all be unique strings. The ABNF
+// for the value of this header field is 1#token, where the definitions of
+// constructs and rules are as given in [RFC2616].
+func strSelectProtocol(h string, check func(string) bool) (ret string, ok bool) {
+ ok = httphead.ScanTokens(strToBytes(h), func(v []byte) bool {
+ if check(btsToString(v)) {
+ ret = string(v)
+ return false
+ }
+ return true
+ })
+ return
+}
+func btsSelectProtocol(h []byte, check func([]byte) bool) (ret string, ok bool) {
+ var selected []byte
+ ok = httphead.ScanTokens(h, func(v []byte) bool {
+ if check(v) {
+ selected = v
+ return false
+ }
+ return true
+ })
+ if ok && selected != nil {
+ return string(selected), true
+ }
+ return
+}
+
+func strSelectExtensions(h string, selected []httphead.Option, check func(httphead.Option) bool) ([]httphead.Option, bool) {
+ return btsSelectExtensions(strToBytes(h), selected, check)
+}
+
+func btsSelectExtensions(h []byte, selected []httphead.Option, check func(httphead.Option) bool) ([]httphead.Option, bool) {
+ s := httphead.OptionSelector{
+ Flags: httphead.SelectUnique | httphead.SelectCopy,
+ Check: check,
+ }
+ return s.Select(h, selected)
+}
+
+func httpWriteHeader(bw *bufio.Writer, key, value string) {
+ httpWriteHeaderKey(bw, key)
+ bw.WriteString(value)
+ bw.WriteString(crlf)
+}
+
+func httpWriteHeaderBts(bw *bufio.Writer, key string, value []byte) {
+ httpWriteHeaderKey(bw, key)
+ bw.Write(value)
+ bw.WriteString(crlf)
+}
+
+func httpWriteHeaderKey(bw *bufio.Writer, key string) {
+ bw.WriteString(key)
+ bw.WriteString(colonAndSpace)
+}
+
+func httpWriteUpgradeRequest(
+ bw *bufio.Writer,
+ u *url.URL,
+ nonce []byte,
+ protocols []string,
+ extensions []httphead.Option,
+ header HandshakeHeader,
+) {
+ bw.WriteString("GET ")
+ bw.WriteString(u.RequestURI())
+ bw.WriteString(" HTTP/1.1\r\n")
+
+ httpWriteHeader(bw, headerHost, u.Host)
+
+ httpWriteHeaderBts(bw, headerUpgrade, specHeaderValueUpgrade)
+ httpWriteHeaderBts(bw, headerConnection, specHeaderValueConnection)
+ httpWriteHeaderBts(bw, headerSecVersion, specHeaderValueSecVersion)
+
+ // NOTE: write nonce bytes as a string to prevent heap allocation –
+ // WriteString() copy given string into its inner buffer, unlike Write()
+ // which may write p directly to the underlying io.Writer – which in turn
+ // will lead to p escape.
+ httpWriteHeader(bw, headerSecKey, btsToString(nonce))
+
+ if len(protocols) > 0 {
+ httpWriteHeaderKey(bw, headerSecProtocol)
+ for i, p := range protocols {
+ if i > 0 {
+ bw.WriteString(commaAndSpace)
+ }
+ bw.WriteString(p)
+ }
+ bw.WriteString(crlf)
+ }
+
+ if len(extensions) > 0 {
+ httpWriteHeaderKey(bw, headerSecExtensions)
+ httphead.WriteOptions(bw, extensions)
+ bw.WriteString(crlf)
+ }
+
+ if header != nil {
+ header.WriteTo(bw)
+ }
+
+ bw.WriteString(crlf)
+}
+
+func httpWriteResponseUpgrade(bw *bufio.Writer, nonce []byte, hs Handshake, header HandshakeHeaderFunc) {
+ bw.WriteString(textHeadUpgrade)
+
+ httpWriteHeaderKey(bw, headerSecAccept)
+ writeAccept(bw, nonce)
+ bw.WriteString(crlf)
+
+ if hs.Protocol != "" {
+ httpWriteHeader(bw, headerSecProtocol, hs.Protocol)
+ }
+ if len(hs.Extensions) > 0 {
+ httpWriteHeaderKey(bw, headerSecExtensions)
+ httphead.WriteOptions(bw, hs.Extensions)
+ bw.WriteString(crlf)
+ }
+ if header != nil {
+ header(bw)
+ }
+
+ bw.WriteString(crlf)
+}
+
+func httpWriteResponseError(bw *bufio.Writer, err error, code int, header HandshakeHeaderFunc) {
+ switch code {
+ case http.StatusBadRequest:
+ bw.WriteString(textHeadBadRequest)
+ case http.StatusInternalServerError:
+ bw.WriteString(textHeadInternalServerError)
+ case http.StatusUpgradeRequired:
+ bw.WriteString(textHeadUpgradeRequired)
+ default:
+ writeStatusText(bw, code)
+ }
+
+ // Write custom headers.
+ if header != nil {
+ header(bw)
+ }
+
+ switch err {
+ case ErrHandshakeBadProtocol:
+ bw.WriteString(textTailErrHandshakeBadProtocol)
+ case ErrHandshakeBadMethod:
+ bw.WriteString(textTailErrHandshakeBadMethod)
+ case ErrHandshakeBadHost:
+ bw.WriteString(textTailErrHandshakeBadHost)
+ case ErrHandshakeBadUpgrade:
+ bw.WriteString(textTailErrHandshakeBadUpgrade)
+ case ErrHandshakeBadConnection:
+ bw.WriteString(textTailErrHandshakeBadConnection)
+ case ErrHandshakeBadSecAccept:
+ bw.WriteString(textTailErrHandshakeBadSecAccept)
+ case ErrHandshakeBadSecKey:
+ bw.WriteString(textTailErrHandshakeBadSecKey)
+ case ErrHandshakeBadSecVersion:
+ bw.WriteString(textTailErrHandshakeBadSecVersion)
+ case ErrHandshakeUpgradeRequired:
+ bw.WriteString(textTailErrUpgradeRequired)
+ case nil:
+ bw.WriteString(crlf)
+ default:
+ writeErrorText(bw, err)
+ }
+}
+
+func writeStatusText(bw *bufio.Writer, code int) {
+ bw.WriteString("HTTP/1.1 ")
+ bw.WriteString(strconv.Itoa(code))
+ bw.WriteByte(' ')
+ bw.WriteString(http.StatusText(code))
+ bw.WriteString(crlf)
+ bw.WriteString("Content-Type: text/plain; charset=utf-8")
+ bw.WriteString(crlf)
+}
+
+func writeErrorText(bw *bufio.Writer, err error) {
+ body := err.Error()
+ bw.WriteString("Content-Length: ")
+ bw.WriteString(strconv.Itoa(len(body)))
+ bw.WriteString(crlf)
+ bw.WriteString(crlf)
+ bw.WriteString(body)
+}
+
+// httpError is like http.Error, adapted for the WebSocket handshake context.
+func httpError(w http.ResponseWriter, body string, code int) {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.Header().Set("Content-Length", strconv.Itoa(len(body)))
+ w.WriteHeader(code)
+ w.Write([]byte(body))
+}
+
+// statusText is a non-performant status text generator.
+// NOTE: Used only to generate constants.
+func statusText(code int) string {
+ var buf bytes.Buffer
+ bw := bufio.NewWriter(&buf)
+ writeStatusText(bw, code)
+ bw.Flush()
+ return buf.String()
+}
+
+// errorText is a non-performant error text generator.
+// NOTE: Used only to generate constants.
+func errorText(err error) string {
+ var buf bytes.Buffer
+ bw := bufio.NewWriter(&buf)
+ writeErrorText(bw, err)
+ bw.Flush()
+ return buf.String()
+}
+
+// HandshakeHeader is the interface that writes both upgrade request or
+// response headers into a given io.Writer.
+type HandshakeHeader interface {
+ io.WriterTo
+}
+
+// HandshakeHeaderString is an adapter to allow the use of headers represented
+// by ordinary string as HandshakeHeader.
+type HandshakeHeaderString string
+
+// WriteTo implements HandshakeHeader (and io.WriterTo) interface.
+func (s HandshakeHeaderString) WriteTo(w io.Writer) (int64, error) {
+ n, err := io.WriteString(w, string(s))
+ return int64(n), err
+}
+
+// HandshakeHeaderBytes is an adapter to allow the use of headers represented
+// by ordinary slice of bytes as HandshakeHeader.
+type HandshakeHeaderBytes []byte
+
+// WriteTo implements HandshakeHeader (and io.WriterTo) interface.
+func (b HandshakeHeaderBytes) WriteTo(w io.Writer) (int64, error) {
+ n, err := w.Write(b)
+ return int64(n), err
+}
+
+// HandshakeHeaderFunc is an adapter to allow the use of headers represented by
+// ordinary function as HandshakeHeader.
+type HandshakeHeaderFunc func(io.Writer) (int64, error)
+
+// WriteTo implements HandshakeHeader (and io.WriterTo) interface.
+func (f HandshakeHeaderFunc) WriteTo(w io.Writer) (int64, error) {
+ return f(w)
+}
+
+// HandshakeHeaderHTTP is an adapter to allow the use of http.Header as
+// HandshakeHeader.
+type HandshakeHeaderHTTP http.Header
+
+// WriteTo implements HandshakeHeader (and io.WriterTo) interface.
+func (h HandshakeHeaderHTTP) WriteTo(w io.Writer) (int64, error) {
+ wr := writer{w: w}
+ err := http.Header(h).Write(&wr)
+ return wr.n, err
+}
+
+type writer struct {
+ n int64
+ w io.Writer
+}
+
+func (w *writer) WriteString(s string) (int, error) {
+ n, err := io.WriteString(w.w, s)
+ w.n += int64(n)
+ return n, err
+}
+
+func (w *writer) Write(p []byte) (int, error) {
+ n, err := w.w.Write(p)
+ w.n += int64(n)
+ return n, err
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/nonce.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/nonce.go
new file mode 100644
index 00000000000..e694da7c308
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/nonce.go
@@ -0,0 +1,80 @@
+package ws
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/sha1"
+ "encoding/base64"
+ "fmt"
+ "math/rand"
+)
+
+const (
+ // RFC6455: The value of this header field MUST be a nonce consisting of a
+ // randomly selected 16-byte value that has been base64-encoded (see
+ // Section 4 of [RFC4648]). The nonce MUST be selected randomly for each
+ // connection.
+ nonceKeySize = 16
+ nonceSize = 24 // base64.StdEncoding.EncodedLen(nonceKeySize)
+
+	// RFC6455: The value of this header field is constructed by concatenating
+	// /key/, defined above in step 4 in Section 4.2.2, with the string
+	// "258EAFA5-E914-47DA-95CA-C5AB0DC85B11", taking the SHA-1 hash of this
+	// concatenated value to obtain a 20-byte value and base64-encoding (see
+	// Section 4 of [RFC4648]) this 20-byte hash.
+ acceptSize = 28 // base64.StdEncoding.EncodedLen(sha1.Size)
+)
+
+// initNonce fills given slice with random base64-encoded nonce bytes.
+func initNonce(dst []byte) {
+ // NOTE: bts does not escape.
+ bts := make([]byte, nonceKeySize)
+ if _, err := rand.Read(bts); err != nil {
+ panic(fmt.Sprintf("rand read error: %s", err))
+ }
+ base64.StdEncoding.Encode(dst, bts)
+}
+
+// checkAcceptFromNonce reports whether given accept bytes are valid for given
+// nonce bytes.
+func checkAcceptFromNonce(accept, nonce []byte) bool {
+ if len(accept) != acceptSize {
+ return false
+ }
+ // NOTE: expect does not escape.
+ expect := make([]byte, acceptSize)
+ initAcceptFromNonce(expect, nonce)
+ return bytes.Equal(expect, accept)
+}
+
+// initAcceptFromNonce fills given slice with accept bytes generated from given
+// nonce bytes. Given buffer should be exactly acceptSize bytes.
+func initAcceptFromNonce(accept, nonce []byte) {
+ const magic = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
+
+ if len(accept) != acceptSize {
+ panic("accept buffer is invalid")
+ }
+ if len(nonce) != nonceSize {
+ panic("nonce is invalid")
+ }
+
+ p := make([]byte, nonceSize+len(magic))
+ copy(p[:nonceSize], nonce)
+ copy(p[nonceSize:], magic)
+
+ sum := sha1.Sum(p)
+ base64.StdEncoding.Encode(accept, sum[:])
+
+ return
+}
+
+func writeAccept(bw *bufio.Writer, nonce []byte) (int, error) {
+ accept := make([]byte, acceptSize)
+ initAcceptFromNonce(accept, nonce)
+	// NOTE: write accept bytes as a string to prevent a heap allocation –
+	// WriteString() copies the given string into its inner buffer, unlike
+	// Write(), which may pass p directly to the underlying io.Writer and
+	// thus cause p to escape to the heap.
+ return bw.WriteString(btsToString(accept))
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/read.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/read.go
new file mode 100644
index 00000000000..bc653e4690f
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/read.go
@@ -0,0 +1,147 @@
+package ws
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+)
+
+// Errors used by frame reader.
+var (
+ ErrHeaderLengthMSB = fmt.Errorf("header error: the most significant bit must be 0")
+ ErrHeaderLengthUnexpected = fmt.Errorf("header error: unexpected payload length bits")
+)
+
+// ReadHeader reads a frame header from r.
+func ReadHeader(r io.Reader) (h Header, err error) {
+ // Make slice of bytes with capacity 12 that could hold any header.
+ //
+ // The maximum header size is 14, but due to the 2 hop reads,
+ // after first hop that reads first 2 constant bytes, we could reuse 2 bytes.
+ // So 14 - 2 = 12.
+ bts := make([]byte, 2, MaxHeaderSize-2)
+
+ // Prepare to hold first 2 bytes to choose size of next read.
+ _, err = io.ReadFull(r, bts)
+ if err != nil {
+ return
+ }
+
+ h.Fin = bts[0]&bit0 != 0
+ h.Rsv = (bts[0] & 0x70) >> 4
+ h.OpCode = OpCode(bts[0] & 0x0f)
+
+ var extra int
+
+ if bts[1]&bit0 != 0 {
+ h.Masked = true
+ extra += 4
+ }
+
+ length := bts[1] & 0x7f
+ switch {
+ case length < 126:
+ h.Length = int64(length)
+
+ case length == 126:
+ extra += 2
+
+ case length == 127:
+ extra += 8
+
+ default:
+ err = ErrHeaderLengthUnexpected
+ return
+ }
+
+ if extra == 0 {
+ return
+ }
+
+ // Increase len of bts to extra bytes need to read.
+ // Overwrite first 2 bytes that was read before.
+ bts = bts[:extra]
+ _, err = io.ReadFull(r, bts)
+ if err != nil {
+ return
+ }
+
+ switch {
+ case length == 126:
+ h.Length = int64(binary.BigEndian.Uint16(bts[:2]))
+ bts = bts[2:]
+
+ case length == 127:
+ if bts[0]&0x80 != 0 {
+ err = ErrHeaderLengthMSB
+ return
+ }
+ h.Length = int64(binary.BigEndian.Uint64(bts[:8]))
+ bts = bts[8:]
+ }
+
+ if h.Masked {
+ copy(h.Mask[:], bts)
+ }
+
+ return
+}
+
+// ReadFrame reads a frame from r.
+// It is not designed for highly optimized use cases because it allocates a
+// buffer of frame.Header.Length bytes to read the frame payload into.
+//
+// Note that ReadFrame does not unmask payload.
+func ReadFrame(r io.Reader) (f Frame, err error) {
+ f.Header, err = ReadHeader(r)
+ if err != nil {
+ return
+ }
+
+ if f.Header.Length > 0 {
+ // int(f.Header.Length) is safe here cause we have
+ // checked it for overflow above in ReadHeader.
+ f.Payload = make([]byte, int(f.Header.Length))
+ _, err = io.ReadFull(r, f.Payload)
+ }
+
+ return
+}
+
+// MustReadFrame is like ReadFrame but panics if frame can not be read.
+func MustReadFrame(r io.Reader) Frame {
+ f, err := ReadFrame(r)
+ if err != nil {
+ panic(err)
+ }
+ return f
+}
+
+// ParseCloseFrameData parses close frame status code and closure reason if any provided.
+// If there is no status code in the payload
+// the empty status code is returned (code.Empty()) with empty string as a reason.
+func ParseCloseFrameData(payload []byte) (code StatusCode, reason string) {
+ if len(payload) < 2 {
+		// We return an empty StatusCode here, preventing the situation
+		// where the endpoint really sent code 1005 and we would have to return ProtocolError on that.
+		//
+		// In other words, we ignore this rule [RFC6455:7.1.5]:
+ // If this Close control frame contains no status code, _The WebSocket
+ // Connection Close Code_ is considered to be 1005.
+ return
+ }
+ code = StatusCode(binary.BigEndian.Uint16(payload))
+ reason = string(payload[2:])
+ return
+}
+
+// ParseCloseFrameDataUnsafe is like ParseCloseFrameData except that it does
+// not copy the payload bytes into reason, but instead performs an unsafe cast.
+func ParseCloseFrameDataUnsafe(payload []byte) (code StatusCode, reason string) {
+ if len(payload) < 2 {
+ return
+ }
+ code = StatusCode(binary.BigEndian.Uint16(payload))
+ reason = btsToString(payload[2:])
+ return
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/server.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/server.go
new file mode 100644
index 00000000000..48059aded49
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/server.go
@@ -0,0 +1,607 @@
+package ws
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/gobwas/httphead"
+ "github.com/gobwas/pool/pbufio"
+)
+
+// Constants used by ConnUpgrader.
+const (
+ DefaultServerReadBufferSize = 4096
+ DefaultServerWriteBufferSize = 512
+)
+
+// Errors used by both client and server when preparing WebSocket handshake.
+var (
+ ErrHandshakeBadProtocol = RejectConnectionError(
+ RejectionStatus(http.StatusHTTPVersionNotSupported),
+ RejectionReason(fmt.Sprintf("handshake error: bad HTTP protocol version")),
+ )
+ ErrHandshakeBadMethod = RejectConnectionError(
+ RejectionStatus(http.StatusMethodNotAllowed),
+ RejectionReason(fmt.Sprintf("handshake error: bad HTTP request method")),
+ )
+ ErrHandshakeBadHost = RejectConnectionError(
+ RejectionStatus(http.StatusBadRequest),
+ RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerHost)),
+ )
+ ErrHandshakeBadUpgrade = RejectConnectionError(
+ RejectionStatus(http.StatusBadRequest),
+ RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerUpgrade)),
+ )
+ ErrHandshakeBadConnection = RejectConnectionError(
+ RejectionStatus(http.StatusBadRequest),
+ RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerConnection)),
+ )
+ ErrHandshakeBadSecAccept = RejectConnectionError(
+ RejectionStatus(http.StatusBadRequest),
+ RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerSecAccept)),
+ )
+ ErrHandshakeBadSecKey = RejectConnectionError(
+ RejectionStatus(http.StatusBadRequest),
+ RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerSecKey)),
+ )
+ ErrHandshakeBadSecVersion = RejectConnectionError(
+ RejectionStatus(http.StatusBadRequest),
+ RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerSecVersion)),
+ )
+)
+
+// ErrMalformedResponse is returned by Dialer to indicate that server response
+// can not be parsed.
+var ErrMalformedResponse = fmt.Errorf("malformed HTTP response")
+
+// ErrMalformedRequest is returned when HTTP request can not be parsed.
+var ErrMalformedRequest = RejectConnectionError(
+ RejectionStatus(http.StatusBadRequest),
+ RejectionReason("malformed HTTP request"),
+)
+
+// ErrHandshakeUpgradeRequired is returned by Upgrader to indicate that
+// connection is rejected because given WebSocket version is malformed.
+//
+// According to RFC6455:
+// If this version does not match a version understood by the server, the
+// server MUST abort the WebSocket handshake described in this section and
+// instead send an appropriate HTTP error code (such as 426 Upgrade Required)
+// and a |Sec-WebSocket-Version| header field indicating the version(s) the
+// server is capable of understanding.
+var ErrHandshakeUpgradeRequired = RejectConnectionError(
+ RejectionStatus(http.StatusUpgradeRequired),
+ RejectionHeader(HandshakeHeaderString(headerSecVersion+": 13\r\n")),
+ RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerSecVersion)),
+)
+
+// ErrNotHijacker is an error returned when http.ResponseWriter does not
+// implement http.Hijacker interface.
+var ErrNotHijacker = RejectConnectionError(
+ RejectionStatus(http.StatusInternalServerError),
+ RejectionReason("given http.ResponseWriter is not a http.Hijacker"),
+)
+
+// DefaultHTTPUpgrader is an HTTPUpgrader that holds no options and is used by
+// UpgradeHTTP function.
+var DefaultHTTPUpgrader HTTPUpgrader
+
+// UpgradeHTTP is like HTTPUpgrader{}.Upgrade().
+func UpgradeHTTP(r *http.Request, w http.ResponseWriter) (net.Conn, *bufio.ReadWriter, Handshake, error) {
+ return DefaultHTTPUpgrader.Upgrade(r, w)
+}
+
+// DefaultUpgrader is an Upgrader that holds no options and is used by Upgrade
+// function.
+var DefaultUpgrader Upgrader
+
+// Upgrade is like Upgrader{}.Upgrade().
+func Upgrade(conn io.ReadWriter) (Handshake, error) {
+ return DefaultUpgrader.Upgrade(conn)
+}
+
+// HTTPUpgrader contains options for upgrading connection to websocket from
+// net/http Handler arguments.
+type HTTPUpgrader struct {
+ // Timeout is the maximum amount of time an Upgrade() will spent while
+ // writing handshake response.
+ //
+ // The default is no timeout.
+ Timeout time.Duration
+
+ // Header is an optional http.Header mapping that could be used to
+ // write additional headers to the handshake response.
+ //
+ // Note that if present, it will be written in any result of handshake.
+ Header http.Header
+
+ // Protocol is the select function that is used to select subprotocol from
+ // list requested by client. If this field is set, then the first matched
+ // protocol is sent to a client as negotiated.
+ Protocol func(string) bool
+
+ // Extension is the select function that is used to select extensions from
+ // list requested by client. If this field is set, then the all matched
+ // extensions are sent to a client as negotiated.
+ Extension func(httphead.Option) bool
+}
+
+// Upgrade upgrades http connection to the websocket connection.
+//
+// It hijacks net.Conn from w and returns received net.Conn and
+// bufio.ReadWriter. On successful handshake it returns Handshake struct
+// describing handshake info.
+func (u HTTPUpgrader) Upgrade(r *http.Request, w http.ResponseWriter) (conn net.Conn, rw *bufio.ReadWriter, hs Handshake, err error) {
+ // Hijack connection first to get the ability to write rejection errors the
+ // same way as in Upgrader.
+ hj, ok := w.(http.Hijacker)
+ if ok {
+ conn, rw, err = hj.Hijack()
+ } else {
+ err = ErrNotHijacker
+ }
+ if err != nil {
+ httpError(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ // See https://tools.ietf.org/html/rfc6455#section-4.1
+ // The method of the request MUST be GET, and the HTTP version MUST be at least 1.1.
+ var nonce string
+ if r.Method != http.MethodGet {
+ err = ErrHandshakeBadMethod
+ } else if r.ProtoMajor < 1 || (r.ProtoMajor == 1 && r.ProtoMinor < 1) {
+ err = ErrHandshakeBadProtocol
+ } else if r.Host == "" {
+ err = ErrHandshakeBadHost
+ } else if u := httpGetHeader(r.Header, headerUpgradeCanonical); u != "websocket" && !strings.EqualFold(u, "websocket") {
+ err = ErrHandshakeBadUpgrade
+ } else if c := httpGetHeader(r.Header, headerConnectionCanonical); c != "Upgrade" && !strHasToken(c, "upgrade") {
+ err = ErrHandshakeBadConnection
+ } else if nonce = httpGetHeader(r.Header, headerSecKeyCanonical); len(nonce) != nonceSize {
+ err = ErrHandshakeBadSecKey
+ } else if v := httpGetHeader(r.Header, headerSecVersionCanonical); v != "13" {
+ // According to RFC6455:
+ //
+ // If this version does not match a version understood by the server,
+ // the server MUST abort the WebSocket handshake described in this
+ // section and instead send an appropriate HTTP error code (such as 426
+ // Upgrade Required) and a |Sec-WebSocket-Version| header field
+ // indicating the version(s) the server is capable of understanding.
+ //
+		// So we branch here because an empty or absent version does not
+ // meet the ABNF rules of RFC6455:
+ //
+ // version = DIGIT | (NZDIGIT DIGIT) |
+ // ("1" DIGIT DIGIT) | ("2" DIGIT DIGIT)
+ // ; Limited to 0-255 range, with no leading zeros
+ //
+		// That is, if the version is really invalid we send a 426 status;
+		// if it is not present or empty, we send a 400.
+ if v != "" {
+ err = ErrHandshakeUpgradeRequired
+ } else {
+ err = ErrHandshakeBadSecVersion
+ }
+ }
+ if check := u.Protocol; err == nil && check != nil {
+ ps := r.Header[headerSecProtocolCanonical]
+ for i := 0; i < len(ps) && err == nil && hs.Protocol == ""; i++ {
+ var ok bool
+ hs.Protocol, ok = strSelectProtocol(ps[i], check)
+ if !ok {
+ err = ErrMalformedRequest
+ }
+ }
+ }
+ if check := u.Extension; err == nil && check != nil {
+ xs := r.Header[headerSecExtensionsCanonical]
+ for i := 0; i < len(xs) && err == nil; i++ {
+ var ok bool
+ hs.Extensions, ok = strSelectExtensions(xs[i], hs.Extensions, check)
+ if !ok {
+ err = ErrMalformedRequest
+ }
+ }
+ }
+
+ // Clear deadlines set by server.
+ conn.SetDeadline(noDeadline)
+ if t := u.Timeout; t != 0 {
+ conn.SetWriteDeadline(time.Now().Add(t))
+ defer conn.SetWriteDeadline(noDeadline)
+ }
+
+ var header handshakeHeader
+ if h := u.Header; h != nil {
+ header[0] = HandshakeHeaderHTTP(h)
+ }
+ if err == nil {
+ httpWriteResponseUpgrade(rw.Writer, strToBytes(nonce), hs, header.WriteTo)
+ err = rw.Writer.Flush()
+ } else {
+ var code int
+ if rej, ok := err.(*rejectConnectionError); ok {
+ code = rej.code
+ header[1] = rej.header
+ }
+ if code == 0 {
+ code = http.StatusInternalServerError
+ }
+ httpWriteResponseError(rw.Writer, err, code, header.WriteTo)
+ // Do not store Flush() error to not override already existing one.
+ rw.Writer.Flush()
+ }
+ return
+}
+
+// Upgrader contains options for upgrading connection to websocket.
+type Upgrader struct {
+	// ReadBufferSize and WriteBufferSize are the I/O buffer sizes.
+	// They are used to read and write HTTP data while upgrading to WebSocket.
+ // Allocated buffers are pooled with sync.Pool to avoid extra allocations.
+ //
+ // If a size is zero then default value is used.
+ //
+ // Usually it is useful to set read buffer size bigger than write buffer
+ // size because incoming request could contain long header values, such as
+ // Cookie. Response, in other way, could be big only if user write multiple
+ // custom headers. Usually response takes less than 256 bytes.
+ ReadBufferSize, WriteBufferSize int
+
+ // Protocol is a select function that is used to select subprotocol
+ // from list requested by client. If this field is set, then the first matched
+ // protocol is sent to a client as negotiated.
+ //
+ // The argument is only valid until the callback returns.
+ Protocol func([]byte) bool
+
+	// ProtocolCustom allows the user to parse the Sec-WebSocket-Protocol header manually.
+	// Note that returned bytes must be valid until Upgrade returns.
+	// If ProtocolCustom is set, it is used instead of the Protocol function.
+ ProtocolCustom func([]byte) (string, bool)
+
+ // Extension is a select function that is used to select extensions
+ // from list requested by client. If this field is set, then the all matched
+ // extensions are sent to a client as negotiated.
+ //
+ // The argument is only valid until the callback returns.
+ //
+ // According to the RFC6455 order of extensions passed by a client is
+ // significant. That is, returning true from this function means that no
+ // other extension with the same name should be checked because server
+ // accepted the most preferable extension right now:
+ // "Note that the order of extensions is significant. Any interactions between
+ // multiple extensions MAY be defined in the documents defining the extensions.
+ // In the absence of such definitions, the interpretation is that the header
+ // fields listed by the client in its request represent a preference of the
+ // header fields it wishes to use, with the first options listed being most
+ // preferable."
+ Extension func(httphead.Option) bool
+
+	// ExtensionCustom allows the user to parse the Sec-WebSocket-Extensions header manually.
+	// Note that returned options should be valid until Upgrade returns.
+	// If ExtensionCustom is set, it is used instead of the Extension function.
+ ExtensionCustom func([]byte, []httphead.Option) ([]httphead.Option, bool)
+
+ // Header is an optional HandshakeHeader instance that could be used to
+ // write additional headers to the handshake response.
+ //
+	// It is used instead of any key-value mappings to avoid allocations in user
+ // land.
+ //
+ // Note that if present, it will be written in any result of handshake.
+ Header HandshakeHeader
+
+ // OnRequest is a callback that will be called after request line
+ // successful parsing.
+ //
+ // The arguments are only valid until the callback returns.
+ //
+ // If returned error is non-nil then connection is rejected and response is
+ // sent with appropriate HTTP error code and body set to error message.
+ //
+ // RejectConnectionError could be used to get more control on response.
+ OnRequest func(uri []byte) error
+
+ // OnHost is a callback that will be called after "Host" header successful
+ // parsing.
+ //
+ // It is separated from OnHeader callback because the Host header must be
+ // present in each request since HTTP/1.1. Thus Host header is non-optional
+ // and required for every WebSocket handshake.
+ //
+ // The arguments are only valid until the callback returns.
+ //
+ // If returned error is non-nil then connection is rejected and response is
+ // sent with appropriate HTTP error code and body set to error message.
+ //
+ // RejectConnectionError could be used to get more control on response.
+ OnHost func(host []byte) error
+
+ // OnHeader is a callback that will be called after successful parsing of
+ // header, that is not used during WebSocket handshake procedure. That is,
+ // it will be called with non-websocket headers, which could be relevant
+ // for application-level logic.
+ //
+ // The arguments are only valid until the callback returns.
+ //
+ // If returned error is non-nil then connection is rejected and response is
+ // sent with appropriate HTTP error code and body set to error message.
+ //
+ // RejectConnectionError could be used to get more control on response.
+ OnHeader func(key, value []byte) error
+
+ // OnBeforeUpgrade is a callback that will be called before sending
+ // successful upgrade response.
+ //
+ // Setting OnBeforeUpgrade allows user to make final application-level
+ // checks and decide whether this connection is allowed to successfully
+ // upgrade to WebSocket.
+ //
+ // It must return non-nil either HandshakeHeader or error and never both.
+ //
+ // If returned error is non-nil then connection is rejected and response is
+ // sent with appropriate HTTP error code and body set to error message.
+ //
+ // RejectConnectionError could be used to get more control on response.
+ OnBeforeUpgrade func() (header HandshakeHeader, err error)
+}
+
+// Upgrade zero-copy upgrades connection to WebSocket. It interprets given conn
+// as connection with incoming HTTP Upgrade request.
+//
+// It is a caller responsibility to manage i/o timeouts on conn.
+//
+// Non-nil error means that request for the WebSocket upgrade is invalid or
+// malformed and usually connection should be closed.
+// Even when error is non-nil Upgrade will write appropriate response into
+// connection in compliance with RFC.
+func (u Upgrader) Upgrade(conn io.ReadWriter) (hs Handshake, err error) {
+ // headerSeen constants helps to report whether or not some header was seen
+ // during reading request bytes.
+ const (
+ headerSeenHost = 1 << iota
+ headerSeenUpgrade
+ headerSeenConnection
+ headerSeenSecVersion
+ headerSeenSecKey
+
+ // headerSeenAll is the value that we expect to receive at the end of
+ // headers read/parse loop.
+ headerSeenAll = 0 |
+ headerSeenHost |
+ headerSeenUpgrade |
+ headerSeenConnection |
+ headerSeenSecVersion |
+ headerSeenSecKey
+ )
+
+ // Prepare I/O buffers.
+ // TODO(gobwas): make it configurable.
+ br := pbufio.GetReader(conn,
+ nonZero(u.ReadBufferSize, DefaultServerReadBufferSize),
+ )
+ bw := pbufio.GetWriter(conn,
+ nonZero(u.WriteBufferSize, DefaultServerWriteBufferSize),
+ )
+ defer func() {
+ pbufio.PutReader(br)
+ pbufio.PutWriter(bw)
+ }()
+
+ // Read HTTP request line like "GET /ws HTTP/1.1".
+ rl, err := readLine(br)
+ if err != nil {
+ return
+ }
+ // Parse request line data like HTTP version, uri and method.
+ req, err := httpParseRequestLine(rl)
+ if err != nil {
+ return
+ }
+
+ // Prepare stack-based handshake header list.
+ header := handshakeHeader{
+ 0: u.Header,
+ }
+
+ // Parse and check HTTP request.
+ // As RFC6455 says:
+ // The client's opening handshake consists of the following parts. If the
+ // server, while reading the handshake, finds that the client did not
+ // send a handshake that matches the description below (note that as per
+ // [RFC2616], the order of the header fields is not important), including
+ // but not limited to any violations of the ABNF grammar specified for
+ // the components of the handshake, the server MUST stop processing the
+ // client's handshake and return an HTTP response with an appropriate
+ // error code (such as 400 Bad Request).
+ //
+ // See https://tools.ietf.org/html/rfc6455#section-4.2.1
+
+ // An HTTP/1.1 or higher GET request, including a "Request-URI".
+ //
+ // Even if RFC says "1.1 or higher" without mentioning the part of the
+ // version, we apply it only to minor part.
+ switch {
+ case req.major != 1 || req.minor < 1:
+ // Abort processing the whole request because we do not even know how
+ // to actually parse it.
+ err = ErrHandshakeBadProtocol
+
+ case btsToString(req.method) != http.MethodGet:
+ err = ErrHandshakeBadMethod
+
+ default:
+ if onRequest := u.OnRequest; onRequest != nil {
+ err = onRequest(req.uri)
+ }
+ }
+ // Start headers read/parse loop.
+ var (
+ // headerSeen reports which header was seen by setting corresponding
+ // bit on.
+ headerSeen byte
+
+ nonce = make([]byte, nonceSize)
+ )
+ for err == nil {
+ line, e := readLine(br)
+ if e != nil {
+ return hs, e
+ }
+ if len(line) == 0 {
+ // Blank line, no more lines to read.
+ break
+ }
+
+ k, v, ok := httpParseHeaderLine(line)
+ if !ok {
+ err = ErrMalformedRequest
+ break
+ }
+
+ switch btsToString(k) {
+ case headerHostCanonical:
+ headerSeen |= headerSeenHost
+ if onHost := u.OnHost; onHost != nil {
+ err = onHost(v)
+ }
+
+ case headerUpgradeCanonical:
+ headerSeen |= headerSeenUpgrade
+ if !bytes.Equal(v, specHeaderValueUpgrade) && !bytes.EqualFold(v, specHeaderValueUpgrade) {
+ err = ErrHandshakeBadUpgrade
+ }
+
+ case headerConnectionCanonical:
+ headerSeen |= headerSeenConnection
+ if !bytes.Equal(v, specHeaderValueConnection) && !btsHasToken(v, specHeaderValueConnectionLower) {
+ err = ErrHandshakeBadConnection
+ }
+
+ case headerSecVersionCanonical:
+ headerSeen |= headerSeenSecVersion
+ if !bytes.Equal(v, specHeaderValueSecVersion) {
+ err = ErrHandshakeUpgradeRequired
+ }
+
+ case headerSecKeyCanonical:
+ headerSeen |= headerSeenSecKey
+ if len(v) != nonceSize {
+ err = ErrHandshakeBadSecKey
+ } else {
+ copy(nonce[:], v)
+ }
+
+ case headerSecProtocolCanonical:
+ if custom, check := u.ProtocolCustom, u.Protocol; hs.Protocol == "" && (custom != nil || check != nil) {
+ var ok bool
+ if custom != nil {
+ hs.Protocol, ok = custom(v)
+ } else {
+ hs.Protocol, ok = btsSelectProtocol(v, check)
+ }
+ if !ok {
+ err = ErrMalformedRequest
+ }
+ }
+
+ case headerSecExtensionsCanonical:
+ if custom, check := u.ExtensionCustom, u.Extension; custom != nil || check != nil {
+ var ok bool
+ if custom != nil {
+ hs.Extensions, ok = custom(v, hs.Extensions)
+ } else {
+ hs.Extensions, ok = btsSelectExtensions(v, hs.Extensions, check)
+ }
+ if !ok {
+ err = ErrMalformedRequest
+ }
+ }
+
+ default:
+ if onHeader := u.OnHeader; onHeader != nil {
+ err = onHeader(k, v)
+ }
+ }
+ }
+ switch {
+ case err == nil && headerSeen != headerSeenAll:
+ switch {
+ case headerSeen&headerSeenHost == 0:
+ // As RFC2616 says:
+ // A client MUST include a Host header field in all HTTP/1.1
+ // request messages. If the requested URI does not include an
+ // Internet host name for the service being requested, then the
+ // Host header field MUST be given with an empty value. An
+ // HTTP/1.1 proxy MUST ensure that any request message it
+ // forwards does contain an appropriate Host header field that
+ // identifies the service being requested by the proxy. All
+ // Internet-based HTTP/1.1 servers MUST respond with a 400 (Bad
+ // Request) status code to any HTTP/1.1 request message which
+ // lacks a Host header field.
+ err = ErrHandshakeBadHost
+ case headerSeen&headerSeenUpgrade == 0:
+ err = ErrHandshakeBadUpgrade
+ case headerSeen&headerSeenConnection == 0:
+ err = ErrHandshakeBadConnection
+ case headerSeen&headerSeenSecVersion == 0:
+ // In case of empty or not present version we do not send 426 status,
+ // because it does not meet the ABNF rules of RFC6455:
+ //
+ // version = DIGIT | (NZDIGIT DIGIT) |
+ // ("1" DIGIT DIGIT) | ("2" DIGIT DIGIT)
+ // ; Limited to 0-255 range, with no leading zeros
+ //
+			// That is, if the version is really invalid we send a 426 status
+			// as above; if it is not present, it is 400.
+ err = ErrHandshakeBadSecVersion
+ case headerSeen&headerSeenSecKey == 0:
+ err = ErrHandshakeBadSecKey
+ default:
+ panic("unknown headers state")
+ }
+
+ case err == nil && u.OnBeforeUpgrade != nil:
+ header[1], err = u.OnBeforeUpgrade()
+ }
+ if err != nil {
+ var code int
+ if rej, ok := err.(*rejectConnectionError); ok {
+ code = rej.code
+ header[1] = rej.header
+ }
+ if code == 0 {
+ code = http.StatusInternalServerError
+ }
+ httpWriteResponseError(bw, err, code, header.WriteTo)
+ // Do not store Flush() error to not override already existing one.
+ bw.Flush()
+ return
+ }
+
+ httpWriteResponseUpgrade(bw, nonce, hs, header.WriteTo)
+ err = bw.Flush()
+
+ return
+}
+
+type handshakeHeader [2]HandshakeHeader
+
+func (hs handshakeHeader) WriteTo(w io.Writer) (n int64, err error) {
+ for i := 0; i < len(hs) && err == nil; i++ {
+ if h := hs[i]; h != nil {
+ var m int64
+ m, err = h.WriteTo(w)
+ n += m
+ }
+ }
+ return n, err
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/server_test.s b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/server_test.s
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/util.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/util.go
new file mode 100644
index 00000000000..67ad906e5d2
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/util.go
@@ -0,0 +1,214 @@
+package ws
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "reflect"
+ "unsafe"
+
+ "github.com/gobwas/httphead"
+)
+
+// SelectFromSlice creates an accept function that could be used as a
+// Protocol/Extension selector during upgrade.
+func SelectFromSlice(accept []string) func(string) bool {
+ if len(accept) > 16 {
+ mp := make(map[string]struct{}, len(accept))
+ for _, p := range accept {
+ mp[p] = struct{}{}
+ }
+ return func(p string) bool {
+ _, ok := mp[p]
+ return ok
+ }
+ }
+ return func(p string) bool {
+ for _, ok := range accept {
+ if p == ok {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+// SelectEqual creates an accept function that could be used as a
+// Protocol/Extension selector during upgrade.
+func SelectEqual(v string) func(string) bool {
+ return func(p string) bool {
+ return v == p
+ }
+}
+
+func strToBytes(str string) (bts []byte) {
+ s := (*reflect.StringHeader)(unsafe.Pointer(&str))
+ b := (*reflect.SliceHeader)(unsafe.Pointer(&bts))
+ b.Data = s.Data
+ b.Len = s.Len
+ b.Cap = s.Len
+ return
+}
+
+func btsToString(bts []byte) (str string) {
+ return *(*string)(unsafe.Pointer(&bts))
+}
+
+// asciiToInt converts bytes to int.
+func asciiToInt(bts []byte) (ret int, err error) {
+ // ASCII numbers all start with the high-order bits 0011.
+ // If you see that, and the next bits are 0-9 (0000 - 1001) you can grab those
+ // bits and interpret them directly as an integer.
+ var n int
+ if n = len(bts); n < 1 {
+ return 0, fmt.Errorf("converting empty bytes to int")
+ }
+ for i := 0; i < n; i++ {
+ if bts[i]&0xf0 != 0x30 {
+ return 0, fmt.Errorf("%s is not a numeric character", string(bts[i]))
+ }
+ ret += int(bts[i]&0xf) * pow(10, n-i-1)
+ }
+ return ret, nil
+}
+
+// pow for integers implementation.
+// See Donald Knuth, The Art of Computer Programming, Volume 2, Section 4.6.3
+func pow(a, b int) int {
+ p := 1
+ for b > 0 {
+ if b&1 != 0 {
+ p *= a
+ }
+ b >>= 1
+ a *= a
+ }
+ return p
+}
+
+func bsplit3(bts []byte, sep byte) (b1, b2, b3 []byte) {
+ a := bytes.IndexByte(bts, sep)
+ b := bytes.IndexByte(bts[a+1:], sep)
+ if a == -1 || b == -1 {
+ return bts, nil, nil
+ }
+ b += a + 1
+ return bts[:a], bts[a+1 : b], bts[b+1:]
+}
+
+func btrim(bts []byte) []byte {
+ var i, j int
+ for i = 0; i < len(bts) && (bts[i] == ' ' || bts[i] == '\t'); {
+ i++
+ }
+ for j = len(bts); j > i && (bts[j-1] == ' ' || bts[j-1] == '\t'); {
+ j--
+ }
+ return bts[i:j]
+}
+
+func strHasToken(header, token string) (has bool) {
+ return btsHasToken(strToBytes(header), strToBytes(token))
+}
+
+func btsHasToken(header, token []byte) (has bool) {
+ httphead.ScanTokens(header, func(v []byte) bool {
+ has = bytes.EqualFold(v, token)
+ return !has
+ })
+ return
+}
+
+const (
+ toLower = 'a' - 'A' // for use with OR.
+ toUpper = ^byte(toLower) // for use with AND.
+ toLower8 = uint64(toLower) |
+ uint64(toLower)<<8 |
+ uint64(toLower)<<16 |
+ uint64(toLower)<<24 |
+ uint64(toLower)<<32 |
+ uint64(toLower)<<40 |
+ uint64(toLower)<<48 |
+ uint64(toLower)<<56
+)
+
+// The algorithm below is like the standard textproto/CanonicalMIMEHeaderKey,
+// except that it operates on a slice of bytes and modifies it in place without copying.
+func canonicalizeHeaderKey(k []byte) {
+ upper := true
+ for i, c := range k {
+ if upper && 'a' <= c && c <= 'z' {
+ k[i] &= toUpper
+ } else if !upper && 'A' <= c && c <= 'Z' {
+ k[i] |= toLower
+ }
+ upper = c == '-'
+ }
+}
+
+// readLine reads line from br. It reads until '\n' and returns bytes without
+// '\n' or '\r\n' at the end.
+// It returns err if and only if the line does not end in '\n'. Note that the
+// bytes read so far are returned in any case of error.
+//
+// It is much like the textproto/Reader.ReadLine() except the thing that it
+// returns raw bytes, instead of string. That is, it avoids copying bytes read
+// from br.
+//
+// textproto/Reader.ReadLineBytes() also makes a copy of the resulting bytes to be
+// safe with future I/O operations on br.
+//
+// Since we control the I/O operations on br ourselves, we do not need to make
+// an additional copy for safety.
+//
+// NOTE: it could return a copied flag to notify that the returned buffer is
+// safe to use.
+func readLine(br *bufio.Reader) ([]byte, error) {
+ var line []byte
+ for {
+ bts, err := br.ReadSlice('\n')
+ if err == bufio.ErrBufferFull {
+ // Copy bytes because next read will discard them.
+ line = append(line, bts...)
+ continue
+ }
+
+ // Avoid copy of single read.
+ if line == nil {
+ line = bts
+ } else {
+ line = append(line, bts...)
+ }
+
+ if err != nil {
+ return line, err
+ }
+
+ // Size of line is at least 1.
+ // In other case bufio.ReadSlice() returns error.
+ n := len(line)
+
+ // Cut '\n' or '\r\n'.
+ if n > 1 && line[n-2] == '\r' {
+ line = line[:n-2]
+ } else {
+ line = line[:n-1]
+ }
+
+ return line, nil
+ }
+}
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func nonZero(a, b int) int {
+ if a != 0 {
+ return a
+ }
+ return b
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/write.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/write.go
new file mode 100644
index 00000000000..94557c69639
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/write.go
@@ -0,0 +1,104 @@
+package ws
+
+import (
+ "encoding/binary"
+ "io"
+)
+
+// Header size length bounds in bytes.
+const (
+ MaxHeaderSize = 14
+ MinHeaderSize = 2
+)
+
+const (
+ bit0 = 0x80
+ bit1 = 0x40
+ bit2 = 0x20
+ bit3 = 0x10
+ bit4 = 0x08
+ bit5 = 0x04
+ bit6 = 0x02
+ bit7 = 0x01
+
+ len7 = int64(125)
+ len16 = int64(^(uint16(0)))
+ len64 = int64(^(uint64(0)) >> 1)
+)
+
+// HeaderSize returns number of bytes that are needed to encode given header.
+// It returns -1 if header is malformed.
+func HeaderSize(h Header) (n int) {
+ switch {
+ case h.Length < 126:
+ n = 2
+ case h.Length <= len16:
+ n = 4
+ case h.Length <= len64:
+ n = 10
+ default:
+ return -1
+ }
+ if h.Masked {
+ n += len(h.Mask)
+ }
+ return n
+}
+
+// WriteHeader writes header binary representation into w.
+func WriteHeader(w io.Writer, h Header) error {
+ // Make slice of bytes with capacity 14 that could hold any header.
+ bts := make([]byte, MaxHeaderSize)
+
+ if h.Fin {
+ bts[0] |= bit0
+ }
+ bts[0] |= h.Rsv << 4
+ bts[0] |= byte(h.OpCode)
+
+ var n int
+ switch {
+ case h.Length <= len7:
+ bts[1] = byte(h.Length)
+ n = 2
+
+ case h.Length <= len16:
+ bts[1] = 126
+ binary.BigEndian.PutUint16(bts[2:4], uint16(h.Length))
+ n = 4
+
+ case h.Length <= len64:
+ bts[1] = 127
+ binary.BigEndian.PutUint64(bts[2:10], uint64(h.Length))
+ n = 10
+
+ default:
+ return ErrHeaderLengthUnexpected
+ }
+
+ if h.Masked {
+ bts[1] |= bit0
+ n += copy(bts[n:], h.Mask[:])
+ }
+
+ _, err := w.Write(bts[:n])
+
+ return err
+}
+
+// WriteFrame writes frame binary representation into w.
+func WriteFrame(w io.Writer, f Frame) error {
+ err := WriteHeader(w, f.Header)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(f.Payload)
+ return err
+}
+
+// MustWriteFrame is like WriteFrame but panics if the frame can not be written.
+func MustWriteFrame(w io.Writer, f Frame) {
+ if err := WriteFrame(w, f); err != nil {
+ panic(err)
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/.gitignore b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/.gitignore
new file mode 100644
index 00000000000..cd3fcd1ef72
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+.idea/
+*.iml
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/AUTHORS b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/AUTHORS
new file mode 100644
index 00000000000..1931f400682
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/AUTHORS
@@ -0,0 +1,9 @@
+# This is the official list of Gorilla WebSocket authors for copyright
+# purposes.
+#
+# Please keep the list sorted.
+
+Gary Burd
+Google LLC (https://opensource.google.com/)
+Joachim Bauch
+
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/LICENSE b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/LICENSE
new file mode 100644
index 00000000000..9171c972252
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/README.md b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/README.md
new file mode 100644
index 00000000000..19aa2e75c82
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/README.md
@@ -0,0 +1,64 @@
+# Gorilla WebSocket
+
+[](https://godoc.org/github.com/gorilla/websocket)
+[](https://circleci.com/gh/gorilla/websocket)
+
+Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
+[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
+
+### Documentation
+
+* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc)
+* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
+* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
+* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
+* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
+
+### Status
+
+The Gorilla WebSocket package provides a complete and tested implementation of
+the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
+package API is stable.
+
+### Installation
+
+ go get github.com/gorilla/websocket
+
+### Protocol Compliance
+
+The Gorilla WebSocket package passes the server tests in the [Autobahn Test
+Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn
+subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
+
+### Gorilla WebSocket compared with other packages
+
+
+
+Notes:
+
+1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
+2. The application can get the type of a received data message by implementing
+ a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal)
+ function.
+3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries.
+ Read returns when the input buffer is full or a frame boundary is
+ encountered. Each call to Write sends a single frame message. The Gorilla
+ io.Reader and io.WriteCloser operate on a single WebSocket message.
+
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/client.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/client.go
new file mode 100644
index 00000000000..962c06a391c
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/client.go
@@ -0,0 +1,395 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "errors"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/http/httptrace"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// ErrBadHandshake is returned when the server response to opening handshake is
+// invalid.
+var ErrBadHandshake = errors.New("websocket: bad handshake")
+
+var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
+
+// NewClient creates a new client connection using the given net connection.
+// The URL u specifies the host and request URI. Use requestHeader to specify
+// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
+// (Cookie). Use the response.Header to get the selected subprotocol
+// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
+//
+// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
+// non-nil *http.Response so that callers can handle redirects, authentication,
+// etc.
+//
+// Deprecated: Use Dialer instead.
+func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
+ d := Dialer{
+ ReadBufferSize: readBufSize,
+ WriteBufferSize: writeBufSize,
+ NetDial: func(net, addr string) (net.Conn, error) {
+ return netConn, nil
+ },
+ }
+ return d.Dial(u.String(), requestHeader)
+}
+
+// A Dialer contains options for connecting to WebSocket server.
+type Dialer struct {
+ // NetDial specifies the dial function for creating TCP connections. If
+ // NetDial is nil, net.Dial is used.
+ NetDial func(network, addr string) (net.Conn, error)
+
+ // NetDialContext specifies the dial function for creating TCP connections. If
+ // NetDialContext is nil, net.DialContext is used.
+ NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ // Proxy specifies a function to return a proxy for a given
+ // Request. If the function returns a non-nil error, the
+ // request is aborted with the provided error.
+ // If Proxy is nil or returns a nil *URL, no proxy is used.
+ Proxy func(*http.Request) (*url.URL, error)
+
+ // TLSClientConfig specifies the TLS configuration to use with tls.Client.
+ // If nil, the default configuration is used.
+ TLSClientConfig *tls.Config
+
+ // HandshakeTimeout specifies the duration for the handshake to complete.
+ HandshakeTimeout time.Duration
+
+ // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
+ // size is zero, then a useful default size is used. The I/O buffer sizes
+ // do not limit the size of the messages that can be sent or received.
+ ReadBufferSize, WriteBufferSize int
+
+ // WriteBufferPool is a pool of buffers for write operations. If the value
+ // is not set, then write buffers are allocated to the connection for the
+ // lifetime of the connection.
+ //
+ // A pool is most useful when the application has a modest volume of writes
+ // across a large number of connections.
+ //
+ // Applications should use a single pool for each unique value of
+ // WriteBufferSize.
+ WriteBufferPool BufferPool
+
+ // Subprotocols specifies the client's requested subprotocols.
+ Subprotocols []string
+
+ // EnableCompression specifies if the client should attempt to negotiate
+ // per message compression (RFC 7692). Setting this value to true does not
+ // guarantee that compression will be supported. Currently only "no context
+ // takeover" modes are supported.
+ EnableCompression bool
+
+ // Jar specifies the cookie jar.
+ // If Jar is nil, cookies are not sent in requests and ignored
+ // in responses.
+ Jar http.CookieJar
+}
+
+// Dial creates a new client connection by calling DialContext with a background context.
+func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
+ return d.DialContext(context.Background(), urlStr, requestHeader)
+}
+
+var errMalformedURL = errors.New("malformed ws or wss URL")
+
+func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
+ hostPort = u.Host
+ hostNoPort = u.Host
+ if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
+ hostNoPort = hostNoPort[:i]
+ } else {
+ switch u.Scheme {
+ case "wss":
+ hostPort += ":443"
+ case "https":
+ hostPort += ":443"
+ default:
+ hostPort += ":80"
+ }
+ }
+ return hostPort, hostNoPort
+}
+
+// DefaultDialer is a dialer with all fields set to the default values.
+var DefaultDialer = &Dialer{
+ Proxy: http.ProxyFromEnvironment,
+ HandshakeTimeout: 45 * time.Second,
+}
+
+// nilDialer is dialer to use when receiver is nil.
+var nilDialer = *DefaultDialer
+
+// DialContext creates a new client connection. Use requestHeader to specify the
+// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
+// Use the response.Header to get the selected subprotocol
+// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
+//
+// The context will be used in the request and in the Dialer.
+//
+// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
+// non-nil *http.Response so that callers can handle redirects, authentication,
+// etcetera. The response body may not contain the entire response and does not
+// need to be closed by the application.
+func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
+ if d == nil {
+ d = &nilDialer
+ }
+
+ challengeKey, err := generateChallengeKey()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ u, err := url.Parse(urlStr)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ switch u.Scheme {
+ case "ws":
+ u.Scheme = "http"
+ case "wss":
+ u.Scheme = "https"
+ default:
+ return nil, nil, errMalformedURL
+ }
+
+ if u.User != nil {
+ // User name and password are not allowed in websocket URIs.
+ return nil, nil, errMalformedURL
+ }
+
+ req := &http.Request{
+ Method: "GET",
+ URL: u,
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: make(http.Header),
+ Host: u.Host,
+ }
+ req = req.WithContext(ctx)
+
+ // Set the cookies present in the cookie jar of the dialer
+ if d.Jar != nil {
+ for _, cookie := range d.Jar.Cookies(u) {
+ req.AddCookie(cookie)
+ }
+ }
+
+ // Set the request headers using the capitalization for names and values in
+ // RFC examples. Although the capitalization shouldn't matter, there are
+ // servers that depend on it. The Header.Set method is not used because the
+ // method canonicalizes the header names.
+ req.Header["Upgrade"] = []string{"websocket"}
+ req.Header["Connection"] = []string{"Upgrade"}
+ req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
+ req.Header["Sec-WebSocket-Version"] = []string{"13"}
+ if len(d.Subprotocols) > 0 {
+ req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
+ }
+ for k, vs := range requestHeader {
+ switch {
+ case k == "Host":
+ if len(vs) > 0 {
+ req.Host = vs[0]
+ }
+ case k == "Upgrade" ||
+ k == "Connection" ||
+ k == "Sec-Websocket-Key" ||
+ k == "Sec-Websocket-Version" ||
+ k == "Sec-Websocket-Extensions" ||
+ (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
+ return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
+ case k == "Sec-Websocket-Protocol":
+ req.Header["Sec-WebSocket-Protocol"] = vs
+ default:
+ req.Header[k] = vs
+ }
+ }
+
+ if d.EnableCompression {
+ req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"}
+ }
+
+ if d.HandshakeTimeout != 0 {
+ var cancel func()
+ ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout)
+ defer cancel()
+ }
+
+ // Get network dial function.
+ var netDial func(network, add string) (net.Conn, error)
+
+ if d.NetDialContext != nil {
+ netDial = func(network, addr string) (net.Conn, error) {
+ return d.NetDialContext(ctx, network, addr)
+ }
+ } else if d.NetDial != nil {
+ netDial = d.NetDial
+ } else {
+ netDialer := &net.Dialer{}
+ netDial = func(network, addr string) (net.Conn, error) {
+ return netDialer.DialContext(ctx, network, addr)
+ }
+ }
+
+ // If needed, wrap the dial function to set the connection deadline.
+ if deadline, ok := ctx.Deadline(); ok {
+ forwardDial := netDial
+ netDial = func(network, addr string) (net.Conn, error) {
+ c, err := forwardDial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+ err = c.SetDeadline(deadline)
+ if err != nil {
+ c.Close()
+ return nil, err
+ }
+ return c, nil
+ }
+ }
+
+ // If needed, wrap the dial function to connect through a proxy.
+ if d.Proxy != nil {
+ proxyURL, err := d.Proxy(req)
+ if err != nil {
+ return nil, nil, err
+ }
+ if proxyURL != nil {
+ dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
+ if err != nil {
+ return nil, nil, err
+ }
+ netDial = dialer.Dial
+ }
+ }
+
+ hostPort, hostNoPort := hostPortNoPort(u)
+ trace := httptrace.ContextClientTrace(ctx)
+ if trace != nil && trace.GetConn != nil {
+ trace.GetConn(hostPort)
+ }
+
+ netConn, err := netDial("tcp", hostPort)
+ if trace != nil && trace.GotConn != nil {
+ trace.GotConn(httptrace.GotConnInfo{
+ Conn: netConn,
+ })
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+
+ defer func() {
+ if netConn != nil {
+ netConn.Close()
+ }
+ }()
+
+ if u.Scheme == "https" {
+ cfg := cloneTLSConfig(d.TLSClientConfig)
+ if cfg.ServerName == "" {
+ cfg.ServerName = hostNoPort
+ }
+ tlsConn := tls.Client(netConn, cfg)
+ netConn = tlsConn
+
+ var err error
+ if trace != nil {
+ err = doHandshakeWithTrace(trace, tlsConn, cfg)
+ } else {
+ err = doHandshake(tlsConn, cfg)
+ }
+
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)
+
+ if err := req.Write(netConn); err != nil {
+ return nil, nil, err
+ }
+
+ if trace != nil && trace.GotFirstResponseByte != nil {
+ if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 {
+ trace.GotFirstResponseByte()
+ }
+ }
+
+ resp, err := http.ReadResponse(conn.br, req)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if d.Jar != nil {
+ if rc := resp.Cookies(); len(rc) > 0 {
+ d.Jar.SetCookies(u, rc)
+ }
+ }
+
+ if resp.StatusCode != 101 ||
+ !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
+ !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
+ resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
+ // Before closing the network connection on return from this
+ // function, slurp up some of the response to aid application
+ // debugging.
+ buf := make([]byte, 1024)
+ n, _ := io.ReadFull(resp.Body, buf)
+ resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
+ return nil, resp, ErrBadHandshake
+ }
+
+ for _, ext := range parseExtensions(resp.Header) {
+ if ext[""] != "permessage-deflate" {
+ continue
+ }
+ _, snct := ext["server_no_context_takeover"]
+ _, cnct := ext["client_no_context_takeover"]
+ if !snct || !cnct {
+ return nil, resp, errInvalidCompression
+ }
+ conn.newCompressionWriter = compressNoContextTakeover
+ conn.newDecompressionReader = decompressNoContextTakeover
+ break
+ }
+
+ resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
+ conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
+
+ netConn.SetDeadline(time.Time{})
+ netConn = nil // to avoid close in defer.
+ return conn, resp, nil
+}
+
+func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error {
+ if err := tlsConn.Handshake(); err != nil {
+ return err
+ }
+ if !cfg.InsecureSkipVerify {
+ if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/client_clone.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/client_clone.go
new file mode 100644
index 00000000000..4f0d943723a
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/client_clone.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package websocket
+
+import "crypto/tls"
+
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ return &tls.Config{}
+ }
+ return cfg.Clone()
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/client_clone_legacy.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/client_clone_legacy.go
new file mode 100644
index 00000000000..babb007fb41
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/client_clone_legacy.go
@@ -0,0 +1,38 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8
+
+package websocket
+
+import "crypto/tls"
+
+// cloneTLSConfig clones all public fields except the fields
+// SessionTicketsDisabled and SessionTicketKey. This avoids copying the
+// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a
+// config in active use.
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ return &tls.Config{}
+ }
+ return &tls.Config{
+ Rand: cfg.Rand,
+ Time: cfg.Time,
+ Certificates: cfg.Certificates,
+ NameToCertificate: cfg.NameToCertificate,
+ GetCertificate: cfg.GetCertificate,
+ RootCAs: cfg.RootCAs,
+ NextProtos: cfg.NextProtos,
+ ServerName: cfg.ServerName,
+ ClientAuth: cfg.ClientAuth,
+ ClientCAs: cfg.ClientCAs,
+ InsecureSkipVerify: cfg.InsecureSkipVerify,
+ CipherSuites: cfg.CipherSuites,
+ PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+ ClientSessionCache: cfg.ClientSessionCache,
+ MinVersion: cfg.MinVersion,
+ MaxVersion: cfg.MaxVersion,
+ CurvePreferences: cfg.CurvePreferences,
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/compression.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/compression.go
new file mode 100644
index 00000000000..813ffb1e843
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/compression.go
@@ -0,0 +1,148 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "compress/flate"
+ "errors"
+ "io"
+ "strings"
+ "sync"
+)
+
+const (
+ minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6
+ maxCompressionLevel = flate.BestCompression
+ defaultCompressionLevel = 1
+)
+
+var (
+ flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
+ flateReaderPool = sync.Pool{New: func() interface{} {
+ return flate.NewReader(nil)
+ }}
+)
+
+func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
+ const tail =
+ // Add four bytes as specified in RFC
+ "\x00\x00\xff\xff" +
+ // Add final block to squelch unexpected EOF error from flate reader.
+ "\x01\x00\x00\xff\xff"
+
+ fr, _ := flateReaderPool.Get().(io.ReadCloser)
+ fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
+ return &flateReadWrapper{fr}
+}
+
+func isValidCompressionLevel(level int) bool {
+ return minCompressionLevel <= level && level <= maxCompressionLevel
+}
+
+func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
+ p := &flateWriterPools[level-minCompressionLevel]
+ tw := &truncWriter{w: w}
+ fw, _ := p.Get().(*flate.Writer)
+ if fw == nil {
+ fw, _ = flate.NewWriter(tw, level)
+ } else {
+ fw.Reset(tw)
+ }
+ return &flateWriteWrapper{fw: fw, tw: tw, p: p}
+}
+
+// truncWriter is an io.Writer that writes all but the last four bytes of the
+// stream to another io.Writer.
+type truncWriter struct {
+ w io.WriteCloser
+ n int
+ p [4]byte
+}
+
+func (w *truncWriter) Write(p []byte) (int, error) {
+ n := 0
+
+ // fill buffer first for simplicity.
+ if w.n < len(w.p) {
+ n = copy(w.p[w.n:], p)
+ p = p[n:]
+ w.n += n
+ if len(p) == 0 {
+ return n, nil
+ }
+ }
+
+ m := len(p)
+ if m > len(w.p) {
+ m = len(w.p)
+ }
+
+ if nn, err := w.w.Write(w.p[:m]); err != nil {
+ return n + nn, err
+ }
+
+ copy(w.p[:], w.p[m:])
+ copy(w.p[len(w.p)-m:], p[len(p)-m:])
+ nn, err := w.w.Write(p[:len(p)-m])
+ return n + nn, err
+}
+
+type flateWriteWrapper struct {
+ fw *flate.Writer
+ tw *truncWriter
+ p *sync.Pool
+}
+
+func (w *flateWriteWrapper) Write(p []byte) (int, error) {
+ if w.fw == nil {
+ return 0, errWriteClosed
+ }
+ return w.fw.Write(p)
+}
+
+func (w *flateWriteWrapper) Close() error {
+ if w.fw == nil {
+ return errWriteClosed
+ }
+ err1 := w.fw.Flush()
+ w.p.Put(w.fw)
+ w.fw = nil
+ if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
+ return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
+ }
+ err2 := w.tw.w.Close()
+ if err1 != nil {
+ return err1
+ }
+ return err2
+}
+
+type flateReadWrapper struct {
+ fr io.ReadCloser
+}
+
+func (r *flateReadWrapper) Read(p []byte) (int, error) {
+ if r.fr == nil {
+ return 0, io.ErrClosedPipe
+ }
+ n, err := r.fr.Read(p)
+ if err == io.EOF {
+ // Preemptively place the reader back in the pool. This helps with
+ // scenarios where the application does not call NextReader() soon after
+ // this final read.
+ r.Close()
+ }
+ return n, err
+}
+
+func (r *flateReadWrapper) Close() error {
+ if r.fr == nil {
+ return io.ErrClosedPipe
+ }
+ err := r.fr.Close()
+ flateReaderPool.Put(r.fr)
+ r.fr = nil
+ return err
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/conn.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/conn.go
new file mode 100644
index 00000000000..ca46d2f793c
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/conn.go
@@ -0,0 +1,1201 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "encoding/binary"
+ "errors"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "strconv"
+ "sync"
+ "time"
+ "unicode/utf8"
+)
+
+const (
+	// Frame header byte 0 bits from Section 5.2 of RFC 6455
+	finalBit = 1 << 7
+	rsv1Bit  = 1 << 6
+	rsv2Bit  = 1 << 5
+	rsv3Bit  = 1 << 4
+
+	// Frame header byte 1 bits from Section 5.2 of RFC 6455
+	maskBit = 1 << 7
+
+	maxFrameHeaderSize         = 2 + 8 + 4 // Fixed header + length + mask
+	maxControlFramePayloadSize = 125
+
+	// writeWait is the deadline applied when writing close/ping/pong
+	// control frames from the default handlers and read-limit enforcement.
+	writeWait = time.Second
+
+	defaultReadBufferSize  = 4096
+	defaultWriteBufferSize = 4096
+
+	// continuationFrame is the wire opcode for a continuation frame;
+	// noFrame is the sentinel returned when no data frame was read.
+	continuationFrame = 0
+	noFrame           = -1
+)
+
+// Close codes defined in RFC 6455, section 11.7.
+// Codes 1005, 1006 and 1015 are reserved for local reporting and must not
+// appear in close frames on the wire (see validReceivedCloseCodes).
+const (
+	CloseNormalClosure           = 1000
+	CloseGoingAway               = 1001
+	CloseProtocolError           = 1002
+	CloseUnsupportedData         = 1003
+	CloseNoStatusReceived        = 1005
+	CloseAbnormalClosure         = 1006
+	CloseInvalidFramePayloadData = 1007
+	ClosePolicyViolation         = 1008
+	CloseMessageTooBig           = 1009
+	CloseMandatoryExtension      = 1010
+	CloseInternalServerErr       = 1011
+	CloseServiceRestart          = 1012
+	CloseTryAgainLater           = 1013
+	CloseTLSHandshake            = 1015
+)
+
+// The message types are defined in RFC 6455, section 11.8.
+const (
+	// TextMessage denotes a text data message. The text message payload is
+	// interpreted as UTF-8 encoded text data.
+	TextMessage = 1
+
+	// BinaryMessage denotes a binary data message.
+	BinaryMessage = 2
+
+	// CloseMessage denotes a close control message. The optional message
+	// payload contains a numeric code and text. Use the FormatCloseMessage
+	// function to format a close message payload.
+	CloseMessage = 8
+
+	// PingMessage denotes a ping control message. The optional message payload
+	// is UTF-8 encoded text.
+	PingMessage = 9
+
+	// PongMessage denotes a pong control message. The optional message payload
+	// is UTF-8 encoded text.
+	PongMessage = 10
+)
+
+// ErrCloseSent is returned when the application writes a message to the
+// connection after sending a close message.
+var ErrCloseSent = errors.New("websocket: close sent")
+
+// ErrReadLimit is returned when reading a message that is larger than the
+// read limit set for the connection.
+var ErrReadLimit = errors.New("websocket: read limit exceeded")
+
+// netError satisfies the net Error interface. It is used for write
+// timeouts and by hideTempErr to re-wrap temporary errors as permanent.
+type netError struct {
+	msg       string
+	temporary bool
+	timeout   bool
+}
+
+func (e *netError) Error() string   { return e.msg }
+func (e *netError) Temporary() bool { return e.temporary }
+func (e *netError) Timeout() bool   { return e.timeout }
+
+// CloseError represents a close message.
+type CloseError struct {
+	// Code is defined in RFC 6455, section 11.7.
+	Code int
+
+	// Text is the optional text payload.
+	Text string
+}
+
+func (e *CloseError) Error() string {
+ s := []byte("websocket: close ")
+ s = strconv.AppendInt(s, int64(e.Code), 10)
+ switch e.Code {
+ case CloseNormalClosure:
+ s = append(s, " (normal)"...)
+ case CloseGoingAway:
+ s = append(s, " (going away)"...)
+ case CloseProtocolError:
+ s = append(s, " (protocol error)"...)
+ case CloseUnsupportedData:
+ s = append(s, " (unsupported data)"...)
+ case CloseNoStatusReceived:
+ s = append(s, " (no status)"...)
+ case CloseAbnormalClosure:
+ s = append(s, " (abnormal closure)"...)
+ case CloseInvalidFramePayloadData:
+ s = append(s, " (invalid payload data)"...)
+ case ClosePolicyViolation:
+ s = append(s, " (policy violation)"...)
+ case CloseMessageTooBig:
+ s = append(s, " (message too big)"...)
+ case CloseMandatoryExtension:
+ s = append(s, " (mandatory extension missing)"...)
+ case CloseInternalServerErr:
+ s = append(s, " (internal server error)"...)
+ case CloseTLSHandshake:
+ s = append(s, " (TLS handshake error)"...)
+ }
+ if e.Text != "" {
+ s = append(s, ": "...)
+ s = append(s, e.Text...)
+ }
+ return string(s)
+}
+
+// IsCloseError returns boolean indicating whether the error is a *CloseError
+// with one of the specified codes.
+func IsCloseError(err error, codes ...int) bool {
+	e, ok := err.(*CloseError)
+	if !ok {
+		return false
+	}
+	for _, code := range codes {
+		if code == e.Code {
+			return true
+		}
+	}
+	return false
+}
+
+// IsUnexpectedCloseError returns boolean indicating whether the error is a
+// *CloseError with a code not in the list of expected codes. Errors that
+// are not *CloseError are never "unexpected close" errors.
+func IsUnexpectedCloseError(err error, expectedCodes ...int) bool {
+	e, ok := err.(*CloseError)
+	if !ok {
+		return false
+	}
+	for _, code := range expectedCodes {
+		if code == e.Code {
+			return false
+		}
+	}
+	return true
+}
+
+var (
+	errWriteTimeout        = &netError{msg: "websocket: write timeout", timeout: true, temporary: true}
+	errUnexpectedEOF       = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()}
+	errBadWriteOpCode      = errors.New("websocket: bad write message type")
+	errWriteClosed         = errors.New("websocket: write closed")
+	errInvalidControlFrame = errors.New("websocket: invalid control frame")
+)
+
+// newMaskKey returns a 4-byte client frame masking key.
+// NOTE(review): the key comes from math/rand, which is not a
+// cryptographically secure source — confirm this matches the intended
+// security posture for RFC 6455 masking.
+func newMaskKey() [4]byte {
+	n := rand.Uint32()
+	return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}
+}
+
+// hideTempErr re-wraps a temporary net.Error as a netError whose
+// Temporary method reports false, so callers treat the failure as
+// permanent. The timeout flag is preserved; other errors pass through.
+func hideTempErr(err error) error {
+	if e, ok := err.(net.Error); ok && e.Temporary() {
+		err = &netError{msg: e.Error(), timeout: e.Timeout()}
+	}
+	return err
+}
+
+// isControl reports whether frameType is a close, ping or pong frame.
+func isControl(frameType int) bool {
+	switch frameType {
+	case CloseMessage, PingMessage, PongMessage:
+		return true
+	default:
+		return false
+	}
+}
+
+// isData reports whether frameType is a text or binary data frame.
+func isData(frameType int) bool {
+	switch frameType {
+	case TextMessage, BinaryMessage:
+		return true
+	default:
+		return false
+	}
+}
+
+// validReceivedCloseCodes maps registered close codes to whether they may
+// appear in a close frame received from the peer. Codes mapped to false
+// (1005, 1006, 1015) are reserved for local status reporting only.
+var validReceivedCloseCodes = map[int]bool{
+	// see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
+
+	CloseNormalClosure:           true,
+	CloseGoingAway:               true,
+	CloseProtocolError:           true,
+	CloseUnsupportedData:         true,
+	CloseNoStatusReceived:        false,
+	CloseAbnormalClosure:         false,
+	CloseInvalidFramePayloadData: true,
+	ClosePolicyViolation:         true,
+	CloseMessageTooBig:           true,
+	CloseMandatoryExtension:      true,
+	CloseInternalServerErr:       true,
+	CloseServiceRestart:          true,
+	CloseTryAgainLater:           true,
+	CloseTLSHandshake:            false,
+}
+
+// isValidReceivedCloseCode reports whether code may legitimately appear in
+// a close frame received from the peer: either a registered code that is
+// valid on the wire, or a code in the private/application range 3000-4999.
+func isValidReceivedCloseCode(code int) bool {
+	if validReceivedCloseCodes[code] {
+		return true
+	}
+	return 3000 <= code && code <= 4999
+}
+
+// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this
+// interface. The type of the value stored in a pool is not specified.
+type BufferPool interface {
+	// Get gets a value from the pool or returns nil if the pool is empty.
+	Get() interface{}
+	// Put adds a value to the pool.
+	Put(interface{})
+}
+
+// writePoolData is the type added to the write buffer pool. This wrapper is
+// used to prevent applications from peeking at and depending on the values
+// added to the pool. See beginMessage/endMessage for the pool round trip.
+type writePoolData struct{ buf []byte }
+
+// The Conn type represents a WebSocket connection.
+type Conn struct {
+	conn        net.Conn
+	isServer    bool
+	subprotocol string
+
+	// Write fields
+	mu            chan struct{} // used as mutex to protect write to conn
+	writeBuf      []byte        // frame is constructed in this buffer.
+	writePool     BufferPool
+	writeBufSize  int
+	writeDeadline time.Time
+	writer        io.WriteCloser // the current writer returned to the application
+	isWriting     bool           // for best-effort concurrent write detection
+
+	writeErrMu sync.Mutex
+	writeErr   error // sticky: once set, all subsequent writes fail with it
+
+	enableWriteCompression bool
+	compressionLevel       int
+	newCompressionWriter   func(io.WriteCloser, int) io.WriteCloser
+
+	// Read fields
+	reader  io.ReadCloser // the current reader returned to the application
+	readErr error
+	br      *bufio.Reader
+	// bytes remaining in current frame.
+	// use setReadRemaining to safely update this value and prevent overflow
+	readRemaining int64
+	readFinal     bool  // true once the final frame of the current message has been read.
+	readLength    int64 // Message size.
+	readLimit     int64 // Maximum message size.
+	readMaskPos   int
+	readMaskKey   [4]byte
+	handlePong    func(string) error
+	handlePing    func(string) error
+	handleClose   func(int, string) error
+	readErrCount  int
+	messageReader *messageReader // the current low-level reader
+
+	readDecompress         bool // whether last read frame had RSV1 set
+	newDecompressionReader func(io.Reader) io.ReadCloser
+}
+
+// newConn creates a Conn on top of conn. If br is nil, a buffered reader
+// sized at least maxControlFramePayloadSize is allocated so control-frame
+// payloads can always be peeked in full. A write buffer is allocated
+// eagerly only when neither an existing buffer nor a buffer pool is
+// supplied.
+func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn {
+
+	if br == nil {
+		if readBufferSize == 0 {
+			readBufferSize = defaultReadBufferSize
+		} else if readBufferSize < maxControlFramePayloadSize {
+			// must be large enough for control frame
+			readBufferSize = maxControlFramePayloadSize
+		}
+		br = bufio.NewReaderSize(conn, readBufferSize)
+	}
+
+	if writeBufferSize <= 0 {
+		writeBufferSize = defaultWriteBufferSize
+	}
+	// Reserve room for the largest frame header so a frame can be built in
+	// a single buffer.
+	writeBufferSize += maxFrameHeaderSize
+
+	if writeBuf == nil && writeBufferPool == nil {
+		writeBuf = make([]byte, writeBufferSize)
+	}
+
+	// A buffered channel of capacity one serves as the write mutex; the
+	// token is deposited up front so the first writer can proceed.
+	mu := make(chan struct{}, 1)
+	mu <- struct{}{}
+	c := &Conn{
+		isServer:               isServer,
+		br:                     br,
+		conn:                   conn,
+		mu:                     mu,
+		readFinal:              true,
+		writeBuf:               writeBuf,
+		writePool:              writeBufferPool,
+		writeBufSize:           writeBufferSize,
+		enableWriteCompression: true,
+		compressionLevel:       defaultCompressionLevel,
+	}
+	// Install the default close/ping/pong handlers.
+	c.SetCloseHandler(nil)
+	c.SetPingHandler(nil)
+	c.SetPongHandler(nil)
+	return c
+}
+
+// setReadRemaining tracks the number of bytes remaining on the connection. If n
+// overflows, an ErrReadLimit is returned.
+//
+// A negative n arises when a 64-bit frame length overflows int64, so it is
+// reported as exceeding the read limit.
+func (c *Conn) setReadRemaining(n int64) error {
+	if n < 0 {
+		return ErrReadLimit
+	}
+
+	c.readRemaining = n
+	return nil
+}
+
+// Subprotocol returns the negotiated protocol for the connection.
+func (c *Conn) Subprotocol() string {
+	return c.subprotocol
+}
+
+// Close closes the underlying network connection without sending or waiting
+// for a close message. To perform a clean closing handshake, write a
+// CloseMessage (e.g. via WriteControl) before calling Close.
+func (c *Conn) Close() error {
+	return c.conn.Close()
+}
+
+// LocalAddr returns the local network address.
+func (c *Conn) LocalAddr() net.Addr {
+	return c.conn.LocalAddr()
+}
+
+// RemoteAddr returns the remote network address.
+func (c *Conn) RemoteAddr() net.Addr {
+	return c.conn.RemoteAddr()
+}
+
+// Write methods
+
+// writeFatal records err (temporary errors re-wrapped as permanent) as the
+// connection's sticky write error — the first error wins — and returns it.
+func (c *Conn) writeFatal(err error) error {
+	err = hideTempErr(err)
+	c.writeErrMu.Lock()
+	if c.writeErr == nil {
+		c.writeErr = err
+	}
+	c.writeErrMu.Unlock()
+	return err
+}
+
+// read returns the next n bytes from the buffered reader, translating
+// io.EOF into errUnexpectedEOF. The returned slice aliases the bufio
+// buffer and is only valid until the next read on c.br.
+func (c *Conn) read(n int) ([]byte, error) {
+	p, err := c.br.Peek(n)
+	if err == io.EOF {
+		err = errUnexpectedEOF
+	}
+	c.br.Discard(len(p))
+	return p, err
+}
+
+// write writes buf0 (and buf1 when non-empty) to the connection under the
+// write mutex with the given deadline. A failed write poisons the
+// connection via writeFatal; writing a close frame makes all later writes
+// fail with ErrCloseSent.
+func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error {
+	<-c.mu
+	defer func() { c.mu <- struct{}{} }()
+
+	// Fail fast if a previous write already broke the connection.
+	c.writeErrMu.Lock()
+	err := c.writeErr
+	c.writeErrMu.Unlock()
+	if err != nil {
+		return err
+	}
+
+	c.conn.SetWriteDeadline(deadline)
+	if len(buf1) == 0 {
+		_, err = c.conn.Write(buf0)
+	} else {
+		err = c.writeBufs(buf0, buf1)
+	}
+	if err != nil {
+		return c.writeFatal(err)
+	}
+	if frameType == CloseMessage {
+		c.writeFatal(ErrCloseSent)
+	}
+	return nil
+}
+
+// WriteControl writes a control message with the given deadline. The allowed
+// message types are CloseMessage, PingMessage and PongMessage.
+func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error {
+	if !isControl(messageType) {
+		return errBadWriteOpCode
+	}
+	if len(data) > maxControlFramePayloadSize {
+		return errInvalidControlFrame
+	}
+
+	// Control frames are always final; client frames also set the mask bit.
+	b0 := byte(messageType) | finalBit
+	b1 := byte(len(data))
+	if !c.isServer {
+		b1 |= maskBit
+	}
+
+	buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize)
+	buf = append(buf, b0, b1)
+
+	if c.isServer {
+		buf = append(buf, data...)
+	} else {
+		// Client frames carry a 4-byte mask key followed by the masked
+		// payload (payload starts at offset 6: 2 header + 4 key bytes).
+		key := newMaskKey()
+		buf = append(buf, key[:]...)
+		buf = append(buf, data...)
+		maskBytes(key, 0, buf[6:])
+	}
+
+	// Bound the wait for the write mutex. A zero deadline means wait
+	// effectively forever (1000 hours).
+	d := 1000 * time.Hour
+	if !deadline.IsZero() {
+		d = deadline.Sub(time.Now())
+		if d < 0 {
+			return errWriteTimeout
+		}
+	}
+
+	timer := time.NewTimer(d)
+	select {
+	case <-c.mu:
+		timer.Stop()
+	case <-timer.C:
+		return errWriteTimeout
+	}
+	defer func() { c.mu <- struct{}{} }()
+
+	// Fail fast if a previous write already broke the connection.
+	c.writeErrMu.Lock()
+	err := c.writeErr
+	c.writeErrMu.Unlock()
+	if err != nil {
+		return err
+	}
+
+	c.conn.SetWriteDeadline(deadline)
+	_, err = c.conn.Write(buf)
+	if err != nil {
+		return c.writeFatal(err)
+	}
+	if messageType == CloseMessage {
+		// Poison later writes: a close has been sent.
+		c.writeFatal(ErrCloseSent)
+	}
+	return err
+}
+
+// beginMessage prepares a connection and message writer for a new message.
+// It closes any writer left open by the application, validates the message
+// type, checks the sticky write error, and ensures a frame buffer is
+// available (taking one from the pool when configured).
+func (c *Conn) beginMessage(mw *messageWriter, messageType int) error {
+	// Close previous writer if not already closed by the application. It's
+	// probably better to return an error in this situation, but we cannot
+	// change this without breaking existing applications.
+	if c.writer != nil {
+		c.writer.Close()
+		c.writer = nil
+	}
+
+	if !isControl(messageType) && !isData(messageType) {
+		return errBadWriteOpCode
+	}
+
+	c.writeErrMu.Lock()
+	err := c.writeErr
+	c.writeErrMu.Unlock()
+	if err != nil {
+		return err
+	}
+
+	mw.c = c
+	mw.frameType = messageType
+	// Leave room at the front of the buffer for the frame header.
+	mw.pos = maxFrameHeaderSize
+
+	if c.writeBuf == nil {
+		wpd, ok := c.writePool.Get().(writePoolData)
+		if ok {
+			c.writeBuf = wpd.buf
+		} else {
+			c.writeBuf = make([]byte, c.writeBufSize)
+		}
+	}
+	return nil
+}
+
+// NextWriter returns a writer for the next message to send. The writer's Close
+// method flushes the complete message to the network.
+//
+// There can be at most one open writer on a connection. NextWriter closes the
+// previous writer if the application has not already done so.
+//
+// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and
+// PongMessage) are supported.
+func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
+	var mw messageWriter
+	if err := c.beginMessage(&mw, messageType); err != nil {
+		return nil, err
+	}
+	c.writer = &mw
+	// Wrap the writer in a compressor for data messages when compression
+	// was negotiated and is enabled.
+	if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) {
+		w := c.newCompressionWriter(c.writer, c.compressionLevel)
+		mw.compress = true
+		c.writer = w
+	}
+	return c.writer, nil
+}
+
+// messageWriter writes a single message as a sequence of frames built in
+// the connection's write buffer.
+type messageWriter struct {
+	c         *Conn
+	compress  bool // whether next call to flushFrame should set RSV1
+	pos       int  // end of data in writeBuf.
+	frameType int  // type of the current frame.
+	err       error
+}
+
+// endMessage marks the writer as finished with err, detaches it from the
+// connection, and returns the write buffer to the pool when one is in use.
+// If the writer already ended, the passed err is returned unchanged
+// without being recorded.
+func (w *messageWriter) endMessage(err error) error {
+	if w.err != nil {
+		return err
+	}
+	c := w.c
+	w.err = err
+	c.writer = nil
+	if c.writePool != nil {
+		c.writePool.Put(writePoolData{buf: c.writeBuf})
+		c.writeBuf = nil
+	}
+	return err
+}
+
+// flushFrame writes buffered data and extra as a frame to the network. The
+// final argument indicates that this is the last frame in the message.
+func (w *messageWriter) flushFrame(final bool, extra []byte) error {
+	c := w.c
+	length := w.pos - maxFrameHeaderSize + len(extra)
+
+	// Check for invalid control frames.
+	if isControl(w.frameType) &&
+		(!final || length > maxControlFramePayloadSize) {
+		return w.endMessage(errInvalidControlFrame)
+	}
+
+	b0 := byte(w.frameType)
+	if final {
+		b0 |= finalBit
+	}
+	if w.compress {
+		b0 |= rsv1Bit
+	}
+	// RSV1 is only set on the first frame of a compressed message.
+	w.compress = false
+
+	b1 := byte(0)
+	if !c.isServer {
+		b1 |= maskBit
+	}
+
+	// Assume that the frame starts at beginning of c.writeBuf.
+	framePos := 0
+	if c.isServer {
+		// Adjust up if mask not included in the header.
+		framePos = 4
+	}
+
+	// Encode the payload length in the shortest form allowed by RFC 6455,
+	// section 5.2, sliding framePos forward so the header ends exactly at
+	// maxFrameHeaderSize where the payload begins.
+	switch {
+	case length >= 65536:
+		c.writeBuf[framePos] = b0
+		c.writeBuf[framePos+1] = b1 | 127
+		binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length))
+	case length > 125:
+		framePos += 6
+		c.writeBuf[framePos] = b0
+		c.writeBuf[framePos+1] = b1 | 126
+		binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length))
+	default:
+		framePos += 8
+		c.writeBuf[framePos] = b0
+		c.writeBuf[framePos+1] = b1 | byte(length)
+	}
+
+	if !c.isServer {
+		// Client frames are masked in place; the mask key occupies the
+		// last four header bytes.
+		key := newMaskKey()
+		copy(c.writeBuf[maxFrameHeaderSize-4:], key[:])
+		maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos])
+		if len(extra) > 0 {
+			return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode")))
+		}
+	}
+
+	// Write the buffers to the connection with best-effort detection of
+	// concurrent writes. See the concurrency section in the package
+	// documentation for more info.
+
+	if c.isWriting {
+		panic("concurrent write to websocket connection")
+	}
+	c.isWriting = true
+
+	err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra)
+
+	if !c.isWriting {
+		panic("concurrent write to websocket connection")
+	}
+	c.isWriting = false
+
+	if err != nil {
+		return w.endMessage(err)
+	}
+
+	if final {
+		w.endMessage(errWriteClosed)
+		return nil
+	}
+
+	// Setup for next frame.
+	w.pos = maxFrameHeaderSize
+	w.frameType = continuationFrame
+	return nil
+}
+
+// ncopy returns how many bytes (at most max) can currently be appended to
+// the write buffer, flushing a non-final frame first if the buffer is full.
+func (w *messageWriter) ncopy(max int) (int, error) {
+	n := len(w.c.writeBuf) - w.pos
+	if n <= 0 {
+		if err := w.flushFrame(false, nil); err != nil {
+			return 0, err
+		}
+		n = len(w.c.writeBuf) - w.pos
+	}
+	if n > max {
+		n = max
+	}
+	return n, nil
+}
+
+// Write appends p to the current message, flushing intermediate frames as
+// the write buffer fills. On the server side, payloads larger than twice
+// the buffer bypass buffering and are written as the frame's extra data.
+func (w *messageWriter) Write(p []byte) (int, error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+
+	if len(p) > 2*len(w.c.writeBuf) && w.c.isServer {
+		// Don't buffer large messages.
+		err := w.flushFrame(false, p)
+		if err != nil {
+			return 0, err
+		}
+		return len(p), nil
+	}
+
+	nn := len(p)
+	for len(p) > 0 {
+		n, err := w.ncopy(len(p))
+		if err != nil {
+			return 0, err
+		}
+		copy(w.c.writeBuf[w.pos:], p[:n])
+		w.pos += n
+		p = p[n:]
+	}
+	return nn, nil
+}
+
+// WriteString is the string counterpart of Write, without the
+// large-message fast path.
+func (w *messageWriter) WriteString(p string) (int, error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+
+	nn := len(p)
+	for len(p) > 0 {
+		n, err := w.ncopy(len(p))
+		if err != nil {
+			return 0, err
+		}
+		copy(w.c.writeBuf[w.pos:], p[:n])
+		w.pos += n
+		p = p[n:]
+	}
+	return nn, nil
+}
+
+// ReadFrom copies r into the message until io.EOF, flushing an
+// intermediate frame whenever the write buffer fills. It implements
+// io.ReaderFrom; io.EOF is translated to a nil error.
+func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	for {
+		if w.pos == len(w.c.writeBuf) {
+			err = w.flushFrame(false, nil)
+			if err != nil {
+				break
+			}
+		}
+		var n int
+		n, err = r.Read(w.c.writeBuf[w.pos:])
+		w.pos += n
+		nn += int64(n)
+		if err != nil {
+			if err == io.EOF {
+				err = nil
+			}
+			break
+		}
+	}
+	return nn, err
+}
+
+// Close flushes the final frame of the message. Further use of the writer
+// returns an error.
+func (w *messageWriter) Close() error {
+	if w.err != nil {
+		return w.err
+	}
+	return w.flushFrame(true, nil)
+}
+
+// WritePreparedMessage writes prepared message into connection.
+func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error {
+	// Obtain frame bytes pre-encoded for this connection's role and
+	// compression settings.
+	frameType, frameData, err := pm.frame(prepareKey{
+		isServer:         c.isServer,
+		compress:         c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType),
+		compressionLevel: c.compressionLevel,
+	})
+	if err != nil {
+		return err
+	}
+	// Best-effort concurrent write detection, as in flushFrame.
+	if c.isWriting {
+		panic("concurrent write to websocket connection")
+	}
+	c.isWriting = true
+	err = c.write(frameType, c.writeDeadline, frameData, nil)
+	if !c.isWriting {
+		panic("concurrent write to websocket connection")
+	}
+	c.isWriting = false
+	return err
+}
+
+// WriteMessage is a helper method for getting a writer using NextWriter,
+// writing the message and closing the writer.
+func (c *Conn) WriteMessage(messageType int, data []byte) error {
+
+	if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) {
+		// Fast path with no allocations and single frame.
+
+		var mw messageWriter
+		if err := c.beginMessage(&mw, messageType); err != nil {
+			return err
+		}
+		n := copy(c.writeBuf[mw.pos:], data)
+		mw.pos += n
+		// Whatever did not fit the buffer is passed as the frame's extra
+		// data.
+		data = data[n:]
+		return mw.flushFrame(true, data)
+	}
+
+	w, err := c.NextWriter(messageType)
+	if err != nil {
+		return err
+	}
+	if _, err = w.Write(data); err != nil {
+		return err
+	}
+	return w.Close()
+}
+
+// SetWriteDeadline sets the write deadline on the underlying network
+// connection. After a write has timed out, the websocket state is corrupt and
+// all future writes will return an error. A zero value for t means writes will
+// not time out.
+func (c *Conn) SetWriteDeadline(t time.Time) error {
+	c.writeDeadline = t
+	return nil
+}
+
+// Read methods
+
+// advanceFrame reads and processes the next frame header. Control frames
+// (close, ping, pong) are consumed and handled here; for data frames the
+// payload is left on the wire for the message reader and the frame type
+// is returned. On protocol violations it returns noFrame with an error.
+func (c *Conn) advanceFrame() (int, error) {
+	// 1. Skip remainder of previous frame.
+
+	if c.readRemaining > 0 {
+		if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil {
+			return noFrame, err
+		}
+	}
+
+	// 2. Read and parse first two bytes of frame header.
+
+	p, err := c.read(2)
+	if err != nil {
+		return noFrame, err
+	}
+
+	final := p[0]&finalBit != 0
+	frameType := int(p[0] & 0xf)
+	mask := p[1]&maskBit != 0
+	c.setReadRemaining(int64(p[1] & 0x7f))
+
+	c.readDecompress = false
+	if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 {
+		// RSV1 marks a compressed message; clear it so the reserved-bit
+		// check below does not reject the frame.
+		c.readDecompress = true
+		p[0] &^= rsv1Bit
+	}
+
+	if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 {
+		return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16))
+	}
+
+	switch frameType {
+	case CloseMessage, PingMessage, PongMessage:
+		if c.readRemaining > maxControlFramePayloadSize {
+			return noFrame, c.handleProtocolError("control frame length > 125")
+		}
+		if !final {
+			return noFrame, c.handleProtocolError("control frame not final")
+		}
+	case TextMessage, BinaryMessage:
+		if !c.readFinal {
+			return noFrame, c.handleProtocolError("message start before final message frame")
+		}
+		c.readFinal = final
+	case continuationFrame:
+		if c.readFinal {
+			return noFrame, c.handleProtocolError("continuation after final message frame")
+		}
+		c.readFinal = final
+	default:
+		return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType))
+	}
+
+	// 3. Read and parse frame length as per
+	// https://tools.ietf.org/html/rfc6455#section-5.2
+	//
+	// The length of the "Payload data", in bytes: if 0-125, that is the payload
+	// length.
+	// - If 126, the following 2 bytes interpreted as a 16-bit unsigned
+	// integer are the payload length.
+	// - If 127, the following 8 bytes interpreted as
+	// a 64-bit unsigned integer (the most significant bit MUST be 0) are the
+	// payload length. Multibyte length quantities are expressed in network byte
+	// order.
+
+	switch c.readRemaining {
+	case 126:
+		p, err := c.read(2)
+		if err != nil {
+			return noFrame, err
+		}
+
+		if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil {
+			return noFrame, err
+		}
+	case 127:
+		p, err := c.read(8)
+		if err != nil {
+			return noFrame, err
+		}
+
+		// setReadRemaining rejects 64-bit lengths that overflow int64.
+		if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil {
+			return noFrame, err
+		}
+	}
+
+	// 4. Handle frame masking.
+
+	if mask != c.isServer {
+		// Clients must mask and servers must not (RFC 6455, section 5.3).
+		return noFrame, c.handleProtocolError("incorrect mask flag")
+	}
+
+	if mask {
+		c.readMaskPos = 0
+		p, err := c.read(len(c.readMaskKey))
+		if err != nil {
+			return noFrame, err
+		}
+		copy(c.readMaskKey[:], p)
+	}
+
+	// 5. For text and binary messages, enforce read limit and return.
+
+	if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage {
+
+		c.readLength += c.readRemaining
+		// Don't allow readLength to overflow in the presence of a large readRemaining
+		// counter.
+		if c.readLength < 0 {
+			return noFrame, ErrReadLimit
+		}
+
+		if c.readLimit > 0 && c.readLength > c.readLimit {
+			// Tell the peer the message is too big before failing the read.
+			c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait))
+			return noFrame, ErrReadLimit
+		}
+
+		return frameType, nil
+	}
+
+	// 6. Read control frame payload.
+
+	var payload []byte
+	if c.readRemaining > 0 {
+		payload, err = c.read(int(c.readRemaining))
+		c.setReadRemaining(0)
+		if err != nil {
+			return noFrame, err
+		}
+		if c.isServer {
+			maskBytes(c.readMaskKey, 0, payload)
+		}
+	}
+
+	// 7. Process control frame payload.
+
+	switch frameType {
+	case PongMessage:
+		if err := c.handlePong(string(payload)); err != nil {
+			return noFrame, err
+		}
+	case PingMessage:
+		if err := c.handlePing(string(payload)); err != nil {
+			return noFrame, err
+		}
+	case CloseMessage:
+		closeCode := CloseNoStatusReceived
+		closeText := ""
+		if len(payload) >= 2 {
+			closeCode = int(binary.BigEndian.Uint16(payload))
+			if !isValidReceivedCloseCode(closeCode) {
+				return noFrame, c.handleProtocolError("invalid close code")
+			}
+			closeText = string(payload[2:])
+			if !utf8.ValidString(closeText) {
+				return noFrame, c.handleProtocolError("invalid utf8 payload in close frame")
+			}
+		}
+		if err := c.handleClose(closeCode, closeText); err != nil {
+			return noFrame, err
+		}
+		// A close frame always terminates the read with a CloseError.
+		return noFrame, &CloseError{Code: closeCode, Text: closeText}
+	}
+
+	return frameType, nil
+}
+
+// handleProtocolError sends a protocol-error close frame to the peer on a
+// best-effort basis (short deadline, write error ignored) and returns the
+// violation as an error for the caller.
+func (c *Conn) handleProtocolError(message string) error {
+	c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait))
+	return errors.New("websocket: " + message)
+}
+
+// NextReader returns the next data message received from the peer. The
+// returned messageType is either TextMessage or BinaryMessage.
+//
+// There can be at most one open reader on a connection. NextReader discards
+// the previous message if the application has not already consumed it.
+//
+// Applications must break out of the application's read loop when this method
+// returns a non-nil error value. Errors returned from this method are
+// permanent. Once this method returns a non-nil error, all subsequent calls to
+// this method return the same error.
+func (c *Conn) NextReader() (messageType int, r io.Reader, err error) {
+	// Close previous reader, only relevant for decompression.
+	if c.reader != nil {
+		c.reader.Close()
+		c.reader = nil
+	}
+
+	c.messageReader = nil
+	c.readLength = 0
+
+	// Advance frames until a data message starts (control frames are
+	// handled inside advanceFrame) or the connection fails.
+	for c.readErr == nil {
+		frameType, err := c.advanceFrame()
+		if err != nil {
+			c.readErr = hideTempErr(err)
+			break
+		}
+
+		if frameType == TextMessage || frameType == BinaryMessage {
+			c.messageReader = &messageReader{c}
+			c.reader = c.messageReader
+			if c.readDecompress {
+				c.reader = c.newDecompressionReader(c.reader)
+			}
+			return frameType, c.reader, nil
+		}
+	}
+
+	// Applications that do not handle the error returned from this method
+	// spin in a tight loop on connection failure. To help application
+	// developers detect this error, panic on repeated reads to the failed
+	// connection.
+	c.readErrCount++
+	if c.readErrCount >= 1000 {
+		panic("repeated read on failed websocket connection")
+	}
+
+	return noFrame, nil, c.readErr
+}
+
+// messageReader reads the payload of a single data message, advancing to
+// continuation frames as needed. It is only usable while it is the
+// connection's current reader.
+type messageReader struct{ c *Conn }
+
+func (r *messageReader) Read(b []byte) (int, error) {
+	c := r.c
+	// A stale reader (superseded by a later NextReader call) reports EOF.
+	if c.messageReader != r {
+		return 0, io.EOF
+	}
+
+	for c.readErr == nil {
+
+		if c.readRemaining > 0 {
+			if int64(len(b)) > c.readRemaining {
+				b = b[:c.readRemaining]
+			}
+			n, err := c.br.Read(b)
+			c.readErr = hideTempErr(err)
+			if c.isServer {
+				// Unmask in place, continuing from the previous read's
+				// mask offset.
+				c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n])
+			}
+			rem := c.readRemaining
+			rem -= int64(n)
+			c.setReadRemaining(rem)
+			if c.readRemaining > 0 && c.readErr == io.EOF {
+				c.readErr = errUnexpectedEOF
+			}
+			return n, c.readErr
+		}
+
+		if c.readFinal {
+			c.messageReader = nil
+			return 0, io.EOF
+		}
+
+		frameType, err := c.advanceFrame()
+		switch {
+		case err != nil:
+			c.readErr = hideTempErr(err)
+		case frameType == TextMessage || frameType == BinaryMessage:
+			c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader")
+		}
+	}
+
+	err := c.readErr
+	if err == io.EOF && c.messageReader == r {
+		// EOF mid-message means the connection died, not message end.
+		err = errUnexpectedEOF
+	}
+	return 0, err
+}
+
+// Close is a no-op; it exists to satisfy io.ReadCloser.
+func (r *messageReader) Close() error {
+	return nil
+}
+
+// ReadMessage is a helper method for getting a reader using NextReader and
+// reading from that reader to a buffer. The entire message is buffered in
+// memory; use NextReader directly to stream large messages.
+func (c *Conn) ReadMessage() (messageType int, p []byte, err error) {
+	var r io.Reader
+	messageType, r, err = c.NextReader()
+	if err != nil {
+		return messageType, nil, err
+	}
+	p, err = ioutil.ReadAll(r)
+	return messageType, p, err
+}
+
+// SetReadDeadline sets the read deadline on the underlying network connection.
+// After a read has timed out, the websocket connection state is corrupt and
+// all future reads will return an error. A zero value for t means reads will
+// not time out.
+func (c *Conn) SetReadDeadline(t time.Time) error {
+	return c.conn.SetReadDeadline(t)
+}
+
+// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a
+// message exceeds the limit, the connection sends a close message to the peer
+// and returns ErrReadLimit to the application.
+func (c *Conn) SetReadLimit(limit int64) {
+	c.readLimit = limit
+}
+
+// CloseHandler returns the current close handler
+func (c *Conn) CloseHandler() func(code int, text string) error {
+	return c.handleClose
+}
+
+// SetCloseHandler sets the handler for close messages received from the peer.
+// The code argument to h is the received close code or CloseNoStatusReceived
+// if the close message is empty. The default close handler sends a close
+// message back to the peer.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// close messages as described in the section on Control Messages above.
+//
+// The connection read methods return a CloseError when a close message is
+// received. Most applications should handle close messages as part of their
+// normal error handling. Applications should only set a close handler when the
+// application must perform some action before sending a close message back to
+// the peer.
+func (c *Conn) SetCloseHandler(h func(code int, text string) error) {
+	if h == nil {
+		// Default handler: echo a close message with the received code
+		// back to the peer; the WriteControl error is deliberately
+		// ignored (best effort).
+		h = func(code int, text string) error {
+			message := FormatCloseMessage(code, "")
+			c.WriteControl(CloseMessage, message, time.Now().Add(writeWait))
+			return nil
+		}
+	}
+	c.handleClose = h
+}
+
+// PingHandler returns the current ping handler
+func (c *Conn) PingHandler() func(appData string) error {
+	return c.handlePing
+}
+
+// SetPingHandler sets the handler for ping messages received from the peer.
+// The appData argument to h is the PING message application data. The default
+// ping handler sends a pong to the peer.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// ping messages as described in the section on Control Messages above.
+func (c *Conn) SetPingHandler(h func(appData string) error) {
+	if h == nil {
+		// Default handler: answer pings with a pong. ErrCloseSent and
+		// temporary net errors are swallowed so they do not abort the
+		// application's read loop.
+		h = func(message string) error {
+			err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait))
+			if err == ErrCloseSent {
+				return nil
+			} else if e, ok := err.(net.Error); ok && e.Temporary() {
+				return nil
+			}
+			return err
+		}
+	}
+	c.handlePing = h
+}
+
+// PongHandler returns the current pong handler
+func (c *Conn) PongHandler() func(appData string) error {
+	return c.handlePong
+}
+
+// SetPongHandler sets the handler for pong messages received from the peer.
+// The appData argument to h is the PONG message application data. The default
+// pong handler does nothing.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// pong messages as described in the section on Control Messages above.
+func (c *Conn) SetPongHandler(h func(appData string) error) {
+	if h == nil {
+		// Default handler: ignore pongs.
+		h = func(string) error { return nil }
+	}
+	c.handlePong = h
+}
+
+// UnderlyingConn returns the internal net.Conn. This can be used to apply
+// further modifications to connection specific flags.
+func (c *Conn) UnderlyingConn() net.Conn {
+	return c.conn
+}
+
+// EnableWriteCompression enables and disables write compression of
+// subsequent text and binary messages. This function is a noop if
+// compression was not negotiated with the peer.
+func (c *Conn) EnableWriteCompression(enable bool) {
+	c.enableWriteCompression = enable
+}
+
+// SetCompressionLevel sets the flate compression level for subsequent text and
+// binary messages. This function is a noop if compression was not negotiated
+// with the peer. See the compress/flate package for a description of
+// compression levels.
+func (c *Conn) SetCompressionLevel(level int) error {
+	// Reject levels outside the range accepted by isValidCompressionLevel.
+	if !isValidCompressionLevel(level) {
+		return errors.New("websocket: invalid compression level")
+	}
+	c.compressionLevel = level
+	return nil
+}
+
+// FormatCloseMessage formats closeCode and text as a WebSocket close
+// message payload: a big-endian 16-bit code followed by the UTF-8 text.
+// An empty (but non-nil) message is returned for CloseNoStatusReceived,
+// because that code must never be sent on the wire.
+func FormatCloseMessage(closeCode int, text string) []byte {
+	if closeCode == CloseNoStatusReceived {
+		// Return empty message because it's illegal to send
+		// CloseNoStatusReceived. Return non-nil value in case application
+		// checks for nil.
+		return []byte{}
+	}
+	buf := make([]byte, 2, 2+len(text))
+	binary.BigEndian.PutUint16(buf, uint16(closeCode))
+	return append(buf, text...)
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/conn_write.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/conn_write.go
new file mode 100644
index 00000000000..a509a21f87a
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/conn_write.go
@@ -0,0 +1,15 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package websocket
+
+import "net"
+
+func (c *Conn) writeBufs(bufs ...[]byte) error {
+ b := net.Buffers(bufs)
+ _, err := b.WriteTo(c.conn)
+ return err
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/conn_write_legacy.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/conn_write_legacy.go
new file mode 100644
index 00000000000..37edaff5a57
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/conn_write_legacy.go
@@ -0,0 +1,18 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8
+
+package websocket
+
+func (c *Conn) writeBufs(bufs ...[]byte) error {
+ for _, buf := range bufs {
+ if len(buf) > 0 {
+ if _, err := c.conn.Write(buf); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/doc.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/doc.go
new file mode 100644
index 00000000000..8db0cef95a2
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/doc.go
@@ -0,0 +1,227 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package websocket implements the WebSocket protocol defined in RFC 6455.
+//
+// Overview
+//
+// The Conn type represents a WebSocket connection. A server application calls
+// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
+//
+// var upgrader = websocket.Upgrader{
+// ReadBufferSize: 1024,
+// WriteBufferSize: 1024,
+// }
+//
+// func handler(w http.ResponseWriter, r *http.Request) {
+// conn, err := upgrader.Upgrade(w, r, nil)
+// if err != nil {
+// log.Println(err)
+// return
+// }
+// ... Use conn to send and receive messages.
+// }
+//
+// Call the connection's WriteMessage and ReadMessage methods to send and
+// receive messages as a slice of bytes. This snippet of code shows how to echo
+// messages using these methods:
+//
+// for {
+// messageType, p, err := conn.ReadMessage()
+// if err != nil {
+// log.Println(err)
+// return
+// }
+// if err := conn.WriteMessage(messageType, p); err != nil {
+// log.Println(err)
+// return
+// }
+// }
+//
+// In above snippet of code, p is a []byte and messageType is an int with value
+// websocket.BinaryMessage or websocket.TextMessage.
+//
+// An application can also send and receive messages using the io.WriteCloser
+// and io.Reader interfaces. To send a message, call the connection NextWriter
+// method to get an io.WriteCloser, write the message to the writer and close
+// the writer when done. To receive a message, call the connection NextReader
+// method to get an io.Reader and read until io.EOF is returned. This snippet
+// shows how to echo messages using the NextWriter and NextReader methods:
+//
+// for {
+// messageType, r, err := conn.NextReader()
+// if err != nil {
+// return
+// }
+// w, err := conn.NextWriter(messageType)
+// if err != nil {
+// return err
+// }
+// if _, err := io.Copy(w, r); err != nil {
+// return err
+// }
+// if err := w.Close(); err != nil {
+// return err
+// }
+// }
+//
+// Data Messages
+//
+// The WebSocket protocol distinguishes between text and binary data messages.
+// Text messages are interpreted as UTF-8 encoded text. The interpretation of
+// binary messages is left to the application.
+//
+// This package uses the TextMessage and BinaryMessage integer constants to
+// identify the two data message types. The ReadMessage and NextReader methods
+// return the type of the received message. The messageType argument to the
+// WriteMessage and NextWriter methods specifies the type of a sent message.
+//
+// It is the application's responsibility to ensure that text messages are
+// valid UTF-8 encoded text.
+//
+// Control Messages
+//
+// The WebSocket protocol defines three types of control messages: close, ping
+// and pong. Call the connection WriteControl, WriteMessage or NextWriter
+// methods to send a control message to the peer.
+//
+// Connections handle received close messages by calling the handler function
+// set with the SetCloseHandler method and by returning a *CloseError from the
+// NextReader, ReadMessage or the message Read method. The default close
+// handler sends a close message to the peer.
+//
+// Connections handle received ping messages by calling the handler function
+// set with the SetPingHandler method. The default ping handler sends a pong
+// message to the peer.
+//
+// Connections handle received pong messages by calling the handler function
+// set with the SetPongHandler method. The default pong handler does nothing.
+// If an application sends ping messages, then the application should set a
+// pong handler to receive the corresponding pong.
+//
+// The control message handler functions are called from the NextReader,
+// ReadMessage and message reader Read methods. The default close and ping
+// handlers can block these methods for a short time when the handler writes to
+// the connection.
+//
+// The application must read the connection to process close, ping and pong
+// messages sent from the peer. If the application is not otherwise interested
+// in messages from the peer, then the application should start a goroutine to
+// read and discard messages from the peer. A simple example is:
+//
+// func readLoop(c *websocket.Conn) {
+// for {
+// if _, _, err := c.NextReader(); err != nil {
+// c.Close()
+// break
+// }
+// }
+// }
+//
+// Concurrency
+//
+// Connections support one concurrent reader and one concurrent writer.
+//
+// Applications are responsible for ensuring that no more than one goroutine
+// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
+// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
+// that no more than one goroutine calls the read methods (NextReader,
+// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
+// concurrently.
+//
+// The Close and WriteControl methods can be called concurrently with all other
+// methods.
+//
+// Origin Considerations
+//
+// Web browsers allow Javascript applications to open a WebSocket connection to
+// any host. It's up to the server to enforce an origin policy using the Origin
+// request header sent by the browser.
+//
+// The Upgrader calls the function specified in the CheckOrigin field to check
+// the origin. If the CheckOrigin function returns false, then the Upgrade
+// method fails the WebSocket handshake with HTTP status 403.
+//
+// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
+// the handshake if the Origin request header is present and the Origin host is
+// not equal to the Host request header.
+//
+// The deprecated package-level Upgrade function does not perform origin
+// checking. The application is responsible for checking the Origin header
+// before calling the Upgrade function.
+//
+// Buffers
+//
+// Connections buffer network input and output to reduce the number
+// of system calls when reading or writing messages.
+//
+// Write buffers are also used for constructing WebSocket frames. See RFC 6455,
+// Section 5 for a discussion of message framing. A WebSocket frame header is
+// written to the network each time a write buffer is flushed to the network.
+// Decreasing the size of the write buffer can increase the amount of framing
+// overhead on the connection.
+//
+// The buffer sizes in bytes are specified by the ReadBufferSize and
+// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default
+// size of 4096 when a buffer size field is set to zero. The Upgrader reuses
+// buffers created by the HTTP server when a buffer size field is set to zero.
+// The HTTP server buffers have a size of 4096 at the time of this writing.
+//
+// The buffer sizes do not limit the size of a message that can be read or
+// written by a connection.
+//
+// Buffers are held for the lifetime of the connection by default. If the
+// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the
+// write buffer only when writing a message.
+//
+// Applications should tune the buffer sizes to balance memory use and
+// performance. Increasing the buffer size uses more memory, but can reduce the
+// number of system calls to read or write the network. In the case of writing,
+// increasing the buffer size can reduce the number of frame headers written to
+// the network.
+//
+// Some guidelines for setting buffer parameters are:
+//
+// Limit the buffer sizes to the maximum expected message size. Buffers larger
+// than the largest message do not provide any benefit.
+//
+// Depending on the distribution of message sizes, setting the buffer size to
+// a value less than the maximum expected message size can greatly reduce memory
+// use with a small impact on performance. Here's an example: If 99% of the
+// messages are smaller than 256 bytes and the maximum message size is 512
+// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls
+// than a buffer size of 512 bytes. The memory savings is 50%.
+//
+// A write buffer pool is useful when the application has a modest number of
+// writes over a large number of connections. When buffers are pooled, a larger
+// buffer size has a reduced impact on total memory use and has the benefit of
+// reducing system calls and frame overhead.
+//
+// Compression EXPERIMENTAL
+//
+// Per message compression extensions (RFC 7692) are experimentally supported
+// by this package in a limited capacity. Setting the EnableCompression option
+// to true in Dialer or Upgrader will attempt to negotiate per message deflate
+// support.
+//
+// var upgrader = websocket.Upgrader{
+// EnableCompression: true,
+// }
+//
+// If compression was successfully negotiated with the connection's peer, any
+// message received in compressed form will be automatically decompressed.
+// All Read methods will return uncompressed bytes.
+//
+// Per message compression of messages written to a connection can be enabled
+// or disabled by calling the corresponding Conn method:
+//
+// conn.EnableWriteCompression(false)
+//
+// Currently this package does not support compression with "context takeover".
+// This means that messages must be compressed and decompressed in isolation,
+// without retaining sliding window or dictionary state across messages. For
+// more details refer to RFC 7692.
+//
+// Use of compression is experimental and may result in decreased performance.
+package websocket
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/go.mod b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/go.mod
new file mode 100644
index 00000000000..1a7afd5028a
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/go.mod
@@ -0,0 +1,3 @@
+module github.com/gorilla/websocket
+
+go 1.12
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/join.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/join.go
new file mode 100644
index 00000000000..c64f8c82901
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/join.go
@@ -0,0 +1,42 @@
+// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "io"
+ "strings"
+)
+
+// JoinMessages concatenates received messages to create a single io.Reader.
+// The string term is appended to each message. The returned reader does not
+// support concurrent calls to the Read method.
+func JoinMessages(c *Conn, term string) io.Reader {
+ return &joinReader{c: c, term: term}
+}
+
+type joinReader struct {
+ c *Conn
+ term string
+ r io.Reader
+}
+
+func (r *joinReader) Read(p []byte) (int, error) {
+ if r.r == nil {
+ var err error
+ _, r.r, err = r.c.NextReader()
+ if err != nil {
+ return 0, err
+ }
+ if r.term != "" {
+ r.r = io.MultiReader(r.r, strings.NewReader(r.term))
+ }
+ }
+ n, err := r.r.Read(p)
+ if err == io.EOF {
+ err = nil
+ r.r = nil
+ }
+ return n, err
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/json.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/json.go
new file mode 100644
index 00000000000..dc2c1f6415f
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/json.go
@@ -0,0 +1,60 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "encoding/json"
+ "io"
+)
+
+// WriteJSON writes the JSON encoding of v as a message.
+//
+// Deprecated: Use c.WriteJSON instead.
+func WriteJSON(c *Conn, v interface{}) error {
+ return c.WriteJSON(v)
+}
+
+// WriteJSON writes the JSON encoding of v as a message.
+//
+// See the documentation for encoding/json Marshal for details about the
+// conversion of Go values to JSON.
+func (c *Conn) WriteJSON(v interface{}) error {
+ w, err := c.NextWriter(TextMessage)
+ if err != nil {
+ return err
+ }
+ err1 := json.NewEncoder(w).Encode(v)
+ err2 := w.Close()
+ if err1 != nil {
+ return err1
+ }
+ return err2
+}
+
+// ReadJSON reads the next JSON-encoded message from the connection and stores
+// it in the value pointed to by v.
+//
+// Deprecated: Use c.ReadJSON instead.
+func ReadJSON(c *Conn, v interface{}) error {
+ return c.ReadJSON(v)
+}
+
+// ReadJSON reads the next JSON-encoded message from the connection and stores
+// it in the value pointed to by v.
+//
+// See the documentation for the encoding/json Unmarshal function for details
+// about the conversion of JSON to a Go value.
+func (c *Conn) ReadJSON(v interface{}) error {
+ _, r, err := c.NextReader()
+ if err != nil {
+ return err
+ }
+ err = json.NewDecoder(r).Decode(v)
+ if err == io.EOF {
+ // One value is expected in the message.
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/mask.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/mask.go
new file mode 100644
index 00000000000..577fce9efd7
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/mask.go
@@ -0,0 +1,54 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
+// this source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+// +build !appengine
+
+package websocket
+
+import "unsafe"
+
+const wordSize = int(unsafe.Sizeof(uintptr(0)))
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+ // Mask one byte at a time for small buffers.
+ if len(b) < 2*wordSize {
+ for i := range b {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+ return pos & 3
+ }
+
+ // Mask one byte at a time to word boundary.
+ if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
+ n = wordSize - n
+ for i := range b[:n] {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+ b = b[n:]
+ }
+
+ // Create aligned word size key.
+ var k [wordSize]byte
+ for i := range k {
+ k[i] = key[(pos+i)&3]
+ }
+ kw := *(*uintptr)(unsafe.Pointer(&k))
+
+ // Mask one word at a time.
+ n := (len(b) / wordSize) * wordSize
+ for i := 0; i < n; i += wordSize {
+ *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
+ }
+
+ // Mask one byte at a time for remaining bytes.
+ b = b[n:]
+ for i := range b {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+
+ return pos & 3
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/mask_safe.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/mask_safe.go
new file mode 100644
index 00000000000..2aac060e52e
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/mask_safe.go
@@ -0,0 +1,15 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
+// this source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+// +build appengine
+
+package websocket
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+ for i := range b {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+ return pos & 3
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/prepared.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/prepared.go
new file mode 100644
index 00000000000..c854225e967
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/prepared.go
@@ -0,0 +1,102 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bytes"
+ "net"
+ "sync"
+ "time"
+)
+
+// PreparedMessage caches on the wire representations of a message payload.
+// Use PreparedMessage to efficiently send a message payload to multiple
+// connections. PreparedMessage is especially useful when compression is used
+// because the CPU and memory expensive compression operation can be executed
+// once for a given set of compression options.
+type PreparedMessage struct {
+ messageType int
+ data []byte
+ mu sync.Mutex
+ frames map[prepareKey]*preparedFrame
+}
+
+// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
+type prepareKey struct {
+ isServer bool
+ compress bool
+ compressionLevel int
+}
+
+// preparedFrame contains data in wire representation.
+type preparedFrame struct {
+ once sync.Once
+ data []byte
+}
+
+// NewPreparedMessage returns an initialized PreparedMessage. You can then send
+// it to connection using WritePreparedMessage method. Valid wire
+// representation will be calculated lazily only once for a set of current
+// connection options.
+func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
+ pm := &PreparedMessage{
+ messageType: messageType,
+ frames: make(map[prepareKey]*preparedFrame),
+ data: data,
+ }
+
+ // Prepare a plain server frame.
+ _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
+ if err != nil {
+ return nil, err
+ }
+
+ // To protect against caller modifying the data argument, remember the data
+ // copied to the plain server frame.
+ pm.data = frameData[len(frameData)-len(data):]
+ return pm, nil
+}
+
+func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
+ pm.mu.Lock()
+ frame, ok := pm.frames[key]
+ if !ok {
+ frame = &preparedFrame{}
+ pm.frames[key] = frame
+ }
+ pm.mu.Unlock()
+
+ var err error
+ frame.once.Do(func() {
+ // Prepare a frame using a 'fake' connection.
+ // TODO: Refactor code in conn.go to allow more direct construction of
+ // the frame.
+ mu := make(chan struct{}, 1)
+ mu <- struct{}{}
+ var nc prepareConn
+ c := &Conn{
+ conn: &nc,
+ mu: mu,
+ isServer: key.isServer,
+ compressionLevel: key.compressionLevel,
+ enableWriteCompression: true,
+ writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
+ }
+ if key.compress {
+ c.newCompressionWriter = compressNoContextTakeover
+ }
+ err = c.WriteMessage(pm.messageType, pm.data)
+ frame.data = nc.buf.Bytes()
+ })
+ return pm.messageType, frame.data, err
+}
+
+type prepareConn struct {
+ buf bytes.Buffer
+ net.Conn
+}
+
+func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) }
+func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/proxy.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/proxy.go
new file mode 100644
index 00000000000..e87a8c9f0c9
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/proxy.go
@@ -0,0 +1,77 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "encoding/base64"
+ "errors"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+type netDialerFunc func(network, addr string) (net.Conn, error)
+
+func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
+ return fn(network, addr)
+}
+
+func init() {
+ proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
+ return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil
+ })
+}
+
+type httpProxyDialer struct {
+ proxyURL *url.URL
+ forwardDial func(network, addr string) (net.Conn, error)
+}
+
+func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
+ hostPort, _ := hostPortNoPort(hpd.proxyURL)
+ conn, err := hpd.forwardDial(network, hostPort)
+ if err != nil {
+ return nil, err
+ }
+
+ connectHeader := make(http.Header)
+ if user := hpd.proxyURL.User; user != nil {
+ proxyUser := user.Username()
+ if proxyPassword, passwordSet := user.Password(); passwordSet {
+ credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
+ connectHeader.Set("Proxy-Authorization", "Basic "+credential)
+ }
+ }
+
+ connectReq := &http.Request{
+ Method: "CONNECT",
+ URL: &url.URL{Opaque: addr},
+ Host: addr,
+ Header: connectHeader,
+ }
+
+ if err := connectReq.Write(conn); err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+	// Read response. It's OK to use and discard buffered reader here because
+ // the remote server does not speak until spoken to.
+ br := bufio.NewReader(conn)
+ resp, err := http.ReadResponse(br, connectReq)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ if resp.StatusCode != 200 {
+ conn.Close()
+ f := strings.SplitN(resp.Status, " ", 2)
+ return nil, errors.New(f[1])
+ }
+ return conn, nil
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/server.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/server.go
new file mode 100644
index 00000000000..887d558918c
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/server.go
@@ -0,0 +1,363 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "errors"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// HandshakeError describes an error with the handshake from the peer.
+type HandshakeError struct {
+ message string
+}
+
+func (e HandshakeError) Error() string { return e.message }
+
+// Upgrader specifies parameters for upgrading an HTTP connection to a
+// WebSocket connection.
+type Upgrader struct {
+ // HandshakeTimeout specifies the duration for the handshake to complete.
+ HandshakeTimeout time.Duration
+
+ // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
+ // size is zero, then buffers allocated by the HTTP server are used. The
+ // I/O buffer sizes do not limit the size of the messages that can be sent
+ // or received.
+ ReadBufferSize, WriteBufferSize int
+
+ // WriteBufferPool is a pool of buffers for write operations. If the value
+ // is not set, then write buffers are allocated to the connection for the
+ // lifetime of the connection.
+ //
+ // A pool is most useful when the application has a modest volume of writes
+ // across a large number of connections.
+ //
+ // Applications should use a single pool for each unique value of
+ // WriteBufferSize.
+ WriteBufferPool BufferPool
+
+ // Subprotocols specifies the server's supported protocols in order of
+ // preference. If this field is not nil, then the Upgrade method negotiates a
+ // subprotocol by selecting the first match in this list with a protocol
+ // requested by the client. If there's no match, then no protocol is
+ // negotiated (the Sec-Websocket-Protocol header is not included in the
+ // handshake response).
+ Subprotocols []string
+
+ // Error specifies the function for generating HTTP error responses. If Error
+ // is nil, then http.Error is used to generate the HTTP response.
+ Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
+
+ // CheckOrigin returns true if the request Origin header is acceptable. If
+ // CheckOrigin is nil, then a safe default is used: return false if the
+ // Origin request header is present and the origin host is not equal to
+ // request Host header.
+ //
+ // A CheckOrigin function should carefully validate the request origin to
+ // prevent cross-site request forgery.
+ CheckOrigin func(r *http.Request) bool
+
+ // EnableCompression specify if the server should attempt to negotiate per
+ // message compression (RFC 7692). Setting this value to true does not
+ // guarantee that compression will be supported. Currently only "no context
+ // takeover" modes are supported.
+ EnableCompression bool
+}
+
+func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
+ err := HandshakeError{reason}
+ if u.Error != nil {
+ u.Error(w, r, status, err)
+ } else {
+ w.Header().Set("Sec-Websocket-Version", "13")
+ http.Error(w, http.StatusText(status), status)
+ }
+ return nil, err
+}
+
+// checkSameOrigin returns true if the origin is not set or is equal to the request host.
+func checkSameOrigin(r *http.Request) bool {
+ origin := r.Header["Origin"]
+ if len(origin) == 0 {
+ return true
+ }
+ u, err := url.Parse(origin[0])
+ if err != nil {
+ return false
+ }
+ return equalASCIIFold(u.Host, r.Host)
+}
+
+func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
+ if u.Subprotocols != nil {
+ clientProtocols := Subprotocols(r)
+ for _, serverProtocol := range u.Subprotocols {
+ for _, clientProtocol := range clientProtocols {
+ if clientProtocol == serverProtocol {
+ return clientProtocol
+ }
+ }
+ }
+ } else if responseHeader != nil {
+ return responseHeader.Get("Sec-Websocket-Protocol")
+ }
+ return ""
+}
+
+// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
+//
+// The responseHeader is included in the response to the client's upgrade
+// request. Use the responseHeader to specify cookies (Set-Cookie) and the
+// application negotiated subprotocol (Sec-WebSocket-Protocol).
+//
+// If the upgrade fails, then Upgrade replies to the client with an HTTP error
+// response.
+func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
+ const badHandshake = "websocket: the client is not using the websocket protocol: "
+
+ if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
+ return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
+ }
+
+ if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
+ return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
+ }
+
+ if r.Method != "GET" {
+ return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
+ }
+
+ if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
+ return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
+ }
+
+ if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
+ return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported")
+ }
+
+ checkOrigin := u.CheckOrigin
+ if checkOrigin == nil {
+ checkOrigin = checkSameOrigin
+ }
+ if !checkOrigin(r) {
+ return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
+ }
+
+ challengeKey := r.Header.Get("Sec-Websocket-Key")
+ if challengeKey == "" {
+ return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank")
+ }
+
+ subprotocol := u.selectSubprotocol(r, responseHeader)
+
+ // Negotiate PMCE
+ var compress bool
+ if u.EnableCompression {
+ for _, ext := range parseExtensions(r.Header) {
+ if ext[""] != "permessage-deflate" {
+ continue
+ }
+ compress = true
+ break
+ }
+ }
+
+ h, ok := w.(http.Hijacker)
+ if !ok {
+ return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
+ }
+ var brw *bufio.ReadWriter
+ netConn, brw, err := h.Hijack()
+ if err != nil {
+ return u.returnError(w, r, http.StatusInternalServerError, err.Error())
+ }
+
+ if brw.Reader.Buffered() > 0 {
+ netConn.Close()
+ return nil, errors.New("websocket: client sent data before handshake is complete")
+ }
+
+ var br *bufio.Reader
+ if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
+ // Reuse hijacked buffered reader as connection reader.
+ br = brw.Reader
+ }
+
+ buf := bufioWriterBuffer(netConn, brw.Writer)
+
+ var writeBuf []byte
+ if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
+ // Reuse hijacked write buffer as connection buffer.
+ writeBuf = buf
+ }
+
+ c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
+ c.subprotocol = subprotocol
+
+ if compress {
+ c.newCompressionWriter = compressNoContextTakeover
+ c.newDecompressionReader = decompressNoContextTakeover
+ }
+
+ // Use larger of hijacked buffer and connection write buffer for header.
+ p := buf
+ if len(c.writeBuf) > len(p) {
+ p = c.writeBuf
+ }
+ p = p[:0]
+
+ p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
+ p = append(p, computeAcceptKey(challengeKey)...)
+ p = append(p, "\r\n"...)
+ if c.subprotocol != "" {
+ p = append(p, "Sec-WebSocket-Protocol: "...)
+ p = append(p, c.subprotocol...)
+ p = append(p, "\r\n"...)
+ }
+ if compress {
+ p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
+ }
+ for k, vs := range responseHeader {
+ if k == "Sec-Websocket-Protocol" {
+ continue
+ }
+ for _, v := range vs {
+ p = append(p, k...)
+ p = append(p, ": "...)
+ for i := 0; i < len(v); i++ {
+ b := v[i]
+ if b <= 31 {
+ // prevent response splitting.
+ b = ' '
+ }
+ p = append(p, b)
+ }
+ p = append(p, "\r\n"...)
+ }
+ }
+ p = append(p, "\r\n"...)
+
+ // Clear deadlines set by HTTP server.
+ netConn.SetDeadline(time.Time{})
+
+ if u.HandshakeTimeout > 0 {
+ netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
+ }
+ if _, err = netConn.Write(p); err != nil {
+ netConn.Close()
+ return nil, err
+ }
+ if u.HandshakeTimeout > 0 {
+ netConn.SetWriteDeadline(time.Time{})
+ }
+
+ return c, nil
+}
+
+// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
+//
+// Deprecated: Use websocket.Upgrader instead.
+//
+// Upgrade does not perform origin checking. The application is responsible for
+// checking the Origin header before calling Upgrade. An example implementation
+// of the same origin policy check is:
+//
+// if req.Header.Get("Origin") != "http://"+req.Host {
+// http.Error(w, "Origin not allowed", http.StatusForbidden)
+// return
+// }
+//
+// If the endpoint supports subprotocols, then the application is responsible
+// for negotiating the protocol used on the connection. Use the Subprotocols()
+// function to get the subprotocols requested by the client. Use the
+// Sec-Websocket-Protocol response header to specify the subprotocol selected
+// by the application.
+//
+// The responseHeader is included in the response to the client's upgrade
+// request. Use the responseHeader to specify cookies (Set-Cookie) and the
+// negotiated subprotocol (Sec-Websocket-Protocol).
+//
+// The connection buffers IO to the underlying network connection. The
+// readBufSize and writeBufSize parameters specify the size of the buffers to
+// use. Messages can be larger than the buffers.
+//
+// If the request is not a valid WebSocket handshake, then Upgrade returns an
+// error of type HandshakeError. Applications should handle this error by
+// replying to the client with an HTTP error response.
+func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
+ u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
+ u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
+ // don't return errors to maintain backwards compatibility
+ }
+ u.CheckOrigin = func(r *http.Request) bool {
+ // allow all connections by default
+ return true
+ }
+ return u.Upgrade(w, r, responseHeader)
+}
+
+// Subprotocols returns the subprotocols requested by the client in the
+// Sec-Websocket-Protocol header.
+func Subprotocols(r *http.Request) []string {
+ h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
+ if h == "" {
+ return nil
+ }
+ protocols := strings.Split(h, ",")
+ for i := range protocols {
+ protocols[i] = strings.TrimSpace(protocols[i])
+ }
+ return protocols
+}
+
+// IsWebSocketUpgrade returns true if the client requested upgrade to the
+// WebSocket protocol.
+func IsWebSocketUpgrade(r *http.Request) bool {
+ return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
+ tokenListContainsValue(r.Header, "Upgrade", "websocket")
+}
+
+// bufioReaderSize size returns the size of a bufio.Reader.
+func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
+ // This code assumes that peek on a reset reader returns
+ // bufio.Reader.buf[:0].
+ // TODO: Use bufio.Reader.Size() after Go 1.10
+ br.Reset(originalReader)
+ if p, err := br.Peek(0); err == nil {
+ return cap(p)
+ }
+ return 0
+}
+
+// writeHook is an io.Writer that records the last slice passed to it via
+// io.Writer.Write.
+type writeHook struct {
+ p []byte
+}
+
+func (wh *writeHook) Write(p []byte) (int, error) {
+ wh.p = p
+ return len(p), nil
+}
+
+// bufioWriterBuffer grabs the buffer from a bufio.Writer.
+func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
+ // This code assumes that bufio.Writer.buf[:1] is passed to the
+ // bufio.Writer's underlying writer.
+ var wh writeHook
+ bw.Reset(&wh)
+ bw.WriteByte(0)
+ bw.Flush()
+
+ bw.Reset(originalWriter)
+
+ return wh.p[:cap(wh.p)]
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/trace.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/trace.go
new file mode 100644
index 00000000000..834f122a00d
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/trace.go
@@ -0,0 +1,19 @@
+// +build go1.8
+
+package websocket
+
+import (
+ "crypto/tls"
+ "net/http/httptrace"
+)
+
+func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
+ if trace.TLSHandshakeStart != nil {
+ trace.TLSHandshakeStart()
+ }
+ err := doHandshake(tlsConn, cfg)
+ if trace.TLSHandshakeDone != nil {
+ trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
+ }
+ return err
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/trace_17.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/trace_17.go
new file mode 100644
index 00000000000..77d05a0b574
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/trace_17.go
@@ -0,0 +1,12 @@
+// +build !go1.8
+
+package websocket
+
+import (
+ "crypto/tls"
+ "net/http/httptrace"
+)
+
+func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
+ return doHandshake(tlsConn, cfg)
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/util.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/util.go
new file mode 100644
index 00000000000..7bf2f66c674
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/util.go
@@ -0,0 +1,283 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/base64"
+ "io"
+ "net/http"
+ "strings"
+ "unicode/utf8"
+)
+
+var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
+
+func computeAcceptKey(challengeKey string) string {
+ h := sha1.New()
+ h.Write([]byte(challengeKey))
+ h.Write(keyGUID)
+ return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
+
+func generateChallengeKey() (string, error) {
+ p := make([]byte, 16)
+ if _, err := io.ReadFull(rand.Reader, p); err != nil {
+ return "", err
+ }
+ return base64.StdEncoding.EncodeToString(p), nil
+}
+
+// Token octets per RFC 2616.
+var isTokenOctet = [256]bool{
+ '!': true,
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '*': true,
+ '+': true,
+ '-': true,
+ '.': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+ 'W': true,
+ 'V': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+ '|': true,
+ '~': true,
+}
+
+// skipSpace returns a slice of the string s with all leading RFC 2616 linear
+// whitespace removed.
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if b := s[i]; b != ' ' && b != '\t' {
+ break
+ }
+ }
+ return s[i:]
+}
+
+// nextToken returns the leading RFC 2616 token of s and the string following
+// the token.
+func nextToken(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if !isTokenOctet[s[i]] {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616
+// and the string following the token or quoted string.
+func nextTokenOrQuoted(s string) (value string, rest string) {
+ if !strings.HasPrefix(s, "\"") {
+ return nextToken(s)
+ }
+ s = s[1:]
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '"':
+ return s[:i], s[i+1:]
+ case '\\':
+ p := make([]byte, len(s)-1)
+ j := copy(p, s[:i])
+ escape := true
+ for i = i + 1; i < len(s); i++ {
+ b := s[i]
+ switch {
+ case escape:
+ escape = false
+ p[j] = b
+ j++
+ case b == '\\':
+ escape = true
+ case b == '"':
+ return string(p[:j]), s[i+1:]
+ default:
+ p[j] = b
+ j++
+ }
+ }
+ return "", ""
+ }
+ }
+ return "", ""
+}
+
+// equalASCIIFold returns true if s is equal to t with ASCII case folding as
+// defined in RFC 4790.
+func equalASCIIFold(s, t string) bool {
+ for s != "" && t != "" {
+ sr, size := utf8.DecodeRuneInString(s)
+ s = s[size:]
+ tr, size := utf8.DecodeRuneInString(t)
+ t = t[size:]
+ if sr == tr {
+ continue
+ }
+ if 'A' <= sr && sr <= 'Z' {
+ sr = sr + 'a' - 'A'
+ }
+ if 'A' <= tr && tr <= 'Z' {
+ tr = tr + 'a' - 'A'
+ }
+ if sr != tr {
+ return false
+ }
+ }
+ return s == t
+}
+
+// tokenListContainsValue returns true if the 1#token header with the given
+// name contains a token equal to value with ASCII case folding.
+func tokenListContainsValue(header http.Header, name string, value string) bool {
+headers:
+ for _, s := range header[name] {
+ for {
+ var t string
+ t, s = nextToken(skipSpace(s))
+ if t == "" {
+ continue headers
+ }
+ s = skipSpace(s)
+ if s != "" && s[0] != ',' {
+ continue headers
+ }
+ if equalASCIIFold(t, value) {
+ return true
+ }
+ if s == "" {
+ continue headers
+ }
+ s = s[1:]
+ }
+ }
+ return false
+}
+
+// parseExtensions parses WebSocket extensions from a header.
+func parseExtensions(header http.Header) []map[string]string {
+ // From RFC 6455:
+ //
+ // Sec-WebSocket-Extensions = extension-list
+ // extension-list = 1#extension
+ // extension = extension-token *( ";" extension-param )
+ // extension-token = registered-token
+ // registered-token = token
+ // extension-param = token [ "=" (token | quoted-string) ]
+ // ;When using the quoted-string syntax variant, the value
+ // ;after quoted-string unescaping MUST conform to the
+ // ;'token' ABNF.
+
+ var result []map[string]string
+headers:
+ for _, s := range header["Sec-Websocket-Extensions"] {
+ for {
+ var t string
+ t, s = nextToken(skipSpace(s))
+ if t == "" {
+ continue headers
+ }
+ ext := map[string]string{"": t}
+ for {
+ s = skipSpace(s)
+ if !strings.HasPrefix(s, ";") {
+ break
+ }
+ var k string
+ k, s = nextToken(skipSpace(s[1:]))
+ if k == "" {
+ continue headers
+ }
+ s = skipSpace(s)
+ var v string
+ if strings.HasPrefix(s, "=") {
+ v, s = nextTokenOrQuoted(skipSpace(s[1:]))
+ s = skipSpace(s)
+ }
+ if s != "" && s[0] != ',' && s[0] != ';' {
+ continue headers
+ }
+ ext[k] = v
+ }
+ if s != "" && s[0] != ',' {
+ continue headers
+ }
+ result = append(result, ext)
+ if s == "" {
+ continue headers
+ }
+ s = s[1:]
+ }
+ }
+ return result
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/x_net_proxy.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/x_net_proxy.go
new file mode 100644
index 00000000000..2e668f6b882
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/x_net_proxy.go
@@ -0,0 +1,473 @@
+// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
+//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
+
+// Package proxy provides support for a variety of protocols to proxy network
+// data.
+//
+
+package websocket
+
+import (
+ "errors"
+ "io"
+ "net"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+type proxy_direct struct{}
+
+// Direct is a direct proxy: one that makes network connections directly.
+var proxy_Direct = proxy_direct{}
+
+func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
+ return net.Dial(network, addr)
+}
+
+// A PerHost directs connections to a default Dialer unless the host name
+// requested matches one of a number of exceptions.
+type proxy_PerHost struct {
+ def, bypass proxy_Dialer
+
+ bypassNetworks []*net.IPNet
+ bypassIPs []net.IP
+ bypassZones []string
+ bypassHosts []string
+}
+
+// NewPerHost returns a PerHost Dialer that directs connections to either
+// defaultDialer or bypass, depending on whether the connection matches one of
+// the configured rules.
+func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
+ return &proxy_PerHost{
+ def: defaultDialer,
+ bypass: bypass,
+ }
+}
+
+// Dial connects to the address addr on the given network through either
+// defaultDialer or bypass.
+func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ return p.dialerForRequest(host).Dial(network, addr)
+}
+
+func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
+ if ip := net.ParseIP(host); ip != nil {
+ for _, net := range p.bypassNetworks {
+ if net.Contains(ip) {
+ return p.bypass
+ }
+ }
+ for _, bypassIP := range p.bypassIPs {
+ if bypassIP.Equal(ip) {
+ return p.bypass
+ }
+ }
+ return p.def
+ }
+
+ for _, zone := range p.bypassZones {
+ if strings.HasSuffix(host, zone) {
+ return p.bypass
+ }
+ if host == zone[1:] {
+ // For a zone ".example.com", we match "example.com"
+ // too.
+ return p.bypass
+ }
+ }
+ for _, bypassHost := range p.bypassHosts {
+ if bypassHost == host {
+ return p.bypass
+ }
+ }
+ return p.def
+}
+
+// AddFromString parses a string that contains comma-separated values
+// specifying hosts that should use the bypass proxy. Each value is either an
+// IP address, a CIDR range, a zone (*.example.com) or a host name
+// (localhost). A best effort is made to parse the string and errors are
+// ignored.
+func (p *proxy_PerHost) AddFromString(s string) {
+ hosts := strings.Split(s, ",")
+ for _, host := range hosts {
+ host = strings.TrimSpace(host)
+ if len(host) == 0 {
+ continue
+ }
+ if strings.Contains(host, "/") {
+ // We assume that it's a CIDR address like 127.0.0.0/8
+ if _, net, err := net.ParseCIDR(host); err == nil {
+ p.AddNetwork(net)
+ }
+ continue
+ }
+ if ip := net.ParseIP(host); ip != nil {
+ p.AddIP(ip)
+ continue
+ }
+ if strings.HasPrefix(host, "*.") {
+ p.AddZone(host[1:])
+ continue
+ }
+ p.AddHost(host)
+ }
+}
+
+// AddIP specifies an IP address that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match an IP.
+func (p *proxy_PerHost) AddIP(ip net.IP) {
+ p.bypassIPs = append(p.bypassIPs, ip)
+}
+
+// AddNetwork specifies an IP range that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match.
+func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
+ p.bypassNetworks = append(p.bypassNetworks, net)
+}
+
+// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
+// "example.com" matches "example.com" and all of its subdomains.
+func (p *proxy_PerHost) AddZone(zone string) {
+ if strings.HasSuffix(zone, ".") {
+ zone = zone[:len(zone)-1]
+ }
+ if !strings.HasPrefix(zone, ".") {
+ zone = "." + zone
+ }
+ p.bypassZones = append(p.bypassZones, zone)
+}
+
+// AddHost specifies a host name that will use the bypass proxy.
+func (p *proxy_PerHost) AddHost(host string) {
+ if strings.HasSuffix(host, ".") {
+ host = host[:len(host)-1]
+ }
+ p.bypassHosts = append(p.bypassHosts, host)
+}
+
+// A Dialer is a means to establish a connection.
+type proxy_Dialer interface {
+ // Dial connects to the given address via the proxy.
+ Dial(network, addr string) (c net.Conn, err error)
+}
+
+// Auth contains authentication parameters that specific Dialers may require.
+type proxy_Auth struct {
+ User, Password string
+}
+
+// FromEnvironment returns the dialer specified by the proxy related variables in
+// the environment.
+func proxy_FromEnvironment() proxy_Dialer {
+ allProxy := proxy_allProxyEnv.Get()
+ if len(allProxy) == 0 {
+ return proxy_Direct
+ }
+
+ proxyURL, err := url.Parse(allProxy)
+ if err != nil {
+ return proxy_Direct
+ }
+ proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
+ if err != nil {
+ return proxy_Direct
+ }
+
+ noProxy := proxy_noProxyEnv.Get()
+ if len(noProxy) == 0 {
+ return proxy
+ }
+
+ perHost := proxy_NewPerHost(proxy, proxy_Direct)
+ perHost.AddFromString(noProxy)
+ return perHost
+}
+
+// proxySchemes is a map from URL schemes to a function that creates a Dialer
+// from a URL with such a scheme.
+var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)
+
+// RegisterDialerType takes a URL scheme and a function to generate Dialers from
+// a URL with that scheme and a forwarding Dialer. Registered schemes are used
+// by FromURL.
+func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
+ if proxy_proxySchemes == nil {
+ proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
+ }
+ proxy_proxySchemes[scheme] = f
+}
+
+// FromURL returns a Dialer given a URL specification and an underlying
+// Dialer for it to make network requests.
+func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
+ var auth *proxy_Auth
+ if u.User != nil {
+ auth = new(proxy_Auth)
+ auth.User = u.User.Username()
+ if p, ok := u.User.Password(); ok {
+ auth.Password = p
+ }
+ }
+
+ switch u.Scheme {
+ case "socks5":
+ return proxy_SOCKS5("tcp", u.Host, auth, forward)
+ }
+
+ // If the scheme doesn't match any of the built-in schemes, see if it
+ // was registered by another package.
+ if proxy_proxySchemes != nil {
+ if f, ok := proxy_proxySchemes[u.Scheme]; ok {
+ return f(u, forward)
+ }
+ }
+
+ return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
+}
+
+var (
+ proxy_allProxyEnv = &proxy_envOnce{
+ names: []string{"ALL_PROXY", "all_proxy"},
+ }
+ proxy_noProxyEnv = &proxy_envOnce{
+ names: []string{"NO_PROXY", "no_proxy"},
+ }
+)
+
+// envOnce looks up an environment variable (optionally by multiple
+// names) once. It mitigates expensive lookups on some platforms
+// (e.g. Windows).
+// (Borrowed from net/http/transport.go)
+type proxy_envOnce struct {
+ names []string
+ once sync.Once
+ val string
+}
+
+func (e *proxy_envOnce) Get() string {
+ e.once.Do(e.init)
+ return e.val
+}
+
+func (e *proxy_envOnce) init() {
+ for _, n := range e.names {
+ e.val = os.Getenv(n)
+ if e.val != "" {
+ return
+ }
+ }
+}
+
+// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
+// with an optional username and password. See RFC 1928 and RFC 1929.
+func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
+ s := &proxy_socks5{
+ network: network,
+ addr: addr,
+ forward: forward,
+ }
+ if auth != nil {
+ s.user = auth.User
+ s.password = auth.Password
+ }
+
+ return s, nil
+}
+
+type proxy_socks5 struct {
+ user, password string
+ network, addr string
+ forward proxy_Dialer
+}
+
+const proxy_socks5Version = 5
+
+const (
+ proxy_socks5AuthNone = 0
+ proxy_socks5AuthPassword = 2
+)
+
+const proxy_socks5Connect = 1
+
+const (
+ proxy_socks5IP4 = 1
+ proxy_socks5Domain = 3
+ proxy_socks5IP6 = 4
+)
+
+var proxy_socks5Errors = []string{
+ "",
+ "general failure",
+ "connection forbidden",
+ "network unreachable",
+ "host unreachable",
+ "connection refused",
+ "TTL expired",
+ "command not supported",
+ "address type not supported",
+}
+
+// Dial connects to the address addr on the given network via the SOCKS5 proxy.
+func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
+ switch network {
+ case "tcp", "tcp6", "tcp4":
+ default:
+ return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
+ }
+
+ conn, err := s.forward.Dial(s.network, s.addr)
+ if err != nil {
+ return nil, err
+ }
+ if err := s.connect(conn, addr); err != nil {
+ conn.Close()
+ return nil, err
+ }
+ return conn, nil
+}
+
+// connect takes an existing connection to a socks5 proxy server,
+// and commands the server to extend that connection to target,
+// which must be a canonical address with a host and port.
+func (s *proxy_socks5) connect(conn net.Conn, target string) error {
+ host, portStr, err := net.SplitHostPort(target)
+ if err != nil {
+ return err
+ }
+
+ port, err := strconv.Atoi(portStr)
+ if err != nil {
+ return errors.New("proxy: failed to parse port number: " + portStr)
+ }
+ if port < 1 || port > 0xffff {
+ return errors.New("proxy: port number out of range: " + portStr)
+ }
+
+ // the size here is just an estimate
+ buf := make([]byte, 0, 6+len(host))
+
+ buf = append(buf, proxy_socks5Version)
+ if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
+ buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
+ } else {
+ buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
+ }
+
+ if _, err := conn.Write(buf); err != nil {
+ return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+ if buf[0] != 5 {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
+ }
+ if buf[1] == 0xff {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
+ }
+
+ // See RFC 1929
+ if buf[1] == proxy_socks5AuthPassword {
+ buf = buf[:0]
+ buf = append(buf, 1 /* password protocol version */)
+ buf = append(buf, uint8(len(s.user)))
+ buf = append(buf, s.user...)
+ buf = append(buf, uint8(len(s.password)))
+ buf = append(buf, s.password...)
+
+ if _, err := conn.Write(buf); err != nil {
+ return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if buf[1] != 0 {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
+ }
+ }
+
+ buf = buf[:0]
+ buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)
+
+ if ip := net.ParseIP(host); ip != nil {
+ if ip4 := ip.To4(); ip4 != nil {
+ buf = append(buf, proxy_socks5IP4)
+ ip = ip4
+ } else {
+ buf = append(buf, proxy_socks5IP6)
+ }
+ buf = append(buf, ip...)
+ } else {
+ if len(host) > 255 {
+ return errors.New("proxy: destination host name too long: " + host)
+ }
+ buf = append(buf, proxy_socks5Domain)
+ buf = append(buf, byte(len(host)))
+ buf = append(buf, host...)
+ }
+ buf = append(buf, byte(port>>8), byte(port))
+
+ if _, err := conn.Write(buf); err != nil {
+ return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:4]); err != nil {
+ return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ failure := "unknown error"
+ if int(buf[1]) < len(proxy_socks5Errors) {
+ failure = proxy_socks5Errors[buf[1]]
+ }
+
+ if len(failure) > 0 {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
+ }
+
+ bytesToDiscard := 0
+ switch buf[3] {
+ case proxy_socks5IP4:
+ bytesToDiscard = net.IPv4len
+ case proxy_socks5IP6:
+ bytesToDiscard = net.IPv6len
+ case proxy_socks5Domain:
+ _, err := io.ReadFull(conn, buf[:1])
+ if err != nil {
+ return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+ bytesToDiscard = int(buf[0])
+ default:
+ return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
+ }
+
+ if cap(buf) < bytesToDiscard {
+ buf = make([]byte, bytesToDiscard)
+ } else {
+ buf = buf[:bytesToDiscard]
+ }
+ if _, err := io.ReadFull(conn, buf); err != nil {
+ return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ // Also need to discard the port number
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ return nil
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/LICENSE b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/LICENSE
new file mode 100644
index 00000000000..1eb75ef68e4
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2019 Klaus Post. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/deflate.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/deflate.go
new file mode 100644
index 00000000000..2b101d26b25
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -0,0 +1,819 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright (c) 2015 Klaus Post
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "fmt"
+ "io"
+ "math"
+)
+
+const (
+ NoCompression = 0
+ BestSpeed = 1
+ BestCompression = 9
+ DefaultCompression = -1
+
+ // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
+ // entropy encoding. This mode is useful in compressing data that has
+ // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
+ // that lacks an entropy encoder. Compression gains are achieved when
+ // certain bytes in the input stream occur more frequently than others.
+ //
+ // Note that HuffmanOnly produces a compressed output that is
+ // RFC 1951 compliant. That is, any valid DEFLATE decompressor will
+ // continue to be able to decompress this output.
+ HuffmanOnly = -2
+ ConstantCompression = HuffmanOnly // compatibility alias.
+
+ logWindowSize = 15
+ windowSize = 1 << logWindowSize
+ windowMask = windowSize - 1
+ logMaxOffsetSize = 15 // Standard DEFLATE
+ minMatchLength = 4 // The smallest match that the compressor looks for
+ maxMatchLength = 258 // The longest match for the compressor
+ minOffsetSize = 1 // The shortest offset that makes any sense
+
+ // The maximum number of tokens we put into a single flat block, just too
+ // stop things from getting too large.
+ maxFlateBlockTokens = 1 << 14
+ maxStoreBlockSize = 65535
+ hashBits = 17 // After 17 performance degrades
+ hashSize = 1 << hashBits
+ hashMask = (1 << hashBits) - 1
+ hashShift = (hashBits + minMatchLength - 1) / minMatchLength
+ maxHashOffset = 1 << 24
+
+ skipNever = math.MaxInt32
+
+ debugDeflate = false
+)
+
+type compressionLevel struct {
+ good, lazy, nice, chain, fastSkipHashing, level int
+}
+
+// Compression levels have been rebalanced from zlib deflate defaults
+// to give a bigger spread in speed and compression.
+// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
+var levels = []compressionLevel{
+ {}, // 0
+ // Level 1-6 uses specialized algorithm - values not used
+ {0, 0, 0, 0, 0, 1},
+ {0, 0, 0, 0, 0, 2},
+ {0, 0, 0, 0, 0, 3},
+ {0, 0, 0, 0, 0, 4},
+ {0, 0, 0, 0, 0, 5},
+ {0, 0, 0, 0, 0, 6},
+ // Levels 7-9 use increasingly more lazy matching
+ // and increasingly stringent conditions for "good enough".
+ {8, 8, 24, 16, skipNever, 7},
+ {10, 16, 24, 64, skipNever, 8},
+ {32, 258, 258, 4096, skipNever, 9},
+}
+
+// advancedState contains state for the advanced levels, with bigger hash tables, etc.
+type advancedState struct {
+ // deflate state
+ length int
+ offset int
+ hash uint32
+ maxInsertIndex int
+ ii uint16 // position of last match, intended to overflow to reset.
+
+ // Input hash chains
+ // hashHead[hashValue] contains the largest inputIndex with the specified hash value
+ // If hashHead[hashValue] is within the current window, then
+ // hashPrev[hashHead[hashValue] & windowMask] contains the previous index
+ // with the same hash value.
+ chainHead int
+ hashHead [hashSize]uint32
+ hashPrev [windowSize]uint32
+ hashOffset int
+
+ // input window: unprocessed data is window[index:windowEnd]
+ index int
+ hashMatch [maxMatchLength + minMatchLength]uint32
+}
+
+type compressor struct {
+ compressionLevel
+
+ w *huffmanBitWriter
+
+ // compression algorithm
+ fill func(*compressor, []byte) int // copy data to window
+ step func(*compressor) // process window
+ sync bool // requesting flush
+
+ window []byte
+ windowEnd int
+ blockStart int // window index where current tokens start
+ byteAvailable bool // if true, still need to process window[index-1].
+ err error
+
+ // queued output tokens
+ tokens tokens
+ fast fastEnc
+ state *advancedState
+}
+
+func (d *compressor) fillDeflate(b []byte) int {
+ s := d.state
+ if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
+ // shift the window by windowSize
+ copy(d.window[:], d.window[windowSize:2*windowSize])
+ s.index -= windowSize
+ d.windowEnd -= windowSize
+ if d.blockStart >= windowSize {
+ d.blockStart -= windowSize
+ } else {
+ d.blockStart = math.MaxInt32
+ }
+ s.hashOffset += windowSize
+ if s.hashOffset > maxHashOffset {
+ delta := s.hashOffset - 1
+ s.hashOffset -= delta
+ s.chainHead -= delta
+ // Iterate over slices instead of arrays to avoid copying
+ // the entire table onto the stack (Issue #18625).
+ for i, v := range s.hashPrev[:] {
+ if int(v) > delta {
+ s.hashPrev[i] = uint32(int(v) - delta)
+ } else {
+ s.hashPrev[i] = 0
+ }
+ }
+ for i, v := range s.hashHead[:] {
+ if int(v) > delta {
+ s.hashHead[i] = uint32(int(v) - delta)
+ } else {
+ s.hashHead[i] = 0
+ }
+ }
+ }
+ }
+ n := copy(d.window[d.windowEnd:], b)
+ d.windowEnd += n
+ return n
+}
+
+func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
+ if index > 0 || eof {
+ var window []byte
+ if d.blockStart <= index {
+ window = d.window[d.blockStart:index]
+ }
+ d.blockStart = index
+ d.w.writeBlock(tok, eof, window)
+ return d.w.err
+ }
+ return nil
+}
+
+// writeBlockSkip writes the current block and uses the number of tokens
+// to determine if the block should be stored on no matches, or
+// only huffman encoded.
+func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
+ if index > 0 || eof {
+ if d.blockStart <= index {
+ window := d.window[d.blockStart:index]
+ // If we removed less than a 64th of all literals
+ // we huffman compress the block.
+ if int(tok.n) > len(window)-int(tok.n>>6) {
+ d.w.writeBlockHuff(eof, window, d.sync)
+ } else {
+ // Write a dynamic huffman block.
+ d.w.writeBlockDynamic(tok, eof, window, d.sync)
+ }
+ } else {
+ d.w.writeBlock(tok, eof, nil)
+ }
+ d.blockStart = index
+ return d.w.err
+ }
+ return nil
+}
+
+// fillWindow will fill the current window with the supplied
+// dictionary and calculate all hashes.
+// This is much faster than doing a full encode.
+// Should only be used after a start/reset.
+func (d *compressor) fillWindow(b []byte) {
+ // Do not fill window if we are in store-only or huffman mode.
+ if d.level <= 0 {
+ return
+ }
+ if d.fast != nil {
+ // encode the last data, but discard the result
+ if len(b) > maxMatchOffset {
+ b = b[len(b)-maxMatchOffset:]
+ }
+ d.fast.Encode(&d.tokens, b)
+ d.tokens.Reset()
+ return
+ }
+ s := d.state
+ // If we are given too much, cut it.
+ if len(b) > windowSize {
+ b = b[len(b)-windowSize:]
+ }
+ // Add all to window.
+ n := copy(d.window[d.windowEnd:], b)
+
+ // Calculate 256 hashes at the time (more L1 cache hits)
+ loops := (n + 256 - minMatchLength) / 256
+ for j := 0; j < loops; j++ {
+ startindex := j * 256
+ end := startindex + 256 + minMatchLength - 1
+ if end > n {
+ end = n
+ }
+ tocheck := d.window[startindex:end]
+ dstSize := len(tocheck) - minMatchLength + 1
+
+ if dstSize <= 0 {
+ continue
+ }
+
+ dst := s.hashMatch[:dstSize]
+ bulkHash4(tocheck, dst)
+ var newH uint32
+ for i, val := range dst {
+ di := i + startindex
+ newH = val & hashMask
+ // Get previous value with the same hash.
+ // Our chain should point to the previous value.
+ s.hashPrev[di&windowMask] = s.hashHead[newH]
+ // Set the head of the hash chain to us.
+ s.hashHead[newH] = uint32(di + s.hashOffset)
+ }
+ s.hash = newH
+ }
+ // Update window information.
+ d.windowEnd += n
+ s.index = n
+}
+
+// Try to find a match starting at index whose length is greater than prevSize.
+// We only look at chainCount possibilities before giving up.
+// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
+func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
+ minMatchLook := maxMatchLength
+ if lookahead < minMatchLook {
+ minMatchLook = lookahead
+ }
+
+ win := d.window[0 : pos+minMatchLook]
+
+ // We quit when we get a match that's at least nice long
+ nice := len(win) - pos
+ if d.nice < nice {
+ nice = d.nice
+ }
+
+ // If we've got a match that's good enough, only look in 1/4 the chain.
+ tries := d.chain
+ length = prevLength
+ if length >= d.good {
+ tries >>= 2
+ }
+
+ wEnd := win[pos+length]
+ wPos := win[pos:]
+ minIndex := pos - windowSize
+
+ for i := prevHead; tries > 0; tries-- {
+ if wEnd == win[i+length] {
+ n := matchLen(win[i:i+minMatchLook], wPos)
+
+ if n > length && (n > minMatchLength || pos-i <= 4096) {
+ length = n
+ offset = pos - i
+ ok = true
+ if n >= nice {
+ // The match is good enough that we don't try to find a better one.
+ break
+ }
+ wEnd = win[pos+n]
+ }
+ }
+ if i == minIndex {
+ // hashPrev[i & windowMask] has already been overwritten, so stop now.
+ break
+ }
+ i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
+ if i < minIndex || i < 0 {
+ break
+ }
+ }
+ return
+}
+
+func (d *compressor) writeStoredBlock(buf []byte) error {
+ if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
+ return d.w.err
+ }
+ d.w.writeBytes(buf)
+ return d.w.err
+}
+
+// hash4 returns a hash representation of the first 4 bytes
+// of the supplied slice.
+// The caller must ensure that len(b) >= 4.
+func hash4(b []byte) uint32 {
+ b = b[:4]
+ // Pack the 4 bytes big-endian (b[0] in the top byte) before mixing.
+ return hash4u(uint32(b[3])|uint32(b[2])<<8|uint32(b[1])<<16|uint32(b[0])<<24, hashBits)
+}
+
+// bulkHash4 will compute hashes using the same
+// algorithm as hash4, writing one hash per starting position of b into dst.
+// dst must have at least len(b)-3 entries when len(b) >= 4.
+func bulkHash4(b []byte, dst []uint32) {
+ if len(b) < 4 {
+ return
+ }
+ hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+ dst[0] = hash4u(hb, hashBits)
+ end := len(b) - 4 + 1
+ for i := 1; i < end; i++ {
+ // Roll the 4-byte window forward one byte instead of reloading it.
+ hb = (hb << 8) | uint32(b[i+3])
+ dst[i] = hash4u(hb, hashBits)
+ }
+}
+
+// initDeflate allocates the double-length sliding window and resets the
+// advanced matcher state. The state fields are skipped when d.state is nil.
+func (d *compressor) initDeflate() {
+ d.window = make([]byte, 2*windowSize)
+ d.byteAvailable = false
+ d.err = nil
+ if d.state == nil {
+ return
+ }
+ s := d.state
+ s.index = 0
+ s.hashOffset = 1
+ s.length = minMatchLength - 1
+ s.offset = 0
+ s.hash = 0
+ s.chainHead = -1
+}
+
+// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
+// meaning it always has lazy matching on: a match found at position i is only
+// emitted after checking whether position i+1 yields a longer one.
+func (d *compressor) deflateLazy() {
+ s := d.state
+ // Sanity enables additional runtime tests.
+ // It's intended to be used during development
+ // to supplement the currently ad-hoc unit tests.
+ const sanity = debugDeflate
+
+ if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
+ return
+ }
+
+ s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
+ if s.index < s.maxInsertIndex {
+ s.hash = hash4(d.window[s.index : s.index+minMatchLength])
+ }
+
+ // Main loop: each iteration either emits the previous (lazy) match or a
+ // pending literal, advancing s.index accordingly.
+ for {
+ if sanity && s.index > d.windowEnd {
+ panic("index > windowEnd")
+ }
+ lookahead := d.windowEnd - s.index
+ if lookahead < minMatchLength+maxMatchLength {
+ if !d.sync {
+ return
+ }
+ if sanity && s.index > d.windowEnd {
+ panic("index > windowEnd")
+ }
+ if lookahead == 0 {
+ // Flush current output block if any.
+ if d.byteAvailable {
+ // There is still one pending token that needs to be flushed
+ d.tokens.AddLiteral(d.window[s.index-1])
+ d.byteAvailable = false
+ }
+ if d.tokens.n > 0 {
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ return
+ }
+ }
+ if s.index < s.maxInsertIndex {
+ // Update the hash
+ s.hash = hash4(d.window[s.index : s.index+minMatchLength])
+ ch := s.hashHead[s.hash&hashMask]
+ s.chainHead = int(ch)
+ s.hashPrev[s.index&windowMask] = ch
+ s.hashHead[s.hash&hashMask] = uint32(s.index + s.hashOffset)
+ }
+ prevLength := s.length
+ prevOffset := s.offset
+ s.length = minMatchLength - 1
+ s.offset = 0
+ minIndex := s.index - windowSize
+ if minIndex < 0 {
+ minIndex = 0
+ }
+
+ if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
+ if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
+ s.length = newLength
+ s.offset = newOffset
+ }
+ }
+ if prevLength >= minMatchLength && s.length <= prevLength {
+ // There was a match at the previous step, and the current match is
+ // not better. Output the previous match.
+ d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
+
+ // Insert in the hash table all strings up to the end of the match.
+ // index and index-1 are already inserted. If there is not enough
+ // lookahead, the last two strings are not inserted into the hash
+ // table.
+ var newIndex int
+ newIndex = s.index + prevLength - 1
+ // Calculate missing hashes
+ end := newIndex
+ if end > s.maxInsertIndex {
+ end = s.maxInsertIndex
+ }
+ end += minMatchLength - 1
+ startindex := s.index + 1
+ if startindex > s.maxInsertIndex {
+ startindex = s.maxInsertIndex
+ }
+ tocheck := d.window[startindex:end]
+ dstSize := len(tocheck) - minMatchLength + 1
+ if dstSize > 0 {
+ dst := s.hashMatch[:dstSize]
+ bulkHash4(tocheck, dst)
+ var newH uint32
+ for i, val := range dst {
+ di := i + startindex
+ newH = val & hashMask
+ // Get previous value with the same hash.
+ // Our chain should point to the previous value.
+ s.hashPrev[di&windowMask] = s.hashHead[newH]
+ // Set the head of the hash chain to us.
+ s.hashHead[newH] = uint32(di + s.hashOffset)
+ }
+ s.hash = newH
+ }
+
+ s.index = newIndex
+ d.byteAvailable = false
+ s.length = minMatchLength - 1
+ if d.tokens.n == maxFlateBlockTokens {
+ // The block includes the current character
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ } else {
+ // Reset, if we got a match this run.
+ if s.length >= minMatchLength {
+ s.ii = 0
+ }
+ // We have a byte waiting. Emit it.
+ if d.byteAvailable {
+ s.ii++
+ d.tokens.AddLiteral(d.window[s.index-1])
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ s.index++
+
+ // If we have a long run of no matches, skip additional bytes
+ // Resets when s.ii overflows after 64KB.
+ if s.ii > 31 {
+ n := int(s.ii >> 5)
+ for j := 0; j < n; j++ {
+ if s.index >= d.windowEnd-1 {
+ break
+ }
+
+ d.tokens.AddLiteral(d.window[s.index-1])
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ s.index++
+ }
+ // Flush last byte
+ d.tokens.AddLiteral(d.window[s.index-1])
+ d.byteAvailable = false
+ // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ }
+ } else {
+ s.index++
+ d.byteAvailable = true
+ }
+ }
+ }
+}
+
+// store writes out the current window as a stored block once it is full
+// (maxStoreBlockSize) or a flush/close has been requested (d.sync).
+func (d *compressor) store() {
+ if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
+ d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ d.windowEnd = 0
+ }
+}
+
+// fillBlock copies as much of b as fits into the window buffer.
+// The number of bytes copied is returned.
+func (d *compressor) fillBlock(b []byte) int {
+ n := copy(d.window[d.windowEnd:], b)
+ d.windowEnd += n
+ return n
+}
+
+// storeHuff will compress and store the currently added data,
+// if enough has been accumulated or we at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeHuff() {
+ if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
+ return
+ }
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
+ d.err = d.w.err
+ d.windowEnd = 0
+}
+
+// storeFast will compress and store the currently added data,
+// if enough has been accumulated or we at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeFast() {
+ // We only compress if we have maxStoreBlockSize.
+ if d.windowEnd < len(d.window) {
+ if !d.sync {
+ return
+ }
+ // Handle extremely small sizes.
+ if d.windowEnd < 128 {
+ if d.windowEnd == 0 {
+ return
+ }
+ // Tiny inputs: store raw (<= 32 bytes) or Huffman-only; LZ matching
+ // would cost more than it saves.
+ if d.windowEnd <= 32 {
+ d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ } else {
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], true)
+ d.err = d.w.err
+ }
+ d.tokens.Reset()
+ d.windowEnd = 0
+ d.fast.Reset()
+ return
+ }
+ }
+
+ d.fast.Encode(&d.tokens, d.window[:d.windowEnd])
+ // If we made zero matches, store the block as is.
+ if d.tokens.n == 0 {
+ d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ // If we removed less than 1/16th, huffman compress the block.
+ } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
+ d.err = d.w.err
+ } else {
+ d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync)
+ d.err = d.w.err
+ }
+ d.tokens.Reset()
+ d.windowEnd = 0
+}
+
+// write will add input byte to the stream.
+// Unless an error occurs all bytes will be consumed.
+func (d *compressor) write(b []byte) (n int, err error) {
+ if d.err != nil {
+ return 0, d.err
+ }
+ n = len(b)
+ for len(b) > 0 {
+ // Run the compression step, then refill the window from b.
+ d.step(d)
+ b = b[d.fill(d, b):]
+ if d.err != nil {
+ return 0, d.err
+ }
+ }
+ return n, d.err
+}
+
+// syncFlush runs one compression step with d.sync set, then emits an empty
+// stored block as the sync marker and flushes the bit writer.
+func (d *compressor) syncFlush() error {
+ d.sync = true
+ if d.err != nil {
+ return d.err
+ }
+ d.step(d)
+ if d.err == nil {
+ d.w.writeStoredHeader(0, false)
+ d.w.flush()
+ d.err = d.w.err
+ }
+ d.sync = false
+ return d.err
+}
+
+// init configures the compressor for the given level and output writer:
+// level 0 stores, -2 is Huffman-only, 1-6 use the table-based fast encoders,
+// and 7-9 use the lazy-matching deflate path.
+func (d *compressor) init(w io.Writer, level int) (err error) {
+ d.w = newHuffmanBitWriter(w)
+
+ switch {
+ case level == NoCompression:
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).store
+ case level == ConstantCompression:
+ d.w.logNewTablePenalty = 4
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).storeHuff
+ case level == DefaultCompression:
+ level = 5
+ fallthrough
+ case level >= 1 && level <= 6:
+ d.w.logNewTablePenalty = 6
+ d.fast = newFastEnc(level)
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).storeFast
+ case 7 <= level && level <= 9:
+ d.w.logNewTablePenalty = 10
+ d.state = &advancedState{}
+ d.compressionLevel = levels[level]
+ d.initDeflate()
+ d.fill = (*compressor).fillDeflate
+ d.step = (*compressor).deflateLazy
+ default:
+ return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
+ }
+ d.level = level
+ return nil
+}
+
+// reset the state of the compressor so it can be reused with a new output
+// writer, without reallocating the window or hash tables.
+func (d *compressor) reset(w io.Writer) {
+ d.w.reset(w)
+ d.sync = false
+ d.err = nil
+ // We only need to reset a few things for Snappy.
+ if d.fast != nil {
+ d.fast.Reset()
+ d.windowEnd = 0
+ d.tokens.Reset()
+ return
+ }
+ switch d.compressionLevel.chain {
+ case 0:
+ // level was NoCompression or ConstantCompresssion.
+ d.windowEnd = 0
+ default:
+ s := d.state
+ s.chainHead = -1
+ for i := range s.hashHead {
+ s.hashHead[i] = 0
+ }
+ for i := range s.hashPrev {
+ s.hashPrev[i] = 0
+ }
+ s.hashOffset = 1
+ s.index, d.windowEnd = 0, 0
+ d.blockStart, d.byteAvailable = 0, false
+ d.tokens.Reset()
+ s.length = minMatchLength - 1
+ s.offset = 0
+ s.hash = 0
+ s.ii = 0
+ s.maxInsertIndex = 0
+ }
+}
+
+// close flushes any remaining data, writes the final (BFINAL) empty stored
+// block, and flushes the bit writer.
+func (d *compressor) close() error {
+ if d.err != nil {
+ return d.err
+ }
+ d.sync = true
+ d.step(d)
+ if d.err != nil {
+ return d.err
+ }
+ if d.w.writeStoredHeader(0, true); d.w.err != nil {
+ return d.w.err
+ }
+ d.w.flush()
+ d.w.reset(nil)
+ return d.w.err
+}
+
+// NewWriter returns a new Writer compressing data at the given level.
+// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
+// higher levels typically run slower but compress more.
+// Level 0 (NoCompression) does not attempt any compression; it only adds the
+// necessary DEFLATE framing.
+// Level -1 (DefaultCompression) uses the default compression level.
+// Level -2 (ConstantCompression) will use Huffman compression only, giving
+// a very fast compression for all types of input, but sacrificing considerable
+// compression efficiency.
+//
+// If level is in the range [-2, 9] then the error returned will be nil.
+// Otherwise the error returned will be non-nil.
+func NewWriter(w io.Writer, level int) (*Writer, error) {
+ var dw Writer
+ if err := dw.d.init(w, level); err != nil {
+ return nil, err
+ }
+ return &dw, nil
+}
+
+// NewWriterDict is like NewWriter but initializes the new
+// Writer with a preset dictionary. The returned Writer behaves
+// as if the dictionary had been written to it without producing
+// any compressed output. The compressed data written to w
+// can only be decompressed by a Reader initialized with the
+// same dictionary.
+func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
+ zw, err := NewWriter(w, level)
+ if err != nil {
+ return nil, err
+ }
+ zw.d.fillWindow(dict)
+ zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
+ return zw, err
+}
+
+// A Writer takes data written to it and writes the compressed
+// form of that data to an underlying writer (see NewWriter).
+type Writer struct {
+ d compressor
+ // dict is non-empty only for writers created via NewWriterDict/ResetDict;
+ // it is replayed into the window on Reset.
+ dict []byte
+}
+
+// Write writes data to w, which will eventually write the
+// compressed form of data to its underlying writer.
+func (w *Writer) Write(data []byte) (n int, err error) {
+ return w.d.write(data)
+}
+
+// Flush flushes any pending data to the underlying writer.
+// It is useful mainly in compressed network protocols, to ensure that
+// a remote reader has enough data to reconstruct a packet.
+// Flush does not return until the data has been written.
+// Calling Flush when there is no pending data still causes the Writer
+// to emit a sync marker of at least 4 bytes.
+// If the underlying writer returns an error, Flush returns that error.
+//
+// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
+func (w *Writer) Flush() error {
+ // For more about flushing:
+ // http://www.bolet.org/~pornin/deflate-flush.html
+ return w.d.syncFlush()
+}
+
+// Close flushes and closes the writer.
+func (w *Writer) Close() error {
+ return w.d.close()
+}
+
+// Reset discards the writer's state and makes it equivalent to
+// the result of NewWriter or NewWriterDict called with dst
+// and w's level and dictionary.
+func (w *Writer) Reset(dst io.Writer) {
+ if len(w.dict) > 0 {
+ // w was created with NewWriterDict
+ w.d.reset(dst)
+ // Replay the preset dictionary only when there is a real destination.
+ if dst != nil {
+ w.d.fillWindow(w.dict)
+ }
+ } else {
+ // w was created with NewWriter
+ w.d.reset(dst)
+ }
+}
+
+// ResetDict discards the writer's state and makes it equivalent to
+// the result of NewWriter or NewWriterDict called with dst
+// and w's level, but sets a specific dictionary.
+func (w *Writer) ResetDict(dst io.Writer, dict []byte) {
+ w.dict = dict
+ w.d.reset(dst)
+ w.d.fillWindow(w.dict)
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/dict_decoder.go
new file mode 100644
index 00000000000..71c75a065ea
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/dict_decoder.go
@@ -0,0 +1,184 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
+// LZ77 decompresses data through sequences of two forms of commands:
+//
+// * Literal insertions: Runs of one or more symbols are inserted into the data
+// stream as is. This is accomplished through the writeByte method for a
+// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
+// Any valid stream must start with a literal insertion if no preset dictionary
+// is used.
+//
+// * Backward copies: Runs of one or more symbols are copied from previously
+// emitted data. Backward copies come as the tuple (dist, length) where dist
+// determines how far back in the stream to copy from and length determines how
+// many bytes to copy. Note that it is valid for the length to be greater than
+// the distance. Since LZ77 uses forward copies, that situation is used to
+// perform a form of run-length encoding on repeated runs of symbols.
+// The writeCopy and tryWriteCopy are used to implement this command.
+//
+// For performance reasons, this implementation performs little to no sanity
+// checks about the arguments. As such, the invariants documented for each
+// method call must be respected.
+type dictDecoder struct {
+ hist []byte // Sliding window history
+
+ // Invariant: 0 <= rdPos <= wrPos <= len(hist)
+ // wrPos wraps back to 0 (and full is set) when the window fills; see readFlush.
+ wrPos int // Current output position in buffer
+ rdPos int // Have emitted hist[:rdPos] already
+ full bool // Has a full window length been written yet?
+}
+
+// init initializes dictDecoder to have a sliding window dictionary of the given
+// size. If a preset dict is provided, it will initialize the dictionary with
+// the contents of dict. A dict longer than the window keeps only its tail.
+func (dd *dictDecoder) init(size int, dict []byte) {
+ // Zero all fields but keep the existing backing buffer for reuse.
+ *dd = dictDecoder{hist: dd.hist}
+
+ if cap(dd.hist) < size {
+ dd.hist = make([]byte, size)
+ }
+ dd.hist = dd.hist[:size]
+
+ if len(dict) > len(dd.hist) {
+ dict = dict[len(dict)-len(dd.hist):]
+ }
+ dd.wrPos = copy(dd.hist, dict)
+ if dd.wrPos == len(dd.hist) {
+ dd.wrPos = 0
+ dd.full = true
+ }
+ dd.rdPos = dd.wrPos
+}
+
+// histSize reports the total amount of historical data in the dictionary.
+func (dd *dictDecoder) histSize() int {
+ if dd.full {
+ return len(dd.hist)
+ }
+ return dd.wrPos
+}
+
+// availRead reports the number of bytes that can be flushed by readFlush.
+func (dd *dictDecoder) availRead() int {
+ return dd.wrPos - dd.rdPos
+}
+
+// availWrite reports the available amount of output buffer space.
+func (dd *dictDecoder) availWrite() int {
+ return len(dd.hist) - dd.wrPos
+}
+
+// writeSlice returns a slice of the available buffer to write data to.
+//
+// This invariant will be kept: len(s) <= availWrite()
+func (dd *dictDecoder) writeSlice() []byte {
+ return dd.hist[dd.wrPos:]
+}
+
+// writeMark advances the writer pointer by cnt, committing bytes previously
+// placed via writeSlice.
+//
+// This invariant must be kept: 0 <= cnt <= availWrite()
+func (dd *dictDecoder) writeMark(cnt int) {
+ dd.wrPos += cnt
+}
+
+// writeByte writes a single byte to the dictionary.
+//
+// This invariant must be kept: 0 < availWrite()
+func (dd *dictDecoder) writeByte(c byte) {
+ dd.hist[dd.wrPos] = c
+ dd.wrPos++
+}
+
+// writeCopy copies a string at a given (dist, length) to the output.
+// This returns the number of bytes copied and may be less than the requested
+// length if the available space in the output buffer is too small.
+//
+// This invariant must be kept: 0 < dist <= histSize()
+func (dd *dictDecoder) writeCopy(dist, length int) int {
+ dstBase := dd.wrPos
+ dstPos := dstBase
+ srcPos := dstPos - dist
+ endPos := dstPos + length
+ if endPos > len(dd.hist) {
+ endPos = len(dd.hist)
+ }
+
+ // Copy non-overlapping section after destination position.
+ //
+ // This section is non-overlapping in that the copy length for this section
+ // is always less than or equal to the backwards distance. This can occur
+ // if a distance refers to data that wraps-around in the buffer.
+ // Thus, a backwards copy is performed here; that is, the exact bytes in
+ // the source prior to the copy is placed in the destination.
+ if srcPos < 0 {
+ srcPos += len(dd.hist)
+ dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
+ srcPos = 0
+ }
+
+ // Copy possibly overlapping section before destination position.
+ //
+ // This section can overlap if the copy length for this section is larger
+ // than the backwards distance. This is allowed by LZ77 so that repeated
+ // strings can be succinctly represented using (dist, length) pairs.
+ // Thus, a forwards copy is performed here; that is, the bytes copied is
+ // possibly dependent on the resulting bytes in the destination as the copy
+ // progresses along. This is functionally equivalent to the following:
+ //
+ // for i := 0; i < endPos-dstPos; i++ {
+ // dd.hist[dstPos+i] = dd.hist[srcPos+i]
+ // }
+ // dstPos = endPos
+ //
+ for dstPos < endPos {
+ dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
+ }
+
+ dd.wrPos = dstPos
+ return dstPos - dstBase
+}
+
+// tryWriteCopy tries to copy a string at a given (distance, length) to the
+// output. This specialized version is optimized for short distances.
+// It returns 0 (copying nothing) when the source would wrap around or the
+// destination would overflow the buffer; callers fall back to writeCopy.
+//
+// This method is designed to be inlined for performance reasons.
+//
+// This invariant must be kept: 0 < dist <= histSize()
+func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
+ dstPos := dd.wrPos
+ endPos := dstPos + length
+ if dstPos < dist || endPos > len(dd.hist) {
+ return 0
+ }
+ dstBase := dstPos
+ srcPos := dstPos - dist
+
+ // Copy possibly overlapping section before destination position.
+loop:
+ dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
+ if dstPos < endPos {
+ goto loop // Avoid for-loop so that this function can be inlined
+ }
+
+ dd.wrPos = dstPos
+ return dstPos - dstBase
+}
+
+// readFlush returns a slice of the historical buffer that is ready to be
+// emitted to the user. The data returned by readFlush must be fully consumed
+// before calling any other dictDecoder methods.
+func (dd *dictDecoder) readFlush() []byte {
+ toRead := dd.hist[dd.rdPos:dd.wrPos]
+ dd.rdPos = dd.wrPos
+ // Window is full: wrap both positions and mark the history as complete.
+ if dd.wrPos == len(dd.hist) {
+ dd.wrPos, dd.rdPos = 0, 0
+ dd.full = true
+ }
+ return toRead
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/fast_encoder.go
new file mode 100644
index 00000000000..6d4c1e98bc5
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -0,0 +1,254 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Modified for deflate by Klaus Post (c) 2015.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "fmt"
+ "math/bits"
+)
+
+// fastEnc is the interface implemented by the table-based encoders used for
+// compression levels 1-6.
+type fastEnc interface {
+ Encode(dst *tokens, src []byte)
+ Reset()
+}
+
+// newFastEnc returns the fast encoder for the given level (1-6).
+// It panics for any other level; callers are expected to have validated the
+// level already (see compressor.init).
+func newFastEnc(level int) fastEnc {
+ switch level {
+ case 1:
+ return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 2:
+ return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 3:
+ return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 4:
+ return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 5:
+ return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 6:
+ return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}}
+ default:
+ panic("invalid level specified")
+ }
+}
+
+const (
+ tableBits = 15 // Bits used in the table
+ tableSize = 1 << tableBits // Size of the table
+ tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
+ baseMatchOffset = 1 // The smallest match offset
+ baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
+ maxMatchOffset = 1 << 15 // The largest match offset
+
+ bTableBits = 17 // Bits used in the big tables
+ bTableSize = 1 << bTableBits // Size of the table
+ allocHistory = maxStoreBlockSize * 10 // Size to preallocate for history.
+ bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
+)
+
+// Multiplicative hashing primes, one per input width (3..8 bytes).
+const (
+ prime3bytes = 506832829
+ prime4bytes = 2654435761
+ prime5bytes = 889523592379
+ prime6bytes = 227718039650203
+ prime7bytes = 58295818150454627
+ prime8bytes = 0xcf1bbcdcb7a56463
+)
+
+// load32 reads 4 bytes little-endian starting at b[i].
+func load32(b []byte, i int) uint32 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:4]
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+// load64 reads 8 bytes little-endian starting at b[i].
+func load64(b []byte, i int) uint64 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:8]
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// load3232 is load32 with an int32 index.
+func load3232(b []byte, i int32) uint32 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:4]
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+// load6432 is load64 with an int32 index.
+func load6432(b []byte, i int32) uint64 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:8]
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// hash is a multiplicative hash of u, keeping the top tableBits bits.
+func hash(u uint32) uint32 {
+ return (u * 0x1e35a7bd) >> tableShift
+}
+
+// tableEntry is a single match-table slot holding an absolute offset.
+type tableEntry struct {
+ offset int32
+}
+
+// fastGen maintains the table for matches,
+// and the previous byte block for level 2.
+// This is the generic implementation.
+type fastGen struct {
+ hist []byte
+ // cur is the offset of hist[0] in the logical input stream; table entries
+ // store absolute offsets relative to this base.
+ cur int32
+}
+
+// addBlock appends src to the history buffer, sliding the window down by
+// maxMatchOffset when it would overflow, and returns the offset of src
+// within the (possibly slid) buffer.
+func (e *fastGen) addBlock(src []byte) int32 {
+ // check if we have space already
+ if len(e.hist)+len(src) > cap(e.hist) {
+ if cap(e.hist) == 0 {
+ e.hist = make([]byte, 0, allocHistory)
+ } else {
+ if cap(e.hist) < maxMatchOffset*2 {
+ panic("unexpected buffer size")
+ }
+ // Move down
+ offset := int32(len(e.hist)) - maxMatchOffset
+ copy(e.hist[0:maxMatchOffset], e.hist[offset:])
+ e.cur += offset
+ e.hist = e.hist[:maxMatchOffset]
+ }
+ }
+ s := int32(len(e.hist))
+ e.hist = append(e.hist, src...)
+ return s
+}
+
+// hash4u returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4u(u uint32, h uint8) uint32 {
+ return (u * prime4bytes) >> ((32 - h) & 31)
+}
+
+// tableEntryPrev holds the two most recent entries for one hash bucket.
+type tableEntryPrev struct {
+ Cur tableEntry
+ Prev tableEntry
+}
+
+// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4x64(u uint64, h uint8) uint32 {
+ return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
+}
+
+// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash7(u uint64, h uint8) uint32 {
+ return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
+}
+
+// hash8 returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash8(u uint64, h uint8) uint32 {
+ return uint32((u * prime8bytes) >> ((64 - h) & 63))
+}
+
+// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash6(u uint64, h uint8) uint32 {
+ return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
+}
+
+// matchlen will return the match length between offsets s and t in src.
+// The maximum length returned is maxMatchLength - 4.
+// It is assumed that s > t, that t >=0 and s < len(src).
+func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
+ // debugDecode-only invariant checks; compiled out in normal builds.
+ if debugDecode {
+ if t >= s {
+ panic(fmt.Sprint("t >=s:", t, s))
+ }
+ if int(s) >= len(src) {
+ panic(fmt.Sprint("s >= len(src):", s, len(src)))
+ }
+ if t < 0 {
+ panic(fmt.Sprint("t < 0:", t))
+ }
+ if s-t > maxMatchOffset {
+ panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
+ }
+ }
+ s1 := int(s) + maxMatchLength - 4
+ if s1 > len(src) {
+ s1 = len(src)
+ }
+
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:s1], src[t:]))
+}
+
+// matchlenLong will return the match length between offsets s and t in src,
+// with no upper bound other than the end of src.
+// It is assumed that s > t, that t >=0 and s < len(src).
+func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
+ if debugDecode {
+ if t >= s {
+ panic(fmt.Sprint("t >=s:", t, s))
+ }
+ if int(s) >= len(src) {
+ panic(fmt.Sprint("s >= len(src):", s, len(src)))
+ }
+ if t < 0 {
+ panic(fmt.Sprint("t < 0:", t))
+ }
+ if s-t > maxMatchOffset {
+ panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
+ }
+ }
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:], src[t:]))
+}
+
+// Reset the encoding table.
+func (e *fastGen) Reset() {
+ if cap(e.hist) < allocHistory {
+ e.hist = make([]byte, 0, allocHistory)
+ }
+ // We offset current position so everything will be out of reach.
+ // If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
+ if e.cur <= bufferReset {
+ e.cur += maxMatchOffset + int32(len(e.hist))
+ }
+ e.hist = e.hist[:0]
+}
+
+// matchLen returns the number of leading bytes at which a and b agree.
+// 'a' must be the shortest of the two.
+func matchLen(a, b []byte) int {
+ b = b[:len(a)]
+ var checked int
+ if len(a) > 4 {
+ // Try 4 bytes first
+ if diff := load32(a, 0) ^ load32(b, 0); diff != 0 {
+ // The first differing byte is found from the lowest set bit of the XOR.
+ return bits.TrailingZeros32(diff) >> 3
+ }
+ // Switch to 8 byte matching.
+ checked = 4
+ a = a[4:]
+ b = b[4:]
+ for len(a) >= 8 {
+ b = b[:len(a)]
+ if diff := load64(a, 0) ^ load64(b, 0); diff != 0 {
+ return checked + (bits.TrailingZeros64(diff) >> 3)
+ }
+ checked += 8
+ a = a[8:]
+ b = b[8:]
+ }
+ }
+ // Byte-at-a-time tail (fewer than 8 bytes remaining).
+ b = b[:len(a)]
+ for i := range a {
+ if a[i] != b[i] {
+ return int(i) + checked
+ }
+ }
+ return len(a) + checked
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/gen_inflate.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/gen_inflate.go
new file mode 100644
index 00000000000..c74a95fe7f6
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/gen_inflate.go
@@ -0,0 +1,274 @@
+// +build generate
+
+//go:generate go run $GOFILE && gofmt -w inflate_gen.go
+
+package main
+
+import (
+ "os"
+ "strings"
+)
+
+// main generates inflate_gen.go: for each concrete reader type it stamps out a
+// specialized copy of the Huffman-block decoding loop from the template below
+// (replacing $FUNCNAME$ and $TYPE$), then emits a dispatcher
+// (huffmanBlockDecoder) that selects the specialization matching the dynamic
+// type of f.r.
+func main() {
+ f, err := os.Create("inflate_gen.go")
+ if err != nil {
+ panic(err)
+ }
+ defer f.Close()
+ types := []string{"*bytes.Buffer", "*bytes.Reader", "*bufio.Reader", "*strings.Reader"}
+ names := []string{"BytesBuffer", "BytesReader", "BufioReader", "StringsReader"}
+ imports := []string{"bytes", "bufio", "io", "strings", "math/bits"}
+ f.WriteString(`// Code generated by go generate gen_inflate.go. DO NOT EDIT.
+
+package flate
+
+import (
+`)
+
+ for _, imp := range imports {
+ f.WriteString("\t\"" + imp + "\"\n")
+ }
+ f.WriteString(")\n\n")
+
+ // NOTE(review): some lines inside the raw string below appear garbled,
+ // e.g. `length += int(f.b & uint32(1<>= n` and the
+ // `fmt.Println("morebits f.nb>= nb` line — text between '<' and '>' seems
+ // to have been lost (likely markup stripping). Compare against upstream
+ // klauspost/compress gen_inflate.go before regenerating; the literal is
+ // reproduced here unchanged.
+ template := `
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) $FUNCNAME$() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.($TYPE$)
+ moreBits := func() error {
+ c, err := fr.ReadByte()
+ if err != nil {
+ return noEOF(err)
+ }
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
+ return nil
+ }
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & 31)
+ nb += 8
+ }
+ chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ f.b = b >> (n & 31)
+ f.nb = nb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var n uint // number of bits extra
+ var length int
+ var err error
+ switch {
+ case v < 256:
+ f.dict.writeByte(byte(v))
+ if f.dict.availWrite() == 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).$FUNCNAME$
+ f.stepState = stateInit
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ n = 0
+ case v < 269:
+ length = v*2 - (265*2 - 11)
+ n = 1
+ case v < 273:
+ length = v*4 - (269*4 - 19)
+ n = 2
+ case v < 277:
+ length = v*8 - (273*8 - 35)
+ n = 3
+ case v < 281:
+ length = v*16 - (277*16 - 67)
+ n = 4
+ case v < 285:
+ length = v*32 - (281*32 - 131)
+ n = 5
+ case v < maxNumLit:
+ length = 258
+ n = 0
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ if n > 0 {
+ for f.nb < n {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ length += int(f.b & uint32(1<>= n
+ f.nb -= n
+ }
+
+ var dist int
+ if f.hd == nil {
+ for f.nb < 5 {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
+ f.b >>= 5
+ f.nb -= 5
+ } else {
+ if dist, err = f.huffSym(f.hd); err != nil {
+ if debugDecode {
+ fmt.Println("huffsym:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << nb
+ for f.nb < nb {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb>= nb
+ f.nb -= nb
+ dist = 1<<(nb+1) + 1 + extra
+ default:
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > f.dict.histSize() {
+ if debugDecode {
+ fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, dist
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if f.dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).$FUNCNAME$ // We need to continue this work
+ f.stepState = stateDict
+ return
+ }
+ goto readLiteral
+ }
+}
+
+`
+ // Stamp out one specialized decoder per concrete reader type.
+ for i, t := range types {
+ s := strings.Replace(template, "$FUNCNAME$", "huffman"+names[i], -1)
+ s = strings.Replace(s, "$TYPE$", t, -1)
+ f.WriteString(s)
+ }
+ // Emit the dispatcher that selects a specialization by dynamic type.
+ f.WriteString("func (f *decompressor) huffmanBlockDecoder() func() {\n")
+ f.WriteString("\tswitch f.r.(type) {\n")
+ for i, t := range types {
+ f.WriteString("\t\tcase " + t + ":\n")
+ f.WriteString("\t\t\treturn f.huffman" + names[i] + "\n")
+ }
+ f.WriteString("\t\tdefault:\n")
+ // NOTE(review): the generated "return f.huffmanBlockGeneric" line appears
+ // to lack a trailing "\n" before the closing braces written below —
+ // confirm against upstream before running go generate.
+ f.WriteString("\t\t\treturn f.huffmanBlockGeneric")
+ f.WriteString("\t}\n}\n")
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
new file mode 100644
index 00000000000..53fe1d06e25
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -0,0 +1,911 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "io"
+)
+
+const (
+ // The largest offset code.
+ offsetCodeCount = 30
+
+ // The special code used to mark the end of a block.
+ endBlockMarker = 256
+
+ // The first length code.
+ lengthCodesStart = 257
+
+ // The number of codegen codes.
+ codegenCodeCount = 19
+ badCode = 255
+
+ // bufferFlushSize indicates the buffer size
+ // after which bytes are flushed to the writer.
+ // Should preferably be a multiple of 6, since
+ // we accumulate 6 bytes between writes to the buffer.
+ bufferFlushSize = 240
+
+ // bufferSize is the actual output byte buffer size.
+ // It must have additional headroom for a flush
+ // which can contain up to 8 bytes.
+ bufferSize = bufferFlushSize + 8
+)
+
+// The number of extra bits needed by length code X - LENGTH_CODES_START.
+var lengthExtraBits = [32]int8{
+ /* 257 */ 0, 0, 0,
+ /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
+ /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
+ /* 280 */ 4, 5, 5, 5, 5, 0,
+}
+
+// The length indicated by length code X - LENGTH_CODES_START.
+var lengthBase = [32]uint8{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
+ 12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
+ 64, 80, 96, 112, 128, 160, 192, 224, 255,
+}
+
+// offset code word extra bits.
+var offsetExtraBits = [64]int8{
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
+ 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
+ /* extended window */
+ 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20,
+}
+
+var offsetBase = [64]uint32{
+ /* normal deflate */
+ 0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
+ 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
+ 0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
+ 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
+ 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
+ 0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
+
+ /* extended window */
+ 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000,
+ 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000,
+ 0x100000, 0x180000, 0x200000, 0x300000,
+}
+
+// The odd order in which the codegen code sizes are written.
+var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
+
+type huffmanBitWriter struct {
+ // writer is the underlying writer.
+ // Do not use it directly; use the write method, which ensures
+ // that Write errors are sticky.
+ writer io.Writer
+
+ // Data waiting to be written is bytes[0:nbytes]
+ // and then the low nbits of bits.
+ bits uint64
+ nbits uint16
+ nbytes uint8
+ literalEncoding *huffmanEncoder
+ offsetEncoding *huffmanEncoder
+ codegenEncoding *huffmanEncoder
+ err error
+ lastHeader int
+ // Set between 0 (reused block can be up to 2x the size)
+ logNewTablePenalty uint
+ lastHuffMan bool
+ bytes [256]byte
+ literalFreq [lengthCodesStart + 32]uint16
+ offsetFreq [32]uint16
+ codegenFreq [codegenCodeCount]uint16
+
+ // codegen must have an extra space for the final symbol.
+ codegen [literalCount + offsetCodeCount + 1]uint8
+}
+
+// Huffman reuse.
+//
+// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections.
+//
+// This is controlled by several variables:
+//
+// If lastHeader is non-zero the Huffman table can be reused.
+// This also indicates that a Huffman table has been generated that can output all
+// possible symbols.
+// It also indicates that an EOB has not yet been emitted, so if a new tabel is generated
+// an EOB with the previous table must be written.
+//
+// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid.
+//
+// An incoming block estimates the output size of a new table using a 'fresh' by calculating the
+// optimal size and adding a penalty in 'logNewTablePenalty'.
+// A Huffman table is not optimal, which is why we add a penalty, and generating a new table
+// is slower both for compression and decompression.
+
+func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
+ return &huffmanBitWriter{
+ writer: w,
+ literalEncoding: newHuffmanEncoder(literalCount),
+ codegenEncoding: newHuffmanEncoder(codegenCodeCount),
+ offsetEncoding: newHuffmanEncoder(offsetCodeCount),
+ }
+}
+
+func (w *huffmanBitWriter) reset(writer io.Writer) {
+ w.writer = writer
+ w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
+ w.lastHeader = 0
+ w.lastHuffMan = false
+}
+
+func (w *huffmanBitWriter) canReuse(t *tokens) (offsets, lits bool) {
+ offsets, lits = true, true
+ a := t.offHist[:offsetCodeCount]
+ b := w.offsetFreq[:len(a)]
+ for i := range a {
+ if b[i] == 0 && a[i] != 0 {
+ offsets = false
+ break
+ }
+ }
+
+ a = t.extraHist[:literalCount-256]
+ b = w.literalFreq[256:literalCount]
+ b = b[:len(a)]
+ for i := range a {
+ if b[i] == 0 && a[i] != 0 {
+ lits = false
+ break
+ }
+ }
+ if lits {
+ a = t.litHist[:]
+ b = w.literalFreq[:len(a)]
+ for i := range a {
+ if b[i] == 0 && a[i] != 0 {
+ lits = false
+ break
+ }
+ }
+ }
+ return
+}
+
+func (w *huffmanBitWriter) flush() {
+ if w.err != nil {
+ w.nbits = 0
+ return
+ }
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+ n := w.nbytes
+ for w.nbits != 0 {
+ w.bytes[n] = byte(w.bits)
+ w.bits >>= 8
+ if w.nbits > 8 { // Avoid underflow
+ w.nbits -= 8
+ } else {
+ w.nbits = 0
+ }
+ n++
+ }
+ w.bits = 0
+ w.write(w.bytes[:n])
+ w.nbytes = 0
+}
+
+func (w *huffmanBitWriter) write(b []byte) {
+ if w.err != nil {
+ return
+ }
+ _, w.err = w.writer.Write(b)
+}
+
+func (w *huffmanBitWriter) writeBits(b int32, nb uint16) {
+ w.bits |= uint64(b) << (w.nbits & 63)
+ w.nbits += nb
+ if w.nbits >= 48 {
+ w.writeOutBits()
+ }
+}
+
+func (w *huffmanBitWriter) writeBytes(bytes []byte) {
+ if w.err != nil {
+ return
+ }
+ n := w.nbytes
+ if w.nbits&7 != 0 {
+ w.err = InternalError("writeBytes with unfinished bits")
+ return
+ }
+ for w.nbits != 0 {
+ w.bytes[n] = byte(w.bits)
+ w.bits >>= 8
+ w.nbits -= 8
+ n++
+ }
+ if n != 0 {
+ w.write(w.bytes[:n])
+ }
+ w.nbytes = 0
+ w.write(bytes)
+}
+
+// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
+// the literal and offset lengths arrays (which are concatenated into a single
+// array). This method generates that run-length encoding.
+//
+// The result is written into the codegen array, and the frequencies
+// of each code is written into the codegenFreq array.
+// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
+// information. Code badCode is an end marker
+//
+// numLiterals The number of literals in literalEncoding
+// numOffsets The number of offsets in offsetEncoding
+// litenc, offenc The literal and offset encoder to use
+func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
+ for i := range w.codegenFreq {
+ w.codegenFreq[i] = 0
+ }
+ // Note that we are using codegen both as a temporary variable for holding
+ // a copy of the frequencies, and as the place where we put the result.
+ // This is fine because the output is always shorter than the input used
+ // so far.
+ codegen := w.codegen[:] // cache
+ // Copy the concatenated code sizes to codegen. Put a marker at the end.
+ cgnl := codegen[:numLiterals]
+ for i := range cgnl {
+ cgnl[i] = uint8(litEnc.codes[i].len)
+ }
+
+ cgnl = codegen[numLiterals : numLiterals+numOffsets]
+ for i := range cgnl {
+ cgnl[i] = uint8(offEnc.codes[i].len)
+ }
+ codegen[numLiterals+numOffsets] = badCode
+
+ size := codegen[0]
+ count := 1
+ outIndex := 0
+ for inIndex := 1; size != badCode; inIndex++ {
+ // INVARIANT: We have seen "count" copies of size that have not yet
+ // had output generated for them.
+ nextSize := codegen[inIndex]
+ if nextSize == size {
+ count++
+ continue
+ }
+ // We need to generate codegen indicating "count" of size.
+ if size != 0 {
+ codegen[outIndex] = size
+ outIndex++
+ w.codegenFreq[size]++
+ count--
+ for count >= 3 {
+ n := 6
+ if n > count {
+ n = count
+ }
+ codegen[outIndex] = 16
+ outIndex++
+ codegen[outIndex] = uint8(n - 3)
+ outIndex++
+ w.codegenFreq[16]++
+ count -= n
+ }
+ } else {
+ for count >= 11 {
+ n := 138
+ if n > count {
+ n = count
+ }
+ codegen[outIndex] = 18
+ outIndex++
+ codegen[outIndex] = uint8(n - 11)
+ outIndex++
+ w.codegenFreq[18]++
+ count -= n
+ }
+ if count >= 3 {
+ // count >= 3 && count <= 10
+ codegen[outIndex] = 17
+ outIndex++
+ codegen[outIndex] = uint8(count - 3)
+ outIndex++
+ w.codegenFreq[17]++
+ count = 0
+ }
+ }
+ count--
+ for ; count >= 0; count-- {
+ codegen[outIndex] = size
+ outIndex++
+ w.codegenFreq[size]++
+ }
+ // Set up invariant for next time through the loop.
+ size = nextSize
+ count = 1
+ }
+ // Marker indicating the end of the codegen.
+ codegen[outIndex] = badCode
+}
+
+func (w *huffmanBitWriter) codegens() int {
+ numCodegens := len(w.codegenFreq)
+ for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+ numCodegens--
+ }
+ return numCodegens
+}
+
+func (w *huffmanBitWriter) headerSize() (size, numCodegens int) {
+ numCodegens = len(w.codegenFreq)
+ for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+ numCodegens--
+ }
+ return 3 + 5 + 5 + 4 + (3 * numCodegens) +
+ w.codegenEncoding.bitLength(w.codegenFreq[:]) +
+ int(w.codegenFreq[16])*2 +
+ int(w.codegenFreq[17])*3 +
+ int(w.codegenFreq[18])*7, numCodegens
+}
+
+// dynamicSize returns the size of dynamically encoded data in bits.
+func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) {
+ size = litEnc.bitLength(w.literalFreq[:]) +
+ offEnc.bitLength(w.offsetFreq[:])
+ return size
+}
+
+// dynamicSize returns the size of dynamically encoded data in bits.
+func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
+ header, numCodegens := w.headerSize()
+ size = header +
+ litEnc.bitLength(w.literalFreq[:]) +
+ offEnc.bitLength(w.offsetFreq[:]) +
+ extraBits
+ return size, numCodegens
+}
+
+// extraBitSize will return the number of bits that will be written
+// as "extra" bits on matches.
+func (w *huffmanBitWriter) extraBitSize() int {
+ total := 0
+ for i, n := range w.literalFreq[257:literalCount] {
+ total += int(n) * int(lengthExtraBits[i&31])
+ }
+ for i, n := range w.offsetFreq[:offsetCodeCount] {
+ total += int(n) * int(offsetExtraBits[i&31])
+ }
+ return total
+}
+
+// fixedSize returns the size of dynamically encoded data in bits.
+func (w *huffmanBitWriter) fixedSize(extraBits int) int {
+ return 3 +
+ fixedLiteralEncoding.bitLength(w.literalFreq[:]) +
+ fixedOffsetEncoding.bitLength(w.offsetFreq[:]) +
+ extraBits
+}
+
+// storedSize calculates the stored size, including header.
+// The function returns the size in bits and whether the block
+// fits inside a single block.
+func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
+ if in == nil {
+ return 0, false
+ }
+ if len(in) <= maxStoreBlockSize {
+ return (len(in) + 5) * 8, true
+ }
+ return 0, false
+}
+
+func (w *huffmanBitWriter) writeCode(c hcode) {
+ // The function does not get inlined if we "& 63" the shift.
+ w.bits |= uint64(c.code) << w.nbits
+ w.nbits += c.len
+ if w.nbits >= 48 {
+ w.writeOutBits()
+ }
+}
+
+// writeOutBits will write bits to the buffer.
+func (w *huffmanBitWriter) writeOutBits() {
+ bits := w.bits
+ w.bits >>= 48
+ w.nbits -= 48
+ n := w.nbytes
+ w.bytes[n] = byte(bits)
+ w.bytes[n+1] = byte(bits >> 8)
+ w.bytes[n+2] = byte(bits >> 16)
+ w.bytes[n+3] = byte(bits >> 24)
+ w.bytes[n+4] = byte(bits >> 32)
+ w.bytes[n+5] = byte(bits >> 40)
+ n += 6
+ if n >= bufferFlushSize {
+ if w.err != nil {
+ n = 0
+ return
+ }
+ w.write(w.bytes[:n])
+ n = 0
+ }
+ w.nbytes = n
+}
+
+// Write the header of a dynamic Huffman block to the output stream.
+//
+// numLiterals The number of literals specified in codegen
+// numOffsets The number of offsets specified in codegen
+// numCodegens The number of codegens used in codegen
+func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
+ if w.err != nil {
+ return
+ }
+ var firstBits int32 = 4
+ if isEof {
+ firstBits = 5
+ }
+ w.writeBits(firstBits, 3)
+ w.writeBits(int32(numLiterals-257), 5)
+ w.writeBits(int32(numOffsets-1), 5)
+ w.writeBits(int32(numCodegens-4), 4)
+
+ for i := 0; i < numCodegens; i++ {
+ value := uint(w.codegenEncoding.codes[codegenOrder[i]].len)
+ w.writeBits(int32(value), 3)
+ }
+
+ i := 0
+ for {
+ var codeWord = uint32(w.codegen[i])
+ i++
+ if codeWord == badCode {
+ break
+ }
+ w.writeCode(w.codegenEncoding.codes[codeWord])
+
+ switch codeWord {
+ case 16:
+ w.writeBits(int32(w.codegen[i]), 2)
+ i++
+ case 17:
+ w.writeBits(int32(w.codegen[i]), 3)
+ i++
+ case 18:
+ w.writeBits(int32(w.codegen[i]), 7)
+ i++
+ }
+ }
+}
+
+// writeStoredHeader will write a stored header.
+// If the stored block is only used for EOF,
+// it is replaced with a fixed huffman block.
+func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
+ if w.err != nil {
+ return
+ }
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+
+ // To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes.
+ if length == 0 && isEof {
+ w.writeFixedHeader(isEof)
+ // EOB: 7 bits, value: 0
+ w.writeBits(0, 7)
+ w.flush()
+ return
+ }
+
+ var flag int32
+ if isEof {
+ flag = 1
+ }
+ w.writeBits(flag, 3)
+ w.flush()
+ w.writeBits(int32(length), 16)
+ w.writeBits(int32(^uint16(length)), 16)
+}
+
+func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
+ if w.err != nil {
+ return
+ }
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+
+ // Indicate that we are a fixed Huffman block
+ var value int32 = 2
+ if isEof {
+ value = 3
+ }
+ w.writeBits(value, 3)
+}
+
+// writeBlock will write a block of tokens with the smallest encoding.
+// The original input can be supplied, and if the huffman encoded data
+// is larger than the original bytes, the data will be written as a
+// stored block.
+// If the input is nil, the tokens will always be Huffman encoded.
+func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
+ if w.err != nil {
+ return
+ }
+
+ tokens.AddEOB()
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+ numLiterals, numOffsets := w.indexTokens(tokens, false)
+ w.generate(tokens)
+ var extraBits int
+ storedSize, storable := w.storedSize(input)
+ if storable {
+ extraBits = w.extraBitSize()
+ }
+
+ // Figure out smallest code.
+ // Fixed Huffman baseline.
+ var literalEncoding = fixedLiteralEncoding
+ var offsetEncoding = fixedOffsetEncoding
+ var size = w.fixedSize(extraBits)
+
+ // Dynamic Huffman?
+ var numCodegens int
+
+ // Generate codegen and codegenFrequencies, which indicates how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+ dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
+
+ if dynamicSize < size {
+ size = dynamicSize
+ literalEncoding = w.literalEncoding
+ offsetEncoding = w.offsetEncoding
+ }
+
+ // Stored bytes?
+ if storable && storedSize < size {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+
+ // Huffman.
+ if literalEncoding == fixedLiteralEncoding {
+ w.writeFixedHeader(eof)
+ } else {
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ }
+
+ // Write the tokens.
+ w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes)
+}
+
+// writeBlockDynamic encodes a block using a dynamic Huffman table.
+// This should be used if the symbols used have a disproportionate
+// histogram distribution.
+// If input is supplied and the compression savings are below 1/16th of the
+// input size the block is stored.
+func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) {
+ if w.err != nil {
+ return
+ }
+
+ sync = sync || eof
+ if sync {
+ tokens.AddEOB()
+ }
+
+ // We cannot reuse pure huffman table, and must mark as EOF.
+ if (w.lastHuffMan || eof) && w.lastHeader > 0 {
+ // We will not try to reuse.
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ w.lastHuffMan = false
+ }
+ if !sync {
+ tokens.Fill()
+ }
+ numLiterals, numOffsets := w.indexTokens(tokens, !sync)
+
+ var size int
+ // Check if we should reuse.
+ if w.lastHeader > 0 {
+ // Estimate size for using a new table.
+ // Use the previous header size as the best estimate.
+ newSize := w.lastHeader + tokens.EstimatedBits()
+ newSize += newSize >> w.logNewTablePenalty
+
+ // The estimated size is calculated as an optimal table.
+ // We add a penalty to make it more realistic and re-use a bit more.
+ reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + w.extraBitSize()
+
+ // Check if a new table is better.
+ if newSize < reuseSize {
+ // Write the EOB we owe.
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ size = newSize
+ w.lastHeader = 0
+ } else {
+ size = reuseSize
+ }
+ // Check if we get a reasonable size decrease.
+ if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ w.lastHeader = 0
+ return
+ }
+ }
+
+ // We want a new block/table
+ if w.lastHeader == 0 {
+ w.generate(tokens)
+ // Generate codegen and codegenFrequencies, which indicates how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+ var numCodegens int
+ size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, w.extraBitSize())
+ // Store bytes, if we don't get a reasonable improvement.
+ if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ w.lastHeader = 0
+ return
+ }
+
+ // Write Huffman table.
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ w.lastHeader, _ = w.headerSize()
+ w.lastHuffMan = false
+ }
+
+ if sync {
+ w.lastHeader = 0
+ }
+ // Write the tokens.
+ w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes)
+}
+
+// indexTokens indexes a slice of tokens, and updates
+// literalFreq and offsetFreq, and generates literalEncoding
+// and offsetEncoding.
+// The number of literal and offset tokens is returned.
+func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
+ copy(w.literalFreq[:], t.litHist[:])
+ copy(w.literalFreq[256:], t.extraHist[:])
+ copy(w.offsetFreq[:], t.offHist[:offsetCodeCount])
+
+ if t.n == 0 {
+ return
+ }
+ if filled {
+ return maxNumLit, maxNumDist
+ }
+ // get the number of literals
+ numLiterals = len(w.literalFreq)
+ for w.literalFreq[numLiterals-1] == 0 {
+ numLiterals--
+ }
+ // get the number of offsets
+ numOffsets = len(w.offsetFreq)
+ for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
+ numOffsets--
+ }
+ if numOffsets == 0 {
+ // We haven't found a single match. If we want to go with the dynamic encoding,
+ // we should count at least one offset to be sure that the offset huffman tree could be encoded.
+ w.offsetFreq[0] = 1
+ numOffsets = 1
+ }
+ return
+}
+
+func (w *huffmanBitWriter) generate(t *tokens) {
+ w.literalEncoding.generate(w.literalFreq[:literalCount], 15)
+ w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
+}
+
+// writeTokens writes a slice of tokens to the output.
+// codes for literal and offset encoding must be supplied.
+func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
+ if w.err != nil {
+ return
+ }
+ if len(tokens) == 0 {
+ return
+ }
+
+ // Only last token should be endBlockMarker.
+ var deferEOB bool
+ if tokens[len(tokens)-1] == endBlockMarker {
+ tokens = tokens[:len(tokens)-1]
+ deferEOB = true
+ }
+
+ // Create slices up to the next power of two to avoid bounds checks.
+ lits := leCodes[:256]
+ offs := oeCodes[:32]
+ lengths := leCodes[lengthCodesStart:]
+ lengths = lengths[:32]
+ for _, t := range tokens {
+ if t < matchType {
+ w.writeCode(lits[t.literal()])
+ continue
+ }
+
+ // Write the length
+ length := t.length()
+ lengthCode := lengthCode(length)
+ if false {
+ w.writeCode(lengths[lengthCode&31])
+ } else {
+ // inlined
+ c := lengths[lengthCode&31]
+ w.bits |= uint64(c.code) << (w.nbits & 63)
+ w.nbits += c.len
+ if w.nbits >= 48 {
+ w.writeOutBits()
+ }
+ }
+
+ extraLengthBits := uint16(lengthExtraBits[lengthCode&31])
+ if extraLengthBits > 0 {
+ extraLength := int32(length - lengthBase[lengthCode&31])
+ w.writeBits(extraLength, extraLengthBits)
+ }
+ // Write the offset
+ offset := t.offset()
+ offsetCode := offsetCode(offset)
+ if false {
+ w.writeCode(offs[offsetCode&31])
+ } else {
+ // inlined
+ c := offs[offsetCode&31]
+ w.bits |= uint64(c.code) << (w.nbits & 63)
+ w.nbits += c.len
+ if w.nbits >= 48 {
+ w.writeOutBits()
+ }
+ }
+ extraOffsetBits := uint16(offsetExtraBits[offsetCode&63])
+ if extraOffsetBits > 0 {
+ extraOffset := int32(offset - offsetBase[offsetCode&63])
+ w.writeBits(extraOffset, extraOffsetBits)
+ }
+ }
+ if deferEOB {
+ w.writeCode(leCodes[endBlockMarker])
+ }
+}
+
+// huffOffset is a static offset encoder used for huffman only encoding.
+// It can be reused since we will not be encoding offset values.
+var huffOffset *huffmanEncoder
+
+func init() {
+ w := newHuffmanBitWriter(nil)
+ w.offsetFreq[0] = 1
+ huffOffset = newHuffmanEncoder(offsetCodeCount)
+ huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15)
+}
+
+// writeBlockHuff encodes a block of bytes as either
+// Huffman encoded literals or uncompressed bytes if the
+// results only gains very little from compression.
+func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
+ if w.err != nil {
+ return
+ }
+
+ // Clear histogram
+ for i := range w.literalFreq[:] {
+ w.literalFreq[i] = 0
+ }
+ if !w.lastHuffMan {
+ for i := range w.offsetFreq[:] {
+ w.offsetFreq[i] = 0
+ }
+ }
+
+ // Add everything as literals
+ // We have to estimate the header size.
+ // Assume header is around 70 bytes:
+ // https://stackoverflow.com/a/25454430
+ const guessHeaderSizeBits = 70 * 8
+ estBits, estExtra := histogramSize(input, w.literalFreq[:], !eof && !sync)
+ estBits += w.lastHeader + 15
+ if w.lastHeader == 0 {
+ estBits += guessHeaderSizeBits
+ }
+ estBits += estBits >> w.logNewTablePenalty
+
+ // Store bytes, if we don't get a reasonable improvement.
+ ssize, storable := w.storedSize(input)
+ if storable && ssize < estBits {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+
+ if w.lastHeader > 0 {
+ reuseSize := w.literalEncoding.bitLength(w.literalFreq[:256])
+ estBits += estExtra
+
+ if estBits < reuseSize {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+ }
+
+ const numLiterals = endBlockMarker + 1
+ const numOffsets = 1
+ if w.lastHeader == 0 {
+ w.literalFreq[endBlockMarker] = 1
+ w.literalEncoding.generate(w.literalFreq[:numLiterals], 15)
+
+ // Generate codegen and codegenFrequencies, which indicates how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+ numCodegens := w.codegens()
+
+ // Huffman.
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ w.lastHuffMan = true
+ w.lastHeader, _ = w.headerSize()
+ }
+
+ encoding := w.literalEncoding.codes[:257]
+ for _, t := range input {
+ // Bitwriting inlined, ~30% speedup
+ c := encoding[t]
+ w.bits |= uint64(c.code) << ((w.nbits) & 63)
+ w.nbits += c.len
+ if w.nbits >= 48 {
+ bits := w.bits
+ w.bits >>= 48
+ w.nbits -= 48
+ n := w.nbytes
+ w.bytes[n] = byte(bits)
+ w.bytes[n+1] = byte(bits >> 8)
+ w.bytes[n+2] = byte(bits >> 16)
+ w.bytes[n+3] = byte(bits >> 24)
+ w.bytes[n+4] = byte(bits >> 32)
+ w.bytes[n+5] = byte(bits >> 40)
+ n += 6
+ if n >= bufferFlushSize {
+ if w.err != nil {
+ n = 0
+ return
+ }
+ w.write(w.bytes[:n])
+ n = 0
+ }
+ w.nbytes = n
+ }
+ }
+ if eof || sync {
+ w.writeCode(encoding[endBlockMarker])
+ w.lastHeader = 0
+ w.lastHuffMan = false
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/huffman_code.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/huffman_code.go
new file mode 100644
index 00000000000..4c39a301871
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -0,0 +1,363 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "math"
+ "math/bits"
+)
+
+const (
+ maxBitsLimit = 16
+ // number of valid literals
+ literalCount = 286
+)
+
+// hcode is a huffman code with a bit code and bit length.
+type hcode struct {
+ code, len uint16
+}
+
+type huffmanEncoder struct {
+ codes []hcode
+ freqcache []literalNode
+ bitCount [17]int32
+}
+
+type literalNode struct {
+ literal uint16
+ freq uint16
+}
+
+// A levelInfo describes the state of the constructed tree for a given depth.
+type levelInfo struct {
+ // Our level. for better printing
+ level int32
+
+ // The frequency of the last node at this level
+ lastFreq int32
+
+ // The frequency of the next character to add to this level
+ nextCharFreq int32
+
+ // The frequency of the next pair (from level below) to add to this level.
+ // Only valid if the "needed" value of the next lower level is 0.
+ nextPairFreq int32
+
+ // The number of chains remaining to generate for this level before moving
+ // up to the next level
+ needed int32
+}
+
+// set sets the code and length of an hcode.
+func (h *hcode) set(code uint16, length uint16) {
+ h.len = length
+ h.code = code
+}
+
+func reverseBits(number uint16, bitLength byte) uint16 {
+ return bits.Reverse16(number << ((16 - bitLength) & 15))
+}
+
+func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} }
+
+func newHuffmanEncoder(size int) *huffmanEncoder {
+ // Make capacity to next power of two.
+ c := uint(bits.Len32(uint32(size - 1)))
+	return &huffmanEncoder{codes: make([]hcode, size, 1<<c)}
+}
+
+// Generates a HuffmanCode corresponding to the fixed literal table
+func generateFixedLiteralEncoding() *huffmanEncoder {
+	h := newHuffmanEncoder(literalCount)
+	codes := h.codes
+	var ch uint16
+	for ch = 0; ch < literalCount; ch++ {
+		var bits uint16
+		var size uint16
+		switch {
+		case ch < 144:
+			// size 8, 000110000  .. 10111111
+			bits = ch + 48
+			size = 8
+		case ch < 256:
+			// size 9, 110010000 .. 111111111
+			bits = ch + 400 - 144
+			size = 9
+		case ch < 280:
+			// size 7, 0000000 .. 0010111
+			bits = ch - 256
+			size = 7
+		default:
+			// size 8, 11000000 .. 11000111
+			bits = ch + 192 - 280
+			size = 8
+		}
+		codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size}
+	}
+	return h
+}
+
+func generateFixedOffsetEncoding() *huffmanEncoder {
+	h := newHuffmanEncoder(30)
+	codes := h.codes
+	for ch := range codes {
+		codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5}
+	}
+	return h
+}
+
+var fixedLiteralEncoding = generateFixedLiteralEncoding()
+var fixedOffsetEncoding = generateFixedOffsetEncoding()
+
+func (h *huffmanEncoder) bitLength(freq []uint16) int {
+	var total int
+	for i, f := range freq {
+		if f != 0 {
+			total += int(f) * int(h.codes[i].len)
+		}
+	}
+	return total
+}
+
+// Return the number of literals assigned to each bit size in the Huffman encoding
+//
+// This method is only called when list.length >= 3
+// The cases of 0, 1, and 2 literals are handled by special case code.
+//
+// list An array of the literals with non-zero frequencies
+// and their associated frequencies. The array is in order of increasing
+// frequency, and has as its last element a special element with frequency
+// MaxInt32
+// maxBits The maximum number of bits that should be used to encode any literal.
+// Must be less than 16.
+// return An integer array in which array[i] indicates the number of literals
+// that should be encoded in i bits.
+func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
+ if maxBits >= maxBitsLimit {
+ panic("flate: maxBits too large")
+ }
+ n := int32(len(list))
+ list = list[0 : n+1]
+ list[n] = maxNode()
+
+ // The tree can't have greater depth than n - 1, no matter what. This
+ // saves a little bit of work in some small cases
+ if maxBits > n-1 {
+ maxBits = n - 1
+ }
+
+ // Create information about each of the levels.
+ // A bogus "Level 0" whose sole purpose is so that
+ // level1.prev.needed==0. This makes level1.nextPairFreq
+ // be a legitimate value that never gets chosen.
+ var levels [maxBitsLimit]levelInfo
+ // leafCounts[i] counts the number of literals at the left
+ // of ancestors of the rightmost node at level i.
+ // leafCounts[i][j] is the number of literals at the left
+ // of the level j ancestor.
+ var leafCounts [maxBitsLimit][maxBitsLimit]int32
+
+ for level := int32(1); level <= maxBits; level++ {
+ // For every level, the first two items are the first two characters.
+ // We initialize the levels as if we had already figured this out.
+ levels[level] = levelInfo{
+ level: level,
+ lastFreq: int32(list[1].freq),
+ nextCharFreq: int32(list[2].freq),
+ nextPairFreq: int32(list[0].freq) + int32(list[1].freq),
+ }
+ leafCounts[level][level] = 2
+ if level == 1 {
+ levels[level].nextPairFreq = math.MaxInt32
+ }
+ }
+
+ // We need a total of 2*n - 2 items at top level and have already generated 2.
+ levels[maxBits].needed = 2*n - 4
+
+ level := maxBits
+ for {
+ l := &levels[level]
+ if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
+ // We've run out of both leafs and pairs.
+ // End all calculations for this level.
+ // To make sure we never come back to this level or any lower level,
+ // set nextPairFreq impossibly large.
+ l.needed = 0
+ levels[level+1].nextPairFreq = math.MaxInt32
+ level++
+ continue
+ }
+
+ prevFreq := l.lastFreq
+ if l.nextCharFreq < l.nextPairFreq {
+ // The next item on this row is a leaf node.
+ n := leafCounts[level][level] + 1
+ l.lastFreq = l.nextCharFreq
+ // Lower leafCounts are the same of the previous node.
+ leafCounts[level][level] = n
+ e := list[n]
+ if e.literal < math.MaxUint16 {
+ l.nextCharFreq = int32(e.freq)
+ } else {
+ l.nextCharFreq = math.MaxInt32
+ }
+ } else {
+ // The next item on this row is a pair from the previous row.
+ // nextPairFreq isn't valid until we generate two
+ // more values in the level below
+ l.lastFreq = l.nextPairFreq
+ // Take leaf counts from the lower level, except counts[level] remains the same.
+ copy(leafCounts[level][:level], leafCounts[level-1][:level])
+ levels[l.level-1].needed = 2
+ }
+
+ if l.needed--; l.needed == 0 {
+ // We've done everything we need to do for this level.
+ // Continue calculating one level up. Fill in nextPairFreq
+ // of that level with the sum of the two nodes we've just calculated on
+ // this level.
+ if l.level == maxBits {
+ // All done!
+ break
+ }
+ levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
+ level++
+ } else {
+ // If we stole from below, move down temporarily to replenish it.
+ for levels[level-1].needed > 0 {
+ level--
+ }
+ }
+ }
+
+ // Somethings is wrong if at the end, the top level is null or hasn't used
+ // all of the leaves.
+ if leafCounts[maxBits][maxBits] != n {
+ panic("leafCounts[maxBits][maxBits] != n")
+ }
+
+ bitCount := h.bitCount[:maxBits+1]
+ bits := 1
+ counts := &leafCounts[maxBits]
+ for level := maxBits; level > 0; level-- {
+ // chain.leafCount gives the number of literals requiring at least "bits"
+ // bits to encode.
+ bitCount[bits] = counts[level] - counts[level-1]
+ bits++
+ }
+ return bitCount
+}
+
+// Look at the leaves and assign them a bit count and an encoding as specified
+// in RFC 1951 3.2.2
+func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
+ code := uint16(0)
+ for n, bits := range bitCount {
+ code <<= 1
+ if n == 0 || bits == 0 {
+ continue
+ }
+ // The literals list[len(list)-bits] .. list[len(list)-bits]
+ // are encoded using "bits" bits, and get the values
+ // code, code + 1, .... The code values are
+ // assigned in literal order (not frequency order).
+ chunk := list[len(list)-int(bits):]
+
+ sortByLiteral(chunk)
+ for _, node := range chunk {
+ h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
+ code++
+ }
+ list = list[0 : len(list)-int(bits)]
+ }
+}
+
+// Update this Huffman Code object to be the minimum code for the specified frequency count.
+//
+// freq An array of frequencies, in which frequency[i] gives the frequency of literal i.
+// maxBits The maximum number of bits to use for any literal.
+func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
+ if h.freqcache == nil {
+ // Allocate a reusable buffer with the longest possible frequency table.
+ // Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
+ // The largest of these is literalCount, so we allocate for that case.
+ h.freqcache = make([]literalNode, literalCount+1)
+ }
+ list := h.freqcache[:len(freq)+1]
+ // Number of non-zero literals
+ count := 0
+ // Set list to be the set of all non-zero literals and their frequencies
+ for i, f := range freq {
+ if f != 0 {
+ list[count] = literalNode{uint16(i), f}
+ count++
+ } else {
+ list[count] = literalNode{}
+ h.codes[i].len = 0
+ }
+ }
+ list[len(freq)] = literalNode{}
+
+ list = list[:count]
+ if count <= 2 {
+ // Handle the small cases here, because they are awkward for the general case code. With
+ // two or fewer literals, everything has bit length 1.
+ for i, node := range list {
+ // "list" is in order of increasing literal value.
+ h.codes[node.literal].set(uint16(i), 1)
+ }
+ return
+ }
+ sortByFreq(list)
+
+ // Get the number of literals for each bit count
+ bitCount := h.bitCounts(list, maxBits)
+ // And do the assignment
+ h.assignEncodingAndSize(bitCount, list)
+}
+
+func atLeastOne(v float32) float32 {
+ if v < 1 {
+ return 1
+ }
+ return v
+}
+
+// histogramSize accumulates a histogram of b in h.
+// An estimated size in bits is returned.
+// Unassigned values are assigned '1' in the histogram.
+// len(h) must be >= 256, and h's elements must be all zeroes.
+func histogramSize(b []byte, h []uint16, fill bool) (int, int) {
+ h = h[:256]
+ for _, t := range b {
+ h[t]++
+ }
+ invTotal := 1.0 / float32(len(b))
+ shannon := float32(0.0)
+ var extra float32
+ if fill {
+ oneBits := atLeastOne(-mFastLog2(invTotal))
+ for i, v := range h[:] {
+ if v > 0 {
+ n := float32(v)
+ shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+ } else {
+ h[i] = 1
+ extra += oneBits
+ }
+ }
+ } else {
+ for _, v := range h[:] {
+ if v > 0 {
+ n := float32(v)
+ shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+ }
+ }
+ }
+
+ return int(shannon + 0.99), int(extra + 0.99)
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
new file mode 100644
index 00000000000..20778029900
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
@@ -0,0 +1,178 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// Sort sorts data.
+// It makes one call to data.Len to determine n, and O(n*log(n)) calls to
+// data.Less and data.Swap. The sort is not guaranteed to be stable.
+func sortByFreq(data []literalNode) {
+ n := len(data)
+ quickSortByFreq(data, 0, n, maxDepth(n))
+}
+
+func quickSortByFreq(data []literalNode, a, b, maxDepth int) {
+ for b-a > 12 { // Use ShellSort for slices <= 12 elements
+ if maxDepth == 0 {
+ heapSort(data, a, b)
+ return
+ }
+ maxDepth--
+ mlo, mhi := doPivotByFreq(data, a, b)
+ // Avoiding recursion on the larger subproblem guarantees
+ // a stack depth of at most lg(b-a).
+ if mlo-a < b-mhi {
+ quickSortByFreq(data, a, mlo, maxDepth)
+ a = mhi // i.e., quickSortByFreq(data, mhi, b)
+ } else {
+ quickSortByFreq(data, mhi, b, maxDepth)
+ b = mlo // i.e., quickSortByFreq(data, a, mlo)
+ }
+ }
+ if b-a > 1 {
+ // Do ShellSort pass with gap 6
+ // It could be written in this simplified form cause b-a <= 12
+ for i := a + 6; i < b; i++ {
+ if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq {
+ data[i], data[i-6] = data[i-6], data[i]
+ }
+ }
+ insertionSortByFreq(data, a, b)
+ }
+}
+
+// siftDownByFreq implements the heap property on data[lo, hi).
+// first is an offset into the array where the root of the heap lies.
+func siftDownByFreq(data []literalNode, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && (data[first+child].freq == data[first+child+1].freq && data[first+child].literal < data[first+child+1].literal || data[first+child].freq < data[first+child+1].freq) {
+ child++
+ }
+ if data[first+root].freq == data[first+child].freq && data[first+root].literal > data[first+child].literal || data[first+root].freq > data[first+child].freq {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) {
+ m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
+ if hi-lo > 40 {
+ // Tukey's ``Ninther,'' median of three medians of three.
+ s := (hi - lo) / 8
+ medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s)
+ medianOfThreeSortByFreq(data, m, m-s, m+s)
+ medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s)
+ }
+ medianOfThreeSortByFreq(data, lo, m, hi-1)
+
+ // Invariants are:
+ // data[lo] = pivot (set up by ChoosePivot)
+ // data[lo < i < a] < pivot
+ // data[a <= i < b] <= pivot
+ // data[b <= i < c] unexamined
+ // data[c <= i < hi-1] > pivot
+ // data[hi-1] >= pivot
+ pivot := lo
+ a, c := lo+1, hi-1
+
+ for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ {
+ }
+ b := a
+ for {
+ for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot
+ }
+ for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot
+ }
+ if b >= c {
+ break
+ }
+ // data[b] > pivot; data[c-1] <= pivot
+ data[b], data[c-1] = data[c-1], data[b]
+ b++
+ c--
+ }
+ // If hi-c<3 then there are duplicates (by property of median of nine).
+ // Let's be a bit more conservative, and set border to 5.
+ protect := hi-c < 5
+ if !protect && hi-c < (hi-lo)/4 {
+ // Lets test some points for equality to pivot
+ dups := 0
+ if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot
+ data[c], data[hi-1] = data[hi-1], data[c]
+ c++
+ dups++
+ }
+ if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot
+ b--
+ dups++
+ }
+ // m-lo = (hi-lo)/2 > 6
+ // b-lo > (hi-lo)*3/4-1 > 8
+ // ==> m < b ==> data[m] <= pivot
+ if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot
+ data[m], data[b-1] = data[b-1], data[m]
+ b--
+ dups++
+ }
+ // if at least 2 points are equal to pivot, assume skewed distribution
+ protect = dups > 1
+ }
+ if protect {
+ // Protect against a lot of duplicates
+ // Add invariant:
+ // data[a <= i < b] unexamined
+ // data[b <= i < c] = pivot
+ for {
+ for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot
+ }
+ for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot
+ }
+ if a >= b {
+ break
+ }
+ // data[a] == pivot; data[b-1] < pivot
+ data[a], data[b-1] = data[b-1], data[a]
+ a++
+ b--
+ }
+ }
+ // Swap pivot into middle
+ data[pivot], data[b-1] = data[b-1], data[pivot]
+ return b - 1, c
+}
+
+// Insertion sort
+func insertionSortByFreq(data []literalNode, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// quickSortByFreq, loosely following Bentley and McIlroy,
+// ``Engineering a Sort Function,'' SP&E November 1993.
+
+// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
+func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) {
+ // sort 3 elements
+ if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
+ data[m1], data[m0] = data[m0], data[m1]
+ }
+ // data[m0] <= data[m1]
+ if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq {
+ data[m2], data[m1] = data[m1], data[m2]
+ // data[m0] <= data[m2] && data[m1] < data[m2]
+ if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
+ data[m1], data[m0] = data[m0], data[m1]
+ }
+ }
+ // now data[m0] <= data[m1] <= data[m2]
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
new file mode 100644
index 00000000000..93f1aea109e
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
@@ -0,0 +1,201 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// Sort sorts data.
+// It makes one call to data.Len to determine n, and O(n*log(n)) calls to
+// data.Less and data.Swap. The sort is not guaranteed to be stable.
+func sortByLiteral(data []literalNode) {
+ n := len(data)
+ quickSort(data, 0, n, maxDepth(n))
+}
+
+func quickSort(data []literalNode, a, b, maxDepth int) {
+ for b-a > 12 { // Use ShellSort for slices <= 12 elements
+ if maxDepth == 0 {
+ heapSort(data, a, b)
+ return
+ }
+ maxDepth--
+ mlo, mhi := doPivot(data, a, b)
+ // Avoiding recursion on the larger subproblem guarantees
+ // a stack depth of at most lg(b-a).
+ if mlo-a < b-mhi {
+ quickSort(data, a, mlo, maxDepth)
+ a = mhi // i.e., quickSort(data, mhi, b)
+ } else {
+ quickSort(data, mhi, b, maxDepth)
+ b = mlo // i.e., quickSort(data, a, mlo)
+ }
+ }
+ if b-a > 1 {
+ // Do ShellSort pass with gap 6
+ // It could be written in this simplified form cause b-a <= 12
+ for i := a + 6; i < b; i++ {
+ if data[i].literal < data[i-6].literal {
+ data[i], data[i-6] = data[i-6], data[i]
+ }
+ }
+ insertionSort(data, a, b)
+ }
+}
+func heapSort(data []literalNode, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDown(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDown(data, lo, i, first)
+ }
+}
+
+// siftDown implements the heap property on data[lo, hi).
+// first is an offset into the array where the root of the heap lies.
+func siftDown(data []literalNode, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && data[first+child].literal < data[first+child+1].literal {
+ child++
+ }
+ if data[first+root].literal > data[first+child].literal {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) {
+ m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
+ if hi-lo > 40 {
+ // Tukey's ``Ninther,'' median of three medians of three.
+ s := (hi - lo) / 8
+ medianOfThree(data, lo, lo+s, lo+2*s)
+ medianOfThree(data, m, m-s, m+s)
+ medianOfThree(data, hi-1, hi-1-s, hi-1-2*s)
+ }
+ medianOfThree(data, lo, m, hi-1)
+
+ // Invariants are:
+ // data[lo] = pivot (set up by ChoosePivot)
+ // data[lo < i < a] < pivot
+ // data[a <= i < b] <= pivot
+ // data[b <= i < c] unexamined
+ // data[c <= i < hi-1] > pivot
+ // data[hi-1] >= pivot
+ pivot := lo
+ a, c := lo+1, hi-1
+
+ for ; a < c && data[a].literal < data[pivot].literal; a++ {
+ }
+ b := a
+ for {
+ for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot
+ }
+ for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot
+ }
+ if b >= c {
+ break
+ }
+ // data[b] > pivot; data[c-1] <= pivot
+ data[b], data[c-1] = data[c-1], data[b]
+ b++
+ c--
+ }
+ // If hi-c<3 then there are duplicates (by property of median of nine).
+ // Let's be a bit more conservative, and set border to 5.
+ protect := hi-c < 5
+ if !protect && hi-c < (hi-lo)/4 {
+ // Lets test some points for equality to pivot
+ dups := 0
+ if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot
+ data[c], data[hi-1] = data[hi-1], data[c]
+ c++
+ dups++
+ }
+ if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot
+ b--
+ dups++
+ }
+ // m-lo = (hi-lo)/2 > 6
+ // b-lo > (hi-lo)*3/4-1 > 8
+ // ==> m < b ==> data[m] <= pivot
+ if data[m].literal > data[pivot].literal { // data[m] = pivot
+ data[m], data[b-1] = data[b-1], data[m]
+ b--
+ dups++
+ }
+ // if at least 2 points are equal to pivot, assume skewed distribution
+ protect = dups > 1
+ }
+ if protect {
+ // Protect against a lot of duplicates
+ // Add invariant:
+ // data[a <= i < b] unexamined
+ // data[b <= i < c] = pivot
+ for {
+ for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot
+ }
+ for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot
+ }
+ if a >= b {
+ break
+ }
+ // data[a] == pivot; data[b-1] < pivot
+ data[a], data[b-1] = data[b-1], data[a]
+ a++
+ b--
+ }
+ }
+ // Swap pivot into middle
+ data[pivot], data[b-1] = data[b-1], data[pivot]
+ return b - 1, c
+}
+
+// Insertion sort
+func insertionSort(data []literalNode, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && data[j].literal < data[j-1].literal; j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// maxDepth returns a threshold at which quicksort should switch
+// to heapsort. It returns 2*ceil(lg(n+1)).
+func maxDepth(n int) int {
+ var depth int
+ for i := n; i > 0; i >>= 1 {
+ depth++
+ }
+ return depth * 2
+}
+
+// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
+func medianOfThree(data []literalNode, m1, m0, m2 int) {
+ // sort 3 elements
+ if data[m1].literal < data[m0].literal {
+ data[m1], data[m0] = data[m0], data[m1]
+ }
+ // data[m0] <= data[m1]
+ if data[m2].literal < data[m1].literal {
+ data[m2], data[m1] = data[m1], data[m2]
+ // data[m0] <= data[m2] && data[m1] < data[m2]
+ if data[m1].literal < data[m0].literal {
+ data[m1], data[m0] = data[m0], data[m1]
+ }
+ }
+ // now data[m0] <= data[m1] <= data[m2]
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/inflate.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/inflate.go
new file mode 100644
index 00000000000..7f175a4ec26
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/inflate.go
@@ -0,0 +1,1000 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package flate implements the DEFLATE compressed data format, described in
+// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file
+// formats.
+package flate
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "math/bits"
+ "strconv"
+ "sync"
+)
+
+const (
+ maxCodeLen = 16 // max length of Huffman code
+ maxCodeLenMask = 15 // mask for max length of Huffman code
+ // The next three numbers come from the RFC section 3.2.7, with the
+ // additional proviso in section 3.2.5 which implies that distance codes
+ // 30 and 31 should never occur in compressed data.
+ maxNumLit = 286
+ maxNumDist = 30
+ numCodes = 19 // number of codes in Huffman meta-code
+
+ debugDecode = false
+)
+
+// Initialize the fixedHuffmanDecoder only once upon first use.
+var fixedOnce sync.Once
+var fixedHuffmanDecoder huffmanDecoder
+
+// A CorruptInputError reports the presence of corrupt input at a given offset.
+type CorruptInputError int64
+
+func (e CorruptInputError) Error() string {
+ return "flate: corrupt input before offset " + strconv.FormatInt(int64(e), 10)
+}
+
+// An InternalError reports an error in the flate code itself.
+type InternalError string
+
+func (e InternalError) Error() string { return "flate: internal error: " + string(e) }
+
+// A ReadError reports an error encountered while reading input.
+//
+// Deprecated: No longer returned.
+type ReadError struct {
+ Offset int64 // byte offset where error occurred
+ Err error // error returned by underlying Read
+}
+
+func (e *ReadError) Error() string {
+ return "flate: read error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
+}
+
+// A WriteError reports an error encountered while writing output.
+//
+// Deprecated: No longer returned.
+type WriteError struct {
+ Offset int64 // byte offset where error occurred
+ Err error // error returned by underlying Write
+}
+
+func (e *WriteError) Error() string {
+ return "flate: write error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
+}
+
+// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to
+// to switch to a new underlying Reader. This permits reusing a ReadCloser
+// instead of allocating a new one.
+type Resetter interface {
+ // Reset discards any buffered data and resets the Resetter as if it was
+ // newly initialized with the given reader.
+ Reset(r io.Reader, dict []byte) error
+}
+
+// The data structure for decoding Huffman tables is based on that of
+// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits),
+// For codes smaller than the table width, there are multiple entries
+// (each combination of trailing bits has the same value). For codes
+// larger than the table width, the table contains a link to an overflow
+// table. The width of each entry in the link table is the maximum code
+// size minus the chunk width.
+//
+// Note that you can do a lookup in the table even without all bits
+// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
+// have the property that shorter codes come before longer ones, the
+// bit length estimate in the result is a lower bound on the actual
+// number of bits.
+//
+// See the following:
+// http://www.gzip.org/algorithm.txt
+
+// chunk & 15 is number of bits
+// chunk >> 4 is value, including table link
+
+const (
+ huffmanChunkBits = 9
+ huffmanNumChunks = 1 << huffmanChunkBits
+ huffmanCountMask = 15
+ huffmanValueShift = 4
+)
+
+type huffmanDecoder struct {
+ maxRead int // the maximum number of bits we can read and not overread
+ chunks *[huffmanNumChunks]uint16 // chunks as described above
+ links [][]uint16 // overflow links
+ linkMask uint32 // mask the width of the link table
+}
+
+// Initialize Huffman decoding tables from array of code lengths.
+// Following this function, h is guaranteed to be initialized into a complete
+// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
+// degenerate case where the tree has only a single symbol with length 1. Empty
+// trees are permitted.
+func (h *huffmanDecoder) init(lengths []int) bool {
+ // Sanity enables additional runtime tests during Huffman
+ // table construction. It's intended to be used during
+ // development to supplement the currently ad-hoc unit tests.
+ const sanity = false
+
+ if h.chunks == nil {
+ h.chunks = &[huffmanNumChunks]uint16{}
+ }
+ if h.maxRead != 0 {
+ *h = huffmanDecoder{chunks: h.chunks, links: h.links}
+ }
+
+ // Count number of codes of each length,
+ // compute maxRead and max length.
+ var count [maxCodeLen]int
+ var min, max int
+ for _, n := range lengths {
+ if n == 0 {
+ continue
+ }
+ if min == 0 || n < min {
+ min = n
+ }
+ if n > max {
+ max = n
+ }
+ count[n&maxCodeLenMask]++
+ }
+
+ // Empty tree. The decompressor.huffSym function will fail later if the tree
+ // is used. Technically, an empty tree is only valid for the HDIST tree and
+ // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
+ // is guaranteed to fail since it will attempt to use the tree to decode the
+ // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
+ // guaranteed to fail later since the compressed data section must be
+ // composed of at least one symbol (the end-of-block marker).
+ if max == 0 {
+ return true
+ }
+
+ code := 0
+ var nextcode [maxCodeLen]int
+ for i := min; i <= max; i++ {
+ code <<= 1
+ nextcode[i&maxCodeLenMask] = code
+ code += count[i&maxCodeLenMask]
+ }
+
+ // Check that the coding is complete (i.e., that we've
+ // assigned all 2-to-the-max possible bit sequences).
+ // Exception: To be compatible with zlib, we also need to
+ // accept degenerate single-code codings. See also
+ // TestDegenerateHuffmanCoding.
+ if code != 1< huffmanChunkBits {
+ numLinks := 1 << (uint(max) - huffmanChunkBits)
+ h.linkMask = uint32(numLinks - 1)
+
+ // create link tables
+ link := nextcode[huffmanChunkBits+1] >> 1
+ if cap(h.links) < huffmanNumChunks-link {
+ h.links = make([][]uint16, huffmanNumChunks-link)
+ } else {
+ h.links = h.links[:huffmanNumChunks-link]
+ }
+ for j := uint(link); j < huffmanNumChunks; j++ {
+ reverse := int(bits.Reverse16(uint16(j)))
+ reverse >>= uint(16 - huffmanChunkBits)
+ off := j - uint(link)
+ if sanity && h.chunks[reverse] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ h.chunks[reverse] = uint16(off<>= uint(16 - n)
+ if n <= huffmanChunkBits {
+ for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+ // We should never need to overwrite
+ // an existing chunk. Also, 0 is
+ // never a valid chunk, because the
+ // lower 4 "count" bits should be
+ // between 1 and 15.
+ if sanity && h.chunks[off] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ h.chunks[off] = chunk
+ }
+ } else {
+ j := reverse & (huffmanNumChunks - 1)
+ if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
+ // Longer codes should have been
+ // associated with a link table above.
+ panic("impossible: not an indirect chunk")
+ }
+ value := h.chunks[j] >> huffmanValueShift
+ linktab := h.links[value]
+ reverse >>= huffmanChunkBits
+ for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
+ if sanity && linktab[off] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ linktab[off] = chunk
+ }
+ }
+ }
+
+ if sanity {
+ // Above we've sanity checked that we never overwrote
+ // an existing entry. Here we additionally check that
+ // we filled the tables completely.
+ for i, chunk := range h.chunks {
+ if chunk == 0 {
+ // As an exception, in the degenerate
+ // single-code case, we allow odd
+ // chunks to be missing.
+ if code == 1 && i%2 == 1 {
+ continue
+ }
+ panic("impossible: missing chunk")
+ }
+ }
+ for _, linktab := range h.links {
+ for _, chunk := range linktab {
+ if chunk == 0 {
+ panic("impossible: missing chunk")
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// The actual read interface needed by NewReader.
+// If the passed in io.Reader does not also have ReadByte,
+// the NewReader will introduce its own buffering.
+type Reader interface {
+ io.Reader
+ io.ByteReader
+}
+
+// Decompress state.
+type decompressor struct {
+ // Input source.
+ r Reader
+ roffset int64
+
+ // Input bits, in top of b.
+ b uint32
+ nb uint
+
+ // Huffman decoders for literal/length, distance.
+ h1, h2 huffmanDecoder
+
+ // Length arrays used to define Huffman codes.
+ bits *[maxNumLit + maxNumDist]int
+ codebits *[numCodes]int
+
+ // Output history, buffer.
+ dict dictDecoder
+
+ // Temporary buffer (avoids repeated allocation).
+ buf [4]byte
+
+ // Next step in the decompression,
+ // and decompression state.
+ step func(*decompressor)
+ stepState int
+ final bool
+ err error
+ toRead []byte
+ hl, hd *huffmanDecoder
+ copyLen int
+ copyDist int
+}
+
+func (f *decompressor) nextBlock() {
+ for f.nb < 1+2 {
+ if f.err = f.moreBits(); f.err != nil {
+ return
+ }
+ }
+ f.final = f.b&1 == 1
+ f.b >>= 1
+ typ := f.b & 3
+ f.b >>= 2
+ f.nb -= 1 + 2
+ switch typ {
+ case 0:
+ f.dataBlock()
+ case 1:
+ // compressed, fixed Huffman tables
+ f.hl = &fixedHuffmanDecoder
+ f.hd = nil
+ f.huffmanBlockDecoder()()
+ case 2:
+ // compressed, dynamic Huffman tables
+ if f.err = f.readHuffman(); f.err != nil {
+ break
+ }
+ f.hl = &f.h1
+ f.hd = &f.h2
+ f.huffmanBlockDecoder()()
+ default:
+ // 3 is reserved.
+ if debugDecode {
+ fmt.Println("reserved data block encountered")
+ }
+ f.err = CorruptInputError(f.roffset)
+ }
+}
+
+func (f *decompressor) Read(b []byte) (int, error) {
+ for {
+ if len(f.toRead) > 0 {
+ n := copy(b, f.toRead)
+ f.toRead = f.toRead[n:]
+ if len(f.toRead) == 0 {
+ return n, f.err
+ }
+ return n, nil
+ }
+ if f.err != nil {
+ return 0, f.err
+ }
+ f.step(f)
+ if f.err != nil && len(f.toRead) == 0 {
+ f.toRead = f.dict.readFlush() // Flush what's left in case of error
+ }
+ }
+}
+
+// Support the io.WriteTo interface for io.Copy and friends.
+func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
+ total := int64(0)
+ flushed := false
+ for {
+ if len(f.toRead) > 0 {
+ n, err := w.Write(f.toRead)
+ total += int64(n)
+ if err != nil {
+ f.err = err
+ return total, err
+ }
+ if n != len(f.toRead) {
+ return total, io.ErrShortWrite
+ }
+ f.toRead = f.toRead[:0]
+ }
+ if f.err != nil && flushed {
+ if f.err == io.EOF {
+ return total, nil
+ }
+ return total, f.err
+ }
+ if f.err == nil {
+ f.step(f)
+ }
+ if len(f.toRead) == 0 && f.err != nil && !flushed {
+ f.toRead = f.dict.readFlush() // Flush what's left in case of error
+ flushed = true
+ }
+ }
+}
+
+func (f *decompressor) Close() error {
+ if f.err == io.EOF {
+ return nil
+ }
+ return f.err
+}
+
+// RFC 1951 section 3.2.7.
+// Compression with dynamic Huffman codes
+
+var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
+
+func (f *decompressor) readHuffman() error {
+ // HLIT[5], HDIST[5], HCLEN[4].
+ for f.nb < 5+5+4 {
+ if err := f.moreBits(); err != nil {
+ return err
+ }
+ }
+ nlit := int(f.b&0x1F) + 257
+ if nlit > maxNumLit {
+ if debugDecode {
+ fmt.Println("nlit > maxNumLit", nlit)
+ }
+ return CorruptInputError(f.roffset)
+ }
+ f.b >>= 5
+ ndist := int(f.b&0x1F) + 1
+ if ndist > maxNumDist {
+ if debugDecode {
+ fmt.Println("ndist > maxNumDist", ndist)
+ }
+ return CorruptInputError(f.roffset)
+ }
+ f.b >>= 5
+ nclen := int(f.b&0xF) + 4
+ // numCodes is 19, so nclen is always valid.
+ f.b >>= 4
+ f.nb -= 5 + 5 + 4
+
+ // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
+ for i := 0; i < nclen; i++ {
+ for f.nb < 3 {
+ if err := f.moreBits(); err != nil {
+ return err
+ }
+ }
+ f.codebits[codeOrder[i]] = int(f.b & 0x7)
+ f.b >>= 3
+ f.nb -= 3
+ }
+ for i := nclen; i < len(codeOrder); i++ {
+ f.codebits[codeOrder[i]] = 0
+ }
+ if !f.h1.init(f.codebits[0:]) {
+ if debugDecode {
+ fmt.Println("init codebits failed")
+ }
+ return CorruptInputError(f.roffset)
+ }
+
+ // HLIT + 257 code lengths, HDIST + 1 code lengths,
+ // using the code length Huffman code.
+ for i, n := 0, nlit+ndist; i < n; {
+ x, err := f.huffSym(&f.h1)
+ if err != nil {
+ return err
+ }
+ if x < 16 {
+ // Actual length.
+ f.bits[i] = x
+ i++
+ continue
+ }
+ // Repeat previous length or zero.
+ var rep int
+ var nb uint
+ var b int
+ switch x {
+ default:
+ return InternalError("unexpected length code")
+ case 16:
+ rep = 3
+ nb = 2
+ if i == 0 {
+ if debugDecode {
+ fmt.Println("i==0")
+ }
+ return CorruptInputError(f.roffset)
+ }
+ b = f.bits[i-1]
+ case 17:
+ rep = 3
+ nb = 3
+ b = 0
+ case 18:
+ rep = 11
+ nb = 7
+ b = 0
+ }
+ for f.nb < nb {
+ if err := f.moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits:", err)
+ }
+ return err
+ }
+ }
+ rep += int(f.b & uint32(1<>= nb
+ f.nb -= nb
+ if i+rep > n {
+ if debugDecode {
+ fmt.Println("i+rep > n", i, rep, n)
+ }
+ return CorruptInputError(f.roffset)
+ }
+ for j := 0; j < rep; j++ {
+ f.bits[i] = b
+ i++
+ }
+ }
+
+ if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
+ if debugDecode {
+ fmt.Println("init2 failed")
+ }
+ return CorruptInputError(f.roffset)
+ }
+
+ // As an optimization, we can initialize the maxRead bits to read at a time
+ // for the HLIT tree to the length of the EOB marker since we know that
+ // every block must terminate with one. This preserves the property that
+ // we never read any extra bytes after the end of the DEFLATE stream.
+ if f.h1.maxRead < f.bits[endBlockMarker] {
+ f.h1.maxRead = f.bits[endBlockMarker]
+ }
+ if !f.final {
+ // If not the final block, the smallest block possible is
+ // a predefined table, BTYPE=01, with a single EOB marker.
+ // This will take up 3 + 7 bits.
+ f.h1.maxRead += 10
+ }
+
+ return nil
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBlockGeneric() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := f.r.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & 31)
+ nb += 8
+ }
+ chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ f.b = b >> (n & 31)
+ f.nb = nb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var n uint // number of bits extra
+ var length int
+ var err error
+ switch {
+ case v < 256:
+ f.dict.writeByte(byte(v))
+ if f.dict.availWrite() == 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanBlockGeneric
+ f.stepState = stateInit
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ n = 0
+ case v < 269:
+ length = v*2 - (265*2 - 11)
+ n = 1
+ case v < 273:
+ length = v*4 - (269*4 - 19)
+ n = 2
+ case v < 277:
+ length = v*8 - (273*8 - 35)
+ n = 3
+ case v < 281:
+ length = v*16 - (277*16 - 67)
+ n = 4
+ case v < 285:
+ length = v*32 - (281*32 - 131)
+ n = 5
+ case v < maxNumLit:
+ length = 258
+ n = 0
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ if n > 0 {
+ for f.nb < n {
+ if err = f.moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ length += int(f.b & uint32(1<>= n
+ f.nb -= n
+ }
+
+ var dist int
+ if f.hd == nil {
+ for f.nb < 5 {
+ if err = f.moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
+ f.b >>= 5
+ f.nb -= 5
+ } else {
+ if dist, err = f.huffSym(f.hd); err != nil {
+ if debugDecode {
+ fmt.Println("huffsym:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << nb
+ for f.nb < nb {
+ if err = f.moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb>= nb
+ f.nb -= nb
+ dist = 1<<(nb+1) + 1 + extra
+ default:
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > f.dict.histSize() {
+ if debugDecode {
+ fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, dist
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if f.dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanBlockGeneric // We need to continue this work
+ f.stepState = stateDict
+ return
+ }
+ goto readLiteral
+ }
+}
+
+// Copy a single uncompressed data block from input to output.
+func (f *decompressor) dataBlock() {
+ // Uncompressed.
+ // Discard current half-byte.
+ left := (f.nb) & 7
+ f.nb -= left
+ f.b >>= left
+
+ offBytes := f.nb >> 3
+ // Unfilled values will be overwritten.
+ f.buf[0] = uint8(f.b)
+ f.buf[1] = uint8(f.b >> 8)
+ f.buf[2] = uint8(f.b >> 16)
+ f.buf[3] = uint8(f.b >> 24)
+
+ f.roffset += int64(offBytes)
+ f.nb, f.b = 0, 0
+
+ // Length then ones-complement of length.
+ nr, err := io.ReadFull(f.r, f.buf[offBytes:4])
+ f.roffset += int64(nr)
+ if err != nil {
+ f.err = noEOF(err)
+ return
+ }
+ n := uint16(f.buf[0]) | uint16(f.buf[1])<<8
+ nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8
+ if nn != ^n {
+ if debugDecode {
+ ncomp := ^n
+ fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ if n == 0 {
+ f.toRead = f.dict.readFlush()
+ f.finishBlock()
+ return
+ }
+
+ f.copyLen = int(n)
+ f.copyData()
+}
+
+// copyData copies f.copyLen bytes from the underlying reader into f.hist.
+// It pauses for reads when f.hist is full.
+func (f *decompressor) copyData() {
+ buf := f.dict.writeSlice()
+ if len(buf) > f.copyLen {
+ buf = buf[:f.copyLen]
+ }
+
+ cnt, err := io.ReadFull(f.r, buf)
+ f.roffset += int64(cnt)
+ f.copyLen -= cnt
+ f.dict.writeMark(cnt)
+ if err != nil {
+ f.err = noEOF(err)
+ return
+ }
+
+ if f.dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).copyData
+ return
+ }
+ f.finishBlock()
+}
+
+func (f *decompressor) finishBlock() {
+ if f.final {
+ if f.dict.availRead() > 0 {
+ f.toRead = f.dict.readFlush()
+ }
+ f.err = io.EOF
+ }
+ f.step = (*decompressor).nextBlock
+}
+
+// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
+func noEOF(e error) error {
+ if e == io.EOF {
+ return io.ErrUnexpectedEOF
+ }
+ return e
+}
+
+func (f *decompressor) moreBits() error {
+ c, err := f.r.ReadByte()
+ if err != nil {
+ return noEOF(err)
+ }
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
+ return nil
+}
+
+// Read the next Huffman-encoded symbol from f according to h.
+func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(h.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := f.r.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ return 0, noEOF(err)
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & 31)
+ nb += 8
+ }
+ chunk := h.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return 0, f.err
+ }
+ f.b = b >> (n & 31)
+ f.nb = nb - n
+ return int(chunk >> huffmanValueShift), nil
+ }
+ }
+}
+
+func makeReader(r io.Reader) Reader {
+ if rr, ok := r.(Reader); ok {
+ return rr
+ }
+ return bufio.NewReader(r)
+}
+
+func fixedHuffmanDecoderInit() {
+ fixedOnce.Do(func() {
+ // These come from the RFC section 3.2.6.
+ var bits [288]int
+ for i := 0; i < 144; i++ {
+ bits[i] = 8
+ }
+ for i := 144; i < 256; i++ {
+ bits[i] = 9
+ }
+ for i := 256; i < 280; i++ {
+ bits[i] = 7
+ }
+ for i := 280; i < 288; i++ {
+ bits[i] = 8
+ }
+ fixedHuffmanDecoder.init(bits[:])
+ })
+}
+
+func (f *decompressor) Reset(r io.Reader, dict []byte) error {
+ *f = decompressor{
+ r: makeReader(r),
+ bits: f.bits,
+ codebits: f.codebits,
+ h1: f.h1,
+ h2: f.h2,
+ dict: f.dict,
+ step: (*decompressor).nextBlock,
+ }
+ f.dict.init(maxMatchOffset, dict)
+ return nil
+}
+
+// NewReader returns a new ReadCloser that can be used
+// to read the uncompressed version of r.
+// If r does not also implement io.ByteReader,
+// the decompressor may read more data than necessary from r.
+// It is the caller's responsibility to call Close on the ReadCloser
+// when finished reading.
+//
+// The ReadCloser returned by NewReader also implements Resetter.
+func NewReader(r io.Reader) io.ReadCloser {
+ fixedHuffmanDecoderInit()
+
+ var f decompressor
+ f.r = makeReader(r)
+ f.bits = new([maxNumLit + maxNumDist]int)
+ f.codebits = new([numCodes]int)
+ f.step = (*decompressor).nextBlock
+ f.dict.init(maxMatchOffset, nil)
+ return &f
+}
+
+// NewReaderDict is like NewReader but initializes the reader
+// with a preset dictionary. The returned Reader behaves as if
+// the uncompressed data stream started with the given dictionary,
+// which has already been read. NewReaderDict is typically used
+// to read data compressed by NewWriterDict.
+//
+// The ReadCloser returned by NewReader also implements Resetter.
+func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
+ fixedHuffmanDecoderInit()
+
+ var f decompressor
+ f.r = makeReader(r)
+ f.bits = new([maxNumLit + maxNumDist]int)
+ f.codebits = new([numCodes]int)
+ f.step = (*decompressor).nextBlock
+ f.dict.init(maxMatchOffset, dict)
+ return &f
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/inflate_gen.go
new file mode 100644
index 00000000000..397dc1b1a13
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/inflate_gen.go
@@ -0,0 +1,922 @@
+// Code generated by go generate gen_inflate.go. DO NOT EDIT.
+
+package flate
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "math/bits"
+ "strings"
+)
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBytesBuffer() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(*bytes.Buffer)
+ moreBits := func() error {
+ c, err := fr.ReadByte()
+ if err != nil {
+ return noEOF(err)
+ }
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
+ return nil
+ }
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & 31)
+ nb += 8
+ }
+ chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ f.b = b >> (n & 31)
+ f.nb = nb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var n uint // number of bits extra
+ var length int
+ var err error
+ switch {
+ case v < 256:
+ f.dict.writeByte(byte(v))
+ if f.dict.availWrite() == 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanBytesBuffer
+ f.stepState = stateInit
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ n = 0
+ case v < 269:
+ length = v*2 - (265*2 - 11)
+ n = 1
+ case v < 273:
+ length = v*4 - (269*4 - 19)
+ n = 2
+ case v < 277:
+ length = v*8 - (273*8 - 35)
+ n = 3
+ case v < 281:
+ length = v*16 - (277*16 - 67)
+ n = 4
+ case v < 285:
+ length = v*32 - (281*32 - 131)
+ n = 5
+ case v < maxNumLit:
+ length = 258
+ n = 0
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ if n > 0 {
+ for f.nb < n {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ length += int(f.b & uint32(1<>= n
+ f.nb -= n
+ }
+
+ var dist int
+ if f.hd == nil {
+ for f.nb < 5 {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
+ f.b >>= 5
+ f.nb -= 5
+ } else {
+ if dist, err = f.huffSym(f.hd); err != nil {
+ if debugDecode {
+ fmt.Println("huffsym:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << nb
+ for f.nb < nb {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb>= nb
+ f.nb -= nb
+ dist = 1<<(nb+1) + 1 + extra
+ default:
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > f.dict.histSize() {
+ if debugDecode {
+ fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, dist
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if f.dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work
+ f.stepState = stateDict
+ return
+ }
+ goto readLiteral
+ }
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBytesReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(*bytes.Reader)
+ moreBits := func() error {
+ c, err := fr.ReadByte()
+ if err != nil {
+ return noEOF(err)
+ }
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
+ return nil
+ }
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & 31)
+ nb += 8
+ }
+ chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ f.b = b >> (n & 31)
+ f.nb = nb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var n uint // number of bits extra
+ var length int
+ var err error
+ switch {
+ case v < 256:
+ f.dict.writeByte(byte(v))
+ if f.dict.availWrite() == 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanBytesReader
+ f.stepState = stateInit
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ n = 0
+ case v < 269:
+ length = v*2 - (265*2 - 11)
+ n = 1
+ case v < 273:
+ length = v*4 - (269*4 - 19)
+ n = 2
+ case v < 277:
+ length = v*8 - (273*8 - 35)
+ n = 3
+ case v < 281:
+ length = v*16 - (277*16 - 67)
+ n = 4
+ case v < 285:
+ length = v*32 - (281*32 - 131)
+ n = 5
+ case v < maxNumLit:
+ length = 258
+ n = 0
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ if n > 0 {
+ for f.nb < n {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ length += int(f.b & uint32(1<>= n
+ f.nb -= n
+ }
+
+ var dist int
+ if f.hd == nil {
+ for f.nb < 5 {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
+ f.b >>= 5
+ f.nb -= 5
+ } else {
+ if dist, err = f.huffSym(f.hd); err != nil {
+ if debugDecode {
+ fmt.Println("huffsym:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << nb
+ for f.nb < nb {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb>= nb
+ f.nb -= nb
+ dist = 1<<(nb+1) + 1 + extra
+ default:
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > f.dict.histSize() {
+ if debugDecode {
+ fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, dist
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if f.dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanBytesReader // We need to continue this work
+ f.stepState = stateDict
+ return
+ }
+ goto readLiteral
+ }
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBufioReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(*bufio.Reader)
+ moreBits := func() error {
+ c, err := fr.ReadByte()
+ if err != nil {
+ return noEOF(err)
+ }
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
+ return nil
+ }
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & 31)
+ nb += 8
+ }
+ chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ f.b = b >> (n & 31)
+ f.nb = nb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var n uint // number of bits extra
+ var length int
+ var err error
+ switch {
+ case v < 256:
+ f.dict.writeByte(byte(v))
+ if f.dict.availWrite() == 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanBufioReader
+ f.stepState = stateInit
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ n = 0
+ case v < 269:
+ length = v*2 - (265*2 - 11)
+ n = 1
+ case v < 273:
+ length = v*4 - (269*4 - 19)
+ n = 2
+ case v < 277:
+ length = v*8 - (273*8 - 35)
+ n = 3
+ case v < 281:
+ length = v*16 - (277*16 - 67)
+ n = 4
+ case v < 285:
+ length = v*32 - (281*32 - 131)
+ n = 5
+ case v < maxNumLit:
+ length = 258
+ n = 0
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ if n > 0 {
+ for f.nb < n {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ length += int(f.b & uint32(1<>= n
+ f.nb -= n
+ }
+
+ var dist int
+ if f.hd == nil {
+ for f.nb < 5 {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
+ f.b >>= 5
+ f.nb -= 5
+ } else {
+ if dist, err = f.huffSym(f.hd); err != nil {
+ if debugDecode {
+ fmt.Println("huffsym:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << nb
+ for f.nb < nb {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb>= nb
+ f.nb -= nb
+ dist = 1<<(nb+1) + 1 + extra
+ default:
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > f.dict.histSize() {
+ if debugDecode {
+ fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, dist
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if f.dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanBufioReader // We need to continue this work
+ f.stepState = stateDict
+ return
+ }
+ goto readLiteral
+ }
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanStringsReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(*strings.Reader)
+ moreBits := func() error {
+ c, err := fr.ReadByte()
+ if err != nil {
+ return noEOF(err)
+ }
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
+ return nil
+ }
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & 31)
+ nb += 8
+ }
+ chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ f.b = b >> (n & 31)
+ f.nb = nb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var n uint // number of bits extra
+ var length int
+ var err error
+ switch {
+ case v < 256:
+ f.dict.writeByte(byte(v))
+ if f.dict.availWrite() == 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanStringsReader
+ f.stepState = stateInit
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ n = 0
+ case v < 269:
+ length = v*2 - (265*2 - 11)
+ n = 1
+ case v < 273:
+ length = v*4 - (269*4 - 19)
+ n = 2
+ case v < 277:
+ length = v*8 - (273*8 - 35)
+ n = 3
+ case v < 281:
+ length = v*16 - (277*16 - 67)
+ n = 4
+ case v < 285:
+ length = v*32 - (281*32 - 131)
+ n = 5
+ case v < maxNumLit:
+ length = 258
+ n = 0
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ if n > 0 {
+ for f.nb < n {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ length += int(f.b & uint32(1<>= n
+ f.nb -= n
+ }
+
+ var dist int
+ if f.hd == nil {
+ for f.nb < 5 {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+ dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
+ f.b >>= 5
+ f.nb -= 5
+ } else {
+ if dist, err = f.huffSym(f.hd); err != nil {
+ if debugDecode {
+ fmt.Println("huffsym:", err)
+ }
+ f.err = err
+ return
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << nb
+ for f.nb < nb {
+ if err = moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits f.nb>= nb
+ f.nb -= nb
+ dist = 1<<(nb+1) + 1 + extra
+ default:
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > f.dict.histSize() {
+ if debugDecode {
+ fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, dist
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if f.dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanStringsReader // We need to continue this work
+ f.stepState = stateDict
+ return
+ }
+ goto readLiteral
+ }
+}
+
+func (f *decompressor) huffmanBlockDecoder() func() {
+ switch f.r.(type) {
+ case *bytes.Buffer:
+ return f.huffmanBytesBuffer
+ case *bytes.Reader:
+ return f.huffmanBytesReader
+ case *bufio.Reader:
+ return f.huffmanBufioReader
+ case *strings.Reader:
+ return f.huffmanStringsReader
+ default:
+ return f.huffmanBlockGeneric
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level1.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level1.go
new file mode 100644
index 00000000000..1e5eea3968a
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level1.go
@@ -0,0 +1,179 @@
+package flate
+
+import "fmt"
+
+// fastGen maintains the table for matches,
+// and the previous byte block for level 2.
+// This is the generic implementation.
+type fastEncL1 struct {
+ fastGen
+ table [tableSize]tableEntry
+}
+
+// EncodeL1 uses a similar algorithm to level 1
+func (e *fastEncL1) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load3232(src, s)
+
+ for {
+ const skipLog = 5
+ const doEvery = 2
+
+ nextS := s
+ var candidate tableEntry
+ for {
+ nextHash := hash(cv)
+ candidate = e.table[nextHash]
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+
+ now := load6432(src, nextS)
+ e.table[nextHash] = tableEntry{offset: s + e.cur}
+ nextHash = hash(uint32(now))
+
+ offset := s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
+ e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+ break
+ }
+
+ // Do one right away...
+ cv = uint32(now)
+ s = nextS
+ nextS++
+ candidate = e.table[nextHash]
+ now >>= 8
+ e.table[nextHash] = tableEntry{offset: s + e.cur}
+
+ offset = s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
+ e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+ break
+ }
+ cv = uint32(now)
+ s = nextS
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ t := candidate.offset - e.cur
+ l := e.matchlenLong(s+4, t+4, src) + 4
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ emitLiteral(dst, src[nextEmit:s])
+ }
+
+ // Save the match found
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+ if s >= sLimit {
+ // Index first pair after match end.
+ if int(s+l+4) < len(src) {
+ cv := load3232(src, s)
+ e.table[hash(cv)] = tableEntry{offset: s + e.cur}
+ }
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load6432(src, s-2)
+ o := e.cur + s - 2
+ prevHash := hash(uint32(x))
+ e.table[prevHash] = tableEntry{offset: o}
+ x >>= 16
+ currHash := hash(uint32(x))
+ candidate = e.table[currHash]
+ e.table[currHash] = tableEntry{offset: o + 2}
+
+ offset := s - (candidate.offset - e.cur)
+ if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) {
+ cv = uint32(x >> 8)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level2.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level2.go
new file mode 100644
index 00000000000..5b986a1944e
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level2.go
@@ -0,0 +1,205 @@
+package flate
+
+import "fmt"
+
+// fastGen maintains the table for matches,
+// and the previous byte block for level 2.
+// This is the generic implementation.
+type fastEncL2 struct {
+ fastGen
+ table [bTableSize]tableEntry
+}
+
+// EncodeL2 uses a similar algorithm to level 1, but is capable
+// of matching across blocks giving better compression at a small slowdown.
+func (e *fastEncL2) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load3232(src, s)
+ for {
+ // When should we start skipping if we haven't found matches in a long while.
+ const skipLog = 5
+ const doEvery = 2
+
+ nextS := s
+ var candidate tableEntry
+ for {
+ nextHash := hash4u(cv, bTableBits)
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidate = e.table[nextHash]
+ now := load6432(src, nextS)
+ e.table[nextHash] = tableEntry{offset: s + e.cur}
+ nextHash = hash4u(uint32(now), bTableBits)
+
+ offset := s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
+ e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+ break
+ }
+
+ // Do one right away...
+ cv = uint32(now)
+ s = nextS
+ nextS++
+ candidate = e.table[nextHash]
+ now >>= 8
+ e.table[nextHash] = tableEntry{offset: s + e.cur}
+
+ offset = s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
+ break
+ }
+ cv = uint32(now)
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ t := candidate.offset - e.cur
+ l := e.matchlenLong(s+4, t+4, src) + 4
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ emitLiteral(dst, src[nextEmit:s])
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ // Index first pair after match end.
+ if int(s+l+4) < len(src) {
+ cv := load3232(src, s)
+ e.table[hash4u(cv, bTableBits)] = tableEntry{offset: s + e.cur}
+ }
+ goto emitRemainder
+ }
+
+ // Store every second hash in-between, but offset by 1.
+ for i := s - l + 2; i < s-5; i += 7 {
+ x := load6432(src, int32(i))
+ nextHash := hash4u(uint32(x), bTableBits)
+ e.table[nextHash] = tableEntry{offset: e.cur + i}
+ // Skip one
+ x >>= 16
+ nextHash = hash4u(uint32(x), bTableBits)
+ e.table[nextHash] = tableEntry{offset: e.cur + i + 2}
+ // Skip one
+ x >>= 16
+ nextHash = hash4u(uint32(x), bTableBits)
+ e.table[nextHash] = tableEntry{offset: e.cur + i + 4}
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 to s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load6432(src, s-2)
+ o := e.cur + s - 2
+ prevHash := hash4u(uint32(x), bTableBits)
+ prevHash2 := hash4u(uint32(x>>8), bTableBits)
+ e.table[prevHash] = tableEntry{offset: o}
+ e.table[prevHash2] = tableEntry{offset: o + 1}
+ currHash := hash4u(uint32(x>>16), bTableBits)
+ candidate = e.table[currHash]
+ e.table[currHash] = tableEntry{offset: o + 2}
+
+ offset := s - (candidate.offset - e.cur)
+ if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) {
+ cv = uint32(x >> 24)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level3.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level3.go
new file mode 100644
index 00000000000..c22b4244a5c
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level3.go
@@ -0,0 +1,229 @@
+package flate
+
+import "fmt"
+
+// fastEncL3
+type fastEncL3 struct {
+ fastGen
+ table [tableSize]tableEntryPrev
+}
+
+// Encode uses a similar algorithm to level 2, will check up to two candidates.
+func (e *fastEncL3) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 8 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntryPrev{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i]
+ if v.Cur.offset <= minOff {
+ v.Cur.offset = 0
+ } else {
+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+ }
+ if v.Prev.offset <= minOff {
+ v.Prev.offset = 0
+ } else {
+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+ }
+ e.table[i] = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // Skip if too small.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load3232(src, s)
+ for {
+ const skipLog = 6
+ nextS := s
+ var candidate tableEntry
+ for {
+ nextHash := hash(cv)
+ s = nextS
+ nextS = s + 1 + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidates := e.table[nextHash]
+ now := load3232(src, nextS)
+
+ // Safe offset distance until s + 4...
+ minOffset := e.cur + s - (maxMatchOffset - 4)
+ e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}}
+
+ // Check both candidates
+ candidate = candidates.Cur
+ if candidate.offset < minOffset {
+ cv = now
+ // Previous will also be invalid, we have nothing.
+ continue
+ }
+
+ if cv == load3232(src, candidate.offset-e.cur) {
+ if candidates.Prev.offset < minOffset || cv != load3232(src, candidates.Prev.offset-e.cur) {
+ break
+ }
+ // Both match and are valid, pick longest.
+ offset := s - (candidate.offset - e.cur)
+ o2 := s - (candidates.Prev.offset - e.cur)
+ l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:])
+ if l2 > l1 {
+ candidate = candidates.Prev
+ }
+ break
+ } else {
+ // We only check if value mismatches.
+ // Offset will always be invalid in other cases.
+ candidate = candidates.Prev
+ if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) {
+ break
+ }
+ }
+ cv = now
+ }
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ //
+ t := candidate.offset - e.cur
+ l := e.matchlenLong(s+4, t+4, src) + 4
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ emitLiteral(dst, src[nextEmit:s])
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ t += l
+ // Index first pair after match end.
+ if int(t+4) < len(src) && t > 0 {
+ cv := load3232(src, t)
+ nextHash := hash(cv)
+ e.table[nextHash] = tableEntryPrev{
+ Prev: e.table[nextHash].Cur,
+ Cur: tableEntry{offset: e.cur + t},
+ }
+ }
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-3 to s.
+ x := load6432(src, s-3)
+ prevHash := hash(uint32(x))
+ e.table[prevHash] = tableEntryPrev{
+ Prev: e.table[prevHash].Cur,
+ Cur: tableEntry{offset: e.cur + s - 3},
+ }
+ x >>= 8
+ prevHash = hash(uint32(x))
+
+ e.table[prevHash] = tableEntryPrev{
+ Prev: e.table[prevHash].Cur,
+ Cur: tableEntry{offset: e.cur + s - 2},
+ }
+ x >>= 8
+ prevHash = hash(uint32(x))
+
+ e.table[prevHash] = tableEntryPrev{
+ Prev: e.table[prevHash].Cur,
+ Cur: tableEntry{offset: e.cur + s - 1},
+ }
+ x >>= 8
+ currHash := hash(uint32(x))
+ candidates := e.table[currHash]
+ cv = uint32(x)
+ e.table[currHash] = tableEntryPrev{
+ Prev: candidates.Cur,
+ Cur: tableEntry{offset: s + e.cur},
+ }
+
+ // Check both candidates
+ candidate = candidates.Cur
+ minOffset := e.cur + s - (maxMatchOffset - 4)
+
+ if candidate.offset > minOffset && cv != load3232(src, candidate.offset-e.cur) {
+ // We only check if value mismatches.
+ // Offset will always be invalid in other cases.
+ candidate = candidates.Prev
+ if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) {
+ offset := s - (candidate.offset - e.cur)
+ if offset <= maxMatchOffset {
+ continue
+ }
+ }
+ }
+ cv = uint32(x >> 8)
+ s++
+ break
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level4.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level4.go
new file mode 100644
index 00000000000..e62f0c02b1e
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level4.go
@@ -0,0 +1,212 @@
+package flate
+
+import "fmt"
+
+type fastEncL4 struct {
+ fastGen
+ table [tableSize]tableEntry
+ bTable [tableSize]tableEntry
+}
+
+func (e *fastEncL4) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.bTable[:] {
+ e.bTable[i] = tableEntry{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.bTable[:] {
+ v := e.bTable[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.bTable[i].offset = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ for {
+ const skipLog = 6
+ const doEvery = 1
+
+ nextS := s
+ var t int32
+ for {
+ nextHashS := hash4x64(cv, tableBits)
+ nextHashL := hash7(cv, tableBits)
+
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ // Fetch a short+long candidate
+ sCandidate := e.table[nextHashS]
+ lCandidate := e.bTable[nextHashL]
+ next := load6432(src, nextS)
+ entry := tableEntry{offset: s + e.cur}
+ e.table[nextHashS] = entry
+ e.bTable[nextHashL] = entry
+
+ t = lCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) {
+ // We got a long match. Use that.
+ break
+ }
+
+ t = sCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
+ // Found a 4 match...
+ lCandidate = e.bTable[hash7(next, tableBits)]
+
+ // If the next long is a candidate, check if we should use that instead...
+ lOff := nextS - (lCandidate.offset - e.cur)
+ if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) {
+ l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:])
+ if l2 > l1 {
+ s = nextS
+ t = lCandidate.offset - e.cur
+ }
+ }
+ break
+ }
+ cv = next
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ // Extend the 4-byte match as long as possible.
+ l := e.matchlenLong(s+4, t+4, src) + 4
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ emitLiteral(dst, src[nextEmit:s])
+ }
+ if debugDeflate {
+ if t >= s {
+ panic("s-t")
+ }
+ if (s - t) > maxMatchOffset {
+ panic(fmt.Sprintln("mmo", t))
+ }
+ if l < baseMatchLength {
+ panic("bml")
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ // Index first pair after match end.
+ if int(s+8) < len(src) {
+ cv := load6432(src, s)
+ e.table[hash4x64(cv, tableBits)] = tableEntry{offset: s + e.cur}
+ e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur}
+ }
+ goto emitRemainder
+ }
+
+ // Store every 3rd hash in-between
+ if true {
+ i := nextS
+ if i < s-1 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ t2 := tableEntry{offset: t.offset + 1}
+ e.bTable[hash7(cv, tableBits)] = t
+ e.bTable[hash7(cv>>8, tableBits)] = t2
+ e.table[hash4u(uint32(cv>>8), tableBits)] = t2
+
+ i += 3
+ for ; i < s-1; i += 3 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ t2 := tableEntry{offset: t.offset + 1}
+ e.bTable[hash7(cv, tableBits)] = t
+ e.bTable[hash7(cv>>8, tableBits)] = t2
+ e.table[hash4u(uint32(cv>>8), tableBits)] = t2
+ }
+ }
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s.
+ x := load6432(src, s-1)
+ o := e.cur + s - 1
+ prevHashS := hash4x64(x, tableBits)
+ prevHashL := hash7(x, tableBits)
+ e.table[prevHashS] = tableEntry{offset: o}
+ e.bTable[prevHashL] = tableEntry{offset: o}
+ cv = x >> 8
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level5.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level5.go
new file mode 100644
index 00000000000..d513f1ffd37
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level5.go
@@ -0,0 +1,279 @@
+package flate
+
+import "fmt"
+
+type fastEncL5 struct {
+ fastGen
+ table [tableSize]tableEntry
+ bTable [tableSize]tableEntryPrev
+}
+
+func (e *fastEncL5) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.bTable[:] {
+ e.bTable[i] = tableEntryPrev{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.bTable[:] {
+ v := e.bTable[i]
+ if v.Cur.offset <= minOff {
+ v.Cur.offset = 0
+ v.Prev.offset = 0
+ } else {
+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+ if v.Prev.offset <= minOff {
+ v.Prev.offset = 0
+ } else {
+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+ }
+ }
+ e.bTable[i] = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ for {
+ const skipLog = 6
+ const doEvery = 1
+
+ nextS := s
+ var l int32
+ var t int32
+ for {
+ nextHashS := hash4x64(cv, tableBits)
+ nextHashL := hash7(cv, tableBits)
+
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ // Fetch a short+long candidate
+ sCandidate := e.table[nextHashS]
+ lCandidate := e.bTable[nextHashL]
+ next := load6432(src, nextS)
+ entry := tableEntry{offset: s + e.cur}
+ e.table[nextHashS] = entry
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = entry, eLong.Cur
+
+ nextHashS = hash4x64(next, tableBits)
+ nextHashL = hash7(next, tableBits)
+
+ t = lCandidate.Cur.offset - e.cur
+ if s-t < maxMatchOffset {
+ if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ t2 := lCandidate.Prev.offset - e.cur
+ if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
+ l = e.matchlen(s+4, t+4, src) + 4
+ ml1 := e.matchlen(s+4, t2+4, src) + 4
+ if ml1 > l {
+ t = t2
+ l = ml1
+ break
+ }
+ }
+ break
+ }
+ t = lCandidate.Prev.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+ break
+ }
+ }
+
+ t = sCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
+ // Found a 4 match...
+ l = e.matchlen(s+4, t+4, src) + 4
+ lCandidate = e.bTable[nextHashL]
+ // Store the next match
+
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ // If the next long is a candidate, use that...
+ t2 := lCandidate.Cur.offset - e.cur
+ if nextS-t2 < maxMatchOffset {
+ if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ break
+ }
+ }
+ // If the previous long is a candidate, use that...
+ t2 = lCandidate.Prev.offset - e.cur
+ if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ break
+ }
+ }
+ }
+ break
+ }
+ cv = next
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ // Extend the 4-byte match as long as possible.
+ if l == 0 {
+ l = e.matchlenLong(s+4, t+4, src) + 4
+ } else if l == maxMatchLength {
+ l += e.matchlenLong(s+l, t+l, src)
+ }
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ emitLiteral(dst, src[nextEmit:s])
+ }
+ if debugDeflate {
+ if t >= s {
+ panic(fmt.Sprintln("s-t", s, t))
+ }
+ if (s - t) > maxMatchOffset {
+ panic(fmt.Sprintln("mmo", s-t))
+ }
+ if l < baseMatchLength {
+ panic("bml")
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // Store every 3rd hash in-between.
+ if true {
+ const hashEvery = 3
+ i := s - l + 1
+ if i < s-1 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ e.table[hash4x64(cv, tableBits)] = t
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+
+ // Do an long at i+1
+ cv >>= 8
+ t = tableEntry{offset: t.offset + 1}
+ eLong = &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+
+ // We only have enough bits for a short entry at i+2
+ cv >>= 8
+ t = tableEntry{offset: t.offset + 1}
+ e.table[hash4x64(cv, tableBits)] = t
+
+ // Skip one - otherwise we risk hitting 's'
+ i += 4
+ for ; i < s-1; i += hashEvery {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ t2 := tableEntry{offset: t.offset + 1}
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+ e.table[hash4u(uint32(cv>>8), tableBits)] = t2
+ }
+ }
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s.
+ x := load6432(src, s-1)
+ o := e.cur + s - 1
+ prevHashS := hash4x64(x, tableBits)
+ prevHashL := hash7(x, tableBits)
+ e.table[prevHashS] = tableEntry{offset: o}
+ eLong := &e.bTable[prevHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
+ cv = x >> 8
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level6.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level6.go
new file mode 100644
index 00000000000..a52c80ea456
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level6.go
@@ -0,0 +1,282 @@
+package flate
+
+import "fmt"
+
+type fastEncL6 struct {
+ fastGen
+ table [tableSize]tableEntry
+ bTable [tableSize]tableEntryPrev
+}
+
+func (e *fastEncL6) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.bTable[:] {
+ e.bTable[i] = tableEntryPrev{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.bTable[:] {
+ v := e.bTable[i]
+ if v.Cur.offset <= minOff {
+ v.Cur.offset = 0
+ v.Prev.offset = 0
+ } else {
+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+ if v.Prev.offset <= minOff {
+ v.Prev.offset = 0
+ } else {
+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+ }
+ }
+ e.bTable[i] = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ // Repeat MUST be > 1 and within range
+ repeat := int32(1)
+ for {
+ const skipLog = 7
+ const doEvery = 1
+
+ nextS := s
+ var l int32
+ var t int32
+ for {
+ nextHashS := hash4x64(cv, tableBits)
+ nextHashL := hash7(cv, tableBits)
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ // Fetch a short+long candidate
+ sCandidate := e.table[nextHashS]
+ lCandidate := e.bTable[nextHashL]
+ next := load6432(src, nextS)
+ entry := tableEntry{offset: s + e.cur}
+ e.table[nextHashS] = entry
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = entry, eLong.Cur
+
+ // Calculate hashes of 'next'
+ nextHashS = hash4x64(next, tableBits)
+ nextHashL = hash7(next, tableBits)
+
+ t = lCandidate.Cur.offset - e.cur
+ if s-t < maxMatchOffset {
+ if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
+ // Long candidate matches at least 4 bytes.
+
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ // Check the previous long candidate as well.
+ t2 := lCandidate.Prev.offset - e.cur
+ if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
+ l = e.matchlen(s+4, t+4, src) + 4
+ ml1 := e.matchlen(s+4, t2+4, src) + 4
+ if ml1 > l {
+ t = t2
+ l = ml1
+ break
+ }
+ }
+ break
+ }
+ // Current value did not match, but check if previous long value does.
+ t = lCandidate.Prev.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+ break
+ }
+ }
+
+ t = sCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
+ // Found a 4 match...
+ l = e.matchlen(s+4, t+4, src) + 4
+
+ // Look up next long candidate (at nextS)
+ lCandidate = e.bTable[nextHashL]
+
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ // Check repeat at s + repOff
+ const repOff = 1
+ t2 := s - repeat + repOff
+ if load3232(src, t2) == uint32(cv>>(8*repOff)) {
+ ml := e.matchlen(s+4+repOff, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ l = ml
+ s += repOff
+ // Not worth checking more.
+ break
+ }
+ }
+
+ // If the next long is a candidate, use that...
+ t2 = lCandidate.Cur.offset - e.cur
+ if nextS-t2 < maxMatchOffset {
+ if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ // This is ok, but check previous as well.
+ }
+ }
+ // If the previous long is a candidate, use that...
+ t2 = lCandidate.Prev.offset - e.cur
+ if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ break
+ }
+ }
+ }
+ break
+ }
+ cv = next
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ // Extend the 4-byte match as long as possible.
+ if l == 0 {
+ l = e.matchlenLong(s+4, t+4, src) + 4
+ } else if l == maxMatchLength {
+ l += e.matchlenLong(s+l, t+l, src)
+ }
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ emitLiteral(dst, src[nextEmit:s])
+ }
+ if false {
+ if t >= s {
+ panic(fmt.Sprintln("s-t", s, t))
+ }
+ if (s - t) > maxMatchOffset {
+ panic(fmt.Sprintln("mmo", s-t))
+ }
+ if l < baseMatchLength {
+ panic("bml")
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ repeat = s - t
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ // Index after match end.
+ for i := nextS + 1; i < int32(len(src))-8; i += 2 {
+ cv := load6432(src, i)
+ e.table[hash4x64(cv, tableBits)] = tableEntry{offset: i + e.cur}
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur
+ }
+ goto emitRemainder
+ }
+
+ // Store every long hash in-between and every second short.
+ if true {
+ for i := nextS + 1; i < s-1; i += 2 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ t2 := tableEntry{offset: t.offset + 1}
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong2 := &e.bTable[hash7(cv>>8, tableBits)]
+ e.table[hash4x64(cv, tableBits)] = t
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+ eLong2.Cur, eLong2.Prev = t2, eLong2.Cur
+ }
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s.
+ cv = load6432(src, s)
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/stateless.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/stateless.go
new file mode 100644
index 00000000000..53e89912463
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/stateless.go
@@ -0,0 +1,297 @@
+package flate
+
+import (
+ "io"
+ "math"
+ "sync"
+)
+
+const (
+ maxStatelessBlock = math.MaxInt16
+ // dictionary will be taken from maxStatelessBlock, so limit it.
+ maxStatelessDict = 8 << 10
+
+ slTableBits = 13
+ slTableSize = 1 << slTableBits
+ slTableShift = 32 - slTableBits
+)
+
+type statelessWriter struct {
+ dst io.Writer
+ closed bool
+}
+
+func (s *statelessWriter) Close() error {
+ if s.closed {
+ return nil
+ }
+ s.closed = true
+ // Emit EOF block
+ return StatelessDeflate(s.dst, nil, true, nil)
+}
+
+func (s *statelessWriter) Write(p []byte) (n int, err error) {
+ err = StatelessDeflate(s.dst, p, false, nil)
+ if err != nil {
+ return 0, err
+ }
+ return len(p), nil
+}
+
+func (s *statelessWriter) Reset(w io.Writer) {
+ s.dst = w
+ s.closed = false
+}
+
+// NewStatelessWriter will do compression but without maintaining any state
+// between Write calls.
+// There will be no memory kept between Write calls,
+// but compression and speed will be suboptimal.
+// Because of this, the size of actual Write calls will affect output size.
+func NewStatelessWriter(dst io.Writer) io.WriteCloser {
+ return &statelessWriter{dst: dst}
+}
+
+// bitWriterPool contains bit writers that can be reused.
+var bitWriterPool = sync.Pool{
+ New: func() interface{} {
+ return newHuffmanBitWriter(nil)
+ },
+}
+
+// StatelessDeflate allows to compress directly to a Writer without retaining state.
+// When returning everything will be flushed.
+// Up to 8KB of an optional dictionary can be given which is presumed to presumed to precede the block.
+// Longer dictionaries will be truncated and will still produce valid output.
+// Sending nil dictionary is perfectly fine.
+func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
+ var dst tokens
+ bw := bitWriterPool.Get().(*huffmanBitWriter)
+ bw.reset(out)
+ defer func() {
+ // don't keep a reference to our output
+ bw.reset(nil)
+ bitWriterPool.Put(bw)
+ }()
+ if eof && len(in) == 0 {
+ // Just write an EOF block.
+ // Could be faster...
+ bw.writeStoredHeader(0, true)
+ bw.flush()
+ return bw.err
+ }
+
+ // Truncate dict
+ if len(dict) > maxStatelessDict {
+ dict = dict[len(dict)-maxStatelessDict:]
+ }
+
+ for len(in) > 0 {
+ todo := in
+ if len(todo) > maxStatelessBlock-len(dict) {
+ todo = todo[:maxStatelessBlock-len(dict)]
+ }
+ in = in[len(todo):]
+ uncompressed := todo
+ if len(dict) > 0 {
+ // combine dict and source
+ bufLen := len(todo) + len(dict)
+ combined := make([]byte, bufLen)
+ copy(combined, dict)
+ copy(combined[len(dict):], todo)
+ todo = combined
+ }
+ // Compress
+ statelessEnc(&dst, todo, int16(len(dict)))
+ isEof := eof && len(in) == 0
+
+ if dst.n == 0 {
+ bw.writeStoredHeader(len(uncompressed), isEof)
+ if bw.err != nil {
+ return bw.err
+ }
+ bw.writeBytes(uncompressed)
+ } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 {
+ // If we removed less than 1/16th, huffman compress the block.
+ bw.writeBlockHuff(isEof, uncompressed, len(in) == 0)
+ } else {
+ bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0)
+ }
+ if len(in) > 0 {
+ // Retain a dict if we have more
+ dict = todo[len(todo)-maxStatelessDict:]
+ dst.Reset()
+ }
+ if bw.err != nil {
+ return bw.err
+ }
+ }
+ if !eof {
+ // Align, only a stored block can do that.
+ bw.writeStoredHeader(0, false)
+ }
+ bw.flush()
+ return bw.err
+}
+
+func hashSL(u uint32) uint32 {
+ return (u * 0x1e35a7bd) >> slTableShift
+}
+
+func load3216(b []byte, i int16) uint32 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:4]
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load6416(b []byte, i int16) uint64 {
+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+ b = b[i:]
+ b = b[:8]
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func statelessEnc(dst *tokens, src []byte, startAt int16) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ type tableEntry struct {
+ offset int16
+ }
+
+ var table [slTableSize]tableEntry
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src)-int(startAt) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = 0
+ return
+ }
+ // Index until startAt
+ if startAt > 0 {
+ cv := load3232(src, 0)
+ for i := int16(0); i < startAt; i++ {
+ table[hashSL(cv)] = tableEntry{offset: i}
+ cv = (cv >> 8) | (uint32(src[i+4]) << 24)
+ }
+ }
+
+ s := startAt + 1
+ nextEmit := startAt
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int16(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load3216(src, s)
+
+ for {
+ const skipLog = 5
+ const doEvery = 2
+
+ nextS := s
+ var candidate tableEntry
+ for {
+ nextHash := hashSL(cv)
+ candidate = table[nextHash]
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit || nextS <= 0 {
+ goto emitRemainder
+ }
+
+ now := load6416(src, nextS)
+ table[nextHash] = tableEntry{offset: s}
+ nextHash = hashSL(uint32(now))
+
+ if cv == load3216(src, candidate.offset) {
+ table[nextHash] = tableEntry{offset: nextS}
+ break
+ }
+
+ // Do one right away...
+ cv = uint32(now)
+ s = nextS
+ nextS++
+ candidate = table[nextHash]
+ now >>= 8
+ table[nextHash] = tableEntry{offset: s}
+
+ if cv == load3216(src, candidate.offset) {
+ table[nextHash] = tableEntry{offset: nextS}
+ break
+ }
+ cv = uint32(now)
+ s = nextS
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ t := candidate.offset
+ l := int16(matchLen(src[s+4:], src[t+4:]) + 4)
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ emitLiteral(dst, src[nextEmit:s])
+ }
+
+ // Save the match found
+ dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load6416(src, s-2)
+ o := s - 2
+ prevHash := hashSL(uint32(x))
+ table[prevHash] = tableEntry{offset: o}
+ x >>= 16
+ currHash := hashSL(uint32(x))
+ candidate = table[currHash]
+ table[currHash] = tableEntry{offset: o + 2}
+
+ if uint32(x) != load3216(src, candidate.offset) {
+ cv = uint32(x >> 8)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/token.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/token.go
new file mode 100644
index 00000000000..f9abf606d67
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/token.go
@@ -0,0 +1,375 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+)
+
+const (
+ // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused
+ // 8 bits: xlength = length - MIN_MATCH_LENGTH
+ // 22 bits xoffset = offset - MIN_OFFSET_SIZE, or literal
+ lengthShift = 22
+ offsetMask = 1<maxnumlit
+ offHist [32]uint16 // offset codes
+ litHist [256]uint16 // codes 0->255
+ n uint16 // Must be able to contain maxStoreBlockSize
+ tokens [maxStoreBlockSize + 1]token
+}
+
+func (t *tokens) Reset() {
+ if t.n == 0 {
+ return
+ }
+ t.n = 0
+ t.nLits = 0
+ for i := range t.litHist[:] {
+ t.litHist[i] = 0
+ }
+ for i := range t.extraHist[:] {
+ t.extraHist[i] = 0
+ }
+ for i := range t.offHist[:] {
+ t.offHist[i] = 0
+ }
+}
+
+func (t *tokens) Fill() {
+ if t.n == 0 {
+ return
+ }
+ for i, v := range t.litHist[:] {
+ if v == 0 {
+ t.litHist[i] = 1
+ t.nLits++
+ }
+ }
+ for i, v := range t.extraHist[:literalCount-256] {
+ if v == 0 {
+ t.nLits++
+ t.extraHist[i] = 1
+ }
+ }
+ for i, v := range t.offHist[:offsetCodeCount] {
+ if v == 0 {
+ t.offHist[i] = 1
+ }
+ }
+}
+
+func indexTokens(in []token) tokens {
+ var t tokens
+ t.indexTokens(in)
+ return t
+}
+
+func (t *tokens) indexTokens(in []token) {
+ t.Reset()
+ for _, tok := range in {
+ if tok < matchType {
+ t.AddLiteral(tok.literal())
+ continue
+ }
+ t.AddMatch(uint32(tok.length()), tok.offset())
+ }
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+func emitLiteral(dst *tokens, lit []byte) {
+ ol := int(dst.n)
+ for i, v := range lit {
+ dst.tokens[(i+ol)&maxStoreBlockSize] = token(v)
+ dst.litHist[v]++
+ }
+ dst.n += uint16(len(lit))
+ dst.nLits += len(lit)
+}
+
+func (t *tokens) AddLiteral(lit byte) {
+ t.tokens[t.n] = token(lit)
+ t.litHist[lit]++
+ t.n++
+ t.nLits++
+}
+
+// from https://stackoverflow.com/a/28730362
+func mFastLog2(val float32) float32 {
+ ux := int32(math.Float32bits(val))
+ log2 := (float32)(((ux >> 23) & 255) - 128)
+ ux &= -0x7f800001
+ ux += 127 << 23
+ uval := math.Float32frombits(uint32(ux))
+ log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759
+ return log2
+}
+
+// EstimatedBits will return an minimum size estimated by an *optimal*
+// compression of the block.
+// The size of the block
+func (t *tokens) EstimatedBits() int {
+ shannon := float32(0)
+ bits := int(0)
+ nMatches := 0
+ if t.nLits > 0 {
+ invTotal := 1.0 / float32(t.nLits)
+ for _, v := range t.litHist[:] {
+ if v > 0 {
+ n := float32(v)
+ shannon += -mFastLog2(n*invTotal) * n
+ }
+ }
+ // Just add 15 for EOB
+ shannon += 15
+ for i, v := range t.extraHist[1 : literalCount-256] {
+ if v > 0 {
+ n := float32(v)
+ shannon += -mFastLog2(n*invTotal) * n
+ bits += int(lengthExtraBits[i&31]) * int(v)
+ nMatches += int(v)
+ }
+ }
+ }
+ if nMatches > 0 {
+ invTotal := 1.0 / float32(nMatches)
+ for i, v := range t.offHist[:offsetCodeCount] {
+ if v > 0 {
+ n := float32(v)
+ shannon += -mFastLog2(n*invTotal) * n
+ bits += int(offsetExtraBits[i&31]) * int(v)
+ }
+ }
+ }
+ return int(shannon) + bits
+}
+
+// AddMatch adds a match to the tokens.
+// This function is very sensitive to inlining and right on the border.
+func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
+ if debugDeflate {
+ if xlength >= maxMatchLength+baseMatchLength {
+ panic(fmt.Errorf("invalid length: %v", xlength))
+ }
+ if xoffset >= maxMatchOffset+baseMatchOffset {
+ panic(fmt.Errorf("invalid offset: %v", xoffset))
+ }
+ }
+ t.nLits++
+ lengthCode := lengthCodes1[uint8(xlength)] & 31
+ t.tokens[t.n] = token(matchType | xlength<= maxMatchOffset+baseMatchOffset {
+ panic(fmt.Errorf("invalid offset: %v", xoffset))
+ }
+ }
+ oc := offsetCode(xoffset) & 31
+ for xlength > 0 {
+ xl := xlength
+ if xl > 258 {
+ // We need to have at least baseMatchLength left over for next loop.
+ xl = 258 - baseMatchLength
+ }
+ xlength -= xl
+ xl -= 3
+ t.nLits++
+ lengthCode := lengthCodes1[uint8(xl)] & 31
+ t.tokens[t.n] = token(matchType | uint32(xl)<> lengthShift) }
+
+// The code is never more than 8 bits, but is returned as uint32 for convenience.
+func lengthCode(len uint8) uint32 { return uint32(lengthCodes[len]) }
+
+// Returns the offset code corresponding to a specific offset
+func offsetCode(off uint32) uint32 {
+ if false {
+ if off < uint32(len(offsetCodes)) {
+ return offsetCodes[off&255]
+ } else if off>>7 < uint32(len(offsetCodes)) {
+ return offsetCodes[(off>>7)&255] + 14
+ } else {
+ return offsetCodes[(off>>14)&255] + 28
+ }
+ }
+ if off < uint32(len(offsetCodes)) {
+ return offsetCodes[uint8(off)]
+ }
+ return offsetCodes14[uint8(off>>7)]
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/.gitignore b/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/.gitignore
new file mode 100644
index 00000000000..f1c181ec9c5
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/.gitignore
@@ -0,0 +1,12 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/LICENSE b/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/LICENSE
new file mode 100644
index 00000000000..7364c76bad1
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 sachin shinde
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/logging.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/logging.go
new file mode 100644
index 00000000000..12d377d8056
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/logging.go
@@ -0,0 +1,82 @@
+package logging
+
+import (
+ "log"
+ "io"
+ "io/ioutil"
+ "os"
+)
+
+type Logger struct {
+ Name string
+ Trace *log.Logger
+ Info *log.Logger
+ Warning *log.Logger
+ Error *log.Logger
+ level LoggingLevel
+}
+
+var loggers = make(map[string]Logger)
+
+func GetLogger(name string) Logger {
+ return New(name, os.Stdout, os.Stdout, os.Stdout, os.Stderr)
+}
+
+func (logger Logger) SetLevel(level LoggingLevel) Logger{
+ switch level {
+ case TRACE:
+ logger.Trace.SetOutput(os.Stdout);
+ logger.Info.SetOutput(os.Stdout);
+ logger.Warning.SetOutput(os.Stdout);
+ logger.Error.SetOutput(os.Stderr);
+ case INFO:
+ logger.Trace.SetOutput(ioutil.Discard);
+ logger.Info.SetOutput(os.Stdout);
+ logger.Warning.SetOutput(os.Stdout);
+ logger.Error.SetOutput(os.Stderr);
+ case WARNING:
+ logger.Trace.SetOutput(ioutil.Discard);
+ logger.Info.SetOutput(ioutil.Discard);
+ logger.Warning.SetOutput(os.Stdout);
+ logger.Error.SetOutput(os.Stderr);
+ case ERROR:
+ logger.Trace.SetOutput(ioutil.Discard);
+ logger.Info.SetOutput(ioutil.Discard);
+ logger.Warning.SetOutput(ioutil.Discard);
+ logger.Error.SetOutput(os.Stderr);
+ case OFF:
+ logger.Trace.SetOutput(ioutil.Discard);
+ logger.Info.SetOutput(ioutil.Discard);
+ logger.Warning.SetOutput(ioutil.Discard);
+ logger.Error.SetOutput(ioutil.Discard);
+ }
+ return logger;
+}
+
+func (logger Logger) GetLevel() LoggingLevel {
+ return logger.level;
+}
+
+func New(
+ name string,
+ traceHandle io.Writer,
+ infoHandle io.Writer,
+ warningHandle io.Writer,
+ errorHandle io.Writer) Logger {
+ loggers[name] = Logger{
+ Name: name,
+ Trace: log.New(traceHandle,
+ "TRACE: ",
+ log.Ldate|log.Ltime|log.Lshortfile),
+ Info: log.New(infoHandle,
+ "INFO: ",
+ log.Ldate|log.Ltime|log.Lshortfile),
+ Warning: log.New(warningHandle,
+ "WARNING: ",
+ log.Ldate|log.Ltime|log.Lshortfile),
+ Error: log.New(errorHandle,
+ "ERROR: ",
+ log.Ldate|log.Ltime|log.Lshortfile),
+ }
+ return loggers[name]
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/loggingL.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/loggingL.go
new file mode 100644
index 00000000000..aab5a8567af
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/loggingL.go
@@ -0,0 +1,13 @@
+package logging
+
+type LoggingLevel int
+
+//go:generate stringer -type=LoggingLevel
+
+const (
+ TRACE LoggingLevel = iota
+ INFO
+ WARNING
+ ERROR
+ OFF
+)
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/logginglevel_string.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/logginglevel_string.go
new file mode 100644
index 00000000000..9f24f0acbfe
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/logginglevel_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=LoggingLevel"; DO NOT EDIT.
+
+package logging
+
+import "strconv"
+
+const _LoggingLevel_name = "TRACEINFOWARNINGERROROFF"
+
+var _LoggingLevel_index = [...]uint8{0, 5, 9, 16, 21, 24}
+
+func (i LoggingLevel) String() string {
+ if i < 0 || i >= LoggingLevel(len(_LoggingLevel_index)-1) {
+ return "LoggingLevel(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _LoggingLevel_name[_LoggingLevel_index[i]:_LoggingLevel_index[i+1]]
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/.gitignore b/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/.gitignore
new file mode 100644
index 00000000000..9a289397844
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/.gitignore
@@ -0,0 +1,21 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+# ignore build under build directory
+build/
+bin/
+
+#ignore any IDE based files
+.idea/**
\ No newline at end of file
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/LICENSE b/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/LICENSE
new file mode 100644
index 00000000000..95ab2c9a687
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2018 Sachin Shinde
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/README.md b/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/README.md
new file mode 100644
index 00000000000..2439bc6a7e1
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/README.md
@@ -0,0 +1,157 @@
+# GoWebsocket
+Gorilla websocket based simplified client implementation in GO.
+
+Overview
+--------
+This client provides following easy to implement functionality
+- Support for emitting and receiving text and binary data
+- Data compression
+- Concurrency control
+- Proxy support
+- Setting request headers
+- Subprotocols support
+- SSL verification enable/disable
+
+To install use
+
+```markdown
+ go get github.com/sacOO7/gowebsocket
+```
+
+Description
+-----------
+
+Create instance of `Websocket` by passing url of websocket-server end-point
+
+```go
+ //Create a client instance
+ socket := gowebsocket.New("ws://echo.websocket.org/")
+
+```
+
+**Important Note** : url to websocket server must be specified with either **ws** or **wss**.
+
+#### Connecting to server
+- For connecting to server:
+
+```go
+ //This will send websocket handshake request to socketcluster-server
+ socket.Connect()
+```
+
+#### Registering All Listeners
+```go
+ package main
+
+ import (
+ "log"
+ "github.com/sacOO7/gowebsocket"
+ "os"
+ "os/signal"
+ )
+
+ func main() {
+
+ interrupt := make(chan os.Signal, 1)
+ signal.Notify(interrupt, os.Interrupt)
+
+ socket := gowebsocket.New("ws://echo.websocket.org/");
+
+ socket.OnConnected = func(socket gowebsocket.Socket) {
+ log.Println("Connected to server");
+ };
+
+ socket.OnConnectError = func(err error, socket gowebsocket.Socket) {
+ log.Println("Received connect error ", err)
+ };
+
+ socket.OnTextMessage = func(message string, socket gowebsocket.Socket) {
+ log.Println("Received message " + message)
+ };
+
+ socket.OnBinaryMessage = func(data [] byte, socket gowebsocket.Socket) {
+ log.Println("Received binary data ", data)
+ };
+
+ socket.OnPingReceived = func(data string, socket gowebsocket.Socket) {
+ log.Println("Received ping " + data)
+ };
+
+ socket.OnPongReceived = func(data string, socket gowebsocket.Socket) {
+ log.Println("Received pong " + data)
+ };
+
+ socket.OnDisconnected = func(err error, socket gowebsocket.Socket) {
+ log.Println("Disconnected from server ")
+ return
+ };
+
+ socket.Connect()
+
+ for {
+ select {
+ case <-interrupt:
+ log.Println("interrupt")
+ socket.Close()
+ return
+ }
+ }
+ }
+
+```
+
+#### Sending Text message
+
+```go
+ socket.SendText("Hi there, this is my sample test message")
+```
+
+#### Sending Binary data
+```go
+ token := make([]byte, 4)
+ // rand.Read(token) putting some random value in token
+ socket.SendBinary(token)
+```
+
+#### Closing the connection with server
+```go
+ socket.Close()
+```
+
+#### Setting request headers
+```go
+ socket.RequestHeader.Set("Accept-Encoding","gzip, deflate, sdch")
+ socket.RequestHeader.Set("Accept-Language","en-US,en;q=0.8")
+ socket.RequestHeader.Set("Pragma","no-cache")
+ socket.RequestHeader.Set("User-Agent","Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36")
+
+```
+
+#### Setting proxy server
+- It can be set using connectionOptions by providing url to proxy server
+
+```go
+ socket.ConnectionOptions = gowebsocket.ConnectionOptions {
+ Proxy: gowebsocket.BuildProxy("http://example.com"),
+ }
+```
+
+#### Setting data compression, ssl verification and subprotocols
+
+- It can be set using connectionOptions inside socket
+
+```go
+ socket.ConnectionOptions = gowebsocket.ConnectionOptions {
+ UseSSL:true,
+ UseCompression:true,
+ Subprotocols: [] string{"chat","superchat"},
+ }
+```
+
+- ConnectionOptions needs to be applied before connecting to server
+- Please check out the [**examples/gowebsocket**](https://github.com/sacOO7/GoWebsocket/tree/master/examples/gowebsocket) directory for detailed code.
+
+License
+-------
+Apache License, Version 2.0
+
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/gowebsocket.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/gowebsocket.go
new file mode 100644
index 00000000000..1ea2b0d7a71
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/gowebsocket.go
@@ -0,0 +1,186 @@
+package gowebsocket
+
+import (
+ "github.com/gorilla/websocket"
+ "net/http"
+ "errors"
+ "crypto/tls"
+ "net/url"
+ "sync"
+ "github.com/sacOO7/go-logger"
+ "reflect"
+)
+
+type Empty struct {
+}
+
+var logger = logging.GetLogger(reflect.TypeOf(Empty{}).PkgPath()).SetLevel(logging.OFF)
+
+func (socket Socket) EnableLogging() {
+ logger.SetLevel(logging.TRACE)
+}
+
+func (socket Socket) GetLogger() logging.Logger {
+ return logger;
+}
+
+type Socket struct {
+ Conn *websocket.Conn
+ WebsocketDialer *websocket.Dialer
+ Url string
+ ConnectionOptions ConnectionOptions
+ RequestHeader http.Header
+ OnConnected func(socket Socket)
+ OnTextMessage func(message string, socket Socket)
+ OnBinaryMessage func(data [] byte, socket Socket)
+ OnConnectError func(err error, socket Socket)
+ OnDisconnected func(err error, socket Socket)
+ OnPingReceived func(data string, socket Socket)
+ OnPongReceived func(data string, socket Socket)
+ IsConnected bool
+ sendMu *sync.Mutex // Prevent "concurrent write to websocket connection"
+ receiveMu *sync.Mutex
+}
+
+type ConnectionOptions struct {
+ UseCompression bool
+ UseSSL bool
+ Proxy func(*http.Request) (*url.URL, error)
+ Subprotocols [] string
+}
+
+// todo Yet to be done
+type ReconnectionOptions struct {
+}
+
+func New(url string) Socket {
+ return Socket{
+ Url: url,
+ RequestHeader: http.Header{},
+ ConnectionOptions: ConnectionOptions{
+ UseCompression: false,
+ UseSSL: true,
+ },
+ WebsocketDialer: &websocket.Dialer{},
+ sendMu: &sync.Mutex{},
+ receiveMu: &sync.Mutex{},
+ }
+}
+
+func (socket *Socket) setConnectionOptions() {
+ socket.WebsocketDialer.EnableCompression = socket.ConnectionOptions.UseCompression
+ socket.WebsocketDialer.TLSClientConfig = &tls.Config{InsecureSkipVerify: socket.ConnectionOptions.UseSSL}
+ socket.WebsocketDialer.Proxy = socket.ConnectionOptions.Proxy
+ socket.WebsocketDialer.Subprotocols = socket.ConnectionOptions.Subprotocols
+}
+
+func (socket *Socket) Connect() {
+ var err error;
+ socket.setConnectionOptions()
+
+ socket.Conn, _, err = socket.WebsocketDialer.Dial(socket.Url, socket.RequestHeader)
+
+ if err != nil {
+ logger.Error.Println("Error while connecting to server ", err)
+ socket.IsConnected = false
+ if socket.OnConnectError != nil {
+ socket.OnConnectError(err, *socket)
+ }
+ return
+ }
+
+ logger.Info.Println("Connected to server")
+
+ if socket.OnConnected != nil {
+ socket.IsConnected = true
+ socket.OnConnected(*socket)
+ }
+
+ defaultPingHandler := socket.Conn.PingHandler()
+ socket.Conn.SetPingHandler(func(appData string) error {
+ logger.Trace.Println("Received PING from server")
+ if socket.OnPingReceived != nil {
+ socket.OnPingReceived(appData, *socket)
+ }
+ return defaultPingHandler(appData)
+ })
+
+ defaultPongHandler := socket.Conn.PongHandler()
+ socket.Conn.SetPongHandler(func(appData string) error {
+ logger.Trace.Println("Received PONG from server")
+ if socket.OnPongReceived != nil {
+ socket.OnPongReceived(appData, *socket)
+ }
+ return defaultPongHandler(appData)
+ })
+
+ defaultCloseHandler := socket.Conn.CloseHandler()
+ socket.Conn.SetCloseHandler(func(code int, text string) error {
+ result := defaultCloseHandler(code, text)
+ logger.Warning.Println("Disconnected from server ", result)
+ if socket.OnDisconnected != nil {
+ socket.IsConnected = false
+ socket.OnDisconnected(errors.New(text), *socket)
+ }
+ return result
+ })
+
+ go func() {
+ for {
+ socket.receiveMu.Lock()
+ messageType, message, err := socket.Conn.ReadMessage()
+ socket.receiveMu.Unlock()
+ if err != nil {
+ logger.Error.Println("read:", err)
+ return
+ }
+ logger.Info.Println("recv: %s", message)
+
+ switch messageType {
+ case websocket.TextMessage:
+ if socket.OnTextMessage != nil {
+ socket.OnTextMessage(string(message), *socket)
+ }
+ case websocket.BinaryMessage:
+ if socket.OnBinaryMessage != nil {
+ socket.OnBinaryMessage(message, *socket)
+ }
+ }
+ }
+ }()
+}
+
+func (socket *Socket) SendText(message string) {
+ err := socket.send(websocket.TextMessage, [] byte (message))
+ if err != nil {
+ logger.Error.Println("write:", err)
+ return
+ }
+}
+
+func (socket *Socket) SendBinary(data [] byte) {
+ err := socket.send(websocket.BinaryMessage, data)
+ if err != nil {
+ logger.Error.Println("write:", err)
+ return
+ }
+}
+
+func (socket *Socket) send(messageType int, data [] byte) error {
+ socket.sendMu.Lock()
+ err := socket.Conn.WriteMessage(messageType, data)
+ socket.sendMu.Unlock()
+ return err
+}
+
+func (socket *Socket) Close() {
+ err := socket.send(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
+ if err != nil {
+ logger.Error.Println("write close:", err)
+ }
+ socket.Conn.Close()
+ if socket.OnDisconnected != nil {
+ socket.IsConnected = false
+ socket.OnDisconnected(err, *socket)
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/utils.go b/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/utils.go
new file mode 100644
index 00000000000..d8702ebb6df
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/utils.go
@@ -0,0 +1,15 @@
+package gowebsocket
+
+import (
+ "net/http"
+ "net/url"
+ "log"
+)
+
+func BuildProxy(Url string) func(*http.Request) (*url.URL, error) {
+ uProxy, err := url.Parse(Url)
+ if err != nil {
+ log.Fatal("Error while parsing url ", err)
+ }
+ return http.ProxyURL(uProxy)
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/AUTHORS b/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/AUTHORS
new file mode 100644
index 00000000000..15167cd746c
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/CONTRIBUTORS b/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/CONTRIBUTORS
new file mode 100644
index 00000000000..1c4577e9680
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/LICENSE b/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/LICENSE
new file mode 100644
index 00000000000..6a66aea5eaf
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/PATENTS b/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/PATENTS
new file mode 100644
index 00000000000..733099041f8
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/client.go b/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/client.go
new file mode 100644
index 00000000000..69a4ac7eefe
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/client.go
@@ -0,0 +1,106 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+)
+
+// DialError is an error that occurs while dialling a websocket server.
+type DialError struct {
+ *Config
+ Err error
+}
+
+func (e *DialError) Error() string {
+ return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error()
+}
+
+// NewConfig creates a new WebSocket config for client connection.
+func NewConfig(server, origin string) (config *Config, err error) {
+ config = new(Config)
+ config.Version = ProtocolVersionHybi13
+ config.Location, err = url.ParseRequestURI(server)
+ if err != nil {
+ return
+ }
+ config.Origin, err = url.ParseRequestURI(origin)
+ if err != nil {
+ return
+ }
+ config.Header = http.Header(make(map[string][]string))
+ return
+}
+
+// NewClient creates a new WebSocket client connection over rwc.
+func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) {
+ br := bufio.NewReader(rwc)
+ bw := bufio.NewWriter(rwc)
+ err = hybiClientHandshake(config, br, bw)
+ if err != nil {
+ return
+ }
+ buf := bufio.NewReadWriter(br, bw)
+ ws = newHybiClientConn(config, buf, rwc)
+ return
+}
+
+// Dial opens a new client connection to a WebSocket.
+func Dial(url_, protocol, origin string) (ws *Conn, err error) {
+ config, err := NewConfig(url_, origin)
+ if err != nil {
+ return nil, err
+ }
+ if protocol != "" {
+ config.Protocol = []string{protocol}
+ }
+ return DialConfig(config)
+}
+
+var portMap = map[string]string{
+ "ws": "80",
+ "wss": "443",
+}
+
+func parseAuthority(location *url.URL) string {
+ if _, ok := portMap[location.Scheme]; ok {
+ if _, _, err := net.SplitHostPort(location.Host); err != nil {
+ return net.JoinHostPort(location.Host, portMap[location.Scheme])
+ }
+ }
+ return location.Host
+}
+
+// DialConfig opens a new client connection to a WebSocket with a config.
+func DialConfig(config *Config) (ws *Conn, err error) {
+ var client net.Conn
+ if config.Location == nil {
+ return nil, &DialError{config, ErrBadWebSocketLocation}
+ }
+ if config.Origin == nil {
+ return nil, &DialError{config, ErrBadWebSocketOrigin}
+ }
+ dialer := config.Dialer
+ if dialer == nil {
+ dialer = &net.Dialer{}
+ }
+ client, err = dialWithDialer(dialer, config)
+ if err != nil {
+ goto Error
+ }
+ ws, err = NewClient(config, client)
+ if err != nil {
+ client.Close()
+ goto Error
+ }
+ return
+
+Error:
+ return nil, &DialError{config, err}
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/dial.go b/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/dial.go
new file mode 100644
index 00000000000..2dab943a489
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/dial.go
@@ -0,0 +1,24 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "crypto/tls"
+ "net"
+)
+
+func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) {
+ switch config.Location.Scheme {
+ case "ws":
+ conn, err = dialer.Dial("tcp", parseAuthority(config.Location))
+
+ case "wss":
+ conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig)
+
+ default:
+ err = ErrBadScheme
+ }
+ return
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/hybi.go b/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/hybi.go
new file mode 100644
index 00000000000..8cffdd16c91
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/hybi.go
@@ -0,0 +1,583 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+// This file implements a protocol of hybi draft.
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/base64"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+const (
+ websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
+
+ closeStatusNormal = 1000
+ closeStatusGoingAway = 1001
+ closeStatusProtocolError = 1002
+ closeStatusUnsupportedData = 1003
+ closeStatusFrameTooLarge = 1004
+ closeStatusNoStatusRcvd = 1005
+ closeStatusAbnormalClosure = 1006
+ closeStatusBadMessageData = 1007
+ closeStatusPolicyViolation = 1008
+ closeStatusTooBigData = 1009
+ closeStatusExtensionMismatch = 1010
+
+ maxControlFramePayloadLength = 125
+)
+
+var (
+ ErrBadMaskingKey = &ProtocolError{"bad masking key"}
+ ErrBadPongMessage = &ProtocolError{"bad pong message"}
+ ErrBadClosingStatus = &ProtocolError{"bad closing status"}
+ ErrUnsupportedExtensions = &ProtocolError{"unsupported extensions"}
+ ErrNotImplemented = &ProtocolError{"not implemented"}
+
+ handshakeHeader = map[string]bool{
+ "Host": true,
+ "Upgrade": true,
+ "Connection": true,
+ "Sec-Websocket-Key": true,
+ "Sec-Websocket-Origin": true,
+ "Sec-Websocket-Version": true,
+ "Sec-Websocket-Protocol": true,
+ "Sec-Websocket-Accept": true,
+ }
+)
+
+// A hybiFrameHeader is a frame header as defined in hybi draft.
+type hybiFrameHeader struct {
+ Fin bool
+ Rsv [3]bool
+ OpCode byte
+ Length int64
+ MaskingKey []byte
+
+ data *bytes.Buffer
+}
+
+// A hybiFrameReader is a reader for hybi frame.
+type hybiFrameReader struct {
+ reader io.Reader
+
+ header hybiFrameHeader
+ pos int64
+ length int
+}
+
+func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) {
+ n, err = frame.reader.Read(msg)
+ if frame.header.MaskingKey != nil {
+ for i := 0; i < n; i++ {
+ msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4]
+ frame.pos++
+ }
+ }
+ return n, err
+}
+
+func (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode }
+
+func (frame *hybiFrameReader) HeaderReader() io.Reader {
+ if frame.header.data == nil {
+ return nil
+ }
+ if frame.header.data.Len() == 0 {
+ return nil
+ }
+ return frame.header.data
+}
+
+func (frame *hybiFrameReader) TrailerReader() io.Reader { return nil }
+
+func (frame *hybiFrameReader) Len() (n int) { return frame.length }
+
+// A hybiFrameReaderFactory creates new frame reader based on its frame type.
+type hybiFrameReaderFactory struct {
+ *bufio.Reader
+}
+
+// NewFrameReader reads a frame header from the connection, and creates new reader for the frame.
+// See Section 5.2 Base Framing protocol for detail.
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2
+func (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) {
+ hybiFrame := new(hybiFrameReader)
+ frame = hybiFrame
+ var header []byte
+ var b byte
+ // First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits)
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ header = append(header, b)
+ hybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0
+ for i := 0; i < 3; i++ {
+ j := uint(6 - i)
+ hybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0
+ }
+ hybiFrame.header.OpCode = header[0] & 0x0f
+
+ // Second byte. Mask/Payload len(7bits)
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ header = append(header, b)
+ mask := (b & 0x80) != 0
+ b &= 0x7f
+ lengthFields := 0
+ switch {
+ case b <= 125: // Payload length 7bits.
+ hybiFrame.header.Length = int64(b)
+ case b == 126: // Payload length 7+16bits
+ lengthFields = 2
+ case b == 127: // Payload length 7+64bits
+ lengthFields = 8
+ }
+ for i := 0; i < lengthFields; i++ {
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ if lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits
+ b &= 0x7f
+ }
+ header = append(header, b)
+ hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b)
+ }
+ if mask {
+ // Masking key. 4 bytes.
+ for i := 0; i < 4; i++ {
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ header = append(header, b)
+ hybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b)
+ }
+ }
+ hybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length)
+ hybiFrame.header.data = bytes.NewBuffer(header)
+ hybiFrame.length = len(header) + int(hybiFrame.header.Length)
+ return
+}
+
+// A HybiFrameWriter is a writer for hybi frame.
+type hybiFrameWriter struct {
+ writer *bufio.Writer
+
+ header *hybiFrameHeader
+}
+
+func (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) {
+ var header []byte
+ var b byte
+ if frame.header.Fin {
+ b |= 0x80
+ }
+ for i := 0; i < 3; i++ {
+ if frame.header.Rsv[i] {
+ j := uint(6 - i)
+ b |= 1 << j
+ }
+ }
+ b |= frame.header.OpCode
+ header = append(header, b)
+ if frame.header.MaskingKey != nil {
+ b = 0x80
+ } else {
+ b = 0
+ }
+ lengthFields := 0
+ length := len(msg)
+ switch {
+ case length <= 125:
+ b |= byte(length)
+ case length < 65536:
+ b |= 126
+ lengthFields = 2
+ default:
+ b |= 127
+ lengthFields = 8
+ }
+ header = append(header, b)
+ for i := 0; i < lengthFields; i++ {
+ j := uint((lengthFields - i - 1) * 8)
+ b = byte((length >> j) & 0xff)
+ header = append(header, b)
+ }
+ if frame.header.MaskingKey != nil {
+ if len(frame.header.MaskingKey) != 4 {
+ return 0, ErrBadMaskingKey
+ }
+ header = append(header, frame.header.MaskingKey...)
+ frame.writer.Write(header)
+ data := make([]byte, length)
+ for i := range data {
+ data[i] = msg[i] ^ frame.header.MaskingKey[i%4]
+ }
+ frame.writer.Write(data)
+ err = frame.writer.Flush()
+ return length, err
+ }
+ frame.writer.Write(header)
+ frame.writer.Write(msg)
+ err = frame.writer.Flush()
+ return length, err
+}
+
+func (frame *hybiFrameWriter) Close() error { return nil }
+
+type hybiFrameWriterFactory struct {
+ *bufio.Writer
+ needMaskingKey bool
+}
+
+func (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) {
+ frameHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType}
+ if buf.needMaskingKey {
+ frameHeader.MaskingKey, err = generateMaskingKey()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil
+}
+
+type hybiFrameHandler struct {
+ conn *Conn
+ payloadType byte
+}
+
+func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) {
+ if handler.conn.IsServerConn() {
+ // The client MUST mask all frames sent to the server.
+ if frame.(*hybiFrameReader).header.MaskingKey == nil {
+ handler.WriteClose(closeStatusProtocolError)
+ return nil, io.EOF
+ }
+ } else {
+ // The server MUST NOT mask all frames.
+ if frame.(*hybiFrameReader).header.MaskingKey != nil {
+ handler.WriteClose(closeStatusProtocolError)
+ return nil, io.EOF
+ }
+ }
+ if header := frame.HeaderReader(); header != nil {
+ io.Copy(ioutil.Discard, header)
+ }
+ switch frame.PayloadType() {
+ case ContinuationFrame:
+ frame.(*hybiFrameReader).header.OpCode = handler.payloadType
+ case TextFrame, BinaryFrame:
+ handler.payloadType = frame.PayloadType()
+ case CloseFrame:
+ return nil, io.EOF
+ case PingFrame, PongFrame:
+ b := make([]byte, maxControlFramePayloadLength)
+ n, err := io.ReadFull(frame, b)
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, frame)
+ if frame.PayloadType() == PingFrame {
+ if _, err := handler.WritePong(b[:n]); err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ }
+ return frame, nil
+}
+
+func (handler *hybiFrameHandler) WriteClose(status int) (err error) {
+ handler.conn.wio.Lock()
+ defer handler.conn.wio.Unlock()
+ w, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame)
+ if err != nil {
+ return err
+ }
+ msg := make([]byte, 2)
+ binary.BigEndian.PutUint16(msg, uint16(status))
+ _, err = w.Write(msg)
+ w.Close()
+ return err
+}
+
+func (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) {
+ handler.conn.wio.Lock()
+ defer handler.conn.wio.Unlock()
+ w, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame)
+ if err != nil {
+ return 0, err
+ }
+ n, err = w.Write(msg)
+ w.Close()
+ return n, err
+}
+
+// newHybiConn creates a new WebSocket connection speaking hybi draft protocol.
+func newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+ if buf == nil {
+ br := bufio.NewReader(rwc)
+ bw := bufio.NewWriter(rwc)
+ buf = bufio.NewReadWriter(br, bw)
+ }
+ ws := &Conn{config: config, request: request, buf: buf, rwc: rwc,
+ frameReaderFactory: hybiFrameReaderFactory{buf.Reader},
+ frameWriterFactory: hybiFrameWriterFactory{
+ buf.Writer, request == nil},
+ PayloadType: TextFrame,
+ defaultCloseStatus: closeStatusNormal}
+ ws.frameHandler = &hybiFrameHandler{conn: ws}
+ return ws
+}
+
+// generateMaskingKey generates a masking key for a frame.
+func generateMaskingKey() (maskingKey []byte, err error) {
+ maskingKey = make([]byte, 4)
+ if _, err = io.ReadFull(rand.Reader, maskingKey); err != nil {
+ return
+ }
+ return
+}
+
+// generateNonce generates a nonce consisting of a randomly selected 16-byte
+// value that has been base64-encoded.
+func generateNonce() (nonce []byte) {
+ key := make([]byte, 16)
+ if _, err := io.ReadFull(rand.Reader, key); err != nil {
+ panic(err)
+ }
+ nonce = make([]byte, 24)
+ base64.StdEncoding.Encode(nonce, key)
+ return
+}
+
+// removeZone removes IPv6 zone identifier from host.
+// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080"
+func removeZone(host string) string {
+ if !strings.HasPrefix(host, "[") {
+ return host
+ }
+ i := strings.LastIndex(host, "]")
+ if i < 0 {
+ return host
+ }
+ j := strings.LastIndex(host[:i], "%")
+ if j < 0 {
+ return host
+ }
+ return host[:j] + host[i:]
+}
+
+// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of
+// the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string.
+func getNonceAccept(nonce []byte) (expected []byte, err error) {
+ h := sha1.New()
+ if _, err = h.Write(nonce); err != nil {
+ return
+ }
+ if _, err = h.Write([]byte(websocketGUID)); err != nil {
+ return
+ }
+ expected = make([]byte, 28)
+ base64.StdEncoding.Encode(expected, h.Sum(nil))
+ return
+}
+
+// Client handshake described in draft-ietf-hybi-thewebsocket-protocol-17
+func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) {
+ bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n")
+
+ // According to RFC 6874, an HTTP client, proxy, or other
+ // intermediary must remove any IPv6 zone identifier attached
+ // to an outgoing URI.
+ bw.WriteString("Host: " + removeZone(config.Location.Host) + "\r\n")
+ bw.WriteString("Upgrade: websocket\r\n")
+ bw.WriteString("Connection: Upgrade\r\n")
+ nonce := generateNonce()
+ if config.handshakeData != nil {
+ nonce = []byte(config.handshakeData["key"])
+ }
+ bw.WriteString("Sec-WebSocket-Key: " + string(nonce) + "\r\n")
+ bw.WriteString("Origin: " + strings.ToLower(config.Origin.String()) + "\r\n")
+
+ if config.Version != ProtocolVersionHybi13 {
+ return ErrBadProtocolVersion
+ }
+
+ bw.WriteString("Sec-WebSocket-Version: " + fmt.Sprintf("%d", config.Version) + "\r\n")
+ if len(config.Protocol) > 0 {
+ bw.WriteString("Sec-WebSocket-Protocol: " + strings.Join(config.Protocol, ", ") + "\r\n")
+ }
+ // TODO(ukai): send Sec-WebSocket-Extensions.
+ err = config.Header.WriteSubset(bw, handshakeHeader)
+ if err != nil {
+ return err
+ }
+
+ bw.WriteString("\r\n")
+ if err = bw.Flush(); err != nil {
+ return err
+ }
+
+ resp, err := http.ReadResponse(br, &http.Request{Method: "GET"})
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != 101 {
+ return ErrBadStatus
+ }
+ if strings.ToLower(resp.Header.Get("Upgrade")) != "websocket" ||
+ strings.ToLower(resp.Header.Get("Connection")) != "upgrade" {
+ return ErrBadUpgrade
+ }
+ expectedAccept, err := getNonceAccept(nonce)
+ if err != nil {
+ return err
+ }
+ if resp.Header.Get("Sec-WebSocket-Accept") != string(expectedAccept) {
+ return ErrChallengeResponse
+ }
+ if resp.Header.Get("Sec-WebSocket-Extensions") != "" {
+ return ErrUnsupportedExtensions
+ }
+ offeredProtocol := resp.Header.Get("Sec-WebSocket-Protocol")
+ if offeredProtocol != "" {
+ protocolMatched := false
+ for i := 0; i < len(config.Protocol); i++ {
+ if config.Protocol[i] == offeredProtocol {
+ protocolMatched = true
+ break
+ }
+ }
+ if !protocolMatched {
+ return ErrBadWebSocketProtocol
+ }
+ config.Protocol = []string{offeredProtocol}
+ }
+
+ return nil
+}
+
+// newHybiClientConn creates a client WebSocket connection after handshake.
+func newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn {
+ return newHybiConn(config, buf, rwc, nil)
+}
+
+// A HybiServerHandshaker performs a server handshake using hybi draft protocol.
+type hybiServerHandshaker struct {
+ *Config
+ accept []byte
+}
+
+func (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) {
+ c.Version = ProtocolVersionHybi13
+ if req.Method != "GET" {
+ return http.StatusMethodNotAllowed, ErrBadRequestMethod
+ }
+ // HTTP version can be safely ignored.
+
+ if strings.ToLower(req.Header.Get("Upgrade")) != "websocket" ||
+ !strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") {
+ return http.StatusBadRequest, ErrNotWebSocket
+ }
+
+ key := req.Header.Get("Sec-Websocket-Key")
+ if key == "" {
+ return http.StatusBadRequest, ErrChallengeResponse
+ }
+ version := req.Header.Get("Sec-Websocket-Version")
+ switch version {
+ case "13":
+ c.Version = ProtocolVersionHybi13
+ default:
+ return http.StatusBadRequest, ErrBadWebSocketVersion
+ }
+ var scheme string
+ if req.TLS != nil {
+ scheme = "wss"
+ } else {
+ scheme = "ws"
+ }
+ c.Location, err = url.ParseRequestURI(scheme + "://" + req.Host + req.URL.RequestURI())
+ if err != nil {
+ return http.StatusBadRequest, err
+ }
+ protocol := strings.TrimSpace(req.Header.Get("Sec-Websocket-Protocol"))
+ if protocol != "" {
+ protocols := strings.Split(protocol, ",")
+ for i := 0; i < len(protocols); i++ {
+ c.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i]))
+ }
+ }
+ c.accept, err = getNonceAccept([]byte(key))
+ if err != nil {
+ return http.StatusInternalServerError, err
+ }
+ return http.StatusSwitchingProtocols, nil
+}
+
+// Origin parses the Origin header in req.
+// If the Origin header is not set, it returns nil and nil.
+func Origin(config *Config, req *http.Request) (*url.URL, error) {
+ var origin string
+ switch config.Version {
+ case ProtocolVersionHybi13:
+ origin = req.Header.Get("Origin")
+ }
+ if origin == "" {
+ return nil, nil
+ }
+ return url.ParseRequestURI(origin)
+}
+
+func (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) {
+ if len(c.Protocol) > 0 {
+ if len(c.Protocol) != 1 {
+ // You need choose a Protocol in Handshake func in Server.
+ return ErrBadWebSocketProtocol
+ }
+ }
+ buf.WriteString("HTTP/1.1 101 Switching Protocols\r\n")
+ buf.WriteString("Upgrade: websocket\r\n")
+ buf.WriteString("Connection: Upgrade\r\n")
+ buf.WriteString("Sec-WebSocket-Accept: " + string(c.accept) + "\r\n")
+ if len(c.Protocol) > 0 {
+ buf.WriteString("Sec-WebSocket-Protocol: " + c.Protocol[0] + "\r\n")
+ }
+ // TODO(ukai): send Sec-WebSocket-Extensions.
+ if c.Header != nil {
+ err := c.Header.WriteSubset(buf, handshakeHeader)
+ if err != nil {
+ return err
+ }
+ }
+ buf.WriteString("\r\n")
+ return buf.Flush()
+}
+
+func (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+ return newHybiServerConn(c.Config, buf, rwc, request)
+}
+
+// newHybiServerConn returns a new WebSocket connection speaking hybi draft protocol.
+func newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+ return newHybiConn(config, buf, rwc, request)
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/server.go b/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/server.go
new file mode 100644
index 00000000000..0895dea1905
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/server.go
@@ -0,0 +1,113 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "net/http"
+)
+
+// newServerConn performs the server side of the WebSocket opening handshake
+// on a hijacked connection. On success it returns a *Conn speaking the hybi
+// protocol; on failure it writes an appropriate HTTP error response directly
+// to buf and returns a non-nil err (with conn == nil).
+func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) {
+ var hs serverHandshaker = &hybiServerHandshaker{Config: config}
+ code, err := hs.ReadHandshake(buf.Reader, req)
+ if err == ErrBadWebSocketVersion {
+ // Unsupported protocol version: advertise the version this server
+ // supports via Sec-WebSocket-Version.
+ fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+ fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion)
+ buf.WriteString("\r\n")
+ buf.WriteString(err.Error())
+ buf.Flush()
+ return
+ }
+ if err != nil {
+ fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+ buf.WriteString("\r\n")
+ buf.WriteString(err.Error())
+ buf.Flush()
+ return
+ }
+ if handshake != nil {
+ // Optional user-supplied check (e.g. origin verification or
+ // subprotocol selection); any error rejects the request with 403.
+ err = handshake(config, req)
+ if err != nil {
+ code = http.StatusForbidden
+ fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+ buf.WriteString("\r\n")
+ buf.Flush()
+ return
+ }
+ }
+ err = hs.AcceptHandshake(buf.Writer)
+ if err != nil {
+ code = http.StatusBadRequest
+ fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+ buf.WriteString("\r\n")
+ buf.Flush()
+ return
+ }
+ conn = hs.NewServerConn(buf, rwc, req)
+ return
+}
+
+// Server represents a server of a WebSocket.
+type Server struct {
+ // Config is a WebSocket configuration for new WebSocket connection.
+ Config
+
+ // Handshake is an optional function run during the WebSocket opening
+ // handshake. It can be used, for example, to verify (or deliberately
+ // skip verifying) the Origin header, or to select config.Protocol.
+ // Returning an error rejects the connection with 403 Forbidden.
+ Handshake func(*Config, *http.Request) error
+
+ // Handler handles an established WebSocket connection.
+ Handler
+}
+
+// ServeHTTP implements the http.Handler interface for a WebSocket;
+// the actual upgrade and dispatch happen in serveWebSocket.
+func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ s.serveWebSocket(w, req)
+}
+
+// serveWebSocket hijacks the HTTP connection, runs the opening handshake,
+// and hands the resulting *Conn to s.Handler. The connection is always
+// closed when the handler returns (see the deferred Close below).
+func (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) {
+ // Take over the raw connection; the type assertion panics if the
+ // ResponseWriter does not support hijacking.
+ rwc, buf, err := w.(http.Hijacker).Hijack()
+ if err != nil {
+ panic("Hijack failed: " + err.Error())
+ }
+ // The server should abort the WebSocket connection if it finds
+ // the client did not send a handshake that matches with protocol
+ // specification.
+ defer rwc.Close()
+ conn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake)
+ if err != nil {
+ // newServerConn has already written an error response; nothing
+ // more to do here.
+ return
+ }
+ if conn == nil {
+ panic("unexpected nil conn")
+ }
+ s.Handler(conn)
+}
+
+// Handler is a simple interface to a WebSocket browser client.
+// It checks if Origin header is valid URL by default.
+// You might want to verify websocket.Conn.Config().Origin in the func.
+// If you use Server instead of Handler, you could call websocket.Origin and
+// check the origin in your Handshake func. So, if you want to accept
+// non-browser clients, which do not send an Origin header, set a
+// Server.Handshake that does not check the origin.
+type Handler func(*Conn)
+
+// checkOrigin is the default handshake used by Handler.ServeHTTP: it parses
+// the Origin header into config.Origin and rejects requests that carry no
+// (or an unparseable) origin.
+func checkOrigin(config *Config, req *http.Request) (err error) {
+ config.Origin, err = Origin(config, req)
+ if err == nil && config.Origin == nil {
+ // Origin returns (nil, nil) when the header is absent; treat the
+ // missing header as a rejection.
+ return fmt.Errorf("null origin")
+ }
+ return err
+}
+
+// ServeHTTP implements the http.Handler interface for a WebSocket.
+// It upgrades the request using the default checkOrigin handshake and then
+// invokes h with the resulting connection.
+func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ s := Server{Handler: h, Handshake: checkOrigin}
+ s.serveWebSocket(w, req)
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/websocket.go b/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/websocket.go
new file mode 100644
index 00000000000..6c45c735296
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/websocket.go
@@ -0,0 +1,451 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package websocket implements a client and server for the WebSocket protocol
+// as specified in RFC 6455.
+//
+// This package currently lacks some features found in alternative
+// and more actively maintained WebSocket packages:
+//
+// https://godoc.org/github.com/gorilla/websocket
+// https://godoc.org/nhooyr.io/websocket
+package websocket // import "golang.org/x/net/websocket"
+
+import (
+ "bufio"
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "sync"
+ "time"
+)
+
+const (
+ ProtocolVersionHybi13 = 13
+ ProtocolVersionHybi = ProtocolVersionHybi13
+ SupportedProtocolVersion = "13"
+
+ ContinuationFrame = 0
+ TextFrame = 1
+ BinaryFrame = 2
+ CloseFrame = 8
+ PingFrame = 9
+ PongFrame = 10
+ UnknownFrame = 255
+
+ DefaultMaxPayloadBytes = 32 << 20 // 32MB
+)
+
+// ProtocolError represents WebSocket protocol errors.
+type ProtocolError struct {
+ ErrorString string
+}
+
+func (err *ProtocolError) Error() string { return err.ErrorString }
+
+var (
+ ErrBadProtocolVersion = &ProtocolError{"bad protocol version"}
+ ErrBadScheme = &ProtocolError{"bad scheme"}
+ ErrBadStatus = &ProtocolError{"bad status"}
+ ErrBadUpgrade = &ProtocolError{"missing or bad upgrade"}
+ ErrBadWebSocketOrigin = &ProtocolError{"missing or bad WebSocket-Origin"}
+ ErrBadWebSocketLocation = &ProtocolError{"missing or bad WebSocket-Location"}
+ ErrBadWebSocketProtocol = &ProtocolError{"missing or bad WebSocket-Protocol"}
+ ErrBadWebSocketVersion = &ProtocolError{"missing or bad WebSocket Version"}
+ ErrChallengeResponse = &ProtocolError{"mismatch challenge/response"}
+ ErrBadFrame = &ProtocolError{"bad frame"}
+ ErrBadFrameBoundary = &ProtocolError{"not on frame boundary"}
+ ErrNotWebSocket = &ProtocolError{"not websocket protocol"}
+ ErrBadRequestMethod = &ProtocolError{"bad method"}
+ ErrNotSupported = &ProtocolError{"not supported"}
+)
+
+// ErrFrameTooLarge is returned by Codec's Receive method if payload size
+// exceeds limit set by Conn.MaxPayloadBytes
+var ErrFrameTooLarge = errors.New("websocket: frame payload size exceeds limit")
+
+// Addr is an implementation of net.Addr for WebSocket.
+type Addr struct {
+ *url.URL
+}
+
+// Network returns the network type for a WebSocket, "websocket".
+func (addr *Addr) Network() string { return "websocket" }
+
+// Config is a WebSocket configuration
+type Config struct {
+ // A WebSocket server address.
+ Location *url.URL
+
+ // A Websocket client origin.
+ Origin *url.URL
+
+ // WebSocket subprotocols.
+ Protocol []string
+
+ // WebSocket protocol version.
+ Version int
+
+ // TLS config for secure WebSocket (wss).
+ TlsConfig *tls.Config
+
+ // Additional header fields to be sent in WebSocket opening handshake.
+ Header http.Header
+
+ // Dialer used when opening websocket connections.
+ Dialer *net.Dialer
+
+ handshakeData map[string]string
+}
+
+// serverHandshaker is an interface to handle WebSocket server side handshake.
+type serverHandshaker interface {
+ // ReadHandshake reads handshake request message from client.
+ // Returns http response code and error if any.
+ ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error)
+
+ // AcceptHandshake accepts the client handshake request and sends
+ // handshake response back to client.
+ AcceptHandshake(buf *bufio.Writer) (err error)
+
+ // NewServerConn creates a new WebSocket connection.
+ NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn)
+}
+
+// frameReader is an interface to read a WebSocket frame.
+type frameReader interface {
+ // Reader is to read payload of the frame.
+ io.Reader
+
+ // PayloadType returns payload type.
+ PayloadType() byte
+
+ // HeaderReader returns a reader to read header of the frame.
+ HeaderReader() io.Reader
+
+ // TrailerReader returns a reader to read trailer of the frame.
+ // If it returns nil, there is no trailer in the frame.
+ TrailerReader() io.Reader
+
+ // Len returns total length of the frame, including header and trailer.
+ Len() int
+}
+
+// frameReaderFactory is an interface that creates a new frame reader.
+type frameReaderFactory interface {
+ NewFrameReader() (r frameReader, err error)
+}
+
+// frameWriter is an interface to write a WebSocket frame.
+type frameWriter interface {
+ // Writer is to write payload of the frame.
+ io.WriteCloser
+}
+
+// frameWriterFactory is an interface to create new frame writer.
+type frameWriterFactory interface {
+ NewFrameWriter(payloadType byte) (w frameWriter, err error)
+}
+
+type frameHandler interface {
+ HandleFrame(frame frameReader) (r frameReader, err error)
+ WriteClose(status int) (err error)
+}
+
+// Conn represents a WebSocket connection.
+//
+// Multiple goroutines may invoke methods on a Conn simultaneously.
+type Conn struct {
+ config *Config
+ request *http.Request
+
+ buf *bufio.ReadWriter
+ rwc io.ReadWriteCloser
+
+ rio sync.Mutex
+ frameReaderFactory
+ frameReader
+
+ wio sync.Mutex
+ frameWriterFactory
+
+ frameHandler
+ PayloadType byte
+ defaultCloseStatus int
+
+ // MaxPayloadBytes limits the size of frame payload received over Conn
+ // by Codec's Receive method. If zero, DefaultMaxPayloadBytes is used.
+ MaxPayloadBytes int
+}
+
+// Read implements the io.Reader interface:
+// it reads data of a frame from the WebSocket connection.
+// if msg is not large enough for the frame data, it fills the msg and next Read
+// will read the rest of the frame data.
+// it reads Text frame or Binary frame.
+func (ws *Conn) Read(msg []byte) (n int, err error) {
+ ws.rio.Lock()
+ defer ws.rio.Unlock()
+again:
+ if ws.frameReader == nil {
+ // No frame in progress: pull the next frame off the wire and let
+ // the frame handler process it.
+ frame, err := ws.frameReaderFactory.NewFrameReader()
+ if err != nil {
+ return 0, err
+ }
+ ws.frameReader, err = ws.frameHandler.HandleFrame(frame)
+ if err != nil {
+ return 0, err
+ }
+ if ws.frameReader == nil {
+ // A nil reader means the frame was consumed internally by the
+ // handler; try the next frame.
+ goto again
+ }
+ }
+ n, err = ws.frameReader.Read(msg)
+ if err == io.EOF {
+ // Current frame exhausted: drain any trailer, then continue with
+ // the next frame instead of surfacing EOF to the caller.
+ if trailer := ws.frameReader.TrailerReader(); trailer != nil {
+ io.Copy(ioutil.Discard, trailer)
+ }
+ ws.frameReader = nil
+ goto again
+ }
+ return n, err
+}
+
+// Write implements the io.Writer interface:
+// it writes data as a frame to the WebSocket connection.
+// Each call sends msg as a single frame of type ws.PayloadType.
+func (ws *Conn) Write(msg []byte) (n int, err error) {
+ ws.wio.Lock()
+ defer ws.wio.Unlock()
+ w, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType)
+ if err != nil {
+ return 0, err
+ }
+ n, err = w.Write(msg)
+ // NOTE(review): the frame writer's Close error is discarded; the Write
+ // error (if any) is what the caller sees.
+ w.Close()
+ return n, err
+}
+
+// Close implements the io.Closer interface.
+// It writes a close frame with the connection's default close status and then
+// closes the underlying transport; a close-frame write error takes precedence
+// over the transport close error.
+func (ws *Conn) Close() error {
+ err := ws.frameHandler.WriteClose(ws.defaultCloseStatus)
+ err1 := ws.rwc.Close()
+ if err != nil {
+ return err
+ }
+ return err1
+}
+
+// IsClientConn reports whether ws is a client-side connection.
+func (ws *Conn) IsClientConn() bool { return ws.request == nil }
+
+// IsServerConn reports whether ws is a server-side connection.
+func (ws *Conn) IsServerConn() bool { return ws.request != nil }
+
+// LocalAddr returns the WebSocket Origin for the connection for client, or
+// the WebSocket location for server.
+func (ws *Conn) LocalAddr() net.Addr {
+ if ws.IsClientConn() {
+ return &Addr{ws.config.Origin}
+ }
+ return &Addr{ws.config.Location}
+}
+
+// RemoteAddr returns the WebSocket location for the connection for client, or
+// the WebSocket Origin for server.
+func (ws *Conn) RemoteAddr() net.Addr {
+ if ws.IsClientConn() {
+ return &Addr{ws.config.Location}
+ }
+ return &Addr{ws.config.Origin}
+}
+
+var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn")
+
+// SetDeadline sets the connection's network read & write deadlines.
+func (ws *Conn) SetDeadline(t time.Time) error {
+ if conn, ok := ws.rwc.(net.Conn); ok {
+ return conn.SetDeadline(t)
+ }
+ return errSetDeadline
+}
+
+// SetReadDeadline sets the connection's network read deadline.
+func (ws *Conn) SetReadDeadline(t time.Time) error {
+ if conn, ok := ws.rwc.(net.Conn); ok {
+ return conn.SetReadDeadline(t)
+ }
+ return errSetDeadline
+}
+
+// SetWriteDeadline sets the connection's network write deadline.
+func (ws *Conn) SetWriteDeadline(t time.Time) error {
+ if conn, ok := ws.rwc.(net.Conn); ok {
+ return conn.SetWriteDeadline(t)
+ }
+ return errSetDeadline
+}
+
+// Config returns the WebSocket config.
+func (ws *Conn) Config() *Config { return ws.config }
+
+// Request returns the http request upgraded to the WebSocket.
+// It is nil for client side.
+func (ws *Conn) Request() *http.Request { return ws.request }
+
+// Codec represents a symmetric pair of functions that implement a codec.
+type Codec struct {
+ Marshal func(v interface{}) (data []byte, payloadType byte, err error)
+ Unmarshal func(data []byte, payloadType byte, v interface{}) (err error)
+}
+
+// Send sends v marshaled by cd.Marshal as single frame to ws.
+// The frame's payload type is whatever cd.Marshal reports for v.
+func (cd Codec) Send(ws *Conn, v interface{}) (err error) {
+ data, payloadType, err := cd.Marshal(v)
+ if err != nil {
+ return err
+ }
+ ws.wio.Lock()
+ defer ws.wio.Unlock()
+ w, err := ws.frameWriterFactory.NewFrameWriter(payloadType)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(data)
+ // NOTE(review): the frame writer's Close error is discarded; the Write
+ // error (if any) is returned instead.
+ w.Close()
+ return err
+}
+
+// Receive receives single frame from ws, unmarshaled by cd.Unmarshal and stores
+// in v. The whole frame payload is read to an in-memory buffer; max size of
+// payload is defined by ws.MaxPayloadBytes. If frame payload size exceeds
+// limit, ErrFrameTooLarge is returned; in this case frame is not read off wire
+// completely. The next call to Receive would read and discard leftover data of
+// previous oversized frame before processing next frame.
+func (cd Codec) Receive(ws *Conn, v interface{}) (err error) {
+ ws.rio.Lock()
+ defer ws.rio.Unlock()
+ if ws.frameReader != nil {
+ // Drain leftover data of a previous oversized frame (see doc above).
+ _, err = io.Copy(ioutil.Discard, ws.frameReader)
+ if err != nil {
+ return err
+ }
+ ws.frameReader = nil
+ }
+again:
+ frame, err := ws.frameReaderFactory.NewFrameReader()
+ if err != nil {
+ return err
+ }
+ frame, err = ws.frameHandler.HandleFrame(frame)
+ if err != nil {
+ return err
+ }
+ if frame == nil {
+ // A nil frame means the handler consumed it internally; read the
+ // next one.
+ goto again
+ }
+ maxPayloadBytes := ws.MaxPayloadBytes
+ if maxPayloadBytes == 0 {
+ maxPayloadBytes = DefaultMaxPayloadBytes
+ }
+ if hf, ok := frame.(*hybiFrameReader); ok && hf.header.Length > int64(maxPayloadBytes) {
+ // payload size exceeds limit, no need to call Unmarshal
+ //
+ // set frameReader to current oversized frame so that
+ // the next call to this function can drain leftover
+ // data before processing the next frame
+ ws.frameReader = frame
+ return ErrFrameTooLarge
+ }
+ payloadType := frame.PayloadType()
+ data, err := ioutil.ReadAll(frame)
+ if err != nil {
+ return err
+ }
+ return cd.Unmarshal(data, payloadType, v)
+}
+
+// marshal is the Message codec's Marshal function: a string becomes a Text
+// frame and a []byte becomes a Binary frame; any other type is rejected
+// with ErrNotSupported.
+func marshal(v interface{}) (msg []byte, payloadType byte, err error) {
+ switch data := v.(type) {
+ case string:
+ return []byte(data), TextFrame, nil
+ case []byte:
+ return data, BinaryFrame, nil
+ }
+ return nil, UnknownFrame, ErrNotSupported
+}
+
+// unmarshal is the Message codec's Unmarshal function: it stores the raw
+// payload into a *string or *[]byte; any other destination type is rejected
+// with ErrNotSupported. payloadType is ignored.
+func unmarshal(msg []byte, payloadType byte, v interface{}) (err error) {
+ switch data := v.(type) {
+ case *string:
+ *data = string(msg)
+ return nil
+ case *[]byte:
+ // Note: the caller's slice aliases the received payload buffer.
+ *data = msg
+ return nil
+ }
+ return ErrNotSupported
+}
+
+/*
+Message is a codec to send/receive text/binary data in a frame on WebSocket connection.
+To send/receive text frame, use string type.
+To send/receive binary frame, use []byte type.
+
+Trivial usage:
+
+ import "websocket"
+
+ // receive text frame
+ var message string
+ websocket.Message.Receive(ws, &message)
+
+ // send text frame
+ message = "hello"
+ websocket.Message.Send(ws, message)
+
+ // receive binary frame
+ var data []byte
+ websocket.Message.Receive(ws, &data)
+
+ // send binary frame
+ data = []byte{0, 1, 2}
+ websocket.Message.Send(ws, data)
+
+*/
+var Message = Codec{marshal, unmarshal}
+
+// jsonMarshal encodes v with encoding/json and labels the frame as Text.
+func jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) {
+ msg, err = json.Marshal(v)
+ return msg, TextFrame, err
+}
+
+// jsonUnmarshal decodes a JSON frame payload into v; payloadType is ignored.
+func jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) {
+ return json.Unmarshal(msg, v)
+}
+
+/*
+JSON is a codec to send/receive JSON data in a frame from a WebSocket connection.
+
+Trivial usage:
+
+ import "websocket"
+
+ type T struct {
+ Msg string
+ Count int
+ }
+
+ // receive JSON type T
+ var data T
+ websocket.JSON.Receive(ws, &data)
+
+ // send JSON type T
+ websocket.JSON.Send(ws, data)
+*/
+var JSON = Codec{jsonMarshal, jsonUnmarshal}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/modules.txt b/ql/test/query-tests/Security/CWE-918/vendor/modules.txt
new file mode 100644
index 00000000000..6551554790e
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/modules.txt
@@ -0,0 +1,30 @@
+# github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee
+github.com/gobwas/httphead
+# github.com/gobwas/pool v0.2.0
+github.com/gobwas/pool
+github.com/gobwas/pool/internal/pmath
+github.com/gobwas/pool/pbufio
+# github.com/gobwas/ws v1.0.3
+## explicit
+github.com/gobwas/ws
+# github.com/gorilla/websocket v1.4.2
+## explicit
+github.com/gorilla/websocket
+# github.com/klauspost/compress v1.10.3
+github.com/klauspost/compress/flate
+# github.com/sacOO7/go-logger v0.0.0-20180719173527-9ac9add5a50d
+## explicit
+github.com/sacOO7/go-logger
+# github.com/sacOO7/gowebsocket v0.0.0-20180719182212-1436bb906a4e
+## explicit
+github.com/sacOO7/gowebsocket
+# golang.org/x/net v0.0.0-20200421231249-e086a090c8fd
+## explicit
+golang.org/x/net/websocket
+# nhooyr.io/websocket v1.8.5
+## explicit
+nhooyr.io/websocket
+nhooyr.io/websocket/internal/bpool
+nhooyr.io/websocket/internal/errd
+nhooyr.io/websocket/internal/wsjs
+nhooyr.io/websocket/internal/xsync
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/.gitignore b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/.gitignore
new file mode 100644
index 00000000000..6961e5c894a
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/.gitignore
@@ -0,0 +1 @@
+websocket.test
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/.travis.yml b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/.travis.yml
new file mode 100644
index 00000000000..41d3c201468
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/.travis.yml
@@ -0,0 +1,40 @@
+language: go
+go: 1.x
+dist: bionic
+
+env:
+ global:
+ - SHFMT_URL=https://github.com/mvdan/sh/releases/download/v3.0.1/shfmt_v3.0.1_linux_amd64
+ - GOFLAGS="-mod=readonly"
+
+jobs:
+ include:
+ - name: Format
+ before_script:
+ - sudo apt-get install -y npm
+ - sudo npm install -g prettier
+ - sudo curl -L "$SHFMT_URL" > /usr/local/bin/shfmt && sudo chmod +x /usr/local/bin/shfmt
+ - go get golang.org/x/tools/cmd/stringer
+ - go get golang.org/x/tools/cmd/goimports
+ script: make -j16 fmt
+ - name: Lint
+ before_script:
+ - sudo apt-get install -y shellcheck
+ - go get golang.org/x/lint/golint
+ script: make -j16 lint
+ - name: Test
+ before_script:
+ - sudo apt-get install -y chromium-browser
+ - go get github.com/agnivade/wasmbrowsertest
+ - go get github.com/mattn/goveralls
+ script: make -j16 test
+
+addons:
+ apt:
+ update: true
+
+cache:
+ npm: true
+ directories:
+ - ~/.cache
+ - ~/gopath/pkg
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/LICENSE.txt b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/LICENSE.txt
new file mode 100644
index 00000000000..b5b5fef31f0
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/LICENSE.txt
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 Anmol Sethi
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/Makefile b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/Makefile
new file mode 100644
index 00000000000..f9f31c49f1c
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/Makefile
@@ -0,0 +1,7 @@
+all: fmt lint test
+
+.SILENT:
+
+include ci/fmt.mk
+include ci/lint.mk
+include ci/test.mk
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/README.md b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/README.md
new file mode 100644
index 00000000000..14c392935e1
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/README.md
@@ -0,0 +1,132 @@
+# websocket
+
+[](https://pkg.go.dev/nhooyr.io/websocket)
+
+websocket is a minimal and idiomatic WebSocket library for Go.
+
+## Install
+
+```bash
+go get nhooyr.io/websocket
+```
+
+## Features
+
+- Minimal and idiomatic API
+- First class [context.Context](https://blog.golang.org/context) support
+- Fully passes the WebSocket [autobahn-testsuite](https://github.com/crossbario/autobahn-testsuite)
+- Thorough tests with [90% coverage](https://coveralls.io/github/nhooyr/websocket)
+- [Single dependency](https://pkg.go.dev/nhooyr.io/websocket?tab=imports)
+- JSON and protobuf helpers in the [wsjson](https://pkg.go.dev/nhooyr.io/websocket/wsjson) and [wspb](https://pkg.go.dev/nhooyr.io/websocket/wspb) subpackages
+- Zero alloc reads and writes
+- Concurrent writes
+- [Close handshake](https://pkg.go.dev/nhooyr.io/websocket#Conn.Close)
+- [net.Conn](https://pkg.go.dev/nhooyr.io/websocket#NetConn) wrapper
+- [Ping pong](https://pkg.go.dev/nhooyr.io/websocket#Conn.Ping) API
+- [RFC 7692](https://tools.ietf.org/html/rfc7692) permessage-deflate compression
+- Compile to [Wasm](https://pkg.go.dev/nhooyr.io/websocket#hdr-Wasm)
+
+## Roadmap
+
+- [ ] HTTP/2 [#4](https://github.com/nhooyr/websocket/issues/4)
+
+## Examples
+
+For a production quality example that demonstrates the complete API, see the
+[echo example](./examples/echo).
+
+For a full stack example, see the [chat example](./examples/chat).
+
+### Server
+
+```go
+http.HandlerFunc(func (w http.ResponseWriter, r *http.Request) {
+ c, err := websocket.Accept(w, r, nil)
+ if err != nil {
+ // ...
+ }
+ defer c.Close(websocket.StatusInternalError, "the sky is falling")
+
+ ctx, cancel := context.WithTimeout(r.Context(), time.Second*10)
+ defer cancel()
+
+ var v interface{}
+ err = wsjson.Read(ctx, c, &v)
+ if err != nil {
+ // ...
+ }
+
+ log.Printf("received: %v", v)
+
+ c.Close(websocket.StatusNormalClosure, "")
+})
+```
+
+### Client
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+defer cancel()
+
+c, _, err := websocket.Dial(ctx, "ws://localhost:8080", nil)
+if err != nil {
+ // ...
+}
+defer c.Close(websocket.StatusInternalError, "the sky is falling")
+
+err = wsjson.Write(ctx, c, "hi")
+if err != nil {
+ // ...
+}
+
+c.Close(websocket.StatusNormalClosure, "")
+```
+
+## Comparison
+
+### gorilla/websocket
+
+Advantages of [gorilla/websocket](https://github.com/gorilla/websocket):
+
+- Mature and widely used
+- [Prepared writes](https://pkg.go.dev/github.com/gorilla/websocket#PreparedMessage)
+- Configurable [buffer sizes](https://pkg.go.dev/github.com/gorilla/websocket#hdr-Buffers)
+
+Advantages of nhooyr.io/websocket:
+
+- Minimal and idiomatic API
+ - Compare godoc of [nhooyr.io/websocket](https://pkg.go.dev/nhooyr.io/websocket) with [gorilla/websocket](https://pkg.go.dev/github.com/gorilla/websocket) side by side.
+- [net.Conn](https://pkg.go.dev/nhooyr.io/websocket#NetConn) wrapper
+- Zero alloc reads and writes ([gorilla/websocket#535](https://github.com/gorilla/websocket/issues/535))
+- Full [context.Context](https://blog.golang.org/context) support
+- Dial uses [net/http.Client](https://golang.org/pkg/net/http/#Client)
+ - Will enable easy HTTP/2 support in the future
+ - Gorilla writes directly to a net.Conn and so duplicates features of net/http.Client.
+- Concurrent writes
+- Close handshake ([gorilla/websocket#448](https://github.com/gorilla/websocket/issues/448))
+- Idiomatic [ping pong](https://pkg.go.dev/nhooyr.io/websocket#Conn.Ping) API
+ - Gorilla requires registering a pong callback before sending a Ping
+- Can target Wasm ([gorilla/websocket#432](https://github.com/gorilla/websocket/issues/432))
+- Transparent message buffer reuse with [wsjson](https://pkg.go.dev/nhooyr.io/websocket/wsjson) and [wspb](https://pkg.go.dev/nhooyr.io/websocket/wspb) subpackages
+- [1.75x](https://github.com/nhooyr/websocket/releases/tag/v1.7.4) faster WebSocket masking implementation in pure Go
+ - Gorilla's implementation is slower and uses [unsafe](https://golang.org/pkg/unsafe/).
+- Full [permessage-deflate](https://tools.ietf.org/html/rfc7692) compression extension support
+ - Gorilla only supports no context takeover mode
+ - We use a vendored [klauspost/compress](https://github.com/klauspost/compress) for much lower memory usage ([gorilla/websocket#203](https://github.com/gorilla/websocket/issues/203))
+- [CloseRead](https://pkg.go.dev/nhooyr.io/websocket#Conn.CloseRead) helper ([gorilla/websocket#492](https://github.com/gorilla/websocket/issues/492))
+- Actively maintained ([gorilla/websocket#370](https://github.com/gorilla/websocket/issues/370))
+
+#### golang.org/x/net/websocket
+
+[golang.org/x/net/websocket](https://pkg.go.dev/golang.org/x/net/websocket) is deprecated.
+See [golang/go/issues/18152](https://github.com/golang/go/issues/18152).
+
+The [net.Conn](https://pkg.go.dev/nhooyr.io/websocket#NetConn) can help in transitioning
+to nhooyr.io/websocket.
+
+#### gobwas/ws
+
+[gobwas/ws](https://github.com/gobwas/ws) has an extremely flexible API that allows it to be used
+in an event driven style for performance. See the author's [blog post](https://medium.freecodecamp.org/million-websockets-and-go-cc58418460bb).
+
+However when writing idiomatic Go, nhooyr.io/websocket will be faster and easier to use.
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/accept.go b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/accept.go
new file mode 100644
index 00000000000..6bed54da028
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/accept.go
@@ -0,0 +1,365 @@
+// +build !js
+
+package websocket
+
+import (
+ "bytes"
+ "crypto/sha1"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "path/filepath"
+ "strings"
+
+ "nhooyr.io/websocket/internal/errd"
+)
+
+// AcceptOptions represents Accept's options.
+type AcceptOptions struct {
+ // Subprotocols lists the WebSocket subprotocols that Accept will negotiate with the client.
+ // The empty subprotocol will always be negotiated as per RFC 6455. If you would like to
+ // reject it, close the connection when c.Subprotocol() == "".
+ Subprotocols []string
+
+ // InsecureSkipVerify is used to disable Accept's origin verification behaviour.
+ //
+ // Deprecated: Use OriginPatterns with a match all pattern of * instead to control
+ // origin authorization yourself.
+ InsecureSkipVerify bool
+
+ // OriginPatterns lists the host patterns for authorized origins.
+ // The request host is always authorized.
+ // Use this to enable cross origin WebSockets.
+ //
+ // i.e javascript running on example.com wants to access a WebSocket server at chat.example.com.
+ // In such a case, example.com is the origin and chat.example.com is the request host.
+ // One would set this field to []string{"example.com"} to authorize example.com to connect.
+ //
+ // Each pattern is matched case insensitively against the request origin host
+ // with filepath.Match.
+ // See https://golang.org/pkg/path/filepath/#Match
+ //
+ // Please ensure you understand the ramifications of enabling this.
+ // If used incorrectly your WebSocket server will be open to CSRF attacks.
+ OriginPatterns []string
+
+ // CompressionMode controls the compression mode.
+ // Defaults to CompressionNoContextTakeover.
+ //
+ // See docs on CompressionMode for details.
+ CompressionMode CompressionMode
+
+ // CompressionThreshold controls the minimum size of a message before compression is applied.
+ //
+ // Defaults to 512 bytes for CompressionNoContextTakeover and 128 bytes
+ // for CompressionContextTakeover.
+ CompressionThreshold int
+}
+
+// Accept accepts a WebSocket handshake from a client and upgrades the
+// the connection to a WebSocket.
+//
+// Accept will not allow cross origin requests by default.
+// See the InsecureSkipVerify option to allow cross origin requests.
+//
+// Accept will write a response to w on all errors.
+func Accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (*Conn, error) {
+ return accept(w, r, opts)
+}
+
+// accept implements Accept: it verifies the client handshake, authorizes the
+// Origin header (unless InsecureSkipVerify is set), negotiates subprotocol and
+// permessage-deflate compression, hijacks the underlying connection and wraps
+// it in a *Conn. On any error it first writes an HTTP error response to w.
+func accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (_ *Conn, err error) {
+ defer errd.Wrap(&err, "failed to accept WebSocket connection")
+
+ if opts == nil {
+ opts = &AcceptOptions{}
+ }
+ // Shallow copy so the caller's AcceptOptions struct is never mutated.
+ opts = &*opts
+
+ errCode, err := verifyClientRequest(w, r)
+ if err != nil {
+ http.Error(w, err.Error(), errCode)
+ return nil, err
+ }
+
+ if !opts.InsecureSkipVerify {
+ err = authenticateOrigin(r, opts.OriginPatterns)
+ if err != nil {
+ if errors.Is(err, filepath.ErrBadPattern) {
+ // A malformed pattern is a server configuration bug; log the
+ // detail but do not leak it to the client.
+ log.Printf("websocket: %v", err)
+ err = errors.New(http.StatusText(http.StatusForbidden))
+ }
+ http.Error(w, err.Error(), http.StatusForbidden)
+ return nil, err
+ }
+ }
+
+ hj, ok := w.(http.Hijacker)
+ if !ok {
+ err = errors.New("http.ResponseWriter does not implement http.Hijacker")
+ http.Error(w, http.StatusText(http.StatusNotImplemented), http.StatusNotImplemented)
+ return nil, err
+ }
+
+ w.Header().Set("Upgrade", "websocket")
+ w.Header().Set("Connection", "Upgrade")
+
+ key := r.Header.Get("Sec-WebSocket-Key")
+ w.Header().Set("Sec-WebSocket-Accept", secWebSocketAccept(key))
+
+ subproto := selectSubprotocol(r, opts.Subprotocols)
+ if subproto != "" {
+ w.Header().Set("Sec-WebSocket-Protocol", subproto)
+ }
+
+ copts, err := acceptCompression(r, w, opts.CompressionMode)
+ if err != nil {
+ return nil, err
+ }
+
+ w.WriteHeader(http.StatusSwitchingProtocols)
+
+ netConn, brw, err := hj.Hijack()
+ if err != nil {
+ err = fmt.Errorf("failed to hijack connection: %w", err)
+ // NOTE(review): the 101 status was already written above, so this
+ // http.Error is presumably a no-op on most ResponseWriter
+ // implementations — confirm intent.
+ http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
+ return nil, err
+ }
+
+ // https://github.com/golang/go/issues/32314
+ // Re-route any bytes already buffered by the server's bufio.Reader back in
+ // front of the raw connection so none of the client's data is lost.
+ b, _ := brw.Reader.Peek(brw.Reader.Buffered())
+ brw.Reader.Reset(io.MultiReader(bytes.NewReader(b), netConn))
+
+ return newConn(connConfig{
+ subprotocol: w.Header().Get("Sec-WebSocket-Protocol"),
+ rwc: netConn,
+ client: false,
+ copts: copts,
+ flateThreshold: opts.CompressionThreshold,
+
+ br: brw.Reader,
+ bw: brw.Writer,
+ }), nil
+}
+
+// verifyClientRequest checks that r is a well-formed WebSocket upgrade
+// request: at least HTTP/1.1, the GET method, Connection/Upgrade tokens,
+// protocol version 13 and a non-empty Sec-WebSocket-Key. On failure it
+// returns the HTTP status code to respond with plus the error; on success
+// it returns (0, nil).
+func verifyClientRequest(w http.ResponseWriter, r *http.Request) (errCode int, _ error) {
+ if !r.ProtoAtLeast(1, 1) {
+ return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: handshake request must be at least HTTP/1.1: %q", r.Proto)
+ }
+
+ if !headerContainsToken(r.Header, "Connection", "Upgrade") {
+ // On 426 responses, advertise the upgrade the server expects.
+ w.Header().Set("Connection", "Upgrade")
+ w.Header().Set("Upgrade", "websocket")
+ return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: Connection header %q does not contain Upgrade", r.Header.Get("Connection"))
+ }
+
+ if !headerContainsToken(r.Header, "Upgrade", "websocket") {
+ w.Header().Set("Connection", "Upgrade")
+ w.Header().Set("Upgrade", "websocket")
+ return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: Upgrade header %q does not contain websocket", r.Header.Get("Upgrade"))
+ }
+
+ if r.Method != "GET" {
+ return http.StatusMethodNotAllowed, fmt.Errorf("WebSocket protocol violation: handshake request method is not GET but %q", r.Method)
+ }
+
+ if r.Header.Get("Sec-WebSocket-Version") != "13" {
+ // Tell the client which version the server does support.
+ w.Header().Set("Sec-WebSocket-Version", "13")
+ return http.StatusBadRequest, fmt.Errorf("unsupported WebSocket protocol version (only 13 is supported): %q", r.Header.Get("Sec-WebSocket-Version"))
+ }
+
+ if r.Header.Get("Sec-WebSocket-Key") == "" {
+ return http.StatusBadRequest, errors.New("WebSocket protocol violation: missing Sec-WebSocket-Key")
+ }
+
+ return 0, nil
+}
+
+// authenticateOrigin authorizes the request's Origin header. A request with
+// no Origin, or whose origin host equals the request host
+// (case-insensitively), is always allowed; otherwise the origin host must
+// match one of originHosts via the case-insensitive filepath pattern match.
+func authenticateOrigin(r *http.Request, originHosts []string) error {
+ origin := r.Header.Get("Origin")
+ if origin == "" {
+ // Non-browser clients typically send no Origin; nothing to verify.
+ return nil
+ }
+
+ u, err := url.Parse(origin)
+ if err != nil {
+ return fmt.Errorf("failed to parse Origin header %q: %w", origin, err)
+ }
+
+ // Same-origin requests are always authorized.
+ if strings.EqualFold(r.Host, u.Host) {
+ return nil
+ }
+
+ for _, hostPattern := range originHosts {
+ matched, err := match(hostPattern, u.Host)
+ if err != nil {
+ return fmt.Errorf("failed to parse filepath pattern %q: %w", hostPattern, err)
+ }
+ if matched {
+ return nil
+ }
+ }
+ return fmt.Errorf("request Origin %q is not authorized for Host %q", origin, r.Host)
+}
+
+// match reports whether s matches pattern per filepath.Match, comparing
+// case-insensitively by lowercasing both inputs first. It returns
+// filepath.ErrBadPattern when the pattern is malformed.
+func match(pattern, s string) (bool, error) {
+ return filepath.Match(strings.ToLower(pattern), strings.ToLower(s))
+}
+
+// selectSubprotocol returns the first of the server's subprotocols that the
+// client also offered in Sec-WebSocket-Protocol, preferring server order.
+// It returns "" when there is no overlap. The comparison is
+// case-insensitive but the client's spelling is returned.
+func selectSubprotocol(r *http.Request, subprotocols []string) string {
+ cps := headerTokens(r.Header, "Sec-WebSocket-Protocol")
+ for _, sp := range subprotocols {
+ for _, cp := range cps {
+ if strings.EqualFold(sp, cp) {
+ return cp
+ }
+ }
+ }
+ return ""
+}
+
+// acceptCompression negotiates a compression extension from the client's
+// Sec-WebSocket-Extensions header. It returns (nil, nil) when compression is
+// disabled or the client offered no supported extension.
+func acceptCompression(r *http.Request, w http.ResponseWriter, mode CompressionMode) (*compressionOptions, error) {
+ if mode == CompressionDisabled {
+ return nil, nil
+ }
+
+ for _, ext := range websocketExtensions(r.Header) {
+ switch ext.name {
+ case "permessage-deflate":
+ return acceptDeflate(w, ext, mode)
+ // Disabled for now, see https://github.com/nhooyr/websocket/issues/218
+ // case "x-webkit-deflate-frame":
+ // return acceptWebkitDeflate(w, ext, mode)
+ }
+ }
+ return nil, nil
+}
+
+// acceptDeflate negotiates the permessage-deflate extension's parameters and
+// echoes the agreed parameters back to the client via copts.setHeader. Any
+// unsupported parameter other than client_max_window_bits produces a 400
+// response and an error.
+func acceptDeflate(w http.ResponseWriter, ext websocketExtension, mode CompressionMode) (*compressionOptions, error) {
+ copts := mode.opts()
+
+ for _, p := range ext.params {
+ switch p {
+ case "client_no_context_takeover":
+ copts.clientNoContextTakeover = true
+ continue
+ case "server_no_context_takeover":
+ copts.serverNoContextTakeover = true
+ continue
+ }
+
+ if strings.HasPrefix(p, "client_max_window_bits") {
+ // We cannot adjust the read sliding window so cannot make use of this.
+ continue
+ }
+
+ err := fmt.Errorf("unsupported permessage-deflate parameter: %q", p)
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return nil, err
+ }
+
+ copts.setHeader(w.Header())
+
+ return copts, nil
+}
+
+// acceptWebkitDeflate negotiates Safari's legacy x-webkit-deflate-frame
+// extension. Currently unreachable: its only call site in acceptCompression
+// is commented out (see https://github.com/nhooyr/websocket/issues/218).
+func acceptWebkitDeflate(w http.ResponseWriter, ext websocketExtension, mode CompressionMode) (*compressionOptions, error) {
+ copts := mode.opts()
+ // The peer must explicitly request it.
+ copts.serverNoContextTakeover = false
+
+ for _, p := range ext.params {
+ if p == "no_context_takeover" {
+ copts.serverNoContextTakeover = true
+ continue
+ }
+
+ // We explicitly fail on x-webkit-deflate-frame's max_window_bits parameter instead
+ // of ignoring it as the draft spec is unclear. It says the server can ignore it
+ // but the server has no way of signalling to the client it was ignored as the parameters
+ // are set one way.
+ // Thus us ignoring it would make the client think we understood it which would cause issues.
+ // See https://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate-06#section-4.1
+ //
+ // Either way, we're only implementing this for webkit which never sends the max_window_bits
+ // parameter so we don't need to worry about it.
+ err := fmt.Errorf("unsupported x-webkit-deflate-frame parameter: %q", p)
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return nil, err
+ }
+
+ s := "x-webkit-deflate-frame"
+ if copts.clientNoContextTakeover {
+ s += "; no_context_takeover"
+ }
+ w.Header().Set("Sec-WebSocket-Extensions", s)
+
+ return copts, nil
+}
+
+// headerContainsToken reports whether the comma-separated header key in h
+// contains token, compared case-insensitively (headerTokens lowercases, so
+// token is lowercased here to match).
+func headerContainsToken(h http.Header, key, token string) bool {
+ token = strings.ToLower(token)
+
+ for _, t := range headerTokens(h, key) {
+ if t == token {
+ return true
+ }
+ }
+ return false
+}
+
+// websocketExtension is one parsed entry of a Sec-WebSocket-Extensions
+// header: an extension name plus its semicolon-separated parameters.
+type websocketExtension struct {
+ name string
+ params []string
+}
+
+// websocketExtensions parses every Sec-WebSocket-Extensions value in h into
+// a name and trimmed parameter list. Empty comma-separated entries are
+// skipped.
+func websocketExtensions(h http.Header) []websocketExtension {
+ var exts []websocketExtension
+ extStrs := headerTokens(h, "Sec-WebSocket-Extensions")
+ for _, extStr := range extStrs {
+ if extStr == "" {
+ continue
+ }
+
+ vals := strings.Split(extStr, ";")
+ for i := range vals {
+ vals[i] = strings.TrimSpace(vals[i])
+ }
+
+ // The first semicolon-separated value is the extension name; the
+ // rest are its parameters.
+ e := websocketExtension{
+ name: vals[0],
+ params: vals[1:],
+ }
+
+ exts = append(exts, e)
+ }
+ return exts
+}
+
+// headerTokens splits every value of the header key in h on commas and
+// returns the trimmed, lowercased tokens in order across all values.
+func headerTokens(h http.Header, key string) []string {
+ key = textproto.CanonicalMIMEHeaderKey(key)
+ var tokens []string
+ for _, v := range h[key] {
+ v = strings.TrimSpace(v)
+ for _, t := range strings.Split(v, ",") {
+ t = strings.ToLower(t)
+ t = strings.TrimSpace(t)
+ tokens = append(tokens, t)
+ }
+ }
+ return tokens
+}
+
+// keyGUID is the fixed GUID from RFC 6455 section 1.3 that is appended to
+// the client's key when computing Sec-WebSocket-Accept.
+var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
+
+// secWebSocketAccept computes the Sec-WebSocket-Accept header value:
+// base64(SHA-1(key + keyGUID)) per RFC 6455 section 4.2.2. SHA-1 is mandated
+// by the protocol here and is not used for security purposes.
+func secWebSocketAccept(secWebSocketKey string) string {
+ h := sha1.New()
+ h.Write([]byte(secWebSocketKey))
+ h.Write(keyGUID)
+
+ return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/accept_js.go b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/accept_js.go
new file mode 100644
index 00000000000..daad4b79fec
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/accept_js.go
@@ -0,0 +1,20 @@
+package websocket
+
+import (
+ "errors"
+ "net/http"
+)
+
+// AcceptOptions represents Accept's options.
+type AcceptOptions struct {
+ Subprotocols []string
+ InsecureSkipVerify bool
+ OriginPatterns []string
+ CompressionMode CompressionMode
+ CompressionThreshold int
+}
+
+// Accept is stubbed out for Wasm.
+func Accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (*Conn, error) {
+ return nil, errors.New("unimplemented")
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/close.go b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/close.go
new file mode 100644
index 00000000000..7cbc19e9def
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/close.go
@@ -0,0 +1,76 @@
+package websocket
+
+import (
+ "errors"
+ "fmt"
+)
+
+// StatusCode represents a WebSocket status code.
+// https://tools.ietf.org/html/rfc6455#section-7.4
+type StatusCode int
+
+// https://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
+//
+// These are only the status codes defined by the protocol.
+//
+// You can define custom codes in the 3000-4999 range.
+// The 3000-3999 range is reserved for use by libraries, frameworks and applications.
+// The 4000-4999 range is reserved for private use.
+const (
+ StatusNormalClosure StatusCode = 1000
+ StatusGoingAway StatusCode = 1001
+ StatusProtocolError StatusCode = 1002
+ StatusUnsupportedData StatusCode = 1003
+
+ // 1004 is reserved and so unexported.
+ statusReserved StatusCode = 1004
+
+ // StatusNoStatusRcvd cannot be sent in a close message.
+ // It is reserved for when a close message is received without
+ // a status code.
+ StatusNoStatusRcvd StatusCode = 1005
+
+ // StatusAbnormalClosure is exported for use only with Wasm.
+ // In non Wasm Go, the returned error will indicate whether the
+ // connection was closed abnormally.
+ StatusAbnormalClosure StatusCode = 1006
+
+ StatusInvalidFramePayloadData StatusCode = 1007
+ StatusPolicyViolation StatusCode = 1008
+ StatusMessageTooBig StatusCode = 1009
+ StatusMandatoryExtension StatusCode = 1010
+ StatusInternalError StatusCode = 1011
+ StatusServiceRestart StatusCode = 1012
+ StatusTryAgainLater StatusCode = 1013
+ StatusBadGateway StatusCode = 1014
+
+ // StatusTLSHandshake is only exported for use with Wasm.
+ // In non Wasm Go, the returned error will indicate whether there was
+ // a TLS handshake failure.
+ StatusTLSHandshake StatusCode = 1015
+)
+
+// CloseError is returned when the connection is closed with a status and reason.
+//
+// Use Go 1.13's errors.As to check for this error.
+// Also see the CloseStatus helper.
+type CloseError struct {
+ Code StatusCode
+ Reason string
+}
+
+func (ce CloseError) Error() string {
+ return fmt.Sprintf("status = %v and reason = %q", ce.Code, ce.Reason)
+}
+
+// CloseStatus is a convenience wrapper around Go 1.13's errors.As to grab
+// the status code from a CloseError.
+//
+// -1 will be returned if the passed error is nil or not a CloseError.
+func CloseStatus(err error) StatusCode {
+ var ce CloseError
+ if errors.As(err, &ce) {
+ return ce.Code
+ }
+ return -1
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/close_notjs.go b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/close_notjs.go
new file mode 100644
index 00000000000..4251311d2e6
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/close_notjs.go
@@ -0,0 +1,211 @@
+// +build !js
+
+package websocket
+
+import (
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "log"
+ "time"
+
+ "nhooyr.io/websocket/internal/errd"
+)
+
+// Close performs the WebSocket close handshake with the given status code and reason.
+//
+// It will write a WebSocket close frame with a timeout of 5s and then wait 5s for
+// the peer to send a close frame.
+// All data messages received from the peer during the close handshake will be discarded.
+//
+// The connection can only be closed once. Additional calls to Close
+// are no-ops.
+//
+// The maximum length of reason must be 125 bytes. Avoid
+// sending a dynamic reason.
+//
+// Close will unblock all goroutines interacting with the connection once
+// complete.
+func (c *Conn) Close(code StatusCode, reason string) error {
+ return c.closeHandshake(code, reason)
+}
+
+// closeHandshake writes a close frame and then waits for the peer's close
+// frame. An error from the wait phase is reported only when it is not a
+// received close status (CloseStatus == -1): the peer's close frame itself
+// surfaces as a CloseError and is considered a successful handshake.
+func (c *Conn) closeHandshake(code StatusCode, reason string) (err error) {
+ defer errd.Wrap(&err, "failed to close WebSocket")
+
+ writeErr := c.writeClose(code, reason)
+ closeHandshakeErr := c.waitCloseHandshake()
+
+ if writeErr != nil {
+ return writeErr
+ }
+
+ if CloseStatus(closeHandshakeErr) == -1 {
+ return closeHandshakeErr
+ }
+
+ return nil
+}
+
+// errAlreadyWroteClose is returned by writeClose when a close frame has
+// already been sent on this connection.
+var errAlreadyWroteClose = errors.New("already wrote close")
+
+// writeClose marshals and sends a close frame carrying code and reason.
+// Only the first call per connection sends a frame; subsequent calls return
+// errAlreadyWroteClose. Marshalling failures are logged and an empty payload
+// is sent instead, with the marshalling error returned to the caller.
+func (c *Conn) writeClose(code StatusCode, reason string) error {
+ c.closeMu.Lock()
+ wroteClose := c.wroteClose
+ c.wroteClose = true
+ c.closeMu.Unlock()
+ if wroteClose {
+ return errAlreadyWroteClose
+ }
+
+ ce := CloseError{
+ Code: code,
+ Reason: reason,
+ }
+
+ var p []byte
+ var marshalErr error
+ // StatusNoStatusRcvd must not appear on the wire; send an empty payload.
+ if ce.Code != StatusNoStatusRcvd {
+ p, marshalErr = ce.bytes()
+ if marshalErr != nil {
+ log.Printf("websocket: %v", marshalErr)
+ }
+ }
+
+ writeErr := c.writeControl(context.Background(), opClose, p)
+ if CloseStatus(writeErr) != -1 {
+ // Not a real error if it's due to a close frame being received.
+ writeErr = nil
+ }
+
+ // We do this after in case there was an error writing the close frame.
+ c.setCloseErr(fmt.Errorf("sent close frame: %w", ce))
+
+ if marshalErr != nil {
+ return marshalErr
+ }
+ return writeErr
+}
+
+// waitCloseHandshake reads and discards incoming frames for up to 5s. The
+// only exit from the read loop is an error from readLoop — once the peer's
+// close frame arrives this is a CloseError. The connection is always closed
+// on return (deferred c.close(nil)).
+func (c *Conn) waitCloseHandshake() error {
+ defer c.close(nil)
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ defer cancel()
+
+ err := c.readMu.lock(ctx)
+ if err != nil {
+ return err
+ }
+ defer c.readMu.unlock()
+
+ if c.readCloseFrameErr != nil {
+ return c.readCloseFrameErr
+ }
+
+ for {
+ h, err := c.readLoop(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Discard the frame's payload byte by byte; the data is irrelevant
+ // during the close handshake.
+ for i := int64(0); i < h.payloadLength; i++ {
+ _, err := c.br.ReadByte()
+ if err != nil {
+ return err
+ }
+ }
+ }
+}
+
+// parseClosePayload decodes a close frame payload into a CloseError. An
+// empty payload maps to StatusNoStatusRcvd per RFC 6455; otherwise the first
+// two bytes are the big-endian status code and the remainder is the reason.
+// A one-byte payload or a code not valid on the wire is an error.
+func parseClosePayload(p []byte) (CloseError, error) {
+ if len(p) == 0 {
+ return CloseError{
+ Code: StatusNoStatusRcvd,
+ }, nil
+ }
+
+ if len(p) < 2 {
+ return CloseError{}, fmt.Errorf("close payload %q too small, cannot even contain the 2 byte status code", p)
+ }
+
+ ce := CloseError{
+ Code: StatusCode(binary.BigEndian.Uint16(p)),
+ Reason: string(p[2:]),
+ }
+
+ if !validWireCloseCode(ce.Code) {
+ return CloseError{}, fmt.Errorf("invalid status code %v", ce.Code)
+ }
+
+ return ce, nil
+}
+
+// See http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
+// and https://tools.ietf.org/html/rfc6455#section-7.4.1
+//
+// validWireCloseCode reports whether code may legally appear in a close
+// frame on the wire: the registered 1000-1014 range minus the reserved
+// codes, plus the library/private 3000-4999 range.
+func validWireCloseCode(code StatusCode) bool {
+ switch code {
+ // These codes are reserved for local reporting and must never be sent.
+ case statusReserved, StatusNoStatusRcvd, StatusAbnormalClosure, StatusTLSHandshake:
+ return false
+ }
+
+ if code >= StatusNormalClosure && code <= StatusBadGateway {
+ return true
+ }
+ if code >= 3000 && code <= 4999 {
+ return true
+ }
+
+ return false
+}
+
+// bytes marshals the close frame payload for ce. If marshalling fails
+// (reason too long or a code that cannot be sent), it falls back to a bare
+// StatusInternalError payload while still returning the original error, so
+// a well-formed frame can be sent alongside the reported failure.
+func (ce CloseError) bytes() ([]byte, error) {
+ p, err := ce.bytesErr()
+ if err != nil {
+ err = fmt.Errorf("failed to marshal close frame: %w", err)
+ ce = CloseError{
+ Code: StatusInternalError,
+ }
+ p, _ = ce.bytesErr()
+ }
+ return p, err
+}
+
+// maxCloseReason is the maximum close reason length: a control frame payload
+// is at most maxControlPayload bytes, minus 2 for the status code.
+const maxCloseReason = maxControlPayload - 2
+
+// bytesErr encodes the close payload as a 2-byte big-endian status code
+// followed by the UTF-8 reason. It errors when the reason exceeds
+// maxCloseReason or the code is not valid on the wire.
+func (ce CloseError) bytesErr() ([]byte, error) {
+ if len(ce.Reason) > maxCloseReason {
+ return nil, fmt.Errorf("reason string max is %v but got %q with length %v", maxCloseReason, ce.Reason, len(ce.Reason))
+ }
+
+ if !validWireCloseCode(ce.Code) {
+ return nil, fmt.Errorf("status code %v cannot be set", ce.Code)
+ }
+
+ buf := make([]byte, 2+len(ce.Reason))
+ binary.BigEndian.PutUint16(buf, uint16(ce.Code))
+ copy(buf[2:], ce.Reason)
+ return buf, nil
+}
+
+func (c *Conn) setCloseErr(err error) {
+ c.closeMu.Lock()
+ c.setCloseErrLocked(err)
+ c.closeMu.Unlock()
+}
+
+func (c *Conn) setCloseErrLocked(err error) {
+ if c.closeErr == nil {
+ c.closeErr = fmt.Errorf("WebSocket closed: %w", err)
+ }
+}
+
+func (c *Conn) isClosed() bool {
+ select {
+ case <-c.closed:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/compress.go b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/compress.go
new file mode 100644
index 00000000000..80b46d1c1d3
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/compress.go
@@ -0,0 +1,39 @@
+package websocket
+
+// CompressionMode represents the modes available to the deflate extension.
+// See https://tools.ietf.org/html/rfc7692
+//
+// A compatibility layer is implemented for the older deflate-frame extension used
+// by safari. See https://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate-06
+// It will work the same in every way except that we cannot signal to the peer we
+// want to use no context takeover on our side, we can only signal that they should.
+// It is however currently disabled due to Safari bugs. See https://github.com/nhooyr/websocket/issues/218
+type CompressionMode int
+
+const (
+ // CompressionNoContextTakeover grabs a new flate.Reader and flate.Writer as needed
+ // for every message. This applies to both server and client side.
+ //
+ // This means less efficient compression as the sliding window from previous messages
+ // will not be used but the memory overhead will be lower if the connections
+ // are long lived and seldom used.
+ //
+ // The message will only be compressed if greater than 512 bytes.
+ CompressionNoContextTakeover CompressionMode = iota
+
+ // CompressionContextTakeover uses a flate.Reader and flate.Writer per connection.
+ // This enables reusing the sliding window from previous messages.
+ // As most WebSocket protocols are repetitive, this can be very efficient.
+ // It carries an overhead of 8 kB for every connection compared to CompressionNoContextTakeover.
+ //
+ // If the peer negotiates NoContextTakeover on the client or server side, it will be
+ // used instead as this is required by the RFC.
+ CompressionContextTakeover
+
+ // CompressionDisabled disables the deflate extension.
+ //
+ // Use this if you are using a predominantly binary protocol with very
+ // little duplication in between messages or CPU and memory are more
+ // important than bandwidth.
+ CompressionDisabled
+)
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/compress_notjs.go b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/compress_notjs.go
new file mode 100644
index 00000000000..809a272c3d1
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/compress_notjs.go
@@ -0,0 +1,181 @@
+// +build !js
+
+package websocket
+
+import (
+ "io"
+ "net/http"
+ "sync"
+
+ "github.com/klauspost/compress/flate"
+)
+
+func (m CompressionMode) opts() *compressionOptions {
+ return &compressionOptions{
+ clientNoContextTakeover: m == CompressionNoContextTakeover,
+ serverNoContextTakeover: m == CompressionNoContextTakeover,
+ }
+}
+
+type compressionOptions struct {
+ clientNoContextTakeover bool
+ serverNoContextTakeover bool
+}
+
+func (copts *compressionOptions) setHeader(h http.Header) {
+ s := "permessage-deflate"
+ if copts.clientNoContextTakeover {
+ s += "; client_no_context_takeover"
+ }
+ if copts.serverNoContextTakeover {
+ s += "; server_no_context_takeover"
+ }
+ h.Set("Sec-WebSocket-Extensions", s)
+}
+
+// These bytes are required to get flate.Reader to return.
+// They are removed when sending to avoid the overhead as
+// WebSocket framing tell's when the message has ended but then
+// we need to add them back otherwise flate.Reader keeps
+// trying to return more bytes.
+const deflateMessageTail = "\x00\x00\xff\xff"
+
+type trimLastFourBytesWriter struct {
+ w io.Writer
+ tail []byte
+}
+
+func (tw *trimLastFourBytesWriter) reset() {
+ if tw != nil && tw.tail != nil {
+ tw.tail = tw.tail[:0]
+ }
+}
+
+// Write forwards p to tw.w while always withholding the final four bytes
+// seen so far in tw.tail, so that when writing stops the deflate stream's
+// trailing 0x00 0x00 0xff 0xff has been trimmed (see deflateMessageTail).
+// NOTE(review): the final return reports n+4, i.e. it counts the withheld
+// bytes as written so callers observe len(p); confirm callers depend on
+// this exact accounting.
+func (tw *trimLastFourBytesWriter) Write(p []byte) (int, error) {
+ if tw.tail == nil {
+ tw.tail = make([]byte, 0, 4)
+ }
+
+ // Number of buffered+incoming bytes beyond the four we must retain.
+ extra := len(tw.tail) + len(p) - 4
+
+ if extra <= 0 {
+ tw.tail = append(tw.tail, p...)
+ return len(p), nil
+ }
+
+ // Now we need to write as many extra bytes as we can from the previous tail.
+ if extra > len(tw.tail) {
+ extra = len(tw.tail)
+ }
+ if extra > 0 {
+ _, err := tw.w.Write(tw.tail[:extra])
+ if err != nil {
+ return 0, err
+ }
+
+ // Shift remaining bytes in tail over.
+ n := copy(tw.tail, tw.tail[extra:])
+ tw.tail = tw.tail[:n]
+ }
+
+ // If p is less than or equal to 4 bytes,
+ // all of it is part of the tail.
+ if len(p) <= 4 {
+ tw.tail = append(tw.tail, p...)
+ return len(p), nil
+ }
+
+ // Otherwise, only the last 4 bytes are.
+ tw.tail = append(tw.tail, p[len(p)-4:]...)
+
+ p = p[:len(p)-4]
+ n, err := tw.w.Write(p)
+ return n + 4, err
+}
+
+var flateReaderPool sync.Pool
+
+func getFlateReader(r io.Reader, dict []byte) io.Reader {
+ fr, ok := flateReaderPool.Get().(io.Reader)
+ if !ok {
+ return flate.NewReaderDict(r, dict)
+ }
+ fr.(flate.Resetter).Reset(r, dict)
+ return fr
+}
+
+func putFlateReader(fr io.Reader) {
+ flateReaderPool.Put(fr)
+}
+
+type slidingWindow struct {
+ buf []byte
+}
+
+var swPoolMu sync.RWMutex
+var swPool = map[int]*sync.Pool{}
+
+func slidingWindowPool(n int) *sync.Pool {
+ swPoolMu.RLock()
+ p, ok := swPool[n]
+ swPoolMu.RUnlock()
+ if ok {
+ return p
+ }
+
+ p = &sync.Pool{}
+
+ swPoolMu.Lock()
+ swPool[n] = p
+ swPoolMu.Unlock()
+
+ return p
+}
+
+func (sw *slidingWindow) init(n int) {
+ if sw.buf != nil {
+ return
+ }
+
+ if n == 0 {
+ n = 32768
+ }
+
+ p := slidingWindowPool(n)
+ buf, ok := p.Get().([]byte)
+ if ok {
+ sw.buf = buf[:0]
+ } else {
+ sw.buf = make([]byte, 0, n)
+ }
+}
+
+func (sw *slidingWindow) close() {
+ if sw.buf == nil {
+ return
+ }
+
+ swPoolMu.Lock()
+ swPool[cap(sw.buf)].Put(sw.buf)
+ swPoolMu.Unlock()
+ sw.buf = nil
+}
+
+// write appends p to the fixed-capacity sliding window, discarding the
+// oldest bytes as needed. If p alone is at least the window size, only its
+// final cap(sw.buf) bytes are kept.
+func (sw *slidingWindow) write(p []byte) {
+ if len(p) >= cap(sw.buf) {
+ // p overwrites the entire window; keep only its trailing bytes.
+ sw.buf = sw.buf[:cap(sw.buf)]
+ p = p[len(p)-cap(sw.buf):]
+ copy(sw.buf, p)
+ return
+ }
+
+ left := cap(sw.buf) - len(sw.buf)
+ if left < len(p) {
+ // We need to shift spaceNeeded bytes from the end to make room for p at the end.
+ spaceNeeded := len(p) - left
+ copy(sw.buf, sw.buf[spaceNeeded:])
+ sw.buf = sw.buf[:len(sw.buf)-spaceNeeded]
+ }
+
+ sw.buf = append(sw.buf, p...)
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/conn.go b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/conn.go
new file mode 100644
index 00000000000..a41808be3fa
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/conn.go
@@ -0,0 +1,13 @@
+package websocket
+
+// MessageType represents the type of a WebSocket message.
+// See https://tools.ietf.org/html/rfc6455#section-5.6
+type MessageType int
+
+// MessageType constants.
+const (
+ // MessageText is for UTF-8 encoded text messages like JSON.
+ MessageText MessageType = iota + 1
+ // MessageBinary is for binary messages like protobufs.
+ MessageBinary
+)
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/conn_notjs.go b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/conn_notjs.go
new file mode 100644
index 00000000000..bb2eb22f7db
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/conn_notjs.go
@@ -0,0 +1,265 @@
+// +build !js
+
+package websocket
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "runtime"
+ "strconv"
+ "sync"
+ "sync/atomic"
+)
+
+// Conn represents a WebSocket connection.
+// All methods may be called concurrently except for Reader and Read.
+//
+// You must always read from the connection. Otherwise control
+// frames will not be handled. See Reader and CloseRead.
+//
+// Be sure to call Close on the connection when you
+// are finished with it to release associated resources.
+//
+// On any error from any method, the connection is closed
+// with an appropriate reason.
+type Conn struct {
+ subprotocol string
+ rwc io.ReadWriteCloser
+ client bool
+ copts *compressionOptions
+ flateThreshold int
+ br *bufio.Reader
+ bw *bufio.Writer
+
+ readTimeout chan context.Context
+ writeTimeout chan context.Context
+
+ // Read state.
+ readMu *mu
+ readHeaderBuf [8]byte
+ readControlBuf [maxControlPayload]byte
+ msgReader *msgReader
+ readCloseFrameErr error
+
+ // Write state.
+ msgWriterState *msgWriterState
+ writeFrameMu *mu
+ writeBuf []byte
+ writeHeaderBuf [8]byte
+ writeHeader header
+
+ closed chan struct{}
+ closeMu sync.Mutex
+ closeErr error
+ wroteClose bool
+
+ pingCounter int32
+ activePingsMu sync.Mutex
+ activePings map[string]chan<- struct{}
+}
+
+type connConfig struct {
+ subprotocol string
+ rwc io.ReadWriteCloser
+ client bool
+ copts *compressionOptions
+ flateThreshold int
+
+ br *bufio.Reader
+ bw *bufio.Writer
+}
+
+func newConn(cfg connConfig) *Conn {
+ c := &Conn{
+ subprotocol: cfg.subprotocol,
+ rwc: cfg.rwc,
+ client: cfg.client,
+ copts: cfg.copts,
+ flateThreshold: cfg.flateThreshold,
+
+ br: cfg.br,
+ bw: cfg.bw,
+
+ readTimeout: make(chan context.Context),
+ writeTimeout: make(chan context.Context),
+
+ closed: make(chan struct{}),
+ activePings: make(map[string]chan<- struct{}),
+ }
+
+ c.readMu = newMu(c)
+ c.writeFrameMu = newMu(c)
+
+ c.msgReader = newMsgReader(c)
+
+ c.msgWriterState = newMsgWriterState(c)
+ if c.client {
+ c.writeBuf = extractBufioWriterBuf(c.bw, c.rwc)
+ }
+
+ if c.flate() && c.flateThreshold == 0 {
+ c.flateThreshold = 128
+ if !c.msgWriterState.flateContextTakeover() {
+ c.flateThreshold = 512
+ }
+ }
+
+ runtime.SetFinalizer(c, func(c *Conn) {
+ c.close(errors.New("connection garbage collected"))
+ })
+
+ go c.timeoutLoop()
+
+ return c
+}
+
+// Subprotocol returns the negotiated subprotocol.
+// An empty string means the default protocol.
+func (c *Conn) Subprotocol() string {
+ return c.subprotocol
+}
+
+// close tears the connection down exactly once: it records err as the close
+// reason, closes c.closed to wake all waiters, clears the GC finalizer and
+// closes the underlying transport. Subsequent calls are no-ops.
+func (c *Conn) close(err error) {
+ c.closeMu.Lock()
+ defer c.closeMu.Unlock()
+
+ if c.isClosed() {
+ return
+ }
+ c.setCloseErrLocked(err)
+ close(c.closed)
+ runtime.SetFinalizer(c, nil)
+
+ // Have to close after c.closed is closed to ensure any goroutine that wakes up
+ // from the connection being closed also sees that c.closed is closed and returns
+ // closeErr.
+ c.rwc.Close()
+
+ // Done asynchronously — presumably so close never blocks on reader/writer
+ // teardown while holding closeMu; confirm against msgWriterState.close.
+ go func() {
+ c.msgWriterState.close()
+
+ c.msgReader.close()
+ }()
+}
+
+// timeoutLoop runs in its own goroutine (started by newConn) and tracks the
+// most recently supplied read and write deadline contexts from the
+// readTimeout/writeTimeout channels. A read timeout attempts a graceful
+// policy-violation close; a write timeout hard-closes the connection, since
+// a close frame can no longer be written.
+func (c *Conn) timeoutLoop() {
+ readCtx := context.Background()
+ writeCtx := context.Background()
+
+ for {
+ select {
+ case <-c.closed:
+ return
+
+ case writeCtx = <-c.writeTimeout:
+ case readCtx = <-c.readTimeout:
+
+ case <-readCtx.Done():
+ c.setCloseErr(fmt.Errorf("read timed out: %w", readCtx.Err()))
+ go c.writeError(StatusPolicyViolation, errors.New("timed out"))
+ case <-writeCtx.Done():
+ c.close(fmt.Errorf("write timed out: %w", writeCtx.Err()))
+ return
+ }
+ }
+}
+
+func (c *Conn) flate() bool {
+ return c.copts != nil
+}
+
+// Ping sends a ping to the peer and waits for a pong.
+// Use this to measure latency or ensure the peer is responsive.
+// Ping must be called concurrently with Reader as it does
+// not read from the connection but instead waits for a Reader call
+// to read the pong.
+//
+// TCP Keepalives should suffice for most use cases.
+func (c *Conn) Ping(ctx context.Context) error {
+ p := atomic.AddInt32(&c.pingCounter, 1)
+
+ err := c.ping(ctx, strconv.Itoa(int(p)))
+ if err != nil {
+ return fmt.Errorf("failed to ping: %w", err)
+ }
+ return nil
+}
+
+func (c *Conn) ping(ctx context.Context, p string) error {
+ pong := make(chan struct{})
+
+ c.activePingsMu.Lock()
+ c.activePings[p] = pong
+ c.activePingsMu.Unlock()
+
+ defer func() {
+ c.activePingsMu.Lock()
+ delete(c.activePings, p)
+ c.activePingsMu.Unlock()
+ }()
+
+ err := c.writeControl(ctx, opPing, []byte(p))
+ if err != nil {
+ return err
+ }
+
+ select {
+ case <-c.closed:
+ return c.closeErr
+ case <-ctx.Done():
+ err := fmt.Errorf("failed to wait for pong: %w", ctx.Err())
+ c.close(err)
+ return err
+ case <-pong:
+ return nil
+ }
+}
+
+type mu struct {
+ c *Conn
+ ch chan struct{}
+}
+
+func newMu(c *Conn) *mu {
+ return &mu{
+ c: c,
+ ch: make(chan struct{}, 1),
+ }
+}
+
+func (m *mu) forceLock() {
+ m.ch <- struct{}{}
+}
+
+// lock acquires the mutex, giving up when ctx is cancelled or the connection
+// closes first. On ctx cancellation the connection is closed and the
+// cancellation error is returned; on connection closure closeErr is
+// returned.
+func (m *mu) lock(ctx context.Context) error {
+ select {
+ case <-m.c.closed:
+ return m.c.closeErr
+ case <-ctx.Done():
+ err := fmt.Errorf("failed to acquire lock: %w", ctx.Err())
+ m.c.close(err)
+ return err
+ case m.ch <- struct{}{}:
+ // To make sure the connection is certainly alive.
+ // As it's possible the send on m.ch was selected
+ // over the receive on closed.
+ select {
+ case <-m.c.closed:
+ // Make sure to release.
+ m.unlock()
+ return m.c.closeErr
+ default:
+ }
+ return nil
+ }
+}
+
+func (m *mu) unlock() {
+ select {
+ case <-m.ch:
+ default:
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/dial.go b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/dial.go
new file mode 100644
index 00000000000..2b25e3517d6
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/dial.go
@@ -0,0 +1,287 @@
+// +build !js
+
+package websocket
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "crypto/rand"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+ "time"
+
+ "nhooyr.io/websocket/internal/errd"
+)
+
// DialOptions represents Dial's options.
// All fields are optional; the zero value of each selects a sensible
// default when the handshake is performed.
type DialOptions struct {
	// HTTPClient is used for the connection.
	// Its Transport must return writable bodies for WebSocket handshakes.
	// http.Transport does beginning with Go 1.12.
	HTTPClient *http.Client

	// HTTPHeader specifies the HTTP headers included in the handshake request.
	HTTPHeader http.Header

	// Subprotocols lists the WebSocket subprotocols to negotiate with the server.
	Subprotocols []string

	// CompressionMode controls the compression mode.
	// Defaults to CompressionNoContextTakeover.
	//
	// See docs on CompressionMode for details.
	CompressionMode CompressionMode

	// CompressionThreshold controls the minimum size of a message before compression is applied.
	//
	// Defaults to 512 bytes for CompressionNoContextTakeover and 128 bytes
	// for CompressionContextTakeover.
	CompressionThreshold int
}
+
// Dial performs a WebSocket handshake on url.
//
// The response is the WebSocket handshake response from the server.
// You never need to close resp.Body yourself.
//
// If an error occurs, the returned response may be non nil.
// However, you can only read the first 1024 bytes of the body.
//
// This function requires at least Go 1.12 as it uses a new feature
// in net/http to perform WebSocket handshakes.
// See docs on the HTTPClient option and https://github.com/golang/go/issues/26937#issuecomment-415855861
//
// URLs with http/https schemes will work and are interpreted as ws/wss.
func Dial(ctx context.Context, u string, opts *DialOptions) (*Conn, *http.Response, error) {
	// The nil reader makes dial fall back to crypto/rand for the
	// Sec-WebSocket-Key; tests inject a deterministic reader instead.
	return dial(ctx, u, opts, nil)
}
+
+func dial(ctx context.Context, urls string, opts *DialOptions, rand io.Reader) (_ *Conn, _ *http.Response, err error) {
+ defer errd.Wrap(&err, "failed to WebSocket dial")
+
+ if opts == nil {
+ opts = &DialOptions{}
+ }
+
+ opts = &*opts
+ if opts.HTTPClient == nil {
+ opts.HTTPClient = http.DefaultClient
+ }
+ if opts.HTTPHeader == nil {
+ opts.HTTPHeader = http.Header{}
+ }
+
+ secWebSocketKey, err := secWebSocketKey(rand)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to generate Sec-WebSocket-Key: %w", err)
+ }
+
+ var copts *compressionOptions
+ if opts.CompressionMode != CompressionDisabled {
+ copts = opts.CompressionMode.opts()
+ }
+
+ resp, err := handshakeRequest(ctx, urls, opts, copts, secWebSocketKey)
+ if err != nil {
+ return nil, resp, err
+ }
+ respBody := resp.Body
+ resp.Body = nil
+ defer func() {
+ if err != nil {
+ // We read a bit of the body for easier debugging.
+ r := io.LimitReader(respBody, 1024)
+
+ timer := time.AfterFunc(time.Second*3, func() {
+ respBody.Close()
+ })
+ defer timer.Stop()
+
+ b, _ := ioutil.ReadAll(r)
+ respBody.Close()
+ resp.Body = ioutil.NopCloser(bytes.NewReader(b))
+ }
+ }()
+
+ copts, err = verifyServerResponse(opts, copts, secWebSocketKey, resp)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ rwc, ok := respBody.(io.ReadWriteCloser)
+ if !ok {
+ return nil, resp, fmt.Errorf("response body is not a io.ReadWriteCloser: %T", respBody)
+ }
+
+ return newConn(connConfig{
+ subprotocol: resp.Header.Get("Sec-WebSocket-Protocol"),
+ rwc: rwc,
+ client: true,
+ copts: copts,
+ flateThreshold: opts.CompressionThreshold,
+ br: getBufioReader(rwc),
+ bw: getBufioWriter(rwc),
+ }), resp, nil
+}
+
+func handshakeRequest(ctx context.Context, urls string, opts *DialOptions, copts *compressionOptions, secWebSocketKey string) (*http.Response, error) {
+ if opts.HTTPClient.Timeout > 0 {
+ return nil, errors.New("use context for cancellation instead of http.Client.Timeout; see https://github.com/nhooyr/websocket/issues/67")
+ }
+
+ u, err := url.Parse(urls)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse url: %w", err)
+ }
+
+ switch u.Scheme {
+ case "ws":
+ u.Scheme = "http"
+ case "wss":
+ u.Scheme = "https"
+ case "http", "https":
+ default:
+ return nil, fmt.Errorf("unexpected url scheme: %q", u.Scheme)
+ }
+
+ req, _ := http.NewRequestWithContext(ctx, "GET", u.String(), nil)
+ req.Header = opts.HTTPHeader.Clone()
+ req.Header.Set("Connection", "Upgrade")
+ req.Header.Set("Upgrade", "websocket")
+ req.Header.Set("Sec-WebSocket-Version", "13")
+ req.Header.Set("Sec-WebSocket-Key", secWebSocketKey)
+ if len(opts.Subprotocols) > 0 {
+ req.Header.Set("Sec-WebSocket-Protocol", strings.Join(opts.Subprotocols, ","))
+ }
+ if copts != nil {
+ copts.setHeader(req.Header)
+ }
+
+ resp, err := opts.HTTPClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to send handshake request: %w", err)
+ }
+ return resp, nil
+}
+
+func secWebSocketKey(rr io.Reader) (string, error) {
+ if rr == nil {
+ rr = rand.Reader
+ }
+ b := make([]byte, 16)
+ _, err := io.ReadFull(rr, b)
+ if err != nil {
+ return "", fmt.Errorf("failed to read random data from rand.Reader: %w", err)
+ }
+ return base64.StdEncoding.EncodeToString(b), nil
+}
+
+func verifyServerResponse(opts *DialOptions, copts *compressionOptions, secWebSocketKey string, resp *http.Response) (*compressionOptions, error) {
+ if resp.StatusCode != http.StatusSwitchingProtocols {
+ return nil, fmt.Errorf("expected handshake response status code %v but got %v", http.StatusSwitchingProtocols, resp.StatusCode)
+ }
+
+ if !headerContainsToken(resp.Header, "Connection", "Upgrade") {
+ return nil, fmt.Errorf("WebSocket protocol violation: Connection header %q does not contain Upgrade", resp.Header.Get("Connection"))
+ }
+
+ if !headerContainsToken(resp.Header, "Upgrade", "WebSocket") {
+ return nil, fmt.Errorf("WebSocket protocol violation: Upgrade header %q does not contain websocket", resp.Header.Get("Upgrade"))
+ }
+
+ if resp.Header.Get("Sec-WebSocket-Accept") != secWebSocketAccept(secWebSocketKey) {
+ return nil, fmt.Errorf("WebSocket protocol violation: invalid Sec-WebSocket-Accept %q, key %q",
+ resp.Header.Get("Sec-WebSocket-Accept"),
+ secWebSocketKey,
+ )
+ }
+
+ err := verifySubprotocol(opts.Subprotocols, resp)
+ if err != nil {
+ return nil, err
+ }
+
+ return verifyServerExtensions(copts, resp.Header)
+}
+
+func verifySubprotocol(subprotos []string, resp *http.Response) error {
+ proto := resp.Header.Get("Sec-WebSocket-Protocol")
+ if proto == "" {
+ return nil
+ }
+
+ for _, sp2 := range subprotos {
+ if strings.EqualFold(sp2, proto) {
+ return nil
+ }
+ }
+
+ return fmt.Errorf("WebSocket protocol violation: unexpected Sec-WebSocket-Protocol from server: %q", proto)
+}
+
+func verifyServerExtensions(copts *compressionOptions, h http.Header) (*compressionOptions, error) {
+ exts := websocketExtensions(h)
+ if len(exts) == 0 {
+ return nil, nil
+ }
+
+ ext := exts[0]
+ if ext.name != "permessage-deflate" || len(exts) > 1 || copts == nil {
+ return nil, fmt.Errorf("WebSocket protcol violation: unsupported extensions from server: %+v", exts[1:])
+ }
+
+ copts = &*copts
+
+ for _, p := range ext.params {
+ switch p {
+ case "client_no_context_takeover":
+ copts.clientNoContextTakeover = true
+ continue
+ case "server_no_context_takeover":
+ copts.serverNoContextTakeover = true
+ continue
+ }
+
+ return nil, fmt.Errorf("unsupported permessage-deflate parameter: %q", p)
+ }
+
+ return copts, nil
+}
+
+var bufioReaderPool sync.Pool
+
+func getBufioReader(r io.Reader) *bufio.Reader {
+ br, ok := bufioReaderPool.Get().(*bufio.Reader)
+ if !ok {
+ return bufio.NewReader(r)
+ }
+ br.Reset(r)
+ return br
+}
+
+func putBufioReader(br *bufio.Reader) {
+ bufioReaderPool.Put(br)
+}
+
+var bufioWriterPool sync.Pool
+
+func getBufioWriter(w io.Writer) *bufio.Writer {
+ bw, ok := bufioWriterPool.Get().(*bufio.Writer)
+ if !ok {
+ return bufio.NewWriter(w)
+ }
+ bw.Reset(w)
+ return bw
+}
+
+func putBufioWriter(bw *bufio.Writer) {
+ bufioWriterPool.Put(bw)
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/doc.go b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/doc.go
new file mode 100644
index 00000000000..efa920e3b61
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/doc.go
@@ -0,0 +1,32 @@
+// +build !js
+
+// Package websocket implements the RFC 6455 WebSocket protocol.
+//
+// https://tools.ietf.org/html/rfc6455
+//
+// Use Dial to dial a WebSocket server.
+//
+// Use Accept to accept a WebSocket client.
+//
+// Conn represents the resulting WebSocket connection.
+//
+// The examples are the best way to understand how to correctly use the library.
+//
+// The wsjson and wspb subpackages contain helpers for JSON and protobuf messages.
+//
+// More documentation at https://nhooyr.io/websocket.
+//
+// Wasm
+//
+// The client side supports compiling to Wasm.
+// It wraps the WebSocket browser API.
+//
+// See https://developer.mozilla.org/en-US/docs/Web/API/WebSocket
+//
+// Some important caveats to be aware of:
+//
+// - Accept always errors out
+// - Conn.Ping is a no-op
+// - HTTPClient, HTTPHeader and CompressionMode in DialOptions are no-op
+// - *http.Response from Dial is &http.Response{} with a 101 status code on success
+package websocket // import "nhooyr.io/websocket"
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/frame.go b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/frame.go
new file mode 100644
index 00000000000..2a036f944ac
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/frame.go
@@ -0,0 +1,294 @@
+package websocket
+
+import (
+ "bufio"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+ "math/bits"
+
+ "nhooyr.io/websocket/internal/errd"
+)
+
// opcode represents a WebSocket opcode.
type opcode int

// https://tools.ietf.org/html/rfc6455#section-11.8.
const (
	opContinuation opcode = iota
	opText
	opBinary
	// 3 - 7 are reserved for further non-control frames.
	_
	_
	_
	_
	_
	opClose
	opPing
	opPong
	// 11-16 are reserved for further control frames.
)

// header represents a WebSocket frame header.
// See https://tools.ietf.org/html/rfc6455#section-5.2.
type header struct {
	fin    bool // final fragment of a message
	rsv1   bool // reserved bits; meaning depends on negotiated extensions
	rsv2   bool
	rsv3   bool
	opcode opcode

	payloadLength int64

	masked  bool   // whether the payload is masked
	maskKey uint32 // masking key (little-endian); valid only if masked
}
+
+// readFrameHeader reads a header from the reader.
+// See https://tools.ietf.org/html/rfc6455#section-5.2.
+func readFrameHeader(r *bufio.Reader, readBuf []byte) (h header, err error) {
+ defer errd.Wrap(&err, "failed to read frame header")
+
+ b, err := r.ReadByte()
+ if err != nil {
+ return header{}, err
+ }
+
+ h.fin = b&(1<<7) != 0
+ h.rsv1 = b&(1<<6) != 0
+ h.rsv2 = b&(1<<5) != 0
+ h.rsv3 = b&(1<<4) != 0
+
+ h.opcode = opcode(b & 0xf)
+
+ b, err = r.ReadByte()
+ if err != nil {
+ return header{}, err
+ }
+
+ h.masked = b&(1<<7) != 0
+
+ payloadLength := b &^ (1 << 7)
+ switch {
+ case payloadLength < 126:
+ h.payloadLength = int64(payloadLength)
+ case payloadLength == 126:
+ _, err = io.ReadFull(r, readBuf[:2])
+ h.payloadLength = int64(binary.BigEndian.Uint16(readBuf))
+ case payloadLength == 127:
+ _, err = io.ReadFull(r, readBuf)
+ h.payloadLength = int64(binary.BigEndian.Uint64(readBuf))
+ }
+ if err != nil {
+ return header{}, err
+ }
+
+ if h.payloadLength < 0 {
+ return header{}, fmt.Errorf("received negative payload length: %v", h.payloadLength)
+ }
+
+ if h.masked {
+ _, err = io.ReadFull(r, readBuf[:4])
+ if err != nil {
+ return header{}, err
+ }
+ h.maskKey = binary.LittleEndian.Uint32(readBuf)
+ }
+
+ return h, nil
+}
+
+// maxControlPayload is the maximum length of a control frame payload.
+// See https://tools.ietf.org/html/rfc6455#section-5.5.
+const maxControlPayload = 125
+
+// writeFrameHeader writes the bytes of the header to w.
+// See https://tools.ietf.org/html/rfc6455#section-5.2
+func writeFrameHeader(h header, w *bufio.Writer, buf []byte) (err error) {
+ defer errd.Wrap(&err, "failed to write frame header")
+
+ var b byte
+ if h.fin {
+ b |= 1 << 7
+ }
+ if h.rsv1 {
+ b |= 1 << 6
+ }
+ if h.rsv2 {
+ b |= 1 << 5
+ }
+ if h.rsv3 {
+ b |= 1 << 4
+ }
+
+ b |= byte(h.opcode)
+
+ err = w.WriteByte(b)
+ if err != nil {
+ return err
+ }
+
+ lengthByte := byte(0)
+ if h.masked {
+ lengthByte |= 1 << 7
+ }
+
+ switch {
+ case h.payloadLength > math.MaxUint16:
+ lengthByte |= 127
+ case h.payloadLength > 125:
+ lengthByte |= 126
+ case h.payloadLength >= 0:
+ lengthByte |= byte(h.payloadLength)
+ }
+ err = w.WriteByte(lengthByte)
+ if err != nil {
+ return err
+ }
+
+ switch {
+ case h.payloadLength > math.MaxUint16:
+ binary.BigEndian.PutUint64(buf, uint64(h.payloadLength))
+ _, err = w.Write(buf)
+ case h.payloadLength > 125:
+ binary.BigEndian.PutUint16(buf, uint16(h.payloadLength))
+ _, err = w.Write(buf[:2])
+ }
+ if err != nil {
+ return err
+ }
+
+ if h.masked {
+ binary.LittleEndian.PutUint32(buf, h.maskKey)
+ _, err = w.Write(buf[:4])
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
// mask applies the WebSocket masking algorithm to b with the given key and
// returns the key rotated so that masking can be resumed on the next chunk
// of the same message.
// See https://tools.ietf.org/html/rfc6455#section-5.3
//
// It is optimized for LittleEndian and expects the key to be in
// little endian.
//
// See https://github.com/golang/go/issues/31586
func mask(key uint32, b []byte) uint32 {
	// XOR 8 bytes at a time using the key doubled into a 64-bit word.
	// No rotation is needed between chunks because 8 is a multiple of
	// the 4-byte key.
	if len(b) >= 8 {
		key64 := uint64(key)<<32 | uint64(key)
		for len(b) >= 8 {
			v := binary.LittleEndian.Uint64(b)
			binary.LittleEndian.PutUint64(b, v^key64)
			b = b[8:]
		}
	}

	// XOR remaining whole 4-byte words; again no rotation is needed.
	for len(b) >= 4 {
		v := binary.LittleEndian.Uint32(b)
		binary.LittleEndian.PutUint32(b, v^key)
		b = b[4:]
	}

	// XOR the trailing bytes one at a time, rotating the key so the
	// returned value lines up with the next byte of the message.
	for i := range b {
		b[i] ^= byte(key)
		key = bits.RotateLeft32(key, -8)
	}

	return key
}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/go.mod b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/go.mod
new file mode 100644
index 00000000000..60377823cba
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/go.mod
@@ -0,0 +1,14 @@
+module nhooyr.io/websocket
+
+go 1.13
+
+require (
+ github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee // indirect
+ github.com/gobwas/pool v0.2.0 // indirect
+ github.com/gobwas/ws v1.0.2
+ github.com/golang/protobuf v1.3.5
+ github.com/google/go-cmp v0.4.0
+ github.com/gorilla/websocket v1.4.1
+ github.com/klauspost/compress v1.10.3
+ golang.org/x/time v0.0.0-20191024005414-555d28b269f0
+)
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/bpool/bpool.go b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/bpool/bpool.go
new file mode 100644
index 00000000000..aa826fba2b1
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/bpool/bpool.go
@@ -0,0 +1,24 @@
+package bpool
+
+import (
+ "bytes"
+ "sync"
+)
+
// bpool recycles bytes.Buffers to avoid re-allocating their storage.
var bpool sync.Pool

// Get returns a buffer from the pool or creates a new one if
// the pool is empty.
func Get() *bytes.Buffer {
	if b, ok := bpool.Get().(*bytes.Buffer); ok {
		return b
	}
	return &bytes.Buffer{}
}

// Put resets b and returns it to the pool for reuse.
func Put(b *bytes.Buffer) {
	b.Reset()
	bpool.Put(b)
}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/errd/wrap.go b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/errd/wrap.go
new file mode 100644
index 00000000000..6e779131af8
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/errd/wrap.go
@@ -0,0 +1,14 @@
+package errd
+
+import (
+ "fmt"
+)
+
+// Wrap wraps err with fmt.Errorf if err is non nil.
+// Intended for use with defer and a named error return.
+// Inspired by https://github.com/golang/go/issues/32676.
+func Wrap(err *error, f string, v ...interface{}) {
+ if *err != nil {
+ *err = fmt.Errorf(f+": %w", append(v, *err)...)
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go
new file mode 100644
index 00000000000..26ffb45625b
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go
@@ -0,0 +1,170 @@
+// +build js
+
+// Package wsjs implements typed access to the browser javascript WebSocket API.
+//
+// https://developer.mozilla.org/en-US/docs/Web/API/WebSocket
+package wsjs
+
+import (
+ "syscall/js"
+)
+
// handleJSError recovers a panic raised by the syscall/js API, stores it
// in *err, and invokes onErr (if non-nil) so the caller can reset any
// partially-built state. Panics that are not js.Error are re-raised.
// It must be invoked directly by a deferred call for recover to work.
func handleJSError(err *error, onErr func()) {
	r := recover()

	if jsErr, ok := r.(js.Error); ok {
		*err = jsErr

		if onErr != nil {
			onErr()
		}
		return
	}

	// Not a JS error: propagate the original panic unchanged.
	if r != nil {
		panic(r)
	}
}
+
+// New is a wrapper around the javascript WebSocket constructor.
+func New(url string, protocols []string) (c WebSocket, err error) {
+ defer handleJSError(&err, func() {
+ c = WebSocket{}
+ })
+
+ jsProtocols := make([]interface{}, len(protocols))
+ for i, p := range protocols {
+ jsProtocols[i] = p
+ }
+
+ c = WebSocket{
+ v: js.Global().Get("WebSocket").New(url, jsProtocols),
+ }
+
+ c.setBinaryType("arraybuffer")
+
+ return c, nil
+}
+
+// WebSocket is a wrapper around a javascript WebSocket object.
+type WebSocket struct {
+ v js.Value
+}
+
+func (c WebSocket) setBinaryType(typ string) {
+ c.v.Set("binaryType", string(typ))
+}
+
+func (c WebSocket) addEventListener(eventType string, fn func(e js.Value)) func() {
+ f := js.FuncOf(func(this js.Value, args []js.Value) interface{} {
+ fn(args[0])
+ return nil
+ })
+ c.v.Call("addEventListener", eventType, f)
+
+ return func() {
+ c.v.Call("removeEventListener", eventType, f)
+ f.Release()
+ }
+}
+
// CloseEvent is the type passed to a WebSocket close handler.
type CloseEvent struct {
	Code     uint16 // close status code
	Reason   string // optional close reason from the peer
	WasClean bool   // whether the connection closed cleanly
}

// OnClose registers a function to be called when the WebSocket is closed.
// The returned function removes the handler.
func (c WebSocket) OnClose(fn func(CloseEvent)) (remove func()) {
	return c.addEventListener("close", func(e js.Value) {
		ce := CloseEvent{
			Code:     uint16(e.Get("code").Int()),
			Reason:   e.Get("reason").String(),
			WasClean: e.Get("wasClean").Bool(),
		}
		fn(ce)
	})
}

// OnError registers a function to be called when there is an error
// with the WebSocket. The returned function removes the handler.
func (c WebSocket) OnError(fn func(e js.Value)) (remove func()) {
	return c.addEventListener("error", fn)
}

// MessageEvent is the type passed to a message handler.
type MessageEvent struct {
	// string or []byte.
	Data interface{}

	// There are more fields to the interface but we don't use them.
	// See https://developer.mozilla.org/en-US/docs/Web/API/MessageEvent
}
+
+// OnMessage registers a function to be called when the WebSocket receives a message.
+func (c WebSocket) OnMessage(fn func(m MessageEvent)) (remove func()) {
+ return c.addEventListener("message", func(e js.Value) {
+ var data interface{}
+
+ arrayBuffer := e.Get("data")
+ if arrayBuffer.Type() == js.TypeString {
+ data = arrayBuffer.String()
+ } else {
+ data = extractArrayBuffer(arrayBuffer)
+ }
+
+ me := MessageEvent{
+ Data: data,
+ }
+ fn(me)
+
+ return
+ })
+}
+
// Subprotocol returns the WebSocket subprotocol in use.
func (c WebSocket) Subprotocol() string {
	return c.v.Get("protocol").String()
}

// OnOpen registers a function to be called when the WebSocket is opened.
// The returned function removes the handler.
func (c WebSocket) OnOpen(fn func(e js.Value)) (remove func()) {
	return c.addEventListener("open", fn)
}

// Close closes the WebSocket with the given code and reason.
// A throw from the JS API is converted into the returned error.
func (c WebSocket) Close(code int, reason string) (err error) {
	defer handleJSError(&err, nil)
	c.v.Call("close", code, reason)
	return err
}

// SendText sends the given string as a text message
// on the WebSocket.
func (c WebSocket) SendText(v string) (err error) {
	defer handleJSError(&err, nil)
	c.v.Call("send", v)
	return err
}

// SendBytes sends the given message as a binary message
// on the WebSocket.
func (c WebSocket) SendBytes(v []byte) (err error) {
	defer handleJSError(&err, nil)
	// []byte must cross the JS boundary as a Uint8Array.
	c.v.Call("send", uint8Array(v))
	return err
}
+
// extractArrayBuffer copies the contents of a JS ArrayBuffer into a newly
// allocated Go byte slice.
func extractArrayBuffer(arrayBuffer js.Value) []byte {
	uint8Array := js.Global().Get("Uint8Array").New(arrayBuffer)
	dst := make([]byte, uint8Array.Length())
	js.CopyBytesToGo(dst, uint8Array)
	return dst
}

// uint8Array copies src into a newly allocated JS Uint8Array.
func uint8Array(src []byte) js.Value {
	uint8Array := js.Global().Get("Uint8Array").New(len(src))
	js.CopyBytesToJS(uint8Array, src)
	return uint8Array
}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/xsync/go.go b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/xsync/go.go
new file mode 100644
index 00000000000..7a61f27fa2a
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/xsync/go.go
@@ -0,0 +1,25 @@
+package xsync
+
+import (
+ "fmt"
+)
+
+// Go allows running a function in another goroutine
+// and waiting for its error.
+func Go(fn func() error) <-chan error {
+ errs := make(chan error, 1)
+ go func() {
+ defer func() {
+ r := recover()
+ if r != nil {
+ select {
+ case errs <- fmt.Errorf("panic in go fn: %v", r):
+ default:
+ }
+ }
+ }()
+ errs <- fn()
+ }()
+
+ return errs
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/xsync/int64.go b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/xsync/int64.go
new file mode 100644
index 00000000000..a0c40204156
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/xsync/int64.go
@@ -0,0 +1,23 @@
+package xsync
+
+import (
+ "sync/atomic"
+)
+
// Int64 represents an atomic int64.
//
// atomic.Value is used rather than atomic.LoadInt64/StoreInt64 because the
// 64-bit functions require 64-bit alignment of the word, which is not
// guaranteed for arbitrarily placed fields on 32-bit platforms (see the
// Bugs section of the sync/atomic documentation).
type Int64 struct {
	i atomic.Value
}

// Load returns the stored value, or 0 if Store has never been called.
func (v *Int64) Load() int64 {
	x := v.i.Load()
	if x == nil {
		return 0
	}
	return x.(int64)
}

// Store atomically replaces the stored value with i.
func (v *Int64) Store(i int64) {
	v.i.Store(i)
}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/netconn.go b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/netconn.go
new file mode 100644
index 00000000000..64aadf0b998
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/netconn.go
@@ -0,0 +1,166 @@
+package websocket
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "math"
+ "net"
+ "sync"
+ "time"
+)
+
+// NetConn converts a *websocket.Conn into a net.Conn.
+//
+// It's for tunneling arbitrary protocols over WebSockets.
+// Few users of the library will need this but it's tricky to implement
+// correctly and so provided in the library.
+// See https://github.com/nhooyr/websocket/issues/100.
+//
+// Every Write to the net.Conn will correspond to a message write of
+// the given type on *websocket.Conn.
+//
+// The passed ctx bounds the lifetime of the net.Conn. If cancelled,
+// all reads and writes on the net.Conn will be cancelled.
+//
+// If a message is read that is not of the correct type, the connection
+// will be closed with StatusUnsupportedData and an error will be returned.
+//
+// Close will close the *websocket.Conn with StatusNormalClosure.
+//
+// When a deadline is hit, the connection will be closed. This is
+// different from most net.Conn implementations where only the
+// reading/writing goroutines are interrupted but the connection is kept alive.
+//
+// The Addr methods will return a mock net.Addr that returns "websocket" for Network
+// and "websocket/unknown-addr" for String.
+//
+// A received StatusNormalClosure or StatusGoingAway close frame will be translated to
+// io.EOF when reading.
+func NetConn(ctx context.Context, c *Conn, msgType MessageType) net.Conn {
+ nc := &netConn{
+ c: c,
+ msgType: msgType,
+ }
+
+ var cancel context.CancelFunc
+ nc.writeContext, cancel = context.WithCancel(ctx)
+ nc.writeTimer = time.AfterFunc(math.MaxInt64, cancel)
+ if !nc.writeTimer.Stop() {
+ <-nc.writeTimer.C
+ }
+
+ nc.readContext, cancel = context.WithCancel(ctx)
+ nc.readTimer = time.AfterFunc(math.MaxInt64, cancel)
+ if !nc.readTimer.Stop() {
+ <-nc.readTimer.C
+ }
+
+ return nc
+}
+
// netConn adapts a *Conn to the net.Conn interface; see NetConn.
type netConn struct {
	c       *Conn
	msgType MessageType // the message type every Write uses

	writeTimer   *time.Timer // cancels writeContext when a write deadline fires
	writeContext context.Context

	readTimer   *time.Timer // cancels readContext when a read deadline fires
	readContext context.Context

	readMu sync.Mutex // guards eofed and reader
	eofed  bool       // a normal close was received; further Reads return io.EOF
	reader io.Reader  // reader for the in-progress message, if any
}

// Compile-time check that netConn implements net.Conn.
var _ net.Conn = &netConn{}

// Close closes the underlying WebSocket connection with a normal closure.
func (c *netConn) Close() error {
	return c.c.Close(StatusNormalClosure, "")
}

// Write sends p as a single WebSocket message of the configured type.
func (c *netConn) Write(p []byte) (int, error) {
	err := c.c.Write(c.writeContext, c.msgType, p)
	if err != nil {
		return 0, err
	}
	return len(p), nil
}
+
+func (c *netConn) Read(p []byte) (int, error) {
+ c.readMu.Lock()
+ defer c.readMu.Unlock()
+
+ if c.eofed {
+ return 0, io.EOF
+ }
+
+ if c.reader == nil {
+ typ, r, err := c.c.Reader(c.readContext)
+ if err != nil {
+ switch CloseStatus(err) {
+ case StatusNormalClosure, StatusGoingAway:
+ c.eofed = true
+ return 0, io.EOF
+ }
+ return 0, err
+ }
+ if typ != c.msgType {
+ err := fmt.Errorf("unexpected frame type read (expected %v): %v", c.msgType, typ)
+ c.c.Close(StatusUnsupportedData, err.Error())
+ return 0, err
+ }
+ c.reader = r
+ }
+
+ n, err := c.reader.Read(p)
+ if err == io.EOF {
+ c.reader = nil
+ err = nil
+ }
+ return n, err
+}
+
// websocketAddr is the mock net.Addr returned by netConn's Addr methods;
// WebSocket connections have no transport address of their own here.
type websocketAddr struct{}

// Network reports the mock network name.
func (a websocketAddr) Network() string {
	return "websocket"
}

// String reports a placeholder address.
func (a websocketAddr) String() string {
	return "websocket/unknown-addr"
}
+
+func (c *netConn) RemoteAddr() net.Addr {
+ return websocketAddr{}
+}
+
+func (c *netConn) LocalAddr() net.Addr {
+ return websocketAddr{}
+}
+
+func (c *netConn) SetDeadline(t time.Time) error {
+ c.SetWriteDeadline(t)
+ c.SetReadDeadline(t)
+ return nil
+}
+
+func (c *netConn) SetWriteDeadline(t time.Time) error {
+ if t.IsZero() {
+ c.writeTimer.Stop()
+ } else {
+ c.writeTimer.Reset(t.Sub(time.Now()))
+ }
+ return nil
+}
+
+func (c *netConn) SetReadDeadline(t time.Time) error {
+ if t.IsZero() {
+ c.readTimer.Stop()
+ } else {
+ c.readTimer.Reset(t.Sub(time.Now()))
+ }
+ return nil
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/read.go b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/read.go
new file mode 100644
index 00000000000..afd08cc7cde
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/read.go
@@ -0,0 +1,471 @@
+// +build !js
+
+package websocket
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "strings"
+ "time"
+
+ "nhooyr.io/websocket/internal/errd"
+ "nhooyr.io/websocket/internal/xsync"
+)
+
+// Reader reads from the connection until until there is a WebSocket
+// data message to be read. It will handle ping, pong and close frames as appropriate.
+//
+// It returns the type of the message and an io.Reader to read it.
+// The passed context will also bound the reader.
+// Ensure you read to EOF otherwise the connection will hang.
+//
+// Call CloseRead if you do not expect any data messages from the peer.
+//
+// Only one Reader may be open at a time.
+func (c *Conn) Reader(ctx context.Context) (MessageType, io.Reader, error) {
+ return c.reader(ctx)
+}
+
+// Read is a convenience method around Reader to read a single message
+// from the connection.
+func (c *Conn) Read(ctx context.Context) (MessageType, []byte, error) {
+ typ, r, err := c.Reader(ctx)
+ if err != nil {
+ return 0, nil, err
+ }
+
+ b, err := ioutil.ReadAll(r)
+ return typ, b, err
+}
+
+// CloseRead starts a goroutine to read from the connection until it is closed
+// or a data message is received.
+//
+// Once CloseRead is called you cannot read any messages from the connection.
+// The returned context will be cancelled when the connection is closed.
+//
+// If a data message is received, the connection will be closed with StatusPolicyViolation.
+//
+// Call CloseRead when you do not expect to read any more messages.
+// Since it actively reads from the connection, it will ensure that ping, pong and close
+// frames are responded to. This means c.Ping and c.Close will still work as expected.
+func (c *Conn) CloseRead(ctx context.Context) context.Context {
+ ctx, cancel := context.WithCancel(ctx)
+ go func() {
+ defer cancel()
+ c.Reader(ctx)
+ c.Close(StatusPolicyViolation, "unexpected data message")
+ }()
+ return ctx
+}
+
+// SetReadLimit sets the max number of bytes to read for a single message.
+// It applies to the Reader and Read methods.
+//
+// By default, the connection has a message read limit of 32768 bytes.
+//
+// When the limit is hit, the connection will be closed with StatusMessageTooBig.
+func (c *Conn) SetReadLimit(n int64) {
+ // We add read one more byte than the limit in case
+ // there is a fin frame that needs to be read.
+ c.msgReader.limitReader.limit.Store(n + 1)
+}
+
+const defaultReadLimit = 32768
+
+func newMsgReader(c *Conn) *msgReader {
+ mr := &msgReader{
+ c: c,
+ fin: true,
+ }
+ mr.readFunc = mr.read
+
+ mr.limitReader = newLimitReader(c, mr.readFunc, defaultReadLimit+1)
+ return mr
+}
+
+func (mr *msgReader) resetFlate() {
+ if mr.flateContextTakeover() {
+ mr.dict.init(32768)
+ }
+ if mr.flateBufio == nil {
+ mr.flateBufio = getBufioReader(mr.readFunc)
+ }
+
+ mr.flateReader = getFlateReader(mr.flateBufio, mr.dict.buf)
+ mr.limitReader.r = mr.flateReader
+ mr.flateTail.Reset(deflateMessageTail)
+}
+
+func (mr *msgReader) putFlateReader() {
+ if mr.flateReader != nil {
+ putFlateReader(mr.flateReader)
+ mr.flateReader = nil
+ }
+}
+
+func (mr *msgReader) close() {
+ mr.c.readMu.forceLock()
+ mr.putFlateReader()
+ mr.dict.close()
+ if mr.flateBufio != nil {
+ putBufioReader(mr.flateBufio)
+ }
+
+ if mr.c.client {
+ putBufioReader(mr.c.br)
+ mr.c.br = nil
+ }
+}
+
+func (mr *msgReader) flateContextTakeover() bool {
+ if mr.c.client {
+ return !mr.c.copts.serverNoContextTakeover
+ }
+ return !mr.c.copts.clientNoContextTakeover
+}
+
+func (c *Conn) readRSV1Illegal(h header) bool {
+ // If compression is disabled, rsv1 is illegal.
+ if !c.flate() {
+ return true
+ }
+ // rsv1 is only allowed on data frames beginning messages.
+ if h.opcode != opText && h.opcode != opBinary {
+ return true
+ }
+ return false
+}
+
+func (c *Conn) readLoop(ctx context.Context) (header, error) {
+ for {
+ h, err := c.readFrameHeader(ctx)
+ if err != nil {
+ return header{}, err
+ }
+
+ if h.rsv1 && c.readRSV1Illegal(h) || h.rsv2 || h.rsv3 {
+ err := fmt.Errorf("received header with unexpected rsv bits set: %v:%v:%v", h.rsv1, h.rsv2, h.rsv3)
+ c.writeError(StatusProtocolError, err)
+ return header{}, err
+ }
+
+ if !c.client && !h.masked {
+ return header{}, errors.New("received unmasked frame from client")
+ }
+
+ switch h.opcode {
+ case opClose, opPing, opPong:
+ err = c.handleControl(ctx, h)
+ if err != nil {
+ // Pass through CloseErrors when receiving a close frame.
+ if h.opcode == opClose && CloseStatus(err) != -1 {
+ return header{}, err
+ }
+ return header{}, fmt.Errorf("failed to handle control frame %v: %w", h.opcode, err)
+ }
+ case opContinuation, opText, opBinary:
+ return h, nil
+ default:
+ err := fmt.Errorf("received unknown opcode %v", h.opcode)
+ c.writeError(StatusProtocolError, err)
+ return header{}, err
+ }
+ }
+}
+
+func (c *Conn) readFrameHeader(ctx context.Context) (header, error) {
+ select {
+ case <-c.closed:
+ return header{}, c.closeErr
+ case c.readTimeout <- ctx:
+ }
+
+ h, err := readFrameHeader(c.br, c.readHeaderBuf[:])
+ if err != nil {
+ select {
+ case <-c.closed:
+ return header{}, c.closeErr
+ case <-ctx.Done():
+ return header{}, ctx.Err()
+ default:
+ c.close(err)
+ return header{}, err
+ }
+ }
+
+ select {
+ case <-c.closed:
+ return header{}, c.closeErr
+ case c.readTimeout <- context.Background():
+ }
+
+ return h, nil
+}
+
+func (c *Conn) readFramePayload(ctx context.Context, p []byte) (int, error) {
+ select {
+ case <-c.closed:
+ return 0, c.closeErr
+ case c.readTimeout <- ctx:
+ }
+
+ n, err := io.ReadFull(c.br, p)
+ if err != nil {
+ select {
+ case <-c.closed:
+ return n, c.closeErr
+ case <-ctx.Done():
+ return n, ctx.Err()
+ default:
+ err = fmt.Errorf("failed to read frame payload: %w", err)
+ c.close(err)
+ return n, err
+ }
+ }
+
+ select {
+ case <-c.closed:
+ return n, c.closeErr
+ case c.readTimeout <- context.Background():
+ }
+
+ return n, err
+}
+
+func (c *Conn) handleControl(ctx context.Context, h header) (err error) {
+ if h.payloadLength < 0 || h.payloadLength > maxControlPayload {
+ err := fmt.Errorf("received control frame payload with invalid length: %d", h.payloadLength)
+ c.writeError(StatusProtocolError, err)
+ return err
+ }
+
+ if !h.fin {
+ err := errors.New("received fragmented control frame")
+ c.writeError(StatusProtocolError, err)
+ return err
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, time.Second*5)
+ defer cancel()
+
+ b := c.readControlBuf[:h.payloadLength]
+ _, err = c.readFramePayload(ctx, b)
+ if err != nil {
+ return err
+ }
+
+ if h.masked {
+ mask(h.maskKey, b)
+ }
+
+ switch h.opcode {
+ case opPing:
+ return c.writeControl(ctx, opPong, b)
+ case opPong:
+ c.activePingsMu.Lock()
+ pong, ok := c.activePings[string(b)]
+ c.activePingsMu.Unlock()
+ if ok {
+ close(pong)
+ }
+ return nil
+ }
+
+ defer func() {
+ c.readCloseFrameErr = err
+ }()
+
+ ce, err := parseClosePayload(b)
+ if err != nil {
+ err = fmt.Errorf("received invalid close payload: %w", err)
+ c.writeError(StatusProtocolError, err)
+ return err
+ }
+
+ err = fmt.Errorf("received close frame: %w", ce)
+ c.setCloseErr(err)
+ c.writeClose(ce.Code, ce.Reason)
+ c.close(err)
+ return err
+}
+
+func (c *Conn) reader(ctx context.Context) (_ MessageType, _ io.Reader, err error) {
+ defer errd.Wrap(&err, "failed to get reader")
+
+ err = c.readMu.lock(ctx)
+ if err != nil {
+ return 0, nil, err
+ }
+ defer c.readMu.unlock()
+
+ if !c.msgReader.fin {
+ err = errors.New("previous message not read to completion")
+ c.close(fmt.Errorf("failed to get reader: %w", err))
+ return 0, nil, err
+ }
+
+ h, err := c.readLoop(ctx)
+ if err != nil {
+ return 0, nil, err
+ }
+
+ if h.opcode == opContinuation {
+ err := errors.New("received continuation frame without text or binary frame")
+ c.writeError(StatusProtocolError, err)
+ return 0, nil, err
+ }
+
+ c.msgReader.reset(ctx, h)
+
+ return MessageType(h.opcode), c.msgReader, nil
+}
+
+type msgReader struct {
+ c *Conn
+
+ ctx context.Context
+ flate bool
+ flateReader io.Reader
+ flateBufio *bufio.Reader
+ flateTail strings.Reader
+ limitReader *limitReader
+ dict slidingWindow
+
+ fin bool
+ payloadLength int64
+ maskKey uint32
+
+ // readerFunc(mr.Read) to avoid continuous allocations.
+ readFunc readerFunc
+}
+
+func (mr *msgReader) reset(ctx context.Context, h header) {
+ mr.ctx = ctx
+ mr.flate = h.rsv1
+ mr.limitReader.reset(mr.readFunc)
+
+ if mr.flate {
+ mr.resetFlate()
+ }
+
+ mr.setFrame(h)
+}
+
+func (mr *msgReader) setFrame(h header) {
+ mr.fin = h.fin
+ mr.payloadLength = h.payloadLength
+ mr.maskKey = h.maskKey
+}
+
+func (mr *msgReader) Read(p []byte) (n int, err error) {
+ err = mr.c.readMu.lock(mr.ctx)
+ if err != nil {
+ return 0, fmt.Errorf("failed to read: %w", err)
+ }
+ defer mr.c.readMu.unlock()
+
+ n, err = mr.limitReader.Read(p)
+ if mr.flate && mr.flateContextTakeover() {
+ p = p[:n]
+ mr.dict.write(p)
+ }
+ if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) && mr.fin && mr.flate {
+ mr.putFlateReader()
+ return n, io.EOF
+ }
+ if err != nil {
+ err = fmt.Errorf("failed to read: %w", err)
+ mr.c.close(err)
+ }
+ return n, err
+}
+
+func (mr *msgReader) read(p []byte) (int, error) {
+ for {
+ if mr.payloadLength == 0 {
+ if mr.fin {
+ if mr.flate {
+ return mr.flateTail.Read(p)
+ }
+ return 0, io.EOF
+ }
+
+ h, err := mr.c.readLoop(mr.ctx)
+ if err != nil {
+ return 0, err
+ }
+ if h.opcode != opContinuation {
+ err := errors.New("received new data message without finishing the previous message")
+ mr.c.writeError(StatusProtocolError, err)
+ return 0, err
+ }
+ mr.setFrame(h)
+
+ continue
+ }
+
+ if int64(len(p)) > mr.payloadLength {
+ p = p[:mr.payloadLength]
+ }
+
+ n, err := mr.c.readFramePayload(mr.ctx, p)
+ if err != nil {
+ return n, err
+ }
+
+ mr.payloadLength -= int64(n)
+
+ if !mr.c.client {
+ mr.maskKey = mask(mr.maskKey, p)
+ }
+
+ return n, nil
+ }
+}
+
+type limitReader struct {
+ c *Conn
+ r io.Reader
+ limit xsync.Int64
+ n int64
+}
+
+func newLimitReader(c *Conn, r io.Reader, limit int64) *limitReader {
+ lr := &limitReader{
+ c: c,
+ }
+ lr.limit.Store(limit)
+ lr.reset(r)
+ return lr
+}
+
+func (lr *limitReader) reset(r io.Reader) {
+ lr.n = lr.limit.Load()
+ lr.r = r
+}
+
+func (lr *limitReader) Read(p []byte) (int, error) {
+ if lr.n <= 0 {
+ err := fmt.Errorf("read limited at %v bytes", lr.limit.Load())
+ lr.c.writeError(StatusMessageTooBig, err)
+ return 0, err
+ }
+
+ if int64(len(p)) > lr.n {
+ p = p[:lr.n]
+ }
+ n, err := lr.r.Read(p)
+ lr.n -= int64(n)
+ return n, err
+}
+
+type readerFunc func(p []byte) (int, error)
+
+func (f readerFunc) Read(p []byte) (int, error) {
+ return f(p)
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/stringer.go b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/stringer.go
new file mode 100644
index 00000000000..5a66ba29076
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/stringer.go
@@ -0,0 +1,91 @@
+// Code generated by "stringer -type=opcode,MessageType,StatusCode -output=stringer.go"; DO NOT EDIT.
+
+package websocket
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[opContinuation-0]
+ _ = x[opText-1]
+ _ = x[opBinary-2]
+ _ = x[opClose-8]
+ _ = x[opPing-9]
+ _ = x[opPong-10]
+}
+
+const (
+ _opcode_name_0 = "opContinuationopTextopBinary"
+ _opcode_name_1 = "opCloseopPingopPong"
+)
+
+var (
+ _opcode_index_0 = [...]uint8{0, 14, 20, 28}
+ _opcode_index_1 = [...]uint8{0, 7, 13, 19}
+)
+
+func (i opcode) String() string {
+ switch {
+ case 0 <= i && i <= 2:
+ return _opcode_name_0[_opcode_index_0[i]:_opcode_index_0[i+1]]
+ case 8 <= i && i <= 10:
+ i -= 8
+ return _opcode_name_1[_opcode_index_1[i]:_opcode_index_1[i+1]]
+ default:
+ return "opcode(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[MessageText-1]
+ _ = x[MessageBinary-2]
+}
+
+const _MessageType_name = "MessageTextMessageBinary"
+
+var _MessageType_index = [...]uint8{0, 11, 24}
+
+func (i MessageType) String() string {
+ i -= 1
+ if i < 0 || i >= MessageType(len(_MessageType_index)-1) {
+ return "MessageType(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _MessageType_name[_MessageType_index[i]:_MessageType_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[StatusNormalClosure-1000]
+ _ = x[StatusGoingAway-1001]
+ _ = x[StatusProtocolError-1002]
+ _ = x[StatusUnsupportedData-1003]
+ _ = x[statusReserved-1004]
+ _ = x[StatusNoStatusRcvd-1005]
+ _ = x[StatusAbnormalClosure-1006]
+ _ = x[StatusInvalidFramePayloadData-1007]
+ _ = x[StatusPolicyViolation-1008]
+ _ = x[StatusMessageTooBig-1009]
+ _ = x[StatusMandatoryExtension-1010]
+ _ = x[StatusInternalError-1011]
+ _ = x[StatusServiceRestart-1012]
+ _ = x[StatusTryAgainLater-1013]
+ _ = x[StatusBadGateway-1014]
+ _ = x[StatusTLSHandshake-1015]
+}
+
+const _StatusCode_name = "StatusNormalClosureStatusGoingAwayStatusProtocolErrorStatusUnsupportedDatastatusReservedStatusNoStatusRcvdStatusAbnormalClosureStatusInvalidFramePayloadDataStatusPolicyViolationStatusMessageTooBigStatusMandatoryExtensionStatusInternalErrorStatusServiceRestartStatusTryAgainLaterStatusBadGatewayStatusTLSHandshake"
+
+var _StatusCode_index = [...]uint16{0, 19, 34, 53, 74, 88, 106, 127, 156, 177, 196, 220, 239, 259, 278, 294, 312}
+
+func (i StatusCode) String() string {
+ i -= 1000
+ if i < 0 || i >= StatusCode(len(_StatusCode_index)-1) {
+ return "StatusCode(" + strconv.FormatInt(int64(i+1000), 10) + ")"
+ }
+ return _StatusCode_name[_StatusCode_index[i]:_StatusCode_index[i+1]]
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/write.go b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/write.go
new file mode 100644
index 00000000000..60a4fba0644
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/write.go
@@ -0,0 +1,386 @@
+// +build !js
+
+package websocket
+
+import (
+ "bufio"
+ "context"
+ "crypto/rand"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/klauspost/compress/flate"
+
+ "nhooyr.io/websocket/internal/errd"
+)
+
+// Writer returns a writer bounded by the context that will write
+// a WebSocket message of type dataType to the connection.
+//
+// You must close the writer once you have written the entire message.
+//
+// Only one writer can be open at a time, multiple calls will block until the previous writer
+// is closed.
+func (c *Conn) Writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) {
+ w, err := c.writer(ctx, typ)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get writer: %w", err)
+ }
+ return w, nil
+}
+
+// Write writes a message to the connection.
+//
+// See the Writer method if you want to stream a message.
+//
+// If compression is disabled or the threshold is not met, then it
+// will write the message in a single frame.
+func (c *Conn) Write(ctx context.Context, typ MessageType, p []byte) error {
+ _, err := c.write(ctx, typ, p)
+ if err != nil {
+ return fmt.Errorf("failed to write msg: %w", err)
+ }
+ return nil
+}
+
+type msgWriter struct {
+ mw *msgWriterState
+ closed bool
+}
+
+func (mw *msgWriter) Write(p []byte) (int, error) {
+ if mw.closed {
+ return 0, errors.New("cannot use closed writer")
+ }
+ return mw.mw.Write(p)
+}
+
+func (mw *msgWriter) Close() error {
+ if mw.closed {
+ return errors.New("cannot use closed writer")
+ }
+ mw.closed = true
+ return mw.mw.Close()
+}
+
+type msgWriterState struct {
+ c *Conn
+
+ mu *mu
+ writeMu *mu
+
+ ctx context.Context
+ opcode opcode
+ flate bool
+
+ trimWriter *trimLastFourBytesWriter
+ dict slidingWindow
+}
+
+func newMsgWriterState(c *Conn) *msgWriterState {
+ mw := &msgWriterState{
+ c: c,
+ mu: newMu(c),
+ writeMu: newMu(c),
+ }
+ return mw
+}
+
+func (mw *msgWriterState) ensureFlate() {
+ if mw.trimWriter == nil {
+ mw.trimWriter = &trimLastFourBytesWriter{
+ w: writerFunc(mw.write),
+ }
+ }
+
+ mw.dict.init(8192)
+ mw.flate = true
+}
+
+func (mw *msgWriterState) flateContextTakeover() bool {
+ if mw.c.client {
+ return !mw.c.copts.clientNoContextTakeover
+ }
+ return !mw.c.copts.serverNoContextTakeover
+}
+
+func (c *Conn) writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) {
+ err := c.msgWriterState.reset(ctx, typ)
+ if err != nil {
+ return nil, err
+ }
+ return &msgWriter{
+ mw: c.msgWriterState,
+ closed: false,
+ }, nil
+}
+
+func (c *Conn) write(ctx context.Context, typ MessageType, p []byte) (int, error) {
+ mw, err := c.writer(ctx, typ)
+ if err != nil {
+ return 0, err
+ }
+
+ if !c.flate() {
+ defer c.msgWriterState.mu.unlock()
+ return c.writeFrame(ctx, true, false, c.msgWriterState.opcode, p)
+ }
+
+ n, err := mw.Write(p)
+ if err != nil {
+ return n, err
+ }
+
+ err = mw.Close()
+ return n, err
+}
+
+func (mw *msgWriterState) reset(ctx context.Context, typ MessageType) error {
+ err := mw.mu.lock(ctx)
+ if err != nil {
+ return err
+ }
+
+ mw.ctx = ctx
+ mw.opcode = opcode(typ)
+ mw.flate = false
+
+ mw.trimWriter.reset()
+
+ return nil
+}
+
+// Write writes the given bytes to the WebSocket connection.
+func (mw *msgWriterState) Write(p []byte) (_ int, err error) {
+ err = mw.writeMu.lock(mw.ctx)
+ if err != nil {
+ return 0, fmt.Errorf("failed to write: %w", err)
+ }
+ defer mw.writeMu.unlock()
+
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("failed to write: %w", err)
+ mw.c.close(err)
+ }
+ }()
+
+ if mw.c.flate() {
+ // Only enables flate if the length crosses the
+ // threshold on the first frame
+ if mw.opcode != opContinuation && len(p) >= mw.c.flateThreshold {
+ mw.ensureFlate()
+ }
+ }
+
+ if mw.flate {
+ err = flate.StatelessDeflate(mw.trimWriter, p, false, mw.dict.buf)
+ if err != nil {
+ return 0, err
+ }
+ mw.dict.write(p)
+ return len(p), nil
+ }
+
+ return mw.write(p)
+}
+
+func (mw *msgWriterState) write(p []byte) (int, error) {
+ n, err := mw.c.writeFrame(mw.ctx, false, mw.flate, mw.opcode, p)
+ if err != nil {
+ return n, fmt.Errorf("failed to write data frame: %w", err)
+ }
+ mw.opcode = opContinuation
+ return n, nil
+}
+
+// Close flushes the frame to the connection.
+func (mw *msgWriterState) Close() (err error) {
+ defer errd.Wrap(&err, "failed to close writer")
+
+ err = mw.writeMu.lock(mw.ctx)
+ if err != nil {
+ return err
+ }
+ defer mw.writeMu.unlock()
+
+ _, err = mw.c.writeFrame(mw.ctx, true, mw.flate, mw.opcode, nil)
+ if err != nil {
+ return fmt.Errorf("failed to write fin frame: %w", err)
+ }
+
+ if mw.flate && !mw.flateContextTakeover() {
+ mw.dict.close()
+ }
+ mw.mu.unlock()
+ return nil
+}
+
+func (mw *msgWriterState) close() {
+ if mw.c.client {
+ mw.c.writeFrameMu.forceLock()
+ putBufioWriter(mw.c.bw)
+ }
+
+ mw.writeMu.forceLock()
+ mw.dict.close()
+}
+
+func (c *Conn) writeControl(ctx context.Context, opcode opcode, p []byte) error {
+ ctx, cancel := context.WithTimeout(ctx, time.Second*5)
+ defer cancel()
+
+ _, err := c.writeFrame(ctx, true, false, opcode, p)
+ if err != nil {
+ return fmt.Errorf("failed to write control frame %v: %w", opcode, err)
+ }
+ return nil
+}
+
+// frame handles all writes to the connection.
+func (c *Conn) writeFrame(ctx context.Context, fin bool, flate bool, opcode opcode, p []byte) (_ int, err error) {
+ err = c.writeFrameMu.lock(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer func() {
+ // We leave it locked when writing the close frame to avoid
+ // any other goroutine writing any other frame.
+ if opcode != opClose {
+ c.writeFrameMu.unlock()
+ }
+ }()
+
+ select {
+ case <-c.closed:
+ return 0, c.closeErr
+ case c.writeTimeout <- ctx:
+ }
+
+ defer func() {
+ if err != nil {
+ select {
+ case <-c.closed:
+ err = c.closeErr
+ case <-ctx.Done():
+ err = ctx.Err()
+ }
+ c.close(err)
+ err = fmt.Errorf("failed to write frame: %w", err)
+ }
+ }()
+
+ c.writeHeader.fin = fin
+ c.writeHeader.opcode = opcode
+ c.writeHeader.payloadLength = int64(len(p))
+
+ if c.client {
+ c.writeHeader.masked = true
+ _, err = io.ReadFull(rand.Reader, c.writeHeaderBuf[:4])
+ if err != nil {
+ return 0, fmt.Errorf("failed to generate masking key: %w", err)
+ }
+ c.writeHeader.maskKey = binary.LittleEndian.Uint32(c.writeHeaderBuf[:])
+ }
+
+ c.writeHeader.rsv1 = false
+ if flate && (opcode == opText || opcode == opBinary) {
+ c.writeHeader.rsv1 = true
+ }
+
+ err = writeFrameHeader(c.writeHeader, c.bw, c.writeHeaderBuf[:])
+ if err != nil {
+ return 0, err
+ }
+
+ n, err := c.writeFramePayload(p)
+ if err != nil {
+ return n, err
+ }
+
+ if c.writeHeader.fin {
+ err = c.bw.Flush()
+ if err != nil {
+ return n, fmt.Errorf("failed to flush: %w", err)
+ }
+ }
+
+ select {
+ case <-c.closed:
+ return n, c.closeErr
+ case c.writeTimeout <- context.Background():
+ }
+
+ return n, nil
+}
+
+func (c *Conn) writeFramePayload(p []byte) (n int, err error) {
+ defer errd.Wrap(&err, "failed to write frame payload")
+
+ if !c.writeHeader.masked {
+ return c.bw.Write(p)
+ }
+
+ maskKey := c.writeHeader.maskKey
+ for len(p) > 0 {
+ // If the buffer is full, we need to flush.
+ if c.bw.Available() == 0 {
+ err = c.bw.Flush()
+ if err != nil {
+ return n, err
+ }
+ }
+
+ // Start of next write in the buffer.
+ i := c.bw.Buffered()
+
+ j := len(p)
+ if j > c.bw.Available() {
+ j = c.bw.Available()
+ }
+
+ _, err := c.bw.Write(p[:j])
+ if err != nil {
+ return n, err
+ }
+
+ maskKey = mask(maskKey, c.writeBuf[i:c.bw.Buffered()])
+
+ p = p[j:]
+ n += j
+ }
+
+ return n, nil
+}
+
+type writerFunc func(p []byte) (int, error)
+
+func (f writerFunc) Write(p []byte) (int, error) {
+ return f(p)
+}
+
+// extractBufioWriterBuf grabs the []byte backing a *bufio.Writer
+// and returns it.
+func extractBufioWriterBuf(bw *bufio.Writer, w io.Writer) []byte {
+ var writeBuf []byte
+ bw.Reset(writerFunc(func(p2 []byte) (int, error) {
+ writeBuf = p2[:cap(p2)]
+ return len(p2), nil
+ }))
+
+ bw.WriteByte(0)
+ bw.Flush()
+
+ bw.Reset(w)
+
+ return writeBuf
+}
+
+func (c *Conn) writeError(code StatusCode, err error) {
+ c.setCloseErr(err)
+ c.writeClose(code, err.Error())
+ c.close(nil)
+}
diff --git a/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/ws_js.go b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/ws_js.go
new file mode 100644
index 00000000000..b87e32cdafb
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/ws_js.go
@@ -0,0 +1,379 @@
+package websocket // import "nhooyr.io/websocket"
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+ "syscall/js"
+
+ "nhooyr.io/websocket/internal/bpool"
+ "nhooyr.io/websocket/internal/wsjs"
+ "nhooyr.io/websocket/internal/xsync"
+)
+
+// Conn provides a wrapper around the browser WebSocket API.
+type Conn struct {
+ ws wsjs.WebSocket
+
+ // read limit for a message in bytes.
+ msgReadLimit xsync.Int64
+
+ closingMu sync.Mutex
+ isReadClosed xsync.Int64
+ closeOnce sync.Once
+ closed chan struct{}
+ closeErrOnce sync.Once
+ closeErr error
+ closeWasClean bool
+
+ releaseOnClose func()
+ releaseOnMessage func()
+
+ readSignal chan struct{}
+ readBufMu sync.Mutex
+ readBuf []wsjs.MessageEvent
+}
+
+func (c *Conn) close(err error, wasClean bool) {
+ c.closeOnce.Do(func() {
+ runtime.SetFinalizer(c, nil)
+
+ if !wasClean {
+ err = fmt.Errorf("unclean connection close: %w", err)
+ }
+ c.setCloseErr(err)
+ c.closeWasClean = wasClean
+ close(c.closed)
+ })
+}
+
+func (c *Conn) init() {
+ c.closed = make(chan struct{})
+ c.readSignal = make(chan struct{}, 1)
+
+ c.msgReadLimit.Store(32768)
+
+ c.releaseOnClose = c.ws.OnClose(func(e wsjs.CloseEvent) {
+ err := CloseError{
+ Code: StatusCode(e.Code),
+ Reason: e.Reason,
+ }
+ // We do not know if we sent or received this close as
+ // its possible the browser triggered it without us
+ // explicitly sending it.
+ c.close(err, e.WasClean)
+
+ c.releaseOnClose()
+ c.releaseOnMessage()
+ })
+
+ c.releaseOnMessage = c.ws.OnMessage(func(e wsjs.MessageEvent) {
+ c.readBufMu.Lock()
+ defer c.readBufMu.Unlock()
+
+ c.readBuf = append(c.readBuf, e)
+
+ // Lets the read goroutine know there is definitely something in readBuf.
+ select {
+ case c.readSignal <- struct{}{}:
+ default:
+ }
+ })
+
+ runtime.SetFinalizer(c, func(c *Conn) {
+ c.setCloseErr(errors.New("connection garbage collected"))
+ c.closeWithInternal()
+ })
+}
+
+func (c *Conn) closeWithInternal() {
+ c.Close(StatusInternalError, "something went wrong")
+}
+
+// Read attempts to read a message from the connection.
+// The maximum time spent waiting is bounded by the context.
+func (c *Conn) Read(ctx context.Context) (MessageType, []byte, error) {
+ if c.isReadClosed.Load() == 1 {
+ return 0, nil, errors.New("WebSocket connection read closed")
+ }
+
+ typ, p, err := c.read(ctx)
+ if err != nil {
+ return 0, nil, fmt.Errorf("failed to read: %w", err)
+ }
+ if int64(len(p)) > c.msgReadLimit.Load() {
+ err := fmt.Errorf("read limited at %v bytes", c.msgReadLimit.Load())
+ c.Close(StatusMessageTooBig, err.Error())
+ return 0, nil, err
+ }
+ return typ, p, nil
+}
+
+func (c *Conn) read(ctx context.Context) (MessageType, []byte, error) {
+ select {
+ case <-ctx.Done():
+ c.Close(StatusPolicyViolation, "read timed out")
+ return 0, nil, ctx.Err()
+ case <-c.readSignal:
+ case <-c.closed:
+ return 0, nil, c.closeErr
+ }
+
+ c.readBufMu.Lock()
+ defer c.readBufMu.Unlock()
+
+ me := c.readBuf[0]
+ // We copy the messages forward and decrease the size
+ // of the slice to avoid reallocating.
+ copy(c.readBuf, c.readBuf[1:])
+ c.readBuf = c.readBuf[:len(c.readBuf)-1]
+
+ if len(c.readBuf) > 0 {
+ // Next time we read, we'll grab the message.
+ select {
+ case c.readSignal <- struct{}{}:
+ default:
+ }
+ }
+
+ switch p := me.Data.(type) {
+ case string:
+ return MessageText, []byte(p), nil
+ case []byte:
+ return MessageBinary, p, nil
+ default:
+ panic("websocket: unexpected data type from wsjs OnMessage: " + reflect.TypeOf(me.Data).String())
+ }
+}
+
+// Ping is mocked out for Wasm.
+func (c *Conn) Ping(ctx context.Context) error {
+ return nil
+}
+
+// Write writes a message of the given type to the connection.
+// Always non blocking.
+func (c *Conn) Write(ctx context.Context, typ MessageType, p []byte) error {
+ err := c.write(ctx, typ, p)
+ if err != nil {
+ // Have to ensure the WebSocket is closed after a write error
+ // to match the Go API. It can only error if the message type
+ // is unexpected or the passed bytes contain invalid UTF-8 for
+ // MessageText.
+ err := fmt.Errorf("failed to write: %w", err)
+ c.setCloseErr(err)
+ c.closeWithInternal()
+ return err
+ }
+ return nil
+}
+
+func (c *Conn) write(ctx context.Context, typ MessageType, p []byte) error {
+ if c.isClosed() {
+ return c.closeErr
+ }
+ switch typ {
+ case MessageBinary:
+ return c.ws.SendBytes(p)
+ case MessageText:
+ return c.ws.SendText(string(p))
+ default:
+ return fmt.Errorf("unexpected message type: %v", typ)
+ }
+}
+
+// Close closes the WebSocket with the given code and reason.
+// It will wait until the peer responds with a close frame
+// or the connection is closed.
+// It thus performs the full WebSocket close handshake.
+func (c *Conn) Close(code StatusCode, reason string) error {
+ err := c.exportedClose(code, reason)
+ if err != nil {
+ return fmt.Errorf("failed to close WebSocket: %w", err)
+ }
+ return nil
+}
+
+func (c *Conn) exportedClose(code StatusCode, reason string) error {
+ c.closingMu.Lock()
+ defer c.closingMu.Unlock()
+
+ ce := fmt.Errorf("sent close: %w", CloseError{
+ Code: code,
+ Reason: reason,
+ })
+
+ if c.isClosed() {
+ return fmt.Errorf("tried to close with %q but connection already closed: %w", ce, c.closeErr)
+ }
+
+ c.setCloseErr(ce)
+ err := c.ws.Close(int(code), reason)
+ if err != nil {
+ return err
+ }
+
+ <-c.closed
+ if !c.closeWasClean {
+ return c.closeErr
+ }
+ return nil
+}
+
+// Subprotocol returns the negotiated subprotocol.
+// An empty string means the default protocol.
+func (c *Conn) Subprotocol() string {
+ return c.ws.Subprotocol()
+}
+
+// DialOptions represents the options available to pass to Dial.
+type DialOptions struct {
+ // Subprotocols lists the subprotocols to negotiate with the server.
+ Subprotocols []string
+}
+
+// Dial creates a new WebSocket connection to the given url with the given options.
+// The passed context bounds the maximum time spent waiting for the connection to open.
+// The returned *http.Response is always nil or a mock. It's only in the signature
+// to match the core API.
+func Dial(ctx context.Context, url string, opts *DialOptions) (*Conn, *http.Response, error) {
+ c, resp, err := dial(ctx, url, opts)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to WebSocket dial %q: %w", url, err)
+ }
+ return c, resp, nil
+}
+
+func dial(ctx context.Context, url string, opts *DialOptions) (*Conn, *http.Response, error) {
+ if opts == nil {
+ opts = &DialOptions{}
+ }
+
+ url = strings.Replace(url, "http://", "ws://", 1)
+ url = strings.Replace(url, "https://", "wss://", 1)
+
+ ws, err := wsjs.New(url, opts.Subprotocols)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ c := &Conn{
+ ws: ws,
+ }
+ c.init()
+
+ opench := make(chan struct{})
+ releaseOpen := ws.OnOpen(func(e js.Value) {
+ close(opench)
+ })
+ defer releaseOpen()
+
+ select {
+ case <-ctx.Done():
+ c.Close(StatusPolicyViolation, "dial timed out")
+ return nil, nil, ctx.Err()
+ case <-opench:
+ return c, &http.Response{
+ StatusCode: http.StatusSwitchingProtocols,
+ }, nil
+ case <-c.closed:
+ return nil, nil, c.closeErr
+ }
+}
+
+// Reader attempts to read a message from the connection.
+// The maximum time spent waiting is bounded by the context.
+func (c *Conn) Reader(ctx context.Context) (MessageType, io.Reader, error) {
+ typ, p, err := c.Read(ctx)
+ if err != nil {
+ return 0, nil, err
+ }
+ return typ, bytes.NewReader(p), nil
+}
+
+// Writer returns a writer to write a WebSocket data message to the connection.
+// It buffers the entire message in memory and then sends it when the writer
+// is closed.
+func (c *Conn) Writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) {
+ return writer{
+ c: c,
+ ctx: ctx,
+ typ: typ,
+ b: bpool.Get(),
+ }, nil
+}
+
+type writer struct {
+ closed bool
+
+ c *Conn
+ ctx context.Context
+ typ MessageType
+
+ b *bytes.Buffer
+}
+
+func (w writer) Write(p []byte) (int, error) {
+ if w.closed {
+ return 0, errors.New("cannot write to closed writer")
+ }
+ n, err := w.b.Write(p)
+ if err != nil {
+ return n, fmt.Errorf("failed to write message: %w", err)
+ }
+ return n, nil
+}
+
+func (w writer) Close() error {
+ if w.closed {
+ return errors.New("cannot close closed writer")
+ }
+ w.closed = true
+ defer bpool.Put(w.b)
+
+ err := w.c.Write(w.ctx, w.typ, w.b.Bytes())
+ if err != nil {
+ return fmt.Errorf("failed to close writer: %w", err)
+ }
+ return nil
+}
+
+// CloseRead implements *Conn.CloseRead for wasm.
+func (c *Conn) CloseRead(ctx context.Context) context.Context {
+ c.isReadClosed.Store(1)
+
+ ctx, cancel := context.WithCancel(ctx)
+ go func() {
+ defer cancel()
+ c.read(ctx)
+ c.Close(StatusPolicyViolation, "unexpected data message")
+ }()
+ return ctx
+}
+
+// SetReadLimit implements *Conn.SetReadLimit for wasm.
+func (c *Conn) SetReadLimit(n int64) {
+ c.msgReadLimit.Store(n)
+}
+
+func (c *Conn) setCloseErr(err error) {
+ c.closeErrOnce.Do(func() {
+ c.closeErr = fmt.Errorf("WebSocket closed: %w", err)
+ })
+}
+
+func (c *Conn) isClosed() bool {
+ select {
+ case <-c.closed:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/ql/test/query-tests/Security/CWE-918/websocket.go b/ql/test/query-tests/Security/CWE-918/websocket.go
new file mode 100644
index 00000000000..6b24be66840
--- /dev/null
+++ b/ql/test/query-tests/Security/CWE-918/websocket.go
@@ -0,0 +1,203 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "net/http"
+ "regexp"
+ "strings"
+
+ gobwas "github.com/gobwas/ws"
+ gorilla "github.com/gorilla/websocket"
+ sac "github.com/sacOO7/gowebsocket"
+ "golang.org/x/net/websocket"
+ nhooyr "nhooyr.io/websocket"
+)
+
+func main() {
+ // x net websocket Dial good
+ http.HandleFunc("/ex0", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+ origin := "http://localhost/"
+
+ untrustedInputTrimmed := strings.TrimRight(untrustedInput, "\n\r")
+ if untrustedInputTrimmed == "ws://localhost:12345/ws" {
+		// good as input is checked against a fixed set of URLs.
+ ws, _ := websocket.Dial(untrustedInputTrimmed, "", origin)
+ var msg = make([]byte, 512)
+ var n int
+ n, _ = ws.Read(msg)
+ fmt.Printf("Received: %s.\n", msg[:n])
+ }
+ })
+
+ // x net websocket DialConfig good
+ http.HandleFunc("/ex1", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+ origin := "http://localhost/"
+ // good as input is tested against a regex
+ if m, _ := regexp.MatchString("ws://localhost:12345/*", untrustedInput); m {
+ config, _ := websocket.NewConfig(untrustedInput, origin) // good
+ ws2, _ := websocket.DialConfig(config)
+ var msg = make([]byte, 512)
+ var n int
+ n, _ = ws2.Read(msg)
+ fmt.Printf("Received: %s.\n", msg[:n])
+ }
+ })
+
+ // x net websocket dial bad
+ http.HandleFunc("/ex2", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+ origin := "http://localhost/"
+
+ // bad as input is directly passed to dial function
+ ws, _ := websocket.Dial(untrustedInput, "", origin)
+ var msg = make([]byte, 512)
+ var n int
+ n, _ = ws.Read(msg)
+ fmt.Printf("Received: %s.\n", msg[:n])
+ })
+
+ // x net websocket dialConfig bad
+ http.HandleFunc("/ex3", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+ origin := "http://localhost/"
+ // bad as input is directly used
+		config, _ := websocket.NewConfig(untrustedInput, origin) // bad
+ ws2, _ := websocket.DialConfig(config)
+ var msg = make([]byte, 512)
+ var n int
+ n, _ = ws2.Read(msg)
+ fmt.Printf("Received: %s.\n", msg[:n])
+ })
+
+ // nhooyr websocket dial bad
+ http.HandleFunc("/ex4", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+ // bad as input is used directly
+ nhooyr.Dial(context.TODO(), untrustedInput, nil)
+ w.WriteHeader(500)
+ })
+
+ // nhooyr websocket dial good
+ http.HandleFunc("/ex5", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+		// good as input is tested against a regex
+ if m, _ := regexp.MatchString("ws://localhost:12345/*", untrustedInput); m {
+ nhooyr.Dial(context.TODO(), untrustedInput, nil)
+ }
+ })
+
+ // gorilla websocket Dialer.Dial bad
+ http.HandleFunc("/ex6", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+ dialer := gorilla.Dialer{}
+ dialer.Dial(untrustedInput, r.Header)
+ })
+
+ // gorilla websocket Dialer.Dial good
+ http.HandleFunc("/ex7", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+ if untrustedInput == "localhost" {
+
+ dialer := gorilla.Dialer{}
+ dialer.Dial(untrustedInput, r.Header)
+ }
+ })
+
+ // gorilla websocket Dialer.DialContext bad
+ http.HandleFunc("/ex8", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+ dialer := gorilla.Dialer{}
+ dialer.DialContext(context.TODO(), untrustedInput, r.Header)
+ })
+
+ // gorilla websocket Dialer.DialContext good
+ http.HandleFunc("/ex9", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+ if untrustedInput == "localhost" {
+
+ dialer := gorilla.Dialer{}
+ dialer.DialContext(context.TODO(), untrustedInput, r.Header)
+ }
+ })
+
+ // gobwas websocket Dial good
+ http.HandleFunc("/ex10", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+ if untrustedInput == "localhost" {
+ gobwas.Dial(context.TODO(), untrustedInput)
+ }
+ })
+
+ // gobwas websocket Dial bad
+ http.HandleFunc("/ex11", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+ gobwas.Dial(context.TODO(), untrustedInput)
+ })
+
+ // gobwas websocket Dialer.Dial bad
+ http.HandleFunc("/ex12", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+ dialer := gobwas.Dialer{}
+ dialer.Dial(context.TODO(), untrustedInput)
+ })
+
+ // gobwas websocket Dialer.Dial good
+ http.HandleFunc("/ex12", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+ if "localhost" == untrustedInput {
+ dialer := gobwas.Dialer{}
+ dialer.Dial(context.TODO(), untrustedInput)
+ }
+ })
+
+ // sac007 websocket New good
+ http.HandleFunc("/ex13", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+ if "localhost" == untrustedInput {
+ sac.New(untrustedInput)
+ }
+ })
+
+ // sac007 websocket BuildProxy good
+ http.HandleFunc("/ex14", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+ if "localhost" == untrustedInput {
+ sac.BuildProxy(untrustedInput)
+ }
+ })
+
+ // sac007 websocket BuildProxy bad
+ http.HandleFunc("/ex15", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+ sac.BuildProxy(untrustedInput)
+ })
+
+ // sac007 websocket New bad
+ http.HandleFunc("/ex16", func(w http.ResponseWriter, r *http.Request) {
+ untrustedInput := r.Referer()
+
+ sac.New(untrustedInput)
+ })
+
+ log.Println(http.ListenAndServe(":80", nil))
+
+}
From f2bbbe30e2205040da6e387fdc20c3debb001552 Mon Sep 17 00:00:00 2001
From: Sauyon Lee
Date: Fri, 15 May 2020 07:32:51 -0700
Subject: [PATCH 114/157] Stub WebSocket dependencies
---
.../semmle/go/dataflow/BarrierGuardUtil.qll | 10 +-
.../Websocket/DialFunction.expected | 18 +-
.../go/frameworks/Websocket/DialFunction.go | 6 +
.../semmle/go/frameworks/Websocket/go.mod | 1 -
.../vendor/github.com/gobwas/httphead/LICENSE | 21 -
.../github.com/gobwas/httphead/README.md | 63 -
.../github.com/gobwas/httphead/cookie.go | 200 ---
.../vendor/github.com/gobwas/httphead/head.go | 275 ----
.../github.com/gobwas/httphead/httphead.go | 331 -----
.../github.com/gobwas/httphead/lexer.go | 360 -----
.../github.com/gobwas/httphead/octet.go | 83 --
.../github.com/gobwas/httphead/option.go | 187 ---
.../github.com/gobwas/httphead/writer.go | 101 --
.../vendor/github.com/gobwas/pool/README.md | 107 --
.../vendor/github.com/gobwas/pool/generic.go | 87 --
.../gobwas/pool/internal/pmath/pmath.go | 65 -
.../vendor/github.com/gobwas/pool/option.go | 43 -
.../github.com/gobwas/pool/pbufio/pbufio.go | 106 --
.../gobwas/pool/pbufio/pbufio_go110.go | 13 -
.../gobwas/pool/pbufio/pbufio_go19.go | 27 -
.../vendor/github.com/gobwas/pool/pool.go | 25 -
.../vendor/github.com/gobwas/ws/.gitignore | 5 -
.../vendor/github.com/gobwas/ws/.travis.yml | 25 -
.../vendor/github.com/gobwas/ws/Makefile | 47 -
.../vendor/github.com/gobwas/ws/README.md | 360 -----
.../vendor/github.com/gobwas/ws/check.go | 145 --
.../vendor/github.com/gobwas/ws/cipher.go | 59 -
.../vendor/github.com/gobwas/ws/dialer.go | 556 --------
.../github.com/gobwas/ws/dialer_tls_go17.go | 35 -
.../github.com/gobwas/ws/dialer_tls_go18.go | 9 -
.../vendor/github.com/gobwas/ws/doc.go | 81 --
.../vendor/github.com/gobwas/ws/errors.go | 54 -
.../vendor/github.com/gobwas/ws/frame.go | 389 ------
.../vendor/github.com/gobwas/ws/http.go | 468 -------
.../vendor/github.com/gobwas/ws/nonce.go | 80 --
.../vendor/github.com/gobwas/ws/read.go | 147 --
.../vendor/github.com/gobwas/ws/server.go | 607 ---------
.../vendor/github.com/gobwas/ws/server_test.s | 0
.../vendor/github.com/gobwas/ws/stub.go | 54 +
.../vendor/github.com/gobwas/ws/util.go | 214 ---
.../vendor/github.com/gobwas/ws/write.go | 104 --
.../github.com/gorilla/websocket/.gitignore | 25 -
.../github.com/gorilla/websocket/AUTHORS | 9 -
.../github.com/gorilla/websocket/README.md | 64 -
.../github.com/gorilla/websocket/client.go | 395 ------
.../gorilla/websocket/client_clone.go | 16 -
.../gorilla/websocket/client_clone_legacy.go | 38 -
.../gorilla/websocket/compression.go | 148 --
.../github.com/gorilla/websocket/conn.go | 1201 -----------------
.../gorilla/websocket/conn_write.go | 15 -
.../gorilla/websocket/conn_write_legacy.go | 18 -
.../github.com/gorilla/websocket/doc.go | 227 ----
.../github.com/gorilla/websocket/go.mod | 3 -
.../github.com/gorilla/websocket/join.go | 42 -
.../github.com/gorilla/websocket/json.go | 60 -
.../github.com/gorilla/websocket/mask.go | 54 -
.../github.com/gorilla/websocket/mask_safe.go | 15 -
.../github.com/gorilla/websocket/prepared.go | 102 --
.../github.com/gorilla/websocket/proxy.go | 77 --
.../github.com/gorilla/websocket/server.go | 363 -----
.../github.com/gorilla/websocket/stub.go | 135 ++
.../github.com/gorilla/websocket/trace.go | 19 -
.../github.com/gorilla/websocket/trace_17.go | 12 -
.../github.com/gorilla/websocket/util.go | 283 ----
.../gorilla/websocket/x_net_proxy.go | 473 -------
.../github.com/klauspost/compress/LICENSE | 28 -
.../klauspost/compress/flate/deflate.go | 819 -----------
.../klauspost/compress/flate/dict_decoder.go | 184 ---
.../klauspost/compress/flate/fast_encoder.go | 254 ----
.../klauspost/compress/flate/gen_inflate.go | 274 ----
.../compress/flate/huffman_bit_writer.go | 911 -------------
.../klauspost/compress/flate/huffman_code.go | 363 -----
.../compress/flate/huffman_sortByFreq.go | 178 ---
.../compress/flate/huffman_sortByLiteral.go | 201 ---
.../klauspost/compress/flate/inflate.go | 1000 --------------
.../klauspost/compress/flate/inflate_gen.go | 922 -------------
.../klauspost/compress/flate/level1.go | 179 ---
.../klauspost/compress/flate/level2.go | 205 ---
.../klauspost/compress/flate/level3.go | 229 ----
.../klauspost/compress/flate/level4.go | 212 ---
.../klauspost/compress/flate/level5.go | 279 ----
.../klauspost/compress/flate/level6.go | 282 ----
.../klauspost/compress/flate/stateless.go | 297 ----
.../klauspost/compress/flate/token.go | 375 -----
.../github.com/sacOO7/go-logger/.gitignore | 12 -
.../github.com/sacOO7/go-logger/LICENSE | 21 -
.../github.com/sacOO7/go-logger/logging.go | 82 --
.../github.com/sacOO7/go-logger/loggingL.go | 13 -
.../sacOO7/go-logger/logginglevel_string.go | 16 -
.../github.com/sacOO7/gowebsocket/.gitignore | 21 -
.../github.com/sacOO7/gowebsocket/README.md | 157 ---
.../sacOO7/gowebsocket/gowebsocket.go | 186 ---
.../github.com/sacOO7/gowebsocket/stub.go | 58 +
.../github.com/sacOO7/gowebsocket/utils.go | 15 -
.../Websocket/vendor/golang.org/x/net/AUTHORS | 3 -
.../vendor/golang.org/x/net/CONTRIBUTORS | 3 -
.../Websocket/vendor/golang.org/x/net/PATENTS | 22 -
.../golang.org/x/net/{ => websocket}/LICENSE | 0
.../golang.org/x/net/websocket/client.go | 106 --
.../vendor/golang.org/x/net/websocket/dial.go | 24 -
.../vendor/golang.org/x/net/websocket/hybi.go | 583 --------
.../golang.org/x/net/websocket/server.go | 113 --
.../vendor/golang.org/x/net/websocket/stub.go | 120 ++
.../golang.org/x/net/websocket/websocket.go | 451 -------
.../frameworks/Websocket/vendor/modules.txt | 17 +-
.../vendor/nhooyr.io/websocket/.gitignore | 1 -
.../vendor/nhooyr.io/websocket/.travis.yml | 40 -
.../websocket/{LICENSE.txt => LICENSE} | 0
.../vendor/nhooyr.io/websocket/Makefile | 7 -
.../vendor/nhooyr.io/websocket/README.md | 132 --
.../vendor/nhooyr.io/websocket/accept.go | 365 -----
.../vendor/nhooyr.io/websocket/accept_js.go | 20 -
.../vendor/nhooyr.io/websocket/close.go | 76 --
.../vendor/nhooyr.io/websocket/close_notjs.go | 211 ---
.../vendor/nhooyr.io/websocket/compress.go | 39 -
.../nhooyr.io/websocket/compress_notjs.go | 181 ---
.../vendor/nhooyr.io/websocket/conn.go | 13 -
.../vendor/nhooyr.io/websocket/conn_notjs.go | 265 ----
.../vendor/nhooyr.io/websocket/dial.go | 287 ----
.../vendor/nhooyr.io/websocket/doc.go | 32 -
.../vendor/nhooyr.io/websocket/frame.go | 294 ----
.../vendor/nhooyr.io/websocket/go.mod | 14 -
.../websocket/internal/bpool/bpool.go | 24 -
.../nhooyr.io/websocket/internal/errd/wrap.go | 14 -
.../websocket/internal/wsjs/wsjs_js.go | 170 ---
.../nhooyr.io/websocket/internal/xsync/go.go | 25 -
.../websocket/internal/xsync/int64.go | 23 -
.../vendor/nhooyr.io/websocket/netconn.go | 166 ---
.../vendor/nhooyr.io/websocket/read.go | 471 -------
.../vendor/nhooyr.io/websocket/stringer.go | 91 --
.../vendor/nhooyr.io/websocket/stub.go | 76 ++
.../vendor/nhooyr.io/websocket/write.go | 386 ------
.../vendor/nhooyr.io/websocket/ws_js.go | 379 ------
.../Security/CWE-918/RequestForgery.expected | 70 +-
ql/test/query-tests/Security/CWE-918/go.mod | 1 -
ql/test/query-tests/Security/CWE-918/main | Bin 0 -> 6935277 bytes
.../vendor/github.com/gobwas/httphead/LICENSE | 21 -
.../github.com/gobwas/httphead/README.md | 63 -
.../github.com/gobwas/httphead/cookie.go | 200 ---
.../vendor/github.com/gobwas/httphead/head.go | 275 ----
.../github.com/gobwas/httphead/httphead.go | 331 -----
.../github.com/gobwas/httphead/lexer.go | 360 -----
.../github.com/gobwas/httphead/octet.go | 83 --
.../github.com/gobwas/httphead/option.go | 187 ---
.../github.com/gobwas/httphead/writer.go | 101 --
.../vendor/github.com/gobwas/pool/README.md | 107 --
.../vendor/github.com/gobwas/pool/generic.go | 87 --
.../gobwas/pool/internal/pmath/pmath.go | 65 -
.../vendor/github.com/gobwas/pool/option.go | 43 -
.../github.com/gobwas/pool/pbufio/pbufio.go | 106 --
.../gobwas/pool/pbufio/pbufio_go110.go | 13 -
.../gobwas/pool/pbufio/pbufio_go19.go | 27 -
.../vendor/github.com/gobwas/pool/pool.go | 25 -
.../vendor/github.com/gobwas/ws/.gitignore | 5 -
.../vendor/github.com/gobwas/ws/.travis.yml | 25 -
.../vendor/github.com/gobwas/ws/Makefile | 47 -
.../vendor/github.com/gobwas/ws/README.md | 360 -----
.../vendor/github.com/gobwas/ws/check.go | 145 --
.../vendor/github.com/gobwas/ws/cipher.go | 59 -
.../vendor/github.com/gobwas/ws/dialer.go | 556 --------
.../github.com/gobwas/ws/dialer_tls_go17.go | 35 -
.../github.com/gobwas/ws/dialer_tls_go18.go | 9 -
.../vendor/github.com/gobwas/ws/doc.go | 81 --
.../vendor/github.com/gobwas/ws/errors.go | 54 -
.../vendor/github.com/gobwas/ws/frame.go | 389 ------
.../vendor/github.com/gobwas/ws/http.go | 468 -------
.../vendor/github.com/gobwas/ws/nonce.go | 80 --
.../vendor/github.com/gobwas/ws/read.go | 147 --
.../vendor/github.com/gobwas/ws/server.go | 607 ---------
.../vendor/github.com/gobwas/ws/server_test.s | 0
.../vendor/github.com/gobwas/ws/stub.go | 54 +
.../vendor/github.com/gobwas/ws/util.go | 214 ---
.../vendor/github.com/gobwas/ws/write.go | 104 --
.../github.com/gorilla/websocket/.gitignore | 25 -
.../github.com/gorilla/websocket/AUTHORS | 9 -
.../github.com/gorilla/websocket/README.md | 64 -
.../github.com/gorilla/websocket/client.go | 395 ------
.../gorilla/websocket/client_clone.go | 16 -
.../gorilla/websocket/client_clone_legacy.go | 38 -
.../gorilla/websocket/compression.go | 148 --
.../github.com/gorilla/websocket/conn.go | 1201 -----------------
.../gorilla/websocket/conn_write.go | 15 -
.../gorilla/websocket/conn_write_legacy.go | 18 -
.../github.com/gorilla/websocket/doc.go | 227 ----
.../github.com/gorilla/websocket/go.mod | 3 -
.../github.com/gorilla/websocket/join.go | 42 -
.../github.com/gorilla/websocket/json.go | 60 -
.../github.com/gorilla/websocket/mask.go | 54 -
.../github.com/gorilla/websocket/mask_safe.go | 15 -
.../github.com/gorilla/websocket/prepared.go | 102 --
.../github.com/gorilla/websocket/proxy.go | 77 --
.../github.com/gorilla/websocket/server.go | 363 -----
.../github.com/gorilla/websocket/stub.go | 135 ++
.../github.com/gorilla/websocket/trace.go | 19 -
.../github.com/gorilla/websocket/trace_17.go | 12 -
.../github.com/gorilla/websocket/util.go | 283 ----
.../gorilla/websocket/x_net_proxy.go | 473 -------
.../github.com/klauspost/compress/LICENSE | 28 -
.../klauspost/compress/flate/deflate.go | 819 -----------
.../klauspost/compress/flate/dict_decoder.go | 184 ---
.../klauspost/compress/flate/fast_encoder.go | 254 ----
.../klauspost/compress/flate/gen_inflate.go | 274 ----
.../compress/flate/huffman_bit_writer.go | 911 -------------
.../klauspost/compress/flate/huffman_code.go | 363 -----
.../compress/flate/huffman_sortByFreq.go | 178 ---
.../compress/flate/huffman_sortByLiteral.go | 201 ---
.../klauspost/compress/flate/inflate.go | 1000 --------------
.../klauspost/compress/flate/inflate_gen.go | 922 -------------
.../klauspost/compress/flate/level1.go | 179 ---
.../klauspost/compress/flate/level2.go | 205 ---
.../klauspost/compress/flate/level3.go | 229 ----
.../klauspost/compress/flate/level4.go | 212 ---
.../klauspost/compress/flate/level5.go | 279 ----
.../klauspost/compress/flate/level6.go | 282 ----
.../klauspost/compress/flate/stateless.go | 297 ----
.../klauspost/compress/flate/token.go | 375 -----
.../github.com/sacOO7/go-logger/.gitignore | 12 -
.../github.com/sacOO7/go-logger/LICENSE | 21 -
.../github.com/sacOO7/go-logger/logging.go | 82 --
.../github.com/sacOO7/go-logger/loggingL.go | 13 -
.../sacOO7/go-logger/logginglevel_string.go | 16 -
.../github.com/sacOO7/gowebsocket/.gitignore | 21 -
.../github.com/sacOO7/gowebsocket/README.md | 157 ---
.../sacOO7/gowebsocket/gowebsocket.go | 186 ---
.../github.com/sacOO7/gowebsocket/stub.go | 58 +
.../github.com/sacOO7/gowebsocket/utils.go | 15 -
.../CWE-918/vendor/golang.org/x/net/AUTHORS | 3 -
.../vendor/golang.org/x/net/CONTRIBUTORS | 3 -
.../CWE-918/vendor/golang.org/x/net/PATENTS | 22 -
.../golang.org/x/net/{ => websocket}/LICENSE | 0
.../golang.org/x/net/websocket/client.go | 106 --
.../vendor/golang.org/x/net/websocket/dial.go | 24 -
.../vendor/golang.org/x/net/websocket/hybi.go | 583 --------
.../golang.org/x/net/websocket/server.go | 113 --
.../vendor/golang.org/x/net/websocket/stub.go | 120 ++
.../golang.org/x/net/websocket/websocket.go | 451 -------
.../Security/CWE-918/vendor/modules.txt | 17 +-
.../vendor/nhooyr.io/websocket/.gitignore | 1 -
.../vendor/nhooyr.io/websocket/.travis.yml | 40 -
.../websocket/{LICENSE.txt => LICENSE} | 0
.../vendor/nhooyr.io/websocket/Makefile | 7 -
.../vendor/nhooyr.io/websocket/README.md | 132 --
.../vendor/nhooyr.io/websocket/accept.go | 365 -----
.../vendor/nhooyr.io/websocket/accept_js.go | 20 -
.../vendor/nhooyr.io/websocket/close.go | 76 --
.../vendor/nhooyr.io/websocket/close_notjs.go | 211 ---
.../vendor/nhooyr.io/websocket/compress.go | 39 -
.../nhooyr.io/websocket/compress_notjs.go | 181 ---
.../vendor/nhooyr.io/websocket/conn.go | 13 -
.../vendor/nhooyr.io/websocket/conn_notjs.go | 265 ----
.../vendor/nhooyr.io/websocket/dial.go | 287 ----
.../CWE-918/vendor/nhooyr.io/websocket/doc.go | 32 -
.../vendor/nhooyr.io/websocket/frame.go | 294 ----
.../CWE-918/vendor/nhooyr.io/websocket/go.mod | 14 -
.../websocket/internal/bpool/bpool.go | 24 -
.../nhooyr.io/websocket/internal/errd/wrap.go | 14 -
.../websocket/internal/wsjs/wsjs_js.go | 170 ---
.../nhooyr.io/websocket/internal/xsync/go.go | 25 -
.../websocket/internal/xsync/int64.go | 23 -
.../vendor/nhooyr.io/websocket/netconn.go | 166 ---
.../vendor/nhooyr.io/websocket/read.go | 471 -------
.../vendor/nhooyr.io/websocket/stringer.go | 91 --
.../vendor/nhooyr.io/websocket/stub.go | 76 ++
.../vendor/nhooyr.io/websocket/write.go | 386 ------
.../vendor/nhooyr.io/websocket/ws_js.go | 379 ------
.../query-tests/Security/CWE-918/websocket.go | 8 +-
266 files changed, 950 insertions(+), 43852 deletions(-)
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/LICENSE
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/README.md
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/cookie.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/head.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/httphead.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/lexer.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/octet.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/option.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/writer.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/README.md
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/generic.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/internal/pmath/pmath.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/option.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio_go110.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio_go19.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pool.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/.gitignore
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/.travis.yml
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/Makefile
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/README.md
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/check.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/cipher.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer_tls_go17.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer_tls_go18.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/doc.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/errors.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/frame.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/http.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/nonce.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/read.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/server.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/server_test.s
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/stub.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/util.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/write.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/.gitignore
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/AUTHORS
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/README.md
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client_clone.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client_clone_legacy.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/compression.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn_write.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn_write_legacy.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/doc.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/go.mod
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/join.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/json.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/mask.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/mask_safe.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/prepared.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/proxy.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/server.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/stub.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/trace.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/trace_17.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/util.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/x_net_proxy.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/LICENSE
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/deflate.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/dict_decoder.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/fast_encoder.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/gen_inflate.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_code.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/inflate.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/inflate_gen.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level1.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level2.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level3.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level4.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level5.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level6.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/stateless.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/token.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/.gitignore
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/LICENSE
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/logging.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/loggingL.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/logginglevel_string.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/.gitignore
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/README.md
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/gowebsocket.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/stub.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/utils.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/AUTHORS
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/CONTRIBUTORS
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/PATENTS
rename ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/{ => websocket}/LICENSE (100%)
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/client.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/dial.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/hybi.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/server.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/stub.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/websocket.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/.gitignore
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/.travis.yml
rename ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/{LICENSE.txt => LICENSE} (100%)
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/Makefile
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/README.md
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/accept.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/accept_js.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/close.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/close_notjs.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/compress.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/compress_notjs.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/conn.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/conn_notjs.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/dial.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/doc.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/frame.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/go.mod
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/bpool/bpool.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/errd/wrap.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/xsync/go.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/xsync/int64.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/netconn.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/read.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/stringer.go
create mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/stub.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/write.go
delete mode 100644 ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/ws_js.go
create mode 100755 ql/test/query-tests/Security/CWE-918/main
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/LICENSE
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/README.md
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/cookie.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/head.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/httphead.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/lexer.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/octet.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/option.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/httphead/writer.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/README.md
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/generic.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/internal/pmath/pmath.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/option.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/pbufio/pbufio.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/pbufio/pbufio_go110.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/pbufio/pbufio_go19.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/pool/pool.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/.gitignore
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/.travis.yml
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/Makefile
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/README.md
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/check.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/cipher.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/dialer.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/dialer_tls_go17.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/dialer_tls_go18.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/doc.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/errors.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/frame.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/http.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/nonce.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/read.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/server.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/server_test.s
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/stub.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/util.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gobwas/ws/write.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/.gitignore
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/AUTHORS
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/README.md
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/client.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/client_clone.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/client_clone_legacy.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/compression.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/conn.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/conn_write.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/conn_write_legacy.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/doc.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/go.mod
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/join.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/json.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/mask.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/mask_safe.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/prepared.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/proxy.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/server.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/stub.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/trace.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/trace_17.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/util.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/gorilla/websocket/x_net_proxy.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/LICENSE
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/deflate.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/dict_decoder.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/fast_encoder.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/gen_inflate.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/huffman_code.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/inflate.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/inflate_gen.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level1.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level2.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level3.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level4.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level5.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/level6.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/stateless.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/klauspost/compress/flate/token.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/.gitignore
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/LICENSE
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/logging.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/loggingL.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/go-logger/logginglevel_string.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/.gitignore
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/README.md
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/gowebsocket.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/stub.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/github.com/sacOO7/gowebsocket/utils.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/AUTHORS
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/CONTRIBUTORS
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/PATENTS
rename ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/{ => websocket}/LICENSE (100%)
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/client.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/dial.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/hybi.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/server.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/stub.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/golang.org/x/net/websocket/websocket.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/.gitignore
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/.travis.yml
rename ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/{LICENSE.txt => LICENSE} (100%)
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/Makefile
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/README.md
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/accept.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/accept_js.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/close.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/close_notjs.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/compress.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/compress_notjs.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/conn.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/conn_notjs.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/dial.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/doc.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/frame.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/go.mod
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/bpool/bpool.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/errd/wrap.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/xsync/go.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/internal/xsync/int64.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/netconn.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/read.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/stringer.go
create mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/stub.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/write.go
delete mode 100644 ql/test/query-tests/Security/CWE-918/vendor/nhooyr.io/websocket/ws_js.go
diff --git a/ql/src/semmle/go/dataflow/BarrierGuardUtil.qll b/ql/src/semmle/go/dataflow/BarrierGuardUtil.qll
index c94307dd1ae..0dfbb5dc046 100644
--- a/ql/src/semmle/go/dataflow/BarrierGuardUtil.qll
+++ b/ql/src/semmle/go/dataflow/BarrierGuardUtil.qll
@@ -25,7 +25,7 @@ class RedirectCheckBarrierGuard extends DataFlow::BarrierGuard, DataFlow::CallNo
* An equality check comparing a data-flow node against a constant string, considered as
* a barrier guard for sanitizing untrusted URLs.
*
- * Additionally, a check comparing `url.Hostname()` against a constant string is also
+ * Additionally, a check comparing `url.Hostname()` against a constant string is also
* considered a barrier guard for `url`.
*/
class UrlCheck extends DataFlow::BarrierGuard, DataFlow::EqualityTestNode {
@@ -40,11 +40,11 @@ class UrlCheck extends DataFlow::BarrierGuard, DataFlow::EqualityTestNode {
mc.getTarget().getName() = "Hostname" and
url = mc.getReceiver()
)
- }
+ )
+ }
- override predicate checks(Expr e, boolean outcome) {
- e = url.asExpr() and outcome = this.getPolarity()
- }
+ override predicate checks(Expr e, boolean outcome) {
+ e = url.asExpr() and outcome = this.getPolarity()
}
}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/DialFunction.expected b/ql/test/library-tests/semmle/go/frameworks/Websocket/DialFunction.expected
index 80e2b9058de..e714b20e5d6 100644
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/DialFunction.expected
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/DialFunction.expected
@@ -1,9 +1,9 @@
-| DialFunction.go:19:11:19:52 | call to Dial | DialFunction.go:19:26:19:39 | untrustedInput |
-| DialFunction.go:22:12:22:39 | call to DialConfig | DialFunction.go:21:35:21:48 | untrustedInput |
-| DialFunction.go:24:2:24:49 | call to Dial | DialFunction.go:24:30:24:43 | untrustedInput |
-| DialFunction.go:27:2:27:38 | call to Dial | DialFunction.go:27:14:27:27 | untrustedInput |
-| DialFunction.go:29:2:29:61 | call to DialContext | DialFunction.go:29:37:29:50 | untrustedInput |
-| DialFunction.go:31:2:31:44 | call to Dial | DialFunction.go:31:30:31:43 | untrustedInput |
-| DialFunction.go:34:2:34:45 | call to Dial | DialFunction.go:34:31:34:44 | untrustedInput |
-| DialFunction.go:36:2:36:31 | call to BuildProxy | DialFunction.go:36:17:36:30 | untrustedInput |
-| DialFunction.go:37:2:37:24 | call to New | DialFunction.go:37:10:37:23 | untrustedInput |
+| DialFunction.go:25:11:25:52 | call to Dial | DialFunction.go:25:26:25:39 | untrustedInput |
+| DialFunction.go:28:12:28:39 | call to DialConfig | DialFunction.go:27:35:27:48 | untrustedInput |
+| DialFunction.go:30:2:30:49 | call to Dial | DialFunction.go:30:30:30:43 | untrustedInput |
+| DialFunction.go:33:2:33:38 | call to Dial | DialFunction.go:33:14:33:27 | untrustedInput |
+| DialFunction.go:35:2:35:61 | call to DialContext | DialFunction.go:35:37:35:50 | untrustedInput |
+| DialFunction.go:37:2:37:44 | call to Dial | DialFunction.go:37:30:37:43 | untrustedInput |
+| DialFunction.go:40:2:40:45 | call to Dial | DialFunction.go:40:31:40:44 | untrustedInput |
+| DialFunction.go:42:2:42:31 | call to BuildProxy | DialFunction.go:42:17:42:30 | untrustedInput |
+| DialFunction.go:43:2:43:24 | call to New | DialFunction.go:43:10:43:23 | untrustedInput |
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/DialFunction.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/DialFunction.go
index ed59e3a82cd..520bd08f945 100644
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/DialFunction.go
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/DialFunction.go
@@ -1,5 +1,11 @@
package main
+//go:generate depstubber -vendor github.com/gobwas/ws Dialer Dial
+//go:generate depstubber -vendor github.com/gorilla/websocket Dialer
+//go:generate depstubber -vendor github.com/sacOO7/gowebsocket "" New,BuildProxy
+//go:generate depstubber -vendor golang.org/x/net/websocket "" Dial,NewConfig,DialConfig
+//go:generate depstubber -vendor nhooyr.io/websocket "" Dial
+
import (
"context"
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/go.mod b/ql/test/library-tests/semmle/go/frameworks/Websocket/go.mod
index 5f614a3d1d3..ce6c493a190 100644
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/go.mod
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/go.mod
@@ -5,7 +5,6 @@ go 1.14
require (
github.com/gobwas/ws v1.0.3
github.com/gorilla/websocket v1.4.2
- github.com/sacOO7/go-logger v0.0.0-20180719173527-9ac9add5a50d // indirect
github.com/sacOO7/gowebsocket v0.0.0-20180719182212-1436bb906a4e
golang.org/x/net v0.0.0-20200421231249-e086a090c8fd
nhooyr.io/websocket v1.8.5
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/LICENSE b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/LICENSE
deleted file mode 100644
index 274431766fa..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2017 Sergey Kamardin
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/README.md b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/README.md
deleted file mode 100644
index 67a97fdbe92..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# httphead.[go](https://golang.org)
-
-[![GoDoc][godoc-image]][godoc-url]
-
-> Tiny HTTP header value parsing library in go.
-
-## Overview
-
-This library contains low-level functions for scanning HTTP RFC2616 compatible header value grammars.
-
-## Install
-
-```shell
- go get github.com/gobwas/httphead
-```
-
-## Example
-
-The example below shows how multiple-choise HTTP header value could be parsed with this library:
-
-```go
- options, ok := httphead.ParseOptions([]byte(`foo;bar=1,baz`), nil)
- fmt.Println(options, ok)
- // Output: [{foo map[bar:1]} {baz map[]}] true
-```
-
-The low-level example below shows how to optimize keys skipping and selection
-of some key:
-
-```go
- // The right part of full header line like:
- // X-My-Header: key;foo=bar;baz,key;baz
- header := []byte(`foo;a=0,foo;a=1,foo;a=2,foo;a=3`)
-
- // We want to search key "foo" with an "a" parameter that equal to "2".
- var (
- foo = []byte(`foo`)
- a = []byte(`a`)
- v = []byte(`2`)
- )
- var found bool
- httphead.ScanOptions(header, func(i int, key, param, value []byte) Control {
- if !bytes.Equal(key, foo) {
- return ControlSkip
- }
- if !bytes.Equal(param, a) {
- if bytes.Equal(value, v) {
- // Found it!
- found = true
- return ControlBreak
- }
- return ControlSkip
- }
- return ControlContinue
- })
-```
-
-For more usage examples please see [docs][godoc-url] or package tests.
-
-[godoc-image]: https://godoc.org/github.com/gobwas/httphead?status.svg
-[godoc-url]: https://godoc.org/github.com/gobwas/httphead
-[travis-image]: https://travis-ci.org/gobwas/httphead.svg?branch=master
-[travis-url]: https://travis-ci.org/gobwas/httphead
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/cookie.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/cookie.go
deleted file mode 100644
index 05c9a1fb6a1..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/cookie.go
+++ /dev/null
@@ -1,200 +0,0 @@
-package httphead
-
-import (
- "bytes"
-)
-
-// ScanCookie scans cookie pairs from data using DefaultCookieScanner.Scan()
-// method.
-func ScanCookie(data []byte, it func(key, value []byte) bool) bool {
- return DefaultCookieScanner.Scan(data, it)
-}
-
-// DefaultCookieScanner is a CookieScanner which is used by ScanCookie().
-// Note that it is intended to have the same behavior as http.Request.Cookies()
-// has.
-var DefaultCookieScanner = CookieScanner{}
-
-// CookieScanner contains options for scanning cookie pairs.
-// See https://tools.ietf.org/html/rfc6265#section-4.1.1
-type CookieScanner struct {
- // DisableNameValidation disables name validation of a cookie. If false,
- // only RFC2616 "tokens" are accepted.
- DisableNameValidation bool
-
- // DisableValueValidation disables value validation of a cookie. If false,
- // only RFC6265 "cookie-octet" characters are accepted.
- //
- // Note that Strict option also affects validation of a value.
- //
- // If Strict is false, then scanner begins to allow space and comma
- // characters inside the value for better compatibility with non standard
- // cookies implementations.
- DisableValueValidation bool
-
- // BreakOnPairError sets scanner to immediately return after first pair syntax
- // validation error.
- // If false, scanner will try to skip invalid pair bytes and go ahead.
- BreakOnPairError bool
-
- // Strict enables strict RFC6265 mode scanning. It affects name and value
- // validation, as also some other rules.
- // If false, it is intended to bring the same behavior as
- // http.Request.Cookies().
- Strict bool
-}
-
-// Scan maps data to name and value pairs. Usually data represents value of the
-// Cookie header.
-func (c CookieScanner) Scan(data []byte, it func(name, value []byte) bool) bool {
- lexer := &Scanner{data: data}
-
- const (
- statePair = iota
- stateBefore
- )
-
- state := statePair
-
- for lexer.Buffered() > 0 {
- switch state {
- case stateBefore:
- // Pairs separated by ";" and space, according to the RFC6265:
- // cookie-pair *( ";" SP cookie-pair )
- //
- // Cookie pairs MUST be separated by (";" SP). So our only option
- // here is to fail as syntax error.
- a, b := lexer.Peek2()
- if a != ';' {
- return false
- }
-
- state = statePair
-
- advance := 1
- if b == ' ' {
- advance++
- } else if c.Strict {
- return false
- }
-
- lexer.Advance(advance)
-
- case statePair:
- if !lexer.FetchUntil(';') {
- return false
- }
-
- var value []byte
- name := lexer.Bytes()
- if i := bytes.IndexByte(name, '='); i != -1 {
- value = name[i+1:]
- name = name[:i]
- } else if c.Strict {
- if !c.BreakOnPairError {
- goto nextPair
- }
- return false
- }
-
- if !c.Strict {
- trimLeft(name)
- }
- if !c.DisableNameValidation && !ValidCookieName(name) {
- if !c.BreakOnPairError {
- goto nextPair
- }
- return false
- }
-
- if !c.Strict {
- value = trimRight(value)
- }
- value = stripQuotes(value)
- if !c.DisableValueValidation && !ValidCookieValue(value, c.Strict) {
- if !c.BreakOnPairError {
- goto nextPair
- }
- return false
- }
-
- if !it(name, value) {
- return true
- }
-
- nextPair:
- state = stateBefore
- }
- }
-
- return true
-}
-
-// ValidCookieValue reports whether given value is a valid RFC6265
-// "cookie-octet" bytes.
-//
-// cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E
-// ; US-ASCII characters excluding CTLs,
-// ; whitespace DQUOTE, comma, semicolon,
-// ; and backslash
-//
-// Note that the false strict parameter disables errors on space 0x20 and comma
-// 0x2c. This could be useful to bring some compatibility with non-compliant
-// clients/servers in the real world.
-// It acts the same as standard library cookie parser if strict is false.
-func ValidCookieValue(value []byte, strict bool) bool {
- if len(value) == 0 {
- return true
- }
- for _, c := range value {
- switch c {
- case '"', ';', '\\':
- return false
- case ',', ' ':
- if strict {
- return false
- }
- default:
- if c <= 0x20 {
- return false
- }
- if c >= 0x7f {
- return false
- }
- }
- }
- return true
-}
-
-// ValidCookieName reports wheter given bytes is a valid RFC2616 "token" bytes.
-func ValidCookieName(name []byte) bool {
- for _, c := range name {
- if !OctetTypes[c].IsToken() {
- return false
- }
- }
- return true
-}
-
-func stripQuotes(bts []byte) []byte {
- if last := len(bts) - 1; last > 0 && bts[0] == '"' && bts[last] == '"' {
- return bts[1:last]
- }
- return bts
-}
-
-func trimLeft(p []byte) []byte {
- var i int
- for i < len(p) && OctetTypes[p[i]].IsSpace() {
- i++
- }
- return p[i:]
-}
-
-func trimRight(p []byte) []byte {
- j := len(p)
- for j > 0 && OctetTypes[p[j-1]].IsSpace() {
- j--
- }
- return p[:j]
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/head.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/head.go
deleted file mode 100644
index a50e907dd18..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/head.go
+++ /dev/null
@@ -1,275 +0,0 @@
-package httphead
-
-import (
- "bufio"
- "bytes"
-)
-
-// Version contains protocol major and minor version.
-type Version struct {
- Major int
- Minor int
-}
-
-// RequestLine contains parameters parsed from the first request line.
-type RequestLine struct {
- Method []byte
- URI []byte
- Version Version
-}
-
-// ResponseLine contains parameters parsed from the first response line.
-type ResponseLine struct {
- Version Version
- Status int
- Reason []byte
-}
-
-// SplitRequestLine splits given slice of bytes into three chunks without
-// parsing.
-func SplitRequestLine(line []byte) (method, uri, version []byte) {
- return split3(line, ' ')
-}
-
-// ParseRequestLine parses http request line like "GET / HTTP/1.0".
-func ParseRequestLine(line []byte) (r RequestLine, ok bool) {
- var i int
- for i = 0; i < len(line); i++ {
- c := line[i]
- if !OctetTypes[c].IsToken() {
- if i > 0 && c == ' ' {
- break
- }
- return
- }
- }
- if i == len(line) {
- return
- }
-
- var proto []byte
- r.Method = line[:i]
- r.URI, proto = split2(line[i+1:], ' ')
- if len(r.URI) == 0 {
- return
- }
- if major, minor, ok := ParseVersion(proto); ok {
- r.Version.Major = major
- r.Version.Minor = minor
- return r, true
- }
-
- return r, false
-}
-
-// SplitResponseLine splits given slice of bytes into three chunks without
-// parsing.
-func SplitResponseLine(line []byte) (version, status, reason []byte) {
- return split3(line, ' ')
-}
-
-// ParseResponseLine parses first response line into ResponseLine struct.
-func ParseResponseLine(line []byte) (r ResponseLine, ok bool) {
- var (
- proto []byte
- status []byte
- )
- proto, status, r.Reason = split3(line, ' ')
- if major, minor, ok := ParseVersion(proto); ok {
- r.Version.Major = major
- r.Version.Minor = minor
- } else {
- return r, false
- }
- if n, ok := IntFromASCII(status); ok {
- r.Status = n
- } else {
- return r, false
- }
- // TODO(gobwas): parse here r.Reason fot TEXT rule:
- // TEXT =
- return r, true
-}
-
-var (
- httpVersion10 = []byte("HTTP/1.0")
- httpVersion11 = []byte("HTTP/1.1")
- httpVersionPrefix = []byte("HTTP/")
-)
-
-// ParseVersion parses major and minor version of HTTP protocol.
-// It returns parsed values and true if parse is ok.
-func ParseVersion(bts []byte) (major, minor int, ok bool) {
- switch {
- case bytes.Equal(bts, httpVersion11):
- return 1, 1, true
- case bytes.Equal(bts, httpVersion10):
- return 1, 0, true
- case len(bts) < 8:
- return
- case !bytes.Equal(bts[:5], httpVersionPrefix):
- return
- }
-
- bts = bts[5:]
-
- dot := bytes.IndexByte(bts, '.')
- if dot == -1 {
- return
- }
- major, ok = IntFromASCII(bts[:dot])
- if !ok {
- return
- }
- minor, ok = IntFromASCII(bts[dot+1:])
- if !ok {
- return
- }
-
- return major, minor, true
-}
-
-// ReadLine reads line from br. It reads until '\n' and returns bytes without
-// '\n' or '\r\n' at the end.
-// It returns err if and only if line does not end in '\n'. Note that read
-// bytes returned in any case of error.
-//
-// It is much like the textproto/Reader.ReadLine() except the thing that it
-// returns raw bytes, instead of string. That is, it avoids copying bytes read
-// from br.
-//
-// textproto/Reader.ReadLineBytes() is also makes copy of resulting bytes to be
-// safe with future I/O operations on br.
-//
-// We could control I/O operations on br and do not need to make additional
-// copy for safety.
-func ReadLine(br *bufio.Reader) ([]byte, error) {
- var line []byte
- for {
- bts, err := br.ReadSlice('\n')
- if err == bufio.ErrBufferFull {
- // Copy bytes because next read will discard them.
- line = append(line, bts...)
- continue
- }
- // Avoid copy of single read.
- if line == nil {
- line = bts
- } else {
- line = append(line, bts...)
- }
- if err != nil {
- return line, err
- }
- // Size of line is at least 1.
- // In other case bufio.ReadSlice() returns error.
- n := len(line)
- // Cut '\n' or '\r\n'.
- if n > 1 && line[n-2] == '\r' {
- line = line[:n-2]
- } else {
- line = line[:n-1]
- }
- return line, nil
- }
-}
-
-// ParseHeaderLine parses HTTP header as key-value pair. It returns parsed
-// values and true if parse is ok.
-func ParseHeaderLine(line []byte) (k, v []byte, ok bool) {
- colon := bytes.IndexByte(line, ':')
- if colon == -1 {
- return
- }
- k = trim(line[:colon])
- for _, c := range k {
- if !OctetTypes[c].IsToken() {
- return nil, nil, false
- }
- }
- v = trim(line[colon+1:])
- return k, v, true
-}
-
-// IntFromASCII converts ascii encoded decimal numeric value from HTTP entities
-// to an integer.
-func IntFromASCII(bts []byte) (ret int, ok bool) {
- // ASCII numbers all start with the high-order bits 0011.
- // If you see that, and the next bits are 0-9 (0000 - 1001) you can grab those
- // bits and interpret them directly as an integer.
- var n int
- if n = len(bts); n < 1 {
- return 0, false
- }
- for i := 0; i < n; i++ {
- if bts[i]&0xf0 != 0x30 {
- return 0, false
- }
- ret += int(bts[i]&0xf) * pow(10, n-i-1)
- }
- return ret, true
-}
-
-const (
- toLower = 'a' - 'A' // for use with OR.
- toUpper = ^byte(toLower) // for use with AND.
-)
-
-// CanonicalizeHeaderKey is like standard textproto/CanonicalMIMEHeaderKey,
-// except that it operates with slice of bytes and modifies it inplace without
-// copying.
-func CanonicalizeHeaderKey(k []byte) {
- upper := true
- for i, c := range k {
- if upper && 'a' <= c && c <= 'z' {
- k[i] &= toUpper
- } else if !upper && 'A' <= c && c <= 'Z' {
- k[i] |= toLower
- }
- upper = c == '-'
- }
-}
-
-// pow for integers implementation.
-// See Donald Knuth, The Art of Computer Programming, Volume 2, Section 4.6.3
-func pow(a, b int) int {
- p := 1
- for b > 0 {
- if b&1 != 0 {
- p *= a
- }
- b >>= 1
- a *= a
- }
- return p
-}
-
-func split3(p []byte, sep byte) (p1, p2, p3 []byte) {
- a := bytes.IndexByte(p, sep)
- b := bytes.IndexByte(p[a+1:], sep)
- if a == -1 || b == -1 {
- return p, nil, nil
- }
- b += a + 1
- return p[:a], p[a+1 : b], p[b+1:]
-}
-
-func split2(p []byte, sep byte) (p1, p2 []byte) {
- i := bytes.IndexByte(p, sep)
- if i == -1 {
- return p, nil
- }
- return p[:i], p[i+1:]
-}
-
-func trim(p []byte) []byte {
- var i, j int
- for i = 0; i < len(p) && (p[i] == ' ' || p[i] == '\t'); {
- i++
- }
- for j = len(p); j > i && (p[j-1] == ' ' || p[j-1] == '\t'); {
- j--
- }
- return p[i:j]
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/httphead.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/httphead.go
deleted file mode 100644
index 2387e8033c9..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/httphead.go
+++ /dev/null
@@ -1,331 +0,0 @@
-// Package httphead contains utils for parsing HTTP and HTTP-grammar compatible
-// text protocols headers.
-//
-// That is, this package first aim is to bring ability to easily parse
-// constructions, described here https://tools.ietf.org/html/rfc2616#section-2
-package httphead
-
-import (
- "bytes"
- "strings"
-)
-
-// ScanTokens parses data in this form:
-//
-// list = 1#token
-//
-// It returns false if data is malformed.
-func ScanTokens(data []byte, it func([]byte) bool) bool {
- lexer := &Scanner{data: data}
-
- var ok bool
- for lexer.Next() {
- switch lexer.Type() {
- case ItemToken:
- ok = true
- if !it(lexer.Bytes()) {
- return true
- }
- case ItemSeparator:
- if !isComma(lexer.Bytes()) {
- return false
- }
- default:
- return false
- }
- }
-
- return ok && !lexer.err
-}
-
-// ParseOptions parses all header options and appends it to given slice of
-// Option. It returns flag of successful (wellformed input) parsing.
-//
-// Note that appended options are all consist of subslices of data. That is,
-// mutation of data will mutate appended options.
-func ParseOptions(data []byte, options []Option) ([]Option, bool) {
- var i int
- index := -1
- return options, ScanOptions(data, func(idx int, name, attr, val []byte) Control {
- if idx != index {
- index = idx
- i = len(options)
- options = append(options, Option{Name: name})
- }
- if attr != nil {
- options[i].Parameters.Set(attr, val)
- }
- return ControlContinue
- })
-}
-
-// SelectFlag encodes way of options selection.
-type SelectFlag byte
-
-// String represetns flag as string.
-func (f SelectFlag) String() string {
- var flags [2]string
- var n int
- if f&SelectCopy != 0 {
- flags[n] = "copy"
- n++
- }
- if f&SelectUnique != 0 {
- flags[n] = "unique"
- n++
- }
- return "[" + strings.Join(flags[:n], "|") + "]"
-}
-
-const (
- // SelectCopy causes selector to copy selected option before appending it
- // to resulting slice.
- // If SelectCopy flag is not passed to selector, then appended options will
- // contain sub-slices of the initial data.
- SelectCopy SelectFlag = 1 << iota
-
- // SelectUnique causes selector to append only not yet existing option to
- // resulting slice. Unique is checked by comparing option names.
- SelectUnique
-)
-
-// OptionSelector contains configuration for selecting Options from header value.
-type OptionSelector struct {
- // Check is a filter function that applied to every Option that possibly
- // could be selected.
- // If Check is nil all options will be selected.
- Check func(Option) bool
-
- // Flags contains flags for options selection.
- Flags SelectFlag
-
- // Alloc used to allocate slice of bytes when selector is configured with
- // SelectCopy flag. It will be called with number of bytes needed for copy
- // of single Option.
- // If Alloc is nil make is used.
- Alloc func(n int) []byte
-}
-
-// Select parses header data and appends it to given slice of Option.
-// It also returns flag of successful (wellformed input) parsing.
-func (s OptionSelector) Select(data []byte, options []Option) ([]Option, bool) {
- var current Option
- var has bool
- index := -1
-
- alloc := s.Alloc
- if alloc == nil {
- alloc = defaultAlloc
- }
- check := s.Check
- if check == nil {
- check = defaultCheck
- }
-
- ok := ScanOptions(data, func(idx int, name, attr, val []byte) Control {
- if idx != index {
- if has && check(current) {
- if s.Flags&SelectCopy != 0 {
- current = current.Copy(alloc(current.Size()))
- }
- options = append(options, current)
- has = false
- }
- if s.Flags&SelectUnique != 0 {
- for i := len(options) - 1; i >= 0; i-- {
- if bytes.Equal(options[i].Name, name) {
- return ControlSkip
- }
- }
- }
- index = idx
- current = Option{Name: name}
- has = true
- }
- if attr != nil {
- current.Parameters.Set(attr, val)
- }
-
- return ControlContinue
- })
- if has && check(current) {
- if s.Flags&SelectCopy != 0 {
- current = current.Copy(alloc(current.Size()))
- }
- options = append(options, current)
- }
-
- return options, ok
-}
-
-func defaultAlloc(n int) []byte { return make([]byte, n) }
-func defaultCheck(Option) bool { return true }
-
-// Control represents operation that scanner should perform.
-type Control byte
-
-const (
- // ControlContinue causes scanner to continue scan tokens.
- ControlContinue Control = iota
- // ControlBreak causes scanner to stop scan tokens.
- ControlBreak
- // ControlSkip causes scanner to skip current entity.
- ControlSkip
-)
-
-// ScanOptions parses data in this form:
-//
-// values = 1#value
-// value = token *( ";" param )
-// param = token [ "=" (token | quoted-string) ]
-//
-// It calls given callback with the index of the option, option itself and its
-// parameter (attribute and its value, both could be nil). Index is useful when
-// header contains multiple choises for the same named option.
-//
-// Given callback should return one of the defined Control* values.
-// ControlSkip means that passed key is not in caller's interest. That is, all
-// parameters of that key will be skipped.
-// ControlBreak means that no more keys and parameters should be parsed. That
-// is, it must break parsing immediately.
-// ControlContinue means that caller want to receive next parameter and its
-// value or the next key.
-//
-// It returns false if data is malformed.
-func ScanOptions(data []byte, it func(index int, option, attribute, value []byte) Control) bool {
- lexer := &Scanner{data: data}
-
- var ok bool
- var state int
- const (
- stateKey = iota
- stateParamBeforeName
- stateParamName
- stateParamBeforeValue
- stateParamValue
- )
-
- var (
- index int
- key, param, value []byte
- mustCall bool
- )
- for lexer.Next() {
- var (
- call bool
- growIndex int
- )
-
- t := lexer.Type()
- v := lexer.Bytes()
-
- switch t {
- case ItemToken:
- switch state {
- case stateKey, stateParamBeforeName:
- key = v
- state = stateParamBeforeName
- mustCall = true
- case stateParamName:
- param = v
- state = stateParamBeforeValue
- mustCall = true
- case stateParamValue:
- value = v
- state = stateParamBeforeName
- call = true
- default:
- return false
- }
-
- case ItemString:
- if state != stateParamValue {
- return false
- }
- value = v
- state = stateParamBeforeName
- call = true
-
- case ItemSeparator:
- switch {
- case isComma(v) && state == stateKey:
- // Nothing to do.
-
- case isComma(v) && state == stateParamBeforeName:
- state = stateKey
- // Make call only if we have not called this key yet.
- call = mustCall
- if !call {
- // If we have already called callback with the key
- // that just ended.
- index++
- } else {
- // Else grow the index after calling callback.
- growIndex = 1
- }
-
- case isComma(v) && state == stateParamBeforeValue:
- state = stateKey
- growIndex = 1
- call = true
-
- case isSemicolon(v) && state == stateParamBeforeName:
- state = stateParamName
-
- case isSemicolon(v) && state == stateParamBeforeValue:
- state = stateParamName
- call = true
-
- case isEquality(v) && state == stateParamBeforeValue:
- state = stateParamValue
-
- default:
- return false
- }
-
- default:
- return false
- }
-
- if call {
- switch it(index, key, param, value) {
- case ControlBreak:
- // User want to stop to parsing parameters.
- return true
-
- case ControlSkip:
- // User want to skip current param.
- state = stateKey
- lexer.SkipEscaped(',')
-
- case ControlContinue:
- // User is interested in rest of parameters.
- // Nothing to do.
-
- default:
- panic("unexpected control value")
- }
- ok = true
- param = nil
- value = nil
- mustCall = false
- index += growIndex
- }
- }
- if mustCall {
- ok = true
- it(index, key, param, value)
- }
-
- return ok && !lexer.err
-}
-
-func isComma(b []byte) bool {
- return len(b) == 1 && b[0] == ','
-}
-func isSemicolon(b []byte) bool {
- return len(b) == 1 && b[0] == ';'
-}
-func isEquality(b []byte) bool {
- return len(b) == 1 && b[0] == '='
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/lexer.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/lexer.go
deleted file mode 100644
index 729855ed0d3..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/lexer.go
+++ /dev/null
@@ -1,360 +0,0 @@
-package httphead
-
-import (
- "bytes"
-)
-
-// ItemType encodes type of the lexing token.
-type ItemType int
-
-const (
- // ItemUndef reports that token is undefined.
- ItemUndef ItemType = iota
- // ItemToken reports that token is RFC2616 token.
- ItemToken
- // ItemSeparator reports that token is RFC2616 separator.
- ItemSeparator
- // ItemString reports that token is RFC2616 quouted string.
- ItemString
- // ItemComment reports that token is RFC2616 comment.
- ItemComment
- // ItemOctet reports that token is octet slice.
- ItemOctet
-)
-
-// Scanner represents header tokens scanner.
-// See https://tools.ietf.org/html/rfc2616#section-2
-type Scanner struct {
- data []byte
- pos int
-
- itemType ItemType
- itemBytes []byte
-
- err bool
-}
-
-// NewScanner creates new RFC2616 data scanner.
-func NewScanner(data []byte) *Scanner {
- return &Scanner{data: data}
-}
-
-// Next scans for next token. It returns true on successful scanning, and false
-// on error or EOF.
-func (l *Scanner) Next() bool {
- c, ok := l.nextChar()
- if !ok {
- return false
- }
- switch c {
- case '"': // quoted-string;
- return l.fetchQuotedString()
-
- case '(': // comment;
- return l.fetchComment()
-
- case '\\', ')': // unexpected chars;
- l.err = true
- return false
-
- default:
- return l.fetchToken()
- }
-}
-
-// FetchUntil fetches ItemOctet from current scanner position to first
-// occurence of the c or to the end of the underlying data.
-func (l *Scanner) FetchUntil(c byte) bool {
- l.resetItem()
- if l.pos == len(l.data) {
- return false
- }
- return l.fetchOctet(c)
-}
-
-// Peek reads byte at current position without advancing it. On end of data it
-// returns 0.
-func (l *Scanner) Peek() byte {
- if l.pos == len(l.data) {
- return 0
- }
- return l.data[l.pos]
-}
-
-// Peek2 reads two first bytes at current position without advancing it.
-// If there not enough data it returs 0.
-func (l *Scanner) Peek2() (a, b byte) {
- if l.pos == len(l.data) {
- return 0, 0
- }
- if l.pos+1 == len(l.data) {
- return l.data[l.pos], 0
- }
- return l.data[l.pos], l.data[l.pos+1]
-}
-
-// Buffered reporst how many bytes there are left to scan.
-func (l *Scanner) Buffered() int {
- return len(l.data) - l.pos
-}
-
-// Advance moves current position index at n bytes. It returns true on
-// successful move.
-func (l *Scanner) Advance(n int) bool {
- l.pos += n
- if l.pos > len(l.data) {
- l.pos = len(l.data)
- return false
- }
- return true
-}
-
-// Skip skips all bytes until first occurence of c.
-func (l *Scanner) Skip(c byte) {
- if l.err {
- return
- }
- // Reset scanner state.
- l.resetItem()
-
- if i := bytes.IndexByte(l.data[l.pos:], c); i == -1 {
- // Reached the end of data.
- l.pos = len(l.data)
- } else {
- l.pos += i + 1
- }
-}
-
-// SkipEscaped skips all bytes until first occurence of non-escaped c.
-func (l *Scanner) SkipEscaped(c byte) {
- if l.err {
- return
- }
- // Reset scanner state.
- l.resetItem()
-
- if i := ScanUntil(l.data[l.pos:], c); i == -1 {
- // Reached the end of data.
- l.pos = len(l.data)
- } else {
- l.pos += i + 1
- }
-}
-
-// Type reports current token type.
-func (l *Scanner) Type() ItemType {
- return l.itemType
-}
-
-// Bytes returns current token bytes.
-func (l *Scanner) Bytes() []byte {
- return l.itemBytes
-}
-
-func (l *Scanner) nextChar() (byte, bool) {
- // Reset scanner state.
- l.resetItem()
-
- if l.err {
- return 0, false
- }
- l.pos += SkipSpace(l.data[l.pos:])
- if l.pos == len(l.data) {
- return 0, false
- }
- return l.data[l.pos], true
-}
-
-func (l *Scanner) resetItem() {
- l.itemType = ItemUndef
- l.itemBytes = nil
-}
-
-func (l *Scanner) fetchOctet(c byte) bool {
- i := l.pos
- if j := bytes.IndexByte(l.data[l.pos:], c); j == -1 {
- // Reached the end of data.
- l.pos = len(l.data)
- } else {
- l.pos += j
- }
-
- l.itemType = ItemOctet
- l.itemBytes = l.data[i:l.pos]
-
- return true
-}
-
-func (l *Scanner) fetchToken() bool {
- n, t := ScanToken(l.data[l.pos:])
- if n == -1 {
- l.err = true
- return false
- }
-
- l.itemType = t
- l.itemBytes = l.data[l.pos : l.pos+n]
- l.pos += n
-
- return true
-}
-
-func (l *Scanner) fetchQuotedString() (ok bool) {
- l.pos++
-
- n := ScanUntil(l.data[l.pos:], '"')
- if n == -1 {
- l.err = true
- return false
- }
-
- l.itemType = ItemString
- l.itemBytes = RemoveByte(l.data[l.pos:l.pos+n], '\\')
- l.pos += n + 1
-
- return true
-}
-
-func (l *Scanner) fetchComment() (ok bool) {
- l.pos++
-
- n := ScanPairGreedy(l.data[l.pos:], '(', ')')
- if n == -1 {
- l.err = true
- return false
- }
-
- l.itemType = ItemComment
- l.itemBytes = RemoveByte(l.data[l.pos:l.pos+n], '\\')
- l.pos += n + 1
-
- return true
-}
-
-// ScanUntil scans for first non-escaped character c in given data.
-// It returns index of matched c and -1 if c is not found.
-func ScanUntil(data []byte, c byte) (n int) {
- for {
- i := bytes.IndexByte(data[n:], c)
- if i == -1 {
- return -1
- }
- n += i
- if n == 0 || data[n-1] != '\\' {
- break
- }
- n++
- }
- return
-}
-
-// ScanPairGreedy scans for complete pair of opening and closing chars in greedy manner.
-// Note that first opening byte must not be present in data.
-func ScanPairGreedy(data []byte, open, close byte) (n int) {
- var m int
- opened := 1
- for {
- i := bytes.IndexByte(data[n:], close)
- if i == -1 {
- return -1
- }
- n += i
- // If found index is not escaped then it is the end.
- if n == 0 || data[n-1] != '\\' {
- opened--
- }
-
- for m < i {
- j := bytes.IndexByte(data[m:i], open)
- if j == -1 {
- break
- }
- m += j + 1
- opened++
- }
-
- if opened == 0 {
- break
- }
-
- n++
- m = n
- }
- return
-}
-
-// RemoveByte returns data without c. If c is not present in data it returns
-// the same slice. If not, it copies data without c.
-func RemoveByte(data []byte, c byte) []byte {
- j := bytes.IndexByte(data, c)
- if j == -1 {
- return data
- }
-
- n := len(data) - 1
-
- // If character is present, than allocate slice with n-1 capacity. That is,
- // resulting bytes could be at most n-1 length.
- result := make([]byte, n)
- k := copy(result, data[:j])
-
- for i := j + 1; i < n; {
- j = bytes.IndexByte(data[i:], c)
- if j != -1 {
- k += copy(result[k:], data[i:i+j])
- i = i + j + 1
- } else {
- k += copy(result[k:], data[i:])
- break
- }
- }
-
- return result[:k]
-}
-
-// SkipSpace skips spaces and lws-sequences from p.
-// It returns number ob bytes skipped.
-func SkipSpace(p []byte) (n int) {
- for len(p) > 0 {
- switch {
- case len(p) >= 3 &&
- p[0] == '\r' &&
- p[1] == '\n' &&
- OctetTypes[p[2]].IsSpace():
- p = p[3:]
- n += 3
- case OctetTypes[p[0]].IsSpace():
- p = p[1:]
- n++
- default:
- return
- }
- }
- return
-}
-
-// ScanToken scan for next token in p. It returns length of the token and its
-// type. It do not trim p.
-func ScanToken(p []byte) (n int, t ItemType) {
- if len(p) == 0 {
- return 0, ItemUndef
- }
-
- c := p[0]
- switch {
- case OctetTypes[c].IsSeparator():
- return 1, ItemSeparator
-
- case OctetTypes[c].IsToken():
- for n = 1; n < len(p); n++ {
- c := p[n]
- if !OctetTypes[c].IsToken() {
- break
- }
- }
- return n, ItemToken
-
- default:
- return -1, ItemUndef
- }
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/octet.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/octet.go
deleted file mode 100644
index 2a04cdd0909..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/octet.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package httphead
-
-// OctetType desribes character type.
-//
-// From the "Basic Rules" chapter of RFC2616
-// See https://tools.ietf.org/html/rfc2616#section-2.2
-//
-// OCTET =
-// CHAR =
-// UPALPHA =
-// LOALPHA =
-// ALPHA = UPALPHA | LOALPHA
-// DIGIT =
-// CTL =
-// CR =
-// LF =
-// SP =
-// HT =
-// <"> =
-// CRLF = CR LF
-// LWS = [CRLF] 1*( SP | HT )
-//
-// Many HTTP/1.1 header field values consist of words separated by LWS
-// or special characters. These special characters MUST be in a quoted
-// string to be used within a parameter value (as defined in section
-// 3.6).
-//
-// token = 1*
-// separators = "(" | ")" | "<" | ">" | "@"
-// | "," | ";" | ":" | "\" | <">
-// | "/" | "[" | "]" | "?" | "="
-// | "{" | "}" | SP | HT
-type OctetType byte
-
-// IsChar reports whether octet is CHAR.
-func (t OctetType) IsChar() bool { return t&octetChar != 0 }
-
-// IsControl reports whether octet is CTL.
-func (t OctetType) IsControl() bool { return t&octetControl != 0 }
-
-// IsSeparator reports whether octet is separator.
-func (t OctetType) IsSeparator() bool { return t&octetSeparator != 0 }
-
-// IsSpace reports whether octet is space (SP or HT).
-func (t OctetType) IsSpace() bool { return t&octetSpace != 0 }
-
-// IsToken reports whether octet is token.
-func (t OctetType) IsToken() bool { return t&octetToken != 0 }
-
-const (
- octetChar OctetType = 1 << iota
- octetControl
- octetSpace
- octetSeparator
- octetToken
-)
-
-// OctetTypes is a table of octets.
-var OctetTypes [256]OctetType
-
-func init() {
- for c := 32; c < 256; c++ {
- var t OctetType
- if c <= 127 {
- t |= octetChar
- }
- if 0 <= c && c <= 31 || c == 127 {
- t |= octetControl
- }
- switch c {
- case '(', ')', '<', '>', '@', ',', ';', ':', '"', '/', '[', ']', '?', '=', '{', '}', '\\':
- t |= octetSeparator
- case ' ', '\t':
- t |= octetSpace | octetSeparator
- }
-
- if t.IsChar() && !t.IsControl() && !t.IsSeparator() && !t.IsSpace() {
- t |= octetToken
- }
-
- OctetTypes[c] = t
- }
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/option.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/option.go
deleted file mode 100644
index 243be08c9a0..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/option.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package httphead
-
-import (
- "bytes"
- "sort"
-)
-
-// Option represents a header option.
-type Option struct {
- Name []byte
- Parameters Parameters
-}
-
-// Size returns number of bytes need to be allocated for use in opt.Copy.
-func (opt Option) Size() int {
- return len(opt.Name) + opt.Parameters.bytes
-}
-
-// Copy copies all underlying []byte slices into p and returns new Option.
-// Note that p must be at least of opt.Size() length.
-func (opt Option) Copy(p []byte) Option {
- n := copy(p, opt.Name)
- opt.Name = p[:n]
- opt.Parameters, p = opt.Parameters.Copy(p[n:])
- return opt
-}
-
-// String represents option as a string.
-func (opt Option) String() string {
- return "{" + string(opt.Name) + " " + opt.Parameters.String() + "}"
-}
-
-// NewOption creates named option with given parameters.
-func NewOption(name string, params map[string]string) Option {
- p := Parameters{}
- for k, v := range params {
- p.Set([]byte(k), []byte(v))
- }
- return Option{
- Name: []byte(name),
- Parameters: p,
- }
-}
-
-// Equal reports whether option is equal to b.
-func (opt Option) Equal(b Option) bool {
- if bytes.Equal(opt.Name, b.Name) {
- return opt.Parameters.Equal(b.Parameters)
- }
- return false
-}
-
-// Parameters represents option's parameters.
-type Parameters struct {
- pos int
- bytes int
- arr [8]pair
- dyn []pair
-}
-
-// Equal reports whether a equal to b.
-func (p Parameters) Equal(b Parameters) bool {
- switch {
- case p.dyn == nil && b.dyn == nil:
- case p.dyn != nil && b.dyn != nil:
- default:
- return false
- }
-
- ad, bd := p.data(), b.data()
- if len(ad) != len(bd) {
- return false
- }
-
- sort.Sort(pairs(ad))
- sort.Sort(pairs(bd))
-
- for i := 0; i < len(ad); i++ {
- av, bv := ad[i], bd[i]
- if !bytes.Equal(av.key, bv.key) || !bytes.Equal(av.value, bv.value) {
- return false
- }
- }
- return true
-}
-
-// Size returns number of bytes that needed to copy p.
-func (p *Parameters) Size() int {
- return p.bytes
-}
-
-// Copy copies all underlying []byte slices into dst and returns new
-// Parameters.
-// Note that dst must be at least of p.Size() length.
-func (p *Parameters) Copy(dst []byte) (Parameters, []byte) {
- ret := Parameters{
- pos: p.pos,
- bytes: p.bytes,
- }
- if p.dyn != nil {
- ret.dyn = make([]pair, len(p.dyn))
- for i, v := range p.dyn {
- ret.dyn[i], dst = v.copy(dst)
- }
- } else {
- for i, p := range p.arr {
- ret.arr[i], dst = p.copy(dst)
- }
- }
- return ret, dst
-}
-
-// Get returns value by key and flag about existence such value.
-func (p *Parameters) Get(key string) (value []byte, ok bool) {
- for _, v := range p.data() {
- if string(v.key) == key {
- return v.value, true
- }
- }
- return nil, false
-}
-
-// Set sets value by key.
-func (p *Parameters) Set(key, value []byte) {
- p.bytes += len(key) + len(value)
-
- if p.pos < len(p.arr) {
- p.arr[p.pos] = pair{key, value}
- p.pos++
- return
- }
-
- if p.dyn == nil {
- p.dyn = make([]pair, len(p.arr), len(p.arr)+1)
- copy(p.dyn, p.arr[:])
- }
- p.dyn = append(p.dyn, pair{key, value})
-}
-
-// ForEach iterates over parameters key-value pairs and calls cb for each one.
-func (p *Parameters) ForEach(cb func(k, v []byte) bool) {
- for _, v := range p.data() {
- if !cb(v.key, v.value) {
- break
- }
- }
-}
-
-// String represents parameters as a string.
-func (p *Parameters) String() (ret string) {
- ret = "["
- for i, v := range p.data() {
- if i > 0 {
- ret += " "
- }
- ret += string(v.key) + ":" + string(v.value)
- }
- return ret + "]"
-}
-
-func (p *Parameters) data() []pair {
- if p.dyn != nil {
- return p.dyn
- }
- return p.arr[:p.pos]
-}
-
-type pair struct {
- key, value []byte
-}
-
-func (p pair) copy(dst []byte) (pair, []byte) {
- n := copy(dst, p.key)
- p.key = dst[:n]
- m := n + copy(dst[n:], p.value)
- p.value = dst[n:m]
-
- dst = dst[m:]
-
- return p, dst
-}
-
-type pairs []pair
-
-func (p pairs) Len() int { return len(p) }
-func (p pairs) Less(a, b int) bool { return bytes.Compare(p[a].key, p[b].key) == -1 }
-func (p pairs) Swap(a, b int) { p[a], p[b] = p[b], p[a] }
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/writer.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/writer.go
deleted file mode 100644
index e5df3ddf404..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/httphead/writer.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package httphead
-
-import "io"
-
-var (
- comma = []byte{','}
- equality = []byte{'='}
- semicolon = []byte{';'}
- quote = []byte{'"'}
- escape = []byte{'\\'}
-)
-
-// WriteOptions write options list to the dest.
-// It uses the same form as {Scan,Parse}Options functions:
-// values = 1#value
-// value = token *( ";" param )
-// param = token [ "=" (token | quoted-string) ]
-//
-// It wraps valuse into the quoted-string sequence if it contains any
-// non-token characters.
-func WriteOptions(dest io.Writer, options []Option) (n int, err error) {
- w := writer{w: dest}
- for i, opt := range options {
- if i > 0 {
- w.write(comma)
- }
-
- writeTokenSanitized(&w, opt.Name)
-
- for _, p := range opt.Parameters.data() {
- w.write(semicolon)
- writeTokenSanitized(&w, p.key)
- if len(p.value) != 0 {
- w.write(equality)
- writeTokenSanitized(&w, p.value)
- }
- }
- }
- return w.result()
-}
-
-// writeTokenSanitized writes token as is or as quouted string if it contains
-// non-token characters.
-//
-// Note that is is not expects LWS sequnces be in s, cause LWS is used only as
-// header field continuation:
-// "A CRLF is allowed in the definition of TEXT only as part of a header field
-// continuation. It is expected that the folding LWS will be replaced with a
-// single SP before interpretation of the TEXT value."
-// See https://tools.ietf.org/html/rfc2616#section-2
-//
-// That is we sanitizing s for writing, so there could not be any header field
-// continuation.
-// That is any CRLF will be escaped as any other control characters not allowd in TEXT.
-func writeTokenSanitized(bw *writer, bts []byte) {
- var qt bool
- var pos int
- for i := 0; i < len(bts); i++ {
- c := bts[i]
- if !OctetTypes[c].IsToken() && !qt {
- qt = true
- bw.write(quote)
- }
- if OctetTypes[c].IsControl() || c == '"' {
- if !qt {
- qt = true
- bw.write(quote)
- }
- bw.write(bts[pos:i])
- bw.write(escape)
- bw.write(bts[i : i+1])
- pos = i + 1
- }
- }
- if !qt {
- bw.write(bts)
- } else {
- bw.write(bts[pos:])
- bw.write(quote)
- }
-}
-
-type writer struct {
- w io.Writer
- n int
- err error
-}
-
-func (w *writer) write(p []byte) {
- if w.err != nil {
- return
- }
- var n int
- n, w.err = w.w.Write(p)
- w.n += n
- return
-}
-
-func (w *writer) result() (int, error) {
- return w.n, w.err
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/README.md b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/README.md
deleted file mode 100644
index 45685581dae..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/README.md
+++ /dev/null
@@ -1,107 +0,0 @@
-# pool
-
-[![GoDoc][godoc-image]][godoc-url]
-
-> Tiny memory reuse helpers for Go.
-
-## generic
-
-Without use of subpackages, `pool` allows to reuse any struct distinguishable
-by size in generic way:
-
-```go
-package main
-
-import "github.com/gobwas/pool"
-
-func main() {
- x, n := pool.Get(100) // Returns object with size 128 or nil.
- if x == nil {
- // Create x somehow with knowledge that n is 128.
- }
- defer pool.Put(x, n)
-
- // Work with x.
-}
-```
-
-Pool allows you to pass specific options for constructing custom pool:
-
-```go
-package main
-
-import "github.com/gobwas/pool"
-
-func main() {
- p := pool.Custom(
- pool.WithLogSizeMapping(), // Will ceil size n passed to Get(n) to nearest power of two.
- pool.WithLogSizeRange(64, 512), // Will reuse objects in logarithmic range [64, 512].
- pool.WithSize(65536), // Will reuse object with size 65536.
- )
- x, n := p.Get(1000) // Returns nil and 1000 because mapped size 1000 => 1024 is not reusing by the pool.
- defer pool.Put(x, n) // Will not reuse x.
-
- // Work with x.
-}
-```
-
-Note that there are few non-generic pooling implementations inside subpackages.
-
-## pbytes
-
-Subpackage `pbytes` is intended for `[]byte` reuse.
-
-```go
-package main
-
-import "github.com/gobwas/pool/pbytes"
-
-func main() {
- bts := pbytes.GetCap(100) // Returns make([]byte, 0, 128).
- defer pbytes.Put(bts)
-
- // Work with bts.
-}
-```
-
-You can also create your own range for pooling:
-
-```go
-package main
-
-import "github.com/gobwas/pool/pbytes"
-
-func main() {
- // Reuse only slices whose capacity is 128, 256, 512 or 1024.
- pool := pbytes.New(128, 1024)
-
- bts := pool.GetCap(100) // Returns make([]byte, 0, 128).
- defer pool.Put(bts)
-
- // Work with bts.
-}
-```
-
-## pbufio
-
-Subpackage `pbufio` is intended for `*bufio.{Reader, Writer}` reuse.
-
-```go
-package main
-
-import "github.com/gobwas/pool/pbufio"
-
-func main() {
- bw := pbufio.GetWriter(os.Stdout, 100) // Returns bufio.NewWriterSize(128).
- defer pbufio.PutWriter(bw)
-
- // Work with bw.
-}
-```
-
-Like with `pbytes`, you can also create pool with custom reuse bounds.
-
-
-
-[godoc-image]: https://godoc.org/github.com/gobwas/pool?status.svg
-[godoc-url]: https://godoc.org/github.com/gobwas/pool
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/generic.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/generic.go
deleted file mode 100644
index d40b362458b..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/generic.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package pool
-
-import (
- "sync"
-
- "github.com/gobwas/pool/internal/pmath"
-)
-
-var DefaultPool = New(128, 65536)
-
-// Get pulls object whose generic size is at least of given size. It also
-// returns a real size of x for further pass to Put(). It returns -1 as real
-// size for nil x. Size >-1 does not mean that x is non-nil, so checks must be
-// done.
-//
-// Note that size could be ceiled to the next power of two.
-//
-// Get is a wrapper around DefaultPool.Get().
-func Get(size int) (interface{}, int) { return DefaultPool.Get(size) }
-
-// Put takes x and its size for future reuse.
-// Put is a wrapper around DefaultPool.Put().
-func Put(x interface{}, size int) { DefaultPool.Put(x, size) }
-
-// Pool contains logic of reusing objects distinguishable by size in generic
-// way.
-type Pool struct {
- pool map[int]*sync.Pool
- size func(int) int
-}
-
-// New creates new Pool that reuses objects which size is in logarithmic range
-// [min, max].
-//
-// Note that it is a shortcut for Custom() constructor with Options provided by
-// WithLogSizeMapping() and WithLogSizeRange(min, max) calls.
-func New(min, max int) *Pool {
- return Custom(
- WithLogSizeMapping(),
- WithLogSizeRange(min, max),
- )
-}
-
-// Custom creates new Pool with given options.
-func Custom(opts ...Option) *Pool {
- p := &Pool{
- pool: make(map[int]*sync.Pool),
- size: pmath.Identity,
- }
-
- c := (*poolConfig)(p)
- for _, opt := range opts {
- opt(c)
- }
-
- return p
-}
-
-// Get pulls object whose generic size is at least of given size.
-// It also returns a real size of x for further pass to Put() even if x is nil.
-// Note that size could be ceiled to the next power of two.
-func (p *Pool) Get(size int) (interface{}, int) {
- n := p.size(size)
- if pool := p.pool[n]; pool != nil {
- return pool.Get(), n
- }
- return nil, size
-}
-
-// Put takes x and its size for future reuse.
-func (p *Pool) Put(x interface{}, size int) {
- if pool := p.pool[size]; pool != nil {
- pool.Put(x)
- }
-}
-
-type poolConfig Pool
-
-// AddSize adds size n to the map.
-func (p *poolConfig) AddSize(n int) {
- p.pool[n] = new(sync.Pool)
-}
-
-// SetSizeMapping sets up incoming size mapping function.
-func (p *poolConfig) SetSizeMapping(size func(int) int) {
- p.size = size
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/internal/pmath/pmath.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/internal/pmath/pmath.go
deleted file mode 100644
index df152ed12a5..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/internal/pmath/pmath.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package pmath
-
-const (
- bitsize = 32 << (^uint(0) >> 63)
- maxint = int(1<<(bitsize-1) - 1)
- maxintHeadBit = 1 << (bitsize - 2)
-)
-
-// LogarithmicRange iterates from ceiled to power of two min to max,
-// calling cb on each iteration.
-func LogarithmicRange(min, max int, cb func(int)) {
- if min == 0 {
- min = 1
- }
- for n := CeilToPowerOfTwo(min); n <= max; n <<= 1 {
- cb(n)
- }
-}
-
-// IsPowerOfTwo reports whether given integer is a power of two.
-func IsPowerOfTwo(n int) bool {
- return n&(n-1) == 0
-}
-
-// Identity is identity.
-func Identity(n int) int {
- return n
-}
-
-// CeilToPowerOfTwo returns the least power of two integer value greater than
-// or equal to n.
-func CeilToPowerOfTwo(n int) int {
- if n&maxintHeadBit != 0 && n > maxintHeadBit {
- panic("argument is too large")
- }
- if n <= 2 {
- return n
- }
- n--
- n = fillBits(n)
- n++
- return n
-}
-
-// FloorToPowerOfTwo returns the greatest power of two integer value less than
-// or equal to n.
-func FloorToPowerOfTwo(n int) int {
- if n <= 2 {
- return n
- }
- n = fillBits(n)
- n >>= 1
- n++
- return n
-}
-
-func fillBits(n int) int {
- n |= n >> 1
- n |= n >> 2
- n |= n >> 4
- n |= n >> 8
- n |= n >> 16
- n |= n >> 32
- return n
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/option.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/option.go
deleted file mode 100644
index d6e42b70055..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/option.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package pool
-
-import "github.com/gobwas/pool/internal/pmath"
-
-// Option configures pool.
-type Option func(Config)
-
-// Config describes generic pool configuration.
-type Config interface {
- AddSize(n int)
- SetSizeMapping(func(int) int)
-}
-
-// WithSizeLogRange returns an Option that will add logarithmic range of
-// pooling sizes containing [min, max] values.
-func WithLogSizeRange(min, max int) Option {
- return func(c Config) {
- pmath.LogarithmicRange(min, max, func(n int) {
- c.AddSize(n)
- })
- }
-}
-
-// WithSize returns an Option that will add given pooling size to the pool.
-func WithSize(n int) Option {
- return func(c Config) {
- c.AddSize(n)
- }
-}
-
-func WithSizeMapping(sz func(int) int) Option {
- return func(c Config) {
- c.SetSizeMapping(sz)
- }
-}
-
-func WithLogSizeMapping() Option {
- return WithSizeMapping(pmath.CeilToPowerOfTwo)
-}
-
-func WithIdentitySizeMapping() Option {
- return WithSizeMapping(pmath.Identity)
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio.go
deleted file mode 100644
index d526bd80da8..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Package pbufio contains tools for pooling bufio.Reader and bufio.Writers.
-package pbufio
-
-import (
- "bufio"
- "io"
-
- "github.com/gobwas/pool"
-)
-
-var (
- DefaultWriterPool = NewWriterPool(256, 65536)
- DefaultReaderPool = NewReaderPool(256, 65536)
-)
-
-// GetWriter returns bufio.Writer whose buffer has at least size bytes.
-// Note that size could be ceiled to the next power of two.
-// GetWriter is a wrapper around DefaultWriterPool.Get().
-func GetWriter(w io.Writer, size int) *bufio.Writer { return DefaultWriterPool.Get(w, size) }
-
-// PutWriter takes bufio.Writer for future reuse.
-// It does not reuse bufio.Writer which underlying buffer size is not power of
-// PutWriter is a wrapper around DefaultWriterPool.Put().
-func PutWriter(bw *bufio.Writer) { DefaultWriterPool.Put(bw) }
-
-// GetReader returns bufio.Reader whose buffer has at least size bytes. It returns
-// its capacity for further pass to Put().
-// Note that size could be ceiled to the next power of two.
-// GetReader is a wrapper around DefaultReaderPool.Get().
-func GetReader(w io.Reader, size int) *bufio.Reader { return DefaultReaderPool.Get(w, size) }
-
-// PutReader takes bufio.Reader and its size for future reuse.
-// It does not reuse bufio.Reader if size is not power of two or is out of pool
-// min/max range.
-// PutReader is a wrapper around DefaultReaderPool.Put().
-func PutReader(bw *bufio.Reader) { DefaultReaderPool.Put(bw) }
-
-// WriterPool contains logic of *bufio.Writer reuse with various size.
-type WriterPool struct {
- pool *pool.Pool
-}
-
-// NewWriterPool creates new WriterPool that reuses writers which size is in
-// logarithmic range [min, max].
-func NewWriterPool(min, max int) *WriterPool {
- return &WriterPool{pool.New(min, max)}
-}
-
-// CustomWriterPool creates new WriterPool with given options.
-func CustomWriterPool(opts ...pool.Option) *WriterPool {
- return &WriterPool{pool.Custom(opts...)}
-}
-
-// Get returns bufio.Writer whose buffer has at least size bytes.
-func (wp *WriterPool) Get(w io.Writer, size int) *bufio.Writer {
- v, n := wp.pool.Get(size)
- if v != nil {
- bw := v.(*bufio.Writer)
- bw.Reset(w)
- return bw
- }
- return bufio.NewWriterSize(w, n)
-}
-
-// Put takes ownership of bufio.Writer for further reuse.
-func (wp *WriterPool) Put(bw *bufio.Writer) {
- // Should reset even if we do Reset() inside Get().
- // This is done to prevent locking underlying io.Writer from GC.
- bw.Reset(nil)
- wp.pool.Put(bw, writerSize(bw))
-}
-
-// ReaderPool contains logic of *bufio.Reader reuse with various size.
-type ReaderPool struct {
- pool *pool.Pool
-}
-
-// NewReaderPool creates new ReaderPool that reuses writers which size is in
-// logarithmic range [min, max].
-func NewReaderPool(min, max int) *ReaderPool {
- return &ReaderPool{pool.New(min, max)}
-}
-
-// CustomReaderPool creates new ReaderPool with given options.
-func CustomReaderPool(opts ...pool.Option) *ReaderPool {
- return &ReaderPool{pool.Custom(opts...)}
-}
-
-// Get returns bufio.Reader whose buffer has at least size bytes.
-func (rp *ReaderPool) Get(r io.Reader, size int) *bufio.Reader {
- v, n := rp.pool.Get(size)
- if v != nil {
- br := v.(*bufio.Reader)
- br.Reset(r)
- return br
- }
- return bufio.NewReaderSize(r, n)
-}
-
-// Put takes ownership of bufio.Reader for further reuse.
-func (rp *ReaderPool) Put(br *bufio.Reader) {
- // Should reset even if we do Reset() inside Get().
- // This is done to prevent locking underlying io.Reader from GC.
- br.Reset(nil)
- rp.pool.Put(br, readerSize(br))
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio_go110.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio_go110.go
deleted file mode 100644
index c736ae56e11..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio_go110.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build go1.10
-
-package pbufio
-
-import "bufio"
-
-func writerSize(bw *bufio.Writer) int {
- return bw.Size()
-}
-
-func readerSize(br *bufio.Reader) int {
- return br.Size()
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio_go19.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio_go19.go
deleted file mode 100644
index e71dd447d2a..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pbufio/pbufio_go19.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// +build !go1.10
-
-package pbufio
-
-import "bufio"
-
-func writerSize(bw *bufio.Writer) int {
- return bw.Available() + bw.Buffered()
-}
-
-// readerSize returns buffer size of the given buffered reader.
-// NOTE: current workaround implementation resets underlying io.Reader.
-func readerSize(br *bufio.Reader) int {
- br.Reset(sizeReader)
- br.ReadByte()
- n := br.Buffered() + 1
- br.Reset(nil)
- return n
-}
-
-var sizeReader optimisticReader
-
-type optimisticReader struct{}
-
-func (optimisticReader) Read(p []byte) (int, error) {
- return len(p), nil
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pool.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pool.go
deleted file mode 100644
index 1fe9e602fc5..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/pool/pool.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Package pool contains helpers for pooling structures distinguishable by
-// size.
-//
-// Quick example:
-//
-// import "github.com/gobwas/pool"
-//
-// func main() {
-// // Reuse objects in logarithmic range from 0 to 64 (0,1,2,4,6,8,16,32,64).
-// p := pool.New(0, 64)
-//
-// buf, n := p.Get(10) // Returns buffer with 16 capacity.
-// if buf == nil {
-// buf = bytes.NewBuffer(make([]byte, n))
-// }
-// defer p.Put(buf, n)
-//
-// // Work with buf.
-// }
-//
-// There are non-generic implementations for pooling:
-// - pool/pbytes for []byte reuse;
-// - pool/pbufio for *bufio.Reader and *bufio.Writer reuse;
-//
-package pool
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/.gitignore b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/.gitignore
deleted file mode 100644
index e3e2b1080d0..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-bin/
-reports/
-cpu.out
-mem.out
-ws.test
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/.travis.yml b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/.travis.yml
deleted file mode 100644
index cf74f1bee3c..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/.travis.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-sudo: required
-
-language: go
-
-services:
- - docker
-
-os:
- - linux
- - windows
-
-go:
- - 1.8.x
- - 1.9.x
- - 1.10.x
- - 1.11.x
- - 1.x
-
-install:
- - go get github.com/gobwas/pool
- - go get github.com/gobwas/httphead
-
-script:
- - if [ "$TRAVIS_OS_NAME" = "windows" ]; then go test ./...; fi
- - if [ "$TRAVIS_OS_NAME" = "linux" ]; then make test autobahn; fi
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/Makefile b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/Makefile
deleted file mode 100644
index 075e83c74bc..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/Makefile
+++ /dev/null
@@ -1,47 +0,0 @@
-BENCH ?=.
-BENCH_BASE?=master
-
-clean:
- rm -f bin/reporter
- rm -fr autobahn/report/*
-
-bin/reporter:
- go build -o bin/reporter ./autobahn
-
-bin/gocovmerge:
- go build -o bin/gocovmerge github.com/wadey/gocovmerge
-
-.PHONY: autobahn
-autobahn: clean bin/reporter
- ./autobahn/script/test.sh --build
- bin/reporter $(PWD)/autobahn/report/index.json
-
-test:
- go test -coverprofile=ws.coverage .
- go test -coverprofile=wsutil.coverage ./wsutil
-
-cover: bin/gocovmerge test autobahn
- bin/gocovmerge ws.coverage wsutil.coverage autobahn/report/server.coverage > total.coverage
-
-benchcmp: BENCH_BRANCH=$(shell git rev-parse --abbrev-ref HEAD)
-benchcmp: BENCH_OLD:=$(shell mktemp -t old.XXXX)
-benchcmp: BENCH_NEW:=$(shell mktemp -t new.XXXX)
-benchcmp:
- if [ ! -z "$(shell git status -s)" ]; then\
- echo "could not compare with $(BENCH_BASE) – found unstaged changes";\
- exit 1;\
- fi;\
- if [ "$(BENCH_BRANCH)" == "$(BENCH_BASE)" ]; then\
- echo "comparing the same branches";\
- exit 1;\
- fi;\
- echo "benchmarking $(BENCH_BRANCH)...";\
- go test -run=none -bench=$(BENCH) -benchmem > $(BENCH_NEW);\
- echo "benchmarking $(BENCH_BASE)...";\
- git checkout -q $(BENCH_BASE);\
- go test -run=none -bench=$(BENCH) -benchmem > $(BENCH_OLD);\
- git checkout -q $(BENCH_BRANCH);\
- echo "\nresults:";\
- echo "========\n";\
- benchcmp $(BENCH_OLD) $(BENCH_NEW);\
-
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/README.md b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/README.md
deleted file mode 100644
index 74acd78bd08..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/README.md
+++ /dev/null
@@ -1,360 +0,0 @@
-# ws
-
-[![GoDoc][godoc-image]][godoc-url]
-[![Travis][travis-image]][travis-url]
-
-> [RFC6455][rfc-url] WebSocket implementation in Go.
-
-# Features
-
-- Zero-copy upgrade
-- No intermediate allocations during I/O
-- Low-level API which allows to build your own logic of packet handling and
- buffers reuse
-- High-level wrappers and helpers around API in `wsutil` package, which allow
- to start fast without digging the protocol internals
-
-# Documentation
-
-[GoDoc][godoc-url].
-
-# Why
-
-Existing WebSocket implementations do not allow users to reuse I/O buffers
-between connections in clear way. This library aims to export efficient
-low-level interface for working with the protocol without forcing only one way
-it could be used.
-
-By the way, if you want get the higher-level tools, you can use `wsutil`
-package.
-
-# Status
-
-Library is tagged as `v1*` so its API must not be broken during some
-improvements or refactoring.
-
-This implementation of RFC6455 passes [Autobahn Test
-Suite](https://github.com/crossbario/autobahn-testsuite) and currently has
-about 78% coverage.
-
-# Examples
-
-Example applications using `ws` are developed in separate repository
-[ws-examples](https://github.com/gobwas/ws-examples).
-
-# Usage
-
-The higher-level example of WebSocket echo server:
-
-```go
-package main
-
-import (
- "net/http"
-
- "github.com/gobwas/ws"
- "github.com/gobwas/ws/wsutil"
-)
-
-func main() {
- http.ListenAndServe(":8080", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- conn, _, _, err := ws.UpgradeHTTP(r, w)
- if err != nil {
- // handle error
- }
- go func() {
- defer conn.Close()
-
- for {
- msg, op, err := wsutil.ReadClientData(conn)
- if err != nil {
- // handle error
- }
- err = wsutil.WriteServerMessage(conn, op, msg)
- if err != nil {
- // handle error
- }
- }
- }()
- }))
-}
-```
-
-Lower-level, but still high-level example:
-
-
-```go
-import (
- "net/http"
- "io"
-
- "github.com/gobwas/ws"
- "github.com/gobwas/ws/wsutil"
-)
-
-func main() {
- http.ListenAndServe(":8080", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- conn, _, _, err := ws.UpgradeHTTP(r, w)
- if err != nil {
- // handle error
- }
- go func() {
- defer conn.Close()
-
- var (
- state = ws.StateServerSide
- reader = wsutil.NewReader(conn, state)
- writer = wsutil.NewWriter(conn, state, ws.OpText)
- )
- for {
- header, err := reader.NextFrame()
- if err != nil {
- // handle error
- }
-
- // Reset writer to write frame with right operation code.
- writer.Reset(conn, state, header.OpCode)
-
- if _, err = io.Copy(writer, reader); err != nil {
- // handle error
- }
- if err = writer.Flush(); err != nil {
- // handle error
- }
- }
- }()
- }))
-}
-```
-
-We can apply the same pattern to read and write structured responses through a JSON encoder and decoder.:
-
-```go
- ...
- var (
- r = wsutil.NewReader(conn, ws.StateServerSide)
- w = wsutil.NewWriter(conn, ws.StateServerSide, ws.OpText)
- decoder = json.NewDecoder(r)
- encoder = json.NewEncoder(w)
- )
- for {
- hdr, err = r.NextFrame()
- if err != nil {
- return err
- }
- if hdr.OpCode == ws.OpClose {
- return io.EOF
- }
- var req Request
- if err := decoder.Decode(&req); err != nil {
- return err
- }
- var resp Response
- if err := encoder.Encode(&resp); err != nil {
- return err
- }
- if err = w.Flush(); err != nil {
- return err
- }
- }
- ...
-```
-
-The lower-level example without `wsutil`:
-
-```go
-package main
-
-import (
- "net"
- "io"
-
- "github.com/gobwas/ws"
-)
-
-func main() {
- ln, err := net.Listen("tcp", "localhost:8080")
- if err != nil {
- log.Fatal(err)
- }
-
- for {
- conn, err := ln.Accept()
- if err != nil {
- // handle error
- }
- _, err = ws.Upgrade(conn)
- if err != nil {
- // handle error
- }
-
- go func() {
- defer conn.Close()
-
- for {
- header, err := ws.ReadHeader(conn)
- if err != nil {
- // handle error
- }
-
- payload := make([]byte, header.Length)
- _, err = io.ReadFull(conn, payload)
- if err != nil {
- // handle error
- }
- if header.Masked {
- ws.Cipher(payload, header.Mask, 0)
- }
-
- // Reset the Masked flag, server frames must not be masked as
- // RFC6455 says.
- header.Masked = false
-
- if err := ws.WriteHeader(conn, header); err != nil {
- // handle error
- }
- if _, err := conn.Write(payload); err != nil {
- // handle error
- }
-
- if header.OpCode == ws.OpClose {
- return
- }
- }
- }()
- }
-}
-```
-
-# Zero-copy upgrade
-
-Zero-copy upgrade helps to avoid unnecessary allocations and copying while
-handling HTTP Upgrade request.
-
-Processing of all non-websocket headers is made in place with use of registered
-user callbacks whose arguments are only valid until callback returns.
-
-The simple example looks like this:
-
-```go
-package main
-
-import (
- "net"
- "log"
-
- "github.com/gobwas/ws"
-)
-
-func main() {
- ln, err := net.Listen("tcp", "localhost:8080")
- if err != nil {
- log.Fatal(err)
- }
- u := ws.Upgrader{
- OnHeader: func(key, value []byte) (err error) {
- log.Printf("non-websocket header: %q=%q", key, value)
- return
- },
- }
- for {
- conn, err := ln.Accept()
- if err != nil {
- // handle error
- }
-
- _, err = u.Upgrade(conn)
- if err != nil {
- // handle error
- }
- }
-}
-```
-
-Usage of `ws.Upgrader` here brings ability to control incoming connections on
-tcp level and simply not to accept them by some logic.
-
-Zero-copy upgrade is for high-load services which have to control many
-resources such as connections buffers.
-
-The real life example could be like this:
-
-```go
-package main
-
-import (
- "fmt"
- "io"
- "log"
- "net"
- "net/http"
- "runtime"
-
- "github.com/gobwas/httphead"
- "github.com/gobwas/ws"
-)
-
-func main() {
- ln, err := net.Listen("tcp", "localhost:8080")
- if err != nil {
- // handle error
- }
-
- // Prepare handshake header writer from http.Header mapping.
- header := ws.HandshakeHeaderHTTP(http.Header{
- "X-Go-Version": []string{runtime.Version()},
- })
-
- u := ws.Upgrader{
- OnHost: func(host []byte) error {
- if string(host) == "github.com" {
- return nil
- }
- return ws.RejectConnectionError(
- ws.RejectionStatus(403),
- ws.RejectionHeader(ws.HandshakeHeaderString(
- "X-Want-Host: github.com\r\n",
- )),
- )
- },
- OnHeader: func(key, value []byte) error {
- if string(key) != "Cookie" {
- return nil
- }
- ok := httphead.ScanCookie(value, func(key, value []byte) bool {
- // Check session here or do some other stuff with cookies.
- // Maybe copy some values for future use.
- return true
- })
- if ok {
- return nil
- }
- return ws.RejectConnectionError(
- ws.RejectionReason("bad cookie"),
- ws.RejectionStatus(400),
- )
- },
- OnBeforeUpgrade: func() (ws.HandshakeHeader, error) {
- return header, nil
- },
- }
- for {
- conn, err := ln.Accept()
- if err != nil {
- log.Fatal(err)
- }
- _, err = u.Upgrade(conn)
- if err != nil {
- log.Printf("upgrade error: %s", err)
- }
- }
-}
-```
-
-
-
-[rfc-url]: https://tools.ietf.org/html/rfc6455
-[godoc-image]: https://godoc.org/github.com/gobwas/ws?status.svg
-[godoc-url]: https://godoc.org/github.com/gobwas/ws
-[travis-image]: https://travis-ci.org/gobwas/ws.svg?branch=master
-[travis-url]: https://travis-ci.org/gobwas/ws
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/check.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/check.go
deleted file mode 100644
index 8aa0df8cc28..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/check.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package ws
-
-import "unicode/utf8"
-
-// State represents state of websocket endpoint.
-// It used by some functions to be more strict when checking compatibility with RFC6455.
-type State uint8
-
-const (
- // StateServerSide means that endpoint (caller) is a server.
- StateServerSide State = 0x1 << iota
- // StateClientSide means that endpoint (caller) is a client.
- StateClientSide
- // StateExtended means that extension was negotiated during handshake.
- StateExtended
- // StateFragmented means that endpoint (caller) has received fragmented
- // frame and waits for continuation parts.
- StateFragmented
-)
-
-// Is checks whether the s has v enabled.
-func (s State) Is(v State) bool {
- return uint8(s)&uint8(v) != 0
-}
-
-// Set enables v state on s.
-func (s State) Set(v State) State {
- return s | v
-}
-
-// Clear disables v state on s.
-func (s State) Clear(v State) State {
- return s & (^v)
-}
-
-// ServerSide reports whether states represents server side.
-func (s State) ServerSide() bool { return s.Is(StateServerSide) }
-
-// ClientSide reports whether state represents client side.
-func (s State) ClientSide() bool { return s.Is(StateClientSide) }
-
-// Extended reports whether state is extended.
-func (s State) Extended() bool { return s.Is(StateExtended) }
-
-// Fragmented reports whether state is fragmented.
-func (s State) Fragmented() bool { return s.Is(StateFragmented) }
-
-// ProtocolError describes error during checking/parsing websocket frames or
-// headers.
-type ProtocolError string
-
-// Error implements error interface.
-func (p ProtocolError) Error() string { return string(p) }
-
-// Errors used by the protocol checkers.
-var (
- ErrProtocolOpCodeReserved = ProtocolError("use of reserved op code")
- ErrProtocolControlPayloadOverflow = ProtocolError("control frame payload limit exceeded")
- ErrProtocolControlNotFinal = ProtocolError("control frame is not final")
- ErrProtocolNonZeroRsv = ProtocolError("non-zero rsv bits with no extension negotiated")
- ErrProtocolMaskRequired = ProtocolError("frames from client to server must be masked")
- ErrProtocolMaskUnexpected = ProtocolError("frames from server to client must be not masked")
- ErrProtocolContinuationExpected = ProtocolError("unexpected non-continuation data frame")
- ErrProtocolContinuationUnexpected = ProtocolError("unexpected continuation data frame")
- ErrProtocolStatusCodeNotInUse = ProtocolError("status code is not in use")
- ErrProtocolStatusCodeApplicationLevel = ProtocolError("status code is only application level")
- ErrProtocolStatusCodeNoMeaning = ProtocolError("status code has no meaning yet")
- ErrProtocolStatusCodeUnknown = ProtocolError("status code is not defined in spec")
- ErrProtocolInvalidUTF8 = ProtocolError("invalid utf8 sequence in close reason")
-)
-
-// CheckHeader checks h to contain valid header data for given state s.
-//
-// Note that zero state (0) means that state is clean,
-// neither server or client side, nor fragmented, nor extended.
-func CheckHeader(h Header, s State) error {
- if h.OpCode.IsReserved() {
- return ErrProtocolOpCodeReserved
- }
- if h.OpCode.IsControl() {
- if h.Length > MaxControlFramePayloadSize {
- return ErrProtocolControlPayloadOverflow
- }
- if !h.Fin {
- return ErrProtocolControlNotFinal
- }
- }
-
- switch {
- // [RFC6455]: MUST be 0 unless an extension is negotiated that defines meanings for
- // non-zero values. If a nonzero value is received and none of the
- // negotiated extensions defines the meaning of such a nonzero value, the
- // receiving endpoint MUST _Fail the WebSocket Connection_.
- case h.Rsv != 0 && !s.Extended():
- return ErrProtocolNonZeroRsv
-
- // [RFC6455]: The server MUST close the connection upon receiving a frame that is not masked.
- // In this case, a server MAY send a Close frame with a status code of 1002 (protocol error)
- // as defined in Section 7.4.1. A server MUST NOT mask any frames that it sends to the client.
- // A client MUST close a connection if it detects a masked frame. In this case, it MAY use the
- // status code 1002 (protocol error) as defined in Section 7.4.1.
- case s.ServerSide() && !h.Masked:
- return ErrProtocolMaskRequired
- case s.ClientSide() && h.Masked:
- return ErrProtocolMaskUnexpected
-
- // [RFC6455]: See detailed explanation in 5.4 section.
- case s.Fragmented() && !h.OpCode.IsControl() && h.OpCode != OpContinuation:
- return ErrProtocolContinuationExpected
- case !s.Fragmented() && h.OpCode == OpContinuation:
- return ErrProtocolContinuationUnexpected
-
- default:
- return nil
- }
-}
-
-// CheckCloseFrameData checks received close information
-// to be valid RFC6455 compatible close info.
-//
-// Note that code.Empty() or code.IsAppLevel() will raise error.
-//
-// If endpoint sends close frame without status code (with frame.Length = 0),
-// application should not check its payload.
-func CheckCloseFrameData(code StatusCode, reason string) error {
- switch {
- case code.IsNotUsed():
- return ErrProtocolStatusCodeNotInUse
-
- case code.IsProtocolReserved():
- return ErrProtocolStatusCodeApplicationLevel
-
- case code == StatusNoMeaningYet:
- return ErrProtocolStatusCodeNoMeaning
-
- case code.IsProtocolSpec() && !code.IsProtocolDefined():
- return ErrProtocolStatusCodeUnknown
-
- case !utf8.ValidString(reason):
- return ErrProtocolInvalidUTF8
-
- default:
- return nil
- }
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/cipher.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/cipher.go
deleted file mode 100644
index 11a2af99bfc..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/cipher.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package ws
-
-import (
- "encoding/binary"
- "unsafe"
-)
-
-// Cipher applies XOR cipher to the payload using mask.
-// Offset is used to cipher chunked data (e.g. in io.Reader implementations).
-//
-// To convert masked data into unmasked data, or vice versa, the following
-// algorithm is applied. The same algorithm applies regardless of the
-// direction of the translation, e.g., the same steps are applied to
-// mask the data as to unmask the data.
-func Cipher(payload []byte, mask [4]byte, offset int) {
- n := len(payload)
- if n < 8 {
- for i := 0; i < n; i++ {
- payload[i] ^= mask[(offset+i)%4]
- }
- return
- }
-
- // Calculate position in mask due to previously processed bytes number.
- mpos := offset % 4
- // Count number of bytes will processed one by one from the beginning of payload.
- ln := remain[mpos]
- // Count number of bytes will processed one by one from the end of payload.
- // This is done to process payload by 8 bytes in each iteration of main loop.
- rn := (n - ln) % 8
-
- for i := 0; i < ln; i++ {
- payload[i] ^= mask[(mpos+i)%4]
- }
- for i := n - rn; i < n; i++ {
- payload[i] ^= mask[(mpos+i)%4]
- }
-
- // We should cast mask to uint32 with unsafe instead of encoding.BigEndian
- // to avoid care of os dependent byte order. That is, on any endianess mask
- // and payload will be presented with the same order. In other words, we
- // could not use encoding.BigEndian on xoring payload as uint64.
- m := *(*uint32)(unsafe.Pointer(&mask))
- m2 := uint64(m)<<32 | uint64(m)
-
- // Skip already processed right part.
- // Get number of uint64 parts remaining to process.
- n = (n - ln - rn) >> 3
- for i := 0; i < n; i++ {
- idx := ln + (i << 3)
- p := binary.LittleEndian.Uint64(payload[idx : idx+8])
- p = p ^ m2
- binary.LittleEndian.PutUint64(payload[idx:idx+8], p)
- }
-}
-
-// remain maps position in masking key [0,4) to number
-// of bytes that need to be processed manually inside Cipher().
-var remain = [4]int{0, 3, 2, 1}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer.go
deleted file mode 100644
index 4357be2142b..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer.go
+++ /dev/null
@@ -1,556 +0,0 @@
-package ws
-
-import (
- "bufio"
- "bytes"
- "context"
- "crypto/tls"
- "fmt"
- "io"
- "net"
- "net/url"
- "strconv"
- "strings"
- "time"
-
- "github.com/gobwas/httphead"
- "github.com/gobwas/pool/pbufio"
-)
-
-// Constants used by Dialer.
-const (
- DefaultClientReadBufferSize = 4096
- DefaultClientWriteBufferSize = 4096
-)
-
-// Handshake represents handshake result.
-type Handshake struct {
- // Protocol is the subprotocol selected during handshake.
- Protocol string
-
- // Extensions is the list of negotiated extensions.
- Extensions []httphead.Option
-}
-
-// Errors used by the websocket client.
-var (
- ErrHandshakeBadStatus = fmt.Errorf("unexpected http status")
- ErrHandshakeBadSubProtocol = fmt.Errorf("unexpected protocol in %q header", headerSecProtocol)
- ErrHandshakeBadExtensions = fmt.Errorf("unexpected extensions in %q header", headerSecProtocol)
-)
-
-// DefaultDialer is dialer that holds no options and is used by Dial function.
-var DefaultDialer Dialer
-
-// Dial is like Dialer{}.Dial().
-func Dial(ctx context.Context, urlstr string) (net.Conn, *bufio.Reader, Handshake, error) {
- return DefaultDialer.Dial(ctx, urlstr)
-}
-
-// Dialer contains options for establishing websocket connection to an url.
-type Dialer struct {
- // ReadBufferSize and WriteBufferSize is an I/O buffer sizes.
- // They used to read and write http data while upgrading to WebSocket.
- // Allocated buffers are pooled with sync.Pool to avoid extra allocations.
- //
- // If a size is zero then default value is used.
- ReadBufferSize, WriteBufferSize int
-
- // Timeout is the maximum amount of time a Dial() will wait for a connect
- // and an handshake to complete.
- //
- // The default is no timeout.
- Timeout time.Duration
-
- // Protocols is the list of subprotocols that the client wants to speak,
- // ordered by preference.
- //
- // See https://tools.ietf.org/html/rfc6455#section-4.1
- Protocols []string
-
- // Extensions is the list of extensions that client wants to speak.
- //
- // Note that if server decides to use some of this extensions, Dial() will
- // return Handshake struct containing a slice of items, which are the
- // shallow copies of the items from this list. That is, internals of
- // Extensions items are shared during Dial().
- //
- // See https://tools.ietf.org/html/rfc6455#section-4.1
- // See https://tools.ietf.org/html/rfc6455#section-9.1
- Extensions []httphead.Option
-
- // Header is an optional HandshakeHeader instance that could be used to
- // write additional headers to the handshake request.
- //
- // It used instead of any key-value mappings to avoid allocations in user
- // land.
- Header HandshakeHeader
-
- // OnStatusError is the callback that will be called after receiving non
- // "101 Continue" HTTP response status. It receives an io.Reader object
- // representing server response bytes. That is, it gives ability to parse
- // HTTP response somehow (probably with http.ReadResponse call) and make a
- // decision of further logic.
- //
- // The arguments are only valid until the callback returns.
- OnStatusError func(status int, reason []byte, resp io.Reader)
-
- // OnHeader is the callback that will be called after successful parsing of
- // header, that is not used during WebSocket handshake procedure. That is,
- // it will be called with non-websocket headers, which could be relevant
- // for application-level logic.
- //
- // The arguments are only valid until the callback returns.
- //
- // Returned value could be used to prevent processing response.
- OnHeader func(key, value []byte) (err error)
-
- // NetDial is the function that is used to get plain tcp connection.
- // If it is not nil, then it is used instead of net.Dialer.
- NetDial func(ctx context.Context, network, addr string) (net.Conn, error)
-
- // TLSClient is the callback that will be called after successful dial with
- // received connection and its remote host name. If it is nil, then the
- // default tls.Client() will be used.
- // If it is not nil, then TLSConfig field is ignored.
- TLSClient func(conn net.Conn, hostname string) net.Conn
-
- // TLSConfig is passed to tls.Client() to start TLS over established
- // connection. If TLSClient is not nil, then it is ignored. If TLSConfig is
- // non-nil and its ServerName is empty, then for every Dial() it will be
- // cloned and appropriate ServerName will be set.
- TLSConfig *tls.Config
-
- // WrapConn is the optional callback that will be called when connection is
- // ready for an i/o. That is, it will be called after successful dial and
- // TLS initialization (for "wss" schemes). It may be helpful for different
- // user land purposes such as end to end encryption.
- //
- // Note that for debugging purposes of an http handshake (e.g. sent request
- // and received response), there is an wsutil.DebugDialer struct.
- WrapConn func(conn net.Conn) net.Conn
-}
-
-// Dial connects to the url host and upgrades connection to WebSocket.
-//
-// If server has sent frames right after successful handshake then returned
-// buffer will be non-nil. In other cases buffer is always nil. For better
-// memory efficiency received non-nil bufio.Reader should be returned to the
-// inner pool with PutReader() function after use.
-//
-// Note that Dialer does not implement IDNA (RFC5895) logic as net/http does.
-// If you want to dial non-ascii host name, take care of its name serialization
-// avoiding bad request issues. For more info see net/http Request.Write()
-// implementation, especially cleanHost() function.
-func (d Dialer) Dial(ctx context.Context, urlstr string) (conn net.Conn, br *bufio.Reader, hs Handshake, err error) {
- u, err := url.ParseRequestURI(urlstr)
- if err != nil {
- return
- }
-
- // Prepare context to dial with. Initially it is the same as original, but
- // if d.Timeout is non-zero and points to time that is before ctx.Deadline,
- // we use more shorter context for dial.
- dialctx := ctx
-
- var deadline time.Time
- if t := d.Timeout; t != 0 {
- deadline = time.Now().Add(t)
- if d, ok := ctx.Deadline(); !ok || deadline.Before(d) {
- var cancel context.CancelFunc
- dialctx, cancel = context.WithDeadline(ctx, deadline)
- defer cancel()
- }
- }
- if conn, err = d.dial(dialctx, u); err != nil {
- return
- }
- defer func() {
- if err != nil {
- conn.Close()
- }
- }()
- if ctx == context.Background() {
- // No need to start I/O interrupter goroutine which is not zero-cost.
- conn.SetDeadline(deadline)
- defer conn.SetDeadline(noDeadline)
- } else {
- // Context could be canceled or its deadline could be exceeded.
- // Start the interrupter goroutine to handle context cancelation.
- done := setupContextDeadliner(ctx, conn)
- defer func() {
- // Map Upgrade() error to a possible context expiration error. That
- // is, even if Upgrade() err is nil, context could be already
- // expired and connection be "poisoned" by SetDeadline() call.
- // In that case we must not return ctx.Err() error.
- done(&err)
- }()
- }
-
- br, hs, err = d.Upgrade(conn, u)
-
- return
-}
-
-var (
- // netEmptyDialer is a net.Dialer without options, used in Dialer.dial() if
- // Dialer.NetDial is not provided.
- netEmptyDialer net.Dialer
- // tlsEmptyConfig is an empty tls.Config used as default one.
- tlsEmptyConfig tls.Config
-)
-
-func tlsDefaultConfig() *tls.Config {
- return &tlsEmptyConfig
-}
-
-func hostport(host string, defaultPort string) (hostname, addr string) {
- var (
- colon = strings.LastIndexByte(host, ':')
- bracket = strings.IndexByte(host, ']')
- )
- if colon > bracket {
- return host[:colon], host
- }
- return host, host + defaultPort
-}
-
-func (d Dialer) dial(ctx context.Context, u *url.URL) (conn net.Conn, err error) {
- dial := d.NetDial
- if dial == nil {
- dial = netEmptyDialer.DialContext
- }
- switch u.Scheme {
- case "ws":
- _, addr := hostport(u.Host, ":80")
- conn, err = dial(ctx, "tcp", addr)
- case "wss":
- hostname, addr := hostport(u.Host, ":443")
- conn, err = dial(ctx, "tcp", addr)
- if err != nil {
- return
- }
- tlsClient := d.TLSClient
- if tlsClient == nil {
- tlsClient = d.tlsClient
- }
- conn = tlsClient(conn, hostname)
- default:
- return nil, fmt.Errorf("unexpected websocket scheme: %q", u.Scheme)
- }
- if wrap := d.WrapConn; wrap != nil {
- conn = wrap(conn)
- }
- return
-}
-
-func (d Dialer) tlsClient(conn net.Conn, hostname string) net.Conn {
- config := d.TLSConfig
- if config == nil {
- config = tlsDefaultConfig()
- }
- if config.ServerName == "" {
- config = tlsCloneConfig(config)
- config.ServerName = hostname
- }
- // Do not make conn.Handshake() here because downstairs we will prepare
- // i/o on this conn with proper context's timeout handling.
- return tls.Client(conn, config)
-}
-
-var (
- // This variables are set like in net/net.go.
- // noDeadline is just zero value for readability.
- noDeadline = time.Time{}
- // aLongTimeAgo is a non-zero time, far in the past, used for immediate
- // cancelation of dials.
- aLongTimeAgo = time.Unix(42, 0)
-)
-
-// Upgrade writes an upgrade request to the given io.ReadWriter conn at given
-// url u and reads a response from it.
-//
-// It is a caller responsibility to manage I/O deadlines on conn.
-//
-// It returns handshake info and some bytes which could be written by the peer
-// right after response and be caught by us during buffered read.
-func (d Dialer) Upgrade(conn io.ReadWriter, u *url.URL) (br *bufio.Reader, hs Handshake, err error) {
- // headerSeen constants helps to report whether or not some header was seen
- // during reading request bytes.
- const (
- headerSeenUpgrade = 1 << iota
- headerSeenConnection
- headerSeenSecAccept
-
- // headerSeenAll is the value that we expect to receive at the end of
- // headers read/parse loop.
- headerSeenAll = 0 |
- headerSeenUpgrade |
- headerSeenConnection |
- headerSeenSecAccept
- )
-
- br = pbufio.GetReader(conn,
- nonZero(d.ReadBufferSize, DefaultClientReadBufferSize),
- )
- bw := pbufio.GetWriter(conn,
- nonZero(d.WriteBufferSize, DefaultClientWriteBufferSize),
- )
- defer func() {
- pbufio.PutWriter(bw)
- if br.Buffered() == 0 || err != nil {
- // Server does not wrote additional bytes to the connection or
- // error occurred. That is, no reason to return buffer.
- pbufio.PutReader(br)
- br = nil
- }
- }()
-
- nonce := make([]byte, nonceSize)
- initNonce(nonce)
-
- httpWriteUpgradeRequest(bw, u, nonce, d.Protocols, d.Extensions, d.Header)
- if err = bw.Flush(); err != nil {
- return
- }
-
- // Read HTTP status line like "HTTP/1.1 101 Switching Protocols".
- sl, err := readLine(br)
- if err != nil {
- return
- }
- // Begin validation of the response.
- // See https://tools.ietf.org/html/rfc6455#section-4.2.2
- // Parse request line data like HTTP version, uri and method.
- resp, err := httpParseResponseLine(sl)
- if err != nil {
- return
- }
- // Even if RFC says "1.1 or higher" without mentioning the part of the
- // version, we apply it only to minor part.
- if resp.major != 1 || resp.minor < 1 {
- err = ErrHandshakeBadProtocol
- return
- }
- if resp.status != 101 {
- err = StatusError(resp.status)
- if onStatusError := d.OnStatusError; onStatusError != nil {
- // Invoke callback with multireader of status-line bytes br.
- onStatusError(resp.status, resp.reason,
- io.MultiReader(
- bytes.NewReader(sl),
- strings.NewReader(crlf),
- br,
- ),
- )
- }
- return
- }
- // If response status is 101 then we expect all technical headers to be
- // valid. If not, then we stop processing response without giving user
- // ability to read non-technical headers. That is, we do not distinguish
- // technical errors (such as parsing error) and protocol errors.
- var headerSeen byte
- for {
- line, e := readLine(br)
- if e != nil {
- err = e
- return
- }
- if len(line) == 0 {
- // Blank line, no more lines to read.
- break
- }
-
- k, v, ok := httpParseHeaderLine(line)
- if !ok {
- err = ErrMalformedResponse
- return
- }
-
- switch btsToString(k) {
- case headerUpgradeCanonical:
- headerSeen |= headerSeenUpgrade
- if !bytes.Equal(v, specHeaderValueUpgrade) && !bytes.EqualFold(v, specHeaderValueUpgrade) {
- err = ErrHandshakeBadUpgrade
- return
- }
-
- case headerConnectionCanonical:
- headerSeen |= headerSeenConnection
- // Note that as RFC6455 says:
- // > A |Connection| header field with value "Upgrade".
- // That is, in server side, "Connection" header could contain
- // multiple token. But in response it must contains exactly one.
- if !bytes.Equal(v, specHeaderValueConnection) && !bytes.EqualFold(v, specHeaderValueConnection) {
- err = ErrHandshakeBadConnection
- return
- }
-
- case headerSecAcceptCanonical:
- headerSeen |= headerSeenSecAccept
- if !checkAcceptFromNonce(v, nonce) {
- err = ErrHandshakeBadSecAccept
- return
- }
-
- case headerSecProtocolCanonical:
- // RFC6455 1.3:
- // "The server selects one or none of the acceptable protocols
- // and echoes that value in its handshake to indicate that it has
- // selected that protocol."
- for _, want := range d.Protocols {
- if string(v) == want {
- hs.Protocol = want
- break
- }
- }
- if hs.Protocol == "" {
- // Server echoed subprotocol that is not present in client
- // requested protocols.
- err = ErrHandshakeBadSubProtocol
- return
- }
-
- case headerSecExtensionsCanonical:
- hs.Extensions, err = matchSelectedExtensions(v, d.Extensions, hs.Extensions)
- if err != nil {
- return
- }
-
- default:
- if onHeader := d.OnHeader; onHeader != nil {
- if e := onHeader(k, v); e != nil {
- err = e
- return
- }
- }
- }
- }
- if err == nil && headerSeen != headerSeenAll {
- switch {
- case headerSeen&headerSeenUpgrade == 0:
- err = ErrHandshakeBadUpgrade
- case headerSeen&headerSeenConnection == 0:
- err = ErrHandshakeBadConnection
- case headerSeen&headerSeenSecAccept == 0:
- err = ErrHandshakeBadSecAccept
- default:
- panic("unknown headers state")
- }
- }
- return
-}
-
-// PutReader returns bufio.Reader instance to the inner reuse pool.
-// It is useful in rare cases, when Dialer.Dial() returns non-nil buffer which
-// contains unprocessed buffered data, that was sent by the server quickly
-// right after handshake.
-func PutReader(br *bufio.Reader) {
- pbufio.PutReader(br)
-}
-
-// StatusError contains an unexpected status-line code from the server.
-type StatusError int
-
-func (s StatusError) Error() string {
- return "unexpected HTTP response status: " + strconv.Itoa(int(s))
-}
-
-func isTimeoutError(err error) bool {
- t, ok := err.(net.Error)
- return ok && t.Timeout()
-}
-
-func matchSelectedExtensions(selected []byte, wanted, received []httphead.Option) ([]httphead.Option, error) {
- if len(selected) == 0 {
- return received, nil
- }
- var (
- index int
- option httphead.Option
- err error
- )
- index = -1
- match := func() (ok bool) {
- for _, want := range wanted {
- if option.Equal(want) {
- // Check parsed extension to be present in client
- // requested extensions. We move matched extension
- // from client list to avoid allocation.
- received = append(received, want)
- return true
- }
- }
- return false
- }
- ok := httphead.ScanOptions(selected, func(i int, name, attr, val []byte) httphead.Control {
- if i != index {
- // Met next option.
- index = i
- if i != 0 && !match() {
- // Server returned non-requested extension.
- err = ErrHandshakeBadExtensions
- return httphead.ControlBreak
- }
- option = httphead.Option{Name: name}
- }
- if attr != nil {
- option.Parameters.Set(attr, val)
- }
- return httphead.ControlContinue
- })
- if !ok {
- err = ErrMalformedResponse
- return received, err
- }
- if !match() {
- return received, ErrHandshakeBadExtensions
- }
- return received, err
-}
-
-// setupContextDeadliner is a helper function that starts connection I/O
-// interrupter goroutine.
-//
-// Started goroutine calls SetDeadline() with long time ago value when context
-// become expired to make any I/O operations failed. It returns done function
-// that stops started goroutine and maps error received from conn I/O methods
-// to possible context expiration error.
-//
-// In concern with possible SetDeadline() call inside interrupter goroutine,
-// caller passes pointer to its I/O error (even if it is nil) to done(&err).
-// That is, even if I/O error is nil, context could be already expired and
-// connection "poisoned" by SetDeadline() call. In that case done(&err) will
-// store at *err ctx.Err() result. If err is caused not by timeout, it will
-// leaved untouched.
-func setupContextDeadliner(ctx context.Context, conn net.Conn) (done func(*error)) {
- var (
- quit = make(chan struct{})
- interrupt = make(chan error, 1)
- )
- go func() {
- select {
- case <-quit:
- interrupt <- nil
- case <-ctx.Done():
- // Cancel i/o immediately.
- conn.SetDeadline(aLongTimeAgo)
- interrupt <- ctx.Err()
- }
- }()
- return func(err *error) {
- close(quit)
- // If ctx.Err() is non-nil and the original err is net.Error with
- // Timeout() == true, then it means that I/O was canceled by us by
- // SetDeadline(aLongTimeAgo) call, or by somebody else previously
- // by conn.SetDeadline(x).
- //
- // Even on race condition when both deadlines are expired
- // (SetDeadline() made not by us and context's), we prefer ctx.Err() to
- // be returned.
- if ctxErr := <-interrupt; ctxErr != nil && (*err == nil || isTimeoutError(*err)) {
- *err = ctxErr
- }
- }
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer_tls_go17.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer_tls_go17.go
deleted file mode 100644
index b606e0ad909..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer_tls_go17.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// +build !go1.8
-
-package ws
-
-import "crypto/tls"
-
-func tlsCloneConfig(c *tls.Config) *tls.Config {
- // NOTE: we copying SessionTicketsDisabled and SessionTicketKey here
- // without calling inner c.initOnceServer somehow because we only could get
- // here from the ws.Dialer code, which is obviously a client and makes
- // tls.Client() when it gets new net.Conn.
- return &tls.Config{
- Rand: c.Rand,
- Time: c.Time,
- Certificates: c.Certificates,
- NameToCertificate: c.NameToCertificate,
- GetCertificate: c.GetCertificate,
- RootCAs: c.RootCAs,
- NextProtos: c.NextProtos,
- ServerName: c.ServerName,
- ClientAuth: c.ClientAuth,
- ClientCAs: c.ClientCAs,
- InsecureSkipVerify: c.InsecureSkipVerify,
- CipherSuites: c.CipherSuites,
- PreferServerCipherSuites: c.PreferServerCipherSuites,
- SessionTicketsDisabled: c.SessionTicketsDisabled,
- SessionTicketKey: c.SessionTicketKey,
- ClientSessionCache: c.ClientSessionCache,
- MinVersion: c.MinVersion,
- MaxVersion: c.MaxVersion,
- CurvePreferences: c.CurvePreferences,
- DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
- Renegotiation: c.Renegotiation,
- }
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer_tls_go18.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer_tls_go18.go
deleted file mode 100644
index a6704d5173a..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/dialer_tls_go18.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build go1.8
-
-package ws
-
-import "crypto/tls"
-
-func tlsCloneConfig(c *tls.Config) *tls.Config {
- return c.Clone()
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/doc.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/doc.go
deleted file mode 100644
index c9d5791570c..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/doc.go
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
-Package ws implements a client and server for the WebSocket protocol as
-specified in RFC 6455.
-
-The main purpose of this package is to provide simple low-level API for
-efficient work with protocol.
-
-Overview.
-
-Upgrade to WebSocket (or WebSocket handshake) can be done in two ways.
-
-The first way is to use `net/http` server:
-
- http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- conn, _, _, err := ws.UpgradeHTTP(r, w)
- })
-
-The second and much more efficient way is so-called "zero-copy upgrade". It
-avoids redundant allocations and copying of not used headers or other request
-data. User decides by himself which data should be copied.
-
- ln, err := net.Listen("tcp", ":8080")
- if err != nil {
- // handle error
- }
-
- conn, err := ln.Accept()
- if err != nil {
- // handle error
- }
-
- handshake, err := ws.Upgrade(conn)
- if err != nil {
- // handle error
- }
-
-For customization details see `ws.Upgrader` documentation.
-
-After WebSocket handshake you can work with connection in multiple ways.
-That is, `ws` does not force the only one way of how to work with WebSocket:
-
- header, err := ws.ReadHeader(conn)
- if err != nil {
- // handle err
- }
-
- buf := make([]byte, header.Length)
- _, err := io.ReadFull(conn, buf)
- if err != nil {
- // handle err
- }
-
- resp := ws.NewBinaryFrame([]byte("hello, world!"))
- if err := ws.WriteFrame(conn, frame); err != nil {
- // handle err
- }
-
-As you can see, it stream friendly:
-
- const N = 42
-
- ws.WriteHeader(ws.Header{
- Fin: true,
- Length: N,
- OpCode: ws.OpBinary,
- })
-
- io.CopyN(conn, rand.Reader, N)
-
-Or:
-
- header, err := ws.ReadHeader(conn)
- if err != nil {
- // handle err
- }
-
- io.CopyN(ioutil.Discard, conn, header.Length)
-
-For more info see the documentation.
-*/
-package ws
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/errors.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/errors.go
deleted file mode 100644
index 48fce3b72c1..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/errors.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package ws
-
-// RejectOption represents an option used to control the way connection is
-// rejected.
-type RejectOption func(*rejectConnectionError)
-
-// RejectionReason returns an option that makes connection to be rejected with
-// given reason.
-func RejectionReason(reason string) RejectOption {
- return func(err *rejectConnectionError) {
- err.reason = reason
- }
-}
-
-// RejectionStatus returns an option that makes connection to be rejected with
-// given HTTP status code.
-func RejectionStatus(code int) RejectOption {
- return func(err *rejectConnectionError) {
- err.code = code
- }
-}
-
-// RejectionHeader returns an option that makes connection to be rejected with
-// given HTTP headers.
-func RejectionHeader(h HandshakeHeader) RejectOption {
- return func(err *rejectConnectionError) {
- err.header = h
- }
-}
-
-// RejectConnectionError constructs an error that could be used to control the way
-// handshake is rejected by Upgrader.
-func RejectConnectionError(options ...RejectOption) error {
- err := new(rejectConnectionError)
- for _, opt := range options {
- opt(err)
- }
- return err
-}
-
-// rejectConnectionError represents a rejection of upgrade error.
-//
-// It can be returned by Upgrader's On* hooks to control the way WebSocket
-// handshake is rejected.
-type rejectConnectionError struct {
- reason string
- code int
- header HandshakeHeader
-}
-
-// Error implements error interface.
-func (r *rejectConnectionError) Error() string {
- return r.reason
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/frame.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/frame.go
deleted file mode 100644
index f157ee3e9ff..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/frame.go
+++ /dev/null
@@ -1,389 +0,0 @@
-package ws
-
-import (
- "bytes"
- "encoding/binary"
- "math/rand"
-)
-
-// Constants defined by specification.
-const (
- // All control frames MUST have a payload length of 125 bytes or less and MUST NOT be fragmented.
- MaxControlFramePayloadSize = 125
-)
-
-// OpCode represents operation code.
-type OpCode byte
-
-// Operation codes defined by specification.
-// See https://tools.ietf.org/html/rfc6455#section-5.2
-const (
- OpContinuation OpCode = 0x0
- OpText OpCode = 0x1
- OpBinary OpCode = 0x2
- OpClose OpCode = 0x8
- OpPing OpCode = 0x9
- OpPong OpCode = 0xa
-)
-
-// IsControl checks whether the c is control operation code.
-// See https://tools.ietf.org/html/rfc6455#section-5.5
-func (c OpCode) IsControl() bool {
- // RFC6455: Control frames are identified by opcodes where
- // the most significant bit of the opcode is 1.
- //
- // Note that OpCode is only 4 bit length.
- return c&0x8 != 0
-}
-
-// IsData checks whether the c is data operation code.
-// See https://tools.ietf.org/html/rfc6455#section-5.6
-func (c OpCode) IsData() bool {
- // RFC6455: Data frames (e.g., non-control frames) are identified by opcodes
- // where the most significant bit of the opcode is 0.
- //
- // Note that OpCode is only 4 bit length.
- return c&0x8 == 0
-}
-
-// IsReserved checks whether the c is reserved operation code.
-// See https://tools.ietf.org/html/rfc6455#section-5.2
-func (c OpCode) IsReserved() bool {
- // RFC6455:
- // %x3-7 are reserved for further non-control frames
- // %xB-F are reserved for further control frames
- return (0x3 <= c && c <= 0x7) || (0xb <= c && c <= 0xf)
-}
-
-// StatusCode represents the encoded reason for closure of websocket connection.
-//
-// There are few helper methods on StatusCode that helps to define a range in
-// which given code is lay in. accordingly to ranges defined in specification.
-//
-// See https://tools.ietf.org/html/rfc6455#section-7.4
-type StatusCode uint16
-
-// StatusCodeRange describes range of StatusCode values.
-type StatusCodeRange struct {
- Min, Max StatusCode
-}
-
-// Status code ranges defined by specification.
-// See https://tools.ietf.org/html/rfc6455#section-7.4.2
-var (
- StatusRangeNotInUse = StatusCodeRange{0, 999}
- StatusRangeProtocol = StatusCodeRange{1000, 2999}
- StatusRangeApplication = StatusCodeRange{3000, 3999}
- StatusRangePrivate = StatusCodeRange{4000, 4999}
-)
-
-// Status codes defined by specification.
-// See https://tools.ietf.org/html/rfc6455#section-7.4.1
-const (
- StatusNormalClosure StatusCode = 1000
- StatusGoingAway StatusCode = 1001
- StatusProtocolError StatusCode = 1002
- StatusUnsupportedData StatusCode = 1003
- StatusNoMeaningYet StatusCode = 1004
- StatusInvalidFramePayloadData StatusCode = 1007
- StatusPolicyViolation StatusCode = 1008
- StatusMessageTooBig StatusCode = 1009
- StatusMandatoryExt StatusCode = 1010
- StatusInternalServerError StatusCode = 1011
- StatusTLSHandshake StatusCode = 1015
-
- // StatusAbnormalClosure is a special code designated for use in
- // applications.
- StatusAbnormalClosure StatusCode = 1006
-
- // StatusNoStatusRcvd is a special code designated for use in applications.
- StatusNoStatusRcvd StatusCode = 1005
-)
-
-// In reports whether the code is defined in given range.
-func (s StatusCode) In(r StatusCodeRange) bool {
- return r.Min <= s && s <= r.Max
-}
-
-// Empty reports whether the code is empty.
-// Empty code has no any meaning neither app level codes nor other.
-// This method is useful just to check that code is golang default value 0.
-func (s StatusCode) Empty() bool {
- return s == 0
-}
-
-// IsNotUsed reports whether the code is predefined in not used range.
-func (s StatusCode) IsNotUsed() bool {
- return s.In(StatusRangeNotInUse)
-}
-
-// IsApplicationSpec reports whether the code should be defined by
-// application, framework or libraries specification.
-func (s StatusCode) IsApplicationSpec() bool {
- return s.In(StatusRangeApplication)
-}
-
-// IsPrivateSpec reports whether the code should be defined privately.
-func (s StatusCode) IsPrivateSpec() bool {
- return s.In(StatusRangePrivate)
-}
-
-// IsProtocolSpec reports whether the code should be defined by protocol specification.
-func (s StatusCode) IsProtocolSpec() bool {
- return s.In(StatusRangeProtocol)
-}
-
-// IsProtocolDefined reports whether the code is already defined by protocol specification.
-func (s StatusCode) IsProtocolDefined() bool {
- switch s {
- case StatusNormalClosure,
- StatusGoingAway,
- StatusProtocolError,
- StatusUnsupportedData,
- StatusInvalidFramePayloadData,
- StatusPolicyViolation,
- StatusMessageTooBig,
- StatusMandatoryExt,
- StatusInternalServerError,
- StatusNoStatusRcvd,
- StatusAbnormalClosure,
- StatusTLSHandshake:
- return true
- }
- return false
-}
-
-// IsProtocolReserved reports whether the code is defined by protocol specification
-// to be reserved only for application usage purpose.
-func (s StatusCode) IsProtocolReserved() bool {
- switch s {
- // [RFC6455]: {1005,1006,1015} is a reserved value and MUST NOT be set as a status code in a
- // Close control frame by an endpoint.
- case StatusNoStatusRcvd, StatusAbnormalClosure, StatusTLSHandshake:
- return true
- default:
- return false
- }
-}
-
-// Compiled control frames for common use cases.
-// For construct-serialize optimizations.
-var (
- CompiledPing = MustCompileFrame(NewPingFrame(nil))
- CompiledPong = MustCompileFrame(NewPongFrame(nil))
- CompiledClose = MustCompileFrame(NewCloseFrame(nil))
-
- CompiledCloseNormalClosure = MustCompileFrame(closeFrameNormalClosure)
- CompiledCloseGoingAway = MustCompileFrame(closeFrameGoingAway)
- CompiledCloseProtocolError = MustCompileFrame(closeFrameProtocolError)
- CompiledCloseUnsupportedData = MustCompileFrame(closeFrameUnsupportedData)
- CompiledCloseNoMeaningYet = MustCompileFrame(closeFrameNoMeaningYet)
- CompiledCloseInvalidFramePayloadData = MustCompileFrame(closeFrameInvalidFramePayloadData)
- CompiledClosePolicyViolation = MustCompileFrame(closeFramePolicyViolation)
- CompiledCloseMessageTooBig = MustCompileFrame(closeFrameMessageTooBig)
- CompiledCloseMandatoryExt = MustCompileFrame(closeFrameMandatoryExt)
- CompiledCloseInternalServerError = MustCompileFrame(closeFrameInternalServerError)
- CompiledCloseTLSHandshake = MustCompileFrame(closeFrameTLSHandshake)
-)
-
-// Header represents websocket frame header.
-// See https://tools.ietf.org/html/rfc6455#section-5.2
-type Header struct {
- Fin bool
- Rsv byte
- OpCode OpCode
- Masked bool
- Mask [4]byte
- Length int64
-}
-
-// Rsv1 reports whether the header has first rsv bit set.
-func (h Header) Rsv1() bool { return h.Rsv&bit5 != 0 }
-
-// Rsv2 reports whether the header has second rsv bit set.
-func (h Header) Rsv2() bool { return h.Rsv&bit6 != 0 }
-
-// Rsv3 reports whether the header has third rsv bit set.
-func (h Header) Rsv3() bool { return h.Rsv&bit7 != 0 }
-
-// Frame represents websocket frame.
-// See https://tools.ietf.org/html/rfc6455#section-5.2
-type Frame struct {
- Header Header
- Payload []byte
-}
-
-// NewFrame creates frame with given operation code,
-// flag of completeness and payload bytes.
-func NewFrame(op OpCode, fin bool, p []byte) Frame {
- return Frame{
- Header: Header{
- Fin: fin,
- OpCode: op,
- Length: int64(len(p)),
- },
- Payload: p,
- }
-}
-
-// NewTextFrame creates text frame with p as payload.
-// Note that p is not copied.
-func NewTextFrame(p []byte) Frame {
- return NewFrame(OpText, true, p)
-}
-
-// NewBinaryFrame creates binary frame with p as payload.
-// Note that p is not copied.
-func NewBinaryFrame(p []byte) Frame {
- return NewFrame(OpBinary, true, p)
-}
-
-// NewPingFrame creates ping frame with p as payload.
-// Note that p is not copied.
-// Note that p must have length of MaxControlFramePayloadSize bytes or less due
-// to RFC.
-func NewPingFrame(p []byte) Frame {
- return NewFrame(OpPing, true, p)
-}
-
-// NewPongFrame creates pong frame with p as payload.
-// Note that p is not copied.
-// Note that p must have length of MaxControlFramePayloadSize bytes or less due
-// to RFC.
-func NewPongFrame(p []byte) Frame {
- return NewFrame(OpPong, true, p)
-}
-
-// NewCloseFrame creates close frame with given close body.
-// Note that p is not copied.
-// Note that p must have length of MaxControlFramePayloadSize bytes or less due
-// to RFC.
-func NewCloseFrame(p []byte) Frame {
- return NewFrame(OpClose, true, p)
-}
-
-// NewCloseFrameBody encodes a closure code and a reason into a binary
-// representation.
-//
-// It returns slice which is at most MaxControlFramePayloadSize bytes length.
-// If the reason is too big it will be cropped to fit the limit defined by the
-// spec.
-//
-// See https://tools.ietf.org/html/rfc6455#section-5.5
-func NewCloseFrameBody(code StatusCode, reason string) []byte {
- n := min(2+len(reason), MaxControlFramePayloadSize)
- p := make([]byte, n)
-
- crop := min(MaxControlFramePayloadSize-2, len(reason))
- PutCloseFrameBody(p, code, reason[:crop])
-
- return p
-}
-
-// PutCloseFrameBody encodes code and reason into buf.
-//
-// It will panic if the buffer is too small to accommodate a code or a reason.
-//
-// PutCloseFrameBody does not check buffer to be RFC compliant, but note that
-// by RFC it must be at most MaxControlFramePayloadSize.
-func PutCloseFrameBody(p []byte, code StatusCode, reason string) {
- _ = p[1+len(reason)]
- binary.BigEndian.PutUint16(p, uint16(code))
- copy(p[2:], reason)
-}
-
-// MaskFrame masks frame and returns frame with masked payload and Mask header's field set.
-// Note that it copies f payload to prevent collisions.
-// For less allocations you could use MaskFrameInPlace or construct frame manually.
-func MaskFrame(f Frame) Frame {
- return MaskFrameWith(f, NewMask())
-}
-
-// MaskFrameWith masks frame with given mask and returns frame
-// with masked payload and Mask header's field set.
-// Note that it copies f payload to prevent collisions.
-// For less allocations you could use MaskFrameInPlaceWith or construct frame manually.
-func MaskFrameWith(f Frame, mask [4]byte) Frame {
- // TODO(gobwas): check CopyCipher ws copy() Cipher().
- p := make([]byte, len(f.Payload))
- copy(p, f.Payload)
- f.Payload = p
- return MaskFrameInPlaceWith(f, mask)
-}
-
-// MaskFrameInPlace masks frame and returns frame with masked payload and Mask
-// header's field set.
-// Note that it applies xor cipher to f.Payload without copying, that is, it
-// modifies f.Payload inplace.
-func MaskFrameInPlace(f Frame) Frame {
- return MaskFrameInPlaceWith(f, NewMask())
-}
-
-// MaskFrameInPlaceWith masks frame with given mask and returns frame
-// with masked payload and Mask header's field set.
-// Note that it applies xor cipher to f.Payload without copying, that is, it
-// modifies f.Payload inplace.
-func MaskFrameInPlaceWith(f Frame, m [4]byte) Frame {
- f.Header.Masked = true
- f.Header.Mask = m
- Cipher(f.Payload, m, 0)
- return f
-}
-
-// NewMask creates new random mask.
-func NewMask() (ret [4]byte) {
- binary.BigEndian.PutUint32(ret[:], rand.Uint32())
- return
-}
-
-// CompileFrame returns byte representation of given frame.
-// In terms of memory consumption it is useful to precompile static frames
-// which are often used.
-func CompileFrame(f Frame) (bts []byte, err error) {
- buf := bytes.NewBuffer(make([]byte, 0, 16))
- err = WriteFrame(buf, f)
- bts = buf.Bytes()
- return
-}
-
-// MustCompileFrame is like CompileFrame but panics if frame can not be
-// encoded.
-func MustCompileFrame(f Frame) []byte {
- bts, err := CompileFrame(f)
- if err != nil {
- panic(err)
- }
- return bts
-}
-
-// Rsv creates rsv byte representation.
-func Rsv(r1, r2, r3 bool) (rsv byte) {
- if r1 {
- rsv |= bit5
- }
- if r2 {
- rsv |= bit6
- }
- if r3 {
- rsv |= bit7
- }
- return rsv
-}
-
-func makeCloseFrame(code StatusCode) Frame {
- return NewCloseFrame(NewCloseFrameBody(code, ""))
-}
-
-var (
- closeFrameNormalClosure = makeCloseFrame(StatusNormalClosure)
- closeFrameGoingAway = makeCloseFrame(StatusGoingAway)
- closeFrameProtocolError = makeCloseFrame(StatusProtocolError)
- closeFrameUnsupportedData = makeCloseFrame(StatusUnsupportedData)
- closeFrameNoMeaningYet = makeCloseFrame(StatusNoMeaningYet)
- closeFrameInvalidFramePayloadData = makeCloseFrame(StatusInvalidFramePayloadData)
- closeFramePolicyViolation = makeCloseFrame(StatusPolicyViolation)
- closeFrameMessageTooBig = makeCloseFrame(StatusMessageTooBig)
- closeFrameMandatoryExt = makeCloseFrame(StatusMandatoryExt)
- closeFrameInternalServerError = makeCloseFrame(StatusInternalServerError)
- closeFrameTLSHandshake = makeCloseFrame(StatusTLSHandshake)
-)
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/http.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/http.go
deleted file mode 100644
index e18df441b47..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/http.go
+++ /dev/null
@@ -1,468 +0,0 @@
-package ws
-
-import (
- "bufio"
- "bytes"
- "io"
- "net/http"
- "net/textproto"
- "net/url"
- "strconv"
-
- "github.com/gobwas/httphead"
-)
-
-const (
- crlf = "\r\n"
- colonAndSpace = ": "
- commaAndSpace = ", "
-)
-
-const (
- textHeadUpgrade = "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\n"
-)
-
-var (
- textHeadBadRequest = statusText(http.StatusBadRequest)
- textHeadInternalServerError = statusText(http.StatusInternalServerError)
- textHeadUpgradeRequired = statusText(http.StatusUpgradeRequired)
-
- textTailErrHandshakeBadProtocol = errorText(ErrHandshakeBadProtocol)
- textTailErrHandshakeBadMethod = errorText(ErrHandshakeBadMethod)
- textTailErrHandshakeBadHost = errorText(ErrHandshakeBadHost)
- textTailErrHandshakeBadUpgrade = errorText(ErrHandshakeBadUpgrade)
- textTailErrHandshakeBadConnection = errorText(ErrHandshakeBadConnection)
- textTailErrHandshakeBadSecAccept = errorText(ErrHandshakeBadSecAccept)
- textTailErrHandshakeBadSecKey = errorText(ErrHandshakeBadSecKey)
- textTailErrHandshakeBadSecVersion = errorText(ErrHandshakeBadSecVersion)
- textTailErrUpgradeRequired = errorText(ErrHandshakeUpgradeRequired)
-)
-
-var (
- headerHost = "Host"
- headerUpgrade = "Upgrade"
- headerConnection = "Connection"
- headerSecVersion = "Sec-WebSocket-Version"
- headerSecProtocol = "Sec-WebSocket-Protocol"
- headerSecExtensions = "Sec-WebSocket-Extensions"
- headerSecKey = "Sec-WebSocket-Key"
- headerSecAccept = "Sec-WebSocket-Accept"
-
- headerHostCanonical = textproto.CanonicalMIMEHeaderKey(headerHost)
- headerUpgradeCanonical = textproto.CanonicalMIMEHeaderKey(headerUpgrade)
- headerConnectionCanonical = textproto.CanonicalMIMEHeaderKey(headerConnection)
- headerSecVersionCanonical = textproto.CanonicalMIMEHeaderKey(headerSecVersion)
- headerSecProtocolCanonical = textproto.CanonicalMIMEHeaderKey(headerSecProtocol)
- headerSecExtensionsCanonical = textproto.CanonicalMIMEHeaderKey(headerSecExtensions)
- headerSecKeyCanonical = textproto.CanonicalMIMEHeaderKey(headerSecKey)
- headerSecAcceptCanonical = textproto.CanonicalMIMEHeaderKey(headerSecAccept)
-)
-
-var (
- specHeaderValueUpgrade = []byte("websocket")
- specHeaderValueConnection = []byte("Upgrade")
- specHeaderValueConnectionLower = []byte("upgrade")
- specHeaderValueSecVersion = []byte("13")
-)
-
-var (
- httpVersion1_0 = []byte("HTTP/1.0")
- httpVersion1_1 = []byte("HTTP/1.1")
- httpVersionPrefix = []byte("HTTP/")
-)
-
-type httpRequestLine struct {
- method, uri []byte
- major, minor int
-}
-
-type httpResponseLine struct {
- major, minor int
- status int
- reason []byte
-}
-
-// httpParseRequestLine parses http request line like "GET / HTTP/1.0".
-func httpParseRequestLine(line []byte) (req httpRequestLine, err error) {
- var proto []byte
- req.method, req.uri, proto = bsplit3(line, ' ')
-
- var ok bool
- req.major, req.minor, ok = httpParseVersion(proto)
- if !ok {
- err = ErrMalformedRequest
- return
- }
-
- return
-}
-
-func httpParseResponseLine(line []byte) (resp httpResponseLine, err error) {
- var (
- proto []byte
- status []byte
- )
- proto, status, resp.reason = bsplit3(line, ' ')
-
- var ok bool
- resp.major, resp.minor, ok = httpParseVersion(proto)
- if !ok {
- return resp, ErrMalformedResponse
- }
-
- var convErr error
- resp.status, convErr = asciiToInt(status)
- if convErr != nil {
- return resp, ErrMalformedResponse
- }
-
- return resp, nil
-}
-
-// httpParseVersion parses major and minor version of HTTP protocol. It returns
-// parsed values and true if parse is ok.
-func httpParseVersion(bts []byte) (major, minor int, ok bool) {
- switch {
- case bytes.Equal(bts, httpVersion1_0):
- return 1, 0, true
- case bytes.Equal(bts, httpVersion1_1):
- return 1, 1, true
- case len(bts) < 8:
- return
- case !bytes.Equal(bts[:5], httpVersionPrefix):
- return
- }
-
- bts = bts[5:]
-
- dot := bytes.IndexByte(bts, '.')
- if dot == -1 {
- return
- }
- var err error
- major, err = asciiToInt(bts[:dot])
- if err != nil {
- return
- }
- minor, err = asciiToInt(bts[dot+1:])
- if err != nil {
- return
- }
-
- return major, minor, true
-}
-
-// httpParseHeaderLine parses HTTP header as key-value pair. It returns parsed
-// values and true if parse is ok.
-func httpParseHeaderLine(line []byte) (k, v []byte, ok bool) {
- colon := bytes.IndexByte(line, ':')
- if colon == -1 {
- return
- }
-
- k = btrim(line[:colon])
- // TODO(gobwas): maybe use just lower here?
- canonicalizeHeaderKey(k)
-
- v = btrim(line[colon+1:])
-
- return k, v, true
-}
-
-// httpGetHeader is the same as textproto.MIMEHeader.Get, except the thing,
-// that key is already canonical. This helps to increase performance.
-func httpGetHeader(h http.Header, key string) string {
- if h == nil {
- return ""
- }
- v := h[key]
- if len(v) == 0 {
- return ""
- }
- return v[0]
-}
-
-// The request MAY include a header field with the name
-// |Sec-WebSocket-Protocol|. If present, this value indicates one or more
-// comma-separated subprotocol the client wishes to speak, ordered by
-// preference. The elements that comprise this value MUST be non-empty strings
-// with characters in the range U+0021 to U+007E not including separator
-// characters as defined in [RFC2616] and MUST all be unique strings. The ABNF
-// for the value of this header field is 1#token, where the definitions of
-// constructs and rules are as given in [RFC2616].
-func strSelectProtocol(h string, check func(string) bool) (ret string, ok bool) {
- ok = httphead.ScanTokens(strToBytes(h), func(v []byte) bool {
- if check(btsToString(v)) {
- ret = string(v)
- return false
- }
- return true
- })
- return
-}
-func btsSelectProtocol(h []byte, check func([]byte) bool) (ret string, ok bool) {
- var selected []byte
- ok = httphead.ScanTokens(h, func(v []byte) bool {
- if check(v) {
- selected = v
- return false
- }
- return true
- })
- if ok && selected != nil {
- return string(selected), true
- }
- return
-}
-
-func strSelectExtensions(h string, selected []httphead.Option, check func(httphead.Option) bool) ([]httphead.Option, bool) {
- return btsSelectExtensions(strToBytes(h), selected, check)
-}
-
-func btsSelectExtensions(h []byte, selected []httphead.Option, check func(httphead.Option) bool) ([]httphead.Option, bool) {
- s := httphead.OptionSelector{
- Flags: httphead.SelectUnique | httphead.SelectCopy,
- Check: check,
- }
- return s.Select(h, selected)
-}
-
-func httpWriteHeader(bw *bufio.Writer, key, value string) {
- httpWriteHeaderKey(bw, key)
- bw.WriteString(value)
- bw.WriteString(crlf)
-}
-
-func httpWriteHeaderBts(bw *bufio.Writer, key string, value []byte) {
- httpWriteHeaderKey(bw, key)
- bw.Write(value)
- bw.WriteString(crlf)
-}
-
-func httpWriteHeaderKey(bw *bufio.Writer, key string) {
- bw.WriteString(key)
- bw.WriteString(colonAndSpace)
-}
-
-func httpWriteUpgradeRequest(
- bw *bufio.Writer,
- u *url.URL,
- nonce []byte,
- protocols []string,
- extensions []httphead.Option,
- header HandshakeHeader,
-) {
- bw.WriteString("GET ")
- bw.WriteString(u.RequestURI())
- bw.WriteString(" HTTP/1.1\r\n")
-
- httpWriteHeader(bw, headerHost, u.Host)
-
- httpWriteHeaderBts(bw, headerUpgrade, specHeaderValueUpgrade)
- httpWriteHeaderBts(bw, headerConnection, specHeaderValueConnection)
- httpWriteHeaderBts(bw, headerSecVersion, specHeaderValueSecVersion)
-
- // NOTE: write nonce bytes as a string to prevent heap allocation –
- // WriteString() copy given string into its inner buffer, unlike Write()
- // which may write p directly to the underlying io.Writer – which in turn
- // will lead to p escape.
- httpWriteHeader(bw, headerSecKey, btsToString(nonce))
-
- if len(protocols) > 0 {
- httpWriteHeaderKey(bw, headerSecProtocol)
- for i, p := range protocols {
- if i > 0 {
- bw.WriteString(commaAndSpace)
- }
- bw.WriteString(p)
- }
- bw.WriteString(crlf)
- }
-
- if len(extensions) > 0 {
- httpWriteHeaderKey(bw, headerSecExtensions)
- httphead.WriteOptions(bw, extensions)
- bw.WriteString(crlf)
- }
-
- if header != nil {
- header.WriteTo(bw)
- }
-
- bw.WriteString(crlf)
-}
-
-func httpWriteResponseUpgrade(bw *bufio.Writer, nonce []byte, hs Handshake, header HandshakeHeaderFunc) {
- bw.WriteString(textHeadUpgrade)
-
- httpWriteHeaderKey(bw, headerSecAccept)
- writeAccept(bw, nonce)
- bw.WriteString(crlf)
-
- if hs.Protocol != "" {
- httpWriteHeader(bw, headerSecProtocol, hs.Protocol)
- }
- if len(hs.Extensions) > 0 {
- httpWriteHeaderKey(bw, headerSecExtensions)
- httphead.WriteOptions(bw, hs.Extensions)
- bw.WriteString(crlf)
- }
- if header != nil {
- header(bw)
- }
-
- bw.WriteString(crlf)
-}
-
-func httpWriteResponseError(bw *bufio.Writer, err error, code int, header HandshakeHeaderFunc) {
- switch code {
- case http.StatusBadRequest:
- bw.WriteString(textHeadBadRequest)
- case http.StatusInternalServerError:
- bw.WriteString(textHeadInternalServerError)
- case http.StatusUpgradeRequired:
- bw.WriteString(textHeadUpgradeRequired)
- default:
- writeStatusText(bw, code)
- }
-
- // Write custom headers.
- if header != nil {
- header(bw)
- }
-
- switch err {
- case ErrHandshakeBadProtocol:
- bw.WriteString(textTailErrHandshakeBadProtocol)
- case ErrHandshakeBadMethod:
- bw.WriteString(textTailErrHandshakeBadMethod)
- case ErrHandshakeBadHost:
- bw.WriteString(textTailErrHandshakeBadHost)
- case ErrHandshakeBadUpgrade:
- bw.WriteString(textTailErrHandshakeBadUpgrade)
- case ErrHandshakeBadConnection:
- bw.WriteString(textTailErrHandshakeBadConnection)
- case ErrHandshakeBadSecAccept:
- bw.WriteString(textTailErrHandshakeBadSecAccept)
- case ErrHandshakeBadSecKey:
- bw.WriteString(textTailErrHandshakeBadSecKey)
- case ErrHandshakeBadSecVersion:
- bw.WriteString(textTailErrHandshakeBadSecVersion)
- case ErrHandshakeUpgradeRequired:
- bw.WriteString(textTailErrUpgradeRequired)
- case nil:
- bw.WriteString(crlf)
- default:
- writeErrorText(bw, err)
- }
-}
-
-func writeStatusText(bw *bufio.Writer, code int) {
- bw.WriteString("HTTP/1.1 ")
- bw.WriteString(strconv.Itoa(code))
- bw.WriteByte(' ')
- bw.WriteString(http.StatusText(code))
- bw.WriteString(crlf)
- bw.WriteString("Content-Type: text/plain; charset=utf-8")
- bw.WriteString(crlf)
-}
-
-func writeErrorText(bw *bufio.Writer, err error) {
- body := err.Error()
- bw.WriteString("Content-Length: ")
- bw.WriteString(strconv.Itoa(len(body)))
- bw.WriteString(crlf)
- bw.WriteString(crlf)
- bw.WriteString(body)
-}
-
-// httpError is like the http.Error with WebSocket context exception.
-func httpError(w http.ResponseWriter, body string, code int) {
- w.Header().Set("Content-Type", "text/plain; charset=utf-8")
- w.Header().Set("Content-Length", strconv.Itoa(len(body)))
- w.WriteHeader(code)
- w.Write([]byte(body))
-}
-
-// statusText is a non-performant status text generator.
-// NOTE: Used only to generate constants.
-func statusText(code int) string {
- var buf bytes.Buffer
- bw := bufio.NewWriter(&buf)
- writeStatusText(bw, code)
- bw.Flush()
- return buf.String()
-}
-
-// errorText is a non-performant error text generator.
-// NOTE: Used only to generate constants.
-func errorText(err error) string {
- var buf bytes.Buffer
- bw := bufio.NewWriter(&buf)
- writeErrorText(bw, err)
- bw.Flush()
- return buf.String()
-}
-
-// HandshakeHeader is the interface that writes both upgrade request or
-// response headers into a given io.Writer.
-type HandshakeHeader interface {
- io.WriterTo
-}
-
-// HandshakeHeaderString is an adapter to allow the use of headers represented
-// by ordinary string as HandshakeHeader.
-type HandshakeHeaderString string
-
-// WriteTo implements HandshakeHeader (and io.WriterTo) interface.
-func (s HandshakeHeaderString) WriteTo(w io.Writer) (int64, error) {
- n, err := io.WriteString(w, string(s))
- return int64(n), err
-}
-
-// HandshakeHeaderBytes is an adapter to allow the use of headers represented
-// by ordinary slice of bytes as HandshakeHeader.
-type HandshakeHeaderBytes []byte
-
-// WriteTo implements HandshakeHeader (and io.WriterTo) interface.
-func (b HandshakeHeaderBytes) WriteTo(w io.Writer) (int64, error) {
- n, err := w.Write(b)
- return int64(n), err
-}
-
-// HandshakeHeaderFunc is an adapter to allow the use of headers represented by
-// ordinary function as HandshakeHeader.
-type HandshakeHeaderFunc func(io.Writer) (int64, error)
-
-// WriteTo implements HandshakeHeader (and io.WriterTo) interface.
-func (f HandshakeHeaderFunc) WriteTo(w io.Writer) (int64, error) {
- return f(w)
-}
-
-// HandshakeHeaderHTTP is an adapter to allow the use of http.Header as
-// HandshakeHeader.
-type HandshakeHeaderHTTP http.Header
-
-// WriteTo implements HandshakeHeader (and io.WriterTo) interface.
-func (h HandshakeHeaderHTTP) WriteTo(w io.Writer) (int64, error) {
- wr := writer{w: w}
- err := http.Header(h).Write(&wr)
- return wr.n, err
-}
-
-type writer struct {
- n int64
- w io.Writer
-}
-
-func (w *writer) WriteString(s string) (int, error) {
- n, err := io.WriteString(w.w, s)
- w.n += int64(n)
- return n, err
-}
-
-func (w *writer) Write(p []byte) (int, error) {
- n, err := w.w.Write(p)
- w.n += int64(n)
- return n, err
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/nonce.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/nonce.go
deleted file mode 100644
index e694da7c308..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/nonce.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package ws
-
-import (
- "bufio"
- "bytes"
- "crypto/sha1"
- "encoding/base64"
- "fmt"
- "math/rand"
-)
-
-const (
- // RFC6455: The value of this header field MUST be a nonce consisting of a
- // randomly selected 16-byte value that has been base64-encoded (see
- // Section 4 of [RFC4648]). The nonce MUST be selected randomly for each
- // connection.
- nonceKeySize = 16
- nonceSize = 24 // base64.StdEncoding.EncodedLen(nonceKeySize)
-
- // RFC6455: The value of this header field is constructed by concatenating
- // /key/, defined above in step 4 in Section 4.2.2, with the string
- // "258EAFA5- E914-47DA-95CA-C5AB0DC85B11", taking the SHA-1 hash of this
- // concatenated value to obtain a 20-byte value and base64- encoding (see
- // Section 4 of [RFC4648]) this 20-byte hash.
- acceptSize = 28 // base64.StdEncoding.EncodedLen(sha1.Size)
-)
-
-// initNonce fills given slice with random base64-encoded nonce bytes.
-func initNonce(dst []byte) {
- // NOTE: bts does not escape.
- bts := make([]byte, nonceKeySize)
- if _, err := rand.Read(bts); err != nil {
- panic(fmt.Sprintf("rand read error: %s", err))
- }
- base64.StdEncoding.Encode(dst, bts)
-}
-
-// checkAcceptFromNonce reports whether given accept bytes are valid for given
-// nonce bytes.
-func checkAcceptFromNonce(accept, nonce []byte) bool {
- if len(accept) != acceptSize {
- return false
- }
- // NOTE: expect does not escape.
- expect := make([]byte, acceptSize)
- initAcceptFromNonce(expect, nonce)
- return bytes.Equal(expect, accept)
-}
-
-// initAcceptFromNonce fills given slice with accept bytes generated from given
-// nonce bytes. Given buffer should be exactly acceptSize bytes.
-func initAcceptFromNonce(accept, nonce []byte) {
- const magic = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
-
- if len(accept) != acceptSize {
- panic("accept buffer is invalid")
- }
- if len(nonce) != nonceSize {
- panic("nonce is invalid")
- }
-
- p := make([]byte, nonceSize+len(magic))
- copy(p[:nonceSize], nonce)
- copy(p[nonceSize:], magic)
-
- sum := sha1.Sum(p)
- base64.StdEncoding.Encode(accept, sum[:])
-
- return
-}
-
-func writeAccept(bw *bufio.Writer, nonce []byte) (int, error) {
- accept := make([]byte, acceptSize)
- initAcceptFromNonce(accept, nonce)
- // NOTE: write accept bytes as a string to prevent heap allocation –
- // WriteString() copy given string into its inner buffer, unlike Write()
- // which may write p directly to the underlying io.Writer – which in turn
- // will lead to p escape.
- return bw.WriteString(btsToString(accept))
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/read.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/read.go
deleted file mode 100644
index bc653e4690f..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/read.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package ws
-
-import (
- "encoding/binary"
- "fmt"
- "io"
-)
-
-// Errors used by frame reader.
-var (
- ErrHeaderLengthMSB = fmt.Errorf("header error: the most significant bit must be 0")
- ErrHeaderLengthUnexpected = fmt.Errorf("header error: unexpected payload length bits")
-)
-
-// ReadHeader reads a frame header from r.
-func ReadHeader(r io.Reader) (h Header, err error) {
- // Make slice of bytes with capacity 12 that could hold any header.
- //
- // The maximum header size is 14, but due to the 2 hop reads,
- // after first hop that reads first 2 constant bytes, we could reuse 2 bytes.
- // So 14 - 2 = 12.
- bts := make([]byte, 2, MaxHeaderSize-2)
-
- // Prepare to hold first 2 bytes to choose size of next read.
- _, err = io.ReadFull(r, bts)
- if err != nil {
- return
- }
-
- h.Fin = bts[0]&bit0 != 0
- h.Rsv = (bts[0] & 0x70) >> 4
- h.OpCode = OpCode(bts[0] & 0x0f)
-
- var extra int
-
- if bts[1]&bit0 != 0 {
- h.Masked = true
- extra += 4
- }
-
- length := bts[1] & 0x7f
- switch {
- case length < 126:
- h.Length = int64(length)
-
- case length == 126:
- extra += 2
-
- case length == 127:
- extra += 8
-
- default:
- err = ErrHeaderLengthUnexpected
- return
- }
-
- if extra == 0 {
- return
- }
-
- // Increase len of bts to extra bytes need to read.
- // Overwrite first 2 bytes that was read before.
- bts = bts[:extra]
- _, err = io.ReadFull(r, bts)
- if err != nil {
- return
- }
-
- switch {
- case length == 126:
- h.Length = int64(binary.BigEndian.Uint16(bts[:2]))
- bts = bts[2:]
-
- case length == 127:
- if bts[0]&0x80 != 0 {
- err = ErrHeaderLengthMSB
- return
- }
- h.Length = int64(binary.BigEndian.Uint64(bts[:8]))
- bts = bts[8:]
- }
-
- if h.Masked {
- copy(h.Mask[:], bts)
- }
-
- return
-}
-
-// ReadFrame reads a frame from r.
-// It is not designed for high optimized use case cause it makes allocation
-// for frame.Header.Length size inside to read frame payload into.
-//
-// Note that ReadFrame does not unmask payload.
-func ReadFrame(r io.Reader) (f Frame, err error) {
- f.Header, err = ReadHeader(r)
- if err != nil {
- return
- }
-
- if f.Header.Length > 0 {
- // int(f.Header.Length) is safe here cause we have
- // checked it for overflow above in ReadHeader.
- f.Payload = make([]byte, int(f.Header.Length))
- _, err = io.ReadFull(r, f.Payload)
- }
-
- return
-}
-
-// MustReadFrame is like ReadFrame but panics if frame can not be read.
-func MustReadFrame(r io.Reader) Frame {
- f, err := ReadFrame(r)
- if err != nil {
- panic(err)
- }
- return f
-}
-
-// ParseCloseFrameData parses close frame status code and closure reason if any provided.
-// If there is no status code in the payload
-// the empty status code is returned (code.Empty()) with empty string as a reason.
-func ParseCloseFrameData(payload []byte) (code StatusCode, reason string) {
- if len(payload) < 2 {
- // We returning empty StatusCode here, preventing the situation
- // when endpoint really sent code 1005 and we should return ProtocolError on that.
- //
- // In other words, we ignoring this rule [RFC6455:7.1.5]:
- // If this Close control frame contains no status code, _The WebSocket
- // Connection Close Code_ is considered to be 1005.
- return
- }
- code = StatusCode(binary.BigEndian.Uint16(payload))
- reason = string(payload[2:])
- return
-}
-
-// ParseCloseFrameDataUnsafe is like ParseCloseFrameData except the thing
-// that it does not copies payload bytes into reason, but prepares unsafe cast.
-func ParseCloseFrameDataUnsafe(payload []byte) (code StatusCode, reason string) {
- if len(payload) < 2 {
- return
- }
- code = StatusCode(binary.BigEndian.Uint16(payload))
- reason = btsToString(payload[2:])
- return
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/server.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/server.go
deleted file mode 100644
index 48059aded49..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/server.go
+++ /dev/null
@@ -1,607 +0,0 @@
-package ws
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "net"
- "net/http"
- "strings"
- "time"
-
- "github.com/gobwas/httphead"
- "github.com/gobwas/pool/pbufio"
-)
-
-// Constants used by ConnUpgrader.
-const (
- DefaultServerReadBufferSize = 4096
- DefaultServerWriteBufferSize = 512
-)
-
-// Errors used by both client and server when preparing WebSocket handshake.
-var (
- ErrHandshakeBadProtocol = RejectConnectionError(
- RejectionStatus(http.StatusHTTPVersionNotSupported),
- RejectionReason(fmt.Sprintf("handshake error: bad HTTP protocol version")),
- )
- ErrHandshakeBadMethod = RejectConnectionError(
- RejectionStatus(http.StatusMethodNotAllowed),
- RejectionReason(fmt.Sprintf("handshake error: bad HTTP request method")),
- )
- ErrHandshakeBadHost = RejectConnectionError(
- RejectionStatus(http.StatusBadRequest),
- RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerHost)),
- )
- ErrHandshakeBadUpgrade = RejectConnectionError(
- RejectionStatus(http.StatusBadRequest),
- RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerUpgrade)),
- )
- ErrHandshakeBadConnection = RejectConnectionError(
- RejectionStatus(http.StatusBadRequest),
- RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerConnection)),
- )
- ErrHandshakeBadSecAccept = RejectConnectionError(
- RejectionStatus(http.StatusBadRequest),
- RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerSecAccept)),
- )
- ErrHandshakeBadSecKey = RejectConnectionError(
- RejectionStatus(http.StatusBadRequest),
- RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerSecKey)),
- )
- ErrHandshakeBadSecVersion = RejectConnectionError(
- RejectionStatus(http.StatusBadRequest),
- RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerSecVersion)),
- )
-)
-
-// ErrMalformedResponse is returned by Dialer to indicate that server response
-// can not be parsed.
-var ErrMalformedResponse = fmt.Errorf("malformed HTTP response")
-
-// ErrMalformedRequest is returned when HTTP request can not be parsed.
-var ErrMalformedRequest = RejectConnectionError(
- RejectionStatus(http.StatusBadRequest),
- RejectionReason("malformed HTTP request"),
-)
-
-// ErrHandshakeUpgradeRequired is returned by Upgrader to indicate that
-// connection is rejected because given WebSocket version is malformed.
-//
-// According to RFC6455:
-// If this version does not match a version understood by the server, the
-// server MUST abort the WebSocket handshake described in this section and
-// instead send an appropriate HTTP error code (such as 426 Upgrade Required)
-// and a |Sec-WebSocket-Version| header field indicating the version(s) the
-// server is capable of understanding.
-var ErrHandshakeUpgradeRequired = RejectConnectionError(
- RejectionStatus(http.StatusUpgradeRequired),
- RejectionHeader(HandshakeHeaderString(headerSecVersion+": 13\r\n")),
- RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerSecVersion)),
-)
-
-// ErrNotHijacker is an error returned when http.ResponseWriter does not
-// implement http.Hijacker interface.
-var ErrNotHijacker = RejectConnectionError(
- RejectionStatus(http.StatusInternalServerError),
- RejectionReason("given http.ResponseWriter is not a http.Hijacker"),
-)
-
-// DefaultHTTPUpgrader is an HTTPUpgrader that holds no options and is used by
-// UpgradeHTTP function.
-var DefaultHTTPUpgrader HTTPUpgrader
-
-// UpgradeHTTP is like HTTPUpgrader{}.Upgrade().
-func UpgradeHTTP(r *http.Request, w http.ResponseWriter) (net.Conn, *bufio.ReadWriter, Handshake, error) {
- return DefaultHTTPUpgrader.Upgrade(r, w)
-}
-
-// DefaultUpgrader is an Upgrader that holds no options and is used by Upgrade
-// function.
-var DefaultUpgrader Upgrader
-
-// Upgrade is like Upgrader{}.Upgrade().
-func Upgrade(conn io.ReadWriter) (Handshake, error) {
- return DefaultUpgrader.Upgrade(conn)
-}
-
-// HTTPUpgrader contains options for upgrading connection to websocket from
-// net/http Handler arguments.
-type HTTPUpgrader struct {
- // Timeout is the maximum amount of time an Upgrade() will spent while
- // writing handshake response.
- //
- // The default is no timeout.
- Timeout time.Duration
-
- // Header is an optional http.Header mapping that could be used to
- // write additional headers to the handshake response.
- //
- // Note that if present, it will be written in any result of handshake.
- Header http.Header
-
- // Protocol is the select function that is used to select subprotocol from
- // list requested by client. If this field is set, then the first matched
- // protocol is sent to a client as negotiated.
- Protocol func(string) bool
-
- // Extension is the select function that is used to select extensions from
- // list requested by client. If this field is set, then the all matched
- // extensions are sent to a client as negotiated.
- Extension func(httphead.Option) bool
-}
-
-// Upgrade upgrades http connection to the websocket connection.
-//
-// It hijacks net.Conn from w and returns received net.Conn and
-// bufio.ReadWriter. On successful handshake it returns Handshake struct
-// describing handshake info.
-func (u HTTPUpgrader) Upgrade(r *http.Request, w http.ResponseWriter) (conn net.Conn, rw *bufio.ReadWriter, hs Handshake, err error) {
- // Hijack connection first to get the ability to write rejection errors the
- // same way as in Upgrader.
- hj, ok := w.(http.Hijacker)
- if ok {
- conn, rw, err = hj.Hijack()
- } else {
- err = ErrNotHijacker
- }
- if err != nil {
- httpError(w, err.Error(), http.StatusInternalServerError)
- return
- }
-
- // See https://tools.ietf.org/html/rfc6455#section-4.1
- // The method of the request MUST be GET, and the HTTP version MUST be at least 1.1.
- var nonce string
- if r.Method != http.MethodGet {
- err = ErrHandshakeBadMethod
- } else if r.ProtoMajor < 1 || (r.ProtoMajor == 1 && r.ProtoMinor < 1) {
- err = ErrHandshakeBadProtocol
- } else if r.Host == "" {
- err = ErrHandshakeBadHost
- } else if u := httpGetHeader(r.Header, headerUpgradeCanonical); u != "websocket" && !strings.EqualFold(u, "websocket") {
- err = ErrHandshakeBadUpgrade
- } else if c := httpGetHeader(r.Header, headerConnectionCanonical); c != "Upgrade" && !strHasToken(c, "upgrade") {
- err = ErrHandshakeBadConnection
- } else if nonce = httpGetHeader(r.Header, headerSecKeyCanonical); len(nonce) != nonceSize {
- err = ErrHandshakeBadSecKey
- } else if v := httpGetHeader(r.Header, headerSecVersionCanonical); v != "13" {
- // According to RFC6455:
- //
- // If this version does not match a version understood by the server,
- // the server MUST abort the WebSocket handshake described in this
- // section and instead send an appropriate HTTP error code (such as 426
- // Upgrade Required) and a |Sec-WebSocket-Version| header field
- // indicating the version(s) the server is capable of understanding.
- //
- // So we branching here cause empty or not present version does not
- // meet the ABNF rules of RFC6455:
- //
- // version = DIGIT | (NZDIGIT DIGIT) |
- // ("1" DIGIT DIGIT) | ("2" DIGIT DIGIT)
- // ; Limited to 0-255 range, with no leading zeros
- //
- // That is, if version is really invalid – we sent 426 status, if it
- // not present or empty – it is 400.
- if v != "" {
- err = ErrHandshakeUpgradeRequired
- } else {
- err = ErrHandshakeBadSecVersion
- }
- }
- if check := u.Protocol; err == nil && check != nil {
- ps := r.Header[headerSecProtocolCanonical]
- for i := 0; i < len(ps) && err == nil && hs.Protocol == ""; i++ {
- var ok bool
- hs.Protocol, ok = strSelectProtocol(ps[i], check)
- if !ok {
- err = ErrMalformedRequest
- }
- }
- }
- if check := u.Extension; err == nil && check != nil {
- xs := r.Header[headerSecExtensionsCanonical]
- for i := 0; i < len(xs) && err == nil; i++ {
- var ok bool
- hs.Extensions, ok = strSelectExtensions(xs[i], hs.Extensions, check)
- if !ok {
- err = ErrMalformedRequest
- }
- }
- }
-
- // Clear deadlines set by server.
- conn.SetDeadline(noDeadline)
- if t := u.Timeout; t != 0 {
- conn.SetWriteDeadline(time.Now().Add(t))
- defer conn.SetWriteDeadline(noDeadline)
- }
-
- var header handshakeHeader
- if h := u.Header; h != nil {
- header[0] = HandshakeHeaderHTTP(h)
- }
- if err == nil {
- httpWriteResponseUpgrade(rw.Writer, strToBytes(nonce), hs, header.WriteTo)
- err = rw.Writer.Flush()
- } else {
- var code int
- if rej, ok := err.(*rejectConnectionError); ok {
- code = rej.code
- header[1] = rej.header
- }
- if code == 0 {
- code = http.StatusInternalServerError
- }
- httpWriteResponseError(rw.Writer, err, code, header.WriteTo)
- // Do not store Flush() error to not override already existing one.
- rw.Writer.Flush()
- }
- return
-}
-
-// Upgrader contains options for upgrading connection to websocket.
-type Upgrader struct {
- // ReadBufferSize and WriteBufferSize is an I/O buffer sizes.
- // They used to read and write http data while upgrading to WebSocket.
- // Allocated buffers are pooled with sync.Pool to avoid extra allocations.
- //
- // If a size is zero then default value is used.
- //
- // Usually it is useful to set read buffer size bigger than write buffer
- // size because incoming request could contain long header values, such as
- // Cookie. Response, in other way, could be big only if user write multiple
- // custom headers. Usually response takes less than 256 bytes.
- ReadBufferSize, WriteBufferSize int
-
- // Protocol is a select function that is used to select subprotocol
- // from list requested by client. If this field is set, then the first matched
- // protocol is sent to a client as negotiated.
- //
- // The argument is only valid until the callback returns.
- Protocol func([]byte) bool
-
- // ProtocolCustrom allow user to parse Sec-WebSocket-Protocol header manually.
- // Note that returned bytes must be valid until Upgrade returns.
- // If ProtocolCustom is set, it used instead of Protocol function.
- ProtocolCustom func([]byte) (string, bool)
-
- // Extension is a select function that is used to select extensions
- // from list requested by client. If this field is set, then the all matched
- // extensions are sent to a client as negotiated.
- //
- // The argument is only valid until the callback returns.
- //
- // According to the RFC6455 order of extensions passed by a client is
- // significant. That is, returning true from this function means that no
- // other extension with the same name should be checked because server
- // accepted the most preferable extension right now:
- // "Note that the order of extensions is significant. Any interactions between
- // multiple extensions MAY be defined in the documents defining the extensions.
- // In the absence of such definitions, the interpretation is that the header
- // fields listed by the client in its request represent a preference of the
- // header fields it wishes to use, with the first options listed being most
- // preferable."
- Extension func(httphead.Option) bool
-
- // ExtensionCustorm allow user to parse Sec-WebSocket-Extensions header manually.
- // Note that returned options should be valid until Upgrade returns.
- // If ExtensionCustom is set, it used instead of Extension function.
- ExtensionCustom func([]byte, []httphead.Option) ([]httphead.Option, bool)
-
- // Header is an optional HandshakeHeader instance that could be used to
- // write additional headers to the handshake response.
- //
- // It used instead of any key-value mappings to avoid allocations in user
- // land.
- //
- // Note that if present, it will be written in any result of handshake.
- Header HandshakeHeader
-
- // OnRequest is a callback that will be called after request line
- // successful parsing.
- //
- // The arguments are only valid until the callback returns.
- //
- // If returned error is non-nil then connection is rejected and response is
- // sent with appropriate HTTP error code and body set to error message.
- //
- // RejectConnectionError could be used to get more control on response.
- OnRequest func(uri []byte) error
-
- // OnHost is a callback that will be called after "Host" header successful
- // parsing.
- //
- // It is separated from OnHeader callback because the Host header must be
- // present in each request since HTTP/1.1. Thus Host header is non-optional
- // and required for every WebSocket handshake.
- //
- // The arguments are only valid until the callback returns.
- //
- // If returned error is non-nil then connection is rejected and response is
- // sent with appropriate HTTP error code and body set to error message.
- //
- // RejectConnectionError could be used to get more control on response.
- OnHost func(host []byte) error
-
- // OnHeader is a callback that will be called after successful parsing of
- // header, that is not used during WebSocket handshake procedure. That is,
- // it will be called with non-websocket headers, which could be relevant
- // for application-level logic.
- //
- // The arguments are only valid until the callback returns.
- //
- // If returned error is non-nil then connection is rejected and response is
- // sent with appropriate HTTP error code and body set to error message.
- //
- // RejectConnectionError could be used to get more control on response.
- OnHeader func(key, value []byte) error
-
- // OnBeforeUpgrade is a callback that will be called before sending
- // successful upgrade response.
- //
- // Setting OnBeforeUpgrade allows user to make final application-level
- // checks and decide whether this connection is allowed to successfully
- // upgrade to WebSocket.
- //
- // It must return non-nil either HandshakeHeader or error and never both.
- //
- // If returned error is non-nil then connection is rejected and response is
- // sent with appropriate HTTP error code and body set to error message.
- //
- // RejectConnectionError could be used to get more control on response.
- OnBeforeUpgrade func() (header HandshakeHeader, err error)
-}
-
-// Upgrade zero-copy upgrades connection to WebSocket. It interprets given conn
-// as connection with incoming HTTP Upgrade request.
-//
-// It is a caller responsibility to manage i/o timeouts on conn.
-//
-// Non-nil error means that request for the WebSocket upgrade is invalid or
-// malformed and usually connection should be closed.
-// Even when error is non-nil Upgrade will write appropriate response into
-// connection in compliance with RFC.
-func (u Upgrader) Upgrade(conn io.ReadWriter) (hs Handshake, err error) {
- // headerSeen constants helps to report whether or not some header was seen
- // during reading request bytes.
- const (
- headerSeenHost = 1 << iota
- headerSeenUpgrade
- headerSeenConnection
- headerSeenSecVersion
- headerSeenSecKey
-
- // headerSeenAll is the value that we expect to receive at the end of
- // headers read/parse loop.
- headerSeenAll = 0 |
- headerSeenHost |
- headerSeenUpgrade |
- headerSeenConnection |
- headerSeenSecVersion |
- headerSeenSecKey
- )
-
- // Prepare I/O buffers.
- // TODO(gobwas): make it configurable.
- br := pbufio.GetReader(conn,
- nonZero(u.ReadBufferSize, DefaultServerReadBufferSize),
- )
- bw := pbufio.GetWriter(conn,
- nonZero(u.WriteBufferSize, DefaultServerWriteBufferSize),
- )
- defer func() {
- pbufio.PutReader(br)
- pbufio.PutWriter(bw)
- }()
-
- // Read HTTP request line like "GET /ws HTTP/1.1".
- rl, err := readLine(br)
- if err != nil {
- return
- }
- // Parse request line data like HTTP version, uri and method.
- req, err := httpParseRequestLine(rl)
- if err != nil {
- return
- }
-
- // Prepare stack-based handshake header list.
- header := handshakeHeader{
- 0: u.Header,
- }
-
- // Parse and check HTTP request.
- // As RFC6455 says:
- // The client's opening handshake consists of the following parts. If the
- // server, while reading the handshake, finds that the client did not
- // send a handshake that matches the description below (note that as per
- // [RFC2616], the order of the header fields is not important), including
- // but not limited to any violations of the ABNF grammar specified for
- // the components of the handshake, the server MUST stop processing the
- // client's handshake and return an HTTP response with an appropriate
- // error code (such as 400 Bad Request).
- //
- // See https://tools.ietf.org/html/rfc6455#section-4.2.1
-
- // An HTTP/1.1 or higher GET request, including a "Request-URI".
- //
- // Even if RFC says "1.1 or higher" without mentioning the part of the
- // version, we apply it only to minor part.
- switch {
- case req.major != 1 || req.minor < 1:
- // Abort processing the whole request because we do not even know how
- // to actually parse it.
- err = ErrHandshakeBadProtocol
-
- case btsToString(req.method) != http.MethodGet:
- err = ErrHandshakeBadMethod
-
- default:
- if onRequest := u.OnRequest; onRequest != nil {
- err = onRequest(req.uri)
- }
- }
- // Start headers read/parse loop.
- var (
- // headerSeen reports which header was seen by setting corresponding
- // bit on.
- headerSeen byte
-
- nonce = make([]byte, nonceSize)
- )
- for err == nil {
- line, e := readLine(br)
- if e != nil {
- return hs, e
- }
- if len(line) == 0 {
- // Blank line, no more lines to read.
- break
- }
-
- k, v, ok := httpParseHeaderLine(line)
- if !ok {
- err = ErrMalformedRequest
- break
- }
-
- switch btsToString(k) {
- case headerHostCanonical:
- headerSeen |= headerSeenHost
- if onHost := u.OnHost; onHost != nil {
- err = onHost(v)
- }
-
- case headerUpgradeCanonical:
- headerSeen |= headerSeenUpgrade
- if !bytes.Equal(v, specHeaderValueUpgrade) && !bytes.EqualFold(v, specHeaderValueUpgrade) {
- err = ErrHandshakeBadUpgrade
- }
-
- case headerConnectionCanonical:
- headerSeen |= headerSeenConnection
- if !bytes.Equal(v, specHeaderValueConnection) && !btsHasToken(v, specHeaderValueConnectionLower) {
- err = ErrHandshakeBadConnection
- }
-
- case headerSecVersionCanonical:
- headerSeen |= headerSeenSecVersion
- if !bytes.Equal(v, specHeaderValueSecVersion) {
- err = ErrHandshakeUpgradeRequired
- }
-
- case headerSecKeyCanonical:
- headerSeen |= headerSeenSecKey
- if len(v) != nonceSize {
- err = ErrHandshakeBadSecKey
- } else {
- copy(nonce[:], v)
- }
-
- case headerSecProtocolCanonical:
- if custom, check := u.ProtocolCustom, u.Protocol; hs.Protocol == "" && (custom != nil || check != nil) {
- var ok bool
- if custom != nil {
- hs.Protocol, ok = custom(v)
- } else {
- hs.Protocol, ok = btsSelectProtocol(v, check)
- }
- if !ok {
- err = ErrMalformedRequest
- }
- }
-
- case headerSecExtensionsCanonical:
- if custom, check := u.ExtensionCustom, u.Extension; custom != nil || check != nil {
- var ok bool
- if custom != nil {
- hs.Extensions, ok = custom(v, hs.Extensions)
- } else {
- hs.Extensions, ok = btsSelectExtensions(v, hs.Extensions, check)
- }
- if !ok {
- err = ErrMalformedRequest
- }
- }
-
- default:
- if onHeader := u.OnHeader; onHeader != nil {
- err = onHeader(k, v)
- }
- }
- }
- switch {
- case err == nil && headerSeen != headerSeenAll:
- switch {
- case headerSeen&headerSeenHost == 0:
- // As RFC2616 says:
- // A client MUST include a Host header field in all HTTP/1.1
- // request messages. If the requested URI does not include an
- // Internet host name for the service being requested, then the
- // Host header field MUST be given with an empty value. An
- // HTTP/1.1 proxy MUST ensure that any request message it
- // forwards does contain an appropriate Host header field that
- // identifies the service being requested by the proxy. All
- // Internet-based HTTP/1.1 servers MUST respond with a 400 (Bad
- // Request) status code to any HTTP/1.1 request message which
- // lacks a Host header field.
- err = ErrHandshakeBadHost
- case headerSeen&headerSeenUpgrade == 0:
- err = ErrHandshakeBadUpgrade
- case headerSeen&headerSeenConnection == 0:
- err = ErrHandshakeBadConnection
- case headerSeen&headerSeenSecVersion == 0:
- // In case of empty or not present version we do not send 426 status,
- // because it does not meet the ABNF rules of RFC6455:
- //
- // version = DIGIT | (NZDIGIT DIGIT) |
- // ("1" DIGIT DIGIT) | ("2" DIGIT DIGIT)
- // ; Limited to 0-255 range, with no leading zeros
- //
- // That is, if version is really invalid – we sent 426 status as above, if it
- // not present – it is 400.
- err = ErrHandshakeBadSecVersion
- case headerSeen&headerSeenSecKey == 0:
- err = ErrHandshakeBadSecKey
- default:
- panic("unknown headers state")
- }
-
- case err == nil && u.OnBeforeUpgrade != nil:
- header[1], err = u.OnBeforeUpgrade()
- }
- if err != nil {
- var code int
- if rej, ok := err.(*rejectConnectionError); ok {
- code = rej.code
- header[1] = rej.header
- }
- if code == 0 {
- code = http.StatusInternalServerError
- }
- httpWriteResponseError(bw, err, code, header.WriteTo)
- // Do not store Flush() error to not override already existing one.
- bw.Flush()
- return
- }
-
- httpWriteResponseUpgrade(bw, nonce, hs, header.WriteTo)
- err = bw.Flush()
-
- return
-}
-
-type handshakeHeader [2]HandshakeHeader
-
-func (hs handshakeHeader) WriteTo(w io.Writer) (n int64, err error) {
- for i := 0; i < len(hs) && err == nil; i++ {
- if h := hs[i]; h != nil {
- var m int64
- m, err = h.WriteTo(w)
- n += m
- }
- }
- return n, err
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/server_test.s b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/server_test.s
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/stub.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/stub.go
new file mode 100644
index 00000000000..0d00bc949fb
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/stub.go
@@ -0,0 +1,54 @@
+// Code generated by depstubber. DO NOT EDIT.
+// This is a simple stub for github.com/gobwas/ws, strictly for use in testing.
+
+// See the LICENSE file for information about the licensing of the original library.
+// Source: github.com/gobwas/ws (exports: Dialer; functions: Dial)
+
+// Package ws is a stub of github.com/gobwas/ws, generated by depstubber.
+package ws
+
+import (
+ bufio "bufio"
+ context "context"
+ tls "crypto/tls"
+ io "io"
+ net "net"
+ url "net/url"
+ time "time"
+)
+
+func Dial(_ context.Context, _ string) (net.Conn, *bufio.Reader, Handshake, error) {
+ return nil, nil, Handshake{}, nil
+}
+
+type Dialer struct {
+ ReadBufferSize int
+ WriteBufferSize int
+ Timeout time.Duration
+ Protocols []string
+ Extensions []interface{}
+ Header HandshakeHeader
+ OnStatusError func(int, []byte, io.Reader)
+ OnHeader func([]byte, []byte) error
+ NetDial func(context.Context, string, string) (net.Conn, error)
+ TLSClient func(net.Conn, string) net.Conn
+ TLSConfig *tls.Config
+ WrapConn func(net.Conn) net.Conn
+}
+
+func (_ Dialer) Dial(_ context.Context, _ string) (net.Conn, *bufio.Reader, Handshake, error) {
+ return nil, nil, Handshake{}, nil
+}
+
+func (_ Dialer) Upgrade(_ io.ReadWriter, _ *url.URL) (*bufio.Reader, Handshake, error) {
+ return nil, Handshake{}, nil
+}
+
+type Handshake struct {
+ Protocol string
+ Extensions []interface{}
+}
+
+type HandshakeHeader interface {
+ WriteTo(_ io.Writer) (int64, error)
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/util.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/util.go
deleted file mode 100644
index 67ad906e5d2..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/util.go
+++ /dev/null
@@ -1,214 +0,0 @@
-package ws
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "reflect"
- "unsafe"
-
- "github.com/gobwas/httphead"
-)
-
-// SelectFromSlice creates accept function that could be used as Protocol/Extension
-// select during upgrade.
-func SelectFromSlice(accept []string) func(string) bool {
- if len(accept) > 16 {
- mp := make(map[string]struct{}, len(accept))
- for _, p := range accept {
- mp[p] = struct{}{}
- }
- return func(p string) bool {
- _, ok := mp[p]
- return ok
- }
- }
- return func(p string) bool {
- for _, ok := range accept {
- if p == ok {
- return true
- }
- }
- return false
- }
-}
-
-// SelectEqual creates accept function that could be used as Protocol/Extension
-// select during upgrade.
-func SelectEqual(v string) func(string) bool {
- return func(p string) bool {
- return v == p
- }
-}
-
-func strToBytes(str string) (bts []byte) {
- s := (*reflect.StringHeader)(unsafe.Pointer(&str))
- b := (*reflect.SliceHeader)(unsafe.Pointer(&bts))
- b.Data = s.Data
- b.Len = s.Len
- b.Cap = s.Len
- return
-}
-
-func btsToString(bts []byte) (str string) {
- return *(*string)(unsafe.Pointer(&bts))
-}
-
-// asciiToInt converts bytes to int.
-func asciiToInt(bts []byte) (ret int, err error) {
- // ASCII numbers all start with the high-order bits 0011.
- // If you see that, and the next bits are 0-9 (0000 - 1001) you can grab those
- // bits and interpret them directly as an integer.
- var n int
- if n = len(bts); n < 1 {
- return 0, fmt.Errorf("converting empty bytes to int")
- }
- for i := 0; i < n; i++ {
- if bts[i]&0xf0 != 0x30 {
- return 0, fmt.Errorf("%s is not a numeric character", string(bts[i]))
- }
- ret += int(bts[i]&0xf) * pow(10, n-i-1)
- }
- return ret, nil
-}
-
-// pow for integers implementation.
-// See Donald Knuth, The Art of Computer Programming, Volume 2, Section 4.6.3
-func pow(a, b int) int {
- p := 1
- for b > 0 {
- if b&1 != 0 {
- p *= a
- }
- b >>= 1
- a *= a
- }
- return p
-}
-
-func bsplit3(bts []byte, sep byte) (b1, b2, b3 []byte) {
- a := bytes.IndexByte(bts, sep)
- b := bytes.IndexByte(bts[a+1:], sep)
- if a == -1 || b == -1 {
- return bts, nil, nil
- }
- b += a + 1
- return bts[:a], bts[a+1 : b], bts[b+1:]
-}
-
-func btrim(bts []byte) []byte {
- var i, j int
- for i = 0; i < len(bts) && (bts[i] == ' ' || bts[i] == '\t'); {
- i++
- }
- for j = len(bts); j > i && (bts[j-1] == ' ' || bts[j-1] == '\t'); {
- j--
- }
- return bts[i:j]
-}
-
-func strHasToken(header, token string) (has bool) {
- return btsHasToken(strToBytes(header), strToBytes(token))
-}
-
-func btsHasToken(header, token []byte) (has bool) {
- httphead.ScanTokens(header, func(v []byte) bool {
- has = bytes.EqualFold(v, token)
- return !has
- })
- return
-}
-
-const (
- toLower = 'a' - 'A' // for use with OR.
- toUpper = ^byte(toLower) // for use with AND.
- toLower8 = uint64(toLower) |
- uint64(toLower)<<8 |
- uint64(toLower)<<16 |
- uint64(toLower)<<24 |
- uint64(toLower)<<32 |
- uint64(toLower)<<40 |
- uint64(toLower)<<48 |
- uint64(toLower)<<56
-)
-
-// Algorithm below is like standard textproto/CanonicalMIMEHeaderKey, except
-// that it operates with slice of bytes and modifies it inplace without copying.
-func canonicalizeHeaderKey(k []byte) {
- upper := true
- for i, c := range k {
- if upper && 'a' <= c && c <= 'z' {
- k[i] &= toUpper
- } else if !upper && 'A' <= c && c <= 'Z' {
- k[i] |= toLower
- }
- upper = c == '-'
- }
-}
-
-// readLine reads line from br. It reads until '\n' and returns bytes without
-// '\n' or '\r\n' at the end.
-// It returns err if and only if line does not end in '\n'. Note that read
-// bytes returned in any case of error.
-//
-// It is much like the textproto/Reader.ReadLine() except the thing that it
-// returns raw bytes, instead of string. That is, it avoids copying bytes read
-// from br.
-//
-// textproto/Reader.ReadLineBytes() is also makes copy of resulting bytes to be
-// safe with future I/O operations on br.
-//
-// We could control I/O operations on br and do not need to make additional
-// copy for safety.
-//
-// NOTE: it may return copied flag to notify that returned buffer is safe to
-// use.
-func readLine(br *bufio.Reader) ([]byte, error) {
- var line []byte
- for {
- bts, err := br.ReadSlice('\n')
- if err == bufio.ErrBufferFull {
- // Copy bytes because next read will discard them.
- line = append(line, bts...)
- continue
- }
-
- // Avoid copy of single read.
- if line == nil {
- line = bts
- } else {
- line = append(line, bts...)
- }
-
- if err != nil {
- return line, err
- }
-
- // Size of line is at least 1.
- // In other case bufio.ReadSlice() returns error.
- n := len(line)
-
- // Cut '\n' or '\r\n'.
- if n > 1 && line[n-2] == '\r' {
- line = line[:n-2]
- } else {
- line = line[:n-1]
- }
-
- return line, nil
- }
-}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
-func nonZero(a, b int) int {
- if a != 0 {
- return a
- }
- return b
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/write.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/write.go
deleted file mode 100644
index 94557c69639..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gobwas/ws/write.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package ws
-
-import (
- "encoding/binary"
- "io"
-)
-
-// Header size length bounds in bytes.
-const (
- MaxHeaderSize = 14
- MinHeaderSize = 2
-)
-
-const (
- bit0 = 0x80
- bit1 = 0x40
- bit2 = 0x20
- bit3 = 0x10
- bit4 = 0x08
- bit5 = 0x04
- bit6 = 0x02
- bit7 = 0x01
-
- len7 = int64(125)
- len16 = int64(^(uint16(0)))
- len64 = int64(^(uint64(0)) >> 1)
-)
-
-// HeaderSize returns number of bytes that are needed to encode given header.
-// It returns -1 if header is malformed.
-func HeaderSize(h Header) (n int) {
- switch {
- case h.Length < 126:
- n = 2
- case h.Length <= len16:
- n = 4
- case h.Length <= len64:
- n = 10
- default:
- return -1
- }
- if h.Masked {
- n += len(h.Mask)
- }
- return n
-}
-
-// WriteHeader writes header binary representation into w.
-func WriteHeader(w io.Writer, h Header) error {
- // Make slice of bytes with capacity 14 that could hold any header.
- bts := make([]byte, MaxHeaderSize)
-
- if h.Fin {
- bts[0] |= bit0
- }
- bts[0] |= h.Rsv << 4
- bts[0] |= byte(h.OpCode)
-
- var n int
- switch {
- case h.Length <= len7:
- bts[1] = byte(h.Length)
- n = 2
-
- case h.Length <= len16:
- bts[1] = 126
- binary.BigEndian.PutUint16(bts[2:4], uint16(h.Length))
- n = 4
-
- case h.Length <= len64:
- bts[1] = 127
- binary.BigEndian.PutUint64(bts[2:10], uint64(h.Length))
- n = 10
-
- default:
- return ErrHeaderLengthUnexpected
- }
-
- if h.Masked {
- bts[1] |= bit0
- n += copy(bts[n:], h.Mask[:])
- }
-
- _, err := w.Write(bts[:n])
-
- return err
-}
-
-// WriteFrame writes frame binary representation into w.
-func WriteFrame(w io.Writer, f Frame) error {
- err := WriteHeader(w, f.Header)
- if err != nil {
- return err
- }
- _, err = w.Write(f.Payload)
- return err
-}
-
-// MustWriteFrame is like WriteFrame but panics if frame can not be read.
-func MustWriteFrame(w io.Writer, f Frame) {
- if err := WriteFrame(w, f); err != nil {
- panic(err)
- }
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/.gitignore b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/.gitignore
deleted file mode 100644
index cd3fcd1ef72..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/.gitignore
+++ /dev/null
@@ -1,25 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-
-.idea/
-*.iml
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/AUTHORS b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/AUTHORS
deleted file mode 100644
index 1931f400682..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/AUTHORS
+++ /dev/null
@@ -1,9 +0,0 @@
-# This is the official list of Gorilla WebSocket authors for copyright
-# purposes.
-#
-# Please keep the list sorted.
-
-Gary Burd
-Google LLC (https://opensource.google.com/)
-Joachim Bauch
-
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/README.md b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/README.md
deleted file mode 100644
index 19aa2e75c82..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/README.md
+++ /dev/null
@@ -1,64 +0,0 @@
-# Gorilla WebSocket
-
-[](https://godoc.org/github.com/gorilla/websocket)
-[](https://circleci.com/gh/gorilla/websocket)
-
-Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
-[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
-
-### Documentation
-
-* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc)
-* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
-* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
-* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
-* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
-
-### Status
-
-The Gorilla WebSocket package provides a complete and tested implementation of
-the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
-package API is stable.
-
-### Installation
-
- go get github.com/gorilla/websocket
-
-### Protocol Compliance
-
-The Gorilla WebSocket package passes the server tests in the [Autobahn Test
-Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn
-subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
-
-### Gorilla WebSocket compared with other packages
-
-
-
-Notes:
-
-1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
-2. The application can get the type of a received data message by implementing
- a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal)
- function.
-3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries.
- Read returns when the input buffer is full or a frame boundary is
- encountered. Each call to Write sends a single frame message. The Gorilla
- io.Reader and io.WriteCloser operate on a single WebSocket message.
-
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client.go
deleted file mode 100644
index 962c06a391c..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client.go
+++ /dev/null
@@ -1,395 +0,0 @@
-// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-import (
- "bytes"
- "context"
- "crypto/tls"
- "errors"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/http/httptrace"
- "net/url"
- "strings"
- "time"
-)
-
-// ErrBadHandshake is returned when the server response to opening handshake is
-// invalid.
-var ErrBadHandshake = errors.New("websocket: bad handshake")
-
-var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
-
-// NewClient creates a new client connection using the given net connection.
-// The URL u specifies the host and request URI. Use requestHeader to specify
-// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
-// (Cookie). Use the response.Header to get the selected subprotocol
-// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
-//
-// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
-// non-nil *http.Response so that callers can handle redirects, authentication,
-// etc.
-//
-// Deprecated: Use Dialer instead.
-func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
- d := Dialer{
- ReadBufferSize: readBufSize,
- WriteBufferSize: writeBufSize,
- NetDial: func(net, addr string) (net.Conn, error) {
- return netConn, nil
- },
- }
- return d.Dial(u.String(), requestHeader)
-}
-
-// A Dialer contains options for connecting to WebSocket server.
-type Dialer struct {
- // NetDial specifies the dial function for creating TCP connections. If
- // NetDial is nil, net.Dial is used.
- NetDial func(network, addr string) (net.Conn, error)
-
- // NetDialContext specifies the dial function for creating TCP connections. If
- // NetDialContext is nil, net.DialContext is used.
- NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
-
- // Proxy specifies a function to return a proxy for a given
- // Request. If the function returns a non-nil error, the
- // request is aborted with the provided error.
- // If Proxy is nil or returns a nil *URL, no proxy is used.
- Proxy func(*http.Request) (*url.URL, error)
-
- // TLSClientConfig specifies the TLS configuration to use with tls.Client.
- // If nil, the default configuration is used.
- TLSClientConfig *tls.Config
-
- // HandshakeTimeout specifies the duration for the handshake to complete.
- HandshakeTimeout time.Duration
-
- // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
- // size is zero, then a useful default size is used. The I/O buffer sizes
- // do not limit the size of the messages that can be sent or received.
- ReadBufferSize, WriteBufferSize int
-
- // WriteBufferPool is a pool of buffers for write operations. If the value
- // is not set, then write buffers are allocated to the connection for the
- // lifetime of the connection.
- //
- // A pool is most useful when the application has a modest volume of writes
- // across a large number of connections.
- //
- // Applications should use a single pool for each unique value of
- // WriteBufferSize.
- WriteBufferPool BufferPool
-
- // Subprotocols specifies the client's requested subprotocols.
- Subprotocols []string
-
- // EnableCompression specifies if the client should attempt to negotiate
- // per message compression (RFC 7692). Setting this value to true does not
- // guarantee that compression will be supported. Currently only "no context
- // takeover" modes are supported.
- EnableCompression bool
-
- // Jar specifies the cookie jar.
- // If Jar is nil, cookies are not sent in requests and ignored
- // in responses.
- Jar http.CookieJar
-}
-
-// Dial creates a new client connection by calling DialContext with a background context.
-func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
- return d.DialContext(context.Background(), urlStr, requestHeader)
-}
-
-var errMalformedURL = errors.New("malformed ws or wss URL")
-
-func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
- hostPort = u.Host
- hostNoPort = u.Host
- if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
- hostNoPort = hostNoPort[:i]
- } else {
- switch u.Scheme {
- case "wss":
- hostPort += ":443"
- case "https":
- hostPort += ":443"
- default:
- hostPort += ":80"
- }
- }
- return hostPort, hostNoPort
-}
-
-// DefaultDialer is a dialer with all fields set to the default values.
-var DefaultDialer = &Dialer{
- Proxy: http.ProxyFromEnvironment,
- HandshakeTimeout: 45 * time.Second,
-}
-
-// nilDialer is dialer to use when receiver is nil.
-var nilDialer = *DefaultDialer
-
-// DialContext creates a new client connection. Use requestHeader to specify the
-// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
-// Use the response.Header to get the selected subprotocol
-// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
-//
-// The context will be used in the request and in the Dialer.
-//
-// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
-// non-nil *http.Response so that callers can handle redirects, authentication,
-// etcetera. The response body may not contain the entire response and does not
-// need to be closed by the application.
-func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
- if d == nil {
- d = &nilDialer
- }
-
- challengeKey, err := generateChallengeKey()
- if err != nil {
- return nil, nil, err
- }
-
- u, err := url.Parse(urlStr)
- if err != nil {
- return nil, nil, err
- }
-
- switch u.Scheme {
- case "ws":
- u.Scheme = "http"
- case "wss":
- u.Scheme = "https"
- default:
- return nil, nil, errMalformedURL
- }
-
- if u.User != nil {
- // User name and password are not allowed in websocket URIs.
- return nil, nil, errMalformedURL
- }
-
- req := &http.Request{
- Method: "GET",
- URL: u,
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: make(http.Header),
- Host: u.Host,
- }
- req = req.WithContext(ctx)
-
- // Set the cookies present in the cookie jar of the dialer
- if d.Jar != nil {
- for _, cookie := range d.Jar.Cookies(u) {
- req.AddCookie(cookie)
- }
- }
-
- // Set the request headers using the capitalization for names and values in
- // RFC examples. Although the capitalization shouldn't matter, there are
- // servers that depend on it. The Header.Set method is not used because the
- // method canonicalizes the header names.
- req.Header["Upgrade"] = []string{"websocket"}
- req.Header["Connection"] = []string{"Upgrade"}
- req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
- req.Header["Sec-WebSocket-Version"] = []string{"13"}
- if len(d.Subprotocols) > 0 {
- req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
- }
- for k, vs := range requestHeader {
- switch {
- case k == "Host":
- if len(vs) > 0 {
- req.Host = vs[0]
- }
- case k == "Upgrade" ||
- k == "Connection" ||
- k == "Sec-Websocket-Key" ||
- k == "Sec-Websocket-Version" ||
- k == "Sec-Websocket-Extensions" ||
- (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
- return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
- case k == "Sec-Websocket-Protocol":
- req.Header["Sec-WebSocket-Protocol"] = vs
- default:
- req.Header[k] = vs
- }
- }
-
- if d.EnableCompression {
- req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"}
- }
-
- if d.HandshakeTimeout != 0 {
- var cancel func()
- ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout)
- defer cancel()
- }
-
- // Get network dial function.
- var netDial func(network, add string) (net.Conn, error)
-
- if d.NetDialContext != nil {
- netDial = func(network, addr string) (net.Conn, error) {
- return d.NetDialContext(ctx, network, addr)
- }
- } else if d.NetDial != nil {
- netDial = d.NetDial
- } else {
- netDialer := &net.Dialer{}
- netDial = func(network, addr string) (net.Conn, error) {
- return netDialer.DialContext(ctx, network, addr)
- }
- }
-
- // If needed, wrap the dial function to set the connection deadline.
- if deadline, ok := ctx.Deadline(); ok {
- forwardDial := netDial
- netDial = func(network, addr string) (net.Conn, error) {
- c, err := forwardDial(network, addr)
- if err != nil {
- return nil, err
- }
- err = c.SetDeadline(deadline)
- if err != nil {
- c.Close()
- return nil, err
- }
- return c, nil
- }
- }
-
- // If needed, wrap the dial function to connect through a proxy.
- if d.Proxy != nil {
- proxyURL, err := d.Proxy(req)
- if err != nil {
- return nil, nil, err
- }
- if proxyURL != nil {
- dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
- if err != nil {
- return nil, nil, err
- }
- netDial = dialer.Dial
- }
- }
-
- hostPort, hostNoPort := hostPortNoPort(u)
- trace := httptrace.ContextClientTrace(ctx)
- if trace != nil && trace.GetConn != nil {
- trace.GetConn(hostPort)
- }
-
- netConn, err := netDial("tcp", hostPort)
- if trace != nil && trace.GotConn != nil {
- trace.GotConn(httptrace.GotConnInfo{
- Conn: netConn,
- })
- }
- if err != nil {
- return nil, nil, err
- }
-
- defer func() {
- if netConn != nil {
- netConn.Close()
- }
- }()
-
- if u.Scheme == "https" {
- cfg := cloneTLSConfig(d.TLSClientConfig)
- if cfg.ServerName == "" {
- cfg.ServerName = hostNoPort
- }
- tlsConn := tls.Client(netConn, cfg)
- netConn = tlsConn
-
- var err error
- if trace != nil {
- err = doHandshakeWithTrace(trace, tlsConn, cfg)
- } else {
- err = doHandshake(tlsConn, cfg)
- }
-
- if err != nil {
- return nil, nil, err
- }
- }
-
- conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)
-
- if err := req.Write(netConn); err != nil {
- return nil, nil, err
- }
-
- if trace != nil && trace.GotFirstResponseByte != nil {
- if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 {
- trace.GotFirstResponseByte()
- }
- }
-
- resp, err := http.ReadResponse(conn.br, req)
- if err != nil {
- return nil, nil, err
- }
-
- if d.Jar != nil {
- if rc := resp.Cookies(); len(rc) > 0 {
- d.Jar.SetCookies(u, rc)
- }
- }
-
- if resp.StatusCode != 101 ||
- !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
- !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
- resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
- // Before closing the network connection on return from this
- // function, slurp up some of the response to aid application
- // debugging.
- buf := make([]byte, 1024)
- n, _ := io.ReadFull(resp.Body, buf)
- resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
- return nil, resp, ErrBadHandshake
- }
-
- for _, ext := range parseExtensions(resp.Header) {
- if ext[""] != "permessage-deflate" {
- continue
- }
- _, snct := ext["server_no_context_takeover"]
- _, cnct := ext["client_no_context_takeover"]
- if !snct || !cnct {
- return nil, resp, errInvalidCompression
- }
- conn.newCompressionWriter = compressNoContextTakeover
- conn.newDecompressionReader = decompressNoContextTakeover
- break
- }
-
- resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
- conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
-
- netConn.SetDeadline(time.Time{})
- netConn = nil // to avoid close in defer.
- return conn, resp, nil
-}
-
-func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error {
- if err := tlsConn.Handshake(); err != nil {
- return err
- }
- if !cfg.InsecureSkipVerify {
- if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client_clone.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client_clone.go
deleted file mode 100644
index 4f0d943723a..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client_clone.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.8
-
-package websocket
-
-import "crypto/tls"
-
-func cloneTLSConfig(cfg *tls.Config) *tls.Config {
- if cfg == nil {
- return &tls.Config{}
- }
- return cfg.Clone()
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client_clone_legacy.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client_clone_legacy.go
deleted file mode 100644
index babb007fb41..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/client_clone_legacy.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.8
-
-package websocket
-
-import "crypto/tls"
-
-// cloneTLSConfig clones all public fields except the fields
-// SessionTicketsDisabled and SessionTicketKey. This avoids copying the
-// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a
-// config in active use.
-func cloneTLSConfig(cfg *tls.Config) *tls.Config {
- if cfg == nil {
- return &tls.Config{}
- }
- return &tls.Config{
- Rand: cfg.Rand,
- Time: cfg.Time,
- Certificates: cfg.Certificates,
- NameToCertificate: cfg.NameToCertificate,
- GetCertificate: cfg.GetCertificate,
- RootCAs: cfg.RootCAs,
- NextProtos: cfg.NextProtos,
- ServerName: cfg.ServerName,
- ClientAuth: cfg.ClientAuth,
- ClientCAs: cfg.ClientCAs,
- InsecureSkipVerify: cfg.InsecureSkipVerify,
- CipherSuites: cfg.CipherSuites,
- PreferServerCipherSuites: cfg.PreferServerCipherSuites,
- ClientSessionCache: cfg.ClientSessionCache,
- MinVersion: cfg.MinVersion,
- MaxVersion: cfg.MaxVersion,
- CurvePreferences: cfg.CurvePreferences,
- }
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/compression.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/compression.go
deleted file mode 100644
index 813ffb1e843..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/compression.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-import (
- "compress/flate"
- "errors"
- "io"
- "strings"
- "sync"
-)
-
-const (
- minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6
- maxCompressionLevel = flate.BestCompression
- defaultCompressionLevel = 1
-)
-
-var (
- flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
- flateReaderPool = sync.Pool{New: func() interface{} {
- return flate.NewReader(nil)
- }}
-)
-
-func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
- const tail =
- // Add four bytes as specified in RFC
- "\x00\x00\xff\xff" +
- // Add final block to squelch unexpected EOF error from flate reader.
- "\x01\x00\x00\xff\xff"
-
- fr, _ := flateReaderPool.Get().(io.ReadCloser)
- fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
- return &flateReadWrapper{fr}
-}
-
-func isValidCompressionLevel(level int) bool {
- return minCompressionLevel <= level && level <= maxCompressionLevel
-}
-
-func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
- p := &flateWriterPools[level-minCompressionLevel]
- tw := &truncWriter{w: w}
- fw, _ := p.Get().(*flate.Writer)
- if fw == nil {
- fw, _ = flate.NewWriter(tw, level)
- } else {
- fw.Reset(tw)
- }
- return &flateWriteWrapper{fw: fw, tw: tw, p: p}
-}
-
-// truncWriter is an io.Writer that writes all but the last four bytes of the
-// stream to another io.Writer.
-type truncWriter struct {
- w io.WriteCloser
- n int
- p [4]byte
-}
-
-func (w *truncWriter) Write(p []byte) (int, error) {
- n := 0
-
- // fill buffer first for simplicity.
- if w.n < len(w.p) {
- n = copy(w.p[w.n:], p)
- p = p[n:]
- w.n += n
- if len(p) == 0 {
- return n, nil
- }
- }
-
- m := len(p)
- if m > len(w.p) {
- m = len(w.p)
- }
-
- if nn, err := w.w.Write(w.p[:m]); err != nil {
- return n + nn, err
- }
-
- copy(w.p[:], w.p[m:])
- copy(w.p[len(w.p)-m:], p[len(p)-m:])
- nn, err := w.w.Write(p[:len(p)-m])
- return n + nn, err
-}
-
-type flateWriteWrapper struct {
- fw *flate.Writer
- tw *truncWriter
- p *sync.Pool
-}
-
-func (w *flateWriteWrapper) Write(p []byte) (int, error) {
- if w.fw == nil {
- return 0, errWriteClosed
- }
- return w.fw.Write(p)
-}
-
-func (w *flateWriteWrapper) Close() error {
- if w.fw == nil {
- return errWriteClosed
- }
- err1 := w.fw.Flush()
- w.p.Put(w.fw)
- w.fw = nil
- if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
- return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
- }
- err2 := w.tw.w.Close()
- if err1 != nil {
- return err1
- }
- return err2
-}
-
-type flateReadWrapper struct {
- fr io.ReadCloser
-}
-
-func (r *flateReadWrapper) Read(p []byte) (int, error) {
- if r.fr == nil {
- return 0, io.ErrClosedPipe
- }
- n, err := r.fr.Read(p)
- if err == io.EOF {
- // Preemptively place the reader back in the pool. This helps with
- // scenarios where the application does not call NextReader() soon after
- // this final read.
- r.Close()
- }
- return n, err
-}
-
-func (r *flateReadWrapper) Close() error {
- if r.fr == nil {
- return io.ErrClosedPipe
- }
- err := r.fr.Close()
- flateReaderPool.Put(r.fr)
- r.fr = nil
- return err
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn.go
deleted file mode 100644
index ca46d2f793c..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn.go
+++ /dev/null
@@ -1,1201 +0,0 @@
-// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-import (
- "bufio"
- "encoding/binary"
- "errors"
- "io"
- "io/ioutil"
- "math/rand"
- "net"
- "strconv"
- "sync"
- "time"
- "unicode/utf8"
-)
-
-const (
- // Frame header byte 0 bits from Section 5.2 of RFC 6455
- finalBit = 1 << 7
- rsv1Bit = 1 << 6
- rsv2Bit = 1 << 5
- rsv3Bit = 1 << 4
-
- // Frame header byte 1 bits from Section 5.2 of RFC 6455
- maskBit = 1 << 7
-
- maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask
- maxControlFramePayloadSize = 125
-
- writeWait = time.Second
-
- defaultReadBufferSize = 4096
- defaultWriteBufferSize = 4096
-
- continuationFrame = 0
- noFrame = -1
-)
-
-// Close codes defined in RFC 6455, section 11.7.
-const (
- CloseNormalClosure = 1000
- CloseGoingAway = 1001
- CloseProtocolError = 1002
- CloseUnsupportedData = 1003
- CloseNoStatusReceived = 1005
- CloseAbnormalClosure = 1006
- CloseInvalidFramePayloadData = 1007
- ClosePolicyViolation = 1008
- CloseMessageTooBig = 1009
- CloseMandatoryExtension = 1010
- CloseInternalServerErr = 1011
- CloseServiceRestart = 1012
- CloseTryAgainLater = 1013
- CloseTLSHandshake = 1015
-)
-
-// The message types are defined in RFC 6455, section 11.8.
-const (
- // TextMessage denotes a text data message. The text message payload is
- // interpreted as UTF-8 encoded text data.
- TextMessage = 1
-
- // BinaryMessage denotes a binary data message.
- BinaryMessage = 2
-
- // CloseMessage denotes a close control message. The optional message
- // payload contains a numeric code and text. Use the FormatCloseMessage
- // function to format a close message payload.
- CloseMessage = 8
-
- // PingMessage denotes a ping control message. The optional message payload
- // is UTF-8 encoded text.
- PingMessage = 9
-
- // PongMessage denotes a pong control message. The optional message payload
- // is UTF-8 encoded text.
- PongMessage = 10
-)
-
-// ErrCloseSent is returned when the application writes a message to the
-// connection after sending a close message.
-var ErrCloseSent = errors.New("websocket: close sent")
-
-// ErrReadLimit is returned when reading a message that is larger than the
-// read limit set for the connection.
-var ErrReadLimit = errors.New("websocket: read limit exceeded")
-
-// netError satisfies the net Error interface.
-type netError struct {
- msg string
- temporary bool
- timeout bool
-}
-
-func (e *netError) Error() string { return e.msg }
-func (e *netError) Temporary() bool { return e.temporary }
-func (e *netError) Timeout() bool { return e.timeout }
-
-// CloseError represents a close message.
-type CloseError struct {
- // Code is defined in RFC 6455, section 11.7.
- Code int
-
- // Text is the optional text payload.
- Text string
-}
-
-func (e *CloseError) Error() string {
- s := []byte("websocket: close ")
- s = strconv.AppendInt(s, int64(e.Code), 10)
- switch e.Code {
- case CloseNormalClosure:
- s = append(s, " (normal)"...)
- case CloseGoingAway:
- s = append(s, " (going away)"...)
- case CloseProtocolError:
- s = append(s, " (protocol error)"...)
- case CloseUnsupportedData:
- s = append(s, " (unsupported data)"...)
- case CloseNoStatusReceived:
- s = append(s, " (no status)"...)
- case CloseAbnormalClosure:
- s = append(s, " (abnormal closure)"...)
- case CloseInvalidFramePayloadData:
- s = append(s, " (invalid payload data)"...)
- case ClosePolicyViolation:
- s = append(s, " (policy violation)"...)
- case CloseMessageTooBig:
- s = append(s, " (message too big)"...)
- case CloseMandatoryExtension:
- s = append(s, " (mandatory extension missing)"...)
- case CloseInternalServerErr:
- s = append(s, " (internal server error)"...)
- case CloseTLSHandshake:
- s = append(s, " (TLS handshake error)"...)
- }
- if e.Text != "" {
- s = append(s, ": "...)
- s = append(s, e.Text...)
- }
- return string(s)
-}
-
-// IsCloseError returns boolean indicating whether the error is a *CloseError
-// with one of the specified codes.
-func IsCloseError(err error, codes ...int) bool {
- if e, ok := err.(*CloseError); ok {
- for _, code := range codes {
- if e.Code == code {
- return true
- }
- }
- }
- return false
-}
-
-// IsUnexpectedCloseError returns boolean indicating whether the error is a
-// *CloseError with a code not in the list of expected codes.
-func IsUnexpectedCloseError(err error, expectedCodes ...int) bool {
- if e, ok := err.(*CloseError); ok {
- for _, code := range expectedCodes {
- if e.Code == code {
- return false
- }
- }
- return true
- }
- return false
-}
-
-var (
- errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true}
- errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()}
- errBadWriteOpCode = errors.New("websocket: bad write message type")
- errWriteClosed = errors.New("websocket: write closed")
- errInvalidControlFrame = errors.New("websocket: invalid control frame")
-)
-
-func newMaskKey() [4]byte {
- n := rand.Uint32()
- return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}
-}
-
-func hideTempErr(err error) error {
- if e, ok := err.(net.Error); ok && e.Temporary() {
- err = &netError{msg: e.Error(), timeout: e.Timeout()}
- }
- return err
-}
-
-func isControl(frameType int) bool {
- return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage
-}
-
-func isData(frameType int) bool {
- return frameType == TextMessage || frameType == BinaryMessage
-}
-
-var validReceivedCloseCodes = map[int]bool{
- // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
-
- CloseNormalClosure: true,
- CloseGoingAway: true,
- CloseProtocolError: true,
- CloseUnsupportedData: true,
- CloseNoStatusReceived: false,
- CloseAbnormalClosure: false,
- CloseInvalidFramePayloadData: true,
- ClosePolicyViolation: true,
- CloseMessageTooBig: true,
- CloseMandatoryExtension: true,
- CloseInternalServerErr: true,
- CloseServiceRestart: true,
- CloseTryAgainLater: true,
- CloseTLSHandshake: false,
-}
-
-func isValidReceivedCloseCode(code int) bool {
- return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999)
-}
-
-// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this
-// interface. The type of the value stored in a pool is not specified.
-type BufferPool interface {
- // Get gets a value from the pool or returns nil if the pool is empty.
- Get() interface{}
- // Put adds a value to the pool.
- Put(interface{})
-}
-
-// writePoolData is the type added to the write buffer pool. This wrapper is
-// used to prevent applications from peeking at and depending on the values
-// added to the pool.
-type writePoolData struct{ buf []byte }
-
-// The Conn type represents a WebSocket connection.
-type Conn struct {
- conn net.Conn
- isServer bool
- subprotocol string
-
- // Write fields
- mu chan struct{} // used as mutex to protect write to conn
- writeBuf []byte // frame is constructed in this buffer.
- writePool BufferPool
- writeBufSize int
- writeDeadline time.Time
- writer io.WriteCloser // the current writer returned to the application
- isWriting bool // for best-effort concurrent write detection
-
- writeErrMu sync.Mutex
- writeErr error
-
- enableWriteCompression bool
- compressionLevel int
- newCompressionWriter func(io.WriteCloser, int) io.WriteCloser
-
- // Read fields
- reader io.ReadCloser // the current reader returned to the application
- readErr error
- br *bufio.Reader
- // bytes remaining in current frame.
- // set setReadRemaining to safely update this value and prevent overflow
- readRemaining int64
- readFinal bool // true the current message has more frames.
- readLength int64 // Message size.
- readLimit int64 // Maximum message size.
- readMaskPos int
- readMaskKey [4]byte
- handlePong func(string) error
- handlePing func(string) error
- handleClose func(int, string) error
- readErrCount int
- messageReader *messageReader // the current low-level reader
-
- readDecompress bool // whether last read frame had RSV1 set
- newDecompressionReader func(io.Reader) io.ReadCloser
-}
-
-func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn {
-
- if br == nil {
- if readBufferSize == 0 {
- readBufferSize = defaultReadBufferSize
- } else if readBufferSize < maxControlFramePayloadSize {
- // must be large enough for control frame
- readBufferSize = maxControlFramePayloadSize
- }
- br = bufio.NewReaderSize(conn, readBufferSize)
- }
-
- if writeBufferSize <= 0 {
- writeBufferSize = defaultWriteBufferSize
- }
- writeBufferSize += maxFrameHeaderSize
-
- if writeBuf == nil && writeBufferPool == nil {
- writeBuf = make([]byte, writeBufferSize)
- }
-
- mu := make(chan struct{}, 1)
- mu <- struct{}{}
- c := &Conn{
- isServer: isServer,
- br: br,
- conn: conn,
- mu: mu,
- readFinal: true,
- writeBuf: writeBuf,
- writePool: writeBufferPool,
- writeBufSize: writeBufferSize,
- enableWriteCompression: true,
- compressionLevel: defaultCompressionLevel,
- }
- c.SetCloseHandler(nil)
- c.SetPingHandler(nil)
- c.SetPongHandler(nil)
- return c
-}
-
-// setReadRemaining tracks the number of bytes remaining on the connection. If n
-// overflows, an ErrReadLimit is returned.
-func (c *Conn) setReadRemaining(n int64) error {
- if n < 0 {
- return ErrReadLimit
- }
-
- c.readRemaining = n
- return nil
-}
-
-// Subprotocol returns the negotiated protocol for the connection.
-func (c *Conn) Subprotocol() string {
- return c.subprotocol
-}
-
-// Close closes the underlying network connection without sending or waiting
-// for a close message.
-func (c *Conn) Close() error {
- return c.conn.Close()
-}
-
-// LocalAddr returns the local network address.
-func (c *Conn) LocalAddr() net.Addr {
- return c.conn.LocalAddr()
-}
-
-// RemoteAddr returns the remote network address.
-func (c *Conn) RemoteAddr() net.Addr {
- return c.conn.RemoteAddr()
-}
-
-// Write methods
-
-func (c *Conn) writeFatal(err error) error {
- err = hideTempErr(err)
- c.writeErrMu.Lock()
- if c.writeErr == nil {
- c.writeErr = err
- }
- c.writeErrMu.Unlock()
- return err
-}
-
-func (c *Conn) read(n int) ([]byte, error) {
- p, err := c.br.Peek(n)
- if err == io.EOF {
- err = errUnexpectedEOF
- }
- c.br.Discard(len(p))
- return p, err
-}
-
-func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error {
- <-c.mu
- defer func() { c.mu <- struct{}{} }()
-
- c.writeErrMu.Lock()
- err := c.writeErr
- c.writeErrMu.Unlock()
- if err != nil {
- return err
- }
-
- c.conn.SetWriteDeadline(deadline)
- if len(buf1) == 0 {
- _, err = c.conn.Write(buf0)
- } else {
- err = c.writeBufs(buf0, buf1)
- }
- if err != nil {
- return c.writeFatal(err)
- }
- if frameType == CloseMessage {
- c.writeFatal(ErrCloseSent)
- }
- return nil
-}
-
-// WriteControl writes a control message with the given deadline. The allowed
-// message types are CloseMessage, PingMessage and PongMessage.
-func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error {
- if !isControl(messageType) {
- return errBadWriteOpCode
- }
- if len(data) > maxControlFramePayloadSize {
- return errInvalidControlFrame
- }
-
- b0 := byte(messageType) | finalBit
- b1 := byte(len(data))
- if !c.isServer {
- b1 |= maskBit
- }
-
- buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize)
- buf = append(buf, b0, b1)
-
- if c.isServer {
- buf = append(buf, data...)
- } else {
- key := newMaskKey()
- buf = append(buf, key[:]...)
- buf = append(buf, data...)
- maskBytes(key, 0, buf[6:])
- }
-
- d := 1000 * time.Hour
- if !deadline.IsZero() {
- d = deadline.Sub(time.Now())
- if d < 0 {
- return errWriteTimeout
- }
- }
-
- timer := time.NewTimer(d)
- select {
- case <-c.mu:
- timer.Stop()
- case <-timer.C:
- return errWriteTimeout
- }
- defer func() { c.mu <- struct{}{} }()
-
- c.writeErrMu.Lock()
- err := c.writeErr
- c.writeErrMu.Unlock()
- if err != nil {
- return err
- }
-
- c.conn.SetWriteDeadline(deadline)
- _, err = c.conn.Write(buf)
- if err != nil {
- return c.writeFatal(err)
- }
- if messageType == CloseMessage {
- c.writeFatal(ErrCloseSent)
- }
- return err
-}
-
-// beginMessage prepares a connection and message writer for a new message.
-func (c *Conn) beginMessage(mw *messageWriter, messageType int) error {
- // Close previous writer if not already closed by the application. It's
- // probably better to return an error in this situation, but we cannot
- // change this without breaking existing applications.
- if c.writer != nil {
- c.writer.Close()
- c.writer = nil
- }
-
- if !isControl(messageType) && !isData(messageType) {
- return errBadWriteOpCode
- }
-
- c.writeErrMu.Lock()
- err := c.writeErr
- c.writeErrMu.Unlock()
- if err != nil {
- return err
- }
-
- mw.c = c
- mw.frameType = messageType
- mw.pos = maxFrameHeaderSize
-
- if c.writeBuf == nil {
- wpd, ok := c.writePool.Get().(writePoolData)
- if ok {
- c.writeBuf = wpd.buf
- } else {
- c.writeBuf = make([]byte, c.writeBufSize)
- }
- }
- return nil
-}
-
-// NextWriter returns a writer for the next message to send. The writer's Close
-// method flushes the complete message to the network.
-//
-// There can be at most one open writer on a connection. NextWriter closes the
-// previous writer if the application has not already done so.
-//
-// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and
-// PongMessage) are supported.
-func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
- var mw messageWriter
- if err := c.beginMessage(&mw, messageType); err != nil {
- return nil, err
- }
- c.writer = &mw
- if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) {
- w := c.newCompressionWriter(c.writer, c.compressionLevel)
- mw.compress = true
- c.writer = w
- }
- return c.writer, nil
-}
-
-type messageWriter struct {
- c *Conn
- compress bool // whether next call to flushFrame should set RSV1
- pos int // end of data in writeBuf.
- frameType int // type of the current frame.
- err error
-}
-
-func (w *messageWriter) endMessage(err error) error {
- if w.err != nil {
- return err
- }
- c := w.c
- w.err = err
- c.writer = nil
- if c.writePool != nil {
- c.writePool.Put(writePoolData{buf: c.writeBuf})
- c.writeBuf = nil
- }
- return err
-}
-
-// flushFrame writes buffered data and extra as a frame to the network. The
-// final argument indicates that this is the last frame in the message.
-func (w *messageWriter) flushFrame(final bool, extra []byte) error {
- c := w.c
- length := w.pos - maxFrameHeaderSize + len(extra)
-
- // Check for invalid control frames.
- if isControl(w.frameType) &&
- (!final || length > maxControlFramePayloadSize) {
- return w.endMessage(errInvalidControlFrame)
- }
-
- b0 := byte(w.frameType)
- if final {
- b0 |= finalBit
- }
- if w.compress {
- b0 |= rsv1Bit
- }
- w.compress = false
-
- b1 := byte(0)
- if !c.isServer {
- b1 |= maskBit
- }
-
- // Assume that the frame starts at beginning of c.writeBuf.
- framePos := 0
- if c.isServer {
- // Adjust up if mask not included in the header.
- framePos = 4
- }
-
- switch {
- case length >= 65536:
- c.writeBuf[framePos] = b0
- c.writeBuf[framePos+1] = b1 | 127
- binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length))
- case length > 125:
- framePos += 6
- c.writeBuf[framePos] = b0
- c.writeBuf[framePos+1] = b1 | 126
- binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length))
- default:
- framePos += 8
- c.writeBuf[framePos] = b0
- c.writeBuf[framePos+1] = b1 | byte(length)
- }
-
- if !c.isServer {
- key := newMaskKey()
- copy(c.writeBuf[maxFrameHeaderSize-4:], key[:])
- maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos])
- if len(extra) > 0 {
- return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode")))
- }
- }
-
- // Write the buffers to the connection with best-effort detection of
- // concurrent writes. See the concurrency section in the package
- // documentation for more info.
-
- if c.isWriting {
- panic("concurrent write to websocket connection")
- }
- c.isWriting = true
-
- err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra)
-
- if !c.isWriting {
- panic("concurrent write to websocket connection")
- }
- c.isWriting = false
-
- if err != nil {
- return w.endMessage(err)
- }
-
- if final {
- w.endMessage(errWriteClosed)
- return nil
- }
-
- // Setup for next frame.
- w.pos = maxFrameHeaderSize
- w.frameType = continuationFrame
- return nil
-}
-
-func (w *messageWriter) ncopy(max int) (int, error) {
- n := len(w.c.writeBuf) - w.pos
- if n <= 0 {
- if err := w.flushFrame(false, nil); err != nil {
- return 0, err
- }
- n = len(w.c.writeBuf) - w.pos
- }
- if n > max {
- n = max
- }
- return n, nil
-}
-
-func (w *messageWriter) Write(p []byte) (int, error) {
- if w.err != nil {
- return 0, w.err
- }
-
- if len(p) > 2*len(w.c.writeBuf) && w.c.isServer {
- // Don't buffer large messages.
- err := w.flushFrame(false, p)
- if err != nil {
- return 0, err
- }
- return len(p), nil
- }
-
- nn := len(p)
- for len(p) > 0 {
- n, err := w.ncopy(len(p))
- if err != nil {
- return 0, err
- }
- copy(w.c.writeBuf[w.pos:], p[:n])
- w.pos += n
- p = p[n:]
- }
- return nn, nil
-}
-
-func (w *messageWriter) WriteString(p string) (int, error) {
- if w.err != nil {
- return 0, w.err
- }
-
- nn := len(p)
- for len(p) > 0 {
- n, err := w.ncopy(len(p))
- if err != nil {
- return 0, err
- }
- copy(w.c.writeBuf[w.pos:], p[:n])
- w.pos += n
- p = p[n:]
- }
- return nn, nil
-}
-
-func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) {
- if w.err != nil {
- return 0, w.err
- }
- for {
- if w.pos == len(w.c.writeBuf) {
- err = w.flushFrame(false, nil)
- if err != nil {
- break
- }
- }
- var n int
- n, err = r.Read(w.c.writeBuf[w.pos:])
- w.pos += n
- nn += int64(n)
- if err != nil {
- if err == io.EOF {
- err = nil
- }
- break
- }
- }
- return nn, err
-}
-
-func (w *messageWriter) Close() error {
- if w.err != nil {
- return w.err
- }
- return w.flushFrame(true, nil)
-}
-
-// WritePreparedMessage writes prepared message into connection.
-func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error {
- frameType, frameData, err := pm.frame(prepareKey{
- isServer: c.isServer,
- compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType),
- compressionLevel: c.compressionLevel,
- })
- if err != nil {
- return err
- }
- if c.isWriting {
- panic("concurrent write to websocket connection")
- }
- c.isWriting = true
- err = c.write(frameType, c.writeDeadline, frameData, nil)
- if !c.isWriting {
- panic("concurrent write to websocket connection")
- }
- c.isWriting = false
- return err
-}
-
-// WriteMessage is a helper method for getting a writer using NextWriter,
-// writing the message and closing the writer.
-func (c *Conn) WriteMessage(messageType int, data []byte) error {
-
- if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) {
- // Fast path with no allocations and single frame.
-
- var mw messageWriter
- if err := c.beginMessage(&mw, messageType); err != nil {
- return err
- }
- n := copy(c.writeBuf[mw.pos:], data)
- mw.pos += n
- data = data[n:]
- return mw.flushFrame(true, data)
- }
-
- w, err := c.NextWriter(messageType)
- if err != nil {
- return err
- }
- if _, err = w.Write(data); err != nil {
- return err
- }
- return w.Close()
-}
-
-// SetWriteDeadline sets the write deadline on the underlying network
-// connection. After a write has timed out, the websocket state is corrupt and
-// all future writes will return an error. A zero value for t means writes will
-// not time out.
-func (c *Conn) SetWriteDeadline(t time.Time) error {
- c.writeDeadline = t
- return nil
-}
-
-// Read methods
-
-func (c *Conn) advanceFrame() (int, error) {
- // 1. Skip remainder of previous frame.
-
- if c.readRemaining > 0 {
- if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil {
- return noFrame, err
- }
- }
-
- // 2. Read and parse first two bytes of frame header.
-
- p, err := c.read(2)
- if err != nil {
- return noFrame, err
- }
-
- final := p[0]&finalBit != 0
- frameType := int(p[0] & 0xf)
- mask := p[1]&maskBit != 0
- c.setReadRemaining(int64(p[1] & 0x7f))
-
- c.readDecompress = false
- if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 {
- c.readDecompress = true
- p[0] &^= rsv1Bit
- }
-
- if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 {
- return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16))
- }
-
- switch frameType {
- case CloseMessage, PingMessage, PongMessage:
- if c.readRemaining > maxControlFramePayloadSize {
- return noFrame, c.handleProtocolError("control frame length > 125")
- }
- if !final {
- return noFrame, c.handleProtocolError("control frame not final")
- }
- case TextMessage, BinaryMessage:
- if !c.readFinal {
- return noFrame, c.handleProtocolError("message start before final message frame")
- }
- c.readFinal = final
- case continuationFrame:
- if c.readFinal {
- return noFrame, c.handleProtocolError("continuation after final message frame")
- }
- c.readFinal = final
- default:
- return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType))
- }
-
- // 3. Read and parse frame length as per
- // https://tools.ietf.org/html/rfc6455#section-5.2
- //
- // The length of the "Payload data", in bytes: if 0-125, that is the payload
- // length.
- // - If 126, the following 2 bytes interpreted as a 16-bit unsigned
- // integer are the payload length.
- // - If 127, the following 8 bytes interpreted as
- // a 64-bit unsigned integer (the most significant bit MUST be 0) are the
- // payload length. Multibyte length quantities are expressed in network byte
- // order.
-
- switch c.readRemaining {
- case 126:
- p, err := c.read(2)
- if err != nil {
- return noFrame, err
- }
-
- if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil {
- return noFrame, err
- }
- case 127:
- p, err := c.read(8)
- if err != nil {
- return noFrame, err
- }
-
- if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil {
- return noFrame, err
- }
- }
-
- // 4. Handle frame masking.
-
- if mask != c.isServer {
- return noFrame, c.handleProtocolError("incorrect mask flag")
- }
-
- if mask {
- c.readMaskPos = 0
- p, err := c.read(len(c.readMaskKey))
- if err != nil {
- return noFrame, err
- }
- copy(c.readMaskKey[:], p)
- }
-
- // 5. For text and binary messages, enforce read limit and return.
-
- if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage {
-
- c.readLength += c.readRemaining
- // Don't allow readLength to overflow in the presence of a large readRemaining
- // counter.
- if c.readLength < 0 {
- return noFrame, ErrReadLimit
- }
-
- if c.readLimit > 0 && c.readLength > c.readLimit {
- c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait))
- return noFrame, ErrReadLimit
- }
-
- return frameType, nil
- }
-
- // 6. Read control frame payload.
-
- var payload []byte
- if c.readRemaining > 0 {
- payload, err = c.read(int(c.readRemaining))
- c.setReadRemaining(0)
- if err != nil {
- return noFrame, err
- }
- if c.isServer {
- maskBytes(c.readMaskKey, 0, payload)
- }
- }
-
- // 7. Process control frame payload.
-
- switch frameType {
- case PongMessage:
- if err := c.handlePong(string(payload)); err != nil {
- return noFrame, err
- }
- case PingMessage:
- if err := c.handlePing(string(payload)); err != nil {
- return noFrame, err
- }
- case CloseMessage:
- closeCode := CloseNoStatusReceived
- closeText := ""
- if len(payload) >= 2 {
- closeCode = int(binary.BigEndian.Uint16(payload))
- if !isValidReceivedCloseCode(closeCode) {
- return noFrame, c.handleProtocolError("invalid close code")
- }
- closeText = string(payload[2:])
- if !utf8.ValidString(closeText) {
- return noFrame, c.handleProtocolError("invalid utf8 payload in close frame")
- }
- }
- if err := c.handleClose(closeCode, closeText); err != nil {
- return noFrame, err
- }
- return noFrame, &CloseError{Code: closeCode, Text: closeText}
- }
-
- return frameType, nil
-}
-
-func (c *Conn) handleProtocolError(message string) error {
- c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait))
- return errors.New("websocket: " + message)
-}
-
-// NextReader returns the next data message received from the peer. The
-// returned messageType is either TextMessage or BinaryMessage.
-//
-// There can be at most one open reader on a connection. NextReader discards
-// the previous message if the application has not already consumed it.
-//
-// Applications must break out of the application's read loop when this method
-// returns a non-nil error value. Errors returned from this method are
-// permanent. Once this method returns a non-nil error, all subsequent calls to
-// this method return the same error.
-func (c *Conn) NextReader() (messageType int, r io.Reader, err error) {
- // Close previous reader, only relevant for decompression.
- if c.reader != nil {
- c.reader.Close()
- c.reader = nil
- }
-
- c.messageReader = nil
- c.readLength = 0
-
- for c.readErr == nil {
- frameType, err := c.advanceFrame()
- if err != nil {
- c.readErr = hideTempErr(err)
- break
- }
-
- if frameType == TextMessage || frameType == BinaryMessage {
- c.messageReader = &messageReader{c}
- c.reader = c.messageReader
- if c.readDecompress {
- c.reader = c.newDecompressionReader(c.reader)
- }
- return frameType, c.reader, nil
- }
- }
-
- // Applications that do handle the error returned from this method spin in
- // tight loop on connection failure. To help application developers detect
- // this error, panic on repeated reads to the failed connection.
- c.readErrCount++
- if c.readErrCount >= 1000 {
- panic("repeated read on failed websocket connection")
- }
-
- return noFrame, nil, c.readErr
-}
-
-type messageReader struct{ c *Conn }
-
-func (r *messageReader) Read(b []byte) (int, error) {
- c := r.c
- if c.messageReader != r {
- return 0, io.EOF
- }
-
- for c.readErr == nil {
-
- if c.readRemaining > 0 {
- if int64(len(b)) > c.readRemaining {
- b = b[:c.readRemaining]
- }
- n, err := c.br.Read(b)
- c.readErr = hideTempErr(err)
- if c.isServer {
- c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n])
- }
- rem := c.readRemaining
- rem -= int64(n)
- c.setReadRemaining(rem)
- if c.readRemaining > 0 && c.readErr == io.EOF {
- c.readErr = errUnexpectedEOF
- }
- return n, c.readErr
- }
-
- if c.readFinal {
- c.messageReader = nil
- return 0, io.EOF
- }
-
- frameType, err := c.advanceFrame()
- switch {
- case err != nil:
- c.readErr = hideTempErr(err)
- case frameType == TextMessage || frameType == BinaryMessage:
- c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader")
- }
- }
-
- err := c.readErr
- if err == io.EOF && c.messageReader == r {
- err = errUnexpectedEOF
- }
- return 0, err
-}
-
-func (r *messageReader) Close() error {
- return nil
-}
-
-// ReadMessage is a helper method for getting a reader using NextReader and
-// reading from that reader to a buffer.
-func (c *Conn) ReadMessage() (messageType int, p []byte, err error) {
- var r io.Reader
- messageType, r, err = c.NextReader()
- if err != nil {
- return messageType, nil, err
- }
- p, err = ioutil.ReadAll(r)
- return messageType, p, err
-}
-
-// SetReadDeadline sets the read deadline on the underlying network connection.
-// After a read has timed out, the websocket connection state is corrupt and
-// all future reads will return an error. A zero value for t means reads will
-// not time out.
-func (c *Conn) SetReadDeadline(t time.Time) error {
- return c.conn.SetReadDeadline(t)
-}
-
-// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a
-// message exceeds the limit, the connection sends a close message to the peer
-// and returns ErrReadLimit to the application.
-func (c *Conn) SetReadLimit(limit int64) {
- c.readLimit = limit
-}
-
-// CloseHandler returns the current close handler
-func (c *Conn) CloseHandler() func(code int, text string) error {
- return c.handleClose
-}
-
-// SetCloseHandler sets the handler for close messages received from the peer.
-// The code argument to h is the received close code or CloseNoStatusReceived
-// if the close message is empty. The default close handler sends a close
-// message back to the peer.
-//
-// The handler function is called from the NextReader, ReadMessage and message
-// reader Read methods. The application must read the connection to process
-// close messages as described in the section on Control Messages above.
-//
-// The connection read methods return a CloseError when a close message is
-// received. Most applications should handle close messages as part of their
-// normal error handling. Applications should only set a close handler when the
-// application must perform some action before sending a close message back to
-// the peer.
-func (c *Conn) SetCloseHandler(h func(code int, text string) error) {
- if h == nil {
- h = func(code int, text string) error {
- message := FormatCloseMessage(code, "")
- c.WriteControl(CloseMessage, message, time.Now().Add(writeWait))
- return nil
- }
- }
- c.handleClose = h
-}
-
-// PingHandler returns the current ping handler
-func (c *Conn) PingHandler() func(appData string) error {
- return c.handlePing
-}
-
-// SetPingHandler sets the handler for ping messages received from the peer.
-// The appData argument to h is the PING message application data. The default
-// ping handler sends a pong to the peer.
-//
-// The handler function is called from the NextReader, ReadMessage and message
-// reader Read methods. The application must read the connection to process
-// ping messages as described in the section on Control Messages above.
-func (c *Conn) SetPingHandler(h func(appData string) error) {
- if h == nil {
- h = func(message string) error {
- err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait))
- if err == ErrCloseSent {
- return nil
- } else if e, ok := err.(net.Error); ok && e.Temporary() {
- return nil
- }
- return err
- }
- }
- c.handlePing = h
-}
-
-// PongHandler returns the current pong handler
-func (c *Conn) PongHandler() func(appData string) error {
- return c.handlePong
-}
-
-// SetPongHandler sets the handler for pong messages received from the peer.
-// The appData argument to h is the PONG message application data. The default
-// pong handler does nothing.
-//
-// The handler function is called from the NextReader, ReadMessage and message
-// reader Read methods. The application must read the connection to process
-// pong messages as described in the section on Control Messages above.
-func (c *Conn) SetPongHandler(h func(appData string) error) {
- if h == nil {
- h = func(string) error { return nil }
- }
- c.handlePong = h
-}
-
-// UnderlyingConn returns the internal net.Conn. This can be used to further
-// modifications to connection specific flags.
-func (c *Conn) UnderlyingConn() net.Conn {
- return c.conn
-}
-
-// EnableWriteCompression enables and disables write compression of
-// subsequent text and binary messages. This function is a noop if
-// compression was not negotiated with the peer.
-func (c *Conn) EnableWriteCompression(enable bool) {
- c.enableWriteCompression = enable
-}
-
-// SetCompressionLevel sets the flate compression level for subsequent text and
-// binary messages. This function is a noop if compression was not negotiated
-// with the peer. See the compress/flate package for a description of
-// compression levels.
-func (c *Conn) SetCompressionLevel(level int) error {
- if !isValidCompressionLevel(level) {
- return errors.New("websocket: invalid compression level")
- }
- c.compressionLevel = level
- return nil
-}
-
-// FormatCloseMessage formats closeCode and text as a WebSocket close message.
-// An empty message is returned for code CloseNoStatusReceived.
-func FormatCloseMessage(closeCode int, text string) []byte {
- if closeCode == CloseNoStatusReceived {
- // Return empty message because it's illegal to send
- // CloseNoStatusReceived. Return non-nil value in case application
- // checks for nil.
- return []byte{}
- }
- buf := make([]byte, 2+len(text))
- binary.BigEndian.PutUint16(buf, uint16(closeCode))
- copy(buf[2:], text)
- return buf
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn_write.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn_write.go
deleted file mode 100644
index a509a21f87a..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn_write.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.8
-
-package websocket
-
-import "net"
-
-func (c *Conn) writeBufs(bufs ...[]byte) error {
- b := net.Buffers(bufs)
- _, err := b.WriteTo(c.conn)
- return err
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn_write_legacy.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn_write_legacy.go
deleted file mode 100644
index 37edaff5a57..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/conn_write_legacy.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.8
-
-package websocket
-
-func (c *Conn) writeBufs(bufs ...[]byte) error {
- for _, buf := range bufs {
- if len(buf) > 0 {
- if _, err := c.conn.Write(buf); err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/doc.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/doc.go
deleted file mode 100644
index 8db0cef95a2..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/doc.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package websocket implements the WebSocket protocol defined in RFC 6455.
-//
-// Overview
-//
-// The Conn type represents a WebSocket connection. A server application calls
-// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
-//
-// var upgrader = websocket.Upgrader{
-// ReadBufferSize: 1024,
-// WriteBufferSize: 1024,
-// }
-//
-// func handler(w http.ResponseWriter, r *http.Request) {
-// conn, err := upgrader.Upgrade(w, r, nil)
-// if err != nil {
-// log.Println(err)
-// return
-// }
-// ... Use conn to send and receive messages.
-// }
-//
-// Call the connection's WriteMessage and ReadMessage methods to send and
-// receive messages as a slice of bytes. This snippet of code shows how to echo
-// messages using these methods:
-//
-// for {
-// messageType, p, err := conn.ReadMessage()
-// if err != nil {
-// log.Println(err)
-// return
-// }
-// if err := conn.WriteMessage(messageType, p); err != nil {
-// log.Println(err)
-// return
-// }
-// }
-//
-// In above snippet of code, p is a []byte and messageType is an int with value
-// websocket.BinaryMessage or websocket.TextMessage.
-//
-// An application can also send and receive messages using the io.WriteCloser
-// and io.Reader interfaces. To send a message, call the connection NextWriter
-// method to get an io.WriteCloser, write the message to the writer and close
-// the writer when done. To receive a message, call the connection NextReader
-// method to get an io.Reader and read until io.EOF is returned. This snippet
-// shows how to echo messages using the NextWriter and NextReader methods:
-//
-// for {
-// messageType, r, err := conn.NextReader()
-// if err != nil {
-// return
-// }
-// w, err := conn.NextWriter(messageType)
-// if err != nil {
-// return err
-// }
-// if _, err := io.Copy(w, r); err != nil {
-// return err
-// }
-// if err := w.Close(); err != nil {
-// return err
-// }
-// }
-//
-// Data Messages
-//
-// The WebSocket protocol distinguishes between text and binary data messages.
-// Text messages are interpreted as UTF-8 encoded text. The interpretation of
-// binary messages is left to the application.
-//
-// This package uses the TextMessage and BinaryMessage integer constants to
-// identify the two data message types. The ReadMessage and NextReader methods
-// return the type of the received message. The messageType argument to the
-// WriteMessage and NextWriter methods specifies the type of a sent message.
-//
-// It is the application's responsibility to ensure that text messages are
-// valid UTF-8 encoded text.
-//
-// Control Messages
-//
-// The WebSocket protocol defines three types of control messages: close, ping
-// and pong. Call the connection WriteControl, WriteMessage or NextWriter
-// methods to send a control message to the peer.
-//
-// Connections handle received close messages by calling the handler function
-// set with the SetCloseHandler method and by returning a *CloseError from the
-// NextReader, ReadMessage or the message Read method. The default close
-// handler sends a close message to the peer.
-//
-// Connections handle received ping messages by calling the handler function
-// set with the SetPingHandler method. The default ping handler sends a pong
-// message to the peer.
-//
-// Connections handle received pong messages by calling the handler function
-// set with the SetPongHandler method. The default pong handler does nothing.
-// If an application sends ping messages, then the application should set a
-// pong handler to receive the corresponding pong.
-//
-// The control message handler functions are called from the NextReader,
-// ReadMessage and message reader Read methods. The default close and ping
-// handlers can block these methods for a short time when the handler writes to
-// the connection.
-//
-// The application must read the connection to process close, ping and pong
-// messages sent from the peer. If the application is not otherwise interested
-// in messages from the peer, then the application should start a goroutine to
-// read and discard messages from the peer. A simple example is:
-//
-// func readLoop(c *websocket.Conn) {
-// for {
-// if _, _, err := c.NextReader(); err != nil {
-// c.Close()
-// break
-// }
-// }
-// }
-//
-// Concurrency
-//
-// Connections support one concurrent reader and one concurrent writer.
-//
-// Applications are responsible for ensuring that no more than one goroutine
-// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
-// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
-// that no more than one goroutine calls the read methods (NextReader,
-// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
-// concurrently.
-//
-// The Close and WriteControl methods can be called concurrently with all other
-// methods.
-//
-// Origin Considerations
-//
-// Web browsers allow Javascript applications to open a WebSocket connection to
-// any host. It's up to the server to enforce an origin policy using the Origin
-// request header sent by the browser.
-//
-// The Upgrader calls the function specified in the CheckOrigin field to check
-// the origin. If the CheckOrigin function returns false, then the Upgrade
-// method fails the WebSocket handshake with HTTP status 403.
-//
-// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
-// the handshake if the Origin request header is present and the Origin host is
-// not equal to the Host request header.
-//
-// The deprecated package-level Upgrade function does not perform origin
-// checking. The application is responsible for checking the Origin header
-// before calling the Upgrade function.
-//
-// Buffers
-//
-// Connections buffer network input and output to reduce the number
-// of system calls when reading or writing messages.
-//
-// Write buffers are also used for constructing WebSocket frames. See RFC 6455,
-// Section 5 for a discussion of message framing. A WebSocket frame header is
-// written to the network each time a write buffer is flushed to the network.
-// Decreasing the size of the write buffer can increase the amount of framing
-// overhead on the connection.
-//
-// The buffer sizes in bytes are specified by the ReadBufferSize and
-// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default
-// size of 4096 when a buffer size field is set to zero. The Upgrader reuses
-// buffers created by the HTTP server when a buffer size field is set to zero.
-// The HTTP server buffers have a size of 4096 at the time of this writing.
-//
-// The buffer sizes do not limit the size of a message that can be read or
-// written by a connection.
-//
-// Buffers are held for the lifetime of the connection by default. If the
-// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the
-// write buffer only when writing a message.
-//
-// Applications should tune the buffer sizes to balance memory use and
-// performance. Increasing the buffer size uses more memory, but can reduce the
-// number of system calls to read or write the network. In the case of writing,
-// increasing the buffer size can reduce the number of frame headers written to
-// the network.
-//
-// Some guidelines for setting buffer parameters are:
-//
-// Limit the buffer sizes to the maximum expected message size. Buffers larger
-// than the largest message do not provide any benefit.
-//
-// Depending on the distribution of message sizes, setting the buffer size to
-// a value less than the maximum expected message size can greatly reduce memory
-// use with a small impact on performance. Here's an example: If 99% of the
-// messages are smaller than 256 bytes and the maximum message size is 512
-// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls
-// than a buffer size of 512 bytes. The memory savings is 50%.
-//
-// A write buffer pool is useful when the application has a modest number
-// writes over a large number of connections. when buffers are pooled, a larger
-// buffer size has a reduced impact on total memory use and has the benefit of
-// reducing system calls and frame overhead.
-//
-// Compression EXPERIMENTAL
-//
-// Per message compression extensions (RFC 7692) are experimentally supported
-// by this package in a limited capacity. Setting the EnableCompression option
-// to true in Dialer or Upgrader will attempt to negotiate per message deflate
-// support.
-//
-// var upgrader = websocket.Upgrader{
-// EnableCompression: true,
-// }
-//
-// If compression was successfully negotiated with the connection's peer, any
-// message received in compressed form will be automatically decompressed.
-// All Read methods will return uncompressed bytes.
-//
-// Per message compression of messages written to a connection can be enabled
-// or disabled by calling the corresponding Conn method:
-//
-// conn.EnableWriteCompression(false)
-//
-// Currently this package does not support compression with "context takeover".
-// This means that messages must be compressed and decompressed in isolation,
-// without retaining sliding window or dictionary state across messages. For
-// more details refer to RFC 7692.
-//
-// Use of compression is experimental and may result in decreased performance.
-package websocket
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/go.mod b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/go.mod
deleted file mode 100644
index 1a7afd5028a..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/gorilla/websocket
-
-go 1.12
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/join.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/join.go
deleted file mode 100644
index c64f8c82901..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/join.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-import (
- "io"
- "strings"
-)
-
-// JoinMessages concatenates received messages to create a single io.Reader.
-// The string term is appended to each message. The returned reader does not
-// support concurrent calls to the Read method.
-func JoinMessages(c *Conn, term string) io.Reader {
- return &joinReader{c: c, term: term}
-}
-
-type joinReader struct {
- c *Conn
- term string
- r io.Reader
-}
-
-func (r *joinReader) Read(p []byte) (int, error) {
- if r.r == nil {
- var err error
- _, r.r, err = r.c.NextReader()
- if err != nil {
- return 0, err
- }
- if r.term != "" {
- r.r = io.MultiReader(r.r, strings.NewReader(r.term))
- }
- }
- n, err := r.r.Read(p)
- if err == io.EOF {
- err = nil
- r.r = nil
- }
- return n, err
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/json.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/json.go
deleted file mode 100644
index dc2c1f6415f..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/json.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-import (
- "encoding/json"
- "io"
-)
-
-// WriteJSON writes the JSON encoding of v as a message.
-//
-// Deprecated: Use c.WriteJSON instead.
-func WriteJSON(c *Conn, v interface{}) error {
- return c.WriteJSON(v)
-}
-
-// WriteJSON writes the JSON encoding of v as a message.
-//
-// See the documentation for encoding/json Marshal for details about the
-// conversion of Go values to JSON.
-func (c *Conn) WriteJSON(v interface{}) error {
- w, err := c.NextWriter(TextMessage)
- if err != nil {
- return err
- }
- err1 := json.NewEncoder(w).Encode(v)
- err2 := w.Close()
- if err1 != nil {
- return err1
- }
- return err2
-}
-
-// ReadJSON reads the next JSON-encoded message from the connection and stores
-// it in the value pointed to by v.
-//
-// Deprecated: Use c.ReadJSON instead.
-func ReadJSON(c *Conn, v interface{}) error {
- return c.ReadJSON(v)
-}
-
-// ReadJSON reads the next JSON-encoded message from the connection and stores
-// it in the value pointed to by v.
-//
-// See the documentation for the encoding/json Unmarshal function for details
-// about the conversion of JSON to a Go value.
-func (c *Conn) ReadJSON(v interface{}) error {
- _, r, err := c.NextReader()
- if err != nil {
- return err
- }
- err = json.NewDecoder(r).Decode(v)
- if err == io.EOF {
- // One value is expected in the message.
- err = io.ErrUnexpectedEOF
- }
- return err
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/mask.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/mask.go
deleted file mode 100644
index 577fce9efd7..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/mask.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
-// this source code is governed by a BSD-style license that can be found in the
-// LICENSE file.
-
-// +build !appengine
-
-package websocket
-
-import "unsafe"
-
-const wordSize = int(unsafe.Sizeof(uintptr(0)))
-
-func maskBytes(key [4]byte, pos int, b []byte) int {
- // Mask one byte at a time for small buffers.
- if len(b) < 2*wordSize {
- for i := range b {
- b[i] ^= key[pos&3]
- pos++
- }
- return pos & 3
- }
-
- // Mask one byte at a time to word boundary.
- if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
- n = wordSize - n
- for i := range b[:n] {
- b[i] ^= key[pos&3]
- pos++
- }
- b = b[n:]
- }
-
- // Create aligned word size key.
- var k [wordSize]byte
- for i := range k {
- k[i] = key[(pos+i)&3]
- }
- kw := *(*uintptr)(unsafe.Pointer(&k))
-
- // Mask one word at a time.
- n := (len(b) / wordSize) * wordSize
- for i := 0; i < n; i += wordSize {
- *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
- }
-
- // Mask one byte at a time for remaining bytes.
- b = b[n:]
- for i := range b {
- b[i] ^= key[pos&3]
- pos++
- }
-
- return pos & 3
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/mask_safe.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/mask_safe.go
deleted file mode 100644
index 2aac060e52e..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/mask_safe.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
-// this source code is governed by a BSD-style license that can be found in the
-// LICENSE file.
-
-// +build appengine
-
-package websocket
-
-func maskBytes(key [4]byte, pos int, b []byte) int {
- for i := range b {
- b[i] ^= key[pos&3]
- pos++
- }
- return pos & 3
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/prepared.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/prepared.go
deleted file mode 100644
index c854225e967..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/prepared.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-import (
- "bytes"
- "net"
- "sync"
- "time"
-)
-
-// PreparedMessage caches on the wire representations of a message payload.
-// Use PreparedMessage to efficiently send a message payload to multiple
-// connections. PreparedMessage is especially useful when compression is used
-// because the CPU and memory expensive compression operation can be executed
-// once for a given set of compression options.
-type PreparedMessage struct {
- messageType int
- data []byte
- mu sync.Mutex
- frames map[prepareKey]*preparedFrame
-}
-
-// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
-type prepareKey struct {
- isServer bool
- compress bool
- compressionLevel int
-}
-
-// preparedFrame contains data in wire representation.
-type preparedFrame struct {
- once sync.Once
- data []byte
-}
-
-// NewPreparedMessage returns an initialized PreparedMessage. You can then send
-// it to connection using WritePreparedMessage method. Valid wire
-// representation will be calculated lazily only once for a set of current
-// connection options.
-func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
- pm := &PreparedMessage{
- messageType: messageType,
- frames: make(map[prepareKey]*preparedFrame),
- data: data,
- }
-
- // Prepare a plain server frame.
- _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
- if err != nil {
- return nil, err
- }
-
- // To protect against caller modifying the data argument, remember the data
- // copied to the plain server frame.
- pm.data = frameData[len(frameData)-len(data):]
- return pm, nil
-}
-
-func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
- pm.mu.Lock()
- frame, ok := pm.frames[key]
- if !ok {
- frame = &preparedFrame{}
- pm.frames[key] = frame
- }
- pm.mu.Unlock()
-
- var err error
- frame.once.Do(func() {
- // Prepare a frame using a 'fake' connection.
- // TODO: Refactor code in conn.go to allow more direct construction of
- // the frame.
- mu := make(chan struct{}, 1)
- mu <- struct{}{}
- var nc prepareConn
- c := &Conn{
- conn: &nc,
- mu: mu,
- isServer: key.isServer,
- compressionLevel: key.compressionLevel,
- enableWriteCompression: true,
- writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
- }
- if key.compress {
- c.newCompressionWriter = compressNoContextTakeover
- }
- err = c.WriteMessage(pm.messageType, pm.data)
- frame.data = nc.buf.Bytes()
- })
- return pm.messageType, frame.data, err
-}
-
-type prepareConn struct {
- buf bytes.Buffer
- net.Conn
-}
-
-func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) }
-func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/proxy.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/proxy.go
deleted file mode 100644
index e87a8c9f0c9..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/proxy.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-import (
- "bufio"
- "encoding/base64"
- "errors"
- "net"
- "net/http"
- "net/url"
- "strings"
-)
-
-type netDialerFunc func(network, addr string) (net.Conn, error)
-
-func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
- return fn(network, addr)
-}
-
-func init() {
- proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
- return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil
- })
-}
-
-type httpProxyDialer struct {
- proxyURL *url.URL
- forwardDial func(network, addr string) (net.Conn, error)
-}
-
-func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
- hostPort, _ := hostPortNoPort(hpd.proxyURL)
- conn, err := hpd.forwardDial(network, hostPort)
- if err != nil {
- return nil, err
- }
-
- connectHeader := make(http.Header)
- if user := hpd.proxyURL.User; user != nil {
- proxyUser := user.Username()
- if proxyPassword, passwordSet := user.Password(); passwordSet {
- credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
- connectHeader.Set("Proxy-Authorization", "Basic "+credential)
- }
- }
-
- connectReq := &http.Request{
- Method: "CONNECT",
- URL: &url.URL{Opaque: addr},
- Host: addr,
- Header: connectHeader,
- }
-
- if err := connectReq.Write(conn); err != nil {
- conn.Close()
- return nil, err
- }
-
- // Read response. It's OK to use and discard buffered reader here becaue
- // the remote server does not speak until spoken to.
- br := bufio.NewReader(conn)
- resp, err := http.ReadResponse(br, connectReq)
- if err != nil {
- conn.Close()
- return nil, err
- }
-
- if resp.StatusCode != 200 {
- conn.Close()
- f := strings.SplitN(resp.Status, " ", 2)
- return nil, errors.New(f[1])
- }
- return conn, nil
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/server.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/server.go
deleted file mode 100644
index 887d558918c..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/server.go
+++ /dev/null
@@ -1,363 +0,0 @@
-// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-import (
- "bufio"
- "errors"
- "io"
- "net/http"
- "net/url"
- "strings"
- "time"
-)
-
-// HandshakeError describes an error with the handshake from the peer.
-type HandshakeError struct {
- message string
-}
-
-func (e HandshakeError) Error() string { return e.message }
-
-// Upgrader specifies parameters for upgrading an HTTP connection to a
-// WebSocket connection.
-type Upgrader struct {
- // HandshakeTimeout specifies the duration for the handshake to complete.
- HandshakeTimeout time.Duration
-
- // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
- // size is zero, then buffers allocated by the HTTP server are used. The
- // I/O buffer sizes do not limit the size of the messages that can be sent
- // or received.
- ReadBufferSize, WriteBufferSize int
-
- // WriteBufferPool is a pool of buffers for write operations. If the value
- // is not set, then write buffers are allocated to the connection for the
- // lifetime of the connection.
- //
- // A pool is most useful when the application has a modest volume of writes
- // across a large number of connections.
- //
- // Applications should use a single pool for each unique value of
- // WriteBufferSize.
- WriteBufferPool BufferPool
-
- // Subprotocols specifies the server's supported protocols in order of
- // preference. If this field is not nil, then the Upgrade method negotiates a
- // subprotocol by selecting the first match in this list with a protocol
- // requested by the client. If there's no match, then no protocol is
- // negotiated (the Sec-Websocket-Protocol header is not included in the
- // handshake response).
- Subprotocols []string
-
- // Error specifies the function for generating HTTP error responses. If Error
- // is nil, then http.Error is used to generate the HTTP response.
- Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
-
- // CheckOrigin returns true if the request Origin header is acceptable. If
- // CheckOrigin is nil, then a safe default is used: return false if the
- // Origin request header is present and the origin host is not equal to
- // request Host header.
- //
- // A CheckOrigin function should carefully validate the request origin to
- // prevent cross-site request forgery.
- CheckOrigin func(r *http.Request) bool
-
- // EnableCompression specify if the server should attempt to negotiate per
- // message compression (RFC 7692). Setting this value to true does not
- // guarantee that compression will be supported. Currently only "no context
- // takeover" modes are supported.
- EnableCompression bool
-}
-
-func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
- err := HandshakeError{reason}
- if u.Error != nil {
- u.Error(w, r, status, err)
- } else {
- w.Header().Set("Sec-Websocket-Version", "13")
- http.Error(w, http.StatusText(status), status)
- }
- return nil, err
-}
-
-// checkSameOrigin returns true if the origin is not set or is equal to the request host.
-func checkSameOrigin(r *http.Request) bool {
- origin := r.Header["Origin"]
- if len(origin) == 0 {
- return true
- }
- u, err := url.Parse(origin[0])
- if err != nil {
- return false
- }
- return equalASCIIFold(u.Host, r.Host)
-}
-
-func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
- if u.Subprotocols != nil {
- clientProtocols := Subprotocols(r)
- for _, serverProtocol := range u.Subprotocols {
- for _, clientProtocol := range clientProtocols {
- if clientProtocol == serverProtocol {
- return clientProtocol
- }
- }
- }
- } else if responseHeader != nil {
- return responseHeader.Get("Sec-Websocket-Protocol")
- }
- return ""
-}
-
-// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
-//
-// The responseHeader is included in the response to the client's upgrade
-// request. Use the responseHeader to specify cookies (Set-Cookie) and the
-// application negotiated subprotocol (Sec-WebSocket-Protocol).
-//
-// If the upgrade fails, then Upgrade replies to the client with an HTTP error
-// response.
-func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
- const badHandshake = "websocket: the client is not using the websocket protocol: "
-
- if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
- return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
- }
-
- if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
- return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
- }
-
- if r.Method != "GET" {
- return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
- }
-
- if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
- return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
- }
-
- if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
- return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported")
- }
-
- checkOrigin := u.CheckOrigin
- if checkOrigin == nil {
- checkOrigin = checkSameOrigin
- }
- if !checkOrigin(r) {
- return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
- }
-
- challengeKey := r.Header.Get("Sec-Websocket-Key")
- if challengeKey == "" {
- return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank")
- }
-
- subprotocol := u.selectSubprotocol(r, responseHeader)
-
- // Negotiate PMCE
- var compress bool
- if u.EnableCompression {
- for _, ext := range parseExtensions(r.Header) {
- if ext[""] != "permessage-deflate" {
- continue
- }
- compress = true
- break
- }
- }
-
- h, ok := w.(http.Hijacker)
- if !ok {
- return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
- }
- var brw *bufio.ReadWriter
- netConn, brw, err := h.Hijack()
- if err != nil {
- return u.returnError(w, r, http.StatusInternalServerError, err.Error())
- }
-
- if brw.Reader.Buffered() > 0 {
- netConn.Close()
- return nil, errors.New("websocket: client sent data before handshake is complete")
- }
-
- var br *bufio.Reader
- if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
- // Reuse hijacked buffered reader as connection reader.
- br = brw.Reader
- }
-
- buf := bufioWriterBuffer(netConn, brw.Writer)
-
- var writeBuf []byte
- if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
- // Reuse hijacked write buffer as connection buffer.
- writeBuf = buf
- }
-
- c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
- c.subprotocol = subprotocol
-
- if compress {
- c.newCompressionWriter = compressNoContextTakeover
- c.newDecompressionReader = decompressNoContextTakeover
- }
-
- // Use larger of hijacked buffer and connection write buffer for header.
- p := buf
- if len(c.writeBuf) > len(p) {
- p = c.writeBuf
- }
- p = p[:0]
-
- p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
- p = append(p, computeAcceptKey(challengeKey)...)
- p = append(p, "\r\n"...)
- if c.subprotocol != "" {
- p = append(p, "Sec-WebSocket-Protocol: "...)
- p = append(p, c.subprotocol...)
- p = append(p, "\r\n"...)
- }
- if compress {
- p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
- }
- for k, vs := range responseHeader {
- if k == "Sec-Websocket-Protocol" {
- continue
- }
- for _, v := range vs {
- p = append(p, k...)
- p = append(p, ": "...)
- for i := 0; i < len(v); i++ {
- b := v[i]
- if b <= 31 {
- // prevent response splitting.
- b = ' '
- }
- p = append(p, b)
- }
- p = append(p, "\r\n"...)
- }
- }
- p = append(p, "\r\n"...)
-
- // Clear deadlines set by HTTP server.
- netConn.SetDeadline(time.Time{})
-
- if u.HandshakeTimeout > 0 {
- netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
- }
- if _, err = netConn.Write(p); err != nil {
- netConn.Close()
- return nil, err
- }
- if u.HandshakeTimeout > 0 {
- netConn.SetWriteDeadline(time.Time{})
- }
-
- return c, nil
-}
-
-// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
-//
-// Deprecated: Use websocket.Upgrader instead.
-//
-// Upgrade does not perform origin checking. The application is responsible for
-// checking the Origin header before calling Upgrade. An example implementation
-// of the same origin policy check is:
-//
-// if req.Header.Get("Origin") != "http://"+req.Host {
-// http.Error(w, "Origin not allowed", http.StatusForbidden)
-// return
-// }
-//
-// If the endpoint supports subprotocols, then the application is responsible
-// for negotiating the protocol used on the connection. Use the Subprotocols()
-// function to get the subprotocols requested by the client. Use the
-// Sec-Websocket-Protocol response header to specify the subprotocol selected
-// by the application.
-//
-// The responseHeader is included in the response to the client's upgrade
-// request. Use the responseHeader to specify cookies (Set-Cookie) and the
-// negotiated subprotocol (Sec-Websocket-Protocol).
-//
-// The connection buffers IO to the underlying network connection. The
-// readBufSize and writeBufSize parameters specify the size of the buffers to
-// use. Messages can be larger than the buffers.
-//
-// If the request is not a valid WebSocket handshake, then Upgrade returns an
-// error of type HandshakeError. Applications should handle this error by
-// replying to the client with an HTTP error response.
-func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
- u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
- u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
- // don't return errors to maintain backwards compatibility
- }
- u.CheckOrigin = func(r *http.Request) bool {
- // allow all connections by default
- return true
- }
- return u.Upgrade(w, r, responseHeader)
-}
-
-// Subprotocols returns the subprotocols requested by the client in the
-// Sec-Websocket-Protocol header.
-func Subprotocols(r *http.Request) []string {
- h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
- if h == "" {
- return nil
- }
- protocols := strings.Split(h, ",")
- for i := range protocols {
- protocols[i] = strings.TrimSpace(protocols[i])
- }
- return protocols
-}
-
-// IsWebSocketUpgrade returns true if the client requested upgrade to the
-// WebSocket protocol.
-func IsWebSocketUpgrade(r *http.Request) bool {
- return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
- tokenListContainsValue(r.Header, "Upgrade", "websocket")
-}
-
-// bufioReaderSize size returns the size of a bufio.Reader.
-func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
- // This code assumes that peek on a reset reader returns
- // bufio.Reader.buf[:0].
- // TODO: Use bufio.Reader.Size() after Go 1.10
- br.Reset(originalReader)
- if p, err := br.Peek(0); err == nil {
- return cap(p)
- }
- return 0
-}
-
-// writeHook is an io.Writer that records the last slice passed to it vio
-// io.Writer.Write.
-type writeHook struct {
- p []byte
-}
-
-func (wh *writeHook) Write(p []byte) (int, error) {
- wh.p = p
- return len(p), nil
-}
-
-// bufioWriterBuffer grabs the buffer from a bufio.Writer.
-func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
- // This code assumes that bufio.Writer.buf[:1] is passed to the
- // bufio.Writer's underlying writer.
- var wh writeHook
- bw.Reset(&wh)
- bw.WriteByte(0)
- bw.Flush()
-
- bw.Reset(originalWriter)
-
- return wh.p[:cap(wh.p)]
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/stub.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/stub.go
new file mode 100644
index 00000000000..0be1589cca9
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/stub.go
@@ -0,0 +1,135 @@
+// Code generated by depstubber. DO NOT EDIT.
+// This is a simple stub for github.com/gorilla/websocket, strictly for use in testing.
+
+// See the LICENSE file for information about the licensing of the original library.
+// Source: github.com/gorilla/websocket (exports: Dialer; functions: )
+
+// Package websocket is a stub of github.com/gorilla/websocket, generated by depstubber.
+package websocket
+
+import (
+ context "context"
+ tls "crypto/tls"
+ io "io"
+ net "net"
+ http "net/http"
+ url "net/url"
+ time "time"
+)
+
+type BufferPool interface {
+ Get() interface{}
+ Put(_ interface{})
+}
+
+type Conn struct{}
+
+func (_ *Conn) Close() error {
+ return nil
+}
+
+func (_ *Conn) CloseHandler() func(int, string) error {
+ return nil
+}
+
+func (_ *Conn) EnableWriteCompression(_ bool) {}
+
+func (_ *Conn) LocalAddr() net.Addr {
+ return nil
+}
+
+func (_ *Conn) NextReader() (int, io.Reader, error) {
+ return 0, nil, nil
+}
+
+func (_ *Conn) NextWriter(_ int) (io.WriteCloser, error) {
+ return nil, nil
+}
+
+func (_ *Conn) PingHandler() func(string) error {
+ return nil
+}
+
+func (_ *Conn) PongHandler() func(string) error {
+ return nil
+}
+
+func (_ *Conn) ReadJSON(_ interface{}) error {
+ return nil
+}
+
+func (_ *Conn) ReadMessage() (int, []byte, error) {
+ return 0, nil, nil
+}
+
+func (_ *Conn) RemoteAddr() net.Addr {
+ return nil
+}
+
+func (_ *Conn) SetCloseHandler(_ func(int, string) error) {}
+
+func (_ *Conn) SetCompressionLevel(_ int) error {
+ return nil
+}
+
+func (_ *Conn) SetPingHandler(_ func(string) error) {}
+
+func (_ *Conn) SetPongHandler(_ func(string) error) {}
+
+func (_ *Conn) SetReadDeadline(_ time.Time) error {
+ return nil
+}
+
+func (_ *Conn) SetReadLimit(_ int64) {}
+
+func (_ *Conn) SetWriteDeadline(_ time.Time) error {
+ return nil
+}
+
+func (_ *Conn) Subprotocol() string {
+ return ""
+}
+
+func (_ *Conn) UnderlyingConn() net.Conn {
+ return nil
+}
+
+func (_ *Conn) WriteControl(_ int, _ []byte, _ time.Time) error {
+ return nil
+}
+
+func (_ *Conn) WriteJSON(_ interface{}) error {
+ return nil
+}
+
+func (_ *Conn) WriteMessage(_ int, _ []byte) error {
+ return nil
+}
+
+func (_ *Conn) WritePreparedMessage(_ *PreparedMessage) error {
+ return nil
+}
+
+type Dialer struct {
+ NetDial func(string, string) (net.Conn, error)
+ NetDialContext func(context.Context, string, string) (net.Conn, error)
+ Proxy func(*http.Request) (*url.URL, error)
+ TLSClientConfig *tls.Config
+ HandshakeTimeout time.Duration
+ ReadBufferSize int
+ WriteBufferSize int
+ WriteBufferPool BufferPool
+ Subprotocols []string
+ EnableCompression bool
+ Jar http.CookieJar
+}
+
+func (_ *Dialer) Dial(_ string, _ http.Header) (*Conn, *http.Response, error) {
+ return nil, nil, nil
+}
+
+func (_ *Dialer) DialContext(_ context.Context, _ string, _ http.Header) (*Conn, *http.Response, error) {
+ return nil, nil, nil
+}
+
+type PreparedMessage struct{}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/trace.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/trace.go
deleted file mode 100644
index 834f122a00d..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/trace.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build go1.8
-
-package websocket
-
-import (
- "crypto/tls"
- "net/http/httptrace"
-)
-
-func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
- if trace.TLSHandshakeStart != nil {
- trace.TLSHandshakeStart()
- }
- err := doHandshake(tlsConn, cfg)
- if trace.TLSHandshakeDone != nil {
- trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
- }
- return err
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/trace_17.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/trace_17.go
deleted file mode 100644
index 77d05a0b574..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/trace_17.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !go1.8
-
-package websocket
-
-import (
- "crypto/tls"
- "net/http/httptrace"
-)
-
-func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
- return doHandshake(tlsConn, cfg)
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/util.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/util.go
deleted file mode 100644
index 7bf2f66c674..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/util.go
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-import (
- "crypto/rand"
- "crypto/sha1"
- "encoding/base64"
- "io"
- "net/http"
- "strings"
- "unicode/utf8"
-)
-
-var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
-
-func computeAcceptKey(challengeKey string) string {
- h := sha1.New()
- h.Write([]byte(challengeKey))
- h.Write(keyGUID)
- return base64.StdEncoding.EncodeToString(h.Sum(nil))
-}
-
-func generateChallengeKey() (string, error) {
- p := make([]byte, 16)
- if _, err := io.ReadFull(rand.Reader, p); err != nil {
- return "", err
- }
- return base64.StdEncoding.EncodeToString(p), nil
-}
-
-// Token octets per RFC 2616.
-var isTokenOctet = [256]bool{
- '!': true,
- '#': true,
- '$': true,
- '%': true,
- '&': true,
- '\'': true,
- '*': true,
- '+': true,
- '-': true,
- '.': true,
- '0': true,
- '1': true,
- '2': true,
- '3': true,
- '4': true,
- '5': true,
- '6': true,
- '7': true,
- '8': true,
- '9': true,
- 'A': true,
- 'B': true,
- 'C': true,
- 'D': true,
- 'E': true,
- 'F': true,
- 'G': true,
- 'H': true,
- 'I': true,
- 'J': true,
- 'K': true,
- 'L': true,
- 'M': true,
- 'N': true,
- 'O': true,
- 'P': true,
- 'Q': true,
- 'R': true,
- 'S': true,
- 'T': true,
- 'U': true,
- 'W': true,
- 'V': true,
- 'X': true,
- 'Y': true,
- 'Z': true,
- '^': true,
- '_': true,
- '`': true,
- 'a': true,
- 'b': true,
- 'c': true,
- 'd': true,
- 'e': true,
- 'f': true,
- 'g': true,
- 'h': true,
- 'i': true,
- 'j': true,
- 'k': true,
- 'l': true,
- 'm': true,
- 'n': true,
- 'o': true,
- 'p': true,
- 'q': true,
- 'r': true,
- 's': true,
- 't': true,
- 'u': true,
- 'v': true,
- 'w': true,
- 'x': true,
- 'y': true,
- 'z': true,
- '|': true,
- '~': true,
-}
-
-// skipSpace returns a slice of the string s with all leading RFC 2616 linear
-// whitespace removed.
-func skipSpace(s string) (rest string) {
- i := 0
- for ; i < len(s); i++ {
- if b := s[i]; b != ' ' && b != '\t' {
- break
- }
- }
- return s[i:]
-}
-
-// nextToken returns the leading RFC 2616 token of s and the string following
-// the token.
-func nextToken(s string) (token, rest string) {
- i := 0
- for ; i < len(s); i++ {
- if !isTokenOctet[s[i]] {
- break
- }
- }
- return s[:i], s[i:]
-}
-
-// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616
-// and the string following the token or quoted string.
-func nextTokenOrQuoted(s string) (value string, rest string) {
- if !strings.HasPrefix(s, "\"") {
- return nextToken(s)
- }
- s = s[1:]
- for i := 0; i < len(s); i++ {
- switch s[i] {
- case '"':
- return s[:i], s[i+1:]
- case '\\':
- p := make([]byte, len(s)-1)
- j := copy(p, s[:i])
- escape := true
- for i = i + 1; i < len(s); i++ {
- b := s[i]
- switch {
- case escape:
- escape = false
- p[j] = b
- j++
- case b == '\\':
- escape = true
- case b == '"':
- return string(p[:j]), s[i+1:]
- default:
- p[j] = b
- j++
- }
- }
- return "", ""
- }
- }
- return "", ""
-}
-
-// equalASCIIFold returns true if s is equal to t with ASCII case folding as
-// defined in RFC 4790.
-func equalASCIIFold(s, t string) bool {
- for s != "" && t != "" {
- sr, size := utf8.DecodeRuneInString(s)
- s = s[size:]
- tr, size := utf8.DecodeRuneInString(t)
- t = t[size:]
- if sr == tr {
- continue
- }
- if 'A' <= sr && sr <= 'Z' {
- sr = sr + 'a' - 'A'
- }
- if 'A' <= tr && tr <= 'Z' {
- tr = tr + 'a' - 'A'
- }
- if sr != tr {
- return false
- }
- }
- return s == t
-}
-
-// tokenListContainsValue returns true if the 1#token header with the given
-// name contains a token equal to value with ASCII case folding.
-func tokenListContainsValue(header http.Header, name string, value string) bool {
-headers:
- for _, s := range header[name] {
- for {
- var t string
- t, s = nextToken(skipSpace(s))
- if t == "" {
- continue headers
- }
- s = skipSpace(s)
- if s != "" && s[0] != ',' {
- continue headers
- }
- if equalASCIIFold(t, value) {
- return true
- }
- if s == "" {
- continue headers
- }
- s = s[1:]
- }
- }
- return false
-}
-
-// parseExtensions parses WebSocket extensions from a header.
-func parseExtensions(header http.Header) []map[string]string {
- // From RFC 6455:
- //
- // Sec-WebSocket-Extensions = extension-list
- // extension-list = 1#extension
- // extension = extension-token *( ";" extension-param )
- // extension-token = registered-token
- // registered-token = token
- // extension-param = token [ "=" (token | quoted-string) ]
- // ;When using the quoted-string syntax variant, the value
- // ;after quoted-string unescaping MUST conform to the
- // ;'token' ABNF.
-
- var result []map[string]string
-headers:
- for _, s := range header["Sec-Websocket-Extensions"] {
- for {
- var t string
- t, s = nextToken(skipSpace(s))
- if t == "" {
- continue headers
- }
- ext := map[string]string{"": t}
- for {
- s = skipSpace(s)
- if !strings.HasPrefix(s, ";") {
- break
- }
- var k string
- k, s = nextToken(skipSpace(s[1:]))
- if k == "" {
- continue headers
- }
- s = skipSpace(s)
- var v string
- if strings.HasPrefix(s, "=") {
- v, s = nextTokenOrQuoted(skipSpace(s[1:]))
- s = skipSpace(s)
- }
- if s != "" && s[0] != ',' && s[0] != ';' {
- continue headers
- }
- ext[k] = v
- }
- if s != "" && s[0] != ',' {
- continue headers
- }
- result = append(result, ext)
- if s == "" {
- continue headers
- }
- s = s[1:]
- }
- }
- return result
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/x_net_proxy.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/x_net_proxy.go
deleted file mode 100644
index 2e668f6b882..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/gorilla/websocket/x_net_proxy.go
+++ /dev/null
@@ -1,473 +0,0 @@
-// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
-//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
-
-// Package proxy provides support for a variety of protocols to proxy network
-// data.
-//
-
-package websocket
-
-import (
- "errors"
- "io"
- "net"
- "net/url"
- "os"
- "strconv"
- "strings"
- "sync"
-)
-
-type proxy_direct struct{}
-
-// Direct is a direct proxy: one that makes network connections directly.
-var proxy_Direct = proxy_direct{}
-
-func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
- return net.Dial(network, addr)
-}
-
-// A PerHost directs connections to a default Dialer unless the host name
-// requested matches one of a number of exceptions.
-type proxy_PerHost struct {
- def, bypass proxy_Dialer
-
- bypassNetworks []*net.IPNet
- bypassIPs []net.IP
- bypassZones []string
- bypassHosts []string
-}
-
-// NewPerHost returns a PerHost Dialer that directs connections to either
-// defaultDialer or bypass, depending on whether the connection matches one of
-// the configured rules.
-func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
- return &proxy_PerHost{
- def: defaultDialer,
- bypass: bypass,
- }
-}
-
-// Dial connects to the address addr on the given network through either
-// defaultDialer or bypass.
-func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- return nil, err
- }
-
- return p.dialerForRequest(host).Dial(network, addr)
-}
-
-func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
- if ip := net.ParseIP(host); ip != nil {
- for _, net := range p.bypassNetworks {
- if net.Contains(ip) {
- return p.bypass
- }
- }
- for _, bypassIP := range p.bypassIPs {
- if bypassIP.Equal(ip) {
- return p.bypass
- }
- }
- return p.def
- }
-
- for _, zone := range p.bypassZones {
- if strings.HasSuffix(host, zone) {
- return p.bypass
- }
- if host == zone[1:] {
- // For a zone ".example.com", we match "example.com"
- // too.
- return p.bypass
- }
- }
- for _, bypassHost := range p.bypassHosts {
- if bypassHost == host {
- return p.bypass
- }
- }
- return p.def
-}
-
-// AddFromString parses a string that contains comma-separated values
-// specifying hosts that should use the bypass proxy. Each value is either an
-// IP address, a CIDR range, a zone (*.example.com) or a host name
-// (localhost). A best effort is made to parse the string and errors are
-// ignored.
-func (p *proxy_PerHost) AddFromString(s string) {
- hosts := strings.Split(s, ",")
- for _, host := range hosts {
- host = strings.TrimSpace(host)
- if len(host) == 0 {
- continue
- }
- if strings.Contains(host, "/") {
- // We assume that it's a CIDR address like 127.0.0.0/8
- if _, net, err := net.ParseCIDR(host); err == nil {
- p.AddNetwork(net)
- }
- continue
- }
- if ip := net.ParseIP(host); ip != nil {
- p.AddIP(ip)
- continue
- }
- if strings.HasPrefix(host, "*.") {
- p.AddZone(host[1:])
- continue
- }
- p.AddHost(host)
- }
-}
-
-// AddIP specifies an IP address that will use the bypass proxy. Note that
-// this will only take effect if a literal IP address is dialed. A connection
-// to a named host will never match an IP.
-func (p *proxy_PerHost) AddIP(ip net.IP) {
- p.bypassIPs = append(p.bypassIPs, ip)
-}
-
-// AddNetwork specifies an IP range that will use the bypass proxy. Note that
-// this will only take effect if a literal IP address is dialed. A connection
-// to a named host will never match.
-func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
- p.bypassNetworks = append(p.bypassNetworks, net)
-}
-
-// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
-// "example.com" matches "example.com" and all of its subdomains.
-func (p *proxy_PerHost) AddZone(zone string) {
- if strings.HasSuffix(zone, ".") {
- zone = zone[:len(zone)-1]
- }
- if !strings.HasPrefix(zone, ".") {
- zone = "." + zone
- }
- p.bypassZones = append(p.bypassZones, zone)
-}
-
-// AddHost specifies a host name that will use the bypass proxy.
-func (p *proxy_PerHost) AddHost(host string) {
- if strings.HasSuffix(host, ".") {
- host = host[:len(host)-1]
- }
- p.bypassHosts = append(p.bypassHosts, host)
-}
-
-// A Dialer is a means to establish a connection.
-type proxy_Dialer interface {
- // Dial connects to the given address via the proxy.
- Dial(network, addr string) (c net.Conn, err error)
-}
-
-// Auth contains authentication parameters that specific Dialers may require.
-type proxy_Auth struct {
- User, Password string
-}
-
-// FromEnvironment returns the dialer specified by the proxy related variables in
-// the environment.
-func proxy_FromEnvironment() proxy_Dialer {
- allProxy := proxy_allProxyEnv.Get()
- if len(allProxy) == 0 {
- return proxy_Direct
- }
-
- proxyURL, err := url.Parse(allProxy)
- if err != nil {
- return proxy_Direct
- }
- proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
- if err != nil {
- return proxy_Direct
- }
-
- noProxy := proxy_noProxyEnv.Get()
- if len(noProxy) == 0 {
- return proxy
- }
-
- perHost := proxy_NewPerHost(proxy, proxy_Direct)
- perHost.AddFromString(noProxy)
- return perHost
-}
-
-// proxySchemes is a map from URL schemes to a function that creates a Dialer
-// from a URL with such a scheme.
-var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)
-
-// RegisterDialerType takes a URL scheme and a function to generate Dialers from
-// a URL with that scheme and a forwarding Dialer. Registered schemes are used
-// by FromURL.
-func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
- if proxy_proxySchemes == nil {
- proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
- }
- proxy_proxySchemes[scheme] = f
-}
-
-// FromURL returns a Dialer given a URL specification and an underlying
-// Dialer for it to make network requests.
-func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
- var auth *proxy_Auth
- if u.User != nil {
- auth = new(proxy_Auth)
- auth.User = u.User.Username()
- if p, ok := u.User.Password(); ok {
- auth.Password = p
- }
- }
-
- switch u.Scheme {
- case "socks5":
- return proxy_SOCKS5("tcp", u.Host, auth, forward)
- }
-
- // If the scheme doesn't match any of the built-in schemes, see if it
- // was registered by another package.
- if proxy_proxySchemes != nil {
- if f, ok := proxy_proxySchemes[u.Scheme]; ok {
- return f(u, forward)
- }
- }
-
- return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
-}
-
-var (
- proxy_allProxyEnv = &proxy_envOnce{
- names: []string{"ALL_PROXY", "all_proxy"},
- }
- proxy_noProxyEnv = &proxy_envOnce{
- names: []string{"NO_PROXY", "no_proxy"},
- }
-)
-
-// envOnce looks up an environment variable (optionally by multiple
-// names) once. It mitigates expensive lookups on some platforms
-// (e.g. Windows).
-// (Borrowed from net/http/transport.go)
-type proxy_envOnce struct {
- names []string
- once sync.Once
- val string
-}
-
-func (e *proxy_envOnce) Get() string {
- e.once.Do(e.init)
- return e.val
-}
-
-func (e *proxy_envOnce) init() {
- for _, n := range e.names {
- e.val = os.Getenv(n)
- if e.val != "" {
- return
- }
- }
-}
-
-// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
-// with an optional username and password. See RFC 1928 and RFC 1929.
-func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
- s := &proxy_socks5{
- network: network,
- addr: addr,
- forward: forward,
- }
- if auth != nil {
- s.user = auth.User
- s.password = auth.Password
- }
-
- return s, nil
-}
-
-type proxy_socks5 struct {
- user, password string
- network, addr string
- forward proxy_Dialer
-}
-
-const proxy_socks5Version = 5
-
-const (
- proxy_socks5AuthNone = 0
- proxy_socks5AuthPassword = 2
-)
-
-const proxy_socks5Connect = 1
-
-const (
- proxy_socks5IP4 = 1
- proxy_socks5Domain = 3
- proxy_socks5IP6 = 4
-)
-
-var proxy_socks5Errors = []string{
- "",
- "general failure",
- "connection forbidden",
- "network unreachable",
- "host unreachable",
- "connection refused",
- "TTL expired",
- "command not supported",
- "address type not supported",
-}
-
-// Dial connects to the address addr on the given network via the SOCKS5 proxy.
-func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
- switch network {
- case "tcp", "tcp6", "tcp4":
- default:
- return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
- }
-
- conn, err := s.forward.Dial(s.network, s.addr)
- if err != nil {
- return nil, err
- }
- if err := s.connect(conn, addr); err != nil {
- conn.Close()
- return nil, err
- }
- return conn, nil
-}
-
-// connect takes an existing connection to a socks5 proxy server,
-// and commands the server to extend that connection to target,
-// which must be a canonical address with a host and port.
-func (s *proxy_socks5) connect(conn net.Conn, target string) error {
- host, portStr, err := net.SplitHostPort(target)
- if err != nil {
- return err
- }
-
- port, err := strconv.Atoi(portStr)
- if err != nil {
- return errors.New("proxy: failed to parse port number: " + portStr)
- }
- if port < 1 || port > 0xffff {
- return errors.New("proxy: port number out of range: " + portStr)
- }
-
- // the size here is just an estimate
- buf := make([]byte, 0, 6+len(host))
-
- buf = append(buf, proxy_socks5Version)
- if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
- buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
- } else {
- buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
- }
-
- if _, err := conn.Write(buf); err != nil {
- return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- if _, err := io.ReadFull(conn, buf[:2]); err != nil {
- return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
- if buf[0] != 5 {
- return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
- }
- if buf[1] == 0xff {
- return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
- }
-
- // See RFC 1929
- if buf[1] == proxy_socks5AuthPassword {
- buf = buf[:0]
- buf = append(buf, 1 /* password protocol version */)
- buf = append(buf, uint8(len(s.user)))
- buf = append(buf, s.user...)
- buf = append(buf, uint8(len(s.password)))
- buf = append(buf, s.password...)
-
- if _, err := conn.Write(buf); err != nil {
- return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- if _, err := io.ReadFull(conn, buf[:2]); err != nil {
- return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- if buf[1] != 0 {
- return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
- }
- }
-
- buf = buf[:0]
- buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)
-
- if ip := net.ParseIP(host); ip != nil {
- if ip4 := ip.To4(); ip4 != nil {
- buf = append(buf, proxy_socks5IP4)
- ip = ip4
- } else {
- buf = append(buf, proxy_socks5IP6)
- }
- buf = append(buf, ip...)
- } else {
- if len(host) > 255 {
- return errors.New("proxy: destination host name too long: " + host)
- }
- buf = append(buf, proxy_socks5Domain)
- buf = append(buf, byte(len(host)))
- buf = append(buf, host...)
- }
- buf = append(buf, byte(port>>8), byte(port))
-
- if _, err := conn.Write(buf); err != nil {
- return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- if _, err := io.ReadFull(conn, buf[:4]); err != nil {
- return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- failure := "unknown error"
- if int(buf[1]) < len(proxy_socks5Errors) {
- failure = proxy_socks5Errors[buf[1]]
- }
-
- if len(failure) > 0 {
- return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
- }
-
- bytesToDiscard := 0
- switch buf[3] {
- case proxy_socks5IP4:
- bytesToDiscard = net.IPv4len
- case proxy_socks5IP6:
- bytesToDiscard = net.IPv6len
- case proxy_socks5Domain:
- _, err := io.ReadFull(conn, buf[:1])
- if err != nil {
- return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
- bytesToDiscard = int(buf[0])
- default:
- return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
- }
-
- if cap(buf) < bytesToDiscard {
- buf = make([]byte, bytesToDiscard)
- } else {
- buf = buf[:bytesToDiscard]
- }
- if _, err := io.ReadFull(conn, buf); err != nil {
- return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- // Also need to discard the port number
- if _, err := io.ReadFull(conn, buf[:2]); err != nil {
- return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- return nil
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/LICENSE b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/LICENSE
deleted file mode 100644
index 1eb75ef68e4..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright (c) 2012 The Go Authors. All rights reserved.
-Copyright (c) 2019 Klaus Post. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/deflate.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/deflate.go
deleted file mode 100644
index 2b101d26b25..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/deflate.go
+++ /dev/null
@@ -1,819 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Copyright (c) 2015 Klaus Post
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-import (
- "fmt"
- "io"
- "math"
-)
-
-const (
- NoCompression = 0
- BestSpeed = 1
- BestCompression = 9
- DefaultCompression = -1
-
- // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
- // entropy encoding. This mode is useful in compressing data that has
- // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
- // that lacks an entropy encoder. Compression gains are achieved when
- // certain bytes in the input stream occur more frequently than others.
- //
- // Note that HuffmanOnly produces a compressed output that is
- // RFC 1951 compliant. That is, any valid DEFLATE decompressor will
- // continue to be able to decompress this output.
- HuffmanOnly = -2
- ConstantCompression = HuffmanOnly // compatibility alias.
-
- logWindowSize = 15
- windowSize = 1 << logWindowSize
- windowMask = windowSize - 1
- logMaxOffsetSize = 15 // Standard DEFLATE
- minMatchLength = 4 // The smallest match that the compressor looks for
- maxMatchLength = 258 // The longest match for the compressor
- minOffsetSize = 1 // The shortest offset that makes any sense
-
- // The maximum number of tokens we put into a single flat block, just too
- // stop things from getting too large.
- maxFlateBlockTokens = 1 << 14
- maxStoreBlockSize = 65535
- hashBits = 17 // After 17 performance degrades
- hashSize = 1 << hashBits
- hashMask = (1 << hashBits) - 1
- hashShift = (hashBits + minMatchLength - 1) / minMatchLength
- maxHashOffset = 1 << 24
-
- skipNever = math.MaxInt32
-
- debugDeflate = false
-)
-
-type compressionLevel struct {
- good, lazy, nice, chain, fastSkipHashing, level int
-}
-
-// Compression levels have been rebalanced from zlib deflate defaults
-// to give a bigger spread in speed and compression.
-// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
-var levels = []compressionLevel{
- {}, // 0
- // Level 1-6 uses specialized algorithm - values not used
- {0, 0, 0, 0, 0, 1},
- {0, 0, 0, 0, 0, 2},
- {0, 0, 0, 0, 0, 3},
- {0, 0, 0, 0, 0, 4},
- {0, 0, 0, 0, 0, 5},
- {0, 0, 0, 0, 0, 6},
- // Levels 7-9 use increasingly more lazy matching
- // and increasingly stringent conditions for "good enough".
- {8, 8, 24, 16, skipNever, 7},
- {10, 16, 24, 64, skipNever, 8},
- {32, 258, 258, 4096, skipNever, 9},
-}
-
-// advancedState contains state for the advanced levels, with bigger hash tables, etc.
-type advancedState struct {
- // deflate state
- length int
- offset int
- hash uint32
- maxInsertIndex int
- ii uint16 // position of last match, intended to overflow to reset.
-
- // Input hash chains
- // hashHead[hashValue] contains the largest inputIndex with the specified hash value
- // If hashHead[hashValue] is within the current window, then
- // hashPrev[hashHead[hashValue] & windowMask] contains the previous index
- // with the same hash value.
- chainHead int
- hashHead [hashSize]uint32
- hashPrev [windowSize]uint32
- hashOffset int
-
- // input window: unprocessed data is window[index:windowEnd]
- index int
- hashMatch [maxMatchLength + minMatchLength]uint32
-}
-
-type compressor struct {
- compressionLevel
-
- w *huffmanBitWriter
-
- // compression algorithm
- fill func(*compressor, []byte) int // copy data to window
- step func(*compressor) // process window
- sync bool // requesting flush
-
- window []byte
- windowEnd int
- blockStart int // window index where current tokens start
- byteAvailable bool // if true, still need to process window[index-1].
- err error
-
- // queued output tokens
- tokens tokens
- fast fastEnc
- state *advancedState
-}
-
-func (d *compressor) fillDeflate(b []byte) int {
- s := d.state
- if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
- // shift the window by windowSize
- copy(d.window[:], d.window[windowSize:2*windowSize])
- s.index -= windowSize
- d.windowEnd -= windowSize
- if d.blockStart >= windowSize {
- d.blockStart -= windowSize
- } else {
- d.blockStart = math.MaxInt32
- }
- s.hashOffset += windowSize
- if s.hashOffset > maxHashOffset {
- delta := s.hashOffset - 1
- s.hashOffset -= delta
- s.chainHead -= delta
- // Iterate over slices instead of arrays to avoid copying
- // the entire table onto the stack (Issue #18625).
- for i, v := range s.hashPrev[:] {
- if int(v) > delta {
- s.hashPrev[i] = uint32(int(v) - delta)
- } else {
- s.hashPrev[i] = 0
- }
- }
- for i, v := range s.hashHead[:] {
- if int(v) > delta {
- s.hashHead[i] = uint32(int(v) - delta)
- } else {
- s.hashHead[i] = 0
- }
- }
- }
- }
- n := copy(d.window[d.windowEnd:], b)
- d.windowEnd += n
- return n
-}
-
-func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
- if index > 0 || eof {
- var window []byte
- if d.blockStart <= index {
- window = d.window[d.blockStart:index]
- }
- d.blockStart = index
- d.w.writeBlock(tok, eof, window)
- return d.w.err
- }
- return nil
-}
-
-// writeBlockSkip writes the current block and uses the number of tokens
-// to determine if the block should be stored on no matches, or
-// only huffman encoded.
-func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
- if index > 0 || eof {
- if d.blockStart <= index {
- window := d.window[d.blockStart:index]
- // If we removed less than a 64th of all literals
- // we huffman compress the block.
- if int(tok.n) > len(window)-int(tok.n>>6) {
- d.w.writeBlockHuff(eof, window, d.sync)
- } else {
- // Write a dynamic huffman block.
- d.w.writeBlockDynamic(tok, eof, window, d.sync)
- }
- } else {
- d.w.writeBlock(tok, eof, nil)
- }
- d.blockStart = index
- return d.w.err
- }
- return nil
-}
-
-// fillWindow will fill the current window with the supplied
-// dictionary and calculate all hashes.
-// This is much faster than doing a full encode.
-// Should only be used after a start/reset.
-func (d *compressor) fillWindow(b []byte) {
- // Do not fill window if we are in store-only or huffman mode.
- if d.level <= 0 {
- return
- }
- if d.fast != nil {
- // encode the last data, but discard the result
- if len(b) > maxMatchOffset {
- b = b[len(b)-maxMatchOffset:]
- }
- d.fast.Encode(&d.tokens, b)
- d.tokens.Reset()
- return
- }
- s := d.state
- // If we are given too much, cut it.
- if len(b) > windowSize {
- b = b[len(b)-windowSize:]
- }
- // Add all to window.
- n := copy(d.window[d.windowEnd:], b)
-
- // Calculate 256 hashes at the time (more L1 cache hits)
- loops := (n + 256 - minMatchLength) / 256
- for j := 0; j < loops; j++ {
- startindex := j * 256
- end := startindex + 256 + minMatchLength - 1
- if end > n {
- end = n
- }
- tocheck := d.window[startindex:end]
- dstSize := len(tocheck) - minMatchLength + 1
-
- if dstSize <= 0 {
- continue
- }
-
- dst := s.hashMatch[:dstSize]
- bulkHash4(tocheck, dst)
- var newH uint32
- for i, val := range dst {
- di := i + startindex
- newH = val & hashMask
- // Get previous value with the same hash.
- // Our chain should point to the previous value.
- s.hashPrev[di&windowMask] = s.hashHead[newH]
- // Set the head of the hash chain to us.
- s.hashHead[newH] = uint32(di + s.hashOffset)
- }
- s.hash = newH
- }
- // Update window information.
- d.windowEnd += n
- s.index = n
-}
-
-// Try to find a match starting at index whose length is greater than prevSize.
-// We only look at chainCount possibilities before giving up.
-// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
-func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
- minMatchLook := maxMatchLength
- if lookahead < minMatchLook {
- minMatchLook = lookahead
- }
-
- win := d.window[0 : pos+minMatchLook]
-
- // We quit when we get a match that's at least nice long
- nice := len(win) - pos
- if d.nice < nice {
- nice = d.nice
- }
-
- // If we've got a match that's good enough, only look in 1/4 the chain.
- tries := d.chain
- length = prevLength
- if length >= d.good {
- tries >>= 2
- }
-
- wEnd := win[pos+length]
- wPos := win[pos:]
- minIndex := pos - windowSize
-
- for i := prevHead; tries > 0; tries-- {
- if wEnd == win[i+length] {
- n := matchLen(win[i:i+minMatchLook], wPos)
-
- if n > length && (n > minMatchLength || pos-i <= 4096) {
- length = n
- offset = pos - i
- ok = true
- if n >= nice {
- // The match is good enough that we don't try to find a better one.
- break
- }
- wEnd = win[pos+n]
- }
- }
- if i == minIndex {
- // hashPrev[i & windowMask] has already been overwritten, so stop now.
- break
- }
- i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
- if i < minIndex || i < 0 {
- break
- }
- }
- return
-}
-
-func (d *compressor) writeStoredBlock(buf []byte) error {
- if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
- return d.w.err
- }
- d.w.writeBytes(buf)
- return d.w.err
-}
-
-// hash4 returns a hash representation of the first 4 bytes
-// of the supplied slice.
-// The caller must ensure that len(b) >= 4.
-func hash4(b []byte) uint32 {
- b = b[:4]
- return hash4u(uint32(b[3])|uint32(b[2])<<8|uint32(b[1])<<16|uint32(b[0])<<24, hashBits)
-}
-
-// bulkHash4 will compute hashes using the same
-// algorithm as hash4
-func bulkHash4(b []byte, dst []uint32) {
- if len(b) < 4 {
- return
- }
- hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
- dst[0] = hash4u(hb, hashBits)
- end := len(b) - 4 + 1
- for i := 1; i < end; i++ {
- hb = (hb << 8) | uint32(b[i+3])
- dst[i] = hash4u(hb, hashBits)
- }
-}
-
-func (d *compressor) initDeflate() {
- d.window = make([]byte, 2*windowSize)
- d.byteAvailable = false
- d.err = nil
- if d.state == nil {
- return
- }
- s := d.state
- s.index = 0
- s.hashOffset = 1
- s.length = minMatchLength - 1
- s.offset = 0
- s.hash = 0
- s.chainHead = -1
-}
-
-// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
-// meaning it always has lazy matching on.
-func (d *compressor) deflateLazy() {
- s := d.state
- // Sanity enables additional runtime tests.
- // It's intended to be used during development
- // to supplement the currently ad-hoc unit tests.
- const sanity = debugDeflate
-
- if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
- return
- }
-
- s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
- if s.index < s.maxInsertIndex {
- s.hash = hash4(d.window[s.index : s.index+minMatchLength])
- }
-
- for {
- if sanity && s.index > d.windowEnd {
- panic("index > windowEnd")
- }
- lookahead := d.windowEnd - s.index
- if lookahead < minMatchLength+maxMatchLength {
- if !d.sync {
- return
- }
- if sanity && s.index > d.windowEnd {
- panic("index > windowEnd")
- }
- if lookahead == 0 {
- // Flush current output block if any.
- if d.byteAvailable {
- // There is still one pending token that needs to be flushed
- d.tokens.AddLiteral(d.window[s.index-1])
- d.byteAvailable = false
- }
- if d.tokens.n > 0 {
- if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.Reset()
- }
- return
- }
- }
- if s.index < s.maxInsertIndex {
- // Update the hash
- s.hash = hash4(d.window[s.index : s.index+minMatchLength])
- ch := s.hashHead[s.hash&hashMask]
- s.chainHead = int(ch)
- s.hashPrev[s.index&windowMask] = ch
- s.hashHead[s.hash&hashMask] = uint32(s.index + s.hashOffset)
- }
- prevLength := s.length
- prevOffset := s.offset
- s.length = minMatchLength - 1
- s.offset = 0
- minIndex := s.index - windowSize
- if minIndex < 0 {
- minIndex = 0
- }
-
- if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
- if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
- s.length = newLength
- s.offset = newOffset
- }
- }
- if prevLength >= minMatchLength && s.length <= prevLength {
- // There was a match at the previous step, and the current match is
- // not better. Output the previous match.
- d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
-
- // Insert in the hash table all strings up to the end of the match.
- // index and index-1 are already inserted. If there is not enough
- // lookahead, the last two strings are not inserted into the hash
- // table.
- var newIndex int
- newIndex = s.index + prevLength - 1
- // Calculate missing hashes
- end := newIndex
- if end > s.maxInsertIndex {
- end = s.maxInsertIndex
- }
- end += minMatchLength - 1
- startindex := s.index + 1
- if startindex > s.maxInsertIndex {
- startindex = s.maxInsertIndex
- }
- tocheck := d.window[startindex:end]
- dstSize := len(tocheck) - minMatchLength + 1
- if dstSize > 0 {
- dst := s.hashMatch[:dstSize]
- bulkHash4(tocheck, dst)
- var newH uint32
- for i, val := range dst {
- di := i + startindex
- newH = val & hashMask
- // Get previous value with the same hash.
- // Our chain should point to the previous value.
- s.hashPrev[di&windowMask] = s.hashHead[newH]
- // Set the head of the hash chain to us.
- s.hashHead[newH] = uint32(di + s.hashOffset)
- }
- s.hash = newH
- }
-
- s.index = newIndex
- d.byteAvailable = false
- s.length = minMatchLength - 1
- if d.tokens.n == maxFlateBlockTokens {
- // The block includes the current character
- if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.Reset()
- }
- } else {
- // Reset, if we got a match this run.
- if s.length >= minMatchLength {
- s.ii = 0
- }
- // We have a byte waiting. Emit it.
- if d.byteAvailable {
- s.ii++
- d.tokens.AddLiteral(d.window[s.index-1])
- if d.tokens.n == maxFlateBlockTokens {
- if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.Reset()
- }
- s.index++
-
- // If we have a long run of no matches, skip additional bytes
- // Resets when s.ii overflows after 64KB.
- if s.ii > 31 {
- n := int(s.ii >> 5)
- for j := 0; j < n; j++ {
- if s.index >= d.windowEnd-1 {
- break
- }
-
- d.tokens.AddLiteral(d.window[s.index-1])
- if d.tokens.n == maxFlateBlockTokens {
- if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.Reset()
- }
- s.index++
- }
- // Flush last byte
- d.tokens.AddLiteral(d.window[s.index-1])
- d.byteAvailable = false
- // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
- if d.tokens.n == maxFlateBlockTokens {
- if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.Reset()
- }
- }
- } else {
- s.index++
- d.byteAvailable = true
- }
- }
- }
-}
-
-func (d *compressor) store() {
- if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
- d.err = d.writeStoredBlock(d.window[:d.windowEnd])
- d.windowEnd = 0
- }
-}
-
-// fillWindow will fill the buffer with data for huffman-only compression.
-// The number of bytes copied is returned.
-func (d *compressor) fillBlock(b []byte) int {
- n := copy(d.window[d.windowEnd:], b)
- d.windowEnd += n
- return n
-}
-
-// storeHuff will compress and store the currently added data,
-// if enough has been accumulated or we at the end of the stream.
-// Any error that occurred will be in d.err
-func (d *compressor) storeHuff() {
- if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
- return
- }
- d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
- d.err = d.w.err
- d.windowEnd = 0
-}
-
-// storeFast will compress and store the currently added data,
-// if enough has been accumulated or we at the end of the stream.
-// Any error that occurred will be in d.err
-func (d *compressor) storeFast() {
- // We only compress if we have maxStoreBlockSize.
- if d.windowEnd < len(d.window) {
- if !d.sync {
- return
- }
- // Handle extremely small sizes.
- if d.windowEnd < 128 {
- if d.windowEnd == 0 {
- return
- }
- if d.windowEnd <= 32 {
- d.err = d.writeStoredBlock(d.window[:d.windowEnd])
- } else {
- d.w.writeBlockHuff(false, d.window[:d.windowEnd], true)
- d.err = d.w.err
- }
- d.tokens.Reset()
- d.windowEnd = 0
- d.fast.Reset()
- return
- }
- }
-
- d.fast.Encode(&d.tokens, d.window[:d.windowEnd])
- // If we made zero matches, store the block as is.
- if d.tokens.n == 0 {
- d.err = d.writeStoredBlock(d.window[:d.windowEnd])
- // If we removed less than 1/16th, huffman compress the block.
- } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
- d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
- d.err = d.w.err
- } else {
- d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync)
- d.err = d.w.err
- }
- d.tokens.Reset()
- d.windowEnd = 0
-}
-
-// write will add input byte to the stream.
-// Unless an error occurs all bytes will be consumed.
-func (d *compressor) write(b []byte) (n int, err error) {
- if d.err != nil {
- return 0, d.err
- }
- n = len(b)
- for len(b) > 0 {
- d.step(d)
- b = b[d.fill(d, b):]
- if d.err != nil {
- return 0, d.err
- }
- }
- return n, d.err
-}
-
-func (d *compressor) syncFlush() error {
- d.sync = true
- if d.err != nil {
- return d.err
- }
- d.step(d)
- if d.err == nil {
- d.w.writeStoredHeader(0, false)
- d.w.flush()
- d.err = d.w.err
- }
- d.sync = false
- return d.err
-}
-
-func (d *compressor) init(w io.Writer, level int) (err error) {
- d.w = newHuffmanBitWriter(w)
-
- switch {
- case level == NoCompression:
- d.window = make([]byte, maxStoreBlockSize)
- d.fill = (*compressor).fillBlock
- d.step = (*compressor).store
- case level == ConstantCompression:
- d.w.logNewTablePenalty = 4
- d.window = make([]byte, maxStoreBlockSize)
- d.fill = (*compressor).fillBlock
- d.step = (*compressor).storeHuff
- case level == DefaultCompression:
- level = 5
- fallthrough
- case level >= 1 && level <= 6:
- d.w.logNewTablePenalty = 6
- d.fast = newFastEnc(level)
- d.window = make([]byte, maxStoreBlockSize)
- d.fill = (*compressor).fillBlock
- d.step = (*compressor).storeFast
- case 7 <= level && level <= 9:
- d.w.logNewTablePenalty = 10
- d.state = &advancedState{}
- d.compressionLevel = levels[level]
- d.initDeflate()
- d.fill = (*compressor).fillDeflate
- d.step = (*compressor).deflateLazy
- default:
- return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
- }
- d.level = level
- return nil
-}
-
-// reset the state of the compressor.
-func (d *compressor) reset(w io.Writer) {
- d.w.reset(w)
- d.sync = false
- d.err = nil
- // We only need to reset a few things for Snappy.
- if d.fast != nil {
- d.fast.Reset()
- d.windowEnd = 0
- d.tokens.Reset()
- return
- }
- switch d.compressionLevel.chain {
- case 0:
- // level was NoCompression or ConstantCompresssion.
- d.windowEnd = 0
- default:
- s := d.state
- s.chainHead = -1
- for i := range s.hashHead {
- s.hashHead[i] = 0
- }
- for i := range s.hashPrev {
- s.hashPrev[i] = 0
- }
- s.hashOffset = 1
- s.index, d.windowEnd = 0, 0
- d.blockStart, d.byteAvailable = 0, false
- d.tokens.Reset()
- s.length = minMatchLength - 1
- s.offset = 0
- s.hash = 0
- s.ii = 0
- s.maxInsertIndex = 0
- }
-}
-
-func (d *compressor) close() error {
- if d.err != nil {
- return d.err
- }
- d.sync = true
- d.step(d)
- if d.err != nil {
- return d.err
- }
- if d.w.writeStoredHeader(0, true); d.w.err != nil {
- return d.w.err
- }
- d.w.flush()
- d.w.reset(nil)
- return d.w.err
-}
-
-// NewWriter returns a new Writer compressing data at the given level.
-// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
-// higher levels typically run slower but compress more.
-// Level 0 (NoCompression) does not attempt any compression; it only adds the
-// necessary DEFLATE framing.
-// Level -1 (DefaultCompression) uses the default compression level.
-// Level -2 (ConstantCompression) will use Huffman compression only, giving
-// a very fast compression for all types of input, but sacrificing considerable
-// compression efficiency.
-//
-// If level is in the range [-2, 9] then the error returned will be nil.
-// Otherwise the error returned will be non-nil.
-func NewWriter(w io.Writer, level int) (*Writer, error) {
- var dw Writer
- if err := dw.d.init(w, level); err != nil {
- return nil, err
- }
- return &dw, nil
-}
-
-// NewWriterDict is like NewWriter but initializes the new
-// Writer with a preset dictionary. The returned Writer behaves
-// as if the dictionary had been written to it without producing
-// any compressed output. The compressed data written to w
-// can only be decompressed by a Reader initialized with the
-// same dictionary.
-func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
- zw, err := NewWriter(w, level)
- if err != nil {
- return nil, err
- }
- zw.d.fillWindow(dict)
- zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
- return zw, err
-}
-
-// A Writer takes data written to it and writes the compressed
-// form of that data to an underlying writer (see NewWriter).
-type Writer struct {
- d compressor
- dict []byte
-}
-
-// Write writes data to w, which will eventually write the
-// compressed form of data to its underlying writer.
-func (w *Writer) Write(data []byte) (n int, err error) {
- return w.d.write(data)
-}
-
-// Flush flushes any pending data to the underlying writer.
-// It is useful mainly in compressed network protocols, to ensure that
-// a remote reader has enough data to reconstruct a packet.
-// Flush does not return until the data has been written.
-// Calling Flush when there is no pending data still causes the Writer
-// to emit a sync marker of at least 4 bytes.
-// If the underlying writer returns an error, Flush returns that error.
-//
-// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
-func (w *Writer) Flush() error {
- // For more about flushing:
- // http://www.bolet.org/~pornin/deflate-flush.html
- return w.d.syncFlush()
-}
-
-// Close flushes and closes the writer.
-func (w *Writer) Close() error {
- return w.d.close()
-}
-
-// Reset discards the writer's state and makes it equivalent to
-// the result of NewWriter or NewWriterDict called with dst
-// and w's level and dictionary.
-func (w *Writer) Reset(dst io.Writer) {
- if len(w.dict) > 0 {
- // w was created with NewWriterDict
- w.d.reset(dst)
- if dst != nil {
- w.d.fillWindow(w.dict)
- }
- } else {
- // w was created with NewWriter
- w.d.reset(dst)
- }
-}
-
-// ResetDict discards the writer's state and makes it equivalent to
-// the result of NewWriter or NewWriterDict called with dst
-// and w's level, but sets a specific dictionary.
-func (w *Writer) ResetDict(dst io.Writer, dict []byte) {
- w.dict = dict
- w.d.reset(dst)
- w.d.fillWindow(w.dict)
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/dict_decoder.go
deleted file mode 100644
index 71c75a065ea..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/dict_decoder.go
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
-// LZ77 decompresses data through sequences of two forms of commands:
-//
-// * Literal insertions: Runs of one or more symbols are inserted into the data
-// stream as is. This is accomplished through the writeByte method for a
-// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
-// Any valid stream must start with a literal insertion if no preset dictionary
-// is used.
-//
-// * Backward copies: Runs of one or more symbols are copied from previously
-// emitted data. Backward copies come as the tuple (dist, length) where dist
-// determines how far back in the stream to copy from and length determines how
-// many bytes to copy. Note that it is valid for the length to be greater than
-// the distance. Since LZ77 uses forward copies, that situation is used to
-// perform a form of run-length encoding on repeated runs of symbols.
-// The writeCopy and tryWriteCopy are used to implement this command.
-//
-// For performance reasons, this implementation performs little to no sanity
-// checks about the arguments. As such, the invariants documented for each
-// method call must be respected.
-type dictDecoder struct {
- hist []byte // Sliding window history
-
- // Invariant: 0 <= rdPos <= wrPos <= len(hist)
- wrPos int // Current output position in buffer
- rdPos int // Have emitted hist[:rdPos] already
- full bool // Has a full window length been written yet?
-}
-
-// init initializes dictDecoder to have a sliding window dictionary of the given
-// size. If a preset dict is provided, it will initialize the dictionary with
-// the contents of dict.
-func (dd *dictDecoder) init(size int, dict []byte) {
- *dd = dictDecoder{hist: dd.hist}
-
- if cap(dd.hist) < size {
- dd.hist = make([]byte, size)
- }
- dd.hist = dd.hist[:size]
-
- if len(dict) > len(dd.hist) {
- dict = dict[len(dict)-len(dd.hist):]
- }
- dd.wrPos = copy(dd.hist, dict)
- if dd.wrPos == len(dd.hist) {
- dd.wrPos = 0
- dd.full = true
- }
- dd.rdPos = dd.wrPos
-}
-
-// histSize reports the total amount of historical data in the dictionary.
-func (dd *dictDecoder) histSize() int {
- if dd.full {
- return len(dd.hist)
- }
- return dd.wrPos
-}
-
-// availRead reports the number of bytes that can be flushed by readFlush.
-func (dd *dictDecoder) availRead() int {
- return dd.wrPos - dd.rdPos
-}
-
-// availWrite reports the available amount of output buffer space.
-func (dd *dictDecoder) availWrite() int {
- return len(dd.hist) - dd.wrPos
-}
-
-// writeSlice returns a slice of the available buffer to write data to.
-//
-// This invariant will be kept: len(s) <= availWrite()
-func (dd *dictDecoder) writeSlice() []byte {
- return dd.hist[dd.wrPos:]
-}
-
-// writeMark advances the writer pointer by cnt.
-//
-// This invariant must be kept: 0 <= cnt <= availWrite()
-func (dd *dictDecoder) writeMark(cnt int) {
- dd.wrPos += cnt
-}
-
-// writeByte writes a single byte to the dictionary.
-//
-// This invariant must be kept: 0 < availWrite()
-func (dd *dictDecoder) writeByte(c byte) {
- dd.hist[dd.wrPos] = c
- dd.wrPos++
-}
-
-// writeCopy copies a string at a given (dist, length) to the output.
-// This returns the number of bytes copied and may be less than the requested
-// length if the available space in the output buffer is too small.
-//
-// This invariant must be kept: 0 < dist <= histSize()
-func (dd *dictDecoder) writeCopy(dist, length int) int {
- dstBase := dd.wrPos
- dstPos := dstBase
- srcPos := dstPos - dist
- endPos := dstPos + length
- if endPos > len(dd.hist) {
- endPos = len(dd.hist)
- }
-
- // Copy non-overlapping section after destination position.
- //
- // This section is non-overlapping in that the copy length for this section
- // is always less than or equal to the backwards distance. This can occur
- // if a distance refers to data that wraps-around in the buffer.
- // Thus, a backwards copy is performed here; that is, the exact bytes in
- // the source prior to the copy is placed in the destination.
- if srcPos < 0 {
- srcPos += len(dd.hist)
- dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
- srcPos = 0
- }
-
- // Copy possibly overlapping section before destination position.
- //
- // This section can overlap if the copy length for this section is larger
- // than the backwards distance. This is allowed by LZ77 so that repeated
- // strings can be succinctly represented using (dist, length) pairs.
- // Thus, a forwards copy is performed here; that is, the bytes copied is
- // possibly dependent on the resulting bytes in the destination as the copy
- // progresses along. This is functionally equivalent to the following:
- //
- // for i := 0; i < endPos-dstPos; i++ {
- // dd.hist[dstPos+i] = dd.hist[srcPos+i]
- // }
- // dstPos = endPos
- //
- for dstPos < endPos {
- dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
- }
-
- dd.wrPos = dstPos
- return dstPos - dstBase
-}
-
-// tryWriteCopy tries to copy a string at a given (distance, length) to the
-// output. This specialized version is optimized for short distances.
-//
-// This method is designed to be inlined for performance reasons.
-//
-// This invariant must be kept: 0 < dist <= histSize()
-func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
- dstPos := dd.wrPos
- endPos := dstPos + length
- if dstPos < dist || endPos > len(dd.hist) {
- return 0
- }
- dstBase := dstPos
- srcPos := dstPos - dist
-
- // Copy possibly overlapping section before destination position.
-loop:
- dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
- if dstPos < endPos {
- goto loop // Avoid for-loop so that this function can be inlined
- }
-
- dd.wrPos = dstPos
- return dstPos - dstBase
-}
-
-// readFlush returns a slice of the historical buffer that is ready to be
-// emitted to the user. The data returned by readFlush must be fully consumed
-// before calling any other dictDecoder methods.
-func (dd *dictDecoder) readFlush() []byte {
- toRead := dd.hist[dd.rdPos:dd.wrPos]
- dd.rdPos = dd.wrPos
- if dd.wrPos == len(dd.hist) {
- dd.wrPos, dd.rdPos = 0, 0
- dd.full = true
- }
- return toRead
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/fast_encoder.go
deleted file mode 100644
index 6d4c1e98bc5..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/fast_encoder.go
+++ /dev/null
@@ -1,254 +0,0 @@
-// Copyright 2011 The Snappy-Go Authors. All rights reserved.
-// Modified for deflate by Klaus Post (c) 2015.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-import (
- "fmt"
- "math/bits"
-)
-
-type fastEnc interface {
- Encode(dst *tokens, src []byte)
- Reset()
-}
-
-func newFastEnc(level int) fastEnc {
- switch level {
- case 1:
- return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}}
- case 2:
- return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}}
- case 3:
- return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}}
- case 4:
- return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}}
- case 5:
- return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}}
- case 6:
- return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}}
- default:
- panic("invalid level specified")
- }
-}
-
-const (
- tableBits = 15 // Bits used in the table
- tableSize = 1 << tableBits // Size of the table
- tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
- baseMatchOffset = 1 // The smallest match offset
- baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
- maxMatchOffset = 1 << 15 // The largest match offset
-
- bTableBits = 17 // Bits used in the big tables
- bTableSize = 1 << bTableBits // Size of the table
- allocHistory = maxStoreBlockSize * 10 // Size to preallocate for history.
- bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
-)
-
-const (
- prime3bytes = 506832829
- prime4bytes = 2654435761
- prime5bytes = 889523592379
- prime6bytes = 227718039650203
- prime7bytes = 58295818150454627
- prime8bytes = 0xcf1bbcdcb7a56463
-)
-
-func load32(b []byte, i int) uint32 {
- // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
- b = b[i:]
- b = b[:4]
- return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-}
-
-func load64(b []byte, i int) uint64 {
- // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
- b = b[i:]
- b = b[:8]
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
- uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-}
-
-func load3232(b []byte, i int32) uint32 {
- // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
- b = b[i:]
- b = b[:4]
- return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-}
-
-func load6432(b []byte, i int32) uint64 {
- // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
- b = b[i:]
- b = b[:8]
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
- uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-}
-
-func hash(u uint32) uint32 {
- return (u * 0x1e35a7bd) >> tableShift
-}
-
-type tableEntry struct {
- offset int32
-}
-
-// fastGen maintains the table for matches,
-// and the previous byte block for level 2.
-// This is the generic implementation.
-type fastGen struct {
- hist []byte
- cur int32
-}
-
-func (e *fastGen) addBlock(src []byte) int32 {
- // check if we have space already
- if len(e.hist)+len(src) > cap(e.hist) {
- if cap(e.hist) == 0 {
- e.hist = make([]byte, 0, allocHistory)
- } else {
- if cap(e.hist) < maxMatchOffset*2 {
- panic("unexpected buffer size")
- }
- // Move down
- offset := int32(len(e.hist)) - maxMatchOffset
- copy(e.hist[0:maxMatchOffset], e.hist[offset:])
- e.cur += offset
- e.hist = e.hist[:maxMatchOffset]
- }
- }
- s := int32(len(e.hist))
- e.hist = append(e.hist, src...)
- return s
-}
-
-// hash4 returns the hash of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <32.
-func hash4u(u uint32, h uint8) uint32 {
- return (u * prime4bytes) >> ((32 - h) & 31)
-}
-
-type tableEntryPrev struct {
- Cur tableEntry
- Prev tableEntry
-}
-
-// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <32.
-func hash4x64(u uint64, h uint8) uint32 {
- return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
-}
-
-// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <64.
-func hash7(u uint64, h uint8) uint32 {
- return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
-}
-
-// hash8 returns the hash of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <64.
-func hash8(u uint64, h uint8) uint32 {
- return uint32((u * prime8bytes) >> ((64 - h) & 63))
-}
-
-// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <64.
-func hash6(u uint64, h uint8) uint32 {
- return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
-}
-
-// matchlen will return the match length between offsets and t in src.
-// The maximum length returned is maxMatchLength - 4.
-// It is assumed that s > t, that t >=0 and s < len(src).
-func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
- if debugDecode {
- if t >= s {
- panic(fmt.Sprint("t >=s:", t, s))
- }
- if int(s) >= len(src) {
- panic(fmt.Sprint("s >= len(src):", s, len(src)))
- }
- if t < 0 {
- panic(fmt.Sprint("t < 0:", t))
- }
- if s-t > maxMatchOffset {
- panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
- }
- }
- s1 := int(s) + maxMatchLength - 4
- if s1 > len(src) {
- s1 = len(src)
- }
-
- // Extend the match to be as long as possible.
- return int32(matchLen(src[s:s1], src[t:]))
-}
-
-// matchlenLong will return the match length between offsets and t in src.
-// It is assumed that s > t, that t >=0 and s < len(src).
-func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
- if debugDecode {
- if t >= s {
- panic(fmt.Sprint("t >=s:", t, s))
- }
- if int(s) >= len(src) {
- panic(fmt.Sprint("s >= len(src):", s, len(src)))
- }
- if t < 0 {
- panic(fmt.Sprint("t < 0:", t))
- }
- if s-t > maxMatchOffset {
- panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
- }
- }
- // Extend the match to be as long as possible.
- return int32(matchLen(src[s:], src[t:]))
-}
-
-// Reset the encoding table.
-func (e *fastGen) Reset() {
- if cap(e.hist) < allocHistory {
- e.hist = make([]byte, 0, allocHistory)
- }
- // We offset current position so everything will be out of reach.
- // If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
- if e.cur <= bufferReset {
- e.cur += maxMatchOffset + int32(len(e.hist))
- }
- e.hist = e.hist[:0]
-}
-
-// matchLen returns the maximum length.
-// 'a' must be the shortest of the two.
-func matchLen(a, b []byte) int {
- b = b[:len(a)]
- var checked int
- if len(a) > 4 {
- // Try 4 bytes first
- if diff := load32(a, 0) ^ load32(b, 0); diff != 0 {
- return bits.TrailingZeros32(diff) >> 3
- }
- // Switch to 8 byte matching.
- checked = 4
- a = a[4:]
- b = b[4:]
- for len(a) >= 8 {
- b = b[:len(a)]
- if diff := load64(a, 0) ^ load64(b, 0); diff != 0 {
- return checked + (bits.TrailingZeros64(diff) >> 3)
- }
- checked += 8
- a = a[8:]
- b = b[8:]
- }
- }
- b = b[:len(a)]
- for i := range a {
- if a[i] != b[i] {
- return int(i) + checked
- }
- }
- return len(a) + checked
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/gen_inflate.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/gen_inflate.go
deleted file mode 100644
index c74a95fe7f6..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/gen_inflate.go
+++ /dev/null
@@ -1,274 +0,0 @@
-// +build generate
-
-//go:generate go run $GOFILE && gofmt -w inflate_gen.go
-
-package main
-
-import (
- "os"
- "strings"
-)
-
-func main() {
- f, err := os.Create("inflate_gen.go")
- if err != nil {
- panic(err)
- }
- defer f.Close()
- types := []string{"*bytes.Buffer", "*bytes.Reader", "*bufio.Reader", "*strings.Reader"}
- names := []string{"BytesBuffer", "BytesReader", "BufioReader", "StringsReader"}
- imports := []string{"bytes", "bufio", "io", "strings", "math/bits"}
- f.WriteString(`// Code generated by go generate gen_inflate.go. DO NOT EDIT.
-
-package flate
-
-import (
-`)
-
- for _, imp := range imports {
- f.WriteString("\t\"" + imp + "\"\n")
- }
- f.WriteString(")\n\n")
-
- template := `
-
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) $FUNCNAME$() {
- const (
- stateInit = iota // Zero value must be stateInit
- stateDict
- )
- fr := f.r.($TYPE$)
- moreBits := func() error {
- c, err := fr.ReadByte()
- if err != nil {
- return noEOF(err)
- }
- f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
- return nil
- }
-
- switch f.stepState {
- case stateInit:
- goto readLiteral
- case stateDict:
- goto copyHistory
- }
-
-readLiteral:
- // Read literal and/or (length, distance) according to RFC section 3.2.3.
- {
- var v int
- {
- // Inlined v, err := f.huffSym(f.hl)
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hl.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
- for {
- for nb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b = b
- f.nb = nb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- b |= uint32(c) << (nb & 31)
- nb += 8
- }
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= nb {
- if n == 0 {
- f.b = b
- f.nb = nb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- f.b = b >> (n & 31)
- f.nb = nb - n
- v = int(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- var n uint // number of bits extra
- var length int
- var err error
- switch {
- case v < 256:
- f.dict.writeByte(byte(v))
- if f.dict.availWrite() == 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).$FUNCNAME$
- f.stepState = stateInit
- return
- }
- goto readLiteral
- case v == 256:
- f.finishBlock()
- return
- // otherwise, reference to older data
- case v < 265:
- length = v - (257 - 3)
- n = 0
- case v < 269:
- length = v*2 - (265*2 - 11)
- n = 1
- case v < 273:
- length = v*4 - (269*4 - 19)
- n = 2
- case v < 277:
- length = v*8 - (273*8 - 35)
- n = 3
- case v < 281:
- length = v*16 - (277*16 - 67)
- n = 4
- case v < 285:
- length = v*32 - (281*32 - 131)
- n = 5
- case v < maxNumLit:
- length = 258
- n = 0
- default:
- if debugDecode {
- fmt.Println(v, ">= maxNumLit")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- if n > 0 {
- for f.nb < n {
- if err = moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits n>0:", err)
- }
- f.err = err
- return
- }
- }
- length += int(f.b & uint32(1<>= n
- f.nb -= n
- }
-
- var dist int
- if f.hd == nil {
- for f.nb < 5 {
- if err = moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb<5:", err)
- }
- f.err = err
- return
- }
- }
- dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
- } else {
- if dist, err = f.huffSym(f.hd); err != nil {
- if debugDecode {
- fmt.Println("huffsym:", err)
- }
- f.err = err
- return
- }
- }
-
- switch {
- case dist < 4:
- dist++
- case dist < maxNumDist:
- nb := uint(dist-2) >> 1
- // have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << nb
- for f.nb < nb {
- if err = moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb>= nb
- f.nb -= nb
- dist = 1<<(nb+1) + 1 + extra
- default:
- if debugDecode {
- fmt.Println("dist too big:", dist, maxNumDist)
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- // No check on length; encoding can be prescient.
- if dist > f.dict.histSize() {
- if debugDecode {
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- f.copyLen, f.copyDist = length, dist
- goto copyHistory
- }
-
-copyHistory:
- // Perform a backwards copy according to RFC section 3.2.3.
- {
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
- if cnt == 0 {
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
- }
- f.copyLen -= cnt
-
- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).$FUNCNAME$ // We need to continue this work
- f.stepState = stateDict
- return
- }
- goto readLiteral
- }
-}
-
-`
- for i, t := range types {
- s := strings.Replace(template, "$FUNCNAME$", "huffman"+names[i], -1)
- s = strings.Replace(s, "$TYPE$", t, -1)
- f.WriteString(s)
- }
- f.WriteString("func (f *decompressor) huffmanBlockDecoder() func() {\n")
- f.WriteString("\tswitch f.r.(type) {\n")
- for i, t := range types {
- f.WriteString("\t\tcase " + t + ":\n")
- f.WriteString("\t\t\treturn f.huffman" + names[i] + "\n")
- }
- f.WriteString("\t\tdefault:\n")
- f.WriteString("\t\t\treturn f.huffmanBlockGeneric")
- f.WriteString("\t}\n}\n")
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
deleted file mode 100644
index 53fe1d06e25..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ /dev/null
@@ -1,911 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-import (
- "io"
-)
-
-const (
- // The largest offset code.
- offsetCodeCount = 30
-
- // The special code used to mark the end of a block.
- endBlockMarker = 256
-
- // The first length code.
- lengthCodesStart = 257
-
- // The number of codegen codes.
- codegenCodeCount = 19
- badCode = 255
-
- // bufferFlushSize indicates the buffer size
- // after which bytes are flushed to the writer.
- // Should preferably be a multiple of 6, since
- // we accumulate 6 bytes between writes to the buffer.
- bufferFlushSize = 240
-
- // bufferSize is the actual output byte buffer size.
- // It must have additional headroom for a flush
- // which can contain up to 8 bytes.
- bufferSize = bufferFlushSize + 8
-)
-
-// The number of extra bits needed by length code X - LENGTH_CODES_START.
-var lengthExtraBits = [32]int8{
- /* 257 */ 0, 0, 0,
- /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
- /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
- /* 280 */ 4, 5, 5, 5, 5, 0,
-}
-
-// The length indicated by length code X - LENGTH_CODES_START.
-var lengthBase = [32]uint8{
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
- 12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
- 64, 80, 96, 112, 128, 160, 192, 224, 255,
-}
-
-// offset code word extra bits.
-var offsetExtraBits = [64]int8{
- 0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
- 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
- 9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
- /* extended window */
- 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20,
-}
-
-var offsetBase = [64]uint32{
- /* normal deflate */
- 0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
- 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
- 0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
- 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
- 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
- 0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
-
- /* extended window */
- 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000,
- 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000,
- 0x100000, 0x180000, 0x200000, 0x300000,
-}
-
-// The odd order in which the codegen code sizes are written.
-var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
-
-type huffmanBitWriter struct {
- // writer is the underlying writer.
- // Do not use it directly; use the write method, which ensures
- // that Write errors are sticky.
- writer io.Writer
-
- // Data waiting to be written is bytes[0:nbytes]
- // and then the low nbits of bits.
- bits uint64
- nbits uint16
- nbytes uint8
- literalEncoding *huffmanEncoder
- offsetEncoding *huffmanEncoder
- codegenEncoding *huffmanEncoder
- err error
- lastHeader int
- // Set between 0 (reused block can be up to 2x the size)
- logNewTablePenalty uint
- lastHuffMan bool
- bytes [256]byte
- literalFreq [lengthCodesStart + 32]uint16
- offsetFreq [32]uint16
- codegenFreq [codegenCodeCount]uint16
-
- // codegen must have an extra space for the final symbol.
- codegen [literalCount + offsetCodeCount + 1]uint8
-}
-
-// Huffman reuse.
-//
-// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections.
-//
-// This is controlled by several variables:
-//
-// If lastHeader is non-zero the Huffman table can be reused.
-// This also indicates that a Huffman table has been generated that can output all
-// possible symbols.
-// It also indicates that an EOB has not yet been emitted, so if a new tabel is generated
-// an EOB with the previous table must be written.
-//
-// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid.
-//
-// An incoming block estimates the output size of a new table using a 'fresh' by calculating the
-// optimal size and adding a penalty in 'logNewTablePenalty'.
-// A Huffman table is not optimal, which is why we add a penalty, and generating a new table
-// is slower both for compression and decompression.
-
-func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
- return &huffmanBitWriter{
- writer: w,
- literalEncoding: newHuffmanEncoder(literalCount),
- codegenEncoding: newHuffmanEncoder(codegenCodeCount),
- offsetEncoding: newHuffmanEncoder(offsetCodeCount),
- }
-}
-
-func (w *huffmanBitWriter) reset(writer io.Writer) {
- w.writer = writer
- w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
- w.lastHeader = 0
- w.lastHuffMan = false
-}
-
-func (w *huffmanBitWriter) canReuse(t *tokens) (offsets, lits bool) {
- offsets, lits = true, true
- a := t.offHist[:offsetCodeCount]
- b := w.offsetFreq[:len(a)]
- for i := range a {
- if b[i] == 0 && a[i] != 0 {
- offsets = false
- break
- }
- }
-
- a = t.extraHist[:literalCount-256]
- b = w.literalFreq[256:literalCount]
- b = b[:len(a)]
- for i := range a {
- if b[i] == 0 && a[i] != 0 {
- lits = false
- break
- }
- }
- if lits {
- a = t.litHist[:]
- b = w.literalFreq[:len(a)]
- for i := range a {
- if b[i] == 0 && a[i] != 0 {
- lits = false
- break
- }
- }
- }
- return
-}
-
-func (w *huffmanBitWriter) flush() {
- if w.err != nil {
- w.nbits = 0
- return
- }
- if w.lastHeader > 0 {
- // We owe an EOB
- w.writeCode(w.literalEncoding.codes[endBlockMarker])
- w.lastHeader = 0
- }
- n := w.nbytes
- for w.nbits != 0 {
- w.bytes[n] = byte(w.bits)
- w.bits >>= 8
- if w.nbits > 8 { // Avoid underflow
- w.nbits -= 8
- } else {
- w.nbits = 0
- }
- n++
- }
- w.bits = 0
- w.write(w.bytes[:n])
- w.nbytes = 0
-}
-
-func (w *huffmanBitWriter) write(b []byte) {
- if w.err != nil {
- return
- }
- _, w.err = w.writer.Write(b)
-}
-
-func (w *huffmanBitWriter) writeBits(b int32, nb uint16) {
- w.bits |= uint64(b) << (w.nbits & 63)
- w.nbits += nb
- if w.nbits >= 48 {
- w.writeOutBits()
- }
-}
-
-func (w *huffmanBitWriter) writeBytes(bytes []byte) {
- if w.err != nil {
- return
- }
- n := w.nbytes
- if w.nbits&7 != 0 {
- w.err = InternalError("writeBytes with unfinished bits")
- return
- }
- for w.nbits != 0 {
- w.bytes[n] = byte(w.bits)
- w.bits >>= 8
- w.nbits -= 8
- n++
- }
- if n != 0 {
- w.write(w.bytes[:n])
- }
- w.nbytes = 0
- w.write(bytes)
-}
-
-// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
-// the literal and offset lengths arrays (which are concatenated into a single
-// array). This method generates that run-length encoding.
-//
-// The result is written into the codegen array, and the frequencies
-// of each code is written into the codegenFreq array.
-// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
-// information. Code badCode is an end marker
-//
-// numLiterals The number of literals in literalEncoding
-// numOffsets The number of offsets in offsetEncoding
-// litenc, offenc The literal and offset encoder to use
-func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
- for i := range w.codegenFreq {
- w.codegenFreq[i] = 0
- }
- // Note that we are using codegen both as a temporary variable for holding
- // a copy of the frequencies, and as the place where we put the result.
- // This is fine because the output is always shorter than the input used
- // so far.
- codegen := w.codegen[:] // cache
- // Copy the concatenated code sizes to codegen. Put a marker at the end.
- cgnl := codegen[:numLiterals]
- for i := range cgnl {
- cgnl[i] = uint8(litEnc.codes[i].len)
- }
-
- cgnl = codegen[numLiterals : numLiterals+numOffsets]
- for i := range cgnl {
- cgnl[i] = uint8(offEnc.codes[i].len)
- }
- codegen[numLiterals+numOffsets] = badCode
-
- size := codegen[0]
- count := 1
- outIndex := 0
- for inIndex := 1; size != badCode; inIndex++ {
- // INVARIANT: We have seen "count" copies of size that have not yet
- // had output generated for them.
- nextSize := codegen[inIndex]
- if nextSize == size {
- count++
- continue
- }
- // We need to generate codegen indicating "count" of size.
- if size != 0 {
- codegen[outIndex] = size
- outIndex++
- w.codegenFreq[size]++
- count--
- for count >= 3 {
- n := 6
- if n > count {
- n = count
- }
- codegen[outIndex] = 16
- outIndex++
- codegen[outIndex] = uint8(n - 3)
- outIndex++
- w.codegenFreq[16]++
- count -= n
- }
- } else {
- for count >= 11 {
- n := 138
- if n > count {
- n = count
- }
- codegen[outIndex] = 18
- outIndex++
- codegen[outIndex] = uint8(n - 11)
- outIndex++
- w.codegenFreq[18]++
- count -= n
- }
- if count >= 3 {
- // count >= 3 && count <= 10
- codegen[outIndex] = 17
- outIndex++
- codegen[outIndex] = uint8(count - 3)
- outIndex++
- w.codegenFreq[17]++
- count = 0
- }
- }
- count--
- for ; count >= 0; count-- {
- codegen[outIndex] = size
- outIndex++
- w.codegenFreq[size]++
- }
- // Set up invariant for next time through the loop.
- size = nextSize
- count = 1
- }
- // Marker indicating the end of the codegen.
- codegen[outIndex] = badCode
-}
-
-func (w *huffmanBitWriter) codegens() int {
- numCodegens := len(w.codegenFreq)
- for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
- numCodegens--
- }
- return numCodegens
-}
-
-func (w *huffmanBitWriter) headerSize() (size, numCodegens int) {
- numCodegens = len(w.codegenFreq)
- for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
- numCodegens--
- }
- return 3 + 5 + 5 + 4 + (3 * numCodegens) +
- w.codegenEncoding.bitLength(w.codegenFreq[:]) +
- int(w.codegenFreq[16])*2 +
- int(w.codegenFreq[17])*3 +
- int(w.codegenFreq[18])*7, numCodegens
-}
-
-// dynamicSize returns the size of dynamically encoded data in bits.
-func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) {
- size = litEnc.bitLength(w.literalFreq[:]) +
- offEnc.bitLength(w.offsetFreq[:])
- return size
-}
-
-// dynamicSize returns the size of dynamically encoded data in bits.
-func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
- header, numCodegens := w.headerSize()
- size = header +
- litEnc.bitLength(w.literalFreq[:]) +
- offEnc.bitLength(w.offsetFreq[:]) +
- extraBits
- return size, numCodegens
-}
-
-// extraBitSize will return the number of bits that will be written
-// as "extra" bits on matches.
-func (w *huffmanBitWriter) extraBitSize() int {
- total := 0
- for i, n := range w.literalFreq[257:literalCount] {
- total += int(n) * int(lengthExtraBits[i&31])
- }
- for i, n := range w.offsetFreq[:offsetCodeCount] {
- total += int(n) * int(offsetExtraBits[i&31])
- }
- return total
-}
-
-// fixedSize returns the size of dynamically encoded data in bits.
-func (w *huffmanBitWriter) fixedSize(extraBits int) int {
- return 3 +
- fixedLiteralEncoding.bitLength(w.literalFreq[:]) +
- fixedOffsetEncoding.bitLength(w.offsetFreq[:]) +
- extraBits
-}
-
-// storedSize calculates the stored size, including header.
-// The function returns the size in bits and whether the block
-// fits inside a single block.
-func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
- if in == nil {
- return 0, false
- }
- if len(in) <= maxStoreBlockSize {
- return (len(in) + 5) * 8, true
- }
- return 0, false
-}
-
-func (w *huffmanBitWriter) writeCode(c hcode) {
- // The function does not get inlined if we "& 63" the shift.
- w.bits |= uint64(c.code) << w.nbits
- w.nbits += c.len
- if w.nbits >= 48 {
- w.writeOutBits()
- }
-}
-
-// writeOutBits will write bits to the buffer.
-func (w *huffmanBitWriter) writeOutBits() {
- bits := w.bits
- w.bits >>= 48
- w.nbits -= 48
- n := w.nbytes
- w.bytes[n] = byte(bits)
- w.bytes[n+1] = byte(bits >> 8)
- w.bytes[n+2] = byte(bits >> 16)
- w.bytes[n+3] = byte(bits >> 24)
- w.bytes[n+4] = byte(bits >> 32)
- w.bytes[n+5] = byte(bits >> 40)
- n += 6
- if n >= bufferFlushSize {
- if w.err != nil {
- n = 0
- return
- }
- w.write(w.bytes[:n])
- n = 0
- }
- w.nbytes = n
-}
-
-// Write the header of a dynamic Huffman block to the output stream.
-//
-// numLiterals The number of literals specified in codegen
-// numOffsets The number of offsets specified in codegen
-// numCodegens The number of codegens used in codegen
-func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
- if w.err != nil {
- return
- }
- var firstBits int32 = 4
- if isEof {
- firstBits = 5
- }
- w.writeBits(firstBits, 3)
- w.writeBits(int32(numLiterals-257), 5)
- w.writeBits(int32(numOffsets-1), 5)
- w.writeBits(int32(numCodegens-4), 4)
-
- for i := 0; i < numCodegens; i++ {
- value := uint(w.codegenEncoding.codes[codegenOrder[i]].len)
- w.writeBits(int32(value), 3)
- }
-
- i := 0
- for {
- var codeWord = uint32(w.codegen[i])
- i++
- if codeWord == badCode {
- break
- }
- w.writeCode(w.codegenEncoding.codes[codeWord])
-
- switch codeWord {
- case 16:
- w.writeBits(int32(w.codegen[i]), 2)
- i++
- case 17:
- w.writeBits(int32(w.codegen[i]), 3)
- i++
- case 18:
- w.writeBits(int32(w.codegen[i]), 7)
- i++
- }
- }
-}
-
-// writeStoredHeader will write a stored header.
-// If the stored block is only used for EOF,
-// it is replaced with a fixed huffman block.
-func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
- if w.err != nil {
- return
- }
- if w.lastHeader > 0 {
- // We owe an EOB
- w.writeCode(w.literalEncoding.codes[endBlockMarker])
- w.lastHeader = 0
- }
-
- // To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes.
- if length == 0 && isEof {
- w.writeFixedHeader(isEof)
- // EOB: 7 bits, value: 0
- w.writeBits(0, 7)
- w.flush()
- return
- }
-
- var flag int32
- if isEof {
- flag = 1
- }
- w.writeBits(flag, 3)
- w.flush()
- w.writeBits(int32(length), 16)
- w.writeBits(int32(^uint16(length)), 16)
-}
-
-func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
- if w.err != nil {
- return
- }
- if w.lastHeader > 0 {
- // We owe an EOB
- w.writeCode(w.literalEncoding.codes[endBlockMarker])
- w.lastHeader = 0
- }
-
- // Indicate that we are a fixed Huffman block
- var value int32 = 2
- if isEof {
- value = 3
- }
- w.writeBits(value, 3)
-}
-
-// writeBlock will write a block of tokens with the smallest encoding.
-// The original input can be supplied, and if the huffman encoded data
-// is larger than the original bytes, the data will be written as a
-// stored block.
-// If the input is nil, the tokens will always be Huffman encoded.
-func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
- if w.err != nil {
- return
- }
-
- tokens.AddEOB()
- if w.lastHeader > 0 {
- // We owe an EOB
- w.writeCode(w.literalEncoding.codes[endBlockMarker])
- w.lastHeader = 0
- }
- numLiterals, numOffsets := w.indexTokens(tokens, false)
- w.generate(tokens)
- var extraBits int
- storedSize, storable := w.storedSize(input)
- if storable {
- extraBits = w.extraBitSize()
- }
-
- // Figure out smallest code.
- // Fixed Huffman baseline.
- var literalEncoding = fixedLiteralEncoding
- var offsetEncoding = fixedOffsetEncoding
- var size = w.fixedSize(extraBits)
-
- // Dynamic Huffman?
- var numCodegens int
-
- // Generate codegen and codegenFrequencies, which indicates how to encode
- // the literalEncoding and the offsetEncoding.
- w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
- w.codegenEncoding.generate(w.codegenFreq[:], 7)
- dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
-
- if dynamicSize < size {
- size = dynamicSize
- literalEncoding = w.literalEncoding
- offsetEncoding = w.offsetEncoding
- }
-
- // Stored bytes?
- if storable && storedSize < size {
- w.writeStoredHeader(len(input), eof)
- w.writeBytes(input)
- return
- }
-
- // Huffman.
- if literalEncoding == fixedLiteralEncoding {
- w.writeFixedHeader(eof)
- } else {
- w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
- }
-
- // Write the tokens.
- w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes)
-}
-
-// writeBlockDynamic encodes a block using a dynamic Huffman table.
-// This should be used if the symbols used have a disproportionate
-// histogram distribution.
-// If input is supplied and the compression savings are below 1/16th of the
-// input size the block is stored.
-func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) {
- if w.err != nil {
- return
- }
-
- sync = sync || eof
- if sync {
- tokens.AddEOB()
- }
-
- // We cannot reuse pure huffman table, and must mark as EOF.
- if (w.lastHuffMan || eof) && w.lastHeader > 0 {
- // We will not try to reuse.
- w.writeCode(w.literalEncoding.codes[endBlockMarker])
- w.lastHeader = 0
- w.lastHuffMan = false
- }
- if !sync {
- tokens.Fill()
- }
- numLiterals, numOffsets := w.indexTokens(tokens, !sync)
-
- var size int
- // Check if we should reuse.
- if w.lastHeader > 0 {
- // Estimate size for using a new table.
- // Use the previous header size as the best estimate.
- newSize := w.lastHeader + tokens.EstimatedBits()
- newSize += newSize >> w.logNewTablePenalty
-
- // The estimated size is calculated as an optimal table.
- // We add a penalty to make it more realistic and re-use a bit more.
- reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + w.extraBitSize()
-
- // Check if a new table is better.
- if newSize < reuseSize {
- // Write the EOB we owe.
- w.writeCode(w.literalEncoding.codes[endBlockMarker])
- size = newSize
- w.lastHeader = 0
- } else {
- size = reuseSize
- }
- // Check if we get a reasonable size decrease.
- if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
- w.writeStoredHeader(len(input), eof)
- w.writeBytes(input)
- w.lastHeader = 0
- return
- }
- }
-
- // We want a new block/table
- if w.lastHeader == 0 {
- w.generate(tokens)
- // Generate codegen and codegenFrequencies, which indicates how to encode
- // the literalEncoding and the offsetEncoding.
- w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
- w.codegenEncoding.generate(w.codegenFreq[:], 7)
- var numCodegens int
- size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, w.extraBitSize())
- // Store bytes, if we don't get a reasonable improvement.
- if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
- w.writeStoredHeader(len(input), eof)
- w.writeBytes(input)
- w.lastHeader = 0
- return
- }
-
- // Write Huffman table.
- w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
- w.lastHeader, _ = w.headerSize()
- w.lastHuffMan = false
- }
-
- if sync {
- w.lastHeader = 0
- }
- // Write the tokens.
- w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes)
-}
-
-// indexTokens indexes a slice of tokens, and updates
-// literalFreq and offsetFreq, and generates literalEncoding
-// and offsetEncoding.
-// The number of literal and offset tokens is returned.
-func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
- copy(w.literalFreq[:], t.litHist[:])
- copy(w.literalFreq[256:], t.extraHist[:])
- copy(w.offsetFreq[:], t.offHist[:offsetCodeCount])
-
- if t.n == 0 {
- return
- }
- if filled {
- return maxNumLit, maxNumDist
- }
- // get the number of literals
- numLiterals = len(w.literalFreq)
- for w.literalFreq[numLiterals-1] == 0 {
- numLiterals--
- }
- // get the number of offsets
- numOffsets = len(w.offsetFreq)
- for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
- numOffsets--
- }
- if numOffsets == 0 {
- // We haven't found a single match. If we want to go with the dynamic encoding,
- // we should count at least one offset to be sure that the offset huffman tree could be encoded.
- w.offsetFreq[0] = 1
- numOffsets = 1
- }
- return
-}
-
-func (w *huffmanBitWriter) generate(t *tokens) {
- w.literalEncoding.generate(w.literalFreq[:literalCount], 15)
- w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
-}
-
-// writeTokens writes a slice of tokens to the output.
-// codes for literal and offset encoding must be supplied.
-func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
- if w.err != nil {
- return
- }
- if len(tokens) == 0 {
- return
- }
-
- // Only last token should be endBlockMarker.
- var deferEOB bool
- if tokens[len(tokens)-1] == endBlockMarker {
- tokens = tokens[:len(tokens)-1]
- deferEOB = true
- }
-
- // Create slices up to the next power of two to avoid bounds checks.
- lits := leCodes[:256]
- offs := oeCodes[:32]
- lengths := leCodes[lengthCodesStart:]
- lengths = lengths[:32]
- for _, t := range tokens {
- if t < matchType {
- w.writeCode(lits[t.literal()])
- continue
- }
-
- // Write the length
- length := t.length()
- lengthCode := lengthCode(length)
- if false {
- w.writeCode(lengths[lengthCode&31])
- } else {
- // inlined
- c := lengths[lengthCode&31]
- w.bits |= uint64(c.code) << (w.nbits & 63)
- w.nbits += c.len
- if w.nbits >= 48 {
- w.writeOutBits()
- }
- }
-
- extraLengthBits := uint16(lengthExtraBits[lengthCode&31])
- if extraLengthBits > 0 {
- extraLength := int32(length - lengthBase[lengthCode&31])
- w.writeBits(extraLength, extraLengthBits)
- }
- // Write the offset
- offset := t.offset()
- offsetCode := offsetCode(offset)
- if false {
- w.writeCode(offs[offsetCode&31])
- } else {
- // inlined
- c := offs[offsetCode&31]
- w.bits |= uint64(c.code) << (w.nbits & 63)
- w.nbits += c.len
- if w.nbits >= 48 {
- w.writeOutBits()
- }
- }
- extraOffsetBits := uint16(offsetExtraBits[offsetCode&63])
- if extraOffsetBits > 0 {
- extraOffset := int32(offset - offsetBase[offsetCode&63])
- w.writeBits(extraOffset, extraOffsetBits)
- }
- }
- if deferEOB {
- w.writeCode(leCodes[endBlockMarker])
- }
-}
-
-// huffOffset is a static offset encoder used for huffman only encoding.
-// It can be reused since we will not be encoding offset values.
-var huffOffset *huffmanEncoder
-
-func init() {
- w := newHuffmanBitWriter(nil)
- w.offsetFreq[0] = 1
- huffOffset = newHuffmanEncoder(offsetCodeCount)
- huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15)
-}
-
-// writeBlockHuff encodes a block of bytes as either
-// Huffman encoded literals or uncompressed bytes if the
-// results only gains very little from compression.
-func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
- if w.err != nil {
- return
- }
-
- // Clear histogram
- for i := range w.literalFreq[:] {
- w.literalFreq[i] = 0
- }
- if !w.lastHuffMan {
- for i := range w.offsetFreq[:] {
- w.offsetFreq[i] = 0
- }
- }
-
- // Add everything as literals
- // We have to estimate the header size.
- // Assume header is around 70 bytes:
- // https://stackoverflow.com/a/25454430
- const guessHeaderSizeBits = 70 * 8
- estBits, estExtra := histogramSize(input, w.literalFreq[:], !eof && !sync)
- estBits += w.lastHeader + 15
- if w.lastHeader == 0 {
- estBits += guessHeaderSizeBits
- }
- estBits += estBits >> w.logNewTablePenalty
-
- // Store bytes, if we don't get a reasonable improvement.
- ssize, storable := w.storedSize(input)
- if storable && ssize < estBits {
- w.writeStoredHeader(len(input), eof)
- w.writeBytes(input)
- return
- }
-
- if w.lastHeader > 0 {
- reuseSize := w.literalEncoding.bitLength(w.literalFreq[:256])
- estBits += estExtra
-
- if estBits < reuseSize {
- // We owe an EOB
- w.writeCode(w.literalEncoding.codes[endBlockMarker])
- w.lastHeader = 0
- }
- }
-
- const numLiterals = endBlockMarker + 1
- const numOffsets = 1
- if w.lastHeader == 0 {
- w.literalFreq[endBlockMarker] = 1
- w.literalEncoding.generate(w.literalFreq[:numLiterals], 15)
-
- // Generate codegen and codegenFrequencies, which indicates how to encode
- // the literalEncoding and the offsetEncoding.
- w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
- w.codegenEncoding.generate(w.codegenFreq[:], 7)
- numCodegens := w.codegens()
-
- // Huffman.
- w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
- w.lastHuffMan = true
- w.lastHeader, _ = w.headerSize()
- }
-
- encoding := w.literalEncoding.codes[:257]
- for _, t := range input {
- // Bitwriting inlined, ~30% speedup
- c := encoding[t]
- w.bits |= uint64(c.code) << ((w.nbits) & 63)
- w.nbits += c.len
- if w.nbits >= 48 {
- bits := w.bits
- w.bits >>= 48
- w.nbits -= 48
- n := w.nbytes
- w.bytes[n] = byte(bits)
- w.bytes[n+1] = byte(bits >> 8)
- w.bytes[n+2] = byte(bits >> 16)
- w.bytes[n+3] = byte(bits >> 24)
- w.bytes[n+4] = byte(bits >> 32)
- w.bytes[n+5] = byte(bits >> 40)
- n += 6
- if n >= bufferFlushSize {
- if w.err != nil {
- n = 0
- return
- }
- w.write(w.bytes[:n])
- n = 0
- }
- w.nbytes = n
- }
- }
- if eof || sync {
- w.writeCode(encoding[endBlockMarker])
- w.lastHeader = 0
- w.lastHuffMan = false
- }
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_code.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_code.go
deleted file mode 100644
index 4c39a301871..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_code.go
+++ /dev/null
@@ -1,363 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-import (
- "math"
- "math/bits"
-)
-
-const (
- maxBitsLimit = 16
- // number of valid literals
- literalCount = 286
-)
-
-// hcode is a huffman code with a bit code and bit length.
-type hcode struct {
- code, len uint16
-}
-
-type huffmanEncoder struct {
- codes []hcode
- freqcache []literalNode
- bitCount [17]int32
-}
-
-type literalNode struct {
- literal uint16
- freq uint16
-}
-
-// A levelInfo describes the state of the constructed tree for a given depth.
-type levelInfo struct {
- // Our level. for better printing
- level int32
-
- // The frequency of the last node at this level
- lastFreq int32
-
- // The frequency of the next character to add to this level
- nextCharFreq int32
-
- // The frequency of the next pair (from level below) to add to this level.
- // Only valid if the "needed" value of the next lower level is 0.
- nextPairFreq int32
-
- // The number of chains remaining to generate for this level before moving
- // up to the next level
- needed int32
-}
-
-// set sets the code and length of an hcode.
-func (h *hcode) set(code uint16, length uint16) {
- h.len = length
- h.code = code
-}
-
-func reverseBits(number uint16, bitLength byte) uint16 {
- return bits.Reverse16(number << ((16 - bitLength) & 15))
-}
-
-func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} }
-
-func newHuffmanEncoder(size int) *huffmanEncoder {
- // Make capacity to next power of two.
- c := uint(bits.Len32(uint32(size - 1)))
- return &huffmanEncoder{codes: make([]hcode, size, 1<= 3
-// The cases of 0, 1, and 2 literals are handled by special case code.
-//
-// list An array of the literals with non-zero frequencies
-// and their associated frequencies. The array is in order of increasing
-// frequency, and has as its last element a special element with frequency
-// MaxInt32
-// maxBits The maximum number of bits that should be used to encode any literal.
-// Must be less than 16.
-// return An integer array in which array[i] indicates the number of literals
-// that should be encoded in i bits.
-func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
- if maxBits >= maxBitsLimit {
- panic("flate: maxBits too large")
- }
- n := int32(len(list))
- list = list[0 : n+1]
- list[n] = maxNode()
-
- // The tree can't have greater depth than n - 1, no matter what. This
- // saves a little bit of work in some small cases
- if maxBits > n-1 {
- maxBits = n - 1
- }
-
- // Create information about each of the levels.
- // A bogus "Level 0" whose sole purpose is so that
- // level1.prev.needed==0. This makes level1.nextPairFreq
- // be a legitimate value that never gets chosen.
- var levels [maxBitsLimit]levelInfo
- // leafCounts[i] counts the number of literals at the left
- // of ancestors of the rightmost node at level i.
- // leafCounts[i][j] is the number of literals at the left
- // of the level j ancestor.
- var leafCounts [maxBitsLimit][maxBitsLimit]int32
-
- for level := int32(1); level <= maxBits; level++ {
- // For every level, the first two items are the first two characters.
- // We initialize the levels as if we had already figured this out.
- levels[level] = levelInfo{
- level: level,
- lastFreq: int32(list[1].freq),
- nextCharFreq: int32(list[2].freq),
- nextPairFreq: int32(list[0].freq) + int32(list[1].freq),
- }
- leafCounts[level][level] = 2
- if level == 1 {
- levels[level].nextPairFreq = math.MaxInt32
- }
- }
-
- // We need a total of 2*n - 2 items at top level and have already generated 2.
- levels[maxBits].needed = 2*n - 4
-
- level := maxBits
- for {
- l := &levels[level]
- if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
- // We've run out of both leafs and pairs.
- // End all calculations for this level.
- // To make sure we never come back to this level or any lower level,
- // set nextPairFreq impossibly large.
- l.needed = 0
- levels[level+1].nextPairFreq = math.MaxInt32
- level++
- continue
- }
-
- prevFreq := l.lastFreq
- if l.nextCharFreq < l.nextPairFreq {
- // The next item on this row is a leaf node.
- n := leafCounts[level][level] + 1
- l.lastFreq = l.nextCharFreq
- // Lower leafCounts are the same of the previous node.
- leafCounts[level][level] = n
- e := list[n]
- if e.literal < math.MaxUint16 {
- l.nextCharFreq = int32(e.freq)
- } else {
- l.nextCharFreq = math.MaxInt32
- }
- } else {
- // The next item on this row is a pair from the previous row.
- // nextPairFreq isn't valid until we generate two
- // more values in the level below
- l.lastFreq = l.nextPairFreq
- // Take leaf counts from the lower level, except counts[level] remains the same.
- copy(leafCounts[level][:level], leafCounts[level-1][:level])
- levels[l.level-1].needed = 2
- }
-
- if l.needed--; l.needed == 0 {
- // We've done everything we need to do for this level.
- // Continue calculating one level up. Fill in nextPairFreq
- // of that level with the sum of the two nodes we've just calculated on
- // this level.
- if l.level == maxBits {
- // All done!
- break
- }
- levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
- level++
- } else {
- // If we stole from below, move down temporarily to replenish it.
- for levels[level-1].needed > 0 {
- level--
- }
- }
- }
-
- // Somethings is wrong if at the end, the top level is null or hasn't used
- // all of the leaves.
- if leafCounts[maxBits][maxBits] != n {
- panic("leafCounts[maxBits][maxBits] != n")
- }
-
- bitCount := h.bitCount[:maxBits+1]
- bits := 1
- counts := &leafCounts[maxBits]
- for level := maxBits; level > 0; level-- {
- // chain.leafCount gives the number of literals requiring at least "bits"
- // bits to encode.
- bitCount[bits] = counts[level] - counts[level-1]
- bits++
- }
- return bitCount
-}
-
-// Look at the leaves and assign them a bit count and an encoding as specified
-// in RFC 1951 3.2.2
-func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
- code := uint16(0)
- for n, bits := range bitCount {
- code <<= 1
- if n == 0 || bits == 0 {
- continue
- }
- // The literals list[len(list)-bits] .. list[len(list)-bits]
- // are encoded using "bits" bits, and get the values
- // code, code + 1, .... The code values are
- // assigned in literal order (not frequency order).
- chunk := list[len(list)-int(bits):]
-
- sortByLiteral(chunk)
- for _, node := range chunk {
- h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
- code++
- }
- list = list[0 : len(list)-int(bits)]
- }
-}
-
-// Update this Huffman Code object to be the minimum code for the specified frequency count.
-//
-// freq An array of frequencies, in which frequency[i] gives the frequency of literal i.
-// maxBits The maximum number of bits to use for any literal.
-func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
- if h.freqcache == nil {
- // Allocate a reusable buffer with the longest possible frequency table.
- // Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
- // The largest of these is literalCount, so we allocate for that case.
- h.freqcache = make([]literalNode, literalCount+1)
- }
- list := h.freqcache[:len(freq)+1]
- // Number of non-zero literals
- count := 0
- // Set list to be the set of all non-zero literals and their frequencies
- for i, f := range freq {
- if f != 0 {
- list[count] = literalNode{uint16(i), f}
- count++
- } else {
- list[count] = literalNode{}
- h.codes[i].len = 0
- }
- }
- list[len(freq)] = literalNode{}
-
- list = list[:count]
- if count <= 2 {
- // Handle the small cases here, because they are awkward for the general case code. With
- // two or fewer literals, everything has bit length 1.
- for i, node := range list {
- // "list" is in order of increasing literal value.
- h.codes[node.literal].set(uint16(i), 1)
- }
- return
- }
- sortByFreq(list)
-
- // Get the number of literals for each bit count
- bitCount := h.bitCounts(list, maxBits)
- // And do the assignment
- h.assignEncodingAndSize(bitCount, list)
-}
-
-func atLeastOne(v float32) float32 {
- if v < 1 {
- return 1
- }
- return v
-}
-
-// histogramSize accumulates a histogram of b in h.
-// An estimated size in bits is returned.
-// Unassigned values are assigned '1' in the histogram.
-// len(h) must be >= 256, and h's elements must be all zeroes.
-func histogramSize(b []byte, h []uint16, fill bool) (int, int) {
- h = h[:256]
- for _, t := range b {
- h[t]++
- }
- invTotal := 1.0 / float32(len(b))
- shannon := float32(0.0)
- var extra float32
- if fill {
- oneBits := atLeastOne(-mFastLog2(invTotal))
- for i, v := range h[:] {
- if v > 0 {
- n := float32(v)
- shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
- } else {
- h[i] = 1
- extra += oneBits
- }
- }
- } else {
- for _, v := range h[:] {
- if v > 0 {
- n := float32(v)
- shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
- }
- }
- }
-
- return int(shannon + 0.99), int(extra + 0.99)
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
deleted file mode 100644
index 20778029900..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-// Sort sorts data.
-// It makes one call to data.Len to determine n, and O(n*log(n)) calls to
-// data.Less and data.Swap. The sort is not guaranteed to be stable.
-func sortByFreq(data []literalNode) {
- n := len(data)
- quickSortByFreq(data, 0, n, maxDepth(n))
-}
-
-func quickSortByFreq(data []literalNode, a, b, maxDepth int) {
- for b-a > 12 { // Use ShellSort for slices <= 12 elements
- if maxDepth == 0 {
- heapSort(data, a, b)
- return
- }
- maxDepth--
- mlo, mhi := doPivotByFreq(data, a, b)
- // Avoiding recursion on the larger subproblem guarantees
- // a stack depth of at most lg(b-a).
- if mlo-a < b-mhi {
- quickSortByFreq(data, a, mlo, maxDepth)
- a = mhi // i.e., quickSortByFreq(data, mhi, b)
- } else {
- quickSortByFreq(data, mhi, b, maxDepth)
- b = mlo // i.e., quickSortByFreq(data, a, mlo)
- }
- }
- if b-a > 1 {
- // Do ShellSort pass with gap 6
- // It could be written in this simplified form cause b-a <= 12
- for i := a + 6; i < b; i++ {
- if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq {
- data[i], data[i-6] = data[i-6], data[i]
- }
- }
- insertionSortByFreq(data, a, b)
- }
-}
-
-// siftDownByFreq implements the heap property on data[lo, hi).
-// first is an offset into the array where the root of the heap lies.
-func siftDownByFreq(data []literalNode, lo, hi, first int) {
- root := lo
- for {
- child := 2*root + 1
- if child >= hi {
- break
- }
- if child+1 < hi && (data[first+child].freq == data[first+child+1].freq && data[first+child].literal < data[first+child+1].literal || data[first+child].freq < data[first+child+1].freq) {
- child++
- }
- if data[first+root].freq == data[first+child].freq && data[first+root].literal > data[first+child].literal || data[first+root].freq > data[first+child].freq {
- return
- }
- data[first+root], data[first+child] = data[first+child], data[first+root]
- root = child
- }
-}
-func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) {
- m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
- if hi-lo > 40 {
- // Tukey's ``Ninther,'' median of three medians of three.
- s := (hi - lo) / 8
- medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s)
- medianOfThreeSortByFreq(data, m, m-s, m+s)
- medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s)
- }
- medianOfThreeSortByFreq(data, lo, m, hi-1)
-
- // Invariants are:
- // data[lo] = pivot (set up by ChoosePivot)
- // data[lo < i < a] < pivot
- // data[a <= i < b] <= pivot
- // data[b <= i < c] unexamined
- // data[c <= i < hi-1] > pivot
- // data[hi-1] >= pivot
- pivot := lo
- a, c := lo+1, hi-1
-
- for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ {
- }
- b := a
- for {
- for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot
- }
- for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot
- }
- if b >= c {
- break
- }
- // data[b] > pivot; data[c-1] <= pivot
- data[b], data[c-1] = data[c-1], data[b]
- b++
- c--
- }
- // If hi-c<3 then there are duplicates (by property of median of nine).
- // Let's be a bit more conservative, and set border to 5.
- protect := hi-c < 5
- if !protect && hi-c < (hi-lo)/4 {
- // Lets test some points for equality to pivot
- dups := 0
- if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot
- data[c], data[hi-1] = data[hi-1], data[c]
- c++
- dups++
- }
- if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot
- b--
- dups++
- }
- // m-lo = (hi-lo)/2 > 6
- // b-lo > (hi-lo)*3/4-1 > 8
- // ==> m < b ==> data[m] <= pivot
- if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot
- data[m], data[b-1] = data[b-1], data[m]
- b--
- dups++
- }
- // if at least 2 points are equal to pivot, assume skewed distribution
- protect = dups > 1
- }
- if protect {
- // Protect against a lot of duplicates
- // Add invariant:
- // data[a <= i < b] unexamined
- // data[b <= i < c] = pivot
- for {
- for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot
- }
- for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot
- }
- if a >= b {
- break
- }
- // data[a] == pivot; data[b-1] < pivot
- data[a], data[b-1] = data[b-1], data[a]
- a++
- b--
- }
- }
- // Swap pivot into middle
- data[pivot], data[b-1] = data[b-1], data[pivot]
- return b - 1, c
-}
-
-// Insertion sort
-func insertionSortByFreq(data []literalNode, a, b int) {
- for i := a + 1; i < b; i++ {
- for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- {
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
-}
-
-// quickSortByFreq, loosely following Bentley and McIlroy,
-// ``Engineering a Sort Function,'' SP&E November 1993.
-
-// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
-func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) {
- // sort 3 elements
- if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
- data[m1], data[m0] = data[m0], data[m1]
- }
- // data[m0] <= data[m1]
- if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq {
- data[m2], data[m1] = data[m1], data[m2]
- // data[m0] <= data[m2] && data[m1] < data[m2]
- if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
- data[m1], data[m0] = data[m0], data[m1]
- }
- }
- // now data[m0] <= data[m1] <= data[m2]
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
deleted file mode 100644
index 93f1aea109e..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-// Sort sorts data.
-// It makes one call to data.Len to determine n, and O(n*log(n)) calls to
-// data.Less and data.Swap. The sort is not guaranteed to be stable.
-func sortByLiteral(data []literalNode) {
- n := len(data)
- quickSort(data, 0, n, maxDepth(n))
-}
-
-func quickSort(data []literalNode, a, b, maxDepth int) {
- for b-a > 12 { // Use ShellSort for slices <= 12 elements
- if maxDepth == 0 {
- heapSort(data, a, b)
- return
- }
- maxDepth--
- mlo, mhi := doPivot(data, a, b)
- // Avoiding recursion on the larger subproblem guarantees
- // a stack depth of at most lg(b-a).
- if mlo-a < b-mhi {
- quickSort(data, a, mlo, maxDepth)
- a = mhi // i.e., quickSort(data, mhi, b)
- } else {
- quickSort(data, mhi, b, maxDepth)
- b = mlo // i.e., quickSort(data, a, mlo)
- }
- }
- if b-a > 1 {
- // Do ShellSort pass with gap 6
- // It could be written in this simplified form cause b-a <= 12
- for i := a + 6; i < b; i++ {
- if data[i].literal < data[i-6].literal {
- data[i], data[i-6] = data[i-6], data[i]
- }
- }
- insertionSort(data, a, b)
- }
-}
-func heapSort(data []literalNode, a, b int) {
- first := a
- lo := 0
- hi := b - a
-
- // Build heap with greatest element at top.
- for i := (hi - 1) / 2; i >= 0; i-- {
- siftDown(data, i, hi, first)
- }
-
- // Pop elements, largest first, into end of data.
- for i := hi - 1; i >= 0; i-- {
- data[first], data[first+i] = data[first+i], data[first]
- siftDown(data, lo, i, first)
- }
-}
-
-// siftDown implements the heap property on data[lo, hi).
-// first is an offset into the array where the root of the heap lies.
-func siftDown(data []literalNode, lo, hi, first int) {
- root := lo
- for {
- child := 2*root + 1
- if child >= hi {
- break
- }
- if child+1 < hi && data[first+child].literal < data[first+child+1].literal {
- child++
- }
- if data[first+root].literal > data[first+child].literal {
- return
- }
- data[first+root], data[first+child] = data[first+child], data[first+root]
- root = child
- }
-}
-func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) {
- m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
- if hi-lo > 40 {
- // Tukey's ``Ninther,'' median of three medians of three.
- s := (hi - lo) / 8
- medianOfThree(data, lo, lo+s, lo+2*s)
- medianOfThree(data, m, m-s, m+s)
- medianOfThree(data, hi-1, hi-1-s, hi-1-2*s)
- }
- medianOfThree(data, lo, m, hi-1)
-
- // Invariants are:
- // data[lo] = pivot (set up by ChoosePivot)
- // data[lo < i < a] < pivot
- // data[a <= i < b] <= pivot
- // data[b <= i < c] unexamined
- // data[c <= i < hi-1] > pivot
- // data[hi-1] >= pivot
- pivot := lo
- a, c := lo+1, hi-1
-
- for ; a < c && data[a].literal < data[pivot].literal; a++ {
- }
- b := a
- for {
- for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot
- }
- for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot
- }
- if b >= c {
- break
- }
- // data[b] > pivot; data[c-1] <= pivot
- data[b], data[c-1] = data[c-1], data[b]
- b++
- c--
- }
- // If hi-c<3 then there are duplicates (by property of median of nine).
- // Let's be a bit more conservative, and set border to 5.
- protect := hi-c < 5
- if !protect && hi-c < (hi-lo)/4 {
- // Lets test some points for equality to pivot
- dups := 0
- if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot
- data[c], data[hi-1] = data[hi-1], data[c]
- c++
- dups++
- }
- if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot
- b--
- dups++
- }
- // m-lo = (hi-lo)/2 > 6
- // b-lo > (hi-lo)*3/4-1 > 8
- // ==> m < b ==> data[m] <= pivot
- if data[m].literal > data[pivot].literal { // data[m] = pivot
- data[m], data[b-1] = data[b-1], data[m]
- b--
- dups++
- }
- // if at least 2 points are equal to pivot, assume skewed distribution
- protect = dups > 1
- }
- if protect {
- // Protect against a lot of duplicates
- // Add invariant:
- // data[a <= i < b] unexamined
- // data[b <= i < c] = pivot
- for {
- for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot
- }
- for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot
- }
- if a >= b {
- break
- }
- // data[a] == pivot; data[b-1] < pivot
- data[a], data[b-1] = data[b-1], data[a]
- a++
- b--
- }
- }
- // Swap pivot into middle
- data[pivot], data[b-1] = data[b-1], data[pivot]
- return b - 1, c
-}
-
-// Insertion sort
-func insertionSort(data []literalNode, a, b int) {
- for i := a + 1; i < b; i++ {
- for j := i; j > a && data[j].literal < data[j-1].literal; j-- {
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
-}
-
-// maxDepth returns a threshold at which quicksort should switch
-// to heapsort. It returns 2*ceil(lg(n+1)).
-func maxDepth(n int) int {
- var depth int
- for i := n; i > 0; i >>= 1 {
- depth++
- }
- return depth * 2
-}
-
-// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
-func medianOfThree(data []literalNode, m1, m0, m2 int) {
- // sort 3 elements
- if data[m1].literal < data[m0].literal {
- data[m1], data[m0] = data[m0], data[m1]
- }
- // data[m0] <= data[m1]
- if data[m2].literal < data[m1].literal {
- data[m2], data[m1] = data[m1], data[m2]
- // data[m0] <= data[m2] && data[m1] < data[m2]
- if data[m1].literal < data[m0].literal {
- data[m1], data[m0] = data[m0], data[m1]
- }
- }
- // now data[m0] <= data[m1] <= data[m2]
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/inflate.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/inflate.go
deleted file mode 100644
index 7f175a4ec26..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/inflate.go
+++ /dev/null
@@ -1,1000 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package flate implements the DEFLATE compressed data format, described in
-// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file
-// formats.
-package flate
-
-import (
- "bufio"
- "fmt"
- "io"
- "math/bits"
- "strconv"
- "sync"
-)
-
-const (
- maxCodeLen = 16 // max length of Huffman code
- maxCodeLenMask = 15 // mask for max length of Huffman code
- // The next three numbers come from the RFC section 3.2.7, with the
- // additional proviso in section 3.2.5 which implies that distance codes
- // 30 and 31 should never occur in compressed data.
- maxNumLit = 286
- maxNumDist = 30
- numCodes = 19 // number of codes in Huffman meta-code
-
- debugDecode = false
-)
-
-// Initialize the fixedHuffmanDecoder only once upon first use.
-var fixedOnce sync.Once
-var fixedHuffmanDecoder huffmanDecoder
-
-// A CorruptInputError reports the presence of corrupt input at a given offset.
-type CorruptInputError int64
-
-func (e CorruptInputError) Error() string {
- return "flate: corrupt input before offset " + strconv.FormatInt(int64(e), 10)
-}
-
-// An InternalError reports an error in the flate code itself.
-type InternalError string
-
-func (e InternalError) Error() string { return "flate: internal error: " + string(e) }
-
-// A ReadError reports an error encountered while reading input.
-//
-// Deprecated: No longer returned.
-type ReadError struct {
- Offset int64 // byte offset where error occurred
- Err error // error returned by underlying Read
-}
-
-func (e *ReadError) Error() string {
- return "flate: read error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
-}
-
-// A WriteError reports an error encountered while writing output.
-//
-// Deprecated: No longer returned.
-type WriteError struct {
- Offset int64 // byte offset where error occurred
- Err error // error returned by underlying Write
-}
-
-func (e *WriteError) Error() string {
- return "flate: write error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
-}
-
-// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to
-// to switch to a new underlying Reader. This permits reusing a ReadCloser
-// instead of allocating a new one.
-type Resetter interface {
- // Reset discards any buffered data and resets the Resetter as if it was
- // newly initialized with the given reader.
- Reset(r io.Reader, dict []byte) error
-}
-
-// The data structure for decoding Huffman tables is based on that of
-// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits),
-// For codes smaller than the table width, there are multiple entries
-// (each combination of trailing bits has the same value). For codes
-// larger than the table width, the table contains a link to an overflow
-// table. The width of each entry in the link table is the maximum code
-// size minus the chunk width.
-//
-// Note that you can do a lookup in the table even without all bits
-// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
-// have the property that shorter codes come before longer ones, the
-// bit length estimate in the result is a lower bound on the actual
-// number of bits.
-//
-// See the following:
-// http://www.gzip.org/algorithm.txt
-
-// chunk & 15 is number of bits
-// chunk >> 4 is value, including table link
-
-const (
- huffmanChunkBits = 9
- huffmanNumChunks = 1 << huffmanChunkBits
- huffmanCountMask = 15
- huffmanValueShift = 4
-)
-
-type huffmanDecoder struct {
- maxRead int // the maximum number of bits we can read and not overread
- chunks *[huffmanNumChunks]uint16 // chunks as described above
- links [][]uint16 // overflow links
- linkMask uint32 // mask the width of the link table
-}
-
-// Initialize Huffman decoding tables from array of code lengths.
-// Following this function, h is guaranteed to be initialized into a complete
-// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
-// degenerate case where the tree has only a single symbol with length 1. Empty
-// trees are permitted.
-func (h *huffmanDecoder) init(lengths []int) bool {
- // Sanity enables additional runtime tests during Huffman
- // table construction. It's intended to be used during
- // development to supplement the currently ad-hoc unit tests.
- const sanity = false
-
- if h.chunks == nil {
- h.chunks = &[huffmanNumChunks]uint16{}
- }
- if h.maxRead != 0 {
- *h = huffmanDecoder{chunks: h.chunks, links: h.links}
- }
-
- // Count number of codes of each length,
- // compute maxRead and max length.
- var count [maxCodeLen]int
- var min, max int
- for _, n := range lengths {
- if n == 0 {
- continue
- }
- if min == 0 || n < min {
- min = n
- }
- if n > max {
- max = n
- }
- count[n&maxCodeLenMask]++
- }
-
- // Empty tree. The decompressor.huffSym function will fail later if the tree
- // is used. Technically, an empty tree is only valid for the HDIST tree and
- // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
- // is guaranteed to fail since it will attempt to use the tree to decode the
- // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
- // guaranteed to fail later since the compressed data section must be
- // composed of at least one symbol (the end-of-block marker).
- if max == 0 {
- return true
- }
-
- code := 0
- var nextcode [maxCodeLen]int
- for i := min; i <= max; i++ {
- code <<= 1
- nextcode[i&maxCodeLenMask] = code
- code += count[i&maxCodeLenMask]
- }
-
- // Check that the coding is complete (i.e., that we've
- // assigned all 2-to-the-max possible bit sequences).
- // Exception: To be compatible with zlib, we also need to
- // accept degenerate single-code codings. See also
- // TestDegenerateHuffmanCoding.
- if code != 1< huffmanChunkBits {
- numLinks := 1 << (uint(max) - huffmanChunkBits)
- h.linkMask = uint32(numLinks - 1)
-
- // create link tables
- link := nextcode[huffmanChunkBits+1] >> 1
- if cap(h.links) < huffmanNumChunks-link {
- h.links = make([][]uint16, huffmanNumChunks-link)
- } else {
- h.links = h.links[:huffmanNumChunks-link]
- }
- for j := uint(link); j < huffmanNumChunks; j++ {
- reverse := int(bits.Reverse16(uint16(j)))
- reverse >>= uint(16 - huffmanChunkBits)
- off := j - uint(link)
- if sanity && h.chunks[reverse] != 0 {
- panic("impossible: overwriting existing chunk")
- }
- h.chunks[reverse] = uint16(off<>= uint(16 - n)
- if n <= huffmanChunkBits {
- for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
- // We should never need to overwrite
- // an existing chunk. Also, 0 is
- // never a valid chunk, because the
- // lower 4 "count" bits should be
- // between 1 and 15.
- if sanity && h.chunks[off] != 0 {
- panic("impossible: overwriting existing chunk")
- }
- h.chunks[off] = chunk
- }
- } else {
- j := reverse & (huffmanNumChunks - 1)
- if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
- // Longer codes should have been
- // associated with a link table above.
- panic("impossible: not an indirect chunk")
- }
- value := h.chunks[j] >> huffmanValueShift
- linktab := h.links[value]
- reverse >>= huffmanChunkBits
- for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
- if sanity && linktab[off] != 0 {
- panic("impossible: overwriting existing chunk")
- }
- linktab[off] = chunk
- }
- }
- }
-
- if sanity {
- // Above we've sanity checked that we never overwrote
- // an existing entry. Here we additionally check that
- // we filled the tables completely.
- for i, chunk := range h.chunks {
- if chunk == 0 {
- // As an exception, in the degenerate
- // single-code case, we allow odd
- // chunks to be missing.
- if code == 1 && i%2 == 1 {
- continue
- }
- panic("impossible: missing chunk")
- }
- }
- for _, linktab := range h.links {
- for _, chunk := range linktab {
- if chunk == 0 {
- panic("impossible: missing chunk")
- }
- }
- }
- }
-
- return true
-}
-
-// The actual read interface needed by NewReader.
-// If the passed in io.Reader does not also have ReadByte,
-// the NewReader will introduce its own buffering.
-type Reader interface {
- io.Reader
- io.ByteReader
-}
-
-// Decompress state.
-type decompressor struct {
- // Input source.
- r Reader
- roffset int64
-
- // Input bits, in top of b.
- b uint32
- nb uint
-
- // Huffman decoders for literal/length, distance.
- h1, h2 huffmanDecoder
-
- // Length arrays used to define Huffman codes.
- bits *[maxNumLit + maxNumDist]int
- codebits *[numCodes]int
-
- // Output history, buffer.
- dict dictDecoder
-
- // Temporary buffer (avoids repeated allocation).
- buf [4]byte
-
- // Next step in the decompression,
- // and decompression state.
- step func(*decompressor)
- stepState int
- final bool
- err error
- toRead []byte
- hl, hd *huffmanDecoder
- copyLen int
- copyDist int
-}
-
-func (f *decompressor) nextBlock() {
- for f.nb < 1+2 {
- if f.err = f.moreBits(); f.err != nil {
- return
- }
- }
- f.final = f.b&1 == 1
- f.b >>= 1
- typ := f.b & 3
- f.b >>= 2
- f.nb -= 1 + 2
- switch typ {
- case 0:
- f.dataBlock()
- case 1:
- // compressed, fixed Huffman tables
- f.hl = &fixedHuffmanDecoder
- f.hd = nil
- f.huffmanBlockDecoder()()
- case 2:
- // compressed, dynamic Huffman tables
- if f.err = f.readHuffman(); f.err != nil {
- break
- }
- f.hl = &f.h1
- f.hd = &f.h2
- f.huffmanBlockDecoder()()
- default:
- // 3 is reserved.
- if debugDecode {
- fmt.Println("reserved data block encountered")
- }
- f.err = CorruptInputError(f.roffset)
- }
-}
-
-func (f *decompressor) Read(b []byte) (int, error) {
- for {
- if len(f.toRead) > 0 {
- n := copy(b, f.toRead)
- f.toRead = f.toRead[n:]
- if len(f.toRead) == 0 {
- return n, f.err
- }
- return n, nil
- }
- if f.err != nil {
- return 0, f.err
- }
- f.step(f)
- if f.err != nil && len(f.toRead) == 0 {
- f.toRead = f.dict.readFlush() // Flush what's left in case of error
- }
- }
-}
-
-// Support the io.WriteTo interface for io.Copy and friends.
-func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
- total := int64(0)
- flushed := false
- for {
- if len(f.toRead) > 0 {
- n, err := w.Write(f.toRead)
- total += int64(n)
- if err != nil {
- f.err = err
- return total, err
- }
- if n != len(f.toRead) {
- return total, io.ErrShortWrite
- }
- f.toRead = f.toRead[:0]
- }
- if f.err != nil && flushed {
- if f.err == io.EOF {
- return total, nil
- }
- return total, f.err
- }
- if f.err == nil {
- f.step(f)
- }
- if len(f.toRead) == 0 && f.err != nil && !flushed {
- f.toRead = f.dict.readFlush() // Flush what's left in case of error
- flushed = true
- }
- }
-}
-
-func (f *decompressor) Close() error {
- if f.err == io.EOF {
- return nil
- }
- return f.err
-}
-
-// RFC 1951 section 3.2.7.
-// Compression with dynamic Huffman codes
-
-var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
-
-func (f *decompressor) readHuffman() error {
- // HLIT[5], HDIST[5], HCLEN[4].
- for f.nb < 5+5+4 {
- if err := f.moreBits(); err != nil {
- return err
- }
- }
- nlit := int(f.b&0x1F) + 257
- if nlit > maxNumLit {
- if debugDecode {
- fmt.Println("nlit > maxNumLit", nlit)
- }
- return CorruptInputError(f.roffset)
- }
- f.b >>= 5
- ndist := int(f.b&0x1F) + 1
- if ndist > maxNumDist {
- if debugDecode {
- fmt.Println("ndist > maxNumDist", ndist)
- }
- return CorruptInputError(f.roffset)
- }
- f.b >>= 5
- nclen := int(f.b&0xF) + 4
- // numCodes is 19, so nclen is always valid.
- f.b >>= 4
- f.nb -= 5 + 5 + 4
-
- // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
- for i := 0; i < nclen; i++ {
- for f.nb < 3 {
- if err := f.moreBits(); err != nil {
- return err
- }
- }
- f.codebits[codeOrder[i]] = int(f.b & 0x7)
- f.b >>= 3
- f.nb -= 3
- }
- for i := nclen; i < len(codeOrder); i++ {
- f.codebits[codeOrder[i]] = 0
- }
- if !f.h1.init(f.codebits[0:]) {
- if debugDecode {
- fmt.Println("init codebits failed")
- }
- return CorruptInputError(f.roffset)
- }
-
- // HLIT + 257 code lengths, HDIST + 1 code lengths,
- // using the code length Huffman code.
- for i, n := 0, nlit+ndist; i < n; {
- x, err := f.huffSym(&f.h1)
- if err != nil {
- return err
- }
- if x < 16 {
- // Actual length.
- f.bits[i] = x
- i++
- continue
- }
- // Repeat previous length or zero.
- var rep int
- var nb uint
- var b int
- switch x {
- default:
- return InternalError("unexpected length code")
- case 16:
- rep = 3
- nb = 2
- if i == 0 {
- if debugDecode {
- fmt.Println("i==0")
- }
- return CorruptInputError(f.roffset)
- }
- b = f.bits[i-1]
- case 17:
- rep = 3
- nb = 3
- b = 0
- case 18:
- rep = 11
- nb = 7
- b = 0
- }
- for f.nb < nb {
- if err := f.moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits:", err)
- }
- return err
- }
- }
- rep += int(f.b & uint32(1<>= nb
- f.nb -= nb
- if i+rep > n {
- if debugDecode {
- fmt.Println("i+rep > n", i, rep, n)
- }
- return CorruptInputError(f.roffset)
- }
- for j := 0; j < rep; j++ {
- f.bits[i] = b
- i++
- }
- }
-
- if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
- if debugDecode {
- fmt.Println("init2 failed")
- }
- return CorruptInputError(f.roffset)
- }
-
- // As an optimization, we can initialize the maxRead bits to read at a time
- // for the HLIT tree to the length of the EOB marker since we know that
- // every block must terminate with one. This preserves the property that
- // we never read any extra bytes after the end of the DEFLATE stream.
- if f.h1.maxRead < f.bits[endBlockMarker] {
- f.h1.maxRead = f.bits[endBlockMarker]
- }
- if !f.final {
- // If not the final block, the smallest block possible is
- // a predefined table, BTYPE=01, with a single EOB marker.
- // This will take up 3 + 7 bits.
- f.h1.maxRead += 10
- }
-
- return nil
-}
-
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) huffmanBlockGeneric() {
- const (
- stateInit = iota // Zero value must be stateInit
- stateDict
- )
-
- switch f.stepState {
- case stateInit:
- goto readLiteral
- case stateDict:
- goto copyHistory
- }
-
-readLiteral:
- // Read literal and/or (length, distance) according to RFC section 3.2.3.
- {
- var v int
- {
- // Inlined v, err := f.huffSym(f.hl)
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hl.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
- for {
- for nb < n {
- c, err := f.r.ReadByte()
- if err != nil {
- f.b = b
- f.nb = nb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- b |= uint32(c) << (nb & 31)
- nb += 8
- }
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= nb {
- if n == 0 {
- f.b = b
- f.nb = nb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- f.b = b >> (n & 31)
- f.nb = nb - n
- v = int(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- var n uint // number of bits extra
- var length int
- var err error
- switch {
- case v < 256:
- f.dict.writeByte(byte(v))
- if f.dict.availWrite() == 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanBlockGeneric
- f.stepState = stateInit
- return
- }
- goto readLiteral
- case v == 256:
- f.finishBlock()
- return
- // otherwise, reference to older data
- case v < 265:
- length = v - (257 - 3)
- n = 0
- case v < 269:
- length = v*2 - (265*2 - 11)
- n = 1
- case v < 273:
- length = v*4 - (269*4 - 19)
- n = 2
- case v < 277:
- length = v*8 - (273*8 - 35)
- n = 3
- case v < 281:
- length = v*16 - (277*16 - 67)
- n = 4
- case v < 285:
- length = v*32 - (281*32 - 131)
- n = 5
- case v < maxNumLit:
- length = 258
- n = 0
- default:
- if debugDecode {
- fmt.Println(v, ">= maxNumLit")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- if n > 0 {
- for f.nb < n {
- if err = f.moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits n>0:", err)
- }
- f.err = err
- return
- }
- }
- length += int(f.b & uint32(1<>= n
- f.nb -= n
- }
-
- var dist int
- if f.hd == nil {
- for f.nb < 5 {
- if err = f.moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb<5:", err)
- }
- f.err = err
- return
- }
- }
- dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
- } else {
- if dist, err = f.huffSym(f.hd); err != nil {
- if debugDecode {
- fmt.Println("huffsym:", err)
- }
- f.err = err
- return
- }
- }
-
- switch {
- case dist < 4:
- dist++
- case dist < maxNumDist:
- nb := uint(dist-2) >> 1
- // have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << nb
- for f.nb < nb {
- if err = f.moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb>= nb
- f.nb -= nb
- dist = 1<<(nb+1) + 1 + extra
- default:
- if debugDecode {
- fmt.Println("dist too big:", dist, maxNumDist)
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- // No check on length; encoding can be prescient.
- if dist > f.dict.histSize() {
- if debugDecode {
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- f.copyLen, f.copyDist = length, dist
- goto copyHistory
- }
-
-copyHistory:
- // Perform a backwards copy according to RFC section 3.2.3.
- {
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
- if cnt == 0 {
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
- }
- f.copyLen -= cnt
-
- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanBlockGeneric // We need to continue this work
- f.stepState = stateDict
- return
- }
- goto readLiteral
- }
-}
-
-// Copy a single uncompressed data block from input to output.
-func (f *decompressor) dataBlock() {
- // Uncompressed.
- // Discard current half-byte.
- left := (f.nb) & 7
- f.nb -= left
- f.b >>= left
-
- offBytes := f.nb >> 3
- // Unfilled values will be overwritten.
- f.buf[0] = uint8(f.b)
- f.buf[1] = uint8(f.b >> 8)
- f.buf[2] = uint8(f.b >> 16)
- f.buf[3] = uint8(f.b >> 24)
-
- f.roffset += int64(offBytes)
- f.nb, f.b = 0, 0
-
- // Length then ones-complement of length.
- nr, err := io.ReadFull(f.r, f.buf[offBytes:4])
- f.roffset += int64(nr)
- if err != nil {
- f.err = noEOF(err)
- return
- }
- n := uint16(f.buf[0]) | uint16(f.buf[1])<<8
- nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8
- if nn != ^n {
- if debugDecode {
- ncomp := ^n
- fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp)
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- if n == 0 {
- f.toRead = f.dict.readFlush()
- f.finishBlock()
- return
- }
-
- f.copyLen = int(n)
- f.copyData()
-}
-
-// copyData copies f.copyLen bytes from the underlying reader into f.hist.
-// It pauses for reads when f.hist is full.
-func (f *decompressor) copyData() {
- buf := f.dict.writeSlice()
- if len(buf) > f.copyLen {
- buf = buf[:f.copyLen]
- }
-
- cnt, err := io.ReadFull(f.r, buf)
- f.roffset += int64(cnt)
- f.copyLen -= cnt
- f.dict.writeMark(cnt)
- if err != nil {
- f.err = noEOF(err)
- return
- }
-
- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).copyData
- return
- }
- f.finishBlock()
-}
-
-func (f *decompressor) finishBlock() {
- if f.final {
- if f.dict.availRead() > 0 {
- f.toRead = f.dict.readFlush()
- }
- f.err = io.EOF
- }
- f.step = (*decompressor).nextBlock
-}
-
-// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
-func noEOF(e error) error {
- if e == io.EOF {
- return io.ErrUnexpectedEOF
- }
- return e
-}
-
-func (f *decompressor) moreBits() error {
- c, err := f.r.ReadByte()
- if err != nil {
- return noEOF(err)
- }
- f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
- return nil
-}
-
-// Read the next Huffman-encoded symbol from f according to h.
-func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(h.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
- for {
- for nb < n {
- c, err := f.r.ReadByte()
- if err != nil {
- f.b = b
- f.nb = nb
- return 0, noEOF(err)
- }
- f.roffset++
- b |= uint32(c) << (nb & 31)
- nb += 8
- }
- chunk := h.chunks[b&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= nb {
- if n == 0 {
- f.b = b
- f.nb = nb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return 0, f.err
- }
- f.b = b >> (n & 31)
- f.nb = nb - n
- return int(chunk >> huffmanValueShift), nil
- }
- }
-}
-
-func makeReader(r io.Reader) Reader {
- if rr, ok := r.(Reader); ok {
- return rr
- }
- return bufio.NewReader(r)
-}
-
-func fixedHuffmanDecoderInit() {
- fixedOnce.Do(func() {
- // These come from the RFC section 3.2.6.
- var bits [288]int
- for i := 0; i < 144; i++ {
- bits[i] = 8
- }
- for i := 144; i < 256; i++ {
- bits[i] = 9
- }
- for i := 256; i < 280; i++ {
- bits[i] = 7
- }
- for i := 280; i < 288; i++ {
- bits[i] = 8
- }
- fixedHuffmanDecoder.init(bits[:])
- })
-}
-
-func (f *decompressor) Reset(r io.Reader, dict []byte) error {
- *f = decompressor{
- r: makeReader(r),
- bits: f.bits,
- codebits: f.codebits,
- h1: f.h1,
- h2: f.h2,
- dict: f.dict,
- step: (*decompressor).nextBlock,
- }
- f.dict.init(maxMatchOffset, dict)
- return nil
-}
-
-// NewReader returns a new ReadCloser that can be used
-// to read the uncompressed version of r.
-// If r does not also implement io.ByteReader,
-// the decompressor may read more data than necessary from r.
-// It is the caller's responsibility to call Close on the ReadCloser
-// when finished reading.
-//
-// The ReadCloser returned by NewReader also implements Resetter.
-func NewReader(r io.Reader) io.ReadCloser {
- fixedHuffmanDecoderInit()
-
- var f decompressor
- f.r = makeReader(r)
- f.bits = new([maxNumLit + maxNumDist]int)
- f.codebits = new([numCodes]int)
- f.step = (*decompressor).nextBlock
- f.dict.init(maxMatchOffset, nil)
- return &f
-}
-
-// NewReaderDict is like NewReader but initializes the reader
-// with a preset dictionary. The returned Reader behaves as if
-// the uncompressed data stream started with the given dictionary,
-// which has already been read. NewReaderDict is typically used
-// to read data compressed by NewWriterDict.
-//
-// The ReadCloser returned by NewReader also implements Resetter.
-func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
- fixedHuffmanDecoderInit()
-
- var f decompressor
- f.r = makeReader(r)
- f.bits = new([maxNumLit + maxNumDist]int)
- f.codebits = new([numCodes]int)
- f.step = (*decompressor).nextBlock
- f.dict.init(maxMatchOffset, dict)
- return &f
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/inflate_gen.go
deleted file mode 100644
index 397dc1b1a13..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/inflate_gen.go
+++ /dev/null
@@ -1,922 +0,0 @@
-// Code generated by go generate gen_inflate.go. DO NOT EDIT.
-
-package flate
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "math/bits"
- "strings"
-)
-
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) huffmanBytesBuffer() {
- const (
- stateInit = iota // Zero value must be stateInit
- stateDict
- )
- fr := f.r.(*bytes.Buffer)
- moreBits := func() error {
- c, err := fr.ReadByte()
- if err != nil {
- return noEOF(err)
- }
- f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
- return nil
- }
-
- switch f.stepState {
- case stateInit:
- goto readLiteral
- case stateDict:
- goto copyHistory
- }
-
-readLiteral:
- // Read literal and/or (length, distance) according to RFC section 3.2.3.
- {
- var v int
- {
- // Inlined v, err := f.huffSym(f.hl)
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hl.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
- for {
- for nb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b = b
- f.nb = nb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- b |= uint32(c) << (nb & 31)
- nb += 8
- }
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= nb {
- if n == 0 {
- f.b = b
- f.nb = nb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- f.b = b >> (n & 31)
- f.nb = nb - n
- v = int(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- var n uint // number of bits extra
- var length int
- var err error
- switch {
- case v < 256:
- f.dict.writeByte(byte(v))
- if f.dict.availWrite() == 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanBytesBuffer
- f.stepState = stateInit
- return
- }
- goto readLiteral
- case v == 256:
- f.finishBlock()
- return
- // otherwise, reference to older data
- case v < 265:
- length = v - (257 - 3)
- n = 0
- case v < 269:
- length = v*2 - (265*2 - 11)
- n = 1
- case v < 273:
- length = v*4 - (269*4 - 19)
- n = 2
- case v < 277:
- length = v*8 - (273*8 - 35)
- n = 3
- case v < 281:
- length = v*16 - (277*16 - 67)
- n = 4
- case v < 285:
- length = v*32 - (281*32 - 131)
- n = 5
- case v < maxNumLit:
- length = 258
- n = 0
- default:
- if debugDecode {
- fmt.Println(v, ">= maxNumLit")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- if n > 0 {
- for f.nb < n {
- if err = moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits n>0:", err)
- }
- f.err = err
- return
- }
- }
- length += int(f.b & uint32(1<>= n
- f.nb -= n
- }
-
- var dist int
- if f.hd == nil {
- for f.nb < 5 {
- if err = moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb<5:", err)
- }
- f.err = err
- return
- }
- }
- dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
- } else {
- if dist, err = f.huffSym(f.hd); err != nil {
- if debugDecode {
- fmt.Println("huffsym:", err)
- }
- f.err = err
- return
- }
- }
-
- switch {
- case dist < 4:
- dist++
- case dist < maxNumDist:
- nb := uint(dist-2) >> 1
- // have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << nb
- for f.nb < nb {
- if err = moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb>= nb
- f.nb -= nb
- dist = 1<<(nb+1) + 1 + extra
- default:
- if debugDecode {
- fmt.Println("dist too big:", dist, maxNumDist)
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- // No check on length; encoding can be prescient.
- if dist > f.dict.histSize() {
- if debugDecode {
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- f.copyLen, f.copyDist = length, dist
- goto copyHistory
- }
-
-copyHistory:
- // Perform a backwards copy according to RFC section 3.2.3.
- {
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
- if cnt == 0 {
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
- }
- f.copyLen -= cnt
-
- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work
- f.stepState = stateDict
- return
- }
- goto readLiteral
- }
-}
-
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) huffmanBytesReader() {
- const (
- stateInit = iota // Zero value must be stateInit
- stateDict
- )
- fr := f.r.(*bytes.Reader)
- moreBits := func() error {
- c, err := fr.ReadByte()
- if err != nil {
- return noEOF(err)
- }
- f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
- return nil
- }
-
- switch f.stepState {
- case stateInit:
- goto readLiteral
- case stateDict:
- goto copyHistory
- }
-
-readLiteral:
- // Read literal and/or (length, distance) according to RFC section 3.2.3.
- {
- var v int
- {
- // Inlined v, err := f.huffSym(f.hl)
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hl.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
- for {
- for nb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b = b
- f.nb = nb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- b |= uint32(c) << (nb & 31)
- nb += 8
- }
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= nb {
- if n == 0 {
- f.b = b
- f.nb = nb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- f.b = b >> (n & 31)
- f.nb = nb - n
- v = int(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- var n uint // number of bits extra
- var length int
- var err error
- switch {
- case v < 256:
- f.dict.writeByte(byte(v))
- if f.dict.availWrite() == 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanBytesReader
- f.stepState = stateInit
- return
- }
- goto readLiteral
- case v == 256:
- f.finishBlock()
- return
- // otherwise, reference to older data
- case v < 265:
- length = v - (257 - 3)
- n = 0
- case v < 269:
- length = v*2 - (265*2 - 11)
- n = 1
- case v < 273:
- length = v*4 - (269*4 - 19)
- n = 2
- case v < 277:
- length = v*8 - (273*8 - 35)
- n = 3
- case v < 281:
- length = v*16 - (277*16 - 67)
- n = 4
- case v < 285:
- length = v*32 - (281*32 - 131)
- n = 5
- case v < maxNumLit:
- length = 258
- n = 0
- default:
- if debugDecode {
- fmt.Println(v, ">= maxNumLit")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- if n > 0 {
- for f.nb < n {
- if err = moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits n>0:", err)
- }
- f.err = err
- return
- }
- }
- length += int(f.b & uint32(1<>= n
- f.nb -= n
- }
-
- var dist int
- if f.hd == nil {
- for f.nb < 5 {
- if err = moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb<5:", err)
- }
- f.err = err
- return
- }
- }
- dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
- } else {
- if dist, err = f.huffSym(f.hd); err != nil {
- if debugDecode {
- fmt.Println("huffsym:", err)
- }
- f.err = err
- return
- }
- }
-
- switch {
- case dist < 4:
- dist++
- case dist < maxNumDist:
- nb := uint(dist-2) >> 1
- // have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << nb
- for f.nb < nb {
- if err = moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb>= nb
- f.nb -= nb
- dist = 1<<(nb+1) + 1 + extra
- default:
- if debugDecode {
- fmt.Println("dist too big:", dist, maxNumDist)
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- // No check on length; encoding can be prescient.
- if dist > f.dict.histSize() {
- if debugDecode {
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- f.copyLen, f.copyDist = length, dist
- goto copyHistory
- }
-
-copyHistory:
- // Perform a backwards copy according to RFC section 3.2.3.
- {
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
- if cnt == 0 {
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
- }
- f.copyLen -= cnt
-
- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanBytesReader // We need to continue this work
- f.stepState = stateDict
- return
- }
- goto readLiteral
- }
-}
-
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) huffmanBufioReader() {
- const (
- stateInit = iota // Zero value must be stateInit
- stateDict
- )
- fr := f.r.(*bufio.Reader)
- moreBits := func() error {
- c, err := fr.ReadByte()
- if err != nil {
- return noEOF(err)
- }
- f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
- return nil
- }
-
- switch f.stepState {
- case stateInit:
- goto readLiteral
- case stateDict:
- goto copyHistory
- }
-
-readLiteral:
- // Read literal and/or (length, distance) according to RFC section 3.2.3.
- {
- var v int
- {
- // Inlined v, err := f.huffSym(f.hl)
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hl.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
- for {
- for nb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b = b
- f.nb = nb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- b |= uint32(c) << (nb & 31)
- nb += 8
- }
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= nb {
- if n == 0 {
- f.b = b
- f.nb = nb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- f.b = b >> (n & 31)
- f.nb = nb - n
- v = int(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- var n uint // number of bits extra
- var length int
- var err error
- switch {
- case v < 256:
- f.dict.writeByte(byte(v))
- if f.dict.availWrite() == 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanBufioReader
- f.stepState = stateInit
- return
- }
- goto readLiteral
- case v == 256:
- f.finishBlock()
- return
- // otherwise, reference to older data
- case v < 265:
- length = v - (257 - 3)
- n = 0
- case v < 269:
- length = v*2 - (265*2 - 11)
- n = 1
- case v < 273:
- length = v*4 - (269*4 - 19)
- n = 2
- case v < 277:
- length = v*8 - (273*8 - 35)
- n = 3
- case v < 281:
- length = v*16 - (277*16 - 67)
- n = 4
- case v < 285:
- length = v*32 - (281*32 - 131)
- n = 5
- case v < maxNumLit:
- length = 258
- n = 0
- default:
- if debugDecode {
- fmt.Println(v, ">= maxNumLit")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- if n > 0 {
- for f.nb < n {
- if err = moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits n>0:", err)
- }
- f.err = err
- return
- }
- }
- length += int(f.b & uint32(1<>= n
- f.nb -= n
- }
-
- var dist int
- if f.hd == nil {
- for f.nb < 5 {
- if err = moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb<5:", err)
- }
- f.err = err
- return
- }
- }
- dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
- } else {
- if dist, err = f.huffSym(f.hd); err != nil {
- if debugDecode {
- fmt.Println("huffsym:", err)
- }
- f.err = err
- return
- }
- }
-
- switch {
- case dist < 4:
- dist++
- case dist < maxNumDist:
- nb := uint(dist-2) >> 1
- // have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << nb
- for f.nb < nb {
- if err = moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb>= nb
- f.nb -= nb
- dist = 1<<(nb+1) + 1 + extra
- default:
- if debugDecode {
- fmt.Println("dist too big:", dist, maxNumDist)
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- // No check on length; encoding can be prescient.
- if dist > f.dict.histSize() {
- if debugDecode {
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- f.copyLen, f.copyDist = length, dist
- goto copyHistory
- }
-
-copyHistory:
- // Perform a backwards copy according to RFC section 3.2.3.
- {
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
- if cnt == 0 {
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
- }
- f.copyLen -= cnt
-
- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanBufioReader // We need to continue this work
- f.stepState = stateDict
- return
- }
- goto readLiteral
- }
-}
-
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) huffmanStringsReader() {
- const (
- stateInit = iota // Zero value must be stateInit
- stateDict
- )
- fr := f.r.(*strings.Reader)
- moreBits := func() error {
- c, err := fr.ReadByte()
- if err != nil {
- return noEOF(err)
- }
- f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
- return nil
- }
-
- switch f.stepState {
- case stateInit:
- goto readLiteral
- case stateDict:
- goto copyHistory
- }
-
-readLiteral:
- // Read literal and/or (length, distance) according to RFC section 3.2.3.
- {
- var v int
- {
- // Inlined v, err := f.huffSym(f.hl)
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hl.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
- for {
- for nb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b = b
- f.nb = nb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- b |= uint32(c) << (nb & 31)
- nb += 8
- }
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= nb {
- if n == 0 {
- f.b = b
- f.nb = nb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- f.b = b >> (n & 31)
- f.nb = nb - n
- v = int(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- var n uint // number of bits extra
- var length int
- var err error
- switch {
- case v < 256:
- f.dict.writeByte(byte(v))
- if f.dict.availWrite() == 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanStringsReader
- f.stepState = stateInit
- return
- }
- goto readLiteral
- case v == 256:
- f.finishBlock()
- return
- // otherwise, reference to older data
- case v < 265:
- length = v - (257 - 3)
- n = 0
- case v < 269:
- length = v*2 - (265*2 - 11)
- n = 1
- case v < 273:
- length = v*4 - (269*4 - 19)
- n = 2
- case v < 277:
- length = v*8 - (273*8 - 35)
- n = 3
- case v < 281:
- length = v*16 - (277*16 - 67)
- n = 4
- case v < 285:
- length = v*32 - (281*32 - 131)
- n = 5
- case v < maxNumLit:
- length = 258
- n = 0
- default:
- if debugDecode {
- fmt.Println(v, ">= maxNumLit")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- if n > 0 {
- for f.nb < n {
- if err = moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits n>0:", err)
- }
- f.err = err
- return
- }
- }
- length += int(f.b & uint32(1<>= n
- f.nb -= n
- }
-
- var dist int
- if f.hd == nil {
- for f.nb < 5 {
- if err = moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb<5:", err)
- }
- f.err = err
- return
- }
- }
- dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
- } else {
- if dist, err = f.huffSym(f.hd); err != nil {
- if debugDecode {
- fmt.Println("huffsym:", err)
- }
- f.err = err
- return
- }
- }
-
- switch {
- case dist < 4:
- dist++
- case dist < maxNumDist:
- nb := uint(dist-2) >> 1
- // have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << nb
- for f.nb < nb {
- if err = moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb>= nb
- f.nb -= nb
- dist = 1<<(nb+1) + 1 + extra
- default:
- if debugDecode {
- fmt.Println("dist too big:", dist, maxNumDist)
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- // No check on length; encoding can be prescient.
- if dist > f.dict.histSize() {
- if debugDecode {
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- f.copyLen, f.copyDist = length, dist
- goto copyHistory
- }
-
-copyHistory:
- // Perform a backwards copy according to RFC section 3.2.3.
- {
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
- if cnt == 0 {
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
- }
- f.copyLen -= cnt
-
- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanStringsReader // We need to continue this work
- f.stepState = stateDict
- return
- }
- goto readLiteral
- }
-}
-
-func (f *decompressor) huffmanBlockDecoder() func() {
- switch f.r.(type) {
- case *bytes.Buffer:
- return f.huffmanBytesBuffer
- case *bytes.Reader:
- return f.huffmanBytesReader
- case *bufio.Reader:
- return f.huffmanBufioReader
- case *strings.Reader:
- return f.huffmanStringsReader
- default:
- return f.huffmanBlockGeneric
- }
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level1.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level1.go
deleted file mode 100644
index 1e5eea3968a..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level1.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package flate
-
-import "fmt"
-
-// fastGen maintains the table for matches,
-// and the previous byte block for level 2.
-// This is the generic implementation.
-type fastEncL1 struct {
- fastGen
- table [tableSize]tableEntry
-}
-
-// EncodeL1 uses a similar algorithm to level 1
-func (e *fastEncL1) Encode(dst *tokens, src []byte) {
- const (
- inputMargin = 12 - 1
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- )
- if debugDeflate && e.cur < 0 {
- panic(fmt.Sprint("e.cur < 0: ", e.cur))
- }
-
- // Protect against e.cur wraparound.
- for e.cur >= bufferReset {
- if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- e.cur = maxMatchOffset
- break
- }
- // Shift down everything in the table that isn't already too far away.
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
- for i := range e.table[:] {
- v := e.table[i].offset
- if v <= minOff {
- v = 0
- } else {
- v = v - e.cur + maxMatchOffset
- }
- e.table[i].offset = v
- }
- e.cur = maxMatchOffset
- }
-
- s := e.addBlock(src)
-
- // This check isn't in the Snappy implementation, but there, the caller
- // instead of the callee handles this case.
- if len(src) < minNonLiteralBlockSize {
- // We do not fill the token table.
- // This will be picked up by caller.
- dst.n = uint16(len(src))
- return
- }
-
- // Override src
- src = e.hist
- nextEmit := s
-
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := int32(len(src) - inputMargin)
-
- // nextEmit is where in src the next emitLiteral should start from.
- cv := load3232(src, s)
-
- for {
- const skipLog = 5
- const doEvery = 2
-
- nextS := s
- var candidate tableEntry
- for {
- nextHash := hash(cv)
- candidate = e.table[nextHash]
- nextS = s + doEvery + (s-nextEmit)>>skipLog
- if nextS > sLimit {
- goto emitRemainder
- }
-
- now := load6432(src, nextS)
- e.table[nextHash] = tableEntry{offset: s + e.cur}
- nextHash = hash(uint32(now))
-
- offset := s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
- e.table[nextHash] = tableEntry{offset: nextS + e.cur}
- break
- }
-
- // Do one right away...
- cv = uint32(now)
- s = nextS
- nextS++
- candidate = e.table[nextHash]
- now >>= 8
- e.table[nextHash] = tableEntry{offset: s + e.cur}
-
- offset = s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
- e.table[nextHash] = tableEntry{offset: nextS + e.cur}
- break
- }
- cv = uint32(now)
- s = nextS
- }
-
- // A 4-byte match has been found. We'll later see if more than 4 bytes
- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
- // them as literal bytes.
- for {
- // Invariant: we have a 4-byte match at s, and no need to emit any
- // literal bytes prior to s.
-
- // Extend the 4-byte match as long as possible.
- t := candidate.offset - e.cur
- l := e.matchlenLong(s+4, t+4, src) + 4
-
- // Extend backwards
- for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
- s--
- t--
- l++
- }
- if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
- }
-
- // Save the match found
- dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
- s += l
- nextEmit = s
- if nextS >= s {
- s = nextS + 1
- }
- if s >= sLimit {
- // Index first pair after match end.
- if int(s+l+4) < len(src) {
- cv := load3232(src, s)
- e.table[hash(cv)] = tableEntry{offset: s + e.cur}
- }
- goto emitRemainder
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-2 and at s. If
- // another emitCopy is not our next move, also calculate nextHash
- // at s+1. At least on GOARCH=amd64, these three hash calculations
- // are faster as one load64 call (with some shifts) instead of
- // three load32 calls.
- x := load6432(src, s-2)
- o := e.cur + s - 2
- prevHash := hash(uint32(x))
- e.table[prevHash] = tableEntry{offset: o}
- x >>= 16
- currHash := hash(uint32(x))
- candidate = e.table[currHash]
- e.table[currHash] = tableEntry{offset: o + 2}
-
- offset := s - (candidate.offset - e.cur)
- if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) {
- cv = uint32(x >> 8)
- s++
- break
- }
- }
- }
-
-emitRemainder:
- if int(nextEmit) < len(src) {
- // If nothing was added, don't encode literals.
- if dst.n == 0 {
- return
- }
- emitLiteral(dst, src[nextEmit:])
- }
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level2.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level2.go
deleted file mode 100644
index 5b986a1944e..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level2.go
+++ /dev/null
@@ -1,205 +0,0 @@
-package flate
-
-import "fmt"
-
-// fastGen maintains the table for matches,
-// and the previous byte block for level 2.
-// This is the generic implementation.
-type fastEncL2 struct {
- fastGen
- table [bTableSize]tableEntry
-}
-
-// EncodeL2 uses a similar algorithm to level 1, but is capable
-// of matching across blocks giving better compression at a small slowdown.
-func (e *fastEncL2) Encode(dst *tokens, src []byte) {
- const (
- inputMargin = 12 - 1
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- )
-
- if debugDeflate && e.cur < 0 {
- panic(fmt.Sprint("e.cur < 0: ", e.cur))
- }
-
- // Protect against e.cur wraparound.
- for e.cur >= bufferReset {
- if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- e.cur = maxMatchOffset
- break
- }
- // Shift down everything in the table that isn't already too far away.
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
- for i := range e.table[:] {
- v := e.table[i].offset
- if v <= minOff {
- v = 0
- } else {
- v = v - e.cur + maxMatchOffset
- }
- e.table[i].offset = v
- }
- e.cur = maxMatchOffset
- }
-
- s := e.addBlock(src)
-
- // This check isn't in the Snappy implementation, but there, the caller
- // instead of the callee handles this case.
- if len(src) < minNonLiteralBlockSize {
- // We do not fill the token table.
- // This will be picked up by caller.
- dst.n = uint16(len(src))
- return
- }
-
- // Override src
- src = e.hist
- nextEmit := s
-
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := int32(len(src) - inputMargin)
-
- // nextEmit is where in src the next emitLiteral should start from.
- cv := load3232(src, s)
- for {
- // When should we start skipping if we haven't found matches in a long while.
- const skipLog = 5
- const doEvery = 2
-
- nextS := s
- var candidate tableEntry
- for {
- nextHash := hash4u(cv, bTableBits)
- s = nextS
- nextS = s + doEvery + (s-nextEmit)>>skipLog
- if nextS > sLimit {
- goto emitRemainder
- }
- candidate = e.table[nextHash]
- now := load6432(src, nextS)
- e.table[nextHash] = tableEntry{offset: s + e.cur}
- nextHash = hash4u(uint32(now), bTableBits)
-
- offset := s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
- e.table[nextHash] = tableEntry{offset: nextS + e.cur}
- break
- }
-
- // Do one right away...
- cv = uint32(now)
- s = nextS
- nextS++
- candidate = e.table[nextHash]
- now >>= 8
- e.table[nextHash] = tableEntry{offset: s + e.cur}
-
- offset = s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
- break
- }
- cv = uint32(now)
- }
-
- // A 4-byte match has been found. We'll later see if more than 4 bytes
- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
- // them as literal bytes.
-
- // Call emitCopy, and then see if another emitCopy could be our next
- // move. Repeat until we find no match for the input immediately after
- // what was consumed by the last emitCopy call.
- //
- // If we exit this loop normally then we need to call emitLiteral next,
- // though we don't yet know how big the literal will be. We handle that
- // by proceeding to the next iteration of the main loop. We also can
- // exit this loop via goto if we get close to exhausting the input.
- for {
- // Invariant: we have a 4-byte match at s, and no need to emit any
- // literal bytes prior to s.
-
- // Extend the 4-byte match as long as possible.
- t := candidate.offset - e.cur
- l := e.matchlenLong(s+4, t+4, src) + 4
-
- // Extend backwards
- for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
- s--
- t--
- l++
- }
- if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
- }
-
- dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
- s += l
- nextEmit = s
- if nextS >= s {
- s = nextS + 1
- }
-
- if s >= sLimit {
- // Index first pair after match end.
- if int(s+l+4) < len(src) {
- cv := load3232(src, s)
- e.table[hash4u(cv, bTableBits)] = tableEntry{offset: s + e.cur}
- }
- goto emitRemainder
- }
-
- // Store every second hash in-between, but offset by 1.
- for i := s - l + 2; i < s-5; i += 7 {
- x := load6432(src, int32(i))
- nextHash := hash4u(uint32(x), bTableBits)
- e.table[nextHash] = tableEntry{offset: e.cur + i}
- // Skip one
- x >>= 16
- nextHash = hash4u(uint32(x), bTableBits)
- e.table[nextHash] = tableEntry{offset: e.cur + i + 2}
- // Skip one
- x >>= 16
- nextHash = hash4u(uint32(x), bTableBits)
- e.table[nextHash] = tableEntry{offset: e.cur + i + 4}
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-2 to s. If
- // another emitCopy is not our next move, also calculate nextHash
- // at s+1. At least on GOARCH=amd64, these three hash calculations
- // are faster as one load64 call (with some shifts) instead of
- // three load32 calls.
- x := load6432(src, s-2)
- o := e.cur + s - 2
- prevHash := hash4u(uint32(x), bTableBits)
- prevHash2 := hash4u(uint32(x>>8), bTableBits)
- e.table[prevHash] = tableEntry{offset: o}
- e.table[prevHash2] = tableEntry{offset: o + 1}
- currHash := hash4u(uint32(x>>16), bTableBits)
- candidate = e.table[currHash]
- e.table[currHash] = tableEntry{offset: o + 2}
-
- offset := s - (candidate.offset - e.cur)
- if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) {
- cv = uint32(x >> 24)
- s++
- break
- }
- }
- }
-
-emitRemainder:
- if int(nextEmit) < len(src) {
- // If nothing was added, don't encode literals.
- if dst.n == 0 {
- return
- }
-
- emitLiteral(dst, src[nextEmit:])
- }
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level3.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level3.go
deleted file mode 100644
index c22b4244a5c..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level3.go
+++ /dev/null
@@ -1,229 +0,0 @@
-package flate
-
-import "fmt"
-
-// fastEncL3
-type fastEncL3 struct {
- fastGen
- table [tableSize]tableEntryPrev
-}
-
-// Encode uses a similar algorithm to level 2, will check up to two candidates.
-func (e *fastEncL3) Encode(dst *tokens, src []byte) {
- const (
- inputMargin = 8 - 1
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- )
-
- if debugDeflate && e.cur < 0 {
- panic(fmt.Sprint("e.cur < 0: ", e.cur))
- }
-
- // Protect against e.cur wraparound.
- for e.cur >= bufferReset {
- if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntryPrev{}
- }
- e.cur = maxMatchOffset
- break
- }
- // Shift down everything in the table that isn't already too far away.
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
- for i := range e.table[:] {
- v := e.table[i]
- if v.Cur.offset <= minOff {
- v.Cur.offset = 0
- } else {
- v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
- }
- if v.Prev.offset <= minOff {
- v.Prev.offset = 0
- } else {
- v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
- }
- e.table[i] = v
- }
- e.cur = maxMatchOffset
- }
-
- s := e.addBlock(src)
-
- // Skip if too small.
- if len(src) < minNonLiteralBlockSize {
- // We do not fill the token table.
- // This will be picked up by caller.
- dst.n = uint16(len(src))
- return
- }
-
- // Override src
- src = e.hist
- nextEmit := s
-
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := int32(len(src) - inputMargin)
-
- // nextEmit is where in src the next emitLiteral should start from.
- cv := load3232(src, s)
- for {
- const skipLog = 6
- nextS := s
- var candidate tableEntry
- for {
- nextHash := hash(cv)
- s = nextS
- nextS = s + 1 + (s-nextEmit)>>skipLog
- if nextS > sLimit {
- goto emitRemainder
- }
- candidates := e.table[nextHash]
- now := load3232(src, nextS)
-
- // Safe offset distance until s + 4...
- minOffset := e.cur + s - (maxMatchOffset - 4)
- e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}}
-
- // Check both candidates
- candidate = candidates.Cur
- if candidate.offset < minOffset {
- cv = now
- // Previous will also be invalid, we have nothing.
- continue
- }
-
- if cv == load3232(src, candidate.offset-e.cur) {
- if candidates.Prev.offset < minOffset || cv != load3232(src, candidates.Prev.offset-e.cur) {
- break
- }
- // Both match and are valid, pick longest.
- offset := s - (candidate.offset - e.cur)
- o2 := s - (candidates.Prev.offset - e.cur)
- l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:])
- if l2 > l1 {
- candidate = candidates.Prev
- }
- break
- } else {
- // We only check if value mismatches.
- // Offset will always be invalid in other cases.
- candidate = candidates.Prev
- if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) {
- break
- }
- }
- cv = now
- }
-
- // Call emitCopy, and then see if another emitCopy could be our next
- // move. Repeat until we find no match for the input immediately after
- // what was consumed by the last emitCopy call.
- //
- // If we exit this loop normally then we need to call emitLiteral next,
- // though we don't yet know how big the literal will be. We handle that
- // by proceeding to the next iteration of the main loop. We also can
- // exit this loop via goto if we get close to exhausting the input.
- for {
- // Invariant: we have a 4-byte match at s, and no need to emit any
- // literal bytes prior to s.
-
- // Extend the 4-byte match as long as possible.
- //
- t := candidate.offset - e.cur
- l := e.matchlenLong(s+4, t+4, src) + 4
-
- // Extend backwards
- for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
- s--
- t--
- l++
- }
- if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
- }
-
- dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
- s += l
- nextEmit = s
- if nextS >= s {
- s = nextS + 1
- }
-
- if s >= sLimit {
- t += l
- // Index first pair after match end.
- if int(t+4) < len(src) && t > 0 {
- cv := load3232(src, t)
- nextHash := hash(cv)
- e.table[nextHash] = tableEntryPrev{
- Prev: e.table[nextHash].Cur,
- Cur: tableEntry{offset: e.cur + t},
- }
- }
- goto emitRemainder
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-3 to s.
- x := load6432(src, s-3)
- prevHash := hash(uint32(x))
- e.table[prevHash] = tableEntryPrev{
- Prev: e.table[prevHash].Cur,
- Cur: tableEntry{offset: e.cur + s - 3},
- }
- x >>= 8
- prevHash = hash(uint32(x))
-
- e.table[prevHash] = tableEntryPrev{
- Prev: e.table[prevHash].Cur,
- Cur: tableEntry{offset: e.cur + s - 2},
- }
- x >>= 8
- prevHash = hash(uint32(x))
-
- e.table[prevHash] = tableEntryPrev{
- Prev: e.table[prevHash].Cur,
- Cur: tableEntry{offset: e.cur + s - 1},
- }
- x >>= 8
- currHash := hash(uint32(x))
- candidates := e.table[currHash]
- cv = uint32(x)
- e.table[currHash] = tableEntryPrev{
- Prev: candidates.Cur,
- Cur: tableEntry{offset: s + e.cur},
- }
-
- // Check both candidates
- candidate = candidates.Cur
- minOffset := e.cur + s - (maxMatchOffset - 4)
-
- if candidate.offset > minOffset && cv != load3232(src, candidate.offset-e.cur) {
- // We only check if value mismatches.
- // Offset will always be invalid in other cases.
- candidate = candidates.Prev
- if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) {
- offset := s - (candidate.offset - e.cur)
- if offset <= maxMatchOffset {
- continue
- }
- }
- }
- cv = uint32(x >> 8)
- s++
- break
- }
- }
-
-emitRemainder:
- if int(nextEmit) < len(src) {
- // If nothing was added, don't encode literals.
- if dst.n == 0 {
- return
- }
-
- emitLiteral(dst, src[nextEmit:])
- }
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level4.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level4.go
deleted file mode 100644
index e62f0c02b1e..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level4.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package flate
-
-import "fmt"
-
-type fastEncL4 struct {
- fastGen
- table [tableSize]tableEntry
- bTable [tableSize]tableEntry
-}
-
-func (e *fastEncL4) Encode(dst *tokens, src []byte) {
- const (
- inputMargin = 12 - 1
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- )
- if debugDeflate && e.cur < 0 {
- panic(fmt.Sprint("e.cur < 0: ", e.cur))
- }
- // Protect against e.cur wraparound.
- for e.cur >= bufferReset {
- if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- for i := range e.bTable[:] {
- e.bTable[i] = tableEntry{}
- }
- e.cur = maxMatchOffset
- break
- }
- // Shift down everything in the table that isn't already too far away.
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
- for i := range e.table[:] {
- v := e.table[i].offset
- if v <= minOff {
- v = 0
- } else {
- v = v - e.cur + maxMatchOffset
- }
- e.table[i].offset = v
- }
- for i := range e.bTable[:] {
- v := e.bTable[i].offset
- if v <= minOff {
- v = 0
- } else {
- v = v - e.cur + maxMatchOffset
- }
- e.bTable[i].offset = v
- }
- e.cur = maxMatchOffset
- }
-
- s := e.addBlock(src)
-
- // This check isn't in the Snappy implementation, but there, the caller
- // instead of the callee handles this case.
- if len(src) < minNonLiteralBlockSize {
- // We do not fill the token table.
- // This will be picked up by caller.
- dst.n = uint16(len(src))
- return
- }
-
- // Override src
- src = e.hist
- nextEmit := s
-
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := int32(len(src) - inputMargin)
-
- // nextEmit is where in src the next emitLiteral should start from.
- cv := load6432(src, s)
- for {
- const skipLog = 6
- const doEvery = 1
-
- nextS := s
- var t int32
- for {
- nextHashS := hash4x64(cv, tableBits)
- nextHashL := hash7(cv, tableBits)
-
- s = nextS
- nextS = s + doEvery + (s-nextEmit)>>skipLog
- if nextS > sLimit {
- goto emitRemainder
- }
- // Fetch a short+long candidate
- sCandidate := e.table[nextHashS]
- lCandidate := e.bTable[nextHashL]
- next := load6432(src, nextS)
- entry := tableEntry{offset: s + e.cur}
- e.table[nextHashS] = entry
- e.bTable[nextHashL] = entry
-
- t = lCandidate.offset - e.cur
- if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) {
- // We got a long match. Use that.
- break
- }
-
- t = sCandidate.offset - e.cur
- if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
- // Found a 4 match...
- lCandidate = e.bTable[hash7(next, tableBits)]
-
- // If the next long is a candidate, check if we should use that instead...
- lOff := nextS - (lCandidate.offset - e.cur)
- if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) {
- l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:])
- if l2 > l1 {
- s = nextS
- t = lCandidate.offset - e.cur
- }
- }
- break
- }
- cv = next
- }
-
- // A 4-byte match has been found. We'll later see if more than 4 bytes
- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
- // them as literal bytes.
-
- // Extend the 4-byte match as long as possible.
- l := e.matchlenLong(s+4, t+4, src) + 4
-
- // Extend backwards
- for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
- s--
- t--
- l++
- }
- if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
- }
- if debugDeflate {
- if t >= s {
- panic("s-t")
- }
- if (s - t) > maxMatchOffset {
- panic(fmt.Sprintln("mmo", t))
- }
- if l < baseMatchLength {
- panic("bml")
- }
- }
-
- dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
- s += l
- nextEmit = s
- if nextS >= s {
- s = nextS + 1
- }
-
- if s >= sLimit {
- // Index first pair after match end.
- if int(s+8) < len(src) {
- cv := load6432(src, s)
- e.table[hash4x64(cv, tableBits)] = tableEntry{offset: s + e.cur}
- e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur}
- }
- goto emitRemainder
- }
-
- // Store every 3rd hash in-between
- if true {
- i := nextS
- if i < s-1 {
- cv := load6432(src, i)
- t := tableEntry{offset: i + e.cur}
- t2 := tableEntry{offset: t.offset + 1}
- e.bTable[hash7(cv, tableBits)] = t
- e.bTable[hash7(cv>>8, tableBits)] = t2
- e.table[hash4u(uint32(cv>>8), tableBits)] = t2
-
- i += 3
- for ; i < s-1; i += 3 {
- cv := load6432(src, i)
- t := tableEntry{offset: i + e.cur}
- t2 := tableEntry{offset: t.offset + 1}
- e.bTable[hash7(cv, tableBits)] = t
- e.bTable[hash7(cv>>8, tableBits)] = t2
- e.table[hash4u(uint32(cv>>8), tableBits)] = t2
- }
- }
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-1 and at s.
- x := load6432(src, s-1)
- o := e.cur + s - 1
- prevHashS := hash4x64(x, tableBits)
- prevHashL := hash7(x, tableBits)
- e.table[prevHashS] = tableEntry{offset: o}
- e.bTable[prevHashL] = tableEntry{offset: o}
- cv = x >> 8
- }
-
-emitRemainder:
- if int(nextEmit) < len(src) {
- // If nothing was added, don't encode literals.
- if dst.n == 0 {
- return
- }
-
- emitLiteral(dst, src[nextEmit:])
- }
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level5.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level5.go
deleted file mode 100644
index d513f1ffd37..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level5.go
+++ /dev/null
@@ -1,279 +0,0 @@
-package flate
-
-import "fmt"
-
-type fastEncL5 struct {
- fastGen
- table [tableSize]tableEntry
- bTable [tableSize]tableEntryPrev
-}
-
-func (e *fastEncL5) Encode(dst *tokens, src []byte) {
- const (
- inputMargin = 12 - 1
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- )
- if debugDeflate && e.cur < 0 {
- panic(fmt.Sprint("e.cur < 0: ", e.cur))
- }
-
- // Protect against e.cur wraparound.
- for e.cur >= bufferReset {
- if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- for i := range e.bTable[:] {
- e.bTable[i] = tableEntryPrev{}
- }
- e.cur = maxMatchOffset
- break
- }
- // Shift down everything in the table that isn't already too far away.
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
- for i := range e.table[:] {
- v := e.table[i].offset
- if v <= minOff {
- v = 0
- } else {
- v = v - e.cur + maxMatchOffset
- }
- e.table[i].offset = v
- }
- for i := range e.bTable[:] {
- v := e.bTable[i]
- if v.Cur.offset <= minOff {
- v.Cur.offset = 0
- v.Prev.offset = 0
- } else {
- v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
- if v.Prev.offset <= minOff {
- v.Prev.offset = 0
- } else {
- v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
- }
- }
- e.bTable[i] = v
- }
- e.cur = maxMatchOffset
- }
-
- s := e.addBlock(src)
-
- // This check isn't in the Snappy implementation, but there, the caller
- // instead of the callee handles this case.
- if len(src) < minNonLiteralBlockSize {
- // We do not fill the token table.
- // This will be picked up by caller.
- dst.n = uint16(len(src))
- return
- }
-
- // Override src
- src = e.hist
- nextEmit := s
-
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := int32(len(src) - inputMargin)
-
- // nextEmit is where in src the next emitLiteral should start from.
- cv := load6432(src, s)
- for {
- const skipLog = 6
- const doEvery = 1
-
- nextS := s
- var l int32
- var t int32
- for {
- nextHashS := hash4x64(cv, tableBits)
- nextHashL := hash7(cv, tableBits)
-
- s = nextS
- nextS = s + doEvery + (s-nextEmit)>>skipLog
- if nextS > sLimit {
- goto emitRemainder
- }
- // Fetch a short+long candidate
- sCandidate := e.table[nextHashS]
- lCandidate := e.bTable[nextHashL]
- next := load6432(src, nextS)
- entry := tableEntry{offset: s + e.cur}
- e.table[nextHashS] = entry
- eLong := &e.bTable[nextHashL]
- eLong.Cur, eLong.Prev = entry, eLong.Cur
-
- nextHashS = hash4x64(next, tableBits)
- nextHashL = hash7(next, tableBits)
-
- t = lCandidate.Cur.offset - e.cur
- if s-t < maxMatchOffset {
- if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
- // Store the next match
- e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
- eLong := &e.bTable[nextHashL]
- eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
-
- t2 := lCandidate.Prev.offset - e.cur
- if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
- l = e.matchlen(s+4, t+4, src) + 4
- ml1 := e.matchlen(s+4, t2+4, src) + 4
- if ml1 > l {
- t = t2
- l = ml1
- break
- }
- }
- break
- }
- t = lCandidate.Prev.offset - e.cur
- if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
- // Store the next match
- e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
- eLong := &e.bTable[nextHashL]
- eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
- break
- }
- }
-
- t = sCandidate.offset - e.cur
- if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
- // Found a 4 match...
- l = e.matchlen(s+4, t+4, src) + 4
- lCandidate = e.bTable[nextHashL]
- // Store the next match
-
- e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
- eLong := &e.bTable[nextHashL]
- eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
-
- // If the next long is a candidate, use that...
- t2 := lCandidate.Cur.offset - e.cur
- if nextS-t2 < maxMatchOffset {
- if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
- ml := e.matchlen(nextS+4, t2+4, src) + 4
- if ml > l {
- t = t2
- s = nextS
- l = ml
- break
- }
- }
- // If the previous long is a candidate, use that...
- t2 = lCandidate.Prev.offset - e.cur
- if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
- ml := e.matchlen(nextS+4, t2+4, src) + 4
- if ml > l {
- t = t2
- s = nextS
- l = ml
- break
- }
- }
- }
- break
- }
- cv = next
- }
-
- // A 4-byte match has been found. We'll later see if more than 4 bytes
- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
- // them as literal bytes.
-
- // Extend the 4-byte match as long as possible.
- if l == 0 {
- l = e.matchlenLong(s+4, t+4, src) + 4
- } else if l == maxMatchLength {
- l += e.matchlenLong(s+l, t+l, src)
- }
- // Extend backwards
- for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
- s--
- t--
- l++
- }
- if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
- }
- if debugDeflate {
- if t >= s {
- panic(fmt.Sprintln("s-t", s, t))
- }
- if (s - t) > maxMatchOffset {
- panic(fmt.Sprintln("mmo", s-t))
- }
- if l < baseMatchLength {
- panic("bml")
- }
- }
-
- dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
- s += l
- nextEmit = s
- if nextS >= s {
- s = nextS + 1
- }
-
- if s >= sLimit {
- goto emitRemainder
- }
-
- // Store every 3rd hash in-between.
- if true {
- const hashEvery = 3
- i := s - l + 1
- if i < s-1 {
- cv := load6432(src, i)
- t := tableEntry{offset: i + e.cur}
- e.table[hash4x64(cv, tableBits)] = t
- eLong := &e.bTable[hash7(cv, tableBits)]
- eLong.Cur, eLong.Prev = t, eLong.Cur
-
- // Do an long at i+1
- cv >>= 8
- t = tableEntry{offset: t.offset + 1}
- eLong = &e.bTable[hash7(cv, tableBits)]
- eLong.Cur, eLong.Prev = t, eLong.Cur
-
- // We only have enough bits for a short entry at i+2
- cv >>= 8
- t = tableEntry{offset: t.offset + 1}
- e.table[hash4x64(cv, tableBits)] = t
-
- // Skip one - otherwise we risk hitting 's'
- i += 4
- for ; i < s-1; i += hashEvery {
- cv := load6432(src, i)
- t := tableEntry{offset: i + e.cur}
- t2 := tableEntry{offset: t.offset + 1}
- eLong := &e.bTable[hash7(cv, tableBits)]
- eLong.Cur, eLong.Prev = t, eLong.Cur
- e.table[hash4u(uint32(cv>>8), tableBits)] = t2
- }
- }
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-1 and at s.
- x := load6432(src, s-1)
- o := e.cur + s - 1
- prevHashS := hash4x64(x, tableBits)
- prevHashL := hash7(x, tableBits)
- e.table[prevHashS] = tableEntry{offset: o}
- eLong := &e.bTable[prevHashL]
- eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
- cv = x >> 8
- }
-
-emitRemainder:
- if int(nextEmit) < len(src) {
- // If nothing was added, don't encode literals.
- if dst.n == 0 {
- return
- }
-
- emitLiteral(dst, src[nextEmit:])
- }
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level6.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level6.go
deleted file mode 100644
index a52c80ea456..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/level6.go
+++ /dev/null
@@ -1,282 +0,0 @@
-package flate
-
-import "fmt"
-
-type fastEncL6 struct {
- fastGen
- table [tableSize]tableEntry
- bTable [tableSize]tableEntryPrev
-}
-
-func (e *fastEncL6) Encode(dst *tokens, src []byte) {
- const (
- inputMargin = 12 - 1
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- )
- if debugDeflate && e.cur < 0 {
- panic(fmt.Sprint("e.cur < 0: ", e.cur))
- }
-
- // Protect against e.cur wraparound.
- for e.cur >= bufferReset {
- if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- for i := range e.bTable[:] {
- e.bTable[i] = tableEntryPrev{}
- }
- e.cur = maxMatchOffset
- break
- }
- // Shift down everything in the table that isn't already too far away.
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
- for i := range e.table[:] {
- v := e.table[i].offset
- if v <= minOff {
- v = 0
- } else {
- v = v - e.cur + maxMatchOffset
- }
- e.table[i].offset = v
- }
- for i := range e.bTable[:] {
- v := e.bTable[i]
- if v.Cur.offset <= minOff {
- v.Cur.offset = 0
- v.Prev.offset = 0
- } else {
- v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
- if v.Prev.offset <= minOff {
- v.Prev.offset = 0
- } else {
- v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
- }
- }
- e.bTable[i] = v
- }
- e.cur = maxMatchOffset
- }
-
- s := e.addBlock(src)
-
- // This check isn't in the Snappy implementation, but there, the caller
- // instead of the callee handles this case.
- if len(src) < minNonLiteralBlockSize {
- // We do not fill the token table.
- // This will be picked up by caller.
- dst.n = uint16(len(src))
- return
- }
-
- // Override src
- src = e.hist
- nextEmit := s
-
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := int32(len(src) - inputMargin)
-
- // nextEmit is where in src the next emitLiteral should start from.
- cv := load6432(src, s)
- // Repeat MUST be > 1 and within range
- repeat := int32(1)
- for {
- const skipLog = 7
- const doEvery = 1
-
- nextS := s
- var l int32
- var t int32
- for {
- nextHashS := hash4x64(cv, tableBits)
- nextHashL := hash7(cv, tableBits)
- s = nextS
- nextS = s + doEvery + (s-nextEmit)>>skipLog
- if nextS > sLimit {
- goto emitRemainder
- }
- // Fetch a short+long candidate
- sCandidate := e.table[nextHashS]
- lCandidate := e.bTable[nextHashL]
- next := load6432(src, nextS)
- entry := tableEntry{offset: s + e.cur}
- e.table[nextHashS] = entry
- eLong := &e.bTable[nextHashL]
- eLong.Cur, eLong.Prev = entry, eLong.Cur
-
- // Calculate hashes of 'next'
- nextHashS = hash4x64(next, tableBits)
- nextHashL = hash7(next, tableBits)
-
- t = lCandidate.Cur.offset - e.cur
- if s-t < maxMatchOffset {
- if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
- // Long candidate matches at least 4 bytes.
-
- // Store the next match
- e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
- eLong := &e.bTable[nextHashL]
- eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
-
- // Check the previous long candidate as well.
- t2 := lCandidate.Prev.offset - e.cur
- if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
- l = e.matchlen(s+4, t+4, src) + 4
- ml1 := e.matchlen(s+4, t2+4, src) + 4
- if ml1 > l {
- t = t2
- l = ml1
- break
- }
- }
- break
- }
- // Current value did not match, but check if previous long value does.
- t = lCandidate.Prev.offset - e.cur
- if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
- // Store the next match
- e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
- eLong := &e.bTable[nextHashL]
- eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
- break
- }
- }
-
- t = sCandidate.offset - e.cur
- if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
- // Found a 4 match...
- l = e.matchlen(s+4, t+4, src) + 4
-
- // Look up next long candidate (at nextS)
- lCandidate = e.bTable[nextHashL]
-
- // Store the next match
- e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
- eLong := &e.bTable[nextHashL]
- eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
-
- // Check repeat at s + repOff
- const repOff = 1
- t2 := s - repeat + repOff
- if load3232(src, t2) == uint32(cv>>(8*repOff)) {
- ml := e.matchlen(s+4+repOff, t2+4, src) + 4
- if ml > l {
- t = t2
- l = ml
- s += repOff
- // Not worth checking more.
- break
- }
- }
-
- // If the next long is a candidate, use that...
- t2 = lCandidate.Cur.offset - e.cur
- if nextS-t2 < maxMatchOffset {
- if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
- ml := e.matchlen(nextS+4, t2+4, src) + 4
- if ml > l {
- t = t2
- s = nextS
- l = ml
- // This is ok, but check previous as well.
- }
- }
- // If the previous long is a candidate, use that...
- t2 = lCandidate.Prev.offset - e.cur
- if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
- ml := e.matchlen(nextS+4, t2+4, src) + 4
- if ml > l {
- t = t2
- s = nextS
- l = ml
- break
- }
- }
- }
- break
- }
- cv = next
- }
-
- // A 4-byte match has been found. We'll later see if more than 4 bytes
- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
- // them as literal bytes.
-
- // Extend the 4-byte match as long as possible.
- if l == 0 {
- l = e.matchlenLong(s+4, t+4, src) + 4
- } else if l == maxMatchLength {
- l += e.matchlenLong(s+l, t+l, src)
- }
-
- // Extend backwards
- for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
- s--
- t--
- l++
- }
- if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
- }
- if false {
- if t >= s {
- panic(fmt.Sprintln("s-t", s, t))
- }
- if (s - t) > maxMatchOffset {
- panic(fmt.Sprintln("mmo", s-t))
- }
- if l < baseMatchLength {
- panic("bml")
- }
- }
-
- dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
- repeat = s - t
- s += l
- nextEmit = s
- if nextS >= s {
- s = nextS + 1
- }
-
- if s >= sLimit {
- // Index after match end.
- for i := nextS + 1; i < int32(len(src))-8; i += 2 {
- cv := load6432(src, i)
- e.table[hash4x64(cv, tableBits)] = tableEntry{offset: i + e.cur}
- eLong := &e.bTable[hash7(cv, tableBits)]
- eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur
- }
- goto emitRemainder
- }
-
- // Store every long hash in-between and every second short.
- if true {
- for i := nextS + 1; i < s-1; i += 2 {
- cv := load6432(src, i)
- t := tableEntry{offset: i + e.cur}
- t2 := tableEntry{offset: t.offset + 1}
- eLong := &e.bTable[hash7(cv, tableBits)]
- eLong2 := &e.bTable[hash7(cv>>8, tableBits)]
- e.table[hash4x64(cv, tableBits)] = t
- eLong.Cur, eLong.Prev = t, eLong.Cur
- eLong2.Cur, eLong2.Prev = t2, eLong2.Cur
- }
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-1 and at s.
- cv = load6432(src, s)
- }
-
-emitRemainder:
- if int(nextEmit) < len(src) {
- // If nothing was added, don't encode literals.
- if dst.n == 0 {
- return
- }
-
- emitLiteral(dst, src[nextEmit:])
- }
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/stateless.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/stateless.go
deleted file mode 100644
index 53e89912463..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/stateless.go
+++ /dev/null
@@ -1,297 +0,0 @@
-package flate
-
-import (
- "io"
- "math"
- "sync"
-)
-
-const (
- maxStatelessBlock = math.MaxInt16
- // dictionary will be taken from maxStatelessBlock, so limit it.
- maxStatelessDict = 8 << 10
-
- slTableBits = 13
- slTableSize = 1 << slTableBits
- slTableShift = 32 - slTableBits
-)
-
-type statelessWriter struct {
- dst io.Writer
- closed bool
-}
-
-func (s *statelessWriter) Close() error {
- if s.closed {
- return nil
- }
- s.closed = true
- // Emit EOF block
- return StatelessDeflate(s.dst, nil, true, nil)
-}
-
-func (s *statelessWriter) Write(p []byte) (n int, err error) {
- err = StatelessDeflate(s.dst, p, false, nil)
- if err != nil {
- return 0, err
- }
- return len(p), nil
-}
-
-func (s *statelessWriter) Reset(w io.Writer) {
- s.dst = w
- s.closed = false
-}
-
-// NewStatelessWriter will do compression but without maintaining any state
-// between Write calls.
-// There will be no memory kept between Write calls,
-// but compression and speed will be suboptimal.
-// Because of this, the size of actual Write calls will affect output size.
-func NewStatelessWriter(dst io.Writer) io.WriteCloser {
- return &statelessWriter{dst: dst}
-}
-
-// bitWriterPool contains bit writers that can be reused.
-var bitWriterPool = sync.Pool{
- New: func() interface{} {
- return newHuffmanBitWriter(nil)
- },
-}
-
-// StatelessDeflate allows to compress directly to a Writer without retaining state.
-// When returning everything will be flushed.
-// Up to 8KB of an optional dictionary can be given which is presumed to presumed to precede the block.
-// Longer dictionaries will be truncated and will still produce valid output.
-// Sending nil dictionary is perfectly fine.
-func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
- var dst tokens
- bw := bitWriterPool.Get().(*huffmanBitWriter)
- bw.reset(out)
- defer func() {
- // don't keep a reference to our output
- bw.reset(nil)
- bitWriterPool.Put(bw)
- }()
- if eof && len(in) == 0 {
- // Just write an EOF block.
- // Could be faster...
- bw.writeStoredHeader(0, true)
- bw.flush()
- return bw.err
- }
-
- // Truncate dict
- if len(dict) > maxStatelessDict {
- dict = dict[len(dict)-maxStatelessDict:]
- }
-
- for len(in) > 0 {
- todo := in
- if len(todo) > maxStatelessBlock-len(dict) {
- todo = todo[:maxStatelessBlock-len(dict)]
- }
- in = in[len(todo):]
- uncompressed := todo
- if len(dict) > 0 {
- // combine dict and source
- bufLen := len(todo) + len(dict)
- combined := make([]byte, bufLen)
- copy(combined, dict)
- copy(combined[len(dict):], todo)
- todo = combined
- }
- // Compress
- statelessEnc(&dst, todo, int16(len(dict)))
- isEof := eof && len(in) == 0
-
- if dst.n == 0 {
- bw.writeStoredHeader(len(uncompressed), isEof)
- if bw.err != nil {
- return bw.err
- }
- bw.writeBytes(uncompressed)
- } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 {
- // If we removed less than 1/16th, huffman compress the block.
- bw.writeBlockHuff(isEof, uncompressed, len(in) == 0)
- } else {
- bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0)
- }
- if len(in) > 0 {
- // Retain a dict if we have more
- dict = todo[len(todo)-maxStatelessDict:]
- dst.Reset()
- }
- if bw.err != nil {
- return bw.err
- }
- }
- if !eof {
- // Align, only a stored block can do that.
- bw.writeStoredHeader(0, false)
- }
- bw.flush()
- return bw.err
-}
-
-func hashSL(u uint32) uint32 {
- return (u * 0x1e35a7bd) >> slTableShift
-}
-
-func load3216(b []byte, i int16) uint32 {
- // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
- b = b[i:]
- b = b[:4]
- return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-}
-
-func load6416(b []byte, i int16) uint64 {
- // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
- b = b[i:]
- b = b[:8]
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
- uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-}
-
-func statelessEnc(dst *tokens, src []byte, startAt int16) {
- const (
- inputMargin = 12 - 1
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- )
-
- type tableEntry struct {
- offset int16
- }
-
- var table [slTableSize]tableEntry
-
- // This check isn't in the Snappy implementation, but there, the caller
- // instead of the callee handles this case.
- if len(src)-int(startAt) < minNonLiteralBlockSize {
- // We do not fill the token table.
- // This will be picked up by caller.
- dst.n = 0
- return
- }
- // Index until startAt
- if startAt > 0 {
- cv := load3232(src, 0)
- for i := int16(0); i < startAt; i++ {
- table[hashSL(cv)] = tableEntry{offset: i}
- cv = (cv >> 8) | (uint32(src[i+4]) << 24)
- }
- }
-
- s := startAt + 1
- nextEmit := startAt
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := int16(len(src) - inputMargin)
-
- // nextEmit is where in src the next emitLiteral should start from.
- cv := load3216(src, s)
-
- for {
- const skipLog = 5
- const doEvery = 2
-
- nextS := s
- var candidate tableEntry
- for {
- nextHash := hashSL(cv)
- candidate = table[nextHash]
- nextS = s + doEvery + (s-nextEmit)>>skipLog
- if nextS > sLimit || nextS <= 0 {
- goto emitRemainder
- }
-
- now := load6416(src, nextS)
- table[nextHash] = tableEntry{offset: s}
- nextHash = hashSL(uint32(now))
-
- if cv == load3216(src, candidate.offset) {
- table[nextHash] = tableEntry{offset: nextS}
- break
- }
-
- // Do one right away...
- cv = uint32(now)
- s = nextS
- nextS++
- candidate = table[nextHash]
- now >>= 8
- table[nextHash] = tableEntry{offset: s}
-
- if cv == load3216(src, candidate.offset) {
- table[nextHash] = tableEntry{offset: nextS}
- break
- }
- cv = uint32(now)
- s = nextS
- }
-
- // A 4-byte match has been found. We'll later see if more than 4 bytes
- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
- // them as literal bytes.
- for {
- // Invariant: we have a 4-byte match at s, and no need to emit any
- // literal bytes prior to s.
-
- // Extend the 4-byte match as long as possible.
- t := candidate.offset
- l := int16(matchLen(src[s+4:], src[t+4:]) + 4)
-
- // Extend backwards
- for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
- s--
- t--
- l++
- }
- if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
- }
-
- // Save the match found
- dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset))
- s += l
- nextEmit = s
- if nextS >= s {
- s = nextS + 1
- }
- if s >= sLimit {
- goto emitRemainder
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-2 and at s. If
- // another emitCopy is not our next move, also calculate nextHash
- // at s+1. At least on GOARCH=amd64, these three hash calculations
- // are faster as one load64 call (with some shifts) instead of
- // three load32 calls.
- x := load6416(src, s-2)
- o := s - 2
- prevHash := hashSL(uint32(x))
- table[prevHash] = tableEntry{offset: o}
- x >>= 16
- currHash := hashSL(uint32(x))
- candidate = table[currHash]
- table[currHash] = tableEntry{offset: o + 2}
-
- if uint32(x) != load3216(src, candidate.offset) {
- cv = uint32(x >> 8)
- s++
- break
- }
- }
- }
-
-emitRemainder:
- if int(nextEmit) < len(src) {
- // If nothing was added, don't encode literals.
- if dst.n == 0 {
- return
- }
- emitLiteral(dst, src[nextEmit:])
- }
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/token.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/token.go
deleted file mode 100644
index f9abf606d67..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/klauspost/compress/flate/token.go
+++ /dev/null
@@ -1,375 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "io"
- "math"
-)
-
-const (
- // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused
- // 8 bits: xlength = length - MIN_MATCH_LENGTH
- // 22 bits xoffset = offset - MIN_OFFSET_SIZE, or literal
- lengthShift = 22
- offsetMask = 1<maxnumlit
- offHist [32]uint16 // offset codes
- litHist [256]uint16 // codes 0->255
- n uint16 // Must be able to contain maxStoreBlockSize
- tokens [maxStoreBlockSize + 1]token
-}
-
-func (t *tokens) Reset() {
- if t.n == 0 {
- return
- }
- t.n = 0
- t.nLits = 0
- for i := range t.litHist[:] {
- t.litHist[i] = 0
- }
- for i := range t.extraHist[:] {
- t.extraHist[i] = 0
- }
- for i := range t.offHist[:] {
- t.offHist[i] = 0
- }
-}
-
-func (t *tokens) Fill() {
- if t.n == 0 {
- return
- }
- for i, v := range t.litHist[:] {
- if v == 0 {
- t.litHist[i] = 1
- t.nLits++
- }
- }
- for i, v := range t.extraHist[:literalCount-256] {
- if v == 0 {
- t.nLits++
- t.extraHist[i] = 1
- }
- }
- for i, v := range t.offHist[:offsetCodeCount] {
- if v == 0 {
- t.offHist[i] = 1
- }
- }
-}
-
-func indexTokens(in []token) tokens {
- var t tokens
- t.indexTokens(in)
- return t
-}
-
-func (t *tokens) indexTokens(in []token) {
- t.Reset()
- for _, tok := range in {
- if tok < matchType {
- t.AddLiteral(tok.literal())
- continue
- }
- t.AddMatch(uint32(tok.length()), tok.offset())
- }
-}
-
-// emitLiteral writes a literal chunk and returns the number of bytes written.
-func emitLiteral(dst *tokens, lit []byte) {
- ol := int(dst.n)
- for i, v := range lit {
- dst.tokens[(i+ol)&maxStoreBlockSize] = token(v)
- dst.litHist[v]++
- }
- dst.n += uint16(len(lit))
- dst.nLits += len(lit)
-}
-
-func (t *tokens) AddLiteral(lit byte) {
- t.tokens[t.n] = token(lit)
- t.litHist[lit]++
- t.n++
- t.nLits++
-}
-
-// from https://stackoverflow.com/a/28730362
-func mFastLog2(val float32) float32 {
- ux := int32(math.Float32bits(val))
- log2 := (float32)(((ux >> 23) & 255) - 128)
- ux &= -0x7f800001
- ux += 127 << 23
- uval := math.Float32frombits(uint32(ux))
- log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759
- return log2
-}
-
-// EstimatedBits will return an minimum size estimated by an *optimal*
-// compression of the block.
-// The size of the block
-func (t *tokens) EstimatedBits() int {
- shannon := float32(0)
- bits := int(0)
- nMatches := 0
- if t.nLits > 0 {
- invTotal := 1.0 / float32(t.nLits)
- for _, v := range t.litHist[:] {
- if v > 0 {
- n := float32(v)
- shannon += -mFastLog2(n*invTotal) * n
- }
- }
- // Just add 15 for EOB
- shannon += 15
- for i, v := range t.extraHist[1 : literalCount-256] {
- if v > 0 {
- n := float32(v)
- shannon += -mFastLog2(n*invTotal) * n
- bits += int(lengthExtraBits[i&31]) * int(v)
- nMatches += int(v)
- }
- }
- }
- if nMatches > 0 {
- invTotal := 1.0 / float32(nMatches)
- for i, v := range t.offHist[:offsetCodeCount] {
- if v > 0 {
- n := float32(v)
- shannon += -mFastLog2(n*invTotal) * n
- bits += int(offsetExtraBits[i&31]) * int(v)
- }
- }
- }
- return int(shannon) + bits
-}
-
-// AddMatch adds a match to the tokens.
-// This function is very sensitive to inlining and right on the border.
-func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
- if debugDeflate {
- if xlength >= maxMatchLength+baseMatchLength {
- panic(fmt.Errorf("invalid length: %v", xlength))
- }
- if xoffset >= maxMatchOffset+baseMatchOffset {
- panic(fmt.Errorf("invalid offset: %v", xoffset))
- }
- }
- t.nLits++
- lengthCode := lengthCodes1[uint8(xlength)] & 31
- t.tokens[t.n] = token(matchType | xlength<= maxMatchOffset+baseMatchOffset {
- panic(fmt.Errorf("invalid offset: %v", xoffset))
- }
- }
- oc := offsetCode(xoffset) & 31
- for xlength > 0 {
- xl := xlength
- if xl > 258 {
- // We need to have at least baseMatchLength left over for next loop.
- xl = 258 - baseMatchLength
- }
- xlength -= xl
- xl -= 3
- t.nLits++
- lengthCode := lengthCodes1[uint8(xl)] & 31
- t.tokens[t.n] = token(matchType | uint32(xl)<> lengthShift) }
-
-// The code is never more than 8 bits, but is returned as uint32 for convenience.
-func lengthCode(len uint8) uint32 { return uint32(lengthCodes[len]) }
-
-// Returns the offset code corresponding to a specific offset
-func offsetCode(off uint32) uint32 {
- if false {
- if off < uint32(len(offsetCodes)) {
- return offsetCodes[off&255]
- } else if off>>7 < uint32(len(offsetCodes)) {
- return offsetCodes[(off>>7)&255] + 14
- } else {
- return offsetCodes[(off>>14)&255] + 28
- }
- }
- if off < uint32(len(offsetCodes)) {
- return offsetCodes[uint8(off)]
- }
- return offsetCodes14[uint8(off>>7)]
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/.gitignore b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/.gitignore
deleted file mode 100644
index f1c181ec9c5..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/.gitignore
+++ /dev/null
@@ -1,12 +0,0 @@
-# Binaries for programs and plugins
-*.exe
-*.exe~
-*.dll
-*.so
-*.dylib
-
-# Test binary, build with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/LICENSE b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/LICENSE
deleted file mode 100644
index 7364c76bad1..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2018 sachin shinde
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/logging.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/logging.go
deleted file mode 100644
index 12d377d8056..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/logging.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package logging
-
-import (
- "log"
- "io"
- "io/ioutil"
- "os"
-)
-
-type Logger struct {
- Name string
- Trace *log.Logger
- Info *log.Logger
- Warning *log.Logger
- Error *log.Logger
- level LoggingLevel
-}
-
-var loggers = make(map[string]Logger)
-
-func GetLogger(name string) Logger {
- return New(name, os.Stdout, os.Stdout, os.Stdout, os.Stderr)
-}
-
-func (logger Logger) SetLevel(level LoggingLevel) Logger{
- switch level {
- case TRACE:
- logger.Trace.SetOutput(os.Stdout);
- logger.Info.SetOutput(os.Stdout);
- logger.Warning.SetOutput(os.Stdout);
- logger.Error.SetOutput(os.Stderr);
- case INFO:
- logger.Trace.SetOutput(ioutil.Discard);
- logger.Info.SetOutput(os.Stdout);
- logger.Warning.SetOutput(os.Stdout);
- logger.Error.SetOutput(os.Stderr);
- case WARNING:
- logger.Trace.SetOutput(ioutil.Discard);
- logger.Info.SetOutput(ioutil.Discard);
- logger.Warning.SetOutput(os.Stdout);
- logger.Error.SetOutput(os.Stderr);
- case ERROR:
- logger.Trace.SetOutput(ioutil.Discard);
- logger.Info.SetOutput(ioutil.Discard);
- logger.Warning.SetOutput(ioutil.Discard);
- logger.Error.SetOutput(os.Stderr);
- case OFF:
- logger.Trace.SetOutput(ioutil.Discard);
- logger.Info.SetOutput(ioutil.Discard);
- logger.Warning.SetOutput(ioutil.Discard);
- logger.Error.SetOutput(ioutil.Discard);
- }
- return logger;
-}
-
-func (logger Logger) GetLevel() LoggingLevel {
- return logger.level;
-}
-
-func New(
- name string,
- traceHandle io.Writer,
- infoHandle io.Writer,
- warningHandle io.Writer,
- errorHandle io.Writer) Logger {
- loggers[name] = Logger{
- Name: name,
- Trace: log.New(traceHandle,
- "TRACE: ",
- log.Ldate|log.Ltime|log.Lshortfile),
- Info: log.New(infoHandle,
- "INFO: ",
- log.Ldate|log.Ltime|log.Lshortfile),
- Warning: log.New(warningHandle,
- "WARNING: ",
- log.Ldate|log.Ltime|log.Lshortfile),
- Error: log.New(errorHandle,
- "ERROR: ",
- log.Ldate|log.Ltime|log.Lshortfile),
- }
- return loggers[name]
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/loggingL.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/loggingL.go
deleted file mode 100644
index aab5a8567af..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/loggingL.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package logging
-
-type LoggingLevel int
-
-//go:generate stringer -type=LoggingLevel
-
-const (
- TRACE LoggingLevel = iota
- INFO
- WARNING
- ERROR
- OFF
-)
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/logginglevel_string.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/logginglevel_string.go
deleted file mode 100644
index 9f24f0acbfe..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/go-logger/logginglevel_string.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Code generated by "stringer -type=LoggingLevel"; DO NOT EDIT.
-
-package logging
-
-import "strconv"
-
-const _LoggingLevel_name = "TRACEINFOWARNINGERROROFF"
-
-var _LoggingLevel_index = [...]uint8{0, 5, 9, 16, 21, 24}
-
-func (i LoggingLevel) String() string {
- if i < 0 || i >= LoggingLevel(len(_LoggingLevel_index)-1) {
- return "LoggingLevel(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _LoggingLevel_name[_LoggingLevel_index[i]:_LoggingLevel_index[i+1]]
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/.gitignore b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/.gitignore
deleted file mode 100644
index 9a289397844..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/.gitignore
+++ /dev/null
@@ -1,21 +0,0 @@
-# Binaries for programs and plugins
-*.exe
-*.dll
-*.so
-*.dylib
-
-# Test binary, build with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
-# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
-.glide/
-
-# ignore build under build directory
-build/
-bin/
-
-#ignore any IDE based files
-.idea/**
\ No newline at end of file
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/README.md b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/README.md
deleted file mode 100644
index 2439bc6a7e1..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/README.md
+++ /dev/null
@@ -1,157 +0,0 @@
-# GoWebsocket
-Gorilla websocket based simplified client implementation in GO.
-
-Overview
---------
-This client provides following easy to implement functionality
-- Support for emitting and receiving text and binary data
-- Data compression
-- Concurrency control
-- Proxy support
-- Setting request headers
-- Subprotocols support
-- SSL verification enable/disable
-
-To install use
-
-```markdown
- go get github.com/sacOO7/gowebsocket
-```
-
-Description
------------
-
-Create instance of `Websocket` by passing url of websocket-server end-point
-
-```go
- //Create a client instance
- socket := gowebsocket.New("ws://echo.websocket.org/")
-
-```
-
-**Important Note** : url to websocket server must be specified with either **ws** or **wss**.
-
-#### Connecting to server
-- For connecting to server:
-
-```go
- //This will send websocket handshake request to socketcluster-server
- socket.Connect()
-```
-
-#### Registering All Listeners
-```go
- package main
-
- import (
- "log"
- "github.com/sacOO7/gowebsocket"
- "os"
- "os/signal"
- )
-
- func main() {
-
- interrupt := make(chan os.Signal, 1)
- signal.Notify(interrupt, os.Interrupt)
-
- socket := gowebsocket.New("ws://echo.websocket.org/");
-
- socket.OnConnected = func(socket gowebsocket.Socket) {
- log.Println("Connected to server");
- };
-
- socket.OnConnectError = func(err error, socket gowebsocket.Socket) {
- log.Println("Recieved connect error ", err)
- };
-
- socket.OnTextMessage = func(message string, socket gowebsocket.Socket) {
- log.Println("Recieved message " + message)
- };
-
- socket.OnBinaryMessage = func(data [] byte, socket gowebsocket.Socket) {
- log.Println("Recieved binary data ", data)
- };
-
- socket.OnPingReceived = func(data string, socket gowebsocket.Socket) {
- log.Println("Recieved ping " + data)
- };
-
- socket.OnPongReceived = func(data string, socket gowebsocket.Socket) {
- log.Println("Recieved pong " + data)
- };
-
- socket.OnDisconnected = func(err error, socket gowebsocket.Socket) {
- log.Println("Disconnected from server ")
- return
- };
-
- socket.Connect()
-
- for {
- select {
- case <-interrupt:
- log.Println("interrupt")
- socket.Close()
- return
- }
- }
- }
-
-```
-
-#### Sending Text message
-
-```go
- socket.SendText("Hi there, this is my sample test message")
-```
-
-#### Sending Binary data
-```go
- token := make([]byte, 4)
- // rand.Read(token) putting some random value in token
- socket.SendBinary(token)
-```
-
-#### Closing the connection with server
-```go
- socket.Close()
-```
-
-#### Setting request headers
-```go
- socket.RequestHeader.Set("Accept-Encoding","gzip, deflate, sdch")
- socket.RequestHeader.Set("Accept-Language","en-US,en;q=0.8")
- socket.RequestHeader.Set("Pragma","no-cache")
- socket.RequestHeader.Set("User-Agent","Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36")
-
-```
-
-#### Setting proxy server
-- It can be set using connectionOptions by providing url to proxy server
-
-```go
- socket.ConnectionOptions = gowebsocket.ConnectionOptions {
- Proxy: gowebsocket.BuildProxy("http://example.com"),
- }
-```
-
-#### Setting data compression, ssl verification and subprotocols
-
-- It can be set using connectionOptions inside socket
-
-```go
- socket.ConnectionOptions = gowebsocket.ConnectionOptions {
- UseSSL:true,
- UseCompression:true,
- Subprotocols: [] string{"chat","superchat"},
- }
-```
-
-- ConnectionOptions needs to be applied before connecting to server
-- Please checkout [**examples/gowebsocket**](!https://github.com/sacOO7/GoWebsocket/tree/master/examples/gowebsocket) directory for detailed code..
-
-License
--------
-Apache License, Version 2.0
-
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/gowebsocket.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/gowebsocket.go
deleted file mode 100644
index 1ea2b0d7a71..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/gowebsocket.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package gowebsocket
-
-import (
- "github.com/gorilla/websocket"
- "net/http"
- "errors"
- "crypto/tls"
- "net/url"
- "sync"
- "github.com/sacOO7/go-logger"
- "reflect"
-)
-
-type Empty struct {
-}
-
-var logger = logging.GetLogger(reflect.TypeOf(Empty{}).PkgPath()).SetLevel(logging.OFF)
-
-func (socket Socket) EnableLogging() {
- logger.SetLevel(logging.TRACE)
-}
-
-func (socket Socket) GetLogger() logging.Logger {
- return logger;
-}
-
-type Socket struct {
- Conn *websocket.Conn
- WebsocketDialer *websocket.Dialer
- Url string
- ConnectionOptions ConnectionOptions
- RequestHeader http.Header
- OnConnected func(socket Socket)
- OnTextMessage func(message string, socket Socket)
- OnBinaryMessage func(data [] byte, socket Socket)
- OnConnectError func(err error, socket Socket)
- OnDisconnected func(err error, socket Socket)
- OnPingReceived func(data string, socket Socket)
- OnPongReceived func(data string, socket Socket)
- IsConnected bool
- sendMu *sync.Mutex // Prevent "concurrent write to websocket connection"
- receiveMu *sync.Mutex
-}
-
-type ConnectionOptions struct {
- UseCompression bool
- UseSSL bool
- Proxy func(*http.Request) (*url.URL, error)
- Subprotocols [] string
-}
-
-// todo Yet to be done
-type ReconnectionOptions struct {
-}
-
-func New(url string) Socket {
- return Socket{
- Url: url,
- RequestHeader: http.Header{},
- ConnectionOptions: ConnectionOptions{
- UseCompression: false,
- UseSSL: true,
- },
- WebsocketDialer: &websocket.Dialer{},
- sendMu: &sync.Mutex{},
- receiveMu: &sync.Mutex{},
- }
-}
-
-func (socket *Socket) setConnectionOptions() {
- socket.WebsocketDialer.EnableCompression = socket.ConnectionOptions.UseCompression
- socket.WebsocketDialer.TLSClientConfig = &tls.Config{InsecureSkipVerify: socket.ConnectionOptions.UseSSL}
- socket.WebsocketDialer.Proxy = socket.ConnectionOptions.Proxy
- socket.WebsocketDialer.Subprotocols = socket.ConnectionOptions.Subprotocols
-}
-
-func (socket *Socket) Connect() {
- var err error;
- socket.setConnectionOptions()
-
- socket.Conn, _, err = socket.WebsocketDialer.Dial(socket.Url, socket.RequestHeader)
-
- if err != nil {
- logger.Error.Println("Error while connecting to server ", err)
- socket.IsConnected = false
- if socket.OnConnectError != nil {
- socket.OnConnectError(err, *socket)
- }
- return
- }
-
- logger.Info.Println("Connected to server")
-
- if socket.OnConnected != nil {
- socket.IsConnected = true
- socket.OnConnected(*socket)
- }
-
- defaultPingHandler := socket.Conn.PingHandler()
- socket.Conn.SetPingHandler(func(appData string) error {
- logger.Trace.Println("Received PING from server")
- if socket.OnPingReceived != nil {
- socket.OnPingReceived(appData, *socket)
- }
- return defaultPingHandler(appData)
- })
-
- defaultPongHandler := socket.Conn.PongHandler()
- socket.Conn.SetPongHandler(func(appData string) error {
- logger.Trace.Println("Received PONG from server")
- if socket.OnPongReceived != nil {
- socket.OnPongReceived(appData, *socket)
- }
- return defaultPongHandler(appData)
- })
-
- defaultCloseHandler := socket.Conn.CloseHandler()
- socket.Conn.SetCloseHandler(func(code int, text string) error {
- result := defaultCloseHandler(code, text)
- logger.Warning.Println("Disconnected from server ", result)
- if socket.OnDisconnected != nil {
- socket.IsConnected = false
- socket.OnDisconnected(errors.New(text), *socket)
- }
- return result
- })
-
- go func() {
- for {
- socket.receiveMu.Lock()
- messageType, message, err := socket.Conn.ReadMessage()
- socket.receiveMu.Unlock()
- if err != nil {
- logger.Error.Println("read:", err)
- return
- }
- logger.Info.Println("recv: %s", message)
-
- switch messageType {
- case websocket.TextMessage:
- if socket.OnTextMessage != nil {
- socket.OnTextMessage(string(message), *socket)
- }
- case websocket.BinaryMessage:
- if socket.OnBinaryMessage != nil {
- socket.OnBinaryMessage(message, *socket)
- }
- }
- }
- }()
-}
-
-func (socket *Socket) SendText(message string) {
- err := socket.send(websocket.TextMessage, [] byte (message))
- if err != nil {
- logger.Error.Println("write:", err)
- return
- }
-}
-
-func (socket *Socket) SendBinary(data [] byte) {
- err := socket.send(websocket.BinaryMessage, data)
- if err != nil {
- logger.Error.Println("write:", err)
- return
- }
-}
-
-func (socket *Socket) send(messageType int, data [] byte) error {
- socket.sendMu.Lock()
- err := socket.Conn.WriteMessage(messageType, data)
- socket.sendMu.Unlock()
- return err
-}
-
-func (socket *Socket) Close() {
- err := socket.send(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
- if err != nil {
- logger.Error.Println("write close:", err)
- }
- socket.Conn.Close()
- if socket.OnDisconnected != nil {
- socket.IsConnected = false
- socket.OnDisconnected(err, *socket)
- }
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/stub.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/stub.go
new file mode 100644
index 00000000000..f1f20c86857
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/stub.go
@@ -0,0 +1,58 @@
+// Code generated by depstubber. DO NOT EDIT.
+// This is a simple stub for github.com/sacOO7/gowebsocket, strictly for use in testing.
+
+// See the LICENSE file for information about the licensing of the original library.
+// Source: github.com/sacOO7/gowebsocket (exports: ; functions: New,BuildProxy)
+
+// Package gowebsocket is a stub of github.com/sacOO7/gowebsocket, generated by depstubber.
+package gowebsocket
+
+import (
+ http "net/http"
+ url "net/url"
+)
+
+func BuildProxy(_ string) func(*http.Request) (*url.URL, error) {
+ return nil
+}
+
+type ConnectionOptions struct {
+ UseCompression bool
+ UseSSL bool
+ Proxy func(*http.Request) (*url.URL, error)
+ Subprotocols []string
+}
+
+func New(_ string) Socket {
+ return Socket{}
+}
+
+type Socket struct {
+ Conn interface{}
+ WebsocketDialer interface{}
+ Url string
+ ConnectionOptions ConnectionOptions
+ RequestHeader http.Header
+ OnConnected func(Socket)
+ OnTextMessage func(string, Socket)
+ OnBinaryMessage func([]byte, Socket)
+ OnConnectError func(error, Socket)
+ OnDisconnected func(error, Socket)
+ OnPingReceived func(string, Socket)
+ OnPongReceived func(string, Socket)
+ IsConnected bool
+}
+
+func (_ Socket) EnableLogging() {}
+
+func (_ Socket) GetLogger() interface{} {
+ return nil
+}
+
+func (_ *Socket) Close() {}
+
+func (_ *Socket) Connect() {}
+
+func (_ *Socket) SendBinary(_ []byte) {}
+
+func (_ *Socket) SendText(_ string) {}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/utils.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/utils.go
deleted file mode 100644
index d8702ebb6df..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/github.com/sacOO7/gowebsocket/utils.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package gowebsocket
-
-import (
- "net/http"
- "net/url"
- "log"
-)
-
-func BuildProxy(Url string) func(*http.Request) (*url.URL, error) {
- uProxy, err := url.Parse(Url)
- if err != nil {
- log.Fatal("Error while parsing url ", err)
- }
- return http.ProxyURL(uProxy)
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/AUTHORS b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/AUTHORS
deleted file mode 100644
index 15167cd746c..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/CONTRIBUTORS b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/CONTRIBUTORS
deleted file mode 100644
index 1c4577e9680..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/PATENTS b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/PATENTS
deleted file mode 100644
index 733099041f8..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/LICENSE b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/LICENSE
similarity index 100%
rename from ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/LICENSE
rename to ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/LICENSE
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/client.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/client.go
deleted file mode 100644
index 69a4ac7eefe..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/client.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-import (
- "bufio"
- "io"
- "net"
- "net/http"
- "net/url"
-)
-
-// DialError is an error that occurs while dialling a websocket server.
-type DialError struct {
- *Config
- Err error
-}
-
-func (e *DialError) Error() string {
- return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error()
-}
-
-// NewConfig creates a new WebSocket config for client connection.
-func NewConfig(server, origin string) (config *Config, err error) {
- config = new(Config)
- config.Version = ProtocolVersionHybi13
- config.Location, err = url.ParseRequestURI(server)
- if err != nil {
- return
- }
- config.Origin, err = url.ParseRequestURI(origin)
- if err != nil {
- return
- }
- config.Header = http.Header(make(map[string][]string))
- return
-}
-
-// NewClient creates a new WebSocket client connection over rwc.
-func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) {
- br := bufio.NewReader(rwc)
- bw := bufio.NewWriter(rwc)
- err = hybiClientHandshake(config, br, bw)
- if err != nil {
- return
- }
- buf := bufio.NewReadWriter(br, bw)
- ws = newHybiClientConn(config, buf, rwc)
- return
-}
-
-// Dial opens a new client connection to a WebSocket.
-func Dial(url_, protocol, origin string) (ws *Conn, err error) {
- config, err := NewConfig(url_, origin)
- if err != nil {
- return nil, err
- }
- if protocol != "" {
- config.Protocol = []string{protocol}
- }
- return DialConfig(config)
-}
-
-var portMap = map[string]string{
- "ws": "80",
- "wss": "443",
-}
-
-func parseAuthority(location *url.URL) string {
- if _, ok := portMap[location.Scheme]; ok {
- if _, _, err := net.SplitHostPort(location.Host); err != nil {
- return net.JoinHostPort(location.Host, portMap[location.Scheme])
- }
- }
- return location.Host
-}
-
-// DialConfig opens a new client connection to a WebSocket with a config.
-func DialConfig(config *Config) (ws *Conn, err error) {
- var client net.Conn
- if config.Location == nil {
- return nil, &DialError{config, ErrBadWebSocketLocation}
- }
- if config.Origin == nil {
- return nil, &DialError{config, ErrBadWebSocketOrigin}
- }
- dialer := config.Dialer
- if dialer == nil {
- dialer = &net.Dialer{}
- }
- client, err = dialWithDialer(dialer, config)
- if err != nil {
- goto Error
- }
- ws, err = NewClient(config, client)
- if err != nil {
- client.Close()
- goto Error
- }
- return
-
-Error:
- return nil, &DialError{config, err}
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/dial.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/dial.go
deleted file mode 100644
index 2dab943a489..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/dial.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-import (
- "crypto/tls"
- "net"
-)
-
-func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) {
- switch config.Location.Scheme {
- case "ws":
- conn, err = dialer.Dial("tcp", parseAuthority(config.Location))
-
- case "wss":
- conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig)
-
- default:
- err = ErrBadScheme
- }
- return
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/hybi.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/hybi.go
deleted file mode 100644
index 8cffdd16c91..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/hybi.go
+++ /dev/null
@@ -1,583 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-// This file implements a protocol of hybi draft.
-// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17
-
-import (
- "bufio"
- "bytes"
- "crypto/rand"
- "crypto/sha1"
- "encoding/base64"
- "encoding/binary"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "strings"
-)
-
-const (
- websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
-
- closeStatusNormal = 1000
- closeStatusGoingAway = 1001
- closeStatusProtocolError = 1002
- closeStatusUnsupportedData = 1003
- closeStatusFrameTooLarge = 1004
- closeStatusNoStatusRcvd = 1005
- closeStatusAbnormalClosure = 1006
- closeStatusBadMessageData = 1007
- closeStatusPolicyViolation = 1008
- closeStatusTooBigData = 1009
- closeStatusExtensionMismatch = 1010
-
- maxControlFramePayloadLength = 125
-)
-
-var (
- ErrBadMaskingKey = &ProtocolError{"bad masking key"}
- ErrBadPongMessage = &ProtocolError{"bad pong message"}
- ErrBadClosingStatus = &ProtocolError{"bad closing status"}
- ErrUnsupportedExtensions = &ProtocolError{"unsupported extensions"}
- ErrNotImplemented = &ProtocolError{"not implemented"}
-
- handshakeHeader = map[string]bool{
- "Host": true,
- "Upgrade": true,
- "Connection": true,
- "Sec-Websocket-Key": true,
- "Sec-Websocket-Origin": true,
- "Sec-Websocket-Version": true,
- "Sec-Websocket-Protocol": true,
- "Sec-Websocket-Accept": true,
- }
-)
-
-// A hybiFrameHeader is a frame header as defined in hybi draft.
-type hybiFrameHeader struct {
- Fin bool
- Rsv [3]bool
- OpCode byte
- Length int64
- MaskingKey []byte
-
- data *bytes.Buffer
-}
-
-// A hybiFrameReader is a reader for hybi frame.
-type hybiFrameReader struct {
- reader io.Reader
-
- header hybiFrameHeader
- pos int64
- length int
-}
-
-func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) {
- n, err = frame.reader.Read(msg)
- if frame.header.MaskingKey != nil {
- for i := 0; i < n; i++ {
- msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4]
- frame.pos++
- }
- }
- return n, err
-}
-
-func (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode }
-
-func (frame *hybiFrameReader) HeaderReader() io.Reader {
- if frame.header.data == nil {
- return nil
- }
- if frame.header.data.Len() == 0 {
- return nil
- }
- return frame.header.data
-}
-
-func (frame *hybiFrameReader) TrailerReader() io.Reader { return nil }
-
-func (frame *hybiFrameReader) Len() (n int) { return frame.length }
-
-// A hybiFrameReaderFactory creates new frame reader based on its frame type.
-type hybiFrameReaderFactory struct {
- *bufio.Reader
-}
-
-// NewFrameReader reads a frame header from the connection, and creates new reader for the frame.
-// See Section 5.2 Base Framing protocol for detail.
-// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2
-func (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) {
- hybiFrame := new(hybiFrameReader)
- frame = hybiFrame
- var header []byte
- var b byte
- // First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits)
- b, err = buf.ReadByte()
- if err != nil {
- return
- }
- header = append(header, b)
- hybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0
- for i := 0; i < 3; i++ {
- j := uint(6 - i)
- hybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0
- }
- hybiFrame.header.OpCode = header[0] & 0x0f
-
- // Second byte. Mask/Payload len(7bits)
- b, err = buf.ReadByte()
- if err != nil {
- return
- }
- header = append(header, b)
- mask := (b & 0x80) != 0
- b &= 0x7f
- lengthFields := 0
- switch {
- case b <= 125: // Payload length 7bits.
- hybiFrame.header.Length = int64(b)
- case b == 126: // Payload length 7+16bits
- lengthFields = 2
- case b == 127: // Payload length 7+64bits
- lengthFields = 8
- }
- for i := 0; i < lengthFields; i++ {
- b, err = buf.ReadByte()
- if err != nil {
- return
- }
- if lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits
- b &= 0x7f
- }
- header = append(header, b)
- hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b)
- }
- if mask {
- // Masking key. 4 bytes.
- for i := 0; i < 4; i++ {
- b, err = buf.ReadByte()
- if err != nil {
- return
- }
- header = append(header, b)
- hybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b)
- }
- }
- hybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length)
- hybiFrame.header.data = bytes.NewBuffer(header)
- hybiFrame.length = len(header) + int(hybiFrame.header.Length)
- return
-}
-
-// A HybiFrameWriter is a writer for hybi frame.
-type hybiFrameWriter struct {
- writer *bufio.Writer
-
- header *hybiFrameHeader
-}
-
-func (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) {
- var header []byte
- var b byte
- if frame.header.Fin {
- b |= 0x80
- }
- for i := 0; i < 3; i++ {
- if frame.header.Rsv[i] {
- j := uint(6 - i)
- b |= 1 << j
- }
- }
- b |= frame.header.OpCode
- header = append(header, b)
- if frame.header.MaskingKey != nil {
- b = 0x80
- } else {
- b = 0
- }
- lengthFields := 0
- length := len(msg)
- switch {
- case length <= 125:
- b |= byte(length)
- case length < 65536:
- b |= 126
- lengthFields = 2
- default:
- b |= 127
- lengthFields = 8
- }
- header = append(header, b)
- for i := 0; i < lengthFields; i++ {
- j := uint((lengthFields - i - 1) * 8)
- b = byte((length >> j) & 0xff)
- header = append(header, b)
- }
- if frame.header.MaskingKey != nil {
- if len(frame.header.MaskingKey) != 4 {
- return 0, ErrBadMaskingKey
- }
- header = append(header, frame.header.MaskingKey...)
- frame.writer.Write(header)
- data := make([]byte, length)
- for i := range data {
- data[i] = msg[i] ^ frame.header.MaskingKey[i%4]
- }
- frame.writer.Write(data)
- err = frame.writer.Flush()
- return length, err
- }
- frame.writer.Write(header)
- frame.writer.Write(msg)
- err = frame.writer.Flush()
- return length, err
-}
-
-func (frame *hybiFrameWriter) Close() error { return nil }
-
-type hybiFrameWriterFactory struct {
- *bufio.Writer
- needMaskingKey bool
-}
-
-func (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) {
- frameHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType}
- if buf.needMaskingKey {
- frameHeader.MaskingKey, err = generateMaskingKey()
- if err != nil {
- return nil, err
- }
- }
- return &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil
-}
-
-type hybiFrameHandler struct {
- conn *Conn
- payloadType byte
-}
-
-func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) {
- if handler.conn.IsServerConn() {
- // The client MUST mask all frames sent to the server.
- if frame.(*hybiFrameReader).header.MaskingKey == nil {
- handler.WriteClose(closeStatusProtocolError)
- return nil, io.EOF
- }
- } else {
- // The server MUST NOT mask all frames.
- if frame.(*hybiFrameReader).header.MaskingKey != nil {
- handler.WriteClose(closeStatusProtocolError)
- return nil, io.EOF
- }
- }
- if header := frame.HeaderReader(); header != nil {
- io.Copy(ioutil.Discard, header)
- }
- switch frame.PayloadType() {
- case ContinuationFrame:
- frame.(*hybiFrameReader).header.OpCode = handler.payloadType
- case TextFrame, BinaryFrame:
- handler.payloadType = frame.PayloadType()
- case CloseFrame:
- return nil, io.EOF
- case PingFrame, PongFrame:
- b := make([]byte, maxControlFramePayloadLength)
- n, err := io.ReadFull(frame, b)
- if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
- return nil, err
- }
- io.Copy(ioutil.Discard, frame)
- if frame.PayloadType() == PingFrame {
- if _, err := handler.WritePong(b[:n]); err != nil {
- return nil, err
- }
- }
- return nil, nil
- }
- return frame, nil
-}
-
-func (handler *hybiFrameHandler) WriteClose(status int) (err error) {
- handler.conn.wio.Lock()
- defer handler.conn.wio.Unlock()
- w, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame)
- if err != nil {
- return err
- }
- msg := make([]byte, 2)
- binary.BigEndian.PutUint16(msg, uint16(status))
- _, err = w.Write(msg)
- w.Close()
- return err
-}
-
-func (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) {
- handler.conn.wio.Lock()
- defer handler.conn.wio.Unlock()
- w, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame)
- if err != nil {
- return 0, err
- }
- n, err = w.Write(msg)
- w.Close()
- return n, err
-}
-
-// newHybiConn creates a new WebSocket connection speaking hybi draft protocol.
-func newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
- if buf == nil {
- br := bufio.NewReader(rwc)
- bw := bufio.NewWriter(rwc)
- buf = bufio.NewReadWriter(br, bw)
- }
- ws := &Conn{config: config, request: request, buf: buf, rwc: rwc,
- frameReaderFactory: hybiFrameReaderFactory{buf.Reader},
- frameWriterFactory: hybiFrameWriterFactory{
- buf.Writer, request == nil},
- PayloadType: TextFrame,
- defaultCloseStatus: closeStatusNormal}
- ws.frameHandler = &hybiFrameHandler{conn: ws}
- return ws
-}
-
-// generateMaskingKey generates a masking key for a frame.
-func generateMaskingKey() (maskingKey []byte, err error) {
- maskingKey = make([]byte, 4)
- if _, err = io.ReadFull(rand.Reader, maskingKey); err != nil {
- return
- }
- return
-}
-
-// generateNonce generates a nonce consisting of a randomly selected 16-byte
-// value that has been base64-encoded.
-func generateNonce() (nonce []byte) {
- key := make([]byte, 16)
- if _, err := io.ReadFull(rand.Reader, key); err != nil {
- panic(err)
- }
- nonce = make([]byte, 24)
- base64.StdEncoding.Encode(nonce, key)
- return
-}
-
-// removeZone removes IPv6 zone identifer from host.
-// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080"
-func removeZone(host string) string {
- if !strings.HasPrefix(host, "[") {
- return host
- }
- i := strings.LastIndex(host, "]")
- if i < 0 {
- return host
- }
- j := strings.LastIndex(host[:i], "%")
- if j < 0 {
- return host
- }
- return host[:j] + host[i:]
-}
-
-// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of
-// the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string.
-func getNonceAccept(nonce []byte) (expected []byte, err error) {
- h := sha1.New()
- if _, err = h.Write(nonce); err != nil {
- return
- }
- if _, err = h.Write([]byte(websocketGUID)); err != nil {
- return
- }
- expected = make([]byte, 28)
- base64.StdEncoding.Encode(expected, h.Sum(nil))
- return
-}
-
-// Client handshake described in draft-ietf-hybi-thewebsocket-protocol-17
-func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) {
- bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n")
-
- // According to RFC 6874, an HTTP client, proxy, or other
- // intermediary must remove any IPv6 zone identifier attached
- // to an outgoing URI.
- bw.WriteString("Host: " + removeZone(config.Location.Host) + "\r\n")
- bw.WriteString("Upgrade: websocket\r\n")
- bw.WriteString("Connection: Upgrade\r\n")
- nonce := generateNonce()
- if config.handshakeData != nil {
- nonce = []byte(config.handshakeData["key"])
- }
- bw.WriteString("Sec-WebSocket-Key: " + string(nonce) + "\r\n")
- bw.WriteString("Origin: " + strings.ToLower(config.Origin.String()) + "\r\n")
-
- if config.Version != ProtocolVersionHybi13 {
- return ErrBadProtocolVersion
- }
-
- bw.WriteString("Sec-WebSocket-Version: " + fmt.Sprintf("%d", config.Version) + "\r\n")
- if len(config.Protocol) > 0 {
- bw.WriteString("Sec-WebSocket-Protocol: " + strings.Join(config.Protocol, ", ") + "\r\n")
- }
- // TODO(ukai): send Sec-WebSocket-Extensions.
- err = config.Header.WriteSubset(bw, handshakeHeader)
- if err != nil {
- return err
- }
-
- bw.WriteString("\r\n")
- if err = bw.Flush(); err != nil {
- return err
- }
-
- resp, err := http.ReadResponse(br, &http.Request{Method: "GET"})
- if err != nil {
- return err
- }
- if resp.StatusCode != 101 {
- return ErrBadStatus
- }
- if strings.ToLower(resp.Header.Get("Upgrade")) != "websocket" ||
- strings.ToLower(resp.Header.Get("Connection")) != "upgrade" {
- return ErrBadUpgrade
- }
- expectedAccept, err := getNonceAccept(nonce)
- if err != nil {
- return err
- }
- if resp.Header.Get("Sec-WebSocket-Accept") != string(expectedAccept) {
- return ErrChallengeResponse
- }
- if resp.Header.Get("Sec-WebSocket-Extensions") != "" {
- return ErrUnsupportedExtensions
- }
- offeredProtocol := resp.Header.Get("Sec-WebSocket-Protocol")
- if offeredProtocol != "" {
- protocolMatched := false
- for i := 0; i < len(config.Protocol); i++ {
- if config.Protocol[i] == offeredProtocol {
- protocolMatched = true
- break
- }
- }
- if !protocolMatched {
- return ErrBadWebSocketProtocol
- }
- config.Protocol = []string{offeredProtocol}
- }
-
- return nil
-}
-
-// newHybiClientConn creates a client WebSocket connection after handshake.
-func newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn {
- return newHybiConn(config, buf, rwc, nil)
-}
-
-// A HybiServerHandshaker performs a server handshake using hybi draft protocol.
-type hybiServerHandshaker struct {
- *Config
- accept []byte
-}
-
-func (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) {
- c.Version = ProtocolVersionHybi13
- if req.Method != "GET" {
- return http.StatusMethodNotAllowed, ErrBadRequestMethod
- }
- // HTTP version can be safely ignored.
-
- if strings.ToLower(req.Header.Get("Upgrade")) != "websocket" ||
- !strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") {
- return http.StatusBadRequest, ErrNotWebSocket
- }
-
- key := req.Header.Get("Sec-Websocket-Key")
- if key == "" {
- return http.StatusBadRequest, ErrChallengeResponse
- }
- version := req.Header.Get("Sec-Websocket-Version")
- switch version {
- case "13":
- c.Version = ProtocolVersionHybi13
- default:
- return http.StatusBadRequest, ErrBadWebSocketVersion
- }
- var scheme string
- if req.TLS != nil {
- scheme = "wss"
- } else {
- scheme = "ws"
- }
- c.Location, err = url.ParseRequestURI(scheme + "://" + req.Host + req.URL.RequestURI())
- if err != nil {
- return http.StatusBadRequest, err
- }
- protocol := strings.TrimSpace(req.Header.Get("Sec-Websocket-Protocol"))
- if protocol != "" {
- protocols := strings.Split(protocol, ",")
- for i := 0; i < len(protocols); i++ {
- c.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i]))
- }
- }
- c.accept, err = getNonceAccept([]byte(key))
- if err != nil {
- return http.StatusInternalServerError, err
- }
- return http.StatusSwitchingProtocols, nil
-}
-
-// Origin parses the Origin header in req.
-// If the Origin header is not set, it returns nil and nil.
-func Origin(config *Config, req *http.Request) (*url.URL, error) {
- var origin string
- switch config.Version {
- case ProtocolVersionHybi13:
- origin = req.Header.Get("Origin")
- }
- if origin == "" {
- return nil, nil
- }
- return url.ParseRequestURI(origin)
-}
-
-func (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) {
- if len(c.Protocol) > 0 {
- if len(c.Protocol) != 1 {
- // You need choose a Protocol in Handshake func in Server.
- return ErrBadWebSocketProtocol
- }
- }
- buf.WriteString("HTTP/1.1 101 Switching Protocols\r\n")
- buf.WriteString("Upgrade: websocket\r\n")
- buf.WriteString("Connection: Upgrade\r\n")
- buf.WriteString("Sec-WebSocket-Accept: " + string(c.accept) + "\r\n")
- if len(c.Protocol) > 0 {
- buf.WriteString("Sec-WebSocket-Protocol: " + c.Protocol[0] + "\r\n")
- }
- // TODO(ukai): send Sec-WebSocket-Extensions.
- if c.Header != nil {
- err := c.Header.WriteSubset(buf, handshakeHeader)
- if err != nil {
- return err
- }
- }
- buf.WriteString("\r\n")
- return buf.Flush()
-}
-
-func (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
- return newHybiServerConn(c.Config, buf, rwc, request)
-}
-
-// newHybiServerConn returns a new WebSocket connection speaking hybi draft protocol.
-func newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
- return newHybiConn(config, buf, rwc, request)
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/server.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/server.go
deleted file mode 100644
index 0895dea1905..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/server.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package websocket
-
-import (
- "bufio"
- "fmt"
- "io"
- "net/http"
-)
-
-func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) {
- var hs serverHandshaker = &hybiServerHandshaker{Config: config}
- code, err := hs.ReadHandshake(buf.Reader, req)
- if err == ErrBadWebSocketVersion {
- fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
- fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion)
- buf.WriteString("\r\n")
- buf.WriteString(err.Error())
- buf.Flush()
- return
- }
- if err != nil {
- fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
- buf.WriteString("\r\n")
- buf.WriteString(err.Error())
- buf.Flush()
- return
- }
- if handshake != nil {
- err = handshake(config, req)
- if err != nil {
- code = http.StatusForbidden
- fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
- buf.WriteString("\r\n")
- buf.Flush()
- return
- }
- }
- err = hs.AcceptHandshake(buf.Writer)
- if err != nil {
- code = http.StatusBadRequest
- fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
- buf.WriteString("\r\n")
- buf.Flush()
- return
- }
- conn = hs.NewServerConn(buf, rwc, req)
- return
-}
-
-// Server represents a server of a WebSocket.
-type Server struct {
- // Config is a WebSocket configuration for new WebSocket connection.
- Config
-
- // Handshake is an optional function in WebSocket handshake.
- // For example, you can check, or don't check Origin header.
- // Another example, you can select config.Protocol.
- Handshake func(*Config, *http.Request) error
-
- // Handler handles a WebSocket connection.
- Handler
-}
-
-// ServeHTTP implements the http.Handler interface for a WebSocket
-func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- s.serveWebSocket(w, req)
-}
-
-func (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) {
- rwc, buf, err := w.(http.Hijacker).Hijack()
- if err != nil {
- panic("Hijack failed: " + err.Error())
- }
- // The server should abort the WebSocket connection if it finds
- // the client did not send a handshake that matches with protocol
- // specification.
- defer rwc.Close()
- conn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake)
- if err != nil {
- return
- }
- if conn == nil {
- panic("unexpected nil conn")
- }
- s.Handler(conn)
-}
-
-// Handler is a simple interface to a WebSocket browser client.
-// It checks if Origin header is valid URL by default.
-// You might want to verify websocket.Conn.Config().Origin in the func.
-// If you use Server instead of Handler, you could call websocket.Origin and
-// check the origin in your Handshake func. So, if you want to accept
-// non-browser clients, which do not send an Origin header, set a
-// Server.Handshake that does not check the origin.
-type Handler func(*Conn)
-
-func checkOrigin(config *Config, req *http.Request) (err error) {
- config.Origin, err = Origin(config, req)
- if err == nil && config.Origin == nil {
- return fmt.Errorf("null origin")
- }
- return err
-}
-
-// ServeHTTP implements the http.Handler interface for a WebSocket
-func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- s := Server{Handler: h, Handshake: checkOrigin}
- s.serveWebSocket(w, req)
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/stub.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/stub.go
new file mode 100644
index 00000000000..b860854e6e8
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/stub.go
@@ -0,0 +1,120 @@
+// Code generated by depstubber. DO NOT EDIT.
+// This is a simple stub for golang.org/x/net/websocket, strictly for use in testing.
+
+// See the LICENSE file for information about the licensing of the original library.
+// Source: golang.org/x/net/websocket (exports: ; functions: Dial,NewConfig,DialConfig)
+
+// Package websocket is a stub of golang.org/x/net/websocket, generated by depstubber.
+package websocket
+
+import (
+ tls "crypto/tls"
+ io "io"
+ net "net"
+ http "net/http"
+ url "net/url"
+ time "time"
+)
+
+type Config struct {
+ Location *url.URL
+ Origin *url.URL
+ Protocol []string
+ Version int
+ TlsConfig *tls.Config
+ Header http.Header
+ Dialer *net.Dialer
+}
+
+type Conn struct {
+ PayloadType byte
+ MaxPayloadBytes int
+}
+
+func (_ Conn) HandleFrame(_ interface{}) (interface{}, error) {
+ return nil, nil
+}
+
+func (_ Conn) HeaderReader() io.Reader {
+ return nil
+}
+
+func (_ Conn) Len() int {
+ return 0
+}
+
+func (_ Conn) NewFrameReader() (interface{}, error) {
+ return nil, nil
+}
+
+func (_ Conn) NewFrameWriter(_ byte) (interface{}, error) {
+ return nil, nil
+}
+
+func (_ Conn) TrailerReader() io.Reader {
+ return nil
+}
+
+func (_ Conn) WriteClose(_ int) error {
+ return nil
+}
+
+func (_ *Conn) Close() error {
+ return nil
+}
+
+func (_ *Conn) Config() *Config {
+ return nil
+}
+
+func (_ *Conn) IsClientConn() bool {
+ return false
+}
+
+func (_ *Conn) IsServerConn() bool {
+ return false
+}
+
+func (_ *Conn) LocalAddr() net.Addr {
+ return nil
+}
+
+func (_ *Conn) Read(_ []byte) (int, error) {
+ return 0, nil
+}
+
+func (_ *Conn) RemoteAddr() net.Addr {
+ return nil
+}
+
+func (_ *Conn) Request() *http.Request {
+ return nil
+}
+
+func (_ *Conn) SetDeadline(_ time.Time) error {
+ return nil
+}
+
+func (_ *Conn) SetReadDeadline(_ time.Time) error {
+ return nil
+}
+
+func (_ *Conn) SetWriteDeadline(_ time.Time) error {
+ return nil
+}
+
+func (_ *Conn) Write(_ []byte) (int, error) {
+ return 0, nil
+}
+
+func Dial(_ string, _ string, _ string) (*Conn, error) {
+ return nil, nil
+}
+
+func DialConfig(_ *Config) (*Conn, error) {
+ return nil, nil
+}
+
+func NewConfig(_ string, _ string) (*Config, error) {
+ return nil, nil
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/websocket.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/websocket.go
deleted file mode 100644
index 6c45c735296..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/golang.org/x/net/websocket/websocket.go
+++ /dev/null
@@ -1,451 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package websocket implements a client and server for the WebSocket protocol
-// as specified in RFC 6455.
-//
-// This package currently lacks some features found in alternative
-// and more actively maintained WebSocket packages:
-//
-// https://godoc.org/github.com/gorilla/websocket
-// https://godoc.org/nhooyr.io/websocket
-package websocket // import "golang.org/x/net/websocket"
-
-import (
- "bufio"
- "crypto/tls"
- "encoding/json"
- "errors"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/url"
- "sync"
- "time"
-)
-
-const (
- ProtocolVersionHybi13 = 13
- ProtocolVersionHybi = ProtocolVersionHybi13
- SupportedProtocolVersion = "13"
-
- ContinuationFrame = 0
- TextFrame = 1
- BinaryFrame = 2
- CloseFrame = 8
- PingFrame = 9
- PongFrame = 10
- UnknownFrame = 255
-
- DefaultMaxPayloadBytes = 32 << 20 // 32MB
-)
-
-// ProtocolError represents WebSocket protocol errors.
-type ProtocolError struct {
- ErrorString string
-}
-
-func (err *ProtocolError) Error() string { return err.ErrorString }
-
-var (
- ErrBadProtocolVersion = &ProtocolError{"bad protocol version"}
- ErrBadScheme = &ProtocolError{"bad scheme"}
- ErrBadStatus = &ProtocolError{"bad status"}
- ErrBadUpgrade = &ProtocolError{"missing or bad upgrade"}
- ErrBadWebSocketOrigin = &ProtocolError{"missing or bad WebSocket-Origin"}
- ErrBadWebSocketLocation = &ProtocolError{"missing or bad WebSocket-Location"}
- ErrBadWebSocketProtocol = &ProtocolError{"missing or bad WebSocket-Protocol"}
- ErrBadWebSocketVersion = &ProtocolError{"missing or bad WebSocket Version"}
- ErrChallengeResponse = &ProtocolError{"mismatch challenge/response"}
- ErrBadFrame = &ProtocolError{"bad frame"}
- ErrBadFrameBoundary = &ProtocolError{"not on frame boundary"}
- ErrNotWebSocket = &ProtocolError{"not websocket protocol"}
- ErrBadRequestMethod = &ProtocolError{"bad method"}
- ErrNotSupported = &ProtocolError{"not supported"}
-)
-
-// ErrFrameTooLarge is returned by Codec's Receive method if payload size
-// exceeds limit set by Conn.MaxPayloadBytes
-var ErrFrameTooLarge = errors.New("websocket: frame payload size exceeds limit")
-
-// Addr is an implementation of net.Addr for WebSocket.
-type Addr struct {
- *url.URL
-}
-
-// Network returns the network type for a WebSocket, "websocket".
-func (addr *Addr) Network() string { return "websocket" }
-
-// Config is a WebSocket configuration
-type Config struct {
- // A WebSocket server address.
- Location *url.URL
-
- // A Websocket client origin.
- Origin *url.URL
-
- // WebSocket subprotocols.
- Protocol []string
-
- // WebSocket protocol version.
- Version int
-
- // TLS config for secure WebSocket (wss).
- TlsConfig *tls.Config
-
- // Additional header fields to be sent in WebSocket opening handshake.
- Header http.Header
-
- // Dialer used when opening websocket connections.
- Dialer *net.Dialer
-
- handshakeData map[string]string
-}
-
-// serverHandshaker is an interface to handle WebSocket server side handshake.
-type serverHandshaker interface {
- // ReadHandshake reads handshake request message from client.
- // Returns http response code and error if any.
- ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error)
-
- // AcceptHandshake accepts the client handshake request and sends
- // handshake response back to client.
- AcceptHandshake(buf *bufio.Writer) (err error)
-
- // NewServerConn creates a new WebSocket connection.
- NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn)
-}
-
-// frameReader is an interface to read a WebSocket frame.
-type frameReader interface {
- // Reader is to read payload of the frame.
- io.Reader
-
- // PayloadType returns payload type.
- PayloadType() byte
-
- // HeaderReader returns a reader to read header of the frame.
- HeaderReader() io.Reader
-
- // TrailerReader returns a reader to read trailer of the frame.
- // If it returns nil, there is no trailer in the frame.
- TrailerReader() io.Reader
-
- // Len returns total length of the frame, including header and trailer.
- Len() int
-}
-
-// frameReaderFactory is an interface to creates new frame reader.
-type frameReaderFactory interface {
- NewFrameReader() (r frameReader, err error)
-}
-
-// frameWriter is an interface to write a WebSocket frame.
-type frameWriter interface {
- // Writer is to write payload of the frame.
- io.WriteCloser
-}
-
-// frameWriterFactory is an interface to create new frame writer.
-type frameWriterFactory interface {
- NewFrameWriter(payloadType byte) (w frameWriter, err error)
-}
-
-type frameHandler interface {
- HandleFrame(frame frameReader) (r frameReader, err error)
- WriteClose(status int) (err error)
-}
-
-// Conn represents a WebSocket connection.
-//
-// Multiple goroutines may invoke methods on a Conn simultaneously.
-type Conn struct {
- config *Config
- request *http.Request
-
- buf *bufio.ReadWriter
- rwc io.ReadWriteCloser
-
- rio sync.Mutex
- frameReaderFactory
- frameReader
-
- wio sync.Mutex
- frameWriterFactory
-
- frameHandler
- PayloadType byte
- defaultCloseStatus int
-
- // MaxPayloadBytes limits the size of frame payload received over Conn
- // by Codec's Receive method. If zero, DefaultMaxPayloadBytes is used.
- MaxPayloadBytes int
-}
-
-// Read implements the io.Reader interface:
-// it reads data of a frame from the WebSocket connection.
-// if msg is not large enough for the frame data, it fills the msg and next Read
-// will read the rest of the frame data.
-// it reads Text frame or Binary frame.
-func (ws *Conn) Read(msg []byte) (n int, err error) {
- ws.rio.Lock()
- defer ws.rio.Unlock()
-again:
- if ws.frameReader == nil {
- frame, err := ws.frameReaderFactory.NewFrameReader()
- if err != nil {
- return 0, err
- }
- ws.frameReader, err = ws.frameHandler.HandleFrame(frame)
- if err != nil {
- return 0, err
- }
- if ws.frameReader == nil {
- goto again
- }
- }
- n, err = ws.frameReader.Read(msg)
- if err == io.EOF {
- if trailer := ws.frameReader.TrailerReader(); trailer != nil {
- io.Copy(ioutil.Discard, trailer)
- }
- ws.frameReader = nil
- goto again
- }
- return n, err
-}
-
-// Write implements the io.Writer interface:
-// it writes data as a frame to the WebSocket connection.
-func (ws *Conn) Write(msg []byte) (n int, err error) {
- ws.wio.Lock()
- defer ws.wio.Unlock()
- w, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType)
- if err != nil {
- return 0, err
- }
- n, err = w.Write(msg)
- w.Close()
- return n, err
-}
-
-// Close implements the io.Closer interface.
-func (ws *Conn) Close() error {
- err := ws.frameHandler.WriteClose(ws.defaultCloseStatus)
- err1 := ws.rwc.Close()
- if err != nil {
- return err
- }
- return err1
-}
-
-// IsClientConn reports whether ws is a client-side connection.
-func (ws *Conn) IsClientConn() bool { return ws.request == nil }
-
-// IsServerConn reports whether ws is a server-side connection.
-func (ws *Conn) IsServerConn() bool { return ws.request != nil }
-
-// LocalAddr returns the WebSocket Origin for the connection for client, or
-// the WebSocket location for server.
-func (ws *Conn) LocalAddr() net.Addr {
- if ws.IsClientConn() {
- return &Addr{ws.config.Origin}
- }
- return &Addr{ws.config.Location}
-}
-
-// RemoteAddr returns the WebSocket location for the connection for client, or
-// the Websocket Origin for server.
-func (ws *Conn) RemoteAddr() net.Addr {
- if ws.IsClientConn() {
- return &Addr{ws.config.Location}
- }
- return &Addr{ws.config.Origin}
-}
-
-var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn")
-
-// SetDeadline sets the connection's network read & write deadlines.
-func (ws *Conn) SetDeadline(t time.Time) error {
- if conn, ok := ws.rwc.(net.Conn); ok {
- return conn.SetDeadline(t)
- }
- return errSetDeadline
-}
-
-// SetReadDeadline sets the connection's network read deadline.
-func (ws *Conn) SetReadDeadline(t time.Time) error {
- if conn, ok := ws.rwc.(net.Conn); ok {
- return conn.SetReadDeadline(t)
- }
- return errSetDeadline
-}
-
-// SetWriteDeadline sets the connection's network write deadline.
-func (ws *Conn) SetWriteDeadline(t time.Time) error {
- if conn, ok := ws.rwc.(net.Conn); ok {
- return conn.SetWriteDeadline(t)
- }
- return errSetDeadline
-}
-
-// Config returns the WebSocket config.
-func (ws *Conn) Config() *Config { return ws.config }
-
-// Request returns the http request upgraded to the WebSocket.
-// It is nil for client side.
-func (ws *Conn) Request() *http.Request { return ws.request }
-
-// Codec represents a symmetric pair of functions that implement a codec.
-type Codec struct {
- Marshal func(v interface{}) (data []byte, payloadType byte, err error)
- Unmarshal func(data []byte, payloadType byte, v interface{}) (err error)
-}
-
-// Send sends v marshaled by cd.Marshal as single frame to ws.
-func (cd Codec) Send(ws *Conn, v interface{}) (err error) {
- data, payloadType, err := cd.Marshal(v)
- if err != nil {
- return err
- }
- ws.wio.Lock()
- defer ws.wio.Unlock()
- w, err := ws.frameWriterFactory.NewFrameWriter(payloadType)
- if err != nil {
- return err
- }
- _, err = w.Write(data)
- w.Close()
- return err
-}
-
-// Receive receives single frame from ws, unmarshaled by cd.Unmarshal and stores
-// in v. The whole frame payload is read to an in-memory buffer; max size of
-// payload is defined by ws.MaxPayloadBytes. If frame payload size exceeds
-// limit, ErrFrameTooLarge is returned; in this case frame is not read off wire
-// completely. The next call to Receive would read and discard leftover data of
-// previous oversized frame before processing next frame.
-func (cd Codec) Receive(ws *Conn, v interface{}) (err error) {
- ws.rio.Lock()
- defer ws.rio.Unlock()
- if ws.frameReader != nil {
- _, err = io.Copy(ioutil.Discard, ws.frameReader)
- if err != nil {
- return err
- }
- ws.frameReader = nil
- }
-again:
- frame, err := ws.frameReaderFactory.NewFrameReader()
- if err != nil {
- return err
- }
- frame, err = ws.frameHandler.HandleFrame(frame)
- if err != nil {
- return err
- }
- if frame == nil {
- goto again
- }
- maxPayloadBytes := ws.MaxPayloadBytes
- if maxPayloadBytes == 0 {
- maxPayloadBytes = DefaultMaxPayloadBytes
- }
- if hf, ok := frame.(*hybiFrameReader); ok && hf.header.Length > int64(maxPayloadBytes) {
- // payload size exceeds limit, no need to call Unmarshal
- //
- // set frameReader to current oversized frame so that
- // the next call to this function can drain leftover
- // data before processing the next frame
- ws.frameReader = frame
- return ErrFrameTooLarge
- }
- payloadType := frame.PayloadType()
- data, err := ioutil.ReadAll(frame)
- if err != nil {
- return err
- }
- return cd.Unmarshal(data, payloadType, v)
-}
-
-func marshal(v interface{}) (msg []byte, payloadType byte, err error) {
- switch data := v.(type) {
- case string:
- return []byte(data), TextFrame, nil
- case []byte:
- return data, BinaryFrame, nil
- }
- return nil, UnknownFrame, ErrNotSupported
-}
-
-func unmarshal(msg []byte, payloadType byte, v interface{}) (err error) {
- switch data := v.(type) {
- case *string:
- *data = string(msg)
- return nil
- case *[]byte:
- *data = msg
- return nil
- }
- return ErrNotSupported
-}
-
-/*
-Message is a codec to send/receive text/binary data in a frame on WebSocket connection.
-To send/receive text frame, use string type.
-To send/receive binary frame, use []byte type.
-
-Trivial usage:
-
- import "websocket"
-
- // receive text frame
- var message string
- websocket.Message.Receive(ws, &message)
-
- // send text frame
- message = "hello"
- websocket.Message.Send(ws, message)
-
- // receive binary frame
- var data []byte
- websocket.Message.Receive(ws, &data)
-
- // send binary frame
- data = []byte{0, 1, 2}
- websocket.Message.Send(ws, data)
-
-*/
-var Message = Codec{marshal, unmarshal}
-
-func jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) {
- msg, err = json.Marshal(v)
- return msg, TextFrame, err
-}
-
-func jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) {
- return json.Unmarshal(msg, v)
-}
-
-/*
-JSON is a codec to send/receive JSON data in a frame from a WebSocket connection.
-
-Trivial usage:
-
- import "websocket"
-
- type T struct {
- Msg string
- Count int
- }
-
- // receive JSON type T
- var data T
- websocket.JSON.Receive(ws, &data)
-
- // send JSON type T
- websocket.JSON.Send(ws, data)
-*/
-var JSON = Codec{jsonMarshal, jsonUnmarshal}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/modules.txt b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/modules.txt
index 6551554790e..319b30b771b 100644
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/modules.txt
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/modules.txt
@@ -1,30 +1,15 @@
-# github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee
-github.com/gobwas/httphead
-# github.com/gobwas/pool v0.2.0
-github.com/gobwas/pool
-github.com/gobwas/pool/internal/pmath
-github.com/gobwas/pool/pbufio
# github.com/gobwas/ws v1.0.3
## explicit
github.com/gobwas/ws
# github.com/gorilla/websocket v1.4.2
## explicit
github.com/gorilla/websocket
-# github.com/klauspost/compress v1.10.3
-github.com/klauspost/compress/flate
-# github.com/sacOO7/go-logger v0.0.0-20180719173527-9ac9add5a50d
-## explicit
-github.com/sacOO7/go-logger
# github.com/sacOO7/gowebsocket v0.0.0-20180719182212-1436bb906a4e
## explicit
github.com/sacOO7/gowebsocket
# golang.org/x/net v0.0.0-20200421231249-e086a090c8fd
## explicit
-golang.org/x/net/websocket
+golang.org/x/net
# nhooyr.io/websocket v1.8.5
## explicit
nhooyr.io/websocket
-nhooyr.io/websocket/internal/bpool
-nhooyr.io/websocket/internal/errd
-nhooyr.io/websocket/internal/wsjs
-nhooyr.io/websocket/internal/xsync
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/.gitignore b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/.gitignore
deleted file mode 100644
index 6961e5c894a..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-websocket.test
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/.travis.yml b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/.travis.yml
deleted file mode 100644
index 41d3c201468..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/.travis.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-language: go
-go: 1.x
-dist: bionic
-
-env:
- global:
- - SHFMT_URL=https://github.com/mvdan/sh/releases/download/v3.0.1/shfmt_v3.0.1_linux_amd64
- - GOFLAGS="-mod=readonly"
-
-jobs:
- include:
- - name: Format
- before_script:
- - sudo apt-get install -y npm
- - sudo npm install -g prettier
- - sudo curl -L "$SHFMT_URL" > /usr/local/bin/shfmt && sudo chmod +x /usr/local/bin/shfmt
- - go get golang.org/x/tools/cmd/stringer
- - go get golang.org/x/tools/cmd/goimports
- script: make -j16 fmt
- - name: Lint
- before_script:
- - sudo apt-get install -y shellcheck
- - go get golang.org/x/lint/golint
- script: make -j16 lint
- - name: Test
- before_script:
- - sudo apt-get install -y chromium-browser
- - go get github.com/agnivade/wasmbrowsertest
- - go get github.com/mattn/goveralls
- script: make -j16 test
-
-addons:
- apt:
- update: true
-
-cache:
- npm: true
- directories:
- - ~/.cache
- - ~/gopath/pkg
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/LICENSE.txt b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/LICENSE
similarity index 100%
rename from ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/LICENSE.txt
rename to ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/LICENSE
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/Makefile b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/Makefile
deleted file mode 100644
index f9f31c49f1c..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-all: fmt lint test
-
-.SILENT:
-
-include ci/fmt.mk
-include ci/lint.mk
-include ci/test.mk
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/README.md b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/README.md
deleted file mode 100644
index 14c392935e1..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/README.md
+++ /dev/null
@@ -1,132 +0,0 @@
-# websocket
-
-[](https://pkg.go.dev/nhooyr.io/websocket)
-
-websocket is a minimal and idiomatic WebSocket library for Go.
-
-## Install
-
-```bash
-go get nhooyr.io/websocket
-```
-
-## Features
-
-- Minimal and idiomatic API
-- First class [context.Context](https://blog.golang.org/context) support
-- Fully passes the WebSocket [autobahn-testsuite](https://github.com/crossbario/autobahn-testsuite)
-- Thorough tests with [90% coverage](https://coveralls.io/github/nhooyr/websocket)
-- [Single dependency](https://pkg.go.dev/nhooyr.io/websocket?tab=imports)
-- JSON and protobuf helpers in the [wsjson](https://pkg.go.dev/nhooyr.io/websocket/wsjson) and [wspb](https://pkg.go.dev/nhooyr.io/websocket/wspb) subpackages
-- Zero alloc reads and writes
-- Concurrent writes
-- [Close handshake](https://pkg.go.dev/nhooyr.io/websocket#Conn.Close)
-- [net.Conn](https://pkg.go.dev/nhooyr.io/websocket#NetConn) wrapper
-- [Ping pong](https://pkg.go.dev/nhooyr.io/websocket#Conn.Ping) API
-- [RFC 7692](https://tools.ietf.org/html/rfc7692) permessage-deflate compression
-- Compile to [Wasm](https://pkg.go.dev/nhooyr.io/websocket#hdr-Wasm)
-
-## Roadmap
-
-- [ ] HTTP/2 [#4](https://github.com/nhooyr/websocket/issues/4)
-
-## Examples
-
-For a production quality example that demonstrates the complete API, see the
-[echo example](./examples/echo).
-
-For a full stack example, see the [chat example](./examples/chat).
-
-### Server
-
-```go
-http.HandlerFunc(func (w http.ResponseWriter, r *http.Request) {
- c, err := websocket.Accept(w, r, nil)
- if err != nil {
- // ...
- }
- defer c.Close(websocket.StatusInternalError, "the sky is falling")
-
- ctx, cancel := context.WithTimeout(r.Context(), time.Second*10)
- defer cancel()
-
- var v interface{}
- err = wsjson.Read(ctx, c, &v)
- if err != nil {
- // ...
- }
-
- log.Printf("received: %v", v)
-
- c.Close(websocket.StatusNormalClosure, "")
-})
-```
-
-### Client
-
-```go
-ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
-defer cancel()
-
-c, _, err := websocket.Dial(ctx, "ws://localhost:8080", nil)
-if err != nil {
- // ...
-}
-defer c.Close(websocket.StatusInternalError, "the sky is falling")
-
-err = wsjson.Write(ctx, c, "hi")
-if err != nil {
- // ...
-}
-
-c.Close(websocket.StatusNormalClosure, "")
-```
-
-## Comparison
-
-### gorilla/websocket
-
-Advantages of [gorilla/websocket](https://github.com/gorilla/websocket):
-
-- Mature and widely used
-- [Prepared writes](https://pkg.go.dev/github.com/gorilla/websocket#PreparedMessage)
-- Configurable [buffer sizes](https://pkg.go.dev/github.com/gorilla/websocket#hdr-Buffers)
-
-Advantages of nhooyr.io/websocket:
-
-- Minimal and idiomatic API
- - Compare godoc of [nhooyr.io/websocket](https://pkg.go.dev/nhooyr.io/websocket) with [gorilla/websocket](https://pkg.go.dev/github.com/gorilla/websocket) side by side.
-- [net.Conn](https://pkg.go.dev/nhooyr.io/websocket#NetConn) wrapper
-- Zero alloc reads and writes ([gorilla/websocket#535](https://github.com/gorilla/websocket/issues/535))
-- Full [context.Context](https://blog.golang.org/context) support
-- Dial uses [net/http.Client](https://golang.org/pkg/net/http/#Client)
- - Will enable easy HTTP/2 support in the future
- - Gorilla writes directly to a net.Conn and so duplicates features of net/http.Client.
-- Concurrent writes
-- Close handshake ([gorilla/websocket#448](https://github.com/gorilla/websocket/issues/448))
-- Idiomatic [ping pong](https://pkg.go.dev/nhooyr.io/websocket#Conn.Ping) API
- - Gorilla requires registering a pong callback before sending a Ping
-- Can target Wasm ([gorilla/websocket#432](https://github.com/gorilla/websocket/issues/432))
-- Transparent message buffer reuse with [wsjson](https://pkg.go.dev/nhooyr.io/websocket/wsjson) and [wspb](https://pkg.go.dev/nhooyr.io/websocket/wspb) subpackages
-- [1.75x](https://github.com/nhooyr/websocket/releases/tag/v1.7.4) faster WebSocket masking implementation in pure Go
- - Gorilla's implementation is slower and uses [unsafe](https://golang.org/pkg/unsafe/).
-- Full [permessage-deflate](https://tools.ietf.org/html/rfc7692) compression extension support
- - Gorilla only supports no context takeover mode
- - We use a vendored [klauspost/compress](https://github.com/klauspost/compress) for much lower memory usage ([gorilla/websocket#203](https://github.com/gorilla/websocket/issues/203))
-- [CloseRead](https://pkg.go.dev/nhooyr.io/websocket#Conn.CloseRead) helper ([gorilla/websocket#492](https://github.com/gorilla/websocket/issues/492))
-- Actively maintained ([gorilla/websocket#370](https://github.com/gorilla/websocket/issues/370))
-
-#### golang.org/x/net/websocket
-
-[golang.org/x/net/websocket](https://pkg.go.dev/golang.org/x/net/websocket) is deprecated.
-See [golang/go/issues/18152](https://github.com/golang/go/issues/18152).
-
-The [net.Conn](https://pkg.go.dev/nhooyr.io/websocket#NetConn) can help in transitioning
-to nhooyr.io/websocket.
-
-#### gobwas/ws
-
-[gobwas/ws](https://github.com/gobwas/ws) has an extremely flexible API that allows it to be used
-in an event driven style for performance. See the author's [blog post](https://medium.freecodecamp.org/million-websockets-and-go-cc58418460bb).
-
-However when writing idiomatic Go, nhooyr.io/websocket will be faster and easier to use.
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/accept.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/accept.go
deleted file mode 100644
index 6bed54da028..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/accept.go
+++ /dev/null
@@ -1,365 +0,0 @@
-// +build !js
-
-package websocket
-
-import (
- "bytes"
- "crypto/sha1"
- "encoding/base64"
- "errors"
- "fmt"
- "io"
- "log"
- "net/http"
- "net/textproto"
- "net/url"
- "path/filepath"
- "strings"
-
- "nhooyr.io/websocket/internal/errd"
-)
-
-// AcceptOptions represents Accept's options.
-type AcceptOptions struct {
- // Subprotocols lists the WebSocket subprotocols that Accept will negotiate with the client.
- // The empty subprotocol will always be negotiated as per RFC 6455. If you would like to
- // reject it, close the connection when c.Subprotocol() == "".
- Subprotocols []string
-
- // InsecureSkipVerify is used to disable Accept's origin verification behaviour.
- //
- // Deprecated: Use OriginPatterns with a match all pattern of * instead to control
- // origin authorization yourself.
- InsecureSkipVerify bool
-
- // OriginPatterns lists the host patterns for authorized origins.
- // The request host is always authorized.
- // Use this to enable cross origin WebSockets.
- //
- // i.e javascript running on example.com wants to access a WebSocket server at chat.example.com.
- // In such a case, example.com is the origin and chat.example.com is the request host.
- // One would set this field to []string{"example.com"} to authorize example.com to connect.
- //
- // Each pattern is matched case insensitively against the request origin host
- // with filepath.Match.
- // See https://golang.org/pkg/path/filepath/#Match
- //
- // Please ensure you understand the ramifications of enabling this.
- // If used incorrectly your WebSocket server will be open to CSRF attacks.
- OriginPatterns []string
-
- // CompressionMode controls the compression mode.
- // Defaults to CompressionNoContextTakeover.
- //
- // See docs on CompressionMode for details.
- CompressionMode CompressionMode
-
- // CompressionThreshold controls the minimum size of a message before compression is applied.
- //
- // Defaults to 512 bytes for CompressionNoContextTakeover and 128 bytes
- // for CompressionContextTakeover.
- CompressionThreshold int
-}
-
-// Accept accepts a WebSocket handshake from a client and upgrades the
-// the connection to a WebSocket.
-//
-// Accept will not allow cross origin requests by default.
-// See the InsecureSkipVerify option to allow cross origin requests.
-//
-// Accept will write a response to w on all errors.
-func Accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (*Conn, error) {
- return accept(w, r, opts)
-}
-
-func accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (_ *Conn, err error) {
- defer errd.Wrap(&err, "failed to accept WebSocket connection")
-
- if opts == nil {
- opts = &AcceptOptions{}
- }
- opts = &*opts
-
- errCode, err := verifyClientRequest(w, r)
- if err != nil {
- http.Error(w, err.Error(), errCode)
- return nil, err
- }
-
- if !opts.InsecureSkipVerify {
- err = authenticateOrigin(r, opts.OriginPatterns)
- if err != nil {
- if errors.Is(err, filepath.ErrBadPattern) {
- log.Printf("websocket: %v", err)
- err = errors.New(http.StatusText(http.StatusForbidden))
- }
- http.Error(w, err.Error(), http.StatusForbidden)
- return nil, err
- }
- }
-
- hj, ok := w.(http.Hijacker)
- if !ok {
- err = errors.New("http.ResponseWriter does not implement http.Hijacker")
- http.Error(w, http.StatusText(http.StatusNotImplemented), http.StatusNotImplemented)
- return nil, err
- }
-
- w.Header().Set("Upgrade", "websocket")
- w.Header().Set("Connection", "Upgrade")
-
- key := r.Header.Get("Sec-WebSocket-Key")
- w.Header().Set("Sec-WebSocket-Accept", secWebSocketAccept(key))
-
- subproto := selectSubprotocol(r, opts.Subprotocols)
- if subproto != "" {
- w.Header().Set("Sec-WebSocket-Protocol", subproto)
- }
-
- copts, err := acceptCompression(r, w, opts.CompressionMode)
- if err != nil {
- return nil, err
- }
-
- w.WriteHeader(http.StatusSwitchingProtocols)
-
- netConn, brw, err := hj.Hijack()
- if err != nil {
- err = fmt.Errorf("failed to hijack connection: %w", err)
- http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
- return nil, err
- }
-
- // https://github.com/golang/go/issues/32314
- b, _ := brw.Reader.Peek(brw.Reader.Buffered())
- brw.Reader.Reset(io.MultiReader(bytes.NewReader(b), netConn))
-
- return newConn(connConfig{
- subprotocol: w.Header().Get("Sec-WebSocket-Protocol"),
- rwc: netConn,
- client: false,
- copts: copts,
- flateThreshold: opts.CompressionThreshold,
-
- br: brw.Reader,
- bw: brw.Writer,
- }), nil
-}
-
-func verifyClientRequest(w http.ResponseWriter, r *http.Request) (errCode int, _ error) {
- if !r.ProtoAtLeast(1, 1) {
- return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: handshake request must be at least HTTP/1.1: %q", r.Proto)
- }
-
- if !headerContainsToken(r.Header, "Connection", "Upgrade") {
- w.Header().Set("Connection", "Upgrade")
- w.Header().Set("Upgrade", "websocket")
- return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: Connection header %q does not contain Upgrade", r.Header.Get("Connection"))
- }
-
- if !headerContainsToken(r.Header, "Upgrade", "websocket") {
- w.Header().Set("Connection", "Upgrade")
- w.Header().Set("Upgrade", "websocket")
- return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: Upgrade header %q does not contain websocket", r.Header.Get("Upgrade"))
- }
-
- if r.Method != "GET" {
- return http.StatusMethodNotAllowed, fmt.Errorf("WebSocket protocol violation: handshake request method is not GET but %q", r.Method)
- }
-
- if r.Header.Get("Sec-WebSocket-Version") != "13" {
- w.Header().Set("Sec-WebSocket-Version", "13")
- return http.StatusBadRequest, fmt.Errorf("unsupported WebSocket protocol version (only 13 is supported): %q", r.Header.Get("Sec-WebSocket-Version"))
- }
-
- if r.Header.Get("Sec-WebSocket-Key") == "" {
- return http.StatusBadRequest, errors.New("WebSocket protocol violation: missing Sec-WebSocket-Key")
- }
-
- return 0, nil
-}
-
-func authenticateOrigin(r *http.Request, originHosts []string) error {
- origin := r.Header.Get("Origin")
- if origin == "" {
- return nil
- }
-
- u, err := url.Parse(origin)
- if err != nil {
- return fmt.Errorf("failed to parse Origin header %q: %w", origin, err)
- }
-
- if strings.EqualFold(r.Host, u.Host) {
- return nil
- }
-
- for _, hostPattern := range originHosts {
- matched, err := match(hostPattern, u.Host)
- if err != nil {
- return fmt.Errorf("failed to parse filepath pattern %q: %w", hostPattern, err)
- }
- if matched {
- return nil
- }
- }
- return fmt.Errorf("request Origin %q is not authorized for Host %q", origin, r.Host)
-}
-
-func match(pattern, s string) (bool, error) {
- return filepath.Match(strings.ToLower(pattern), strings.ToLower(s))
-}
-
-func selectSubprotocol(r *http.Request, subprotocols []string) string {
- cps := headerTokens(r.Header, "Sec-WebSocket-Protocol")
- for _, sp := range subprotocols {
- for _, cp := range cps {
- if strings.EqualFold(sp, cp) {
- return cp
- }
- }
- }
- return ""
-}
-
-func acceptCompression(r *http.Request, w http.ResponseWriter, mode CompressionMode) (*compressionOptions, error) {
- if mode == CompressionDisabled {
- return nil, nil
- }
-
- for _, ext := range websocketExtensions(r.Header) {
- switch ext.name {
- case "permessage-deflate":
- return acceptDeflate(w, ext, mode)
- // Disabled for now, see https://github.com/nhooyr/websocket/issues/218
- // case "x-webkit-deflate-frame":
- // return acceptWebkitDeflate(w, ext, mode)
- }
- }
- return nil, nil
-}
-
-func acceptDeflate(w http.ResponseWriter, ext websocketExtension, mode CompressionMode) (*compressionOptions, error) {
- copts := mode.opts()
-
- for _, p := range ext.params {
- switch p {
- case "client_no_context_takeover":
- copts.clientNoContextTakeover = true
- continue
- case "server_no_context_takeover":
- copts.serverNoContextTakeover = true
- continue
- }
-
- if strings.HasPrefix(p, "client_max_window_bits") {
- // We cannot adjust the read sliding window so cannot make use of this.
- continue
- }
-
- err := fmt.Errorf("unsupported permessage-deflate parameter: %q", p)
- http.Error(w, err.Error(), http.StatusBadRequest)
- return nil, err
- }
-
- copts.setHeader(w.Header())
-
- return copts, nil
-}
-
-func acceptWebkitDeflate(w http.ResponseWriter, ext websocketExtension, mode CompressionMode) (*compressionOptions, error) {
- copts := mode.opts()
- // The peer must explicitly request it.
- copts.serverNoContextTakeover = false
-
- for _, p := range ext.params {
- if p == "no_context_takeover" {
- copts.serverNoContextTakeover = true
- continue
- }
-
- // We explicitly fail on x-webkit-deflate-frame's max_window_bits parameter instead
- // of ignoring it as the draft spec is unclear. It says the server can ignore it
- // but the server has no way of signalling to the client it was ignored as the parameters
- // are set one way.
- // Thus us ignoring it would make the client think we understood it which would cause issues.
- // See https://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate-06#section-4.1
- //
- // Either way, we're only implementing this for webkit which never sends the max_window_bits
- // parameter so we don't need to worry about it.
- err := fmt.Errorf("unsupported x-webkit-deflate-frame parameter: %q", p)
- http.Error(w, err.Error(), http.StatusBadRequest)
- return nil, err
- }
-
- s := "x-webkit-deflate-frame"
- if copts.clientNoContextTakeover {
- s += "; no_context_takeover"
- }
- w.Header().Set("Sec-WebSocket-Extensions", s)
-
- return copts, nil
-}
-
-func headerContainsToken(h http.Header, key, token string) bool {
- token = strings.ToLower(token)
-
- for _, t := range headerTokens(h, key) {
- if t == token {
- return true
- }
- }
- return false
-}
-
-type websocketExtension struct {
- name string
- params []string
-}
-
-func websocketExtensions(h http.Header) []websocketExtension {
- var exts []websocketExtension
- extStrs := headerTokens(h, "Sec-WebSocket-Extensions")
- for _, extStr := range extStrs {
- if extStr == "" {
- continue
- }
-
- vals := strings.Split(extStr, ";")
- for i := range vals {
- vals[i] = strings.TrimSpace(vals[i])
- }
-
- e := websocketExtension{
- name: vals[0],
- params: vals[1:],
- }
-
- exts = append(exts, e)
- }
- return exts
-}
-
-func headerTokens(h http.Header, key string) []string {
- key = textproto.CanonicalMIMEHeaderKey(key)
- var tokens []string
- for _, v := range h[key] {
- v = strings.TrimSpace(v)
- for _, t := range strings.Split(v, ",") {
- t = strings.ToLower(t)
- t = strings.TrimSpace(t)
- tokens = append(tokens, t)
- }
- }
- return tokens
-}
-
-var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
-
-func secWebSocketAccept(secWebSocketKey string) string {
- h := sha1.New()
- h.Write([]byte(secWebSocketKey))
- h.Write(keyGUID)
-
- return base64.StdEncoding.EncodeToString(h.Sum(nil))
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/accept_js.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/accept_js.go
deleted file mode 100644
index daad4b79fec..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/accept_js.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package websocket
-
-import (
- "errors"
- "net/http"
-)
-
-// AcceptOptions represents Accept's options.
-type AcceptOptions struct {
- Subprotocols []string
- InsecureSkipVerify bool
- OriginPatterns []string
- CompressionMode CompressionMode
- CompressionThreshold int
-}
-
-// Accept is stubbed out for Wasm.
-func Accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (*Conn, error) {
- return nil, errors.New("unimplemented")
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/close.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/close.go
deleted file mode 100644
index 7cbc19e9def..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/close.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package websocket
-
-import (
- "errors"
- "fmt"
-)
-
-// StatusCode represents a WebSocket status code.
-// https://tools.ietf.org/html/rfc6455#section-7.4
-type StatusCode int
-
-// https://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
-//
-// These are only the status codes defined by the protocol.
-//
-// You can define custom codes in the 3000-4999 range.
-// The 3000-3999 range is reserved for use by libraries, frameworks and applications.
-// The 4000-4999 range is reserved for private use.
-const (
- StatusNormalClosure StatusCode = 1000
- StatusGoingAway StatusCode = 1001
- StatusProtocolError StatusCode = 1002
- StatusUnsupportedData StatusCode = 1003
-
- // 1004 is reserved and so unexported.
- statusReserved StatusCode = 1004
-
- // StatusNoStatusRcvd cannot be sent in a close message.
- // It is reserved for when a close message is received without
- // a status code.
- StatusNoStatusRcvd StatusCode = 1005
-
- // StatusAbnormalClosure is exported for use only with Wasm.
- // In non Wasm Go, the returned error will indicate whether the
- // connection was closed abnormally.
- StatusAbnormalClosure StatusCode = 1006
-
- StatusInvalidFramePayloadData StatusCode = 1007
- StatusPolicyViolation StatusCode = 1008
- StatusMessageTooBig StatusCode = 1009
- StatusMandatoryExtension StatusCode = 1010
- StatusInternalError StatusCode = 1011
- StatusServiceRestart StatusCode = 1012
- StatusTryAgainLater StatusCode = 1013
- StatusBadGateway StatusCode = 1014
-
- // StatusTLSHandshake is only exported for use with Wasm.
- // In non Wasm Go, the returned error will indicate whether there was
- // a TLS handshake failure.
- StatusTLSHandshake StatusCode = 1015
-)
-
-// CloseError is returned when the connection is closed with a status and reason.
-//
-// Use Go 1.13's errors.As to check for this error.
-// Also see the CloseStatus helper.
-type CloseError struct {
- Code StatusCode
- Reason string
-}
-
-func (ce CloseError) Error() string {
- return fmt.Sprintf("status = %v and reason = %q", ce.Code, ce.Reason)
-}
-
-// CloseStatus is a convenience wrapper around Go 1.13's errors.As to grab
-// the status code from a CloseError.
-//
-// -1 will be returned if the passed error is nil or not a CloseError.
-func CloseStatus(err error) StatusCode {
- var ce CloseError
- if errors.As(err, &ce) {
- return ce.Code
- }
- return -1
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/close_notjs.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/close_notjs.go
deleted file mode 100644
index 4251311d2e6..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/close_notjs.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// +build !js
-
-package websocket
-
-import (
- "context"
- "encoding/binary"
- "errors"
- "fmt"
- "log"
- "time"
-
- "nhooyr.io/websocket/internal/errd"
-)
-
-// Close performs the WebSocket close handshake with the given status code and reason.
-//
-// It will write a WebSocket close frame with a timeout of 5s and then wait 5s for
-// the peer to send a close frame.
-// All data messages received from the peer during the close handshake will be discarded.
-//
-// The connection can only be closed once. Additional calls to Close
-// are no-ops.
-//
-// The maximum length of reason must be 125 bytes. Avoid
-// sending a dynamic reason.
-//
-// Close will unblock all goroutines interacting with the connection once
-// complete.
-func (c *Conn) Close(code StatusCode, reason string) error {
- return c.closeHandshake(code, reason)
-}
-
-func (c *Conn) closeHandshake(code StatusCode, reason string) (err error) {
- defer errd.Wrap(&err, "failed to close WebSocket")
-
- writeErr := c.writeClose(code, reason)
- closeHandshakeErr := c.waitCloseHandshake()
-
- if writeErr != nil {
- return writeErr
- }
-
- if CloseStatus(closeHandshakeErr) == -1 {
- return closeHandshakeErr
- }
-
- return nil
-}
-
-var errAlreadyWroteClose = errors.New("already wrote close")
-
-func (c *Conn) writeClose(code StatusCode, reason string) error {
- c.closeMu.Lock()
- wroteClose := c.wroteClose
- c.wroteClose = true
- c.closeMu.Unlock()
- if wroteClose {
- return errAlreadyWroteClose
- }
-
- ce := CloseError{
- Code: code,
- Reason: reason,
- }
-
- var p []byte
- var marshalErr error
- if ce.Code != StatusNoStatusRcvd {
- p, marshalErr = ce.bytes()
- if marshalErr != nil {
- log.Printf("websocket: %v", marshalErr)
- }
- }
-
- writeErr := c.writeControl(context.Background(), opClose, p)
- if CloseStatus(writeErr) != -1 {
- // Not a real error if it's due to a close frame being received.
- writeErr = nil
- }
-
- // We do this after in case there was an error writing the close frame.
- c.setCloseErr(fmt.Errorf("sent close frame: %w", ce))
-
- if marshalErr != nil {
- return marshalErr
- }
- return writeErr
-}
-
-func (c *Conn) waitCloseHandshake() error {
- defer c.close(nil)
-
- ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
- defer cancel()
-
- err := c.readMu.lock(ctx)
- if err != nil {
- return err
- }
- defer c.readMu.unlock()
-
- if c.readCloseFrameErr != nil {
- return c.readCloseFrameErr
- }
-
- for {
- h, err := c.readLoop(ctx)
- if err != nil {
- return err
- }
-
- for i := int64(0); i < h.payloadLength; i++ {
- _, err := c.br.ReadByte()
- if err != nil {
- return err
- }
- }
- }
-}
-
-func parseClosePayload(p []byte) (CloseError, error) {
- if len(p) == 0 {
- return CloseError{
- Code: StatusNoStatusRcvd,
- }, nil
- }
-
- if len(p) < 2 {
- return CloseError{}, fmt.Errorf("close payload %q too small, cannot even contain the 2 byte status code", p)
- }
-
- ce := CloseError{
- Code: StatusCode(binary.BigEndian.Uint16(p)),
- Reason: string(p[2:]),
- }
-
- if !validWireCloseCode(ce.Code) {
- return CloseError{}, fmt.Errorf("invalid status code %v", ce.Code)
- }
-
- return ce, nil
-}
-
-// See http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
-// and https://tools.ietf.org/html/rfc6455#section-7.4.1
-func validWireCloseCode(code StatusCode) bool {
- switch code {
- case statusReserved, StatusNoStatusRcvd, StatusAbnormalClosure, StatusTLSHandshake:
- return false
- }
-
- if code >= StatusNormalClosure && code <= StatusBadGateway {
- return true
- }
- if code >= 3000 && code <= 4999 {
- return true
- }
-
- return false
-}
-
-func (ce CloseError) bytes() ([]byte, error) {
- p, err := ce.bytesErr()
- if err != nil {
- err = fmt.Errorf("failed to marshal close frame: %w", err)
- ce = CloseError{
- Code: StatusInternalError,
- }
- p, _ = ce.bytesErr()
- }
- return p, err
-}
-
-const maxCloseReason = maxControlPayload - 2
-
-func (ce CloseError) bytesErr() ([]byte, error) {
- if len(ce.Reason) > maxCloseReason {
- return nil, fmt.Errorf("reason string max is %v but got %q with length %v", maxCloseReason, ce.Reason, len(ce.Reason))
- }
-
- if !validWireCloseCode(ce.Code) {
- return nil, fmt.Errorf("status code %v cannot be set", ce.Code)
- }
-
- buf := make([]byte, 2+len(ce.Reason))
- binary.BigEndian.PutUint16(buf, uint16(ce.Code))
- copy(buf[2:], ce.Reason)
- return buf, nil
-}
-
-func (c *Conn) setCloseErr(err error) {
- c.closeMu.Lock()
- c.setCloseErrLocked(err)
- c.closeMu.Unlock()
-}
-
-func (c *Conn) setCloseErrLocked(err error) {
- if c.closeErr == nil {
- c.closeErr = fmt.Errorf("WebSocket closed: %w", err)
- }
-}
-
-func (c *Conn) isClosed() bool {
- select {
- case <-c.closed:
- return true
- default:
- return false
- }
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/compress.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/compress.go
deleted file mode 100644
index 80b46d1c1d3..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/compress.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package websocket
-
-// CompressionMode represents the modes available to the deflate extension.
-// See https://tools.ietf.org/html/rfc7692
-//
-// A compatibility layer is implemented for the older deflate-frame extension used
-// by safari. See https://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate-06
-// It will work the same in every way except that we cannot signal to the peer we
-// want to use no context takeover on our side, we can only signal that they should.
-// It is however currently disabled due to Safari bugs. See https://github.com/nhooyr/websocket/issues/218
-type CompressionMode int
-
-const (
- // CompressionNoContextTakeover grabs a new flate.Reader and flate.Writer as needed
- // for every message. This applies to both server and client side.
- //
- // This means less efficient compression as the sliding window from previous messages
- // will not be used but the memory overhead will be lower if the connections
- // are long lived and seldom used.
- //
- // The message will only be compressed if greater than 512 bytes.
- CompressionNoContextTakeover CompressionMode = iota
-
- // CompressionContextTakeover uses a flate.Reader and flate.Writer per connection.
- // This enables reusing the sliding window from previous messages.
- // As most WebSocket protocols are repetitive, this can be very efficient.
- // It carries an overhead of 8 kB for every connection compared to CompressionNoContextTakeover.
- //
- // If the peer negotiates NoContextTakeover on the client or server side, it will be
- // used instead as this is required by the RFC.
- CompressionContextTakeover
-
- // CompressionDisabled disables the deflate extension.
- //
- // Use this if you are using a predominantly binary protocol with very
- // little duplication in between messages or CPU and memory are more
- // important than bandwidth.
- CompressionDisabled
-)
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/compress_notjs.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/compress_notjs.go
deleted file mode 100644
index 809a272c3d1..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/compress_notjs.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// +build !js
-
-package websocket
-
-import (
- "io"
- "net/http"
- "sync"
-
- "github.com/klauspost/compress/flate"
-)
-
-func (m CompressionMode) opts() *compressionOptions {
- return &compressionOptions{
- clientNoContextTakeover: m == CompressionNoContextTakeover,
- serverNoContextTakeover: m == CompressionNoContextTakeover,
- }
-}
-
-type compressionOptions struct {
- clientNoContextTakeover bool
- serverNoContextTakeover bool
-}
-
-func (copts *compressionOptions) setHeader(h http.Header) {
- s := "permessage-deflate"
- if copts.clientNoContextTakeover {
- s += "; client_no_context_takeover"
- }
- if copts.serverNoContextTakeover {
- s += "; server_no_context_takeover"
- }
- h.Set("Sec-WebSocket-Extensions", s)
-}
-
-// These bytes are required to get flate.Reader to return.
-// They are removed when sending to avoid the overhead as
-// WebSocket framing tell's when the message has ended but then
-// we need to add them back otherwise flate.Reader keeps
-// trying to return more bytes.
-const deflateMessageTail = "\x00\x00\xff\xff"
-
-type trimLastFourBytesWriter struct {
- w io.Writer
- tail []byte
-}
-
-func (tw *trimLastFourBytesWriter) reset() {
- if tw != nil && tw.tail != nil {
- tw.tail = tw.tail[:0]
- }
-}
-
-func (tw *trimLastFourBytesWriter) Write(p []byte) (int, error) {
- if tw.tail == nil {
- tw.tail = make([]byte, 0, 4)
- }
-
- extra := len(tw.tail) + len(p) - 4
-
- if extra <= 0 {
- tw.tail = append(tw.tail, p...)
- return len(p), nil
- }
-
- // Now we need to write as many extra bytes as we can from the previous tail.
- if extra > len(tw.tail) {
- extra = len(tw.tail)
- }
- if extra > 0 {
- _, err := tw.w.Write(tw.tail[:extra])
- if err != nil {
- return 0, err
- }
-
- // Shift remaining bytes in tail over.
- n := copy(tw.tail, tw.tail[extra:])
- tw.tail = tw.tail[:n]
- }
-
- // If p is less than or equal to 4 bytes,
- // all of it is is part of the tail.
- if len(p) <= 4 {
- tw.tail = append(tw.tail, p...)
- return len(p), nil
- }
-
- // Otherwise, only the last 4 bytes are.
- tw.tail = append(tw.tail, p[len(p)-4:]...)
-
- p = p[:len(p)-4]
- n, err := tw.w.Write(p)
- return n + 4, err
-}
-
-var flateReaderPool sync.Pool
-
-func getFlateReader(r io.Reader, dict []byte) io.Reader {
- fr, ok := flateReaderPool.Get().(io.Reader)
- if !ok {
- return flate.NewReaderDict(r, dict)
- }
- fr.(flate.Resetter).Reset(r, dict)
- return fr
-}
-
-func putFlateReader(fr io.Reader) {
- flateReaderPool.Put(fr)
-}
-
-type slidingWindow struct {
- buf []byte
-}
-
-var swPoolMu sync.RWMutex
-var swPool = map[int]*sync.Pool{}
-
-func slidingWindowPool(n int) *sync.Pool {
- swPoolMu.RLock()
- p, ok := swPool[n]
- swPoolMu.RUnlock()
- if ok {
- return p
- }
-
- p = &sync.Pool{}
-
- swPoolMu.Lock()
- swPool[n] = p
- swPoolMu.Unlock()
-
- return p
-}
-
-func (sw *slidingWindow) init(n int) {
- if sw.buf != nil {
- return
- }
-
- if n == 0 {
- n = 32768
- }
-
- p := slidingWindowPool(n)
- buf, ok := p.Get().([]byte)
- if ok {
- sw.buf = buf[:0]
- } else {
- sw.buf = make([]byte, 0, n)
- }
-}
-
-func (sw *slidingWindow) close() {
- if sw.buf == nil {
- return
- }
-
- swPoolMu.Lock()
- swPool[cap(sw.buf)].Put(sw.buf)
- swPoolMu.Unlock()
- sw.buf = nil
-}
-
-func (sw *slidingWindow) write(p []byte) {
- if len(p) >= cap(sw.buf) {
- sw.buf = sw.buf[:cap(sw.buf)]
- p = p[len(p)-cap(sw.buf):]
- copy(sw.buf, p)
- return
- }
-
- left := cap(sw.buf) - len(sw.buf)
- if left < len(p) {
- // We need to shift spaceNeeded bytes from the end to make room for p at the end.
- spaceNeeded := len(p) - left
- copy(sw.buf, sw.buf[spaceNeeded:])
- sw.buf = sw.buf[:len(sw.buf)-spaceNeeded]
- }
-
- sw.buf = append(sw.buf, p...)
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/conn.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/conn.go
deleted file mode 100644
index a41808be3fa..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/conn.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package websocket
-
-// MessageType represents the type of a WebSocket message.
-// See https://tools.ietf.org/html/rfc6455#section-5.6
-type MessageType int
-
-// MessageType constants.
-const (
- // MessageText is for UTF-8 encoded text messages like JSON.
- MessageText MessageType = iota + 1
- // MessageBinary is for binary messages like protobufs.
- MessageBinary
-)
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/conn_notjs.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/conn_notjs.go
deleted file mode 100644
index bb2eb22f7db..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/conn_notjs.go
+++ /dev/null
@@ -1,265 +0,0 @@
-// +build !js
-
-package websocket
-
-import (
- "bufio"
- "context"
- "errors"
- "fmt"
- "io"
- "runtime"
- "strconv"
- "sync"
- "sync/atomic"
-)
-
-// Conn represents a WebSocket connection.
-// All methods may be called concurrently except for Reader and Read.
-//
-// You must always read from the connection. Otherwise control
-// frames will not be handled. See Reader and CloseRead.
-//
-// Be sure to call Close on the connection when you
-// are finished with it to release associated resources.
-//
-// On any error from any method, the connection is closed
-// with an appropriate reason.
-type Conn struct {
- subprotocol string
- rwc io.ReadWriteCloser
- client bool
- copts *compressionOptions
- flateThreshold int
- br *bufio.Reader
- bw *bufio.Writer
-
- readTimeout chan context.Context
- writeTimeout chan context.Context
-
- // Read state.
- readMu *mu
- readHeaderBuf [8]byte
- readControlBuf [maxControlPayload]byte
- msgReader *msgReader
- readCloseFrameErr error
-
- // Write state.
- msgWriterState *msgWriterState
- writeFrameMu *mu
- writeBuf []byte
- writeHeaderBuf [8]byte
- writeHeader header
-
- closed chan struct{}
- closeMu sync.Mutex
- closeErr error
- wroteClose bool
-
- pingCounter int32
- activePingsMu sync.Mutex
- activePings map[string]chan<- struct{}
-}
-
-type connConfig struct {
- subprotocol string
- rwc io.ReadWriteCloser
- client bool
- copts *compressionOptions
- flateThreshold int
-
- br *bufio.Reader
- bw *bufio.Writer
-}
-
-func newConn(cfg connConfig) *Conn {
- c := &Conn{
- subprotocol: cfg.subprotocol,
- rwc: cfg.rwc,
- client: cfg.client,
- copts: cfg.copts,
- flateThreshold: cfg.flateThreshold,
-
- br: cfg.br,
- bw: cfg.bw,
-
- readTimeout: make(chan context.Context),
- writeTimeout: make(chan context.Context),
-
- closed: make(chan struct{}),
- activePings: make(map[string]chan<- struct{}),
- }
-
- c.readMu = newMu(c)
- c.writeFrameMu = newMu(c)
-
- c.msgReader = newMsgReader(c)
-
- c.msgWriterState = newMsgWriterState(c)
- if c.client {
- c.writeBuf = extractBufioWriterBuf(c.bw, c.rwc)
- }
-
- if c.flate() && c.flateThreshold == 0 {
- c.flateThreshold = 128
- if !c.msgWriterState.flateContextTakeover() {
- c.flateThreshold = 512
- }
- }
-
- runtime.SetFinalizer(c, func(c *Conn) {
- c.close(errors.New("connection garbage collected"))
- })
-
- go c.timeoutLoop()
-
- return c
-}
-
-// Subprotocol returns the negotiated subprotocol.
-// An empty string means the default protocol.
-func (c *Conn) Subprotocol() string {
- return c.subprotocol
-}
-
-func (c *Conn) close(err error) {
- c.closeMu.Lock()
- defer c.closeMu.Unlock()
-
- if c.isClosed() {
- return
- }
- c.setCloseErrLocked(err)
- close(c.closed)
- runtime.SetFinalizer(c, nil)
-
- // Have to close after c.closed is closed to ensure any goroutine that wakes up
- // from the connection being closed also sees that c.closed is closed and returns
- // closeErr.
- c.rwc.Close()
-
- go func() {
- c.msgWriterState.close()
-
- c.msgReader.close()
- }()
-}
-
-func (c *Conn) timeoutLoop() {
- readCtx := context.Background()
- writeCtx := context.Background()
-
- for {
- select {
- case <-c.closed:
- return
-
- case writeCtx = <-c.writeTimeout:
- case readCtx = <-c.readTimeout:
-
- case <-readCtx.Done():
- c.setCloseErr(fmt.Errorf("read timed out: %w", readCtx.Err()))
- go c.writeError(StatusPolicyViolation, errors.New("timed out"))
- case <-writeCtx.Done():
- c.close(fmt.Errorf("write timed out: %w", writeCtx.Err()))
- return
- }
- }
-}
-
-func (c *Conn) flate() bool {
- return c.copts != nil
-}
-
-// Ping sends a ping to the peer and waits for a pong.
-// Use this to measure latency or ensure the peer is responsive.
-// Ping must be called concurrently with Reader as it does
-// not read from the connection but instead waits for a Reader call
-// to read the pong.
-//
-// TCP Keepalives should suffice for most use cases.
-func (c *Conn) Ping(ctx context.Context) error {
- p := atomic.AddInt32(&c.pingCounter, 1)
-
- err := c.ping(ctx, strconv.Itoa(int(p)))
- if err != nil {
- return fmt.Errorf("failed to ping: %w", err)
- }
- return nil
-}
-
-func (c *Conn) ping(ctx context.Context, p string) error {
- pong := make(chan struct{})
-
- c.activePingsMu.Lock()
- c.activePings[p] = pong
- c.activePingsMu.Unlock()
-
- defer func() {
- c.activePingsMu.Lock()
- delete(c.activePings, p)
- c.activePingsMu.Unlock()
- }()
-
- err := c.writeControl(ctx, opPing, []byte(p))
- if err != nil {
- return err
- }
-
- select {
- case <-c.closed:
- return c.closeErr
- case <-ctx.Done():
- err := fmt.Errorf("failed to wait for pong: %w", ctx.Err())
- c.close(err)
- return err
- case <-pong:
- return nil
- }
-}
-
-type mu struct {
- c *Conn
- ch chan struct{}
-}
-
-func newMu(c *Conn) *mu {
- return &mu{
- c: c,
- ch: make(chan struct{}, 1),
- }
-}
-
-func (m *mu) forceLock() {
- m.ch <- struct{}{}
-}
-
-func (m *mu) lock(ctx context.Context) error {
- select {
- case <-m.c.closed:
- return m.c.closeErr
- case <-ctx.Done():
- err := fmt.Errorf("failed to acquire lock: %w", ctx.Err())
- m.c.close(err)
- return err
- case m.ch <- struct{}{}:
- // To make sure the connection is certainly alive.
- // As it's possible the send on m.ch was selected
- // over the receive on closed.
- select {
- case <-m.c.closed:
- // Make sure to release.
- m.unlock()
- return m.c.closeErr
- default:
- }
- return nil
- }
-}
-
-func (m *mu) unlock() {
- select {
- case <-m.ch:
- default:
- }
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/dial.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/dial.go
deleted file mode 100644
index 2b25e3517d6..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/dial.go
+++ /dev/null
@@ -1,287 +0,0 @@
-// +build !js
-
-package websocket
-
-import (
- "bufio"
- "bytes"
- "context"
- "crypto/rand"
- "encoding/base64"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "strings"
- "sync"
- "time"
-
- "nhooyr.io/websocket/internal/errd"
-)
-
-// DialOptions represents Dial's options.
-type DialOptions struct {
- // HTTPClient is used for the connection.
- // Its Transport must return writable bodies for WebSocket handshakes.
- // http.Transport does beginning with Go 1.12.
- HTTPClient *http.Client
-
- // HTTPHeader specifies the HTTP headers included in the handshake request.
- HTTPHeader http.Header
-
- // Subprotocols lists the WebSocket subprotocols to negotiate with the server.
- Subprotocols []string
-
- // CompressionMode controls the compression mode.
- // Defaults to CompressionNoContextTakeover.
- //
- // See docs on CompressionMode for details.
- CompressionMode CompressionMode
-
- // CompressionThreshold controls the minimum size of a message before compression is applied.
- //
- // Defaults to 512 bytes for CompressionNoContextTakeover and 128 bytes
- // for CompressionContextTakeover.
- CompressionThreshold int
-}
-
-// Dial performs a WebSocket handshake on url.
-//
-// The response is the WebSocket handshake response from the server.
-// You never need to close resp.Body yourself.
-//
-// If an error occurs, the returned response may be non nil.
-// However, you can only read the first 1024 bytes of the body.
-//
-// This function requires at least Go 1.12 as it uses a new feature
-// in net/http to perform WebSocket handshakes.
-// See docs on the HTTPClient option and https://github.com/golang/go/issues/26937#issuecomment-415855861
-//
-// URLs with http/https schemes will work and are interpreted as ws/wss.
-func Dial(ctx context.Context, u string, opts *DialOptions) (*Conn, *http.Response, error) {
- return dial(ctx, u, opts, nil)
-}
-
-func dial(ctx context.Context, urls string, opts *DialOptions, rand io.Reader) (_ *Conn, _ *http.Response, err error) {
- defer errd.Wrap(&err, "failed to WebSocket dial")
-
- if opts == nil {
- opts = &DialOptions{}
- }
-
- opts = &*opts
- if opts.HTTPClient == nil {
- opts.HTTPClient = http.DefaultClient
- }
- if opts.HTTPHeader == nil {
- opts.HTTPHeader = http.Header{}
- }
-
- secWebSocketKey, err := secWebSocketKey(rand)
- if err != nil {
- return nil, nil, fmt.Errorf("failed to generate Sec-WebSocket-Key: %w", err)
- }
-
- var copts *compressionOptions
- if opts.CompressionMode != CompressionDisabled {
- copts = opts.CompressionMode.opts()
- }
-
- resp, err := handshakeRequest(ctx, urls, opts, copts, secWebSocketKey)
- if err != nil {
- return nil, resp, err
- }
- respBody := resp.Body
- resp.Body = nil
- defer func() {
- if err != nil {
- // We read a bit of the body for easier debugging.
- r := io.LimitReader(respBody, 1024)
-
- timer := time.AfterFunc(time.Second*3, func() {
- respBody.Close()
- })
- defer timer.Stop()
-
- b, _ := ioutil.ReadAll(r)
- respBody.Close()
- resp.Body = ioutil.NopCloser(bytes.NewReader(b))
- }
- }()
-
- copts, err = verifyServerResponse(opts, copts, secWebSocketKey, resp)
- if err != nil {
- return nil, resp, err
- }
-
- rwc, ok := respBody.(io.ReadWriteCloser)
- if !ok {
- return nil, resp, fmt.Errorf("response body is not a io.ReadWriteCloser: %T", respBody)
- }
-
- return newConn(connConfig{
- subprotocol: resp.Header.Get("Sec-WebSocket-Protocol"),
- rwc: rwc,
- client: true,
- copts: copts,
- flateThreshold: opts.CompressionThreshold,
- br: getBufioReader(rwc),
- bw: getBufioWriter(rwc),
- }), resp, nil
-}
-
-func handshakeRequest(ctx context.Context, urls string, opts *DialOptions, copts *compressionOptions, secWebSocketKey string) (*http.Response, error) {
- if opts.HTTPClient.Timeout > 0 {
- return nil, errors.New("use context for cancellation instead of http.Client.Timeout; see https://github.com/nhooyr/websocket/issues/67")
- }
-
- u, err := url.Parse(urls)
- if err != nil {
- return nil, fmt.Errorf("failed to parse url: %w", err)
- }
-
- switch u.Scheme {
- case "ws":
- u.Scheme = "http"
- case "wss":
- u.Scheme = "https"
- case "http", "https":
- default:
- return nil, fmt.Errorf("unexpected url scheme: %q", u.Scheme)
- }
-
- req, _ := http.NewRequestWithContext(ctx, "GET", u.String(), nil)
- req.Header = opts.HTTPHeader.Clone()
- req.Header.Set("Connection", "Upgrade")
- req.Header.Set("Upgrade", "websocket")
- req.Header.Set("Sec-WebSocket-Version", "13")
- req.Header.Set("Sec-WebSocket-Key", secWebSocketKey)
- if len(opts.Subprotocols) > 0 {
- req.Header.Set("Sec-WebSocket-Protocol", strings.Join(opts.Subprotocols, ","))
- }
- if copts != nil {
- copts.setHeader(req.Header)
- }
-
- resp, err := opts.HTTPClient.Do(req)
- if err != nil {
- return nil, fmt.Errorf("failed to send handshake request: %w", err)
- }
- return resp, nil
-}
-
-func secWebSocketKey(rr io.Reader) (string, error) {
- if rr == nil {
- rr = rand.Reader
- }
- b := make([]byte, 16)
- _, err := io.ReadFull(rr, b)
- if err != nil {
- return "", fmt.Errorf("failed to read random data from rand.Reader: %w", err)
- }
- return base64.StdEncoding.EncodeToString(b), nil
-}
-
-func verifyServerResponse(opts *DialOptions, copts *compressionOptions, secWebSocketKey string, resp *http.Response) (*compressionOptions, error) {
- if resp.StatusCode != http.StatusSwitchingProtocols {
- return nil, fmt.Errorf("expected handshake response status code %v but got %v", http.StatusSwitchingProtocols, resp.StatusCode)
- }
-
- if !headerContainsToken(resp.Header, "Connection", "Upgrade") {
- return nil, fmt.Errorf("WebSocket protocol violation: Connection header %q does not contain Upgrade", resp.Header.Get("Connection"))
- }
-
- if !headerContainsToken(resp.Header, "Upgrade", "WebSocket") {
- return nil, fmt.Errorf("WebSocket protocol violation: Upgrade header %q does not contain websocket", resp.Header.Get("Upgrade"))
- }
-
- if resp.Header.Get("Sec-WebSocket-Accept") != secWebSocketAccept(secWebSocketKey) {
- return nil, fmt.Errorf("WebSocket protocol violation: invalid Sec-WebSocket-Accept %q, key %q",
- resp.Header.Get("Sec-WebSocket-Accept"),
- secWebSocketKey,
- )
- }
-
- err := verifySubprotocol(opts.Subprotocols, resp)
- if err != nil {
- return nil, err
- }
-
- return verifyServerExtensions(copts, resp.Header)
-}
-
-func verifySubprotocol(subprotos []string, resp *http.Response) error {
- proto := resp.Header.Get("Sec-WebSocket-Protocol")
- if proto == "" {
- return nil
- }
-
- for _, sp2 := range subprotos {
- if strings.EqualFold(sp2, proto) {
- return nil
- }
- }
-
- return fmt.Errorf("WebSocket protocol violation: unexpected Sec-WebSocket-Protocol from server: %q", proto)
-}
-
-func verifyServerExtensions(copts *compressionOptions, h http.Header) (*compressionOptions, error) {
- exts := websocketExtensions(h)
- if len(exts) == 0 {
- return nil, nil
- }
-
- ext := exts[0]
- if ext.name != "permessage-deflate" || len(exts) > 1 || copts == nil {
- return nil, fmt.Errorf("WebSocket protcol violation: unsupported extensions from server: %+v", exts[1:])
- }
-
- copts = &*copts
-
- for _, p := range ext.params {
- switch p {
- case "client_no_context_takeover":
- copts.clientNoContextTakeover = true
- continue
- case "server_no_context_takeover":
- copts.serverNoContextTakeover = true
- continue
- }
-
- return nil, fmt.Errorf("unsupported permessage-deflate parameter: %q", p)
- }
-
- return copts, nil
-}
-
-var bufioReaderPool sync.Pool
-
-func getBufioReader(r io.Reader) *bufio.Reader {
- br, ok := bufioReaderPool.Get().(*bufio.Reader)
- if !ok {
- return bufio.NewReader(r)
- }
- br.Reset(r)
- return br
-}
-
-func putBufioReader(br *bufio.Reader) {
- bufioReaderPool.Put(br)
-}
-
-var bufioWriterPool sync.Pool
-
-func getBufioWriter(w io.Writer) *bufio.Writer {
- bw, ok := bufioWriterPool.Get().(*bufio.Writer)
- if !ok {
- return bufio.NewWriter(w)
- }
- bw.Reset(w)
- return bw
-}
-
-func putBufioWriter(bw *bufio.Writer) {
- bufioWriterPool.Put(bw)
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/doc.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/doc.go
deleted file mode 100644
index efa920e3b61..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/doc.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// +build !js
-
-// Package websocket implements the RFC 6455 WebSocket protocol.
-//
-// https://tools.ietf.org/html/rfc6455
-//
-// Use Dial to dial a WebSocket server.
-//
-// Use Accept to accept a WebSocket client.
-//
-// Conn represents the resulting WebSocket connection.
-//
-// The examples are the best way to understand how to correctly use the library.
-//
-// The wsjson and wspb subpackages contain helpers for JSON and protobuf messages.
-//
-// More documentation at https://nhooyr.io/websocket.
-//
-// Wasm
-//
-// The client side supports compiling to Wasm.
-// It wraps the WebSocket browser API.
-//
-// See https://developer.mozilla.org/en-US/docs/Web/API/WebSocket
-//
-// Some important caveats to be aware of:
-//
-// - Accept always errors out
-// - Conn.Ping is no-op
-// - HTTPClient, HTTPHeader and CompressionMode in DialOptions are no-op
-// - *http.Response from Dial is &http.Response{} with a 101 status code on success
-package websocket // import "nhooyr.io/websocket"
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/frame.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/frame.go
deleted file mode 100644
index 2a036f944ac..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/frame.go
+++ /dev/null
@@ -1,294 +0,0 @@
-package websocket
-
-import (
- "bufio"
- "encoding/binary"
- "fmt"
- "io"
- "math"
- "math/bits"
-
- "nhooyr.io/websocket/internal/errd"
-)
-
-// opcode represents a WebSocket opcode.
-type opcode int
-
-// https://tools.ietf.org/html/rfc6455#section-11.8.
-const (
- opContinuation opcode = iota
- opText
- opBinary
- // 3 - 7 are reserved for further non-control frames.
- _
- _
- _
- _
- _
- opClose
- opPing
- opPong
- // 11-16 are reserved for further control frames.
-)
-
-// header represents a WebSocket frame header.
-// See https://tools.ietf.org/html/rfc6455#section-5.2.
-type header struct {
- fin bool
- rsv1 bool
- rsv2 bool
- rsv3 bool
- opcode opcode
-
- payloadLength int64
-
- masked bool
- maskKey uint32
-}
-
-// readFrameHeader reads a header from the reader.
-// See https://tools.ietf.org/html/rfc6455#section-5.2.
-func readFrameHeader(r *bufio.Reader, readBuf []byte) (h header, err error) {
- defer errd.Wrap(&err, "failed to read frame header")
-
- b, err := r.ReadByte()
- if err != nil {
- return header{}, err
- }
-
- h.fin = b&(1<<7) != 0
- h.rsv1 = b&(1<<6) != 0
- h.rsv2 = b&(1<<5) != 0
- h.rsv3 = b&(1<<4) != 0
-
- h.opcode = opcode(b & 0xf)
-
- b, err = r.ReadByte()
- if err != nil {
- return header{}, err
- }
-
- h.masked = b&(1<<7) != 0
-
- payloadLength := b &^ (1 << 7)
- switch {
- case payloadLength < 126:
- h.payloadLength = int64(payloadLength)
- case payloadLength == 126:
- _, err = io.ReadFull(r, readBuf[:2])
- h.payloadLength = int64(binary.BigEndian.Uint16(readBuf))
- case payloadLength == 127:
- _, err = io.ReadFull(r, readBuf)
- h.payloadLength = int64(binary.BigEndian.Uint64(readBuf))
- }
- if err != nil {
- return header{}, err
- }
-
- if h.payloadLength < 0 {
- return header{}, fmt.Errorf("received negative payload length: %v", h.payloadLength)
- }
-
- if h.masked {
- _, err = io.ReadFull(r, readBuf[:4])
- if err != nil {
- return header{}, err
- }
- h.maskKey = binary.LittleEndian.Uint32(readBuf)
- }
-
- return h, nil
-}
-
-// maxControlPayload is the maximum length of a control frame payload.
-// See https://tools.ietf.org/html/rfc6455#section-5.5.
-const maxControlPayload = 125
-
-// writeFrameHeader writes the bytes of the header to w.
-// See https://tools.ietf.org/html/rfc6455#section-5.2
-func writeFrameHeader(h header, w *bufio.Writer, buf []byte) (err error) {
- defer errd.Wrap(&err, "failed to write frame header")
-
- var b byte
- if h.fin {
- b |= 1 << 7
- }
- if h.rsv1 {
- b |= 1 << 6
- }
- if h.rsv2 {
- b |= 1 << 5
- }
- if h.rsv3 {
- b |= 1 << 4
- }
-
- b |= byte(h.opcode)
-
- err = w.WriteByte(b)
- if err != nil {
- return err
- }
-
- lengthByte := byte(0)
- if h.masked {
- lengthByte |= 1 << 7
- }
-
- switch {
- case h.payloadLength > math.MaxUint16:
- lengthByte |= 127
- case h.payloadLength > 125:
- lengthByte |= 126
- case h.payloadLength >= 0:
- lengthByte |= byte(h.payloadLength)
- }
- err = w.WriteByte(lengthByte)
- if err != nil {
- return err
- }
-
- switch {
- case h.payloadLength > math.MaxUint16:
- binary.BigEndian.PutUint64(buf, uint64(h.payloadLength))
- _, err = w.Write(buf)
- case h.payloadLength > 125:
- binary.BigEndian.PutUint16(buf, uint16(h.payloadLength))
- _, err = w.Write(buf[:2])
- }
- if err != nil {
- return err
- }
-
- if h.masked {
- binary.LittleEndian.PutUint32(buf, h.maskKey)
- _, err = w.Write(buf[:4])
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// mask applies the WebSocket masking algorithm to p
-// with the given key.
-// See https://tools.ietf.org/html/rfc6455#section-5.3
-//
-// The returned value is the correctly rotated key to
-// to continue to mask/unmask the message.
-//
-// It is optimized for LittleEndian and expects the key
-// to be in little endian.
-//
-// See https://github.com/golang/go/issues/31586
-func mask(key uint32, b []byte) uint32 {
- if len(b) >= 8 {
- key64 := uint64(key)<<32 | uint64(key)
-
- // At some point in the future we can clean these unrolled loops up.
- // See https://github.com/golang/go/issues/31586#issuecomment-487436401
-
- // Then we xor until b is less than 128 bytes.
- for len(b) >= 128 {
- v := binary.LittleEndian.Uint64(b)
- binary.LittleEndian.PutUint64(b, v^key64)
- v = binary.LittleEndian.Uint64(b[8:16])
- binary.LittleEndian.PutUint64(b[8:16], v^key64)
- v = binary.LittleEndian.Uint64(b[16:24])
- binary.LittleEndian.PutUint64(b[16:24], v^key64)
- v = binary.LittleEndian.Uint64(b[24:32])
- binary.LittleEndian.PutUint64(b[24:32], v^key64)
- v = binary.LittleEndian.Uint64(b[32:40])
- binary.LittleEndian.PutUint64(b[32:40], v^key64)
- v = binary.LittleEndian.Uint64(b[40:48])
- binary.LittleEndian.PutUint64(b[40:48], v^key64)
- v = binary.LittleEndian.Uint64(b[48:56])
- binary.LittleEndian.PutUint64(b[48:56], v^key64)
- v = binary.LittleEndian.Uint64(b[56:64])
- binary.LittleEndian.PutUint64(b[56:64], v^key64)
- v = binary.LittleEndian.Uint64(b[64:72])
- binary.LittleEndian.PutUint64(b[64:72], v^key64)
- v = binary.LittleEndian.Uint64(b[72:80])
- binary.LittleEndian.PutUint64(b[72:80], v^key64)
- v = binary.LittleEndian.Uint64(b[80:88])
- binary.LittleEndian.PutUint64(b[80:88], v^key64)
- v = binary.LittleEndian.Uint64(b[88:96])
- binary.LittleEndian.PutUint64(b[88:96], v^key64)
- v = binary.LittleEndian.Uint64(b[96:104])
- binary.LittleEndian.PutUint64(b[96:104], v^key64)
- v = binary.LittleEndian.Uint64(b[104:112])
- binary.LittleEndian.PutUint64(b[104:112], v^key64)
- v = binary.LittleEndian.Uint64(b[112:120])
- binary.LittleEndian.PutUint64(b[112:120], v^key64)
- v = binary.LittleEndian.Uint64(b[120:128])
- binary.LittleEndian.PutUint64(b[120:128], v^key64)
- b = b[128:]
- }
-
- // Then we xor until b is less than 64 bytes.
- for len(b) >= 64 {
- v := binary.LittleEndian.Uint64(b)
- binary.LittleEndian.PutUint64(b, v^key64)
- v = binary.LittleEndian.Uint64(b[8:16])
- binary.LittleEndian.PutUint64(b[8:16], v^key64)
- v = binary.LittleEndian.Uint64(b[16:24])
- binary.LittleEndian.PutUint64(b[16:24], v^key64)
- v = binary.LittleEndian.Uint64(b[24:32])
- binary.LittleEndian.PutUint64(b[24:32], v^key64)
- v = binary.LittleEndian.Uint64(b[32:40])
- binary.LittleEndian.PutUint64(b[32:40], v^key64)
- v = binary.LittleEndian.Uint64(b[40:48])
- binary.LittleEndian.PutUint64(b[40:48], v^key64)
- v = binary.LittleEndian.Uint64(b[48:56])
- binary.LittleEndian.PutUint64(b[48:56], v^key64)
- v = binary.LittleEndian.Uint64(b[56:64])
- binary.LittleEndian.PutUint64(b[56:64], v^key64)
- b = b[64:]
- }
-
- // Then we xor until b is less than 32 bytes.
- for len(b) >= 32 {
- v := binary.LittleEndian.Uint64(b)
- binary.LittleEndian.PutUint64(b, v^key64)
- v = binary.LittleEndian.Uint64(b[8:16])
- binary.LittleEndian.PutUint64(b[8:16], v^key64)
- v = binary.LittleEndian.Uint64(b[16:24])
- binary.LittleEndian.PutUint64(b[16:24], v^key64)
- v = binary.LittleEndian.Uint64(b[24:32])
- binary.LittleEndian.PutUint64(b[24:32], v^key64)
- b = b[32:]
- }
-
- // Then we xor until b is less than 16 bytes.
- for len(b) >= 16 {
- v := binary.LittleEndian.Uint64(b)
- binary.LittleEndian.PutUint64(b, v^key64)
- v = binary.LittleEndian.Uint64(b[8:16])
- binary.LittleEndian.PutUint64(b[8:16], v^key64)
- b = b[16:]
- }
-
- // Then we xor until b is less than 8 bytes.
- for len(b) >= 8 {
- v := binary.LittleEndian.Uint64(b)
- binary.LittleEndian.PutUint64(b, v^key64)
- b = b[8:]
- }
- }
-
- // Then we xor until b is less than 4 bytes.
- for len(b) >= 4 {
- v := binary.LittleEndian.Uint32(b)
- binary.LittleEndian.PutUint32(b, v^key)
- b = b[4:]
- }
-
- // xor remaining bytes.
- for i := range b {
- b[i] ^= byte(key)
- key = bits.RotateLeft32(key, -8)
- }
-
- return key
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/go.mod b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/go.mod
deleted file mode 100644
index 60377823cba..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/go.mod
+++ /dev/null
@@ -1,14 +0,0 @@
-module nhooyr.io/websocket
-
-go 1.13
-
-require (
- github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee // indirect
- github.com/gobwas/pool v0.2.0 // indirect
- github.com/gobwas/ws v1.0.2
- github.com/golang/protobuf v1.3.5
- github.com/google/go-cmp v0.4.0
- github.com/gorilla/websocket v1.4.1
- github.com/klauspost/compress v1.10.3
- golang.org/x/time v0.0.0-20191024005414-555d28b269f0
-)
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/bpool/bpool.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/bpool/bpool.go
deleted file mode 100644
index aa826fba2b1..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/bpool/bpool.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package bpool
-
-import (
- "bytes"
- "sync"
-)
-
-var bpool sync.Pool
-
-// Get returns a buffer from the pool or creates a new one if
-// the pool is empty.
-func Get() *bytes.Buffer {
- b := bpool.Get()
- if b == nil {
- return &bytes.Buffer{}
- }
- return b.(*bytes.Buffer)
-}
-
-// Put returns a buffer into the pool.
-func Put(b *bytes.Buffer) {
- b.Reset()
- bpool.Put(b)
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/errd/wrap.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/errd/wrap.go
deleted file mode 100644
index 6e779131af8..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/errd/wrap.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package errd
-
-import (
- "fmt"
-)
-
-// Wrap wraps err with fmt.Errorf if err is non nil.
-// Intended for use with defer and a named error return.
-// Inspired by https://github.com/golang/go/issues/32676.
-func Wrap(err *error, f string, v ...interface{}) {
- if *err != nil {
- *err = fmt.Errorf(f+": %w", append(v, *err)...)
- }
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go
deleted file mode 100644
index 26ffb45625b..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// +build js
-
-// Package wsjs implements typed access to the browser javascript WebSocket API.
-//
-// https://developer.mozilla.org/en-US/docs/Web/API/WebSocket
-package wsjs
-
-import (
- "syscall/js"
-)
-
-func handleJSError(err *error, onErr func()) {
- r := recover()
-
- if jsErr, ok := r.(js.Error); ok {
- *err = jsErr
-
- if onErr != nil {
- onErr()
- }
- return
- }
-
- if r != nil {
- panic(r)
- }
-}
-
-// New is a wrapper around the javascript WebSocket constructor.
-func New(url string, protocols []string) (c WebSocket, err error) {
- defer handleJSError(&err, func() {
- c = WebSocket{}
- })
-
- jsProtocols := make([]interface{}, len(protocols))
- for i, p := range protocols {
- jsProtocols[i] = p
- }
-
- c = WebSocket{
- v: js.Global().Get("WebSocket").New(url, jsProtocols),
- }
-
- c.setBinaryType("arraybuffer")
-
- return c, nil
-}
-
-// WebSocket is a wrapper around a javascript WebSocket object.
-type WebSocket struct {
- v js.Value
-}
-
-func (c WebSocket) setBinaryType(typ string) {
- c.v.Set("binaryType", string(typ))
-}
-
-func (c WebSocket) addEventListener(eventType string, fn func(e js.Value)) func() {
- f := js.FuncOf(func(this js.Value, args []js.Value) interface{} {
- fn(args[0])
- return nil
- })
- c.v.Call("addEventListener", eventType, f)
-
- return func() {
- c.v.Call("removeEventListener", eventType, f)
- f.Release()
- }
-}
-
-// CloseEvent is the type passed to a WebSocket close handler.
-type CloseEvent struct {
- Code uint16
- Reason string
- WasClean bool
-}
-
-// OnClose registers a function to be called when the WebSocket is closed.
-func (c WebSocket) OnClose(fn func(CloseEvent)) (remove func()) {
- return c.addEventListener("close", func(e js.Value) {
- ce := CloseEvent{
- Code: uint16(e.Get("code").Int()),
- Reason: e.Get("reason").String(),
- WasClean: e.Get("wasClean").Bool(),
- }
- fn(ce)
- })
-}
-
-// OnError registers a function to be called when there is an error
-// with the WebSocket.
-func (c WebSocket) OnError(fn func(e js.Value)) (remove func()) {
- return c.addEventListener("error", fn)
-}
-
-// MessageEvent is the type passed to a message handler.
-type MessageEvent struct {
- // string or []byte.
- Data interface{}
-
- // There are more fields to the interface but we don't use them.
- // See https://developer.mozilla.org/en-US/docs/Web/API/MessageEvent
-}
-
-// OnMessage registers a function to be called when the WebSocket receives a message.
-func (c WebSocket) OnMessage(fn func(m MessageEvent)) (remove func()) {
- return c.addEventListener("message", func(e js.Value) {
- var data interface{}
-
- arrayBuffer := e.Get("data")
- if arrayBuffer.Type() == js.TypeString {
- data = arrayBuffer.String()
- } else {
- data = extractArrayBuffer(arrayBuffer)
- }
-
- me := MessageEvent{
- Data: data,
- }
- fn(me)
-
- return
- })
-}
-
-// Subprotocol returns the WebSocket subprotocol in use.
-func (c WebSocket) Subprotocol() string {
- return c.v.Get("protocol").String()
-}
-
-// OnOpen registers a function to be called when the WebSocket is opened.
-func (c WebSocket) OnOpen(fn func(e js.Value)) (remove func()) {
- return c.addEventListener("open", fn)
-}
-
-// Close closes the WebSocket with the given code and reason.
-func (c WebSocket) Close(code int, reason string) (err error) {
- defer handleJSError(&err, nil)
- c.v.Call("close", code, reason)
- return err
-}
-
-// SendText sends the given string as a text message
-// on the WebSocket.
-func (c WebSocket) SendText(v string) (err error) {
- defer handleJSError(&err, nil)
- c.v.Call("send", v)
- return err
-}
-
-// SendBytes sends the given message as a binary message
-// on the WebSocket.
-func (c WebSocket) SendBytes(v []byte) (err error) {
- defer handleJSError(&err, nil)
- c.v.Call("send", uint8Array(v))
- return err
-}
-
-func extractArrayBuffer(arrayBuffer js.Value) []byte {
- uint8Array := js.Global().Get("Uint8Array").New(arrayBuffer)
- dst := make([]byte, uint8Array.Length())
- js.CopyBytesToGo(dst, uint8Array)
- return dst
-}
-
-func uint8Array(src []byte) js.Value {
- uint8Array := js.Global().Get("Uint8Array").New(len(src))
- js.CopyBytesToJS(uint8Array, src)
- return uint8Array
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/xsync/go.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/xsync/go.go
deleted file mode 100644
index 7a61f27fa2a..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/xsync/go.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package xsync
-
-import (
- "fmt"
-)
-
-// Go allows running a function in another goroutine
-// and waiting for its error.
-func Go(fn func() error) <-chan error {
- errs := make(chan error, 1)
- go func() {
- defer func() {
- r := recover()
- if r != nil {
- select {
- case errs <- fmt.Errorf("panic in go fn: %v", r):
- default:
- }
- }
- }()
- errs <- fn()
- }()
-
- return errs
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/xsync/int64.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/xsync/int64.go
deleted file mode 100644
index a0c40204156..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/internal/xsync/int64.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package xsync
-
-import (
- "sync/atomic"
-)
-
-// Int64 represents an atomic int64.
-type Int64 struct {
- // We do not use atomic.Load/StoreInt64 since it does not
- // work on 32 bit computers but we need 64 bit integers.
- i atomic.Value
-}
-
-// Load loads the int64.
-func (v *Int64) Load() int64 {
- i, _ := v.i.Load().(int64)
- return i
-}
-
-// Store stores the int64.
-func (v *Int64) Store(i int64) {
- v.i.Store(i)
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/netconn.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/netconn.go
deleted file mode 100644
index 64aadf0b998..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/netconn.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package websocket
-
-import (
- "context"
- "fmt"
- "io"
- "math"
- "net"
- "sync"
- "time"
-)
-
-// NetConn converts a *websocket.Conn into a net.Conn.
-//
-// It's for tunneling arbitrary protocols over WebSockets.
-// Few users of the library will need this but it's tricky to implement
-// correctly and so provided in the library.
-// See https://github.com/nhooyr/websocket/issues/100.
-//
-// Every Write to the net.Conn will correspond to a message write of
-// the given type on *websocket.Conn.
-//
-// The passed ctx bounds the lifetime of the net.Conn. If cancelled,
-// all reads and writes on the net.Conn will be cancelled.
-//
-// If a message is read that is not of the correct type, the connection
-// will be closed with StatusUnsupportedData and an error will be returned.
-//
-// Close will close the *websocket.Conn with StatusNormalClosure.
-//
-// When a deadline is hit, the connection will be closed. This is
-// different from most net.Conn implementations where only the
-// reading/writing goroutines are interrupted but the connection is kept alive.
-//
-// The Addr methods will return a mock net.Addr that returns "websocket" for Network
-// and "websocket/unknown-addr" for String.
-//
-// A received StatusNormalClosure or StatusGoingAway close frame will be translated to
-// io.EOF when reading.
-func NetConn(ctx context.Context, c *Conn, msgType MessageType) net.Conn {
- nc := &netConn{
- c: c,
- msgType: msgType,
- }
-
- var cancel context.CancelFunc
- nc.writeContext, cancel = context.WithCancel(ctx)
- nc.writeTimer = time.AfterFunc(math.MaxInt64, cancel)
- if !nc.writeTimer.Stop() {
- <-nc.writeTimer.C
- }
-
- nc.readContext, cancel = context.WithCancel(ctx)
- nc.readTimer = time.AfterFunc(math.MaxInt64, cancel)
- if !nc.readTimer.Stop() {
- <-nc.readTimer.C
- }
-
- return nc
-}
-
-type netConn struct {
- c *Conn
- msgType MessageType
-
- writeTimer *time.Timer
- writeContext context.Context
-
- readTimer *time.Timer
- readContext context.Context
-
- readMu sync.Mutex
- eofed bool
- reader io.Reader
-}
-
-var _ net.Conn = &netConn{}
-
-func (c *netConn) Close() error {
- return c.c.Close(StatusNormalClosure, "")
-}
-
-func (c *netConn) Write(p []byte) (int, error) {
- err := c.c.Write(c.writeContext, c.msgType, p)
- if err != nil {
- return 0, err
- }
- return len(p), nil
-}
-
-func (c *netConn) Read(p []byte) (int, error) {
- c.readMu.Lock()
- defer c.readMu.Unlock()
-
- if c.eofed {
- return 0, io.EOF
- }
-
- if c.reader == nil {
- typ, r, err := c.c.Reader(c.readContext)
- if err != nil {
- switch CloseStatus(err) {
- case StatusNormalClosure, StatusGoingAway:
- c.eofed = true
- return 0, io.EOF
- }
- return 0, err
- }
- if typ != c.msgType {
- err := fmt.Errorf("unexpected frame type read (expected %v): %v", c.msgType, typ)
- c.c.Close(StatusUnsupportedData, err.Error())
- return 0, err
- }
- c.reader = r
- }
-
- n, err := c.reader.Read(p)
- if err == io.EOF {
- c.reader = nil
- err = nil
- }
- return n, err
-}
-
-type websocketAddr struct {
-}
-
-func (a websocketAddr) Network() string {
- return "websocket"
-}
-
-func (a websocketAddr) String() string {
- return "websocket/unknown-addr"
-}
-
-func (c *netConn) RemoteAddr() net.Addr {
- return websocketAddr{}
-}
-
-func (c *netConn) LocalAddr() net.Addr {
- return websocketAddr{}
-}
-
-func (c *netConn) SetDeadline(t time.Time) error {
- c.SetWriteDeadline(t)
- c.SetReadDeadline(t)
- return nil
-}
-
-func (c *netConn) SetWriteDeadline(t time.Time) error {
- if t.IsZero() {
- c.writeTimer.Stop()
- } else {
- c.writeTimer.Reset(t.Sub(time.Now()))
- }
- return nil
-}
-
-func (c *netConn) SetReadDeadline(t time.Time) error {
- if t.IsZero() {
- c.readTimer.Stop()
- } else {
- c.readTimer.Reset(t.Sub(time.Now()))
- }
- return nil
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/read.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/read.go
deleted file mode 100644
index afd08cc7cde..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/read.go
+++ /dev/null
@@ -1,471 +0,0 @@
-// +build !js
-
-package websocket
-
-import (
- "bufio"
- "context"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "strings"
- "time"
-
- "nhooyr.io/websocket/internal/errd"
- "nhooyr.io/websocket/internal/xsync"
-)
-
-// Reader reads from the connection until until there is a WebSocket
-// data message to be read. It will handle ping, pong and close frames as appropriate.
-//
-// It returns the type of the message and an io.Reader to read it.
-// The passed context will also bound the reader.
-// Ensure you read to EOF otherwise the connection will hang.
-//
-// Call CloseRead if you do not expect any data messages from the peer.
-//
-// Only one Reader may be open at a time.
-func (c *Conn) Reader(ctx context.Context) (MessageType, io.Reader, error) {
- return c.reader(ctx)
-}
-
-// Read is a convenience method around Reader to read a single message
-// from the connection.
-func (c *Conn) Read(ctx context.Context) (MessageType, []byte, error) {
- typ, r, err := c.Reader(ctx)
- if err != nil {
- return 0, nil, err
- }
-
- b, err := ioutil.ReadAll(r)
- return typ, b, err
-}
-
-// CloseRead starts a goroutine to read from the connection until it is closed
-// or a data message is received.
-//
-// Once CloseRead is called you cannot read any messages from the connection.
-// The returned context will be cancelled when the connection is closed.
-//
-// If a data message is received, the connection will be closed with StatusPolicyViolation.
-//
-// Call CloseRead when you do not expect to read any more messages.
-// Since it actively reads from the connection, it will ensure that ping, pong and close
-// frames are responded to. This means c.Ping and c.Close will still work as expected.
-func (c *Conn) CloseRead(ctx context.Context) context.Context {
- ctx, cancel := context.WithCancel(ctx)
- go func() {
- defer cancel()
- c.Reader(ctx)
- c.Close(StatusPolicyViolation, "unexpected data message")
- }()
- return ctx
-}
-
-// SetReadLimit sets the max number of bytes to read for a single message.
-// It applies to the Reader and Read methods.
-//
-// By default, the connection has a message read limit of 32768 bytes.
-//
-// When the limit is hit, the connection will be closed with StatusMessageTooBig.
-func (c *Conn) SetReadLimit(n int64) {
- // We add read one more byte than the limit in case
- // there is a fin frame that needs to be read.
- c.msgReader.limitReader.limit.Store(n + 1)
-}
-
-const defaultReadLimit = 32768
-
-func newMsgReader(c *Conn) *msgReader {
- mr := &msgReader{
- c: c,
- fin: true,
- }
- mr.readFunc = mr.read
-
- mr.limitReader = newLimitReader(c, mr.readFunc, defaultReadLimit+1)
- return mr
-}
-
-func (mr *msgReader) resetFlate() {
- if mr.flateContextTakeover() {
- mr.dict.init(32768)
- }
- if mr.flateBufio == nil {
- mr.flateBufio = getBufioReader(mr.readFunc)
- }
-
- mr.flateReader = getFlateReader(mr.flateBufio, mr.dict.buf)
- mr.limitReader.r = mr.flateReader
- mr.flateTail.Reset(deflateMessageTail)
-}
-
-func (mr *msgReader) putFlateReader() {
- if mr.flateReader != nil {
- putFlateReader(mr.flateReader)
- mr.flateReader = nil
- }
-}
-
-func (mr *msgReader) close() {
- mr.c.readMu.forceLock()
- mr.putFlateReader()
- mr.dict.close()
- if mr.flateBufio != nil {
- putBufioReader(mr.flateBufio)
- }
-
- if mr.c.client {
- putBufioReader(mr.c.br)
- mr.c.br = nil
- }
-}
-
-func (mr *msgReader) flateContextTakeover() bool {
- if mr.c.client {
- return !mr.c.copts.serverNoContextTakeover
- }
- return !mr.c.copts.clientNoContextTakeover
-}
-
-func (c *Conn) readRSV1Illegal(h header) bool {
- // If compression is disabled, rsv1 is illegal.
- if !c.flate() {
- return true
- }
- // rsv1 is only allowed on data frames beginning messages.
- if h.opcode != opText && h.opcode != opBinary {
- return true
- }
- return false
-}
-
-func (c *Conn) readLoop(ctx context.Context) (header, error) {
- for {
- h, err := c.readFrameHeader(ctx)
- if err != nil {
- return header{}, err
- }
-
- if h.rsv1 && c.readRSV1Illegal(h) || h.rsv2 || h.rsv3 {
- err := fmt.Errorf("received header with unexpected rsv bits set: %v:%v:%v", h.rsv1, h.rsv2, h.rsv3)
- c.writeError(StatusProtocolError, err)
- return header{}, err
- }
-
- if !c.client && !h.masked {
- return header{}, errors.New("received unmasked frame from client")
- }
-
- switch h.opcode {
- case opClose, opPing, opPong:
- err = c.handleControl(ctx, h)
- if err != nil {
- // Pass through CloseErrors when receiving a close frame.
- if h.opcode == opClose && CloseStatus(err) != -1 {
- return header{}, err
- }
- return header{}, fmt.Errorf("failed to handle control frame %v: %w", h.opcode, err)
- }
- case opContinuation, opText, opBinary:
- return h, nil
- default:
- err := fmt.Errorf("received unknown opcode %v", h.opcode)
- c.writeError(StatusProtocolError, err)
- return header{}, err
- }
- }
-}
-
-func (c *Conn) readFrameHeader(ctx context.Context) (header, error) {
- select {
- case <-c.closed:
- return header{}, c.closeErr
- case c.readTimeout <- ctx:
- }
-
- h, err := readFrameHeader(c.br, c.readHeaderBuf[:])
- if err != nil {
- select {
- case <-c.closed:
- return header{}, c.closeErr
- case <-ctx.Done():
- return header{}, ctx.Err()
- default:
- c.close(err)
- return header{}, err
- }
- }
-
- select {
- case <-c.closed:
- return header{}, c.closeErr
- case c.readTimeout <- context.Background():
- }
-
- return h, nil
-}
-
-func (c *Conn) readFramePayload(ctx context.Context, p []byte) (int, error) {
- select {
- case <-c.closed:
- return 0, c.closeErr
- case c.readTimeout <- ctx:
- }
-
- n, err := io.ReadFull(c.br, p)
- if err != nil {
- select {
- case <-c.closed:
- return n, c.closeErr
- case <-ctx.Done():
- return n, ctx.Err()
- default:
- err = fmt.Errorf("failed to read frame payload: %w", err)
- c.close(err)
- return n, err
- }
- }
-
- select {
- case <-c.closed:
- return n, c.closeErr
- case c.readTimeout <- context.Background():
- }
-
- return n, err
-}
-
-func (c *Conn) handleControl(ctx context.Context, h header) (err error) {
- if h.payloadLength < 0 || h.payloadLength > maxControlPayload {
- err := fmt.Errorf("received control frame payload with invalid length: %d", h.payloadLength)
- c.writeError(StatusProtocolError, err)
- return err
- }
-
- if !h.fin {
- err := errors.New("received fragmented control frame")
- c.writeError(StatusProtocolError, err)
- return err
- }
-
- ctx, cancel := context.WithTimeout(ctx, time.Second*5)
- defer cancel()
-
- b := c.readControlBuf[:h.payloadLength]
- _, err = c.readFramePayload(ctx, b)
- if err != nil {
- return err
- }
-
- if h.masked {
- mask(h.maskKey, b)
- }
-
- switch h.opcode {
- case opPing:
- return c.writeControl(ctx, opPong, b)
- case opPong:
- c.activePingsMu.Lock()
- pong, ok := c.activePings[string(b)]
- c.activePingsMu.Unlock()
- if ok {
- close(pong)
- }
- return nil
- }
-
- defer func() {
- c.readCloseFrameErr = err
- }()
-
- ce, err := parseClosePayload(b)
- if err != nil {
- err = fmt.Errorf("received invalid close payload: %w", err)
- c.writeError(StatusProtocolError, err)
- return err
- }
-
- err = fmt.Errorf("received close frame: %w", ce)
- c.setCloseErr(err)
- c.writeClose(ce.Code, ce.Reason)
- c.close(err)
- return err
-}
-
-func (c *Conn) reader(ctx context.Context) (_ MessageType, _ io.Reader, err error) {
- defer errd.Wrap(&err, "failed to get reader")
-
- err = c.readMu.lock(ctx)
- if err != nil {
- return 0, nil, err
- }
- defer c.readMu.unlock()
-
- if !c.msgReader.fin {
- err = errors.New("previous message not read to completion")
- c.close(fmt.Errorf("failed to get reader: %w", err))
- return 0, nil, err
- }
-
- h, err := c.readLoop(ctx)
- if err != nil {
- return 0, nil, err
- }
-
- if h.opcode == opContinuation {
- err := errors.New("received continuation frame without text or binary frame")
- c.writeError(StatusProtocolError, err)
- return 0, nil, err
- }
-
- c.msgReader.reset(ctx, h)
-
- return MessageType(h.opcode), c.msgReader, nil
-}
-
-type msgReader struct {
- c *Conn
-
- ctx context.Context
- flate bool
- flateReader io.Reader
- flateBufio *bufio.Reader
- flateTail strings.Reader
- limitReader *limitReader
- dict slidingWindow
-
- fin bool
- payloadLength int64
- maskKey uint32
-
- // readerFunc(mr.Read) to avoid continuous allocations.
- readFunc readerFunc
-}
-
-func (mr *msgReader) reset(ctx context.Context, h header) {
- mr.ctx = ctx
- mr.flate = h.rsv1
- mr.limitReader.reset(mr.readFunc)
-
- if mr.flate {
- mr.resetFlate()
- }
-
- mr.setFrame(h)
-}
-
-func (mr *msgReader) setFrame(h header) {
- mr.fin = h.fin
- mr.payloadLength = h.payloadLength
- mr.maskKey = h.maskKey
-}
-
-func (mr *msgReader) Read(p []byte) (n int, err error) {
- err = mr.c.readMu.lock(mr.ctx)
- if err != nil {
- return 0, fmt.Errorf("failed to read: %w", err)
- }
- defer mr.c.readMu.unlock()
-
- n, err = mr.limitReader.Read(p)
- if mr.flate && mr.flateContextTakeover() {
- p = p[:n]
- mr.dict.write(p)
- }
- if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) && mr.fin && mr.flate {
- mr.putFlateReader()
- return n, io.EOF
- }
- if err != nil {
- err = fmt.Errorf("failed to read: %w", err)
- mr.c.close(err)
- }
- return n, err
-}
-
-func (mr *msgReader) read(p []byte) (int, error) {
- for {
- if mr.payloadLength == 0 {
- if mr.fin {
- if mr.flate {
- return mr.flateTail.Read(p)
- }
- return 0, io.EOF
- }
-
- h, err := mr.c.readLoop(mr.ctx)
- if err != nil {
- return 0, err
- }
- if h.opcode != opContinuation {
- err := errors.New("received new data message without finishing the previous message")
- mr.c.writeError(StatusProtocolError, err)
- return 0, err
- }
- mr.setFrame(h)
-
- continue
- }
-
- if int64(len(p)) > mr.payloadLength {
- p = p[:mr.payloadLength]
- }
-
- n, err := mr.c.readFramePayload(mr.ctx, p)
- if err != nil {
- return n, err
- }
-
- mr.payloadLength -= int64(n)
-
- if !mr.c.client {
- mr.maskKey = mask(mr.maskKey, p)
- }
-
- return n, nil
- }
-}
-
-type limitReader struct {
- c *Conn
- r io.Reader
- limit xsync.Int64
- n int64
-}
-
-func newLimitReader(c *Conn, r io.Reader, limit int64) *limitReader {
- lr := &limitReader{
- c: c,
- }
- lr.limit.Store(limit)
- lr.reset(r)
- return lr
-}
-
-func (lr *limitReader) reset(r io.Reader) {
- lr.n = lr.limit.Load()
- lr.r = r
-}
-
-func (lr *limitReader) Read(p []byte) (int, error) {
- if lr.n <= 0 {
- err := fmt.Errorf("read limited at %v bytes", lr.limit.Load())
- lr.c.writeError(StatusMessageTooBig, err)
- return 0, err
- }
-
- if int64(len(p)) > lr.n {
- p = p[:lr.n]
- }
- n, err := lr.r.Read(p)
- lr.n -= int64(n)
- return n, err
-}
-
-type readerFunc func(p []byte) (int, error)
-
-func (f readerFunc) Read(p []byte) (int, error) {
- return f(p)
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/stringer.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/stringer.go
deleted file mode 100644
index 5a66ba29076..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/stringer.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Code generated by "stringer -type=opcode,MessageType,StatusCode -output=stringer.go"; DO NOT EDIT.
-
-package websocket
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[opContinuation-0]
- _ = x[opText-1]
- _ = x[opBinary-2]
- _ = x[opClose-8]
- _ = x[opPing-9]
- _ = x[opPong-10]
-}
-
-const (
- _opcode_name_0 = "opContinuationopTextopBinary"
- _opcode_name_1 = "opCloseopPingopPong"
-)
-
-var (
- _opcode_index_0 = [...]uint8{0, 14, 20, 28}
- _opcode_index_1 = [...]uint8{0, 7, 13, 19}
-)
-
-func (i opcode) String() string {
- switch {
- case 0 <= i && i <= 2:
- return _opcode_name_0[_opcode_index_0[i]:_opcode_index_0[i+1]]
- case 8 <= i && i <= 10:
- i -= 8
- return _opcode_name_1[_opcode_index_1[i]:_opcode_index_1[i+1]]
- default:
- return "opcode(" + strconv.FormatInt(int64(i), 10) + ")"
- }
-}
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[MessageText-1]
- _ = x[MessageBinary-2]
-}
-
-const _MessageType_name = "MessageTextMessageBinary"
-
-var _MessageType_index = [...]uint8{0, 11, 24}
-
-func (i MessageType) String() string {
- i -= 1
- if i < 0 || i >= MessageType(len(_MessageType_index)-1) {
- return "MessageType(" + strconv.FormatInt(int64(i+1), 10) + ")"
- }
- return _MessageType_name[_MessageType_index[i]:_MessageType_index[i+1]]
-}
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[StatusNormalClosure-1000]
- _ = x[StatusGoingAway-1001]
- _ = x[StatusProtocolError-1002]
- _ = x[StatusUnsupportedData-1003]
- _ = x[statusReserved-1004]
- _ = x[StatusNoStatusRcvd-1005]
- _ = x[StatusAbnormalClosure-1006]
- _ = x[StatusInvalidFramePayloadData-1007]
- _ = x[StatusPolicyViolation-1008]
- _ = x[StatusMessageTooBig-1009]
- _ = x[StatusMandatoryExtension-1010]
- _ = x[StatusInternalError-1011]
- _ = x[StatusServiceRestart-1012]
- _ = x[StatusTryAgainLater-1013]
- _ = x[StatusBadGateway-1014]
- _ = x[StatusTLSHandshake-1015]
-}
-
-const _StatusCode_name = "StatusNormalClosureStatusGoingAwayStatusProtocolErrorStatusUnsupportedDatastatusReservedStatusNoStatusRcvdStatusAbnormalClosureStatusInvalidFramePayloadDataStatusPolicyViolationStatusMessageTooBigStatusMandatoryExtensionStatusInternalErrorStatusServiceRestartStatusTryAgainLaterStatusBadGatewayStatusTLSHandshake"
-
-var _StatusCode_index = [...]uint16{0, 19, 34, 53, 74, 88, 106, 127, 156, 177, 196, 220, 239, 259, 278, 294, 312}
-
-func (i StatusCode) String() string {
- i -= 1000
- if i < 0 || i >= StatusCode(len(_StatusCode_index)-1) {
- return "StatusCode(" + strconv.FormatInt(int64(i+1000), 10) + ")"
- }
- return _StatusCode_name[_StatusCode_index[i]:_StatusCode_index[i+1]]
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/stub.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/stub.go
new file mode 100644
index 00000000000..7bf2a208dac
--- /dev/null
+++ b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/stub.go
@@ -0,0 +1,76 @@
+// Code generated by depstubber. DO NOT EDIT.
+// This is a simple stub for nhooyr.io/websocket, strictly for use in testing.
+
+// See the LICENSE file for information about the licensing of the original library.
+// Source: nhooyr.io/websocket (exports: ; functions: Dial)
+
+// Package websocket is a stub of nhooyr.io/websocket, generated by depstubber.
+package websocket
+
+import (
+ context "context"
+ io "io"
+ http "net/http"
+)
+
+type CompressionMode int
+
+type Conn struct{}
+
+func (_ *Conn) Close(_ StatusCode, _ string) error {
+ return nil
+}
+
+func (_ *Conn) CloseRead(_ context.Context) context.Context {
+ return nil
+}
+
+func (_ *Conn) Ping(_ context.Context) error {
+ return nil
+}
+
+func (_ *Conn) Read(_ context.Context) (MessageType, []byte, error) {
+ return 0, nil, nil
+}
+
+func (_ *Conn) Reader(_ context.Context) (MessageType, io.Reader, error) {
+ return 0, nil, nil
+}
+
+func (_ *Conn) SetReadLimit(_ int64) {}
+
+func (_ *Conn) Subprotocol() string {
+ return ""
+}
+
+func (_ *Conn) Write(_ context.Context, _ MessageType, _ []byte) error {
+ return nil
+}
+
+func (_ *Conn) Writer(_ context.Context, _ MessageType) (io.WriteCloser, error) {
+ return nil, nil
+}
+
+func Dial(_ context.Context, _ string, _ *DialOptions) (*Conn, *http.Response, error) {
+ return nil, nil, nil
+}
+
+type DialOptions struct {
+ HTTPClient *http.Client
+ HTTPHeader http.Header
+ Subprotocols []string
+ CompressionMode CompressionMode
+ CompressionThreshold int
+}
+
+type MessageType int
+
+func (_ MessageType) String() string {
+ return ""
+}
+
+type StatusCode int
+
+func (_ StatusCode) String() string {
+ return ""
+}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/write.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/write.go
deleted file mode 100644
index 60a4fba0644..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/write.go
+++ /dev/null
@@ -1,386 +0,0 @@
-// +build !js
-
-package websocket
-
-import (
- "bufio"
- "context"
- "crypto/rand"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "time"
-
- "github.com/klauspost/compress/flate"
-
- "nhooyr.io/websocket/internal/errd"
-)
-
-// Writer returns a writer bounded by the context that will write
-// a WebSocket message of type dataType to the connection.
-//
-// You must close the writer once you have written the entire message.
-//
-// Only one writer can be open at a time, multiple calls will block until the previous writer
-// is closed.
-func (c *Conn) Writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) {
- w, err := c.writer(ctx, typ)
- if err != nil {
- return nil, fmt.Errorf("failed to get writer: %w", err)
- }
- return w, nil
-}
-
-// Write writes a message to the connection.
-//
-// See the Writer method if you want to stream a message.
-//
-// If compression is disabled or the threshold is not met, then it
-// will write the message in a single frame.
-func (c *Conn) Write(ctx context.Context, typ MessageType, p []byte) error {
- _, err := c.write(ctx, typ, p)
- if err != nil {
- return fmt.Errorf("failed to write msg: %w", err)
- }
- return nil
-}
-
-type msgWriter struct {
- mw *msgWriterState
- closed bool
-}
-
-func (mw *msgWriter) Write(p []byte) (int, error) {
- if mw.closed {
- return 0, errors.New("cannot use closed writer")
- }
- return mw.mw.Write(p)
-}
-
-func (mw *msgWriter) Close() error {
- if mw.closed {
- return errors.New("cannot use closed writer")
- }
- mw.closed = true
- return mw.mw.Close()
-}
-
-type msgWriterState struct {
- c *Conn
-
- mu *mu
- writeMu *mu
-
- ctx context.Context
- opcode opcode
- flate bool
-
- trimWriter *trimLastFourBytesWriter
- dict slidingWindow
-}
-
-func newMsgWriterState(c *Conn) *msgWriterState {
- mw := &msgWriterState{
- c: c,
- mu: newMu(c),
- writeMu: newMu(c),
- }
- return mw
-}
-
-func (mw *msgWriterState) ensureFlate() {
- if mw.trimWriter == nil {
- mw.trimWriter = &trimLastFourBytesWriter{
- w: writerFunc(mw.write),
- }
- }
-
- mw.dict.init(8192)
- mw.flate = true
-}
-
-func (mw *msgWriterState) flateContextTakeover() bool {
- if mw.c.client {
- return !mw.c.copts.clientNoContextTakeover
- }
- return !mw.c.copts.serverNoContextTakeover
-}
-
-func (c *Conn) writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) {
- err := c.msgWriterState.reset(ctx, typ)
- if err != nil {
- return nil, err
- }
- return &msgWriter{
- mw: c.msgWriterState,
- closed: false,
- }, nil
-}
-
-func (c *Conn) write(ctx context.Context, typ MessageType, p []byte) (int, error) {
- mw, err := c.writer(ctx, typ)
- if err != nil {
- return 0, err
- }
-
- if !c.flate() {
- defer c.msgWriterState.mu.unlock()
- return c.writeFrame(ctx, true, false, c.msgWriterState.opcode, p)
- }
-
- n, err := mw.Write(p)
- if err != nil {
- return n, err
- }
-
- err = mw.Close()
- return n, err
-}
-
-func (mw *msgWriterState) reset(ctx context.Context, typ MessageType) error {
- err := mw.mu.lock(ctx)
- if err != nil {
- return err
- }
-
- mw.ctx = ctx
- mw.opcode = opcode(typ)
- mw.flate = false
-
- mw.trimWriter.reset()
-
- return nil
-}
-
-// Write writes the given bytes to the WebSocket connection.
-func (mw *msgWriterState) Write(p []byte) (_ int, err error) {
- err = mw.writeMu.lock(mw.ctx)
- if err != nil {
- return 0, fmt.Errorf("failed to write: %w", err)
- }
- defer mw.writeMu.unlock()
-
- defer func() {
- if err != nil {
- err = fmt.Errorf("failed to write: %w", err)
- mw.c.close(err)
- }
- }()
-
- if mw.c.flate() {
- // Only enables flate if the length crosses the
- // threshold on the first frame
- if mw.opcode != opContinuation && len(p) >= mw.c.flateThreshold {
- mw.ensureFlate()
- }
- }
-
- if mw.flate {
- err = flate.StatelessDeflate(mw.trimWriter, p, false, mw.dict.buf)
- if err != nil {
- return 0, err
- }
- mw.dict.write(p)
- return len(p), nil
- }
-
- return mw.write(p)
-}
-
-func (mw *msgWriterState) write(p []byte) (int, error) {
- n, err := mw.c.writeFrame(mw.ctx, false, mw.flate, mw.opcode, p)
- if err != nil {
- return n, fmt.Errorf("failed to write data frame: %w", err)
- }
- mw.opcode = opContinuation
- return n, nil
-}
-
-// Close flushes the frame to the connection.
-func (mw *msgWriterState) Close() (err error) {
- defer errd.Wrap(&err, "failed to close writer")
-
- err = mw.writeMu.lock(mw.ctx)
- if err != nil {
- return err
- }
- defer mw.writeMu.unlock()
-
- _, err = mw.c.writeFrame(mw.ctx, true, mw.flate, mw.opcode, nil)
- if err != nil {
- return fmt.Errorf("failed to write fin frame: %w", err)
- }
-
- if mw.flate && !mw.flateContextTakeover() {
- mw.dict.close()
- }
- mw.mu.unlock()
- return nil
-}
-
-func (mw *msgWriterState) close() {
- if mw.c.client {
- mw.c.writeFrameMu.forceLock()
- putBufioWriter(mw.c.bw)
- }
-
- mw.writeMu.forceLock()
- mw.dict.close()
-}
-
-func (c *Conn) writeControl(ctx context.Context, opcode opcode, p []byte) error {
- ctx, cancel := context.WithTimeout(ctx, time.Second*5)
- defer cancel()
-
- _, err := c.writeFrame(ctx, true, false, opcode, p)
- if err != nil {
- return fmt.Errorf("failed to write control frame %v: %w", opcode, err)
- }
- return nil
-}
-
-// frame handles all writes to the connection.
-func (c *Conn) writeFrame(ctx context.Context, fin bool, flate bool, opcode opcode, p []byte) (_ int, err error) {
- err = c.writeFrameMu.lock(ctx)
- if err != nil {
- return 0, err
- }
- defer func() {
- // We leave it locked when writing the close frame to avoid
- // any other goroutine writing any other frame.
- if opcode != opClose {
- c.writeFrameMu.unlock()
- }
- }()
-
- select {
- case <-c.closed:
- return 0, c.closeErr
- case c.writeTimeout <- ctx:
- }
-
- defer func() {
- if err != nil {
- select {
- case <-c.closed:
- err = c.closeErr
- case <-ctx.Done():
- err = ctx.Err()
- }
- c.close(err)
- err = fmt.Errorf("failed to write frame: %w", err)
- }
- }()
-
- c.writeHeader.fin = fin
- c.writeHeader.opcode = opcode
- c.writeHeader.payloadLength = int64(len(p))
-
- if c.client {
- c.writeHeader.masked = true
- _, err = io.ReadFull(rand.Reader, c.writeHeaderBuf[:4])
- if err != nil {
- return 0, fmt.Errorf("failed to generate masking key: %w", err)
- }
- c.writeHeader.maskKey = binary.LittleEndian.Uint32(c.writeHeaderBuf[:])
- }
-
- c.writeHeader.rsv1 = false
- if flate && (opcode == opText || opcode == opBinary) {
- c.writeHeader.rsv1 = true
- }
-
- err = writeFrameHeader(c.writeHeader, c.bw, c.writeHeaderBuf[:])
- if err != nil {
- return 0, err
- }
-
- n, err := c.writeFramePayload(p)
- if err != nil {
- return n, err
- }
-
- if c.writeHeader.fin {
- err = c.bw.Flush()
- if err != nil {
- return n, fmt.Errorf("failed to flush: %w", err)
- }
- }
-
- select {
- case <-c.closed:
- return n, c.closeErr
- case c.writeTimeout <- context.Background():
- }
-
- return n, nil
-}
-
-func (c *Conn) writeFramePayload(p []byte) (n int, err error) {
- defer errd.Wrap(&err, "failed to write frame payload")
-
- if !c.writeHeader.masked {
- return c.bw.Write(p)
- }
-
- maskKey := c.writeHeader.maskKey
- for len(p) > 0 {
- // If the buffer is full, we need to flush.
- if c.bw.Available() == 0 {
- err = c.bw.Flush()
- if err != nil {
- return n, err
- }
- }
-
- // Start of next write in the buffer.
- i := c.bw.Buffered()
-
- j := len(p)
- if j > c.bw.Available() {
- j = c.bw.Available()
- }
-
- _, err := c.bw.Write(p[:j])
- if err != nil {
- return n, err
- }
-
- maskKey = mask(maskKey, c.writeBuf[i:c.bw.Buffered()])
-
- p = p[j:]
- n += j
- }
-
- return n, nil
-}
-
-type writerFunc func(p []byte) (int, error)
-
-func (f writerFunc) Write(p []byte) (int, error) {
- return f(p)
-}
-
-// extractBufioWriterBuf grabs the []byte backing a *bufio.Writer
-// and returns it.
-func extractBufioWriterBuf(bw *bufio.Writer, w io.Writer) []byte {
- var writeBuf []byte
- bw.Reset(writerFunc(func(p2 []byte) (int, error) {
- writeBuf = p2[:cap(p2)]
- return len(p2), nil
- }))
-
- bw.WriteByte(0)
- bw.Flush()
-
- bw.Reset(w)
-
- return writeBuf
-}
-
-func (c *Conn) writeError(code StatusCode, err error) {
- c.setCloseErr(err)
- c.writeClose(code, err.Error())
- c.close(nil)
-}
diff --git a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/ws_js.go b/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/ws_js.go
deleted file mode 100644
index b87e32cdafb..00000000000
--- a/ql/test/library-tests/semmle/go/frameworks/Websocket/vendor/nhooyr.io/websocket/ws_js.go
+++ /dev/null
@@ -1,379 +0,0 @@
-package websocket // import "nhooyr.io/websocket"
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "io"
- "net/http"
- "reflect"
- "runtime"
- "strings"
- "sync"
- "syscall/js"
-
- "nhooyr.io/websocket/internal/bpool"
- "nhooyr.io/websocket/internal/wsjs"
- "nhooyr.io/websocket/internal/xsync"
-)
-
-// Conn provides a wrapper around the browser WebSocket API.
-type Conn struct {
- ws wsjs.WebSocket
-
- // read limit for a message in bytes.
- msgReadLimit xsync.Int64
-
- closingMu sync.Mutex
- isReadClosed xsync.Int64
- closeOnce sync.Once
- closed chan struct{}
- closeErrOnce sync.Once
- closeErr error
- closeWasClean bool
-
- releaseOnClose func()
- releaseOnMessage func()
-
- readSignal chan struct{}
- readBufMu sync.Mutex
- readBuf []wsjs.MessageEvent
-}
-
-func (c *Conn) close(err error, wasClean bool) {
- c.closeOnce.Do(func() {
- runtime.SetFinalizer(c, nil)
-
- if !wasClean {
- err = fmt.Errorf("unclean connection close: %w", err)
- }
- c.setCloseErr(err)
- c.closeWasClean = wasClean
- close(c.closed)
- })
-}
-
-func (c *Conn) init() {
- c.closed = make(chan struct{})
- c.readSignal = make(chan struct{}, 1)
-
- c.msgReadLimit.Store(32768)
-
- c.releaseOnClose = c.ws.OnClose(func(e wsjs.CloseEvent) {
- err := CloseError{
- Code: StatusCode(e.Code),
- Reason: e.Reason,
- }
- // We do not know if we sent or received this close as
- // its possible the browser triggered it without us
- // explicitly sending it.
- c.close(err, e.WasClean)
-
- c.releaseOnClose()
- c.releaseOnMessage()
- })
-
- c.releaseOnMessage = c.ws.OnMessage(func(e wsjs.MessageEvent) {
- c.readBufMu.Lock()
- defer c.readBufMu.Unlock()
-
- c.readBuf = append(c.readBuf, e)
-
- // Lets the read goroutine know there is definitely something in readBuf.
- select {
- case c.readSignal <- struct{}{}:
- default:
- }
- })
-
- runtime.SetFinalizer(c, func(c *Conn) {
- c.setCloseErr(errors.New("connection garbage collected"))
- c.closeWithInternal()
- })
-}
-
-func (c *Conn) closeWithInternal() {
- c.Close(StatusInternalError, "something went wrong")
-}
-
-// Read attempts to read a message from the connection.
-// The maximum time spent waiting is bounded by the context.
-func (c *Conn) Read(ctx context.Context) (MessageType, []byte, error) {
- if c.isReadClosed.Load() == 1 {
- return 0, nil, errors.New("WebSocket connection read closed")
- }
-
- typ, p, err := c.read(ctx)
- if err != nil {
- return 0, nil, fmt.Errorf("failed to read: %w", err)
- }
- if int64(len(p)) > c.msgReadLimit.Load() {
- err := fmt.Errorf("read limited at %v bytes", c.msgReadLimit.Load())
- c.Close(StatusMessageTooBig, err.Error())
- return 0, nil, err
- }
- return typ, p, nil
-}
-
-func (c *Conn) read(ctx context.Context) (MessageType, []byte, error) {
- select {
- case <-ctx.Done():
- c.Close(StatusPolicyViolation, "read timed out")
- return 0, nil, ctx.Err()
- case <-c.readSignal:
- case <-c.closed:
- return 0, nil, c.closeErr
- }
-
- c.readBufMu.Lock()
- defer c.readBufMu.Unlock()
-
- me := c.readBuf[0]
- // We copy the messages forward and decrease the size
- // of the slice to avoid reallocating.
- copy(c.readBuf, c.readBuf[1:])
- c.readBuf = c.readBuf[:len(c.readBuf)-1]
-
- if len(c.readBuf) > 0 {
- // Next time we read, we'll grab the message.
- select {
- case c.readSignal <- struct{}{}:
- default:
- }
- }
-
- switch p := me.Data.(type) {
- case string:
- return MessageText, []byte(p), nil
- case []byte:
- return MessageBinary, p, nil
- default:
- panic("websocket: unexpected data type from wsjs OnMessage: " + reflect.TypeOf(me.Data).String())
- }
-}
-
-// Ping is mocked out for Wasm.
-func (c *Conn) Ping(ctx context.Context) error {
- return nil
-}
-
-// Write writes a message of the given type to the connection.
-// Always non blocking.
-func (c *Conn) Write(ctx context.Context, typ MessageType, p []byte) error {
- err := c.write(ctx, typ, p)
- if err != nil {
- // Have to ensure the WebSocket is closed after a write error
- // to match the Go API. It can only error if the message type
- // is unexpected or the passed bytes contain invalid UTF-8 for
- // MessageText.
- err := fmt.Errorf("failed to write: %w", err)
- c.setCloseErr(err)
- c.closeWithInternal()
- return err
- }
- return nil
-}
-
-func (c *Conn) write(ctx context.Context, typ MessageType, p []byte) error {
- if c.isClosed() {
- return c.closeErr
- }
- switch typ {
- case MessageBinary:
- return c.ws.SendBytes(p)
- case MessageText:
- return c.ws.SendText(string(p))
- default:
- return fmt.Errorf("unexpected message type: %v", typ)
- }
-}
-
-// Close closes the WebSocket with the given code and reason.
-// It will wait until the peer responds with a close frame
-// or the connection is closed.
-// It thus performs the full WebSocket close handshake.
-func (c *Conn) Close(code StatusCode, reason string) error {
- err := c.exportedClose(code, reason)
- if err != nil {
- return fmt.Errorf("failed to close WebSocket: %w", err)
- }
- return nil
-}
-
-func (c *Conn) exportedClose(code StatusCode, reason string) error {
- c.closingMu.Lock()
- defer c.closingMu.Unlock()
-
- ce := fmt.Errorf("sent close: %w", CloseError{
- Code: code,
- Reason: reason,
- })
-
- if c.isClosed() {
- return fmt.Errorf("tried to close with %q but connection already closed: %w", ce, c.closeErr)
- }
-
- c.setCloseErr(ce)
- err := c.ws.Close(int(code), reason)
- if err != nil {
- return err
- }
-
- <-c.closed
- if !c.closeWasClean {
- return c.closeErr
- }
- return nil
-}
-
-// Subprotocol returns the negotiated subprotocol.
-// An empty string means the default protocol.
-func (c *Conn) Subprotocol() string {
- return c.ws.Subprotocol()
-}
-
-// DialOptions represents the options available to pass to Dial.
-type DialOptions struct {
- // Subprotocols lists the subprotocols to negotiate with the server.
- Subprotocols []string
-}
-
-// Dial creates a new WebSocket connection to the given url with the given options.
-// The passed context bounds the maximum time spent waiting for the connection to open.
-// The returned *http.Response is always nil or a mock. It's only in the signature
-// to match the core API.
-func Dial(ctx context.Context, url string, opts *DialOptions) (*Conn, *http.Response, error) {
- c, resp, err := dial(ctx, url, opts)
- if err != nil {
- return nil, nil, fmt.Errorf("failed to WebSocket dial %q: %w", url, err)
- }
- return c, resp, nil
-}
-
-func dial(ctx context.Context, url string, opts *DialOptions) (*Conn, *http.Response, error) {
- if opts == nil {
- opts = &DialOptions{}
- }
-
- url = strings.Replace(url, "http://", "ws://", 1)
- url = strings.Replace(url, "https://", "wss://", 1)
-
- ws, err := wsjs.New(url, opts.Subprotocols)
- if err != nil {
- return nil, nil, err
- }
-
- c := &Conn{
- ws: ws,
- }
- c.init()
-
- opench := make(chan struct{})
- releaseOpen := ws.OnOpen(func(e js.Value) {
- close(opench)
- })
- defer releaseOpen()
-
- select {
- case <-ctx.Done():
- c.Close(StatusPolicyViolation, "dial timed out")
- return nil, nil, ctx.Err()
- case <-opench:
- return c, &http.Response{
- StatusCode: http.StatusSwitchingProtocols,
- }, nil
- case <-c.closed:
- return nil, nil, c.closeErr
- }
-}
-
-// Reader attempts to read a message from the connection.
-// The maximum time spent waiting is bounded by the context.
-func (c *Conn) Reader(ctx context.Context) (MessageType, io.Reader, error) {
- typ, p, err := c.Read(ctx)
- if err != nil {
- return 0, nil, err
- }
- return typ, bytes.NewReader(p), nil
-}
-
-// Writer returns a writer to write a WebSocket data message to the connection.
-// It buffers the entire message in memory and then sends it when the writer
-// is closed.
-func (c *Conn) Writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) {
- return writer{
- c: c,
- ctx: ctx,
- typ: typ,
- b: bpool.Get(),
- }, nil
-}
-
-type writer struct {
- closed bool
-
- c *Conn
- ctx context.Context
- typ MessageType
-
- b *bytes.Buffer
-}
-
-func (w writer) Write(p []byte) (int, error) {
- if w.closed {
- return 0, errors.New("cannot write to closed writer")
- }
- n, err := w.b.Write(p)
- if err != nil {
- return n, fmt.Errorf("failed to write message: %w", err)
- }
- return n, nil
-}
-
-func (w writer) Close() error {
- if w.closed {
- return errors.New("cannot close closed writer")
- }
- w.closed = true
- defer bpool.Put(w.b)
-
- err := w.c.Write(w.ctx, w.typ, w.b.Bytes())
- if err != nil {
- return fmt.Errorf("failed to close writer: %w", err)
- }
- return nil
-}
-
-// CloseRead implements *Conn.CloseRead for wasm.
-func (c *Conn) CloseRead(ctx context.Context) context.Context {
- c.isReadClosed.Store(1)
-
- ctx, cancel := context.WithCancel(ctx)
- go func() {
- defer cancel()
- c.read(ctx)
- c.Close(StatusPolicyViolation, "unexpected data message")
- }()
- return ctx
-}
-
-// SetReadLimit implements *Conn.SetReadLimit for wasm.
-func (c *Conn) SetReadLimit(n int64) {
- c.msgReadLimit.Store(n)
-}
-
-func (c *Conn) setCloseErr(err error) {
- c.closeErrOnce.Do(func() {
- c.closeErr = fmt.Errorf("WebSocket closed: %w", err)
- })
-}
-
-func (c *Conn) isClosed() bool {
- select {
- case <-c.closed:
- return true
- default:
- return false
- }
-}
diff --git a/ql/test/query-tests/Security/CWE-918/RequestForgery.expected b/ql/test/query-tests/Security/CWE-918/RequestForgery.expected
index 2707f133c5c..5709a91dcaf 100644
--- a/ql/test/query-tests/Security/CWE-918/RequestForgery.expected
+++ b/ql/test/query-tests/Security/CWE-918/RequestForgery.expected
@@ -13,15 +13,15 @@ edges
| tst.go:36:2:36:2 | implicit dereference : URL | tst.go:36:2:36:2 | implicit dereference : URL |
| tst.go:36:2:36:2 | implicit dereference : URL | tst.go:37:11:37:20 | call to String |
| tst.go:36:2:36:2 | u [pointer] : URL | tst.go:36:2:36:2 | implicit dereference : URL |
-| websocket.go:54:21:54:31 | call to Referer : string | websocket.go:59:27:59:40 | untrustedInput |
-| websocket.go:68:21:68:31 | call to Referer : string | websocket.go:72:36:72:49 | untrustedInput |
-| websocket.go:82:21:82:31 | call to Referer : string | websocket.go:85:31:85:44 | untrustedInput |
-| websocket.go:101:21:101:31 | call to Referer : string | websocket.go:104:15:104:28 | untrustedInput |
-| websocket.go:120:21:120:31 | call to Referer : string | websocket.go:123:38:123:51 | untrustedInput |
-| websocket.go:148:21:148:31 | call to Referer : string | websocket.go:149:31:149:44 | untrustedInput |
-| websocket.go:154:21:154:31 | call to Referer : string | websocket.go:156:31:156:44 | untrustedInput |
-| websocket.go:189:21:189:31 | call to Referer : string | websocket.go:191:18:191:31 | untrustedInput |
-| websocket.go:196:21:196:31 | call to Referer : string | websocket.go:198:11:198:24 | untrustedInput |
+| websocket.go:60:21:60:31 | call to Referer : string | websocket.go:65:27:65:40 | untrustedInput |
+| websocket.go:74:21:74:31 | call to Referer : string | websocket.go:78:36:78:49 | untrustedInput |
+| websocket.go:88:21:88:31 | call to Referer : string | websocket.go:91:31:91:44 | untrustedInput |
+| websocket.go:107:21:107:31 | call to Referer : string | websocket.go:110:15:110:28 | untrustedInput |
+| websocket.go:126:21:126:31 | call to Referer : string | websocket.go:129:38:129:51 | untrustedInput |
+| websocket.go:154:21:154:31 | call to Referer : string | websocket.go:155:31:155:44 | untrustedInput |
+| websocket.go:160:21:160:31 | call to Referer : string | websocket.go:162:31:162:44 | untrustedInput |
+| websocket.go:195:21:195:31 | call to Referer : string | websocket.go:197:18:197:31 | untrustedInput |
+| websocket.go:202:21:202:31 | call to Referer : string | websocket.go:204:11:204:24 | untrustedInput |
nodes
| RequestForgery.go:8:12:8:34 | call to FormValue : string | semmle.label | call to FormValue : string |
| RequestForgery.go:11:24:11:65 | ...+... | semmle.label | ...+... |
@@ -36,24 +36,24 @@ nodes
| tst.go:36:2:36:2 | implicit dereference : URL | semmle.label | implicit dereference : URL |
| tst.go:36:2:36:2 | u [pointer] : URL | semmle.label | u [pointer] : URL |
| tst.go:37:11:37:20 | call to String | semmle.label | call to String |
-| websocket.go:54:21:54:31 | call to Referer : string | semmle.label | call to Referer : string |
-| websocket.go:59:27:59:40 | untrustedInput | semmle.label | untrustedInput |
-| websocket.go:68:21:68:31 | call to Referer : string | semmle.label | call to Referer : string |
-| websocket.go:72:36:72:49 | untrustedInput | semmle.label | untrustedInput |
-| websocket.go:82:21:82:31 | call to Referer : string | semmle.label | call to Referer : string |
-| websocket.go:85:31:85:44 | untrustedInput | semmle.label | untrustedInput |
-| websocket.go:101:21:101:31 | call to Referer : string | semmle.label | call to Referer : string |
-| websocket.go:104:15:104:28 | untrustedInput | semmle.label | untrustedInput |
-| websocket.go:120:21:120:31 | call to Referer : string | semmle.label | call to Referer : string |
-| websocket.go:123:38:123:51 | untrustedInput | semmle.label | untrustedInput |
-| websocket.go:148:21:148:31 | call to Referer : string | semmle.label | call to Referer : string |
-| websocket.go:149:31:149:44 | untrustedInput | semmle.label | untrustedInput |
+| websocket.go:60:21:60:31 | call to Referer : string | semmle.label | call to Referer : string |
+| websocket.go:65:27:65:40 | untrustedInput | semmle.label | untrustedInput |
+| websocket.go:74:21:74:31 | call to Referer : string | semmle.label | call to Referer : string |
+| websocket.go:78:36:78:49 | untrustedInput | semmle.label | untrustedInput |
+| websocket.go:88:21:88:31 | call to Referer : string | semmle.label | call to Referer : string |
+| websocket.go:91:31:91:44 | untrustedInput | semmle.label | untrustedInput |
+| websocket.go:107:21:107:31 | call to Referer : string | semmle.label | call to Referer : string |
+| websocket.go:110:15:110:28 | untrustedInput | semmle.label | untrustedInput |
+| websocket.go:126:21:126:31 | call to Referer : string | semmle.label | call to Referer : string |
+| websocket.go:129:38:129:51 | untrustedInput | semmle.label | untrustedInput |
| websocket.go:154:21:154:31 | call to Referer : string | semmle.label | call to Referer : string |
-| websocket.go:156:31:156:44 | untrustedInput | semmle.label | untrustedInput |
-| websocket.go:189:21:189:31 | call to Referer : string | semmle.label | call to Referer : string |
-| websocket.go:191:18:191:31 | untrustedInput | semmle.label | untrustedInput |
-| websocket.go:196:21:196:31 | call to Referer : string | semmle.label | call to Referer : string |
-| websocket.go:198:11:198:24 | untrustedInput | semmle.label | untrustedInput |
+| websocket.go:155:31:155:44 | untrustedInput | semmle.label | untrustedInput |
+| websocket.go:160:21:160:31 | call to Referer : string | semmle.label | call to Referer : string |
+| websocket.go:162:31:162:44 | untrustedInput | semmle.label | untrustedInput |
+| websocket.go:195:21:195:31 | call to Referer : string | semmle.label | call to Referer : string |
+| websocket.go:197:18:197:31 | untrustedInput | semmle.label | untrustedInput |
+| websocket.go:202:21:202:31 | call to Referer : string | semmle.label | call to Referer : string |
+| websocket.go:204:11:204:24 | untrustedInput | semmle.label | untrustedInput |
#select
| RequestForgery.go:11:15:11:66 | call to Get | RequestForgery.go:8:12:8:34 | call to FormValue : string | RequestForgery.go:11:24:11:65 | ...+... | The $@ of this request depends on $@. | RequestForgery.go:11:24:11:65 | ...+... | URL | RequestForgery.go:8:12:8:34 | call to FormValue : string | a user-provided value |
| tst.go:14:2:14:18 | call to Get | tst.go:10:13:10:35 | call to FormValue : string | tst.go:14:11:14:17 | tainted | The $@ of this request depends on $@. | tst.go:14:11:14:17 | tainted | URL | tst.go:10:13:10:35 | call to FormValue : string | a user-provided value |
@@ -63,12 +63,12 @@ nodes
| tst.go:27:2:27:30 | call to Get | tst.go:10:13:10:35 | call to FormValue : string | tst.go:27:11:27:29 | ...+... | The $@ of this request depends on $@. | tst.go:27:11:27:29 | ...+... | URL | tst.go:10:13:10:35 | call to FormValue : string | a user-provided value |
| tst.go:29:2:29:41 | call to Get | tst.go:10:13:10:35 | call to FormValue : string | tst.go:29:11:29:40 | ...+... | The $@ of this request depends on $@. | tst.go:29:11:29:40 | ...+... | URL | tst.go:10:13:10:35 | call to FormValue : string | a user-provided value |
| tst.go:37:2:37:21 | call to Get | tst.go:10:13:10:35 | call to FormValue : string | tst.go:37:11:37:20 | call to String | The $@ of this request depends on $@. | tst.go:37:11:37:20 | call to String | URL | tst.go:10:13:10:35 | call to FormValue : string | a user-provided value |
-| websocket.go:59:12:59:53 | call to Dial | websocket.go:54:21:54:31 | call to Referer : string | websocket.go:59:27:59:40 | untrustedInput | The $@ of this request depends on $@. | websocket.go:59:27:59:40 | untrustedInput | WebSocket URL | websocket.go:54:21:54:31 | call to Referer : string | a user-provided value |
-| websocket.go:73:13:73:40 | call to DialConfig | websocket.go:68:21:68:31 | call to Referer : string | websocket.go:72:36:72:49 | untrustedInput | The $@ of this request depends on $@. | websocket.go:72:36:72:49 | untrustedInput | WebSocket URL | websocket.go:68:21:68:31 | call to Referer : string | a user-provided value |
-| websocket.go:85:3:85:50 | call to Dial | websocket.go:82:21:82:31 | call to Referer : string | websocket.go:85:31:85:44 | untrustedInput | The $@ of this request depends on $@. | websocket.go:85:31:85:44 | untrustedInput | WebSocket URL | websocket.go:82:21:82:31 | call to Referer : string | a user-provided value |
-| websocket.go:104:3:104:39 | call to Dial | websocket.go:101:21:101:31 | call to Referer : string | websocket.go:104:15:104:28 | untrustedInput | The $@ of this request depends on $@. | websocket.go:104:15:104:28 | untrustedInput | WebSocket URL | websocket.go:101:21:101:31 | call to Referer : string | a user-provided value |
-| websocket.go:123:3:123:62 | call to DialContext | websocket.go:120:21:120:31 | call to Referer : string | websocket.go:123:38:123:51 | untrustedInput | The $@ of this request depends on $@. | websocket.go:123:38:123:51 | untrustedInput | WebSocket URL | websocket.go:120:21:120:31 | call to Referer : string | a user-provided value |
-| websocket.go:149:3:149:45 | call to Dial | websocket.go:148:21:148:31 | call to Referer : string | websocket.go:149:31:149:44 | untrustedInput | The $@ of this request depends on $@. | websocket.go:149:31:149:44 | untrustedInput | WebSocket URL | websocket.go:148:21:148:31 | call to Referer : string | a user-provided value |
-| websocket.go:156:3:156:45 | call to Dial | websocket.go:154:21:154:31 | call to Referer : string | websocket.go:156:31:156:44 | untrustedInput | The $@ of this request depends on $@. | websocket.go:156:31:156:44 | untrustedInput | WebSocket URL | websocket.go:154:21:154:31 | call to Referer : string | a user-provided value |
-| websocket.go:191:3:191:32 | call to BuildProxy | websocket.go:189:21:189:31 | call to Referer : string | websocket.go:191:18:191:31 | untrustedInput | The $@ of this request depends on $@. | websocket.go:191:18:191:31 | untrustedInput | WebSocket URL | websocket.go:189:21:189:31 | call to Referer : string | a user-provided value |
-| websocket.go:198:3:198:25 | call to New | websocket.go:196:21:196:31 | call to Referer : string | websocket.go:198:11:198:24 | untrustedInput | The $@ of this request depends on $@. | websocket.go:198:11:198:24 | untrustedInput | WebSocket URL | websocket.go:196:21:196:31 | call to Referer : string | a user-provided value |
+| websocket.go:65:12:65:53 | call to Dial | websocket.go:60:21:60:31 | call to Referer : string | websocket.go:65:27:65:40 | untrustedInput | The $@ of this request depends on $@. | websocket.go:65:27:65:40 | untrustedInput | WebSocket URL | websocket.go:60:21:60:31 | call to Referer : string | a user-provided value |
+| websocket.go:79:13:79:40 | call to DialConfig | websocket.go:74:21:74:31 | call to Referer : string | websocket.go:78:36:78:49 | untrustedInput | The $@ of this request depends on $@. | websocket.go:78:36:78:49 | untrustedInput | WebSocket URL | websocket.go:74:21:74:31 | call to Referer : string | a user-provided value |
+| websocket.go:91:3:91:50 | call to Dial | websocket.go:88:21:88:31 | call to Referer : string | websocket.go:91:31:91:44 | untrustedInput | The $@ of this request depends on $@. | websocket.go:91:31:91:44 | untrustedInput | WebSocket URL | websocket.go:88:21:88:31 | call to Referer : string | a user-provided value |
+| websocket.go:110:3:110:39 | call to Dial | websocket.go:107:21:107:31 | call to Referer : string | websocket.go:110:15:110:28 | untrustedInput | The $@ of this request depends on $@. | websocket.go:110:15:110:28 | untrustedInput | WebSocket URL | websocket.go:107:21:107:31 | call to Referer : string | a user-provided value |
+| websocket.go:129:3:129:62 | call to DialContext | websocket.go:126:21:126:31 | call to Referer : string | websocket.go:129:38:129:51 | untrustedInput | The $@ of this request depends on $@. | websocket.go:129:38:129:51 | untrustedInput | WebSocket URL | websocket.go:126:21:126:31 | call to Referer : string | a user-provided value |
+| websocket.go:155:3:155:45 | call to Dial | websocket.go:154:21:154:31 | call to Referer : string | websocket.go:155:31:155:44 | untrustedInput | The $@ of this request depends on $@. | websocket.go:155:31:155:44 | untrustedInput | WebSocket URL | websocket.go:154:21:154:31 | call to Referer : string | a user-provided value |
+| websocket.go:162:3:162:45 | call to Dial | websocket.go:160:21:160:31 | call to Referer : string | websocket.go:162:31:162:44 | untrustedInput | The $@ of this request depends on $@. | websocket.go:162:31:162:44 | untrustedInput | WebSocket URL | websocket.go:160:21:160:31 | call to Referer : string | a user-provided value |
+| websocket.go:197:3:197:32 | call to BuildProxy | websocket.go:195:21:195:31 | call to Referer : string | websocket.go:197:18:197:31 | untrustedInput | The $@ of this request depends on $@. | websocket.go:197:18:197:31 | untrustedInput | WebSocket URL | websocket.go:195:21:195:31 | call to Referer : string | a user-provided value |
+| websocket.go:204:3:204:25 | call to New | websocket.go:202:21:202:31 | call to Referer : string | websocket.go:204:11:204:24 | untrustedInput | The $@ of this request depends on $@. | websocket.go:204:11:204:24 | untrustedInput | WebSocket URL | websocket.go:202:21:202:31 | call to Referer : string | a user-provided value |
diff --git a/ql/test/query-tests/Security/CWE-918/go.mod b/ql/test/query-tests/Security/CWE-918/go.mod
index 5f614a3d1d3..ce6c493a190 100644
--- a/ql/test/query-tests/Security/CWE-918/go.mod
+++ b/ql/test/query-tests/Security/CWE-918/go.mod
@@ -5,7 +5,6 @@ go 1.14
require (
github.com/gobwas/ws v1.0.3
github.com/gorilla/websocket v1.4.2
- github.com/sacOO7/go-logger v0.0.0-20180719173527-9ac9add5a50d // indirect
github.com/sacOO7/gowebsocket v0.0.0-20180719182212-1436bb906a4e
golang.org/x/net v0.0.0-20200421231249-e086a090c8fd
nhooyr.io/websocket v1.8.5
diff --git a/ql/test/query-tests/Security/CWE-918/main b/ql/test/query-tests/Security/CWE-918/main
new file mode 100755
index 0000000000000000000000000000000000000000..e713a460ed5df969e554a8bb572b6599d7475bd1
GIT binary patch
literal 6935277
zcmeFad3;pm`8Pa)42%k#0YM_Nj5=s=!9+n5H9Ap{6C7$#)TpRYF`}YIn1NVPC(Hym
z4pXD0)vB$jc5%TMeA~=g90v)fSULFz3y{n&V-{9X
zZ*x&`-a&lbuzizH`T>PpWdx0bctc>bz)(aj1SCQ*b{5{Tfh^q&_
zb0tmX2awKx_Oqaye7fwX%d5V-xinO#ZrD
zM%HtCuA)r;PL;CjQCv(38Q*YB<>vOaNiuHOrZ9UUEa*!Z_!r|@sTPT>z)cy0W4Is17e
zUVd=Y4_vS62d-E3E3Nv6;>kKTek<9v4)n7f+VC2z`VCh7SFD0Ie!ISH+sjekV$lXE
z&&nGV{(Kv-m5bt8|8`pS?P3i#D*794RP{&N83(EVIqKW8Ws&VSsruV*QuXUC;>bU7
z+w|M|tFh8;G?80X{m8AV{$ShTp&bADwSqQxJSMAnVx=PiOsx>!?wetKmA;atof4&-5Q(
z`c1vi4oQoRWqWQyGPdVVkY{U?ZCUc^@~KDq?-0B>>o=}`K=K+_UnI}@qtiv@#h3l*
zD#y3fw;Ob)d|OU-a_8^YkwN}5|LWDNSJ8iDc4k_CcKPARcNRG^N7X;Fud09SsO%`A
zGyZJ=HKh{O5JTADqG#<#Gi>m&h}Anmp2fI^(~gQ~lP5
zRsGgKtNKe6IR~vD>Qp~=^1}*m$s~E^r)#R0Y;IhxZ;|XwTV&N=WYw?KvnkI0a=ChU
z!ryD=6N0GM+{yC%dd`o_|CvtpJKFvr^*TJ)%X2p!gyUal{41^cww2rQrX7cNa=%%;
z66|I8&o*q=x8qJb9-YEMj(?ra{P)PS>pF^|Be_97BfnR{V9yX`vb
zM{29&^R|iEx!KZi*7!f~|3?De9PTzW(M@ipq*^x^m2Qx6L|zN_Dupye4?Xm1mrC%SF|rFB*6Kw4Yyi^Y|O8#$Pge
z>|GO28Fkv&vEy&Ma@_4Fk3HkIE5;QDe;&Nfb;`7xuRnd%DbuE$JniP&!!;+@6rFzZ
z>7z!@xMSq0uF}g&FD;#P=_R_c-}D{T!&PRyVYXEr>(b*F=IQZEyn1|wM~|P?B68A^
znL_56c-U2HtTJ~F>fs8GFvl2Ey8Ff$yRN#rbaLs{rPnl9RgKh!#Z;B}xyd!CVt?DX
zmY{0P#>5K^-?Y>dlgmyaR06HTmqH7cWSm8y%XKB@X<`roCx
z@%<%w?T)RWMmIwKeBGGjFVKykzbI&o@dtu<(_@XHGqe$7{N>t+pubWZG09)8jd#mo2)n12`YTXOyM@hs-i6Jz|omNEX^0xQL9z9Mx_d!6y9
z`8qz#b@-EJ@6AhqZ|f@;x%B7`Zx&8ZVWas1ipB^09@%x9!pG@}A%DSZmB>{$K0wV6
z@NNEvrQCP3RAcCKFf3$0zwcQ}omN3!~v4
zdUTcds%Gu$Up>cLZ|6EA;pne?CO1Y$oi7
zB#OouZFY+)y%wz@!fJZ_9+x=|Ra0-8E<%Xz$kUB|y0N=(PZqJzKNMvS#$dm$F+6|>
zs4EM>NkEX-Y_yoY2qQ82i|Yy@+Le0TebtvbkQBjsa*$n#jF5wPu
zFn+xG1#*J0hXGTTK+F)qqFm4jH5N9q30Kuqu>YW7|0zD^^&;{Bnh&IP^w9ueE&@P!
z106D)BJkwSPkx*_jhz8e-7!Hzd?zNy}(Pzr85@@IIqXWB7d?eh
zjwUo<4yIkq3Ey{g6m9}v0*XF(J?f0D`vlE9t|-VTd8A&uibRTArpd#fj)6_&3C@zo
zp0Ih{vah2f^}4zEI?w{;jy2YrF_ww8b%gp*o0;}vZmUGb4WeV%q?9*rLgBhbZ9YSH
zDQ*r2sEm_xyy@{-FrIT?V$)Bai+Ya6s|^ddl4l{!QNHx@6^Em!)lKQf+jQm>v~Hkn
z1tB?aF!74GDajnBvbXGrld{1|wmc_L^^j
z>{K7pp+}o@^yvOTXjoZ%Y@V(?wROgh#Ecx4M2`7)5!L=c_#gKBp0e$jLgDqddt{6ytsmiKlqp9S^6pkqZ?zOygaB^LG6ZP-W2=eowz4<}VVVjD>V#ezKmigR!s!ipev`GK86j$ha0`>e{nzs1A<|FJ{FGD|u{e`^sdecYl)noePCKKd+yQF69Q
z*P?GASz8+FGbpu=22r8Mm0Xjh@Za;t_>Tuc8ViF*9aQC71Zjy?LhX>R_K~QPfAc}2
zt|1;YGY}Pw2Q{?sv$e9~-QgAn76FN5b>nP5C5rYIgjGI*4l**^pXUl6r7abVKt;6&
z^f+Y?vZkiy>+#<61l52Vc_0|pi`Qt;r096Ezi9QCL3ueSb)718u@Hm;%_sm4{l^F9
zhXx&)&E#bCPi94NcUXhF@~aa;INAl;y=N$xGw(YO8N@Ehcqv!SDfOyDuHqIuNIa{8;jK_a}<`I-#aS!Sivmqnygl)C)~Sd
zIVuQQ%aEb)XOVR+$b!{&*Xpi?!^l`|l~(sNNmx3mXSdSGK75~p@507p5ydq^G19O5
zNf~h+O}ff0y3oXxcPUWfDpGV^aR6PzNY|AFmqk|#LYYjfy#g$x8{`;%uo-T4v_Ke1
zwVpV;hi+`ujdi-wo?Iyq+?Th}G<)lOz!Cj|Q@`Rt3OB)yqfI`|o1^uO`JYC*=7np+
zJ9{8yt1=Sm$A6n41Vu>47}K0hcE)77Blij=_qK
z&6^xBk0i|V4^BtEqT{MHjyeNbMvNd`4f49tY{PRoypqfO5^v@+R`N&ucpQJ~%?na)SF&$=N5`9#fLB|}{)nT+!fmPU
zdPlQf+Tqc}<{gbxtBkp3qbp8~{@qT7xYPW*Sag^9@<1>fzs(gbneq&lE3~>P(iVE>JdTA%3wh6~FD^OZ>v!s_Tzx&HzN*z%o%*U*Ut#sNSbg27zFPU!;$Hks(4`Ssi~D)&
zHCi(BY2o%p7BSog((RbcTmT7xa}R<`ZO)k}fNsRcT)eu>+f}X?@kD>^rH=+_54ra|
zcackbX}b%rJJf3qUO!N;hv0RKdhLbR)#~+dyslKQ{qee7y$;0d)9Up|y#7+X9;H3h
zG{5&R_%SC65(?J>*1LJW1xEnyXVPUVsX!(5W77F5saz%HFsVQ#RjZ_3XhP-?l~k{iK48+JDrvDwTFa!p
zJ;?2fR+Y4bNuMyuSd%Pc4><61xQl-1J>Ptjwg_SM(w>Cay>JZu;hyu*Aq-Y-VFh&T
z(sEjn>p^O-EN20Kfv7bv$B}6HA3Y%Mr)Yd248JqJv`8=27B(Ur%WH<}_NK%AY=1Lz
zJbVqJhiPaMa3DScgUK}pbVGH>Md|~z(0h5s`455_GVIC&bYNw*=&Jl?^VH8U2x#c!
z&)6Gwg(I#b&coy@!Z1C4nqGXNCvJO+@8Lv1Ru={Ao6wodYxSqfSB
zDDOPV8(ju{vP0gM@WG{d1A1pN=LJ~Eh^3>ee9h+iPXP9rGzVGoo_Tw=tfBft)L4lF
zlxUlF&p8xfp5CQ&`e~S97ZZz%En!4w#A-!4l
zZspJGE~yTDl0wwL4X)rcjHVKiE
z9e+e;66%Ex#{a}cu5&>ix^8zK-iP5m^_5*xJvrh4zRBblLlR2W7fiq#CegKkF}?T)
zEp{RuL-FP^qcx}no6DkY9&HZ8JiMJdvtOj-E}+cOV$Y*kav=U7-8=IdjQe}wzgDG%
z)>LU*n)J~f9WEDUU8AeLdbGt`7B#)8f8q;P(^8P?hc`6E7GKI^m(aCWw(5nA_xBol
zX0Kt|!;N|(cPzx;QD)=_6*CXRG?5-bhoap(Y3XDVUw=C
zx>nz8>e1~v&idmMz1{!|E`9I2p`QgJKeWw)Nu8eZjE{$UwCS6_)Hikbaen?4M5!Z-xkp3UX9e`9YFfV
z4|g1a|HJUV%7;I9_}=(5wDOJbw=_ak9Ub9g-*`Lp4L(<^&%vERP4Aa_<&Cyk$w>gn
z=b6^?J9HYBnMF!&cnZc5awm^LV#eC;N3#fGip7`Y%1Op1t90%5#wxAS`d{4$_q*Ra
z`}VRH4!_YQ?ZvlGnU!p1%!`V!tD0$7uo^O_h;62gu
zLmE8;d_DDa9^k=$`kcLY=kk5@_?4Iohi;Rc74F>G6Ml3*oX6Ls4YRk}oMm_e|G%e=
zOjSvLKoTS@z5~z8*etH)pDH$iJNRcN&HX-_dlUX|#{W<8e{YqxZ6*GHhX1SJaawJA
z)uX9EE#}bS7aw0ue+bmmOCYu$_^;vrWwicleQ$gp`UoxHKX1Ifjb{H&=lUHTp;eF?
zL5Q69duAS`$HDYKl!Vu6!GCzK;*Fty3z~RvXd680FnFYm+CstW3Z0=Rdc6-8>3JM}
z2ZBf8&0jj44m>id!s(3I3%^4ElsAV@@Y3+tR>Xt(xqUDge^K&WK+RTXr}ENc`>gW$
zNe)Chm*+I#o$&pbjPz>Jki4~Zn{{jz#$|e9(!h?Y_!(8jUuv-xlGTFAx-HXF$#Yb}
z?z7IRieH^$7ydyN<|2hExHCBb1*_t-a;l0y&|+Z+jDIHgp^2fF&zq1PgSbttP+xwgV~YzWcPQd!NM6#yOYnMU_4ax9TD|%fY_3JP=WBwI+mkNf@#;|
zXXwQBZ^^Q`nyLzFv(C41p+UHzQ`C|Q8GJjFXR$C4LfgRjr~}aEq%3KmSDaM{!<`@~
zcjIj!tac+T^((y=t`h-MjQhuIVSSJ^X(rLOVY9Ba@uSih_bvg_Mg0spA14=~E{aSp
zqRZI|BwweI{2*WzlPi)O#sP_M)(A8LMcYSsv)#_uBn_5-lTFsYQxk1!P;88QyHx9<
z2G$A`j(?}v#Y`^Rj>fee4eJ{VWPiKBN;IdkmhQyEmX)fElob9}T;hG~HKJ|3V2U6^
zPlUZydspw$v{)4~L=0G+8@Rhnx!kf5@CRpJr}YXJubr6}%-Nt9@16bylYZ90v{{ep
ziF-TroDIPdTgr^Hq&?a34|*g-J_u-&`4EWREV}BkJdXT>^jDM89hJ;+OGh;Tko
z3`a*}yu111rvG?ZVi9vACF>p&(N+QcL3$f>Jp;F4$r8~RI+@v~qmxlthl#=zfl%mX
zKo>gq0Ryr!ol16me|~m+&tMmGruJAZJvu@g!2W>UZZs!G`$!Ksf_7soMi76t6!gCY
zAI`vKUVJe4BmT$m8Elwe?K%kkYquPf{tLR2PaOU}fPo>FkuWa6)LIZd0A%7dKfHq;
zrwb8P5yc-UqAdAnbH4*+UEugy^SZRl*J>l1!acP3(7%}d?;4v6*EJ=37H%}&Yf5PyM3u};>%s=exTAKRuG-MGDaXG*@bvB;hI~ehO
z4p-8Z{b+NQXFOm|;VUJdDoa4zbtmSH|#vS3)Pe4GDc}ODM~JW>&y<
zI*&fptj1YSm24Y_t)mN
zqg155Z}@g@URWsGWm02kDdl>+qJWd42Il3o7clMqxGXs8IUiUvoEW^AQxJSmqL@+F
zF(ZemE-vB`wO*_TW-Zl?g|alM$1TEYSCQUk;;w50QF^o{;0m9`TI{cMgFZ-Y+mRM%
zK9q4aYL&AysDoGiT$WQg2Z*)E<%E6!6?uq2(EuauM{0F9lbkrGumeSO6Tdi%O%tM+
zifuZWz|-Su15|1Vm-eg6qK#-xt94^b&}h_+_qcfgUUDkQu4c=tfHJJP9NeZw#MY&qdz(D4=gl5~g}h>D_@DPVrrv#AF32GB&2(OIH9}Qpr+(
zjv$BoUm&KaDCv-0Mq*nw)5$(!cvh`MuL9+%IH(@+HRbL#i-mrf3=R49O(h1*S|VF%Ra<&-
z|2-=`SoBE!wQwya5Un(CS`fpJSZW__UJMeBwD%5;iM038>bSS>;I^OI
zqZ;NxAs)PH{kK0+HPWvASc|RS`xEsKj{1$Ct_qLQmaD~TX>FLn{xL$%!1DNyN|k1&
zybgo%jLCUaFTWSd3ak$+?%IDGoRMk&`6<*iz|*g?OTqG-o%g*j{78pV`IPEV;GMbL
zY1m!#@-Pj8MwIBiXt}dP>9dC&O5S>OC@*riz(LpVP#=U8<#4IQx!i&IC&1N~sr;(q
z0}4ofTxB1a%lR(SWfjm5EMeuN;WJ_>J~WKNtetU3vOqNm1|BEs?`m%$_Ao5=`O=EA?bgs~{Ho36OhNXTIZ~p}=K?L3O@s1sNTg&8U&2A`
z_V^;k?M$!=jJ-TQR^}j#FXrFYb_1y1>ret~;RktQ2&vZD>sPz~5MjxL`>weTm=S;y
zn0zs>Ave7|%5J4fq4?XQqSnc%&6TVt_DT$9^_?6dyeb!XNZ5
zdv)VT-MAALs&b=E2r1NS@%(a>56KoGECsL1LlnvV4~W9d_!He+sn5~`WaE#NTo|RO
z2C~)U)AoJ(JZT#i7$o~de#in1oQ@zrsSP&3&dS(7hw!lJS7sumugU5gCy(mkkdQ)4
z{Qy2fN5X#(HjsMV0UznpfE@D5kCZ$n@Xuw%09zsi#EB09A+$=vAA_I~nO^hN(|RaJ
zgsuy&gK(m&3J{v7D=}~vyw?-&pR?NIOq6!23(Eg!32ompsH3R!LDHv>X@*x_$@RD$
z)ueYS>7A-aOU^qI7Zqr+^(aa^DZmp`gN<5WP{2Pr|5O0-rkER$Wfn(<84_XJ`!G=s
zF)PD)lqkn5Z$ZH8B)rzE*9yEYRIiiugeCq;zKM9ui<7~?F0acqW2oMucB7&W
z5L<&qhzL}#s)hwwCp%E8fkp6-0<>}gs76UWL!t*1SR{J!YLVCh+{Y6v>8BvYM
zg{~ZLmMJA@uLin<;~FWeF092W>M@rH^>zt!EQFh^j0BEu%JGO-
z$y%{z=A{Hp@}01Q90GF``P2_??i0)dN`G{=-+x5-S1jMDc$$g7SL(~5aNA#B2~--B=bOcMsS#UC&fO!BT@i~c0}YP
zQG9*?A9@S^pi#elzuL^~TOG+m3S6SB6mWFHu{Ou&=0;5Hf^;unAvA&OTwTkut`36$
z&FR1B&g{X+)*ZH#a9@6d!-dGkwJp1$_k_D)t|No%{pM@PsLyHu;1{x*Mvs9%8K!^*
z{ta7ri)$NEQ>){a0AyQ-dE#XS^&PcbeRiZxEU4#PiE+Oy%f>H+SmIkzjc_z?Z5v8c
z&3SnDV48~W`FPK>-l1N-n#Kk8%Ha$+4RudK$xUdnF_Og+Kn&dw-VMj(hu;*QBes3|SpK5o|iE)09E>2IVr_~&$QZ_XREXPs4
z{jC+AD+?ko=hGszSP4KZ=PSij$=RhI1d1sdnKi82ro~do*5iNUQ8p~9<^~8bUhL7t}GK@!)iT*;RO$Qa50RpU8d(1x8ReW
z`+}#tg-?7shvQI7^tG#~iRS3`9~4S^Qf}kD=z`ykZvQ?*jcs^6zz9bI7o`3UQu4&H
zu@sKX-MS3B!yz_$IFF#edALiH-{8qOt*pn-D_9*7Ljj7z{!Rg-4|;M1%@cK#
zZ)4^)K4kn%99Z9mU3zoEplZ1oWpa)
zHfePm@TAl$w53-L#Ky14e(WQA0Eybt@`2Hhheh@c3SYQ_rP%=B`7ZWoODhJ71;LO#
zdzyV5nR@33{e7X$_Wg82j51(ez7eQq{zou4mZso#VScb9PZbJzbLhUB4y^aLta_3C
zeZv}-3XE3LQIVVhD~;?Q7@n$Vvy&&&c;OuFp{|655^ljA*JG<7cg1?5U%^VH=SEiY
zTi9fzeUKL0h?fkjNpCi+gnLP)!^id_Z+%i6$`vh#`p!pXm>%pPbDmV2`z4o3qC3!A
zOrs0oAH;+avKFoJKxsQ=?tg)DvRoqk010W?576eq)?JZ(1GTyDLkW?6ST}f5Ua)R(
z7+$oc3nkiwE@QtV*m!wBts`u@R|1d#j^yC%7M2X+)o?~N80g&VafSQICg9DpKc%#)
zVTLt`fF+LMmULqjCdYT`iCm3|a#S=&TN?AT`M{5$MJ!Z~93}w3kNX>l8uL9qbHB83
zLc+YJbR7~wU|zc@6$f?DOichbRwRs{2Wu*~tO*7II~8ln#$`v!G%H9?;evUY4+t)s
zewZq@K)NXt6@lY;XNX=HlPlNL?+Z0_HS^&3)N)3^#)K{^cANC9nQ}xX-Icg)2WGE1
z9B7hFS?HaDT$6f$h>7c{G_C;-Ty=G*o19>E4VcfoNu
zt?oB?^2{7nfbUqD)Wu3!^lmK6grMacsQ@%CX{K;oMlUgdbSr~C6gTwXSvnghOI?;-
zGN^*e3iQO4XeH>|2rmbRT`hGMU*Wy)SIIY|iYHG&D#y0BK!Jkb?*t2wnc}+ICoe@hVtny%3(d=s=x9b=%8^J5!Np@If*uD
zbvcj%_*-JxNeFgwldR*|Bq;v!d8&}?O$OkcclF}(z^sMIFIiFsA^r@
z+LN(a^Ep_ivhAxJ_7=NX>R{x}9zqeIr%AOt2$iSoGiKkZ!3_mJ$#q#wIU3v1
zw#^@J)1cLj7reFvuqu~&-(f6iOz05s-T-Nc!!d8Xnk+)aSW>v6;W(tym_e!e^7}G&
z$iJ$yFP`%$qXL`s&H(#60D7_j-a>!}t@p3V8A4j0a1wIrkJkHxquwJ~_4fVi$M8*a
z)T_*@xAI5pjdIk>HGn>m}TKujnkWiJwTu2M1&nG2@^TS$9=NBCq|(n29Pcx71n9BaJlKo*NwOlPG{va;SvrXKfCB)rtEkdvg_dUz8=uiz;
z*8{y*5ZBohufOL^Jw9ry9)FFE0S8R<;$KJbHtP_*c%L@s2x(QXiQ6mwHN)sxZy;kv
zWAZ!NC8l~R-#LsinC)l!GC|6L~HcpF?)Q^QqmuB3Xx;(gc$mOSH@lB
zigl~kUlvFa6w7JWn^J>t6OhXlaB0ymq+I{^=;32JKd_YFWV&^DLtM9#)5@98rZ2nW
zP)K*Z7N-r6D8rt3=~RZ{MZ8m>EHT~_EN;@GoFc&$4IVA_B@m9$mU_yJb;j<}&B;=n
z=gifjoGb~#g9eyasy(%K`ZZ$<*N%-L4jhTPIL$u8owze6^*rux2=&xE8gX9e_f!Yt
zGkDToGF&k~6^7x65zqVJGSOUxdydEORun`p5lG(1&I`!O*%1N2C^p%OvP3SF1TN4U
z_6AN+0@wslf!K5uJD75SEHK{Unzs|4>?6RA1S*N!7pS0&a)i_5FyO>nY76$jV!K2m
zH%ugX{yFUxyVNYvBPvsFB0{+0UB0Lw2IY4?ddz9u=fBHF(PgFOj!KKND)nI{V=E3d
z23+PuY^tm`5Bm$6$7kqf)Hnr=A&-p#j*fqTK$+8HLJ}mlAC@ZqJUkl>;?-wVgSf@f
zAbi;kBG7e%aKrAW_f#(+ac`(~_y*Uv{6P5e>JrSnt7tUkG$AocK%mr=a%Z&rJs
zSq1jcWpmmP?B$a^1ef4y^sm&aiN|VE=x1cYNd}jc0X`uY+v#6+c}6Yw-#8pHwR0A2
zq0d>m2t(l*@gImVAP70|6ftVCMv68~$=&IUy5AxL^k;^FST9z+aXtitS@h!4^3bm6
zid`t@3M0U^xmgBxrTSdyyH#n6v#jaZk$1So&_vN7Oxln@AR{jrtUGr@e*
zNxAS)E5VF8}3MNt`IavbUf0Eit!6?1|G3zMNz8D$YTPDaHX?4#O9UsMB6
zIQw~uACklA^PhN&u8NSK8p^wd<03jMImTq&U%?$wbMqOkV}lTOay2l&_(NSAx1VF~
zJi)(qN_dqAQY5KL%ecfl|cC1WoD#KyXo@MAaH>Fl{
za~I6a19L>b8T5P2=g~4~r1>y(Ka9)A-V)C4ajP#=SDe^pA#-TS+33*#VH6-0u*Zkp
zV~acPjl*&%8o)Yiq){!S?q47d@X&>#0HC8i9SktQDCk^Lph+u@(zzFI!VbV>JG)FY
zmU6(fS3N2?&5M;i_vyE~(SSIS4Wh7zOlX09Mi5_rV|Mu&m70}bUnvg!u{4;jRFHxH3d|5W|PovA;PeQWwE;ok6%&g?(7HEW5)+27`AbI)c2
z%3hKMM))P{+2QO`o3sqeNx{BlR!JWL$7dN%@p?F>cR5Jh(*(yeEe
zce}JD&JG9{9jV?jHO9>b?*xr?K^&AREB4MRnt;u-#Vc`fS(!GjwSEHJ$B5Rl5gW%R
z`X3Q2Zl2jkPt3{*jzC24*6i0ft(D4aGMu83`^!R&wGcF)U=vh
zSMtwjvsQ=gv@dMmK1A79Ruc=N)yO(X4&8VT-1u5ZJ#yCok2dd5B7X$<`rePV$ZUSS
z)Bq2}!-{fa+|B6%7!NJDieIpRd>K-y!OdNq@>oFQiZ)i#1EyCl5R-ay%tLQO7KD?B
zDf)z787-N59p=Zw2nul54+M%v*0;(i&x@8^BpHK|0r`oUp;b^Rl$PD(YM?aSm$k+`
zW+j}N(TW=e5|}hqX??7;KeHM_Ecb5*oPa`begB-Ls1Sfw3pWs?3IX6q&7qoCPqT=P
zmNZq8Ohg~D6QmR<)+B@CoSA^TWXuLEvZqXL&Z;E)aLyh)xl{zU54Rza?HX^i~b5H{zCft437vPn1DmT4}VM@aN!hH^#zy=$a
zIr5*9QS`PW<2YsvKt{cJ?(Z>{c`suuuo*a?0{Y)h0Y4QK5M)91H*)wyI^+ai%(w;0FOYY_c8$b|LL=nR<6Z!d*D401T5_Z+cq$7TlBKsqnZ(*~
zDm4rB&Q|w8%8G3un79l#6JAq3f$?n(3P(#`S2fQhHlqSpwep6W3(8@M=RB+y1ua2A
zxUy8F9l=JB2TELKbU%e+^`=GG20)^55g$YyExA!aUQ1e9B!^_Ju3gTD9$g{iYq5v$
z6)h=Z;cyrQZLZK=7NEPlFAyy`N-~cHXBgy@Gbl}Wb+?r!!)D8p;HAsa=y132=dVhe
zOQBoESeo$$3`N?nkUsxLi}chJobf=rV5PrVmF8sGT1-ckbz%!Ee>8oSJE@E$a_n3u_OXY%)O0@0FjG{UJIH>o}VW#k@Dry(xQ-s6+@~$8z%{=wtk=7vTQICeg!G~Y
z8oSqQeVhW$^Zj6Ay7@XR!5aBjV($9tN-~5&YNo@s
z*67(rJj6z(_~F$P5<75w04w*wkX
zf|j*wj>DB7NfhJX5+8r~{ppTj`5Sh}@UzvDcj*F0rK4DB04qV}N(@^tXaMY1BMoAB
z8pL4$Ap*(uOB>FW2@%zXU*9X>!vP0)_6O$Df3SPm{wTValsY$H&mVM(M}9zQ^bh&$
zAAIR45A@|VJ~#u38gTRy3c#3a662IX>PBJ&3(=BuuV6a}(w!DzYpEzAdRKuf>|--0
zd*z%aprXu4*p9I3r+H@OS8NYp!#Y2q$VM#`;tVcJn(=j%gx!Ce
zCy|<2psTC+apr$d585xV$N_>hJ*`+Lbkc(YD#8L`X?phKc!7hS#~=9-dbGJ$(XnvIXoJrT0%B!cPL4pr9AiZ96OQHkmfT0~G
z&J+n4)XqJn4uqw3YZ16cYhZ&X;>>U?CtC98Wi*K1j@~KtTimrrD}yE6vkW0(!u-=>
zGOf%J!hktqq3tz=7QQd-@66jippji5FkxgIhqjdZ0L#T1bt**}*)as21UDl8+6I)#
z=0?PCp6K8vG(B4KpG!&aWwz~RGxHD0%wpzu8DI>Lg
z#!!|dqX%UdYoJ@62<@bX=q(YF%709iURPY{L*FQu+WzZ6X9ZnR
zacpMb+!`kgAW>;|V^_v=if->OL2uEf_Uk>Q&f_Y_v&bJ|-Guc-2T{*&8Z@pzkTA3X
z30MG%ajCd)6L*fl(YoU0gE0hKxRSeqZoxMq|3h%8HMLC>Eak0?FeJ%w;(KqP?(AI%s=;38Hv#?6lzL
zwq!wY}orEBku|{
zmBE;Gz#N`5T=Uhpo!C$&c{_X?WBCSD!;oWS2G2%F1MrE5PvQY%*?7QACbHMf+i^Tb
z+U<`-_P@E9N~ptt3q|d1H_pf&{aUP>o{-&((UMzK@p&w6%wbTh8(tOmS71xu-^gyX
zWQ;1sU+oQA3Je;73cY0x^Z5^v{V-K%2VpoOd)Vo01T=8Y6`aW^pODZ46R_=Q0#`Ke
zMB!-3r(!sDr{N=rp?8^a9)iR`0BvPTJQLYNJunN23i=GpME1|@R1Fo~4!asVvcFm)
z`?!L<5)^h3*>i(P-KP8Q)#DG64=NJvc^@dcg!RH>9c~mXWkUKeoP(u1*ak-x(KeU%
zE2YKGN@OHnu7dXOvDnSe3b>@*e1}W=3jrYN$1DOOG>+OKWtaKT6;8I2S4f{Of@Da)
zL@Ku?PepPt5rD`LQ@0?d4jQfI!_1VBe!CgRd(hZmM&Q~LvNEGh=jZ?tj%xX4
z8+#vPZ<&luQuqiOYe4)2qcyn{70j)YzaI;?!3SrZ^c3(4I&ZA@M_E>T13@V(PnKmA0;}c6y*c
zNPCa{V*$zriC^0Bb*hTY>?WS3${FXG-@Jp)X_fi?)1ci7-01?7f5L813*teYM&YL9
zO(^D^*3ey{L7djW-Erv8=(dxiPy{7U#zS^@CXdA%#{><)6Kf0?{}dXu&S?=xBrbve
zNh`C%_wJ7H-L(T2op3K5<)|d#`(Ek$j(z}rUobm-cl2L!-}gh%Mduim5p`5%{XVA}
zB@)D>LEUosf)8d`6rCVmk*90Y5e#?SNnFeMMmy5?G19-3rB$RK=sMD`NJsj&kgH5U
zd*Pp{F%?>&u8Q=1og@8#`StU(N{RFXi1bwq^#KyCIonbnJ0rRVK%tq!e|pXLzrlv8
z0XX8{Fb*A8010th$pKOpnD=}Wb13@8PJuNV=a%4xj5-#^HxWw
z81b6IqPg}wv@du-B(Y3%mABa(0D!3%v*H(?FV;KjUZNnUm^fW1EP@Nhh5t&?sG((S
zL)9H>TCxBLo^-}Xgn!O~;Z~8Y1_3DHmdpGTE-jGryMPKgPxD*DylV(`kt6(4=eZd=
zV&|ZXBzfgxdoM)5>lJ@8KNG)+{eMXN&DxKSK$L6J>`NCjGkzr+__om9(USV{%7|Oe
z&m%S-k8C+l5J!HF1^yFY@`a7^b0)jAzrHvXElXS2jLny^ZQ)Z}4QgS9eMw&TI8E;eS{r#82b@+TFvQA{M_Ps(ks
zdDD%t5_*n{usKV7BLvhJ!okl>d>kz~8>t9dx%-1{sS*?F(d89bdUk2QkVLdwa}@!D
zH(hw+3#m1G<76`{O+}@6?+;(Y`aQin`yzJ*tEjfh0bzm$D-95}eXLD*`SJn?)yyj(
zEOcekqGSFD3*jSRhM(`QSXcmws1vK@Xjv7Qzg}J`q|TGgQqkp8)z>)4J9#1gn2#C|
z|J!WK{0C$6Q8;-NP9B8w+^6pQ1_iW*s{^q;q3zgnH1U0M#rfmJsP1}V
zW)ALwG!cwxiC>bw5_ylZH`DpsJBiWTUquC@*J?d6|46Py!Ls&ZgYCPzb`gH&rsG_?
z+NSN@^`qOk+KLq}MDM%sn|N{eXa7RYL@tO6z?ZwX;IpuCUN^SLJh1CSoSqBma`~+3
zBOJ%zyvdvUMM9tKy6(r4Ei})pds4Ua|pbzW3)3b=r4sza!
zDyjr$qN_@!E49rSOXd-#Lp|_AGtgj0`K=%Y&N0LQ8Oyq?-$~br#Y!mH<@dw6Tpew~
zHiVS1TV1Eky@1xiOA;`ZFP2VDqAy#P^bhqVmNX0AXpx$u
zw9X-srE-)2jwp*z0%&UPZugzUv)l=Hj(Ul^cinpu3iaNNhb5UBtCy|Oc_F)f6ia^9
z3%|X9%?1#iY$mV+Hj#L`nIKOZM*(RlVpPqSfP~kVJWB~#p#ka4-_N!
z26wMg9_UBc1(!HV@-Vk`_*uwmWizW#iJNsC8Hw><<0UfzxVF<=(@9YG(?)ws-V
zSQQZfhWlmg%rT#|zB_|44t0m8*6yXu%)=R5rDNi?(o+W9H19_
zX7u1lf5tG7&{j(l*e0Of@*yk)KcVJt?3PHIl17-1Y#QgZGIn)LgfRv9=HnYWF|v*~
zG(bpd0MAW}48SxpIS@sO@(c#Rn)M?DkT?G8q@Tx0Mb>o
zGNl6%S>~Z=AM#Q(8|}-cyN~cfD2ThHv&U^9ESaMq1CCEv`+1P8*@7}(#(@OK=QJ>$
zWapvC?@a&6^cm3|$71
zF15Q~hddK8znnLhp9oyTX|dWJUW(UPqhqAM_`h5`e$A#4I%
zUc&BZnem8MK*odp=A^#VGFg*Wi-{uz1NY}pMN82!(*Hg}H~>&Bm4;cBysU(pR;B3A
zeQ1Sp*S37A14
z8{*{L&_aJBGZO>W9Y)R;svk*2y;dZHailHqZv6{4gSSW~eP{CFwRq>L?7jpp)xl)d>0m~Yu(i*Du;`c&662sn|
zl4Kyqu2NK2yNa5W6Lm1K1^$Sjx29qZgfP&lB2A1B;i1`#Z86v+h9C9ArTMf=Yllzj
zQ5IIp<8~FeYSXK93r({_6kP#HgCMaLeKB%z3hNcCEOxbmx6s5i-vAn61u=g?h&MPH
z_vf