Add RabbitMQ agent and containers
cmd/agent/Dockerfile (new file, 49 lines)
@@ -0,0 +1,49 @@
FROM golang:1.22 AS builder

# Copy the entire project
WORKDIR /app
COPY . .

# Download dependencies
RUN go mod download

# Set the working directory to the cmd/agent subproject
WORKDIR /app/cmd/agent

# Build the agent
RUN go build -o /bin/mrva_agent ./main.go

FROM ubuntu:24.10 AS runner
ENV DEBIAN_FRONTEND=noninteractive

# Build argument for CodeQL version, defaulting to the latest release
ARG CODEQL_VERSION=latest

# Install packages
RUN apt-get update && apt-get install --no-install-recommends --assume-yes \
    unzip \
    curl \
    ca-certificates

# If the version is 'latest', get the latest release version from GitHub, unzip the bundle into /opt, and delete the archive
RUN if [ "$CODEQL_VERSION" = "latest" ]; then \
        CODEQL_VERSION=$(curl -s https://api.github.com/repos/github/codeql-cli-binaries/releases/latest | grep '"tag_name"' | sed -E 's/.*"([^"]+)".*/\1/'); \
    fi && \
    echo "Using CodeQL version $CODEQL_VERSION" && \
    curl -L "https://github.com/github/codeql-cli-binaries/releases/download/$CODEQL_VERSION/codeql-linux64.zip" -o /tmp/codeql.zip && \
    unzip /tmp/codeql.zip -d /opt && \
    rm /tmp/codeql.zip

# Set environment variables for CodeQL
ENV CODEQL_CLI_PATH=/opt/codeql

# Set environment variable for CodeQL for `codeql database analyze` support on ARM
# This env var has no functional effect on CodeQL when running on x86_64 linux
ENV CODEQL_JAVA_HOME=/usr/lib/jvm/

# Copy built agent binary from the builder stage
WORKDIR /app
COPY --from=builder /bin/mrva_agent ./mrva_agent

# Run the agent
ENTRYPOINT ["./mrva_agent"]
@@ -1 +1,317 @@
-package agent
+package main

import (
	"io"
	"log"
	"mrvacommander/pkg/codeql"
	"mrvacommander/pkg/common"
	"mrvacommander/pkg/queue"
	"mrvacommander/pkg/storage"
	"mrvacommander/utils"
	"net/http"
	"path/filepath"
	"runtime"

	"context"
	"encoding/json"
	"flag"
	"fmt"
	"os"
	"os/signal"
	"sync"
	"syscall"
	"time"

	"github.com/google/uuid"
	amqp "github.com/rabbitmq/amqp091-go"
	"golang.org/x/exp/slog"

	"github.com/elastic/go-sysinfo"
)

func downloadFile(url string, dest string) error {
	resp, err := http.Get(url)
	if err != nil {
		return fmt.Errorf("failed to download file: %w", err)
	}
	defer resp.Body.Close()

	out, err := os.Create(dest)
	if err != nil {
		return fmt.Errorf("failed to create file: %w", err)
	}
	defer out.Close()

	_, err = io.Copy(out, resp.Body)
	if err != nil {
		return fmt.Errorf("failed to copy file content: %w", err)
	}

	return nil
}

func calculateWorkers() int {
	const workerMemoryGB = 2

	host, err := sysinfo.Host()
	if err != nil {
		log.Fatalf("failed to get host info: %v", err)
	}

	memInfo, err := host.Memory()
	if err != nil {
		log.Fatalf("failed to get memory info: %v", err)
	}

	// Convert available memory to GB
	totalMemoryGB := memInfo.Available / (1024 * 1024 * 1024)

	// Ensure we have at least one worker
	workers := int(totalMemoryGB / workerMemoryGB)
	if workers < 1 {
		workers = 1
	}

	// Limit the number of workers to the number of CPUs
	cpuCount := runtime.NumCPU()
	if workers > cpuCount {
		workers = max(cpuCount, 1)
	}

	return workers
}

type RabbitMQQueue struct {
	jobs    chan common.AnalyzeJob
	results chan common.AnalyzeResult
	conn    *amqp.Connection
	channel *amqp.Channel
}

func InitializeQueue(jobsQueueName, resultsQueueName string) (*RabbitMQQueue, error) {
	rabbitMQHost := os.Getenv("MRVA_RABBITMQ_HOST")
	rabbitMQPort := os.Getenv("MRVA_RABBITMQ_PORT")
	rabbitMQUser := os.Getenv("MRVA_RABBITMQ_USER")
	rabbitMQPassword := os.Getenv("MRVA_RABBITMQ_PASSWORD")

	if rabbitMQHost == "" || rabbitMQPort == "" || rabbitMQUser == "" || rabbitMQPassword == "" {
		return nil, fmt.Errorf("RabbitMQ environment variables not set")
	}

	rabbitMQURL := fmt.Sprintf("amqp://%s:%s@%s:%s/", rabbitMQUser, rabbitMQPassword, rabbitMQHost, rabbitMQPort)

	conn, err := amqp.Dial(rabbitMQURL)
	if err != nil {
		return nil, fmt.Errorf("failed to connect to RabbitMQ: %w", err)
	}

	ch, err := conn.Channel()
	if err != nil {
		conn.Close()
		return nil, fmt.Errorf("failed to open a channel: %w", err)
	}

	_, err = ch.QueueDeclare(jobsQueueName, false, false, false, true, nil)
	if err != nil {
		conn.Close()
		return nil, fmt.Errorf("failed to declare tasks queue: %w", err)
	}

	_, err = ch.QueueDeclare(resultsQueueName, false, false, false, true, nil)
	if err != nil {
		conn.Close()
		return nil, fmt.Errorf("failed to declare results queue: %w", err)
	}

	err = ch.Qos(1, 0, false)
	if err != nil {
		conn.Close()
		return nil, fmt.Errorf("failed to set QoS: %w", err)
	}

	return &RabbitMQQueue{
		conn:    conn,
		channel: ch,
		jobs:    make(chan common.AnalyzeJob),
		results: make(chan common.AnalyzeResult),
	}, nil
}

func (q *RabbitMQQueue) Jobs() chan common.AnalyzeJob {
	return q.jobs
}

func (q *RabbitMQQueue) Results() chan common.AnalyzeResult {
	return q.results
}

func (q *RabbitMQQueue) StartAnalyses(analysis_repos *map[common.NameWithOwner]storage.DBLocation, session_id int, session_language string) {
	slog.Info("Queueing codeql database analyze jobs")
}

func (q *RabbitMQQueue) Close() {
	q.channel.Close()
	q.conn.Close()
}

func (q *RabbitMQQueue) ConsumeJobs(queueName string) {
	msgs, err := q.channel.Consume(queueName, "", true, false, false, false, nil)
	if err != nil {
		slog.Error("failed to register a consumer", slog.Any("error", err))
	}

	for msg := range msgs {
		job := common.AnalyzeJob{}
		err := json.Unmarshal(msg.Body, &job)
		if err != nil {
			slog.Error("failed to unmarshal job", slog.Any("error", err))
			continue
		}
		q.jobs <- job
	}
	close(q.jobs)
}

func (q *RabbitMQQueue) PublishResults(queueName string) {
	for result := range q.results {
		q.publishResult(queueName, result)
	}
}

func (q *RabbitMQQueue) publishResult(queueName string, result interface{}) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	resultBytes, err := json.Marshal(result)
	if err != nil {
		slog.Error("failed to marshal result", slog.Any("error", err))
		return
	}

	slog.Info("Publishing result", slog.String("result", string(resultBytes)))
	err = q.channel.PublishWithContext(ctx, "", queueName, false, false,
		amqp.Publishing{
			ContentType: "application/json",
			Body:        resultBytes,
		})
	if err != nil {
		slog.Error("failed to publish result", slog.Any("error", err))
	}
}

func RunAnalysisJob(job common.AnalyzeJob) (common.AnalyzeResult, error) {
	var result = common.AnalyzeResult{
		RequestId:        job.RequestId,
		ResultCount:      0,
		ResultArchiveURL: "",
		Status:           common.StatusError,
	}

	// Log job info
	slog.Info("Running analysis job", slog.Any("job", job))

	// Create a temporary directory
	tempDir := filepath.Join(os.TempDir(), uuid.New().String())
	if err := os.MkdirAll(tempDir, 0755); err != nil {
		return result, fmt.Errorf("failed to create temporary directory: %v", err)
	}
	defer os.RemoveAll(tempDir)

	// Extract the query pack
	// TODO: download from the 'job' query pack URL
	utils.UntarGz("qp-54674.tgz", filepath.Join(tempDir, "qp-54674"))

	// Perform the CodeQL analysis
	runResult, err := codeql.RunQuery("google_flatbuffers_db.zip", "cpp", "qp-54674", tempDir)
	if err != nil {
		return result, fmt.Errorf("failed to run analysis: %w", err)
	}

	// Generate a ZIP archive containing SARIF and BQRS files
	resultsArchive, err := codeql.GenerateResultsZipArchive(runResult)
	if err != nil {
		return result, fmt.Errorf("failed to generate results archive: %w", err)
	}

	// TODO: Upload the archive to storage
	slog.Info("Results archive size", slog.Int("size", len(resultsArchive)))
	slog.Info("Analysis job successful.")

	result = common.AnalyzeResult{
		RequestId:        job.RequestId,
		ResultCount:      runResult.ResultCount,
		ResultArchiveURL: "REPLACE_THIS_WITH_STORED_RESULTS_ARCHIVE",
		Status:           common.StatusSuccess,
	}

	return result, nil
}

func RunWorker(queue queue.Queue, wg *sync.WaitGroup) {
	defer wg.Done()
	for job := range queue.Jobs() {
		result, err := RunAnalysisJob(job)
		if err != nil {
			slog.Error("failed to run analysis job", slog.Any("error", err))
			continue
		}
		queue.Results() <- result
	}
}

func main() {
	slog.Info("Starting agent")

	workerCount := flag.Int("workers", 0, "number of workers")
	flag.Parse()

	requiredEnvVars := []string{
		"MRVA_RABBITMQ_HOST",
		"MRVA_RABBITMQ_PORT",
		"MRVA_RABBITMQ_USER",
		"MRVA_RABBITMQ_PASSWORD",
		"CODEQL_JAVA_HOME",
		"CODEQL_CLI_PATH",
	}

	for _, envVar := range requiredEnvVars {
		if os.Getenv(envVar) == "" {
			log.Fatalf("Fatal: Missing required environment variable %s", envVar)
		}
	}

	slog.Info("Initializing RabbitMQ connection")
	rabbitMQQueue, err := InitializeQueue("tasks", "results")
	if err != nil {
		slog.Error("failed to initialize RabbitMQ", slog.Any("error", err))
		os.Exit(1)
	}
	defer rabbitMQQueue.Close()

	if *workerCount == 0 {
		*workerCount = calculateWorkers()
	}

	slog.Info("Starting workers", slog.Int("count", *workerCount))
	var wg sync.WaitGroup
	for i := 0; i < *workerCount; i++ {
		wg.Add(1)
		go RunWorker(rabbitMQQueue, &wg)
	}

	slog.Info("Starting tasks consumer")
	go rabbitMQQueue.ConsumeJobs("tasks")

	slog.Info("Starting results publisher")
	go rabbitMQQueue.PublishResults("results")

	slog.Info("Agent startup complete")

	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
	<-sigChan

	slog.Info("Shutting down agent")
	close(rabbitMQQueue.results)
}
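For orientation, here is a minimal producer-side sketch (not part of this commit) of how a job could be placed on the "tasks" queue that ConsumeJobs reads above. The connection URL and all field values are illustrative assumptions; it relies only on the AnalyzeJob fields and the amqp091-go calls already used in this diff, and it assumes NameWithOwner exposes the Owner/Repo fields referenced elsewhere in the changes.

// Hypothetical producer: publish one AnalyzeJob to the "tasks" queue.
package main

import (
	"context"
	"encoding/json"
	"log"
	"time"

	amqp "github.com/rabbitmq/amqp091-go"

	"mrvacommander/pkg/common"
)

func main() {
	// Assumed URL; the agent builds the same form from the MRVA_RABBITMQ_* variables.
	conn, err := amqp.Dial("amqp://user:password@rabbitmq:5672/")
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	ch, err := conn.Channel()
	if err != nil {
		log.Fatalf("channel: %v", err)
	}
	defer ch.Close()

	// Placeholder job values for illustration only.
	job := common.AnalyzeJob{
		RequestId:     54674,
		QueryPackId:   1,
		QueryPackURL:  "http://example.invalid/qp-54674.tgz",
		QueryLanguage: "cpp",
		NWO:           common.NameWithOwner{Owner: "google", Repo: "flatbuffers"},
	}
	body, err := json.Marshal(job)
	if err != nil {
		log.Fatalf("marshal: %v", err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Same publish call the agent uses for results, aimed at the jobs queue instead.
	err = ch.PublishWithContext(ctx, "", "tasks", false, false, amqp.Publishing{
		ContentType: "application/json",
		Body:        body,
	})
	if err != nil {
		log.Fatalf("publish: %v", err)
	}
}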
@@ -1,5 +1,3 @@
-version: '3.8'
-
 services:
   postgres:
     image: postgres:16.3-bookworm
@@ -18,32 +16,32 @@ services:
     networks:
       - backend

   rabbitmq:
-    image: rabbitmq:3.13-management
+    image: rabbitmq:3-management
+    hostname: rabbitmq
     container_name: rabbitmq
-    environment:
-      RABBITMQ_DEFAULT_USER: user
-      RABBITMQ_DEFAULT_PASS: password
+    volumes:
+      - ./init/rabbitmq/rabbitmq.conf:/etc/rabbitmq/rabbitmq.conf:ro
+      - ./init/rabbitmq/definitions.json:/etc/rabbitmq/definitions.json:ro
     expose:
       - "5672"
       - "15672"
     ports:
+      - "5672:5672"
       - "15672:15672"
     networks:
       - backend
+    healthcheck:
+      test: [ "CMD", "rabbitmqctl", "status" ]
+      interval: 1s

   server:
-    image: server-image
+    build:
+      context: ./cmd/server
+      dockerfile: Dockerfile
     container_name: server
-    environment:
-      - MRVA_SERVER_ROOT=/mrva/mrvacommander/cmd/server
-    command: sh -c "tail -f /dev/null"
     ports:
       - "8080:8080"
-    volumes:
-      - /Users/hohn/work-gh/mrva/mrvacommander:/mrva/mrvacommander
     depends_on:
       - postgres
       - rabbitmq
@@ -63,6 +61,22 @@ services:
     volumes:
       - minio-data:/data

+  agent:
+    build:
+      context: .
+      dockerfile: ./cmd/agent/Dockerfile
+    container_name: agent
+    depends_on:
+      - rabbitmq
+      - minio
+    environment:
+      MRVA_RABBITMQ_HOST: rabbitmq
+      MRVA_RABBITMQ_PORT: 5672
+      MRVA_RABBITMQ_USER: user
+      MRVA_RABBITMQ_PASSWORD: password
+    networks:
+      - backend
+
 volumes:
   minio-data:
   postgres_data:
@@ -71,7 +85,3 @@ volumes:
 networks:
   backend:
     driver: bridge
-
-
-
-
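One operational note on the compose wiring above: depends_on only orders container start-up, and the agent service does not reference the RabbitMQ health check, so the agent can start before the broker is accepting connections. A hedged sketch of a retry wrapper the agent could use around amqp.Dial (an assumption, not part of this commit; it reuses the amqp, slog, fmt, and time imports the agent already has):

// dialWithRetry is a hypothetical helper: keep retrying amqp.Dial until the
// broker is reachable or the attempt budget runs out.
func dialWithRetry(url string, attempts int, delay time.Duration) (*amqp.Connection, error) {
	var lastErr error
	for i := 0; i < attempts; i++ {
		conn, err := amqp.Dial(url)
		if err == nil {
			return conn, nil
		}
		lastErr = err
		slog.Info("RabbitMQ not ready, retrying", slog.Int("attempt", i+1), slog.Any("error", err))
		time.Sleep(delay)
	}
	return nil, fmt.Errorf("could not connect to RabbitMQ after %d attempts: %w", attempts, lastErr)
}

Alternatively, the agent service could gate on the broker health check with a depends_on condition instead of retrying in code.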
go.mod (28 lines changed)
@@ -3,24 +3,32 @@ module mrvacommander
 go 1.22.0

 require (
+	github.com/BurntSushi/toml v1.4.0
+	github.com/elastic/go-sysinfo v1.14.0
+	github.com/google/uuid v1.6.0
 	github.com/gorilla/mux v1.8.1
-	github.com/hohn/ghes-mirva-server v0.0.0-20240313191620-9917867ea540
-	github.com/spf13/cobra v1.8.0
+	github.com/rabbitmq/amqp091-go v1.10.0
+	golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8
+	gopkg.in/yaml.v3 v3.0.1
+	gorm.io/driver/postgres v1.5.9
+	gorm.io/gorm v1.25.10
 )

 require (
-	github.com/BurntSushi/toml v1.3.2 // indirect
-	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/elastic/go-windows v1.0.1 // indirect
 	github.com/jackc/pgpassfile v1.0.0 // indirect
-	github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect
+	github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
 	github.com/jackc/pgx/v5 v5.6.0 // indirect
 	github.com/jackc/puddle/v2 v2.2.1 // indirect
 	github.com/jinzhu/inflection v1.0.0 // indirect
 	github.com/jinzhu/now v1.1.5 // indirect
-	github.com/spf13/pflag v1.0.5 // indirect
-	golang.org/x/crypto v0.23.0 // indirect
+	github.com/kr/text v0.2.0 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/prometheus/procfs v0.15.1 // indirect
+	github.com/rogpeppe/go-internal v1.12.0 // indirect
+	golang.org/x/crypto v0.24.0 // indirect
 	golang.org/x/sync v0.7.0 // indirect
-	golang.org/x/text v0.15.0 // indirect
-	gorm.io/driver/postgres v1.5.7 // indirect
-	gorm.io/gorm v1.25.10 // indirect
+	golang.org/x/sys v0.21.0 // indirect
+	golang.org/x/text v0.16.0 // indirect
+	howett.net/plist v1.0.1 // indirect
 )
go.sum (71 lines changed)
@@ -1,44 +1,75 @@
-github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
-github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
-github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
+github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/elastic/go-sysinfo v1.14.0 h1:dQRtiqLycoOOla7IflZg3aN213vqJmP0lpVpKQ9lUEY=
+github.com/elastic/go-sysinfo v1.14.0/go.mod h1:FKUXnZWhnYI0ueO7jhsGV3uQJ5hiz8OqM5b3oGyaRr8=
+github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0=
+github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
 github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
-github.com/hohn/ghes-mirva-server v0.0.0-20240313191620-9917867ea540 h1:ohnDVLM/VvVCVfjvSYKAPZIQhOPRKk1ZcZcMzf4yT8k=
-github.com/hohn/ghes-mirva-server v0.0.0-20240313191620-9917867ea540/go.mod h1:ircD+yE4AxWL/DufgcLDi191c+JM9ge/C3yiT/0zL+U=
-github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
-github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
 github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
-github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA=
-github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
 github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY=
 github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw=
 github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
 github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
 github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
 github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
 github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
 github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
-github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw=
+github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
-golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
+github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
+golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
+golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY=
+golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI=
 golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
 golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
-golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gorm.io/driver/postgres v1.5.7 h1:8ptbNJTDbEmhdr62uReG5BGkdQyeasu/FZHxI0IMGnM=
-gorm.io/driver/postgres v1.5.7/go.mod h1:3e019WlBaYI5o5LIdNV+LyxCMNtLOQETBXL2h4chKpA=
+gorm.io/driver/postgres v1.5.9 h1:DkegyItji119OlcaLjqN11kHoUgZ/j13E0jkJZgD6A8=
+gorm.io/driver/postgres v1.5.9/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI=
 gorm.io/gorm v1.25.10 h1:dQpO+33KalOA+aFYGlK+EfxcI5MbO7EP2yYygwh9h+s=
 gorm.io/gorm v1.25.10/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
+howett.net/plist v1.0.1 h1:37GdZ8tP09Q35o9ych3ehygcsL+HqKSwzctveSlarvM=
+howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
init/rabbitmq/definitions.json (new file, 43 lines)
@@ -0,0 +1,43 @@
{
  "users": [
    {
      "name": "user",
      "password": "password",
      "tags": "administrator"
    }
  ],
  "vhosts": [
    {
      "name": "/"
    }
  ],
  "queues": [
    {
      "name": "tasks",
      "vhost": "/",
      "durable": false,
      "persistent": false,
      "arguments": {
        "x-queue-type": "classic"
      }
    },
    {
      "name": "results",
      "vhost": "/",
      "durable": false,
      "persistent": false,
      "arguments": {
        "x-queue-type": "classic"
      }
    }
  ],
  "permissions": [
    {
      "user": "user",
      "vhost": "/",
      "configure": ".*",
      "write": ".*",
      "read": ".*"
    }
  ]
}
init/rabbitmq/rabbitmq.conf (new file, 1 line)
@@ -0,0 +1 @@
management.load_definitions = /etc/rabbitmq/definitions.json
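The definitions file pre-creates the non-durable classic "tasks" and "results" queues, and the one-line rabbitmq.conf above makes the management plugin load it at broker start-up; the agent then declares the same queues with matching parameters. A small hedged sketch (an assumption, not part of this commit; it assumes the agent's existing amqp and fmt imports) of a start-up check that the queues already exist as defined, using a passive declare on a dedicated channel:

// checkQueuesExist is a hypothetical start-up check: a passive declare fails
// if a queue is missing or was declared with different properties.
// A failed passive declare closes the channel, so a throwaway channel is used.
func checkQueuesExist(conn *amqp.Connection, names ...string) error {
	ch, err := conn.Channel()
	if err != nil {
		return fmt.Errorf("open channel: %w", err)
	}
	defer ch.Close()
	for _, name := range names {
		if _, err := ch.QueueDeclarePassive(name, false, false, false, false, nil); err != nil {
			return fmt.Errorf("queue %q not available as defined: %w", name, err)
		}
	}
	return nil
}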
@@ -54,17 +54,14 @@ func (r *RunnerSingle) worker(wid int) {
 		slog.Debug("Analysis: running", "job", job)
 		storage.SetStatus(job.QueryPackId, job.NWO, common.StatusQueued)

-		resultFile, err := r.RunAnalysis(job)
+		_, err := RunAnalysis(job)
 		if err != nil {
 			continue
 		}

 		slog.Debug("Analysis run finished", "job", job)

-		res := common.AnalyzeResult{
-			RunAnalysisSARIF: resultFile,
-			RunAnalysisBQRS:  "", // FIXME ?
-		}
+		res := common.AnalyzeResult{}
 		r.queue.Results() <- res
 		storage.SetStatus(job.QueryPackId, job.NWO, common.StatusSuccess)
 		storage.SetResult(job.QueryPackId, job.NWO, res)
@@ -72,7 +69,7 @@ func (r *RunnerSingle) worker(wid int) {
 	}
 }

-func (r *RunnerSingle) RunAnalysis(job common.AnalyzeJob) (string, error) {
+func RunAnalysis(job common.AnalyzeJob) (string, error) {
 	// TODO Add multi-language tests including queryLanguage
 	// queryPackID, queryLanguage, dbOwner, dbRepo :=
 	// job.QueryPackId, job.QueryLanguage, job.NWO.Owner, job.NWO.Repo
pkg/codeql/codeql.go (new file, 421 lines)
@@ -0,0 +1,421 @@
package codeql

import (
	"archive/zip"
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"log/slog"
	"mrvacommander/utils"
	"os"
	"os/exec"
	"path/filepath"

	"gopkg.in/yaml.v3"
)

// Helper Functions
func contains(slice []string, item string) bool {
	for _, s := range slice {
		if s == item {
			return true
		}
	}
	return false
}

// Main Functions
func getCodeQLCLIPath() (string, error) {
	// get the CODEQL_CLI_PATH environment variable
	codeqlCliPath := os.Getenv("CODEQL_CLI_PATH")
	if codeqlCliPath == "" {
		return "", fmt.Errorf("CODEQL_CLI_PATH environment variable not set")
	}
	return codeqlCliPath, nil
}

func GenerateResultsZipArchive(runQueryResult *RunQueryResult) ([]byte, error) {
	buffer := new(bytes.Buffer)
	zipWriter := zip.NewWriter(buffer)

	if runQueryResult.SarifFilePath != "" {
		err := addFileToZip(zipWriter, runQueryResult.SarifFilePath, "results.sarif")
		if err != nil {
			return nil, fmt.Errorf("failed to add SARIF file to zip: %v", err)
		}
	}

	for _, relativePath := range runQueryResult.BqrsFilePaths.RelativeFilePaths {
		fullPath := filepath.Join(runQueryResult.BqrsFilePaths.BasePath, relativePath)
		err := addFileToZip(zipWriter, fullPath, relativePath)
		if err != nil {
			return nil, fmt.Errorf("failed to add BQRS file to zip: %v", err)
		}
	}

	err := zipWriter.Close()
	if err != nil {
		return nil, fmt.Errorf("failed to close zip writer: %v", err)
	}

	return buffer.Bytes(), nil
}

func addFileToZip(zipWriter *zip.Writer, filePath, zipPath string) error {
	file, err := os.Open(filePath)
	if err != nil {
		return fmt.Errorf("failed to open file %s: %v", filePath, err)
	}
	defer file.Close()

	w, err := zipWriter.Create(zipPath)
	if err != nil {
		return fmt.Errorf("failed to create zip entry for %s: %v", zipPath, err)
	}

	_, err = io.Copy(w, file)
	if err != nil {
		return fmt.Errorf("failed to copy file content to zip entry for %s: %v", zipPath, err)
	}

	return nil
}

func RunQuery(database string, nwo string, queryPackPath string, tempDir string) (*RunQueryResult, error) {
	path, err := getCodeQLCLIPath()

	if err != nil {
		return nil, fmt.Errorf("failed to get codeql cli path: %v", err)
	}

	codeql := CodeqlCli{path}

	resultsDir := filepath.Join(tempDir, "results")
	if err = os.Mkdir(resultsDir, 0755); err != nil {
		return nil, fmt.Errorf("failed to create results directory: %v", err)
	}

	databasePath := filepath.Join(tempDir, "db")
	if err := utils.UnzipFile(database, databasePath); err != nil {
		return nil, fmt.Errorf("failed to unzip database: %v", err)
	}

	dbMetadata, err := getDatabaseMetadata(databasePath)
	if err != nil {
		return nil, fmt.Errorf("failed to get database metadata: %v", err)
	}

	// Check if the database has CreationMetadata / a SHA
	var databaseSHA string
	if dbMetadata.CreationMetadata == nil || dbMetadata.CreationMetadata.SHA == nil {
		// If the database does not have a SHA, we can proceed regardless
		slog.Warn("Database does not have a SHA")
		databaseSHA = ""
	} else {
		databaseSHA = *dbMetadata.CreationMetadata.SHA
	}

	cmd := exec.Command(codeql.Path, "database", "run-queries", "--ram=1024", "--additional-packs", queryPackPath, "--", databasePath, queryPackPath)
	if output, err := cmd.CombinedOutput(); err != nil {
		return nil, fmt.Errorf("failed to run queries: %v\nOutput: %s", err, output)
	}

	queryPackRunResults, err := getQueryPackRunResults(codeql, databasePath, queryPackPath)
	if err != nil {
		return nil, fmt.Errorf("failed to get query pack run results: %v", err)
	}

	sourceLocationPrefix, err := getSourceLocationPrefix(codeql, databasePath)
	if err != nil {
		return nil, fmt.Errorf("failed to get source location prefix: %v", err)
	}

	shouldGenerateSarif := queryPackSupportsSarif(queryPackRunResults)

	if shouldGenerateSarif {
		slog.Info("Query pack supports SARIF")
	} else {
		slog.Info("Query pack does not support SARIF")
	}

	var resultCount int
	var sarifFilePath string

	if shouldGenerateSarif {
		sarif, err := generateSarif(codeql, nwo, databasePath, queryPackPath, databaseSHA, resultsDir)
		if err != nil {
			return nil, fmt.Errorf("failed to generate SARIF: %v", err)
		}
		resultCount = getSarifResultCount(sarif)
		slog.Info("Generated SARIF", "resultCount", resultCount)
		sarifFilePath = filepath.Join(resultsDir, "results.sarif")
		if err := os.WriteFile(sarifFilePath, sarif, 0644); err != nil {
			return nil, fmt.Errorf("failed to write SARIF file: %v", err)
		}
	} else {
		resultCount = queryPackRunResults.TotalResultsCount
		slog.Info("Did not generate SARIF", "resultCount", resultCount)
	}

	slog.Info("Adjusting BQRS files")
	bqrsFilePaths, err := adjustBqrsFiles(queryPackRunResults, resultsDir)
	if err != nil {
		return nil, fmt.Errorf("failed to adjust BQRS files: %v", err)
	}

	return &RunQueryResult{
		ResultCount:          resultCount,
		DatabaseSHA:          databaseSHA,
		SourceLocationPrefix: sourceLocationPrefix,
		BqrsFilePaths:        bqrsFilePaths,
		SarifFilePath:        sarifFilePath,
	}, nil
}

func getDatabaseMetadata(databasePath string) (*DatabaseMetadata, error) {
	data, err := os.ReadFile(filepath.Join(databasePath, "codeql-database.yml"))
	if err != nil {
		return nil, fmt.Errorf("failed to read database metadata: %v", err)
	}

	var metadata DatabaseMetadata
	if err := yaml.Unmarshal(data, &metadata); err != nil {
		return nil, fmt.Errorf("failed to unmarshal database metadata: %v", err)
	}

	return &metadata, nil
}

func runCommand(command []string) (CodeQLCommandOutput, error) {
	slog.Info("Running command", "command", command)
	cmd := exec.Command(command[0], command[1:]...)
	stdout, err := cmd.Output()
	if err != nil {
		return CodeQLCommandOutput{ExitCode: 1}, err
	}
	return CodeQLCommandOutput{ExitCode: 0, Stdout: string(stdout)}, nil
}

func validateQueryMetadataObject(data []byte) (QueryMetadata, error) {
	var queryMetadata QueryMetadata
	if err := json.Unmarshal(data, &queryMetadata); err != nil {
		return QueryMetadata{}, err
	}
	return queryMetadata, nil
}

func validateBQRSInfoObject(data []byte) (BQRSInfo, error) {
	var bqrsInfo BQRSInfo
	if err := json.Unmarshal(data, &bqrsInfo); err != nil {
		return BQRSInfo{}, err
	}
	return bqrsInfo, nil
}

func getBqrsInfo(codeql CodeqlCli, bqrs string) (BQRSInfo, error) {
	bqrsInfoOutput, err := runCommand([]string{codeql.Path, "bqrs", "info", "--format=json", bqrs})
	if err != nil {
		return BQRSInfo{}, fmt.Errorf("unable to run codeql bqrs info. Error: %v", err)
	}
	if bqrsInfoOutput.ExitCode != 0 {
		return BQRSInfo{}, fmt.Errorf("unable to run codeql bqrs info. Exit code: %d", bqrsInfoOutput.ExitCode)
	}
	return validateBQRSInfoObject([]byte(bqrsInfoOutput.Stdout))
}

func getQueryMetadata(codeql CodeqlCli, query string) (QueryMetadata, error) {
	queryMetadataOutput, err := runCommand([]string{codeql.Path, "resolve", "metadata", "--format=json", query})
	if err != nil {
		return QueryMetadata{}, fmt.Errorf("unable to run codeql resolve metadata. Error: %v", err)
	}
	if queryMetadataOutput.ExitCode != 0 {
		return QueryMetadata{}, fmt.Errorf("unable to run codeql resolve metadata. Exit code: %d", queryMetadataOutput.ExitCode)
	}
	return validateQueryMetadataObject([]byte(queryMetadataOutput.Stdout))
}

func getQueryPackRunResults(codeql CodeqlCli, databasePath, queryPackPath string) (*QueryPackRunResults, error) {
	resultsBasePath := filepath.Join(databasePath, "results")

	queryPaths := []string{} // Replace with actual query paths resolution logic

	var queries []Query
	for _, queryPath := range queryPaths {
		relativeBqrsFilePath := filepath.Join(queryPackPath, queryPath)
		bqrsFilePath := filepath.Join(resultsBasePath, relativeBqrsFilePath)

		if _, err := os.Stat(bqrsFilePath); os.IsNotExist(err) {
			return nil, fmt.Errorf("could not find BQRS file for query %s at %s", queryPath, bqrsFilePath)
		}

		bqrsInfo, err := getBqrsInfo(codeql, bqrsFilePath)
		if err != nil {
			return nil, fmt.Errorf("failed to get BQRS info: %v", err)
		}

		queryMetadata, err := getQueryMetadata(codeql, queryPath)
		if err != nil {
			return nil, fmt.Errorf("failed to get query metadata: %v", err)
		}

		queries = append(queries, Query{
			QueryPath:            queryPath,
			QueryMetadata:        queryMetadata,
			RelativeBqrsFilePath: relativeBqrsFilePath,
			BqrsInfo:             bqrsInfo,
		})
	}

	totalResultsCount := 0
	for _, query := range queries {
		count, err := getBqrsResultCount(query.BqrsInfo)
		if err != nil {
			return nil, fmt.Errorf("failed to get BQRS result count: %v", err)
		}
		totalResultsCount += count
	}

	return &QueryPackRunResults{
		Queries:           queries,
		TotalResultsCount: totalResultsCount,
		ResultsBasePath:   resultsBasePath,
	}, nil
}

func adjustBqrsFiles(queryPackRunResults *QueryPackRunResults, resultsDir string) (BqrsFilePaths, error) {
	if len(queryPackRunResults.Queries) == 1 {
		currentBqrsFilePath := filepath.Join(queryPackRunResults.ResultsBasePath, queryPackRunResults.Queries[0].RelativeBqrsFilePath)
		newBqrsFilePath := filepath.Join(resultsDir, "results.bqrs")

		if err := os.MkdirAll(resultsDir, os.ModePerm); err != nil {
			return BqrsFilePaths{}, err
		}

		if err := os.Rename(currentBqrsFilePath, newBqrsFilePath); err != nil {
			return BqrsFilePaths{}, err
		}

		return BqrsFilePaths{BasePath: resultsDir, RelativeFilePaths: []string{"results.bqrs"}}, nil
	}

	relativeFilePaths := make([]string, len(queryPackRunResults.Queries))
	for i, query := range queryPackRunResults.Queries {
		relativeFilePaths[i] = query.RelativeBqrsFilePath
	}

	return BqrsFilePaths{
		BasePath:          queryPackRunResults.ResultsBasePath,
		RelativeFilePaths: relativeFilePaths,
	}, nil
}

func getSourceLocationPrefix(codeql CodeqlCli, databasePath string) (string, error) {
	cmd := exec.Command(codeql.Path, "resolve", "database", databasePath)
	output, err := cmd.CombinedOutput()
	if err != nil {
		return "", fmt.Errorf("failed to resolve database: %v\nOutput: %s", err, output)
	}

	var resolvedDatabase ResolvedDatabase
	if err := json.Unmarshal(output, &resolvedDatabase); err != nil {
		return "", fmt.Errorf("failed to unmarshal resolved database: %v", err)
	}

	return resolvedDatabase.SourceLocationPrefix, nil
}

func queryPackSupportsSarif(queryPackRunResults *QueryPackRunResults) bool {
	for _, query := range queryPackRunResults.Queries {
		if !querySupportsSarif(query.QueryMetadata, query.BqrsInfo) {
			return false
		}
	}
	return true
}

func querySupportsSarif(queryMetadata QueryMetadata, bqrsInfo BQRSInfo) bool {
	return getSarifOutputType(queryMetadata, bqrsInfo.CompatibleQueryKinds) != ""
}

func getSarifOutputType(queryMetadata QueryMetadata, compatibleQueryKinds []string) string {
	if (*queryMetadata.Kind == "path-problem" || *queryMetadata.Kind == "path-alert") && contains(compatibleQueryKinds, "PathProblem") {
		return "path-problem"
	}
	if (*queryMetadata.Kind == "problem" || *queryMetadata.Kind == "alert") && contains(compatibleQueryKinds, "Problem") {
		return "problem"
	}
	return ""
}

func generateSarif(codeql CodeqlCli, nwo, databasePath, queryPackPath, databaseSHA string, resultsDir string) ([]byte, error) {
	sarifFile := filepath.Join(resultsDir, "results.sarif")
	cmd := exec.Command(codeql.Path, "database", "interpret-results", "--format=sarif-latest", "--output="+sarifFile, "--sarif-add-snippets", "--no-group-results", databasePath, queryPackPath)
	if output, err := cmd.CombinedOutput(); err != nil {
		return nil, fmt.Errorf("failed to generate SARIF: %v\nOutput: %s", err, output)
	}

	sarifData, err := os.ReadFile(sarifFile)
	if err != nil {
		return nil, fmt.Errorf("failed to read SARIF file: %v", err)
	}

	var sarif Sarif
	if err := json.Unmarshal(sarifData, &sarif); err != nil {
		return nil, fmt.Errorf("failed to unmarshal SARIF: %v", err)
	}

	injectVersionControlInfo(&sarif, nwo, databaseSHA)
	sarifBytes, err := json.Marshal(sarif)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal SARIF: %v", err)
	}

	return sarifBytes, nil
}

func injectVersionControlInfo(sarif *Sarif, nwo, databaseSHA string) {
	for _, run := range sarif.Runs {
		run.VersionControlProvenance = append(run.VersionControlProvenance, map[string]interface{}{
			"repositoryUri": fmt.Sprintf("%s/%s", os.Getenv("GITHUB_SERVER_URL"), nwo),
			"revisionId":    databaseSHA,
		})
	}
}

// getSarifResultCount returns the number of results in the SARIF file.
func getSarifResultCount(sarif []byte) int {
	var sarifData Sarif
	if err := json.Unmarshal(sarif, &sarifData); err != nil {
		log.Printf("failed to unmarshal SARIF for result count: %v", err)
		return 0
	}
	count := 0
	for _, run := range sarifData.Runs {
		count += len(run.Results)
	}
	return count
}

// Known result set names
var KnownResultSetNames = []string{"#select", "problems"}

// getBqrsResultCount returns the number of results in the BQRS file.
func getBqrsResultCount(bqrsInfo BQRSInfo) (int, error) {
	for _, name := range KnownResultSetNames {
		for _, resultSet := range bqrsInfo.ResultSets {
			if resultSet.Name == name {
				return resultSet.Rows, nil
			}
		}
	}
	var resultSetNames []string
	for _, resultSet := range bqrsInfo.ResultSets {
		resultSetNames = append(resultSetNames, resultSet.Name)
	}
	return 0, fmt.Errorf(
		"BQRS does not contain any result sets matching known names. Expected one of %s but found %s",
		KnownResultSetNames, resultSetNames,
	)
}
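For orientation, a hedged sketch of how this package is meant to be driven end to end, which is essentially what RunAnalysisJob in the agent does. The file names are placeholders, and writing the archive to a local file is an illustration only; the agent is expected to upload it to storage instead.

// Hypothetical caller of pkg/codeql (placeholder paths, illustration only).
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"mrvacommander/pkg/codeql"
)

func analyzeOnce(tempDir string) error {
	// Run the query pack against a zipped CodeQL database.
	runResult, err := codeql.RunQuery(
		"google_flatbuffers_db.zip", // database zip (placeholder)
		"google/flatbuffers",        // nwo, recorded in the SARIF versionControlProvenance
		"qp-54674",                  // query pack path (placeholder)
		tempDir,
	)
	if err != nil {
		return fmt.Errorf("run query: %w", err)
	}

	// Bundle results.sarif and the BQRS files into a single archive.
	archive, err := codeql.GenerateResultsZipArchive(runResult)
	if err != nil {
		return fmt.Errorf("package results: %w", err)
	}

	// Persist locally for illustration.
	return os.WriteFile(filepath.Join(tempDir, "results.zip"), archive, 0644)
}

func main() {
	tempDir, err := os.MkdirTemp("", "mrva-agent-")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(tempDir)
	if err := analyzeOnce(tempDir); err != nil {
		log.Fatal(err)
	}
}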
pkg/codeql/types.go (new file, 81 lines)
@@ -0,0 +1,81 @@
package codeql

// Types
type CodeqlCli struct {
	Path string
}

type RunQueryResult struct {
	ResultCount          int
	DatabaseSHA          string
	SourceLocationPrefix string
	BqrsFilePaths        BqrsFilePaths
	SarifFilePath        string
}

type BqrsFilePaths struct {
	BasePath          string   `json:"basePath"`
	RelativeFilePaths []string `json:"relativeFilePaths"`
}

type SarifOutputType string

const (
	Problem     SarifOutputType = "problem"
	PathProblem SarifOutputType = "path-problem"
)

type SarifRun struct {
	VersionControlProvenance []interface{} `json:"versionControlProvenance,omitempty"`
	Results                  []interface{} `json:"results"`
}

type Sarif struct {
	Runs []SarifRun `json:"runs"`
}

type CreationMetadata struct {
	SHA        *string `yaml:"sha,omitempty"`
	CLIVersion *string `yaml:"cliVersion,omitempty"`
}

type DatabaseMetadata struct {
	CreationMetadata *CreationMetadata `yaml:"creationMetadata,omitempty"`
}

type QueryMetadata struct {
	ID   *string `json:"id,omitempty"`
	Kind *string `json:"kind,omitempty"`
}

type ResultSet struct {
	Name string `json:"name"`
	Rows int    `json:"rows"`
}

type BQRSInfo struct {
	ResultSets           []ResultSet `json:"resultSets"`
	CompatibleQueryKinds []string    `json:"compatibleQueryKinds"`
}

type Query struct {
	QueryPath            string        `json:"queryPath"`
	QueryMetadata        QueryMetadata `json:"queryMetadata"`
	RelativeBqrsFilePath string        `json:"relativeBqrsFilePath"`
	BqrsInfo             BQRSInfo      `json:"bqrsInfo"`
}

type QueryPackRunResults struct {
	Queries           []Query `json:"queries"`
	TotalResultsCount int     `json:"totalResultsCount"`
	ResultsBasePath   string  `json:"resultsBasePath"`
}

type ResolvedDatabase struct {
	SourceLocationPrefix string `json:"sourceLocationPrefix"`
}

type CodeQLCommandOutput struct {
	ExitCode int    `json:"exitCode"`
	Stdout   string `json:"stdout"`
}
@@ -9,18 +9,21 @@ type NameWithOwner struct {
 // AnalyzeJob represents a job specifying a repository and a query pack to analyze it with.
 // This is the message format that the agent receives from the queue.
 type AnalyzeJob struct {
-	MRVARequestID int
-	QueryPackId   int
-	QueryPackURL  string
-	QueryLanguage string
-	NWO           NameWithOwner
+	RequestId     int           // json:"request_id"
+	QueryPackId   int           // json:"query_pack_id"
+	QueryPackURL  string        // json:"query_pack_url"
+	QueryLanguage string        // json:"query_language"
+	NWO           NameWithOwner // json:"nwo"
 }

 // AnalyzeResult represents the result of an analysis job.
 // This is the message format that the agent sends to the queue.
+// Status will only ever be StatusSuccess or StatusError when sent in a result.
 type AnalyzeResult struct {
-	RunAnalysisSARIF string
-	RunAnalysisBQRS  string
+	Status           Status // json:"status"
+	RequestId        int    // json:"request_id"
+	ResultCount      int    // json:"result_count"
+	ResultArchiveURL string // json:"result_archive_url"
 }

 // Status represents the status of a job.
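One detail worth noting in the struct changes above: the // json:"request_id" annotations are plain comments, not struct tags, so encoding/json will use the Go field names (RequestId, ResultCount, and so on) on the wire, which stays consistent because the agent both marshals and unmarshals these types itself. If the snake_case names were actually intended, the fields would need real tags; a sketch of what that would look like (not part of this commit):

type AnalyzeResult struct {
	Status           Status `json:"status"`
	RequestId        int    `json:"request_id"`
	ResultCount      int    `json:"result_count"`
	ResultArchiveURL string `json:"result_archive_url"`
}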
@@ -4,7 +4,6 @@ import (
 	"archive/zip"
 	"errors"
 	"fmt"
-	"io"
 	"io/fs"
 	"log/slog"
 	"os"
@@ -129,10 +128,10 @@ func GetResult(js common.JobSpec) common.AnalyzeResult {
 	return ar
 }

-func SetResult(sessionid int, orl common.NameWithOwner, ar common.AnalyzeResult) {
+func SetResult(sessionid int, nwo common.NameWithOwner, ar common.AnalyzeResult) {
 	mutex.Lock()
 	defer mutex.Unlock()
-	result[common.JobSpec{JobID: sessionid, NameWithOwner: orl}] = ar
+	result[common.JobSpec{JobID: sessionid, NameWithOwner: nwo}] = ar
 }

 func PackageResults(ar common.AnalyzeResult, owre common.NameWithOwner, vaid int) (zipPath string, e error) {
@@ -166,29 +165,31 @@ func PackageResults(ar common.AnalyzeResult, owre common.NameWithOwner, vaid int
 	defer zwriter.Close()

 	// Add each result file to the zip archive
+	/*
 		names := []([]string){{ar.RunAnalysisSARIF, "results.sarif"}}
 		for _, fpath := range names {
 			file, err := os.Open(fpath[0])
 			if err != nil {
 				return "", err
 			}
 			defer file.Close()

 			// Create a new file in the zip archive with custom name
 			// The client is very specific:
 			// if zf.Name != "results.sarif" && zf.Name != "results.bqrs" { continue }

 			zipEntry, err := zwriter.Create(fpath[1])
 			if err != nil {
 				return "", err
 			}

 			// Copy the contents of the file to the zip entry
 			_, err = io.Copy(zipEntry, file)
 			if err != nil {
 				return "", err
 			}
 		}
+	*/
 	return zpath, nil
 }
@@ -210,10 +211,10 @@ func SetJobInfo(js common.JobSpec, ji common.JobInfo) {
 	info[js] = ji
 }

-func GetStatus(sessionid int, orl common.NameWithOwner) common.Status {
+func GetStatus(sessionid int, nwo common.NameWithOwner) common.Status {
 	mutex.Lock()
 	defer mutex.Unlock()
-	return status[common.JobSpec{JobID: sessionid, NameWithOwner: orl}]
+	return status[common.JobSpec{JobID: sessionid, NameWithOwner: nwo}]
 }

 func ResultAsFile(path string) (string, []byte, error) {
@@ -231,10 +232,10 @@ func ResultAsFile(path string) (string, []byte, error) {
 	return fpath, file, nil
 }

-func SetStatus(sessionid int, orl common.NameWithOwner, s common.Status) {
+func SetStatus(sessionid int, nwo common.NameWithOwner, s common.Status) {
 	mutex.Lock()
 	defer mutex.Unlock()
-	status[common.JobSpec{JobID: sessionid, NameWithOwner: orl}] = s
+	status[common.JobSpec{JobID: sessionid, NameWithOwner: nwo}] = s
 }

 func AddJob(sessionid int, job common.AnalyzeJob) {