Separate queue and agent logic and refactor
@@ -1,20 +1,20 @@
package agent

import (
    "fmt"
    "log/slog"
    "os"
    "os/exec"
    "path/filepath"
    "sync"

    "github.com/google/uuid"

    "mrvacommander/pkg/codeql"
    "mrvacommander/pkg/common"
    "mrvacommander/pkg/logger"
    "mrvacommander/pkg/qpstore"
    "mrvacommander/pkg/queue"
    "mrvacommander/pkg/storage"
    "mrvacommander/utils"
)

type RunnerSingle struct {

@@ -40,102 +40,101 @@ type Visibles struct {
}

func (c *RunnerSingle) Setup(st *Visibles) {
    return
    // TODO: implement
}

func (r *RunnerSingle) worker(wid int) {
    // TODO: reimplement this later
    /*
        var job common.AnalyzeJob

        for {
            job = <-r.queue.Jobs()

            slog.Debug("Picking up job", "job", job, "worker", wid)

            slog.Debug("Analysis: running", "job", job)
            storage.SetStatus(job.QueryPackId, job.NWO, common.StatusQueued)

            resultFile, err := RunAnalysis(job)
            if err != nil {
                continue
            }

            slog.Debug("Analysis run finished", "job", job)

            // TODO: FIX THIS
            res := common.AnalyzeResult{
                RunAnalysisSARIF: resultFile,
                RunAnalysisBQRS:  "", // FIXME ?
            }
            r.queue.Results() <- res
            storage.SetStatus(job.QueryPackId, job.NWO, common.StatusSuccess)
            storage.SetResult(job.QueryPackId, job.NWO, res)
        }
    */
}

// RunAnalysisJob runs a CodeQL analysis job (AnalyzeJob) returning an AnalyzeResult
func RunAnalysisJob(job common.AnalyzeJob) (common.AnalyzeResult, error) {
    var result = common.AnalyzeResult{
        RequestId:        job.RequestId,
        ResultCount:      0,
        ResultArchiveURL: "",
        Status:           common.StatusError,
    }

    // Create a temporary directory
    tempDir := filepath.Join(os.TempDir(), uuid.New().String())
    if err := os.MkdirAll(tempDir, 0755); err != nil {
        return result, fmt.Errorf("failed to create temporary directory: %v", err)
    }
    defer os.RemoveAll(tempDir)

    // Extract the query pack
    // TODO: download from the 'job' query pack URL
    // utils.downloadFile
    queryPackPath := filepath.Join(tempDir, "qp-54674")
    utils.UntarGz("qp-54674.tgz", queryPackPath)

    // Perform the CodeQL analysis
    runResult, err := codeql.RunQuery("google_flatbuffers_db.zip", "cpp", queryPackPath, tempDir)
    if err != nil {
        return result, fmt.Errorf("failed to run analysis: %w", err)
    }

    // Generate a ZIP archive containing SARIF and BQRS files
    resultsArchive, err := codeql.GenerateResultsZipArchive(runResult)
    if err != nil {
        return result, fmt.Errorf("failed to generate results archive: %w", err)
    }

    // TODO: Upload the archive to storage
    slog.Info("Results archive size", slog.Int("size", len(resultsArchive)))
    slog.Info("Analysis job successful.")

    result = common.AnalyzeResult{
        RequestId:        job.RequestId,
        ResultCount:      runResult.ResultCount,
        ResultArchiveURL: "REPLACE_THIS_WITH_STORED_RESULTS_ARCHIVE", // TODO
        Status:           common.StatusSuccess,
    }

    return result, nil
}

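// One possible shape for the "upload the archive to storage" TODO inside
// RunAnalysisJob (a sketch only; SaveResultsArchive is a hypothetical helper,
// not an API provided by this commit):
//
//    key, err := storage.SaveResultsArchive(job.RequestId, resultsArchive)
//    if err != nil {
//        return result, fmt.Errorf("failed to store results archive: %w", err)
//    }
//    result.ResultArchiveURL = key
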
// RunWorker runs a worker that processes jobs from the queue
func RunWorker(queue queue.Queue, wg *sync.WaitGroup) {
    defer wg.Done()
    for job := range queue.Jobs() {
        slog.Info("Running analysis job", slog.Any("job", job))
        result, err := RunAnalysisJob(job)
        if err != nil {
            slog.Error("Failed to run analysis job", slog.Any("error", err))
            continue
        }
        slog.Info("Analysis job completed", slog.Any("result", result))
        queue.Results() <- result
    }
}

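// A minimal usage sketch, not part of this commit: RunWorker is meant to be
// started as a pool of goroutines sharing one queue. The names numWorkers and
// jobQueue below are illustrative assumptions.
//
//    var wg sync.WaitGroup
//    for i := 0; i < numWorkers; i++ {
//        wg.Add(1)
//        go RunWorker(jobQueue, &wg)
//    }
//    wg.Wait()
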
func RunAnalysis(job common.AnalyzeJob) (string, error) {
    // TODO Add multi-language tests including queryLanguage
    // queryPackID, queryLanguage, dbOwner, dbRepo :=
    //     job.QueryPackId, job.QueryLanguage, job.NWO.Owner, job.NWO.Repo
    queryPackID, dbOwner, dbRepo :=
        job.QueryPackId, job.NWO.Owner, job.NWO.Repo

    serverRoot := os.Getenv("MRVA_SERVER_ROOT")

    // Set up derived paths
    dbPath := filepath.Join(serverRoot, "var/codeql/dbs", dbOwner, dbRepo)
    dbZip := filepath.Join(serverRoot, "codeql/dbs", dbOwner, dbRepo,
        fmt.Sprintf("%s_%s_db.zip", dbOwner, dbRepo))
    dbExtract := filepath.Join(serverRoot, "var/codeql/dbs", dbOwner, dbRepo)

    queryPack := filepath.Join(serverRoot,
        "var/codeql/querypacks", fmt.Sprintf("qp-%d.tgz", queryPackID))
    queryExtract := filepath.Join(serverRoot,
        "var/codeql/querypacks", fmt.Sprintf("qp-%d", queryPackID))

    queryOutDir := filepath.Join(serverRoot,
        "var/codeql/sarif/localrun", dbOwner, dbRepo)
    queryOutFile := filepath.Join(queryOutDir,
        fmt.Sprintf("%s_%s.sarif", dbOwner, dbRepo))

    // Prepare directory, extract database
    if err := os.MkdirAll(dbExtract, 0755); err != nil {
        slog.Error("Failed to create DB directory", "path", dbExtract, "error", err)
        return "", err
    }

    if err := utils.UnzipFile(dbZip, dbExtract); err != nil {
        slog.Error("Failed to unzip DB", "path", dbZip, "error", err)
        return "", err
    }

    // Prepare directory, extract query pack
    if err := os.MkdirAll(queryExtract, 0755); err != nil {
        slog.Error("Failed to create query pack directory", "path", queryExtract, "error", err)
        return "", err
    }

    if err := utils.UntarGz(queryPack, queryExtract); err != nil {
        slog.Error("Failed to extract query pack", "path", queryPack, "error", err)
        return "", err
    }

    // Prepare query result directory
    if err := os.MkdirAll(queryOutDir, 0755); err != nil {
        slog.Error("Failed to create query result directory", "path", queryOutDir, "error", err)
        return "", err
    }

    // Run database analyze
    cmd := exec.Command("codeql", "database", "analyze",
        "--format=sarif-latest", "--rerun", "--output", queryOutFile,
        "-j8", dbPath, queryExtract)
    cmd.Dir = serverRoot
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr

    if err := cmd.Run(); err != nil {
        slog.Error("codeql database analyze failed", "error", err, "job", job)
        storage.SetStatus(job.QueryPackId, job.NWO, common.StatusError)
        return "", err
    }

    // Return result path
    return queryOutFile, nil
}
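
// For reference, the command assembled above corresponds to running, from
// MRVA_SERVER_ROOT (paths shown relative to that directory; the code passes
// absolute paths, with owner, repo, and query pack id taken from the job):
//
//    codeql database analyze --format=sarif-latest --rerun \
//        --output var/codeql/sarif/localrun/<owner>/<repo>/<owner>_<repo>.sarif \
//        -j8 var/codeql/dbs/<owner>/<repo> var/codeql/querypacks/qp-<id>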