From abde8f3faebd2f820168e79a120827e3136f1af8 Mon Sep 17 00:00:00 2001
From: Charis Kyriakou
Date: Tue, 17 Oct 2023 12:07:30 +0100
Subject: [PATCH] Make automodel batch size configurable (#2985)

---
 extensions/ql-vscode/src/config.ts                  | 12 ++++++++++++
 .../ql-vscode/src/model-editor/auto-modeler.ts      | 10 +++++-----
 .../ql-vscode/src/model-editor/model-editor-view.ts |  1 +
 3 files changed, 18 insertions(+), 5 deletions(-)

diff --git a/extensions/ql-vscode/src/config.ts b/extensions/ql-vscode/src/config.ts
index 172cddcee..108ea8a5a 100644
--- a/extensions/ql-vscode/src/config.ts
+++ b/extensions/ql-vscode/src/config.ts
@@ -702,6 +702,10 @@ export function showQueriesPanel(): boolean {
 const MODEL_SETTING = new Setting("model", ROOT_SETTING);
 const FLOW_GENERATION = new Setting("flowGeneration", MODEL_SETTING);
 const LLM_GENERATION = new Setting("llmGeneration", MODEL_SETTING);
+const LLM_GENERATION_BATCH_SIZE = new Setting(
+  "llmGenerationBatchSize",
+  MODEL_SETTING,
+);
 const EXTENSIONS_DIRECTORY = new Setting("extensionsDirectory", MODEL_SETTING);
 
 const SHOW_MULTIPLE_MODELS = new Setting("showMultipleModels", MODEL_SETTING);
@@ -725,6 +729,14 @@ export class ModelConfigListener extends ConfigListener implements ModelConfig {
     return !!LLM_GENERATION.getValue();
   }
 
+  /**
+   * Limits the number of candidates we send to the model in each request to avoid long requests.
+   * Note that the model may return fewer than this number of candidates.
+   */
+  public get llmGenerationBatchSize(): number {
+    return LLM_GENERATION_BATCH_SIZE.getValue<number>() || 10;
+  }
+
   public getExtensionsDirectory(languageId: string): string | undefined {
     return EXTENSIONS_DIRECTORY.getValue({
       languageId,
diff --git a/extensions/ql-vscode/src/model-editor/auto-modeler.ts b/extensions/ql-vscode/src/model-editor/auto-modeler.ts
index 45612c07c..4e99bd996 100644
--- a/extensions/ql-vscode/src/model-editor/auto-modeler.ts
+++ b/extensions/ql-vscode/src/model-editor/auto-modeler.ts
@@ -17,11 +17,7 @@ import { DatabaseItem } from "../databases/local-databases";
 import { Mode } from "./shared/mode";
 import { CancellationTokenSource } from "vscode";
 import { ModelingStore } from "./modeling-store";
-
-// Limit the number of candidates we send to the model in each request
-// to avoid long requests.
-// Note that the model may return fewer than this number of candidates.
-const candidateBatchSize = 20;
+import { ModelConfigListener } from "../config";
 
 /**
  * The auto-modeler holds state around auto-modeling jobs and allows
@@ -36,6 +32,7 @@ export class AutoModeler {
     private readonly app: App,
     private readonly cliServer: CodeQLCliServer,
     private readonly queryRunner: QueryRunner,
+    private readonly modelConfig: ModelConfigListener,
     private readonly modelingStore: ModelingStore,
     private readonly queryStorageDir: string,
     private readonly databaseItem: DatabaseItem,
@@ -109,6 +106,9 @@ export class AutoModeler {
     cancellationTokenSource: CancellationTokenSource,
   ): Promise<void> {
     void extLogger.log(`Modeling package ${packageName}`);
+
+    const candidateBatchSize = this.modelConfig.llmGenerationBatchSize;
+
     await withProgress(async (progress) => {
       // Fetch the candidates to send to the model
       const allCandidateMethods = getCandidates(mode, methods, modeledMethods);
diff --git a/extensions/ql-vscode/src/model-editor/model-editor-view.ts b/extensions/ql-vscode/src/model-editor/model-editor-view.ts
index fde566225..b6f307182 100644
--- a/extensions/ql-vscode/src/model-editor/model-editor-view.ts
+++ b/extensions/ql-vscode/src/model-editor/model-editor-view.ts
@@ -76,6 +76,7 @@ export class ModelEditorView extends AbstractWebview<
       app,
       cliServer,
       queryRunner,
+      this.modelConfig,
       modelingStore,
       queryStorageDir,
       databaseItem,
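
The hunks above replace the hard-coded `const candidateBatchSize = 20` with the new `llmGenerationBatchSize` setting (default 10), but the loop in `modelPackage` that consumes the batch size sits outside the diff context. A minimal TypeScript sketch of that kind of batching follows; `CandidateMethod`, `modelInBatches` and `sendBatch` are illustrative placeholders, not identifiers from this repository.

// Sketch only: batch candidates by a configurable size, mirroring the intent
// of the new setting. All identifiers here are illustrative placeholders.
interface CandidateMethod {
  signature: string;
}

async function modelInBatches(
  candidates: CandidateMethod[],
  batchSize: number,
  sendBatch: (batch: CandidateMethod[]) => Promise<void>,
): Promise<void> {
  for (let i = 0; i < candidates.length; i += batchSize) {
    // Each request carries at most `batchSize` candidates to keep requests
    // short; the model may still return fewer results than it was sent.
    await sendBatch(candidates.slice(i, i + batchSize));
  }
}

// Example: with the default batch size of 10, 25 candidates would be sent as
// three requests of 10, 10 and 5 candidates.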