Make automodel batch size configurable (#2985)
@@ -702,6 +702,10 @@ export function showQueriesPanel(): boolean {
 const MODEL_SETTING = new Setting("model", ROOT_SETTING);
 const FLOW_GENERATION = new Setting("flowGeneration", MODEL_SETTING);
 const LLM_GENERATION = new Setting("llmGeneration", MODEL_SETTING);
+const LLM_GENERATION_BATCH_SIZE = new Setting(
+  "llmGenerationBatchSize",
+  MODEL_SETTING,
+);
 const EXTENSIONS_DIRECTORY = new Setting("extensionsDirectory", MODEL_SETTING);
 const SHOW_MULTIPLE_MODELS = new Setting("showMultipleModels", MODEL_SETTING);
 
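The hunk above registers the new setting under the existing model settings group. As an aside, here is a minimal sketch of how such nested Setting objects plausibly compose into a dotted configuration key; the Setting class itself is not part of this diff, so the dot-joining behavior is an assumption:

// Sketch only: how nested Setting names might compose into a key such
// as "model.llmGenerationBatchSize". SettingSketch is hypothetical and
// not the extension's real Setting class.
class SettingSketch {
  constructor(
    private readonly name: string,
    private readonly parent?: SettingSketch,
  ) {}

  get qualifiedName(): string {
    return this.parent
      ? `${this.parent.qualifiedName}.${this.name}`
      : this.name;
  }
}

const model = new SettingSketch("model");
const batchSize = new SettingSketch("llmGenerationBatchSize", model);
console.log(batchSize.qualifiedName); // prints "model.llmGenerationBatchSize"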
@@ -725,6 +729,14 @@ export class ModelConfigListener extends ConfigListener implements ModelConfig {
     return !!LLM_GENERATION.getValue<boolean>();
   }
 
+  /**
+   * Limits the number of candidates we send to the model in each request to avoid long requests.
+   * Note that the model may return fewer than this number of candidates.
+   */
+  public get llmGenerationBatchSize(): number {
+    return LLM_GENERATION_BATCH_SIZE.getValue<number | null>() || 10;
+  }
+
   public getExtensionsDirectory(languageId: string): string | undefined {
     return EXTENSIONS_DIRECTORY.getValue<string>({
       languageId,
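Note that the new getter falls back to its default of 10 via `||`, so an unset setting and an explicit 0 behave identically. A standalone sketch of that semantics (the helper name is illustrative, not from the diff):

// Illustration of the getter's `|| 10` fallback: `||` substitutes the
// default for any falsy value, so null/undefined (setting not
// configured) and an explicit 0 both yield 10.
function batchSizeFrom(configured: number | null | undefined): number {
  return configured || 10;
}

console.log(batchSizeFrom(null)); // 10 (setting not configured)
console.log(batchSizeFrom(5)); // 5
console.log(batchSizeFrom(0)); // 10 (0 is falsy, so the default applies)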
@@ -17,11 +17,7 @@ import { DatabaseItem } from "../databases/local-databases";
 import { Mode } from "./shared/mode";
 import { CancellationTokenSource } from "vscode";
 import { ModelingStore } from "./modeling-store";
-
-// Limit the number of candidates we send to the model in each request
-// to avoid long requests.
-// Note that the model may return fewer than this number of candidates.
-const candidateBatchSize = 20;
+import { ModelConfigListener } from "../config";
 
 /**
  * The auto-modeler holds state around auto-modeling jobs and allows
@@ -36,6 +32,7 @@ export class AutoModeler {
     private readonly app: App,
     private readonly cliServer: CodeQLCliServer,
     private readonly queryRunner: QueryRunner,
+    private readonly modelConfig: ModelConfigListener,
     private readonly modelingStore: ModelingStore,
     private readonly queryStorageDir: string,
     private readonly databaseItem: DatabaseItem,
@@ -109,6 +106,9 @@ export class AutoModeler {
     cancellationTokenSource: CancellationTokenSource,
   ): Promise<void> {
     void extLogger.log(`Modeling package ${packageName}`);
+
+    const candidateBatchSize = this.modelConfig.llmGenerationBatchSize;
+
     await withProgress(async (progress) => {
       // Fetch the candidates to send to the model
       const allCandidateMethods = getCandidates(mode, methods, modeledMethods);
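With the batch size now read from configuration rather than the removed hard-coded constant, the surrounding code can split the candidate list into requests of at most that size. The batching logic itself is not part of this hunk; the chunk helper below is a hypothetical sketch of how such a batch size is typically applied:

// Hypothetical helper, not from the diff: split a candidate list into
// batches of at most `batchSize` items, one batch per model request.
function chunk<T>(items: T[], batchSize: number): T[][] {
  const batches: T[][] = [];
  for (let i = 0; i < items.length; i += batchSize) {
    batches.push(items.slice(i, i + batchSize));
  }
  return batches;
}

// e.g. chunk(allCandidateMethods, candidateBatchSize) would yield
// arrays of at most candidateBatchSize candidates each.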
@@ -76,6 +76,7 @@ export class ModelEditorView extends AbstractWebview<
       app,
       cliServer,
       queryRunner,
+      this.modelConfig,
       modelingStore,
       queryStorageDir,
       databaseItem,