Extract modeling of specific set of candidates into its own function
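In effect, the pipeline that turns a set of candidate methods into modeled methods (run the auto-model CodeQL queries, build and send the request, parse the returned YAML, apply the results) moves out of the dependency-specific method into a reusable modelCandidates function. Below is a hypothetical, condensed sketch of the shape this produces; Mode, ProgressCallback, and MethodSignature are stand-ins for the extension's real types, and modelDependency's parameter list is simplified relative to the diff that follows.

// Hypothetical stand-ins; only the fields this diff touches are included.
type Mode = "application" | "framework";
type ProgressCallback = (update: {
  step: number;
  maxStep: number;
  message: string;
}) => void;
interface MethodSignature {
  signature: string;
  packageName: string;
  typeName: string;
  methodName: string;
  methodParameters: string;
}

class AutoModelerSketch {
  // Dependency-specific entry point: after this commit it only prepares the
  // candidate set and delegates the shared pipeline to modelCandidates.
  async modelDependency(
    candidateMethods: MethodSignature[],
    mode: Mode,
    progress: ProgressCallback,
    maxStep: number,
  ): Promise<void> {
    await this.modelCandidates(candidateMethods, mode, progress, maxStep);
  }

  // The extracted function: models one specific set of candidates
  // (queries -> request -> API call -> YAML parse -> apply results).
  private async modelCandidates(
    candidateMethods: MethodSignature[],
    mode: Mode,
    progress: ProgressCallback,
    maxStep: number,
  ): Promise<void> {
    progress({ step: 1800, maxStep, message: "Creating request" });
    // ...the body moved verbatim from modelDependency; see the diff below.
  }
}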
@@ -1,8 +1,8 @@
-import { ExternalApiUsage } from "./external-api-usage";
+import { ExternalApiUsage, MethodSignature } from "./external-api-usage";
 import { ModeledMethod } from "./modeled-method";
 import { extLogger } from "../common/logging/vscode";
 import { load as loadYaml } from "js-yaml";
-import { withProgress } from "../common/vscode/progress";
+import { ProgressCallback, withProgress } from "../common/vscode/progress";
 import { createAutoModelV2Request, getCandidates } from "./auto-model-v2";
 import { runAutoModelQueries } from "./auto-model-codeml-queries";
 import { loadDataExtensionYaml } from "./yaml";
@@ -33,10 +33,10 @@ export class AutoModeler {
     modeledMethods: Record<string, ModeledMethod>,
     mode: Mode,
   ): Promise<void> {
-    await this.model(externalApiUsages, modeledMethods, mode);
+    await this.modelDependency(externalApiUsages, modeledMethods, mode);
   }

-  private async model(
+  private async modelDependency(
     externalApiUsages: ExternalApiUsage[],
     modeledMethods: Record<string, ModeledMethod>,
     mode: Mode,
@@ -63,84 +63,93 @@
         return;
       }

-      const usages = await runAutoModelQueries({
-        mode,
-        candidateMethods,
-        cliServer: this.cliServer,
-        queryRunner: this.queryRunner,
-        queryStorageDir: this.queryStorageDir,
-        databaseItem: this.databaseItem,
-        progress: (update) => progress({ ...update, maxStep }),
-      });
-      if (!usages) {
-        return;
-      }
-
-      progress({
-        step: 1800,
-        maxStep,
-        message: "Creating request",
-      });
-
-      const request = await createAutoModelV2Request(mode, usages);
-
-      progress({
-        step: 2000,
-        maxStep,
-        message: "Sending request",
-      });
-
-      const response = await this.callAutoModelApi(request);
-      if (!response) {
-        return;
-      }
-
-      progress({
-        step: 2500,
-        maxStep,
-        message: "Parsing response",
-      });
-
-      const models = loadYaml(response.models, {
-        filename: "auto-model.yml",
-      });
-
-      const loadedMethods = loadDataExtensionYaml(models);
-      if (!loadedMethods) {
-        return;
-      }
-
-      // Any candidate that was not part of the response is a negative result,
-      // meaning that the candidate is not a sink for the kinds that the LLM is checking for.
-      // For now we model this as a sink neutral method; however, this is subject
-      // to discussion.
-      for (const candidate of candidateMethods) {
-        if (!(candidate.signature in loadedMethods)) {
-          loadedMethods[candidate.signature] = {
-            type: "neutral",
-            kind: "sink",
-            input: "",
-            output: "",
-            provenance: "ai-generated",
-            signature: candidate.signature,
-            packageName: candidate.packageName,
-            typeName: candidate.typeName,
-            methodName: candidate.methodName,
-            methodParameters: candidate.methodParameters,
-          };
-        }
-      }
-
-      progress({
-        step: 2800,
-        maxStep,
-        message: "Applying results",
-      });
-
-      await this.addModeledMethods(loadedMethods);
+      await this.modelCandidates(candidateMethods, mode, progress, maxStep);
     });
   }

+  private async modelCandidates(
+    candidateMethods: MethodSignature[],
+    mode: Mode,
+    progress: ProgressCallback,
+    maxStep: number,
+  ): Promise<void> {
+    const usages = await runAutoModelQueries({
+      mode,
+      candidateMethods,
+      cliServer: this.cliServer,
+      queryRunner: this.queryRunner,
+      queryStorageDir: this.queryStorageDir,
+      databaseItem: this.databaseItem,
+      progress: (update) => progress({ ...update, maxStep }),
+    });
+    if (!usages) {
+      return;
+    }
+
+    progress({
+      step: 1800,
+      maxStep,
+      message: "Creating request",
+    });
+
+    const request = await createAutoModelV2Request(mode, usages);
+
+    progress({
+      step: 2000,
+      maxStep,
+      message: "Sending request",
+    });
+
+    const response = await this.callAutoModelApi(request);
+    if (!response) {
+      return;
+    }
+
+    progress({
+      step: 2500,
+      maxStep,
+      message: "Parsing response",
+    });
+
+    const models = loadYaml(response.models, {
+      filename: "auto-model.yml",
+    });
+
+    const loadedMethods = loadDataExtensionYaml(models);
+    if (!loadedMethods) {
+      return;
+    }
+
+    // Any candidate that was not part of the response is a negative result,
+    // meaning that the candidate is not a sink for the kinds that the LLM is checking for.
+    // For now we model this as a sink neutral method; however, this is subject
+    // to discussion.
+    for (const candidate of candidateMethods) {
+      if (!(candidate.signature in loadedMethods)) {
+        loadedMethods[candidate.signature] = {
+          type: "neutral",
+          kind: "sink",
+          input: "",
+          output: "",
+          provenance: "ai-generated",
+          signature: candidate.signature,
+          packageName: candidate.packageName,
+          typeName: candidate.typeName,
+          methodName: candidate.methodName,
+          methodParameters: candidate.methodParameters,
+        };
+      }
+    }
+
+    progress({
+      step: 2800,
+      maxStep,
+      message: "Applying results",
+    });
+
+    await this.addModeledMethods(loadedMethods);
+  }
+
   private async callAutoModelApi(
     request: ModelRequest,
   ): Promise<ModelResponse | null> {
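One detail worth noting in the moved body is the fallback loop: any candidate the LLM response did not model is recorded as a neutral sink, so a missing answer becomes an explicit negative result instead of being silently dropped. Below is a self-contained sketch of that technique; CandidateLike, ModeledMethodLike, addNeutralFallbacks, and the org.example signatures are illustrative stand-ins, not the extension's real types or data.

// Hypothetical stand-ins: only the fields the fallback loop touches.
interface CandidateLike {
  signature: string;
  packageName: string;
  typeName: string;
  methodName: string;
  methodParameters: string;
}

interface ModeledMethodLike extends CandidateLike {
  type: string; // e.g. "neutral"
  kind: string; // e.g. "sink"
  input: string;
  output: string;
  provenance: string; // e.g. "ai-generated"
}

// Fill in a neutral "sink" model for every candidate the response skipped,
// mirroring the for-loop in modelCandidates above.
function addNeutralFallbacks(
  candidates: CandidateLike[],
  loadedMethods: Record<string, ModeledMethodLike>,
): void {
  for (const candidate of candidates) {
    if (!(candidate.signature in loadedMethods)) {
      loadedMethods[candidate.signature] = {
        type: "neutral",
        kind: "sink",
        input: "",
        output: "",
        provenance: "ai-generated",
        ...candidate, // copies signature, packageName, typeName, etc.
      };
    }
  }
}

// Example: the response modeled nothing, so the lone candidate becomes neutral.
const loaded: Record<string, ModeledMethodLike> = {};
addNeutralFallbacks(
  [
    {
      signature: "org.example.Bar#load(String)",
      packageName: "org.example",
      typeName: "Bar",
      methodName: "load",
      methodParameters: "(String)",
    },
  ],
  loaded,
);
console.log(loaded["org.example.Bar#load(String)"].type); // "neutral"

Keeping this loop inside the extracted function means every caller of modelCandidates gets the same "no answer means neutral" behavior, rather than each call site re-implementing it.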