Add ability to use a dev endpoint for auto-model (#3038)

Charis Kyriakou authored on 2023-11-01 08:55:56 +00:00, committed by GitHub
parent 2988aceddf
commit 02c1d7ef9e
3 changed files with 52 additions and 9 deletions

View File

@@ -706,6 +706,10 @@
 const LLM_GENERATION_BATCH_SIZE = new Setting(
   "llmGenerationBatchSize",
   MODEL_SETTING,
 );
+const LLM_GENERATION_DEV_ENDPOINT = new Setting(
+  "llmGenerationDevEndpoint",
+  MODEL_SETTING,
+);
 const EXTENSIONS_DIRECTORY = new Setting("extensionsDirectory", MODEL_SETTING);
 const ENABLE_RUBY = new Setting("enableRuby", MODEL_SETTING);

@@ -738,6 +742,14 @@ export class ModelConfigListener extends ConfigListener implements ModelConfig {
     return LLM_GENERATION_BATCH_SIZE.getValue<number | null>() || 5;
   }

+  /**
+   * The URL of the endpoint to use for LLM generation. This should only be set
+   * if you want to test against a dev server.
+   */
+  public get llmGenerationDevEndpoint(): string | undefined {
+    return LLM_GENERATION_DEV_ENDPOINT.getValue<string | undefined>();
+  }
+
   public getExtensionsDirectory(languageId: string): string | undefined {
     return EXTENSIONS_DIRECTORY.getValue<string>({
       languageId,
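As a minimal sketch (not part of this commit) of how downstream code can consume the new getter: the `ModelConfigListener` wiring and the VS Code settings key that backs `LLM_GENERATION_DEV_ENDPOINT` are not visible in this diff, so both are assumptions here.

// Sketch only: consuming the new getter. Assumes an already-constructed
// ModelConfigListener; the settings key feeding the Setting is not shown here.
import { ModelConfigListener } from "./config"; // import path assumed

function describeAutoModelTarget(config: ModelConfigListener): string {
  const endpoint = config.llmGenerationDevEndpoint;
  return endpoint !== undefined
    ? `auto-model requests will go to dev endpoint ${endpoint}`
    : "auto-model requests will go to the production GitHub API";
}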

View File

@@ -1,5 +1,7 @@
 import { Credentials } from "../common/authentication";
 import { OctokitResponse } from "@octokit/types";
+import fetch from "node-fetch";
+import { ModelConfigListener } from "../config";

 export enum AutomodelMode {
   Unspecified = "AUTOMODEL_MODE_UNSPECIFIED",
@@ -20,15 +22,44 @@ export interface ModelResponse {
 export async function autoModel(
   credentials: Credentials,
   request: ModelRequest,
+  modelingConfig: ModelConfigListener,
 ): Promise<ModelResponse> {
-  const octokit = await credentials.getOctokit();
-
-  const response: OctokitResponse<ModelResponse> = await octokit.request(
-    "POST /repos/github/codeql/code-scanning/codeql/auto-model",
-    {
-      data: request,
-    },
-  );
-
-  return response.data;
+  const devEndpoint = modelingConfig.llmGenerationDevEndpoint;
+  if (devEndpoint) {
+    return callAutoModelDevEndpoint(devEndpoint, request);
+  } else {
+    const octokit = await credentials.getOctokit();
+    const response: OctokitResponse<ModelResponse> = await octokit.request(
+      "POST /repos/github/codeql/code-scanning/codeql/auto-model",
+      {
+        data: request,
+      },
+    );
+
+    return response.data;
+  }
+}
+
+async function callAutoModelDevEndpoint(
+  endpoint: string,
+  request: ModelRequest,
+): Promise<ModelResponse> {
+  const json = JSON.stringify(request);
+
+  const response = await fetch(endpoint, {
+    method: "POST",
+    headers: {
+      "Content-Type": "application/json",
+    },
+    body: json,
+  });
+
+  if (!response.ok) {
+    throw new Error(
+      `Error calling auto-model API: ${response.status} ${response.statusText}`,
+    );
+  }
+
+  const data = await response.json();
+  return data as ModelResponse;
 }
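Because callAutoModelDevEndpoint just POSTs the JSON-serialized ModelRequest and parses the JSON reply as a ModelResponse, a throwaway local server is enough to exercise the dev path. A sketch under stated assumptions (the ModelResponse field name is taken on faith, since the interface body falls outside this hunk):

// Sketch only: a local stand-in for the auto-model dev endpoint.
import { createServer } from "node:http";

const server = createServer((req, res) => {
  let body = "";
  req.on("data", (chunk) => (body += chunk));
  req.on("end", () => {
    console.log("received auto-model request:", JSON.parse(body));
    res.writeHead(200, { "Content-Type": "application/json" });
    // Assumed ModelResponse shape; the real field names are not in this hunk.
    res.end(JSON.stringify({ models: "" }));
  });
});

server.listen(8080, () =>
  console.log("auto-model dev endpoint listening on http://localhost:8080"),
);

With llmGenerationDevEndpoint set to http://localhost:8080, autoModel would route requests to this server instead of the GitHub API.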

View File

@@ -238,7 +238,7 @@ export class AutoModeler {
     request: ModelRequest,
   ): Promise<ModelResponse | null> {
     try {
-      return await autoModel(this.app.credentials, request);
+      return await autoModel(this.app.credentials, request, this.modelConfig);
     } catch (e) {
       if (e instanceof RequestError && e.status === 429) {
         void showAndLogExceptionWithTelemetry(
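One behavioral note on this call site: callAutoModelDevEndpoint throws a plain Error on non-2xx responses, so the RequestError/429 branch above only ever fires for the Octokit path; dev-endpoint failures fall through to the generic handler. And since getOctokit() is only reached in the else arm, the dev path can be exercised without real credentials, as in this sketch (the stub casts and the ModelRequest fields are illustrative assumptions, not repository APIs):

// Sketch only: driving autoModel's dev-endpoint branch.
import { autoModel, AutomodelMode, ModelRequest } from "./auto-model-api"; // path assumed
import { Credentials } from "../common/authentication";
import { ModelConfigListener } from "../config";

async function tryDevEndpoint(): Promise<void> {
  const stubConfig = {
    llmGenerationDevEndpoint: "http://localhost:8080", // hypothetical local server
  } as unknown as ModelConfigListener;

  // ModelRequest's full field list is outside this diff; cast for illustration.
  const request = { mode: AutomodelMode.Unspecified } as unknown as ModelRequest;

  const response = await autoModel(
    {} as unknown as Credentials, // never used when a dev endpoint is configured
    request,
    stubConfig,
  );
  console.log("dev endpoint returned:", response);
}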