Move structurally typed prompt injection sinks to Models as Data

Move the OpenAI, Anthropic, Google GenAI, and LangChain sinks that are
structurally typed (identifiable from the API name alone) into
Models as Data (MaD) YAML files.

Role-filtered sinks that require inspecting a sibling 'role' property
remain in QL code, since MaD rows cannot express that conditional check
(see the sketch below).

Use two distinct sink kinds:
- user-prompt-injection: picked up by UserPromptInjection.ql
- system-prompt-injection: picked up by SystemPromptInjection.ql
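
For illustration, a minimal TypeScript sketch of how the two kinds and the
role-filtered case show up in application code (the model ids and variable
names are placeholders, not part of this change):

import OpenAI from "openai";

const client = new OpenAI();
const untrusted = process.env.UNTRUSTED ?? "";

// Structurally typed: the access path alone marks the sink.
await client.responses.create({
  model: "gpt-4o",
  instructions: untrusted, // system-prompt-injection
  input: "hello",          // user-prompt-injection
});

// Role-filtered: the same `content` property is a system or user sink
// depending on the sibling `role` value, which a MaD row cannot test.
await client.chat.completions.create({
  model: "gpt-4o",
  messages: [{ role: "system", content: untrusted }],
});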

New files:
- javascript/ql/lib/ext/openai.model.yml
- javascript/ql/lib/ext/anthropic.model.yml
- javascript/ql/lib/ext/google-genai.model.yml
- javascript/ql/lib/ext/langchain.model.yml
BazookaMusic
2026-05-13 11:08:25 +02:00
parent 98379cffcb
commit 34da804aee
9 changed files with 236 additions and 382 deletions

javascript/ql/lib/ext/anthropic.model.yml

@@ -0,0 +1,17 @@
extensions:
  - addsTo:
      pack: codeql/javascript-all
      extensible: typeModel
    data:
      - ["anthropic.Client", "@anthropic-ai/sdk", "Instance"]
  - addsTo:
      pack: codeql/javascript-all
      extensible: sinkModel
    data:
      - ["anthropic.Client", "Member[messages].Member[create].Argument[0].Member[system]", "system-prompt-injection"]
      - ["anthropic.Client", "Member[messages].Member[create].Argument[0].Member[system].ArrayElement.Member[text]", "system-prompt-injection"]
      - ["anthropic.Client", "Member[beta].Member[messages].Member[create].Argument[0].Member[system]", "system-prompt-injection"]
      - ["anthropic.Client", "Member[beta].Member[messages].Member[create].Argument[0].Member[system].ArrayElement.Member[text]", "system-prompt-injection"]
      - ["anthropic.Client", "Member[beta].Member[agents].Member[create].Argument[0].Member[system]", "system-prompt-injection"]
      - ["anthropic.Client", "Member[beta].Member[agents].Member[update].Argument[1].Member[system]", "system-prompt-injection"]

javascript/ql/lib/ext/google-genai.model.yml

@@ -0,0 +1,23 @@
extensions:
  - addsTo:
      pack: codeql/javascript-all
      extensible: typeModel
    data:
      - ["google-genai.Client", "@google/genai", "Member[GoogleGenAI].Instance"]
  - addsTo:
      pack: codeql/javascript-all
      extensible: sinkModel
    data:
      - ["google-genai.Client", "Member[models].Member[generateContent,generateContentStream].Argument[0].Member[config].Member[systemInstruction]", "system-prompt-injection"]
      - ["google-genai.Client", "Member[chats].Member[create].Argument[0].Member[config].Member[systemInstruction]", "system-prompt-injection"]
      - ["google-genai.Client", "Member[chats].Member[create].ReturnValue.Member[sendMessage].Argument[0].Member[config].Member[systemInstruction]", "system-prompt-injection"]
      - ["google-genai.Client", "Member[live].Member[connect].Argument[0].Member[config].Member[systemInstruction]", "system-prompt-injection"]
      - ["google-genai.Client", "Member[models].Member[generateContent,generateContentStream].Argument[0].Member[contents]", "user-prompt-injection"]
      - ["google-genai.Client", "Member[models].Member[generateImages].Argument[0].Member[prompt]", "user-prompt-injection"]
      - ["google-genai.Client", "Member[models].Member[editImage].Argument[0].Member[prompt]", "user-prompt-injection"]
      - ["google-genai.Client", "Member[models].Member[generateVideos].Argument[0].Member[prompt]", "user-prompt-injection"]
      - ["google-genai.Client", "Member[chats].Member[create].ReturnValue.Member[sendMessage,sendMessageStream].Argument[0].Member[message]", "user-prompt-injection"]
      - ["google-genai.Client", "Member[chats].Member[create].ReturnValue.Member[sendMessage,sendMessageStream].Argument[0].Member[content]", "user-prompt-injection"]
      - ["google-genai.Client", "Member[models].Member[embedContent].Argument[0].Member[content]", "user-prompt-injection"]
      - ["google-genai.Client", "Member[interactions].Member[create].Argument[0].Member[input]", "user-prompt-injection"]

javascript/ql/lib/ext/langchain.model.yml

@@ -0,0 +1,48 @@
extensions:
  - addsTo:
      pack: codeql/javascript-all
      extensible: typeModel
    data:
      - ["langchain.ChatModel", "@langchain/openai", "Member[ChatOpenAI].Instance"]
      - ["langchain.ChatModel", "@langchain/anthropic", "Member[ChatAnthropic].Instance"]
      - ["langchain.ChatModel", "@langchain/google-genai", "Member[ChatGoogleGenerativeAI].Instance"]
      - ["langchain.ChatModel", "@langchain/mistralai", "Member[ChatMistralAI].Instance"]
      - ["langchain.ChatModel", "@langchain/groq", "Member[ChatGroq].Instance"]
      - ["langchain.ChatModel", "@langchain/cohere", "Member[ChatCohere].Instance"]
      - ["langchain.ChatModel", "@langchain/community/chat_models/fireworks", "Member[ChatFireworks].Instance"]
      - ["langchain.ChatModel", "@langchain/ollama", "Member[ChatOllama].Instance"]
      - ["langchain.ChatModel", "@langchain/aws", "Member[BedrockChat,ChatBedrockConverse].Instance"]
      - ["langchain.ChatModel", "@langchain/community/chat_models/togetherai", "Member[ChatTogetherAI].Instance"]
      - ["langchain.ChatModel", "@langchain/xai", "Member[ChatXAI].Instance"]
      - ["langchain.ChatModel", "@langchain/openrouter", "Member[ChatOpenRouter].Instance"]
      - ["langchain.ChatModel", "langchain", "Member[initChatModel].ReturnValue.Awaited"]
      - ["langchain.AgentExecutor", "langchain/agents", "Member[AgentExecutor].Instance"]
      - ["langchain.AgentExecutor", "langchain/agents", "Member[AgentExecutor].Member[fromAgentAndTools].ReturnValue"]
      - ["langchain.Agent", "langchain", "Member[createAgent].ReturnValue"]
      - ["langchain.LLMChain", "langchain/chains", "Member[LLMChain].Instance"]
  - addsTo:
      pack: codeql/javascript-all
      extensible: sinkModel
    data:
      - ["@langchain/core/messages", "Member[HumanMessage].Argument[0]", "user-prompt-injection"]
      - ["@langchain/core/messages", "Member[HumanMessage].Argument[0].Member[content]", "user-prompt-injection"]
      - ["langchain", "Member[HumanMessage].Argument[0]", "user-prompt-injection"]
      - ["langchain", "Member[HumanMessage].Argument[0].Member[content]", "user-prompt-injection"]
      - ["@langchain/core/messages", "Member[SystemMessage].Argument[0]", "system-prompt-injection"]
      - ["@langchain/core/messages", "Member[SystemMessage].Argument[0].Member[content]", "system-prompt-injection"]
      - ["langchain", "Member[SystemMessage].Argument[0]", "system-prompt-injection"]
      - ["langchain", "Member[SystemMessage].Argument[0].Member[content]", "system-prompt-injection"]
      - ["langchain.ChatModel", "Member[invoke].Argument[0]", "user-prompt-injection"]
      - ["langchain.ChatModel", "Member[stream].Argument[0]", "user-prompt-injection"]
      - ["langchain.ChatModel", "Member[call].Argument[0]", "user-prompt-injection"]
      - ["langchain.ChatModel", "Member[predict].Argument[0]", "user-prompt-injection"]
      - ["langchain.ChatModel", "Member[batch].Argument[0].ArrayElement", "user-prompt-injection"]
      - ["langchain.ChatModel", "Member[generate].Argument[0].ArrayElement.ArrayElement", "user-prompt-injection"]
      - ["langchain.AgentExecutor", "Member[invoke].Argument[0].Member[input]", "user-prompt-injection"]
      - ["langchain.Agent", "Member[invoke].Argument[0].Member[messages].ArrayElement.Member[content]", "user-prompt-injection"]
      - ["langchain.Agent", "Member[stream].Argument[0].Member[messages].ArrayElement.Member[content]", "user-prompt-injection"]
      - ["langchain", "Member[createAgent].Argument[0].Member[systemPrompt]", "system-prompt-injection"]
      - ["langchain.LLMChain", "Member[call,invoke].Argument[0].Member[input]", "user-prompt-injection"]
      - ["@langchain/core/prompts", "Member[ChatPromptTemplate].Member[fromMessages].Argument[0].ArrayElement.ArrayElement", "user-prompt-injection"]
      - ["@langchain/core/prompts", "Member[PromptTemplate].Instance.Member[format].Argument[0]", "user-prompt-injection"]

javascript/ql/lib/ext/openai.model.yml

@@ -0,0 +1,28 @@
extensions:
  - addsTo:
      pack: codeql/javascript-all
      extensible: typeModel
    data:
      - ["openai.Client", "openai", "Instance"]
      - ["openai.Client", "openai", "Member[OpenAI,AzureOpenAI].Instance"]
      - ["openai.Client", "@openai/guardrails", "Member[GuardrailsOpenAI,GuardrailsAzureOpenAI].Member[create].ReturnValue.Awaited"]
  - addsTo:
      pack: codeql/javascript-all
      extensible: sinkModel
    data:
      - ["openai.Client", "Member[responses].Member[create].Argument[0].Member[instructions]", "system-prompt-injection"]
      - ["openai.Client", "Member[beta].Member[assistants].Member[create,update].Argument[0].Member[instructions]", "system-prompt-injection"]
      - ["openai.Client", "Member[beta].Member[threads].Member[runs].Member[create].Argument[1].Member[instructions,additional_instructions]", "system-prompt-injection"]
      - ["@openai/agents", "Member[Agent].Argument[0].Member[instructions,handoffDescription]", "system-prompt-injection"]
      - ["@openai/guardrails", "Member[Agent].Argument[0].Member[instructions,handoffDescription]", "system-prompt-injection"]
      - ["@openai/agents", "Member[Agent].Instance.Member[asTool].Argument[0].Member[toolDescription]", "system-prompt-injection"]
      - ["@openai/guardrails", "Member[Agent].Instance.Member[asTool].Argument[0].Member[toolDescription]", "system-prompt-injection"]
      - ["@openai/agents", "Member[tool].Argument[0].Member[description]", "system-prompt-injection"]
      - ["@openai/guardrails", "Member[tool].Argument[0].Member[description]", "system-prompt-injection"]
      - ["@openai/guardrails", "Member[GuardrailAgent].Member[create].Argument[2]", "system-prompt-injection"]
      - ["openai.Client", "Member[responses].Member[create].Argument[0].Member[input]", "user-prompt-injection"]
      - ["openai.Client", "Member[completions].Member[create].Argument[0].Member[prompt]", "user-prompt-injection"]
      - ["openai.Client", "Member[images].Member[generate,edit].Argument[0].Member[prompt]", "user-prompt-injection"]
      - ["openai.Client", "Member[embeddings].Member[create].Argument[0].Member[input]", "user-prompt-injection"]
      - ["openai.Client", "Member[audio].Member[transcriptions,translations].Member[create].Argument[0].Member[prompt]", "user-prompt-injection"]


@@ -1,89 +1,55 @@
/**
* Provides classes modeling security-relevant aspects of the `@anthropic-ai/sdk` package.
* See https://github.com/anthropics/anthropic-sdk-typescript
*
* Structurally typed sinks (system, beta.agents) have been moved to
* Models as Data: javascript/ql/lib/ext/anthropic.model.yml
*
* This file retains only role-filtered message sinks that require inspecting
* a sibling `role` property, which MaD cannot express.
*/
private import javascript
module Anthropic {
/** Gets a reference to the `Anthropic` client instance. */
API::Node classRef() {
// Default export: import Anthropic from '@anthropic-ai/sdk'; new Anthropic()
private API::Node classRef() {
result = API::moduleImport("@anthropic-ai/sdk").getInstance()
}
/** Gets a reference to a sink for the system prompt in the Anthropic messages API. */
API::Node getSystemOrAssistantPromptNode() {
exists(API::Node createParams |
// client.messages.create({ ... })
createParams = classRef()
.getMember("messages")
.getMember("create")
.getParameter(0)
or
// client.beta.messages.create({ ... })
createParams = classRef()
.getMember("beta")
.getMember("messages")
.getMember("create")
.getParameter(0)
|
// system: "string"
result = createParams.getMember("system")
or
// system: [{ type: "text", text: "..." }]
result = createParams.getMember("system").getArrayElement().getMember("text")
or
// messages: [{ role: "assistant", content: "..." }]
// Injecting content into what the model said from external sources is very likely an injection.
exists(API::Node msg |
msg = createParams.getMember("messages").getArrayElement() and
msg.getMember("role").asSink().mayHaveStringValue("assistant")
|
result = msg.getMember("content")
)
)
/** Gets a reference to the messages.create params (both stable and beta). */
private API::Node messagesCreateParams() {
result = classRef().getMember("messages").getMember("create").getParameter(0)
or
// client.beta.agents.create({ system: "..." })
result = classRef()
.getMember("beta")
.getMember("agents")
.getMember("create")
.getParameter(0)
.getMember("system")
or
// client.beta.agents.update(agentId, { system: "..." })
result = classRef()
.getMember("beta")
.getMember("agents")
.getMember("update")
.getParameter(1)
.getMember("system")
result =
classRef().getMember("beta").getMember("messages").getMember("create").getParameter(0)
}
/** Gets a reference to nodes where potential user input can land. */
API::Node getUserPromptNode() {
exists(API::Node createParams |
// client.messages.create({ ... })
createParams = classRef()
.getMember("messages")
.getMember("create")
.getParameter(0)
or
// client.beta.messages.create({ ... })
createParams = classRef()
.getMember("beta")
.getMember("messages")
.getMember("create")
.getParameter(0)
/**
* Gets role-filtered system/assistant message sinks.
* These require checking a sibling `role` property and cannot be expressed in MaD.
*/
API::Node getSystemOrAssistantPromptNode() {
// messages: [{ role: "assistant", content: "..." }]
exists(API::Node msg |
msg = messagesCreateParams().getMember("messages").getArrayElement() and
msg.getMember("role").asSink().mayHaveStringValue("assistant")
|
// messages: [{ role: "user", content: "..." }]
exists(API::Node msg |
msg = createParams.getMember("messages").getArrayElement() and
not msg.getMember("role").asSink().mayHaveStringValue("assistant")
|
result = msg.getMember("content")
)
result = msg.getMember("content")
)
}
/**
* Gets role-filtered user message sinks.
* These require checking a sibling `role` property and cannot be expressed in MaD.
*/
API::Node getUserPromptNode() {
// messages: [{ role: "user", content: "..." }]
exists(API::Node msg |
msg = messagesCreateParams().getMember("messages").getArrayElement() and
not msg.getMember("role").asSink().mayHaveStringValue("assistant")
|
result = msg.getMember("content")
)
}
}


@@ -1,148 +1,61 @@
/**
* Provides classes modeling security-relevant aspects of the `@google/genai` package.
* See https://github.com/googleapis/js-genai
*
* Structurally typed sinks (systemInstruction, prompt, message, etc.) have been
* moved to Models as Data: javascript/ql/lib/ext/google-genai.model.yml
*
* This file retains only role-filtered content sinks that require inspecting
* a sibling `role` property, which MaD cannot express.
*/
private import javascript
module GoogleGenAI {
/** Gets a reference to the `GoogleGenAI` client instance. */
API::Node clientRef() {
// import { GoogleGenAI } from '@google/genai'; const ai = new GoogleGenAI(...)
private API::Node clientRef() {
result =
API::moduleImport("@google/genai").getMember("GoogleGenAI").getInstance()
}
/** Gets a reference to a sink for prompt content in the Google GenAI SDK. */
/**
* Gets role-filtered system/model message sinks.
* These require checking a sibling `role` property and cannot be expressed in MaD.
*/
API::Node getSystemOrAssistantPromptNode() {
exists(API::Node params |
// ai.models.generateContent({ contents, config })
// ai.models.generateContentStream({ contents, config })
params =
// contents: [{ role: "model", parts: [{ text: "..." }] }]
// Gemini uses "model" role instead of "assistant"
exists(API::Node msg |
msg =
clientRef()
.getMember("models")
.getMember(["generateContent", "generateContentStream"])
.getParameter(0)
.getMember("contents")
.getArrayElement() and
msg.getMember("role").asSink().mayHaveStringValue("model")
|
// config.systemInstruction
result = params.getMember("config").getMember("systemInstruction")
or
// contents: [{ role: "model", parts: [{ text: "..." }] }]
// Gemini uses "model" role instead of "assistant"
exists(API::Node msg |
msg = params.getMember("contents").getArrayElement() and
msg.getMember("role").asSink().mayHaveStringValue("model")
|
result = msg.getMember("parts").getArrayElement().getMember("text")
)
result = msg.getMember("parts").getArrayElement().getMember("text")
)
or
// ai.chats.create({ config: { systemInstruction: ... } })
result =
clientRef()
.getMember("chats")
.getMember("create")
.getParameter(0)
.getMember("config")
.getMember("systemInstruction")
or
// chat.sendMessage({ config: { systemInstruction: ... } })
result =
clientRef()
.getMember("chats")
.getMember("create")
.getReturn()
.getMember("sendMessage")
.getParameter(0)
.getMember("config")
.getMember("systemInstruction")
or
// ai.live.connect({ config: { systemInstruction: ... } })
result =
clientRef()
.getMember("live")
.getMember("connect")
.getParameter(0)
.getMember("config")
.getMember("systemInstruction")
}
/** Gets a reference to nodes where potential user input can land. */
/**
* Gets role-filtered user message sinks.
* These require checking a sibling `role` property and cannot be expressed in MaD.
*/
API::Node getUserPromptNode() {
exists(API::Node params |
// ai.models.generateContent({ contents: ... }) / generateContentStream
params =
// contents: [{ role: "user", parts: [{ text: "..." }] }]
exists(API::Node msg |
msg =
clientRef()
.getMember("models")
.getMember(["generateContent", "generateContentStream"])
.getParameter(0)
.getMember("contents")
.getArrayElement() and
not msg.getMember("role").asSink().mayHaveStringValue("model")
|
// contents: "string" or contents: [Part]
result = params.getMember("contents")
or
// contents: [{ role: "user", parts: [{ text: "..." }] }]
exists(API::Node msg |
msg = params.getMember("contents").getArrayElement() and
not msg.getMember("role").asSink().mayHaveStringValue("model")
|
result = msg.getMember("parts").getArrayElement().getMember("text")
)
result = msg.getMember("parts").getArrayElement().getMember("text")
)
or
// ai.models.generateImages({ prompt, config })
result =
clientRef()
.getMember("models")
.getMember("generateImages")
.getParameter(0)
.getMember("prompt")
or
// ai.models.editImage({ prompt, referenceImages, config })
result =
clientRef()
.getMember("models")
.getMember("editImage")
.getParameter(0)
.getMember("prompt")
or
// ai.models.generateVideos({ prompt, config })
result =
clientRef()
.getMember("models")
.getMember("generateVideos")
.getParameter(0)
.getMember("prompt")
or
// chat.sendMessage({ message: ... }) and chat.sendMessageStream({ message: ... })
exists(API::Node sendParam |
sendParam =
clientRef()
.getMember("chats")
.getMember("create")
.getReturn()
.getMember(["sendMessage", "sendMessageStream"])
.getParameter(0)
|
result = sendParam.getMember("message")
or
// chat.sendMessage({ content: [...] }) — used for image editing
result = sendParam.getMember("content")
)
or
// ai.models.embedContent({ content: ... })
result =
clientRef()
.getMember("models")
.getMember("embedContent")
.getParameter(0)
.getMember("content")
or
// ai.interactions.create({ input: ... })
result =
clientRef()
.getMember("interactions")
.getMember("create")
.getParameter(0)
.getMember("input")
}
}
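
To make the retained role check concrete, a hedged sketch of the message shape
it inspects; Gemini uses the role "model" where other SDKs use "assistant"
(model id and variable names are placeholders):

import { GoogleGenAI } from "@google/genai";

const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY ?? "" });
const untrusted = process.env.UNTRUSTED ?? "";

await ai.models.generateContent({
  model: "gemini-2.0-flash",
  contents: [
    // role "model": matched by getSystemOrAssistantPromptNode()
    { role: "model", parts: [{ text: untrusted }] },
    // any other (or missing) role: matched by getUserPromptNode()
    { role: "user", parts: [{ text: untrusted }] },
  ],
});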


@@ -1,11 +1,17 @@
/**
* Provides classes modeling security-relevant aspects of the `openai-node` package.
* See https://github.com/openai/openai-node
*
* Structurally typed sinks (instructions, prompt, input, etc.) have been moved to
* Models as Data: javascript/ql/lib/ext/openai.model.yml
*
* This file retains only role-filtered sinks that require inspecting a sibling
* `role` property, which MaD cannot express.
*/
private import javascript
/** Holds if `msg` is a message array element with a privileged role. */
private predicate isSystemOrDevMessage(API::Node msg) {
msg.getMember("role").asSink().mayHaveStringValue(["system", "developer", "assistant"])
}
@@ -18,36 +24,17 @@ module OpenAIGuardrails {
API::Node getSanitizerNode() {
// checkPlainText(userInput, bundle) or runGuardrails(userInput, bundle)
result = classRef()
.getMember(["checkPlainText", "runGuardrails"])
result = classRef().getMember(["checkPlainText", "runGuardrails"])
}
}
module OpenAI {
/** Gets a reference to all clients without guardrails. */
API::Node clientsNoGuardrails() {
// Default export: import OpenAI from 'openai'; new OpenAI()
/** Gets a reference to all OpenAI client instances. */
private API::Node allClients() {
result = API::moduleImport("openai").getInstance()
or
// Named import: import { OpenAI, AzureOpenAI } from 'openai'; new AzureOpenAI()
result = API::moduleImport("openai").getMember(["OpenAI", "AzureOpenAI"]).getInstance()
or
result = unprotectedGuardedClient()
}
/** Gets a reference to the `openai.OpenAI` class or a guardrails-wrapped equivalent. */
API::Node allClients() {
// Default export: import OpenAI from 'openai'; new OpenAI()
result = clientsNoGuardrails()
or
// Guardrails drop-in: import { GuardrailsOpenAI } from '@openai/guardrails';
// const client = await GuardrailsOpenAI.create(config);
result = guardedClient()
}
/** Gets a reference to an open AI client from Guardrails. */
API::Node guardedClient() {
result =
API::moduleImport("@openai/guardrails")
.getMember(["GuardrailsOpenAI", "GuardrailsAzureOpenAI"])
@@ -56,57 +43,26 @@ module OpenAI {
.getPromised()
}
/** Gets a guarded client that is clearly configured without input guardrails. */
API::Node unprotectedGuardedClient() {
exists(API::Node createCall |
createCall =
API::moduleImport("@openai/guardrails")
.getMember(["GuardrailsOpenAI", "GuardrailsAzureOpenAI"])
.getMember("create") and
result = createCall.getReturn().getPromised() and
// Config is an inspectable object literal, e.g. GuardrailsOpenAI.create({ version: 1 })
exists(createCall.getParameter(0).getMember("version")) and
// No input-stage guardrails, e.g. missing input: { guardrails: [{ name: '...' }] }
not exists(
createCall.getParameter(0).getMember("input").getMember("guardrails").getArrayElement()
) and
// No pre_flight-stage guardrails, e.g. missing pre_flight: { guardrails: [{ name: '...' }] }
not exists(
createCall.getParameter(0).getMember("pre_flight").getMember("guardrails").getArrayElement()
)
)
}
/** Gets a reference to a potential property of `openai.OpenAI` called instructions which refers to the system prompt. */
/**
* Gets role-filtered system/developer/assistant message sinks.
* These require checking a sibling `role` property and cannot be expressed in MaD.
*/
API::Node getSystemOrAssistantPromptNode() {
// responses.create({ input: ..., instructions: ... })
// input can be a string or an array of message objects
exists(API::Node responsesCreate |
responsesCreate =
// responses.create({ input: [{ role: "system"/"developer", content: "..." }] })
exists(API::Node msg |
msg =
allClients()
.getMember("responses")
.getMember("create")
.getParameter(0)
.getMember("input")
.getArrayElement() and
isSystemOrDevMessage(msg)
|
// instructions: "string"
result = responsesCreate.getMember("instructions")
// intended that user data can flow into input
// or
// // input: "string"
// result = responsesCreate.getMember("input")
or
// input: [{ role: "system"/"developer", content: "..." }]
exists(API::Node msg |
msg = responsesCreate.getMember("input").getArrayElement() and
isSystemOrDevMessage(msg)
|
result = msg.getMember("content")
)
result = msg.getMember("content")
)
or
// chat.completions.create({ messages: [{ role: "system"/"developer", content: ... }] })
// content can be a string or an array of content parts
exists(API::Node msg, API::Node content |
msg =
allClients()
@@ -119,32 +75,11 @@ module OpenAI {
isSystemOrDevMessage(msg) and
content = msg.getMember("content")
|
// content: "string"
result = content
or
// content: [{ type: "text", text: "..." }]
result = content.getArrayElement().getMember("text")
)
or
// beta.assistants.create({ instructions: ... }) and beta.assistants.update(id, { instructions: ... })
result =
allClients()
.getMember("beta")
.getMember("assistants")
.getMember(["create", "update"])
.getParameter(0)
.getMember("instructions")
or
// beta.threads.runs.create(threadId, { instructions: ..., additional_instructions: ... })
result =
allClients()
.getMember("beta")
.getMember("threads")
.getMember("runs")
.getMember("create")
.getParameter(1)
.getMember(["instructions", "additional_instructions"])
or
// beta.threads.messages.create(threadId, { role: "system"/"developer", content: ... })
exists(API::Node msg |
msg =
@@ -160,20 +95,15 @@ module OpenAI {
)
}
/** Gets a reference to nodes where potential user input can land. */
/**
* Gets role-filtered user message sinks.
* These require checking a sibling `role` property and cannot be expressed in MaD.
*/
API::Node getUserPromptNode() {
// responses.create({ input: ... }) — string input
result =
clientsNoGuardrails()
.getMember("responses")
.getMember("create")
.getParameter(0)
.getMember("input")
or
// responses.create({ input: [{ role: "user", content: ... }] })
exists(API::Node msg |
msg =
clientsNoGuardrails()
allClients()
.getMember("responses")
.getMember("create")
.getParameter(0)
@@ -185,10 +115,9 @@ module OpenAI {
)
or
// chat.completions.create({ messages: [{ role: "user", content: ... }] })
// content can be a string or an array of content parts
exists(API::Node msg, API::Node content |
msg =
clientsNoGuardrails()
allClients()
.getMember("chat")
.getMember("completions")
.getMember("create")
@@ -198,41 +127,15 @@ module OpenAI {
not isSystemOrDevMessage(msg) and
content = msg.getMember("content")
|
// content: "string"
result = content
or
// content: [{ type: "text", text: "..." }]
result = content.getArrayElement().getMember("text")
)
or
// Legacy completions API: completions.create({ prompt: ... })
result =
clientsNoGuardrails()
.getMember("completions")
.getMember("create")
.getParameter(0)
.getMember("prompt")
or
// images.generate({ prompt: ... }) and images.edit({ prompt: ... })
result =
clientsNoGuardrails()
.getMember("images")
.getMember(["generate", "edit"])
.getParameter(0)
.getMember("prompt")
or
// embeddings.create({ input: ... })
result =
clientsNoGuardrails()
.getMember("embeddings")
.getMember("create")
.getParameter(0)
.getMember("input")
or
// beta.threads.messages.create(threadId, { role: "user", content: ... })
exists(API::Node msg |
msg =
clientsNoGuardrails()
allClients()
.getMember("beta")
.getMember("threads")
.getMember("messages")
@@ -242,28 +145,18 @@ module OpenAI {
|
result = msg.getMember("content")
)
or
// audio.transcriptions.create({ prompt: ... }) and audio.translations.create({ prompt: ... })
result =
clientsNoGuardrails()
.getMember("audio")
.getMember(["transcriptions", "translations"])
.getMember("create")
.getParameter(0)
.getMember("prompt")
}
}
/**
* Provides models for agents SDK (instances of the `agents` class etc).
* Provides models for agents SDK.
*
* See https://github.com/openai/openai-agents-js and
* https://github.com/openai/openai-guardrails-js.
*
* Note: Agent.run is not covered currently for the user prompt because it necessitates a more complex analysis.
* Specifically, the call looks like run(agent, input), where the agent may have been initiated as a guardrails agent or an unsafe agent.
* The input may also be coming from a non-external source so we'd need to cross-reference two analyses. Instead, we will flag unsafe agent creations, thus
* guaranteeing that when the value reaches the run call, it is either safe or previously flagged.
*
* Structurally typed sinks have been moved to openai.model.yml.
* This module retains only role-filtered sinks, callback-based sinks, and
* unsafe agent detection that MaD cannot express.
*/
module AgentSDK {
API::Node moduleRef() {
@@ -272,78 +165,43 @@ module AgentSDK {
result = API::moduleImport("@openai/guardrails")
}
/** Gets a reference to the `agents.Runner` class. */
API::Node agentConstructor() { result = moduleRef().getMember("Agent") }
API::Node classInstance() { result = agentConstructor().getInstance() }
/** Gets a reference to the top-level run() or Runner.run() functions. */
API::Node run() {
// import { run } from '@openai/agents'; run(agent, input)
private API::Node run() {
result = moduleRef().getMember("run")
or
// const runner = new Runner(); runner.run(agent, input)
result = moduleRef().getMember("Runner").getInstance().getMember("run")
}
API::Node asTool() { result = classInstance().getMember("asTool")}
API::Node toolFunction() { result = moduleRef().getMember("tool") }
/** Gets a reference to a potential property of `agents.Runner` called input which can refer to a system prompt depending on the role specified. */
/**
* Gets role-filtered and callback-based system prompt sinks that MaD cannot express.
*/
API::Node getSystemOrAssistantPromptNode() {
// Agent({ instructions: ... })
result = agentConstructor()
.getParameter(0)
.getMember(["instructions", "handoffDescription"])
or
// Agent({ instructions: (runContext) => returnValue })
result = agentConstructor()
.getParameter(0)
.getMember("instructions")
.getReturn()
// Agent({ instructions: (runContext) => returnValue }) — callback form
result = moduleRef()
.getMember("Agent")
.getParameter(0)
.getMember("instructions")
.getReturn()
or
// run(agent, [{ role: "system"/"developer", content: ... }])
exists(API::Node msg |
msg = run()
.getParameter(1)
.getArrayElement() and
.getParameter(1)
.getArrayElement() and
isSystemOrDevMessage(msg)
|
result = msg.getMember("content")
)
or
// agent.asTool({..., toolDescription: ...})
result = asTool().getParameter(0).getMember("toolDescription")
or
// tool({..., description: ...})
result = toolFunction().getParameter(0).getMember("description")
or
// GuardrailAgent.create(config, name, instructions)
// import { GuardrailAgent } from '@openai/guardrails';
result =
moduleRef()
.getMember("GuardrailAgent")
.getMember("create")
.getParameter(2)
or
// GuardrailAgent.create(config, name, (ctx, agent) => "...") — callback form
result =
moduleRef()
.getMember("GuardrailAgent")
.getMember("create")
.getParameter(2)
.getReturn()
}
/**
* Gets an agent constructor config that visibly lacks input guardrails.
* Covers both native Agent({ inputGuardrails: [...] }) and
* GuardrailAgent.create({ input: { guardrails: [...] } }, ...).
*/
API::Node getUnsafeAgentNode() {
// new Agent({ name: '...', ... }) without inputGuardrails
result = agentConstructor().getParameter(0) and
result = moduleRef().getMember("Agent").getParameter(0) and
// Config is an inspectable object literal
(exists(result.getMember("name")) or exists(result.getMember("instructions"))) and
not exists(result.getMember("inputGuardrails").getArrayElement())
@@ -355,13 +213,10 @@ module AgentSDK {
.getMember("GuardrailAgent")
.getMember("create") and
result = createCall.getParameter(0) and
// Config is an inspectable object literal
exists(result.getMember("version")) and
// No input-stage guardrails
not exists(
result.getMember("input").getMember("guardrails").getArrayElement()
) and
// No pre_flight-stage guardrails
not exists(
result.getMember("pre_flight").getMember("guardrails").getArrayElement()
)
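
For reference, a minimal sketch of an agent configuration that the
unsafe-agent check described above is meant to match: an inspectable object
literal with no input guardrails (the name and instructions are placeholders):

import { Agent, run } from "@openai/agents";

// Object literal with `name`/`instructions` but no `inputGuardrails` entries,
// so getUnsafeAgentNode() would flag this configuration.
const agent = new Agent({
  name: "support-bot",
  instructions: "Answer customer questions.",
});

await run(agent, process.env.UNTRUSTED ?? "");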


@@ -50,7 +50,9 @@ module SystemPromptInjection {
}
private class SinkFromModel extends Sink {
SinkFromModel() { this = ModelOutput::getASinkNode("prompt-injection").asSink() }
SinkFromModel() {
this = ModelOutput::getASinkNode("system-prompt-injection").asSink()
}
}
private class PromptContentSink extends Sink {


@@ -51,7 +51,9 @@ module UserPromptInjection {
}
private class SinkFromModel extends Sink {
SinkFromModel() { this = ModelOutput::getASinkNode("prompt-injection").asSink() }
SinkFromModel() {
this = ModelOutput::getASinkNode("user-prompt-injection").asSink()
}
}
private class PromptContentSink extends Sink {