changes for splitting prompt injection into system and user

This commit is contained in:
BazookaMusic
2026-05-04 11:57:43 +02:00
parent 0b7133c4ce
commit 74a3ba1f0d
26 changed files with 997 additions and 160 deletions

View File

@@ -1,24 +0,0 @@
<!DOCTYPE qhelp PUBLIC
"-//Semmle//qhelp//EN"
"qhelp.dtd">
<qhelp>
<overview>
<p>Prompts can be constructed to bypass the original purposes of an agent and lead to sensitive data leak or
operations that were not intended.</p>
</overview>
<recommendation>
<p>Sanitize user input and also avoid using user input in developer or system level prompts.</p>
</recommendation>
<example>
<p>In the following examples, the cases marked GOOD show secure prompt construction; whereas in the case marked BAD they may be susceptible to prompt injection.</p>
<sample src="examples/example.py" />
</example>
<references>
<li>OpenAI: <a href="https://openai.github.io/openai-guardrails-python">Guardrails</a>.</li>
</references>
</qhelp>

View File

@@ -0,0 +1,31 @@
<!DOCTYPE qhelp PUBLIC
"-//Semmle//qhelp//EN"
"qhelp.dtd">
<qhelp>
<overview>
<p>If user-controlled data is included in a system prompt, an attacker can manipulate the instructions
that govern the AI model's behavior, bypassing intended restrictions and potentially causing sensitive
data leaks or unintended operations.</p>
</overview>
<recommendation>
<p>Do not include user input in system-level or developer-level prompts. If user input must influence
the system prompt, validate it against a fixed allowlist of permitted values.</p>
</recommendation>
<example>
<p>In the following example, a user-controlled value is inserted directly into a system-level prompt
without validation, allowing an attacker to manipulate the AI's behavior.</p>
<sample src="examples/prompt-injection.js" />
<p>The fix validates the user input against a fixed allowlist of permitted values before
including it in the prompt.</p>
<sample src="examples/prompt-injection_fixed.js" />
</example>
<references>
<li>OWASP: <a href="https://genai.owasp.org/llmrisk/llm01-prompt-injection/">LLM01: Prompt Injection</a>.</li>
<li>MITRE CWE: <a href="https://cwe.mitre.org/data/definitions/1427.html">CWE-1427: Improper Neutralization of Input Used for LLM Prompting</a>.</li>
</references>
</qhelp>

View File

@@ -11,10 +11,10 @@
*/
import javascript
import experimental.semmle.javascript.security.PromptInjection.PromptInjectionQuery
import PromptInjectionFlow::PathGraph
import experimental.semmle.javascript.security.PromptInjection.SystemPromptInjectionQuery
import SystemPromptInjectionFlow::PathGraph
from PromptInjectionFlow::PathNode source, PromptInjectionFlow::PathNode sink
where PromptInjectionFlow::flowPath(source, sink)
from SystemPromptInjectionFlow::PathNode source, SystemPromptInjectionFlow::PathNode sink
where SystemPromptInjectionFlow::flowPath(source, sink)
select sink.getNode(), source, sink, "This prompt construction depends on a $@.", source.getNode(),
"user-provided value"

View File

@@ -0,0 +1,22 @@
/**
 * @name User prompt injection
 * @description Untrusted input flowing into a user-role prompt of an AI model
 * may allow an attacker to manipulate the model's behavior.
 * @kind path-problem
 * @problem.severity error
 * @security-severity 5.0
 * @precision high
 * @id js/user-prompt-injection
 * @tags security
 * experimental
 * external/cwe/cwe-1427
 */
import javascript
// Module names are case-sensitive: the library file is `UserPromptInjectionQuery`
// (capital I in "Injection"), matching the `SystemPromptInjectionQuery` sibling.
import experimental.semmle.javascript.security.PromptInjection.UserPromptInjectionQuery
import UserPromptInjectionFlow::PathGraph
from UserPromptInjectionFlow::PathNode source, UserPromptInjectionFlow::PathNode sink
where UserPromptInjectionFlow::flowPath(source, sink)
select sink.getNode(), source, sink, "This prompt construction depends on a $@.", source.getNode(),
"user-provided value"

View File

@@ -1,17 +0,0 @@
from flask import Flask, request
from agents import Agent
from guardrails import GuardrailAgent
@app.route("/parameter-route")
def get_input():
input = request.args.get("input")
goodAgent = GuardrailAgent( # GOOD: Agent created with guardrails automatically configured.
config=Path("guardrails_config.json"),
name="Assistant",
instructions="This prompt is customized for " + input)
badAgent = Agent(
name="Assistant",
instructions="This prompt is customized for " + input # BAD: user input in agent instruction.
)

View File

@@ -0,0 +1,26 @@
// Query-help example for js/system-prompt-injection: demonstrates the
// vulnerable pattern in which untrusted request data reaches a system-role
// prompt. Intentionally insecure — see prompt-injection_fixed.js for the fix.
const express = require("express");
const OpenAI = require("openai");
const app = express();
const client = new OpenAI();
app.get("/chat", async (req, res) => {
// `persona` is taken straight from the query string — attacker-controlled.
let persona = req.query.persona;
// BAD: user input is used directly in a system-level prompt
const response = await client.chat.completions.create({
model: "gpt-4.1",
messages: [
{
// The system message governs the model's behavior; concatenating
// unvalidated input here lets an attacker rewrite its instructions.
role: "system",
content: "You are a helpful assistant. Act as a " + persona,
},
{
role: "user",
content: req.query.message,
},
],
});
res.json(response);
});

View File

@@ -0,0 +1,32 @@
// Query-help example for js/system-prompt-injection: the secure counterpart
// of prompt-injection.js. User input only reaches the system prompt after
// being validated against a fixed allowlist.
const express = require("express");
const OpenAI = require("openai");
const app = express();
const client = new OpenAI();
// Closed set of permitted personas; anything else is rejected up front.
const ALLOWED_PERSONAS = ["pirate", "teacher", "poet"];
app.get("/chat", async (req, res) => {
let persona = req.query.persona;
// GOOD: user input is validated against a fixed allowlist before use in a prompt
if (!ALLOWED_PERSONAS.includes(persona)) {
return res.status(400).json({ error: "Invalid persona" });
}
const response = await client.chat.completions.create({
model: "gpt-4.1",
messages: [
{
// Safe: `persona` is now guaranteed to be one of the allowlisted values.
role: "system",
content: "You are a helpful assistant. Act as a " + persona,
},
{
role: "user",
content: req.query.message,
},
],
});
res.json(response);
});

View File

@@ -12,9 +12,8 @@ module Anthropic {
result = API::moduleImport("@anthropic-ai/sdk").getInstance()
}
/** Gets a reference to a sink for the system prompt in the Anthropic messages API. */
API::Node getContentNode() {
API::Node getSystemOrAssistantPromptNode() {
exists(API::Node createParams |
// client.messages.create({ ... })
createParams = classRef()
@@ -61,4 +60,30 @@ module Anthropic {
.getParameter(1)
.getMember("system")
}
/** Gets a reference to nodes where potential user input can land. */
API::Node getUserPromptNode() {
exists(API::Node createParams |
// client.messages.create({ ... })
createParams = classRef()
.getMember("messages")
.getMember("create")
.getParameter(0)
or
// client.beta.messages.create({ ... })
createParams = classRef()
.getMember("beta")
.getMember("messages")
.getMember("create")
.getParameter(0)
|
// messages: [{ role: "user", content: "..." }]
exists(API::Node msg |
msg = createParams.getMember("messages").getArrayElement() and
not msg.getMember("role").asSink().mayHaveStringValue("assistant")
|
result = msg.getMember("content")
)
)
}
}

View File

@@ -14,7 +14,7 @@ module GoogleGenAI {
}
/** Gets a reference to a sink for prompt content in the Google GenAI SDK. */
API::Node getContentNode() {
API::Node getSystemOrAssistantPromptNode() {
exists(API::Node params |
// ai.models.generateContent({ contents, config })
// ai.models.generateContentStream({ contents, config })
@@ -37,22 +37,6 @@ module GoogleGenAI {
)
)
or
// ai.models.generateImages({ prompt, config })
result =
clientRef()
.getMember("models")
.getMember("generateImages")
.getParameter(0)
.getMember("prompt")
or
// ai.models.editImage({ prompt, referenceImages, config })
result =
clientRef()
.getMember("models")
.getMember("editImage")
.getParameter(0)
.getMember("prompt")
or
// ai.chats.create({ config: { systemInstruction: ... } })
result =
clientRef()
@@ -82,4 +66,83 @@ module GoogleGenAI {
.getMember("config")
.getMember("systemInstruction")
}
/** Gets a reference to nodes where potential user input can land. */
API::Node getUserPromptNode() {
exists(API::Node params |
// ai.models.generateContent({ contents: ... }) / generateContentStream
params =
clientRef()
.getMember("models")
.getMember(["generateContent", "generateContentStream"])
.getParameter(0)
|
// contents: "string" or contents: [Part]
result = params.getMember("contents")
or
// contents: [{ role: "user", parts: [{ text: "..." }] }]
exists(API::Node msg |
msg = params.getMember("contents").getArrayElement() and
not msg.getMember("role").asSink().mayHaveStringValue("model")
|
result = msg.getMember("parts").getArrayElement().getMember("text")
)
)
or
// ai.models.generateImages({ prompt, config })
result =
clientRef()
.getMember("models")
.getMember("generateImages")
.getParameter(0)
.getMember("prompt")
or
// ai.models.editImage({ prompt, referenceImages, config })
result =
clientRef()
.getMember("models")
.getMember("editImage")
.getParameter(0)
.getMember("prompt")
or
// ai.models.generateVideos({ prompt, config })
result =
clientRef()
.getMember("models")
.getMember("generateVideos")
.getParameter(0)
.getMember("prompt")
or
// chat.sendMessage({ message: ... }) and chat.sendMessageStream({ message: ... })
exists(API::Node sendParam |
sendParam =
clientRef()
.getMember("chats")
.getMember("create")
.getReturn()
.getMember(["sendMessage", "sendMessageStream"])
.getParameter(0)
|
result = sendParam.getMember("message")
or
// chat.sendMessage({ content: [...] }) — used for image editing
result = sendParam.getMember("content")
)
or
// ai.models.embedContent({ content: ... })
result =
clientRef()
.getMember("models")
.getMember("embedContent")
.getParameter(0)
.getMember("content")
or
// ai.interactions.create({ input: ... })
result =
clientRef()
.getMember("interactions")
.getMember("create")
.getParameter(0)
.getMember("input")
}
}

View File

@@ -10,24 +10,81 @@ private predicate isSystemOrDevMessage(API::Node msg) {
msg.getMember("role").asSink().mayHaveStringValue(["system", "developer", "assistant"])
}
module OpenAI {
/** Gets a reference to the `openai.OpenAI` class. */
module OpenAIGuardrails {
/** Gets a reference to the `GuardrailsOpenAI` class. */
API::Node classRef() {
result = API::moduleImport("@openai/guardrails")
}
API::Node getSanitizerNode() {
// checkPlainText(userInput, bundle) or runGuardrails(userInput, bundle)
result = classRef()
.getMember(["checkPlainText", "runGuardrails"])
}
}
module OpenAI {
/** Gets a reference to all clients without guardrails. */
API::Node clientsNoGuardrails() {
// Default export: import OpenAI from 'openai'; new OpenAI()
result = API::moduleImport("openai").getInstance()
or
// Named import: import { OpenAI, AzureOpenAI } from 'openai'; new AzureOpenAI()
result = API::moduleImport("openai").getMember(["OpenAI", "AzureOpenAI"]).getInstance()
or
result = unprotectedGuardedClient()
}
/** Gets a reference to the `openai.OpenAI` class or a guardrails-wrapped equivalent. */
API::Node allClients() {
// Default export: import OpenAI from 'openai'; new OpenAI()
result = clientsNoGuardrails()
or
// Guardrails drop-in: import { GuardrailsOpenAI } from '@openai/guardrails';
// const client = await GuardrailsOpenAI.create(config);
result = guardedClient()
}
/** Gets a reference to an open AI client from Guardrails. */
API::Node guardedClient() {
result =
API::moduleImport("@openai/guardrails")
.getMember(["GuardrailsOpenAI", "GuardrailsAzureOpenAI"])
.getMember("create")
.getReturn()
.getPromised()
}
/** Gets a guarded client that is clearly configured without input guardrails. */
API::Node unprotectedGuardedClient() {
exists(API::Node createCall |
createCall =
API::moduleImport("@openai/guardrails")
.getMember(["GuardrailsOpenAI", "GuardrailsAzureOpenAI"])
.getMember("create") and
result = createCall.getReturn().getPromised() and
// Config is an inspectable object literal, e.g. GuardrailsOpenAI.create({ version: 1 })
exists(createCall.getParameter(0).getMember("version")) and
// No input-stage guardrails, e.g. missing input: { guardrails: [{ name: '...' }] }
not exists(
createCall.getParameter(0).getMember("input").getMember("guardrails").getArrayElement()
) and
// No pre_flight-stage guardrails, e.g. missing pre_flight: { guardrails: [{ name: '...' }] }
not exists(
createCall.getParameter(0).getMember("pre_flight").getMember("guardrails").getArrayElement()
)
)
}
/** Gets a reference to a potential property of `openai.OpenAI` called instructions which refers to the system prompt. */
API::Node getContentNode() {
API::Node getSystemOrAssistantPromptNode() {
// responses.create({ input: ..., instructions: ... })
// input can be a string or an array of message objects
exists(API::Node responsesCreate |
responsesCreate =
classRef()
allClients()
.getMember("responses")
.getMember("create")
.getParameter(0)
@@ -52,7 +109,7 @@ module OpenAI {
// content can be a string or an array of content parts
exists(API::Node msg, API::Node content |
msg =
classRef()
allClients()
.getMember("chat")
.getMember("completions")
.getMember("create")
@@ -69,33 +126,9 @@ module OpenAI {
result = content.getArrayElement().getMember("text")
)
or
// Legacy completions API: completions.create({ prompt: ... })
result =
classRef()
.getMember("completions")
.getMember("create")
.getParameter(0)
.getMember("prompt")
or
// images.generate({ prompt: ... }) and images.edit({ prompt: ... })
result =
classRef()
.getMember("images")
.getMember(["generate", "edit"])
.getParameter(0)
.getMember("prompt")
or
// embeddings.create({ input: ... })
result =
classRef()
.getMember("embeddings")
.getMember("create")
.getParameter(0)
.getMember("input")
or
// beta.assistants.create({ instructions: ... }) and beta.assistants.update(id, { instructions: ... })
result =
classRef()
allClients()
.getMember("beta")
.getMember("assistants")
.getMember(["create", "update"])
@@ -104,7 +137,7 @@ module OpenAI {
or
// beta.threads.runs.create(threadId, { instructions: ..., additional_instructions: ... })
result =
classRef()
allClients()
.getMember("beta")
.getMember("threads")
.getMember("runs")
@@ -115,7 +148,7 @@ module OpenAI {
// beta.threads.messages.create(threadId, { role: "system"/"developer", content: ... })
exists(API::Node msg |
msg =
classRef()
allClients()
.getMember("beta")
.getMember("threads")
.getMember("messages")
@@ -125,10 +158,94 @@ module OpenAI {
|
result = msg.getMember("content")
)
}
/** Gets a reference to nodes where potential user input can land. */
API::Node getUserPromptNode() {
// responses.create({ input: ... }) — string input
result =
clientsNoGuardrails()
.getMember("responses")
.getMember("create")
.getParameter(0)
.getMember("input")
or
// responses.create({ input: [{ role: "user", content: ... }] })
exists(API::Node msg |
msg =
clientsNoGuardrails()
.getMember("responses")
.getMember("create")
.getParameter(0)
.getMember("input")
.getArrayElement() and
not isSystemOrDevMessage(msg)
|
result = msg.getMember("content")
)
or
// chat.completions.create({ messages: [{ role: "user", content: ... }] })
// content can be a string or an array of content parts
exists(API::Node msg, API::Node content |
msg =
clientsNoGuardrails()
.getMember("chat")
.getMember("completions")
.getMember("create")
.getParameter(0)
.getMember("messages")
.getArrayElement() and
not isSystemOrDevMessage(msg) and
content = msg.getMember("content")
|
// content: "string"
result = content
or
// content: [{ type: "text", text: "..." }]
result = content.getArrayElement().getMember("text")
)
or
// Legacy completions API: completions.create({ prompt: ... })
result =
clientsNoGuardrails()
.getMember("completions")
.getMember("create")
.getParameter(0)
.getMember("prompt")
or
// images.generate({ prompt: ... }) and images.edit({ prompt: ... })
result =
clientsNoGuardrails()
.getMember("images")
.getMember(["generate", "edit"])
.getParameter(0)
.getMember("prompt")
or
// embeddings.create({ input: ... })
result =
clientsNoGuardrails()
.getMember("embeddings")
.getMember("create")
.getParameter(0)
.getMember("input")
or
// beta.threads.messages.create(threadId, { role: "user", content: ... })
exists(API::Node msg |
msg =
clientsNoGuardrails()
.getMember("beta")
.getMember("threads")
.getMember("messages")
.getMember("create")
.getParameter(1) and
not isSystemOrDevMessage(msg)
|
result = msg.getMember("content")
)
or
// audio.transcriptions.create({ prompt: ... }) and audio.translations.create({ prompt: ... })
result =
classRef()
clientsNoGuardrails()
.getMember("audio")
.getMember(["transcriptions", "translations"])
.getMember("create")
@@ -140,10 +257,20 @@ module OpenAI {
/**
* Provides models for agents SDK (instances of the `agents` class etc).
*
* See https://github.com/openai/openai-agents-js.
* See https://github.com/openai/openai-agents-js and
* https://github.com/openai/openai-guardrails-js.
*
* Note: Agent.run is not covered currently for the user prompt because it necessitates a more complex analysis.
* Specifically, the call looks like run(agent, input), where the agent may have been initiated as a guardrails agent or an unsafe agent.
* The input may also be coming from a non-external source so we'd need to cross-reference two analyses. Instead, we will flag unsafe agent creations, thus
* guaranteeing that when the value reaches the run call, it is either safe or previously flagged.
*/
module AgentSDK {
API::Node moduleRef() { result = API::moduleImport("@openai/agents") }
API::Node moduleRef() {
result = API::moduleImport("@openai/agents")
or
result = API::moduleImport("@openai/guardrails")
}
/** Gets a reference to the `agents.Runner` class. */
API::Node agentConstructor() { result = moduleRef().getMember("Agent") }
@@ -164,7 +291,7 @@ module AgentSDK {
API::Node toolFunction() { result = moduleRef().getMember("tool") }
/** Gets a reference to a potential property of `agents.Runner` called input which can refer to a system prompt depending on the role specified. */
API::Node getContentNode() {
API::Node getSystemOrAssistantPromptNode() {
// Agent({ instructions: ... })
result = agentConstructor()
.getParameter(0)
@@ -176,10 +303,6 @@ module AgentSDK {
.getMember("instructions")
.getReturn()
or
// run(agent, input) or runner.run(agent, input) — string input
result = run()
.getParameter(1)
or
// run(agent, [{ role: "system"/"developer", content: ... }])
exists(API::Node msg |
msg = run()
@@ -195,5 +318,53 @@ module AgentSDK {
or
// tool({..., description: ...})
result = toolFunction().getParameter(0).getMember("description")
or
// GuardrailAgent.create(config, name, instructions)
// import { GuardrailAgent } from '@openai/guardrails';
result =
moduleRef()
.getMember("GuardrailAgent")
.getMember("create")
.getParameter(2)
or
// GuardrailAgent.create(config, name, (ctx, agent) => "...") — callback form
result =
moduleRef()
.getMember("GuardrailAgent")
.getMember("create")
.getParameter(2)
.getReturn()
}
/**
* Gets an agent constructor config that visibly lacks input guardrails.
* Covers both native Agent({ inputGuardrails: [...] }) and
* GuardrailAgent.create({ input: { guardrails: [...] } }, ...).
*/
API::Node getUnsafeAgentNode() {
// new Agent({ name: '...', ... }) without inputGuardrails
result = agentConstructor().getParameter(0) and
// Config is an inspectable object literal
(exists(result.getMember("name")) or exists(result.getMember("instructions"))) and
not exists(result.getMember("inputGuardrails").getArrayElement())
or
// GuardrailAgent.create(config, ...) without input/pre_flight guardrails
exists(API::Node createCall |
createCall =
moduleRef()
.getMember("GuardrailAgent")
.getMember("create") and
result = createCall.getParameter(0) and
// Config is an inspectable object literal
exists(result.getMember("version")) and
// No input-stage guardrails
not exists(
result.getMember("input").getMember("guardrails").getArrayElement()
) and
// No pre_flight-stage guardrails
not exists(
result.getMember("pre_flight").getMember("guardrails").getArrayElement()
)
)
}
}

View File

@@ -20,7 +20,7 @@ private import experimental.semmle.javascript.frameworks.GoogleGenAI
* "prompt injection"
* vulnerabilities, as well as extension points for adding your own.
*/
module PromptInjection {
module SystemPromptInjection {
/**
* A data flow source for "prompt injection" vulnerabilities.
*/
@@ -39,7 +39,14 @@ module PromptInjection {
/**
* An active threat-model source, considered as a flow source.
*/
private class ActiveThreatModelSourceAsSource extends Source, ActiveThreatModelSource { }
private class ActiveThreatModelSourceAsSource extends Source, ActiveThreatModelSource {
ActiveThreatModelSourceAsSource()
{
this instanceof RemoteFlowSource
or
this.isClientSideSource()
}
}
/**
* A prompt to an AI model, considered as a flow sink.
@@ -54,13 +61,13 @@ module PromptInjection {
private class PromptContentSink extends Sink {
PromptContentSink() {
this = OpenAI::getContentNode().asSink()
this = OpenAI::getSystemOrAssistantPromptNode().asSink()
or
this = AgentSDK::getContentNode().asSink()
this = AgentSDK::getSystemOrAssistantPromptNode().asSink()
or
this = Anthropic::getContentNode().asSink()
this = Anthropic::getSystemOrAssistantPromptNode().asSink()
or
this = GoogleGenAI::getContentNode().asSink()
this = GoogleGenAI::getSystemOrAssistantPromptNode().asSink()
}
}

View File

@@ -9,9 +9,9 @@
private import javascript
import semmle.javascript.dataflow.DataFlow
import semmle.javascript.dataflow.TaintTracking
import PromptInjectionCustomizations::PromptInjection
import SystemPromptInjectionCustomizations::SystemPromptInjection
private module PromptInjectionConfig implements DataFlow::ConfigSig {
private module SystemPromptInjectionConfig implements DataFlow::ConfigSig {
predicate isSource(DataFlow::Node node) { node instanceof Source }
predicate isSink(DataFlow::Node node) { node instanceof Sink }
@@ -22,4 +22,4 @@ private module PromptInjectionConfig implements DataFlow::ConfigSig {
}
/** Global taint-tracking for detecting "prompt injection" vulnerabilities. */
module PromptInjectionFlow = TaintTracking::Global<PromptInjectionConfig>;
module SystemPromptInjectionFlow = TaintTracking::Global<SystemPromptInjectionConfig>;

View File

@@ -0,0 +1,92 @@
/**
* Provides default sources, sinks and sanitizers for detecting
* "user prompt injection"
* vulnerabilities, as well as extension points for adding your own.
*/
import javascript
private import semmle.javascript.dataflow.DataFlow
private import semmle.javascript.Concepts
private import semmle.javascript.security.dataflow.RemoteFlowSources
private import semmle.javascript.dataflow.internal.BarrierGuards
private import semmle.javascript.frameworks.data.ModelsAsData
private import experimental.semmle.javascript.frameworks.OpenAI
private import experimental.semmle.javascript.frameworks.Anthropic
private import experimental.semmle.javascript.frameworks.GoogleGenAI
/**
* Provides default sources, sinks and sanitizers for detecting
* "user prompt injection"
* vulnerabilities, as well as extension points for adding your own.
*/
module UserPromptInjection {
/**
* A data flow source for "user prompt injection" vulnerabilities.
*/
abstract class Source extends DataFlow::Node { }
/**
* A data flow sink for "user prompt injection" vulnerabilities.
*/
abstract class Sink extends DataFlow::Node {
}
/**
* A sanitizer for "user prompt injection" vulnerabilities.
*/
abstract class Sanitizer extends DataFlow::Node { }
/**
* An active threat-model source, considered as a flow source.
*/
private class ActiveThreatModelSourceAsSource extends Source, ActiveThreatModelSource {
ActiveThreatModelSourceAsSource()
{
this instanceof RemoteFlowSource
or
this.isClientSideSource()
}
}
/**
* A prompt to an AI model, considered as a flow sink.
*/
class AIPromptAsSink extends Sink {
AIPromptAsSink() { this = any(AIPrompt p).getAPrompt() }
}
private class SinkFromModel extends Sink {
SinkFromModel() { this = ModelOutput::getASinkNode("prompt-injection").asSink() }
}
private class PromptContentSink extends Sink {
PromptContentSink() {
this = OpenAI::getUserPromptNode().asSink()
or
this = Anthropic::getUserPromptNode().asSink()
or
this = GoogleGenAI::getUserPromptNode().asSink()
}
}
/**
* A comparison with a constant, considered as a sanitizer-guard.
*/
/**
 * A comparison with a constant string, considered as a sanitizer-guard:
 * on the passing branch, the compared value is known to equal a fixed
 * constant and therefore cannot carry attacker-controlled prompt text.
 */
private class ConstCompareBarrierGuard extends DataFlow::ValueNode
{
  override EqualityTest astNode;
  ConstCompareBarrierGuard()
  {
    astNode.hasOperands(_, any(ConstantString cs))
  }
  predicate blocksExpr(boolean outcome, Expr e) {
    outcome = astNode.getPolarity() and
    // Accept the non-constant operand on either side of the comparison, so
    // Yoda-style guards ("pirate" === persona) are recognized as well as
    // (persona === "pirate"). The previous conjunction with getLeftOperand()
    // restricted the guard to the left operand only.
    e = astNode.getAnOperand() and
    not e instanceof ConstantString
  }
}
}

View File

@@ -0,0 +1,25 @@
/**
 * Provides a taint-tracking configuration for detecting "user prompt injection" vulnerabilities.
 *
 * Note, for performance reasons: only import this file if
 * `UserPromptInjectionFlow` is needed, otherwise
 * `UserPromptInjectionCustomizations` should be imported instead.
 */
private import javascript
import semmle.javascript.dataflow.DataFlow
import semmle.javascript.dataflow.TaintTracking
import UserPromptInjectionCustomizations::UserPromptInjection
private module UserPromptInjectionConfig implements DataFlow::ConfigSig {
predicate isSource(DataFlow::Node node) { node instanceof Source }
predicate isSink(DataFlow::Node node) { node instanceof Sink }
predicate isBarrier(DataFlow::Node node) { node instanceof Sanitizer }
predicate observeDiffInformedIncrementalMode() { any() }
}
/** Global taint-tracking for detecting "user prompt injection" vulnerabilities. */
module UserPromptInjectionFlow = TaintTracking::Global<UserPromptInjectionConfig>;

View File

@@ -1 +0,0 @@
./experimental/Security/CWE-1427/PromptInjection.ql

View File

@@ -7,8 +7,6 @@ edges
| agents_test.js:8:9:8:15 | persona | agents_test.js:81:52:81:58 | persona | provenance | |
| agents_test.js:8:9:8:15 | persona | agents_test.js:96:49:96:55 | persona | provenance | |
| agents_test.js:8:19:8:35 | req.query.persona | agents_test.js:8:9:8:15 | persona | provenance | |
| agents_test.js:9:9:9:13 | query | agents_test.js:67:32:67:36 | query | provenance | |
| agents_test.js:9:17:9:31 | req.query.query | agents_test.js:9:9:9:13 | query | provenance | |
| agents_test.js:16:36:16:42 | persona | agents_test.js:16:19:16:42 | "Talk l ... persona | provenance | |
| agents_test.js:16:36:16:42 | persona | agents_test.js:25:31:25:37 | persona | provenance | |
| agents_test.js:16:36:16:42 | persona | agents_test.js:33:31:33:37 | persona | provenance | |
@@ -47,8 +45,6 @@ edges
| gemini_test.js:8:9:8:15 | persona | gemini_test.js:18:43:18:49 | persona | provenance | |
| gemini_test.js:8:9:8:15 | persona | gemini_test.js:30:42:30:48 | persona | provenance | |
| gemini_test.js:8:9:8:15 | persona | gemini_test.js:59:43:59:49 | persona | provenance | |
| gemini_test.js:8:9:8:15 | persona | gemini_test.js:68:36:68:42 | persona | provenance | |
| gemini_test.js:8:9:8:15 | persona | gemini_test.js:76:36:76:42 | persona | provenance | |
| gemini_test.js:8:9:8:15 | persona | gemini_test.js:85:43:85:49 | persona | provenance | |
| gemini_test.js:8:9:8:15 | persona | gemini_test.js:95:43:95:49 | persona | provenance | |
| gemini_test.js:8:9:8:15 | persona | gemini_test.js:105:43:105:49 | persona | provenance | |
@@ -56,8 +52,6 @@ edges
| gemini_test.js:18:43:18:49 | persona | gemini_test.js:18:26:18:49 | "Talk l ... persona | provenance | |
| gemini_test.js:30:42:30:48 | persona | gemini_test.js:30:25:30:48 | "Talk l ... persona | provenance | |
| gemini_test.js:59:43:59:49 | persona | gemini_test.js:59:26:59:49 | "Talk l ... persona | provenance | |
| gemini_test.js:68:36:68:42 | persona | gemini_test.js:68:13:68:42 | "Draw a ... persona | provenance | |
| gemini_test.js:76:36:76:42 | persona | gemini_test.js:76:13:76:42 | "Edit t ... persona | provenance | |
| gemini_test.js:85:43:85:49 | persona | gemini_test.js:85:26:85:49 | "Talk l ... persona | provenance | |
| gemini_test.js:95:43:95:49 | persona | gemini_test.js:95:26:95:49 | "Talk l ... persona | provenance | |
| gemini_test.js:105:43:105:49 | persona | gemini_test.js:105:26:105:49 | "Talk l ... persona | provenance | |
@@ -68,16 +62,10 @@ edges
| openai_test.js:11:9:11:15 | persona | openai_test.js:83:35:83:41 | persona | provenance | |
| openai_test.js:11:9:11:15 | persona | openai_test.js:97:36:97:42 | persona | provenance | |
| openai_test.js:11:9:11:15 | persona | openai_test.js:110:35:110:41 | persona | provenance | |
| openai_test.js:11:9:11:15 | persona | openai_test.js:120:30:120:36 | persona | provenance | |
| openai_test.js:11:9:11:15 | persona | openai_test.js:127:36:127:42 | persona | provenance | |
| openai_test.js:11:9:11:15 | persona | openai_test.js:132:36:132:42 | persona | provenance | |
| openai_test.js:11:9:11:15 | persona | openai_test.js:140:29:140:35 | persona | provenance | |
| openai_test.js:11:9:11:15 | persona | openai_test.js:149:36:149:42 | persona | provenance | |
| openai_test.js:11:9:11:15 | persona | openai_test.js:160:36:160:42 | persona | provenance | |
| openai_test.js:11:9:11:15 | persona | openai_test.js:166:52:166:58 | persona | provenance | |
| openai_test.js:11:9:11:15 | persona | openai_test.js:172:31:172:37 | persona | provenance | |
| openai_test.js:11:9:11:15 | persona | openai_test.js:187:35:187:41 | persona | provenance | |
| openai_test.js:11:9:11:15 | persona | openai_test.js:194:34:194:40 | persona | provenance | |
| openai_test.js:11:9:11:15 | persona | openai_test.js:200:49:200:55 | persona | provenance | |
| openai_test.js:11:19:11:35 | req.query.persona | openai_test.js:11:9:11:15 | persona | provenance | |
| openai_test.js:19:36:19:42 | persona | openai_test.js:19:19:19:42 | "Talk l ... persona | provenance | |
@@ -87,22 +75,14 @@ edges
| openai_test.js:83:35:83:41 | persona | openai_test.js:83:18:83:41 | "Talk l ... persona | provenance | |
| openai_test.js:97:36:97:42 | persona | openai_test.js:97:19:97:42 | "Talk l ... persona | provenance | |
| openai_test.js:110:35:110:41 | persona | openai_test.js:110:18:110:41 | "Talk l ... persona | provenance | |
| openai_test.js:120:30:120:36 | persona | openai_test.js:120:13:120:36 | "Talk l ... persona | provenance | |
| openai_test.js:127:36:127:42 | persona | openai_test.js:127:13:127:42 | "Draw a ... persona | provenance | |
| openai_test.js:132:36:132:42 | persona | openai_test.js:132:13:132:42 | "Edit t ... persona | provenance | |
| openai_test.js:140:29:140:35 | persona | openai_test.js:140:12:140:35 | "Embed ... persona | provenance | |
| openai_test.js:149:36:149:42 | persona | openai_test.js:149:19:149:42 | "Talk l ... persona | provenance | |
| openai_test.js:160:36:160:42 | persona | openai_test.js:160:19:160:42 | "Talk l ... persona | provenance | |
| openai_test.js:166:52:166:58 | persona | openai_test.js:166:30:166:58 | "Also t ... persona | provenance | |
| openai_test.js:172:31:172:37 | persona | openai_test.js:172:14:172:37 | "Talk l ... persona | provenance | |
| openai_test.js:187:35:187:41 | persona | openai_test.js:187:13:187:41 | "Transc ... persona | provenance | |
| openai_test.js:194:34:194:40 | persona | openai_test.js:194:13:194:40 | "Transl ... persona | provenance | |
| openai_test.js:200:49:200:55 | persona | openai_test.js:200:32:200:55 | "Talk l ... persona | provenance | |
nodes
| agents_test.js:8:9:8:15 | persona | semmle.label | persona |
| agents_test.js:8:19:8:35 | req.query.persona | semmle.label | req.query.persona |
| agents_test.js:9:9:9:13 | query | semmle.label | query |
| agents_test.js:9:17:9:31 | req.query.query | semmle.label | req.query.query |
| agents_test.js:16:19:16:42 | "Talk l ... persona | semmle.label | "Talk l ... persona |
| agents_test.js:16:36:16:42 | persona | semmle.label | persona |
| agents_test.js:25:14:25:37 | "Talk l ... persona | semmle.label | "Talk l ... persona |
@@ -116,7 +96,6 @@ nodes
| agents_test.js:51:37:51:43 | persona | semmle.label | persona |
| agents_test.js:59:18:59:48 | "Look u ... persona | semmle.label | "Look u ... persona |
| agents_test.js:59:42:59:48 | persona | semmle.label | persona |
| agents_test.js:67:32:67:36 | query | semmle.label | query |
| agents_test.js:73:32:73:55 | "Talk l ... persona | semmle.label | "Talk l ... persona |
| agents_test.js:73:49:73:55 | persona | semmle.label | persona |
| agents_test.js:81:35:81:58 | "Talk l ... persona | semmle.label | "Talk l ... persona |
@@ -149,10 +128,6 @@ nodes
| gemini_test.js:30:42:30:48 | persona | semmle.label | persona |
| gemini_test.js:59:26:59:49 | "Talk l ... persona | semmle.label | "Talk l ... persona |
| gemini_test.js:59:43:59:49 | persona | semmle.label | persona |
| gemini_test.js:68:13:68:42 | "Draw a ... persona | semmle.label | "Draw a ... persona |
| gemini_test.js:68:36:68:42 | persona | semmle.label | persona |
| gemini_test.js:76:13:76:42 | "Edit t ... persona | semmle.label | "Edit t ... persona |
| gemini_test.js:76:36:76:42 | persona | semmle.label | persona |
| gemini_test.js:85:26:85:49 | "Talk l ... persona | semmle.label | "Talk l ... persona |
| gemini_test.js:85:43:85:49 | persona | semmle.label | persona |
| gemini_test.js:95:26:95:49 | "Talk l ... persona | semmle.label | "Talk l ... persona |
@@ -175,14 +150,6 @@ nodes
| openai_test.js:97:36:97:42 | persona | semmle.label | persona |
| openai_test.js:110:18:110:41 | "Talk l ... persona | semmle.label | "Talk l ... persona |
| openai_test.js:110:35:110:41 | persona | semmle.label | persona |
| openai_test.js:120:13:120:36 | "Talk l ... persona | semmle.label | "Talk l ... persona |
| openai_test.js:120:30:120:36 | persona | semmle.label | persona |
| openai_test.js:127:13:127:42 | "Draw a ... persona | semmle.label | "Draw a ... persona |
| openai_test.js:127:36:127:42 | persona | semmle.label | persona |
| openai_test.js:132:13:132:42 | "Edit t ... persona | semmle.label | "Edit t ... persona |
| openai_test.js:132:36:132:42 | persona | semmle.label | persona |
| openai_test.js:140:12:140:35 | "Embed ... persona | semmle.label | "Embed ... persona |
| openai_test.js:140:29:140:35 | persona | semmle.label | persona |
| openai_test.js:149:19:149:42 | "Talk l ... persona | semmle.label | "Talk l ... persona |
| openai_test.js:149:36:149:42 | persona | semmle.label | persona |
| openai_test.js:160:19:160:42 | "Talk l ... persona | semmle.label | "Talk l ... persona |
@@ -191,10 +158,6 @@ nodes
| openai_test.js:166:52:166:58 | persona | semmle.label | persona |
| openai_test.js:172:14:172:37 | "Talk l ... persona | semmle.label | "Talk l ... persona |
| openai_test.js:172:31:172:37 | persona | semmle.label | persona |
| openai_test.js:187:13:187:41 | "Transc ... persona | semmle.label | "Transc ... persona |
| openai_test.js:187:35:187:41 | persona | semmle.label | persona |
| openai_test.js:194:13:194:40 | "Transl ... persona | semmle.label | "Transl ... persona |
| openai_test.js:194:34:194:40 | persona | semmle.label | persona |
| openai_test.js:200:32:200:55 | "Talk l ... persona | semmle.label | "Talk l ... persona |
| openai_test.js:200:49:200:55 | persona | semmle.label | persona |
subpaths
@@ -205,7 +168,6 @@ subpaths
| agents_test.js:43:25:43:44 | "Handles " + persona | agents_test.js:8:19:8:35 | req.query.persona | agents_test.js:43:25:43:44 | "Handles " + persona | This prompt construction depends on a $@. | agents_test.js:8:19:8:35 | req.query.persona | user-provided value |
| agents_test.js:51:22:51:43 | "Ask ab ... persona | agents_test.js:8:19:8:35 | req.query.persona | agents_test.js:51:22:51:43 | "Ask ab ... persona | This prompt construction depends on a $@. | agents_test.js:8:19:8:35 | req.query.persona | user-provided value |
| agents_test.js:59:18:59:48 | "Look u ... persona | agents_test.js:8:19:8:35 | req.query.persona | agents_test.js:59:18:59:48 | "Look u ... persona | This prompt construction depends on a $@. | agents_test.js:8:19:8:35 | req.query.persona | user-provided value |
| agents_test.js:67:32:67:36 | query | agents_test.js:9:17:9:31 | req.query.query | agents_test.js:67:32:67:36 | query | This prompt construction depends on a $@. | agents_test.js:9:17:9:31 | req.query.query | user-provided value |
| agents_test.js:73:32:73:55 | "Talk l ... persona | agents_test.js:8:19:8:35 | req.query.persona | agents_test.js:73:32:73:55 | "Talk l ... persona | This prompt construction depends on a $@. | agents_test.js:8:19:8:35 | req.query.persona | user-provided value |
| agents_test.js:81:35:81:58 | "Talk l ... persona | agents_test.js:8:19:8:35 | req.query.persona | agents_test.js:81:35:81:58 | "Talk l ... persona | This prompt construction depends on a $@. | agents_test.js:8:19:8:35 | req.query.persona | user-provided value |
| agents_test.js:96:32:96:55 | "Talk l ... persona | agents_test.js:8:19:8:35 | req.query.persona | agents_test.js:96:32:96:55 | "Talk l ... persona | This prompt construction depends on a $@. | agents_test.js:8:19:8:35 | req.query.persona | user-provided value |
@@ -220,8 +182,6 @@ subpaths
| gemini_test.js:18:26:18:49 | "Talk l ... persona | gemini_test.js:8:19:8:35 | req.query.persona | gemini_test.js:18:26:18:49 | "Talk l ... persona | This prompt construction depends on a $@. | gemini_test.js:8:19:8:35 | req.query.persona | user-provided value |
| gemini_test.js:30:25:30:48 | "Talk l ... persona | gemini_test.js:8:19:8:35 | req.query.persona | gemini_test.js:30:25:30:48 | "Talk l ... persona | This prompt construction depends on a $@. | gemini_test.js:8:19:8:35 | req.query.persona | user-provided value |
| gemini_test.js:59:26:59:49 | "Talk l ... persona | gemini_test.js:8:19:8:35 | req.query.persona | gemini_test.js:59:26:59:49 | "Talk l ... persona | This prompt construction depends on a $@. | gemini_test.js:8:19:8:35 | req.query.persona | user-provided value |
| gemini_test.js:68:13:68:42 | "Draw a ... persona | gemini_test.js:8:19:8:35 | req.query.persona | gemini_test.js:68:13:68:42 | "Draw a ... persona | This prompt construction depends on a $@. | gemini_test.js:8:19:8:35 | req.query.persona | user-provided value |
| gemini_test.js:76:13:76:42 | "Edit t ... persona | gemini_test.js:8:19:8:35 | req.query.persona | gemini_test.js:76:13:76:42 | "Edit t ... persona | This prompt construction depends on a $@. | gemini_test.js:8:19:8:35 | req.query.persona | user-provided value |
| gemini_test.js:85:26:85:49 | "Talk l ... persona | gemini_test.js:8:19:8:35 | req.query.persona | gemini_test.js:85:26:85:49 | "Talk l ... persona | This prompt construction depends on a $@. | gemini_test.js:8:19:8:35 | req.query.persona | user-provided value |
| gemini_test.js:95:26:95:49 | "Talk l ... persona | gemini_test.js:8:19:8:35 | req.query.persona | gemini_test.js:95:26:95:49 | "Talk l ... persona | This prompt construction depends on a $@. | gemini_test.js:8:19:8:35 | req.query.persona | user-provided value |
| gemini_test.js:105:26:105:49 | "Talk l ... persona | gemini_test.js:8:19:8:35 | req.query.persona | gemini_test.js:105:26:105:49 | "Talk l ... persona | This prompt construction depends on a $@. | gemini_test.js:8:19:8:35 | req.query.persona | user-provided value |
@@ -232,14 +192,8 @@ subpaths
| openai_test.js:83:18:83:41 | "Talk l ... persona | openai_test.js:11:19:11:35 | req.query.persona | openai_test.js:83:18:83:41 | "Talk l ... persona | This prompt construction depends on a $@. | openai_test.js:11:19:11:35 | req.query.persona | user-provided value |
| openai_test.js:97:19:97:42 | "Talk l ... persona | openai_test.js:11:19:11:35 | req.query.persona | openai_test.js:97:19:97:42 | "Talk l ... persona | This prompt construction depends on a $@. | openai_test.js:11:19:11:35 | req.query.persona | user-provided value |
| openai_test.js:110:18:110:41 | "Talk l ... persona | openai_test.js:11:19:11:35 | req.query.persona | openai_test.js:110:18:110:41 | "Talk l ... persona | This prompt construction depends on a $@. | openai_test.js:11:19:11:35 | req.query.persona | user-provided value |
| openai_test.js:120:13:120:36 | "Talk l ... persona | openai_test.js:11:19:11:35 | req.query.persona | openai_test.js:120:13:120:36 | "Talk l ... persona | This prompt construction depends on a $@. | openai_test.js:11:19:11:35 | req.query.persona | user-provided value |
| openai_test.js:127:13:127:42 | "Draw a ... persona | openai_test.js:11:19:11:35 | req.query.persona | openai_test.js:127:13:127:42 | "Draw a ... persona | This prompt construction depends on a $@. | openai_test.js:11:19:11:35 | req.query.persona | user-provided value |
| openai_test.js:132:13:132:42 | "Edit t ... persona | openai_test.js:11:19:11:35 | req.query.persona | openai_test.js:132:13:132:42 | "Edit t ... persona | This prompt construction depends on a $@. | openai_test.js:11:19:11:35 | req.query.persona | user-provided value |
| openai_test.js:140:12:140:35 | "Embed ... persona | openai_test.js:11:19:11:35 | req.query.persona | openai_test.js:140:12:140:35 | "Embed ... persona | This prompt construction depends on a $@. | openai_test.js:11:19:11:35 | req.query.persona | user-provided value |
| openai_test.js:149:19:149:42 | "Talk l ... persona | openai_test.js:11:19:11:35 | req.query.persona | openai_test.js:149:19:149:42 | "Talk l ... persona | This prompt construction depends on a $@. | openai_test.js:11:19:11:35 | req.query.persona | user-provided value |
| openai_test.js:160:19:160:42 | "Talk l ... persona | openai_test.js:11:19:11:35 | req.query.persona | openai_test.js:160:19:160:42 | "Talk l ... persona | This prompt construction depends on a $@. | openai_test.js:11:19:11:35 | req.query.persona | user-provided value |
| openai_test.js:166:30:166:58 | "Also t ... persona | openai_test.js:11:19:11:35 | req.query.persona | openai_test.js:166:30:166:58 | "Also t ... persona | This prompt construction depends on a $@. | openai_test.js:11:19:11:35 | req.query.persona | user-provided value |
| openai_test.js:172:14:172:37 | "Talk l ... persona | openai_test.js:11:19:11:35 | req.query.persona | openai_test.js:172:14:172:37 | "Talk l ... persona | This prompt construction depends on a $@. | openai_test.js:11:19:11:35 | req.query.persona | user-provided value |
| openai_test.js:187:13:187:41 | "Transc ... persona | openai_test.js:11:19:11:35 | req.query.persona | openai_test.js:187:13:187:41 | "Transc ... persona | This prompt construction depends on a $@. | openai_test.js:11:19:11:35 | req.query.persona | user-provided value |
| openai_test.js:194:13:194:40 | "Transl ... persona | openai_test.js:11:19:11:35 | req.query.persona | openai_test.js:194:13:194:40 | "Transl ... persona | This prompt construction depends on a $@. | openai_test.js:11:19:11:35 | req.query.persona | user-provided value |
| openai_test.js:200:32:200:55 | "Talk l ... persona | openai_test.js:11:19:11:35 | req.query.persona | openai_test.js:200:32:200:55 | "Talk l ... persona | This prompt construction depends on a $@. | openai_test.js:11:19:11:35 | req.query.persona | user-provided value |

View File

@@ -0,0 +1 @@
experimental/Security/CWE-1427/SystemPromptInjection.ql

View File

@@ -0,0 +1,76 @@
edges
| anthropic_user_test.js:8:9:8:17 | userInput | anthropic_user_test.js:18:18:18:26 | userInput | provenance | |
| anthropic_user_test.js:8:9:8:17 | userInput | anthropic_user_test.js:31:18:31:26 | userInput | provenance | |
| anthropic_user_test.js:8:21:8:39 | req.query.userInput | anthropic_user_test.js:8:9:8:17 | userInput | provenance | |
| gemini_user_test.js:8:9:8:17 | userInput | gemini_user_test.js:14:15:14:23 | userInput | provenance | |
| gemini_user_test.js:8:9:8:17 | userInput | gemini_user_test.js:26:19:26:27 | userInput | provenance | |
| gemini_user_test.js:8:9:8:17 | userInput | gemini_user_test.js:37:15:37:23 | userInput | provenance | |
| gemini_user_test.js:8:9:8:17 | userInput | gemini_user_test.js:44:13:44:21 | userInput | provenance | |
| gemini_user_test.js:8:9:8:17 | userInput | gemini_user_test.js:51:13:51:21 | userInput | provenance | |
| gemini_user_test.js:8:9:8:17 | userInput | gemini_user_test.js:58:13:58:21 | userInput | provenance | |
| gemini_user_test.js:8:21:8:39 | req.query.userInput | gemini_user_test.js:8:9:8:17 | userInput | provenance | |
| openai_user_test.js:16:9:16:17 | userInput | openai_user_test.js:24:12:24:20 | userInput | provenance | |
| openai_user_test.js:16:9:16:17 | userInput | openai_user_test.js:33:18:33:26 | userInput | provenance | |
| openai_user_test.js:16:9:16:17 | userInput | openai_user_test.js:44:18:44:26 | userInput | provenance | |
| openai_user_test.js:16:9:16:17 | userInput | openai_user_test.js:58:19:58:27 | userInput | provenance | |
| openai_user_test.js:16:9:16:17 | userInput | openai_user_test.js:68:13:68:21 | userInput | provenance | |
| openai_user_test.js:16:9:16:17 | userInput | openai_user_test.js:73:13:73:21 | userInput | provenance | |
| openai_user_test.js:16:9:16:17 | userInput | openai_user_test.js:77:13:77:21 | userInput | provenance | |
| openai_user_test.js:16:9:16:17 | userInput | openai_user_test.js:83:12:83:20 | userInput | provenance | |
| openai_user_test.js:16:9:16:17 | userInput | openai_user_test.js:90:13:90:21 | userInput | provenance | |
| openai_user_test.js:16:9:16:17 | userInput | openai_user_test.js:96:13:96:21 | userInput | provenance | |
| openai_user_test.js:16:9:16:17 | userInput | openai_user_test.js:102:14:102:22 | userInput | provenance | |
| openai_user_test.js:16:9:16:17 | userInput | openai_user_test.js:108:12:108:20 | userInput | provenance | |
| openai_user_test.js:16:9:16:17 | userInput | openai_user_test.js:155:12:155:20 | userInput | provenance | |
| openai_user_test.js:16:21:16:39 | req.query.userInput | openai_user_test.js:16:9:16:17 | userInput | provenance | |
nodes
| anthropic_user_test.js:8:9:8:17 | userInput | semmle.label | userInput |
| anthropic_user_test.js:8:21:8:39 | req.query.userInput | semmle.label | req.query.userInput |
| anthropic_user_test.js:18:18:18:26 | userInput | semmle.label | userInput |
| anthropic_user_test.js:31:18:31:26 | userInput | semmle.label | userInput |
| gemini_user_test.js:8:9:8:17 | userInput | semmle.label | userInput |
| gemini_user_test.js:8:21:8:39 | req.query.userInput | semmle.label | req.query.userInput |
| gemini_user_test.js:14:15:14:23 | userInput | semmle.label | userInput |
| gemini_user_test.js:26:19:26:27 | userInput | semmle.label | userInput |
| gemini_user_test.js:37:15:37:23 | userInput | semmle.label | userInput |
| gemini_user_test.js:44:13:44:21 | userInput | semmle.label | userInput |
| gemini_user_test.js:51:13:51:21 | userInput | semmle.label | userInput |
| gemini_user_test.js:58:13:58:21 | userInput | semmle.label | userInput |
| openai_user_test.js:16:9:16:17 | userInput | semmle.label | userInput |
| openai_user_test.js:16:21:16:39 | req.query.userInput | semmle.label | req.query.userInput |
| openai_user_test.js:24:12:24:20 | userInput | semmle.label | userInput |
| openai_user_test.js:33:18:33:26 | userInput | semmle.label | userInput |
| openai_user_test.js:44:18:44:26 | userInput | semmle.label | userInput |
| openai_user_test.js:58:19:58:27 | userInput | semmle.label | userInput |
| openai_user_test.js:68:13:68:21 | userInput | semmle.label | userInput |
| openai_user_test.js:73:13:73:21 | userInput | semmle.label | userInput |
| openai_user_test.js:77:13:77:21 | userInput | semmle.label | userInput |
| openai_user_test.js:83:12:83:20 | userInput | semmle.label | userInput |
| openai_user_test.js:90:13:90:21 | userInput | semmle.label | userInput |
| openai_user_test.js:96:13:96:21 | userInput | semmle.label | userInput |
| openai_user_test.js:102:14:102:22 | userInput | semmle.label | userInput |
| openai_user_test.js:108:12:108:20 | userInput | semmle.label | userInput |
| openai_user_test.js:155:12:155:20 | userInput | semmle.label | userInput |
subpaths
#select
| anthropic_user_test.js:18:18:18:26 | userInput | anthropic_user_test.js:8:21:8:39 | req.query.userInput | anthropic_user_test.js:18:18:18:26 | userInput | This prompt construction depends on a $@. | anthropic_user_test.js:8:21:8:39 | req.query.userInput | user-provided value |
| anthropic_user_test.js:31:18:31:26 | userInput | anthropic_user_test.js:8:21:8:39 | req.query.userInput | anthropic_user_test.js:31:18:31:26 | userInput | This prompt construction depends on a $@. | anthropic_user_test.js:8:21:8:39 | req.query.userInput | user-provided value |
| gemini_user_test.js:14:15:14:23 | userInput | gemini_user_test.js:8:21:8:39 | req.query.userInput | gemini_user_test.js:14:15:14:23 | userInput | This prompt construction depends on a $@. | gemini_user_test.js:8:21:8:39 | req.query.userInput | user-provided value |
| gemini_user_test.js:26:19:26:27 | userInput | gemini_user_test.js:8:21:8:39 | req.query.userInput | gemini_user_test.js:26:19:26:27 | userInput | This prompt construction depends on a $@. | gemini_user_test.js:8:21:8:39 | req.query.userInput | user-provided value |
| gemini_user_test.js:37:15:37:23 | userInput | gemini_user_test.js:8:21:8:39 | req.query.userInput | gemini_user_test.js:37:15:37:23 | userInput | This prompt construction depends on a $@. | gemini_user_test.js:8:21:8:39 | req.query.userInput | user-provided value |
| gemini_user_test.js:44:13:44:21 | userInput | gemini_user_test.js:8:21:8:39 | req.query.userInput | gemini_user_test.js:44:13:44:21 | userInput | This prompt construction depends on a $@. | gemini_user_test.js:8:21:8:39 | req.query.userInput | user-provided value |
| gemini_user_test.js:51:13:51:21 | userInput | gemini_user_test.js:8:21:8:39 | req.query.userInput | gemini_user_test.js:51:13:51:21 | userInput | This prompt construction depends on a $@. | gemini_user_test.js:8:21:8:39 | req.query.userInput | user-provided value |
| gemini_user_test.js:58:13:58:21 | userInput | gemini_user_test.js:8:21:8:39 | req.query.userInput | gemini_user_test.js:58:13:58:21 | userInput | This prompt construction depends on a $@. | gemini_user_test.js:8:21:8:39 | req.query.userInput | user-provided value |
| openai_user_test.js:24:12:24:20 | userInput | openai_user_test.js:16:21:16:39 | req.query.userInput | openai_user_test.js:24:12:24:20 | userInput | This prompt construction depends on a $@. | openai_user_test.js:16:21:16:39 | req.query.userInput | user-provided value |
| openai_user_test.js:33:18:33:26 | userInput | openai_user_test.js:16:21:16:39 | req.query.userInput | openai_user_test.js:33:18:33:26 | userInput | This prompt construction depends on a $@. | openai_user_test.js:16:21:16:39 | req.query.userInput | user-provided value |
| openai_user_test.js:44:18:44:26 | userInput | openai_user_test.js:16:21:16:39 | req.query.userInput | openai_user_test.js:44:18:44:26 | userInput | This prompt construction depends on a $@. | openai_user_test.js:16:21:16:39 | req.query.userInput | user-provided value |
| openai_user_test.js:58:19:58:27 | userInput | openai_user_test.js:16:21:16:39 | req.query.userInput | openai_user_test.js:58:19:58:27 | userInput | This prompt construction depends on a $@. | openai_user_test.js:16:21:16:39 | req.query.userInput | user-provided value |
| openai_user_test.js:68:13:68:21 | userInput | openai_user_test.js:16:21:16:39 | req.query.userInput | openai_user_test.js:68:13:68:21 | userInput | This prompt construction depends on a $@. | openai_user_test.js:16:21:16:39 | req.query.userInput | user-provided value |
| openai_user_test.js:73:13:73:21 | userInput | openai_user_test.js:16:21:16:39 | req.query.userInput | openai_user_test.js:73:13:73:21 | userInput | This prompt construction depends on a $@. | openai_user_test.js:16:21:16:39 | req.query.userInput | user-provided value |
| openai_user_test.js:77:13:77:21 | userInput | openai_user_test.js:16:21:16:39 | req.query.userInput | openai_user_test.js:77:13:77:21 | userInput | This prompt construction depends on a $@. | openai_user_test.js:16:21:16:39 | req.query.userInput | user-provided value |
| openai_user_test.js:83:12:83:20 | userInput | openai_user_test.js:16:21:16:39 | req.query.userInput | openai_user_test.js:83:12:83:20 | userInput | This prompt construction depends on a $@. | openai_user_test.js:16:21:16:39 | req.query.userInput | user-provided value |
| openai_user_test.js:90:13:90:21 | userInput | openai_user_test.js:16:21:16:39 | req.query.userInput | openai_user_test.js:90:13:90:21 | userInput | This prompt construction depends on a $@. | openai_user_test.js:16:21:16:39 | req.query.userInput | user-provided value |
| openai_user_test.js:96:13:96:21 | userInput | openai_user_test.js:16:21:16:39 | req.query.userInput | openai_user_test.js:96:13:96:21 | userInput | This prompt construction depends on a $@. | openai_user_test.js:16:21:16:39 | req.query.userInput | user-provided value |
| openai_user_test.js:102:14:102:22 | userInput | openai_user_test.js:16:21:16:39 | req.query.userInput | openai_user_test.js:102:14:102:22 | userInput | This prompt construction depends on a $@. | openai_user_test.js:16:21:16:39 | req.query.userInput | user-provided value |
| openai_user_test.js:108:12:108:20 | userInput | openai_user_test.js:16:21:16:39 | req.query.userInput | openai_user_test.js:108:12:108:20 | userInput | This prompt construction depends on a $@. | openai_user_test.js:16:21:16:39 | req.query.userInput | user-provided value |
| openai_user_test.js:155:12:155:20 | userInput | openai_user_test.js:16:21:16:39 | req.query.userInput | openai_user_test.js:155:12:155:20 | userInput | This prompt construction depends on a $@. | openai_user_test.js:16:21:16:39 | req.query.userInput | user-provided value |

View File

@@ -0,0 +1 @@
experimental/Security/CWE-1427/UserPromptInjection.ql

View File

@@ -0,0 +1,53 @@
// CodeQL inline-expectation test fixture for js/user-prompt-injection:
// user-controlled data flowing into Anthropic Messages API sinks.
// The `// $ Alert[...]` trailing comments are parsed as test expectations,
// and the companion .expected file pins exact line/column positions —
// regenerate it if the line layout of this file ever changes.
const express = require("express");
const Anthropic = require("@anthropic-ai/sdk");
const app = express();
const client = new Anthropic(); // no credentials needed — fixture is analyzed, never executed
app.get("/test", async (req, res) => {
  const userInput = req.query.userInput; // taint source: remote user input
  // === User role message (SHOULD ALERT) ===
  await client.messages.create({
    model: "claude-sonnet-4-20250514",
    max_tokens: 1024,
    messages: [
      {
        role: "user",
        content: userInput, // $ Alert[js/user-prompt-injection]
      },
    ],
  });
  // === Beta messages (SHOULD ALERT) ===
  await client.beta.messages.create({
    model: "claude-sonnet-4-20250514",
    max_tokens: 1024,
    messages: [
      {
        role: "user",
        content: userInput, // $ Alert[js/user-prompt-injection]
      },
    ],
  });
  // === Constant comparison sanitizer (SHOULD NOT ALERT) ===
  const userInput2 = req.query.userInput2;
  if (userInput2 === "hello") {
    // inside this branch userInput2 can only be the constant "hello",
    // so the query should treat the flow as sanitized
    await client.messages.create({
      model: "claude-sonnet-4-20250514",
      max_tokens: 1024,
      messages: [
        {
          role: "user",
          content: userInput2, // OK - sanitized by constant comparison
        },
      ],
    });
  }
  res.send("done");
});

View File

@@ -0,0 +1,88 @@
const express = require("express");
const { GoogleGenAI } = require("@google/genai");
const app = express();
const ai = new GoogleGenAI({ apiKey: "test-key" });
app.get("/test", async (req, res) => {
const userInput = req.query.userInput;
// === generateContent with string contents (SHOULD ALERT) ===
await ai.models.generateContent({
model: "gemini-2.0-flash",
contents: userInput, // $ Alert[js/user-prompt-injection]
});
// === generateContent with user role parts (SHOULD ALERT) ===
await ai.models.generateContent({
model: "gemini-2.0-flash",
contents: [
{
role: "user",
parts: [
{
text: userInput, // $ Alert[js/user-prompt-injection]
},
],
},
],
});
// === generateContentStream (SHOULD ALERT) ===
await ai.models.generateContentStream({
model: "gemini-2.0-flash",
contents: userInput, // $ Alert[js/user-prompt-injection]
});
// === generateImages (SHOULD ALERT) ===
await ai.models.generateImages({
model: "imagen-3.0-generate-002",
prompt: userInput, // $ Alert[js/user-prompt-injection]
});
// === editImage (SHOULD ALERT) ===
await ai.models.editImage({
model: "imagen-3.0-generate-002",
prompt: userInput, // $ Alert[js/user-prompt-injection]
});
// === generateVideos (SHOULD ALERT) ===
await ai.models.generateVideos({
model: "veo-2.0-generate-001",
prompt: userInput, // $ Alert[js/user-prompt-injection]
});
// === Constant comparison sanitizer (SHOULD NOT ALERT) ===
const userInput2 = req.query.userInput2;
if (userInput2 === "hello") {
await ai.models.generateContent({
model: "gemini-2.0-flash",
contents: userInput2, // OK - sanitized by constant comparison
});
}
// === Model role should not be a user prompt sink ===
await ai.models.generateContent({
model: "gemini-2.0-flash",
contents: [
{
role: "model",
parts: [
{
text: userInput, // OK for user-prompt-injection (model role)
},
],
},
],
});
res.send("done");
});

View File

@@ -0,0 +1,212 @@
const express = require("express");
const OpenAI = require("openai");
const { AzureOpenAI } = require("openai");
const {
GuardrailsOpenAI,
GuardrailsAzureOpenAI,
checkPlainText,
runGuardrails,
} = require("@openai/guardrails");
const app = express();
const client = new OpenAI();
const azureClient = new AzureOpenAI();
app.get("/test", async (req, res) => {
const userInput = req.query.userInput;
// === Bare OpenAI client: user prompt sinks (SHOULD ALERT) ===
// responses.create input as string
await client.responses.create({
model: "gpt-4.1",
instructions: "You are a helpful assistant",
input: userInput, // $ Alert[js/user-prompt-injection]
});
// responses.create input as array with user role
await client.responses.create({
model: "gpt-4.1",
input: [
{
role: "user",
content: userInput, // $ Alert[js/user-prompt-injection]
},
],
});
// chat.completions.create with user role
await client.chat.completions.create({
model: "gpt-4.1",
messages: [
{
role: "user",
content: userInput, // $ Alert[js/user-prompt-injection]
},
],
});
// chat.completions.create with user role content parts
await client.chat.completions.create({
model: "gpt-4.1",
messages: [
{
role: "user",
content: [
{
type: "text",
text: userInput, // $ Alert[js/user-prompt-injection]
},
],
},
],
});
// Legacy completions API
await client.completions.create({
model: "gpt-3.5-turbo-instruct",
prompt: userInput, // $ Alert[js/user-prompt-injection]
});
// Images API
await client.images.generate({
prompt: userInput, // $ Alert[js/user-prompt-injection]
});
await client.images.edit({
prompt: userInput, // $ Alert[js/user-prompt-injection]
});
// Embeddings API
await client.embeddings.create({
model: "text-embedding-3-small",
input: userInput, // $ Alert[js/user-prompt-injection]
});
// Audio API
await client.audio.transcriptions.create({
file: "audio.mp3",
model: "whisper-1",
prompt: userInput, // $ Alert[js/user-prompt-injection]
});
await client.audio.translations.create({
file: "audio.mp3",
model: "whisper-1",
prompt: userInput, // $ Alert[js/user-prompt-injection]
});
// beta.threads.messages.create with user role
await client.beta.threads.messages.create("thread_123", {
role: "user",
content: userInput, // $ Alert[js/user-prompt-injection]
});
// Azure client (SHOULD ALERT)
await azureClient.responses.create({
model: "gpt-4.1",
input: userInput, // $ Alert[js/user-prompt-injection]
});
// === GuardrailsOpenAI client: user prompt sinks (SHOULD NOT ALERT) ===
const guardedClient = await GuardrailsOpenAI.create({
version: 1,
input: { guardrails: [{ name: "prompt_injection_detection" }] },
});
// Guarded client — responses.create input as string (OK)
await guardedClient.responses.create({
model: "gpt-4.1",
input: userInput, // OK - guarded client with input guardrails
});
// Guarded client — chat.completions.create with user role (OK)
await guardedClient.chat.completions.create({
model: "gpt-4.1",
messages: [
{
role: "user",
content: userInput, // OK - guarded client with input guardrails
},
],
});
// Guarded Azure client (OK)
const guardedAzure = await GuardrailsAzureOpenAI.create({
version: 1,
pre_flight: { guardrails: [{ name: "prompt_injection_detection" }] },
});
await guardedAzure.responses.create({
model: "gpt-4.1",
input: userInput, // OK - guarded Azure client with pre_flight guardrails
});
// === Unprotected GuardrailsOpenAI: no input guardrails (SHOULD ALERT) ===
const unprotected = await GuardrailsOpenAI.create({
version: 1,
output: { guardrails: [{ name: "moderation" }] },
});
await unprotected.responses.create({
model: "gpt-4.1",
input: userInput, // $ Alert[js/user-prompt-injection]
});
// === checkPlainText sanitizer (SHOULD NOT ALERT) ===
await checkPlainText(userInput, configBundle);
// After checkPlainText, the input is safe because it would have thrown
await client.responses.create({
model: "gpt-4.1",
input: userInput, // OK - sanitized by checkPlainText
});
// === runGuardrails sanitizer (SHOULD NOT ALERT) ===
const userInput2 = req.query.userInput2;
await runGuardrails(userInput2, configBundle);
await client.responses.create({
model: "gpt-4.1",
input: userInput2, // OK - sanitized by runGuardrails
});
// === Constant comparison sanitizer (SHOULD NOT ALERT) ===
const userInput3 = req.query.userInput3;
if (userInput3 === "hello") {
await client.responses.create({
model: "gpt-4.1",
input: userInput3, // OK - sanitized by constant comparison
});
}
// === System/developer role messages should NOT be user prompt sinks ===
// These are system prompt injection sinks, not user prompt sinks
await client.responses.create({
model: "gpt-4.1",
input: [
{
role: "system",
content: userInput, // OK for user-prompt-injection (this is a system prompt sink)
},
],
});
await client.chat.completions.create({
model: "gpt-4.1",
messages: [
{
role: "developer",
content: userInput, // OK for user-prompt-injection (this is a system prompt sink)
},
],
});
res.send("done");
});