+
+
+Prompts can be constructed to bypass the original purposes of an agent, leading to leaks of sensitive data or
+operations that were not intended.
+
+
+
+Sanitize user input and also avoid using user input in developer or system level prompts.
+
+
+
+In the following examples, the cases marked GOOD show secure prompt construction, whereas the case marked BAD may be susceptible to prompt injection.
+
+
+
+
+OpenAI Agents SDK documentation: Guardrails.
+
+
+
diff --git a/javascript/ql/src/experimental/Security/CWE-1427/PromptInjection.ql b/javascript/ql/src/experimental/Security/CWE-1427/PromptInjection.ql
new file mode 100644
index 000000000000..69f5f7e836c1
--- /dev/null
+++ b/javascript/ql/src/experimental/Security/CWE-1427/PromptInjection.ql
@@ -0,0 +1,20 @@
+/**
+ * @name Prompt injection
+ * @kind path-problem
+ * @problem.severity error
+ * @security-severity 5.0
+ * @precision high
+ * @id js/prompt-injection
+ * @tags security
+ * experimental
+ * external/cwe/cwe-1427
+ */
+
+import javascript
+import experimental.semmle.javascript.security.PromptInjection.PromptInjectionQuery
+import PromptInjectionFlow::PathGraph
+
+from PromptInjectionFlow::PathNode source, PromptInjectionFlow::PathNode sink
+where PromptInjectionFlow::flowPath(source, sink)
+select sink.getNode(), source, sink, "This prompt construction depends on a $@.", source.getNode(),
+ "user-provided value"
diff --git a/javascript/ql/src/experimental/Security/CWE-1427/examples/example.py b/javascript/ql/src/experimental/Security/CWE-1427/examples/example.py
new file mode 100644
index 000000000000..a049f727b37a
--- /dev/null
+++ b/javascript/ql/src/experimental/Security/CWE-1427/examples/example.py
@@ -0,0 +1,17 @@
+from flask import Flask, request
+from agents import Agent
+from guardrails import GuardrailAgent
+
+@app.route("/parameter-route")
+def get_input():
+ input = request.args.get("input")
+
+ goodAgent = GuardrailAgent( # GOOD: Agent created with guardrails automatically configured.
+ config=Path("guardrails_config.json"),
+ name="Assistant",
+ instructions="This prompt is customized for " + input)
+
+ badAgent = Agent(
+ name="Assistant",
+ instructions="This prompt is customized for " + input # BAD: user input in agent instruction.
+ )
diff --git a/javascript/ql/src/experimental/semmle/javascript/frameworks/Anthropic.qll b/javascript/ql/src/experimental/semmle/javascript/frameworks/Anthropic.qll
new file mode 100644
index 000000000000..be500876c75f
--- /dev/null
+++ b/javascript/ql/src/experimental/semmle/javascript/frameworks/Anthropic.qll
@@ -0,0 +1,64 @@
+/**
+ * Provides classes modeling security-relevant aspects of the `@anthropic-ai/sdk` package.
+ * See https://github.com/anthropics/anthropic-sdk-typescript
+ */
+
+private import javascript
+
+module Anthropic {
+ /** Gets a reference to the `Anthropic` client instance. */
+ API::Node classRef() {
+ // Default export: import Anthropic from '@anthropic-ai/sdk'; new Anthropic()
+ result = API::moduleImport("@anthropic-ai/sdk").getInstance()
+ }
+
+
+ /** Gets a reference to a sink for the system prompt in the Anthropic messages API. */
+ API::Node getContentNode() {
+ exists(API::Node createParams |
+ // client.messages.create({ ... })
+ createParams = classRef()
+ .getMember("messages")
+ .getMember("create")
+ .getParameter(0)
+ or
+ // client.beta.messages.create({ ... })
+ createParams = classRef()
+ .getMember("beta")
+ .getMember("messages")
+ .getMember("create")
+ .getParameter(0)
+ |
+ // system: "string"
+ result = createParams.getMember("system")
+ or
+ // system: [{ type: "text", text: "..." }]
+ result = createParams.getMember("system").getArrayElement().getMember("text")
+ or
+ // messages: [{ role: "assistant", content: "..." }]
+ // Injecting content into what the model said from external sources is very likely an injection.
+ exists(API::Node msg |
+ msg = createParams.getMember("messages").getArrayElement() and
+ msg.getMember("role").asSink().mayHaveStringValue("assistant")
+ |
+ result = msg.getMember("content")
+ )
+ )
+ or
+ // client.beta.agents.create({ system: "..." })
+ result = classRef()
+ .getMember("beta")
+ .getMember("agents")
+ .getMember("create")
+ .getParameter(0)
+ .getMember("system")
+ or
+ // client.beta.agents.update(agentId, { system: "..." })
+ result = classRef()
+ .getMember("beta")
+ .getMember("agents")
+ .getMember("update")
+ .getParameter(1)
+ .getMember("system")
+ }
+}
diff --git a/javascript/ql/src/experimental/semmle/javascript/frameworks/GoogleGenAI.qll b/javascript/ql/src/experimental/semmle/javascript/frameworks/GoogleGenAI.qll
new file mode 100644
index 000000000000..c6f119f00f70
--- /dev/null
+++ b/javascript/ql/src/experimental/semmle/javascript/frameworks/GoogleGenAI.qll
@@ -0,0 +1,85 @@
+/**
+ * Provides classes modeling security-relevant aspects of the `@google/genai` package.
+ * See https://github.com/googleapis/js-genai
+ */
+
+private import javascript
+
+module GoogleGenAI {
+ /** Gets a reference to the `GoogleGenAI` client instance. */
+ API::Node clientRef() {
+ // import { GoogleGenAI } from '@google/genai'; const ai = new GoogleGenAI(...)
+ result =
+ API::moduleImport("@google/genai").getMember("GoogleGenAI").getInstance()
+ }
+
+ /** Gets a reference to a sink for prompt content in the Google GenAI SDK. */
+ API::Node getContentNode() {
+ exists(API::Node params |
+ // ai.models.generateContent({ contents, config })
+ // ai.models.generateContentStream({ contents, config })
+ params =
+ clientRef()
+ .getMember("models")
+ .getMember(["generateContent", "generateContentStream"])
+ .getParameter(0)
+ |
+ // config.systemInstruction
+ result = params.getMember("config").getMember("systemInstruction")
+ or
+ // contents: [{ role: "model", parts: [{ text: "..." }] }]
+ // Gemini uses "model" role instead of "assistant"
+ exists(API::Node msg |
+ msg = params.getMember("contents").getArrayElement() and
+ msg.getMember("role").asSink().mayHaveStringValue("model")
+ |
+ result = msg.getMember("parts").getArrayElement().getMember("text")
+ )
+ )
+ or
+ // ai.models.generateImages({ prompt, config })
+ result =
+ clientRef()
+ .getMember("models")
+ .getMember("generateImages")
+ .getParameter(0)
+ .getMember("prompt")
+ or
+ // ai.models.editImage({ prompt, referenceImages, config })
+ result =
+ clientRef()
+ .getMember("models")
+ .getMember("editImage")
+ .getParameter(0)
+ .getMember("prompt")
+ or
+ // ai.chats.create({ config: { systemInstruction: ... } })
+ result =
+ clientRef()
+ .getMember("chats")
+ .getMember("create")
+ .getParameter(0)
+ .getMember("config")
+ .getMember("systemInstruction")
+ or
+ // chat.sendMessage({ config: { systemInstruction: ... } })
+ result =
+ clientRef()
+ .getMember("chats")
+ .getMember("create")
+ .getReturn()
+ .getMember("sendMessage")
+ .getParameter(0)
+ .getMember("config")
+ .getMember("systemInstruction")
+ or
+ // ai.live.connect({ config: { systemInstruction: ... } })
+ result =
+ clientRef()
+ .getMember("live")
+ .getMember("connect")
+ .getParameter(0)
+ .getMember("config")
+ .getMember("systemInstruction")
+ }
+}
diff --git a/javascript/ql/src/experimental/semmle/javascript/frameworks/OpenAI.qll b/javascript/ql/src/experimental/semmle/javascript/frameworks/OpenAI.qll
new file mode 100644
index 000000000000..4704fae2081d
--- /dev/null
+++ b/javascript/ql/src/experimental/semmle/javascript/frameworks/OpenAI.qll
@@ -0,0 +1,199 @@
+/**
+ * Provides classes modeling security-relevant aspects of the `openai` package (openai-node).
+ * See https://github.com/openai/openai-node
+ */
+
+private import javascript
+
+/** Holds if `msg` is a message array element with a privileged role (system, developer, or assistant). */
+private predicate isSystemOrDevMessage(API::Node msg) {
+ msg.getMember("role").asSink().mayHaveStringValue(["system", "developer", "assistant"])
+}
+
+module OpenAI {
+ /** Gets a reference to the `openai.OpenAI` class. */
+ API::Node classRef() {
+ // Default export: import OpenAI from 'openai'; new OpenAI()
+ result = API::moduleImport("openai").getInstance()
+ or
+ // Named import: import { OpenAI, AzureOpenAI } from 'openai'; new AzureOpenAI()
+ result = API::moduleImport("openai").getMember(["OpenAI", "AzureOpenAI"]).getInstance()
+ }
+
+
+ /** Gets a reference to a potential property of `openai.OpenAI` called instructions which refers to the system prompt. */
+ API::Node getContentNode() {
+ // responses.create({ input: ..., instructions: ... })
+ // input can be a string or an array of message objects
+ exists(API::Node responsesCreate |
+ responsesCreate =
+ classRef()
+ .getMember("responses")
+ .getMember("create")
+ .getParameter(0)
+ |
+ // instructions: "string"
+ result = responsesCreate.getMember("instructions")
+ // intended that user data can flow into input
+ // or
+ // // input: "string"
+ // result = responsesCreate.getMember("input")
+ or
+ // input: [{ role: "system"/"developer", content: "..." }]
+ exists(API::Node msg |
+ msg = responsesCreate.getMember("input").getArrayElement() and
+ isSystemOrDevMessage(msg)
+ |
+ result = msg.getMember("content")
+ )
+ )
+ or
+ // chat.completions.create({ messages: [{ role: "system"/"developer", content: ... }] })
+ // content can be a string or an array of content parts
+ exists(API::Node msg, API::Node content |
+ msg =
+ classRef()
+ .getMember("chat")
+ .getMember("completions")
+ .getMember("create")
+ .getParameter(0)
+ .getMember("messages")
+ .getArrayElement() and
+ isSystemOrDevMessage(msg) and
+ content = msg.getMember("content")
+ |
+ // content: "string"
+ result = content
+ or
+ // content: [{ type: "text", text: "..." }]
+ result = content.getArrayElement().getMember("text")
+ )
+ or
+ // Legacy completions API: completions.create({ prompt: ... })
+ result =
+ classRef()
+ .getMember("completions")
+ .getMember("create")
+ .getParameter(0)
+ .getMember("prompt")
+ or
+ // images.generate({ prompt: ... }) and images.edit({ prompt: ... })
+ result =
+ classRef()
+ .getMember("images")
+ .getMember(["generate", "edit"])
+ .getParameter(0)
+ .getMember("prompt")
+ or
+ // embeddings.create({ input: ... })
+ result =
+ classRef()
+ .getMember("embeddings")
+ .getMember("create")
+ .getParameter(0)
+ .getMember("input")
+ or
+ // beta.assistants.create({ instructions: ... }) and beta.assistants.update(id, { instructions: ... })
+ result =
+ classRef()
+ .getMember("beta")
+ .getMember("assistants")
+ .getMember(["create", "update"])
+ .getParameter(0)
+ .getMember("instructions")
+ or
+ // beta.threads.runs.create(threadId, { instructions: ..., additional_instructions: ... })
+ result =
+ classRef()
+ .getMember("beta")
+ .getMember("threads")
+ .getMember("runs")
+ .getMember("create")
+ .getParameter(1)
+ .getMember(["instructions", "additional_instructions"])
+ or
+ // beta.threads.messages.create(threadId, { role: "system"/"developer", content: ... })
+ exists(API::Node msg |
+ msg =
+ classRef()
+ .getMember("beta")
+ .getMember("threads")
+ .getMember("messages")
+ .getMember("create")
+ .getParameter(1) and
+ isSystemOrDevMessage(msg)
+ |
+ result = msg.getMember("content")
+ )
+ or
+ // audio.transcriptions.create({ prompt: ... }) and audio.translations.create({ prompt: ... })
+ result =
+ classRef()
+ .getMember("audio")
+ .getMember(["transcriptions", "translations"])
+ .getMember("create")
+ .getParameter(0)
+ .getMember("prompt")
+ }
+}
+
+/**
+ * Provides models for agents SDK (instances of the `agents` class etc).
+ *
+ * See https://github.com/openai/openai-agents-js.
+ */
+module AgentSDK {
+ API::Node moduleRef() { result = API::moduleImport("@openai/agents") }
+
+ /** Gets a reference to the `agents.Runner` class. */
+ API::Node agentConstructor() { result = moduleRef().getMember("Agent") }
+
+ API::Node classInstance() { result = agentConstructor().getInstance() }
+
+ /** Gets a reference to the top-level run() or Runner.run() functions. */
+ API::Node run() {
+ // import { run } from '@openai/agents'; run(agent, input)
+ result = moduleRef().getMember("run")
+ or
+ // const runner = new Runner(); runner.run(agent, input)
+ result = moduleRef().getMember("Runner").getInstance().getMember("run")
+ }
+
+ API::Node asTool() { result = classInstance().getMember("asTool")}
+
+ API::Node toolFunction() { result = moduleRef().getMember("tool") }
+
+ /** Gets a reference to a potential property of `agents.Runner` called input which can refer to a system prompt depending on the role specified. */
+ API::Node getContentNode() {
+ // Agent({ instructions: ... })
+ result = agentConstructor()
+ .getParameter(0)
+ .getMember(["instructions", "handoffDescription"])
+ or
+ // Agent({ instructions: (runContext) => returnValue })
+ result = agentConstructor()
+ .getParameter(0)
+ .getMember("instructions")
+ .getReturn()
+ or
+ // run(agent, input) or runner.run(agent, input) — string input
+ result = run()
+ .getParameter(1)
+ or
+ // run(agent, [{ role: "system"/"developer", content: ... }])
+ exists(API::Node msg |
+ msg = run()
+ .getParameter(1)
+ .getArrayElement() and
+ isSystemOrDevMessage(msg)
+ |
+ result = msg.getMember("content")
+ )
+ or
+ // agent.asTool({..., toolDescription: ...})
+ result = asTool().getParameter(0).getMember("toolDescription")
+ or
+ // tool({..., description: ...})
+ result = toolFunction().getParameter(0).getMember("description")
+ }
+}
diff --git a/javascript/ql/src/experimental/semmle/javascript/security/PromptInjection/PromptInjectionCustomizations.qll b/javascript/ql/src/experimental/semmle/javascript/security/PromptInjection/PromptInjectionCustomizations.qll
new file mode 100644
index 000000000000..ea769b860865
--- /dev/null
+++ b/javascript/ql/src/experimental/semmle/javascript/security/PromptInjection/PromptInjectionCustomizations.qll
@@ -0,0 +1,93 @@
+/**
+ * Provides default sources, sinks and sanitizers for detecting
+ * "prompt injection"
+ * vulnerabilities, as well as extension points for adding your own.
+ */
+
+import javascript
+
+private import semmle.javascript.dataflow.DataFlow
+private import semmle.javascript.Concepts
+private import semmle.javascript.security.dataflow.RemoteFlowSources
+private import semmle.javascript.dataflow.internal.BarrierGuards
+private import semmle.javascript.frameworks.data.ModelsAsData
+private import experimental.semmle.javascript.frameworks.OpenAI
+private import experimental.semmle.javascript.frameworks.Anthropic
+private import experimental.semmle.javascript.frameworks.GoogleGenAI
+
+/**
+ * Provides default sources, sinks and sanitizers for detecting
+ * "prompt injection"
+ * vulnerabilities, as well as extension points for adding your own.
+ */
+module PromptInjection {
+ /**
+ * A data flow source for "prompt injection" vulnerabilities.
+ */
+ abstract class Source extends DataFlow::Node { }
+
+ /**
+ * A data flow sink for "prompt injection" vulnerabilities.
+ */
+ abstract class Sink extends DataFlow::Node { }
+
+ /**
+ * A sanitizer for "prompt injection" vulnerabilities.
+ */
+ abstract class Sanitizer extends DataFlow::Node { }
+
+ /**
+ * An active threat-model source, considered as a flow source.
+ */
+ private class ActiveThreatModelSourceAsSource extends Source, ActiveThreatModelSource { }
+
+ /**
+ * A prompt to an AI model, considered as a flow sink.
+ */
+ class AIPromptAsSink extends Sink {
+ AIPromptAsSink() { this = any(AIPrompt p).getAPrompt() }
+ }
+
+ private class SinkFromModel extends Sink {
+ SinkFromModel() { this = ModelOutput::getASinkNode("prompt-injection").asSink() }
+ }
+
+ private class PromptContentSink extends Sink {
+ PromptContentSink() {
+ this = OpenAI::getContentNode().asSink()
+ or
+ this = AgentSDK::getContentNode().asSink()
+ or
+ this = Anthropic::getContentNode().asSink()
+ or
+ this = GoogleGenAI::getContentNode().asSink()
+ }
+ }
+
+ private class ConstCompareAsSanitizerGuard extends Sanitizer {
+ ConstCompareAsSanitizerGuard()
+ {
+ this = DataFlow::MakeBarrierGuard