init: add source code from src.zip

This commit is contained in:
sigridjineth
2026-03-31 01:55:58 -07:00
commit f5a40b86de
1902 changed files with 513237 additions and 0 deletions

View File

@@ -0,0 +1,63 @@
import type { Transport } from '@modelcontextprotocol/sdk/shared/transport.js'
import type { JSONRPCMessage } from '@modelcontextprotocol/sdk/types.js'
/**
 * In-process linked transport pair for running an MCP server and client
 * in the same process without spawning a subprocess.
 *
 * `send()` on one side delivers to `onmessage` on the other (deferred to a
 * microtask). `close()` on either side calls `onclose` on both, and any
 * message still queued at close time is dropped rather than delivered to a
 * closed transport.
 */
class InProcessTransport implements Transport {
  private peer: InProcessTransport | undefined
  private closed = false
  onclose?: () => void
  onerror?: (error: Error) => void
  onmessage?: (message: JSONRPCMessage) => void
  /** @internal Links this side to its counterpart; called once by the factory. */
  _setPeer(peer: InProcessTransport): void {
    this.peer = peer
  }
  /** No-op: an in-process pair has no connection to establish. */
  async start(): Promise<void> {}
  /**
   * Queue `message` for delivery to the peer's `onmessage`.
   *
   * Delivery is deferred to a microtask to avoid unbounded stack depth on
   * synchronous request/response ping-pong.
   * @throws Error if this side is already closed.
   */
  async send(message: JSONRPCMessage): Promise<void> {
    if (this.closed) {
      throw new Error('Transport is closed')
    }
    const peer = this.peer
    queueMicrotask(() => {
      // Re-check at delivery time: either side may have closed between
      // send() and this microtask running; a closed pair must not
      // receive messages.
      if (this.closed || !peer || peer.closed) {
        return
      }
      peer.onmessage?.(message)
    })
  }
  /**
   * Close both sides. Idempotent; `onclose` fires on each side at most once.
   */
  async close(): Promise<void> {
    if (this.closed) {
      return
    }
    this.closed = true
    this.onclose?.()
    // Mirror the close onto the peer so both `onclose` callbacks fire.
    if (this.peer && !this.peer.closed) {
      this.peer.closed = true
      this.peer.onclose?.()
    }
  }
}
/**
 * Build two entangled in-process transports for MCP client/server use.
 * Anything written to one side's `send()` shows up on the other side's
 * `onmessage`; closing one side closes both.
 *
 * @returns [clientTransport, serverTransport]
 */
export function createLinkedTransportPair(): [Transport, Transport] {
  const clientSide = new InProcessTransport()
  const serverSide = new InProcessTransport()
  clientSide._setPeer(serverSide)
  serverSide._setPeer(clientSide)
  return [clientSide, serverSide]
}

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,136 @@
/**
* SDK MCP Transport Bridge
*
* This file implements a transport bridge that allows MCP servers running in the SDK process
* to communicate with the Claude Code CLI process through control messages.
*
* ## Architecture Overview
*
* Unlike regular MCP servers that run as separate processes, SDK MCP servers run in-process
* within the SDK. This requires a special transport mechanism to bridge communication between:
* - The CLI process (where the MCP client runs)
* - The SDK process (where the SDK MCP server runs)
*
* ## Message Flow
*
* ### CLI → SDK (via SdkControlClientTransport)
* 1. CLI's MCP Client calls a tool → sends JSONRPC request to SdkControlClientTransport
* 2. Transport wraps the message in a control request with server_name and request_id
* 3. Control request is sent via stdout to the SDK process
* 4. SDK's StructuredIO receives the control response and routes it back to the transport
* 5. Transport unwraps the response and returns it to the MCP Client
*
* ### SDK → CLI (via SdkControlServerTransport)
* 1. Query receives control request with MCP message and calls transport.onmessage
* 2. MCP server processes the message and calls transport.send() with response
* 3. Transport calls sendMcpMessage callback with the response
* 4. Query's callback resolves the pending promise with the response
* 5. Query returns the response to complete the control request
*
* ## Key Design Points
*
* - SdkControlClientTransport: StructuredIO tracks pending requests
* - SdkControlServerTransport: Query tracks pending requests
* - The control request wrapper includes server_name to route to the correct SDK server
* - The system supports multiple SDK MCP servers running simultaneously
* - Message IDs are preserved through the entire flow for proper correlation
*/
import type { Transport } from '@modelcontextprotocol/sdk/shared/transport.js'
import type { JSONRPCMessage } from '@modelcontextprotocol/sdk/types.js'
/**
 * Callback that ships an MCP message to the SDK-side server identified by
 * `serverName` and resolves with that server's response message.
 */
export type SendMcpMessageCallback = (
  serverName: string,
  message: JSONRPCMessage,
) => Promise<JSONRPCMessage>
/**
 * CLI-side transport for SDK MCP servers.
 *
 * This transport is used in the CLI process to bridge communication between:
 * - The CLI's MCP Client (which wants to call tools on SDK MCP servers)
 * - The SDK process (where the actual MCP server runs)
 *
 * It converts MCP protocol messages into control requests that can be sent
 * through stdout/stdin to the SDK process.
 */
export class SdkControlClientTransport implements Transport {
  private isClosed = false
  onclose?: () => void
  onerror?: (error: Error) => void
  onmessage?: (message: JSONRPCMessage) => void
  /**
   * @param serverName Routing key: which SDK MCP server should receive
   *   messages sent through this transport.
   * @param sendMcpMessage Callback that ships the message to the SDK
   *   process and resolves with the server's response.
   */
  constructor(
    private serverName: string,
    private sendMcpMessage: SendMcpMessageCallback,
  ) {}
  /** No-op: the underlying control channel is managed elsewhere. */
  async start(): Promise<void> {}
  /**
   * Forward an MCP message to the SDK process and deliver the response
   * back through `onmessage`.
   * @throws Error if the transport is closed when called. Errors from the
   *   underlying callback propagate to the caller unchanged.
   */
  async send(message: JSONRPCMessage): Promise<void> {
    if (this.isClosed) {
      throw new Error('Transport is closed')
    }
    // Send the message and wait for the response
    const response = await this.sendMcpMessage(this.serverName, message)
    // The transport may have been closed while we were awaiting — a closed
    // transport must not emit messages to the MCP client.
    if (this.isClosed) {
      return
    }
    this.onmessage?.(response)
  }
  /** Close the transport. Idempotent; `onclose` fires at most once. */
  async close(): Promise<void> {
    if (this.isClosed) {
      return
    }
    this.isClosed = true
    this.onclose?.()
  }
}
/**
 * SDK-side transport for SDK MCP servers.
 *
 * Bridges control requests arriving from the CLI (via stdin) to the MCP
 * server living in the SDK process. It is a thin pass-through: inbound
 * messages are pushed through `onmessage` by the caller, and outbound
 * responses from the server are handed to the `sendMcpMessage` callback.
 *
 * Note: Query handles all request/response correlation and async flow.
 */
export class SdkControlServerTransport implements Transport {
  private isClosed = false
  onclose?: () => void
  onerror?: (error: Error) => void
  onmessage?: (message: JSONRPCMessage) => void
  constructor(private sendMcpMessage: (message: JSONRPCMessage) => void) {}
  /** No-op: there is no connection to open for an in-process bridge. */
  async start(): Promise<void> {}
  /** Hand the server's response to the callback. Throws once closed. */
  async send(message: JSONRPCMessage): Promise<void> {
    if (this.isClosed) {
      throw new Error('Transport is closed')
    }
    this.sendMcpMessage(message)
  }
  /** Idempotent close; `onclose` fires at most once. */
  async close(): Promise<void> {
    if (this.isClosed) {
      return
    }
    this.isClosed = true
    this.onclose?.()
  }
}

2465
src/services/mcp/auth.ts Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,76 @@
/**
* Approved channel plugins allowlist. --channels plugin:name@marketplace
* entries only register if {marketplace, plugin} is on this list. server:
* entries always fail (schema is plugin-only). The
* --dangerously-load-development-channels flag bypasses for both kinds.
* Lives in GrowthBook so it can be updated without a release.
*
* Plugin-level granularity: if a plugin is approved, all its channel
* servers are. Per-server gating was overengineering — a plugin that
* sprouts a malicious second server is already compromised, and per-server
* entries would break on harmless plugin refactors.
*
* The allowlist check is a pure {marketplace, plugin} comparison against
* the user's typed tag. The gate's separate 'marketplace' step verifies
* the tag matches what's actually installed before this check runs.
*/
import { z } from 'zod/v4'
import { lazySchema } from '../../utils/lazySchema.js'
import { parsePluginIdentifier } from '../../utils/plugins/pluginIdentifier.js'
import { getFeatureValue_CACHED_MAY_BE_STALE } from '../analytics/growthbook.js'
// One approved {marketplace, plugin} pair from the tengu_harbor_ledger
// GrowthBook feature (or from org-managed allowedChannelPlugins).
export type ChannelAllowlistEntry = {
  marketplace: string
  plugin: string
}
// Zod validator for the remote ledger payload: an array of
// {marketplace, plugin} pairs. Wrapped in lazySchema — presumably to defer
// schema construction until first use (see utils/lazySchema).
const ChannelAllowlistSchema = lazySchema(() =>
  z.array(
    z.object({
      marketplace: z.string(),
      plugin: z.string(),
    }),
  ),
)
/**
 * Read the approved-channels ledger from GrowthBook. A missing or
 * malformed payload degrades to an empty allowlist rather than throwing.
 */
export function getChannelAllowlist(): ChannelAllowlistEntry[] {
  const raw = getFeatureValue_CACHED_MAY_BE_STALE<unknown>(
    'tengu_harbor_ledger',
    [],
  )
  const result = ChannelAllowlistSchema().safeParse(raw)
  if (!result.success) {
    return []
  }
  return result.data
}
/**
 * Master runtime switch for channels, checked before any per-server
 * gating — when false, --channels is a no-op and no handlers register.
 * Defaults to false; GrowthBook refreshes on its ~5-minute cadence.
 */
export function isChannelsEnabled(): boolean {
  const enabled = getFeatureValue_CACHED_MAY_BE_STALE('tengu_harbor', false)
  return enabled
}
/**
 * Pure allowlist check keyed off the connection's pluginSource — used for
 * UI pre-filtering so the IDE only offers "Enable channel?" for servers
 * that would actually pass the gate. Not a security boundary:
 * channel_enable still runs the full gate. Mirrors the allowlist
 * comparison inside gateChannelServer() but standalone (no
 * session/marketplace coupling — those are tautologies when the entry is
 * derived from pluginSource).
 *
 * Returns false for undefined pluginSource (non-plugin server — can never
 * match the {marketplace, plugin}-keyed ledger) and for @-less sources
 * (builtin/inline — same reason).
 */
export function isChannelAllowlisted(
  pluginSource: string | undefined,
): boolean {
  if (!pluginSource) {
    return false
  }
  const parsed = parsePluginIdentifier(pluginSource)
  if (!parsed.marketplace) {
    return false
  }
  for (const entry of getChannelAllowlist()) {
    if (
      entry.plugin === parsed.name &&
      entry.marketplace === parsed.marketplace
    ) {
      return true
    }
  }
  return false
}

View File

@@ -0,0 +1,316 @@
/**
* Channel notifications — lets an MCP server push user messages into the
* conversation. A "channel" (Discord, Slack, SMS, etc.) is just an MCP server
* that:
* - exposes tools for outbound messages (e.g. `send_message`) — standard MCP
* - sends `notifications/claude/channel` notifications for inbound — this file
*
* The notification handler wraps the content in a <channel> tag and
* enqueues it. SleepTool polls hasCommandsInQueue() and wakes within 1s.
* The model sees where the message came from and decides which tool to reply
* with (the channel's MCP tool, SendUserMessage, or both).
*
* feature('KAIROS') || feature('KAIROS_CHANNELS'). Runtime gate tengu_harbor.
* Requires claude.ai OAuth auth — API key users are blocked until
* console gets a channelsEnabled admin surface. Teams/Enterprise orgs
* must explicitly opt in via channelsEnabled: true in managed settings.
*/
import type { ServerCapabilities } from '@modelcontextprotocol/sdk/types.js'
import { z } from 'zod/v4'
import { type ChannelEntry, getAllowedChannels } from '../../bootstrap/state.js'
import { CHANNEL_TAG } from '../../constants/xml.js'
import {
getClaudeAIOAuthTokens,
getSubscriptionType,
} from '../../utils/auth.js'
import { lazySchema } from '../../utils/lazySchema.js'
import { parsePluginIdentifier } from '../../utils/plugins/pluginIdentifier.js'
import { getSettingsForSource } from '../../utils/settings/settings.js'
import { escapeXmlAttr } from '../../utils/xml.js'
import {
type ChannelAllowlistEntry,
getChannelAllowlist,
isChannelsEnabled,
} from './channelAllowlist.js'
// Zod schema for inbound channel notifications
// (notifications/claude/channel): freeform content plus optional
// string→string meta that is rendered as <channel> tag attributes.
export const ChannelMessageNotificationSchema = lazySchema(() =>
  z.object({
    method: z.literal('notifications/claude/channel'),
    params: z.object({
      content: z.string(),
      // Opaque passthrough — thread_id, user, whatever the channel wants the
      // model to see. Rendered as attributes on the <channel> tag.
      meta: z.record(z.string(), z.string()).optional(),
    }),
  }),
)
/**
* Structured permission reply from a channel server. Servers that support
* this declare `capabilities.experimental['claude/channel/permission']` and
* emit this event INSTEAD of relaying "yes tbxkq" as text via
* notifications/claude/channel. Explicit opt-in per server — a channel that
* just wants to relay text never becomes a permission surface by accident.
*
* The server parses the user's reply (spec: /^\s*(y|yes|n|no)\s+([a-km-z]{5})\s*$/i)
* and emits {request_id, behavior}. CC matches request_id against its
* pending map. Unlike the regex-intercept approach, text in the general
* channel can never accidentally match — approval requires the server
* to deliberately emit this specific event.
*/
// Method name for the inbound structured permission reply.
export const CHANNEL_PERMISSION_METHOD =
  'notifications/claude/channel/permission'
// Zod schema for that reply: the pending request's short ID plus the
// user's allow/deny decision as parsed by the channel server.
export const ChannelPermissionNotificationSchema = lazySchema(() =>
  z.object({
    method: z.literal(CHANNEL_PERMISSION_METHOD),
    params: z.object({
      request_id: z.string(),
      behavior: z.enum(['allow', 'deny']),
    }),
  }),
)
/**
* Outbound: CC → server. Fired from interactiveHandler.ts when a
* permission dialog opens and the server has declared the permission
* capability. Server formats the message for its platform (Telegram
* markdown, iMessage rich text, Discord embed) and sends it to the
* human. When the human replies "yes tbxkq", the server parses that
* against PERMISSION_REPLY_RE and emits the inbound schema above.
*
* Not a zod schema — CC SENDS this, doesn't validate it. A type here
* keeps both halves of the protocol documented side by side.
*/
// Method name for the outbound CC → server permission prompt.
export const CHANNEL_PERMISSION_REQUEST_METHOD =
  'notifications/claude/channel/permission_request'
// Payload CC sends with the permission_request notification.
export type ChannelPermissionRequestParams = {
  request_id: string
  tool_name: string
  description: string
  /** JSON-stringified tool input, truncated to 200 chars with …. Full
   * input is in the local terminal dialog; this is a phone-sized
   * preview. Server decides whether/how to show it. */
  input_preview: string
}
/**
* Meta keys become XML attribute NAMES — a crafted key like
* `x="" injected="y` would break out of the attribute structure. Only
* accept keys that look like plain identifiers. This is stricter than
* the XML spec (which allows `:`, `.`, `-`) but channel servers only
* send `chat_id`, `user`, `thread_ts`, `message_id` in practice.
*/
const SAFE_META_KEY = /^[a-zA-Z_][a-zA-Z0-9_]*$/
/**
 * Wrap inbound channel content in a <channel> tag whose attributes carry
 * the originating server name plus any meta keys that pass SAFE_META_KEY.
 * Attribute values (and the server name) are escaped; unsafe meta keys
 * are silently dropped.
 */
export function wrapChannelMessage(
  serverName: string,
  content: string,
  meta?: Record<string, string>,
): string {
  let attrs = ''
  for (const [key, value] of Object.entries(meta ?? {})) {
    if (SAFE_META_KEY.test(key)) {
      attrs += ` ${key}="${escapeXmlAttr(value)}"`
    }
  }
  const openTag = `<${CHANNEL_TAG} source="${escapeXmlAttr(serverName)}"${attrs}>`
  return `${openTag}\n${content}\n</${CHANNEL_TAG}>`
}
/**
 * Effective allowlist for the current session. Team/enterprise orgs can
 * set allowedChannelPlugins in managed settings — when present it REPLACES
 * the GrowthBook ledger (the admin owns the trust decision). When absent,
 * managed and unmanaged users alike fall back to the ledger.
 *
 * Callers already read sub/policy for the policy gate — they're passed in
 * to avoid re-reading getSettingsForSource (uncached).
 */
export function getEffectiveChannelAllowlist(
  sub: ReturnType<typeof getSubscriptionType>,
  orgList: ChannelAllowlistEntry[] | undefined,
): {
  entries: ChannelAllowlistEntry[]
  source: 'org' | 'ledger'
} {
  const managedOrg = sub === 'team' || sub === 'enterprise'
  if (managedOrg && orgList) {
    return { entries: orgList, source: 'org' }
  }
  return { entries: getChannelAllowlist(), source: 'ledger' }
}
// Outcome of gateChannelServer: either register the notification handler,
// or skip with the specific gate that failed plus a user-facing reason.
export type ChannelGateResult =
  | { action: 'register' }
  | {
      action: 'skip'
      kind:
        | 'capability'
        | 'disabled'
        | 'auth'
        | 'policy'
        | 'session'
        | 'marketplace'
        | 'allowlist'
      reason: string
    }
/**
 * Match a connected MCP server against the user's parsed --channels
 * entries. A server-kind entry matches on exact bare name; a plugin-kind
 * entry matches the second segment of a `plugin:X:Y` runtime name.
 * Returns the matching entry so callers can read its kind — that is the
 * user's trust declaration, not something inferred from runtime shape.
 */
export function findChannelEntry(
  serverName: string,
  channels: readonly ChannelEntry[],
): ChannelEntry | undefined {
  // Split unconditionally — a bare name like 'slack' yields ['slack'] and
  // the plugin branch below simply never matches it.
  const segments = serverName.split(':')
  const looksLikePlugin = segments[0] === 'plugin'
  for (const channel of channels) {
    if (channel.kind === 'server') {
      if (channel.name === serverName) {
        return channel
      }
    } else if (looksLikePlugin && segments[1] === channel.name) {
      return channel
    }
  }
  return undefined
}
/**
 * Gate an MCP server's channel-notification path. Caller checks
 * feature('KAIROS') || feature('KAIROS_CHANNELS') first (build-time
 * elimination). Gate order: capability → runtime gate (tengu_harbor) →
 * auth (OAuth only) → org policy → session --channels → allowlist.
 * API key users are blocked at the auth layer — channels requires
 * claude.ai auth; console orgs have no admin opt-in surface yet.
 *
 * skip Not a channel server, or managed org hasn't opted in, or
 * not in session --channels. Connection stays up; handler
 * not registered.
 * register Subscribe to notifications/claude/channel.
 *
 * Which servers can connect at all is governed by allowedMcpServers —
 * this gate only decides whether the notification handler registers.
 *
 * @param serverName Runtime MCP server name (e.g. 'slack' or 'plugin:slack:tg').
 * @param capabilities The server's declared capabilities (undefined if none).
 * @param pluginSource Plugin identifier stashed on the config at
 *   addPluginScopeToServers; undefined for non-plugin servers.
 * @returns `{action: 'register'}` to subscribe the notification handler, or
 *   `{action: 'skip'}` carrying the failing gate's kind and a user-facing
 *   reason string.
 */
export function gateChannelServer(
  serverName: string,
  capabilities: ServerCapabilities | undefined,
  pluginSource: string | undefined,
): ChannelGateResult {
  // Channel servers declare `experimental['claude/channel']: {}` (MCP's
  // presence-signal idiom — same as `tools: {}`). Truthy covers `{}` and
  // `true`; absent/undefined/explicit-`false` all fail. Key matches the
  // notification method namespace (notifications/claude/channel).
  if (!capabilities?.experimental?.['claude/channel']) {
    return {
      action: 'skip',
      kind: 'capability',
      reason: 'server did not declare claude/channel capability',
    }
  }
  // Overall runtime gate. After capability so normal MCP servers never hit
  // this path. Before auth/policy so the killswitch works regardless of
  // session state.
  if (!isChannelsEnabled()) {
    return {
      action: 'skip',
      kind: 'disabled',
      reason: 'channels feature is not currently available',
    }
  }
  // OAuth-only. API key users (console) are blocked — there's no
  // channelsEnabled admin surface in console yet, so the policy opt-in
  // flow doesn't exist for them. Drop this when console parity lands.
  if (!getClaudeAIOAuthTokens()?.accessToken) {
    return {
      action: 'skip',
      kind: 'auth',
      reason: 'channels requires claude.ai authentication (run /login)',
    }
  }
  // Teams/Enterprise opt-in. Managed orgs must explicitly enable channels.
  // Default OFF — absent or false blocks. Keyed off subscription tier, not
  // "policy settings exist" — a team org with zero configured policy keys
  // (remote endpoint returns 404) is still a managed org and must not fall
  // through to the unmanaged path.
  const sub = getSubscriptionType()
  const managed = sub === 'team' || sub === 'enterprise'
  const policy = managed ? getSettingsForSource('policySettings') : undefined
  if (managed && policy?.channelsEnabled !== true) {
    return {
      action: 'skip',
      kind: 'policy',
      reason:
        'channels not enabled by org policy (set channelsEnabled: true in managed settings)',
    }
  }
  // User-level session opt-in. A server must be explicitly listed in
  // --channels to push inbound this session — protects against a trusted
  // server surprise-adding the capability.
  const entry = findChannelEntry(serverName, getAllowedChannels())
  if (!entry) {
    return {
      action: 'skip',
      kind: 'session',
      reason: `server ${serverName} not in --channels list for this session`,
    }
  }
  if (entry.kind === 'plugin') {
    // Marketplace verification: the tag is intent (plugin:slack@anthropic),
    // the runtime name is just plugin:slack:X — could be slack@anthropic or
    // slack@evil depending on what's installed. Verify they match before
    // trusting the tag for the allowlist check below. Source is stashed on
    // the config at addPluginScopeToServers — undefined (non-plugin server,
    // shouldn't happen for plugin-kind entry) or @-less (builtin/inline)
    // both fail the comparison.
    const actual = pluginSource
      ? parsePluginIdentifier(pluginSource).marketplace
      : undefined
    if (actual !== entry.marketplace) {
      return {
        action: 'skip',
        kind: 'marketplace',
        reason: `you asked for plugin:${entry.name}@${entry.marketplace} but the installed ${entry.name} plugin is from ${actual ?? 'an unknown source'}`,
      }
    }
    // Approved-plugin allowlist. Marketplace gate already verified
    // tag == reality, so this is a pure entry check. entry.dev (per-entry,
    // not the session-wide bit) bypasses — so accepting the dev dialog for
    // one entry doesn't leak allowlist-bypass to --channels entries.
    if (!entry.dev) {
      const { entries, source } = getEffectiveChannelAllowlist(
        sub,
        policy?.allowedChannelPlugins,
      )
      if (
        !entries.some(
          e => e.plugin === entry.name && e.marketplace === entry.marketplace,
        )
      ) {
        return {
          action: 'skip',
          kind: 'allowlist',
          reason:
            source === 'org'
              ? `plugin ${entry.name}@${entry.marketplace} is not on your org's approved channels list (set allowedChannelPlugins in managed settings)`
              : `plugin ${entry.name}@${entry.marketplace} is not on the approved channels allowlist (use --dangerously-load-development-channels for local dev)`,
        }
      }
    }
  } else {
    // server-kind: allowlist schema is {marketplace, plugin} — a server entry
    // can never match. Without this, --channels server:plugin:foo:bar would
    // match a plugin's runtime name and register with no allowlist check.
    if (!entry.dev) {
      return {
        action: 'skip',
        kind: 'allowlist',
        reason: `server ${entry.name} is not on the approved channels allowlist (use --dangerously-load-development-channels for local dev)`,
      }
    }
  }
  return { action: 'register' }
}

View File

@@ -0,0 +1,240 @@
/**
* Permission prompts over channels (Telegram, iMessage, Discord).
*
* Mirrors `BridgePermissionCallbacks` — when CC hits a permission dialog,
* it ALSO sends the prompt via active channels and races the reply against
* local UI / bridge / hooks / classifier. First resolver wins via claim().
*
* Inbound is a structured event: the server parses the user's "yes tbxkq"
* reply and emits notifications/claude/channel/permission with
* {request_id, behavior}. CC never sees the reply as text — approval
* requires the server to deliberately emit that specific event, not just
* relay content. Servers opt in by declaring
* capabilities.experimental['claude/channel/permission'].
*
* Kenneth's "would this let Claude self-approve?": the approving party is
* the human via the channel, not Claude. But the trust boundary isn't the
* terminal — it's the allowlist (tengu_harbor_ledger). A compromised
* channel server CAN fabricate "yes <id>" without the human seeing the
* prompt. Accepted risk: a compromised channel already has unlimited
* conversation-injection turns (social-engineer over time, wait for
* acceptEdits, etc.); inject-then-self-approve is faster, not more
* capable. The dialog slows a compromised channel; it doesn't stop one.
* See PR discussion 2956440848.
*/
import { jsonStringify } from '../../utils/slowOperations.js'
import { getFeatureValue_CACHED_MAY_BE_STALE } from '../analytics/growthbook.js'
/**
 * GrowthBook runtime gate for permission relay — deliberately separate
 * from the channels gate (tengu_harbor) so channels can ship without
 * permission-relay riding along (Kenneth: "no bake time if it goes out
 * tomorrow"). Defaults to false and can be flipped without a release.
 * Read once at useManageMCPConnections mount — mid-session flag changes
 * don't apply until restart.
 */
export function isChannelPermissionRelayEnabled(): boolean {
  const flag = getFeatureValue_CACHED_MAY_BE_STALE(
    'tengu_harbor_permissions',
    false,
  )
  return flag
}
// Decision relayed back from a channel server for a pending permission
// request.
export type ChannelPermissionResponse = {
  behavior: 'allow' | 'deny'
  /** Which channel server the reply came from (e.g., "plugin:telegram:tg"). */
  fromServer: string
}
// Contract produced by createChannelPermissionCallbacks; a stable
// reference to it is stored in AppState for the session's lifetime.
export type ChannelPermissionCallbacks = {
  /** Register a resolver for a request ID. Returns unsubscribe. */
  onResponse(
    requestId: string,
    handler: (response: ChannelPermissionResponse) => void,
  ): () => void
  /** Resolve a pending request from a structured channel event
   * (notifications/claude/channel/permission). Returns true if the ID
   * was pending — the server parsed the user's reply and emitted
   * {request_id, behavior}; we just match against the map. */
  resolve(
    requestId: string,
    behavior: 'allow' | 'deny',
    fromServer: string,
  ): boolean
}
/**
* Reply format spec for channel servers to implement:
* /^\s*(y|yes|n|no)\s+([a-km-z]{5})\s*$/i
*
* 5 lowercase letters, no 'l' (looks like 1/I). Case-insensitive (phone
* autocorrect). No bare yes/no (conversational). No prefix/suffix chatter.
*
* CC generates the ID and sends the prompt. The SERVER parses the user's
* reply and emits notifications/claude/channel/permission with {request_id,
* behavior} — CC doesn't regex-match text anymore. Exported so plugins can
* import the exact regex rather than hand-copying it.
*/
export const PERMISSION_REPLY_RE = /^\s*(y|yes|n|no)\s+([a-km-z]{5})\s*$/i
// 25-letter alphabet: a-z with 'l' removed (reads like 1/I). 25^5 ≈ 9.8M IDs.
const ID_ALPHABET = 'abcdefghijkmnopqrstuvwxyz'
// Substring blocklist — five random letters can spell things (Kenneth, in
// the launch thread: "this is why i bias to numbers, hard to have anything
// worse than 80085"). Non-exhaustive; covers the send-to-your-boss-by-
// accident tier. A generated ID containing any of these gets re-hashed
// with a salt.
// prettier-ignore
const ID_AVOID_SUBSTRINGS = [
  'fuck', 'shit', 'cunt', 'cock', 'dick', 'twat', 'piss', 'crap',
  'bitch', 'whore', 'ass', 'tit', 'cum', 'fag', 'dyke', 'nig',
  'kike', 'rape', 'nazi', 'damn', 'poo', 'pee', 'wank', 'anus',
]
/**
 * FNV-1a hash of `input` folded to uint32, then base-25 encoded into a
 * 5-letter ID. Not cryptographic — just a stable, short, letters-only
 * identifier. 32 bits ≈ 6.9 base-25 letters of entropy; keeping 5 discards
 * a little, which is plenty here.
 */
function hashToId(input: string): string {
  let hash = 0x811c9dc5
  for (let index = 0; index < input.length; index++) {
    hash ^= input.charCodeAt(index)
    hash = Math.imul(hash, 0x01000193)
  }
  hash = hash >>> 0
  let id = ''
  for (let digit = 0; digit < 5; digit++) {
    id += ID_ALPHABET[hash % 25]
    hash = Math.floor(hash / 25)
  }
  return id
}
/**
 * Derive a short permission-reply ID from a toolUseID. Five letters from a
 * 25-char alphabet (a-z minus 'l', which reads like 1/I in many fonts).
 * 25^5 ≈ 9.8M IDs — a 50% birthday collision needs ~3K simultaneous
 * pending prompts, absurd for a single interactive session. Letters-only
 * so phone users never toggle keyboard modes (hex alternates a-f/0-9).
 * Re-hashes with a salt suffix when the result contains a blocklisted
 * substring — five random letters can spell things you don't want texted
 * to your phone. toolUseIDs are `toolu_` + base64-ish, so we hash rather
 * than slice.
 */
export function shortRequestId(toolUseID: string): string {
  // Blocklist density: 7 length-3 words × 3 positions × 25² + 15 length-4
  // × 2 × 25 + 2 length-5 ≈ 13,877 of 9.8M IDs — roughly 1 in 700 needs a
  // re-hash. Ten retries makes a final blocklisted ID ((1/700)^10)
  // negligible.
  let candidate = hashToId(toolUseID)
  let salt = 0
  while (
    salt < 10 &&
    ID_AVOID_SUBSTRINGS.some(bad => candidate.includes(bad))
  ) {
    candidate = hashToId(`${toolUseID}:${salt}`)
    salt++
  }
  return candidate
}
/**
 * Phone-sized JSON preview of a tool input: at most 200 chars (~3 lines
 * on a narrow screen), ellipsis-truncated. The full input stays in the
 * local terminal dialog; the channel only gets this summary so a
 * Write(5KB-file) doesn't flood your texts. The server decides whether
 * and how to display it.
 */
export function truncateForPreview(input: unknown): string {
  try {
    const serialized = jsonStringify(input)
    if (serialized.length > 200) {
      return `${serialized.slice(0, 200)}…`
    }
    return serialized
  } catch {
    return '(unserializable)'
  }
}
/**
 * Narrow MCP clients to the ones eligible to relay permission prompts.
 * ALL three conditions must hold: the client is connected, its server is
 * in the session's --channels allowlist, and it declares BOTH experimental
 * capabilities. The second capability is the server's explicit opt-in — a
 * relay-only channel never becomes a permission surface by accident
 * (Kenneth's "users may be unpleasantly surprised"). Centralized so any
 * future fourth condition lands in one place.
 */
export function filterPermissionRelayClients<
  T extends {
    type: string
    name: string
    capabilities?: { experimental?: Record<string, unknown> }
  },
>(
  clients: readonly T[],
  isInAllowlist: (name: string) => boolean,
): (T & { type: 'connected' })[] {
  const canRelay = (client: T): client is T & { type: 'connected' } => {
    if (client.type !== 'connected') return false
    if (!isInAllowlist(client.name)) return false
    const experimental = client.capabilities?.experimental
    return (
      experimental?.['claude/channel'] !== undefined &&
      experimental?.['claude/channel/permission'] !== undefined
    )
  }
  return clients.filter(canRelay)
}
/**
 * Factory for the callbacks object. The pending Map is closed over — NOT
 * module-level (per src/CLAUDE.md), NOT in AppState (functions-in-state
 * causes issues with equality/serialization). Same lifetime pattern as
 * `replBridgePermissionCallbacks`: constructed once per session inside
 * a React hook, stable reference stored in AppState.
 *
 * resolve() is called from the dedicated notification handler
 * (notifications/claude/channel/permission) with the structured payload.
 * The server already parsed "yes tbxkq" → {request_id, behavior}; we just
 * match against the pending map. No regex on CC's side — text in the
 * general channel can't accidentally approve anything.
 */
export function createChannelPermissionCallbacks(): ChannelPermissionCallbacks {
  const pending = new Map<
    string,
    (response: ChannelPermissionResponse) => void
  >()
  return {
    onResponse(requestId, handler) {
      // Lowercase here too — resolve() already does; asymmetry means a
      // future caller passing a mixed-case ID would silently never match.
      // shortRequestId always emits lowercase so this is a noop today,
      // but the symmetry makes the contract explicit.
      const key = requestId.toLowerCase()
      pending.set(key, handler)
      return () => {
        // Only remove our own registration: a stale unsubscribe (invoked
        // after the same ID was re-registered with a new handler) must not
        // clobber the newer handler's entry.
        if (pending.get(key) === handler) {
          pending.delete(key)
        }
      }
    },
    resolve(requestId, behavior, fromServer) {
      const key = requestId.toLowerCase()
      const resolver = pending.get(key)
      if (!resolver) return false
      // Delete BEFORE calling — if resolver throws or re-enters, the
      // entry is already gone. Also handles duplicate events (second
      // emission falls through — server bug or network dup, ignore).
      pending.delete(key)
      resolver({ behavior, fromServer })
      return true
    },
  }
}

View File

@@ -0,0 +1,164 @@
import axios from 'axios'
import memoize from 'lodash-es/memoize.js'
import { getOauthConfig } from 'src/constants/oauth.js'
import {
type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
logEvent,
} from 'src/services/analytics/index.js'
import { getClaudeAIOAuthTokens } from 'src/utils/auth.js'
import { getGlobalConfig, saveGlobalConfig } from 'src/utils/config.js'
import { logForDebugging } from 'src/utils/debug.js'
import { isEnvDefinedFalsy } from 'src/utils/envUtils.js'
import { clearMcpAuthCache } from './client.js'
import { normalizeNameForMCP } from './normalization.js'
import type { ScopedMcpServerConfig } from './types.js'
// One MCP server record as returned by the claude.ai /v1/mcp_servers
// endpoint.
type ClaudeAIMcpServer = {
  type: 'mcp_server'
  id: string
  display_name: string
  url: string
  created_at: string
}
// Paginated list response from /v1/mcp_servers.
type ClaudeAIMcpServersResponse = {
  data: ClaudeAIMcpServer[]
  has_more: boolean
  next_page: string | null
}
// Axios request timeout for the server-list fetch.
const FETCH_TIMEOUT_MS = 5000
// anthropic-beta header value required by the /v1/mcp_servers API.
const MCP_SERVERS_BETA_HEADER = 'mcp-servers-2025-12-04'
/**
 * Fetches MCP server configurations from Claude.ai org configs.
 * These servers are managed by the organization via Claude.ai.
 *
 * Results are memoized for the session lifetime (fetch once per CLI
 * session). Note this also caches the empty result of a failed or
 * ineligible fetch until clearClaudeAIMcpConfigsCache() is called.
 */
export const fetchClaudeAIMcpConfigsIfEligible = memoize(
  async (): Promise<Record<string, ScopedMcpServerConfig>> => {
    try {
      // Explicit env-var kill switch.
      if (isEnvDefinedFalsy(process.env.ENABLE_CLAUDEAI_MCP_SERVERS)) {
        logForDebugging('[claudeai-mcp] Disabled via env var')
        logEvent('tengu_claudeai_mcp_eligibility', {
          state:
            'disabled_env_var' as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
        })
        return {}
      }
      const tokens = getClaudeAIOAuthTokens()
      if (!tokens?.accessToken) {
        logForDebugging('[claudeai-mcp] No access token')
        logEvent('tengu_claudeai_mcp_eligibility', {
          state:
            'no_oauth_token' as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
        })
        return {}
      }
      // Check for user:mcp_servers scope directly instead of isClaudeAISubscriber().
      // In non-interactive mode, isClaudeAISubscriber() returns false when ANTHROPIC_API_KEY
      // is set (even with valid OAuth tokens) because preferThirdPartyAuthentication() causes
      // isAnthropicAuthEnabled() to return false. Checking the scope directly allows users
      // with both API keys and OAuth tokens to access claude.ai MCPs in print mode.
      if (!tokens.scopes?.includes('user:mcp_servers')) {
        logForDebugging(
          `[claudeai-mcp] Missing user:mcp_servers scope (scopes=${tokens.scopes?.join(',') || 'none'})`,
        )
        logEvent('tengu_claudeai_mcp_eligibility', {
          state:
            'missing_scope' as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
        })
        return {}
      }
      const baseUrl = getOauthConfig().BASE_API_URL
      const url = `${baseUrl}/v1/mcp_servers?limit=1000`
      logForDebugging(`[claudeai-mcp] Fetching from ${url}`)
      const response = await axios.get<ClaudeAIMcpServersResponse>(url, {
        headers: {
          Authorization: `Bearer ${tokens.accessToken}`,
          'Content-Type': 'application/json',
          'anthropic-beta': MCP_SERVERS_BETA_HEADER,
          'anthropic-version': '2023-06-01',
        },
        timeout: FETCH_TIMEOUT_MS,
      })
      const configs: Record<string, ScopedMcpServerConfig> = {}
      // Track used normalized names to detect collisions and assign (2), (3), etc. suffixes.
      // We check the final normalized name (including suffix) to handle edge cases where
      // a suffixed name collides with another server's base name (e.g., "Example Server 2"
      // colliding with "Example Server! (2)" which both normalize to claude_ai_Example_Server_2).
      const usedNormalizedNames = new Set<string>()
      for (const server of response.data.data) {
        const baseName = `claude.ai ${server.display_name}`
        // Try without suffix first, then increment until we find an unused normalized name
        let finalName = baseName
        let finalNormalized = normalizeNameForMCP(finalName)
        let count = 1
        while (usedNormalizedNames.has(finalNormalized)) {
          count++
          finalName = `${baseName} (${count})`
          finalNormalized = normalizeNameForMCP(finalName)
        }
        usedNormalizedNames.add(finalNormalized)
        configs[finalName] = {
          type: 'claudeai-proxy',
          url: server.url,
          id: server.id,
          scope: 'claudeai',
        }
      }
      logForDebugging(
        `[claudeai-mcp] Fetched ${Object.keys(configs).length} servers`,
      )
      logEvent('tengu_claudeai_mcp_eligibility', {
        state:
          'eligible' as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
      })
      return configs
    } catch (error) {
      // Best-effort: org-config fetch failures must never break startup,
      // but include the cause so debug logs show WHY servers are missing
      // (timeout vs 401 vs parse error) instead of a context-free line.
      logForDebugging(
        `[claudeai-mcp] Fetch failed: ${error instanceof Error ? error.message : String(error)}`,
      )
      return {}
    }
  },
)
/**
 * Clears the memoized cache for fetchClaudeAIMcpConfigsIfEligible.
 * Call this after login so the next fetch will use the new auth tokens.
 */
export function clearClaudeAIMcpConfigsCache(): void {
  // Drop the memoized result; clear() is optional-chained because the cache
  // implementation may not expose it — TODO confirm cache shape.
  fetchClaudeAIMcpConfigsIfEligible.cache.clear?.()
  // Also clear the auth cache so freshly-authorized servers get re-connected
  clearMcpAuthCache()
}
/**
 * Record that a claude.ai connector successfully connected. Idempotent.
 *
 * This feeds the "N connectors unavailable/need auth" startup notifications:
 * a connector that previously worked and is now failed is a state change
 * worth surfacing, whereas an org-configured connector that has been
 * needs-auth since first sight is one the user has demonstrably ignored.
 */
export function markClaudeAiMcpConnected(name: string): void {
  saveGlobalConfig(current => {
    const alreadySeen = current.claudeAiMcpEverConnected ?? []
    return alreadySeen.includes(name)
      ? current
      : { ...current, claudeAiMcpEverConnected: [...alreadySeen, name] }
  })
}
/** True when the named claude.ai connector has connected at least once. */
export function hasClaudeAiMcpEverConnected(name: string): boolean {
  const everConnected = getGlobalConfig().claudeAiMcpEverConnected
  return everConnected !== undefined && everConnected.includes(name)
}

3348
src/services/mcp/client.ts Normal file

File diff suppressed because it is too large Load Diff

1578
src/services/mcp/config.ts Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,313 @@
import type { Client } from '@modelcontextprotocol/sdk/client/index.js'
import {
ElicitationCompleteNotificationSchema,
type ElicitRequestParams,
ElicitRequestSchema,
type ElicitResult,
} from '@modelcontextprotocol/sdk/types.js'
import type { AppState } from '../../state/AppState.js'
import {
executeElicitationHooks,
executeElicitationResultHooks,
executeNotificationHooks,
} from '../../utils/hooks.js'
import { logMCPDebug, logMCPError } from '../../utils/log.js'
import { jsonStringify } from '../../utils/slowOperations.js'
import {
type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
logEvent,
} from '../analytics/index.js'
/** Configuration for the waiting state shown after the user opens a URL. */
export type ElicitationWaitingState = {
  /** Button label, e.g. "Retry now" or "Skip confirmation" */
  actionLabel: string
  /** Whether to show a visible Cancel button (e.g. for error-based retry flow) */
  showCancel?: boolean
}
/** One queued elicitation awaiting a user (or hook) response. */
export type ElicitationRequestEvent = {
  serverName: string
  /** The JSON-RPC request ID, unique per server connection. */
  requestId: string | number
  params: ElicitRequestParams
  /** Aborting this signal resolves the elicitation as 'cancel'. */
  signal: AbortSignal
  /**
   * Resolves the elicitation. For explicit elicitations, all actions are
   * meaningful. For error-based retry (-32042), 'accept' is a no-op —
   * the retry is driven by onWaitingDismiss instead.
   */
  respond: (response: ElicitResult) => void
  /** For URL elicitations: shown after user opens the browser. */
  waitingState?: ElicitationWaitingState
  /** Called when phase 2 (waiting) is dismissed by user action or completion. */
  onWaitingDismiss?: (action: 'dismiss' | 'retry' | 'cancel') => void
  /** Set to true by the completion notification handler when the server confirms completion. */
  completed?: boolean
}
/** Classify an elicitation request; anything that isn't URL-mode is a form. */
function getElicitationMode(params: ElicitRequestParams): 'form' | 'url' {
  if (params.mode === 'url') {
    return 'url'
  }
  return 'form'
}
/** Find a queued URL-mode elicitation by server name and elicitationId; -1 when absent. */
function findElicitationInQueue(
  queue: ElicitationRequestEvent[],
  serverName: string,
  elicitationId: string,
): number {
  for (let index = 0; index < queue.length; index++) {
    const event = queue[index]!
    const matches =
      event.serverName === serverName &&
      event.params.mode === 'url' &&
      'elicitationId' in event.params &&
      event.params.elicitationId === elicitationId
    if (matches) {
      return index
    }
  }
  return -1
}
/**
 * Wires MCP elicitation support onto a connected client:
 *  - a request handler that queues elicitation requests into AppState and
 *    resolves them when a hook or the user responds, and
 *  - a notification handler that flags URL-mode elicitations as completed
 *    when the server confirms.
 *
 * @param client MCP client to attach handlers to
 * @param serverName used for logging and for matching queued events
 * @param setAppState state updater for the elicitation queue
 */
export function registerElicitationHandler(
  client: Client,
  serverName: string,
  setAppState: (f: (prevState: AppState) => AppState) => void,
): void {
  // Register the elicitation request handler.
  // Wrapped in try/catch because setRequestHandler throws if the client wasn't
  // created with elicitation capability declared.
  try {
    client.setRequestHandler(ElicitRequestSchema, async (request, extra) => {
      logMCPDebug(
        serverName,
        `Received elicitation request: ${jsonStringify(request)}`,
      )
      const mode = getElicitationMode(request.params)
      logEvent('tengu_mcp_elicitation_shown', {
        mode: mode as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
      })
      try {
        // Run elicitation hooks first - they can provide a response programmatically
        const hookResponse = await runElicitationHooks(
          serverName,
          request.params,
          extra.signal,
        )
        if (hookResponse) {
          logMCPDebug(
            serverName,
            `Elicitation resolved by hook: ${jsonStringify(hookResponse)}`,
          )
          logEvent('tengu_mcp_elicitation_response', {
            mode: mode as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
            action:
              hookResponse.action as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
          })
          return hookResponse
        }
        // URL-mode requests carry an elicitationId used to correlate the later
        // completion notification with the queued event.
        const elicitationId =
          mode === 'url' && 'elicitationId' in request.params
            ? (request.params.elicitationId as string | undefined)
            : undefined
        const response = new Promise<ElicitResult>(resolve => {
          const onAbort = () => {
            resolve({ action: 'cancel' })
          }
          // The signal may already be aborted before we enqueue anything.
          if (extra.signal.aborted) {
            onAbort()
            return
          }
          const waitingState: ElicitationWaitingState | undefined =
            elicitationId ? { actionLabel: 'Skip confirmation' } : undefined
          setAppState(prev => ({
            ...prev,
            elicitation: {
              queue: [
                ...prev.elicitation.queue,
                {
                  serverName,
                  requestId: extra.requestId,
                  params: request.params,
                  signal: extra.signal,
                  waitingState,
                  respond: (result: ElicitResult) => {
                    // User responded: stop listening for aborts so we can't
                    // later double-resolve with 'cancel'.
                    extra.signal.removeEventListener('abort', onAbort)
                    logEvent('tengu_mcp_elicitation_response', {
                      mode: mode as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
                      action:
                        result.action as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
                    })
                    resolve(result)
                  },
                },
              ],
            },
          }))
          extra.signal.addEventListener('abort', onAbort, { once: true })
        })
        const rawResult = await response
        logMCPDebug(
          serverName,
          `Elicitation response: ${jsonStringify(rawResult)}`,
        )
        // Post-response hooks may rewrite or block the user's answer.
        const result = await runElicitationResultHooks(
          serverName,
          rawResult,
          extra.signal,
          mode,
          elicitationId,
        )
        return result
      } catch (error) {
        // Any failure is reported as a cancel so the server isn't left hanging.
        logMCPError(serverName, `Elicitation error: ${error}`)
        return { action: 'cancel' as const }
      }
    })
    // Register handler for elicitation completion notifications (URL mode).
    // Sets `completed: true` on the matching queue event; the dialog reacts to this flag.
    client.setNotificationHandler(
      ElicitationCompleteNotificationSchema,
      notification => {
        const { elicitationId } = notification.params
        logMCPDebug(
          serverName,
          `Received elicitation completion notification: ${elicitationId}`,
        )
        void executeNotificationHooks({
          message: `MCP server "${serverName}" confirmed elicitation ${elicitationId} complete`,
          notificationType: 'elicitation_complete',
        })
        let found = false
        setAppState(prev => {
          const idx = findElicitationInQueue(
            prev.elicitation.queue,
            serverName,
            elicitationId,
          )
          if (idx === -1) return prev
          found = true
          // Copy-on-write: replace only the matched entry.
          const queue = [...prev.elicitation.queue]
          queue[idx] = { ...queue[idx]!, completed: true }
          return { ...prev, elicitation: { queue } }
        })
        if (!found) {
          logMCPDebug(
            serverName,
            `Ignoring completion notification for unknown elicitation: ${elicitationId}`,
          )
        }
      },
    )
  } catch {
    // Client wasn't created with elicitation capability - nothing to register
    return
  }
}
/**
 * Run Elicitation hooks before any UI is shown. A hook may answer the
 * elicitation programmatically, or block it (mapped to 'decline').
 *
 * @returns an ElicitResult when a hook handled (or blocked) the request,
 *          undefined when the caller should fall through to the interactive
 *          dialog. Hook errors are logged and treated as "not handled".
 */
export async function runElicitationHooks(
  serverName: string,
  params: ElicitRequestParams,
  signal: AbortSignal,
): Promise<ElicitResult | undefined> {
  try {
    const mode = params.mode === 'url' ? 'url' : 'form'
    const url = 'url' in params ? (params.url as string) : undefined
    const elicitationId =
      'elicitationId' in params
        ? (params.elicitationId as string | undefined)
        : undefined
    const { elicitationResponse, blockingError } =
      await executeElicitationHooks({
        serverName,
        message: params.message,
        requestedSchema:
          'requestedSchema' in params
            ? (params.requestedSchema as Record<string, unknown>)
            : undefined,
        signal,
        mode,
        url,
        elicitationId,
      })
    // A blocking hook declines on the user's behalf.
    if (blockingError) {
      return { action: 'decline' }
    }
    if (elicitationResponse) {
      return {
        action: elicitationResponse.action,
        content: elicitationResponse.content,
      }
    }
    return undefined
  } catch (error) {
    logMCPError(serverName, `Elicitation hook error: ${error}`)
    return undefined
  }
}
/**
 * Run ElicitationResult hooks after the user has responded, then fire a
 * `elicitation_response` notification. Returns a (potentially modified)
 * ElicitResult — hooks may override the action/content or block the response.
 *
 * @param serverName server the elicitation came from (logging/hook context)
 * @param result the user's raw response
 * @param signal abort signal forwarded to hooks
 * @param mode 'form' | 'url', when known
 * @param elicitationId URL-mode correlation id, when known
 */
export async function runElicitationResultHooks(
  serverName: string,
  result: ElicitResult,
  signal: AbortSignal,
  mode?: 'form' | 'url',
  elicitationId?: string,
): Promise<ElicitResult> {
  try {
    const { elicitationResultResponse, blockingError } =
      await executeElicitationResultHooks({
        serverName,
        action: result.action,
        content: result.content as Record<string, unknown> | undefined,
        signal,
        mode,
        elicitationId,
      })
    // A blocking hook forces a decline regardless of the user's answer.
    if (blockingError) {
      void executeNotificationHooks({
        message: `Elicitation response for server "${serverName}": decline`,
        notificationType: 'elicitation_response',
      })
      return { action: 'decline' }
    }
    // A hook may override the action; content falls back to the user's.
    const finalResult = elicitationResultResponse
      ? {
          action: elicitationResultResponse.action,
          content: elicitationResultResponse.content ?? result.content,
        }
      : result
    // Fire a notification for observability
    void executeNotificationHooks({
      message: `Elicitation response for server "${serverName}": ${finalResult.action}`,
      notificationType: 'elicitation_response',
    })
    return finalResult
  } catch (error) {
    logMCPError(serverName, `ElicitationResult hook error: ${error}`)
    // Fire notification even on error
    void executeNotificationHooks({
      message: `Elicitation response for server "${serverName}": ${result.action}`,
      notificationType: 'elicitation_response',
    })
    return result
  }
}

View File

@@ -0,0 +1,38 @@
/**
* Shared utilities for expanding environment variables in MCP server configurations
*/
/**
 * Expand environment variables in a string value.
 * Handles ${VAR} and ${VAR:-default} syntax.
 * @param value string possibly containing ${...} references
 * @returns Object with expanded string and list of missing variables
 */
export function expandEnvVarsInString(value: string): {
  expanded: string
  missingVars: string[]
} {
  const missingVars: string[] = []
  const expanded = value.replace(
    /\$\{([^}]+)\}/g,
    (match, varContent: string) => {
      // Split on the FIRST ':-' only, so defaults may themselves contain ':-'.
      // Note: String.prototype.split's limit argument TRUNCATES the remainder
      // (unlike Python's maxsplit), so split(':-', 2) would silently drop
      // everything after a second ':-'; indexOf/slice preserves it.
      const sepIndex = varContent.indexOf(':-')
      const varName =
        sepIndex === -1 ? varContent : varContent.slice(0, sepIndex)
      const defaultValue =
        sepIndex === -1 ? undefined : varContent.slice(sepIndex + 2)
      const envValue = process.env[varName]
      if (envValue !== undefined) {
        return envValue
      }
      if (defaultValue !== undefined) {
        return defaultValue
      }
      // Track missing variable for error reporting
      missingVars.push(varName)
      // Return original if not found (allows debugging but will be reported as error)
      return match
    },
  )
  return {
    expanded,
    missingVars,
  }
}

View File

@@ -0,0 +1,138 @@
import { getIsNonInteractiveSession } from '../../bootstrap/state.js'
import { checkHasTrustDialogAccepted } from '../../utils/config.js'
import { logAntError } from '../../utils/debug.js'
import { errorMessage } from '../../utils/errors.js'
import { execFileNoThrowWithCwd } from '../../utils/execFileNoThrow.js'
import { logError, logMCPDebug, logMCPError } from '../../utils/log.js'
import { jsonParse } from '../../utils/slowOperations.js'
import { logEvent } from '../analytics/index.js'
import type {
McpHTTPServerConfig,
McpSSEServerConfig,
McpWebSocketServerConfig,
ScopedMcpServerConfig,
} from './types.js'
/**
 * Check if the MCP server config comes from project settings (projectSettings or localSettings)
 * This is important for security checks
 */
function isMcpServerFromProjectOrLocalSettings(
  config: ScopedMcpServerConfig,
): boolean {
  const projectLikeScopes = ['project', 'local']
  return projectLikeScopes.includes(config.scope)
}
/**
 * Get dynamic headers for an MCP server using the headersHelper script.
 * The helper is executed via shell, must exit 0, and must print a JSON
 * object of string key/value pairs to stdout.
 * @param serverName The name of the MCP server
 * @param config The MCP server configuration
 * @returns Headers object or null if not configured or failed (failures are
 *          logged, never thrown, so the connection attempt can proceed)
 */
export async function getMcpHeadersFromHelper(
  serverName: string,
  config: McpSSEServerConfig | McpHTTPServerConfig | McpWebSocketServerConfig,
): Promise<Record<string, string> | null> {
  if (!config.headersHelper) {
    return null
  }
  // Security check for project/local settings
  // Skip trust check in non-interactive mode (e.g., CI/CD, automation)
  if (
    'scope' in config &&
    isMcpServerFromProjectOrLocalSettings(config as ScopedMcpServerConfig) &&
    !getIsNonInteractiveSession()
  ) {
    // Check if trust has been established for this project
    const hasTrust = checkHasTrustDialogAccepted()
    if (!hasTrust) {
      // Refuse to execute an attacker-controllable script before trust.
      // NOTE(review): MACRO.FEEDBACK_CHANNEL appears to be a build-time
      // global substituted at bundle time — confirm it is defined.
      const error = new Error(
        `Security: headersHelper for MCP server '${serverName}' executed before workspace trust is confirmed. If you see this message, post in ${MACRO.FEEDBACK_CHANNEL}.`,
      )
      logAntError('MCP headersHelper invoked before trust check', error)
      logEvent('tengu_mcp_headersHelper_missing_trust', {})
      return null
    }
  }
  try {
    logMCPDebug(serverName, 'Executing headersHelper to get dynamic headers')
    const execResult = await execFileNoThrowWithCwd(config.headersHelper, [], {
      shell: true,
      // 10s cap so a hung helper can't stall server connection indefinitely.
      timeout: 10000,
      // Pass server context so one helper script can serve multiple MCP servers
      // (git credential-helper style). See deshaw/anthropic-issues#28.
      env: {
        ...process.env,
        CLAUDE_CODE_MCP_SERVER_NAME: serverName,
        CLAUDE_CODE_MCP_SERVER_URL: config.url,
      },
    })
    if (execResult.code !== 0 || !execResult.stdout) {
      throw new Error(
        `headersHelper for MCP server '${serverName}' did not return a valid value`,
      )
    }
    const result = execResult.stdout.trim()
    const headers = jsonParse(result)
    // Must be a plain JSON object (not null, not an array).
    if (
      typeof headers !== 'object' ||
      headers === null ||
      Array.isArray(headers)
    ) {
      throw new Error(
        `headersHelper for MCP server '${serverName}' must return a JSON object with string key-value pairs`,
      )
    }
    // Validate all values are strings
    for (const [key, value] of Object.entries(headers)) {
      if (typeof value !== 'string') {
        throw new Error(
          `headersHelper for MCP server '${serverName}' returned non-string value for key "${key}": ${typeof value}`,
        )
      }
    }
    logMCPDebug(
      serverName,
      `Successfully retrieved ${Object.keys(headers).length} headers from headersHelper`,
    )
    return headers as Record<string, string>
  } catch (error) {
    logMCPError(
      serverName,
      `Error getting headers from headersHelper: ${errorMessage(error)}`,
    )
    logError(
      new Error(
        `Error getting MCP headers from headersHelper for server '${serverName}': ${errorMessage(error)}`,
      ),
    )
    // Return null instead of throwing to avoid blocking the connection
    return null
  }
}
/**
 * Get combined headers for an MCP server (static + dynamic)
 * @param serverName The name of the MCP server
 * @param config The MCP server configuration
 * @returns Combined headers object; dynamic (headersHelper) values win over
 *          static config headers on key collisions
 */
export async function getMcpServerHeaders(
  serverName: string,
  config: McpSSEServerConfig | McpHTTPServerConfig | McpWebSocketServerConfig,
): Promise<Record<string, string>> {
  // Start from the statically configured headers, then layer dynamic ones
  // on top so the helper script can override.
  const combined: Record<string, string> = { ...(config.headers ?? {}) }
  const dynamicHeaders = await getMcpHeadersFromHelper(serverName, config)
  if (dynamicHeaders) {
    Object.assign(combined, dynamicHeaders)
  }
  return combined
}

View File

@@ -0,0 +1,106 @@
/**
* Pure string utility functions for MCP tool/server name parsing.
* This file has no heavy dependencies to keep it lightweight for
* consumers that only need string parsing (e.g., permissionValidation).
*/
import { normalizeNameForMCP } from './normalization.js'
/*
 * Extracts MCP server information from a tool name string
 * @param toolString The string to parse. Expected format: "mcp__serverName__toolName"
 * @returns An object containing server name and optional tool name, or null if not a valid MCP rule
 *
 * Known limitation: If a server name contains "__", parsing will be incorrect.
 * For example, "mcp__my__server__tool" would parse as server="my" and tool="server__tool"
 * instead of server="my__server" and tool="tool". This is rare in practice since server
 * names typically don't contain double underscores.
 */
export function mcpInfoFromString(toolString: string): {
  serverName: string
  toolName: string | undefined
} | null {
  const separator = '__'
  const [prefix, serverName, ...remainder] = toolString.split(separator)
  if (prefix !== 'mcp' || !serverName) {
    return null
  }
  // Re-join the remainder so tool names containing "__" survive the split.
  const toolName =
    remainder.length === 0 ? undefined : remainder.join(separator)
  return { serverName, toolName }
}
/**
 * Generates the MCP tool/command name prefix for a given server
 * @param serverName Name of the MCP server
 * @returns The prefix string, e.g. "mcp__server_name__"
 */
export function getMcpPrefix(serverName: string): string {
  const normalizedServer = normalizeNameForMCP(serverName)
  return ['mcp', normalizedServer, ''].join('__')
}
/**
 * Builds a fully qualified MCP tool name from server and tool names.
 * Inverse of mcpInfoFromString().
 * @param serverName Name of the MCP server (unnormalized)
 * @param toolName Name of the tool (unnormalized)
 * @returns The fully qualified name, e.g., "mcp__server__tool"
 */
export function buildMcpToolName(serverName: string, toolName: string): string {
  const prefix = getMcpPrefix(serverName)
  const normalizedTool = normalizeNameForMCP(toolName)
  return prefix + normalizedTool
}
/**
 * Returns the name to use for permission rule matching.
 * For MCP tools, uses the fully qualified mcp__server__tool name so that
 * deny rules targeting builtins (e.g., "Write") don't match unprefixed MCP
 * replacements that share the same display name. Falls back to `tool.name`.
 */
export function getToolNameForPermissionCheck(tool: {
  name: string
  mcpInfo?: { serverName: string; toolName: string }
}): string {
  const info = tool.mcpInfo
  if (info === undefined) {
    return tool.name
  }
  return buildMcpToolName(info.serverName, info.toolName)
}
/*
* Extracts the display name from an MCP tool/command name
* @param fullName The full MCP tool/command name (e.g., "mcp__server_name__tool_name")
* @param serverName The server name to remove from the prefix
* @returns The display name without the MCP prefix
*/
export function getMcpDisplayName(
fullName: string,
serverName: string,
): string {
const prefix = `mcp__${normalizeNameForMCP(serverName)}__`
return fullName.replace(prefix, '')
}
/**
 * Extracts just the tool/command display name from a userFacingName
 * @param userFacingName The full user-facing name (e.g., "github - Add comment to issue (MCP)")
 * @returns The display name without server prefix and (MCP) suffix
 */
export function extractMcpToolDisplayName(userFacingName: string): string {
  // This is really ugly but our current Tool type doesn't make it easy to have
  // different display names for different purposes.
  // Drop a trailing "(MCP)" marker, if any, then trim.
  const withoutSuffix = userFacingName.replace(/\s*\(MCP\)\s*$/, '').trim()
  // The server prefix (if present) ends at the FIRST " - " separator.
  const separatorAt = withoutSuffix.indexOf(' - ')
  return separatorAt === -1
    ? withoutSuffix
    : withoutSuffix.slice(separatorAt + 3).trim()
}

View File

@@ -0,0 +1,23 @@
/**
* Pure utility functions for MCP name normalization.
* This file has no dependencies to avoid circular imports.
*/
// Claude.ai server names are prefixed with this string
const CLAUDEAI_SERVER_PREFIX = 'claude.ai '
/**
 * Normalize server names to be compatible with the API pattern ^[a-zA-Z0-9_-]{1,64}$
 * Replaces any invalid characters (including dots and spaces) with underscores.
 *
 * For claude.ai servers (names starting with "claude.ai "), also collapses
 * consecutive underscores and strips leading/trailing underscores to prevent
 * interference with the __ delimiter used in MCP tool names.
 */
export function normalizeNameForMCP(name: string): string {
  const isClaudeAiServer = name.startsWith(CLAUDEAI_SERVER_PREFIX)
  const replaced = name.replace(/[^a-zA-Z0-9_-]/g, '_')
  if (!isClaudeAiServer) {
    return replaced
  }
  // Collapse runs of underscores, then drop a leading/trailing underscore.
  return replaced.replace(/_+/g, '_').replace(/^_|_$/g, '')
}

View File

@@ -0,0 +1,78 @@
/**
* OAuth redirect port helpers — extracted from auth.ts to break the
* auth.ts ↔ xaaIdpLogin.ts circular dependency.
*/
import { createServer } from 'http'
import { getPlatform } from '../../utils/platform.js'
// Windows dynamic port range 49152-65535 is reserved
// (on other platforms we use exactly that ephemeral range).
const REDIRECT_PORT_RANGE =
  getPlatform() === 'windows'
    ? { min: 39152, max: 49151 }
    : { min: 49152, max: 65535 }
// Last-resort port when no random port in the range could be bound.
const REDIRECT_PORT_FALLBACK = 3118
/**
 * Builds a redirect URI on localhost with the given port and a fixed `/callback` path.
 *
 * RFC 8252 Section 7.3 (OAuth for Native Apps): loopback redirect URIs match any
 * port as long as the path matches.
 */
export function buildRedirectUri(
  port: number = REDIRECT_PORT_FALLBACK,
): string {
  const callbackPath = '/callback'
  return `http://localhost:${port}${callbackPath}`
}
/** Read an explicit callback port from the environment; undefined when unset or invalid. */
function getMcpOAuthCallbackPort(): number | undefined {
  const raw = process.env.MCP_OAUTH_CALLBACK_PORT
  if (!raw) {
    return undefined
  }
  const parsed = parseInt(raw, 10)
  // Reject NaN, zero, and negatives.
  return Number.isNaN(parsed) || parsed <= 0 ? undefined : parsed
}
/**
 * Finds an available port in the specified range for OAuth redirect
 * Uses random selection for better security
 *
 * Probing works by binding a throwaway http server to the candidate port and
 * immediately closing it; a bind error means the port is taken.
 * @throws when neither a random port nor the fallback port can be bound
 */
export async function findAvailablePort(): Promise<number> {
  // First, try the configured port if specified
  // (returned without probing — the user explicitly chose it).
  const configuredPort = getMcpOAuthCallbackPort()
  if (configuredPort) {
    return configuredPort
  }
  const { min, max } = REDIRECT_PORT_RANGE
  const range = max - min + 1
  const maxAttempts = Math.min(range, 100) // Don't try forever
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    const port = min + Math.floor(Math.random() * range)
    try {
      await new Promise<void>((resolve, reject) => {
        const testServer = createServer()
        testServer.once('error', reject)
        testServer.listen(port, () => {
          // Bind succeeded — release the port for the real callback server.
          testServer.close(() => resolve())
        })
      })
      return port
    } catch {
      // Port in use, try another random port
      continue
    }
  }
  // If random selection failed, try the fallback port
  try {
    await new Promise<void>((resolve, reject) => {
      const testServer = createServer()
      testServer.once('error', reject)
      testServer.listen(REDIRECT_PORT_FALLBACK, () => {
        testServer.close(() => resolve())
      })
    })
    return REDIRECT_PORT_FALLBACK
  } catch {
    throw new Error(`No available ports for OAuth redirect`)
  }
}

View File

@@ -0,0 +1,72 @@
import axios from 'axios'
import { logForDebugging } from '../../utils/debug.js'
import { errorMessage } from '../../utils/errors.js'
// One entry of the MCP registry response; only the remote URLs are consumed.
type RegistryServer = {
  server: {
    remotes?: Array<{ url: string }>
  }
}
type RegistryResponse = {
  servers: RegistryServer[]
}
// URLs stripped of query string and trailing slash — matches the normalization
// done by getLoggingSafeMcpBaseUrl so direct Set.has() lookup works.
// Remains undefined until prefetchOfficialMcpUrls succeeds; lookups fail closed.
let officialUrls: Set<string> | undefined = undefined
/** Drop the query string and one trailing slash; undefined for unparseable URLs. */
function normalizeUrl(url: string): string | undefined {
  let parsed: URL
  try {
    parsed = new URL(url)
  } catch {
    return undefined
  }
  parsed.search = ''
  const serialized = parsed.toString()
  return serialized.endsWith('/') ? serialized.slice(0, -1) : serialized
}
/**
 * Fire-and-forget fetch of the official MCP registry.
 * Populates officialUrls for isOfficialMcpUrl lookups.
 * Failures are logged and leave officialUrls untouched (lookups fail closed).
 */
export async function prefetchOfficialMcpUrls(): Promise<void> {
  // Respect the global opt-out for non-essential network traffic.
  if (process.env.CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC) {
    return
  }
  try {
    const response = await axios.get<RegistryResponse>(
      'https://api.anthropic.com/mcp-registry/v0/servers?version=latest&visibility=commercial',
      { timeout: 5000 },
    )
    const urls = new Set<string>()
    for (const entry of response.data.servers) {
      for (const remote of entry.server.remotes ?? []) {
        const normalized = normalizeUrl(remote.url)
        if (normalized) {
          urls.add(normalized)
        }
      }
    }
    // Publish only after a fully successful parse.
    officialUrls = urls
    logForDebugging(`[mcp-registry] Loaded ${urls.size} official MCP URLs`)
  } catch (error) {
    logForDebugging(`Failed to fetch MCP registry: ${errorMessage(error)}`, {
      level: 'error',
    })
  }
}
/**
 * Returns true iff the given (already-normalized via getLoggingSafeMcpBaseUrl)
 * URL is in the official MCP registry. Undefined registry → false (fail-closed).
 */
export function isOfficialMcpUrl(normalizedUrl: string): boolean {
  if (officialUrls === undefined) {
    // Registry never fetched (or fetch failed): fail closed.
    return false
  }
  return officialUrls.has(normalizedUrl)
}
/** Test-only: forget the fetched registry so the next prefetch starts fresh. */
export function resetOfficialMcpUrlsForTesting(): void {
  officialUrls = undefined
}

258
src/services/mcp/types.ts Normal file
View File

@@ -0,0 +1,258 @@
import type { Client } from '@modelcontextprotocol/sdk/client/index.js'
import type {
Resource,
ServerCapabilities,
} from '@modelcontextprotocol/sdk/types.js'
import { z } from 'zod/v4'
import { lazySchema } from '../../utils/lazySchema.js'
// Configuration schemas and types
// Where an MCP server config was defined; drives precedence and security checks.
export const ConfigScopeSchema = lazySchema(() =>
  z.enum([
    'local',
    'user',
    'project',
    'dynamic',
    'enterprise',
    'claudeai',
    'managed',
  ]),
)
export type ConfigScope = z.infer<ReturnType<typeof ConfigScopeSchema>>
export const TransportSchema = lazySchema(() =>
  z.enum(['stdio', 'sse', 'sse-ide', 'http', 'ws', 'sdk']),
)
export type Transport = z.infer<ReturnType<typeof TransportSchema>>
// Local subprocess server spawned with command + args.
export const McpStdioServerConfigSchema = lazySchema(() =>
  z.object({
    type: z.literal('stdio').optional(), // Optional for backwards compatibility
    command: z.string().min(1, 'Command cannot be empty'),
    args: z.array(z.string()).default([]),
    env: z.record(z.string(), z.string()).optional(),
  }),
)
// Cross-App Access (XAA / SEP-990): just a per-server flag. IdP connection
// details (issuer, clientId, callbackPort) come from settings.xaaIdp — configured
// once, shared across all XAA-enabled servers. clientId/clientSecret (parent
// oauth config + keychain slot) are for the MCP server's AS.
const McpXaaConfigSchema = lazySchema(() => z.boolean())
const McpOAuthConfigSchema = lazySchema(() =>
  z.object({
    clientId: z.string().optional(),
    callbackPort: z.number().int().positive().optional(),
    authServerMetadataUrl: z
      .string()
      .url()
      .startsWith('https://', {
        message: 'authServerMetadataUrl must use https://',
      })
      .optional(),
    xaa: McpXaaConfigSchema().optional(),
  }),
)
export const McpSSEServerConfigSchema = lazySchema(() =>
  z.object({
    type: z.literal('sse'),
    url: z.string(),
    headers: z.record(z.string(), z.string()).optional(),
    headersHelper: z.string().optional(),
    oauth: McpOAuthConfigSchema().optional(),
  }),
)
// Internal-only server type for IDE extensions
export const McpSSEIDEServerConfigSchema = lazySchema(() =>
  z.object({
    type: z.literal('sse-ide'),
    url: z.string(),
    ideName: z.string(),
    ideRunningInWindows: z.boolean().optional(),
  }),
)
// Internal-only server type for IDE extensions
export const McpWebSocketIDEServerConfigSchema = lazySchema(() =>
  z.object({
    type: z.literal('ws-ide'),
    url: z.string(),
    ideName: z.string(),
    authToken: z.string().optional(),
    ideRunningInWindows: z.boolean().optional(),
  }),
)
export const McpHTTPServerConfigSchema = lazySchema(() =>
  z.object({
    type: z.literal('http'),
    url: z.string(),
    headers: z.record(z.string(), z.string()).optional(),
    headersHelper: z.string().optional(),
    oauth: McpOAuthConfigSchema().optional(),
  }),
)
export const McpWebSocketServerConfigSchema = lazySchema(() =>
  z.object({
    type: z.literal('ws'),
    url: z.string(),
    headers: z.record(z.string(), z.string()).optional(),
    headersHelper: z.string().optional(),
  }),
)
// In-process server provided by the SDK host, referenced by name.
export const McpSdkServerConfigSchema = lazySchema(() =>
  z.object({
    type: z.literal('sdk'),
    name: z.string(),
  }),
)
// Config type for Claude.ai proxy servers
export const McpClaudeAIProxyServerConfigSchema = lazySchema(() =>
  z.object({
    type: z.literal('claudeai-proxy'),
    url: z.string(),
    id: z.string(),
  }),
)
// Discriminated-by-`type` union of every supported server config shape.
export const McpServerConfigSchema = lazySchema(() =>
  z.union([
    McpStdioServerConfigSchema(),
    McpSSEServerConfigSchema(),
    McpSSEIDEServerConfigSchema(),
    McpWebSocketIDEServerConfigSchema(),
    McpHTTPServerConfigSchema(),
    McpWebSocketServerConfigSchema(),
    McpSdkServerConfigSchema(),
    McpClaudeAIProxyServerConfigSchema(),
  ]),
)
export type McpStdioServerConfig = z.infer<
  ReturnType<typeof McpStdioServerConfigSchema>
>
export type McpSSEServerConfig = z.infer<
  ReturnType<typeof McpSSEServerConfigSchema>
>
export type McpSSEIDEServerConfig = z.infer<
  ReturnType<typeof McpSSEIDEServerConfigSchema>
>
export type McpWebSocketIDEServerConfig = z.infer<
  ReturnType<typeof McpWebSocketIDEServerConfigSchema>
>
export type McpHTTPServerConfig = z.infer<
  ReturnType<typeof McpHTTPServerConfigSchema>
>
export type McpWebSocketServerConfig = z.infer<
  ReturnType<typeof McpWebSocketServerConfigSchema>
>
export type McpSdkServerConfig = z.infer<
  ReturnType<typeof McpSdkServerConfigSchema>
>
export type McpClaudeAIProxyServerConfig = z.infer<
  ReturnType<typeof McpClaudeAIProxyServerConfigSchema>
>
export type McpServerConfig = z.infer<ReturnType<typeof McpServerConfigSchema>>
// Server config annotated with the settings scope it was loaded from.
export type ScopedMcpServerConfig = McpServerConfig & {
  scope: ConfigScope
  // For plugin-provided servers: the providing plugin's LoadedPlugin.source
  // (e.g. 'slack@anthropic'). Stashed at config-build time so the channel
  // gate doesn't have to race AppState.plugins.enabled hydration.
  pluginSource?: string
}
// Shape of a .mcp.json file: server name → config.
export const McpJsonConfigSchema = lazySchema(() =>
  z.object({
    mcpServers: z.record(z.string(), McpServerConfigSchema()),
  }),
)
export type McpJsonConfig = z.infer<ReturnType<typeof McpJsonConfigSchema>>
// Server connection types — a discriminated union on `type` describing the
// lifecycle state of each configured server.
export type ConnectedMCPServer = {
  client: Client
  name: string
  type: 'connected'
  capabilities: ServerCapabilities
  serverInfo?: {
    name: string
    version: string
  }
  instructions?: string
  config: ScopedMcpServerConfig
  cleanup: () => Promise<void>
}
export type FailedMCPServer = {
  name: string
  type: 'failed'
  config: ScopedMcpServerConfig
  error?: string
}
export type NeedsAuthMCPServer = {
  name: string
  type: 'needs-auth'
  config: ScopedMcpServerConfig
}
export type PendingMCPServer = {
  name: string
  type: 'pending'
  config: ScopedMcpServerConfig
  reconnectAttempt?: number
  maxReconnectAttempts?: number
}
export type DisabledMCPServer = {
  name: string
  type: 'disabled'
  config: ScopedMcpServerConfig
}
export type MCPServerConnection =
  | ConnectedMCPServer
  | FailedMCPServer
  | NeedsAuthMCPServer
  | PendingMCPServer
  | DisabledMCPServer
// Resource types
export type ServerResource = Resource & { server: string }
// MCP CLI State types — JSON-safe snapshots passed between processes.
export interface SerializedTool {
  name: string
  description: string
  inputJSONSchema?: {
    [x: string]: unknown
    type: 'object'
    properties?: {
      [x: string]: unknown
    }
  }
  isMcp?: boolean
  originalToolName?: string // Original unnormalized tool name from MCP server
}
export interface SerializedClient {
  name: string
  type: 'connected' | 'failed' | 'needs-auth' | 'pending' | 'disabled'
  capabilities?: ServerCapabilities
}
export interface MCPCliState {
  clients: SerializedClient[]
  configs: Record<string, ScopedMcpServerConfig>
  tools: SerializedTool[]
  resources: Record<string, ServerResource[]>
  normalizedNames?: Record<string, string> // Maps normalized names to original names
}

File diff suppressed because it is too large Load Diff

575
src/services/mcp/utils.ts Normal file
View File

@@ -0,0 +1,575 @@
import { createHash } from 'crypto'
import { join } from 'path'
import { getIsNonInteractiveSession } from '../../bootstrap/state.js'
import type { Command } from '../../commands.js'
import type { AgentMcpServerInfo } from '../../components/mcp/types.js'
import type { Tool } from '../../Tool.js'
import type { AgentDefinition } from '../../tools/AgentTool/loadAgentsDir.js'
import { getCwd } from '../../utils/cwd.js'
import { getGlobalClaudeFile } from '../../utils/env.js'
import { isSettingSourceEnabled } from '../../utils/settings/constants.js'
import {
getSettings_DEPRECATED,
hasSkipDangerousModePermissionPrompt,
} from '../../utils/settings/settings.js'
import { jsonStringify } from '../../utils/slowOperations.js'
import { getEnterpriseMcpFilePath, getMcpConfigByName } from './config.js'
import { mcpInfoFromString } from './mcpStringUtils.js'
import { normalizeNameForMCP } from './normalization.js'
import {
type ConfigScope,
ConfigScopeSchema,
type MCPServerConnection,
type McpHTTPServerConfig,
type McpServerConfig,
type McpSSEServerConfig,
type McpStdioServerConfig,
type McpWebSocketServerConfig,
type ScopedMcpServerConfig,
type ServerResource,
} from './types.js'
/**
 * Returns only the tools exposed by the given MCP server.
 *
 * MCP tool names follow the `mcp__<server>__<tool>` wire format, so ownership
 * is decided by prefix against the normalized server name.
 *
 * @param tools Tools to search through
 * @param serverName MCP server whose tools should be kept
 * @returns The subset of `tools` owned by `serverName`
 */
export function filterToolsByServer(tools: Tool[], serverName: string): Tool[] {
  const serverPrefix = `mcp__${normalizeNameForMCP(serverName)}__`
  const matches: Tool[] = []
  for (const candidate of tools) {
    if (candidate.name?.startsWith(serverPrefix)) {
      matches.push(candidate)
    }
  }
  return matches
}
/**
 * True when a command belongs to the given MCP server.
 *
 * Two naming shapes must be matched, both of which live in `mcp.commands`:
 * - MCP **prompts**: `mcp__<server>__<prompt>` (wire-format constraint)
 * - MCP **skills**: `<server>:<skill>` (matches plugin/nested-dir skill naming)
 */
export function commandBelongsToServer(
  command: Command,
  serverName: string,
): boolean {
  const commandName = command.name
  if (!commandName) {
    return false
  }
  const server = normalizeNameForMCP(serverName)
  if (commandName.startsWith(`mcp__${server}__`)) {
    return true
  }
  return commandName.startsWith(`${server}:`)
}
/**
 * Returns only the commands (prompts and skills) exposed by one MCP server.
 *
 * @param commands Commands to filter
 * @param serverName Name of the MCP server
 * @returns Commands belonging to the specified server
 */
export function filterCommandsByServer(
  commands: Command[],
  serverName: string,
): Command[] {
  return commands.filter(command =>
    commandBelongsToServer(command, serverName),
  )
}
/**
 * Filters MCP **prompts** (not skills) by server. Used by the `/mcp` menu
 * capabilities display — skills are a separate feature shown in `/skills`,
 * so they mustn't inflate the "prompts" capability badge.
 *
 * The distinguisher is `loadedFrom === 'mcp'`: MCP skills set it, MCP
 * prompts don't (they use `isMcp: true` instead).
 */
export function filterMcpPromptsByServer(
  commands: Command[],
  serverName: string,
): Command[] {
  return commands.filter(command => {
    if (!commandBelongsToServer(command, serverName)) {
      return false
    }
    const isMcpSkill =
      command.type === 'prompt' && command.loadedFrom === 'mcp'
    return !isMcpSkill
  })
}
/**
 * Returns only the resources exposed by one MCP server.
 *
 * @param resources Resources to filter
 * @param serverName Name of the MCP server
 * @returns Resources belonging to the specified server
 */
export function filterResourcesByServer(
  resources: ServerResource[],
  serverName: string,
): ServerResource[] {
  const matches: ServerResource[] = []
  for (const resource of resources) {
    if (resource.server === serverName) {
      matches.push(resource)
    }
  }
  return matches
}
/**
 * Removes every tool owned by the given MCP server.
 *
 * @param tools Tools to filter
 * @param serverName Server whose tools should be dropped
 * @returns Tools not belonging to the specified server
 */
export function excludeToolsByServer(
  tools: Tool[],
  serverName: string,
): Tool[] {
  const serverPrefix = `mcp__${normalizeNameForMCP(serverName)}__`
  const belongsToServer = (tool: Tool): boolean =>
    tool.name?.startsWith(serverPrefix) === true
  return tools.filter(tool => !belongsToServer(tool))
}
/**
 * Removes every command (prompt or skill) owned by the given MCP server.
 *
 * @param commands Commands to filter
 * @param serverName Server whose commands should be dropped
 * @returns Commands not belonging to the specified server
 */
export function excludeCommandsByServer(
  commands: Command[],
  serverName: string,
): Command[] {
  const kept: Command[] = []
  for (const command of commands) {
    if (!commandBelongsToServer(command, serverName)) {
      kept.push(command)
    }
  }
  return kept
}
/**
 * Returns a copy of the resources map without the given server's entry.
 * The input map is never mutated.
 *
 * @param resources Map of server name -> resources
 * @param serverName Server whose entry should be dropped
 * @returns Resources map without the specified server
 */
export function excludeResourcesByServer(
  resources: Record<string, ServerResource[]>,
  serverName: string,
): Record<string, ServerResource[]> {
  return Object.fromEntries(
    Object.entries(resources).filter(([server]) => server !== serverName),
  )
}
/**
 * Stable hash of an MCP server config for change detection on /reload-plugins.
 * Excludes `scope` (provenance, not content — moving a server from .mcp.json
 * to settings.json shouldn't reconnect it). Object keys are emitted sorted so
 * `{a:1,b:2}` and `{b:2,a:1}` hash identically.
 */
export function hashMcpConfig(config: ScopedMcpServerConfig): string {
  // Drop provenance before hashing.
  const { scope: _ignoredScope, ...content } = config
  const canonical = jsonStringify(content, (_key, value: unknown) => {
    if (!value || typeof value !== 'object' || Array.isArray(value)) {
      return value
    }
    // Rebuild nested objects with sorted keys; JSON.stringify-style
    // replacers honor the insertion order of the returned object.
    const source = value as Record<string, unknown>
    const ordered: Record<string, unknown> = {}
    for (const key of Object.keys(source).sort()) {
      ordered[key] = source[key]
    }
    return ordered
  })
  return createHash('sha256').update(canonical).digest('hex').slice(0, 16)
}
/**
* Remove stale MCP clients and their tools/commands/resources. A client is
* stale if:
* - scope 'dynamic' and name no longer in configs (plugin disabled), or
* - config hash changed (args/url/env edited in .mcp.json) — any scope
*
* The removal case is scoped to 'dynamic' so /reload-plugins can't
* accidentally disconnect a user-configured server that's just temporarily
* absent from the in-memory config (e.g. during a partial reload). The
* config-changed case applies to all scopes — if the config actually changed
* on disk, reconnecting is what you want.
*
* Returns the stale clients so the caller can disconnect them (clearServerCache).
*/
export function excludeStalePluginClients(
mcp: {
clients: MCPServerConnection[]
tools: Tool[]
commands: Command[]
resources: Record<string, ServerResource[]>
},
configs: Record<string, ScopedMcpServerConfig>,
): {
clients: MCPServerConnection[]
tools: Tool[]
commands: Command[]
resources: Record<string, ServerResource[]>
stale: MCPServerConnection[]
} {
const stale = mcp.clients.filter(c => {
const fresh = configs[c.name]
if (!fresh) return c.config.scope === 'dynamic'
return hashMcpConfig(c.config) !== hashMcpConfig(fresh)
})
if (stale.length === 0) {
return { ...mcp, stale: [] }
}
let { tools, commands, resources } = mcp
for (const s of stale) {
tools = excludeToolsByServer(tools, s.name)
commands = excludeCommandsByServer(commands, s.name)
resources = excludeResourcesByServer(resources, s.name)
}
const staleNames = new Set(stale.map(c => c.name))
return {
clients: mcp.clients.filter(c => !staleNames.has(c.name)),
tools,
commands,
resources,
stale,
}
}
/**
 * Checks whether an MCP tool name belongs to a specific server.
 *
 * @param toolName The tool name to check (wire format `mcp__<server>__<tool>`)
 * @param serverName The server name to match against
 * @returns True if the tool belongs to the specified server
 */
export function isToolFromMcpServer(
  toolName: string,
  serverName: string,
): boolean {
  return mcpInfoFromString(toolName)?.serverName === serverName
}
/**
 * Checks whether a tool comes from any MCP server, either by its wire-format
 * name prefix or its explicit `isMcp` marker.
 *
 * @param tool The tool to check
 * @returns True if the tool is from an MCP server
 */
export function isMcpTool(tool: Tool): boolean {
  if (tool.name?.startsWith('mcp__')) {
    return true
  }
  return tool.isMcp === true
}
/**
 * Checks whether a command comes from any MCP server, either by its
 * wire-format name prefix or its explicit `isMcp` marker.
 *
 * @param command The command to check
 * @returns True if the command is from an MCP server
 */
export function isMcpCommand(command: Command): boolean {
  if (command.name?.startsWith('mcp__')) {
    return true
  }
  return command.isMcp === true
}
/**
 * Describe where the config for a given MCP scope is stored on disk (or, for
 * virtual scopes, a human-readable label).
 *
 * @param scope The config scope ('user', 'project', 'local', 'dynamic', …)
 * @returns A description of where the config is stored; unknown scopes are
 *   echoed back verbatim
 */
export function describeMcpConfigFilePath(scope: ConfigScope): string {
  if (scope === 'user') {
    return getGlobalClaudeFile()
  }
  if (scope === 'project') {
    return join(getCwd(), '.mcp.json')
  }
  if (scope === 'local') {
    return `${getGlobalClaudeFile()} [project: ${getCwd()}]`
  }
  if (scope === 'dynamic') {
    return 'Dynamically configured'
  }
  if (scope === 'enterprise') {
    return getEnterpriseMcpFilePath()
  }
  if (scope === 'claudeai') {
    return 'claude.ai'
  }
  return scope
}
/**
 * Human-readable label for an MCP config scope, shown in scope pickers.
 * Unknown scopes fall back to the raw scope string.
 */
export function getScopeLabel(scope: ConfigScope): string {
  const labels: Partial<Record<ConfigScope, string>> = {
    local: 'Local config (private to you in this project)',
    project: 'Project config (shared via .mcp.json)',
    user: 'User config (available in all your projects)',
    dynamic: 'Dynamic config (from command line)',
    enterprise: 'Enterprise config (managed by your organization)',
    claudeai: 'claude.ai config',
  }
  return labels[scope] ?? scope
}
/**
 * Validates a user-supplied scope string, defaulting to 'local' when absent.
 *
 * @throws Error listing the valid scopes when `scope` is not one of them
 */
export function ensureConfigScope(scope?: string): ConfigScope {
  if (!scope) {
    return 'local'
  }
  const validScopes = ConfigScopeSchema().options
  if (validScopes.includes(scope as ConfigScope)) {
    return scope as ConfigScope
  }
  throw new Error(
    `Invalid scope: ${scope}. Must be one of: ${validScopes.join(', ')}`,
  )
}
/**
 * Validates a user-supplied transport type, defaulting to 'stdio' when absent.
 *
 * @throws Error when `type` is not one of stdio | sse | http
 */
export function ensureTransport(type?: string): 'stdio' | 'sse' | 'http' {
  if (!type) {
    return 'stdio'
  }
  const supported = ['stdio', 'sse', 'http']
  if (!supported.includes(type)) {
    throw new Error(
      `Invalid transport type: ${type}. Must be one of: stdio, sse, http`,
    )
  }
  return type as 'stdio' | 'sse' | 'http'
}
/**
 * Parses CLI-style `"Header-Name: value"` strings into a headers object.
 * Splits on the first colon only, so values may themselves contain colons;
 * both sides are trimmed. Later duplicates overwrite earlier ones.
 *
 * @throws Error on a missing colon or an empty header name
 */
export function parseHeaders(headerArray: string[]): Record<string, string> {
  const parsed: Record<string, string> = {}
  for (const raw of headerArray) {
    const separator = raw.indexOf(':')
    if (separator < 0) {
      throw new Error(
        `Invalid header format: "${raw}". Expected format: "Header-Name: value"`,
      )
    }
    const name = raw.slice(0, separator).trim()
    if (!name) {
      throw new Error(
        `Invalid header: "${raw}". Header name cannot be empty.`,
      )
    }
    parsed[name] = raw.slice(separator + 1).trim()
  }
  return parsed
}
/**
 * Resolves the user's approval state for a project-scoped (.mcp.json) server.
 *
 * Precedence: an explicit rejection wins over any approval; explicit or
 * blanket approval wins over the non-interactive auto-approvals; everything
 * else is 'pending' (the caller should prompt the user).
 *
 * @param serverName Raw server name; all comparisons happen after MCP name
 *   normalization so naming variants still match
 * @returns 'approved' | 'rejected' | 'pending'
 */
export function getProjectMcpServerStatus(
  serverName: string,
): 'approved' | 'rejected' | 'pending' {
  const settings = getSettings_DEPRECATED()
  const normalizedName = normalizeNameForMCP(serverName)
  // TODO: This fails an e2e test if the ?. is not present. This is likely a bug in the e2e test.
  // Will fix this in a follow-up PR.
  if (
    settings?.disabledMcpjsonServers?.some(
      name => normalizeNameForMCP(name) === normalizedName,
    )
  ) {
    return 'rejected'
  }
  // Approved either individually or via the blanket enable-all setting.
  if (
    settings?.enabledMcpjsonServers?.some(
      name => normalizeNameForMCP(name) === normalizedName,
    ) ||
    settings?.enableAllProjectMcpServers
  ) {
    return 'approved'
  }
  // In bypass permissions mode (--dangerously-skip-permissions), there's no way
  // to show an approval popup. Auto-approve if projectSettings is enabled since
  // the user has explicitly chosen to bypass all permission checks.
  // SECURITY: We intentionally only check skipDangerousModePermissionPrompt via
  // hasSkipDangerousModePermissionPrompt(), which reads from userSettings/localSettings/
  // flagSettings/policySettings but NOT projectSettings (repo-level .claude/settings.json).
  // This is intentional: a repo should not be able to accept the bypass dialog on behalf of
  // users. We also do NOT check getSessionBypassPermissionsMode() here because
  // sessionBypassPermissionsMode can be set from project settings before the dialog is shown,
  // which would allow RCE attacks via malicious project settings.
  if (
    hasSkipDangerousModePermissionPrompt() &&
    isSettingSourceEnabled('projectSettings')
  ) {
    return 'approved'
  }
  // In non-interactive mode (SDK, claude -p, piped input), there's no way to
  // show an approval popup. Auto-approve if projectSettings is enabled since:
  // 1. The user/developer explicitly chose to run in this mode
  // 2. For SDK, projectSettings is off by default - they must explicitly enable it
  // 3. For -p mode, the help text warns to only use in trusted directories
  if (
    getIsNonInteractiveSession() &&
    isSettingSourceEnabled('projectSettings')
  ) {
    return 'approved'
  }
  return 'pending'
}
/**
 * Get the scope/settings source for an MCP server from a tool name.
 *
 * @param toolName MCP tool name (format: mcp__serverName__toolName)
 * @returns ConfigScope or null if not an MCP tool or server not found
 */
export function getMcpServerScopeFromToolName(
  toolName: string,
): ConfigScope | null {
  if (!isMcpTool({ name: toolName } as Tool)) {
    return null
  }
  // Parse the server name out of the mcp__serverName__toolName wire format.
  const info = mcpInfoFromString(toolName)
  if (!info) {
    return null
  }
  const serverConfig = getMcpConfigByName(info.serverName)
  if (serverConfig) {
    return serverConfig.scope ?? null
  }
  // Fallback: claude.ai servers have normalized names starting with
  // "claude_ai_" but aren't in getMcpConfigByName (they're fetched async
  // separately).
  return info.serverName.startsWith('claude_ai_') ? 'claudeai' : null
}
// Type guards narrowing the McpServerConfig discriminated union by transport.

/** stdio transport; `type` is optional — absence implies the stdio default. */
function isStdioConfig(
  config: McpServerConfig,
): config is McpStdioServerConfig {
  return config.type === undefined || config.type === 'stdio'
}
/** Server-Sent Events transport. */
function isSSEConfig(config: McpServerConfig): config is McpSSEServerConfig {
  return config.type === 'sse'
}
/** Streamable HTTP transport. */
function isHTTPConfig(config: McpServerConfig): config is McpHTTPServerConfig {
  return config.type === 'http'
}
/** WebSocket transport. */
function isWebSocketConfig(
  config: McpServerConfig,
): config is McpWebSocketServerConfig {
  return config.type === 'ws'
}
/**
 * Extracts MCP server definitions from agent frontmatter and groups them by
 * server name. This is used to show agent-specific MCP servers in the /mcp
 * command.
 *
 * @param agents Array of agent definitions
 * @returns Array of AgentMcpServerInfo, grouped by server name with the list
 *   of source agents, sorted by server name
 */
export function extractAgentMcpServers(
  agents: AgentDefinition[],
): AgentMcpServerInfo[] {
  // serverName -> inline config plus every agent that declared it
  const byName = new Map<
    string,
    {
      config: McpServerConfig & { name: string }
      sourceAgents: string[]
    }
  >()
  for (const agent of agents) {
    for (const spec of agent.mcpServers ?? []) {
      // String specs reference servers already in the global config — nothing
      // to extract here.
      if (typeof spec === 'string') continue
      // An inline definition must be exactly one { [name]: config } pair.
      const entries = Object.entries(spec)
      if (entries.length !== 1) continue
      const [serverName, serverConfig] = entries[0]!
      const known = byName.get(serverName)
      if (!known) {
        byName.set(serverName, {
          config: { ...serverConfig, name: serverName } as McpServerConfig & {
            name: string
          },
          sourceAgents: [agent.agentType],
        })
      } else if (!known.sourceAgents.includes(agent.agentType)) {
        // Same server declared by another agent — record the extra source.
        known.sourceAgents.push(agent.agentType)
      }
    }
  }
  // Project into AgentMcpServerInfo, keeping only the transports that type
  // supports. The guards properly narrow the discriminated union.
  const infos: AgentMcpServerInfo[] = []
  for (const [name, { config, sourceAgents }] of byName) {
    if (isStdioConfig(config)) {
      infos.push({
        name,
        sourceAgents,
        transport: 'stdio',
        command: config.command,
        needsAuth: false,
      })
    } else if (isSSEConfig(config)) {
      infos.push({
        name,
        sourceAgents,
        transport: 'sse',
        url: config.url,
        needsAuth: true,
      })
    } else if (isHTTPConfig(config)) {
      infos.push({
        name,
        sourceAgents,
        transport: 'http',
        url: config.url,
        needsAuth: true,
      })
    } else if (isWebSocketConfig(config)) {
      infos.push({
        name,
        sourceAgents,
        transport: 'ws',
        url: config.url,
        needsAuth: false,
      })
    }
    // Skip unsupported transport types (sdk, claudeai-proxy, sse-ide, ws-ide)
    // — internal types not meant for agent MCP server display.
  }
  return infos.sort((a, b) => a.name.localeCompare(b.name))
}
/**
 * Extracts the MCP server base URL (without query string) for analytics
 * logging. Query strings are stripped because they can contain access tokens;
 * a trailing slash is removed for normalization.
 *
 * @returns The sanitized base URL, or undefined for stdio/sdk servers or
 *   when the URL fails to parse
 */
export function getLoggingSafeMcpBaseUrl(
  config: McpServerConfig,
): string | undefined {
  if (!('url' in config) || typeof config.url !== 'string') {
    return undefined
  }
  let parsed: URL
  try {
    parsed = new URL(config.url)
  } catch {
    return undefined
  }
  parsed.search = ''
  return parsed.toString().replace(/\/$/, '')
}

View File

@@ -0,0 +1,112 @@
import { logForDebugging } from 'src/utils/debug.js'
import { z } from 'zod/v4'
import { lazySchema } from '../../utils/lazySchema.js'
import {
checkStatsigFeatureGate_CACHED_MAY_BE_STALE,
getFeatureValue_CACHED_MAY_BE_STALE,
} from '../analytics/growthbook.js'
import { logEvent } from '../analytics/index.js'
import type { ConnectedMCPServer, MCPServerConnection } from './types.js'
// Mirror of AutoModeEnabledState in permissionSetup.ts — inlined because that
// file pulls in too many deps for this thin IPC module.
type AutoModeEnabledState = 'enabled' | 'disabled' | 'opt-in'
// Reads the auto-mode rollout state from the cached feature config; returns
// undefined when the value is missing or not one of the known states.
function readAutoModeEnabledState(): AutoModeEnabledState | undefined {
  const raw = getFeatureValue_CACHED_MAY_BE_STALE<{ enabled?: string }>(
    'tengu_auto_mode_config',
    {},
  )?.enabled
  switch (raw) {
    case 'enabled':
    case 'disabled':
    case 'opt-in':
      return raw
    default:
      return undefined
  }
}
// Schema for `log_event` notifications sent by the VSCode extension over MCP.
// Matched events are re-logged to analytics with a `tengu_vscode_` prefix
// (see setupVscodeSdkMcp). `eventData` is passthrough: arbitrary extra keys
// are preserved.
export const LogEventNotificationSchema = lazySchema(() =>
  z.object({
    method: z.literal('log_event'),
    params: z.object({
      eventName: z.string(),
      eventData: z.object({}).passthrough(),
    }),
  }),
)
// Store the VSCode MCP client reference for sending notifications.
// Set by setupVscodeSdkMcp when a connected 'claude-vscode' client is found;
// stays null in non-VSCode sessions.
let vscodeMcpClient: ConnectedMCPServer | null = null
/**
 * Sends a file_updated notification to the VSCode MCP server so the editor
 * can react when files are edited or written by Claude.
 *
 * No-op unless running as an internal ("ant") user with a connected
 * claude-vscode client. Delivery is fire-and-forget: failures are logged for
 * debugging, never thrown.
 */
export function notifyVscodeFileUpdated(
  filePath: string,
  oldContent: string | null,
  newContent: string | null,
): void {
  if (process.env.USER_TYPE !== 'ant') {
    return
  }
  const target = vscodeMcpClient
  if (!target) {
    return
  }
  void target.client
    .notification({
      method: 'file_updated',
      params: { filePath, oldContent, newContent },
    })
    .catch((error: Error) => {
      // Do not throw if the notification failed
      logForDebugging(
        `[VSCode] Failed to send file_updated notification: ${error.message}`,
      )
    })
}
/**
 * Sets up the special internal VSCode MCP channel for bidirectional
 * communication using notifications.
 *
 * When a connected 'claude-vscode' SDK client is present this:
 * - caches the client so notifyVscodeFileUpdated can reach the extension,
 * - forwards the extension's `log_event` notifications to analytics
 *   (prefixed `tengu_vscode_`), and
 * - pushes the experiment gates VSCode needs via a fire-and-forget
 *   `experiment_gates` notification.
 */
export function setupVscodeSdkMcp(sdkClients: MCPServerConnection[]): void {
  const client = sdkClients.find(client => client.name === 'claude-vscode')
  if (client && client.type === 'connected') {
    // Store the client reference for later use
    vscodeMcpClient = client
    client.client.setNotificationHandler(
      LogEventNotificationSchema(),
      async notification => {
        const { eventName, eventData } = notification.params
        logEvent(
          `tengu_vscode_${eventName}`,
          eventData as { [key: string]: boolean | number | undefined },
        )
      },
    )
    // Send necessary experiment gates to VSCode immediately.
    const gates: Record<string, boolean | string> = {
      tengu_vscode_review_upsell: checkStatsigFeatureGate_CACHED_MAY_BE_STALE(
        'tengu_vscode_review_upsell',
      ),
      tengu_vscode_onboarding: checkStatsigFeatureGate_CACHED_MAY_BE_STALE(
        'tengu_vscode_onboarding',
      ),
      // Browser support.
      tengu_quiet_fern: getFeatureValue_CACHED_MAY_BE_STALE(
        'tengu_quiet_fern',
        false,
      ),
      // In-band OAuth via claude_authenticate (vs. extension-native PKCE).
      tengu_vscode_cc_auth: getFeatureValue_CACHED_MAY_BE_STALE(
        'tengu_vscode_cc_auth',
        false,
      ),
    }
    // Tri-state: 'enabled' | 'disabled' | 'opt-in'. Omit if unknown so VSCode
    // fails closed (treats absent as 'disabled').
    const autoModeState = readAutoModeEnabledState()
    if (autoModeState !== undefined) {
      gates.tengu_auto_mode_state = autoModeState
    }
    void client.client.notification({
      method: 'experiment_gates',
      params: { gates },
    })
  }
}

511
src/services/mcp/xaa.ts Normal file
View File

@@ -0,0 +1,511 @@
/**
* Cross-App Access (XAA) / Enterprise Managed Authorization (SEP-990)
*
* Obtains an MCP access token WITHOUT a browser consent screen by chaining:
* 1. RFC 8693 Token Exchange at the IdP: id_token → ID-JAG
* 2. RFC 7523 JWT Bearer Grant at the AS: ID-JAG → access_token
*
* Spec refs:
* - ID-JAG (IETF draft): https://datatracker.ietf.org/doc/draft-ietf-oauth-identity-assertion-authz-grant/
* - MCP ext-auth (SEP-990): https://github.com/modelcontextprotocol/ext-auth
* - RFC 8693 (Token Exchange), RFC 7523 (JWT Bearer), RFC 9728 (PRM)
*
* Reference impl: ~/code/mcp/conformance/examples/clients/typescript/everything-client.ts:375-522
*
* Structure: four Layer-2 ops (aligned with TS SDK PR #1593's Layer-2 shapes so
* a future SDK swap is mechanical) + one Layer-3 orchestrator that composes them.
*/
import {
discoverAuthorizationServerMetadata,
discoverOAuthProtectedResourceMetadata,
} from '@modelcontextprotocol/sdk/client/auth.js'
import type { FetchLike } from '@modelcontextprotocol/sdk/shared/transport.js'
import { z } from 'zod/v4'
import { lazySchema } from '../../utils/lazySchema.js'
import { logMCPDebug } from '../../utils/log.js'
import { jsonStringify } from '../../utils/slowOperations.js'
// Per-request timeout applied to every XAA HTTP call (discovery + both
// token endpoints).
const XAA_REQUEST_TIMEOUT_MS = 30000
// RFC 8693 grant type: id_token → ID-JAG exchange at the IdP.
const TOKEN_EXCHANGE_GRANT = 'urn:ietf:params:oauth:grant-type:token-exchange'
// RFC 7523 grant type: ID-JAG → access_token exchange at the AS.
const JWT_BEARER_GRANT = 'urn:ietf:params:oauth:grant-type:jwt-bearer'
// Token-type URI the IdP must report for the issued ID-JAG.
const ID_JAG_TOKEN_TYPE = 'urn:ietf:params:oauth:token-type:id-jag'
// Token-type URI identifying the OIDC id_token used as subject_token.
const ID_TOKEN_TYPE = 'urn:ietf:params:oauth:token-type:id_token'
/**
 * Builds a FetchLike that always enforces the XAA request timeout, optionally
 * combined with a caller-supplied abort signal. Composing via AbortSignal.any
 * keeps a user-initiated cancel (e.g. Esc in the auth menu) effective on
 * in-flight requests instead of being clobbered by the timeout signal.
 */
function makeXaaFetch(abortSignal?: AbortSignal): FetchLike {
  return (url, init) => {
    const timeoutSignal = AbortSignal.timeout(XAA_REQUEST_TIMEOUT_MS)
    let signal: AbortSignal = timeoutSignal
    if (abortSignal) {
      // eslint-disable-next-line eslint-plugin-n/no-unsupported-features/node-builtins
      signal = AbortSignal.any([timeoutSignal, abortSignal])
    }
    // eslint-disable-next-line eslint-plugin-n/no-unsupported-features/node-builtins
    return fetch(url, { ...init, signal })
  }
}
// Shared instance for calls with no caller-supplied abort signal.
const defaultFetch = makeXaaFetch()
/**
 * RFC 8414 §3.3 / RFC 9728 §3.3 identifier comparison. Roundtrips through URL
 * to apply RFC 3986 §6.2.2 syntax-based normalization (lowercases scheme+host,
 * drops the default port), then strips a single trailing slash. Unparseable
 * inputs fall back to the raw string minus the trailing slash.
 */
function normalizeUrl(url: string): string {
  let canonical: string
  try {
    canonical = new URL(url).href
  } catch {
    canonical = url
  }
  return canonical.replace(/\/$/, '')
}
/**
 * Thrown by requestJwtAuthorizationGrant when the IdP token-exchange leg
 * fails. Carries `shouldClearIdToken` so callers can decide whether to drop
 * the cached id_token based on OAuth error semantics (not substring matching):
 * - 4xx / invalid_grant / invalid_token → id_token is bad, clear it
 * - 5xx → IdP is down, id_token may still be valid, keep it
 * - 200 with structurally-invalid body → protocol violation, clear it
 */
export class XaaTokenExchangeError extends Error {
  constructor(
    message: string,
    readonly shouldClearIdToken: boolean,
  ) {
    super(message)
    this.name = 'XaaTokenExchangeError'
  }
}
// Matches quoted values for known token-bearing keys regardless of nesting
// depth. Works on both parsed-then-stringified bodies AND raw text() error
// bodies from !res.ok paths — a misbehaving AS that echoes the request's
// subject_token/assertion/client_secret in a 4xx error envelope must not leak
// into debug logs.
const SENSITIVE_TOKEN_RE =
  /"(access_token|refresh_token|id_token|assertion|subject_token|client_secret)"\s*:\s*"[^"]*"/g
/** Redacts token-bearing JSON fields before a value reaches debug logs. */
function redactTokens(raw: unknown): string {
  const text = typeof raw === 'string' ? raw : jsonStringify(raw)
  return text.replace(
    SENSITIVE_TOKEN_RE,
    (_match, key) => `"${key}":"[REDACTED]"`,
  )
}
// ─── Zod Schemas ────────────────────────────────────────────────────────────

// Shape of the IdP's RFC 8693 token-exchange response. All fields optional
// here — presence/validity of access_token and issued_token_type is enforced
// by the caller so it can attach shouldClearIdToken semantics to each failure.
const TokenExchangeResponseSchema = lazySchema(() =>
  z.object({
    access_token: z.string().optional(),
    issued_token_type: z.string().optional(),
    // z.coerce tolerates IdPs that send expires_in as a string (common in
    // PHP-backed IdPs) — technically non-conformant JSON but widespread.
    expires_in: z.coerce.number().optional(),
    scope: z.string().optional(),
  }),
)
// Shape of the AS's RFC 7523 jwt-bearer token response.
const JwtBearerResponseSchema = lazySchema(() =>
  z.object({
    access_token: z.string().min(1),
    // Many ASes omit token_type since Bearer is the only value anyone uses
    // (RFC 6750). Don't reject a valid access_token over a missing label.
    token_type: z.string().default('Bearer'),
    expires_in: z.coerce.number().optional(),
    scope: z.string().optional(),
    refresh_token: z.string().optional(),
  }),
)
// ─── Layer 2: Discovery ─────────────────────────────────────────────────────
export type ProtectedResourceMetadata = {
  resource: string
  authorization_servers: string[]
}
/**
 * RFC 9728 PRM discovery via the SDK, plus RFC 9728 §3.3 resource-mismatch
 * validation (mix-up protection — TODO: upstream to SDK).
 */
export async function discoverProtectedResource(
  serverUrl: string,
  opts?: { fetchFn?: FetchLike },
): Promise<ProtectedResourceMetadata> {
  const fetchFn = opts?.fetchFn ?? defaultFetch
  let prm
  try {
    prm = await discoverOAuthProtectedResourceMetadata(
      serverUrl,
      undefined,
      fetchFn,
    )
  } catch (e) {
    const reason = e instanceof Error ? e.message : String(e)
    throw new Error(`XAA: PRM discovery failed: ${reason}`)
  }
  // Need both the resource identifier and at least one AS to proceed.
  if (!prm.resource || !prm.authorization_servers?.[0]) {
    throw new Error(
      'XAA: PRM discovery failed: PRM missing resource or authorization_servers',
    )
  }
  // RFC 9728 §3.3: the advertised resource must identify the server we asked
  // about, otherwise a hostile server could redirect us to another AS.
  if (normalizeUrl(prm.resource) !== normalizeUrl(serverUrl)) {
    throw new Error(
      `XAA: PRM discovery failed: PRM resource mismatch: expected ${serverUrl}, got ${prm.resource}`,
    )
  }
  return {
    resource: prm.resource,
    authorization_servers: prm.authorization_servers,
  }
}
export type AuthorizationServerMetadata = {
  issuer: string
  token_endpoint: string
  grant_types_supported?: string[]
  token_endpoint_auth_methods_supported?: string[]
}
/**
 * AS metadata discovery via the SDK (RFC 8414 + OIDC fallback), plus RFC 8414
 * §3.3 issuer-mismatch validation (mix-up protection — TODO: upstream to SDK).
 */
export async function discoverAuthorizationServer(
  asUrl: string,
  opts?: { fetchFn?: FetchLike },
): Promise<AuthorizationServerMetadata> {
  const meta = await discoverAuthorizationServerMetadata(asUrl, {
    fetchFn: opts?.fetchFn ?? defaultFetch,
  })
  if (!meta?.issuer || !meta.token_endpoint) {
    throw new Error(
      `XAA: AS metadata discovery failed: no valid metadata at ${asUrl}`,
    )
  }
  // RFC 8414 §3.3: the reported issuer must match the URL we fetched from.
  if (normalizeUrl(meta.issuer) !== normalizeUrl(asUrl)) {
    throw new Error(
      `XAA: AS metadata discovery failed: issuer mismatch: expected ${asUrl}, got ${meta.issuer}`,
    )
  }
  // RFC 8414 §3.3 / RFC 9728 §3 require HTTPS. A PRM-advertised http:// AS
  // that self-consistently reports an http:// issuer would pass the mismatch
  // check above, then we'd POST id_token + client_secret over plaintext.
  const endpointProtocol = new URL(meta.token_endpoint).protocol
  if (endpointProtocol !== 'https:') {
    throw new Error(
      `XAA: refusing non-HTTPS token endpoint: ${meta.token_endpoint}`,
    )
  }
  return {
    issuer: meta.issuer,
    token_endpoint: meta.token_endpoint,
    grant_types_supported: meta.grant_types_supported,
    token_endpoint_auth_methods_supported:
      meta.token_endpoint_auth_methods_supported,
  }
}
// ─── Layer 2: Exchange ──────────────────────────────────────────────────────
export type JwtAuthGrantResult = {
  /** The ID-JAG (Identity Assertion Authorization Grant) */
  jwtAuthGrant: string
  // Lifetime of the ID-JAG in seconds, when the IdP reports one.
  expiresIn?: number
  // Scope granted by the IdP, when reported.
  scope?: string
}
/**
 * RFC 8693 Token Exchange at the IdP: id_token → ID-JAG.
 * Validates `issued_token_type` is `urn:ietf:params:oauth:token-type:id-jag`.
 *
 * `clientSecret` is optional — sent via `client_secret_post` if present.
 * Some IdPs register the client as confidential even when they advertise
 * `token_endpoint_auth_method: "none"`.
 *
 * Throws XaaTokenExchangeError on any failure; its `shouldClearIdToken` flag
 * tells callers whether the cached id_token should be dropped (see the class
 * doc for the per-case semantics).
 *
 * TODO(xaa-ga): consult `token_endpoint_auth_methods_supported` from IdP
 * OIDC metadata and support `client_secret_basic`, mirroring the AS-side
 * selection in `performCrossAppAccess`. All major IdPs accept POST today.
 */
export async function requestJwtAuthorizationGrant(opts: {
  tokenEndpoint: string
  audience: string
  resource: string
  idToken: string
  clientId: string
  clientSecret?: string
  scope?: string
  fetchFn?: FetchLike
}): Promise<JwtAuthGrantResult> {
  const fetchFn = opts.fetchFn ?? defaultFetch
  const params = new URLSearchParams({
    grant_type: TOKEN_EXCHANGE_GRANT,
    requested_token_type: ID_JAG_TOKEN_TYPE,
    audience: opts.audience,
    resource: opts.resource,
    subject_token: opts.idToken,
    subject_token_type: ID_TOKEN_TYPE,
    client_id: opts.clientId,
  })
  if (opts.clientSecret) {
    params.set('client_secret', opts.clientSecret)
  }
  if (opts.scope) {
    params.set('scope', opts.scope)
  }
  const res = await fetchFn(opts.tokenEndpoint, {
    method: 'POST',
    headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
    body: params,
  })
  if (!res.ok) {
    const body = redactTokens(await res.text()).slice(0, 200)
    // 4xx → id_token rejected (invalid_grant etc.), clear cache.
    // 5xx → IdP outage, id_token may still be valid, preserve it.
    const shouldClear = res.status < 500
    throw new XaaTokenExchangeError(
      `XAA: token exchange failed: HTTP ${res.status}: ${body}`,
      shouldClear,
    )
  }
  let rawExchange: unknown
  try {
    rawExchange = await res.json()
  } catch {
    // Transient network condition (captive portal, proxy) — don't clear id_token.
    throw new XaaTokenExchangeError(
      `XAA: token exchange returned non-JSON (captive portal?) at ${opts.tokenEndpoint}`,
      false,
    )
  }
  // From here on the IdP answered 200: a malformed body is a protocol
  // violation, so every failure below clears the cached id_token.
  const exchangeParsed = TokenExchangeResponseSchema().safeParse(rawExchange)
  if (!exchangeParsed.success) {
    throw new XaaTokenExchangeError(
      `XAA: token exchange response did not match expected shape: ${redactTokens(rawExchange)}`,
      true,
    )
  }
  const result = exchangeParsed.data
  if (!result.access_token) {
    throw new XaaTokenExchangeError(
      `XAA: token exchange response missing access_token: ${redactTokens(result)}`,
      true,
    )
  }
  if (result.issued_token_type !== ID_JAG_TOKEN_TYPE) {
    throw new XaaTokenExchangeError(
      `XAA: token exchange returned unexpected issued_token_type: ${result.issued_token_type}`,
      true,
    )
  }
  return {
    jwtAuthGrant: result.access_token,
    expiresIn: result.expires_in,
    scope: result.scope,
  }
}
// OAuth token response fields (RFC 6749 §5.1 names) returned by the AS.
export type XaaTokenResult = {
  access_token: string
  token_type: string
  expires_in?: number
  scope?: string
  refresh_token?: string
}
export type XaaResult = XaaTokenResult & {
  /**
   * The AS issuer URL discovered via PRM. Callers must persist this as
   * `discoveryState.authorizationServerUrl` so that refresh (auth.ts _doRefresh)
   * and revocation (revokeServerTokens) can locate the token/revocation
   * endpoints — the MCP URL is not the AS URL in typical XAA setups.
   */
  authorizationServerUrl: string
}
/**
 * RFC 7523 JWT Bearer Grant at the AS: ID-JAG → access_token.
 *
 * `authMethod` defaults to `client_secret_basic` (Base64 header, not body
 * params) — the SEP-990 conformance test requires this. Only set
 * `client_secret_post` if the AS explicitly requires it.
 */
export async function exchangeJwtAuthGrant(opts: {
  tokenEndpoint: string
  assertion: string
  clientId: string
  clientSecret: string
  authMethod?: 'client_secret_basic' | 'client_secret_post'
  scope?: string
  fetchFn?: FetchLike
}): Promise<XaaTokenResult> {
  const fetchFn = opts.fetchFn ?? defaultFetch
  const authMethod = opts.authMethod ?? 'client_secret_basic'
  const body = new URLSearchParams({
    grant_type: JWT_BEARER_GRANT,
    assertion: opts.assertion,
  })
  if (opts.scope) {
    body.set('scope', opts.scope)
  }
  const headers: Record<string, string> = {
    'Content-Type': 'application/x-www-form-urlencoded',
  }
  if (authMethod === 'client_secret_post') {
    // Credentials travel in the form body.
    body.set('client_id', opts.clientId)
    body.set('client_secret', opts.clientSecret)
  } else {
    // client_secret_basic: RFC 6749 §2.3.1 — form-urlencode each credential,
    // then Base64 the "id:secret" pair into the Authorization header.
    const credentials = `${encodeURIComponent(opts.clientId)}:${encodeURIComponent(opts.clientSecret)}`
    headers.Authorization = `Basic ${Buffer.from(credentials).toString('base64')}`
  }
  const res = await fetchFn(opts.tokenEndpoint, {
    method: 'POST',
    headers,
    body,
  })
  if (!res.ok) {
    const snippet = redactTokens(await res.text()).slice(0, 200)
    throw new Error(`XAA: jwt-bearer grant failed: HTTP ${res.status}: ${snippet}`)
  }
  let payload: unknown
  try {
    payload = await res.json()
  } catch {
    throw new Error(
      `XAA: jwt-bearer grant returned non-JSON (captive portal?) at ${opts.tokenEndpoint}`,
    )
  }
  const parsed = JwtBearerResponseSchema().safeParse(payload)
  if (!parsed.success) {
    throw new Error(
      `XAA: jwt-bearer response did not match expected shape: ${redactTokens(payload)}`,
    )
  }
  return parsed.data
}
// ─── Layer 3: Orchestrator ──────────────────────────────────────────────────
/**
 * Config needed to run the full XAA orchestrator.
 * Mirrors the conformance test context shape (see ClientConformanceContextSchema).
 *
 * Two trust domains are represented: the MCP server's authorization server
 * (`clientId`/`clientSecret`) and the enterprise IdP (the `idp*` fields).
 */
export type XaaConfig = {
  /** Client ID registered at the MCP server's authorization server */
  clientId: string
  /** Client secret for the MCP server's authorization server */
  clientSecret: string
  /** Client ID registered at the IdP (for the token-exchange request) */
  idpClientId: string
  /** Optional IdP client secret (client_secret_post) — some IdPs require it */
  idpClientSecret?: string
  /** The user's OIDC id_token from the IdP login */
  idpIdToken: string
  /** IdP token endpoint (where to send the RFC 8693 token-exchange) */
  idpTokenEndpoint: string
}
/**
 * Full XAA flow: PRM → AS metadata → token-exchange → jwt-bearer → access_token.
 * Thin composition of the four Layer-2 ops. Used by performMCPXaaAuth,
 * ClaudeAuthProvider.xaaRefresh, and the try-xaa*.ts debug scripts.
 *
 * @param serverUrl The MCP server URL (e.g. `https://mcp.example.com/mcp`)
 * @param config IdP + AS credentials
 * @param serverName Server name for debug logging
 * @param abortSignal Cancels in-flight HTTP requests
 */
export async function performCrossAppAccess(
  serverUrl: string,
  config: XaaConfig,
  serverName = 'xaa',
  abortSignal?: AbortSignal,
): Promise<XaaResult> {
  const fetchFn = makeXaaFetch(abortSignal)

  // Step 1: protected-resource metadata tells us which ASes guard this server.
  logMCPDebug(serverName, `XAA: discovering PRM for ${serverUrl}`)
  const prm = await discoverProtectedResource(serverUrl, { fetchFn })
  logMCPDebug(
    serverName,
    `XAA: discovered resource=${prm.resource} ASes=[${prm.authorization_servers.join(', ')}]`,
  )

  // Step 2: try each advertised AS in order. grant_types_supported is OPTIONAL
  // per RFC 8414 §2 — only skip if the AS explicitly advertises a list that
  // omits jwt-bearer. If absent, let the token endpoint decide.
  const failures: string[] = []
  let asMeta: AuthorizationServerMetadata | undefined
  for (const asUrl of prm.authorization_servers) {
    let meta: AuthorizationServerMetadata
    try {
      meta = await discoverAuthorizationServer(asUrl, { fetchFn })
    } catch (e) {
      if (abortSignal?.aborted) throw e
      failures.push(`${asUrl}: ${e instanceof Error ? e.message : String(e)}`)
      continue
    }
    const grants = meta.grant_types_supported
    if (grants && !grants.includes(JWT_BEARER_GRANT)) {
      failures.push(
        `${asUrl}: does not advertise jwt-bearer grant (supported: ${grants.join(', ')})`,
      )
      continue
    }
    asMeta = meta
    break
  }
  if (!asMeta) {
    throw new Error(
      `XAA: no authorization server supports jwt-bearer. Tried: ${failures.join('; ')}`,
    )
  }

  // Step 3: pick auth method from what the AS advertises. We handle
  // client_secret_basic and client_secret_post; if the AS only supports post,
  // honor that, else default to basic (SEP-990 conformance expectation).
  const advertised = asMeta.token_endpoint_auth_methods_supported
  const postOnly =
    !!advertised &&
    !advertised.includes('client_secret_basic') &&
    advertised.includes('client_secret_post')
  const authMethod: 'client_secret_basic' | 'client_secret_post' = postOnly
    ? 'client_secret_post'
    : 'client_secret_basic'
  logMCPDebug(
    serverName,
    `XAA: AS issuer=${asMeta.issuer} token_endpoint=${asMeta.token_endpoint} auth_method=${authMethod}`,
  )

  // Step 4: RFC 8693 token exchange at the IdP — id_token in, ID-JAG out.
  logMCPDebug(serverName, `XAA: exchanging id_token for ID-JAG at IdP`)
  const jag = await requestJwtAuthorizationGrant({
    tokenEndpoint: config.idpTokenEndpoint,
    audience: asMeta.issuer,
    resource: prm.resource,
    idToken: config.idpIdToken,
    clientId: config.idpClientId,
    clientSecret: config.idpClientSecret,
    fetchFn,
  })
  logMCPDebug(serverName, `XAA: ID-JAG obtained`)

  // Step 5: RFC 7523 jwt-bearer grant at the AS — ID-JAG in, access_token out.
  logMCPDebug(serverName, `XAA: exchanging ID-JAG for access_token at AS`)
  const tokens = await exchangeJwtAuthGrant({
    tokenEndpoint: asMeta.token_endpoint,
    assertion: jag.jwtAuthGrant,
    clientId: config.clientId,
    clientSecret: config.clientSecret,
    authMethod,
    fetchFn,
  })
  logMCPDebug(serverName, `XAA: access_token obtained`)

  return { ...tokens, authorizationServerUrl: asMeta.issuer }
}

View File

@@ -0,0 +1,487 @@
/**
* XAA IdP Login — acquires an OIDC id_token from an enterprise IdP via the
* standard authorization_code + PKCE flow, then caches it by IdP issuer.
*
* This is the "one browser pop" in the XAA value prop: one IdP login → N silent
* MCP server auths. The id_token is cached in the keychain and reused until expiry.
*/
import {
exchangeAuthorization,
startAuthorization,
} from '@modelcontextprotocol/sdk/client/auth.js'
import {
type OAuthClientInformation,
type OpenIdProviderDiscoveryMetadata,
OpenIdProviderDiscoveryMetadataSchema,
} from '@modelcontextprotocol/sdk/shared/auth.js'
import { randomBytes } from 'crypto'
import { createServer, type Server } from 'http'
import { parse } from 'url'
import xss from 'xss'
import { openBrowser } from '../../utils/browser.js'
import { isEnvTruthy } from '../../utils/envUtils.js'
import { toError } from '../../utils/errors.js'
import { logMCPDebug } from '../../utils/log.js'
import { getPlatform } from '../../utils/platform.js'
import { getSecureStorage } from '../../utils/secureStorage/index.js'
import { getInitialSettings } from '../../utils/settings/settings.js'
import { jsonParse } from '../../utils/slowOperations.js'
import { buildRedirectUri, findAvailablePort } from './oauthPort.js'
/** Feature gate: XAA flows run only when CLAUDE_CODE_ENABLE_XAA is truthy. */
export function isXaaEnabled(): boolean {
  const flag = process.env.CLAUDE_CODE_ENABLE_XAA
  return isEnvTruthy(flag)
}
/** Shape of the env-gated `xaaIdp` settings block. */
export type XaaIdpSettings = {
  /** IdP issuer URL (base for OIDC discovery) */
  issuer: string
  /** Client ID registered at the IdP */
  clientId: string
  /** Fixed loopback callback port, for IdPs with a pre-registered redirect URI */
  callbackPort?: number
}
/**
 * Typed accessor for settings.xaaIdp. The field is env-gated in SettingsSchema
 * so it doesn't surface in SDK types/docs — which means the inferred settings
 * type doesn't have it at compile time. This is the one cast.
 */
export function getXaaIdpSettings(): XaaIdpSettings | undefined {
  const settings = getInitialSettings() as { xaaIdp?: XaaIdpSettings }
  return settings.xaaIdp
}
// How long to wait for the user to complete the browser login (5 minutes).
const IDP_LOGIN_TIMEOUT_MS = 5 * 60 * 1000
// Per-request timeout for IdP HTTP calls (discovery, token exchange).
const IDP_REQUEST_TIMEOUT_MS = 30000
// Treat id_tokens this close to expiry as expired so they aren't used mid-flight.
const ID_TOKEN_EXPIRY_BUFFER_S = 60
/** Options for acquireIdpIdToken — the OIDC authorization_code + PKCE login. */
export type IdpLoginOptions = {
  /** IdP issuer URL (base for OIDC discovery) */
  idpIssuer: string
  /** Client ID registered at the IdP */
  idpClientId: string
  /**
   * Optional IdP client secret for confidential clients. Auth method
   * (client_secret_post, client_secret_basic, none) is chosen per IdP
   * metadata. Omit for public clients (PKCE only).
   */
  idpClientSecret?: string
  /**
   * Fixed callback port. If omitted, a random port is chosen.
   * Use this when the IdP client is pre-registered with a specific loopback
   * redirect URI (RFC 8252 §7.3 says IdPs SHOULD accept any port for
   * http://localhost, but many don't).
   */
  callbackPort?: number
  /** Called with the authorization URL before (or instead of) opening the browser */
  onAuthorizationUrl?: (url: string) => void
  /** If true, don't auto-open the browser — just call onAuthorizationUrl */
  skipBrowserOpen?: boolean
  /** Aborting cancels the pending callback wait and rejects the login */
  abortSignal?: AbortSignal
}
/**
 * Normalize an IdP issuer URL for use as a cache key: strip trailing slashes,
 * lowercase host. Issuers from config and from OIDC discovery may differ
 * cosmetically but should hit the same cache slot. Exported so the setup
 * command can compare issuers using the same normalization as keychain ops.
 */
export function issuerKey(issuer: string): string {
  let parsed: URL
  try {
    parsed = new URL(issuer)
  } catch {
    // Not a parseable URL — best effort: just drop trailing slashes.
    return issuer.replace(/\/+$/, '')
  }
  parsed.host = parsed.host.toLowerCase()
  parsed.pathname = parsed.pathname.replace(/\/+$/, '')
  return parsed.toString()
}
/**
 * Read a cached id_token for the given IdP issuer from secure storage.
 * Returns undefined if missing or within ID_TOKEN_EXPIRY_BUFFER_S of expiring.
 */
export function getCachedIdpIdToken(idpIssuer: string): string | undefined {
  const cached = getSecureStorage().read()?.mcpXaaIdp?.[issuerKey(idpIssuer)]
  if (!cached) return undefined
  // Apply the expiry buffer so a token doesn't expire mid-flow.
  const expiresSoon =
    cached.expiresAt - Date.now() <= ID_TOKEN_EXPIRY_BUFFER_S * 1000
  return expiresSoon ? undefined : cached.idToken
}
/** Persist an id_token (keyed by normalized issuer) into secure storage. */
function saveIdpIdToken(
  idpIssuer: string,
  idToken: string,
  expiresAt: number,
): void {
  const storage = getSecureStorage()
  const current = storage.read() || {}
  const mcpXaaIdp = {
    ...current.mcpXaaIdp,
    [issuerKey(idpIssuer)]: { idToken, expiresAt },
  }
  storage.update({ ...current, mcpXaaIdp })
}
/**
 * Save an externally-obtained id_token into the XAA cache — the exact slot
 * getCachedIdpIdToken/acquireIdpIdToken read from. Used by conformance testing
 * where the mock IdP hands us a pre-signed token but doesn't serve /authorize.
 *
 * Parses the JWT's exp claim for cache TTL (same as acquireIdpIdToken).
 * Returns the expiresAt it computed so the caller can report it.
 */
export function saveIdpIdTokenFromJwt(
  idpIssuer: string,
  idToken: string,
): number {
  const exp = jwtExp(idToken)
  // No usable exp claim → assume a 1h lifetime (same default as acquireIdpIdToken).
  const expiresAt = exp ? exp * 1000 : Date.now() + 3600 * 1000
  saveIdpIdToken(idpIssuer, idToken, expiresAt)
  return expiresAt
}
/** Remove the cached id_token for the given issuer; no-op when absent. */
export function clearIdpIdToken(idpIssuer: string): void {
  const key = issuerKey(idpIssuer)
  const storage = getSecureStorage()
  const data = storage.read()
  if (!data?.mcpXaaIdp?.[key]) return
  delete data.mcpXaaIdp[key]
  storage.update(data)
}
/**
 * Save an IdP client secret to secure storage, keyed by IdP issuer.
 * Separate from MCP server AS secrets — different trust domain.
 * Returns the storage update result so callers can surface keychain
 * failures (locked keychain, `security` nonzero exit) instead of
 * silently dropping the secret and failing later with invalid_client.
 */
export function saveIdpClientSecret(
  idpIssuer: string,
  clientSecret: string,
): { success: boolean; warning?: string } {
  const storage = getSecureStorage()
  const current = storage.read() || {}
  const mcpXaaIdpConfig = {
    ...current.mcpXaaIdpConfig,
    [issuerKey(idpIssuer)]: { clientSecret },
  }
  return storage.update({ ...current, mcpXaaIdpConfig })
}
/**
 * Read the IdP client secret for the given issuer from secure storage.
 */
export function getIdpClientSecret(idpIssuer: string): string | undefined {
  const entry =
    getSecureStorage().read()?.mcpXaaIdpConfig?.[issuerKey(idpIssuer)]
  return entry?.clientSecret
}
/**
 * Remove the IdP client secret for the given issuer from secure storage.
 * Used by `claude mcp xaa clear`. No-op when absent.
 */
export function clearIdpClientSecret(idpIssuer: string): void {
  const key = issuerKey(idpIssuer)
  const storage = getSecureStorage()
  const data = storage.read()
  if (!data?.mcpXaaIdpConfig?.[key]) return
  delete data.mcpXaaIdpConfig[key]
  storage.update(data)
}
// OIDC Discovery §4.1 says `{issuer}/.well-known/openid-configuration` — path
// APPEND, not replace. `new URL('/.well-known/...', issuer)` with a leading
// slash is a WHATWG absolute-path reference and drops the issuer's pathname,
// breaking Azure AD (`login.microsoftonline.com/{tenant}/v2.0`), Okta custom
// auth servers, and Keycloak realms. Trailing-slash base + relative path is
// the fix. Exported because auth.ts needs the same discovery.
export async function discoverOidc(
  idpIssuer: string,
): Promise<OpenIdProviderDiscoveryMetadata> {
  const withSlash = idpIssuer.endsWith('/') ? idpIssuer : `${idpIssuer}/`
  const url = new URL('.well-known/openid-configuration', withSlash)
  // eslint-disable-next-line eslint-plugin-n/no-unsupported-features/node-builtins
  const res = await fetch(url, {
    headers: { Accept: 'application/json' },
    signal: AbortSignal.timeout(IDP_REQUEST_TIMEOUT_MS),
  })
  if (!res.ok) {
    throw new Error(
      `XAA IdP: OIDC discovery failed: HTTP ${res.status} at ${url}`,
    )
  }
  // Captive portals and proxy auth pages return 200 with HTML. res.json()
  // throws a raw SyntaxError before safeParse can give a useful message.
  let raw: unknown
  try {
    raw = await res.json()
  } catch {
    throw new Error(
      `XAA IdP: OIDC discovery returned non-JSON at ${url} (captive portal or proxy?)`,
    )
  }
  const parsed = OpenIdProviderDiscoveryMetadataSchema.safeParse(raw)
  if (!parsed.success) {
    throw new Error(`XAA IdP: invalid OIDC metadata: ${parsed.error.message}`)
  }
  const meta = parsed.data
  // Never send client credentials to a plaintext endpoint.
  if (new URL(meta.token_endpoint).protocol !== 'https:') {
    throw new Error(
      `XAA IdP: refusing non-HTTPS token endpoint: ${meta.token_endpoint}`,
    )
  }
  return meta
}
/**
 * Decode the exp claim from a JWT without verifying its signature.
 * Returns undefined if parsing fails or exp is absent. Used only to
 * derive a cache TTL.
 *
 * Why no signature/iss/aud/nonce validation: per SEP-990, this id_token
 * is the RFC 8693 subject_token in a token-exchange at the IdP's own
 * token endpoint. The IdP validates its own token there. An attacker who
 * can mint a token that fools the IdP has no need to fool us first; an
 * attacker who can't, hands us garbage and gets a 401 from the IdP. The
 * --id-token injection seam is likewise safe: bad input → rejected later,
 * no privesc. Client-side verification would add code and no security.
 */
function jwtExp(jwt: string): number | undefined {
  const segments = jwt.split('.')
  if (segments.length !== 3) return undefined
  try {
    const decoded = Buffer.from(segments[1]!, 'base64url').toString('utf-8')
    const claims = jsonParse(decoded) as { exp?: number }
    if (typeof claims.exp !== 'number') return undefined
    return claims.exp
  } catch {
    return undefined
  }
}
/**
 * Wait for the OAuth authorization code on a local callback server.
 * Returns the code once /callback is hit with a matching state.
 *
 * `onListening` fires after the socket is actually bound — use it to defer
 * browser-open so EADDRINUSE surfaces before a spurious tab pops open.
 *
 * @param port Loopback port to bind on 127.0.0.1
 * @param expectedState CSRF state the callback must echo back
 * @param abortSignal Rejects the wait when aborted
 * @param onListening Invoked once the socket is bound (browser-open hook)
 * @throws Error on IdP error response, state mismatch, missing code,
 *   port-in-use, abort, or login timeout
 */
function waitForCallback(
  port: number,
  expectedState: string,
  abortSignal: AbortSignal | undefined,
  onListening: () => void,
): Promise<string> {
  let server: Server | null = null
  let timeoutId: NodeJS.Timeout | null = null
  let abortHandler: (() => void) | null = null
  // Tear down the server, timeout, and abort listener; idempotent via nulling.
  const cleanup = () => {
    server?.removeAllListeners()
    // Defensive: removeAllListeners() strips the error handler, so swallow any late error during close
    server?.on('error', () => {})
    server?.close()
    server = null
    if (timeoutId) {
      clearTimeout(timeoutId)
      timeoutId = null
    }
    if (abortSignal && abortHandler) {
      abortSignal.removeEventListener('abort', abortHandler)
      abortHandler = null
    }
  }
  return new Promise<string>((resolve, reject) => {
    // Settle-once guards: the server can receive multiple requests, and the
    // timeout/abort paths race the callback path.
    let resolved = false
    const resolveOnce = (v: string) => {
      if (resolved) return
      resolved = true
      cleanup()
      resolve(v)
    }
    const rejectOnce = (e: Error) => {
      if (resolved) return
      resolved = true
      cleanup()
      reject(e)
    }
    if (abortSignal) {
      abortHandler = () => rejectOnce(new Error('XAA IdP: login cancelled'))
      if (abortSignal.aborted) {
        abortHandler()
        return
      }
      abortSignal.addEventListener('abort', abortHandler, { once: true })
    }
    server = createServer((req, res) => {
      const parsed = parse(req.url || '', true)
      if (parsed.pathname !== '/callback') {
        res.writeHead(404)
        res.end()
        return
      }
      const code = parsed.query.code as string | undefined
      const state = parsed.query.state as string | undefined
      const err = parsed.query.error as string | undefined
      if (err) {
        // IdP reported an error; echo it (XSS-escaped) into the browser page.
        const desc = parsed.query.error_description as string | undefined
        const safeErr = xss(err)
        const safeDesc = desc ? xss(desc) : ''
        res.writeHead(400, { 'Content-Type': 'text/html' })
        res.end(
          `<html><body><h3>IdP login failed</h3><p>${safeErr}</p><p>${safeDesc}</p></body></html>`,
        )
        // Fix: the description was concatenated onto the error code with no
        // separator ("access_deniedUser denied…"); join with ": " instead.
        rejectOnce(new Error(`XAA IdP: ${err}${desc ? `: ${desc}` : ''}`))
        return
      }
      if (state !== expectedState) {
        res.writeHead(400, { 'Content-Type': 'text/html' })
        res.end('<html><body><h3>State mismatch</h3></body></html>')
        rejectOnce(new Error('XAA IdP: state mismatch (possible CSRF)'))
        return
      }
      if (!code) {
        res.writeHead(400, { 'Content-Type': 'text/html' })
        res.end('<html><body><h3>Missing code</h3></body></html>')
        rejectOnce(new Error('XAA IdP: callback missing code'))
        return
      }
      res.writeHead(200, { 'Content-Type': 'text/html' })
      res.end(
        '<html><body><h3>IdP login complete — you can close this window.</h3></body></html>',
      )
      resolveOnce(code)
    })
    server.on('error', (err: NodeJS.ErrnoException) => {
      if (err.code === 'EADDRINUSE') {
        const findCmd =
          getPlatform() === 'windows'
            ? `netstat -ano | findstr :${port}`
            : `lsof -ti:${port} -sTCP:LISTEN`
        rejectOnce(
          new Error(
            `XAA IdP: callback port ${port} is already in use. Run \`${findCmd}\` to find the holder.`,
          ),
        )
      } else {
        rejectOnce(new Error(`XAA IdP: callback server failed: ${err.message}`))
      }
    })
    // Bind to loopback only; onListening (browser-open) runs once bound.
    server.listen(port, '127.0.0.1', () => {
      try {
        onListening()
      } catch (e) {
        rejectOnce(toError(e))
      }
    })
    server.unref()
    // Don't hang the process forever if the user abandons the browser flow.
    timeoutId = setTimeout(
      rej => rej(new Error('XAA IdP: login timed out')),
      IDP_LOGIN_TIMEOUT_MS,
      rejectOnce,
    )
    timeoutId.unref()
  })
}
/**
 * Acquire an id_token from the IdP: return cached if valid, otherwise run
 * the full OIDC authorization_code + PKCE flow (one browser pop).
 */
export async function acquireIdpIdToken(
  opts: IdpLoginOptions,
): Promise<string> {
  const { idpIssuer, idpClientId } = opts

  // Fast path: a previously cached, unexpired id_token.
  const cachedToken = getCachedIdpIdToken(idpIssuer)
  if (cachedToken) {
    logMCPDebug('xaa', `Using cached id_token for ${idpIssuer}`)
    return cachedToken
  }
  logMCPDebug('xaa', `No cached id_token for ${idpIssuer}; starting OIDC login`)

  const oidcMeta = await discoverOidc(idpIssuer)
  const callbackPort = opts.callbackPort ?? (await findAvailablePort())
  const redirectUri = buildRedirectUri(callbackPort)
  const csrfState = randomBytes(32).toString('base64url')
  const clientInformation: OAuthClientInformation = {
    client_id: idpClientId,
    ...(opts.idpClientSecret ? { client_secret: opts.idpClientSecret } : {}),
  }
  const { authorizationUrl, codeVerifier } = await startAuthorization(
    idpIssuer,
    {
      metadata: oidcMeta,
      clientInformation,
      redirectUrl: redirectUri,
      scope: 'openid',
      state: csrfState,
    },
  )

  // Open the browser only once the callback socket is actually bound —
  // listen() is async, and with a fixed callbackPort an EADDRINUSE would
  // otherwise surface only after a useless tab has already popped. Mirrors
  // the auth.ts pattern of wrapping sdkAuth inside server.listen's callback.
  const authorizationCode = await waitForCallback(
    callbackPort,
    csrfState,
    opts.abortSignal,
    () => {
      opts.onAuthorizationUrl?.(authorizationUrl.toString())
      if (!opts.skipBrowserOpen) {
        logMCPDebug('xaa', `Opening browser to IdP authorization endpoint`)
        void openBrowser(authorizationUrl.toString())
      }
    },
  )

  const tokens = await exchangeAuthorization(idpIssuer, {
    metadata: oidcMeta,
    clientInformation,
    authorizationCode,
    codeVerifier,
    redirectUri,
    fetchFn: (url, init) =>
      // eslint-disable-next-line eslint-plugin-n/no-unsupported-features/node-builtins
      fetch(url, {
        ...init,
        signal: AbortSignal.timeout(IDP_REQUEST_TIMEOUT_MS),
      }),
  })
  if (!tokens.id_token) {
    throw new Error(
      'XAA IdP: token response missing id_token (check scope=openid)',
    )
  }

  // Cache TTL: prefer the id_token's own exp claim; expires_in belongs to
  // the access_token and may differ from the id_token lifetime. If neither
  // is present, default to 1h.
  const exp = jwtExp(tokens.id_token)
  const expiresAt = exp
    ? exp * 1000
    : Date.now() + (tokens.expires_in ?? 3600) * 1000
  saveIdpIdToken(idpIssuer, tokens.id_token, expiresAt)
  logMCPDebug(
    'xaa',
    `Cached id_token for ${idpIssuer} (expires ${new Date(expiresAt).toISOString()})`,
  )
  return tokens.id_token
}