diff --git a/.github/workflows/mcp.yml b/.github/workflows/mcp.yml new file mode 100644 index 00000000..46a7f5d6 --- /dev/null +++ b/.github/workflows/mcp.yml @@ -0,0 +1,33 @@ +name: MCP Checks + +on: [ push, pull_request ] + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + mcp-unit: + runs-on: ubuntu-latest + + steps: + - name: Checkout source code + uses: actions/checkout@v6 + + - name: Install Rust stable toolchain + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain stable + rustup override set stable + rustup component add clippy + + - name: Check MCP crate builds + run: cargo check -p ldk-server-mcp + + - name: Run MCP crate tests + run: cargo test -p ldk-server-mcp + + - name: Run MCP crate clippy + run: cargo clippy -p ldk-server-mcp --all-targets -- -D warnings diff --git a/Cargo.lock b/Cargo.lock index 9fa50f49..f9b3b8e5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1316,6 +1316,17 @@ dependencies = [ "tonic", ] +[[package]] +name = "ldk-server-mcp" +version = "0.1.0" +dependencies = [ + "ldk-server-client", + "serde", + "serde_json", + "tokio", + "toml", +] + [[package]] name = "libc" version = "0.2.177" diff --git a/Cargo.toml b/Cargo.toml index 6b09eb62..f9ccb552 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [workspace] resolver = "2" -members = ["ldk-server-cli", "ldk-server-client", "ldk-server-grpc", "ldk-server"] +members = ["ldk-server-cli", "ldk-server-client", "ldk-server-grpc", "ldk-server", "ldk-server-mcp"] exclude = ["e2e-tests"] [profile.release] diff --git a/README.md b/README.md index f14d2881..3000313e 100644 --- a/README.md +++ b/README.md @@ -9,6 +9,14 @@ The primary goal of LDK Server is to provide an efficient, stable, and API-first a Lightning Network node. 
With its streamlined setup, LDK Server enables users to easily set up, configure, and run a Lightning node while exposing a robust, language-agnostic API via [Protocol Buffers (Protobuf)](https://protobuf.dev/). +## Workspace Crates + +- `ldk-server`: daemon that runs the Lightning node and exposes the API +- `ldk-server-cli`: CLI client for the server API +- `ldk-server-client`: Rust client library for authenticated TLS gRPC calls +- `ldk-server-grpc`: generated protobuf and shared gRPC types +- `ldk-server-mcp`: stdio MCP bridge exposing unary `ldk-server` RPCs as MCP tools + ### Features - **Out-of-the-Box Lightning Node**: @@ -46,6 +54,11 @@ git clone https://github.com/lightningdevkit/ldk-server.git cargo build ``` +Build just the MCP bridge: +``` +cargo build -p ldk-server-mcp +``` + ### Running - Using a config file: ``` @@ -87,6 +100,18 @@ eval "$(ldk-server-cli completions zsh)" ldk-server-cli completions fish | source ``` +## MCP Bridge + +The workspace also includes `ldk-server-mcp`, a stdio [Model Context Protocol](https://spec.modelcontextprotocol.io/) server +that lets MCP-compatible clients call the unary `ldk-server` RPC surface as tools. + +Run it directly from the workspace: +```bash +cargo run -p ldk-server-mcp -- --config /path/to/config.toml +``` + +It is covered by both crate-local tests and an `e2e-tests` sanity suite against a live `ldk-server` instance. + ## Contributing Contributions are welcome! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines on building, testing, code style, and development workflow. 
diff --git a/e2e-tests/build.rs b/e2e-tests/build.rs index b8841d3d..d21fb16c 100644 --- a/e2e-tests/build.rs +++ b/e2e-tests/build.rs @@ -11,9 +11,13 @@ fn main() { .expect("e2e-tests must be inside workspace") .to_path_buf(); + let outer_target_dir = env::var_os("CARGO_TARGET_DIR") + .map(PathBuf::from) + .unwrap_or_else(|| workspace_root.join("target")); + // Use a separate target directory so the inner cargo build doesn't deadlock // waiting for the build directory lock held by the outer cargo. - let target_dir = workspace_root.join("target").join("e2e-deps"); + let target_dir = outer_target_dir.join("e2e-deps"); let status = Command::new(&cargo) .args([ @@ -24,6 +28,8 @@ fn main() { "experimental-lsps2-support", "-p", "ldk-server-cli", + "-p", + "ldk-server-mcp", ]) .current_dir(&workspace_root) .env("CARGO_TARGET_DIR", &target_dir) @@ -31,14 +37,16 @@ fn main() { .status() .expect("failed to run cargo build"); - assert!(status.success(), "cargo build of ldk-server / ldk-server-cli failed"); + assert!(status.success(), "cargo build of ldk-server / ldk-server-cli / ldk-server-mcp failed"); let bin_dir = target_dir.join(&profile); let server_bin = bin_dir.join("ldk-server"); let cli_bin = bin_dir.join("ldk-server-cli"); + let mcp_bin = bin_dir.join("ldk-server-mcp"); println!("cargo:rustc-env=LDK_SERVER_BIN={}", server_bin.display()); println!("cargo:rustc-env=LDK_SERVER_CLI_BIN={}", cli_bin.display()); + println!("cargo:rustc-env=LDK_SERVER_MCP_BIN={}", mcp_bin.display()); // Rebuild when server or CLI source changes println!("cargo:rerun-if-changed=../ldk-server/src"); @@ -47,4 +55,6 @@ fn main() { println!("cargo:rerun-if-changed=../ldk-server-cli/Cargo.toml"); println!("cargo:rerun-if-changed=../ldk-server-grpc/src"); println!("cargo:rerun-if-changed=../ldk-server-grpc/Cargo.toml"); + println!("cargo:rerun-if-changed=../ldk-server-mcp/src"); + println!("cargo:rerun-if-changed=../ldk-server-mcp/Cargo.toml"); } diff --git a/e2e-tests/src/lib.rs 
b/e2e-tests/src/lib.rs index b7aeee38..9e3cec21 100644 --- a/e2e-tests/src/lib.rs +++ b/e2e-tests/src/lib.rs @@ -7,7 +7,7 @@ // You may not use this file except in accordance with one or both of these // licenses. -use std::io::{BufRead, BufReader}; +use std::io::{BufRead, BufReader, Write}; use std::net::TcpListener; use std::path::{Path, PathBuf}; use std::process::{Child, Command, Stdio}; @@ -16,6 +16,7 @@ use std::time::Duration; use corepc_node::Node; use hex_conservative::DisplayHex; use ldk_server_client::client::LdkServerClient; +use serde_json::Value; use ldk_server_client::ldk_server_grpc::api::{GetNodeInfoRequest, GetNodeInfoResponse}; use ldk_server_grpc::api::{ GetBalancesRequest, ListChannelsRequest, OnchainReceiveRequest, OpenChannelRequest, @@ -291,6 +292,69 @@ pub fn cli_binary_path() -> PathBuf { PathBuf::from(env!("LDK_SERVER_CLI_BIN")) } +/// Returns the path to the ldk-server-mcp binary (built automatically by build.rs). +pub fn mcp_binary_path() -> PathBuf { + PathBuf::from(env!("LDK_SERVER_MCP_BIN")) +} + +/// Handle to a running ldk-server-mcp child process. 
+pub struct McpHandle { + child: Option<Child>, + stdin: std::process::ChildStdin, + stdout: BufReader<std::process::ChildStdout>, +} + +impl McpHandle { + pub fn start(server: &LdkServerHandle) -> Self { + let mcp_path = mcp_binary_path(); + let mut child = Command::new(&mcp_path) + .env("LDK_BASE_URL", server.base_url()) + .env("LDK_API_KEY", &server.api_key) + .env("LDK_TLS_CERT_PATH", server.tls_cert_path.to_str().unwrap()) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .unwrap_or_else(|e| panic!("Failed to run MCP server at {:?}: {}", mcp_path, e)); + + let stdin = child.stdin.take().unwrap(); + let stdout = BufReader::new(child.stdout.take().unwrap()); + + Self { child: Some(child), stdin, stdout } + } + + pub fn send(&mut self, request: &Value) { + let line = serde_json::to_string(request).unwrap(); + writeln!(self.stdin, "{}", line).unwrap(); + self.stdin.flush().unwrap(); + } + + pub fn recv(&mut self) -> Value { + let mut line = String::new(); + self.stdout.read_line(&mut line).expect("Failed to read MCP stdout"); + serde_json::from_str(line.trim()).expect("Failed to parse MCP response") + } + + pub fn call(&mut self, id: u64, method: &str, params: Value) -> Value { + self.send(&serde_json::json!({ + "jsonrpc": "2.0", + "id": id, + "method": method, + "params": params, + })); + self.recv() + } +} + +impl Drop for McpHandle { + fn drop(&mut self) { + if let Some(mut child) = self.child.take() { + let _ = child.kill(); + let _ = child.wait(); + } + } +} + +/// Run a CLI command against the given server handle and return raw stdout as a string. +pub fn run_cli_raw(handle: &LdkServerHandle, args: &[&str]) -> String { + let cli_path = cli_binary_path(); diff --git a/e2e-tests/tests/mcp.rs b/e2e-tests/tests/mcp.rs new file mode 100644 index 00000000..4239c0c3 --- /dev/null +++ b/e2e-tests/tests/mcp.rs @@ -0,0 +1,87 @@ +// This file is Copyright its original authors, visible in version control +// history.
+// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + +use e2e_tests::{LdkServerHandle, McpHandle, TestBitcoind}; +use ldk_server_client::ldk_server_grpc::api::Bolt11ReceiveRequest; +use ldk_server_client::ldk_server_grpc::types::{ + bolt11_invoice_description, Bolt11InvoiceDescription, +}; +use serde_json::json; + +#[tokio::test] +async fn test_mcp_initialize_and_list_tools() { + let bitcoind = TestBitcoind::new(); + let server = LdkServerHandle::start(&bitcoind).await; + let mut mcp = McpHandle::start(&server); + + let initialize = mcp.call( + 1, + "initialize", + json!({ + "protocolVersion": "2024-11-05", + "capabilities": {}, + "clientInfo": {"name": "e2e-test", "version": "0.1"} + }), + ); + assert_eq!(initialize["result"]["protocolVersion"], "2024-11-05"); + assert!(initialize["result"]["capabilities"]["tools"].is_object()); + + let tools = mcp.call(2, "tools/list", json!({})); + let tool_names = tools["result"]["tools"].as_array().unwrap(); + assert!(tool_names.iter().any(|tool| tool["name"] == "get_node_info")); + assert!(tool_names.iter().any(|tool| tool["name"] == "onchain_receive")); + assert!(tool_names.iter().any(|tool| tool["name"] == "decode_invoice")); +} + +#[tokio::test] +async fn test_mcp_live_tool_calls() { + let bitcoind = TestBitcoind::new(); + let server = LdkServerHandle::start(&bitcoind).await; + let mut mcp = McpHandle::start(&server); + + let node_info = mcp.call(1, "tools/call", json!({ + "name": "get_node_info", + "arguments": {} + })); + let node_info_text = node_info["result"]["content"][0]["text"].as_str().unwrap(); + let node_info_json: serde_json::Value = serde_json::from_str(node_info_text).unwrap(); + assert_eq!(node_info_json["node_id"], server.node_id()); + + let onchain_receive = mcp.call(2, "tools/call", json!({ + "name": "onchain_receive", + "arguments": {} + })); + let 
onchain_receive_text = onchain_receive["result"]["content"][0]["text"].as_str().unwrap(); + let onchain_receive_json: serde_json::Value = + serde_json::from_str(onchain_receive_text).unwrap(); + assert!(onchain_receive_json["address"].as_str().unwrap().starts_with("bcrt1")); + + let invoice = server + .client() + .bolt11_receive(Bolt11ReceiveRequest { + amount_msat: Some(50_000_000), + description: Some(Bolt11InvoiceDescription { + kind: Some(bolt11_invoice_description::Kind::Direct("mcp decode".to_string())), + }), + expiry_secs: 3600, + }) + .await + .unwrap(); + + let decode_invoice = mcp.call(3, "tools/call", json!({ + "name": "decode_invoice", + "arguments": { "invoice": invoice.invoice } + })); + let decode_invoice_text = decode_invoice["result"]["content"][0]["text"].as_str().unwrap(); + let decode_invoice_json: serde_json::Value = + serde_json::from_str(decode_invoice_text).unwrap(); + assert_eq!(decode_invoice_json["destination"], server.node_id()); + assert_eq!(decode_invoice_json["description"], "mcp decode"); + assert_eq!(decode_invoice_json["amount_msat"], 50_000_000u64); +} diff --git a/ldk-server-cli/src/main.rs b/ldk-server-cli/src/main.rs index f001d871..bf680233 100644 --- a/ldk-server-cli/src/main.rs +++ b/ldk-server-cli/src/main.rs @@ -49,6 +49,10 @@ use ldk_server_client::ldk_server_grpc::types::{ bolt11_invoice_description, Bolt11InvoiceDescription, ChannelConfig, PageToken, RouteParametersConfig, }; +use ldk_server_client::{ + DEFAULT_EXPIRY_SECS, DEFAULT_MAX_CHANNEL_SATURATION_POWER_OF_HALF, DEFAULT_MAX_PATH_COUNT, + DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, +}; use serde::Serialize; use serde_json::{json, Value}; use types::{ @@ -58,14 +62,6 @@ use types::{ mod config; mod types; -// Having these default values as constants in the Proto file and -// importing/reusing them here might be better, but Proto3 removed -// the ability to set default values. 
-const DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA: u32 = 1008; -const DEFAULT_MAX_PATH_COUNT: u32 = 10; -const DEFAULT_MAX_CHANNEL_SATURATION_POWER_OF_HALF: u32 = 2; -const DEFAULT_EXPIRY_SECS: u32 = 86_400; - const DEFAULT_DIR: &str = if cfg!(target_os = "macos") { "~/Library/Application Support/ldk-server" } else if cfg!(target_os = "windows") { diff --git a/ldk-server-client/src/lib.rs b/ldk-server-client/src/lib.rs index ab89afe8..0905210a 100644 --- a/ldk-server-client/src/lib.rs +++ b/ldk-server-client/src/lib.rs @@ -21,3 +21,12 @@ pub mod error; /// Request/Response structs required for interacting with the ldk-ldk-server-client. pub use ldk_server_grpc; + +/// Default maximum total CLTV expiry delta for payment routing. +pub const DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA: u32 = 1008; +/// Default maximum number of payment paths. +pub const DEFAULT_MAX_PATH_COUNT: u32 = 10; +/// Default maximum channel saturation power of half. +pub const DEFAULT_MAX_CHANNEL_SATURATION_POWER_OF_HALF: u32 = 2; +/// Default BOLT11 invoice expiry in seconds (24 hours). +pub const DEFAULT_EXPIRY_SECS: u32 = 86_400; diff --git a/ldk-server-mcp/CLAUDE.md b/ldk-server-mcp/CLAUDE.md new file mode 100644 index 00000000..5e6f63a3 --- /dev/null +++ b/ldk-server-mcp/CLAUDE.md @@ -0,0 +1,71 @@ +# CLAUDE.md — ldk-server-mcp + +MCP (Model Context Protocol) server that exposes LDK Server operations as tools for AI agents. + +This crate is a member of the `ldk-server` workspace and should be kept green under the workspace-wide checks. 
+ +## Build / Test Commands + +```bash +cargo fmt --all +cargo check +cargo test -p ldk-server-mcp +cargo clippy -p ldk-server-mcp --all-targets -- -D warnings + +# MCP sanity checks against a live ldk-server instance +cargo test --manifest-path e2e-tests/Cargo.toml mcp -- --nocapture +``` + +## Architecture + +``` +src/ + main.rs — Entry point: arg parsing, config, stdio JSON-RPC loop, method dispatch + config.rs — Config loading (TOML + env vars), mirrors ldk-server-cli config + protocol.rs — JSON-RPC 2.0 request/response types + mcp.rs — MCP protocol types (InitializeResult, ToolDefinition, ToolCallResult) + tools/ + mod.rs — ToolRegistry: build_tool_registry(), list_tools(), call_tool() + schema.rs — JSON Schema definitions for all tool inputs + handlers.rs — Handler functions: JSON args -> ldk-server-client call -> JSON result +``` + +## MCP Protocol + +- **Version**: `2024-11-05` +- **Spec**: https://spec.modelcontextprotocol.io/ +- **Transport**: stdio (one JSON-RPC 2.0 message per line) +- **Methods implemented**: `initialize`, `tools/list`, `tools/call` +- **Notifications handled**: `notifications/initialized` (ignored, no response) + +## Config + +The server reads configuration in this precedence order (highest first): + +1. **Environment variables**: `LDK_BASE_URL`, `LDK_API_KEY`, `LDK_TLS_CERT_PATH` +2. **CLI argument**: `--config <path>` pointing to a TOML file +3. **Default paths**: `~/.ldk-server/config.toml`, `~/.ldk-server/tls.crt`, `~/.ldk-server/{network}/api_key` + +If no config path is provided explicitly, the crate uses the default `ldk-server` config location at +`~/.ldk-server/config.toml`. + +TOML config format (same as ldk-server-cli): +```toml +[node] +grpc_service_address = "127.0.0.1:3536" +network = "bitcoin" + +[tls] +cert_path = "/path/to/tls.crt" +``` + +## Adding a New Tool + +When a new endpoint is added to `ldk-server-client`: + +1. Add a JSON schema function in `src/tools/schema.rs` (follow existing pattern) +2.
Add a handler function in `src/tools/handlers.rs` +3. Register in `build_tool_registry()` in `src/tools/mod.rs` +4. Update the expected tool surface in `tests/integration.rs` +5. Add or update helper-level coverage in `src/tools/handlers.rs` when parsing or validation changes +6. If the tool is suitable for live validation, extend `e2e-tests/tests/mcp.rs` diff --git a/ldk-server-mcp/Cargo.toml b/ldk-server-mcp/Cargo.toml new file mode 100644 index 00000000..df8fc51c --- /dev/null +++ b/ldk-server-mcp/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "ldk-server-mcp" +version = "0.1.0" +edition = "2021" + +[dependencies] +ldk-server-client = { path = "../ldk-server-client", features = ["serde"] } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +tokio = { version = "1.38.0", features = ["rt-multi-thread", "macros", "io-util", "io-std"] } +toml = { version = "0.8", default-features = false, features = ["parse"] } diff --git a/ldk-server-mcp/README.md b/ldk-server-mcp/README.md new file mode 100644 index 00000000..a55ee5cf --- /dev/null +++ b/ldk-server-mcp/README.md @@ -0,0 +1,118 @@ +# ldk-server-mcp + +An [MCP (Model Context Protocol)](https://spec.modelcontextprotocol.io/) server that exposes [LDK Server](https://github.com/lightningdevkit/ldk-server) operations as tools for AI agents. It communicates over JSON-RPC 2.0 via stdio and connects to an LDK Server instance over TLS using the [`ldk-server-client`](https://github.com/lightningdevkit/ldk-server/tree/main/ldk-server-client) library. + +This crate lives inside the `ldk-server` workspace. + +## Building + +```bash +cargo build -p ldk-server-mcp --release +``` + +## Configuration + +The server reads configuration in this precedence order (highest wins): + +1. **Environment variables**: `LDK_BASE_URL`, `LDK_API_KEY`, `LDK_TLS_CERT_PATH` +2. **CLI argument**: `--config ` pointing to a TOML config file +3. 
**Default paths**: `~/.ldk-server/config.toml`, `~/.ldk-server/tls.crt`, `~/.ldk-server/{network}/api_key` + +The TOML config format is the same as used by [`ldk-server-cli`](https://github.com/lightningdevkit/ldk-server/tree/main/ldk-server-cli): + +```toml +[node] +grpc_service_address = "127.0.0.1:3536" +network = "signet" + +[tls] +cert_path = "/path/to/tls.crt" +``` + +## Usage + +### Standalone + +```bash +export LDK_BASE_URL="localhost:3000" +export LDK_API_KEY="your_hex_encoded_api_key" +export LDK_TLS_CERT_PATH="/path/to/tls.crt" +cargo run -p ldk-server-mcp --release +``` + +Or using a config file: + +```bash +cargo run -p ldk-server-mcp -- --config /path/to/config.toml +``` + +If `--config` is omitted, `ldk-server-mcp` falls back to the same default config path as +`ldk-server` and `ldk-server-cli`: `~/.ldk-server/config.toml`. + +### With Claude Desktop + +Add the following to your Claude Desktop MCP configuration (`claude_desktop_config.json`): + +```json +{ + "mcpServers": { + "ldk-server": { + "command": "/path/to/ldk-server-mcp", + "env": { + "LDK_BASE_URL": "localhost:3000", + "LDK_API_KEY": "your_hex_encoded_api_key", + "LDK_TLS_CERT_PATH": "/path/to/tls.crt" + } + } + } +} +``` + +### With Claude Code + +Add to your Claude Code MCP settings (`.claude/settings.json`): + +```json +{ + "mcpServers": { + "ldk-server": { + "command": "/path/to/ldk-server-mcp", + "env": { + "LDK_BASE_URL": "localhost:3000", + "LDK_API_KEY": "your_hex_encoded_api_key", + "LDK_TLS_CERT_PATH": "/path/to/tls.crt" + } + } + } +} +``` + +## Available Tools + +All unary LDK Server RPCs are exposed as MCP tools. Use `tools/list` to discover the current set. + +Streaming RPCs such as `subscribe_events` and non-RPC HTTP endpoints such as `metrics` are not exposed as tools. 
+ +## MCP Protocol + +- **Protocol version**: `2024-11-05` +- **Transport**: stdio (one JSON-RPC 2.0 message per line) +- **Methods**: `initialize`, `tools/list`, `tools/call` + +## Testing + +```bash +cargo test -p ldk-server-mcp + +# MCP end-to-end sanity checks against a live ldk-server +cargo test --manifest-path e2e-tests/Cargo.toml mcp -- --nocapture +``` + +## License + +Licensed under either of + +- Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) +- MIT License ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. diff --git a/ldk-server-mcp/src/config.rs b/ldk-server-mcp/src/config.rs new file mode 100644 index 00000000..74ce1e02 --- /dev/null +++ b/ldk-server-mcp/src/config.rs @@ -0,0 +1,300 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. 
+ +use std::fmt::Write as _; +use std::path::PathBuf; + +use serde::Deserialize; + +const DEFAULT_CONFIG_FILE: &str = "config.toml"; +const DEFAULT_CERT_FILE: &str = "tls.crt"; +const API_KEY_FILE: &str = "api_key"; +const DEFAULT_GRPC_SERVICE_ADDRESS: &str = "127.0.0.1:3536"; + +fn get_default_data_dir() -> Option<PathBuf> { + #[cfg(target_os = "macos")] + { + #[allow(deprecated)] + std::env::home_dir().map(|home| home.join("Library/Application Support/ldk-server")) + } + #[cfg(target_os = "windows")] + { + std::env::var("APPDATA").ok().map(|appdata| PathBuf::from(appdata).join("ldk-server")) + } + #[cfg(not(any(target_os = "macos", target_os = "windows")))] + { + #[allow(deprecated)] + std::env::home_dir().map(|home| home.join(".ldk-server")) + } +} + +fn get_default_config_path() -> Option<PathBuf> { + get_default_data_dir().map(|dir| dir.join(DEFAULT_CONFIG_FILE)) +} + +fn get_default_cert_path() -> Option<PathBuf> { + get_default_data_dir().map(|path| path.join(DEFAULT_CERT_FILE)) +} + +fn get_default_api_key_path(network: &str) -> Option<PathBuf> { + get_default_data_dir().map(|path| path.join(network).join(API_KEY_FILE)) +} + +fn api_key_path_for_storage_dir(storage_dir: &str, network: &str) -> PathBuf { + PathBuf::from(storage_dir).join(network).join(API_KEY_FILE) +} + +fn cert_path_for_storage_dir(storage_dir: &str) -> PathBuf { + PathBuf::from(storage_dir).join(DEFAULT_CERT_FILE) +} + +fn hex_encode(bytes: &[u8]) -> String { + let mut encoded = String::with_capacity(bytes.len() * 2); + for byte in bytes { + let _ = write!(&mut encoded, "{byte:02x}"); + } + encoded +} + +#[derive(Debug, Deserialize)] +pub struct Config { + pub node: NodeConfig, + pub tls: Option<TlsConfig>, + pub storage: Option<StorageConfig>, +} + +#[derive(Debug, Deserialize)] +pub struct StorageConfig { + pub disk: Option<DiskConfig>, +} + +#[derive(Debug, Deserialize)] +pub struct DiskConfig { + pub dir_path: Option<String>, +} + +#[derive(Debug, Deserialize)] +pub struct TlsConfig { + pub cert_path: Option<String>, +} + +#[derive(Debug, Deserialize)] +pub struct
 NodeConfig { + #[serde(default = "default_grpc_service_address")] + pub grpc_service_address: String, + network: String, +} + +fn default_grpc_service_address() -> String { + DEFAULT_GRPC_SERVICE_ADDRESS.to_string() +} + +impl Config { + pub fn network(&self) -> Result<String, String> { + match self.node.network.as_str() { + "bitcoin" | "mainnet" => Ok("bitcoin".to_string()), + "testnet" => Ok("testnet".to_string()), + "testnet4" => Ok("testnet4".to_string()), + "signet" => Ok("signet".to_string()), + "regtest" => Ok("regtest".to_string()), + other => Err(format!("Unsupported network: {other}")), + } + } +} + +fn load_config(path: &PathBuf) -> Result<Config, String> { + let contents = std::fs::read_to_string(path) + .map_err(|e| format!("Failed to read config file '{}': {}", path.display(), e))?; + toml::from_str(&contents) + .map_err(|e| format!("Failed to parse config file '{}': {}", path.display(), e)) +} + +pub struct ResolvedConfig { + pub base_url: String, + pub api_key: String, + pub tls_cert_pem: Vec<u8>, +} + +pub fn resolve_config(config_path: Option<String>) -> Result<ResolvedConfig, String> { + let config_path = config_path.map(PathBuf::from).or_else(get_default_config_path); + let config = match config_path { + Some(ref path) if path.exists() => Some(load_config(path)?), + _ => None, + }; + + let storage_dir = + config.as_ref().and_then(|c| c.storage.as_ref()?.disk.as_ref()?.dir_path.as_deref()); + + let base_url = std::env::var("LDK_BASE_URL") + .ok() + .or_else(|| config.as_ref().map(|c| c.node.grpc_service_address.clone())) + .unwrap_or_else(default_grpc_service_address); + + let api_key = std::env::var("LDK_API_KEY").ok().or_else(|| { + let network = + config.as_ref().and_then(|c| c.network().ok()).unwrap_or("bitcoin".to_string()); + storage_dir + .map(|dir| api_key_path_for_storage_dir(dir, &network)) + .and_then(|path| std::fs::read(&path).ok()) + .or_else(|| { + get_default_api_key_path(&network).and_then(|path| std::fs::read(&path).ok()) + }) + .map(|bytes| hex_encode(&bytes)) + }).ok_or_else(|| { + "API key
not provided. Set LDK_API_KEY or ensure the api_key file exists at ~/.ldk-server/[network]/api_key".to_string() + })?; + + let tls_cert_path = std::env::var("LDK_TLS_CERT_PATH").ok().map(PathBuf::from).or_else(|| { + config + .as_ref() + .and_then(|c| c.tls.as_ref().and_then(|t| t.cert_path.as_ref().map(PathBuf::from))) + .or_else(|| storage_dir.map(cert_path_for_storage_dir).filter(|path| path.exists())) + .or_else(get_default_cert_path) + }).ok_or_else(|| { + "TLS cert path not provided. Set LDK_TLS_CERT_PATH or ensure config file exists at ~/.ldk-server/config.toml".to_string() + })?; + + let tls_cert_pem = std::fs::read(&tls_cert_path).map_err(|e| { + format!("Failed to read server certificate file '{}': {}", tls_cert_path.display(), e) + })?; + + Ok(ResolvedConfig { base_url, api_key, tls_cert_pem }) +} + +#[cfg(test)] +mod tests { + use super::{resolve_config, Config, DEFAULT_GRPC_SERVICE_ADDRESS}; + use std::sync::Mutex; + + // Tests that call resolve_config manipulate process-global environment + // variables, so they must not run in parallel. 
+ static ENV_LOCK: Mutex<()> = Mutex::new(()); + + #[test] + fn config_defaults_grpc_service_address() { + let config: Config = toml::from_str( + r#" + [node] + network = "regtest" + "#, + ) + .unwrap(); + + assert_eq!(config.node.grpc_service_address, DEFAULT_GRPC_SERVICE_ADDRESS); + } + + #[test] + fn resolve_config_uses_grpc_service_address_from_config() { + let _lock = ENV_LOCK.lock().unwrap(); + + let temp_dir = + std::env::temp_dir().join(format!("ldk-server-mcp-config-test-{}", std::process::id())); + std::fs::create_dir_all(&temp_dir).unwrap(); + + let config_path = temp_dir.join("config.toml"); + let cert_path = temp_dir.join("tls.crt"); + std::fs::write(&cert_path, b"test-cert").unwrap(); + std::fs::write( + &config_path, + format!( + r#" + [node] + network = "regtest" + grpc_service_address = "127.0.0.1:4242" + + [tls] + cert_path = "{}" + "#, + cert_path.display() + ), + ) + .unwrap(); + + std::env::set_var("LDK_API_KEY", "deadbeef"); + std::env::set_var("LDK_TLS_CERT_PATH", &cert_path); + std::env::remove_var("LDK_BASE_URL"); + let resolved = resolve_config(Some(config_path.display().to_string())).unwrap(); + std::env::remove_var("LDK_API_KEY"); + std::env::remove_var("LDK_TLS_CERT_PATH"); + + assert_eq!(resolved.base_url, "127.0.0.1:4242"); + assert_eq!(resolved.api_key, "deadbeef"); + assert_eq!(resolved.tls_cert_pem, b"test-cert"); + + std::fs::remove_dir_all(temp_dir).unwrap(); + } + + #[test] + fn resolve_config_falls_back_to_default_grpc_address() { + let _lock = ENV_LOCK.lock().unwrap(); + + let temp_dir = std::env::temp_dir() + .join(format!("ldk-server-mcp-config-fallback-{}", std::process::id())); + std::fs::create_dir_all(&temp_dir).unwrap(); + + let cert_path = temp_dir.join("tls.crt"); + std::fs::write(&cert_path, b"test-cert").unwrap(); + + // No config file, no LDK_BASE_URL — should fall back to default + std::env::set_var("LDK_API_KEY", "deadbeef"); + std::env::set_var("LDK_TLS_CERT_PATH", &cert_path); + 
std::env::remove_var("LDK_BASE_URL"); + let resolved = + resolve_config(Some(temp_dir.join("nonexistent.toml").display().to_string())).unwrap(); + std::env::remove_var("LDK_API_KEY"); + std::env::remove_var("LDK_TLS_CERT_PATH"); + + assert_eq!(resolved.base_url, DEFAULT_GRPC_SERVICE_ADDRESS); + + std::fs::remove_dir_all(temp_dir).unwrap(); + } + + #[test] + fn resolve_config_uses_storage_dir_for_credentials() { + let _lock = ENV_LOCK.lock().unwrap(); + + let temp_dir = + std::env::temp_dir().join(format!("ldk-server-mcp-storage-dir-{}", std::process::id())); + std::fs::create_dir_all(temp_dir.join("regtest")).unwrap(); + + let config_path = temp_dir.join("config.toml"); + let custom_storage = temp_dir.join("custom-storage"); + std::fs::create_dir_all(custom_storage.join("regtest")).unwrap(); + + let cert_path = custom_storage.join("tls.crt"); + std::fs::write(&cert_path, b"storage-cert").unwrap(); + std::fs::write(custom_storage.join("regtest").join("api_key"), [0xAB, 0xCD]).unwrap(); + + std::fs::write( + &config_path, + format!( + r#" + [node] + network = "regtest" + + [storage.disk] + dir_path = "{}" + "#, + custom_storage.display() + ), + ) + .unwrap(); + + std::env::remove_var("LDK_API_KEY"); + std::env::remove_var("LDK_TLS_CERT_PATH"); + std::env::remove_var("LDK_BASE_URL"); + let resolved = resolve_config(Some(config_path.display().to_string())).unwrap(); + + assert_eq!(resolved.base_url, DEFAULT_GRPC_SERVICE_ADDRESS); + assert_eq!(resolved.api_key, "abcd"); + assert_eq!(resolved.tls_cert_pem, b"storage-cert"); + + std::fs::remove_dir_all(temp_dir).unwrap(); + } +} diff --git a/ldk-server-mcp/src/main.rs b/ldk-server-mcp/src/main.rs new file mode 100644 index 00000000..7dfec64c --- /dev/null +++ b/ldk-server-mcp/src/main.rs @@ -0,0 +1,140 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. 
+// You may not use this file except in accordance with one or both of these +// licenses. + +mod config; +mod mcp; +mod protocol; +mod tools; + +use ldk_server_client::client::LdkServerClient; +use serde_json::Value; +use tokio::io::{AsyncBufReadExt, AsyncWriteExt, BufReader}; + +use crate::mcp::InitializeResult; +use crate::protocol::{ + JsonRpcErrorResponse, JsonRpcRequest, JsonRpcResponse, METHOD_NOT_FOUND, PARSE_ERROR, +}; +use crate::tools::build_tool_registry; + +#[tokio::main] +async fn main() { + let mut config_path = None; + let mut args = std::env::args().skip(1); + while let Some(arg) = args.next() { + match arg.as_str() { + "--config" => { + config_path = args.next(); + if config_path.is_none() { + eprintln!("Error: --config requires a path argument"); + std::process::exit(1); + } + }, + other => { + eprintln!("Unknown argument: {other}"); + std::process::exit(1); + }, + } + } + + let cfg = match config::resolve_config(config_path) { + Ok(cfg) => cfg, + Err(e) => { + eprintln!("Error: {e}"); + std::process::exit(1); + }, + }; + + let client = match LdkServerClient::new(cfg.base_url, cfg.api_key, &cfg.tls_cert_pem) { + Ok(c) => c, + Err(e) => { + eprintln!("Error: Failed to create client: {e}"); + std::process::exit(1); + }, + }; + + let registry = build_tool_registry(); + + eprintln!("ldk-server-mcp: ready, waiting for JSON-RPC requests on stdin"); + + let stdin = tokio::io::stdin(); + let mut stdout = tokio::io::stdout(); + let mut reader = BufReader::new(stdin); + let mut line = String::new(); + + loop { + line.clear(); + match reader.read_line(&mut line).await { + Ok(0) => break, // EOF + Ok(_) => {}, + Err(e) => { + eprintln!("Error reading stdin: {e}"); + break; + }, + } + + let trimmed = line.trim(); + if trimmed.is_empty() { + continue; + } + + let request: JsonRpcRequest = match serde_json::from_str(trimmed) { + Ok(r) => r, + Err(_) => { + let err = + JsonRpcErrorResponse::new(Value::Null, PARSE_ERROR, "Parse error".to_string()); + let resp = 
serde_json::to_string(&err).unwrap(); + let _ = stdout.write_all(resp.as_bytes()).await; + let _ = stdout.write_all(b"\n").await; + let _ = stdout.flush().await; + continue; + }, + }; + + // Notifications have no id — do not respond + if request.id.is_none() { + continue; + } + + let id = request.id.unwrap(); + + let response_str = match request.method.as_str() { + "initialize" => { + let result = InitializeResult::new(); + let resp = JsonRpcResponse::new(id, serde_json::to_value(result).unwrap()); + serde_json::to_string(&resp).unwrap() + }, + "tools/list" => { + let tools = registry.list_tools(); + let resp = JsonRpcResponse::new(id, serde_json::json!({ "tools": tools })); + serde_json::to_string(&resp).unwrap() + }, + "tools/call" => { + let params = request.params.unwrap_or(Value::Null); + let tool_name = params.get("name").and_then(|v| v.as_str()).unwrap_or(""); + let tool_args = params.get("arguments").cloned().unwrap_or(serde_json::json!({})); + + let result = registry.call_tool(&client, tool_name, tool_args).await; + let resp = JsonRpcResponse::new(id, serde_json::to_value(result).unwrap()); + serde_json::to_string(&resp).unwrap() + }, + _ => { + let err = JsonRpcErrorResponse::new( + id, + METHOD_NOT_FOUND, + format!("Method not found: {}", request.method), + ); + serde_json::to_string(&err).unwrap() + }, + }; + + let _ = stdout.write_all(response_str.as_bytes()).await; + let _ = stdout.write_all(b"\n").await; + let _ = stdout.flush().await; + } +} diff --git a/ldk-server-mcp/src/mcp.rs b/ldk-server-mcp/src/mcp.rs new file mode 100644 index 00000000..22aa8bf9 --- /dev/null +++ b/ldk-server-mcp/src/mcp.rs @@ -0,0 +1,89 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. 
+ +use serde::Serialize; +use serde_json::Value; + +pub const PROTOCOL_VERSION: &str = "2024-11-05"; +pub const SERVER_NAME: &str = "ldk-server-mcp"; +pub const SERVER_VERSION: &str = "0.1.0"; + +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct InitializeResult { + pub protocol_version: String, + pub capabilities: Capabilities, + pub server_info: ServerInfo, +} + +#[derive(Debug, Serialize)] +pub struct Capabilities { + pub tools: ToolsCapability, +} + +#[derive(Debug, Serialize)] +pub struct ToolsCapability {} + +#[derive(Debug, Serialize)] +pub struct ServerInfo { + pub name: String, + pub version: String, +} + +impl InitializeResult { + pub fn new() -> Self { + Self { + protocol_version: PROTOCOL_VERSION.to_string(), + capabilities: Capabilities { tools: ToolsCapability {} }, + server_info: ServerInfo { + name: SERVER_NAME.to_string(), + version: SERVER_VERSION.to_string(), + }, + } + } +} + +#[derive(Debug, Clone, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ToolDefinition { + pub name: String, + pub description: String, + pub input_schema: Value, +} + +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ToolCallResult { + pub content: Vec, + #[serde(skip_serializing_if = "Option::is_none")] + pub is_error: Option, +} + +#[derive(Debug, Serialize)] +pub struct ToolContent { + #[serde(rename = "type")] + pub content_type: String, + pub text: String, +} + +impl ToolCallResult { + pub fn success(text: String) -> Self { + Self { + content: vec![ToolContent { content_type: "text".to_string(), text }], + is_error: None, + } + } + + pub fn error(text: String) -> Self { + Self { + content: vec![ToolContent { content_type: "text".to_string(), text }], + is_error: Some(true), + } + } +} diff --git a/ldk-server-mcp/src/protocol.rs b/ldk-server-mcp/src/protocol.rs new file mode 100644 index 00000000..c14df7cd --- /dev/null +++ b/ldk-server-mcp/src/protocol.rs @@ -0,0 +1,61 @@ +// This file is Copyright its 
original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +pub const PARSE_ERROR: i64 = -32700; +pub const METHOD_NOT_FOUND: i64 = -32601; +#[allow(dead_code)] +pub const INVALID_PARAMS: i64 = -32602; +#[allow(dead_code)] +pub const INTERNAL_ERROR: i64 = -32603; + +#[derive(Debug, Deserialize)] +pub struct JsonRpcRequest { + #[allow(dead_code)] + pub jsonrpc: String, + pub id: Option, + pub method: String, + pub params: Option, +} + +#[derive(Debug, Serialize)] +pub struct JsonRpcResponse { + pub jsonrpc: String, + pub id: Value, + pub result: Value, +} + +#[derive(Debug, Serialize)] +pub struct JsonRpcErrorResponse { + pub jsonrpc: String, + pub id: Value, + pub error: JsonRpcError, +} + +#[derive(Debug, Serialize)] +pub struct JsonRpcError { + pub code: i64, + pub message: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub data: Option, +} + +impl JsonRpcResponse { + pub fn new(id: Value, result: Value) -> Self { + Self { jsonrpc: "2.0".to_string(), id, result } + } +} + +impl JsonRpcErrorResponse { + pub fn new(id: Value, code: i64, message: String) -> Self { + Self { jsonrpc: "2.0".to_string(), id, error: JsonRpcError { code, message, data: None } } + } +} diff --git a/ldk-server-mcp/src/tools/handlers.rs b/ldk-server-mcp/src/tools/handlers.rs new file mode 100644 index 00000000..d52ab9fa --- /dev/null +++ b/ldk-server-mcp/src/tools/handlers.rs @@ -0,0 +1,922 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. 
+ +use std::fmt::Write as _; + +use ldk_server_client::client::LdkServerClient; +use ldk_server_client::ldk_server_grpc::api::{ + Bolt11ClaimForHashRequest, Bolt11FailForHashRequest, Bolt11ReceiveForHashRequest, + Bolt11ReceiveRequest, Bolt11ReceiveVariableAmountViaJitChannelRequest, + Bolt11ReceiveViaJitChannelRequest, Bolt11SendRequest, Bolt12ReceiveRequest, Bolt12SendRequest, + CloseChannelRequest, ConnectPeerRequest, DecodeInvoiceRequest, DecodeOfferRequest, + DisconnectPeerRequest, ExportPathfindingScoresRequest, ForceCloseChannelRequest, + GetBalancesRequest, GetNodeInfoRequest, GetPaymentDetailsRequest, GraphGetChannelRequest, + GraphGetNodeRequest, GraphListChannelsRequest, GraphListNodesRequest, ListChannelsRequest, + ListForwardedPaymentsRequest, ListPaymentsRequest, ListPeersRequest, OnchainReceiveRequest, + OnchainSendRequest, OpenChannelRequest, SignMessageRequest, SpliceInRequest, SpliceOutRequest, + SpontaneousSendRequest, UnifiedSendRequest, UpdateChannelConfigRequest, VerifySignatureRequest, +}; +use ldk_server_client::ldk_server_grpc::types::{ + bolt11_invoice_description, channel_config, Bolt11InvoiceDescription, ChannelConfig, PageToken, + RouteParametersConfig, +}; +use ldk_server_client::{ + DEFAULT_EXPIRY_SECS, DEFAULT_MAX_CHANNEL_SATURATION_POWER_OF_HALF, DEFAULT_MAX_PATH_COUNT, + DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, +}; +use serde_json::{json, Value}; + +fn hex_encode(bytes: &[u8]) -> String { + let mut encoded = String::with_capacity(bytes.len() * 2); + for byte in bytes { + let _ = write!(&mut encoded, "{byte:02x}"); + } + encoded +} + +fn parse_page_token(token_str: &str) -> Result { + let parts: Vec<&str> = token_str.split(':').collect(); + if parts.len() != 2 { + return Err("Page token must be in format 'token:index'".to_string()); + } + let index = parts[1].parse::().map_err(|_| "Invalid page token index".to_string())?; + Ok(PageToken { token: parts[0].to_string(), index }) +} + +fn format_page_token(pt: &PageToken) -> String { + 
format!("{}:{}", pt.token, pt.index) +} + +fn build_route_parameters(args: &Value) -> RouteParametersConfig { + RouteParametersConfig { + max_total_routing_fee_msat: args.get("max_total_routing_fee_msat").and_then(|v| v.as_u64()), + max_total_cltv_expiry_delta: args + .get("max_total_cltv_expiry_delta") + .and_then(|v| v.as_u64()) + .map(|v| v as u32) + .unwrap_or(DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA), + max_path_count: args + .get("max_path_count") + .and_then(|v| v.as_u64()) + .map(|v| v as u32) + .unwrap_or(DEFAULT_MAX_PATH_COUNT), + max_channel_saturation_power_of_half: args + .get("max_channel_saturation_power_of_half") + .and_then(|v| v.as_u64()) + .map(|v| v as u32) + .unwrap_or(DEFAULT_MAX_CHANNEL_SATURATION_POWER_OF_HALF), + } +} + +fn build_channel_config(args: &Value) -> Result, String> { + let forwarding_fee_proportional_millionths = args + .get("forwarding_fee_proportional_millionths") + .and_then(|v| v.as_u64()) + .map(|v| v as u32); + let forwarding_fee_base_msat = + args.get("forwarding_fee_base_msat").and_then(|v| v.as_u64()).map(|v| v as u32); + let cltv_expiry_delta = + args.get("cltv_expiry_delta").and_then(|v| v.as_u64()).map(|v| v as u32); + let force_close_avoidance_max_fee_satoshis = + args.get("force_close_avoidance_max_fee_satoshis").and_then(|v| v.as_u64()); + let accept_underpaying_htlcs = args.get("accept_underpaying_htlcs").and_then(|v| v.as_bool()); + let max_dust_htlc_exposure = match ( + args.get("max_dust_htlc_exposure_fixed_limit_msat").and_then(|v| v.as_u64()), + args.get("max_dust_htlc_exposure_fee_rate_multiplier").and_then(|v| v.as_u64()), + ) { + (Some(_), Some(_)) => { + return Err( + "Only one of max_dust_htlc_exposure_fixed_limit_msat or max_dust_htlc_exposure_fee_rate_multiplier can be set" + .to_string(), + ) + }, + (Some(limit_msat), None) => { + Some(channel_config::MaxDustHtlcExposure::FixedLimitMsat(limit_msat)) + }, + (None, Some(multiplier)) => { + 
Some(channel_config::MaxDustHtlcExposure::FeeRateMultiplier(multiplier)) + }, + (None, None) => None, + }; + + if forwarding_fee_proportional_millionths.is_none() + && forwarding_fee_base_msat.is_none() + && cltv_expiry_delta.is_none() + && force_close_avoidance_max_fee_satoshis.is_none() + && accept_underpaying_htlcs.is_none() + && max_dust_htlc_exposure.is_none() + { + return Ok(None); + } + + Ok(Some(ChannelConfig { + forwarding_fee_proportional_millionths, + forwarding_fee_base_msat, + cltv_expiry_delta, + force_close_avoidance_max_fee_satoshis, + accept_underpaying_htlcs, + max_dust_htlc_exposure, + })) +} + +fn build_update_channel_config(args: &Value) -> Result { + Ok(build_channel_config(args)?.unwrap_or(ChannelConfig { + forwarding_fee_proportional_millionths: None, + forwarding_fee_base_msat: None, + cltv_expiry_delta: None, + force_close_avoidance_max_fee_satoshis: None, + accept_underpaying_htlcs: None, + max_dust_htlc_exposure: None, + })) +} + +fn build_bolt11_invoice_description( + args: &Value, +) -> Result, String> { + let description_str = args.get("description").and_then(|v| v.as_str()).map(|s| s.to_string()); + let description_hash = + args.get("description_hash").and_then(|v| v.as_str()).map(|s| s.to_string()); + + match (description_str, description_hash) { + (Some(desc), None) => Ok(Some(Bolt11InvoiceDescription { + kind: Some(bolt11_invoice_description::Kind::Direct(desc)), + })), + (None, Some(hash)) => Ok(Some(Bolt11InvoiceDescription { + kind: Some(bolt11_invoice_description::Kind::Hash(hash)), + })), + (Some(_), Some(_)) => { + Err("Only one of description or description_hash can be set".to_string()) + }, + (None, None) => Ok(None), + } +} + +pub async fn handle_get_node_info(client: &LdkServerClient, _args: Value) -> Result { + let response = + client.get_node_info(GetNodeInfoRequest {}).await.map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn 
handle_get_balances(client: &LdkServerClient, _args: Value) -> Result { + let response = + client.get_balances(GetBalancesRequest {}).await.map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_onchain_receive( + client: &LdkServerClient, _args: Value, +) -> Result { + let response = + client.onchain_receive(OnchainReceiveRequest {}).await.map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_onchain_send(client: &LdkServerClient, args: Value) -> Result { + let address = args + .get("address") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: address")? + .to_string(); + let amount_sats = args.get("amount_sats").and_then(|v| v.as_u64()); + let send_all = args.get("send_all").and_then(|v| v.as_bool()); + let fee_rate_sat_per_vb = args.get("fee_rate_sat_per_vb").and_then(|v| v.as_u64()); + + let response = client + .onchain_send(OnchainSendRequest { address, amount_sats, send_all, fee_rate_sat_per_vb }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_bolt11_receive(client: &LdkServerClient, args: Value) -> Result { + let amount_msat = args.get("amount_msat").and_then(|v| v.as_u64()); + let invoice_description = build_bolt11_invoice_description(&args)?; + + let expiry_secs = args + .get("expiry_secs") + .and_then(|v| v.as_u64()) + .map(|v| v as u32) + .unwrap_or(DEFAULT_EXPIRY_SECS); + + let response = client + .bolt11_receive(Bolt11ReceiveRequest { + description: invoice_description, + expiry_secs, + amount_msat, + }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_bolt11_receive_for_hash( + client: &LdkServerClient, args: Value, +) -> 
Result { + let amount_msat = args.get("amount_msat").and_then(|v| v.as_u64()); + let description = build_bolt11_invoice_description(&args)?; + let expiry_secs = args + .get("expiry_secs") + .and_then(|v| v.as_u64()) + .map(|v| v as u32) + .unwrap_or(DEFAULT_EXPIRY_SECS); + let payment_hash = args + .get("payment_hash") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: payment_hash")? + .to_string(); + + let response = client + .bolt11_receive_for_hash(Bolt11ReceiveForHashRequest { + amount_msat, + description, + expiry_secs, + payment_hash, + }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_bolt11_claim_for_hash( + client: &LdkServerClient, args: Value, +) -> Result { + let payment_hash = args.get("payment_hash").and_then(|v| v.as_str()).map(|s| s.to_string()); + let claimable_amount_msat = args.get("claimable_amount_msat").and_then(|v| v.as_u64()); + let preimage = args + .get("preimage") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: preimage")? + .to_string(); + + let response = client + .bolt11_claim_for_hash(Bolt11ClaimForHashRequest { + payment_hash, + claimable_amount_msat, + preimage, + }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_bolt11_fail_for_hash( + client: &LdkServerClient, args: Value, +) -> Result { + let payment_hash = args + .get("payment_hash") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: payment_hash")? 
+ .to_string(); + + let response = client + .bolt11_fail_for_hash(Bolt11FailForHashRequest { payment_hash }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_bolt11_receive_via_jit_channel( + client: &LdkServerClient, args: Value, +) -> Result { + let amount_msat = args + .get("amount_msat") + .and_then(|v| v.as_u64()) + .ok_or("Missing required parameter: amount_msat")?; + let description = build_bolt11_invoice_description(&args)?; + let expiry_secs = args + .get("expiry_secs") + .and_then(|v| v.as_u64()) + .map(|v| v as u32) + .unwrap_or(DEFAULT_EXPIRY_SECS); + let max_total_lsp_fee_limit_msat = + args.get("max_total_lsp_fee_limit_msat").and_then(|v| v.as_u64()); + + let response = client + .bolt11_receive_via_jit_channel(Bolt11ReceiveViaJitChannelRequest { + amount_msat, + description, + expiry_secs, + max_total_lsp_fee_limit_msat, + }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_bolt11_receive_variable_amount_via_jit_channel( + client: &LdkServerClient, args: Value, +) -> Result { + let description = build_bolt11_invoice_description(&args)?; + let expiry_secs = args + .get("expiry_secs") + .and_then(|v| v.as_u64()) + .map(|v| v as u32) + .unwrap_or(DEFAULT_EXPIRY_SECS); + let max_proportional_lsp_fee_limit_ppm_msat = + args.get("max_proportional_lsp_fee_limit_ppm_msat").and_then(|v| v.as_u64()); + + let response = client + .bolt11_receive_variable_amount_via_jit_channel( + Bolt11ReceiveVariableAmountViaJitChannelRequest { + description, + expiry_secs, + max_proportional_lsp_fee_limit_ppm_msat, + }, + ) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_bolt11_send(client: &LdkServerClient, args: Value) -> Result { + let 
invoice = args + .get("invoice") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: invoice")? + .to_string(); + let amount_msat = args.get("amount_msat").and_then(|v| v.as_u64()); + let route_parameters = build_route_parameters(&args); + + let response = client + .bolt11_send(Bolt11SendRequest { + invoice, + amount_msat, + route_parameters: Some(route_parameters), + }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_bolt12_receive(client: &LdkServerClient, args: Value) -> Result { + let description = args + .get("description") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: description")? + .to_string(); + let amount_msat = args.get("amount_msat").and_then(|v| v.as_u64()); + let expiry_secs = args.get("expiry_secs").and_then(|v| v.as_u64()).map(|v| v as u32); + let quantity = args.get("quantity").and_then(|v| v.as_u64()); + + let response = client + .bolt12_receive(Bolt12ReceiveRequest { description, amount_msat, expiry_secs, quantity }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_bolt12_send(client: &LdkServerClient, args: Value) -> Result { + let offer = args + .get("offer") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: offer")? 
+ .to_string(); + let amount_msat = args.get("amount_msat").and_then(|v| v.as_u64()); + let quantity = args.get("quantity").and_then(|v| v.as_u64()); + let payer_note = args.get("payer_note").and_then(|v| v.as_str()).map(|s| s.to_string()); + let route_parameters = build_route_parameters(&args); + + let response = client + .bolt12_send(Bolt12SendRequest { + offer, + amount_msat, + quantity, + payer_note, + route_parameters: Some(route_parameters), + }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_spontaneous_send( + client: &LdkServerClient, args: Value, +) -> Result { + let amount_msat = args + .get("amount_msat") + .and_then(|v| v.as_u64()) + .ok_or("Missing required parameter: amount_msat")?; + let node_id = args + .get("node_id") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: node_id")? + .to_string(); + let route_parameters = build_route_parameters(&args); + + let response = client + .spontaneous_send(SpontaneousSendRequest { + amount_msat, + node_id, + route_parameters: Some(route_parameters), + }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_unified_send(client: &LdkServerClient, args: Value) -> Result { + let uri = args + .get("uri") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: uri")? 
+ .to_string(); + let amount_msat = args.get("amount_msat").and_then(|v| v.as_u64()); + let route_parameters = build_route_parameters(&args); + + let response = client + .unified_send(UnifiedSendRequest { + uri, + amount_msat, + route_parameters: Some(route_parameters), + }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_open_channel(client: &LdkServerClient, args: Value) -> Result { + let node_pubkey = args + .get("node_pubkey") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: node_pubkey")? + .to_string(); + let address = args + .get("address") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: address")? + .to_string(); + let channel_amount_sats = args + .get("channel_amount_sats") + .and_then(|v| v.as_u64()) + .ok_or("Missing required parameter: channel_amount_sats")?; + let push_to_counterparty_msat = args.get("push_to_counterparty_msat").and_then(|v| v.as_u64()); + let announce_channel = args.get("announce_channel").and_then(|v| v.as_bool()).unwrap_or(false); + let disable_counterparty_reserve = + args.get("disable_counterparty_reserve").and_then(|v| v.as_bool()).unwrap_or(false); + let channel_config = build_channel_config(&args)?; + + let response = client + .open_channel(OpenChannelRequest { + node_pubkey, + address, + channel_amount_sats, + push_to_counterparty_msat, + channel_config, + announce_channel, + disable_counterparty_reserve, + }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_splice_in(client: &LdkServerClient, args: Value) -> Result { + let user_channel_id = args + .get("user_channel_id") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: user_channel_id")? 
+ .to_string(); + let counterparty_node_id = args + .get("counterparty_node_id") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: counterparty_node_id")? + .to_string(); + let splice_amount_sats = args + .get("splice_amount_sats") + .and_then(|v| v.as_u64()) + .ok_or("Missing required parameter: splice_amount_sats")?; + + let response = client + .splice_in(SpliceInRequest { user_channel_id, counterparty_node_id, splice_amount_sats }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_splice_out(client: &LdkServerClient, args: Value) -> Result { + let user_channel_id = args + .get("user_channel_id") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: user_channel_id")? + .to_string(); + let counterparty_node_id = args + .get("counterparty_node_id") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: counterparty_node_id")? + .to_string(); + let splice_amount_sats = args + .get("splice_amount_sats") + .and_then(|v| v.as_u64()) + .ok_or("Missing required parameter: splice_amount_sats")?; + let address = args.get("address").and_then(|v| v.as_str()).map(|s| s.to_string()); + + let response = client + .splice_out(SpliceOutRequest { + user_channel_id, + counterparty_node_id, + address, + splice_amount_sats, + }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_close_channel(client: &LdkServerClient, args: Value) -> Result { + let user_channel_id = args + .get("user_channel_id") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: user_channel_id")? + .to_string(); + let counterparty_node_id = args + .get("counterparty_node_id") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: counterparty_node_id")? 
+ .to_string(); + + let response = client + .close_channel(CloseChannelRequest { user_channel_id, counterparty_node_id }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_force_close_channel( + client: &LdkServerClient, args: Value, +) -> Result { + let user_channel_id = args + .get("user_channel_id") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: user_channel_id")? + .to_string(); + let counterparty_node_id = args + .get("counterparty_node_id") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: counterparty_node_id")? + .to_string(); + let force_close_reason = + args.get("force_close_reason").and_then(|v| v.as_str()).map(|s| s.to_string()); + + let response = client + .force_close_channel(ForceCloseChannelRequest { + user_channel_id, + counterparty_node_id, + force_close_reason, + }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_list_channels(client: &LdkServerClient, _args: Value) -> Result { + let response = + client.list_channels(ListChannelsRequest {}).await.map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_update_channel_config( + client: &LdkServerClient, args: Value, +) -> Result { + let user_channel_id = args + .get("user_channel_id") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: user_channel_id")? + .to_string(); + let counterparty_node_id = args + .get("counterparty_node_id") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: counterparty_node_id")? 
+ .to_string(); + + let channel_config = build_update_channel_config(&args)?; + + let response = client + .update_channel_config(UpdateChannelConfigRequest { + user_channel_id, + counterparty_node_id, + channel_config: Some(channel_config), + }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_list_payments(client: &LdkServerClient, args: Value) -> Result { + let page_token = match args.get("page_token").and_then(|v| v.as_str()) { + Some(token_str) => Some(parse_page_token(token_str)?), + None => None, + }; + + let response = client + .list_payments(ListPaymentsRequest { page_token }) + .await + .map_err(|e| e.message.clone())?; + + let mut result = serde_json::to_value(&response) + .map_err(|e| format!("Failed to serialize response: {e}"))?; + + if let Some(ref npt) = response.next_page_token { + result + .as_object_mut() + .unwrap() + .insert("next_page_token".to_string(), json!(format_page_token(npt))); + } + + Ok(result) +} + +pub async fn handle_get_payment_details( + client: &LdkServerClient, args: Value, +) -> Result { + let payment_id = args + .get("payment_id") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: payment_id")? 
+ .to_string(); + + let response = client + .get_payment_details(GetPaymentDetailsRequest { payment_id }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_list_forwarded_payments( + client: &LdkServerClient, args: Value, +) -> Result { + let page_token = match args.get("page_token").and_then(|v| v.as_str()) { + Some(token_str) => Some(parse_page_token(token_str)?), + None => None, + }; + + let response = client + .list_forwarded_payments(ListForwardedPaymentsRequest { page_token }) + .await + .map_err(|e| e.message.clone())?; + + let mut result = serde_json::to_value(&response) + .map_err(|e| format!("Failed to serialize response: {e}"))?; + + if let Some(ref npt) = response.next_page_token { + result + .as_object_mut() + .unwrap() + .insert("next_page_token".to_string(), json!(format_page_token(npt))); + } + + Ok(result) +} + +pub async fn handle_connect_peer(client: &LdkServerClient, args: Value) -> Result { + let node_pubkey = args + .get("node_pubkey") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: node_pubkey")? + .to_string(); + let address = args + .get("address") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: address")? + .to_string(); + let persist = args.get("persist").and_then(|v| v.as_bool()).unwrap_or(false); + + let response = client + .connect_peer(ConnectPeerRequest { node_pubkey, address, persist }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_disconnect_peer( + client: &LdkServerClient, args: Value, +) -> Result { + let node_pubkey = args + .get("node_pubkey") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: node_pubkey")? 
+ .to_string(); + + let response = client + .disconnect_peer(DisconnectPeerRequest { node_pubkey }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_list_peers(client: &LdkServerClient, _args: Value) -> Result { + let response = client.list_peers(ListPeersRequest {}).await.map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_decode_invoice(client: &LdkServerClient, args: Value) -> Result { + let invoice = args + .get("invoice") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: invoice")? + .to_string(); + + let response = client + .decode_invoice(DecodeInvoiceRequest { invoice }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_decode_offer(client: &LdkServerClient, args: Value) -> Result { + let offer = args + .get("offer") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: offer")? + .to_string(); + + let response = + client.decode_offer(DecodeOfferRequest { offer }).await.map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_sign_message(client: &LdkServerClient, args: Value) -> Result { + let message = args + .get("message") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: message")? 
+ .to_string(); + + let response = client + .sign_message(SignMessageRequest { message: message.into_bytes().into() }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_verify_signature( + client: &LdkServerClient, args: Value, +) -> Result { + let message = args + .get("message") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: message")? + .to_string(); + let signature = args + .get("signature") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: signature")? + .to_string(); + let public_key = args + .get("public_key") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: public_key")? + .to_string(); + + let response = client + .verify_signature(VerifySignatureRequest { + message: message.into_bytes().into(), + signature, + public_key, + }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_export_pathfinding_scores( + client: &LdkServerClient, _args: Value, +) -> Result { + let response = client + .export_pathfinding_scores(ExportPathfindingScoresRequest {}) + .await + .map_err(|e| e.message.clone())?; + let scores_hex = hex_encode(&response.scores); + Ok(json!({ "pathfinding_scores": scores_hex })) +} + +pub async fn handle_graph_list_channels( + client: &LdkServerClient, _args: Value, +) -> Result { + let response = client + .graph_list_channels(GraphListChannelsRequest {}) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_graph_get_channel( + client: &LdkServerClient, args: Value, +) -> Result { + let short_channel_id = args + .get("short_channel_id") + .and_then(|v| v.as_u64()) + .ok_or("Missing required parameter: short_channel_id")?; + + let response = client + 
.graph_get_channel(GraphGetChannelRequest { short_channel_id }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_graph_list_nodes( + client: &LdkServerClient, _args: Value, +) -> Result { + let response = + client.graph_list_nodes(GraphListNodesRequest {}).await.map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +pub async fn handle_graph_get_node(client: &LdkServerClient, args: Value) -> Result { + let node_id = args + .get("node_id") + .and_then(|v| v.as_str()) + .ok_or("Missing required parameter: node_id")? + .to_string(); + + let response = client + .graph_get_node(GraphGetNodeRequest { node_id }) + .await + .map_err(|e| e.message.clone())?; + serde_json::to_value(response).map_err(|e| format!("Failed to serialize response: {e}")) +} + +#[cfg(test)] +mod tests { + use super::{ + build_bolt11_invoice_description, build_channel_config, build_update_channel_config, + parse_page_token, + }; + use ldk_server_client::ldk_server_grpc::types::bolt11_invoice_description; + use serde_json::json; + + #[test] + fn parse_page_token_rejects_invalid_formats() { + assert_eq!( + parse_page_token("missing-colon").unwrap_err(), + "Page token must be in format 'token:index'" + ); + assert_eq!(parse_page_token("token:not-a-number").unwrap_err(), "Invalid page token index"); + } + + #[test] + fn build_bolt11_invoice_description_rejects_conflicting_fields() { + let err = build_bolt11_invoice_description(&json!({ + "description": "desc", + "description_hash": "hash" + })) + .unwrap_err(); + + assert_eq!(err, "Only one of description or description_hash can be set"); + } + + #[test] + fn build_bolt11_invoice_description_supports_direct_and_hash_modes() { + let direct = + build_bolt11_invoice_description(&json!({ "description": "desc" })).unwrap().unwrap(); + assert!(matches!( + direct.kind, + 
Some(bolt11_invoice_description::Kind::Direct(ref value)) if value == "desc" + )); + + let hash = build_bolt11_invoice_description(&json!({ "description_hash": "hash" })) + .unwrap() + .unwrap(); + assert!(matches!( + hash.kind, + Some(bolt11_invoice_description::Kind::Hash(ref value)) if value == "hash" + )); + } + + #[test] + fn build_channel_config_rejects_conflicting_dust_exposure_modes() { + let err = build_channel_config(&json!({ + "max_dust_htlc_exposure_fixed_limit_msat": 1, + "max_dust_htlc_exposure_fee_rate_multiplier": 2 + })) + .unwrap_err(); + + assert_eq!( + err, + "Only one of max_dust_htlc_exposure_fixed_limit_msat or max_dust_htlc_exposure_fee_rate_multiplier can be set" + ); + } + + #[test] + fn build_channel_config_returns_none_when_no_fields_are_set() { + assert!(build_channel_config(&json!({})).unwrap().is_none()); + } + + #[test] + fn build_update_channel_config_defaults_to_empty_config() { + let channel_config = build_update_channel_config(&json!({})).unwrap(); + + assert_eq!(channel_config.forwarding_fee_proportional_millionths, None); + assert_eq!(channel_config.forwarding_fee_base_msat, None); + assert_eq!(channel_config.cltv_expiry_delta, None); + assert_eq!(channel_config.force_close_avoidance_max_fee_satoshis, None); + assert_eq!(channel_config.accept_underpaying_htlcs, None); + assert_eq!(channel_config.max_dust_htlc_exposure, None); + } +} diff --git a/ldk-server-mcp/src/tools/mod.rs b/ldk-server-mcp/src/tools/mod.rs new file mode 100644 index 00000000..1fc2b5dc --- /dev/null +++ b/ldk-server-mcp/src/tools/mod.rs @@ -0,0 +1,311 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. 
+ +pub mod handlers; +pub mod schema; + +use std::future::Future; +use std::pin::Pin; + +use ldk_server_client::client::LdkServerClient; +use serde_json::Value; + +use crate::mcp::{ToolCallResult, ToolDefinition}; + +type ToolHandler = for<'a> fn( + &'a LdkServerClient, + Value, +) -> Pin> + Send + 'a>>; + +pub struct ToolRegistry { + tools: Vec<(ToolDefinition, ToolHandler)>, +} + +struct ToolSpec { + name: &'static str, + description: &'static str, + input_schema: fn() -> Value, + handler: ToolHandler, +} + +fn tool_spec( + name: &'static str, description: &'static str, input_schema: fn() -> Value, + handler: ToolHandler, +) -> ToolSpec { + ToolSpec { name, description, input_schema, handler } +} + +impl ToolRegistry { + pub fn list_tools(&self) -> Vec<&ToolDefinition> { + self.tools.iter().map(|(def, _)| def).collect() + } + + pub async fn call_tool( + &self, client: &LdkServerClient, name: &str, args: Value, + ) -> ToolCallResult { + for (def, handler) in &self.tools { + if def.name == name { + return match handler(client, args).await { + Ok(value) => { + let text = serde_json::to_string_pretty(&value) + .unwrap_or_else(|e| format!("Failed to serialize response: {e}")); + ToolCallResult::success(text) + }, + Err(e) => ToolCallResult::error(e), + }; + } + } + ToolCallResult::error(format!("Unknown tool: {name}")) + } +} + +pub fn build_tool_registry() -> ToolRegistry { + let tools = vec![ + tool_spec( + "get_node_info", + "Retrieve node info including node_id, sync status, and best block", + schema::get_node_info_schema, + |client, args| Box::pin(handlers::handle_get_node_info(client, args)), + ), + tool_spec( + "get_balances", + "Retrieve an overview of all known balances (on-chain and Lightning)", + schema::get_balances_schema, + |client, args| Box::pin(handlers::handle_get_balances(client, args)), + ), + tool_spec( + "onchain_receive", + "Generate a new on-chain Bitcoin funding address", + schema::onchain_receive_schema, + |client, args| 
Box::pin(handlers::handle_onchain_receive(client, args)), + ), + tool_spec( + "onchain_send", + "Send an on-chain Bitcoin payment to an address", + schema::onchain_send_schema, + |client, args| Box::pin(handlers::handle_onchain_send(client, args)), + ), + tool_spec( + "bolt11_receive", + "Create a BOLT11 Lightning invoice to receive a payment", + schema::bolt11_receive_schema, + |client, args| Box::pin(handlers::handle_bolt11_receive(client, args)), + ), + tool_spec( + "bolt11_receive_for_hash", + "Create a BOLT11 Lightning invoice for a specific payment hash", + schema::bolt11_receive_for_hash_schema, + |client, args| Box::pin(handlers::handle_bolt11_receive_for_hash(client, args)), + ), + tool_spec( + "bolt11_claim_for_hash", + "Manually claim a BOLT11 payment for a specific payment hash", + schema::bolt11_claim_for_hash_schema, + |client, args| Box::pin(handlers::handle_bolt11_claim_for_hash(client, args)), + ), + tool_spec( + "bolt11_fail_for_hash", + "Manually fail a BOLT11 payment for a specific payment hash", + schema::bolt11_fail_for_hash_schema, + |client, args| Box::pin(handlers::handle_bolt11_fail_for_hash(client, args)), + ), + tool_spec( + "bolt11_receive_via_jit_channel", + "Create a BOLT11 Lightning invoice to receive via an LSPS2 JIT channel", + schema::bolt11_receive_via_jit_channel_schema, + |client, args| Box::pin(handlers::handle_bolt11_receive_via_jit_channel(client, args)), + ), + tool_spec( + "bolt11_receive_variable_amount_via_jit_channel", + "Create a variable-amount BOLT11 Lightning invoice to receive via an LSPS2 JIT channel", + schema::bolt11_receive_variable_amount_via_jit_channel_schema, + |client, args| { + Box::pin(handlers::handle_bolt11_receive_variable_amount_via_jit_channel( + client, args, + )) + }, + ), + tool_spec( + "bolt11_send", + "Pay a BOLT11 Lightning invoice", + schema::bolt11_send_schema, + |client, args| Box::pin(handlers::handle_bolt11_send(client, args)), + ), + tool_spec( + "bolt12_receive", + "Create a BOLT12 
offer for receiving Lightning payments", + schema::bolt12_receive_schema, + |client, args| Box::pin(handlers::handle_bolt12_receive(client, args)), + ), + tool_spec( + "bolt12_send", + "Pay a BOLT12 Lightning offer", + schema::bolt12_send_schema, + |client, args| Box::pin(handlers::handle_bolt12_send(client, args)), + ), + tool_spec( + "spontaneous_send", + "Send a spontaneous (keysend) payment to a Lightning node", + schema::spontaneous_send_schema, + |client, args| Box::pin(handlers::handle_spontaneous_send(client, args)), + ), + tool_spec( + "unified_send", + "Send a payment given a BIP 21 URI or BIP 353 Human-Readable Name", + schema::unified_send_schema, + |client, args| Box::pin(handlers::handle_unified_send(client, args)), + ), + tool_spec( + "open_channel", + "Open a new Lightning channel with a remote node", + schema::open_channel_schema, + |client, args| Box::pin(handlers::handle_open_channel(client, args)), + ), + tool_spec( + "splice_in", + "Increase a channel's balance by splicing in on-chain funds", + schema::splice_in_schema, + |client, args| Box::pin(handlers::handle_splice_in(client, args)), + ), + tool_spec( + "splice_out", + "Decrease a channel's balance by splicing out to on-chain", + schema::splice_out_schema, + |client, args| Box::pin(handlers::handle_splice_out(client, args)), + ), + tool_spec( + "close_channel", + "Cooperatively close a Lightning channel", + schema::close_channel_schema, + |client, args| Box::pin(handlers::handle_close_channel(client, args)), + ), + tool_spec( + "force_close_channel", + "Force close a Lightning channel unilaterally", + schema::force_close_channel_schema, + |client, args| Box::pin(handlers::handle_force_close_channel(client, args)), + ), + tool_spec( + "list_channels", + "List all known Lightning channels", + schema::list_channels_schema, + |client, args| Box::pin(handlers::handle_list_channels(client, args)), + ), + tool_spec( + "update_channel_config", + "Update forwarding fees and CLTV delta for a 
channel", + schema::update_channel_config_schema, + |client, args| Box::pin(handlers::handle_update_channel_config(client, args)), + ), + tool_spec( + "list_payments", + "List all payments (supports pagination via page_token)", + schema::list_payments_schema, + |client, args| Box::pin(handlers::handle_list_payments(client, args)), + ), + tool_spec( + "get_payment_details", + "Get details of a specific payment by its ID", + schema::get_payment_details_schema, + |client, args| Box::pin(handlers::handle_get_payment_details(client, args)), + ), + tool_spec( + "list_forwarded_payments", + "List all forwarded payments (supports pagination via page_token)", + schema::list_forwarded_payments_schema, + |client, args| Box::pin(handlers::handle_list_forwarded_payments(client, args)), + ), + tool_spec( + "connect_peer", + "Connect to a Lightning peer without opening a channel", + schema::connect_peer_schema, + |client, args| Box::pin(handlers::handle_connect_peer(client, args)), + ), + tool_spec( + "disconnect_peer", + "Disconnect from a Lightning peer", + schema::disconnect_peer_schema, + |client, args| Box::pin(handlers::handle_disconnect_peer(client, args)), + ), + tool_spec( + "list_peers", + "List all known Lightning peers", + schema::list_peers_schema, + |client, args| Box::pin(handlers::handle_list_peers(client, args)), + ), + tool_spec( + "decode_invoice", + "Decode a BOLT11 invoice and return its parsed fields", + schema::decode_invoice_schema, + |client, args| Box::pin(handlers::handle_decode_invoice(client, args)), + ), + tool_spec( + "decode_offer", + "Decode a BOLT12 offer and return its parsed fields", + schema::decode_offer_schema, + |client, args| Box::pin(handlers::handle_decode_offer(client, args)), + ), + tool_spec( + "sign_message", + "Sign a message with the node's secret key", + schema::sign_message_schema, + |client, args| Box::pin(handlers::handle_sign_message(client, args)), + ), + tool_spec( + "verify_signature", + "Verify a signature against a 
message and public key", + schema::verify_signature_schema, + |client, args| Box::pin(handlers::handle_verify_signature(client, args)), + ), + tool_spec( + "export_pathfinding_scores", + "Export the pathfinding scores used by the Lightning router", + schema::export_pathfinding_scores_schema, + |client, args| Box::pin(handlers::handle_export_pathfinding_scores(client, args)), + ), + tool_spec( + "graph_list_channels", + "List all known short channel IDs in the network graph", + schema::graph_list_channels_schema, + |client, args| Box::pin(handlers::handle_graph_list_channels(client, args)), + ), + tool_spec( + "graph_get_channel", + "Get channel information from the network graph by short channel ID", + schema::graph_get_channel_schema, + |client, args| Box::pin(handlers::handle_graph_get_channel(client, args)), + ), + tool_spec( + "graph_list_nodes", + "List all known node IDs in the network graph", + schema::graph_list_nodes_schema, + |client, args| Box::pin(handlers::handle_graph_list_nodes(client, args)), + ), + tool_spec( + "graph_get_node", + "Get node information from the network graph by node ID", + schema::graph_get_node_schema, + |client, args| Box::pin(handlers::handle_graph_get_node(client, args)), + ), + ] + .into_iter() + .map(|spec| { + ( + ToolDefinition { + name: spec.name.to_string(), + description: spec.description.to_string(), + input_schema: (spec.input_schema)(), + }, + spec.handler, + ) + }) + .collect(); + + ToolRegistry { tools } +} diff --git a/ldk-server-mcp/src/tools/schema.rs b/ldk-server-mcp/src/tools/schema.rs new file mode 100644 index 00000000..9905f65c --- /dev/null +++ b/ldk-server-mcp/src/tools/schema.rs @@ -0,0 +1,755 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. 
+ +use serde_json::{json, Value}; + +pub fn get_node_info_schema() -> Value { + json!({ + "type": "object", + "properties": {}, + "required": [] + }) +} + +pub fn get_balances_schema() -> Value { + json!({ + "type": "object", + "properties": {}, + "required": [] + }) +} + +pub fn onchain_receive_schema() -> Value { + json!({ + "type": "object", + "properties": {}, + "required": [] + }) +} + +pub fn onchain_send_schema() -> Value { + json!({ + "type": "object", + "properties": { + "address": { + "type": "string", + "description": "The Bitcoin address to send coins to" + }, + "amount_sats": { + "type": "integer", + "description": "The amount in satoshis to send. Respects on-chain reserve for anchor channels" + }, + "send_all": { + "type": "boolean", + "description": "If true, send full balance (ignores amount_sats). Warning: will not retain on-chain reserves for anchor channels" + }, + "fee_rate_sat_per_vb": { + "type": "integer", + "description": "Fee rate in satoshis per virtual byte. If not set, a reasonable estimate will be used" + } + }, + "required": ["address"] + }) +} + +pub fn bolt11_receive_schema() -> Value { + json!({ + "type": "object", + "properties": { + "amount_msat": { + "type": "integer", + "description": "Amount in millisatoshis to request. If unset, a variable-amount invoice is returned" + }, + "description": { + "type": "string", + "description": "Description to attach to the invoice. Mutually exclusive with description_hash" + }, + "description_hash": { + "type": "string", + "description": "SHA-256 hash of the description (hex). Use instead of description for longer text. Mutually exclusive with description" + }, + "expiry_secs": { + "type": "integer", + "description": "Invoice expiry time in seconds (default: 86400)" + } + }, + "required": [] + }) +} + +pub fn bolt11_receive_for_hash_schema() -> Value { + json!({ + "type": "object", + "properties": { + "amount_msat": { + "type": "integer", + "description": "Amount in millisatoshis to request. 
If unset, a variable-amount invoice is returned" + }, + "description": { + "type": "string", + "description": "Description to attach to the invoice. Mutually exclusive with description_hash" + }, + "description_hash": { + "type": "string", + "description": "SHA-256 hash of the description (hex). Use instead of description for longer text. Mutually exclusive with description" + }, + "expiry_secs": { + "type": "integer", + "description": "Invoice expiry time in seconds (default: 86400)" + }, + "payment_hash": { + "type": "string", + "description": "The hex-encoded 32-byte payment hash to use for the invoice" + } + }, + "required": ["payment_hash"] + }) +} + +pub fn bolt11_claim_for_hash_schema() -> Value { + json!({ + "type": "object", + "properties": { + "payment_hash": { + "type": "string", + "description": "The hex-encoded 32-byte payment hash. If provided, it will be used to verify that the preimage matches" + }, + "claimable_amount_msat": { + "type": "integer", + "description": "The amount in millisatoshis that is claimable. If not provided, skips amount verification" + }, + "preimage": { + "type": "string", + "description": "The hex-encoded 32-byte payment preimage" + } + }, + "required": ["preimage"] + }) +} + +pub fn bolt11_fail_for_hash_schema() -> Value { + json!({ + "type": "object", + "properties": { + "payment_hash": { + "type": "string", + "description": "The hex-encoded 32-byte payment hash" + } + }, + "required": ["payment_hash"] + }) +} + +pub fn bolt11_receive_via_jit_channel_schema() -> Value { + json!({ + "type": "object", + "properties": { + "amount_msat": { + "type": "integer", + "description": "The amount in millisatoshis to request" + }, + "description": { + "type": "string", + "description": "Description to attach to the invoice. Mutually exclusive with description_hash" + }, + "description_hash": { + "type": "string", + "description": "SHA-256 hash of the description (hex). Use instead of description for longer text. 
Mutually exclusive with description" + }, + "expiry_secs": { + "type": "integer", + "description": "Invoice expiry time in seconds (default: 86400)" + }, + "max_total_lsp_fee_limit_msat": { + "type": "integer", + "description": "Optional upper bound for the total fee an LSP may deduct when opening the JIT channel" + } + }, + "required": ["amount_msat"] + }) +} + +pub fn bolt11_receive_variable_amount_via_jit_channel_schema() -> Value { + json!({ + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "Description to attach to the invoice. Mutually exclusive with description_hash" + }, + "description_hash": { + "type": "string", + "description": "SHA-256 hash of the description (hex). Use instead of description for longer text. Mutually exclusive with description" + }, + "expiry_secs": { + "type": "integer", + "description": "Invoice expiry time in seconds (default: 86400)" + }, + "max_proportional_lsp_fee_limit_ppm_msat": { + "type": "integer", + "description": "Optional upper bound for the proportional fee, in parts-per-million millisatoshis, that an LSP may deduct when opening the JIT channel" + } + }, + "required": [] + }) +} + +pub fn bolt11_send_schema() -> Value { + json!({ + "type": "object", + "properties": { + "invoice": { + "type": "string", + "description": "A BOLT11 invoice string to pay" + }, + "amount_msat": { + "type": "integer", + "description": "Amount in millisatoshis. Required when paying a zero-amount invoice" + }, + "max_total_routing_fee_msat": { + "type": "integer", + "description": "Maximum total routing fee in millisatoshis. 
Defaults to 1% of payment + 50 sats" + }, + "max_total_cltv_expiry_delta": { + "type": "integer", + "description": "Maximum total CLTV delta for the route (default: 1008)" + }, + "max_path_count": { + "type": "integer", + "description": "Maximum number of paths for MPP payments (default: 10)" + }, + "max_channel_saturation_power_of_half": { + "type": "integer", + "description": "Maximum channel capacity share as power of 1/2 (default: 2)" + } + }, + "required": ["invoice"] + }) +} + +pub fn bolt12_receive_schema() -> Value { + json!({ + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "Description to attach to the offer" + }, + "amount_msat": { + "type": "integer", + "description": "Amount in millisatoshis. If unset, a variable-amount offer is returned" + }, + "expiry_secs": { + "type": "integer", + "description": "Offer expiry time in seconds" + }, + "quantity": { + "type": "integer", + "description": "Number of items requested. Can only be set for fixed-amount offers" + } + }, + "required": ["description"] + }) +} + +pub fn bolt12_send_schema() -> Value { + json!({ + "type": "object", + "properties": { + "offer": { + "type": "string", + "description": "A BOLT12 offer string to pay" + }, + "amount_msat": { + "type": "integer", + "description": "Amount in millisatoshis. Required when paying a zero-amount offer" + }, + "quantity": { + "type": "integer", + "description": "Number of items requested" + }, + "payer_note": { + "type": "string", + "description": "Note to include for the payee. Reflected back in the invoice" + }, + "max_total_routing_fee_msat": { + "type": "integer", + "description": "Maximum total routing fee in millisatoshis. 
Defaults to 1% of payment + 50 sats" + }, + "max_total_cltv_expiry_delta": { + "type": "integer", + "description": "Maximum total CLTV delta for the route (default: 1008)" + }, + "max_path_count": { + "type": "integer", + "description": "Maximum number of paths for MPP payments (default: 10)" + }, + "max_channel_saturation_power_of_half": { + "type": "integer", + "description": "Maximum channel capacity share as power of 1/2 (default: 2)" + } + }, + "required": ["offer"] + }) +} + +pub fn spontaneous_send_schema() -> Value { + json!({ + "type": "object", + "properties": { + "amount_msat": { + "type": "integer", + "description": "The amount in millisatoshis to send" + }, + "node_id": { + "type": "string", + "description": "The hex-encoded public key of the destination node" + }, + "max_total_routing_fee_msat": { + "type": "integer", + "description": "Maximum total routing fee in millisatoshis. Defaults to 1% of payment + 50 sats" + }, + "max_total_cltv_expiry_delta": { + "type": "integer", + "description": "Maximum total CLTV delta for the route (default: 1008)" + }, + "max_path_count": { + "type": "integer", + "description": "Maximum number of paths for MPP payments (default: 10)" + }, + "max_channel_saturation_power_of_half": { + "type": "integer", + "description": "Maximum channel capacity share as power of 1/2 (default: 2)" + } + }, + "required": ["amount_msat", "node_id"] + }) +} + +pub fn unified_send_schema() -> Value { + json!({ + "type": "object", + "properties": { + "uri": { + "type": "string", + "description": "A BIP 21 URI or BIP 353 Human-Readable Name to pay" + }, + "amount_msat": { + "type": "integer", + "description": "The amount in millisatoshis to send. Required for zero-amount or variable-amount URIs" + }, + "max_total_routing_fee_msat": { + "type": "integer", + "description": "Maximum total routing fee in millisatoshis. 
Defaults to 1% of payment + 50 sats" + }, + "max_total_cltv_expiry_delta": { + "type": "integer", + "description": "Maximum total CLTV delta for the route (default: 1008)" + }, + "max_path_count": { + "type": "integer", + "description": "Maximum number of paths for MPP payments (default: 10)" + }, + "max_channel_saturation_power_of_half": { + "type": "integer", + "description": "Maximum channel capacity share as power of 1/2 (default: 2)" + } + }, + "required": ["uri"] + }) +} + +pub fn open_channel_schema() -> Value { + json!({ + "type": "object", + "properties": { + "node_pubkey": { + "type": "string", + "description": "The hex-encoded public key of the node to open a channel with" + }, + "address": { + "type": "string", + "description": "Address of the remote peer (IPv4:port, IPv6:port, OnionV3:port, or hostname:port)" + }, + "channel_amount_sats": { + "type": "integer", + "description": "The amount in satoshis to commit to the channel" + }, + "push_to_counterparty_msat": { + "type": "integer", + "description": "Amount in millisatoshis to push to the remote side" + }, + "announce_channel": { + "type": "boolean", + "description": "Whether the channel should be public (default: false)" + }, + "forwarding_fee_proportional_millionths": { + "type": "integer", + "description": "Fee in millionths of a satoshi charged per satoshi forwarded" + }, + "forwarding_fee_base_msat": { + "type": "integer", + "description": "Base fee in millisatoshis for forwarded payments" + }, + "cltv_expiry_delta": { + "type": "integer", + "description": "CLTV delta between incoming and outbound HTLCs" + }, + "force_close_avoidance_max_fee_satoshis": { + "type": "integer", + "description": "The maximum additional fee we are willing to pay to avoid waiting for the counterparty's to_self_delay to reclaim funds" + }, + "accept_underpaying_htlcs": { + "type": "boolean", + "description": "If set, allows the channel counterparty to skim an additional fee off inbound HTLCs" + }, + 
"max_dust_htlc_exposure_fixed_limit_msat": { + "type": "integer", + "description": "Sets a fixed limit on the total dust exposure in millisatoshis. Mutually exclusive with max_dust_htlc_exposure_fee_rate_multiplier" + }, + "max_dust_htlc_exposure_fee_rate_multiplier": { + "type": "integer", + "description": "Sets a multiplier on the on-chain sweep feerate to determine the maximum allowed dust exposure. Mutually exclusive with max_dust_htlc_exposure_fixed_limit_msat" + }, + "disable_counterparty_reserve": { + "type": "boolean", + "description": "Allow the counterparty to spend all its channel balance. Cannot be set together with announce_channel" + } + }, + "required": ["node_pubkey", "address", "channel_amount_sats"] + }) +} + +pub fn splice_in_schema() -> Value { + json!({ + "type": "object", + "properties": { + "user_channel_id": { + "type": "string", + "description": "The local user_channel_id of the channel" + }, + "counterparty_node_id": { + "type": "string", + "description": "The hex-encoded public key of the channel's counterparty node" + }, + "splice_amount_sats": { + "type": "integer", + "description": "The amount in satoshis to splice into the channel" + } + }, + "required": ["user_channel_id", "counterparty_node_id", "splice_amount_sats"] + }) +} + +pub fn splice_out_schema() -> Value { + json!({ + "type": "object", + "properties": { + "user_channel_id": { + "type": "string", + "description": "The local user_channel_id of the channel" + }, + "counterparty_node_id": { + "type": "string", + "description": "The hex-encoded public key of the channel's counterparty node" + }, + "splice_amount_sats": { + "type": "integer", + "description": "The amount in satoshis to splice out of the channel" + }, + "address": { + "type": "string", + "description": "Bitcoin address for the spliced-out funds. 
If not set, uses the node's on-chain wallet" + } + }, + "required": ["user_channel_id", "counterparty_node_id", "splice_amount_sats"] + }) +} + +pub fn close_channel_schema() -> Value { + json!({ + "type": "object", + "properties": { + "user_channel_id": { + "type": "string", + "description": "The local user_channel_id of the channel" + }, + "counterparty_node_id": { + "type": "string", + "description": "The hex-encoded public key of the node to close the channel with" + } + }, + "required": ["user_channel_id", "counterparty_node_id"] + }) +} + +pub fn force_close_channel_schema() -> Value { + json!({ + "type": "object", + "properties": { + "user_channel_id": { + "type": "string", + "description": "The local user_channel_id of the channel" + }, + "counterparty_node_id": { + "type": "string", + "description": "The hex-encoded public key of the node to close the channel with" + }, + "force_close_reason": { + "type": "string", + "description": "The reason for force-closing the channel" + } + }, + "required": ["user_channel_id", "counterparty_node_id"] + }) +} + +pub fn list_channels_schema() -> Value { + json!({ + "type": "object", + "properties": {}, + "required": [] + }) +} + +pub fn update_channel_config_schema() -> Value { + json!({ + "type": "object", + "properties": { + "user_channel_id": { + "type": "string", + "description": "The local user_channel_id of the channel" + }, + "counterparty_node_id": { + "type": "string", + "description": "The hex-encoded public key of the counterparty node" + }, + "forwarding_fee_proportional_millionths": { + "type": "integer", + "description": "Fee in millionths of a satoshi charged per satoshi forwarded" + }, + "forwarding_fee_base_msat": { + "type": "integer", + "description": "Base fee in millisatoshis for forwarded payments" + }, + "cltv_expiry_delta": { + "type": "integer", + "description": "CLTV delta between incoming and outbound HTLCs" + }, + "force_close_avoidance_max_fee_satoshis": { + "type": "integer", + 
"description": "The maximum additional fee we are willing to pay to avoid waiting for the counterparty's to_self_delay to reclaim funds" + }, + "accept_underpaying_htlcs": { + "type": "boolean", + "description": "If set, allows the channel counterparty to skim an additional fee off inbound HTLCs" + }, + "max_dust_htlc_exposure_fixed_limit_msat": { + "type": "integer", + "description": "Sets a fixed limit on the total dust exposure in millisatoshis. Mutually exclusive with max_dust_htlc_exposure_fee_rate_multiplier" + }, + "max_dust_htlc_exposure_fee_rate_multiplier": { + "type": "integer", + "description": "Sets a multiplier on the on-chain sweep feerate to determine the maximum allowed dust exposure. Mutually exclusive with max_dust_htlc_exposure_fixed_limit_msat" + } + }, + "required": ["user_channel_id", "counterparty_node_id"] + }) +} + +pub fn list_payments_schema() -> Value { + json!({ + "type": "object", + "properties": { + "page_token": { + "type": "string", + "description": "Pagination token from a previous response (format: token:index)" + } + }, + "required": [] + }) +} + +pub fn get_payment_details_schema() -> Value { + json!({ + "type": "object", + "properties": { + "payment_id": { + "type": "string", + "description": "The payment ID in hex-encoded form" + } + }, + "required": ["payment_id"] + }) +} + +pub fn list_forwarded_payments_schema() -> Value { + json!({ + "type": "object", + "properties": { + "page_token": { + "type": "string", + "description": "Pagination token from a previous response (format: token:index)" + } + }, + "required": [] + }) +} + +pub fn connect_peer_schema() -> Value { + json!({ + "type": "object", + "properties": { + "node_pubkey": { + "type": "string", + "description": "The hex-encoded public key of the node to connect to" + }, + "address": { + "type": "string", + "description": "Address of the remote peer (IPv4:port, IPv6:port, OnionV3:port, or hostname:port)" + }, + "persist": { + "type": "boolean", + "description": 
"Whether to persist the connection for automatic reconnection on restart (default: false)" + } + }, + "required": ["node_pubkey", "address"] + }) +} + +pub fn disconnect_peer_schema() -> Value { + json!({ + "type": "object", + "properties": { + "node_pubkey": { + "type": "string", + "description": "The hex-encoded public key of the node to disconnect from" + } + }, + "required": ["node_pubkey"] + }) +} + +pub fn list_peers_schema() -> Value { + json!({ + "type": "object", + "properties": {}, + "required": [] + }) +} + +pub fn decode_invoice_schema() -> Value { + json!({ + "type": "object", + "properties": { + "invoice": { + "type": "string", + "description": "The BOLT11 invoice string to decode" + } + }, + "required": ["invoice"] + }) +} + +pub fn decode_offer_schema() -> Value { + json!({ + "type": "object", + "properties": { + "offer": { + "type": "string", + "description": "The BOLT12 offer string to decode" + } + }, + "required": ["offer"] + }) +} + +pub fn sign_message_schema() -> Value { + json!({ + "type": "object", + "properties": { + "message": { + "type": "string", + "description": "The message to sign" + } + }, + "required": ["message"] + }) +} + +pub fn verify_signature_schema() -> Value { + json!({ + "type": "object", + "properties": { + "message": { + "type": "string", + "description": "The message that was signed" + }, + "signature": { + "type": "string", + "description": "The zbase32-encoded signature to verify" + }, + "public_key": { + "type": "string", + "description": "The hex-encoded public key of the signer" + } + }, + "required": ["message", "signature", "public_key"] + }) +} + +pub fn export_pathfinding_scores_schema() -> Value { + json!({ + "type": "object", + "properties": {}, + "required": [] + }) +} + +pub fn graph_list_channels_schema() -> Value { + json!({ + "type": "object", + "properties": {}, + "required": [] + }) +} + +pub fn graph_get_channel_schema() -> Value { + json!({ + "type": "object", + "properties": { + "short_channel_id": 
{ + "type": "integer", + "description": "The short channel ID to look up" + } + }, + "required": ["short_channel_id"] + }) +} + +pub fn graph_list_nodes_schema() -> Value { + json!({ + "type": "object", + "properties": {}, + "required": [] + }) +} + +pub fn graph_get_node_schema() -> Value { + json!({ + "type": "object", + "properties": { + "node_id": { + "type": "string", + "description": "The hex-encoded node ID to look up" + } + }, + "required": ["node_id"] + }) +} diff --git a/ldk-server-mcp/tests/fixtures/test_cert.pem b/ldk-server-mcp/tests/fixtures/test_cert.pem new file mode 100644 index 00000000..3d23f614 --- /dev/null +++ b/ldk-server-mcp/tests/fixtures/test_cert.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDCTCCAfGgAwIBAgIUaIpIYZhk0rQjfg8F24i+TVYFQNgwDQYJKoZIhvcNAQEL +BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTI2MDIyNjEyNDkzMFoXDTM2MDIy +NDEyNDkzMFowFDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA16GHTRmriId5v9pUIFk61spibHPGDeSLXFHM4EAHH6TA +1tc3vTrgkxXy65Ru6TlvSY8RSXl7GqtdSdLFX3ehOl6EOqxrM4R6iWrNbJqbhsVP +T1ILWAdCObV66vRdus8UtdYs9RfTMrM9ghyKAKxrb/v6oU+UVCqngLIw2qZTM6Ne +eQtU/YU3l0BG3mc0ufWmsN8XSMJeJxfcFZLQPIk1/h6NmRmOcjATgiaSGCkKW4WX +p9K52Za9GinUaOqN87lM+SZX03wJSwatm0vBcLHc9Cc3BAx7Hsd/+Em9ywchSCto +5Ay5OjsdOhXkxGVBmlqWEaECQ4M9hYKT4a+e6wF+owIDAQABo1MwUTAdBgNVHQ4E +FgQU74Mhg80zO7Yl02H45GgJLV2Yio0wHwYDVR0jBBgwFoAU74Mhg80zO7Yl02H4 +5GgJLV2Yio0wDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEANlja +ph1/yXgInYiPswpyO3K67ujq9Gdn+XkFbsMiJqrvj2WJATYClkFIrb1DQaZV3ff6 +QAsxcpgAiNmLjlZ9A9G/6QMyFcqgI9Hzpd9nN8c0b1nQDuE7gLozCR0H7WeS9TRW +fE3mQBRZxahW78og2UvD4NeElvuk/hCPB0teovAUCaqpTsDnEeAGV8LVjMWRVp8h +gES9A4VOObWwfEirWSU3Bn3HwkVTkRbnJvo/b+3KpvXRS81M3eZxnPdmGK0zP+lY +40KYABID1DxTqYwjJI2nDEhR6+2ppATw3PhkEQQi+zpP9Tqxque2VtpGDJcyPOl1 +LXIaAEULV0zCGunmMQ== +-----END CERTIFICATE----- diff --git a/ldk-server-mcp/tests/integration.rs b/ldk-server-mcp/tests/integration.rs new file mode 100644 index 00000000..741d7b82 --- /dev/null +++ 
b/ldk-server-mcp/tests/integration.rs @@ -0,0 +1,440 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + +use std::io::{BufRead, BufReader, Write}; + +use serde_json::{json, Value}; + +const NUM_TOOLS: usize = 37; +const EXPECTED_TOOLS: [&str; NUM_TOOLS] = [ + "bolt11_claim_for_hash", + "bolt11_fail_for_hash", + "bolt11_receive", + "bolt11_receive_for_hash", + "bolt11_receive_variable_amount_via_jit_channel", + "bolt11_receive_via_jit_channel", + "bolt11_send", + "bolt12_receive", + "bolt12_send", + "close_channel", + "connect_peer", + "decode_invoice", + "decode_offer", + "disconnect_peer", + "export_pathfinding_scores", + "force_close_channel", + "get_balances", + "get_node_info", + "get_payment_details", + "graph_get_channel", + "graph_get_node", + "graph_list_channels", + "graph_list_nodes", + "list_channels", + "list_forwarded_payments", + "list_payments", + "list_peers", + "onchain_receive", + "onchain_send", + "open_channel", + "sign_message", + "splice_in", + "splice_out", + "spontaneous_send", + "unified_send", + "update_channel_config", + "verify_signature", +]; + +fn test_cert_path() -> String { + std::path::Path::new(env!("CARGO_MANIFEST_DIR")) + .join("tests/fixtures/test_cert.pem") + .to_str() + .unwrap() + .to_string() +} + +struct McpProcess { + child: std::process::Child, + stdin: std::process::ChildStdin, + reader: BufReader, +} + +impl McpProcess { + fn spawn() -> Self { + let mut child = std::process::Command::new(env!("CARGO_BIN_EXE_ldk-server-mcp")) + .env("LDK_BASE_URL", "localhost:19999") + .env("LDK_API_KEY", "deadbeef") + .env("LDK_TLS_CERT_PATH", test_cert_path()) + .stdin(std::process::Stdio::piped()) + .stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .spawn() + 
.expect("Failed to spawn MCP process"); + + let stdin = child.stdin.take().unwrap(); + let stdout = child.stdout.take().unwrap(); + let reader = BufReader::new(stdout); + + McpProcess { child, stdin, reader } + } + + fn send(&mut self, msg: &Value) { + let line = serde_json::to_string(msg).unwrap(); + writeln!(self.stdin, "{}", line).expect("Failed to write to stdin"); + self.stdin.flush().expect("Failed to flush stdin"); + } + + fn recv(&mut self) -> Value { + let mut line = String::new(); + self.reader.read_line(&mut line).expect("Failed to read from stdout"); + serde_json::from_str(line.trim()).expect("Failed to parse JSON response") + } +} + +impl Drop for McpProcess { + fn drop(&mut self) { + let _ = self.child.kill(); + let _ = self.child.wait(); + } +} + +fn assert_unreachable_tool(tool_name: &str, arguments: Value) { + let mut proc = McpProcess::spawn(); + + proc.send(&json!({ + "jsonrpc": "2.0", + "id": 1, + "method": "tools/call", + "params": { + "name": tool_name, + "arguments": arguments + } + })); + + let resp = proc.recv(); + assert_eq!(resp["jsonrpc"], "2.0"); + assert_eq!(resp["id"], 1); + assert_eq!(resp["result"]["isError"], true); + let text = resp["result"]["content"][0]["text"].as_str().unwrap(); + assert!(!text.is_empty(), "Expected non-empty error message"); +} + +#[test] +fn test_initialize() { + let mut proc = McpProcess::spawn(); + + proc.send(&json!({ + "jsonrpc": "2.0", + "id": 1, + "method": "initialize", + "params": { + "protocolVersion": "2024-11-05", + "capabilities": {}, + "clientInfo": {"name": "test", "version": "0.1"} + } + })); + + let resp = proc.recv(); + assert_eq!(resp["jsonrpc"], "2.0"); + assert_eq!(resp["id"], 1); + assert_eq!(resp["result"]["protocolVersion"], "2024-11-05"); + assert!(resp["result"]["capabilities"]["tools"].is_object()); + assert_eq!(resp["result"]["serverInfo"]["name"], "ldk-server-mcp"); + assert_eq!(resp["result"]["serverInfo"]["version"], "0.1.0"); +} + +#[test] +fn test_tools_list() { + let mut proc 
= McpProcess::spawn(); + + proc.send(&json!({ + "jsonrpc": "2.0", + "id": 1, + "method": "tools/list", + "params": {} + })); + + let resp = proc.recv(); + assert_eq!(resp["jsonrpc"], "2.0"); + assert_eq!(resp["id"], 1); + + let tools = resp["result"]["tools"].as_array().unwrap(); + assert_eq!(tools.len(), NUM_TOOLS, "Expected {NUM_TOOLS} tools, got {}", tools.len()); + let mut tool_names = tools + .iter() + .map(|tool| tool["name"].as_str().expect("Tool missing name").to_string()) + .collect::>(); + tool_names.sort(); + + let mut expected_tool_names = + EXPECTED_TOOLS.iter().map(|name| name.to_string()).collect::>(); + expected_tool_names.sort(); + assert_eq!(tool_names, expected_tool_names, "Tool names drifted from the expected API surface"); + + for tool in tools { + assert!(tool["name"].is_string(), "Tool missing name"); + assert!(tool["description"].is_string(), "Tool missing description"); + assert!(tool["inputSchema"].is_object(), "Tool missing inputSchema"); + } +} + +#[test] +fn test_tools_call_unknown_tool() { + let mut proc = McpProcess::spawn(); + + proc.send(&json!({ + "jsonrpc": "2.0", + "id": 1, + "method": "tools/call", + "params": { + "name": "nonexistent_tool", + "arguments": {} + } + })); + + let resp = proc.recv(); + assert_eq!(resp["jsonrpc"], "2.0"); + assert_eq!(resp["id"], 1); + assert_eq!(resp["result"]["isError"], true); + let text = resp["result"]["content"][0]["text"].as_str().unwrap(); + assert!(text.contains("Unknown tool"), "Expected 'Unknown tool' in error, got: {text}"); +} + +#[test] +fn test_tools_call_unreachable_server() { + let mut proc = McpProcess::spawn(); + + proc.send(&json!({ + "jsonrpc": "2.0", + "id": 1, + "method": "tools/call", + "params": { + "name": "get_node_info", + "arguments": {} + } + })); + + let resp = proc.recv(); + assert_eq!(resp["jsonrpc"], "2.0"); + assert_eq!(resp["id"], 1); + assert_eq!(resp["result"]["isError"], true); + let text = resp["result"]["content"][0]["text"].as_str().unwrap(); + 
assert!(!text.is_empty(), "Expected non-empty error message"); +} + +#[test] +fn test_bolt11_receive_via_jit_channel_unreachable() { + let mut proc = McpProcess::spawn(); + + proc.send(&json!({ + "jsonrpc": "2.0", + "id": 1, + "method": "tools/call", + "params": { + "name": "bolt11_receive_via_jit_channel", + "arguments": { + "amount_msat": 1000, + "description": "test jit" + } + } + })); + + let resp = proc.recv(); + assert_eq!(resp["jsonrpc"], "2.0"); + assert_eq!(resp["id"], 1); + assert_eq!(resp["result"]["isError"], true); + let text = resp["result"]["content"][0]["text"].as_str().unwrap(); + assert!(!text.is_empty(), "Expected non-empty error message"); +} + +#[test] +fn test_bolt11_receive_variable_amount_via_jit_channel_unreachable() { + assert_unreachable_tool( + "bolt11_receive_variable_amount_via_jit_channel", + json!({ "description": "test jit" }), + ); +} + +#[test] +fn test_bolt11_receive_for_hash_unreachable() { + assert_unreachable_tool( + "bolt11_receive_for_hash", + json!({ + "payment_hash": "00".repeat(32), + "description": "test hodl" + }), + ); +} + +#[test] +fn test_bolt11_claim_for_hash_unreachable() { + assert_unreachable_tool( + "bolt11_claim_for_hash", + json!({ + "payment_hash": "11".repeat(32), + "preimage": "22".repeat(32) + }), + ); +} + +#[test] +fn test_bolt11_fail_for_hash_unreachable() { + assert_unreachable_tool("bolt11_fail_for_hash", json!({ "payment_hash": "33".repeat(32) })); +} + +#[test] +fn test_unified_send_unreachable() { + assert_unreachable_tool("unified_send", json!({ "uri": "bitcoin:tb1qexample?amount=0.001" })); +} + +#[test] +fn test_list_peers_unreachable() { + assert_unreachable_tool("list_peers", json!({})); +} + +#[test] +fn test_decode_invoice_unreachable() { + assert_unreachable_tool("decode_invoice", json!({ "invoice": "lnbc1example" })); +} + +#[test] +fn test_decode_offer_unreachable() { + assert_unreachable_tool("decode_offer", json!({ "offer": "lno1example" })); +} + +#[test] +fn 
test_notification_no_response() { + let mut proc = McpProcess::spawn(); + + // Send a notification (no id) - should produce no response + proc.send(&json!({ + "jsonrpc": "2.0", + "method": "notifications/initialized" + })); + + // Send a real request after the notification + proc.send(&json!({ + "jsonrpc": "2.0", + "id": 42, + "method": "initialize", + "params": { + "protocolVersion": "2024-11-05", + "capabilities": {}, + "clientInfo": {"name": "test", "version": "0.1"} + } + })); + + // The first response we get should be for id 42, not for the notification + let resp = proc.recv(); + assert_eq!(resp["id"], 42); +} + +#[test] +fn test_graph_list_channels_unreachable() { + let mut proc = McpProcess::spawn(); + + proc.send(&json!({ + "jsonrpc": "2.0", + "id": 1, + "method": "tools/call", + "params": { + "name": "graph_list_channels", + "arguments": {} + } + })); + + let resp = proc.recv(); + assert_eq!(resp["jsonrpc"], "2.0"); + assert_eq!(resp["id"], 1); + assert_eq!(resp["result"]["isError"], true); + let text = resp["result"]["content"][0]["text"].as_str().unwrap(); + assert!(!text.is_empty(), "Expected non-empty error message"); +} + +#[test] +fn test_graph_get_channel_unreachable() { + let mut proc = McpProcess::spawn(); + + proc.send(&json!({ + "jsonrpc": "2.0", + "id": 1, + "method": "tools/call", + "params": { + "name": "graph_get_channel", + "arguments": {"short_channel_id": 12345} + } + })); + + let resp = proc.recv(); + assert_eq!(resp["jsonrpc"], "2.0"); + assert_eq!(resp["id"], 1); + assert_eq!(resp["result"]["isError"], true); + let text = resp["result"]["content"][0]["text"].as_str().unwrap(); + assert!(!text.is_empty(), "Expected non-empty error message"); +} + +#[test] +fn test_graph_list_nodes_unreachable() { + let mut proc = McpProcess::spawn(); + + proc.send(&json!({ + "jsonrpc": "2.0", + "id": 1, + "method": "tools/call", + "params": { + "name": "graph_list_nodes", + "arguments": {} + } + })); + + let resp = proc.recv(); + 
assert_eq!(resp["jsonrpc"], "2.0"); + assert_eq!(resp["id"], 1); + assert_eq!(resp["result"]["isError"], true); + let text = resp["result"]["content"][0]["text"].as_str().unwrap(); + assert!(!text.is_empty(), "Expected non-empty error message"); +} + +#[test] +fn test_graph_get_node_unreachable() { + let mut proc = McpProcess::spawn(); + + proc.send(&json!({ + "jsonrpc": "2.0", + "id": 1, + "method": "tools/call", + "params": { + "name": "graph_get_node", + "arguments": {"node_id": "02deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"} + } + })); + + let resp = proc.recv(); + assert_eq!(resp["jsonrpc"], "2.0"); + assert_eq!(resp["id"], 1); + assert_eq!(resp["result"]["isError"], true); + let text = resp["result"]["content"][0]["text"].as_str().unwrap(); + assert!(!text.is_empty(), "Expected non-empty error message"); +} + +#[test] +fn test_malformed_json() { + let mut proc = McpProcess::spawn(); + + // Send garbage + writeln!(proc.stdin, "this is not json").unwrap(); + proc.stdin.flush().unwrap(); + + let resp = proc.recv(); + assert_eq!(resp["jsonrpc"], "2.0"); + assert!(resp["error"].is_object()); + assert_eq!(resp["error"]["code"], -32700); + assert_eq!(resp["error"]["message"], "Parse error"); +}