From 471ee7f1728356d3c3724321cf168c461e245255 Mon Sep 17 00:00:00 2001 From: Qasim Date: Mon, 27 Apr 2026 17:39:11 -0400 Subject: [PATCH 1/9] TW-4877: webhook server preflight UX + security hardening MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Originating issue: nylas webhook server displayed a localhost URL and told the user to register it with Nylas — but Nylas can't reach localhost, so events never fired (Slack #cli, 2026-04-27). Webhook preflight (the originating issue) - Interactive preflight when neither --tunnel nor --no-tunnel is set: detect cloudflared, offer brew install on macOS, prompt to enable tunnel, read secret with terminal echo disabled. - New --no-tunnel flag for non-interactive opt-out. - Loopback-only output no longer instructs user to register localhost. - Prompter interface so EOF (Ctrl-D) propagates cleanly instead of silently flipping into --allow-unsigned or auto-running brew install. Webhook server hardening (internal/adapters/webhookserver) - 1 MiB request body cap (http.MaxBytesReader). - Replay protection via MaxEventAge (CloudEvents `time` skew check). - Goroutine fanout bounded by 32-slot semaphore + per-handler recover(). - events_dropped surfaced in /health. - Event-display goroutine exits on ctx.Done() and recovers from panics. Other silent-failure fixes uncovered in review - pattern_learner: surface per-calendar errors instead of silent skip. - doctor_checks: propagate config/secret-store failures explicitly. - base_client.go: error on malformed tool-call JSON. - requestLocation: return error on bad timezone instead of UTC fallback. - admin.go: cycle guard + maxGrantPages ceiling on cursor pagination. Crypto hardening (internal/adapters/keyring) - Argon2id raised from t=1 to t=3. - NYLAS_FILE_STORE_PASSPHRASE minimum length 12. - Derived AES keys zeroed after use. Frontend (Air) - XSS fix: AI summary sentiment/category now escapeHtml-ed. 
- notetaker-open-external: scheme allow-list (https?://) + noopener. Tests - New unit tests for preflightTunnelChoice (mocked Prompter). - New webhookserver tests: oversized body 413, replay window, loopback bind, events_dropped in /health. - webguard middleware: Referer-only fallback path. - All packages pass go test -race; golangci-lint clean. --- docs/COMMANDS.md | 5 +- docs/commands/webhooks.md | 31 +- internal/adapters/ai/base_client.go | 89 ++++ internal/adapters/ai/groq_client.go | 87 +--- internal/adapters/ai/openai_client.go | 86 +--- internal/adapters/ai/pattern_learner.go | 25 +- internal/adapters/ai/pattern_learner_test.go | 7 +- internal/adapters/ai/scheduler_tools.go | 324 +----------- .../adapters/ai/scheduler_tools_helpers.go | 320 ++++++++++++ .../adapters/keyring/crossplatform_test.go | 190 ++++++- internal/adapters/keyring/file.go | 395 +++++++++----- internal/adapters/keyring/file_crypto.go | 71 +++ internal/adapters/keyring/file_legacy.go | 83 +++ internal/adapters/keyring/keyring.go | 46 +- internal/adapters/nylas/admin.go | 130 +++-- internal/adapters/nylas/admin_grants_test.go | 64 +++ internal/adapters/nylas/agent.go | 3 +- internal/adapters/nylas/attachments.go | 5 +- internal/adapters/nylas/auth.go | 5 +- .../adapters/nylas/calendars_calendars.go | 11 +- internal/adapters/nylas/calendars_events.go | 15 +- .../nylas/calendars_virtual_recurring.go | 7 +- internal/adapters/nylas/contacts.go | 21 +- internal/adapters/nylas/drafts.go | 65 ++- internal/adapters/nylas/drafts_update.go | 72 +-- internal/adapters/nylas/folders.go | 11 +- internal/adapters/nylas/managed_grants.go | 3 +- internal/adapters/nylas/messages.go | 9 +- internal/adapters/nylas/messages_send.go | 36 +- internal/adapters/nylas/notetakers.go | 11 +- internal/adapters/nylas/scheduler.go | 18 +- internal/adapters/nylas/signatures.go | 11 +- internal/adapters/nylas/threads.go | 9 +- internal/adapters/nylas/transactional.go | 3 +- internal/adapters/nylas/webhooks.go | 9 +- 
internal/adapters/oauth/server.go | 5 +- internal/adapters/webhookserver/server.go | 225 +++++--- .../adapters/webhookserver/server_test.go | 107 ++++ internal/air/handlers_ai_complete.go | 21 +- internal/air/handlers_ai_config.go | 33 +- internal/air/handlers_analytics.go | 23 +- internal/air/handlers_bundles.go | 23 +- internal/air/handlers_focus_mode.go | 49 +- internal/air/handlers_notetaker.go | 21 +- internal/air/handlers_read_receipts.go | 25 +- internal/air/handlers_reply_later.go | 19 +- internal/air/handlers_screener.go | 25 +- internal/air/middleware.go | 107 +--- internal/air/static/js/actions-registry.js | 362 +++++++++++++ internal/air/static/js/app-search.js | 23 +- internal/air/static/js/cache-init.js | 51 ++ internal/air/static/js/calendar-findtime.js | 8 +- internal/air/static/js/calendar-ui.js | 4 +- internal/air/static/js/command-palette.js | 4 +- internal/air/static/js/contacts-actions.js | 8 +- internal/air/static/js/contacts-detail.js | 30 +- internal/air/static/js/contacts-list.js | 5 +- internal/air/static/js/contacts-modal.js | 8 +- internal/air/static/js/contacts-states.js | 2 +- internal/air/static/js/contacts-utils.js | 13 + internal/air/static/js/email-ai.js | 22 +- internal/air/static/js/email-selection.js | 48 +- internal/air/static/js/event-delegation.js | 36 ++ internal/air/static/js/modals-navigation.js | 9 + internal/air/static/js/modals-settings.js | 79 +++ internal/air/static/js/notetaker-actions.js | 56 +- internal/air/static/js/notetaker-ui.js | 10 +- internal/air/static/js/scheduled-send.js | 2 +- internal/air/static/js/shortcuts.js | 2 +- internal/air/static/js/snooze.js | 4 +- internal/air/static/js/templates.js | 34 +- internal/air/static/js/undo-send.js | 2 +- internal/air/static/js/utils.js | 67 +++ internal/air/templates/base.gohtml | 61 +-- internal/air/templates/pages/calendar.gohtml | 4 +- internal/air/templates/pages/email.gohtml | 4 +- internal/air/templates/pages/notetaker.gohtml | 16 +- 
.../air/templates/pages/rules_policy.gohtml | 6 +- internal/air/templates/partials/header.gohtml | 22 +- .../templates/partials/modals_calendar.gohtml | 24 +- .../partials/modals_navigation.gohtml | 54 +- .../templates/partials/modals_settings.gohtml | 130 +---- internal/chat/handlers_conv.go | 6 +- internal/chat/handlers_conv_test.go | 28 +- internal/chat/memory.go | 29 +- internal/chat/memory_test.go | 10 +- internal/chat/server.go | 8 +- internal/chat/static/js/chat.js | 53 +- internal/chat/static/js/markdown.js | 36 +- internal/cli/admin/admin_test.go | 30 -- internal/cli/admin/applications.go | 12 +- internal/cli/admin/callback_uris.go | 12 +- internal/cli/admin/callback_uris_test.go | 9 - internal/cli/admin/connectors.go | 12 +- internal/cli/admin/credentials.go | 12 +- internal/cli/admin/grants.go | 10 +- internal/cli/agent/agent_test.go | 7 - internal/cli/agent/create.go | 12 +- internal/cli/agent/delete.go | 2 +- internal/cli/agent/get.go | 6 +- internal/cli/agent/helpers.go | 16 +- internal/cli/agent/list.go | 6 +- .../cli/agent/policy_create_update_delete.go | 22 +- internal/cli/agent/policy_list_get.go | 20 +- .../cli/agent/rule_create_update_delete.go | 10 +- internal/cli/agent/rule_list_get.go | 24 +- internal/cli/agent/status.go | 6 +- internal/cli/agent/update.go | 10 +- internal/cli/ai/usage.go | 4 +- internal/cli/audit/config.go | 2 +- internal/cli/audit/helpers.go | 41 -- internal/cli/audit/helpers_test.go | 44 -- internal/cli/audit/logs.go | 4 +- internal/cli/auth/detect.go | 25 +- internal/cli/auth/detect_test.go | 12 +- internal/cli/auth/providers.go | 6 +- internal/cli/auth/providers_test.go | 11 +- internal/cli/auth/scopes.go | 6 +- internal/cli/auth/scopes_test.go | 13 +- internal/cli/auth/show.go | 6 +- internal/cli/calendar/ai_context.go | 6 +- .../cli/calendar/ai_reschedule_helpers.go | 4 +- internal/cli/calendar/ai_schedule.go | 2 +- internal/cli/calendar/analyze.go | 4 +- internal/cli/calendar/availability.go | 96 +--- 
.../calendar/calendar_availability_test.go | 13 +- internal/cli/calendar/dst_helpers.go | 2 +- internal/cli/calendar/events_crud.go | 8 +- internal/cli/calendar/events_show.go | 4 +- internal/cli/calendar/recurring.go | 8 +- internal/cli/calendar/virtual.go | 18 +- internal/cli/common/flags.go | 5 - internal/cli/common/format.go | 4 +- internal/cli/common/time.go | 144 ++++++ internal/cli/common/time_test.go | 58 +++ .../cli/contacts/contacts_features_test.go | 9 - internal/cli/contacts/create.go | 18 +- internal/cli/contacts/photo.go | 4 +- internal/cli/contacts/search.go | 4 +- internal/cli/contacts/show.go | 6 +- internal/cli/contacts/update.go | 4 +- internal/cli/demo/email_drafts_attachments.go | 15 +- internal/cli/demo/email_folders_threads.go | 5 +- internal/cli/demo/email_main.go | 12 +- internal/cli/demo/notetaker.go | 18 +- internal/cli/demo/ui.go | 4 +- internal/cli/doctor.go | 453 +--------------- internal/cli/doctor_checks.go | 339 ++++++++++++ internal/cli/doctor_report.go | 121 +++++ internal/cli/doctor_test.go | 18 + internal/cli/email/attachments.go | 2 +- internal/cli/email/drafts.go | 17 +- internal/cli/email/email_advanced_test.go | 16 +- internal/cli/email/folders.go | 9 +- internal/cli/email/helpers.go | 18 +- internal/cli/email/helpers_test.go | 14 +- internal/cli/email/mark.go | 8 +- internal/cli/email/metadata.go | 5 +- internal/cli/email/metadata_test.go | 7 - internal/cli/email/scheduled.go | 2 +- internal/cli/email/send.go | 207 +------- internal/cli/email/send_helpers.go | 54 ++ internal/cli/email/send_parsing.go | 75 +++ internal/cli/email/signatures.go | 6 +- internal/cli/email/templates_create.go | 8 +- internal/cli/email/templates_delete.go | 2 +- internal/cli/email/templates_update.go | 2 +- internal/cli/email/templates_use.go | 5 +- internal/cli/email/threads.go | 4 +- internal/cli/email/threads_helpers.go | 10 +- internal/cli/integration/contacts_test.go | 33 +- internal/cli/integration/test.go | 485 +----------------- 
internal/cli/integration/test_extractors.go | 76 +++ internal/cli/integration/test_runners.go | 91 ++++ internal/cli/integration/test_setup.go | 343 +++++++++++++ internal/cli/integration/webhooks_test.go | 7 +- internal/cli/notetaker/create.go | 4 +- internal/cli/notetaker/list.go | 8 +- internal/cli/notetaker/media.go | 6 +- internal/cli/notetaker/notetaker_test.go | 25 +- internal/cli/notetaker/show.go | 6 +- internal/cli/scheduler/bookings.go | 30 +- internal/cli/scheduler/configurations.go | 20 +- internal/cli/scheduler/pages.go | 12 +- internal/cli/scheduler/scheduler_test.go | 28 - internal/cli/scheduler/sessions.go | 14 +- internal/cli/setup/application_setup.go | 5 +- internal/cli/setup/wizard_helpers.go | 5 +- internal/cli/testutil/command.go | 22 + internal/cli/timezone/convert.go | 4 +- internal/cli/timezone/dst.go | 8 +- internal/cli/timezone/find.go | 4 +- internal/cli/timezone/info.go | 4 +- internal/cli/timezone/list.go | 6 +- .../cli/timezone/timezone_advanced_test.go | 11 +- internal/cli/timezone/timezone_basic_test.go | 7 +- internal/cli/webhook/create.go | 10 +- internal/cli/webhook/list.go | 45 +- internal/cli/webhook/prompter.go | 90 ++++ .../webhook/pubsub_create_update_delete.go | 6 +- internal/cli/webhook/pubsub_helpers.go | 13 +- internal/cli/webhook/pubsub_list_show.go | 2 +- internal/cli/webhook/server.go | 260 +++++++++- internal/cli/webhook/server_test.go | 218 ++++++++ internal/cli/webhook/show.go | 19 +- internal/cli/webhook/test.go | 23 +- internal/cli/webhook/triggers.go | 27 +- internal/cli/webhook/update.go | 10 +- internal/cli/webhook/webhook_advanced_test.go | 17 +- internal/cli/webhook/webhook_basic_test.go | 40 +- internal/ports/webhook_server.go | 8 + internal/tui/attachments_test.go | 6 +- internal/ui/server.go | 9 +- internal/webguard/middleware.go | 105 ++++ internal/webguard/middleware_test.go | 132 +++++ 215 files changed, 5305 insertions(+), 3628 deletions(-) create mode 100644 
internal/adapters/ai/scheduler_tools_helpers.go create mode 100644 internal/adapters/keyring/file_crypto.go create mode 100644 internal/adapters/keyring/file_legacy.go create mode 100644 internal/air/static/js/actions-registry.js create mode 100644 internal/air/static/js/cache-init.js create mode 100644 internal/air/static/js/event-delegation.js create mode 100644 internal/air/static/js/modals-navigation.js create mode 100644 internal/air/static/js/modals-settings.js create mode 100644 internal/cli/doctor_checks.go create mode 100644 internal/cli/doctor_report.go create mode 100644 internal/cli/email/send_helpers.go create mode 100644 internal/cli/email/send_parsing.go create mode 100644 internal/cli/integration/test_extractors.go create mode 100644 internal/cli/integration/test_runners.go create mode 100644 internal/cli/integration/test_setup.go create mode 100644 internal/cli/webhook/prompter.go create mode 100644 internal/cli/webhook/server_test.go create mode 100644 internal/webguard/middleware.go create mode 100644 internal/webguard/middleware_test.go diff --git a/docs/COMMANDS.md b/docs/COMMANDS.md index a0c666e..516bfd5 100644 --- a/docs/COMMANDS.md +++ b/docs/COMMANDS.md @@ -439,8 +439,9 @@ nylas webhook pubsub delete --yes ```bash nylas webhook test send # Send test payload nylas webhook test payload [trigger-type] # Generate test payload -nylas webhook server # Start local webhook server -nylas webhook server --port 8080 --tunnel cloudflared # With public tunnel +nylas webhook server # Interactive preflight (offers cloudflared tunnel) +nylas webhook server --no-tunnel # Loopback-only (skip preflight) +nylas webhook server --port 8080 --tunnel cloudflared --secret xxx # Public tunnel + HMAC verify ``` **Details:** `docs/commands/webhooks.md` diff --git a/docs/commands/webhooks.md b/docs/commands/webhooks.md index 1a91f56..bccf6f5 100644 --- a/docs/commands/webhooks.md +++ b/docs/commands/webhooks.md @@ -22,20 +22,35 @@ nylas webhook pubsub delete --yes 
Start a local webhook server for development and testing: ```bash -# Start server (local only) +# Interactive: detects cloudflared and prompts to enable a public tunnel. +# (Nylas can't deliver webhooks to localhost, so a tunnel is needed to +# receive real events.) nylas webhook server -# Start with public tunnel (cloudflared required) -nylas webhook server --tunnel cloudflared +# Skip the prompt and run loopback-only (useful for local curl tests +# or non-interactive environments) +nylas webhook server --no-tunnel -# Custom port -nylas webhook server --port 8080 --tunnel cloudflared +# Start with public tunnel (cloudflared required) + signature verification +nylas webhook server --tunnel cloudflared --secret your-webhook-secret + +# Custom port with a tunnel +nylas webhook server --port 8080 --tunnel cloudflared --secret your-webhook-secret ``` -**Install cloudflared:** +When `--tunnel` is set, `--secret` is required (or pass `--allow-unsigned` +to opt out explicitly). The interactive preflight will prompt for a +secret inline when you accept the tunnel; leaving it empty opts into +unsigned mode. + +**Cloudflared install:** + +On macOS, the preflight will offer to run `brew install cloudflared` for +you when cloudflared isn't on `PATH`. 
On other platforms, see: + ```bash -brew install cloudflared # macOS -# Or download from: github.com/cloudflare/cloudflared +brew install cloudflared # macOS (manual) +# Linux/Windows: https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/install-and-setup/installation/ ``` ### TUI Webhook Server diff --git a/internal/adapters/ai/base_client.go b/internal/adapters/ai/base_client.go index 8be6d69..0a2da8f 100644 --- a/internal/adapters/ai/base_client.go +++ b/internal/adapters/ai/base_client.go @@ -173,3 +173,92 @@ func FallbackStreamChat(ctx context.Context, req *domain.ChatRequest, chatFunc f } return callback(resp.Content) } + +// openAICompatibleResponse is the shared shape of /v1/chat/completions +// responses across providers that speak the OpenAI API surface (OpenAI, +// Groq, Together, Anyscale, etc.). Kept private to this package. +type openAICompatibleResponse struct { + Choices []struct { + Message struct { + Role string `json:"role"` + Content string `json:"content"` + ToolCalls []struct { + ID string `json:"id"` + Type string `json:"type"` + Function struct { + Name string `json:"name"` + Arguments string `json:"arguments"` + } `json:"function"` + } `json:"tool_calls,omitempty"` + } `json:"message"` + } `json:"choices"` + Model string `json:"model"` + Usage struct { + PromptTokens int `json:"prompt_tokens"` + CompletionTokens int `json:"completion_tokens"` + TotalTokens int `json:"total_tokens"` + } `json:"usage"` +} + +// OpenAICompatibleChat performs a chat request against any provider that +// implements the OpenAI /v1/chat/completions surface. provider is used to +// label the response and shape error messages. +// +// Callers (OpenAIClient, GroqClient, …) should validate IsConfigured before +// calling this method. 
+func (b *BaseClient) OpenAICompatibleChat(ctx context.Context, provider string, req *domain.ChatRequest, tools []domain.Tool) (*domain.ChatResponse, error) { + body := map[string]any{ + "model": b.GetModel(req.Model), + "messages": ConvertMessagesToMaps(req.Messages), + } + if req.MaxTokens > 0 { + body["max_tokens"] = req.MaxTokens + } + if req.Temperature > 0 { + body["temperature"] = req.Temperature + } + if len(tools) > 0 { + body["tools"] = ConvertToolsOpenAIFormat(tools) + body["tool_choice"] = "auto" + } + + headers := map[string]string{ + "Authorization": "Bearer " + b.apiKey, + } + + var raw openAICompatibleResponse + if err := b.DoJSONRequestAndDecode(ctx, "POST", "/chat/completions", body, headers, &raw); err != nil { + return nil, err + } + if len(raw.Choices) == 0 { + return nil, fmt.Errorf("no response from %s", provider) + } + + resp := &domain.ChatResponse{ + Content: raw.Choices[0].Message.Content, + Model: raw.Model, + Provider: provider, + Usage: domain.TokenUsage{ + PromptTokens: raw.Usage.PromptTokens, + CompletionTokens: raw.Usage.CompletionTokens, + TotalTokens: raw.Usage.TotalTokens, + }, + } + for _, tc := range raw.Choices[0].Message.ToolCalls { + var args map[string]any + if err := json.Unmarshal([]byte(tc.Function.Arguments), &args); err != nil { + // The model emitted a tool-call with malformed JSON arguments. + // Silently dropping it would leave the caller wondering why + // `len(ToolCalls)` is short — return the parse error so the + // scheduler can decide whether to retry or surface it. 
+ return nil, fmt.Errorf("model tool-call %q has invalid JSON arguments: %w", + tc.Function.Name, err) + } + resp.ToolCalls = append(resp.ToolCalls, domain.ToolCall{ + ID: tc.ID, + Function: tc.Function.Name, + Arguments: args, + }) + } + return resp, nil +} diff --git a/internal/adapters/ai/groq_client.go b/internal/adapters/ai/groq_client.go index 4869656..088c419 100644 --- a/internal/adapters/ai/groq_client.go +++ b/internal/adapters/ai/groq_client.go @@ -2,7 +2,6 @@ package ai import ( "context" - "encoding/json" "fmt" "github.com/nylas/cli/internal/domain" @@ -48,92 +47,14 @@ func (c *GroqClient) Chat(ctx context.Context, req *domain.ChatRequest) (*domain return c.ChatWithTools(ctx, req, nil) } -// ChatWithTools sends a chat request with function calling. +// ChatWithTools sends a chat request with function calling. Groq exposes the +// OpenAI /v1/chat/completions surface, so this delegates to the shared +// pipeline. func (c *GroqClient) ChatWithTools(ctx context.Context, req *domain.ChatRequest, tools []domain.Tool) (*domain.ChatResponse, error) { if !c.IsConfigured() { return nil, fmt.Errorf("groq API key not configured") } - - // Prepare Groq request (OpenAI-compatible format) - groqReq := map[string]any{ - "model": c.GetModel(req.Model), - "messages": ConvertMessagesToMaps(req.Messages), - } - - if req.MaxTokens > 0 { - groqReq["max_tokens"] = req.MaxTokens - } - - if req.Temperature > 0 { - groqReq["temperature"] = req.Temperature - } - - // Tools support - if len(tools) > 0 { - groqReq["tools"] = ConvertToolsOpenAIFormat(tools) - groqReq["tool_choice"] = "auto" - } - - // Send request using base client - var groqResp struct { - Choices []struct { - Message struct { - Role string `json:"role"` - Content string `json:"content"` - ToolCalls []struct { - ID string `json:"id"` - Type string `json:"type"` - Function struct { - Name string `json:"name"` - Arguments string `json:"arguments"` - } `json:"function"` - } `json:"tool_calls,omitempty"` - } 
`json:"message"` - } `json:"choices"` - Model string `json:"model"` - Usage struct { - PromptTokens int `json:"prompt_tokens"` - CompletionTokens int `json:"completion_tokens"` - TotalTokens int `json:"total_tokens"` - } `json:"usage"` - } - - headers := map[string]string{ - "Authorization": "Bearer " + c.apiKey, - } - - if err := c.DoJSONRequestAndDecode(ctx, "POST", "/chat/completions", groqReq, headers, &groqResp); err != nil { - return nil, err - } - - if len(groqResp.Choices) == 0 { - return nil, fmt.Errorf("no response from Groq") - } - - response := &domain.ChatResponse{ - Content: groqResp.Choices[0].Message.Content, - Model: groqResp.Model, - Provider: "groq", - Usage: domain.TokenUsage{ - PromptTokens: groqResp.Usage.PromptTokens, - CompletionTokens: groqResp.Usage.CompletionTokens, - TotalTokens: groqResp.Usage.TotalTokens, - }, - } - - // Convert tool calls if present - for _, tc := range groqResp.Choices[0].Message.ToolCalls { - var args map[string]any - if err := json.Unmarshal([]byte(tc.Function.Arguments), &args); err == nil { - response.ToolCalls = append(response.ToolCalls, domain.ToolCall{ - ID: tc.ID, - Function: tc.Function.Name, - Arguments: args, - }) - } - } - - return response, nil + return c.OpenAICompatibleChat(ctx, "groq", req, tools) } // StreamChat streams chat responses. diff --git a/internal/adapters/ai/openai_client.go b/internal/adapters/ai/openai_client.go index 6bec859..9918cd2 100644 --- a/internal/adapters/ai/openai_client.go +++ b/internal/adapters/ai/openai_client.go @@ -2,7 +2,6 @@ package ai import ( "context" - "encoding/json" "fmt" "github.com/nylas/cli/internal/domain" @@ -48,92 +47,13 @@ func (c *OpenAIClient) Chat(ctx context.Context, req *domain.ChatRequest) (*doma return c.ChatWithTools(ctx, req, nil) } -// ChatWithTools sends a chat request with function calling. +// ChatWithTools sends a chat request with function calling, delegating to +// the shared OpenAI-compatible pipeline in BaseClient. 
func (c *OpenAIClient) ChatWithTools(ctx context.Context, req *domain.ChatRequest, tools []domain.Tool) (*domain.ChatResponse, error) { if !c.IsConfigured() { return nil, fmt.Errorf("openai API key not configured") } - - // Prepare OpenAI request - openaiReq := map[string]any{ - "model": c.GetModel(req.Model), - "messages": ConvertMessagesToMaps(req.Messages), - } - - if req.MaxTokens > 0 { - openaiReq["max_tokens"] = req.MaxTokens - } - - if req.Temperature > 0 { - openaiReq["temperature"] = req.Temperature - } - - // Tools support - if len(tools) > 0 { - openaiReq["tools"] = ConvertToolsOpenAIFormat(tools) - openaiReq["tool_choice"] = "auto" - } - - // Send request using base client - var openaiResp struct { - Choices []struct { - Message struct { - Role string `json:"role"` - Content string `json:"content"` - ToolCalls []struct { - ID string `json:"id"` - Type string `json:"type"` - Function struct { - Name string `json:"name"` - Arguments string `json:"arguments"` - } `json:"function"` - } `json:"tool_calls,omitempty"` - } `json:"message"` - } `json:"choices"` - Model string `json:"model"` - Usage struct { - PromptTokens int `json:"prompt_tokens"` - CompletionTokens int `json:"completion_tokens"` - TotalTokens int `json:"total_tokens"` - } `json:"usage"` - } - - headers := map[string]string{ - "Authorization": "Bearer " + c.apiKey, - } - - if err := c.DoJSONRequestAndDecode(ctx, "POST", "/chat/completions", openaiReq, headers, &openaiResp); err != nil { - return nil, err - } - - if len(openaiResp.Choices) == 0 { - return nil, fmt.Errorf("no response from OpenAI") - } - - response := &domain.ChatResponse{ - Content: openaiResp.Choices[0].Message.Content, - Model: openaiResp.Model, - Provider: "openai", - Usage: domain.TokenUsage{ - PromptTokens: openaiResp.Usage.PromptTokens, - CompletionTokens: openaiResp.Usage.CompletionTokens, - TotalTokens: openaiResp.Usage.TotalTokens, - }, - } - - // Convert tool calls if present - for _, tc := range 
openaiResp.Choices[0].Message.ToolCalls { - var args map[string]any - if err := json.Unmarshal([]byte(tc.Function.Arguments), &args); err == nil { - response.ToolCalls = append(response.ToolCalls, domain.ToolCall{ - ID: tc.ID, - Function: tc.Function.Name, - Arguments: args, - }) - } - } - - return response, nil + return c.OpenAICompatibleChat(ctx, "openai", req, tools) } // StreamChat streams chat responses. diff --git a/internal/adapters/ai/pattern_learner.go b/internal/adapters/ai/pattern_learner.go index 1526988..23abf5a 100644 --- a/internal/adapters/ai/pattern_learner.go +++ b/internal/adapters/ai/pattern_learner.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "os" "strings" "time" @@ -151,6 +152,7 @@ func (p *PatternLearner) fetchHistoricalEvents(ctx context.Context, req *LearnPa } allEvents := []domain.Event{} + skipped := []string{} // Fetch events from each calendar for _, calendar := range calendars { @@ -161,13 +163,26 @@ func (p *PatternLearner) fetchHistoricalEvents(ctx context.Context, req *LearnPa }) if err != nil { - // Skip calendar if error (might be read-only, etc.) + // Some calendars are read-only or temporarily unavailable. Record + // the skip with the underlying error so the caller (and test + // harness) can see analysis was partial — silently dropping the + // calendar gives the user "patterns" computed from incomplete + // data and no way to know. + skipped = append(skipped, fmt.Sprintf("%s: %v", calendar.ID, err)) continue } allEvents = append(allEvents, events...) } + if len(skipped) > 0 { + // Log to stderr; downstream callers that already check for + // PartialAnalysis on the returned struct (set below) get the same + // signal without depending on a logger interface. 
+ fmt.Fprintf(os.Stderr, "warn: pattern analysis skipped %d calendar(s): %s\n", + len(skipped), strings.Join(skipped, "; ")) + } + // Filter out recurring events if not requested if !req.IncludeRecurring { filtered := []domain.Event{} @@ -184,7 +199,7 @@ func (p *PatternLearner) fetchHistoricalEvents(ctx context.Context, req *LearnPa } // calculateAnalysisPeriod calculates the actual period analyzed. -func (p *PatternLearner) calculateAnalysisPeriod(events []domain.Event, requestedDays int) AnalysisPeriod { +func (p *PatternLearner) calculateAnalysisPeriod(events []domain.Event, _ int) AnalysisPeriod { if len(events) == 0 { return AnalysisPeriod{} } @@ -320,10 +335,10 @@ func (p *PatternLearner) buildPatternContext(events []domain.Event, acceptance [ } // SavePatterns saves learned patterns (stub for future storage implementation). +// Returns an error rather than nil so callers can't mistake the no-op for a +// successful persist — pairs with LoadPatterns which already errors. func (p *PatternLearner) SavePatterns(ctx context.Context, patterns *SchedulingPatterns) error { - // Future: Save to local storage/database - // For now, this is a no-op - return nil + return fmt.Errorf("pattern storage not yet implemented") } // LoadPatterns loads previously learned patterns (stub for future storage implementation). diff --git a/internal/adapters/ai/pattern_learner_test.go b/internal/adapters/ai/pattern_learner_test.go index 181d586..06e564c 100644 --- a/internal/adapters/ai/pattern_learner_test.go +++ b/internal/adapters/ai/pattern_learner_test.go @@ -154,9 +154,12 @@ func TestPatternLearner_SaveLoadPatterns(t *testing.T) { ctx := context.Background() learner := &PatternLearner{} - t.Run("SavePatterns returns nil (stub)", func(t *testing.T) { + t.Run("SavePatterns returns not implemented error", func(t *testing.T) { + // SavePatterns is a stub; returning a real error keeps a caller + // from mistaking the no-op for a successful persist. 
err := learner.SavePatterns(ctx, &SchedulingPatterns{}) - assert.NoError(t, err) + assert.Error(t, err) + assert.Contains(t, err.Error(), "not yet implemented") }) t.Run("LoadPatterns returns not implemented error", func(t *testing.T) { diff --git a/internal/adapters/ai/scheduler_tools.go b/internal/adapters/ai/scheduler_tools.go index 087397d..5534895 100644 --- a/internal/adapters/ai/scheduler_tools.go +++ b/internal/adapters/ai/scheduler_tools.go @@ -2,12 +2,7 @@ package ai import ( "context" - "encoding/json" "fmt" - "math" - "sort" - "strconv" - "strings" "time" tzutil "github.com/nylas/cli/internal/adapters/utilities/timezone" @@ -35,7 +30,11 @@ func (s *AIScheduler) runFindMeetingTime(ctx context.Context, args map[string]an return "", fmt.Errorf("duration must be greater than zero") } - searchStart, searchEnd, err := dateRangeArgs(args["dateRange"], requestLocation(req)) + loc, err := requestLocation(req) + if err != nil { + return "", err + } + searchStart, searchEnd, err := dateRangeArgs(args["dateRange"], loc) if err != nil { return "", err } @@ -46,7 +45,7 @@ func (s *AIScheduler) runFindMeetingTime(ctx context.Context, args map[string]an return "", fmt.Errorf("failed to get availability: %w", err) } - slots := rankAvailableSlots(result.Data.TimeSlots, requestLocation(req)) + slots := rankAvailableSlots(result.Data.TimeSlots, loc) payload := map[string]any{ "status": "success", "message": fmt.Sprintf("Found %d available time slots", len(slots)), @@ -103,7 +102,7 @@ func (s *AIScheduler) runCheckDST(ctx context.Context, args map[string]any) (str return marshalToolResult(payload) } -func (s *AIScheduler) runValidateWorkingHours(ctx context.Context, args map[string]any) (string, error) { +func (s *AIScheduler) runValidateWorkingHours(_ context.Context, args map[string]any) (string, error) { timezoneID, err := stringArg(args, "timezone", "") if err != nil { return "", err @@ -269,7 +268,10 @@ func (s *AIScheduler) runGetAvailability(ctx context.Context, args 
map[string]an return "", fmt.Errorf("participants are required") } - loc := requestLocation(req) + loc, err := requestLocation(req) + if err != nil { + return "", err + } startTime, err := timeArg(args, "startTime", loc) if err != nil { return "", err @@ -352,307 +354,3 @@ func (s *AIScheduler) runGetTimezoneInfo(ctx context.Context, args map[string]an return marshalToolResult(payload) } - -func buildAvailabilityRequest(participants []string, startTime, endTime time.Time, durationMinutes int) *domain.AvailabilityRequest { - availParticipants := make([]domain.AvailabilityParticipant, 0, len(participants)) - for _, email := range participants { - availParticipants = append(availParticipants, domain.AvailabilityParticipant{ - Email: email, - }) - } - - return &domain.AvailabilityRequest{ - StartTime: startTime.Unix(), - EndTime: endTime.Unix(), - DurationMinutes: durationMinutes, - Participants: availParticipants, - IntervalMinutes: 30, - } -} - -func rankAvailableSlots(slots []domain.AvailableSlot, loc *time.Location) []map[string]any { - type rankedSlot struct { - slot domain.AvailableSlot - score int - } - - ranked := make([]rankedSlot, 0, len(slots)) - for _, slot := range slots { - start := time.Unix(slot.StartTime, 0).In(loc) - ranked = append(ranked, rankedSlot{ - slot: slot, - score: localTimeScore(start), - }) - } - - sort.Slice(ranked, func(i, j int) bool { - if ranked[i].score == ranked[j].score { - return ranked[i].slot.StartTime < ranked[j].slot.StartTime - } - return ranked[i].score > ranked[j].score - }) - - limit := len(ranked) - if limit > 10 { - limit = 10 - } - - result := make([]map[string]any, 0, limit) - for _, entry := range ranked[:limit] { - result = append(result, map[string]any{ - "start": time.Unix(entry.slot.StartTime, 0).UTC().Format(time.RFC3339), - "end": time.Unix(entry.slot.EndTime, 0).UTC().Format(time.RFC3339), - "score": entry.score, - "emails": entry.slot.Emails, - "timezone": loc.String(), - }) - } - - return result -} - -func 
localTimeScore(start time.Time) int { - localHour := float64(start.Hour()) + float64(start.Minute())/60 - distanceFromIdeal := math.Abs(localHour - 13) - - score := 100 - int(distanceFromIdeal*8) - switch start.Weekday() { - case time.Tuesday, time.Wednesday, time.Thursday: - score += 5 - case time.Saturday, time.Sunday: - score -= 25 - } - - if score < 0 { - return 0 - } - if score > 100 { - return 100 - } - return score -} - -func (s *AIScheduler) defaultWritableCalendarID(ctx context.Context, grantID string) (string, error) { - calendars, err := s.nylasClient.GetCalendars(ctx, grantID) - if err != nil { - return "", fmt.Errorf("failed to list calendars: %w", err) - } - if len(calendars) == 0 { - return "", fmt.Errorf("no calendars available") - } - - for _, cal := range calendars { - if cal.IsPrimary && !cal.ReadOnly { - return cal.ID, nil - } - } - for _, cal := range calendars { - if !cal.ReadOnly { - return cal.ID, nil - } - } - - return "", fmt.Errorf("no writable calendar available") -} - -func marshalToolResult(payload map[string]any) (string, error) { - bytes, err := json.Marshal(payload) - if err != nil { - return "", fmt.Errorf("failed to marshal result: %w", err) - } - return string(bytes), nil -} - -func requestLocation(req *ScheduleRequest) *time.Location { - if req == nil || req.UserTimezone == "" { - return time.UTC - } - - loc, err := time.LoadLocation(req.UserTimezone) - if err != nil { - return time.UTC - } - return loc -} - -func dateRangeArgs(value any, loc *time.Location) (time.Time, time.Time, error) { - rangeArgs, ok := value.(map[string]any) - if !ok { - return time.Time{}, time.Time{}, fmt.Errorf("dateRange must be an object") - } - - startValue, ok := rangeArgs["start"] - if !ok { - return time.Time{}, time.Time{}, fmt.Errorf("dateRange.start is required") - } - endValue, ok := rangeArgs["end"] - if !ok { - return time.Time{}, time.Time{}, fmt.Errorf("dateRange.end is required") - } - - startDate, err := 
time.ParseInLocation("2006-01-02", fmt.Sprint(startValue), loc) - if err != nil { - return time.Time{}, time.Time{}, fmt.Errorf("invalid dateRange.start: %w", err) - } - endDate, err := time.ParseInLocation("2006-01-02", fmt.Sprint(endValue), loc) - if err != nil { - return time.Time{}, time.Time{}, fmt.Errorf("invalid dateRange.end: %w", err) - } - - endOfDay := time.Date(endDate.Year(), endDate.Month(), endDate.Day(), 23, 59, 59, 0, loc) - return startDate, endOfDay, nil -} - -func participantEmailsArg(args map[string]any, key string) ([]string, error) { - value, ok := args[key] - if !ok { - return nil, nil - } - - switch typed := value.(type) { - case []string: - return cleanStrings(typed), nil - case []any: - result := make([]string, 0, len(typed)) - for _, item := range typed { - value, ok := item.(string) - if !ok { - return nil, fmt.Errorf("%s entries must be strings", key) - } - if strings.TrimSpace(value) != "" { - result = append(result, strings.TrimSpace(value)) - } - } - return result, nil - case string: - return cleanStrings(strings.Split(typed, ",")), nil - default: - return nil, fmt.Errorf("%s must be a string array", key) - } -} - -func stringArg(args map[string]any, key, fallback string) (string, error) { - value, ok := args[key] - if !ok || value == nil { - return fallback, nil - } - - switch typed := value.(type) { - case string: - if strings.TrimSpace(typed) == "" { - return fallback, nil - } - return strings.TrimSpace(typed), nil - default: - return "", fmt.Errorf("%s must be a string", key) - } -} - -func intArg(args map[string]any, key string, fallback int) (int, error) { - value, ok := args[key] - if !ok || value == nil { - return fallback, nil - } - - switch typed := value.(type) { - case int: - return typed, nil - case int64: - return int(typed), nil - case float64: - return int(typed), nil - case string: - parsed, err := strconv.Atoi(strings.TrimSpace(typed)) - if err != nil { - return 0, fmt.Errorf("%s must be an integer", key) - } - 
return parsed, nil - default: - return 0, fmt.Errorf("%s must be an integer", key) - } -} - -func timeArg(args map[string]any, key string, loc *time.Location) (time.Time, error) { - value, ok := args[key] - if !ok || value == nil { - return time.Time{}, fmt.Errorf("%s is required", key) - } - - raw, ok := value.(string) - if !ok { - return time.Time{}, fmt.Errorf("%s must be a string", key) - } - raw = strings.TrimSpace(raw) - if raw == "" { - return time.Time{}, fmt.Errorf("%s is required", key) - } - - layouts := []string{ - time.RFC3339, - "2006-01-02T15:04:05", - "2006-01-02 15:04:05", - "2006-01-02T15:04", - "2006-01-02 15:04", - } - - for _, layout := range layouts { - var ( - parsed time.Time - err error - ) - - if layout == time.RFC3339 { - parsed, err = time.Parse(layout, raw) - } else { - parsed, err = time.ParseInLocation(layout, raw, loc) - } - if err == nil { - return parsed, nil - } - } - - return time.Time{}, fmt.Errorf("invalid %s: %q", key, raw) -} - -func clockMinutes(value string) (int, error) { - parts := strings.Split(value, ":") - if len(parts) != 2 { - return 0, fmt.Errorf("expected HH:MM") - } - - hour, err := strconv.Atoi(parts[0]) - if err != nil { - return 0, fmt.Errorf("invalid hour") - } - minute, err := strconv.Atoi(parts[1]) - if err != nil { - return 0, fmt.Errorf("invalid minute") - } - if hour < 0 || hour > 23 || minute < 0 || minute > 59 { - return 0, fmt.Errorf("hour must be 0-23 and minute must be 0-59") - } - - return hour*60 + minute, nil -} - -func formatOffset(seconds int) string { - sign := "+" - if seconds < 0 { - sign = "-" - seconds = -seconds - } - - hours := seconds / 3600 - minutes := (seconds % 3600) / 60 - return fmt.Sprintf("%s%02d:%02d", sign, hours, minutes) -} - -func cleanStrings(values []string) []string { - result := make([]string, 0, len(values)) - for _, value := range values { - if strings.TrimSpace(value) != "" { - result = append(result, strings.TrimSpace(value)) - } - } - return result -} diff --git 
a/internal/adapters/ai/scheduler_tools_helpers.go b/internal/adapters/ai/scheduler_tools_helpers.go new file mode 100644 index 0000000..14453e1 --- /dev/null +++ b/internal/adapters/ai/scheduler_tools_helpers.go @@ -0,0 +1,320 @@ +package ai + +import ( + "context" + "encoding/json" + "fmt" + "math" + "sort" + "strconv" + "strings" + "time" + + "github.com/nylas/cli/internal/domain" +) + +func buildAvailabilityRequest(participants []string, startTime, endTime time.Time, durationMinutes int) *domain.AvailabilityRequest { + availParticipants := make([]domain.AvailabilityParticipant, 0, len(participants)) + for _, email := range participants { + availParticipants = append(availParticipants, domain.AvailabilityParticipant{ + Email: email, + }) + } + + return &domain.AvailabilityRequest{ + StartTime: startTime.Unix(), + EndTime: endTime.Unix(), + DurationMinutes: durationMinutes, + Participants: availParticipants, + IntervalMinutes: 30, + } +} + +func rankAvailableSlots(slots []domain.AvailableSlot, loc *time.Location) []map[string]any { + type rankedSlot struct { + slot domain.AvailableSlot + score int + } + + ranked := make([]rankedSlot, 0, len(slots)) + for _, slot := range slots { + start := time.Unix(slot.StartTime, 0).In(loc) + ranked = append(ranked, rankedSlot{ + slot: slot, + score: localTimeScore(start), + }) + } + + sort.Slice(ranked, func(i, j int) bool { + if ranked[i].score == ranked[j].score { + return ranked[i].slot.StartTime < ranked[j].slot.StartTime + } + return ranked[i].score > ranked[j].score + }) + + limit := min(len(ranked), 10) + + result := make([]map[string]any, 0, limit) + for _, entry := range ranked[:limit] { + result = append(result, map[string]any{ + "start": time.Unix(entry.slot.StartTime, 0).UTC().Format(time.RFC3339), + "end": time.Unix(entry.slot.EndTime, 0).UTC().Format(time.RFC3339), + "score": entry.score, + "emails": entry.slot.Emails, + "timezone": loc.String(), + }) + } + + return result +} + +func localTimeScore(start 
time.Time) int { + localHour := float64(start.Hour()) + float64(start.Minute())/60 + distanceFromIdeal := math.Abs(localHour - 13) + + score := 100 - int(distanceFromIdeal*8) + switch start.Weekday() { + case time.Tuesday, time.Wednesday, time.Thursday: + score += 5 + case time.Saturday, time.Sunday: + score -= 25 + } + + if score < 0 { + return 0 + } + if score > 100 { + return 100 + } + return score +} + +func (s *AIScheduler) defaultWritableCalendarID(ctx context.Context, grantID string) (string, error) { + calendars, err := s.nylasClient.GetCalendars(ctx, grantID) + if err != nil { + return "", fmt.Errorf("failed to list calendars: %w", err) + } + if len(calendars) == 0 { + return "", fmt.Errorf("no calendars available") + } + + for _, cal := range calendars { + if cal.IsPrimary && !cal.ReadOnly { + return cal.ID, nil + } + } + for _, cal := range calendars { + if !cal.ReadOnly { + return cal.ID, nil + } + } + + return "", fmt.Errorf("no writable calendar available") +} + +func marshalToolResult(payload map[string]any) (string, error) { + bytes, err := json.Marshal(payload) + if err != nil { + return "", fmt.Errorf("failed to marshal result: %w", err) + } + return string(bytes), nil +} + +// requestLocation resolves the user's timezone for a scheduling request. +// A bad timezone ID (e.g. "PST" instead of "America/Los_Angeles") yields +// an error rather than silently rounding to UTC — slot rankings produced +// against the wrong zone look correct but are wrong by hours, which is +// exactly the kind of failure the user has no easy way to catch. 
+func requestLocation(req *ScheduleRequest) (*time.Location, error) { + if req == nil || req.UserTimezone == "" { + return time.UTC, nil + } + + loc, err := time.LoadLocation(req.UserTimezone) + if err != nil { + return nil, fmt.Errorf("invalid timezone %q: %w", req.UserTimezone, err) + } + return loc, nil +} + +func dateRangeArgs(value any, loc *time.Location) (time.Time, time.Time, error) { + rangeArgs, ok := value.(map[string]any) + if !ok { + return time.Time{}, time.Time{}, fmt.Errorf("dateRange must be an object") + } + + startValue, ok := rangeArgs["start"] + if !ok { + return time.Time{}, time.Time{}, fmt.Errorf("dateRange.start is required") + } + endValue, ok := rangeArgs["end"] + if !ok { + return time.Time{}, time.Time{}, fmt.Errorf("dateRange.end is required") + } + + startDate, err := time.ParseInLocation("2006-01-02", fmt.Sprint(startValue), loc) + if err != nil { + return time.Time{}, time.Time{}, fmt.Errorf("invalid dateRange.start: %w", err) + } + endDate, err := time.ParseInLocation("2006-01-02", fmt.Sprint(endValue), loc) + if err != nil { + return time.Time{}, time.Time{}, fmt.Errorf("invalid dateRange.end: %w", err) + } + + endOfDay := time.Date(endDate.Year(), endDate.Month(), endDate.Day(), 23, 59, 59, 0, loc) + return startDate, endOfDay, nil +} + +func participantEmailsArg(args map[string]any, key string) ([]string, error) { + value, ok := args[key] + if !ok { + return nil, nil + } + + switch typed := value.(type) { + case []string: + return cleanStrings(typed), nil + case []any: + result := make([]string, 0, len(typed)) + for _, item := range typed { + value, ok := item.(string) + if !ok { + return nil, fmt.Errorf("%s entries must be strings", key) + } + if strings.TrimSpace(value) != "" { + result = append(result, strings.TrimSpace(value)) + } + } + return result, nil + case string: + return cleanStrings(strings.Split(typed, ",")), nil + default: + return nil, fmt.Errorf("%s must be a string array", key) + } +} + +func stringArg(args 
map[string]any, key, fallback string) (string, error) { + value, ok := args[key] + if !ok || value == nil { + return fallback, nil + } + + switch typed := value.(type) { + case string: + if strings.TrimSpace(typed) == "" { + return fallback, nil + } + return strings.TrimSpace(typed), nil + default: + return "", fmt.Errorf("%s must be a string", key) + } +} + +func intArg(args map[string]any, key string, fallback int) (int, error) { + value, ok := args[key] + if !ok || value == nil { + return fallback, nil + } + + switch typed := value.(type) { + case int: + return typed, nil + case int64: + return int(typed), nil + case float64: + return int(typed), nil + case string: + parsed, err := strconv.Atoi(strings.TrimSpace(typed)) + if err != nil { + return 0, fmt.Errorf("%s must be an integer", key) + } + return parsed, nil + default: + return 0, fmt.Errorf("%s must be an integer", key) + } +} + +func timeArg(args map[string]any, key string, loc *time.Location) (time.Time, error) { + value, ok := args[key] + if !ok || value == nil { + return time.Time{}, fmt.Errorf("%s is required", key) + } + + raw, ok := value.(string) + if !ok { + return time.Time{}, fmt.Errorf("%s must be a string", key) + } + raw = strings.TrimSpace(raw) + if raw == "" { + return time.Time{}, fmt.Errorf("%s is required", key) + } + + layouts := []string{ + time.RFC3339, + "2006-01-02T15:04:05", + "2006-01-02 15:04:05", + "2006-01-02T15:04", + "2006-01-02 15:04", + } + + for _, layout := range layouts { + var ( + parsed time.Time + err error + ) + + if layout == time.RFC3339 { + parsed, err = time.Parse(layout, raw) + } else { + parsed, err = time.ParseInLocation(layout, raw, loc) + } + if err == nil { + return parsed, nil + } + } + + return time.Time{}, fmt.Errorf("invalid %s: %q", key, raw) +} + +func clockMinutes(value string) (int, error) { + parts := strings.Split(value, ":") + if len(parts) != 2 { + return 0, fmt.Errorf("expected HH:MM") + } + + hour, err := strconv.Atoi(parts[0]) + if err != 
nil { + return 0, fmt.Errorf("invalid hour") + } + minute, err := strconv.Atoi(parts[1]) + if err != nil { + return 0, fmt.Errorf("invalid minute") + } + if hour < 0 || hour > 23 || minute < 0 || minute > 59 { + return 0, fmt.Errorf("hour must be 0-23 and minute must be 0-59") + } + + return hour*60 + minute, nil +} + +func formatOffset(seconds int) string { + sign := "+" + if seconds < 0 { + sign = "-" + seconds = -seconds + } + + hours := seconds / 3600 + minutes := (seconds % 3600) / 60 + return fmt.Sprintf("%s%02d:%02d", sign, hours, minutes) +} + +func cleanStrings(values []string) []string { + result := make([]string, 0, len(values)) + for _, value := range values { + if strings.TrimSpace(value) != "" { + result = append(result, strings.TrimSpace(value)) + } + } + return result +} diff --git a/internal/adapters/keyring/crossplatform_test.go b/internal/adapters/keyring/crossplatform_test.go index ed5219c..b64d350 100644 --- a/internal/adapters/keyring/crossplatform_test.go +++ b/internal/adapters/keyring/crossplatform_test.go @@ -250,14 +250,65 @@ func TestEncryptedFileStore_RequiresPassphraseForWrites(t *testing.T) { t.Cleanup(func() { _ = os.Setenv(fileStorePassphraseEnv, orig) }) } + // Fresh install: no legacy file, no passphrase. Construction succeeds so + // callers can probe the empty store, but Set must fail with a clear + // message pointing at NYLAS_FILE_STORE_PASSPHRASE. 
store, err := NewEncryptedFileStore(t.TempDir()) if err != nil { - t.Fatalf("NewEncryptedFileStore failed: %v", err) + t.Fatalf("NewEncryptedFileStore should not fail on a fresh install: %v", err) + } + + if err := store.Set("api_key", "value"); err == nil { + t.Fatal("Set succeeded without passphrase on fresh install") + } else if !strings.Contains(err.Error(), fileStorePassphraseEnv) { + t.Fatalf("Set error %q does not mention %s", err.Error(), fileStorePassphraseEnv) + } +} + +// TestEncryptedFileStore_RefusesWriteWhenLegacyExistsNoPassphrase verifies that +// when a legacy .secrets.enc exists but no passphrase is set, writes are refused +// with a migration-required error. +func TestEncryptedFileStore_RefusesWriteWhenLegacyExistsNoPassphrase(t *testing.T) { + tmpDir := t.TempDir() + + legacyKey, err := deriveLegacyKey() + if err != nil { + t.Fatalf("deriveLegacyKey failed: %v", err) + } + legacyCiphertext, err := encryptWithKey(legacyKey, []byte(`{"api_key":"old-value"}`)) + if err != nil { + t.Fatalf("encryptWithKey failed: %v", err) + } + secretsPath := filepath.Join(tmpDir, ".secrets.enc") + if err := os.WriteFile(secretsPath, legacyCiphertext, 0600); err != nil { + t.Fatalf("failed to write legacy file: %v", err) + } + + orig := os.Getenv(fileStorePassphraseEnv) + if orig != "" { + _ = os.Unsetenv(fileStorePassphraseEnv) + t.Cleanup(func() { _ = os.Setenv(fileStorePassphraseEnv, orig) }) + } + + // Legacy file exists → construction should succeed (store is openable). + store, err := NewEncryptedFileStore(tmpDir) + if err != nil { + t.Fatalf("NewEncryptedFileStore failed when legacy file exists: %v", err) } - err = store.Set("api_key", "value") + // Read must fail with migration-required error, not silently succeed. 
+ _, err = store.Get("api_key") if err == nil { - t.Fatal("Set succeeded without passphrase") + t.Fatal("Get succeeded without passphrase on legacy file — migration gate not enforced") + } + if !strings.Contains(err.Error(), fileStorePassphraseEnv) { + t.Fatalf("Get error %q does not mention %s", err.Error(), fileStorePassphraseEnv) + } + + // Write must also fail with migration-required error. + err = store.Set("api_key", "new-value") + if err == nil { + t.Fatal("Set succeeded without passphrase on legacy file") } if !strings.Contains(err.Error(), fileStorePassphraseEnv) { t.Fatalf("Set error %q does not mention %s", err.Error(), fileStorePassphraseEnv) @@ -543,3 +594,136 @@ func TestConcurrentAccess(t *testing.T) { t.Errorf("Concurrent access error: %v", err) } } + +// TestEncryptedFileStore_MigratesOnFirstGet verifies that the one-shot migration +// happens on the first Get, not only after a subsequent Set. +// After Get returns the plaintext, the on-disk file must already be re-encrypted +// with the passphrase-derived key and must no longer be decryptable by the legacy key. +func TestEncryptedFileStore_MigratesOnFirstGet(t *testing.T) { + tmpDir := t.TempDir() + passphrase := setFileStorePassphrase(t) + + legacyKey, err := deriveLegacyKey() + if err != nil { + t.Fatalf("deriveLegacyKey failed: %v", err) + } + + legacyCiphertext, err := encryptWithKey(legacyKey, []byte(`{"migrate_key":"migrate_value"}`)) + if err != nil { + t.Fatalf("encryptWithKey failed: %v", err) + } + + secretsPath := filepath.Join(tmpDir, ".secrets.enc") + if err := os.WriteFile(secretsPath, legacyCiphertext, 0600); err != nil { + t.Fatalf("failed to write legacy secrets file: %v", err) + } + + store, err := NewEncryptedFileStore(tmpDir) + if err != nil { + t.Fatalf("NewEncryptedFileStore failed: %v", err) + } + + // First Get — should trigger migration inline. 
+ value, err := store.Get("migrate_key") + if err != nil { + t.Fatalf("Get failed: %v", err) + } + if value != "migrate_value" { + t.Fatalf("Get returned %q, want %q", value, "migrate_value") + } + + // After Get, the on-disk file must already use the passphrase-derived key. + data, err := os.ReadFile(secretsPath) + if err != nil { + t.Fatalf("failed to read secrets file after migration: %v", err) + } + + // Legacy key must no longer decrypt the file. + if _, err := decryptWithKey(legacyKey, data); err == nil { + t.Fatal("on-disk file is still decryptable with the legacy key after Get-triggered migration") + } + + // Passphrase-derived key must decrypt successfully. + saltData, err := os.ReadFile(filepath.Join(tmpDir, ".secrets.salt")) + if err != nil { + t.Fatalf("failed to read salt file: %v", err) + } + decodedSalt, err := base64.StdEncoding.DecodeString(strings.TrimSpace(string(saltData))) + if err != nil { + t.Fatalf("failed to decode salt: %v", err) + } + if _, err := decryptWithKey(derivePassphraseKey([]byte(passphrase), decodedSalt), data); err != nil { + t.Fatalf("failed to decrypt migrated file with passphrase-derived key: %v", err) + } +} + +// TestDetectKeyType verifies the detectKeyType helper across the expected states. +func TestDetectKeyType(t *testing.T) { + t.Run("none_when_no_file", func(t *testing.T) { + tmpDir := t.TempDir() + setFileStorePassphrase(t) + + store, err := NewEncryptedFileStore(tmpDir) + // Fresh install with passphrase set — construction should succeed. + if err != nil { + t.Fatalf("NewEncryptedFileStore failed: %v", err) + } + // No file written yet. 
+ kt, err := store.detectKeyType() + if err != nil { + t.Fatalf("detectKeyType failed: %v", err) + } + if kt != fileStoreKeyNone { + t.Fatalf("detectKeyType = %d, want fileStoreKeyNone (%d)", kt, fileStoreKeyNone) + } + }) + + t.Run("passphrase_only_after_write", func(t *testing.T) { + tmpDir := t.TempDir() + setFileStorePassphrase(t) + + store, err := NewEncryptedFileStore(tmpDir) + if err != nil { + t.Fatalf("NewEncryptedFileStore failed: %v", err) + } + if err := store.Set("k", "v"); err != nil { + t.Fatalf("Set failed: %v", err) + } + kt, err := store.detectKeyType() + if err != nil { + t.Fatalf("detectKeyType failed: %v", err) + } + if kt != fileStoreKeyPassphraseOnly { + t.Fatalf("detectKeyType = %d, want fileStoreKeyPassphraseOnly (%d)", kt, fileStoreKeyPassphraseOnly) + } + }) + + t.Run("legacy_only_before_migration", func(t *testing.T) { + tmpDir := t.TempDir() + setFileStorePassphrase(t) + + legacyKey, err := deriveLegacyKey() + if err != nil { + t.Fatalf("deriveLegacyKey failed: %v", err) + } + ct, err := encryptWithKey(legacyKey, []byte(`{"k":"v"}`)) + if err != nil { + t.Fatalf("encryptWithKey failed: %v", err) + } + if err := os.WriteFile(filepath.Join(tmpDir, ".secrets.enc"), ct, 0600); err != nil { + t.Fatalf("WriteFile failed: %v", err) + } + + store, err := NewEncryptedFileStore(tmpDir) + if err != nil { + t.Fatalf("NewEncryptedFileStore failed: %v", err) + } + kt, err := store.detectKeyType() + if err != nil { + t.Fatalf("detectKeyType failed: %v", err) + } + if kt != fileStoreKeyLegacyOnly { + t.Fatalf("detectKeyType = %d, want fileStoreKeyLegacyOnly (%d)", kt, fileStoreKeyLegacyOnly) + } + }) +} diff --git a/internal/adapters/keyring/file.go b/internal/adapters/keyring/file.go index ea13ef6..648386b 100644 --- a/internal/adapters/keyring/file.go +++ b/internal/adapters/keyring/file.go @@ -1,17 +1,13 @@ package keyring import ( - "crypto/aes" - "crypto/cipher" "crypto/rand" - "crypto/sha256" "encoding/base64" "encoding/json" "fmt" "io" "os" 
 	"path/filepath"
-	"runtime"
 	"strings"
 	"sync"
@@ -24,9 +20,29 @@ const (
 	fileStoreSaltSize = 16
 )
 
+// fileStoreKeyType describes which key(s) can decrypt the on-disk .secrets.enc file.
+type fileStoreKeyType int
+
+const (
+	fileStoreKeyNone           fileStoreKeyType = iota // file does not exist or neither key decrypts it
+	fileStoreKeyLegacyOnly                             // decryptable only with the legacy machine-derived key
+	fileStoreKeyPassphraseOnly                         // decryptable only with the passphrase-derived key
+	fileStoreKeyBoth                                   // decryptable with either key
+)
+
 // EncryptedFileStore implements SecretStore using an encrypted file.
 // This is a fallback for environments where the system keyring is unavailable.
-// Uses AES-256-GCM encryption with a key derived from user-supplied secret material.
+// Uses AES-256-GCM encryption with an Argon2id key derived from a user-supplied
+// passphrase set via NYLAS_FILE_STORE_PASSPHRASE.
+//
+// REQUIREMENT: NYLAS_FILE_STORE_PASSPHRASE must be set before using this store
+// on a fresh install. Existing installations that used the legacy machine-derived
+// key will be migrated automatically the first time NYLAS_FILE_STORE_PASSPHRASE
+// is supplied. If the environment variable is unset and no legacy file exists,
+// the store is constructed empty: reads return ErrSecretNotFound and writes fail.
+//
+// To avoid the file store entirely, leave NYLAS_DISABLE_KEYRING unset and let
+// the system keyring be used, or run `nylas auth config` to reconfigure.
 type EncryptedFileStore struct {
 	path    string
 	keyPath string
@@ -37,8 +53,15 @@ type EncryptedFileStore struct {
 	mu sync.RWMutex
 }
 
-// NewEncryptedFileStore creates a new EncryptedFileStore.
-// The secrets are stored in an encrypted file within the config directory.
+// NewEncryptedFileStore creates a new EncryptedFileStore rooted in configDir.
+//
+// Construction always succeeds — a fresh install (no passphrase, no legacy
+// file) yields a store whose reads return ErrSecretNotFound and whose writes
+// fail with a clear "set NYLAS_FILE_STORE_PASSPHRASE" error. This lets
+// callers probe an empty store without crashing.
+//
+// To actually persist secrets, set NYLAS_FILE_STORE_PASSPHRASE to a strong
+// value, or run `nylas auth config` to switch to the system keyring.
 func NewEncryptedFileStore(configDir string) (*EncryptedFileStore, error) {
 	path := filepath.Join(configDir, ".secrets.enc")
 	keyPath := filepath.Join(configDir, ".secrets.key")
@@ -56,6 +79,17 @@ func NewEncryptedFileStore(configDir string) (*EncryptedFileStore, error) {
 
 	var passphrase []byte
 	if value := os.Getenv(fileStorePassphraseEnv); value != "" {
+		// Enforce a minimum length so a 4-character passphrase isn't held
+		// up as adequate defense. This is a deliberately gentle floor (12
+		// characters) — long enough to make offline brute-force materially
+		// expensive when combined with Argon2id, short enough that real
+		// users can comply.
+		if len(value) < minPassphraseLen {
+			return nil, fmt.Errorf(
+				"%s must be at least %d characters (got %d) — pick a longer passphrase",
+				fileStorePassphraseEnv, minPassphraseLen, len(value),
+			)
+		}
 		passphrase = []byte(value)
 	}
 
@@ -139,6 +173,56 @@ func (f *EncryptedFileStore) Name() string {
 	return "encrypted file"
 }
 
+// detectKeyType returns which key(s) can currently decrypt the on-disk file.
+// It reads the file once and probes each key in order. If the file does not
+// exist, fileStoreKeyNone is returned with no error.
+func (f *EncryptedFileStore) detectKeyType() (fileStoreKeyType, error) { + data, err := os.ReadFile(f.path) + if err != nil { + if os.IsNotExist(err) { + return fileStoreKeyNone, nil + } + return fileStoreKeyNone, err + } + + hasPassphrase := false + if key, err := f.passphraseKey(false); err == nil { + if _, err := decryptWithKey(key, data); err == nil { + hasPassphrase = true + } + zeroBytes(key) + } + + hasLegacy := f.canDecryptWithLegacyKeys(data) + + switch { + case hasPassphrase && hasLegacy: + return fileStoreKeyBoth, nil + case hasPassphrase: + return fileStoreKeyPassphraseOnly, nil + case hasLegacy: + return fileStoreKeyLegacyOnly, nil + default: + return fileStoreKeyNone, nil + } +} + +// canDecryptWithLegacyKeys returns true when the ciphertext can be opened by +// either the migration master key or the legacy machine-derived key. +func (f *EncryptedFileStore) canDecryptWithLegacyKeys(data []byte) bool { + if len(f.migrationKey) > 0 { + if _, err := decryptWithKey(f.migrationKey, data); err == nil { + return true + } + } + if len(f.legacyKey) > 0 { + if _, err := decryptWithKey(f.legacyKey, data); err == nil { + return true + } + } + return false +} + // loadSecrets loads and decrypts the secrets file. func (f *EncryptedFileStore) loadSecrets() (map[string]string, error) { data, err := os.ReadFile(f.path) @@ -158,7 +242,7 @@ func (f *EncryptedFileStore) loadSecrets() (map[string]string, error) { return secrets, nil } -// saveSecrets encrypts and saves the secrets file. +// saveSecrets encrypts and saves the secrets file atomically. func (f *EncryptedFileStore) saveSecrets(secrets map[string]string) error { plaintext, err := json.Marshal(secrets) if err != nil { @@ -170,16 +254,42 @@ func (f *EncryptedFileStore) saveSecrets(secrets map[string]string) error { return err } - // Ensure directory exists + // Ensure directory exists. 
 	dir := filepath.Dir(f.path)
 	if err := os.MkdirAll(dir, 0700); err != nil {
 		return err
 	}
 
-	// Write with restrictive permissions
-	if err := os.WriteFile(f.path, ciphertext, 0600); err != nil {
+	// Atomic write: write to a sibling temp file, then rename.
+	tmp, err := os.CreateTemp(dir, ".secrets.enc.tmp.*")
+	if err != nil {
+		return err
+	}
+	tmpPath := tmp.Name()
+
+	// Clean up the temp file on any failure path.
+	committed := false
+	defer func() {
+		if !committed {
+			_ = os.Remove(tmpPath)
+		}
+	}()
+
+	if err := tmp.Chmod(0600); err != nil {
+		_ = tmp.Close()
+		return err
+	}
+	if _, err := tmp.Write(ciphertext); err != nil {
+		_ = tmp.Close()
+		return err
+	}
+	if err := tmp.Close(); err != nil {
+		return err
+	}
+	if err := os.Rename(tmpPath, f.path); err != nil {
 		return err
 	}
+	committed = true
 
 	// Remove the plaintext migration key once the store has been rewritten.
 	if f.keyPath != "" {
@@ -190,88 +300,163 @@ func (f *EncryptedFileStore) saveSecrets(secrets map[string]string) error {
 }
 
 // encrypt encrypts plaintext using AES-256-GCM.
+//
+// Passphrase rules enforced here:
+//   - If passphrase is set: encrypt with the passphrase-derived key.
+//   - If passphrase is unset AND a legacy file exists: refuse — the caller must
+//     set NYLAS_FILE_STORE_PASSPHRASE so the migration path in decrypt can run first.
+//   - If passphrase is unset AND no legacy file: refuse — this is a fresh install
+//     that has not stored any secrets yet (construction allows probing such a store).
 func (f *EncryptedFileStore) encrypt(plaintext []byte) ([]byte, error) {
+	if len(f.passphrase) == 0 {
+		// Distinguish between "legacy file exists" and "fresh install" for clearer errors.
+		if _, statErr := os.Stat(f.path); statErr == nil {
+			return nil, fmt.Errorf(
+				"encrypted file store requires %s to migrate from the legacy machine-derived key. "+
+					"Set it and re-run, or run `nylas auth config` to switch to the system keyring",
+				fileStorePassphraseEnv,
+			)
+		}
+		return nil, fmt.Errorf(
+			"%s must be set to use the encrypted file secret store. "+
+				"Set it and re-run, or run `nylas auth config` to switch to the system keyring",
+			fileStorePassphraseEnv,
+		)
+	}
+
 	key, err := f.passphraseKey(true)
 	if err != nil {
 		return nil, err
 	}
+	defer zeroBytes(key)
 	return encryptWithKey(key, plaintext)
 }
 
 // decrypt decrypts ciphertext using AES-256-GCM.
+//
+// Decryption order:
+//  1. Passphrase key (if passphrase is set) — normal path.
+//  2. Legacy key (migration master key or machine-derived key):
+//     - If passphrase is NOT set: return an error requiring the user to set it.
+//     - If passphrase IS set: re-encrypt with the passphrase key (one-shot
+//       migration), print a notice to stderr, and return the plaintext.
+//  3. Neither key works: return an error.
 func (f *EncryptedFileStore) decrypt(data []byte) ([]byte, error) {
-	if key, err := f.passphraseKey(false); err == nil {
-		plaintext, err := decryptWithKey(key, data)
-		if err == nil {
-			return plaintext, nil
+	// 1. Try passphrase key first.
+	if len(f.passphrase) > 0 {
+		if key, err := f.passphraseKey(false); err == nil {
+			plaintext, decErr := decryptWithKey(key, data)
+			zeroBytes(key)
+			if decErr == nil {
+				return plaintext, nil
+			}
+		} else if !os.IsNotExist(err) {
+			return nil, err
 		}
-	} else if !os.IsNotExist(err) && len(f.passphrase) > 0 {
-		return nil, err
-	}
-
-	if len(f.migrationKey) > 0 {
-		plaintext, err := decryptWithKey(f.migrationKey, data)
-		if err == nil {
-			return plaintext, nil
+		// Passphrase set but salt missing or passphrase wrong — fall through to legacy.
+	}
+
+	// 2. Try legacy keys.
+	if plaintext, legacyKey, ok := f.tryLegacyDecrypt(data); ok {
+		_ = legacyKey // key value itself is unused; migration re-encrypts with the passphrase key
+		if len(f.passphrase) == 0 {
+			// Legacy decryption succeeded but no passphrase — block to force migration.
+			return nil, fmt.Errorf(
+				"encrypted file store requires %s to migrate from the legacy machine-derived key. "+
+					"Set it and re-run, or run `nylas auth config` to switch to the system keyring",
+				fileStorePassphraseEnv,
+			)
		}
-	}
-	if len(f.legacyKey) > 0 {
-		plaintext, err := decryptWithKey(f.legacyKey, data)
-		if err == nil {
-			return plaintext, nil
+		// Passphrase is available — perform one-shot migration.
+		if migrateErr := f.migrateToPassphrase(plaintext); migrateErr != nil {
+			// Migration failed (e.g. disk write error). Return the plaintext so
+			// the caller's operation still succeeds; the legacy file remains intact.
+			fmt.Fprintf(os.Stderr, "warning: failed to migrate encrypted file store: %v\n", migrateErr)
+		} else {
+			fmt.Fprintf(os.Stderr,
+				"notice: encrypted file store migrated to passphrase-derived key (%s)\n",
+				fileStorePassphraseEnv)
 		}
+		return plaintext, nil
 	}
 
+	// 3. Nothing worked.
 	if len(f.passphrase) == 0 {
-		return nil, fmt.Errorf("%s must be set to unlock the encrypted file store", fileStorePassphraseEnv)
+		return nil, fmt.Errorf(
+			"%s must be set to unlock the encrypted file store",
+			fileStorePassphraseEnv,
+		)
 	}
-
 	return nil, fmt.Errorf("failed to decrypt encrypted file store with the configured passphrase")
 }
 
-func encryptWithKey(key, plaintext []byte) ([]byte, error) {
-	block, err := aes.NewCipher(key)
-	if err != nil {
-		return nil, err
+// tryLegacyDecrypt attempts decryption with the migration master key first,
+// then the machine-derived legacy key. Returns the plaintext, the key used,
+// and whether decryption succeeded.
+func (f *EncryptedFileStore) tryLegacyDecrypt(data []byte) (plaintext []byte, key []byte, ok bool) { + if len(f.migrationKey) > 0 { + if pt, err := decryptWithKey(f.migrationKey, data); err == nil { + return pt, f.migrationKey, true + } } + if len(f.legacyKey) > 0 { + if pt, err := decryptWithKey(f.legacyKey, data); err == nil { + return pt, f.legacyKey, true + } + } + return nil, nil, false +} - gcm, err := cipher.NewGCM(block) +// migrateToPassphrase re-encrypts plaintext with the passphrase-derived key and +// atomically writes it to disk. If this fails, the on-disk legacy file is left +// intact so the next invocation can retry. +func (f *EncryptedFileStore) migrateToPassphrase(plaintext []byte) error { + ciphertext, err := f.encrypt(plaintext) if err != nil { - return nil, err + return fmt.Errorf("re-encrypt for migration: %w", err) } - nonce := make([]byte, gcm.NonceSize()) - if _, err := io.ReadFull(rand.Reader, nonce); err != nil { - return nil, err + dir := filepath.Dir(f.path) + if err := os.MkdirAll(dir, 0700); err != nil { + return fmt.Errorf("mkdir for migration: %w", err) } - ciphertext := gcm.Seal(nonce, nonce, plaintext, nil) - return []byte(base64.StdEncoding.EncodeToString(ciphertext)), nil -} - -func decryptWithKey(key, data []byte) ([]byte, error) { - ciphertext, err := base64.StdEncoding.DecodeString(string(data)) + tmp, err := os.CreateTemp(dir, ".secrets.enc.tmp.*") if err != nil { - return nil, err + return fmt.Errorf("create temp file for migration: %w", err) } + tmpPath := tmp.Name() - block, err := aes.NewCipher(key) - if err != nil { - return nil, err - } + committed := false + defer func() { + if !committed { + _ = os.Remove(tmpPath) + } + }() - gcm, err := cipher.NewGCM(block) - if err != nil { - return nil, err + if err := tmp.Chmod(0600); err != nil { + _ = tmp.Close() + return fmt.Errorf("chmod temp file for migration: %w", err) + } + if _, err := tmp.Write(ciphertext); err != nil { + _ = tmp.Close() + return fmt.Errorf("write temp 
file for migration: %w", err) + } + if err := tmp.Close(); err != nil { + return fmt.Errorf("close temp file for migration: %w", err) } + if err := os.Rename(tmpPath, f.path); err != nil { + return fmt.Errorf("rename temp file for migration: %w", err) + } + committed = true - if len(ciphertext) < gcm.NonceSize() { - return nil, fmt.Errorf("ciphertext too short") + // Remove the plaintext migration master key now that re-encryption succeeded. + if f.keyPath != "" { + _ = os.Remove(f.keyPath) } - nonce, ciphertext := ciphertext[:gcm.NonceSize()], ciphertext[gcm.NonceSize():] - return gcm.Open(nil, nonce, ciphertext, nil) + return nil } func readCompatibilityMasterKey(path string) ([]byte, error) { @@ -342,84 +527,26 @@ func (f *EncryptedFileStore) loadSalt(create bool) ([]byte, error) { return salt, nil } +// argon2id parameters. The OWASP 2024 guidance is t=2, m=19MiB, p=1 as +// the absolute minimum for password storage; modern hosts comfortably +// support t=3, m=64MiB, p=4 for a CLI use case where derive happens once +// per process. Increasing t from 1 (the previous setting) to 3 raises +// offline-cracking cost ~3x for any attacker who exfiltrates the salt and +// ciphertext. +const ( + argon2idTime uint32 = 3 + argon2idMemory uint32 = 64 * 1024 // 64 MiB + argon2idThreads uint8 = 4 + argon2idKeyLen uint32 = 32 + + // minPassphraseLen is the minimum length we accept for + // NYLAS_FILE_STORE_PASSPHRASE. Argon2id alone cannot save a 4-character + // passphrase from offline brute-force. + minPassphraseLen = 12 +) + func derivePassphraseKey(passphrase, salt []byte) []byte { // Argon2id keeps the fallback store bound to user-supplied secret material // instead of host metadata while staying fast enough for CLI use. - return argon2.IDKey(passphrase, salt, 1, 64*1024, 4, 32) -} - -// deriveLegacyKey derives the pre-v2 machine-specific fallback key so older -// encrypted files can still be read and rewritten with a passphrase-derived key. 
-func deriveLegacyKey() ([]byte, error) { - // Collect machine-specific identifiers - var identifiers []byte - - // Add hostname - hostname, _ := os.Hostname() - identifiers = append(identifiers, []byte(hostname)...) - - // Add user info - identifiers = append(identifiers, []byte(os.Getenv("USER"))...) - identifiers = append(identifiers, []byte(os.Getenv("USERNAME"))...) // Windows - - // Add home directory - home, _ := os.UserHomeDir() - identifiers = append(identifiers, []byte(home)...) - - // Add OS-specific machine ID if available - machineID := getMachineID() - identifiers = append(identifiers, []byte(machineID)...) - - // Add a static salt to prevent rainbow table attacks - salt := []byte("nylas-cli-v1-secret-store") - identifiers = append(identifiers, salt...) - - // Derive key using SHA-256 - hash := sha256.Sum256(identifiers) - return hash[:], nil -} - -// getMachineID attempts to read a machine-specific ID. -func getMachineID() string { - switch runtime.GOOS { - case "linux": - // Try /etc/machine-id (systemd) - if data, err := os.ReadFile("/etc/machine-id"); err == nil { - return string(data) - } - // Try /var/lib/dbus/machine-id - if data, err := os.ReadFile("/var/lib/dbus/machine-id"); err == nil { - return string(data) - } - case "darwin": - // Try to get hardware UUID from system profiler - if data, err := os.ReadFile("/var/db/SystemKey"); err == nil { - return string(data) - } - // Fallback: use boot time + serial from IOKit - if data, err := os.ReadFile("/Library/Preferences/SystemConfiguration/com.apple.smb.server.plist"); err == nil { - return string(data) - } - case "windows": - // Try to read MachineGuid from registry path - programData := os.Getenv("PROGRAMDATA") - if programData != "" { - // Construct and clean the path to prevent traversal - guidPath := filepath.Join(programData, "Microsoft", "Crypto", "RSA", "MachineKeys", ".GUID") - cleanPath := filepath.Clean(guidPath) - - // Validate the path starts with the expected base (security 
check) - if strings.HasPrefix(cleanPath, filepath.Clean(programData)) { - if data, err := os.ReadFile(cleanPath); err == nil { - return string(data) - } - } - } - // Fallback: use system drive serial - systemRoot := os.Getenv("SYSTEMROOT") - if systemRoot != "" { - return systemRoot - } - } - return "" + return argon2.IDKey(passphrase, salt, argon2idTime, argon2idMemory, argon2idThreads, argon2idKeyLen) } diff --git a/internal/adapters/keyring/file_crypto.go b/internal/adapters/keyring/file_crypto.go new file mode 100644 index 0000000..ba7d828 --- /dev/null +++ b/internal/adapters/keyring/file_crypto.go @@ -0,0 +1,71 @@ +package keyring + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "encoding/base64" + "fmt" + "io" +) + +// zeroBytes overwrites b with zeros so a key derived from a user +// passphrase doesn't linger in heap memory after use. Go's GC retains +// allocations until they're collected; for a long-running `nylas air` / +// `nylas chat` process the derived AES key would otherwise survive in +// RAM for the lifetime of the process. +func zeroBytes(b []byte) { + for i := range b { + b[i] = 0 + } +} + +// encryptWithKey encrypts plaintext with AES-256-GCM using the given key. +// The returned bytes are base64-encoded and include the nonce prepended to +// the ciphertext. +func encryptWithKey(key, plaintext []byte) ([]byte, error) { + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return nil, err + } + + nonce := make([]byte, gcm.NonceSize()) + if _, err := io.ReadFull(rand.Reader, nonce); err != nil { + return nil, err + } + + ciphertext := gcm.Seal(nonce, nonce, plaintext, nil) + return []byte(base64.StdEncoding.EncodeToString(ciphertext)), nil +} + +// decryptWithKey decrypts base64-encoded AES-256-GCM ciphertext using the +// given key. The nonce is read from the first gcm.NonceSize() bytes of the +// decoded ciphertext. 
+func decryptWithKey(key, data []byte) ([]byte, error) { + ciphertext, err := base64.StdEncoding.DecodeString(string(data)) + if err != nil { + return nil, err + } + + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return nil, err + } + + if len(ciphertext) < gcm.NonceSize() { + return nil, fmt.Errorf("ciphertext too short") + } + + nonce, ciphertext := ciphertext[:gcm.NonceSize()], ciphertext[gcm.NonceSize():] + return gcm.Open(nil, nonce, ciphertext, nil) +} diff --git a/internal/adapters/keyring/file_legacy.go b/internal/adapters/keyring/file_legacy.go new file mode 100644 index 0000000..3188440 --- /dev/null +++ b/internal/adapters/keyring/file_legacy.go @@ -0,0 +1,83 @@ +package keyring + +import ( + "crypto/sha256" + "os" + "path/filepath" + "runtime" + "strings" +) + +// deriveLegacyKey derives the pre-v2 machine-specific fallback key so older +// encrypted files can still be read and then re-encrypted with a +// passphrase-derived key (one-shot migration). +// +// The key is a SHA-256 hash of concatenated host metadata. It is +// intentionally low-entropy compared to a user-supplied passphrase and exists +// only to allow migration from legacy installations. +func deriveLegacyKey() ([]byte, error) { + var identifiers []byte + + hostname, _ := os.Hostname() + identifiers = append(identifiers, []byte(hostname)...) + + identifiers = append(identifiers, []byte(os.Getenv("USER"))...) + identifiers = append(identifiers, []byte(os.Getenv("USERNAME"))...) // Windows + + home, _ := os.UserHomeDir() + identifiers = append(identifiers, []byte(home)...) + + identifiers = append(identifiers, []byte(getMachineID())...) + + // Static salt to prevent rainbow table attacks against this specific construction. + identifiers = append(identifiers, []byte("nylas-cli-v1-secret-store")...) 
+ + hash := sha256.Sum256(identifiers) + return hash[:], nil +} + +// getMachineID attempts to read a platform-specific machine identifier. +// Returns an empty string when no identifier is available; callers handle this +// gracefully by concatenating an empty contribution. +// +// On macOS, both candidate paths typically require elevated privileges and +// won't exist on a stock install — most macOS users will fall through with +// an empty machine ID. That's intentional: this helper feeds the legacy +// machine-derived migration key, which now exists only to decrypt files +// written by older versions and re-encrypt them under the user-supplied +// passphrase. Empty contribution means the legacy key is weaker, but the +// migration path requires NYLAS_FILE_STORE_PASSPHRASE to be set anyway. +func getMachineID() string { + switch runtime.GOOS { + case "linux": + if data, err := os.ReadFile("/etc/machine-id"); err == nil { + return string(data) + } + if data, err := os.ReadFile("/var/lib/dbus/machine-id"); err == nil { + return string(data) + } + case "darwin": + // Both files are typically root-owned on modern macOS. 
+ if data, err := os.ReadFile("/var/db/SystemKey"); err == nil { + return string(data) + } + if data, err := os.ReadFile("/Library/Preferences/SystemConfiguration/com.apple.smb.server.plist"); err == nil { + return string(data) + } + case "windows": + programData := os.Getenv("PROGRAMDATA") + if programData != "" { + guidPath := filepath.Join(programData, "Microsoft", "Crypto", "RSA", "MachineKeys", ".GUID") + cleanPath := filepath.Clean(guidPath) + if strings.HasPrefix(cleanPath, filepath.Clean(programData)) { + if data, err := os.ReadFile(cleanPath); err == nil { + return string(data) + } + } + } + if systemRoot := os.Getenv("SYSTEMROOT"); systemRoot != "" { + return systemRoot + } + } + return "" +} diff --git a/internal/adapters/keyring/keyring.go b/internal/adapters/keyring/keyring.go index 003b8c8..7dc8330 100644 --- a/internal/adapters/keyring/keyring.go +++ b/internal/adapters/keyring/keyring.go @@ -3,6 +3,7 @@ package keyring import ( "errors" + "fmt" "os" "github.com/nylas/cli/internal/domain" @@ -28,7 +29,7 @@ func (k *SystemKeyring) Set(key, value string) error { // Get retrieves a secret value for the given key. func (k *SystemKeyring) Get(key string) (string, error) { value, err := keyring.Get(serviceName, key) - if err == keyring.ErrNotFound { + if errors.Is(err, keyring.ErrNotFound) { return "", domain.ErrSecretNotFound } return value, err @@ -97,23 +98,40 @@ func NewSecretStore(configDir string) (ports.SecretStore, error) { return nil, err } - // Migrate credentials from file store to keyring - if apiKey != "" { - _ = kr.Set(ports.KeyAPIKey, apiKey) + // Migrate credentials from file store to keyring. Keep going on per-key + // failures so a single broken entry doesn't block the rest of the move, + // but surface the failures so the user knows something didn't migrate. 
+ var migrationErrs []error + migrate := func(key, value string) { + if value == "" { + return + } + if err := kr.Set(key, value); err != nil { + migrationErrs = append(migrationErrs, fmt.Errorf("migrate %s: %w", key, err)) + } + } + + migrate(ports.KeyAPIKey, apiKey) + if clientID, err := fileStore.Get(ports.KeyClientID); err == nil { + migrate(ports.KeyClientID, clientID) } - if clientID, err := fileStore.Get(ports.KeyClientID); err == nil && clientID != "" { - _ = kr.Set(ports.KeyClientID, clientID) + if clientSecret, err := fileStore.Get(ports.KeyClientSecret); err == nil { + migrate(ports.KeyClientSecret, clientSecret) } - if clientSecret, err := fileStore.Get(ports.KeyClientSecret); err == nil && clientSecret != "" { - _ = kr.Set(ports.KeyClientSecret, clientSecret) + if grants, err := fileStore.Get("grants"); err == nil { + migrate("grants", grants) } - - // Migrate grants data - if grants, err := fileStore.Get("grants"); err == nil && grants != "" { - _ = kr.Set("grants", grants) + if defaultGrant, err := fileStore.Get("default_grant"); err == nil { + migrate("default_grant", defaultGrant) } - if defaultGrant, err := fileStore.Get("default_grant"); err == nil && defaultGrant != "" { - _ = kr.Set("default_grant", defaultGrant) + + if len(migrationErrs) > 0 { + // Print to stderr but do not fail — the keyring is usable even with + // partial migration; users may need to re-run `nylas auth config`. 
+ fmt.Fprintf(os.Stderr, "warning: %d secrets failed to migrate from file store to keyring; re-run `nylas auth config` to retry\n", len(migrationErrs)) + for _, e := range migrationErrs { + fmt.Fprintf(os.Stderr, " - %v\n", e) + } } return kr, nil diff --git a/internal/adapters/nylas/admin.go b/internal/adapters/nylas/admin.go index c13bd46..0b781a4 100644 --- a/internal/adapters/nylas/admin.go +++ b/internal/adapters/nylas/admin.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "net/http" + "net/url" "github.com/nylas/cli/internal/domain" ) @@ -32,8 +33,11 @@ func (c *HTTPClient) ListApplications(ctx context.Context) ([]domain.Application return nil, c.parseError(resp) } - // Read body once (special handling: API may return array or single object) - body, err := io.ReadAll(resp.Body) + // Read body once (special handling: API may return array or single object). + // Bound the read so a misbehaving upstream cannot OOM us, and so that an + // auth/error JSON containing tokens or PII cannot be echoed unbounded into + // our error string below. + body, err := io.ReadAll(io.LimitReader(resp.Body, maxResponseBodySize)) if err != nil { return nil, err } @@ -57,17 +61,22 @@ func (c *HTTPClient) ListApplications(ctx context.Context) ([]domain.Application } } - // If both fail, return error with response body for debugging - return nil, fmt.Errorf("failed to decode applications response: %s", string(body)) + // Both decodings failed. Report status only — never echo the raw body + // since it may carry tokens, customer data, or other sensitive fields. + return nil, fmt.Errorf("failed to decode applications response (status %d, %d bytes)", resp.StatusCode, len(body)) } +// maxResponseBodySize bounds bodies read for ad-hoc decoding (1 MiB). Larger +// bodies are an upstream bug; truncating prevents secret leakage in errors. +const maxResponseBodySize = 1 << 20 + // GetApplication retrieves a specific application. 
func (c *HTTPClient) GetApplication(ctx context.Context, appID string) (*domain.Application, error) { if err := validateRequired("application ID", appID); err != nil { return nil, err } - queryURL := fmt.Sprintf("%s/v3/applications/%s", c.baseURL, appID) + queryURL := fmt.Sprintf("%s/v3/applications/%s", c.baseURL, url.PathEscape(appID)) var result struct { Data domain.Application `json:"data"` @@ -102,7 +111,7 @@ func (c *HTTPClient) UpdateApplication(ctx context.Context, appID string, req *d return nil, err } - queryURL := fmt.Sprintf("%s/v3/applications/%s", c.baseURL, appID) + queryURL := fmt.Sprintf("%s/v3/applications/%s", c.baseURL, url.PathEscape(appID)) resp, err := c.doJSONRequest(ctx, "PATCH", queryURL, req) if err != nil { @@ -123,7 +132,7 @@ func (c *HTTPClient) DeleteApplication(ctx context.Context, appID string) error if err := validateRequired("application ID", appID); err != nil { return err } - queryURL := fmt.Sprintf("%s/v3/applications/%s", c.baseURL, appID) + queryURL := fmt.Sprintf("%s/v3/applications/%s", c.baseURL, url.PathEscape(appID)) return c.doDelete(ctx, queryURL) } @@ -148,7 +157,7 @@ func (c *HTTPClient) GetCallbackURI(ctx context.Context, uriID string) (*domain. 
return nil, err } - queryURL := fmt.Sprintf("%s/v3/applications/callback-uris/%s", c.baseURL, uriID) + queryURL := fmt.Sprintf("%s/v3/applications/callback-uris/%s", c.baseURL, url.PathEscape(uriID)) var result struct { Data domain.CallbackURI `json:"data"` @@ -190,7 +199,7 @@ func (c *HTTPClient) UpdateCallbackURI(ctx context.Context, uriID string, req *d return nil, err } - queryURL := fmt.Sprintf("%s/v3/applications/callback-uris/%s", c.baseURL, uriID) + queryURL := fmt.Sprintf("%s/v3/applications/callback-uris/%s", c.baseURL, url.PathEscape(uriID)) resp, err := c.doJSONRequest(ctx, "PATCH", queryURL, req) if err != nil { @@ -211,7 +220,7 @@ func (c *HTTPClient) DeleteCallbackURI(ctx context.Context, uriID string) error if err := validateRequired("callback URI ID", uriID); err != nil { return err } - queryURL := fmt.Sprintf("%s/v3/applications/callback-uris/%s", c.baseURL, uriID) + queryURL := fmt.Sprintf("%s/v3/applications/callback-uris/%s", c.baseURL, url.PathEscape(uriID)) return c.doDelete(ctx, queryURL) } @@ -236,7 +245,7 @@ func (c *HTTPClient) GetConnector(ctx context.Context, connectorID string) (*dom return nil, err } - queryURL := fmt.Sprintf("%s/v3/connectors/%s", c.baseURL, connectorID) + queryURL := fmt.Sprintf("%s/v3/connectors/%s", c.baseURL, url.PathEscape(connectorID)) var result struct { Data domain.Connector `json:"data"` @@ -271,7 +280,7 @@ func (c *HTTPClient) UpdateConnector(ctx context.Context, connectorID string, re return nil, err } - queryURL := fmt.Sprintf("%s/v3/connectors/%s", c.baseURL, connectorID) + queryURL := fmt.Sprintf("%s/v3/connectors/%s", c.baseURL, url.PathEscape(connectorID)) resp, err := c.doJSONRequest(ctx, "PATCH", queryURL, req) if err != nil { @@ -292,7 +301,7 @@ func (c *HTTPClient) DeleteConnector(ctx context.Context, connectorID string) er if err := validateRequired("connector ID", connectorID); err != nil { return err } - queryURL := fmt.Sprintf("%s/v3/connectors/%s", c.baseURL, connectorID) + queryURL := 
fmt.Sprintf("%s/v3/connectors/%s", c.baseURL, url.PathEscape(connectorID)) return c.doDelete(ctx, queryURL) } @@ -304,7 +313,7 @@ func (c *HTTPClient) ListCredentials(ctx context.Context, connectorID string) ([ return nil, err } - queryURL := fmt.Sprintf("%s/v3/connectors/%s/credentials", c.baseURL, connectorID) + queryURL := fmt.Sprintf("%s/v3/connectors/%s/credentials", c.baseURL, url.PathEscape(connectorID)) var result struct { Data []domain.ConnectorCredential `json:"data"` @@ -321,7 +330,7 @@ func (c *HTTPClient) GetCredential(ctx context.Context, credentialID string) (*d return nil, err } - queryURL := fmt.Sprintf("%s/v3/credentials/%s", c.baseURL, credentialID) + queryURL := fmt.Sprintf("%s/v3/credentials/%s", c.baseURL, url.PathEscape(credentialID)) var result struct { Data domain.ConnectorCredential `json:"data"` @@ -338,7 +347,7 @@ func (c *HTTPClient) CreateCredential(ctx context.Context, connectorID string, r return nil, err } - queryURL := fmt.Sprintf("%s/v3/connectors/%s/credentials", c.baseURL, connectorID) + queryURL := fmt.Sprintf("%s/v3/connectors/%s/credentials", c.baseURL, url.PathEscape(connectorID)) resp, err := c.doJSONRequest(ctx, "POST", queryURL, req) if err != nil { @@ -360,7 +369,7 @@ func (c *HTTPClient) UpdateCredential(ctx context.Context, credentialID string, return nil, err } - queryURL := fmt.Sprintf("%s/v3/credentials/%s", c.baseURL, credentialID) + queryURL := fmt.Sprintf("%s/v3/credentials/%s", c.baseURL, url.PathEscape(credentialID)) resp, err := c.doJSONRequest(ctx, "PATCH", queryURL, req) if err != nil { @@ -381,32 +390,89 @@ func (c *HTTPClient) DeleteCredential(ctx context.Context, credentialID string) if err := validateRequired("credential ID", credentialID); err != nil { return err } - queryURL := fmt.Sprintf("%s/v3/credentials/%s", c.baseURL, credentialID) + queryURL := fmt.Sprintf("%s/v3/credentials/%s", c.baseURL, url.PathEscape(credentialID)) return c.doDelete(ctx, queryURL) } // Admin Grant Operations -// 
ListAllGrants retrieves all grants with optional filtering. +// maxGrantPages caps the number of pages ListAllGrants will fetch even +// when the server keeps returning fresh next_cursor values. It's a +// safety ceiling above any realistic grant count — the cycle detector +// catches the typical misbehaviour, but a server that hands out +// distinct-but-empty pages forever still needs a hard stop. +const maxGrantPages = 1000 + +// ListAllGrants retrieves all grants matching the optional filters, +// transparently following next_cursor pagination so callers always see the +// complete result set. +// +// When params.Limit is positive, at most that many grants are returned and +// pagination stops once the cap is reached. When params is nil or Limit is +// zero, every page is fetched. func (c *HTTPClient) ListAllGrants(ctx context.Context, params *domain.GrantsQueryParams) ([]domain.Grant, error) { baseURL := fmt.Sprintf("%s/v3/grants", c.baseURL) - qb := NewQueryBuilder() + limit := 0 + connectorID := "" + status := "" + offset := 0 if params != nil { - qb.AddInt("limit", params.Limit). - AddInt("offset", params.Offset). - Add("connector_id", params.ConnectorID). - Add("status", params.Status) - } - queryURL := qb.BuildURL(baseURL) + limit = params.Limit + connectorID = params.ConnectorID + status = params.Status + offset = params.Offset + } + + pageToken := "" + grants := make([]domain.Grant, 0) + // seenCursors guards against cycles longer than length 1 (the simple + // `result.NextCursor == pageToken` case is checked separately). + seenCursors := make(map[string]struct{}) + for { + qb := NewQueryBuilder(). + AddInt("limit", limit). + Add("connector_id", connectorID). + Add("status", status). + AddInt("offset", offset). 
+ Add("page_token", pageToken) + queryURL := qb.BuildURL(baseURL) + + var result struct { + Data []domain.Grant `json:"data"` + NextCursor string `json:"next_cursor,omitempty"` + } + if err := c.doGet(ctx, queryURL, &result); err != nil { + return nil, err + } - var result struct { - Data []domain.Grant `json:"data"` - } - if err := c.doGet(ctx, queryURL, &result); err != nil { - return nil, err + grants = append(grants, result.Data...) + if limit > 0 && len(grants) >= limit { + return grants[:limit], nil + } + + if result.NextCursor == "" { + return grants, nil + } + if result.NextCursor == pageToken { + return nil, fmt.Errorf("failed to paginate grants: repeated cursor %q", result.NextCursor) + } + // Detect cycles longer than length 1 (e.g. A → B → A) and bound the + // total number of pages we'll walk so a misbehaving server can't + // trap us in an unbounded loop. Cap at 1000 pages — well above + // realistic grant counts but a hard ceiling on the worst case. + if _, seen := seenCursors[result.NextCursor]; seen { + return nil, fmt.Errorf("failed to paginate grants: cursor cycle detected near %q", result.NextCursor) + } + seenCursors[result.NextCursor] = struct{}{} + if len(seenCursors) > maxGrantPages { + return nil, fmt.Errorf("failed to paginate grants: exceeded max page count (%d)", maxGrantPages) + } + pageToken = result.NextCursor + // offset only meaningful on the first request; the API uses cursors + // to advance from there. + offset = 0 } - return result.Data, nil } // GetGrantStats retrieves grant statistics. 
diff --git a/internal/adapters/nylas/admin_grants_test.go b/internal/adapters/nylas/admin_grants_test.go index b4d67b0..57eb535 100644 --- a/internal/adapters/nylas/admin_grants_test.go +++ b/internal/adapters/nylas/admin_grants_test.go @@ -94,6 +94,70 @@ func TestHTTPClient_ListAllGrants_WithParams(t *testing.T) { assert.Equal(t, "google", string(grants[0].Provider)) } +func TestHTTPClient_ListAllGrants_FollowsPagination(t *testing.T) { + // Regression: previously ListAllGrants ignored next_cursor and returned + // only the first page, producing silently incorrect grant counts in + // GetGrantStats for any tenant whose grants exceeded the page size. + calls := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + calls++ + token := r.URL.Query().Get("page_token") + w.Header().Set("Content-Type", "application/json") + switch token { + case "": + _ = json.NewEncoder(w).Encode(map[string]any{ + "data": []map[string]any{ + {"id": "grant-1", "provider": "google", "grant_status": "valid"}, + {"id": "grant-2", "provider": "google", "grant_status": "valid"}, + }, + "next_cursor": "page-2", + }) + case "page-2": + _ = json.NewEncoder(w).Encode(map[string]any{ + "data": []map[string]any{ + {"id": "grant-3", "provider": "microsoft", "grant_status": "invalid"}, + }, + }) + default: + t.Fatalf("unexpected page_token %q", token) + } + })) + defer server.Close() + + client := nylas.NewHTTPClient() + client.SetCredentials("client-id", "secret", "api-key") + client.SetBaseURL(server.URL) + + grants, err := client.ListAllGrants(context.Background(), nil) + require.NoError(t, err) + assert.Equal(t, 2, calls, "should have followed next_cursor exactly once") + assert.Len(t, grants, 3) + assert.Equal(t, "grant-3", grants[2].ID) +} + +func TestHTTPClient_ListAllGrants_LimitCapsResults(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", 
"application/json") + _ = json.NewEncoder(w).Encode(map[string]any{ + "data": []map[string]any{ + {"id": "grant-1", "provider": "google", "grant_status": "valid"}, + {"id": "grant-2", "provider": "google", "grant_status": "valid"}, + {"id": "grant-3", "provider": "google", "grant_status": "valid"}, + }, + "next_cursor": "next", + }) + })) + defer server.Close() + + client := nylas.NewHTTPClient() + client.SetCredentials("client-id", "secret", "api-key") + client.SetBaseURL(server.URL) + + grants, err := client.ListAllGrants(context.Background(), &domain.GrantsQueryParams{Limit: 2}) + require.NoError(t, err) + assert.Len(t, grants, 2, "client-side limit should cap results") +} + func TestHTTPClient_GetGrantStats(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "/v3/grants", r.URL.Path) diff --git a/internal/adapters/nylas/agent.go b/internal/adapters/nylas/agent.go index 507b342..8da113b 100644 --- a/internal/adapters/nylas/agent.go +++ b/internal/adapters/nylas/agent.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "net/http" + "net/url" "github.com/nylas/cli/internal/domain" ) @@ -93,7 +94,7 @@ func (c *HTTPClient) UpdateAgentAccount(ctx context.Context, grantID, email, app return nil, fmt.Errorf("%w: grant is not a nylas agent account (provider=%s)", domain.ErrInvalidGrant, grant.Provider) } - queryURL := fmt.Sprintf("%s/v3/grants/%s", c.baseURL, grantID) + queryURL := fmt.Sprintf("%s/v3/grants/%s", c.baseURL, url.PathEscape(grantID)) settings := make(map[string]any) settings["email"] = email if grant.Settings.PolicyID != "" { diff --git a/internal/adapters/nylas/attachments.go b/internal/adapters/nylas/attachments.go index 45e8194..2375e75 100644 --- a/internal/adapters/nylas/attachments.go +++ b/internal/adapters/nylas/attachments.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "net/http" + "net/url" "github.com/nylas/cli/internal/domain" ) @@ -22,7 +23,7 @@ type attachmentResponse struct { // 
GetAttachment retrieves attachment metadata. func (c *HTTPClient) GetAttachment(ctx context.Context, grantID, messageID, attachmentID string) (*domain.Attachment, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/%s/attachments/%s", c.baseURL, grantID, messageID, attachmentID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/%s/attachments/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(messageID), url.PathEscape(attachmentID)) var result struct { Data attachmentResponse `json:"data"` @@ -44,7 +45,7 @@ func (c *HTTPClient) GetAttachment(ctx context.Context, grantID, messageID, atta // DownloadAttachment downloads attachment content. func (c *HTTPClient) DownloadAttachment(ctx context.Context, grantID, messageID, attachmentID string) (io.ReadCloser, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/%s/attachments/%s/download", c.baseURL, grantID, messageID, attachmentID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/%s/attachments/%s/download", c.baseURL, url.PathEscape(grantID), url.PathEscape(messageID), url.PathEscape(attachmentID)) req, err := http.NewRequestWithContext(ctx, "GET", queryURL, nil) if err != nil { diff --git a/internal/adapters/nylas/auth.go b/internal/adapters/nylas/auth.go index 5cde7c3..b119ec7 100644 --- a/internal/adapters/nylas/auth.go +++ b/internal/adapters/nylas/auth.go @@ -5,6 +5,7 @@ import ( "context" "fmt" "net/http" + "net/url" "github.com/nylas/cli/internal/domain" ) @@ -87,7 +88,7 @@ func (c *HTTPClient) ListGrants(ctx context.Context) ([]domain.Grant, error) { // GetGrant retrieves a specific grant. func (c *HTTPClient) GetGrant(ctx context.Context, grantID string) (*domain.Grant, error) { - queryURL := c.baseURL + "/v3/grants/" + grantID + queryURL := c.baseURL + "/v3/grants/" + url.PathEscape(grantID) var result struct { Data domain.Grant `json:"data"` @@ -101,7 +102,7 @@ func (c *HTTPClient) GetGrant(ctx context.Context, grantID string) (*domain.Gran // RevokeGrant revokes a grant. 
func (c *HTTPClient) RevokeGrant(ctx context.Context, grantID string) error { - req, err := http.NewRequestWithContext(ctx, "DELETE", c.baseURL+"/v3/grants/"+grantID, nil) + req, err := http.NewRequestWithContext(ctx, "DELETE", c.baseURL+"/v3/grants/"+url.PathEscape(grantID), nil) if err != nil { return err } diff --git a/internal/adapters/nylas/calendars_calendars.go b/internal/adapters/nylas/calendars_calendars.go index 3d54314..cd6577e 100644 --- a/internal/adapters/nylas/calendars_calendars.go +++ b/internal/adapters/nylas/calendars_calendars.go @@ -3,6 +3,7 @@ package nylas import ( "context" "fmt" + "net/url" "github.com/nylas/cli/internal/domain" ) @@ -12,7 +13,7 @@ func (c *HTTPClient) GetCalendars(ctx context.Context, grantID string) ([]domain return nil, err } - queryURL := fmt.Sprintf("%s/v3/grants/%s/calendars", c.baseURL, grantID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/calendars", c.baseURL, url.PathEscape(grantID)) var result struct { Data []calendarResponse `json:"data"` @@ -33,7 +34,7 @@ func (c *HTTPClient) GetCalendar(ctx context.Context, grantID, calendarID string return nil, err } - queryURL := fmt.Sprintf("%s/v3/grants/%s/calendars/%s", c.baseURL, grantID, calendarID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/calendars/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(calendarID)) var result struct { Data calendarResponse `json:"data"` @@ -52,7 +53,7 @@ func (c *HTTPClient) CreateCalendar(ctx context.Context, grantID string, req *do return nil, err } - queryURL := fmt.Sprintf("%s/v3/grants/%s/calendars", c.baseURL, grantID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/calendars", c.baseURL, url.PathEscape(grantID)) payload := map[string]any{ "name": req.Name, @@ -92,7 +93,7 @@ func (c *HTTPClient) UpdateCalendar(ctx context.Context, grantID, calendarID str return nil, err } - queryURL := fmt.Sprintf("%s/v3/grants/%s/calendars/%s", c.baseURL, grantID, calendarID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/calendars/%s", c.baseURL, 
url.PathEscape(grantID), url.PathEscape(calendarID)) payload := make(map[string]any) if req.Name != nil { @@ -135,7 +136,7 @@ func (c *HTTPClient) DeleteCalendar(ctx context.Context, grantID, calendarID str if err := validateRequired("calendar ID", calendarID); err != nil { return err } - queryURL := fmt.Sprintf("%s/v3/grants/%s/calendars/%s", c.baseURL, grantID, calendarID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/calendars/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(calendarID)) return c.doDelete(ctx, queryURL) } diff --git a/internal/adapters/nylas/calendars_events.go b/internal/adapters/nylas/calendars_events.go index ec475b2..681e7b4 100644 --- a/internal/adapters/nylas/calendars_events.go +++ b/internal/adapters/nylas/calendars_events.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "net/url" "github.com/nylas/cli/internal/domain" ) @@ -31,7 +32,7 @@ func (c *HTTPClient) GetEventsWithCursor(ctx context.Context, grantID, calendarI params.Limit = 10 } - baseURL := fmt.Sprintf("%s/v3/grants/%s/events", c.baseURL, grantID) + baseURL := fmt.Sprintf("%s/v3/grants/%s/events", c.baseURL, url.PathEscape(grantID)) queryURL := NewQueryBuilder(). Add("calendar_id", calendarID). AddInt("limit", params.Limit). @@ -74,7 +75,7 @@ func (c *HTTPClient) GetEvent(ctx context.Context, grantID, calendarID, eventID if err := validateRequired("event ID", eventID); err != nil { return nil, err } - baseURL := fmt.Sprintf("%s/v3/grants/%s/events/%s", c.baseURL, grantID, eventID) + baseURL := fmt.Sprintf("%s/v3/grants/%s/events/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(eventID)) queryURL := NewQueryBuilder().Add("calendar_id", calendarID).BuildURL(baseURL) var result struct { @@ -90,7 +91,7 @@ func (c *HTTPClient) GetEvent(ctx context.Context, grantID, calendarID, eventID // CreateEvent creates a new event. 
func (c *HTTPClient) CreateEvent(ctx context.Context, grantID, calendarID string, req *domain.CreateEventRequest) (*domain.Event, error) { - baseURL := fmt.Sprintf("%s/v3/grants/%s/events", c.baseURL, grantID) + baseURL := fmt.Sprintf("%s/v3/grants/%s/events", c.baseURL, url.PathEscape(grantID)) queryURL := NewQueryBuilder().Add("calendar_id", calendarID).BuildURL(baseURL) payload := map[string]any{ @@ -142,7 +143,7 @@ func (c *HTTPClient) CreateEvent(ctx context.Context, grantID, calendarID string // UpdateEvent updates an existing event. func (c *HTTPClient) UpdateEvent(ctx context.Context, grantID, calendarID, eventID string, req *domain.UpdateEventRequest) (*domain.Event, error) { - baseURL := fmt.Sprintf("%s/v3/grants/%s/events/%s", c.baseURL, grantID, eventID) + baseURL := fmt.Sprintf("%s/v3/grants/%s/events/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(eventID)) queryURL := NewQueryBuilder().Add("calendar_id", calendarID).BuildURL(baseURL) payload := make(map[string]any) @@ -195,14 +196,14 @@ func (c *HTTPClient) UpdateEvent(ctx context.Context, grantID, calendarID, event // DeleteEvent deletes an event. func (c *HTTPClient) DeleteEvent(ctx context.Context, grantID, calendarID, eventID string) error { - baseURL := fmt.Sprintf("%s/v3/grants/%s/events/%s", c.baseURL, grantID, eventID) + baseURL := fmt.Sprintf("%s/v3/grants/%s/events/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(eventID)) queryURL := NewQueryBuilder().Add("calendar_id", calendarID).BuildURL(baseURL) return c.doDelete(ctx, queryURL) } // SendRSVP sends an RSVP response to an event invitation. 
func (c *HTTPClient) SendRSVP(ctx context.Context, grantID, calendarID, eventID string, req *domain.SendRSVPRequest) error { - baseURL := fmt.Sprintf("%s/v3/grants/%s/events/%s/send-rsvp", c.baseURL, grantID, eventID) + baseURL := fmt.Sprintf("%s/v3/grants/%s/events/%s/send-rsvp", c.baseURL, url.PathEscape(grantID), url.PathEscape(eventID)) queryURL := NewQueryBuilder().Add("calendar_id", calendarID).BuildURL(baseURL) payload := map[string]any{ @@ -223,7 +224,7 @@ func (c *HTTPClient) SendRSVP(ctx context.Context, grantID, calendarID, eventID // GetFreeBusy retrieves free/busy information. func (c *HTTPClient) GetFreeBusy(ctx context.Context, grantID string, freeBusyReq *domain.FreeBusyRequest) (*domain.FreeBusyResponse, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/calendars/free-busy", c.baseURL, grantID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/calendars/free-busy", c.baseURL, url.PathEscape(grantID)) resp, err := c.doJSONRequest(ctx, "POST", queryURL, freeBusyReq) if err != nil { diff --git a/internal/adapters/nylas/calendars_virtual_recurring.go b/internal/adapters/nylas/calendars_virtual_recurring.go index dfc4c45..6c8f602 100644 --- a/internal/adapters/nylas/calendars_virtual_recurring.go +++ b/internal/adapters/nylas/calendars_virtual_recurring.go @@ -3,6 +3,7 @@ package nylas import ( "context" "fmt" + "net/url" "github.com/nylas/cli/internal/domain" ) @@ -48,7 +49,7 @@ func (c *HTTPClient) ListVirtualCalendarGrants(ctx context.Context) ([]domain.Vi // GetVirtualCalendarGrant retrieves a single virtual calendar grant by ID. 
func (c *HTTPClient) GetVirtualCalendarGrant(ctx context.Context, grantID string) (*domain.VirtualCalendarGrant, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s", c.baseURL, grantID) + queryURL := fmt.Sprintf("%s/v3/grants/%s", c.baseURL, url.PathEscape(grantID)) var result struct { Data domain.VirtualCalendarGrant `json:"data"` @@ -62,7 +63,7 @@ func (c *HTTPClient) GetVirtualCalendarGrant(ctx context.Context, grantID string // DeleteVirtualCalendarGrant deletes a virtual calendar grant. func (c *HTTPClient) DeleteVirtualCalendarGrant(ctx context.Context, grantID string) error { - queryURL := fmt.Sprintf("%s/v3/grants/%s", c.baseURL, grantID) + queryURL := fmt.Sprintf("%s/v3/grants/%s", c.baseURL, url.PathEscape(grantID)) return c.doDelete(ctx, queryURL) } @@ -77,7 +78,7 @@ func (c *HTTPClient) GetRecurringEventInstances(ctx context.Context, grantID, ca params.ExpandRecurring = true } - baseURL := fmt.Sprintf("%s/v3/grants/%s/events", c.baseURL, grantID) + baseURL := fmt.Sprintf("%s/v3/grants/%s/events", c.baseURL, url.PathEscape(grantID)) queryURL := NewQueryBuilder(). Add("calendar_id", calendarID). Add("master_event_id", masterEventID). diff --git a/internal/adapters/nylas/contacts.go b/internal/adapters/nylas/contacts.go index 737d5f2..f5ce062 100644 --- a/internal/adapters/nylas/contacts.go +++ b/internal/adapters/nylas/contacts.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "net/url" "github.com/nylas/cli/internal/domain" "github.com/nylas/cli/internal/util" @@ -55,7 +56,7 @@ func (c *HTTPClient) GetContacts(ctx context.Context, grantID string, params *do // GetContactsWithCursor retrieves contacts with pagination cursor. 
func (c *HTTPClient) GetContactsWithCursor(ctx context.Context, grantID string, params *domain.ContactQueryParams) (*domain.ContactListResponse, error) { - baseURL := fmt.Sprintf("%s/v3/grants/%s/contacts", c.baseURL, grantID) + baseURL := fmt.Sprintf("%s/v3/grants/%s/contacts", c.baseURL, url.PathEscape(grantID)) qb := NewQueryBuilder() if params != nil { @@ -94,7 +95,7 @@ func (c *HTTPClient) GetContact(ctx context.Context, grantID, contactID string) // GetContactWithPicture retrieves a single contact by ID with optional profile picture. func (c *HTTPClient) GetContactWithPicture(ctx context.Context, grantID, contactID string, includePicture bool) (*domain.Contact, error) { - baseURL := fmt.Sprintf("%s/v3/grants/%s/contacts/%s", c.baseURL, grantID, contactID) + baseURL := fmt.Sprintf("%s/v3/grants/%s/contacts/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(contactID)) queryURL := NewQueryBuilder().AddBool("profile_picture", includePicture).BuildURL(baseURL) var result struct { @@ -110,7 +111,7 @@ func (c *HTTPClient) GetContactWithPicture(ctx context.Context, grantID, contact // CreateContact creates a new contact. func (c *HTTPClient) CreateContact(ctx context.Context, grantID string, req *domain.CreateContactRequest) (*domain.Contact, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/contacts", c.baseURL, grantID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/contacts", c.baseURL, url.PathEscape(grantID)) resp, err := c.doJSONRequest(ctx, "POST", queryURL, req) if err != nil { @@ -130,7 +131,7 @@ func (c *HTTPClient) CreateContact(ctx context.Context, grantID string, req *dom // UpdateContact updates an existing contact. 
func (c *HTTPClient) UpdateContact(ctx context.Context, grantID, contactID string, req *domain.UpdateContactRequest) (*domain.Contact, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/contacts/%s", c.baseURL, grantID, contactID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/contacts/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(contactID)) resp, err := c.doJSONRequest(ctx, "PUT", queryURL, req, http.StatusOK) if err != nil { @@ -150,13 +151,13 @@ func (c *HTTPClient) UpdateContact(ctx context.Context, grantID, contactID strin // DeleteContact deletes a contact. func (c *HTTPClient) DeleteContact(ctx context.Context, grantID, contactID string) error { - queryURL := fmt.Sprintf("%s/v3/grants/%s/contacts/%s", c.baseURL, grantID, contactID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/contacts/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(contactID)) return c.doDelete(ctx, queryURL) } // GetContactGroups retrieves contact groups for a grant. func (c *HTTPClient) GetContactGroups(ctx context.Context, grantID string) ([]domain.ContactGroup, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/contacts/groups", c.baseURL, grantID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/contacts/groups", c.baseURL, url.PathEscape(grantID)) var result struct { Data []contactGroupResponse `json:"data"` @@ -170,7 +171,7 @@ func (c *HTTPClient) GetContactGroups(ctx context.Context, grantID string) ([]do // GetContactGroup retrieves a single contact group by ID. 
func (c *HTTPClient) GetContactGroup(ctx context.Context, grantID, groupID string) (*domain.ContactGroup, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/contacts/groups/%s", c.baseURL, grantID, groupID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/contacts/groups/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(groupID)) var result struct { Data contactGroupResponse `json:"data"` @@ -185,7 +186,7 @@ func (c *HTTPClient) GetContactGroup(ctx context.Context, grantID, groupID strin // CreateContactGroup creates a new contact group. func (c *HTTPClient) CreateContactGroup(ctx context.Context, grantID string, req *domain.CreateContactGroupRequest) (*domain.ContactGroup, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/contacts/groups", c.baseURL, grantID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/contacts/groups", c.baseURL, url.PathEscape(grantID)) resp, err := c.doJSONRequest(ctx, "POST", queryURL, req) if err != nil { @@ -205,7 +206,7 @@ func (c *HTTPClient) CreateContactGroup(ctx context.Context, grantID string, req // UpdateContactGroup updates an existing contact group. func (c *HTTPClient) UpdateContactGroup(ctx context.Context, grantID, groupID string, req *domain.UpdateContactGroupRequest) (*domain.ContactGroup, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/contacts/groups/%s", c.baseURL, grantID, groupID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/contacts/groups/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(groupID)) resp, err := c.doJSONRequest(ctx, "PUT", queryURL, req, http.StatusOK) if err != nil { @@ -225,7 +226,7 @@ func (c *HTTPClient) UpdateContactGroup(ctx context.Context, grantID, groupID st // DeleteContactGroup deletes a contact group. 
func (c *HTTPClient) DeleteContactGroup(ctx context.Context, grantID, groupID string) error { - queryURL := fmt.Sprintf("%s/v3/grants/%s/contacts/groups/%s", c.baseURL, grantID, groupID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/contacts/groups/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(groupID)) return c.doDelete(ctx, queryURL) } diff --git a/internal/adapters/nylas/drafts.go b/internal/adapters/nylas/drafts.go index d4a8f95..702b3fa 100644 --- a/internal/adapters/nylas/drafts.go +++ b/internal/adapters/nylas/drafts.go @@ -9,6 +9,8 @@ import ( "mime/multipart" "net/http" "net/textproto" + "net/url" + "slices" "time" "github.com/nylas/cli/internal/domain" @@ -59,7 +61,7 @@ func (c *HTTPClient) GetDrafts(ctx context.Context, grantID string, limit int) ( limit = 10 } - baseURL := fmt.Sprintf("%s/v3/grants/%s/drafts", c.baseURL, grantID) + baseURL := fmt.Sprintf("%s/v3/grants/%s/drafts", c.baseURL, url.PathEscape(grantID)) queryURL := NewQueryBuilder().AddInt("limit", limit).BuildURL(baseURL) var result struct { @@ -74,7 +76,7 @@ func (c *HTTPClient) GetDrafts(ctx context.Context, grantID string, limit int) ( // GetDraft retrieves a single draft by ID. func (c *HTTPClient) GetDraft(ctx context.Context, grantID, draftID string) (*domain.Draft, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/drafts/%s", c.baseURL, grantID, draftID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/drafts/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(draftID)) var result struct { Data draftResponse `json:"data"` @@ -129,7 +131,7 @@ func buildDraftPayload(req *domain.CreateDraftRequest, includeSignature bool) ma // createDraftWithJSON creates a draft using JSON encoding (no attachments or small attachments). 
func (c *HTTPClient) createDraftWithJSON(ctx context.Context, grantID string, req *domain.CreateDraftRequest) (*domain.Draft, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/drafts", c.baseURL, grantID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/drafts", c.baseURL, url.PathEscape(grantID)) resp, err := c.doJSONRequest(ctx, "POST", queryURL, buildDraftPayload(req, true)) if err != nil { @@ -147,25 +149,24 @@ func (c *HTTPClient) createDraftWithJSON(ctx context.Context, grantID string, re return &draft, nil } -// createDraftWithMultipart creates a draft with attachments using multipart/form-data. -func (c *HTTPClient) createDraftWithMultipart(ctx context.Context, grantID string, req *domain.CreateDraftRequest) (*domain.Draft, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/drafts", c.baseURL, grantID) - +// doMultipartDraft sends a multipart/form-data draft request and decodes the +// response into out. Used by both createDraftWithMultipart and updateDraftWithMultipart. +func (c *HTTPClient) doMultipartDraft(ctx context.Context, method, url string, payload map[string]any, attachments []domain.Attachment, out any, acceptedStatuses ...int) error { // Create multipart form var buf bytes.Buffer writer := multipart.NewWriter(&buf) // Add message as JSON field - messageJSON, err := json.Marshal(buildDraftPayload(req, true)) + messageJSON, err := json.Marshal(payload) if err != nil { - return nil, fmt.Errorf("failed to marshal message: %w", err) + return fmt.Errorf("failed to marshal message: %w", err) } if err := writer.WriteField("message", string(messageJSON)); err != nil { - return nil, fmt.Errorf("failed to write message field: %w", err) + return fmt.Errorf("failed to write message field: %w", err) } // Add each attachment as a file - for i, att := range req.Attachments { + for i, att := range attachments { if len(att.Content) == 0 { continue // Skip attachments without content } @@ -181,38 +182,45 @@ func (c *HTTPClient) createDraftWithMultipart(ctx 
context.Context, grantID strin part, err := writer.CreatePart(h) if err != nil { - return nil, fmt.Errorf("failed to create attachment part: %w", err) + return fmt.Errorf("failed to create attachment part: %w", err) } if _, err := part.Write(att.Content); err != nil { - return nil, fmt.Errorf("failed to write attachment content: %w", err) + return fmt.Errorf("failed to write attachment content: %w", err) } } if err := writer.Close(); err != nil { - return nil, fmt.Errorf("failed to close multipart writer: %w", err) + return fmt.Errorf("failed to close multipart writer: %w", err) } - httpReq, err := http.NewRequestWithContext(ctx, "POST", queryURL, &buf) + httpReq, err := http.NewRequestWithContext(ctx, method, url, &buf) if err != nil { - return nil, err + return err } httpReq.Header.Set("Content-Type", writer.FormDataContentType()) c.setAuthHeader(httpReq) resp, err := c.doRequest(ctx, httpReq) if err != nil { - return nil, fmt.Errorf("%w: %v", domain.ErrNetworkError, err) + return fmt.Errorf("%w: %v", domain.ErrNetworkError, err) } - defer func() { _ = resp.Body.Close() }() - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { - return nil, c.parseError(resp) + if !slices.Contains(acceptedStatuses, resp.StatusCode) { + defer func() { _ = resp.Body.Close() }() + return c.parseError(resp) } + return c.decodeJSONResponse(resp, out) +} + +// createDraftWithMultipart creates a draft with attachments using multipart/form-data. 
+func (c *HTTPClient) createDraftWithMultipart(ctx context.Context, grantID string, req *domain.CreateDraftRequest) (*domain.Draft, error) { + queryURL := fmt.Sprintf("%s/v3/grants/%s/drafts", c.baseURL, url.PathEscape(grantID)) + var result struct { Data draftResponse `json:"data"` } - if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + if err := c.doMultipartDraft(ctx, "POST", queryURL, buildDraftPayload(req, true), req.Attachments, &result, http.StatusOK, http.StatusCreated); err != nil { return nil, err } @@ -223,7 +231,7 @@ func (c *HTTPClient) createDraftWithMultipart(ctx context.Context, grantID strin // CreateDraftWithAttachmentFromReader creates a draft with an attachment from an io.Reader. // This is useful for large attachments or streaming file uploads. func (c *HTTPClient) CreateDraftWithAttachmentFromReader(ctx context.Context, grantID string, req *domain.CreateDraftRequest, filename string, contentType string, reader io.Reader) (*domain.Draft, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/drafts", c.baseURL, grantID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/drafts", c.baseURL, url.PathEscape(grantID)) payload := buildDraftPayload(req, true) // Use pipe to stream multipart data @@ -280,21 +288,22 @@ func (c *HTTPClient) CreateDraftWithAttachmentFromReader(ctx context.Context, gr if err != nil { return nil, fmt.Errorf("%w: %v", domain.ErrNetworkError, err) } - defer func() { _ = resp.Body.Close() }() // Wait for writer goroutine to finish if writerErr := <-errCh; writerErr != nil { + _ = resp.Body.Close() return nil, writerErr } if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { + defer func() { _ = resp.Body.Close() }() return nil, c.parseError(resp) } var result struct { Data draftResponse `json:"data"` } - if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + if err := c.decodeJSONResponse(resp, &result); err != nil { return nil, err } @@ -304,13 +313,13 @@ func (c *HTTPClient) 
CreateDraftWithAttachmentFromReader(ctx context.Context, gr // DeleteDraft deletes a draft. func (c *HTTPClient) DeleteDraft(ctx context.Context, grantID, draftID string) error { - queryURL := fmt.Sprintf("%s/v3/grants/%s/drafts/%s", c.baseURL, grantID, draftID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/drafts/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(draftID)) return c.doDelete(ctx, queryURL) } // SendDraft sends a draft. func (c *HTTPClient) SendDraft(ctx context.Context, grantID, draftID string, req *domain.SendDraftRequest) (*domain.Message, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/drafts/%s", c.baseURL, grantID, draftID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/drafts/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(draftID)) var bodyReader io.Reader if req != nil && req.SignatureID != "" { @@ -332,16 +341,16 @@ func (c *HTTPClient) SendDraft(ctx context.Context, grantID, draftID string, req if err != nil { return nil, fmt.Errorf("%w: %v", domain.ErrNetworkError, err) } - defer func() { _ = resp.Body.Close() }() if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { + defer func() { _ = resp.Body.Close() }() return nil, c.parseError(resp) } var result struct { Data messageResponse `json:"data"` } - if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + if err := c.decodeJSONResponse(resp, &result); err != nil { return nil, err } diff --git a/internal/adapters/nylas/drafts_update.go b/internal/adapters/nylas/drafts_update.go index 8a9d3f5..7889cc7 100644 --- a/internal/adapters/nylas/drafts_update.go +++ b/internal/adapters/nylas/drafts_update.go @@ -1,13 +1,10 @@ package nylas import ( - "bytes" "context" - "encoding/json" "fmt" - "mime/multipart" "net/http" - "net/textproto" + "net/url" "github.com/nylas/cli/internal/domain" ) @@ -23,7 +20,7 @@ func (c *HTTPClient) UpdateDraft(ctx context.Context, grantID, draftID string, r // updateDraftWithJSON updates a draft using JSON encoding (no 
attachments). func (c *HTTPClient) updateDraftWithJSON(ctx context.Context, grantID, draftID string, req *domain.CreateDraftRequest) (*domain.Draft, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/drafts/%s", c.baseURL, grantID, draftID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/drafts/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(draftID)) resp, err := c.doJSONRequest(ctx, "PUT", queryURL, buildDraftPayload(req, false), http.StatusOK) if err != nil { @@ -43,73 +40,12 @@ func (c *HTTPClient) updateDraftWithJSON(ctx context.Context, grantID, draftID s // updateDraftWithMultipart updates a draft with attachments using multipart/form-data. func (c *HTTPClient) updateDraftWithMultipart(ctx context.Context, grantID, draftID string, req *domain.CreateDraftRequest) (*domain.Draft, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/drafts/%s", c.baseURL, grantID, draftID) - - // Build the message JSON - message := buildDraftPayload(req, false) - - // Create multipart form - var buf bytes.Buffer - writer := multipart.NewWriter(&buf) - - // Add message as JSON field - messageJSON, err := json.Marshal(message) - if err != nil { - return nil, fmt.Errorf("failed to marshal message: %w", err) - } - if err := writer.WriteField("message", string(messageJSON)); err != nil { - return nil, fmt.Errorf("failed to write message field: %w", err) - } - - // Add each attachment as a file - for i, att := range req.Attachments { - if len(att.Content) == 0 { - continue // Skip attachments without content - } - - // Create form file with proper headers - h := make(textproto.MIMEHeader) - h.Set("Content-Disposition", fmt.Sprintf(`form-data; name="file%d"; filename="%s"`, i, att.Filename)) - if att.ContentType != "" { - h.Set("Content-Type", att.ContentType) - } else { - h.Set("Content-Type", "application/octet-stream") - } - - part, err := writer.CreatePart(h) - if err != nil { - return nil, fmt.Errorf("failed to create attachment part: %w", err) - } - if _, err := 
part.Write(att.Content); err != nil { - return nil, fmt.Errorf("failed to write attachment content: %w", err) - } - } - - if err := writer.Close(); err != nil { - return nil, fmt.Errorf("failed to close multipart writer: %w", err) - } - - httpReq, err := http.NewRequestWithContext(ctx, "PUT", queryURL, &buf) - if err != nil { - return nil, err - } - httpReq.Header.Set("Content-Type", writer.FormDataContentType()) - c.setAuthHeader(httpReq) - - resp, err := c.doRequest(ctx, httpReq) - if err != nil { - return nil, fmt.Errorf("%w: %v", domain.ErrNetworkError, err) - } - defer func() { _ = resp.Body.Close() }() - - if resp.StatusCode != http.StatusOK { - return nil, c.parseError(resp) - } + queryURL := fmt.Sprintf("%s/v3/grants/%s/drafts/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(draftID)) var result struct { Data draftResponse `json:"data"` } - if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + if err := c.doMultipartDraft(ctx, "PUT", queryURL, buildDraftPayload(req, false), req.Attachments, &result, http.StatusOK); err != nil { return nil, err } diff --git a/internal/adapters/nylas/folders.go b/internal/adapters/nylas/folders.go index 719efa7..f5e394f 100644 --- a/internal/adapters/nylas/folders.go +++ b/internal/adapters/nylas/folders.go @@ -3,6 +3,7 @@ package nylas import ( "context" "fmt" + "net/url" "github.com/nylas/cli/internal/domain" "github.com/nylas/cli/internal/util" @@ -29,7 +30,7 @@ func (c *HTTPClient) GetFolders(ctx context.Context, grantID string) ([]domain.F return nil, err } - queryURL := fmt.Sprintf("%s/v3/grants/%s/folders", c.baseURL, grantID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/folders", c.baseURL, url.PathEscape(grantID)) var result struct { Data []folderResponse `json:"data"` @@ -50,7 +51,7 @@ func (c *HTTPClient) GetFolder(ctx context.Context, grantID, folderID string) (* return nil, err } - queryURL := fmt.Sprintf("%s/v3/grants/%s/folders/%s", c.baseURL, grantID, folderID) + queryURL := 
fmt.Sprintf("%s/v3/grants/%s/folders/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(folderID)) var result struct { Data folderResponse `json:"data"` @@ -69,7 +70,7 @@ func (c *HTTPClient) CreateFolder(ctx context.Context, grantID string, req *doma return nil, err } - queryURL := fmt.Sprintf("%s/v3/grants/%s/folders", c.baseURL, grantID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/folders", c.baseURL, url.PathEscape(grantID)) payload := map[string]any{ "name": req.Name, @@ -109,7 +110,7 @@ func (c *HTTPClient) UpdateFolder(ctx context.Context, grantID, folderID string, return nil, err } - queryURL := fmt.Sprintf("%s/v3/grants/%s/folders/%s", c.baseURL, grantID, folderID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/folders/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(folderID)) payload := make(map[string]any, 4) // Pre-allocate for up to 4 fields if req.Name != "" { @@ -149,7 +150,7 @@ func (c *HTTPClient) DeleteFolder(ctx context.Context, grantID, folderID string) if err := validateRequired("folder ID", folderID); err != nil { return err } - queryURL := fmt.Sprintf("%s/v3/grants/%s/folders/%s", c.baseURL, grantID, folderID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/folders/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(folderID)) return c.doDelete(ctx, queryURL) } diff --git a/internal/adapters/nylas/managed_grants.go b/internal/adapters/nylas/managed_grants.go index f493ef3..43803cd 100644 --- a/internal/adapters/nylas/managed_grants.go +++ b/internal/adapters/nylas/managed_grants.go @@ -3,6 +3,7 @@ package nylas import ( "context" "fmt" + "net/url" "github.com/nylas/cli/internal/domain" ) @@ -61,7 +62,7 @@ func (c *HTTPClient) listManagedGrants(ctx context.Context, provider domain.Prov } func (c *HTTPClient) getManagedGrant(ctx context.Context, grantID string) (*managedGrantResponse, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s", c.baseURL, grantID) + queryURL := fmt.Sprintf("%s/v3/grants/%s", c.baseURL, url.PathEscape(grantID)) 
var result struct { Data managedGrantResponse `json:"data"` diff --git a/internal/adapters/nylas/messages.go b/internal/adapters/nylas/messages.go index c6d5233..32948f7 100644 --- a/internal/adapters/nylas/messages.go +++ b/internal/adapters/nylas/messages.go @@ -4,6 +4,7 @@ import ( "context" "encoding/base64" "fmt" + "net/url" "strings" "time" @@ -94,7 +95,7 @@ func (c *HTTPClient) GetMessagesWithCursor(ctx context.Context, grantID string, params.Limit = 10 } - baseURL := fmt.Sprintf("%s/v3/grants/%s/messages", c.baseURL, grantID) + baseURL := fmt.Sprintf("%s/v3/grants/%s/messages", c.baseURL, url.PathEscape(grantID)) queryURL := NewQueryBuilder(). AddInt("limit", params.Limit). Add("page_token", params.PageToken). @@ -147,7 +148,7 @@ func (c *HTTPClient) GetMessageWithFields(ctx context.Context, grantID, messageI return nil, err } - baseURL := fmt.Sprintf("%s/v3/grants/%s/messages/%s", c.baseURL, grantID, messageID) + baseURL := fmt.Sprintf("%s/v3/grants/%s/messages/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(messageID)) queryURL := NewQueryBuilder().Add("fields", fields).BuildURL(baseURL) var result struct { @@ -163,7 +164,7 @@ func (c *HTTPClient) GetMessageWithFields(ctx context.Context, grantID, messageI // UpdateMessage updates message properties. func (c *HTTPClient) UpdateMessage(ctx context.Context, grantID, messageID string, req *domain.UpdateMessageRequest) (*domain.Message, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/%s", c.baseURL, grantID, messageID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(messageID)) payload := make(map[string]any, 3) // Pre-allocate for up to 3 fields if req.Unread != nil { @@ -194,7 +195,7 @@ func (c *HTTPClient) UpdateMessage(ctx context.Context, grantID, messageID strin // DeleteMessage deletes a message (moves to trash). 
func (c *HTTPClient) DeleteMessage(ctx context.Context, grantID, messageID string) error { - queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/%s", c.baseURL, grantID, messageID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(messageID)) return c.doDelete(ctx, queryURL) } diff --git a/internal/adapters/nylas/messages_send.go b/internal/adapters/nylas/messages_send.go index d020c12..7cd4243 100644 --- a/internal/adapters/nylas/messages_send.go +++ b/internal/adapters/nylas/messages_send.go @@ -8,6 +8,7 @@ import ( "io" "mime/multipart" "net/http" + "net/url" "github.com/nylas/cli/internal/domain" "github.com/nylas/cli/internal/util" @@ -53,7 +54,7 @@ func buildSendMessagePayload(req *domain.SendMessageRequest, includeSignature bo // SendMessage sends an email. func (c *HTTPClient) SendMessage(ctx context.Context, grantID string, req *domain.SendMessageRequest) (*domain.Message, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/send", c.baseURL, grantID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/send", c.baseURL, url.PathEscape(grantID)) resp, err := c.doJSONRequest(ctx, "POST", queryURL, buildSendMessagePayload(req, true), http.StatusOK, http.StatusCreated, http.StatusAccepted) if err != nil { @@ -74,7 +75,7 @@ func (c *HTTPClient) SendMessage(ctx context.Context, grantID string, req *domai // SendRawMessage sends a raw RFC 822 MIME message via the Nylas API. // Uses multipart/form-data with type=mime query parameter per Nylas API v3 spec. 
func (c *HTTPClient) SendRawMessage(ctx context.Context, grantID string, rawMIME []byte) (*domain.Message, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/send?type=mime", c.baseURL, grantID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/send?type=mime", c.baseURL, url.PathEscape(grantID)) // Create multipart form data var body bytes.Buffer @@ -106,10 +107,10 @@ func (c *HTTPClient) SendRawMessage(ctx context.Context, grantID string, rawMIME if err != nil { return nil, fmt.Errorf("%w: %v", domain.ErrNetworkError, err) } - defer func() { _ = resp.Body.Close() }() // Check status code if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusAccepted { + defer func() { _ = resp.Body.Close() }() return nil, c.parseError(resp) } @@ -117,7 +118,7 @@ func (c *HTTPClient) SendRawMessage(ctx context.Context, grantID string, rawMIME var result struct { Data messageResponse `json:"data"` } - if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + if err := c.decodeJSONResponse(resp, &result); err != nil { return nil, err } @@ -127,7 +128,7 @@ func (c *HTTPClient) SendRawMessage(ctx context.Context, grantID string, rawMIME // ListScheduledMessages retrieves all scheduled messages for a grant. 
func (c *HTTPClient) ListScheduledMessages(ctx context.Context, grantID string) ([]domain.ScheduledMessage, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/schedules", c.baseURL, grantID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/schedules", c.baseURL, url.PathEscape(grantID)) req, err := http.NewRequestWithContext(ctx, "GET", queryURL, nil) if err != nil { @@ -139,9 +140,9 @@ func (c *HTTPClient) ListScheduledMessages(ctx context.Context, grantID string) if err != nil { return nil, fmt.Errorf("%w: %v", domain.ErrNetworkError, err) } - defer func() { _ = resp.Body.Close() }() if resp.StatusCode != http.StatusOK { + defer func() { _ = resp.Body.Close() }() return nil, c.parseError(resp) } @@ -155,7 +156,7 @@ func (c *HTTPClient) ListScheduledMessages(ctx context.Context, grantID string) CloseTime int64 `json:"close_time"` } `json:"data"` } - if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + if err := c.decodeJSONResponse(resp, &result); err != nil { return nil, err } @@ -177,7 +178,7 @@ func (c *HTTPClient) ListScheduledMessages(ctx context.Context, grantID string) // GetScheduledMessage retrieves a specific scheduled message. 
func (c *HTTPClient) GetScheduledMessage(ctx context.Context, grantID, scheduleID string) (*domain.ScheduledMessage, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/schedules/%s", c.baseURL, grantID, scheduleID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/schedules/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(scheduleID)) req, err := http.NewRequestWithContext(ctx, "GET", queryURL, nil) if err != nil { @@ -189,12 +190,13 @@ func (c *HTTPClient) GetScheduledMessage(ctx context.Context, grantID, scheduleI if err != nil { return nil, fmt.Errorf("%w: %v", domain.ErrNetworkError, err) } - defer func() { _ = resp.Body.Close() }() if resp.StatusCode == http.StatusNotFound { + _ = resp.Body.Close() return nil, domain.ErrMessageNotFound } if resp.StatusCode != http.StatusOK { + defer func() { _ = resp.Body.Close() }() return nil, c.parseError(resp) } @@ -208,7 +210,7 @@ func (c *HTTPClient) GetScheduledMessage(ctx context.Context, grantID, scheduleI CloseTime int64 `json:"close_time"` } `json:"data"` } - if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + if err := c.decodeJSONResponse(resp, &result); err != nil { return nil, err } @@ -221,7 +223,7 @@ func (c *HTTPClient) GetScheduledMessage(ctx context.Context, grantID, scheduleI // CancelScheduledMessage cancels a scheduled message. func (c *HTTPClient) CancelScheduledMessage(ctx context.Context, grantID, scheduleID string) error { - queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/schedules/%s", c.baseURL, grantID, scheduleID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/schedules/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(scheduleID)) req, err := http.NewRequestWithContext(ctx, "DELETE", queryURL, nil) if err != nil { @@ -245,7 +247,7 @@ func (c *HTTPClient) CancelScheduledMessage(ctx context.Context, grantID, schedu // SmartCompose generates an AI-powered email draft based on a prompt. // Uses Nylas Smart Compose API (requires Plus package). 
func (c *HTTPClient) SmartCompose(ctx context.Context, grantID string, req *domain.SmartComposeRequest) (*domain.SmartComposeSuggestion, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/smart-compose", c.baseURL, grantID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/smart-compose", c.baseURL, url.PathEscape(grantID)) payload := map[string]any{ "prompt": req.Prompt, @@ -268,16 +270,16 @@ func (c *HTTPClient) SmartCompose(ctx context.Context, grantID string, req *doma if err != nil { return nil, fmt.Errorf("%w: %v", domain.ErrNetworkError, err) } - defer func() { _ = resp.Body.Close() }() if resp.StatusCode != http.StatusOK { + defer func() { _ = resp.Body.Close() }() return nil, c.parseError(resp) } var result struct { Data domain.SmartComposeSuggestion `json:"data"` } - if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + if err := c.decodeJSONResponse(resp, &result); err != nil { return nil, err } @@ -287,7 +289,7 @@ func (c *HTTPClient) SmartCompose(ctx context.Context, grantID string, req *doma // SmartComposeReply generates an AI-powered reply to a specific message. // Uses Nylas Smart Compose API (requires Plus package). 
func (c *HTTPClient) SmartComposeReply(ctx context.Context, grantID, messageID string, req *domain.SmartComposeRequest) (*domain.SmartComposeSuggestion, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/%s/smart-compose", c.baseURL, grantID, messageID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/messages/%s/smart-compose", c.baseURL, url.PathEscape(grantID), url.PathEscape(messageID)) payload := map[string]any{ "prompt": req.Prompt, @@ -310,16 +312,16 @@ func (c *HTTPClient) SmartComposeReply(ctx context.Context, grantID, messageID s if err != nil { return nil, fmt.Errorf("%w: %v", domain.ErrNetworkError, err) } - defer func() { _ = resp.Body.Close() }() if resp.StatusCode != http.StatusOK { + defer func() { _ = resp.Body.Close() }() return nil, c.parseError(resp) } var result struct { Data domain.SmartComposeSuggestion `json:"data"` } - if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + if err := c.decodeJSONResponse(resp, &result); err != nil { return nil, err } diff --git a/internal/adapters/nylas/notetakers.go b/internal/adapters/nylas/notetakers.go index 55e2c18..0561f02 100644 --- a/internal/adapters/nylas/notetakers.go +++ b/internal/adapters/nylas/notetakers.go @@ -3,6 +3,7 @@ package nylas import ( "context" "fmt" + "net/url" "time" "github.com/nylas/cli/internal/domain" @@ -52,7 +53,7 @@ func (c *HTTPClient) ListNotetakers(ctx context.Context, grantID string, params params.Limit = 10 } - baseURL := fmt.Sprintf("%s/v3/grants/%s/notetakers", c.baseURL, grantID) + baseURL := fmt.Sprintf("%s/v3/grants/%s/notetakers", c.baseURL, url.PathEscape(grantID)) queryURL := NewQueryBuilder(). AddInt("limit", params.Limit). Add("page_token", params.PageToken). @@ -71,7 +72,7 @@ func (c *HTTPClient) ListNotetakers(ctx context.Context, grantID string, params // GetNotetaker retrieves a single notetaker by ID. 
func (c *HTTPClient) GetNotetaker(ctx context.Context, grantID, notetakerID string) (*domain.Notetaker, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/notetakers/%s", c.baseURL, grantID, notetakerID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/notetakers/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(notetakerID)) var result struct { Data notetakerResponse `json:"data"` @@ -86,7 +87,7 @@ func (c *HTTPClient) GetNotetaker(ctx context.Context, grantID, notetakerID stri // CreateNotetaker creates a new notetaker to join a meeting. func (c *HTTPClient) CreateNotetaker(ctx context.Context, grantID string, req *domain.CreateNotetakerRequest) (*domain.Notetaker, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/notetakers", c.baseURL, grantID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/notetakers", c.baseURL, url.PathEscape(grantID)) payload := map[string]any{ "meeting_link": req.MeetingLink, @@ -126,13 +127,13 @@ func (c *HTTPClient) CreateNotetaker(ctx context.Context, grantID string, req *d // DeleteNotetaker deletes/cancels a notetaker. func (c *HTTPClient) DeleteNotetaker(ctx context.Context, grantID, notetakerID string) error { - queryURL := fmt.Sprintf("%s/v3/grants/%s/notetakers/%s", c.baseURL, grantID, notetakerID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/notetakers/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(notetakerID)) return c.doDelete(ctx, queryURL) } // GetNotetakerMedia retrieves the media (recording/transcript) for a notetaker. 
func (c *HTTPClient) GetNotetakerMedia(ctx context.Context, grantID, notetakerID string) (*domain.MediaData, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/notetakers/%s/media", c.baseURL, grantID, notetakerID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/notetakers/%s/media", c.baseURL, url.PathEscape(grantID), url.PathEscape(notetakerID)) var result struct { Data struct { diff --git a/internal/adapters/nylas/scheduler.go b/internal/adapters/nylas/scheduler.go index 65ac5bb..58d29c2 100644 --- a/internal/adapters/nylas/scheduler.go +++ b/internal/adapters/nylas/scheduler.go @@ -30,7 +30,7 @@ func (c *HTTPClient) GetSchedulerConfiguration(ctx context.Context, configID str return nil, err } - queryURL := fmt.Sprintf("%s/v3/scheduling/configurations/%s", c.baseURL, configID) + queryURL := fmt.Sprintf("%s/v3/scheduling/configurations/%s", c.baseURL, url.PathEscape(configID)) var result struct { Data domain.SchedulerConfiguration `json:"data"` @@ -65,7 +65,7 @@ func (c *HTTPClient) UpdateSchedulerConfiguration(ctx context.Context, configID return nil, err } - queryURL := fmt.Sprintf("%s/v3/scheduling/configurations/%s", c.baseURL, configID) + queryURL := fmt.Sprintf("%s/v3/scheduling/configurations/%s", c.baseURL, url.PathEscape(configID)) resp, err := c.doJSONRequest(ctx, "PUT", queryURL, req) if err != nil { @@ -86,7 +86,7 @@ func (c *HTTPClient) DeleteSchedulerConfiguration(ctx context.Context, configID if err := validateRequired("configuration ID", configID); err != nil { return err } - queryURL := fmt.Sprintf("%s/v3/scheduling/configurations/%s", c.baseURL, configID) + queryURL := fmt.Sprintf("%s/v3/scheduling/configurations/%s", c.baseURL, url.PathEscape(configID)) return c.doDelete(ctx, queryURL) } @@ -116,7 +116,7 @@ func (c *HTTPClient) GetSchedulerSession(ctx context.Context, sessionID string) return nil, err } - queryURL := fmt.Sprintf("%s/v3/scheduling/sessions/%s", c.baseURL, sessionID) + queryURL := fmt.Sprintf("%s/v3/scheduling/sessions/%s", 
c.baseURL, url.PathEscape(sessionID)) var result struct { Data domain.SchedulerSession `json:"data"` @@ -149,7 +149,7 @@ func (c *HTTPClient) GetBooking(ctx context.Context, bookingID string) (*domain. return nil, err } - queryURL := fmt.Sprintf("%s/v3/scheduling/bookings/%s", c.baseURL, bookingID) + queryURL := fmt.Sprintf("%s/v3/scheduling/bookings/%s", c.baseURL, url.PathEscape(bookingID)) var result struct { Data domain.Booking `json:"data"` @@ -166,7 +166,7 @@ func (c *HTTPClient) ConfirmBooking(ctx context.Context, bookingID string, req * return nil, err } - queryURL := fmt.Sprintf("%s/v3/scheduling/bookings/%s", c.baseURL, bookingID) + queryURL := fmt.Sprintf("%s/v3/scheduling/bookings/%s", c.baseURL, url.PathEscape(bookingID)) resp, err := c.doJSONRequest(ctx, "PUT", queryURL, req) if err != nil { @@ -243,7 +243,7 @@ func (c *HTTPClient) GetSchedulerPage(ctx context.Context, pageID string) (*doma return nil, err } - queryURL := fmt.Sprintf("%s/v3/scheduling/pages/%s", c.baseURL, pageID) + queryURL := fmt.Sprintf("%s/v3/scheduling/pages/%s", c.baseURL, url.PathEscape(pageID)) var result struct { Data domain.SchedulerPage `json:"data"` @@ -278,7 +278,7 @@ func (c *HTTPClient) UpdateSchedulerPage(ctx context.Context, pageID string, req return nil, err } - queryURL := fmt.Sprintf("%s/v3/scheduling/pages/%s", c.baseURL, pageID) + queryURL := fmt.Sprintf("%s/v3/scheduling/pages/%s", c.baseURL, url.PathEscape(pageID)) resp, err := c.doJSONRequest(ctx, "PUT", queryURL, req) if err != nil { @@ -299,6 +299,6 @@ func (c *HTTPClient) DeleteSchedulerPage(ctx context.Context, pageID string) err if err := validateRequired("page ID", pageID); err != nil { return err } - queryURL := fmt.Sprintf("%s/v3/scheduling/pages/%s", c.baseURL, pageID) + queryURL := fmt.Sprintf("%s/v3/scheduling/pages/%s", c.baseURL, url.PathEscape(pageID)) return c.doDelete(ctx, queryURL) } diff --git a/internal/adapters/nylas/signatures.go b/internal/adapters/nylas/signatures.go index 
a83722d..1145d86 100644 --- a/internal/adapters/nylas/signatures.go +++ b/internal/adapters/nylas/signatures.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "net/url" "time" "github.com/nylas/cli/internal/domain" @@ -20,7 +21,7 @@ type signatureResponse struct { // GetSignatures retrieves all signatures for a grant. func (c *HTTPClient) GetSignatures(ctx context.Context, grantID string) ([]domain.Signature, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/signatures", c.baseURL, grantID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/signatures", c.baseURL, url.PathEscape(grantID)) var result struct { Data []signatureResponse `json:"data"` @@ -34,7 +35,7 @@ func (c *HTTPClient) GetSignatures(ctx context.Context, grantID string) ([]domai // GetSignature retrieves a specific signature. func (c *HTTPClient) GetSignature(ctx context.Context, grantID, signatureID string) (*domain.Signature, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/signatures/%s", c.baseURL, grantID, signatureID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/signatures/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(signatureID)) var result struct { Data signatureResponse `json:"data"` @@ -49,7 +50,7 @@ func (c *HTTPClient) GetSignature(ctx context.Context, grantID, signatureID stri // CreateSignature creates a new signature. func (c *HTTPClient) CreateSignature(ctx context.Context, grantID string, req *domain.CreateSignatureRequest) (*domain.Signature, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/signatures", c.baseURL, grantID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/signatures", c.baseURL, url.PathEscape(grantID)) resp, err := c.doJSONRequestNoRetry(ctx, http.MethodPost, queryURL, req) if err != nil { @@ -69,7 +70,7 @@ func (c *HTTPClient) CreateSignature(ctx context.Context, grantID string, req *d // UpdateSignature updates an existing signature. 
func (c *HTTPClient) UpdateSignature(ctx context.Context, grantID, signatureID string, req *domain.UpdateSignatureRequest) (*domain.Signature, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/signatures/%s", c.baseURL, grantID, signatureID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/signatures/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(signatureID)) resp, err := c.doJSONRequestNoRetry(ctx, http.MethodPut, queryURL, req, http.StatusOK) if err != nil { @@ -89,7 +90,7 @@ func (c *HTTPClient) UpdateSignature(ctx context.Context, grantID, signatureID s // DeleteSignature deletes a signature. func (c *HTTPClient) DeleteSignature(ctx context.Context, grantID, signatureID string) error { - queryURL := fmt.Sprintf("%s/v3/grants/%s/signatures/%s", c.baseURL, grantID, signatureID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/signatures/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(signatureID)) return c.doDelete(ctx, queryURL) } diff --git a/internal/adapters/nylas/threads.go b/internal/adapters/nylas/threads.go index 3c6c027..be4d878 100644 --- a/internal/adapters/nylas/threads.go +++ b/internal/adapters/nylas/threads.go @@ -3,6 +3,7 @@ package nylas import ( "context" "fmt" + "net/url" "time" "github.com/nylas/cli/internal/domain" @@ -40,7 +41,7 @@ func (c *HTTPClient) GetThreads(ctx context.Context, grantID string, params *dom params.Limit = 10 } - baseURL := fmt.Sprintf("%s/v3/grants/%s/threads", c.baseURL, grantID) + baseURL := fmt.Sprintf("%s/v3/grants/%s/threads", c.baseURL, url.PathEscape(grantID)) queryURL := NewQueryBuilder(). AddInt("limit", params.Limit). AddInt("offset", params.Offset). @@ -64,7 +65,7 @@ func (c *HTTPClient) GetThreads(ctx context.Context, grantID string, params *dom // GetThread retrieves a single thread by ID. 
func (c *HTTPClient) GetThread(ctx context.Context, grantID, threadID string) (*domain.Thread, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/threads/%s", c.baseURL, grantID, threadID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/threads/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(threadID)) var result struct { Data threadResponse `json:"data"` @@ -79,7 +80,7 @@ func (c *HTTPClient) GetThread(ctx context.Context, grantID, threadID string) (* // UpdateThread updates thread properties. func (c *HTTPClient) UpdateThread(ctx context.Context, grantID, threadID string, req *domain.UpdateMessageRequest) (*domain.Thread, error) { - queryURL := fmt.Sprintf("%s/v3/grants/%s/threads/%s", c.baseURL, grantID, threadID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/threads/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(threadID)) payload := make(map[string]any) if req.Unread != nil { @@ -113,7 +114,7 @@ func (c *HTTPClient) DeleteThread(ctx context.Context, grantID, threadID string) if err := validateRequired("thread ID", threadID); err != nil { return err } - queryURL := fmt.Sprintf("%s/v3/grants/%s/threads/%s", c.baseURL, grantID, threadID) + queryURL := fmt.Sprintf("%s/v3/grants/%s/threads/%s", c.baseURL, url.PathEscape(grantID), url.PathEscape(threadID)) return c.doDelete(ctx, queryURL) } diff --git a/internal/adapters/nylas/transactional.go b/internal/adapters/nylas/transactional.go index 3f9bea0..16392c1 100644 --- a/internal/adapters/nylas/transactional.go +++ b/internal/adapters/nylas/transactional.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "net/url" "github.com/nylas/cli/internal/domain" ) @@ -24,7 +25,7 @@ func buildTransactionalSendPayload(req *domain.SendMessageRequest) map[string]an // SendTransactionalMessage sends an email via the domain-based transactional endpoint. 
// Used for managed Nylas grants: POST /v3/domains/{domain}/messages/send func (c *HTTPClient) SendTransactionalMessage(ctx context.Context, domainName string, req *domain.SendMessageRequest) (*domain.Message, error) { - queryURL := fmt.Sprintf("%s/v3/domains/%s/messages/send", c.baseURL, domainName) + queryURL := fmt.Sprintf("%s/v3/domains/%s/messages/send", c.baseURL, url.PathEscape(domainName)) resp, err := c.doJSONRequest(ctx, "POST", queryURL, buildTransactionalSendPayload(req), http.StatusOK, http.StatusCreated, http.StatusAccepted) if err != nil { diff --git a/internal/adapters/nylas/webhooks.go b/internal/adapters/nylas/webhooks.go index 67d2d72..3e23e7b 100644 --- a/internal/adapters/nylas/webhooks.go +++ b/internal/adapters/nylas/webhooks.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "net/url" "time" "github.com/nylas/cli/internal/domain" @@ -44,7 +45,7 @@ func (c *HTTPClient) GetWebhook(ctx context.Context, webhookID string) (*domain. return nil, err } - queryURL := fmt.Sprintf("%s/v3/webhooks/%s", c.baseURL, webhookID) + queryURL := fmt.Sprintf("%s/v3/webhooks/%s", c.baseURL, url.PathEscape(webhookID)) var result struct { Data webhookResponse `json:"data"` @@ -83,7 +84,7 @@ func (c *HTTPClient) UpdateWebhook(ctx context.Context, webhookID string, req *d return nil, err } - queryURL := fmt.Sprintf("%s/v3/webhooks/%s", c.baseURL, webhookID) + queryURL := fmt.Sprintf("%s/v3/webhooks/%s", c.baseURL, url.PathEscape(webhookID)) resp, err := c.doJSONRequest(ctx, "PUT", queryURL, req) if err != nil { @@ -106,7 +107,7 @@ func (c *HTTPClient) DeleteWebhook(ctx context.Context, webhookID string) error if err := validateRequired("webhook ID", webhookID); err != nil { return err } - queryURL := fmt.Sprintf("%s/v3/webhooks/%s", c.baseURL, webhookID) + queryURL := fmt.Sprintf("%s/v3/webhooks/%s", c.baseURL, url.PathEscape(webhookID)) return c.doDelete(ctx, queryURL) } @@ -119,7 +120,7 @@ func (c *HTTPClient) RotateWebhookSecret( return nil, err } - queryURL 
:= fmt.Sprintf("%s/v3/webhooks/rotate-secret/%s", c.baseURL, webhookID) + queryURL := fmt.Sprintf("%s/v3/webhooks/rotate-secret/%s", c.baseURL, url.PathEscape(webhookID)) resp, err := c.doJSONRequest(ctx, "POST", queryURL, nil) if err != nil { diff --git a/internal/adapters/oauth/server.go b/internal/adapters/oauth/server.go index af9898d..74222b8 100644 --- a/internal/adapters/oauth/server.go +++ b/internal/adapters/oauth/server.go @@ -52,8 +52,11 @@ func (s *CallbackServer) Start() error { IdleTimeout: 60 * time.Second, } + // Bind to loopback only. The browser performing the OAuth flow runs on + // the same machine; exposing the callback to the LAN would let an + // attacker race the legitimate browser callback. var err error - s.listener, err = net.Listen("tcp", fmt.Sprintf(":%d", s.port)) + s.listener, err = net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", s.port)) if err != nil { return fmt.Errorf("failed to start callback server: %w", err) } diff --git a/internal/adapters/webhookserver/server.go b/internal/adapters/webhookserver/server.go index 459eeb7..bf34674 100644 --- a/internal/adapters/webhookserver/server.go +++ b/internal/adapters/webhookserver/server.go @@ -5,6 +5,7 @@ import ( "context" "encoding/json" "fmt" + "html/template" "io" "net" "net/http" @@ -14,17 +15,64 @@ import ( "github.com/nylas/cli/internal/ports" ) +const ( + // maxWebhookBodyBytes caps the request body size accepted by the webhook + // receiver. Nylas events are well under 100 KB; the cap exists to prevent + // a malicious sender (the public tunnel URL is reachable by anyone) from + // asking the server to allocate gigabytes of RAM before HMAC verifies. + maxWebhookBodyBytes = 1 << 20 // 1 MiB + + // maxConcurrentHandlers bounds the goroutines that fan-out registered + // handlers. Without a bound, a flood of events combined with slow handlers + // would let an attacker drive unbounded goroutine creation. 
+ maxConcurrentHandlers = 32 +) + +// rootTemplate renders the webhook server landing page. html/template HTML- +// escapes every value, preventing PublicURL (and any future user-derived +// fields) from breaking out of the document. +var rootTemplate = template.Must(template.New("root").Parse(` + + + Nylas Webhook Server + + + +

Nylas Webhook Server

+
+
Status: Running
+
Events Received: {{.EventsReceived}}
+
Started: {{.StartedAt}}
+
+

Webhook Endpoint

+
{{.PublicURL}}
+

Send POST requests to this URL to receive webhook events.

+

Health Check

+
{{.HealthURL}}
+ +`)) + // Server implements the WebhookServer interface. type Server struct { - config ports.WebhookServerConfig - server *http.Server - listener net.Listener - tunnel ports.Tunnel - events chan *ports.WebhookEvent - handlers []ports.WebhookEventHandler - stats ports.WebhookServerStats - mu sync.RWMutex - startedAt time.Time + config ports.WebhookServerConfig + server *http.Server + listener net.Listener + tunnel ports.Tunnel + events chan *ports.WebhookEvent + handlers []ports.WebhookEventHandler + handlerSlots chan struct{} + stats ports.WebhookServerStats + mu sync.RWMutex + startedAt time.Time + closeOnce sync.Once } // NewServer creates a new webhook server. @@ -37,8 +85,9 @@ func NewServer(config ports.WebhookServerConfig) *Server { } return &Server{ - config: config, - events: make(chan *ports.WebhookEvent, 100), + config: config, + events: make(chan *ports.WebhookEvent, 100), + handlerSlots: make(chan struct{}, maxConcurrentHandlers), stats: ports.WebhookServerStats{ LocalURL: fmt.Sprintf("http://localhost:%d%s", config.Port, config.Path), }, @@ -66,9 +115,12 @@ func (s *Server) Start(ctx context.Context) error { ReadHeaderTimeout: 10 * time.Second, } - // Start listener + // Start listener bound to loopback only. Tunnels (cloudflared, ngrok) + // connect to localhost — there is no use case for accepting webhooks + // directly from the LAN, and binding to 0.0.0.0 would let any host on + // the local network forge webhook events. var err error - s.listener, err = net.Listen("tcp", fmt.Sprintf(":%d", s.config.Port)) + s.listener, err = net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", s.config.Port)) if err != nil { return fmt.Errorf("failed to start listener on port %d: %w", s.config.Port, err) } @@ -105,28 +157,33 @@ func (s *Server) Start(ctx context.Context) error { return nil } -// Stop stops the webhook server and tunnel. +// Stop stops the webhook server and tunnel. 
Safe to call more than once — +// the channel is closed under sync.Once, and the events channel is only +// closed after http.Server.Shutdown returns (which waits for in-flight +// handlers to complete) so producers cannot race the close. func (s *Server) Stop() error { var errs []error - // Stop tunnel first - if s.tunnel != nil { - if err := s.tunnel.Stop(); err != nil { - errs = append(errs, fmt.Errorf("tunnel stop: %w", err)) + s.closeOnce.Do(func() { + // Stop tunnel first. + if s.tunnel != nil { + if err := s.tunnel.Stop(); err != nil { + errs = append(errs, fmt.Errorf("tunnel stop: %w", err)) + } } - } - // Stop HTTP server - if s.server != nil { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - if err := s.server.Shutdown(ctx); err != nil { - errs = append(errs, fmt.Errorf("server shutdown: %w", err)) + // Stop HTTP server. Shutdown blocks until all in-flight requests have + // finished, so when it returns no handler is sending to s.events. + if s.server != nil { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if err := s.server.Shutdown(ctx); err != nil { + errs = append(errs, fmt.Errorf("server shutdown: %w", err)) + } } - } - // Close events channel - close(s.events) + close(s.events) + }) if len(errs) > 0 { return errs[0] @@ -192,10 +249,17 @@ func (s *Server) handleWebhook(w http.ResponseWriter, r *http.Request) { return } - // Read body + // Cap request body size so a malicious sender on a public tunnel can't + // drive unbounded RAM allocation. MaxBytesReader closes the body and + // returns an error from ReadAll once the limit is exceeded. 
+ r.Body = http.MaxBytesReader(w, r.Body, maxWebhookBodyBytes) body, err := io.ReadAll(r.Body) if err != nil { - http.Error(w, "Failed to read body", http.StatusBadRequest) + // MaxBytesReader returns *http.MaxBytesError once the cap is hit; any + // other read error (timeout, connection reset) is also surfaced as a + // 413 to keep the response simple — the client cannot recover either + // way. + http.Error(w, "Request body too large or unreadable", http.StatusRequestEntityTooLarge) return } defer func() { _ = r.Body.Close() }() @@ -253,6 +317,26 @@ func (s *Server) handleWebhook(w http.ResponseWriter, r *http.Request) { } } } + + // Replay protection. The signature has already been verified above + // (or is absent — see comment on MaxEventAge). When configured, we + // reject events whose CloudEvents `time` field is older than the + // allowed skew, which prevents an attacker who captured a single + // signed body from replaying it forever. + if s.config.WebhookSecret != "" && s.config.MaxEventAge > 0 { + if rawTime, ok := payload["time"].(string); ok { + eventTime, terr := time.Parse(time.RFC3339, rawTime) + if terr != nil { + http.Error(w, "Invalid event timestamp", http.StatusBadRequest) + return + } + skew := time.Since(eventTime) + if skew > s.config.MaxEventAge || skew < -s.config.MaxEventAge { + http.Error(w, "Event timestamp outside allowed skew", http.StatusUnauthorized) + return + } + } + } } // Update stats @@ -261,20 +345,43 @@ func (s *Server) handleWebhook(w http.ResponseWriter, r *http.Request) { s.stats.LastEventAt = time.Now() s.mu.Unlock() - // Send to channel (non-blocking) + // Send to channel non-blocking. If the buffer is full we drop the + // *new* event (not the oldest) — bump a stat so callers can see the + // loss. select { case s.events <- event: default: - // Channel full, drop oldest + s.mu.Lock() + s.stats.EventsDropped++ + s.mu.Unlock() } - // Call handlers + // Call handlers. 
Goroutines are bounded by handlerSlots so a flood of + // events combined with slow handlers cannot drive unbounded goroutine + // creation. If the slot pool is saturated, we run synchronously rather + // than dropping the call — handlers are typically cheap (channel sends). s.mu.RLock() handlers := s.handlers s.mu.RUnlock() for _, handler := range handlers { - go handler(event) + select { + case s.handlerSlots <- struct{}{}: + go func(h ports.WebhookEventHandler) { + defer func() { + <-s.handlerSlots + // Recover so a buggy handler can't take down the server. + _ = recover() + }() + h(event) + }(handler) + default: + // Slot pool full — invoke inline to apply backpressure. + func() { + defer func() { _ = recover() }() + handler(event) + }() + } } // Respond with 200 OK @@ -291,6 +398,7 @@ func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) { "status": "healthy", "started_at": stats.StartedAt, "events_received": stats.EventsReceived, + "events_dropped": stats.EventsDropped, "local_url": stats.LocalURL, "public_url": stats.PublicURL, } @@ -312,43 +420,20 @@ func (s *Server) handleRoot(w http.ResponseWriter, r *http.Request) { } stats := s.GetStats() + data := struct { + EventsReceived int + StartedAt string + PublicURL string + HealthURL string + }{ + EventsReceived: stats.EventsReceived, + StartedAt: stats.StartedAt.Format(time.RFC3339), + PublicURL: stats.PublicURL, + HealthURL: s.GetPublicURL() + "/health", + } - html := fmt.Sprintf(` - - - Nylas Webhook Server - - - -

Nylas Webhook Server

-
-
Status: Running
-
Events Received: %d
-
Started: %s
-
-

Webhook Endpoint

-
%s
-

Send POST requests to this URL to receive webhook events.

-

Health Check

-
%s/health
- -`, - stats.EventsReceived, - stats.StartedAt.Format(time.RFC3339), - stats.PublicURL, - s.GetPublicURL(), - ) - - w.Header().Set("Content-Type", "text/html") - _, _ = w.Write([]byte(html)) // Ignore write error - best effort response + w.Header().Set("Content-Type", "text/html; charset=utf-8") + _ = rootTemplate.Execute(w, data) // best-effort response } // verifySignature verifies the webhook signature using HMAC-SHA256. diff --git a/internal/adapters/webhookserver/server_test.go b/internal/adapters/webhookserver/server_test.go index f4dc6bc..bbc64a4 100644 --- a/internal/adapters/webhookserver/server_test.go +++ b/internal/adapters/webhookserver/server_test.go @@ -338,3 +338,110 @@ func signWebhookPayload(secret string, payload []byte) string { _, _ = mac.Write(payload) return hex.EncodeToString(mac.Sum(nil)) } + +// TestServer_HandleWebhook_RejectsOversizedBody verifies the body-size cap +// rejects payloads larger than the configured limit before allocating +// gigabytes of RAM. This is the gate that prevents a malicious sender on +// a public tunnel URL from driving the receiver out of memory. +func TestServer_HandleWebhook_RejectsOversizedBody(t *testing.T) { + server := NewServer(ports.WebhookServerConfig{Port: 0, Path: "/webhook"}) + + // 2 MiB — twice the cap. + oversized := bytes.Repeat([]byte("A"), 2<<20) + + req := httptest.NewRequest(http.MethodPost, "/webhook", bytes.NewReader(oversized)) + rec := httptest.NewRecorder() + server.handleWebhook(rec, req) + + assert.Equal(t, http.StatusRequestEntityTooLarge, rec.Code, + "oversized body must be rejected with 413, got %d", rec.Code) +} + +// TestServer_HandleWebhook_DropsOldEvents_ReplayWindow exercises the +// MaxEventAge gate. With MaxEventAge configured, an event whose +// CloudEvents `time` field is older than the window is rejected as a +// replay even when the HMAC verifies — bound to a captured signature, an +// attacker would otherwise be able to replay a single signed body +// indefinitely. 
+func TestServer_HandleWebhook_DropsOldEvents_ReplayWindow(t *testing.T) { + secret := "test-secret" + server := NewServer(ports.WebhookServerConfig{ + Port: 0, + Path: "/webhook", + WebhookSecret: secret, + MaxEventAge: 30 * time.Second, + }) + + // Body with a `time` field 5 minutes in the past. + oldTime := time.Now().Add(-5 * time.Minute).UTC().Format(time.RFC3339) + body := []byte(`{"id":"evt_1","type":"message.created","time":"` + oldTime + `"}`) + sig := signWebhookPayload(secret, body) + + req := httptest.NewRequest(http.MethodPost, "/webhook", bytes.NewReader(body)) + req.Header.Set("X-Nylas-Signature", sig) + rec := httptest.NewRecorder() + server.handleWebhook(rec, req) + + assert.Equal(t, http.StatusUnauthorized, rec.Code, + "stale event must be rejected as replay, got %d", rec.Code) +} + +// TestServer_HandleWebhook_AcceptsRecentEvents_ReplayWindow is the +// positive twin of the replay test: a signed event with a fresh `time` +// passes the gate. +func TestServer_HandleWebhook_AcceptsRecentEvents_ReplayWindow(t *testing.T) { + secret := "test-secret" + server := NewServer(ports.WebhookServerConfig{ + Port: 0, + Path: "/webhook", + WebhookSecret: secret, + MaxEventAge: 60 * time.Second, + }) + + now := time.Now().UTC().Format(time.RFC3339) + body := []byte(`{"id":"evt_2","type":"message.created","time":"` + now + `"}`) + sig := signWebhookPayload(secret, body) + + req := httptest.NewRequest(http.MethodPost, "/webhook", bytes.NewReader(body)) + req.Header.Set("X-Nylas-Signature", sig) + rec := httptest.NewRecorder() + server.handleWebhook(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code, "fresh event must be accepted") +} + +// TestServer_HandleHealth_SurfacesEventsDropped confirms the health +// response includes the events_dropped counter so operators can detect a +// slow consumer without parsing logs. 
+func TestServer_HandleHealth_SurfacesEventsDropped(t *testing.T) { + server := NewServer(ports.WebhookServerConfig{Port: 0, Path: "/webhook"}) + server.mu.Lock() + server.stats.EventsDropped = 7 + server.mu.Unlock() + + req := httptest.NewRequest(http.MethodGet, "/health", nil) + rec := httptest.NewRecorder() + server.handleHealth(rec, req) + + require.Equal(t, http.StatusOK, rec.Code) + var body map[string]any + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &body)) + require.Contains(t, body, "events_dropped") + assert.Equal(t, float64(7), body["events_dropped"]) +} + +// TestServer_StartBindsLoopbackOnly asserts the listener address is on a +// loopback interface — guards against an accidental change from +// 127.0.0.1: to :PORT (which would let any host on the LAN forge events). +func TestServer_StartBindsLoopbackOnly(t *testing.T) { + server := NewServer(ports.WebhookServerConfig{Port: 0, Path: "/webhook"}) + require.NoError(t, server.Start(context.Background())) + defer func() { _ = server.Stop() }() + + addr := server.listener.Addr().String() + host, _, err := net.SplitHostPort(addr) + require.NoError(t, err) + ip := net.ParseIP(host) + require.NotNil(t, ip, "could not parse listener host as IP: %s", host) + assert.True(t, ip.IsLoopback(), "listener bound to non-loopback address: %s", addr) +} diff --git a/internal/air/handlers_ai_complete.go b/internal/air/handlers_ai_complete.go index 01f2407..73d165f 100644 --- a/internal/air/handlers_ai_complete.go +++ b/internal/air/handlers_ai_complete.go @@ -10,6 +10,8 @@ import ( "strconv" "strings" "time" + + "github.com/nylas/cli/internal/httputil" ) const smartComposeTimeout = 5 * time.Second @@ -42,11 +44,7 @@ func (s *Server) handleAIComplete(w http.ResponseWriter, r *http.Request) { } if req.Text == "" { - w.Header().Set("Content-Type", "application/json") - resp := CompleteResponse{Suggestion: "", Confidence: 0} - if err := json.NewEncoder(w).Encode(resp); err != nil { - http.Error(w, "Failed to encode", 
http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, CompleteResponse{Suggestion: "", Confidence: 0}) return } @@ -56,14 +54,10 @@ func (s *Server) handleAIComplete(w http.ResponseWriter, r *http.Request) { suggestion := getAICompletion(r.Context(), req.Text, req.MaxLength) - w.Header().Set("Content-Type", "application/json") - resp := CompleteResponse{ + httputil.WriteJSON(w, http.StatusOK, CompleteResponse{ Suggestion: suggestion, Confidence: 0.8, - } - if err := json.NewEncoder(w).Encode(resp); err != nil { - http.Error(w, "Failed to encode response", http.StatusInternalServerError) - } + }) } // getAICompletion gets completion from Claude via CLI @@ -175,10 +169,7 @@ func (s *Server) handleNLSearch(w http.ResponseWriter, r *http.Request) { result := parseNaturalLanguageSearch(req.Query) - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(result); err != nil { - http.Error(w, "Failed to encode response", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, result) } // parseNaturalLanguageSearch converts NL query to search params diff --git a/internal/air/handlers_ai_config.go b/internal/air/handlers_ai_config.go index 1d303b7..d13bec1 100644 --- a/internal/air/handlers_ai_config.go +++ b/internal/air/handlers_ai_config.go @@ -2,8 +2,11 @@ package air import ( "encoding/json" + "maps" "net/http" "sync" + + "github.com/nylas/cli/internal/httputil" ) // AIConfig represents AI provider configuration @@ -81,10 +84,7 @@ func (s *Server) handleGetAIConfig(w http.ResponseWriter, r *http.Request) { config.APIKey = "***" + config.APIKey[max(0, len(config.APIKey)-4):] } - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(config); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, config) } // handleUpdateAIConfig updates AI configuration @@ -118,20 +118,14 @@ func (s *Server) 
handleUpdateAIConfig(w http.ResponseWriter, r *http.Request) { aiStore.config.Temperature = req.Temperature } if req.TaskModels != nil { - for task, model := range req.TaskModels { - aiStore.config.TaskModels[task] = model - } + maps.Copy(aiStore.config.TaskModels, req.TaskModels) } if req.UsageBudget > 0 { aiStore.config.UsageBudget = req.UsageBudget } aiStore.config.Enabled = req.Enabled - w.Header().Set("Content-Type", "application/json") - resp := map[string]string{"status": "updated"} - if err := json.NewEncoder(w).Encode(resp); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, map[string]string{"status": "updated"}) } // handleTestAIConnection tests the AI provider connection @@ -153,10 +147,7 @@ func (s *Server) handleTestAIConnection(w http.ResponseWriter, r *http.Request) result["message"] = "API key required" } - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(result); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, result) } // handleGetAIUsage returns AI usage statistics @@ -172,10 +163,7 @@ func (s *Server) handleGetAIUsage(w http.ResponseWriter, r *http.Request) { "percentUsed": (aiStore.config.UsageSpent / aiStore.config.UsageBudget) * 100, } - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(response); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, response) } // GetAIProviders returns available AI providers @@ -207,10 +195,7 @@ func (s *Server) handleGetAIProviders(w http.ResponseWriter, r *http.Request) { }, } - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(providers); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, providers) } 
// RecordAIUsage records AI usage for a task diff --git a/internal/air/handlers_analytics.go b/internal/air/handlers_analytics.go index 2ce9733..a91f6dd 100644 --- a/internal/air/handlers_analytics.go +++ b/internal/air/handlers_analytics.go @@ -1,10 +1,11 @@ package air import ( - "encoding/json" "net/http" "sync" "time" + + "github.com/nylas/cli/internal/httputil" ) // EmailAnalytics represents email analytics data @@ -121,10 +122,7 @@ func (s *Server) handleGetAnalyticsDashboard(w http.ResponseWriter, r *http.Requ aStore.mu.RLock() defer aStore.mu.RUnlock() - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(aStore.analytics); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, aStore.analytics) } // handleGetAnalyticsTrends returns email trends @@ -144,10 +142,7 @@ func (s *Server) handleGetAnalyticsTrends(w http.ResponseWriter, r *http.Request "dailyVolume": aStore.analytics.DailyVolume, } - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(response); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, response) } // handleGetFocusTimeSuggestions returns suggested focus time blocks @@ -198,10 +193,7 @@ func (s *Server) handleGetFocusTimeSuggestions(w http.ResponseWriter, r *http.Re Score: 90, }) - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(suggestions); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, suggestions) } // handleGetProductivityStats returns productivity metrics @@ -219,10 +211,7 @@ func (s *Server) handleGetProductivityStats(w http.ResponseWriter, r *http.Reque "emailsProcessed": aStore.analytics.TotalArchived + aStore.analytics.TotalDeleted, } - w.Header().Set("Content-Type", "application/json") - if err := 
json.NewEncoder(w).Encode(response); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, response) } // RecordEmailReceived records a received email diff --git a/internal/air/handlers_bundles.go b/internal/air/handlers_bundles.go index 25924f6..610c1ae 100644 --- a/internal/air/handlers_bundles.go +++ b/internal/air/handlers_bundles.go @@ -7,6 +7,8 @@ import ( "strings" "sync" "time" + + "github.com/nylas/cli/internal/httputil" ) // Bundle represents an email bundle/category @@ -156,10 +158,7 @@ func (s *Server) handleGetBundles(w http.ResponseWriter, r *http.Request) { bundles = append(bundles, b) } - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(bundles); err != nil { - http.Error(w, "Failed to encode bundles", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, bundles) } // handleBundleCategorize assigns an email to a bundle @@ -187,11 +186,7 @@ func (s *Server) handleBundleCategorize(w http.ResponseWriter, r *http.Request) bundleStore.mu.Unlock() } - w.Header().Set("Content-Type", "application/json") - resp := map[string]string{"bundleId": bundleID} - if err := json.NewEncoder(w).Encode(resp); err != nil { - http.Error(w, "Failed to encode response", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, map[string]string{"bundleId": bundleID}) } // categorizeEmail determines which bundle an email belongs to @@ -295,10 +290,7 @@ func (s *Server) handleUpdateBundle(w http.ResponseWriter, r *http.Request) { existing.LastUpdated = time.Now() } - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(bundle); err != nil { - http.Error(w, "Failed to encode response", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, bundle) } // handleGetBundleEmails returns emails for a specific bundle @@ -319,8 +311,5 @@ func (s *Server) handleGetBundleEmails(w 
http.ResponseWriter, r *http.Request) { } } - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(emailIDs); err != nil { - http.Error(w, "Failed to encode response", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, emailIDs) } diff --git a/internal/air/handlers_focus_mode.go b/internal/air/handlers_focus_mode.go index 6dceb8c..2b4938b 100644 --- a/internal/air/handlers_focus_mode.go +++ b/internal/air/handlers_focus_mode.go @@ -3,15 +3,18 @@ package air import ( "encoding/json" "net/http" + "slices" "sync" "time" + + "github.com/nylas/cli/internal/httputil" ) // FocusModeState represents the current focus mode state type FocusModeState struct { IsActive bool `json:"isActive"` - StartedAt time.Time `json:"startedAt,omitempty"` - EndsAt time.Time `json:"endsAt,omitempty"` + StartedAt time.Time `json:"startedAt,omitzero"` + EndsAt time.Time `json:"endsAt,omitzero"` Duration int `json:"duration"` // minutes PomodoroMode bool `json:"pomodoroMode"` SessionCount int `json:"sessionCount"` @@ -112,10 +115,7 @@ func (s *Server) handleGetFocusModeState(w http.ResponseWriter, r *http.Request) } } - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(response); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, response) } // handleStartFocusMode starts a focus mode session @@ -154,10 +154,7 @@ func (s *Server) handleStartFocusMode(w http.ResponseWriter, r *http.Request) { InBreak: false, } - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(fmStore.state); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, fmStore.state) } // handleStopFocusMode stops the current focus mode session @@ -171,14 +168,10 @@ func (s *Server) handleStopFocusMode(w http.ResponseWriter, r *http.Request) { 
fmStore.state.IsActive = false fmStore.state.InBreak = false - w.Header().Set("Content-Type", "application/json") - resp := map[string]any{ + httputil.WriteJSON(w, http.StatusOK, map[string]any{ "status": "stopped", "sessionCount": fmStore.state.SessionCount, - } - if err := json.NewEncoder(w).Encode(resp); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + }) } // handleStartBreak starts a break in pomodoro mode @@ -203,10 +196,7 @@ func (s *Server) handleStartBreak(w http.ResponseWriter, r *http.Request) { fmStore.state.EndsAt = now.Add(time.Duration(breakDuration) * time.Minute) fmStore.state.BreakDuration = breakDuration - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(fmStore.state); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, fmStore.state) } // handleGetFocusModeSettings returns focus mode settings @@ -214,10 +204,7 @@ func (s *Server) handleGetFocusModeSettings(w http.ResponseWriter, r *http.Reque fmStore.mu.RLock() defer fmStore.mu.RUnlock() - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(fmStore.settings); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, fmStore.settings) } // handleUpdateFocusModeSettings updates focus mode settings @@ -232,11 +219,7 @@ func (s *Server) handleUpdateFocusModeSettings(w http.ResponseWriter, r *http.Re fmStore.settings = &settings fmStore.mu.Unlock() - w.Header().Set("Content-Type", "application/json") - resp := map[string]string{"status": "updated"} - if err := json.NewEncoder(w).Encode(resp); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, map[string]string{"status": "updated"}) } // IsFocusModeActive returns whether focus mode is active @@ -256,11 +239,5 @@ func 
ShouldAllowNotification(senderEmail string) bool { } // Check if sender is in allowed list - for _, allowed := range fmStore.settings.AllowedSenders { - if allowed == senderEmail { - return true - } - } - - return false + return slices.Contains(fmStore.settings.AllowedSenders, senderEmail) } diff --git a/internal/air/handlers_notetaker.go b/internal/air/handlers_notetaker.go index 71a15cb..878ae60 100644 --- a/internal/air/handlers_notetaker.go +++ b/internal/air/handlers_notetaker.go @@ -9,6 +9,7 @@ import ( "time" "github.com/nylas/cli/internal/domain" + "github.com/nylas/cli/internal/httputil" ) // States to filter out from notetaker list @@ -124,10 +125,7 @@ func (s *Server) handleListNotetakers(w http.ResponseWriter, r *http.Request) { } } - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(response); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, response) } // domainToNotetakerResponse converts a domain.Notetaker to NotetakerResponse @@ -205,10 +203,7 @@ func (s *Server) handleCreateNotetaker(w http.ResponseWriter, r *http.Request) { return } - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(domainToNotetakerResponse(nt)); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, domainToNotetakerResponse(nt)) } // handleGetNotetaker returns a single notetaker from the Nylas API @@ -233,10 +228,7 @@ func (s *Server) handleGetNotetaker(w http.ResponseWriter, r *http.Request) { return } - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(domainToNotetakerResponse(nt)); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, domainToNotetakerResponse(nt)) } // handleGetNotetakerMedia returns media for a notetaker from the Nylas API @@ 
-273,10 +265,7 @@ func (s *Server) handleGetNotetakerMedia(w http.ResponseWriter, r *http.Request) media.TranscriptSize = mediaData.Transcript.Size } - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(media); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, media) } // handleDeleteNotetaker cancels a notetaker via the Nylas API diff --git a/internal/air/handlers_read_receipts.go b/internal/air/handlers_read_receipts.go index 7bbc1ff..67ea4a9 100644 --- a/internal/air/handlers_read_receipts.go +++ b/internal/air/handlers_read_receipts.go @@ -5,13 +5,15 @@ import ( "net/http" "sync" "time" + + "github.com/nylas/cli/internal/httputil" ) // ReadReceipt represents a read receipt for a sent email type ReadReceipt struct { EmailID string `json:"emailId"` Recipient string `json:"recipient"` - OpenedAt time.Time `json:"openedAt,omitempty"` + OpenedAt time.Time `json:"openedAt,omitzero"` OpenCount int `json:"openCount"` Device string `json:"device,omitempty"` Location string `json:"location,omitempty"` @@ -55,10 +57,7 @@ func (s *Server) handleGetReadReceipts(w http.ResponseWriter, r *http.Request) { if emailID != "" { if receipt, ok := rrStore.receipts[emailID]; ok { - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(receipt); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, receipt) return } http.Error(w, "Receipt not found", http.StatusNotFound) @@ -70,10 +69,7 @@ func (s *Server) handleGetReadReceipts(w http.ResponseWriter, r *http.Request) { receipts = append(receipts, r) } - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(receipts); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, receipts) } // handleTrackOpen records an email 
open (tracking pixel endpoint) @@ -131,10 +127,7 @@ func (s *Server) handleGetReadReceiptSettings(w http.ResponseWriter, r *http.Req rrStore.mu.RLock() defer rrStore.mu.RUnlock() - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(rrStore.settings); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, rrStore.settings) } // handleUpdateReadReceiptSettings updates settings @@ -149,11 +142,7 @@ func (s *Server) handleUpdateReadReceiptSettings(w http.ResponseWriter, r *http. rrStore.settings = &settings rrStore.mu.Unlock() - w.Header().Set("Content-Type", "application/json") - resp := map[string]string{"status": "updated"} - if err := json.NewEncoder(w).Encode(resp); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, map[string]string{"status": "updated"}) } // RegisterEmailForTracking registers an email for read tracking diff --git a/internal/air/handlers_reply_later.go b/internal/air/handlers_reply_later.go index f20ffcb..260e429 100644 --- a/internal/air/handlers_reply_later.go +++ b/internal/air/handlers_reply_later.go @@ -5,6 +5,8 @@ import ( "net/http" "sync" "time" + + "github.com/nylas/cli/internal/httputil" ) // ReplyLaterItem represents an email in the reply later queue @@ -13,7 +15,7 @@ type ReplyLaterItem struct { Subject string `json:"subject"` From string `json:"from"` AddedAt time.Time `json:"addedAt"` - RemindAt time.Time `json:"remindAt,omitempty"` + RemindAt time.Time `json:"remindAt,omitzero"` DraftID string `json:"draftId,omitempty"` Notes string `json:"notes,omitempty"` Priority int `json:"priority"` // 1=high, 2=medium, 3=low @@ -56,10 +58,7 @@ func (s *Server) handleGetReplyLaterItems(w http.ResponseWriter, r *http.Request } } - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(items); err != nil { - http.Error(w, "Failed to 
encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, items) } // handleAddToReplyLater adds an email to reply later queue @@ -117,10 +116,7 @@ func (s *Server) handleAddToReplyLater(w http.ResponseWriter, r *http.Request) { rlStore.items[req.EmailID] = item rlStore.mu.Unlock() - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(item); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, item) } // handleUpdateReplyLater updates a reply later item @@ -158,10 +154,7 @@ func (s *Server) handleUpdateReplyLater(w http.ResponseWriter, r *http.Request) } item.IsCompleted = req.IsCompleted - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(item); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, item) } // handleRemoveFromReplyLater removes an email from reply later queue diff --git a/internal/air/handlers_screener.go b/internal/air/handlers_screener.go index 64d57d6..f3a19c7 100644 --- a/internal/air/handlers_screener.go +++ b/internal/air/handlers_screener.go @@ -5,6 +5,8 @@ import ( "net/http" "sync" "time" + + "github.com/nylas/cli/internal/httputil" ) // ScreenedSender represents a sender pending approval @@ -46,10 +48,7 @@ func (s *Server) handleGetScreenedSenders(w http.ResponseWriter, r *http.Request } } - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(senders); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, senders) } // handleScreenerAllow allows a sender @@ -83,11 +82,7 @@ func (s *Server) handleScreenerAllow(w http.ResponseWriter, r *http.Request) { } } - w.Header().Set("Content-Type", "application/json") - resp := map[string]string{"status": "allowed", "destination": req.Destination} - if err 
:= json.NewEncoder(w).Encode(resp); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, map[string]string{"status": "allowed", "destination": req.Destination}) } // handleScreenerBlock blocks a sender @@ -114,11 +109,7 @@ func (s *Server) handleScreenerBlock(w http.ResponseWriter, r *http.Request) { } } - w.Header().Set("Content-Type", "application/json") - resp := map[string]string{"status": "blocked"} - if err := json.NewEncoder(w).Encode(resp); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, map[string]string{"status": "blocked"}) } // handleAddToScreener adds a new sender for screening @@ -156,11 +147,7 @@ func (s *Server) handleAddToScreener(w http.ResponseWriter, r *http.Request) { } } - w.Header().Set("Content-Type", "application/json") - resp := map[string]string{"status": "pending"} - if err := json.NewEncoder(w).Encode(resp); err != nil { - http.Error(w, "Failed to encode", http.StatusInternalServerError) - } + httputil.WriteJSON(w, http.StatusOK, map[string]string{"status": "pending"}) } // IsSenderAllowed checks if a sender is allowed diff --git a/internal/air/middleware.go b/internal/air/middleware.go index 47e5498..f33e116 100644 --- a/internal/air/middleware.go +++ b/internal/air/middleware.go @@ -3,12 +3,12 @@ package air import ( "compress/gzip" "io" - "net" "net/http" - "net/url" "strconv" "strings" "time" + + "github.com/nylas/cli/internal/webguard" ) // gzipResponseWriter wraps http.ResponseWriter to support gzip compression. @@ -180,14 +180,28 @@ func SecurityHeadersMiddleware(next http.Handler) http.Handler { // Referrer policy w.Header().Set("Referrer-Policy", "strict-origin-when-cross-origin") - // Content Security Policy (relaxed for local development) + // Content Security Policy. 
+ // + // 'unsafe-inline' is removed from script-src: all inline onclick/ + // onchange handlers and inline + + @@ -123,7 +72,10 @@ + + + @@ -209,6 +161,9 @@ + + + {{end}} diff --git a/internal/air/templates/pages/calendar.gohtml b/internal/air/templates/pages/calendar.gohtml index 55ac5f5..737f78b 100644 --- a/internal/air/templates/pages/calendar.gohtml +++ b/internal/air/templates/pages/calendar.gohtml @@ -3,7 +3,7 @@
{{if eq .Provider "nylas"}} - {{end}} diff --git a/internal/air/templates/pages/notetaker.gohtml b/internal/air/templates/pages/notetaker.gohtml index 59739c9..e7c952b 100644 --- a/internal/air/templates/pages/notetaker.gohtml +++ b/internal/air/templates/pages/notetaker.gohtml @@ -5,12 +5,12 @@
- - + +
- - + +
@@ -21,7 +21,7 @@
🎙️

No recordings yet

Join a meeting to start recording

- + @@ -33,7 +33,7 @@ - @@ -18,7 +18,7 @@

Policies

Managed mailbox policies available for this Nylas account.

- +
@@ -35,7 +35,7 @@

Rules

Inbound processing rules configured for this Nylas account.

- +
diff --git a/internal/air/templates/partials/header.gohtml b/internal/air/templates/partials/header.gohtml index a585fe2..eda8861 100644 --- a/internal/air/templates/partials/header.gohtml +++ b/internal/air/templates/partials/header.gohtml @@ -7,27 +7,27 @@ Nylas Air
@@ -156,8 +156,8 @@
diff --git a/internal/air/templates/partials/modals_navigation.gohtml b/internal/air/templates/partials/modals_navigation.gohtml index 3281935..287a494 100644 --- a/internal/air/templates/partials/modals_navigation.gohtml +++ b/internal/air/templates/partials/modals_navigation.gohtml @@ -1,7 +1,7 @@ {{define "modals_navigation"}} -