diff --git a/changelog/+artifact-composition.added.md b/changelog/+artifact-composition.added.md new file mode 100644 index 00000000..5891fa4d --- /dev/null +++ b/changelog/+artifact-composition.added.md @@ -0,0 +1 @@ +Add `artifact_content`, `file_object_content`, `from_json`, and `from_yaml` Jinja2 filters for artifact content composition in templates. diff --git a/changelog/+artifact-composition.changed.md b/changelog/+artifact-composition.changed.md new file mode 100644 index 00000000..4a9912c0 --- /dev/null +++ b/changelog/+artifact-composition.changed.md @@ -0,0 +1 @@ +Replace `FilterDefinition.trusted: bool` with flag-based `ExecutionContext` model (`CORE`, `WORKER`, `LOCAL`) for context-aware template validation. `validate()` now accepts an optional `context` parameter. Backward compatible. diff --git a/changelog/+infp380.removed.md b/changelog/+infp380.removed.md new file mode 100644 index 00000000..9dc9e92a --- /dev/null +++ b/changelog/+infp380.removed.md @@ -0,0 +1 @@ +Removed the deprecated `raise_for_error` argument from `execute_graphql`, `query_gql_query`, `get_diff_summary`, `allocate_next_ip_address`, and `allocate_next_ip_prefix` client methods. HTTP errors are now always raised via `resp.raise_for_status()`. diff --git a/changelog/654.fixed.md b/changelog/654.fixed.md new file mode 100644 index 00000000..7cb360ab --- /dev/null +++ b/changelog/654.fixed.md @@ -0,0 +1 @@ +Allow direct assignment of authentication method to the configuration to override settings from environment variables. 
diff --git a/dev/specs/infp-496-graphql-fragment-inlining/spec.md b/dev/specs/infp-496-graphql-fragment-inlining/spec.md new file mode 100644 index 00000000..4c7a161a --- /dev/null +++ b/dev/specs/infp-496-graphql-fragment-inlining/spec.md @@ -0,0 +1,237 @@ +# SDK Spec: GraphQL Fragment Inlining + +**Jira**: INFP-496 +**Created**: 2026-03-13 +**Status**: Implemented +**Parent spec**: [infrahub/dev/specs/infp-496-graphql-fragment-inlining/spec.md](../../../../dev/specs/infp-496-graphql-fragment-inlining/spec.md) + +## Scope + +This spec covers the SDK-side responsibilities for the GraphQL Fragment Inlining feature (FR-015). The Infrahub server and backend integration are documented in the parent spec. + +Per the architecture decision in the parent spec: + +> Fragment parsing, resolution, and rendering is **SDK responsibility**, not server responsibility. + +The SDK must provide: + +1. **Config model extension** — `graphql_fragments` in `.infrahub.yml` +2. **Fragment renderer** — parse, resolve (transitively), and render queries +3. 
**CLI integration** — `infrahubctl` local workflows apply fragment rendering automatically + +--- + +## Component Responsibilities + +| Responsibility | SDK Module | +| --- | --- | +| `InfrahubRepositoryFragmentConfig` model | `infrahub_sdk/schema/repository.py` | +| `graphql_fragments` field on `InfrahubRepositoryConfig` | `infrahub_sdk/schema/repository.py` | +| Read fragment file content from disk | `InfrahubRepositoryFragmentConfig.load_fragments()` | +| Parse `.gql` fragment files into AST | `infrahub_sdk/graphql/query_renderer.py` | +| Build fragment name index (across all declared files) | `infrahub_sdk/graphql/query_renderer.py` | +| Detect duplicate fragment names across files | `infrahub_sdk/graphql/query_renderer.py` | +| Resolve transitive fragment dependencies | `infrahub_sdk/graphql/query_renderer.py` | +| Detect circular fragment dependencies | `infrahub_sdk/graphql/query_renderer.py` | +| Render self-contained query document | `infrahub_sdk/graphql/query_renderer.py` | +| High-level `render_query()` entry point (load + render) | `infrahub_sdk/graphql/query_renderer.py` | +| Typed error exceptions | `infrahub_sdk/exceptions.py` | +| Apply rendering in `infrahubctl` execution | `infrahub_sdk/ctl/utils.py` | +| Apply rendering in `infrahubctl` transform | `infrahub_sdk/ctl/cli_commands.py` | + +--- + +## API Contract: Fragment Renderer + +### CLI-facing entry point + +```python +# infrahub_sdk/graphql/query_renderer.py + +def render_query(name: str, config: InfrahubRepositoryConfig, relative_path: str = ".") -> str: + """Return a self-contained GraphQL document for the named query, with fragment definitions inlined. + + Loads the query file and all declared fragment files from config, then delegates to + render_query_with_fragments. + + Raises: + ResourceNotDefinedError: Query name not found in config. + FragmentFileNotFoundError: A declared fragment file path does not exist. + DuplicateFragmentError: Same fragment name declared in multiple files. 
+ FragmentNotFoundError: Query references a fragment not found in any declared file. + CircularFragmentError: Circular dependency detected among fragments. + """ +``` + +### Low-level entry point + +```python +# infrahub_sdk/graphql/query_renderer.py + +def render_query_with_fragments(query_str: str, fragment_files: list[str]) -> str: + """Return a self-contained GraphQL document with required fragment definitions inlined. + + If the query contains no fragment spreads, query_str is returned unchanged. + + Raises: + QuerySyntaxError: Query string or a fragment file contains invalid GraphQL syntax. + DuplicateFragmentError: Same fragment name declared in multiple files. + FragmentNotFoundError: Query references a fragment not found in any declared file. + CircularFragmentError: Circular dependency detected among fragments. + """ +``` + +### Public helpers in `query_renderer.py` + +```python +def build_fragment_index(fragment_files: list[str]) -> dict[str, FragmentDefinitionNode]: + """Parse all fragment file contents and return a mapping from fragment name to its AST node.""" + +def collect_required_fragments( + query_doc: DocumentNode, + fragment_index: dict[str, FragmentDefinitionNode], +) -> list[str]: + """Walk query_doc and collect all fragment names required (transitively). + + Returns a topologically ordered list of unique fragment names. + """ +``` + +### Error types (additions to `infrahub_sdk/exceptions.py`) + +```python +class GraphQLQueryError(Error): + """Base class for all errors raised during GraphQL query rendering.""" + + +class QuerySyntaxError(GraphQLQueryError): + def __init__(self, syntax_error: str) -> None: ... + # message: f"GraphQL syntax error: {syntax_error}" + + +class FragmentNotFoundError(GraphQLQueryError): + def __init__(self, fragment_name: str, query_file: str | None = None, message: str | None = None) -> None: ... + # message: f"Fragment '{fragment_name}' not found." 
(or mentions query_file if provided) + + +class DuplicateFragmentError(GraphQLQueryError): + def __init__(self, fragment_name: str, message: str | None = None) -> None: ... + # message: f"Fragment '{fragment_name}' is defined more than once across declared fragment files." + + +class CircularFragmentError(GraphQLQueryError): + def __init__(self, cycle: list[str], message: str | None = None) -> None: ... + # message: f"Circular fragment dependency detected: {' -> '.join(cycle)}." + + +class FragmentFileNotFoundError(GraphQLQueryError): + def __init__(self, file_path: str, message: str | None = None) -> None: ... + # message: f"Fragment file '{file_path}' declared in graphql_fragments does not exist." +``` + +`GraphQLQueryError` is also handled in `handle_exception()` in `ctl/utils.py` so CLI commands print +a clean error message and exit instead of raising an unhandled exception. + +--- + +## Config Model Extension + +```python +# infrahub_sdk/schema/repository.py + +class InfrahubRepositoryFragmentConfig(InfrahubRepositoryConfigElement): + model_config = ConfigDict(extra="forbid") + name: str = Field(..., description="Logical name for this fragment file or directory") + file_path: Path = Field(..., description="Path to a .gql file or directory of .gql files, relative to repo root") + + def load_fragments(self, relative_path: str = ".") -> list[str]: + """Read and return raw content of all fragment files at file_path. + + If file_path is a .gql file, returns a single-element list. + If file_path is a directory, returns one entry per .gql file found (sorted). + Raises FragmentFileNotFoundError if file_path does not exist. 
+ """ + resolved = Path(f"{relative_path}/{self.file_path}") + if not resolved.exists(): + raise FragmentFileNotFoundError(file_path=str(self.file_path)) + if resolved.is_dir(): + return [f.read_text(encoding="UTF-8") for f in sorted(resolved.glob("*.gql"))] + return [resolved.read_text(encoding="UTF-8")] + + +class InfrahubRepositoryConfig(BaseModel): + # ... existing fields ... + graphql_fragments: list[InfrahubRepositoryFragmentConfig] = Field( + default_factory=list, description="GraphQL fragment files" + ) +``` + +--- + +## infrahubctl Integration + +Both CLI call sites use `render_query()` from `query_renderer.py`, which handles loading fragment +files from config and delegating to `render_query_with_fragments`. + +### `execute_graphql_query()` in `ctl/utils.py` + +```python +# Before +query_str = query_object.load_query() + +# After +query_str = render_query(name=query, config=repository_config) +``` + +### `transform()` in `ctl/cli_commands.py` + +```python +# Before +query_str = repository_config.get_query(name=transform.query).load_query() + +# After +query_str = render_query(name=transform.query, config=repository_config) +``` + +--- + +## Testing Requirements + +### Unit tests — `tests/unit/sdk/graphql/test_fragment_renderer.py` (imports from `query_renderer`) + +- Render query with single direct fragment spread → correct output +- Render query with fragment spreads across two files → correct output +- Render query with transitive dependency (A → B across files) → correct output +- Render query with no fragment spreads → returned unchanged +- Same fragment spread used twice → fragment definition appears once in output +- Only required fragments included, not all from the file +- `FragmentNotFoundError` raised for unresolved spread +- `DuplicateFragmentError` raised for duplicate name across multiple content strings +- `DuplicateFragmentError` raised for duplicate name within the same content string +- `CircularFragmentError` raised for A→B→A cycle +- 
`QuerySyntaxError` raised for invalid GraphQL syntax in query or fragment file + +### Unit tests — `tests/unit/sdk/graphql/test_query_renderer.py` + +- `render_query()` loads query + fragments from config and returns rendered document +- `render_query()` with no `graphql_fragments` in config returns query unchanged + +### Unit tests — `tests/unit/sdk/test_repository.py` + +- `InfrahubRepositoryConfig` parses `graphql_fragments` YAML correctly +- `InfrahubRepositoryFragmentConfig.load_fragments()` with a file path returns a single-element list with the file content +- `InfrahubRepositoryFragmentConfig.load_fragments()` with a directory path returns one entry per `.gql` file found +- `load_fragments()` raises `FragmentFileNotFoundError` for a path that does not exist + +### Integration / functional tests — caller-side + +- See main repo plan for backend integration tests and E2E fixtures + +--- + +## Constraints + +- Fragment rendering uses only `graphql-core` (already a dependency). No new dependencies. +- All new public functions carry full type hints. +- Both async and sync `InfrahubClient` paths are unaffected — rendering is a pure string transformation with no I/O. +- Generated files (`protocols.py`) are not touched. diff --git a/dev/specs/infp-496-graphql-fragment-inlining/tasks.md b/dev/specs/infp-496-graphql-fragment-inlining/tasks.md new file mode 100644 index 00000000..9377bd4b --- /dev/null +++ b/dev/specs/infp-496-graphql-fragment-inlining/tasks.md @@ -0,0 +1,157 @@ +# Tasks: GraphQL Fragment Inlining — SDK Scope + +**Input**: `python_sdk/dev/specs/infp-496-graphql-fragment-inlining/spec.md` +**Parent tasks**: `specs/infp-496-graphql-fragment-inlining/tasks.md` (full feature view including backend) +**Scope**: All work inside `python_sdk/` only (FR-015: all fragment logic lives in the SDK) + +**Path note**: All file paths below are relative to the **infrahub repo root** (e.g., +`python_sdk/infrahub_sdk/...`). 
The `python_sdk/` directory is a git submodule — changes inside +it must be committed separately from the main infrahub repo. + +## Format: `[ID] [P?] [Story] Description` + +- **[P]**: Can run in parallel (different files, no dependencies) +- **[Story]**: Which user story this task belongs to +- Include exact file paths in descriptions + +--- + +## Phase 1: Setup (Fixture Repository) + +**Purpose**: Create the fixture repository used by SDK unit tests and backend component tests. +All fixture files live inside the `python_sdk` submodule. + +- [x] T001 Create fixture repo directory structure at `python_sdk/tests/fixtures/repos/fragment_inlining/` (subdirectories: `fragments/`, `queries/`) +- [x] T002 [P] Create `python_sdk/tests/fixtures/repos/fragment_inlining/fragments/interfaces.gql` (defines `interfaceFragment`, `portFragment`) and `python_sdk/tests/fixtures/repos/fragment_inlining/fragments/devices.gql` (defines `deviceFragment` that spreads `...interfaceFragment`, and `chassisFragment`) +- [x] T003 [P] Create `python_sdk/tests/fixtures/repos/fragment_inlining/queries/query_two_files.gql` (spreads `...interfaceFragment` and `...deviceFragment`), `query_no_fragments.gql` (no spreads), `query_transitive.gql` (spreads `...deviceFragment` only), `query_missing_fragment.gql` (spreads `...undeclaredFragment`) +- [x] T004 Create `python_sdk/tests/fixtures/repos/fragment_inlining/.infrahub.yml` declaring `graphql_fragments` (both fragment files under `fragments/`) and `graphql_queries` (all four query files under `queries/`) + +--- + +## Phase 2: Foundational (SDK Core — Blocking Prerequisites) + +**Purpose**: Exception types, config model, and renderer are needed by all user story phases. + +**⚠️ CRITICAL**: No user story work can begin until this phase is complete.
+ +- [x] T005 Add `GraphQLQueryError` base class plus five typed exception classes (`QuerySyntaxError`, `FragmentNotFoundError`, `DuplicateFragmentError`, `CircularFragmentError`, `FragmentFileNotFoundError`) to `python_sdk/infrahub_sdk/exceptions.py` — all use `__init__`-based pattern, all extend `GraphQLQueryError`; update `handle_exception()` in `ctl/utils.py` to also catch `GraphQLQueryError` +- [x] T006 Add `InfrahubRepositoryFragmentConfig` class with `name: str`, `file_path: Path`, and `load_fragments(relative_path: str = ".") -> list[str]` method to `python_sdk/infrahub_sdk/schema/repository.py` — mirror the existing `InfrahubRepositoryGraphQLConfig` pattern +- [x] T007 Add `graphql_fragments: list[InfrahubRepositoryFragmentConfig] = Field(default_factory=list)` field to `InfrahubRepositoryConfig` in `python_sdk/infrahub_sdk/schema/repository.py` +- [x] T008 Add **public** functions `build_fragment_index(fragment_files: list[str]) -> dict[str, FragmentDefinitionNode]` and `collect_required_fragments(query_doc: DocumentNode, fragment_index: dict[str, FragmentDefinitionNode]) -> list[str]` to `python_sdk/infrahub_sdk/graphql/query_renderer.py` +- [x] T009 Add `render_query_with_fragments(query_str: str, fragment_files: list[str]) -> str` to `python_sdk/infrahub_sdk/graphql/query_renderer.py`; early-return when query has no fragment spreads (FR-011); also raises `QuerySyntaxError` for invalid syntax in query or fragment files +- [x] T009b Create `python_sdk/infrahub_sdk/graphql/query_renderer.py` with `render_query(name: str, config: InfrahubRepositoryConfig, relative_path: str = ".") -> str` — high-level entry point used by CLI: loads query + fragment files from the configuration, delegates to `render_query_with_fragments` + +**Checkpoint**: SDK core complete — all test and CLI phases can now proceed + +--- + +## Phase 3: User Story 1 — Basic Fragment Import (Priority: P1) 🎯 MVP + +**Goal**: The renderer correctly inlines required fragments from multiple 
files and excludes +unreferenced ones. Repository configuration parses `graphql_fragments` YAML correctly. + +**Independent Test**: + +```bash +cd python_sdk && uv run pytest tests/unit/sdk/graphql/test_fragment_renderer.py -v +cd python_sdk && uv run pytest tests/unit/sdk/test_repository.py -v -k fragment +``` + +- [ ] T010 [P] [US1] Write unit tests covering: single direct spread from one file → renders correctly; spreads across two files → both rendered; no spreads → query returned unchanged; same spread used twice → definition appears once; surplus definitions excluded — in `python_sdk/tests/unit/sdk/graphql/test_fragment_renderer.py` +- [ ] T011 [P] [US1] Write unit tests covering: `InfrahubRepositoryConfig` parses `graphql_fragments` YAML section; `load_fragments()` with a file path returns single-element list with file content; `load_fragments()` with a directory path returns one entry per `.gql` file (sorted alphabetically); `load_fragments()` raises `FragmentFileNotFoundError` for a path that does not exist — in `python_sdk/tests/unit/sdk/test_repository.py` + +**Checkpoint**: US1 SDK work fully tested and independently verifiable + +--- + +## Phase 4: User Story 2 — Transitive Fragment Dependencies (Priority: P2) + +**Goal**: `collect_required_fragments` resolves transitive spreads so both A and its dependency B +are included even when the query only references A directly. 
+ +**Independent Test**: + +```bash +cd python_sdk && uv run pytest tests/unit/sdk/graphql/test_fragment_renderer.py -v -k transitive +``` + +- [ ] T013 [P] [US2] Write unit tests covering: transitive dependency across two files (query spreads `...deviceFragment`; `deviceFragment` spreads `...interfaceFragment` in a different file → both definitions in output); only directly/transitively required fragments included, not all from the files — in `python_sdk/tests/unit/sdk/graphql/test_fragment_renderer.py` + +**Checkpoint**: US1 + US2 SDK logic fully tested + +--- + +## Phase 5: User Story 4 — Graceful Failure for Unresolved Fragments (Priority: P2) + +**Goal**: All four error types are raised correctly under their documented conditions. + +**Independent Test**: + +```bash +cd python_sdk && uv run pytest tests/unit/sdk/graphql/test_fragment_renderer.py -v -k error +``` + +- [ ] T014 [P] [US4] Write unit tests covering: `FragmentNotFoundError` raised when spread references name absent from all fragment files; `DuplicateFragmentError` raised when same name appears in two separate content strings; `DuplicateFragmentError` raised when same name appears twice within one content string; `CircularFragmentError` raised for A→B→A cycle; `QuerySyntaxError` raised for invalid GraphQL syntax in a query or fragment content string — in `python_sdk/tests/unit/sdk/graphql/test_fragment_renderer.py` + +**Checkpoint**: All renderer error paths tested + +--- + +## Phase 6: infrahubctl CLI Integration (enables US1 + US4 for local workflows) + +**Goal**: `infrahubctl` local execution paths apply fragment rendering automatically when +`graphql_fragments` is declared in `.infrahub.yml` (FR-016). + +**Independent Test**: Run `infrahubctl run` pointing at the fixture repository; the query executes +without unresolved-spread errors.
+ +- [x] T019 [P] [US1] Update `execute_graphql_query()` in `python_sdk/infrahub_sdk/ctl/utils.py`: replace `query_object.load_query()` with `render_query(name=query, config=repository_config)` from `query_renderer.py` +- [x] T020 [P] [US1] Update `transform()` in `python_sdk/infrahub_sdk/ctl/cli_commands.py`: replace `repository_config.get_query(name=...).load_query()` with `render_query(name=transform.query, config=repository_config)` from `query_renderer.py` + +**Checkpoint**: Both server sync and infrahubctl CLI paths apply fragment rendering + +--- + +## Phase 7: Polish & SDK-Specific Concerns + +- [ ] T021 [P] Run `cd python_sdk && uv run invoke format lint-code` to verify no ruff/mypy violations in modified files (`exceptions.py`, `schema/repository.py`, `graphql/query_renderer.py`, `ctl/utils.py`, `ctl/cli_commands.py`) +- [ ] T022 Run `cd python_sdk && uv run invoke docs-generate` to regenerate SDK CLI + configuration docs after docstring additions + +--- + +## Dependencies & Execution Order + +### Phase Dependencies + +- **Setup (Phase 1)**: No dependencies — start immediately +- **Foundational (Phase 2)**: Depends on Phase 1 — BLOCKS all user story phases +- **US1 (Phase 3)**: Depends on Phase 2; T010 and T011 are independent [P] +- **US2 (Phase 4)**: Depends on Phase 2 (renderer transitive logic already in T008) +- **US4 (Phase 5)**: Depends on Phase 2 (error types in T005, error logic in T008) +- **CLI Integration (Phase 6)**: Depends on Phase 2; T019 and T020 are independent [P] +- **Polish (Phase 7)**: After all phases complete + +### Parallel Opportunities + +```bash +# Phase 1 — after T001 completes: +T002 python_sdk/tests/fixtures/repos/fragment_inlining/fragments/*.gql +T003 python_sdk/tests/fixtures/repos/fragment_inlining/queries/*.gql + +# Phase 3 — after Phase 2 completes: +T010 python_sdk/tests/unit/sdk/graphql/test_fragment_renderer.py +T011 python_sdk/tests/unit/sdk/test_repository.py + +# Phase 6 — after Phase 2 completes (independent of 
Phase 3): +T019 python_sdk/infrahub_sdk/ctl/utils.py +T020 python_sdk/infrahub_sdk/ctl/cli_commands.py +``` + +--- + +## Notes + +- `python_sdk/` is a git submodule — commit changes there separately from the main Infrahub repository +- Run `cd python_sdk && uv run invoke format lint-code` before committing any Python changes +- Run `cd python_sdk && uv run invoke docs-generate` after any docstring or CLI command changes +- Backend integration tasks (updating `backend/infrahub/git/integrator.py` and writing component tests) are in `specs/infp-496-graphql-fragment-inlining/tasks.md` diff --git a/dev/specs/infp-504-artifact-composition/checklists/requirements.md b/dev/specs/infp-504-artifact-composition/checklists/requirements.md new file mode 100644 index 00000000..b86889a7 --- /dev/null +++ b/dev/specs/infp-504-artifact-composition/checklists/requirements.md @@ -0,0 +1,36 @@ +# Specification Quality Checklist: Artifact Content Composition via Jinja2 Filters + +**Purpose**: Validate specification completeness and quality before proceeding to planning +**Created**: 2026-02-18 +**Feature**: [spec.md](../spec.md) + +## Content Quality + +- [x] No implementation details (languages, frameworks, APIs) +- [x] Focused on user value and business needs +- [x] Written for non-technical stakeholders +- [x] All mandatory sections completed + +## Requirement Completeness + +- [x] No [NEEDS CLARIFICATION] markers remain +- [x] Requirements are testable and unambiguous +- [x] Success criteria are measurable +- [x] Success criteria are technology-agnostic (no implementation details) +- [x] All acceptance scenarios are defined +- [x] Edge cases are identified +- [x] Scope is clearly bounded +- [x] Dependencies and assumptions identified + +## Feature Readiness + +- [x] All functional requirements have clear acceptance criteria +- [x] User scenarios cover primary flows +- [x] Feature meets measurable outcomes defined in Success Criteria +- [x] No implementation details leak into 
specification + +## Notes + +- One open question remains intentionally: whether to add a Python transform convenience SDK method (FR scope question flagged in Open Questions section, documented for planning phase). +- Ordering guarantee is explicitly out of scope and documented as a known limitation. +- `from_json`/`from_yaml` existence in the current filter set is flagged as an assumption to verify during planning. diff --git a/dev/specs/infp-504-artifact-composition/contracts/filter-interfaces.md b/dev/specs/infp-504-artifact-composition/contracts/filter-interfaces.md new file mode 100644 index 00000000..ed71b3b3 --- /dev/null +++ b/dev/specs/infp-504-artifact-composition/contracts/filter-interfaces.md @@ -0,0 +1,147 @@ +# Filter Interface Contracts + +**Feature**: INFP-504 | **Date**: 2026-03-20 + +## Jinja2 Filter Signatures + +### artifact_content + +```python +async def artifact_content(storage_id: str) -> str +``` + +| Input | Output | Error | +| ----- | ------ | ----- | +| Valid storage_id string | Raw artifact content (text) | — | +| `None` | — | `JinjaFilterError("artifact_content", "storage_id is null", hint="...")` | +| `""` (empty) | — | `JinjaFilterError("artifact_content", "storage_id is empty", hint="...")` | +| Non-existent storage_id | — | `JinjaFilterError("artifact_content", "content not found: {id}")` | +| Permission denied (401/403) | — | `JinjaFilterError("artifact_content", "permission denied for storage_id: {id}")` | +| No client provided | — | `JinjaFilterError("artifact_content", "requires InfrahubClient", hint="pass client via Jinja2Template(client=...)")` | + +**Validation**: Blocked in `CORE` context. Allowed in `WORKER` and `LOCAL` contexts. 
+ +### file_object_content + +```python +async def file_object_content(storage_id: str) -> str +``` + +| Input | Output | Error | +| ----- | ------ | ----- | +| Valid storage_id (text file) | Raw file content (text) | — | +| Valid storage_id (binary file) | — | `JinjaFilterError("file_object_content", "binary content not supported for storage_id: {id}")` | +| `None` | — | `JinjaFilterError("file_object_content", "storage_id is null", hint="...")` | +| `""` (empty) | — | `JinjaFilterError("file_object_content", "storage_id is empty", hint="...")` | +| Non-existent storage_id | — | `JinjaFilterError("file_object_content", "content not found: {id}")` | +| Permission denied (401/403) | — | `JinjaFilterError("file_object_content", "permission denied for storage_id: {id}")` | +| No client provided | — | `JinjaFilterError("file_object_content", "requires InfrahubClient", hint="pass client via Jinja2Template(client=...)")` | + +**Validation**: Blocked in `CORE` context. Allowed in `WORKER` and `LOCAL` contexts. + +### file_object_content_by_id + +```python +async def file_object_content_by_id(node_id: str) -> str +``` + +| Input | Output | Error | +| ----- | ------ | ----- | +| Valid node UUID (text file) | Raw file content (text) | — | +| Valid node UUID (binary file) | — | `JinjaFilterError("file_object_content_by_id", "binary content not supported...")` | +| `None` | — | `JinjaFilterError("file_object_content_by_id", "node_id is null", hint="...")` | +| `""` (empty) | — | `JinjaFilterError("file_object_content_by_id", "node_id is empty", hint="...")` | +| Permission denied (401/403) | — | `JinjaFilterError("file_object_content_by_id", "permission denied for node_id: {id}")` | +| No client provided | — | `JinjaFilterError("file_object_content_by_id", "requires InfrahubClient", hint="...")` | + +**Validation**: Blocked in `CORE` context. Allowed in `WORKER` and `LOCAL` contexts. 
+ +### file_object_content_by_hfid + +```python +async def file_object_content_by_hfid(hfid: str | list[str], kind: str = "") -> str +``` + +| Input | Output | Error | +| ----- | ------ | ----- | +| Valid HFID + kind (text file) | Raw file content (text) | — | +| Valid HFID + kind (binary file) | — | `JinjaFilterError("file_object_content_by_hfid", "binary content not supported...")` | +| `None` | — | `JinjaFilterError("file_object_content_by_hfid", "hfid is null", hint="...")` | +| Missing `kind` argument | — | `JinjaFilterError("file_object_content_by_hfid", "'kind' argument is required", hint="...")` | +| Permission denied (401/403) | — | `JinjaFilterError("file_object_content_by_hfid", "permission denied for hfid: {hfid}")` | +| No client provided | — | `JinjaFilterError("file_object_content_by_hfid", "requires InfrahubClient", hint="...")` | + +**Validation**: Blocked in `CORE` context. Allowed in `WORKER` and `LOCAL` contexts. + +### from_json + +```python +def from_json(value: str) -> dict | list +``` + +| Input | Output | Error | +| ----- | ------ | ----- | +| Valid JSON string | Parsed dict or list | — | +| `""` (empty) | `{}` | — | +| Malformed JSON | — | `JinjaFilterError("from_json", "invalid JSON: {error_detail}")` | + +**Validation**: Allowed in all contexts (`ALL`). + +### from_yaml + +```python +def from_yaml(value: str) -> dict | list +``` + +| Input | Output | Error | +| ----- | ------ | ----- | +| Valid YAML string | Parsed dict, list, or scalar | — | +| `""` (empty) | `{}` | — | +| Malformed YAML | — | `JinjaFilterError("from_yaml", "invalid YAML: {error_detail}")` | + +**Validation**: Allowed in all contexts (`ALL`). + +## ObjectStore API Contract + +### GET /api/storage/object/{identifier} (existing) + +Used by `artifact_content`. Returns plain text content. + +### File object endpoints + +All three endpoints return file content with the node's `file_type` as content-type. The SDK validates that the content-type is text-based. 
+ +| Endpoint | Used by | Identifier | +| -------- | ------- | ---------- | +| `GET /api/files/by-storage-id/{storage_id}` | `file_object_content` | storage_id | +| `GET /api/files/{node_id}` | `file_object_content_by_id` | node UUID | +| `GET /api/files/by-hfid/{kind}?hfid=...` | `file_object_content_by_hfid` | kind + HFID components | + +**Accepted content-types** (text-based): + +- `text/*` +- `application/json` +- `application/yaml` +- `application/x-yaml` + +**Rejected**: All other content-types → `JinjaFilterError` with binary content message. + +## Validation Contract + +### validate() method + +```python +def validate( + self, + restricted: bool = True, + context: ExecutionContext | None = None, +) -> None +``` + +| Context | Trusted filters | Worker filters | Untrusted filters | +| ------- | :-: | :-: | :-: | +| `CORE` | allowed | blocked | blocked | +| `WORKER` | allowed | allowed | blocked | +| `LOCAL` | allowed | allowed | allowed | + +**Backward compat**: `restricted=True` → `CORE`, `restricted=False` → `LOCAL`. diff --git a/dev/specs/infp-504-artifact-composition/data-model.md b/dev/specs/infp-504-artifact-composition/data-model.md new file mode 100644 index 00000000..1be99968 --- /dev/null +++ b/dev/specs/infp-504-artifact-composition/data-model.md @@ -0,0 +1,193 @@ +# Data Model: Artifact Content Composition + +**Feature**: INFP-504 | **Date**: 2026-03-20 + +## New Entities + +### ExecutionContext (Flag enum) + +**Location**: `infrahub_sdk/template/filters.py` + +```python +class ExecutionContext(Flag): + CORE = auto() # API server computed attributes — most restrictive + WORKER = auto() # Prefect background workers + LOCAL = auto() # Local CLI / unrestricted rendering + ALL = CORE | WORKER | LOCAL +``` + +**Semantics**: Represents where template code executes. A filter's `allowed_contexts` flags are an allowlist — fewer flags means less trusted. 
+ +### FilterDefinition (modified) + +**Location**: `infrahub_sdk/template/filters.py` + +```python +@dataclass +class FilterDefinition: + name: str + allowed_contexts: ExecutionContext + source: str + + @property + def trusted(self) -> bool: + """Backward compatibility: trusted means allowed in all contexts.""" + return self.allowed_contexts == ExecutionContext.ALL +``` + +**Migration**: + +| Current | New | +| ------- | --- | +| `FilterDefinition("abs", trusted=True, source="jinja2")` | `FilterDefinition("abs", allowed_contexts=ExecutionContext.ALL, source="jinja2")` | +| `FilterDefinition("safe", trusted=False, source="jinja2")` | `FilterDefinition("safe", allowed_contexts=ExecutionContext.LOCAL, source="jinja2")` | + +### JinjaFilterError (new exception) + +**Location**: `infrahub_sdk/template/exceptions.py` + +```python +class JinjaFilterError(JinjaTemplateError): + def __init__(self, filter_name: str, message: str, hint: str | None = None) -> None: + self.filter_name = filter_name + self.hint = hint + full_message = f"Filter '{filter_name}': {message}" + if hint: + full_message += f" — {hint}" + super().__init__(full_message) +``` + +**Inheritance**: `Error` → `JinjaTemplateError` → `JinjaFilterError` + +### InfrahubFilters (new class) + +**Location**: `infrahub_sdk/template/infrahub_filters.py` (new file) + +```python +class InfrahubFilters: + @classmethod + def get_filter_names(cls) -> tuple[str, ...]: + """Discover filter names from public methods.""" + ... + + def __init__(self, client: InfrahubClient | None = None) -> None: + self.client = client + + def _require_client(self, filter_name: str) -> InfrahubClient: + """Raise JinjaFilterError if no client is available.""" + ... + + async def artifact_content(self, storage_id: str) -> str: ... + async def file_object_content(self, storage_id: str) -> str: ... + async def file_object_content_by_id(self, node_id: str) -> str: ... 
+ async def file_object_content_by_hfid(self, hfid: str | list[str], kind: str = "") -> str: ... +``` + +**Key design decisions**: + +- Client is optional — `InfrahubFilters` is always instantiated, each method checks for a client at call time via `_require_client()` +- `get_filter_names()` discovers client-dependent filter names automatically from all public methods — adding a new filter only requires adding a method +- Methods are `async` — Jinja2's `auto_await` handles them in async rendering mode +- Holds an `InfrahubClient` (async only), not `InfrahubClientSync` +- Each method validates inputs and catches `AuthenticationError` to wrap in `JinjaFilterError` +- File object retrieval is split into 3 filters matching the server's 3 endpoints (`by-storage-id`, `by-id`, `by-hfid`) + +## Modified Entities + +### Jinja2Template (modified constructor) + +**Location**: `infrahub_sdk/template/__init__.py` + +```python +def __init__( + self, + template: str | Path, + template_directory: Path | None = None, + filters: dict[str, Callable] | None = None, + client: InfrahubClient | None = None, # NEW +) -> None: +``` + +**Changes**: + +- New optional `client` parameter +- When `client` provided: instantiate `InfrahubFilters`, register `artifact_content` and `file_object_content` +- Always register `from_json` and `from_yaml` (no client needed) +- File-based environment already has `enable_async=True` (no change needed) + +### Jinja2Template.set_client() (new method) + +```python +def set_client(self, client: InfrahubClient) -> None: +``` + +**Purpose**: Deferred client injection — allows creating a `Jinja2Template` first and adding the client later. Also supports replacing a previously set client. 
+ +- Updates `self._infrahub_filters.client` on the existing `InfrahubFilters` instance (no re-registration needed since the bound methods are already registered) +- If the Jinja2 environment was already created, patches it in place +- Without calling `set_client()` (and without passing `client` to `__init__`), client-dependent filters raise `JinjaFilterError` with a descriptive message at render time via `_require_client()` + +### Jinja2Template.validate() (modified signature) + +```python +def validate(self, restricted: bool = True, context: ExecutionContext | None = None) -> None: +``` + +**Changes**: + +- New optional `context` parameter (takes precedence over `restricted` when provided) +- Backward compat: `restricted=True` → `ExecutionContext.CORE`, `restricted=False` → `ExecutionContext.LOCAL` +- Validation logic: filter allowed if `filter.allowed_contexts & context` is truthy + +### ObjectStore (new method) + +**Location**: `infrahub_sdk/object_store.py` + +```python +async def get_file_by_storage_id(self, storage_id: str, tracker: str | None = None) -> str: + """Retrieve file object content by storage_id. + + Raises error if content-type is not text-based. + """ + ... +``` + +**API endpoints**: + +- `GET /api/files/by-storage-id/{storage_id}` — used by `file_object_content` +- `GET /api/files/{node_id}` — used by `file_object_content_by_id` +- `GET /api/files/by-hfid/{kind}?hfid=...` — used by `file_object_content_by_hfid` + +**Content-type check**: Allow `text/*`, `application/json`, `application/yaml`, `application/x-yaml`. Reject all others. 
+ +## New Filter Registrations + +```python +# In AVAILABLE_FILTERS: + +# Infrahub client-dependent filters (worker and local contexts) +FilterDefinition("artifact_content", allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL, source="infrahub"), +FilterDefinition("file_object_content", allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL, source="infrahub"), +FilterDefinition("file_object_content_by_hfid", allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL, source="infrahub"), +FilterDefinition("file_object_content_by_id", allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL, source="infrahub"), + +# Parsing filters (trusted, all contexts) +FilterDefinition("from_json", allowed_contexts=ExecutionContext.ALL, source="infrahub"), +FilterDefinition("from_yaml", allowed_contexts=ExecutionContext.ALL, source="infrahub"), +``` + +## Relationships + +```text +Jinja2Template + ├── has-a → InfrahubFilters (when client provided) + ├── uses → FilterDefinition registry (for validation) + └── uses → ExecutionContext (for context-aware validation) + +InfrahubFilters + ├── has-a → InfrahubClient + └── uses → ObjectStore (for content retrieval) + +JinjaFilterError + └── extends → JinjaTemplateError → Error +``` diff --git a/dev/specs/infp-504-artifact-composition/plan.md b/dev/specs/infp-504-artifact-composition/plan.md new file mode 100644 index 00000000..58808d78 --- /dev/null +++ b/dev/specs/infp-504-artifact-composition/plan.md @@ -0,0 +1,108 @@ +# Implementation Plan: Artifact Content Composition + +**Branch**: `infp-504-artifact-composition` | **Date**: 2026-03-20 | **Spec**: [spec.md](spec.md) +**Jira**: INFP-504 | **Epic**: IFC-2275 + +## Summary + +Enable Jinja2 templates to reference and inline rendered content from other artifacts and file objects via new filters (`artifact_content`, `file_object_content`, `from_json`, `from_yaml`). 
Requires evolving the filter trust model from a binary boolean to a flag-based execution context system, creating a new `InfrahubFilters` class to hold client-dependent filter logic, and extending `Jinja2Template` with an optional client parameter. + +## Technical Context + +**Language/Version**: Python 3.10-3.13 +**Primary Dependencies**: jinja2, httpx, pydantic >=2.0, PyYAML (already available via netutils) +**Storage**: Infrahub object store (REST API) +**Testing**: pytest (`uv run pytest tests/unit/`) +**Target Platform**: SDK library consumed by Prefect workers, CLI, and API server +**Project Type**: Single Python package +**Constraints**: No new external dependencies. Must maintain async/sync dual pattern. Must not break existing filter behavior. + +## Key Technical Decisions + +### 1. Async Filters via Jinja2 native support (R-001) + +The `SandboxedEnvironment` already uses `enable_async=True`. Jinja2's `auto_await` automatically awaits filter return values during `render_async()`. The new content-fetching filters can be `async def` — no bridging needed. + +**Required change**: Add `enable_async=True` to the file-based environment (`_get_file_based_environment()`) so async filters work for file-based templates too. + +### 2. Flag-based trust model (R-004) + +Replace `FilterDefinition.trusted: bool` with `allowed_contexts: ExecutionContext` using Python's `Flag` enum. Three contexts: `CORE` (most restrictive), `WORKER`, `LOCAL` (least restrictive). A backward-compatible `trusted` property preserves existing API. + +### 3. Content-type checking for file objects (R-003) + +New `ObjectStore.get_file_by_storage_id()` method checks response `content-type` header. Text-based types are allowed; binary types are rejected with a descriptive error. 
+ +## Project Structure + +### Documentation (this feature) + +```text +dev/specs/infp-504-artifact-composition/ +├── spec.md # Feature specification +├── plan.md # This file +├── research.md # Phase 0 research findings +├── data-model.md # Entity definitions +├── quickstart.md # Usage examples +├── contracts/ +│ └── filter-interfaces.md # Filter I/O contracts +└── checklists/ + └── requirements.md # Quality checklist +``` + +### Source Code (files to create or modify) + +```text +infrahub_sdk/ +├── template/ +│ ├── __init__.py # MODIFY: Jinja2Template (client param, validate context) +│ ├── filters.py # MODIFY: ExecutionContext enum, FilterDefinition migration +│ ├── exceptions.py # MODIFY: Add JinjaFilterError +│ └── infrahub_filters.py # CREATE: InfrahubFilters class +├── object_store.py # MODIFY: Add get_file_by_storage_id() +``` + +```text +tests/unit/ +├── template/ +│ ├── test_filters.py # MODIFY: Tests for new filters and trust model +│ └── test_infrahub_filters.py # CREATE: Tests for InfrahubFilters +``` + +## Implementation Order + +The 13 Jira tasks under IFC-2275 follow this dependency graph: + +```text +Phase 1 (Foundation — no dependencies, can be parallel): + IFC-2367: JinjaFilterError exception + IFC-2368: Flag-based trust model (ExecutionContext + FilterDefinition migration) + IFC-2373: ObjectStore.get_file_by_storage_id() + +Phase 2 (Filters — depend on Phase 1): + IFC-2369: from_json filter (depends on IFC-2367) + IFC-2370: from_yaml filter (depends on IFC-2367) + IFC-2371: InfrahubFilters class (depends on IFC-2367) + +Phase 3 (Content filters — depend on Phase 2): + IFC-2372: artifact_content filter (depends on IFC-2371) + IFC-2374: file_object_content filter (depends on IFC-2371, IFC-2373) + +Phase 4 (Integration — depend on Phase 3): + IFC-2375: Jinja2Template client param + wiring (depends on IFC-2368, IFC-2371, IFC-2372) + IFC-2376: Filter registration with correct contexts (depends on IFC-2368, IFC-2369, IFC-2370, IFC-2372, IFC-2374) + 
+Phase 5 (Documentation + Server — depend on Phase 4): + IFC-2377: Documentation (depends on IFC-2376) + IFC-2378: integrator.py threading [Infrahub server] (depends on IFC-2375) + IFC-2379: Schema validation [Infrahub server] (depends on IFC-2368) +``` + +## Risk Register + +| Risk | Likelihood | Impact | Mitigation | +| ---- | --------- | ------ | ---------- | +| Jinja2 `auto_await` doesn't work as expected for filters | Low | High | Verify with a minimal test before building on the assumption. Fallback: sync wrapper with thread executor. | +| File-based environment breaks with `enable_async=True` | Low | Medium | File-based env change is isolated and testable. Existing tests will catch regressions. | +| ObjectStore API returns incorrect content-type for file objects | Medium | Low | Already flagged by @wvandeun. The filter will use best-effort content-type checking; can be refined when API is fixed. | +| `validate()` backward compat breaks existing callers | Low | High | Keep `restricted` param with deprecation path. Test all existing call sites. 
| diff --git a/dev/specs/infp-504-artifact-composition/quickstart.md b/dev/specs/infp-504-artifact-composition/quickstart.md new file mode 100644 index 00000000..27f1e272 --- /dev/null +++ b/dev/specs/infp-504-artifact-composition/quickstart.md @@ -0,0 +1,98 @@ +# Quickstart: Artifact Content Composition + +**Feature**: INFP-504 | **Date**: 2026-03-20 + +## Jinja2 Templates + +### Inline artifact content + +Query artifacts via GraphQL and use the `artifact_content` filter to include their content: + +```jinja2 +{% set device = data.NetworkDevice.edges[0].node %} +hostname {{ device.hostname.value }} + +{% for artifact in device.artifacts.edges %} +{% set content = artifact.node.storage_id.value | artifact_content %} +{% if content %} +{{ content }} +{% endif %} +{% endfor %} +``` + +### Inline file object content + +File objects can be retrieved by storage ID, node UUID, or HFID: + +```jinja2 +{# By storage_id (most common) #} +{{ file_object.storage_id.value | file_object_content }} + +{# By node UUID #} +{{ file_object.id | file_object_content_by_id }} + +{# By Human-Friendly ID #} +{{ hfid_components | file_object_content_by_hfid(kind="NetworkCircuitContract") }} +``` + +### Parse structured content + +Chain `artifact_content` with `from_json` or `from_yaml` to access structured data: + +```jinja2 +{% set config = artifact.node.storage_id.value | artifact_content | from_json %} +interface {{ config.interface_name }} + ip address {{ config.ip_address }} +``` + +```jinja2 +{% set config = artifact.node.storage_id.value | artifact_content | from_yaml %} +{% for route in config.static_routes %} +ip route {{ route.prefix }} {{ route.next_hop }} +{% endfor %} +``` + +## Python Transforms + +For Python transforms, use the SDK's object store directly: + +```python +async def transform(self, data: dict, client: InfrahubClient) -> str: + storage_id = ( + data["NetworkDevice"]["edges"][0]["node"] + ["artifacts"]["edges"][0]["node"] + ["storage_id"]["value"] + ) + content = 
await client.object_store.get(identifier=storage_id)
+    return content
+```
+
+## GraphQL Query Pattern
+
+Reference artifacts in your query via the `artifacts` relationship:
+
+```graphql
+query StartupConfig($name: String!) {
+  NetworkDevice(hostname__value: $name) {
+    edges {
+      node {
+        hostname { value }
+        artifacts(name__value: "base_config") {
+          edges {
+            node {
+              id
+              storage_id { value }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+```
+
+## Known Limitations
+
+- **No ordering guarantee**: Artifacts may be generated in parallel. A composite artifact template may render before its dependencies are ready. Future event-driven pipeline work (INFP-227) will address this.
+- **Blocked for computed attributes**: `artifact_content` and `file_object_content` are blocked in the `CORE` context (API server computed attributes). They are permitted in the `WORKER` (Prefect workers) and `LOCAL` (CLI/unrestricted rendering) contexts; rendering still requires an `InfrahubClient` to be supplied to `Jinja2Template`.
+- **Text content only**: `file_object_content` rejects binary file objects. `artifact_content` always returns text (artifacts are text-only). diff --git a/dev/specs/infp-504-artifact-composition/research.md b/dev/specs/infp-504-artifact-composition/research.md new file mode 100644 index 00000000..92f590a7 --- /dev/null +++ b/dev/specs/infp-504-artifact-composition/research.md @@ -0,0 +1,75 @@ +# Research: Artifact Content Composition
+
+**Feature**: INFP-504 | **Date**: 2026-03-20
+
+## Research Findings
+
+### R-001: Async-to-Sync Bridge for Jinja2 Filters
+
+**Decision**: Use Jinja2's native async filter support (`auto_await`) — no bridging needed.
+
+**Rationale**: The `SandboxedEnvironment` is already created with `enable_async=True` (`template/__init__.py`:137), and rendering uses `template.render_async()` (`template/__init__.py`:122). In Jinja2 async mode, filter call results are wrapped in `auto_await()`, which detects awaitables and awaits them automatically. This means we can register async functions directly as filters.
+
+**Caveat**: The file-based environment (`_get_file_based_environment()` at line 140) does NOT currently set `enable_async=True`. 
This must be added for async filters to work with file-based templates. + +**Alternatives considered**: + +- `asyncio.run()`: Cannot be used — we're already inside a running event loop during `render_async()`. Would raise `RuntimeError: This event loop is already running`. +- Thread-based executor: Overly complex, introduces thread safety concerns, and is unnecessary given Jinja2's built-in async support. +- `nest_asyncio`: External dependency, fragile, not needed. + +**Evidence**: Jinja2 source code confirms `auto_await` wrapping of filter results in async mode. SDK's existing pytest plugin already uses `asyncio.run()` for a different scenario (sync test runner calling async render), which is a distinct pattern. + +### R-002: File Object Content API Path + +**Decision**: Use `/api/files/by-storage-id/{storage_id}` endpoint. + +**Rationale**: Confirmed by product owner. The `storage_id` alone is sufficient for retrieval. Future endpoints for by-hfid and by-node are anticipated but not in scope. + +**Implementation note**: The existing `ObjectStore.get()` uses the path `/api/storage/object/{identifier}`. The file object endpoint is completely different, so a new method is needed rather than parameterizing the existing one. + +### R-003: Binary Content Detection for File Objects + +**Decision**: Check the `content-type` response header from the API response. Reject non-text content types. + +**Rationale**: Artifacts are always plain text (no detection needed). File objects can be any type, but the response `content-type` header reliably indicates the type. The current `ObjectStore.get()` returns `response.text` directly without checking the content type — the new file object method must inspect the header first. + +**Text types to allow**: `text/*`, `application/json`, `application/yaml`, `application/x-yaml`. Everything else should be rejected with `JinjaFilterError`. 
+ +### R-004: Filter Trust Model Design + +**Decision**: Flag-based `ExecutionContext` using Python's `Flag` enum. + +**Rationale**: The requirements don't form a clean hierarchy. `artifact_content` must be allowed in WORKER but not LOCAL (no client), while `safe` must be allowed in LOCAL but not WORKER. A flag-based system with an allowlist per filter is the only model that handles all cases without implicit ordering assumptions. + +**Design**: + +```python +class ExecutionContext(Flag): + CORE = auto() # API server computed attributes (most restrictive) + WORKER = auto() # Prefect background workers + LOCAL = auto() # Local CLI / unrestricted rendering + ALL = CORE | WORKER | LOCAL +``` + +```python +@dataclass +class FilterDefinition: + name: str + allowed_contexts: ExecutionContext + source: str +``` + +**Migration**: `trusted=True` → `allowed_contexts=ALL`, `trusted=False` → `allowed_contexts=LOCAL`. A `trusted` property can be preserved for backward compatibility: `return bool(self.allowed_contexts & ExecutionContext.CORE)`. + +### R-005: Existing Netutils Filter Inventory + +**Decision**: `from_json` and `from_yaml` do NOT exist in the current filter set. + +**Rationale**: Searched all 51 builtin filters and 87 netutils filters in `infrahub_sdk/template/filters.py`. No `from_json`, `from_yaml`, `parse_json`, or `parse_yaml` entries. `tojson` exists (builtin, untrusted) but is the reverse operation. Safe to add without de-duplication concerns. + +### R-006: ObjectStore Authentication Error Handling + +**Decision**: Reuse the existing pattern from `ObjectStore.get()`. + +**Rationale**: `ObjectStore.get()` (object_store.py:34-40) already handles 401/403 by raising `AuthenticationError`. The new filters should catch `AuthenticationError` and wrap it in `JinjaFilterError` with a permission-specific message. No new auth handling logic needed in ObjectStore itself. 
diff --git a/dev/specs/infp-504-artifact-composition/spec.md b/dev/specs/infp-504-artifact-composition/spec.md new file mode 100644 index 00000000..026cec7d --- /dev/null +++ b/dev/specs/infp-504-artifact-composition/spec.md @@ -0,0 +1,169 @@ +# Feature specification: Artifact content composition via Jinja2 filters + +**Feature Branch**: `infp-504-artifact-composition` +**Created**: 2026-02-18 +**Status**: Draft +**Jira**: INFP-504 (part of INFP-304 Artifact of Artifacts initiative) + +## Overview + +Enable customers building modular configuration pipelines to compose larger artifacts from smaller sub-artifacts by referencing and inlining rendered artifact content directly inside a Jinja2 transform, without duplicating template logic or GraphQL query fields. + +## User scenarios & testing *(mandatory)* + +### User story 1 - inline artifact content in a composite template (Priority: P1) + +A network engineer maintains separate section-level artifacts for routing policy, interfaces, and base config. They want a composite "startup config" artifact whose Jinja2 template pulls in each section's rendered content via a `storage_id` already present in the GraphQL query result — without copy-pasting template logic. + +The template uses `artifact.node.storage_id.value | artifact_content` and the rendered output assembles all sections automatically. + +**Why this priority**: This is the primary use case that delivers the modular pipeline capability. Everything else in this feature supports or extends it. + +**Independent Test**: A Jinja2 template calling `artifact_content` with a valid storage_id can be rendered against a real or mocked Infrahub instance and the output matches the expected concatenated artifact contents. + +**Acceptance Scenarios**: + +1. 
**Given** a `Jinja2Template` constructed with a valid `InfrahubClient` and a template calling `storage_id | artifact_content`, **When** the template is rendered with a data dict containing a valid storage_id string, **Then** the output contains the raw string content fetched from the object store. +2. **Given** the same setup but the storage_id is null or the object store cannot retrieve the content, **When** rendered, **Then** the filter raises a descriptive error indicating the retrieval failure. +3. **Given** a `Jinja2Template` constructed *without* an `InfrahubClient` and a template calling `artifact_content`, **When** rendered, **Then** an error is raised with a message clearly stating that an `InfrahubClient` is required for this filter. +4. **Given** a template using `artifact_content` and `validate(restricted=True)` is called, **Then** a `JinjaTemplateOperationViolationError` is raised, confirming the filter is blocked in local restricted mode. + +--- + +### User story 2 - inline file object content in a composite template (Priority: P2) + +A template author needs to embed the content of a stored file object (as distinct from an artifact) into a Jinja2 template. They use `storage_id | file_object_content` and the same injection and error-handling behaviour applies. + +**Why this priority**: Mirrors `artifact_content` for the file-object use case; same implementation pattern, lower novelty. + +**Independent Test**: A template calling `file_object_content` renders correctly with a valid storage_id, and raises a descriptive error for null or unresolvable storage_ids. + +**Acceptance Scenarios**: + +1. **Given** a `Jinja2Template` with a client and a valid file-object storage_id, **When** rendered, **Then** the raw file content string is returned. +2. **Given** a null or missing storage_id value, **When** the filter is invoked, **Then** an error is raised with a descriptive message about the retrieval failure. +3. 
**Given** no client provided to `Jinja2Template`, **When** the filter is invoked, **Then** an error is raised. + +--- + +### User story 3 - parse structured artifact content in a template (Priority: P3) + +A template author retrieves a JSON-formatted artifact and needs to traverse its structure as a dict within the template. They chain `storage_id | artifact_content | from_json` to obtain a parsed object, then access fields normally. + +**Why this priority**: Unlocks structured composition use cases; depends on `artifact_content` (P1) being in place. `from_json`/`from_yaml` are useful in isolation too. + +**Independent Test**: A template chaining `artifact_content | from_json` renders correctly and the output reflects values from parsed JSON fields. + +**Acceptance Scenarios**: + +1. **Given** a template using `storage_id | artifact_content | from_json`, **When** rendered with a storage_id pointing to valid JSON content, **Then** the template can access keys of the parsed object. +2. **Given** `storage_id | artifact_content | from_yaml`, **When** rendered with YAML content, **Then** the template can access keys of the parsed mapping. +3. **Given** `from_json` or `from_yaml` applied to an empty string (for example, a template variable that is explicitly empty), **When** rendered, **Then** the filter returns an empty dict or appropriate empty value without raising. + +--- + +### User story 4 - security gate blocks filters in computed attributes context (Priority: P1) + +The Infrahub API server executes computed attributes locally and must block `artifact_content` and `file_object_content` because no network calls should be made within that context. Prefect workers run inside Infrahub with a client and must be able to use these filters. Other currently-untrusted Jinja2 filters (for example, `safe`, `attr`) must remain subject to their existing restriction rules — this feature must not inadvertently widen their permissions. 
+ +The existing single `restricted: bool` parameter on `validate()` is insufficient: flipping it to `False` to permit Infrahub filters would also permit all other untrusted filters. The validation mechanism must be extended to express at least three distinct execution contexts. + +**Why this priority**: Preventing these filters from running in the computed attributes context is a hard requirement. Shares P1 priority with User Story 1. + +**Independent Test**: Validation in the computed-attributes context raises `JinjaTemplateOperationViolationError` for templates using `artifact_content` or `file_object_content`. Validation in the Prefect-worker context passes for the same templates. Neither context changes the restriction behaviour of other currently-untrusted filters. + +**Acceptance Scenarios**: + +1. **Given** a template referencing `artifact_content`, **When** validated in the computed-attributes context, **Then** `JinjaTemplateOperationViolationError` is raised. +2. **Given** the same template, **When** validated in the Prefect-worker context with a client-initialised `Jinja2Template`, **Then** validation passes. +3. **Given** a template using an existing untrusted filter (for example, `safe`), **When** validated in the Prefect-worker context, **Then** `JinjaTemplateOperationViolationError` is still raised — the Prefect-worker context does not unlock other untrusted filters. + +--- + +### Edge cases + +- What happens if a storage_id value is `None` (Python None) rather than a missing string? Both cases must raise a descriptive error. +- What if the object store raises a network or authentication error mid-render? All error conditions (null storage_id, not-found, auth failure, network failure) raise exceptions — there is no silent fallback. +- What if `from_json` or `from_yaml` already exists in the netutils filter set? De-duplicate rather than shadow. +- What happens when `from_json` or `from_yaml` receives malformed content (invalid JSON/YAML syntax)? 
`JinjaFilterError` is raised — no silent fallback. +- What if the same filter name is registered twice (for example, a user-supplied filter that shadows `artifact_content`)? Existing override behaviour should be preserved. +- File-based templates use a regular `Environment` (not sandboxed); the new filters must be injected correctly in both cases. + +## Requirements *(mandatory)* + +### Functional requirements + +- **FR-001**: `Jinja2Template.__init__` MUST accept an optional `client` parameter of type `InfrahubClient | None` (default `None`). Additionally, `Jinja2Template` MUST expose a `set_client(client)` method for deferred client injection, allowing the template to be created first and the client added later. `InfrahubClientSync` is not supported. +- **FR-002**: A dedicated class (for example, `InfrahubFilters`) MUST be introduced to hold an optional client reference and expose the Infrahub-specific filter callable methods. `InfrahubFilters` is always instantiated by `Jinja2Template` (even without a client); each filter method checks for a client at call time and raises `JinjaFilterError` if none is available. `set_client()` updates the existing `InfrahubFilters` instance rather than creating a new one. `InfrahubFilters.get_filter_names()` discovers client-dependent filter names automatically from all public methods on the class. +- **FR-003**: The system MUST provide an `artifact_content` Jinja2 filter that accepts a `storage_id` string and returns the raw string content of the referenced artifact, using the artifact-specific API path. 
+- **FR-004**: The system MUST provide three file object content filters, each retrieving content via a different identifier: + - `file_object_content` — accepts a `storage_id` string, uses `GET /api/files/by-storage-id/{storage_id}` + - `file_object_content_by_id` — accepts a node UUID string, uses `GET /api/files/{node_id}` + - `file_object_content_by_hfid` — accepts an HFID string or list and a required `kind` argument, uses `GET /api/files/by-hfid/{kind}?hfid=...` + All three share the same binary content-type rejection and error handling behavior. +- **FR-005**: All client-dependent filters (`artifact_content`, `file_object_content`, `file_object_content_by_id`, `file_object_content_by_hfid`) MUST raise `JinjaFilterError` when the input identifier is null or empty, or when the object store cannot retrieve the content for any reason (not found, network failure, auth failure). Additionally, all file object filters MUST raise `JinjaFilterError` when the retrieved content has a non-text content type (i.e., not `text/*`, `application/json`, or `application/yaml`). `file_object_content_by_hfid` MUST also raise `JinjaFilterError` when the `kind` argument is missing. +- **FR-006**: All client-dependent filters MUST raise `JinjaFilterError` when invoked and no `InfrahubClient` was supplied to `Jinja2Template` at construction time. The error message MUST name the filter and explain that an `InfrahubClient` is required. +- **FR-007**: All client-dependent filters (`artifact_content`, `file_object_content`, `file_object_content_by_id`, `file_object_content_by_hfid`) MUST be registered with `allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL` in the `FilterDefinition` registry. The `validate()` method accepts an `ExecutionContext` flag; these filters are blocked in the `CORE` context (API server computed attributes) and permitted in the `WORKER` context (Prefect workers) and `LOCAL` context (CLI/unrestricted rendering). 
Within Infrahub, any Jinja2-based computed attributes that use these new filters should cause a schema violation when loading the schema. 
+- **FR-008**: The system MUST provide `from_json` and `from_yaml` Jinja2 filters (adding them only if not already present in the environment) that parse a string into a Python dict/list. Applying them to an empty string MUST return an empty dict without raising. Applying them to malformed content MUST raise `JinjaFilterError`.
+- **FR-009**: `from_json` and `from_yaml` MUST be registered with `allowed_contexts=ExecutionContext.ALL` (trusted in every execution context) since they perform no external I/O.
+- **FR-010**: All new filters MUST work correctly with `InfrahubClient` (async). `InfrahubClientSync` is not a supported client type for `Jinja2Template`. Both the sandboxed environment (string-based templates) and the file-based environment MUST have `enable_async=True` to support async filter callables via Jinja2's `auto_await`.
+- **FR-011**: All `JinjaFilterError` instances MUST carry an actionable error message that identifies the filter name, the cause of failure, and any remediation hint (for example: "artifact_content requires an InfrahubClient — pass one via Jinja2Template(client=...)").
+- **FR-012**: A new `JinjaFilterError` exception class MUST be added to `infrahub_sdk/template/exceptions.py` as a subclass of `JinjaTemplateError`.
+- **FR-013**: Documentation MUST include a Python transform example demonstrating artifact content retrieval via `client.object_store.get(identifier=storage_id)`. No new SDK convenience method will be added.
+- **FR-014**: If the current user is not permitted to query for the artifact or file object (permission denied), the filter MUST catch the permission error and raise a Jinja2 error specifically related to the permission issue. 
 +
+- **FR-015**: The auto-generated templating reference (`sdk_template_reference.j2` and `_generate_infrahub_sdk_template_documentation()`) MUST be updated to include `INFRAHUB_FILTERS` as a third section ("Infrahub filters"). The table MUST show which execution contexts each filter is allowed in (`CORE`, `WORKER`, `LOCAL`) rather than only a binary "Trusted" column.
+- **FR-016**: The templating reference documentation MUST explain the `ExecutionContext` model — what `CORE`, `WORKER`, and `LOCAL` contexts mean, how they map to Infrahub's execution environments (computed attributes, Prefect workers, local CLI), and how `validate(context=...)` is used to enforce filter restrictions.
+- **FR-017**: Usage examples for the new Jinja2 filters MUST be included in the SDK documentation, covering at minimum: `artifact_content` with a storage_id, `file_object_content` by storage_id, `file_object_content_by_id` by node UUID, `file_object_content_by_hfid` with `kind` argument, and `from_json`/`from_yaml` chaining.
+
+### Key entities
+
+- **`Jinja2Template`**: Gains an optional `client` constructor parameter; delegates client-bound filter registration to `InfrahubFilters`.
+- **`InfrahubFilters`**: New class that holds an `InfrahubClient` reference and exposes `artifact_content`, `file_object_content`, and any other client-dependent filter methods. Registered into the Jinja2 filter map when a client is provided.
+- **`FilterDefinition`**: Existing dataclass used to declare filter `name`, allowed execution contexts (`allowed_contexts`, replacing the previous `trusted` flag per FR-007), and `source`. New entries are added here for all new filters.
+- **`ObjectStore`**: Existing async storage client used by `InfrahubFilters` to perform `get(identifier=storage_id)` calls. (`ObjectStoreSync` is not used; `InfrahubClientSync` is explicitly out of scope — see FR-001, FR-010.) 
+- **`JinjaFilterError`**: New exception class, subclass of `JinjaTemplateError`, raised by `InfrahubFilters` methods on all filter-level failures (no client, null/empty storage_id, retrieval error). + +## Success criteria *(mandatory)* + +### Measurable outcomes + +- **SC-001**: A composite Jinja2 artifact template using `artifact_content` renders successfully end-to-end (integration test), with output containing all expected sub-artifact content. +- **SC-002**: `validate(restricted=True)` on any template referencing `artifact_content` or `file_object_content` always raises a security violation — zero false negatives across the test suite. +- **SC-003**: All filter error conditions (no client, null/empty storage_id, retrieval failure) produce a descriptive, actionable error message — no silent failures, no raw tracebacks as the primary user-facing message. +- **SC-004**: The async execution path (`InfrahubClient`) is covered by unit tests with no regressions to existing filter behaviour. +- **SC-005**: The full unit test suite (`uv run pytest tests/unit/`) passes without modification after the feature is added. +- **SC-006**: A template chaining `artifact_content | from_json` or `artifact_content | from_yaml` can access parsed fields from a structured artifact in a rendered output. + +## Assumptions + +- The `artifact_content` and `file_object_content` filters receive a `storage_id` string directly from the template variable context — extracted from the GraphQL query result by the template author. The filter does not resolve artifact names — it operates on storage IDs only. +- Ordering of artifact generation is a known limitation: artifacts may be generated in parallel. This is a documented constraint, not something this feature enforces. Future event-driven pipeline work (INFP-227) will address ordering. +- `from_json` and `from_yaml` are not currently present in the builtin or netutils filter sets; they will be added as part of this feature. 
If they already exist, the implementation de-duplicates rather than overrides. +- All failure modes from the filters (null storage_id, empty storage_id, object not found, network error, auth error) raise exceptions. There is no silent fallback to an empty string. +- The permitted execution context for `artifact_content` and `file_object_content` is Prefect workers only. The computed attributes path in the Infrahub API server always runs `validate(restricted=True)`, which blocks these filters before rendering begins. +- The `InfrahubFilters` class provides `async def` callables to Jinja2's filter map; the underlying client is always `InfrahubClient` (async). Jinja2's `auto_await` mechanism (enabled via `enable_async=True` on the environment) automatically awaits filter return values during `render_async()`, so no explicit sync-to-async bridging is needed. + +## Dependencies & constraints + +- Depends on `ObjectStore.get(identifier)` in `infrahub_sdk/object_store.py`. +- Depends on the existing `FilterDefinition` dataclass and `trusted` flag mechanism in `infrahub_sdk/template/filters.py`. +- Depends on the existing `validate(restricted=True)` security mechanism in `Jinja2Template`. +- Must not break any existing filter behaviour or the `validate()` contract. +- No new external Python dependencies may be introduced without approval. +- Related: INFP-304 (Artifact of Artifacts), INFP-496 (Modular GraphQL queries), INFP-227 (Modular generators / event-driven pipeline). + +## Open questions + +- **Filter naming**: `artifact_content` is the working name. Alternatives are open. Same with `file_object_content` as one option is to use the "/api/storage/files/by-storage-id" endpoint, we will want to support "by-hfid" and node as well. +- **Sandboxed environment injection**: The `render_jinja2_template` method in `integrator.py` has access to `self.sdk`; the exact threading path to pass the client into `Jinja2Template` needs investigation during planning. 
+- **Validation level model**: The current `validate(restricted: bool)` parameter is too coarse to express the three distinct execution contexts this feature requires. A natural evolution would be to replace the boolean with an enum (for example: `core` for the Infrahub API server, `worker` for Prefect background workers, `untrusted` for fully restricted local execution). Filters tagged as `worker`-only would be blocked in the `core` context but permitted in the `worker` context, while `trusted` filters remain available in all contexts. The exact enum design and migration of existing call sites is a technical decision for the implementation plan, but the interface change should be considered up front to avoid needing to revisit `validate()` again later. + +## Clarifications + +### Session 2026-02-18 + +- Q: Are `artifact_content` and `file_object_content` identical at the storage API level, or do they use different API paths / metadata handling? → A: Different implementations — `file_object_content` uses a different API path or carries different metadata handling than `artifact_content`. +- Q: Where are these filters permitted to execute, and what mechanism enforces the boundary? → A: Blocked in computed attributes (executed locally in the Infrahub API server, which uses `validate(restricted=True)`); permitted on Prefect workers, which have access to an `InfrahubClient`. The `trusted=False` registration enforces this boundary via the existing restricted-mode validation. +- Q: What exception class should filter-level errors (no client, retrieval failure) raise? → A: A new `JinjaFilterError` class that is a child of the existing `JinjaTemplateError` base class. +- Q: Should the SDK expose a convenience method for artifact content retrieval in Python transforms? → A: No new method — document `client.object_store.get(identifier=storage_id)` directly. +- Q: What should `from_json`/`from_yaml` do on malformed input? 
→ A: Raise `JinjaFilterError` on malformed JSON or YAML input. diff --git a/dev/specs/infp-504-artifact-composition/tasks.md b/dev/specs/infp-504-artifact-composition/tasks.md new file mode 100644 index 00000000..ca708923 --- /dev/null +++ b/dev/specs/infp-504-artifact-composition/tasks.md @@ -0,0 +1,188 @@ +# Tasks: Artifact Content Composition via Jinja2 Filters + +**Input**: Design documents from `dev/specs/infp-504-artifact-composition/` +**Prerequisites**: plan.md, spec.md, research.md, data-model.md, contracts/ +**Jira Epic**: IFC-2275 + +**Tests**: Included with each implementation task (per project convention). + +**Organization**: Tasks grouped by user story. US4 (security gate) is foundational and combined with US1 as both are P1. + +## Format: `[ID] [P?] [Story] Description` + +- **[P]**: Can run in parallel (different files, no dependencies) +- **[Story]**: Which user story this task belongs to (e.g., US1, US2, US3, US4) +- Include exact file paths in descriptions + +--- + +## Phase 1: Foundational (Blocking Prerequisites) + +**Purpose**: Exception class and trust model that ALL user stories depend on + +**CRITICAL**: No user story work can begin until this phase is complete + +- [x] T001 [P] Create `JinjaFilterError` exception class in `infrahub_sdk/template/exceptions.py` — subclass of `JinjaTemplateError` with `filter_name`, `message`, and optional `hint` attributes. Include unit tests for instantiation, inheritance chain, and message formatting. (IFC-2367) +- [x] T002 [P] Implement `ExecutionContext` flag enum and migrate `FilterDefinition` in `infrahub_sdk/template/filters.py` — add `ExecutionContext(Flag)` with `CORE`, `WORKER`, `LOCAL`, `ALL` values. Replace `FilterDefinition.trusted: bool` with `allowed_contexts: ExecutionContext`. Add backward-compat `trusted` property. Migrate all 138 existing filter entries (`trusted=True` → `ALL`, `trusted=False` → `LOCAL`). 
Update `validate()` in `infrahub_sdk/template/__init__.py` to accept optional `context: ExecutionContext` parameter (takes precedence over `restricted`; `restricted=True` → `CORE`, `restricted=False` → `LOCAL`). Include unit tests for all 3 contexts with existing filters, backward compat path, and no regressions. (IFC-2368) + +**Checkpoint**: Foundation ready — JinjaFilterError and ExecutionContext available for all stories + +--- + +## Phase 2: US1 — Inline Artifact Content + US4 — Security Gate (Priority: P1) MVP + +**Goal**: A Jinja2 template can use `storage_id | artifact_content` to inline rendered sub-artifact content. Validation blocks this filter in CORE context but allows it in WORKER context. + +**Independent Test**: Render a template calling `artifact_content` with a mocked `InfrahubClient` and verify output matches expected content. Validate the same template in CORE context raises `JinjaTemplateOperationViolationError`, and in WORKER context passes. + +### Implementation for US1 + US4 + +- [x] T003 [US1] Create `InfrahubFilters` class in `infrahub_sdk/template/infrahub_filters.py` — new file. Class holds `InfrahubClient` reference, exposes async filter methods. Methods are `async def` (Jinja2 `auto_await` handles them in async render mode per R-001). Raises `JinjaFilterError` when called without a client. Include unit tests for instantiation with/without client. (IFC-2371) +- [x] T004 [US1] Implement `artifact_content` async method on `InfrahubFilters` in `infrahub_sdk/template/infrahub_filters.py` — uses `self.client.object_store.get(identifier=storage_id)`. Raises `JinjaFilterError` on: null/empty storage_id, retrieval failure, permission denied (catch `AuthenticationError` per R-006). Artifacts are always text (no binary check needed per R-003). Include unit tests: happy path (mocked ObjectStore), null, empty, not-found, network error, permission denied, no-client error with descriptive message. 
(IFC-2372) +- [x] T005 [US1] [US4] Add `client` parameter to `Jinja2Template.__init__` and wire up filter registration in `infrahub_sdk/template/__init__.py` — add `client: InfrahubClient | None = None` param. When client provided: instantiate `InfrahubFilters`, register `artifact_content` into Jinja2 env filter map. Add `enable_async=True` to `_get_file_based_environment()` (per R-001 caveat). Register `artifact_content` in `FilterDefinition` registry with `allowed_contexts=ExecutionContext.WORKER`. Include unit tests: render with client (mocked), render without client (error), validation in CORE (blocked), WORKER (allowed), LOCAL (allowed). Verify existing untrusted filters like `safe` remain blocked in WORKER context (US4 AC3). Also added `set_client()` setter for deferred client injection per PR #885 feedback. (IFC-2375 partial + IFC-2376 partial) + +**Checkpoint**: US1 + US4 fully functional. `artifact_content` renders in WORKER context, blocked in CORE. MVP complete. + +--- + +## Phase 3: US2 — Inline File Object Content (Priority: P2) + +**Goal**: A Jinja2 template can use `storage_id | file_object_content` to inline file object content with binary rejection. + +**Independent Test**: Render a template calling `file_object_content` with a mocked client. Verify text content returned, binary content rejected. Validation blocks in CORE, allows in WORKER. + +### Implementation for US2 + +- [x] T006 [P] [US2] Add `get_file_by_storage_id()` method to `ObjectStore` in `infrahub_sdk/object_store.py` — async method using endpoint `GET /api/files/by-storage-id/{storage_id}`. Check `content-type` response header: allow `text/*`, `application/json`, `application/yaml`, `application/x-yaml`; reject all others with descriptive error. Handle 401/403 as `AuthenticationError`. Include unit tests: text response, binary rejection, 404, auth failure, network error. 
(IFC-2373) +- [x] T007 [US2] Implement `file_object_content` async method on `InfrahubFilters` in `infrahub_sdk/template/infrahub_filters.py` — uses new `self.client.object_store.get_file_by_storage_id(storage_id)`. Same error handling as `artifact_content` plus binary content error (delegated to ObjectStore). Include unit tests: happy path, all error conditions, binary content rejection. (IFC-2374) +- [x] T008 [US2] Register `file_object_content` filter in `Jinja2Template` and `FilterDefinition` in `infrahub_sdk/template/__init__.py` and `infrahub_sdk/template/filters.py` — register when client provided. `allowed_contexts=ExecutionContext.WORKER`. Include unit tests: render with client, validation in CORE (blocked), WORKER (allowed). (IFC-2375 partial + IFC-2376 partial) + +**Checkpoint**: US2 complete. `file_object_content` works alongside `artifact_content`. + +--- + +## Phase 4: US3 — Parse Structured Artifact Content (Priority: P3) + +**Goal**: Templates can chain `artifact_content | from_json` or `artifact_content | from_yaml` to access structured data. + +**Independent Test**: Render a template chaining `artifact_content | from_json` and verify parsed fields accessible. `from_json("")` and `from_yaml("")` return `{}`. + +### Implementation for US3 + +- [x] T009 [P] [US3] Implement `from_json` filter function in `infrahub_sdk/template/infrahub_filters.py` — pure sync function (no client needed). Empty string → `{}` (explicit special-case since `json.loads("")` raises). Malformed JSON → `JinjaFilterError`. Register in `FilterDefinition` with `allowed_contexts=ExecutionContext.ALL`. Register unconditionally in `Jinja2Template._set_filters()`. Include unit tests: valid JSON dict, valid JSON list, empty string → `{}`, malformed → error, render through template. (IFC-2369) +- [x] T010 [P] [US3] Implement `from_yaml` filter function in `infrahub_sdk/template/infrahub_filters.py` — pure sync function. 
Empty string → `{}` (explicit special-case since `yaml.safe_load("")` returns `None`). Malformed YAML → `JinjaFilterError`. Register in `FilterDefinition` with `allowed_contexts=ExecutionContext.ALL`. Register unconditionally in `Jinja2Template._set_filters()`. Include unit tests: valid YAML mapping, valid YAML list, empty string → `{}`, malformed → error, render through template. (IFC-2370) +- [x] T011 [US3] Integration test for filter chaining in `tests/unit/template/test_infrahub_filters.py` — test `artifact_content | from_json` and `artifact_content | from_yaml` end-to-end with mocked ObjectStore returning JSON/YAML content. Verify template can access parsed fields. (IFC-2376 partial, SC-006) + +**Checkpoint**: US3 complete. All 4 filters work, chain correctly, and are validated per context. + +--- + +## Phase 5: Polish & Cross-Cutting Concerns + +**Purpose**: Documentation, full regression, and server-side tasks + +- [ ] T012 Run full unit test suite (`uv run pytest tests/unit/`) and verify zero regressions (SC-005) +- [ ] T013 Run `uv run invoke format lint-code` and fix any issues +- [ ] T014 Documentation: artifact composition usage guide — create or update docs with Jinja2 filter examples, Python transform example using `client.object_store.get(identifier=storage_id)`, GraphQL query patterns, known limitations (no ordering guarantee). Run `uv run invoke docs-generate` and `uv run invoke docs-validate`. (IFC-2377) +- [ ] T015 [Infrahub server] Thread SDK client into `Jinja2Template` in `integrator.py` — pass `self.sdk` from `render_jinja2_template` as `Jinja2Template(client=...)` on Prefect workers. Integration test with composite template. (IFC-2378) +- [ ] T016 [Infrahub server] Schema validation: block new filters in computed attributes — validate with `context=ExecutionContext.CORE` at schema load time. Templates using `artifact_content`/`file_object_content` must be rejected. 
(IFC-2379) + +--- + +## Dependencies & Execution Order + +### Phase Dependencies + +- **Foundational (Phase 1)**: No dependencies — start immediately +- **US1 + US4 (Phase 2)**: Depends on Phase 1 completion — BLOCKS remaining stories +- **US2 (Phase 3)**: Depends on Phase 2 (uses InfrahubFilters + Jinja2Template wiring) +- **US3 (Phase 4)**: Depends on Phase 1 only (from_json/from_yaml need JinjaFilterError). Can start in parallel with Phase 2 if desired. +- **Polish (Phase 5)**: Depends on all user stories being complete + +### User Story Dependencies + +- **US4 + US1 (P1)**: Can start after Phase 1 — No dependencies on other stories. This is the MVP. +- **US2 (P2)**: Depends on US1 (reuses InfrahubFilters and Jinja2Template wiring from Phase 2) +- **US3 (P3)**: Depends on Phase 1 only for `from_json`/`from_yaml`. Chaining test (T011) depends on US1. + +### Within Each Phase + +- Tasks marked [P] can run in parallel (different files) +- Tests are included within each implementation task +- Core implementation before wiring/registration + +### Parallel Opportunities + +- T001 and T002 can run in parallel (different files: exceptions.py vs filters.py) +- T006 can run in parallel with T003/T004 (ObjectStore vs InfrahubFilters) +- T009 and T010 can run in parallel (from_json and from_yaml are independent) +- US3 Phase 4 (T009, T010) can start in parallel with Phase 2 after Phase 1 completes + +--- + +## Parallel Example: Phase 1 + +```text +# Launch both foundational tasks together (different files): +Task T001: "JinjaFilterError in infrahub_sdk/template/exceptions.py" +Task T002: "ExecutionContext + FilterDefinition in infrahub_sdk/template/filters.py" +``` + +## Parallel Example: Phase 4 + +```text +# Launch both parsing filters together (same file but independent functions): +Task T009: "from_json filter in infrahub_sdk/template/infrahub_filters.py" +Task T010: "from_yaml filter in infrahub_sdk/template/infrahub_filters.py" +``` + +--- + +## Implementation 
Strategy + +### MVP First (US1 + US4 Only) + +1. Complete Phase 1: Foundational (T001, T002) +2. Complete Phase 2: US1 + US4 (T003, T004, T005) +3. **STOP and VALIDATE**: artifact_content renders, validation blocks in CORE, allows in WORKER +4. This alone delivers the primary value proposition + +### Incremental Delivery + +1. Phase 1 → Foundation ready +2. Phase 2 → artifact_content + security gate → **MVP deployed** +3. Phase 3 → file_object_content extends to file objects +4. Phase 4 → from_json/from_yaml enable structured composition +5. Phase 5 → Documentation + server integration +6. Each phase adds value without breaking previous phases + +### Jira Task Mapping + +| Task | Jira | Phase | +| ---- | ---- | ----- | +| T001 | IFC-2367 | 1 | +| T002 | IFC-2368 | 1 | +| T003 | IFC-2371 | 2 | +| T004 | IFC-2372 | 2 | +| T005 | IFC-2375 + IFC-2376 (partial) | 2 | +| T006 | IFC-2373 | 3 | +| T007 | IFC-2374 | 3 | +| T008 | IFC-2375 + IFC-2376 (partial) | 3 | +| T009 | IFC-2369 | 4 | +| T010 | IFC-2370 | 4 | +| T011 | IFC-2376 (partial) | 4 | +| T012-T013 | — | 5 | +| T014 | IFC-2377 | 5 | +| T015 | IFC-2378 | 5 | +| T016 | IFC-2379 | 5 | + +--- + +## Notes + +- [P] tasks = different files, no dependencies +- [Story] label maps task to specific user story for traceability +- Tests are included in each implementation task (not separate) +- All error paths must produce actionable messages with filter name, cause, and remediation hint +- Commit after each task or logical group +- Stop at any checkpoint to validate story independently diff --git a/docs/_templates/sdk_template_reference.j2 b/docs/_templates/sdk_template_reference.j2 index b8a89c90..77828ca4 100644 --- a/docs/_templates/sdk_template_reference.j2 +++ b/docs/_templates/sdk_template_reference.j2 @@ -3,29 +3,117 @@ title: Python SDK Templating --- Filters can be used when defining [computed attributes](https://docs.infrahub.app/guides/computed-attributes) or [Jinja2 
Transforms](https://docs.infrahub.app/guides/jinja2-transform) within Infrahub. +## Execution contexts + +Filters are restricted based on the execution context in which a template is rendered: + +- **CORE** — Computed attributes evaluated inside the Infrahub API server. Only fully trusted filters (no I/O, no side effects) are allowed. +- **WORKER** — Jinja2 transforms executed on Prefect background workers. Trusted filters and Infrahub client-dependent filters are allowed. +- **LOCAL** — Local CLI rendering and unrestricted usage. All filters are allowed. + +The `validate()` method on `Jinja2Template` accepts an optional `context` parameter to enforce these restrictions: + +{% raw %}```python +from infrahub_sdk.template import Jinja2Template +from infrahub_sdk.template.filters import ExecutionContext + +template = Jinja2Template(template="{{ sid | artifact_content }}") + +# Raises JinjaTemplateOperationViolationError — blocked in CORE +template.validate(context=ExecutionContext.CORE) + +# Passes — allowed in WORKER +template.validate(context=ExecutionContext.WORKER) +``` +{% endraw %} + +For backward compatibility, `validate(restricted=True)` maps to `CORE` and `validate(restricted=False)` maps to `LOCAL`. + ## Builtin Jinja2 filters -The following filters are those that are [shipped with Jinja2](https://jinja.palletsprojects.com/en/stable/templates/#list-of-builtin-filters) and enabled within Infrahub. The trusted column indicates if the filter is allowed for use with Infrahub's computed attributes when the server is configured in strict mode. +The following filters are [shipped with Jinja2](https://jinja.palletsprojects.com/en/stable/templates/#list-of-builtin-filters) and enabled within Infrahub. 
-| Name | Trusted | -| ---- | ------- | +| Name | CORE | WORKER | LOCAL | +| ---- | ---- | ------ | ----- | {% for filter in builtin %} -| {{ filter.name }} | {% if filter.trusted %}✅{% else %}❌{% endif %} | +| {{ filter.name }} | {% if filter.core %}✅{% else %}❌{% endif %} | {% if filter.worker %}✅{% else %}❌{% endif %} | {% if filter.local %}✅{% else %}❌{% endif %} | {% endfor %} ## Netutils filters The following Jinja2 filters from Netutils are included within Infrahub. + -| Name | Trusted | -| ---- | ------- | +| Name | CORE | WORKER | LOCAL | +| ---- | ---- | ------ | ----- | {% for filter in netutils %} -| {{ filter.name }} | {% if filter.trusted %}✅{% else %}❌{% endif %} | +| {{ filter.name }} | {% if filter.core %}✅{% else %}❌{% endif %} | {% if filter.worker %}✅{% else %}❌{% endif %} | {% if filter.local %}✅{% else %}❌{% endif %} | +{% endfor %} + + +## Infrahub filters + +These filters are provided by the Infrahub SDK for artifact and file object content composition. + + +| Name | CORE | WORKER | LOCAL | +| ---- | ---- | ------ | ----- | +{% for filter in infrahub %} +| `{{ filter.name }}` | {% if filter.core %}✅{% else %}❌{% endif %} | {% if filter.worker %}✅{% else %}❌{% endif %} | {% if filter.local %}✅{% else %}❌{% endif %} | {% endfor %} +### Usage examples + +**Inline artifact content by `storage_id`:** + +```jinja2 +{% raw %}{{ artifact.node.storage_id.value | artifact_content }} +{% endraw %}``` + +**Inline file object content:** + +```jinja2 +{% raw %}{# By storage_id #} +{{ file_object.storage_id.value | file_object_content }} + +{# By node UUID #} +{{ file_object.id | file_object_content_by_id }} + +{# By Human-Friendly ID #} +{{ hfid_components | file_object_content_by_hfid(kind="NetworkCircuitContract") }} +{% endraw %}``` + +**Parse structured content with chaining:** + +```jinja2 +{% raw %}{# JSON artifact → access parsed fields #} +{% set config = artifact.node.storage_id.value | artifact_content | from_json %} +interface {{ 
config.interface_name }} + ip address {{ config.ip_address }} + +{# YAML artifact → iterate parsed data #} +{% set config = artifact.node.storage_id.value | artifact_content | from_yaml %} +{% for route in config.static_routes %} +ip route {{ route.prefix }} {{ route.next_hop }} +{% endfor %} +{% endraw %}``` + +Client-dependent filters (`artifact_content`, `file_object_content`, `file_object_content_by_id`, `file_object_content_by_hfid`) require an `InfrahubClient` to be passed to `Jinja2Template`: + +```python +from infrahub_sdk.template import Jinja2Template + +# At construction time +template = Jinja2Template(template=my_template, client=client) + +# Or via deferred injection +template = Jinja2Template(template=my_template) +template.set_client(client) +``` + ## Known issues ### Unable to combine the map and sort filters (https://github.com/pallets/jinja/issues/2081) diff --git a/docs/docs/infrahubctl/infrahubctl-telemetry.mdx b/docs/docs/infrahubctl/infrahubctl-telemetry.mdx new file mode 100644 index 00000000..69cffa8f --- /dev/null +++ b/docs/docs/infrahubctl/infrahubctl-telemetry.mdx @@ -0,0 +1,57 @@ +# `infrahubctl telemetry` + +**Usage**: + +```console +$ infrahubctl telemetry [OPTIONS] COMMAND [ARGS]... +``` + +**Options**: + +* `--install-completion`: Install completion for the current shell. +* `--show-completion`: Show completion for the current shell, to copy it or customize the installation. +* `--help`: Show this message and exit. + +**Commands**: + +* `export`: Export telemetry snapshots to a JSON file. +* `list`: List telemetry snapshots with summary... + +## `infrahubctl telemetry export` + +Export telemetry snapshots to a JSON file. + +Pages through the API automatically so that all matching snapshots are exported, +not just the first page. 
+ +**Usage**: + +```console +$ infrahubctl telemetry export [OPTIONS] +``` + +**Options**: + +* `--output TEXT`: Output file path [default: telemetry-export.json] +* `--start-date [%Y-%m-%d|%Y-%m-%dT%H:%M:%S|%Y-%m-%dT%H:%M:%S%z]`: Start date filter (ISO 8601) +* `--end-date [%Y-%m-%d|%Y-%m-%dT%H:%M:%S|%Y-%m-%dT%H:%M:%S%z]`: End date filter (ISO 8601) +* `--config-file TEXT`: [env var: INFRAHUBCTL_CONFIG; default: infrahubctl.toml] +* `--help`: Show this message and exit. + +## `infrahubctl telemetry list` + +List telemetry snapshots with summary information. + +**Usage**: + +```console +$ infrahubctl telemetry list [OPTIONS] +``` + +**Options**: + +* `--start-date [%Y-%m-%d|%Y-%m-%dT%H:%M:%S|%Y-%m-%dT%H:%M:%S%z]`: Start date filter (ISO 8601) +* `--end-date [%Y-%m-%d|%Y-%m-%dT%H:%M:%S|%Y-%m-%dT%H:%M:%S%z]`: End date filter (ISO 8601) +* `--limit INTEGER`: Maximum number of results [default: 50] +* `--config-file TEXT`: [env var: INFRAHUBCTL_CONFIG; default: infrahubctl.toml] +* `--help`: Show this message and exit. diff --git a/docs/docs/python-sdk/reference/templating.mdx b/docs/docs/python-sdk/reference/templating.mdx index b74f5f2f..191d249e 100644 --- a/docs/docs/python-sdk/reference/templating.mdx +++ b/docs/docs/python-sdk/reference/templating.mdx @@ -3,155 +3,245 @@ title: Python SDK Templating --- Filters can be used when defining [computed attributes](https://docs.infrahub.app/guides/computed-attributes) or [Jinja2 Transforms](https://docs.infrahub.app/guides/jinja2-transform) within Infrahub. +## Execution contexts + +Filters are restricted based on the execution context in which a template is rendered: + +- **CORE** — Computed attributes evaluated inside the Infrahub API server. Only fully trusted filters (no I/O, no side effects) are allowed. +- **WORKER** — Jinja2 transforms executed on Prefect background workers. Trusted filters and Infrahub client-dependent filters are allowed. +- **LOCAL** — Local CLI rendering and unrestricted usage. 
All filters are allowed. + +The `validate()` method on `Jinja2Template` accepts an optional `context` parameter to enforce these restrictions: + +```python +from infrahub_sdk.template import Jinja2Template +from infrahub_sdk.template.filters import ExecutionContext + +template = Jinja2Template(template="{{ sid | artifact_content }}") + +# Raises JinjaTemplateOperationViolationError — blocked in CORE +template.validate(context=ExecutionContext.CORE) + +# Passes — allowed in WORKER +template.validate(context=ExecutionContext.WORKER) +``` + +For backward compatibility, `validate(restricted=True)` maps to `CORE` and `validate(restricted=False)` maps to `LOCAL`. + ## Builtin Jinja2 filters -The following filters are those that are [shipped with Jinja2](https://jinja.palletsprojects.com/en/stable/templates/#list-of-builtin-filters) and enabled within Infrahub. The trusted column indicates if the filter is allowed for use with Infrahub's computed attributes when the server is configured in strict mode. +The following filters are [shipped with Jinja2](https://jinja.palletsprojects.com/en/stable/templates/#list-of-builtin-filters) and enabled within Infrahub. 
-| Name | Trusted | -| ---- | ------- | -| abs | ✅ | -| attr | ❌ | -| batch | ❌ | -| capitalize | ✅ | -| center | ✅ | -| count | ✅ | -| d | ✅ | -| default | ✅ | -| dictsort | ❌ | -| e | ✅ | -| escape | ✅ | -| filesizeformat | ✅ | -| first | ✅ | -| float | ✅ | -| forceescape | ✅ | -| format | ✅ | -| groupby | ❌ | -| indent | ✅ | -| int | ✅ | -| items | ❌ | -| join | ✅ | -| last | ✅ | -| length | ✅ | -| list | ✅ | -| lower | ✅ | -| map | ❌ | -| max | ✅ | -| min | ✅ | -| pprint | ❌ | -| random | ❌ | -| reject | ❌ | -| rejectattr | ❌ | -| replace | ✅ | -| reverse | ✅ | -| round | ✅ | -| safe | ❌ | -| select | ❌ | -| selectattr | ❌ | -| slice | ✅ | -| sort | ❌ | -| string | ✅ | -| striptags | ✅ | -| sum | ✅ | -| title | ✅ | -| tojson | ❌ | -| trim | ✅ | -| truncate | ✅ | -| unique | ❌ | -| upper | ✅ | -| urlencode | ✅ | -| urlize | ❌ | -| wordcount | ✅ | -| wordwrap | ✅ | -| xmlattr | ❌ | +| Name | CORE | WORKER | LOCAL | +| ---- | ---- | ------ | ----- | +| abs | ✅ | ✅ | ✅ | +| attr | ❌ | ❌ | ✅ | +| batch | ❌ | ❌ | ✅ | +| capitalize | ✅ | ✅ | ✅ | +| center | ✅ | ✅ | ✅ | +| count | ✅ | ✅ | ✅ | +| d | ✅ | ✅ | ✅ | +| default | ✅ | ✅ | ✅ | +| dictsort | ❌ | ❌ | ✅ | +| e | ✅ | ✅ | ✅ | +| escape | ✅ | ✅ | ✅ | +| filesizeformat | ✅ | ✅ | ✅ | +| first | ✅ | ✅ | ✅ | +| float | ✅ | ✅ | ✅ | +| forceescape | ✅ | ✅ | ✅ | +| format | ✅ | ✅ | ✅ | +| groupby | ❌ | ❌ | ✅ | +| indent | ✅ | ✅ | ✅ | +| int | ✅ | ✅ | ✅ | +| items | ❌ | ❌ | ✅ | +| join | ✅ | ✅ | ✅ | +| last | ✅ | ✅ | ✅ | +| length | ✅ | ✅ | ✅ | +| list | ✅ | ✅ | ✅ | +| lower | ✅ | ✅ | ✅ | +| map | ❌ | ❌ | ✅ | +| max | ✅ | ✅ | ✅ | +| min | ✅ | ✅ | ✅ | +| pprint | ❌ | ❌ | ✅ | +| random | ❌ | ❌ | ✅ | +| reject | ❌ | ❌ | ✅ | +| rejectattr | ❌ | ❌ | ✅ | +| replace | ✅ | ✅ | ✅ | +| reverse | ✅ | ✅ | ✅ | +| round | ✅ | ✅ | ✅ | +| safe | ❌ | ❌ | ✅ | +| select | ❌ | ❌ | ✅ | +| selectattr | ❌ | ❌ | ✅ | +| slice | ✅ | ✅ | ✅ | +| sort | ❌ | ❌ | ✅ | +| string | ✅ | ✅ | ✅ | +| striptags | ✅ | ✅ | ✅ | +| sum | ✅ | ✅ | ✅ | +| title | ✅ | ✅ 
| ✅ | +| tojson | ❌ | ❌ | ✅ | +| trim | ✅ | ✅ | ✅ | +| truncate | ✅ | ✅ | ✅ | +| unique | ❌ | ❌ | ✅ | +| upper | ✅ | ✅ | ✅ | +| urlencode | ✅ | ✅ | ✅ | +| urlize | ❌ | ❌ | ✅ | +| wordcount | ✅ | ✅ | ✅ | +| wordwrap | ✅ | ✅ | ✅ | +| xmlattr | ❌ | ❌ | ✅ | ## Netutils filters The following Jinja2 filters from Netutils are included within Infrahub. + -| Name | Trusted | -| ---- | ------- | -| abbreviated_interface_name | ✅ | -| abbreviated_interface_name_list | ✅ | -| asn_to_int | ✅ | -| bits_to_name | ✅ | -| bytes_to_name | ✅ | -| canonical_interface_name | ✅ | -| canonical_interface_name_list | ✅ | -| cidr_to_netmask | ✅ | -| cidr_to_netmaskv6 | ✅ | -| clean_config | ✅ | -| compare_version_loose | ✅ | -| compare_version_strict | ✅ | -| config_compliance | ✅ | -| config_section_not_parsed | ✅ | -| delimiter_change | ✅ | -| diff_network_config | ✅ | -| feature_compliance | ✅ | -| find_unordered_cfg_lines | ✅ | -| fqdn_to_ip | ❌ | -| get_all_host | ❌ | -| get_broadcast_address | ✅ | -| get_first_usable | ✅ | -| get_ips_sorted | ✅ | -| get_nist_urls | ✅ | -| get_nist_vendor_platform_urls | ✅ | -| get_oui | ✅ | -| get_peer_ip | ✅ | -| get_range_ips | ✅ | -| get_upgrade_path | ✅ | -| get_usable_range | ✅ | -| hash_data | ✅ | -| int_to_asdot | ✅ | -| interface_range_compress | ✅ | -| interface_range_expansion | ✅ | -| ip_addition | ✅ | -| ip_subtract | ✅ | -| ip_to_bin | ✅ | -| ip_to_hex | ✅ | -| ipaddress_address | ✅ | -| ipaddress_interface | ✅ | -| ipaddress_network | ✅ | -| is_classful | ✅ | -| is_fqdn_resolvable | ❌ | -| is_ip | ✅ | -| is_ip_range | ✅ | -| is_ip_within | ✅ | -| is_netmask | ✅ | -| is_network | ✅ | -| is_reversible_wildcardmask | ✅ | -| is_valid_mac | ✅ | -| longest_prefix_match | ✅ | -| mac_normalize | ✅ | -| mac_to_format | ✅ | -| mac_to_int | ✅ | -| mac_type | ✅ | -| name_to_bits | ✅ | -| name_to_bytes | ✅ | -| name_to_name | ✅ | -| netmask_to_cidr | ✅ | -| netmask_to_wildcardmask | ✅ | -| normalise_delimiter_caret_c | ✅ | -| 
paloalto_panos_brace_to_set | ✅ | -| paloalto_panos_clean_newlines | ✅ | -| regex_findall | ❌ | -| regex_match | ❌ | -| regex_search | ❌ | -| regex_split | ❌ | -| regex_sub | ❌ | -| sanitize_config | ✅ | -| section_config | ✅ | -| sort_interface_list | ✅ | -| split_interface | ✅ | -| uptime_seconds_to_string | ✅ | -| uptime_string_to_seconds | ✅ | -| version_metadata | ✅ | -| vlanconfig_to_list | ✅ | -| vlanlist_to_config | ✅ | -| wildcardmask_to_netmask | ✅ | +| Name | CORE | WORKER | LOCAL | +| ---- | ---- | ------ | ----- | +| abbreviated_interface_name | ✅ | ✅ | ✅ | +| abbreviated_interface_name_list | ✅ | ✅ | ✅ | +| asn_to_int | ✅ | ✅ | ✅ | +| bits_to_name | ✅ | ✅ | ✅ | +| bytes_to_name | ✅ | ✅ | ✅ | +| canonical_interface_name | ✅ | ✅ | ✅ | +| canonical_interface_name_list | ✅ | ✅ | ✅ | +| cidr_to_netmask | ✅ | ✅ | ✅ | +| cidr_to_netmaskv6 | ✅ | ✅ | ✅ | +| clean_config | ✅ | ✅ | ✅ | +| compare_version_loose | ✅ | ✅ | ✅ | +| compare_version_strict | ✅ | ✅ | ✅ | +| config_compliance | ✅ | ✅ | ✅ | +| config_section_not_parsed | ✅ | ✅ | ✅ | +| delimiter_change | ✅ | ✅ | ✅ | +| diff_network_config | ✅ | ✅ | ✅ | +| feature_compliance | ✅ | ✅ | ✅ | +| find_unordered_cfg_lines | ✅ | ✅ | ✅ | +| fqdn_to_ip | ❌ | ❌ | ✅ | +| get_all_host | ❌ | ❌ | ✅ | +| get_broadcast_address | ✅ | ✅ | ✅ | +| get_first_usable | ✅ | ✅ | ✅ | +| get_ips_sorted | ✅ | ✅ | ✅ | +| get_nist_urls | ✅ | ✅ | ✅ | +| get_nist_vendor_platform_urls | ✅ | ✅ | ✅ | +| get_oui | ✅ | ✅ | ✅ | +| get_peer_ip | ✅ | ✅ | ✅ | +| get_range_ips | ✅ | ✅ | ✅ | +| get_upgrade_path | ✅ | ✅ | ✅ | +| get_usable_range | ✅ | ✅ | ✅ | +| hash_data | ✅ | ✅ | ✅ | +| int_to_asdot | ✅ | ✅ | ✅ | +| interface_range_compress | ✅ | ✅ | ✅ | +| interface_range_expansion | ✅ | ✅ | ✅ | +| ip_addition | ✅ | ✅ | ✅ | +| ip_subtract | ✅ | ✅ | ✅ | +| ip_to_bin | ✅ | ✅ | ✅ | +| ip_to_hex | ✅ | ✅ | ✅ | +| ipaddress_address | ✅ | ✅ | ✅ | +| ipaddress_interface | ✅ | ✅ | ✅ | +| ipaddress_network | ✅ | ✅ | ✅ | +| is_classful | ✅ | ✅ | ✅ | +| 
is_fqdn_resolvable | ❌ | ❌ | ✅ | +| is_ip | ✅ | ✅ | ✅ | +| is_ip_range | ✅ | ✅ | ✅ | +| is_ip_within | ✅ | ✅ | ✅ | +| is_netmask | ✅ | ✅ | ✅ | +| is_network | ✅ | ✅ | ✅ | +| is_reversible_wildcardmask | ✅ | ✅ | ✅ | +| is_valid_mac | ✅ | ✅ | ✅ | +| longest_prefix_match | ✅ | ✅ | ✅ | +| mac_normalize | ✅ | ✅ | ✅ | +| mac_to_format | ✅ | ✅ | ✅ | +| mac_to_int | ✅ | ✅ | ✅ | +| mac_type | ✅ | ✅ | ✅ | +| name_to_bits | ✅ | ✅ | ✅ | +| name_to_bytes | ✅ | ✅ | ✅ | +| name_to_name | ✅ | ✅ | ✅ | +| netmask_to_cidr | ✅ | ✅ | ✅ | +| netmask_to_wildcardmask | ✅ | ✅ | ✅ | +| normalise_delimiter_caret_c | ✅ | ✅ | ✅ | +| paloalto_panos_brace_to_set | ✅ | ✅ | ✅ | +| paloalto_panos_clean_newlines | ✅ | ✅ | ✅ | +| regex_findall | ❌ | ❌ | ✅ | +| regex_match | ❌ | ❌ | ✅ | +| regex_search | ❌ | ❌ | ✅ | +| regex_split | ❌ | ❌ | ✅ | +| regex_sub | ❌ | ❌ | ✅ | +| sanitize_config | ✅ | ✅ | ✅ | +| section_config | ✅ | ✅ | ✅ | +| sort_interface_list | ✅ | ✅ | ✅ | +| split_interface | ✅ | ✅ | ✅ | +| uptime_seconds_to_string | ✅ | ✅ | ✅ | +| uptime_string_to_seconds | ✅ | ✅ | ✅ | +| version_metadata | ✅ | ✅ | ✅ | +| vlanconfig_to_list | ✅ | ✅ | ✅ | +| vlanlist_to_config | ✅ | ✅ | ✅ | +| wildcardmask_to_netmask | ✅ | ✅ | ✅ | +## Infrahub filters + +These filters are provided by the Infrahub SDK for artifact and file object content composition. 
+ + +| Name | CORE | WORKER | LOCAL | +| ---- | ---- | ------ | ----- | +| `artifact_content` | ❌ | ✅ | ❌ | +| `file_object_content` | ❌ | ✅ | ❌ | +| `file_object_content_by_hfid` | ❌ | ✅ | ❌ | +| `file_object_content_by_id` | ❌ | ✅ | ❌ | +| `from_json` | ✅ | ✅ | ✅ | +| `from_yaml` | ✅ | ✅ | ✅ | + + +### Usage examples + +**Inline artifact content by `storage_id`:** + +```jinja2 +{{ artifact.node.storage_id.value | artifact_content }} +``` + +**Inline file object content:** + +```jinja2 +{# By storage_id #} +{{ file_object.storage_id.value | file_object_content }} + +{# By node UUID #} +{{ file_object.id | file_object_content_by_id }} + +{# By Human-Friendly ID #} +{{ hfid_components | file_object_content_by_hfid(kind="NetworkCircuitContract") }} +``` + +**Parse structured content with chaining:** + +```jinja2 +{# JSON artifact → access parsed fields #} +{% set config = artifact.node.storage_id.value | artifact_content | from_json %} +interface {{ config.interface_name }} + ip address {{ config.ip_address }} + +{# YAML artifact → iterate parsed data #} +{% set config = artifact.node.storage_id.value | artifact_content | from_yaml %} +{% for route in config.static_routes %} +ip route {{ route.prefix }} {{ route.next_hop }} +{% endfor %} +``` + +Client-dependent filters (`artifact_content`, `file_object_content`, `file_object_content_by_id`, `file_object_content_by_hfid`) require an `InfrahubClient` to be passed to `Jinja2Template`: + +```python +from infrahub_sdk.template import Jinja2Template + +# At construction time +template = Jinja2Template(template=my_template, client=client) + +# Or via deferred injection +template = Jinja2Template(template=my_template) +template.set_client(client) +``` + ## Known issues ### Unable to combine the map and sort filters (https://github.com/pallets/jinja/issues/2081) diff --git a/docs/docs/python-sdk/sdk_ref/infrahub_sdk/client.mdx b/docs/docs/python-sdk/sdk_ref/infrahub_sdk/client.mdx index ba9ea4dc..01e0f9f6 100644 --- 
a/docs/docs/python-sdk/sdk_ref/infrahub_sdk/client.mdx +++ b/docs/docs/python-sdk/sdk_ref/infrahub_sdk/client.mdx @@ -222,7 +222,7 @@ Return a cloned version of the client using the same configuration #### `execute_graphql` ```python -execute_graphql(self, query: str, variables: dict | None = None, branch_name: str | None = None, at: str | Timestamp | None = None, timeout: int | None = None, raise_for_error: bool | None = None, tracker: str | None = None) -> dict +execute_graphql(self, query: str, variables: dict | None = None, branch_name: str | None = None, at: str | Timestamp | None = None, timeout: int | None = None, tracker: str | None = None) -> dict ``` Execute a GraphQL query (or mutation). @@ -235,9 +235,6 @@ If retry_on_failure is True, the query will retry until the server becomes reach - `branch_name`: Name of the branch on which the query will be executed. Defaults to None. - `at`: Time when the query should be executed. Defaults to None. - `timeout`: Timeout in second for the query. Defaults to None. -- `raise_for_error`: Deprecated. Controls only HTTP status handling. -- None (default) or True\: HTTP errors raise via resp.raise_for_status(). -- False\: HTTP errors are not automatically raised. Defaults to None. 
**Raises:** @@ -262,7 +259,7 @@ login(self, refresh: bool = False) -> None #### `query_gql_query` ```python -query_gql_query(self, name: str, variables: dict | None = None, update_group: bool = False, subscribers: list[str] | None = None, params: dict | None = None, branch_name: str | None = None, at: str | None = None, timeout: int | None = None, tracker: str | None = None, raise_for_error: bool | None = None) -> dict +query_gql_query(self, name: str, variables: dict | None = None, update_group: bool = False, subscribers: list[str] | None = None, params: dict | None = None, branch_name: str | None = None, at: str | None = None, timeout: int | None = None, tracker: str | None = None) -> dict ``` #### `create_diff` @@ -274,7 +271,7 @@ create_diff(self, branch: str, name: str, from_time: datetime, to_time: datetime #### `get_diff_summary` ```python -get_diff_summary(self, branch: str, name: str | None = None, from_time: datetime | None = None, to_time: datetime | None = None, timeout: int | None = None, tracker: str | None = None, raise_for_error: bool | None = None) -> list[NodeDiff] +get_diff_summary(self, branch: str, name: str | None = None, from_time: datetime | None = None, to_time: datetime | None = None, timeout: int | None = None, tracker: str | None = None) -> list[NodeDiff] ``` #### `get_diff_tree` @@ -290,46 +287,22 @@ Returns None if no diff exists. 
#### `allocate_next_ip_address` ```python -allocate_next_ip_address(self, resource_pool: CoreNode, kind: type[SchemaType], identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[True] = True) -> SchemaType +allocate_next_ip_address(self, resource_pool: CoreNode, kind: type[SchemaType], identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ...) -> SchemaType | None ```
-Show 6 other overloads - -#### `allocate_next_ip_address` - -```python -allocate_next_ip_address(self, resource_pool: CoreNode, kind: type[SchemaType], identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[False] = False) -> SchemaType | None -``` - -#### `allocate_next_ip_address` - -```python -allocate_next_ip_address(self, resource_pool: CoreNode, kind: type[SchemaType], identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: bool | None = ...) -> SchemaType -``` - -#### `allocate_next_ip_address` - -```python -allocate_next_ip_address(self, resource_pool: CoreNode, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[True] = True) -> CoreNode -``` - -#### `allocate_next_ip_address` - -```python -allocate_next_ip_address(self, resource_pool: CoreNode, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[False] = False) -> CoreNode | None -``` +Show 2 other overloads #### `allocate_next_ip_address` ```python -allocate_next_ip_address(self, resource_pool: CoreNode, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: bool | None = ...) 
-> CoreNode | None +allocate_next_ip_address(self, resource_pool: CoreNode, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ...) -> CoreNode | None ``` #### `allocate_next_ip_address` ```python -allocate_next_ip_address(self, resource_pool: CoreNode, kind: type[SchemaType] | None = None, identifier: str | None = None, prefix_length: int | None = None, address_type: str | None = None, data: dict[str, Any] | None = None, branch: str | None = None, timeout: int | None = None, tracker: str | None = None, raise_for_error: bool | None = None) -> CoreNode | SchemaType | None +allocate_next_ip_address(self, resource_pool: CoreNode, kind: type[SchemaType] | None = None, identifier: str | None = None, prefix_length: int | None = None, address_type: str | None = None, data: dict[str, Any] | None = None, branch: str | None = None, timeout: int | None = None, tracker: str | None = None) -> CoreNode | SchemaType | None ``` Allocate a new IP address by using the provided resource pool. @@ -344,7 +317,6 @@ Allocate a new IP address by using the provided resource pool. - `branch`: Name of the branch to allocate from. Defaults to default_branch. - `timeout`: Flag to indicate whether to populate the store with the retrieved nodes. - `tracker`: The offset for pagination. -- `raise_for_error`: Deprecated, raise an error if the HTTP status is not 2XX. Returns: InfrahubNode: Node corresponding to the allocated resource. 
@@ -353,46 +325,22 @@ Returns: #### `allocate_next_ip_prefix` ```python -allocate_next_ip_prefix(self, resource_pool: CoreNode, kind: type[SchemaType], identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[True] = True) -> SchemaType +allocate_next_ip_prefix(self, resource_pool: CoreNode, kind: type[SchemaType], identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ...) -> SchemaType | None ```
-Show 6 other overloads - -#### `allocate_next_ip_prefix` - -```python -allocate_next_ip_prefix(self, resource_pool: CoreNode, kind: type[SchemaType], identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[False] = False) -> SchemaType | None -``` - -#### `allocate_next_ip_prefix` - -```python -allocate_next_ip_prefix(self, resource_pool: CoreNode, kind: type[SchemaType], identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: bool | None = ...) -> SchemaType -``` - -#### `allocate_next_ip_prefix` - -```python -allocate_next_ip_prefix(self, resource_pool: CoreNode, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[True] = True) -> CoreNode -``` - -#### `allocate_next_ip_prefix` - -```python -allocate_next_ip_prefix(self, resource_pool: CoreNode, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[False] = False) -> CoreNode | None -``` +Show 2 other overloads #### `allocate_next_ip_prefix` ```python -allocate_next_ip_prefix(self, resource_pool: CoreNode, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: 
dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: bool | None = ...) -> CoreNode | None +allocate_next_ip_prefix(self, resource_pool: CoreNode, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ...) -> CoreNode | None ``` #### `allocate_next_ip_prefix` ```python -allocate_next_ip_prefix(self, resource_pool: CoreNode, kind: type[SchemaType] | None = None, identifier: str | None = None, prefix_length: int | None = None, member_type: str | None = None, prefix_type: str | None = None, data: dict[str, Any] | None = None, branch: str | None = None, timeout: int | None = None, tracker: str | None = None, raise_for_error: bool | None = None) -> CoreNode | SchemaType | None +allocate_next_ip_prefix(self, resource_pool: CoreNode, kind: type[SchemaType] | None = None, identifier: str | None = None, prefix_length: int | None = None, member_type: str | None = None, prefix_type: str | None = None, data: dict[str, Any] | None = None, branch: str | None = None, timeout: int | None = None, tracker: str | None = None) -> CoreNode | SchemaType | None ``` Allocate a new IP prefix by using the provided resource pool. @@ -408,7 +356,6 @@ Allocate a new IP prefix by using the provided resource pool. - `branch`: Name of the branch to allocate from. Defaults to default_branch. - `timeout`: Flag to indicate whether to populate the store with the retrieved nodes. - `tracker`: The offset for pagination. -- `raise_for_error`: Deprecated, raise an error if the HTTP status is not 2XX. Returns: InfrahubNode: Node corresponding to the allocated resource. 
@@ -556,7 +503,7 @@ Return a cloned version of the client using the same configuration #### `execute_graphql` ```python -execute_graphql(self, query: str, variables: dict | None = None, branch_name: str | None = None, at: str | Timestamp | None = None, timeout: int | None = None, raise_for_error: bool | None = None, tracker: str | None = None) -> dict +execute_graphql(self, query: str, variables: dict | None = None, branch_name: str | None = None, at: str | Timestamp | None = None, timeout: int | None = None, tracker: str | None = None) -> dict ``` Execute a GraphQL query (or mutation). @@ -569,10 +516,6 @@ If retry_on_failure is True, the query will retry until the server becomes reach - `branch_name`: Name of the branch on which the query will be executed. Defaults to None. - `at`: Time when the query should be executed. Defaults to None. - `timeout`: Timeout in second for the query. Defaults to None. -- `raise_for_error`: Deprecated. Controls only HTTP status handling. -- None (default) or True\: HTTP errors raise via `resp.raise_for_status()`. -- False\: HTTP errors are not automatically raised. -GraphQL errors always raise `GraphQLError`. Defaults to None. 
**Raises:** @@ -702,7 +645,7 @@ get_list_repositories(self, branches: dict[str, BranchData] | None = None, kind: #### `query_gql_query` ```python -query_gql_query(self, name: str, variables: dict | None = None, update_group: bool = False, subscribers: list[str] | None = None, params: dict | None = None, branch_name: str | None = None, at: str | None = None, timeout: int | None = None, tracker: str | None = None, raise_for_error: bool | None = None) -> dict +query_gql_query(self, name: str, variables: dict | None = None, update_group: bool = False, subscribers: list[str] | None = None, params: dict | None = None, branch_name: str | None = None, at: str | None = None, timeout: int | None = None, tracker: str | None = None) -> dict ``` #### `create_diff` @@ -714,7 +657,7 @@ create_diff(self, branch: str, name: str, from_time: datetime, to_time: datetime #### `get_diff_summary` ```python -get_diff_summary(self, branch: str, name: str | None = None, from_time: datetime | None = None, to_time: datetime | None = None, timeout: int | None = None, tracker: str | None = None, raise_for_error: bool | None = None) -> list[NodeDiff] +get_diff_summary(self, branch: str, name: str | None = None, from_time: datetime | None = None, to_time: datetime | None = None, timeout: int | None = None, tracker: str | None = None) -> list[NodeDiff] ``` #### `get_diff_tree` @@ -730,46 +673,22 @@ Returns None if no diff exists. 
#### `allocate_next_ip_address` ```python -allocate_next_ip_address(self, resource_pool: CoreNodeSync, kind: type[SchemaTypeSync], identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[True] = True) -> SchemaTypeSync +allocate_next_ip_address(self, resource_pool: CoreNodeSync, kind: type[SchemaTypeSync], identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ...) -> SchemaTypeSync | None ```
-Show 6 other overloads - -#### `allocate_next_ip_address` - -```python -allocate_next_ip_address(self, resource_pool: CoreNodeSync, kind: type[SchemaTypeSync], identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[False] = False) -> SchemaTypeSync | None -``` - -#### `allocate_next_ip_address` - -```python -allocate_next_ip_address(self, resource_pool: CoreNodeSync, kind: type[SchemaTypeSync], identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: bool | None = ...) -> SchemaTypeSync -``` - -#### `allocate_next_ip_address` - -```python -allocate_next_ip_address(self, resource_pool: CoreNodeSync, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[True] = True) -> CoreNodeSync -``` - -#### `allocate_next_ip_address` - -```python -allocate_next_ip_address(self, resource_pool: CoreNodeSync, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[False] = False) -> CoreNodeSync | None -``` +Show 2 other overloads #### `allocate_next_ip_address` ```python -allocate_next_ip_address(self, resource_pool: CoreNodeSync, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None 
= ..., raise_for_error: bool | None = ...) -> CoreNodeSync | None +allocate_next_ip_address(self, resource_pool: CoreNodeSync, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ...) -> CoreNodeSync | None ``` #### `allocate_next_ip_address` ```python -allocate_next_ip_address(self, resource_pool: CoreNodeSync, kind: type[SchemaTypeSync] | None = None, identifier: str | None = None, prefix_length: int | None = None, address_type: str | None = None, data: dict[str, Any] | None = None, branch: str | None = None, timeout: int | None = None, tracker: str | None = None, raise_for_error: bool | None = None) -> CoreNodeSync | SchemaTypeSync | None +allocate_next_ip_address(self, resource_pool: CoreNodeSync, kind: type[SchemaTypeSync] | None = None, identifier: str | None = None, prefix_length: int | None = None, address_type: str | None = None, data: dict[str, Any] | None = None, branch: str | None = None, timeout: int | None = None, tracker: str | None = None) -> CoreNodeSync | SchemaTypeSync | None ``` Allocate a new IP address by using the provided resource pool. @@ -784,7 +703,6 @@ Allocate a new IP address by using the provided resource pool. - `branch`: Name of the branch to allocate from. Defaults to default_branch. - `timeout`: Flag to indicate whether to populate the store with the retrieved nodes. - `tracker`: The offset for pagination. -- `raise_for_error`: The limit for pagination. Returns: InfrahubNodeSync: Node corresponding to the allocated resource. 
@@ -793,46 +711,22 @@ Returns: #### `allocate_next_ip_prefix` ```python -allocate_next_ip_prefix(self, resource_pool: CoreNodeSync, kind: type[SchemaTypeSync], identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[True] = True) -> SchemaTypeSync +allocate_next_ip_prefix(self, resource_pool: CoreNodeSync, kind: type[SchemaTypeSync], identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ...) -> SchemaTypeSync | None ```
-Show 6 other overloads - -#### `allocate_next_ip_prefix` - -```python -allocate_next_ip_prefix(self, resource_pool: CoreNodeSync, kind: type[SchemaTypeSync], identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[False] = False) -> SchemaTypeSync | None -``` - -#### `allocate_next_ip_prefix` - -```python -allocate_next_ip_prefix(self, resource_pool: CoreNodeSync, kind: type[SchemaTypeSync], identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: bool | None = ...) -> SchemaTypeSync -``` - -#### `allocate_next_ip_prefix` - -```python -allocate_next_ip_prefix(self, resource_pool: CoreNodeSync, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[True] = True) -> CoreNodeSync -``` - -#### `allocate_next_ip_prefix` - -```python -allocate_next_ip_prefix(self, resource_pool: CoreNodeSync, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[False] = False) -> CoreNodeSync | None -``` +Show 2 other overloads #### `allocate_next_ip_prefix` ```python -allocate_next_ip_prefix(self, resource_pool: CoreNodeSync, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., 
prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: bool | None = ...) -> CoreNodeSync | None +allocate_next_ip_prefix(self, resource_pool: CoreNodeSync, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ...) -> CoreNodeSync | None ``` #### `allocate_next_ip_prefix` ```python -allocate_next_ip_prefix(self, resource_pool: CoreNodeSync, kind: type[SchemaTypeSync] | None = None, identifier: str | None = None, prefix_length: int | None = None, member_type: str | None = None, prefix_type: str | None = None, data: dict[str, Any] | None = None, branch: str | None = None, timeout: int | None = None, tracker: str | None = None, raise_for_error: bool | None = None) -> CoreNodeSync | SchemaTypeSync | None +allocate_next_ip_prefix(self, resource_pool: CoreNodeSync, kind: type[SchemaTypeSync] | None = None, identifier: str | None = None, prefix_length: int | None = None, member_type: str | None = None, prefix_type: str | None = None, data: dict[str, Any] | None = None, branch: str | None = None, timeout: int | None = None, tracker: str | None = None) -> CoreNodeSync | SchemaTypeSync | None ``` Allocate a new IP prefix by using the provided resource pool. @@ -848,7 +742,6 @@ Allocate a new IP prefix by using the provided resource pool. - `branch`: Name of the branch to allocate from. Defaults to default_branch. - `timeout`: Flag to indicate whether to populate the store with the retrieved nodes. - `tracker`: The offset for pagination. -- `raise_for_error`: The limit for pagination. Returns: InfrahubNodeSync: Node corresponding to the allocated resource. 
@@ -934,9 +827,3 @@ handle_relogin(func: Callable[..., Coroutine[Any, Any, httpx.Response]]) -> Call ```python handle_relogin_sync(func: Callable[..., httpx.Response]) -> Callable[..., httpx.Response] ``` - -### `raise_for_error_deprecation_warning` - -```python -raise_for_error_deprecation_warning(value: bool | None) -> None -``` diff --git a/infrahub_sdk/client.py b/infrahub_sdk/client.py index bd041bf3..ab8fb659 100644 --- a/infrahub_sdk/client.py +++ b/infrahub_sdk/client.py @@ -4,7 +4,6 @@ import copy import logging import time -import warnings from collections.abc import AsyncIterator, Callable, Coroutine, Iterator, Mapping, MutableMapping from contextlib import asynccontextmanager, contextmanager from datetime import datetime @@ -109,15 +108,6 @@ def wrapper(client: InfrahubClientSync, *args: Any, **kwargs: Any) -> httpx.Resp return wrapper -def raise_for_error_deprecation_warning(value: bool | None) -> None: - if value is not None: - warnings.warn( - "Using `raise_for_error` is deprecated, use `try/except` to handle errors.", - DeprecationWarning, - stacklevel=1, - ) - - class BaseClient: """Base class for InfrahubClient and InfrahubClientSync""" @@ -915,7 +905,6 @@ async def execute_graphql( branch_name: str | None = None, at: str | Timestamp | None = None, timeout: int | None = None, - raise_for_error: bool | None = None, tracker: str | None = None, ) -> dict: """Execute a GraphQL query (or mutation). @@ -927,9 +916,6 @@ async def execute_graphql( branch_name (str, optional): Name of the branch on which the query will be executed. Defaults to None. at (str, optional): Time when the query should be executed. Defaults to None. timeout (int, optional): Timeout in second for the query. Defaults to None. - raise_for_error (bool | None, optional): Deprecated. Controls only HTTP status handling. - - None (default) or True: HTTP errors raise via resp.raise_for_status(). - - False: HTTP errors are not automatically raised. Defaults to None. 
Raises: GraphQLError: When the GraphQL response contains errors. @@ -937,8 +923,6 @@ async def execute_graphql( Returns: dict: The GraphQL data payload (response["data"]). """ - raise_for_error_deprecation_warning(value=raise_for_error) - branch_name = branch_name or self.default_branch url = self._graphql_url(branch_name=branch_name, at=at) @@ -959,9 +943,7 @@ async def execute_graphql( retry = self.retry_on_failure try: resp = await self._post(url=url, payload=payload, headers=headers, timeout=timeout) - - if raise_for_error in {None, True}: - resp.raise_for_status() + resp.raise_for_status() retry = False except ServerNotReachableError: @@ -1304,10 +1286,7 @@ async def query_gql_query( at: str | None = None, timeout: int | None = None, tracker: str | None = None, - raise_for_error: bool | None = None, ) -> dict: - raise_for_error_deprecation_warning(value=raise_for_error) - url = f"{self.address}/api/query/{name}" url_params = copy.deepcopy(params or {}) url_params["branch"] = branch_name or self.default_branch @@ -1351,8 +1330,7 @@ async def query_gql_query( timeout=timeout or self.default_timeout, ) - if raise_for_error in {None, True}: - resp.raise_for_status() + resp.raise_for_status() return decode_json(response=resp) @@ -1393,7 +1371,6 @@ async def get_diff_summary( to_time: datetime | None = None, timeout: int | None = None, tracker: str | None = None, - raise_for_error: bool | None = None, ) -> list[NodeDiff]: query = get_diff_summary_query() input_data = {"branch_name": branch} @@ -1410,7 +1387,6 @@ async def get_diff_summary( branch_name=branch, timeout=timeout, tracker=tracker, - raise_for_error=raise_for_error, variables=input_data, ) @@ -1493,69 +1469,8 @@ async def allocate_next_ip_address( branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., - raise_for_error: Literal[True] = True, - ) -> SchemaType: ... 
- - @overload - async def allocate_next_ip_address( - self, - resource_pool: CoreNode, - kind: type[SchemaType], - identifier: str | None = ..., - prefix_length: int | None = ..., - address_type: str | None = ..., - data: dict[str, Any] | None = ..., - branch: str | None = ..., - timeout: int | None = ..., - tracker: str | None = ..., - raise_for_error: Literal[False] = False, ) -> SchemaType | None: ... - @overload - async def allocate_next_ip_address( - self, - resource_pool: CoreNode, - kind: type[SchemaType], - identifier: str | None = ..., - prefix_length: int | None = ..., - address_type: str | None = ..., - data: dict[str, Any] | None = ..., - branch: str | None = ..., - timeout: int | None = ..., - tracker: str | None = ..., - raise_for_error: bool | None = ..., - ) -> SchemaType: ... - - @overload - async def allocate_next_ip_address( - self, - resource_pool: CoreNode, - kind: None = ..., - identifier: str | None = ..., - prefix_length: int | None = ..., - address_type: str | None = ..., - data: dict[str, Any] | None = ..., - branch: str | None = ..., - timeout: int | None = ..., - tracker: str | None = ..., - raise_for_error: Literal[True] = True, - ) -> CoreNode: ... - - @overload - async def allocate_next_ip_address( - self, - resource_pool: CoreNode, - kind: None = ..., - identifier: str | None = ..., - prefix_length: int | None = ..., - address_type: str | None = ..., - data: dict[str, Any] | None = ..., - branch: str | None = ..., - timeout: int | None = ..., - tracker: str | None = ..., - raise_for_error: Literal[False] = False, - ) -> CoreNode | None: ... - @overload async def allocate_next_ip_address( self, @@ -1568,7 +1483,6 @@ async def allocate_next_ip_address( branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., - raise_for_error: bool | None = ..., ) -> CoreNode | None: ... 
async def allocate_next_ip_address( @@ -1582,7 +1496,6 @@ async def allocate_next_ip_address( branch: str | None = None, timeout: int | None = None, tracker: str | None = None, - raise_for_error: bool | None = None, ) -> CoreNode | SchemaType | None: """Allocate a new IP address by using the provided resource pool. @@ -1595,7 +1508,6 @@ async def allocate_next_ip_address( branch (str, optional): Name of the branch to allocate from. Defaults to default_branch. timeout (int, optional): Flag to indicate whether to populate the store with the retrieved nodes. tracker (str, optional): The offset for pagination. - raise_for_error (bool, optional): Deprecated, raise an error if the HTTP status is not 2XX. Returns: InfrahubNode: Node corresponding to the allocated resource. """ @@ -1617,7 +1529,6 @@ async def allocate_next_ip_address( branch_name=branch, timeout=timeout, tracker=tracker, - raise_for_error=raise_for_error, ) if response[mutation_name]["ok"]: @@ -1638,57 +1549,8 @@ async def allocate_next_ip_prefix( branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., - raise_for_error: Literal[True] = True, - ) -> SchemaType: ... - - @overload - async def allocate_next_ip_prefix( - self, - resource_pool: CoreNode, - kind: type[SchemaType], - identifier: str | None = ..., - prefix_length: int | None = ..., - member_type: str | None = ..., - prefix_type: str | None = ..., - data: dict[str, Any] | None = ..., - branch: str | None = ..., - timeout: int | None = ..., - tracker: str | None = ..., - raise_for_error: Literal[False] = False, ) -> SchemaType | None: ... 
- @overload - async def allocate_next_ip_prefix( - self, - resource_pool: CoreNode, - kind: type[SchemaType], - identifier: str | None = ..., - prefix_length: int | None = ..., - member_type: str | None = ..., - prefix_type: str | None = ..., - data: dict[str, Any] | None = ..., - branch: str | None = ..., - timeout: int | None = ..., - tracker: str | None = ..., - raise_for_error: bool | None = ..., - ) -> SchemaType: ... - - @overload - async def allocate_next_ip_prefix( - self, - resource_pool: CoreNode, - kind: None = ..., - identifier: str | None = ..., - prefix_length: int | None = ..., - member_type: str | None = ..., - prefix_type: str | None = ..., - data: dict[str, Any] | None = ..., - branch: str | None = ..., - timeout: int | None = ..., - tracker: str | None = ..., - raise_for_error: Literal[True] = True, - ) -> CoreNode: ... - @overload async def allocate_next_ip_prefix( self, @@ -1702,23 +1564,6 @@ async def allocate_next_ip_prefix( branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., - raise_for_error: Literal[False] = False, - ) -> CoreNode | None: ... - - @overload - async def allocate_next_ip_prefix( - self, - resource_pool: CoreNode, - kind: None = ..., - identifier: str | None = ..., - prefix_length: int | None = ..., - member_type: str | None = ..., - prefix_type: str | None = ..., - data: dict[str, Any] | None = ..., - branch: str | None = ..., - timeout: int | None = ..., - tracker: str | None = ..., - raise_for_error: bool | None = ..., ) -> CoreNode | None: ... async def allocate_next_ip_prefix( @@ -1733,7 +1578,6 @@ async def allocate_next_ip_prefix( branch: str | None = None, timeout: int | None = None, tracker: str | None = None, - raise_for_error: bool | None = None, ) -> CoreNode | SchemaType | None: """Allocate a new IP prefix by using the provided resource pool. @@ -1747,7 +1591,6 @@ async def allocate_next_ip_prefix( branch: Name of the branch to allocate from. Defaults to default_branch. 
timeout: Flag to indicate whether to populate the store with the retrieved nodes. tracker: The offset for pagination. - raise_for_error (bool, optional): Deprecated, raise an error if the HTTP status is not 2XX. Returns: InfrahubNode: Node corresponding to the allocated resource. """ @@ -1770,7 +1613,6 @@ async def allocate_next_ip_prefix( branch_name=branch, timeout=timeout, tracker=tracker, - raise_for_error=raise_for_error, ) if response[mutation_name]["ok"]: @@ -1883,7 +1725,6 @@ async def convert_object_type( "target_kind": target_kind, }, branch_name=branch_name, - raise_for_error=True, ) return await InfrahubNode.from_graphql(client=self, branch=branch_name, data=response["ConvertObjectType"]) @@ -1971,7 +1812,6 @@ def execute_graphql( branch_name: str | None = None, at: str | Timestamp | None = None, timeout: int | None = None, - raise_for_error: bool | None = None, tracker: str | None = None, ) -> dict: """Execute a GraphQL query (or mutation). @@ -1983,10 +1823,6 @@ def execute_graphql( branch_name (str, optional): Name of the branch on which the query will be executed. Defaults to None. at (str, optional): Time when the query should be executed. Defaults to None. timeout (int, optional): Timeout in second for the query. Defaults to None. - raise_for_error (bool | None, optional): Deprecated. Controls only HTTP status handling. - - None (default) or True: HTTP errors raise via `resp.raise_for_status()`. - - False: HTTP errors are not automatically raised. - GraphQL errors always raise `GraphQLError`. Defaults to None. Raises: GraphQLError: When the GraphQL response contains errors. @@ -1994,8 +1830,6 @@ def execute_graphql( Returns: dict: The GraphQL data payload (`response["data"]`). 
""" - raise_for_error_deprecation_warning(value=raise_for_error) - branch_name = branch_name or self.default_branch url = self._graphql_url(branch_name=branch_name, at=at) @@ -2016,9 +1850,7 @@ def execute_graphql( retry = self.retry_on_failure try: resp = self._post(url=url, payload=payload, headers=headers, timeout=timeout) - - if raise_for_error in {None, True}: - resp.raise_for_status() + resp.raise_for_status() retry = False except ServerNotReachableError: @@ -2737,10 +2569,7 @@ def query_gql_query( at: str | None = None, timeout: int | None = None, tracker: str | None = None, - raise_for_error: bool | None = None, ) -> dict: - raise_for_error_deprecation_warning(value=raise_for_error) - url = f"{self.address}/api/query/{name}" url_params = copy.deepcopy(params or {}) url_params["branch"] = branch_name or self.default_branch @@ -2783,8 +2612,7 @@ def query_gql_query( timeout=timeout or self.default_timeout, ) - if raise_for_error in {None, True}: - resp.raise_for_status() + resp.raise_for_status() return decode_json(response=resp) @@ -2825,7 +2653,6 @@ def get_diff_summary( to_time: datetime | None = None, timeout: int | None = None, tracker: str | None = None, - raise_for_error: bool | None = None, ) -> list[NodeDiff]: query = get_diff_summary_query() input_data = {"branch_name": branch} @@ -2842,7 +2669,6 @@ def get_diff_summary( branch_name=branch, timeout=timeout, tracker=tracker, - raise_for_error=raise_for_error, variables=input_data, ) @@ -2925,69 +2751,8 @@ def allocate_next_ip_address( branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., - raise_for_error: Literal[True] = True, - ) -> SchemaTypeSync: ... 
- - @overload - def allocate_next_ip_address( - self, - resource_pool: CoreNodeSync, - kind: type[SchemaTypeSync], - identifier: str | None = ..., - prefix_length: int | None = ..., - address_type: str | None = ..., - data: dict[str, Any] | None = ..., - branch: str | None = ..., - timeout: int | None = ..., - tracker: str | None = ..., - raise_for_error: Literal[False] = False, ) -> SchemaTypeSync | None: ... - @overload - def allocate_next_ip_address( - self, - resource_pool: CoreNodeSync, - kind: type[SchemaTypeSync], - identifier: str | None = ..., - prefix_length: int | None = ..., - address_type: str | None = ..., - data: dict[str, Any] | None = ..., - branch: str | None = ..., - timeout: int | None = ..., - tracker: str | None = ..., - raise_for_error: bool | None = ..., - ) -> SchemaTypeSync: ... - - @overload - def allocate_next_ip_address( - self, - resource_pool: CoreNodeSync, - kind: None = ..., - identifier: str | None = ..., - prefix_length: int | None = ..., - address_type: str | None = ..., - data: dict[str, Any] | None = ..., - branch: str | None = ..., - timeout: int | None = ..., - tracker: str | None = ..., - raise_for_error: Literal[True] = True, - ) -> CoreNodeSync: ... - - @overload - def allocate_next_ip_address( - self, - resource_pool: CoreNodeSync, - kind: None = ..., - identifier: str | None = ..., - prefix_length: int | None = ..., - address_type: str | None = ..., - data: dict[str, Any] | None = ..., - branch: str | None = ..., - timeout: int | None = ..., - tracker: str | None = ..., - raise_for_error: Literal[False] = False, - ) -> CoreNodeSync | None: ... - @overload def allocate_next_ip_address( self, @@ -3000,7 +2765,6 @@ def allocate_next_ip_address( branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., - raise_for_error: bool | None = ..., ) -> CoreNodeSync | None: ... 
def allocate_next_ip_address( @@ -3014,7 +2778,6 @@ def allocate_next_ip_address( branch: str | None = None, timeout: int | None = None, tracker: str | None = None, - raise_for_error: bool | None = None, ) -> CoreNodeSync | SchemaTypeSync | None: """Allocate a new IP address by using the provided resource pool. @@ -3027,7 +2790,6 @@ def allocate_next_ip_address( branch (str, optional): Name of the branch to allocate from. Defaults to default_branch. timeout (int, optional): Flag to indicate whether to populate the store with the retrieved nodes. tracker (str, optional): The offset for pagination. - raise_for_error (bool, optional): The limit for pagination. Returns: InfrahubNodeSync: Node corresponding to the allocated resource. """ @@ -3049,7 +2811,6 @@ def allocate_next_ip_address( branch_name=branch, timeout=timeout, tracker=tracker, - raise_for_error=raise_for_error, ) if response[mutation_name]["ok"]: @@ -3070,73 +2831,8 @@ def allocate_next_ip_prefix( branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., - raise_for_error: Literal[True] = True, - ) -> SchemaTypeSync: ... - - @overload - def allocate_next_ip_prefix( - self, - resource_pool: CoreNodeSync, - kind: type[SchemaTypeSync], - identifier: str | None = ..., - prefix_length: int | None = ..., - member_type: str | None = ..., - prefix_type: str | None = ..., - data: dict[str, Any] | None = ..., - branch: str | None = ..., - timeout: int | None = ..., - tracker: str | None = ..., - raise_for_error: Literal[False] = False, ) -> SchemaTypeSync | None: ... - @overload - def allocate_next_ip_prefix( - self, - resource_pool: CoreNodeSync, - kind: type[SchemaTypeSync], - identifier: str | None = ..., - prefix_length: int | None = ..., - member_type: str | None = ..., - prefix_type: str | None = ..., - data: dict[str, Any] | None = ..., - branch: str | None = ..., - timeout: int | None = ..., - tracker: str | None = ..., - raise_for_error: bool | None = ..., - ) -> SchemaTypeSync: ... 
- - @overload - def allocate_next_ip_prefix( - self, - resource_pool: CoreNodeSync, - kind: None = ..., - identifier: str | None = ..., - prefix_length: int | None = ..., - member_type: str | None = ..., - prefix_type: str | None = ..., - data: dict[str, Any] | None = ..., - branch: str | None = ..., - timeout: int | None = ..., - tracker: str | None = ..., - raise_for_error: Literal[True] = True, - ) -> CoreNodeSync: ... - - @overload - def allocate_next_ip_prefix( - self, - resource_pool: CoreNodeSync, - kind: None = ..., - identifier: str | None = ..., - prefix_length: int | None = ..., - member_type: str | None = ..., - prefix_type: str | None = ..., - data: dict[str, Any] | None = ..., - branch: str | None = ..., - timeout: int | None = ..., - tracker: str | None = ..., - raise_for_error: Literal[False] = False, - ) -> CoreNodeSync | None: ... - @overload def allocate_next_ip_prefix( self, @@ -3150,7 +2846,6 @@ def allocate_next_ip_prefix( branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., - raise_for_error: bool | None = ..., ) -> CoreNodeSync | None: ... def allocate_next_ip_prefix( @@ -3165,7 +2860,6 @@ def allocate_next_ip_prefix( branch: str | None = None, timeout: int | None = None, tracker: str | None = None, - raise_for_error: bool | None = None, ) -> CoreNodeSync | SchemaTypeSync | None: """Allocate a new IP prefix by using the provided resource pool. @@ -3179,7 +2873,6 @@ def allocate_next_ip_prefix( branch (str, optional): Name of the branch to allocate from. Defaults to default_branch. timeout (int, optional): Flag to indicate whether to populate the store with the retrieved nodes. tracker (str, optional): The offset for pagination. - raise_for_error (bool, optional): The limit for pagination. Returns: InfrahubNodeSync: Node corresponding to the allocated resource. 
""" @@ -3202,7 +2895,6 @@ def allocate_next_ip_prefix( branch_name=branch, timeout=timeout, tracker=tracker, - raise_for_error=raise_for_error, ) if response[mutation_name]["ok"]: @@ -3441,6 +3133,5 @@ def convert_object_type( "target_kind": target_kind, }, branch_name=branch_name, - raise_for_error=True, ) return InfrahubNodeSync.from_graphql(client=self, branch=branch_name, data=response["ConvertObjectType"]) diff --git a/infrahub_sdk/config.py b/infrahub_sdk/config.py index 4c3ebbed..bda031dd 100644 --- a/infrahub_sdk/config.py +++ b/infrahub_sdk/config.py @@ -5,7 +5,7 @@ from typing import Any from pydantic import Field, PrivateAttr, field_validator, model_validator -from pydantic_settings import BaseSettings, SettingsConfigDict +from pydantic_settings import BaseSettings, InitSettingsSource, PydanticBaseSettingsSource, SettingsConfigDict from typing_extensions import Self from .constants import InfrahubClientMode @@ -81,6 +81,39 @@ class ConfigBase(BaseSettings): tls_ca_file: str | None = Field(default=None, description="File path to CA cert or bundle in PEM format") _ssl_context: ssl.SSLContext | None = PrivateAttr(default=None) + @classmethod + def settings_customise_sources( + cls, + settings_cls: type[BaseSettings], + init_settings: PydanticBaseSettingsSource, + env_settings: PydanticBaseSettingsSource, + dotenv_settings: PydanticBaseSettingsSource, + file_secret_settings: PydanticBaseSettingsSource, + ) -> tuple[PydanticBaseSettingsSource, ...]: + """ + Customize settings sources to track which fields were explicitly provided. + This allows us to properly handle authentication method precedence. 
+ """ + + class TrackingInitSource(InitSettingsSource): + """Wrapper around InitSettingsSource that tracks explicitly provided fields.""" + + def __call__(self) -> dict[str, Any]: + init_data = super().__call__() + # Store which fields were explicitly provided in constructor + if init_data: + return {**init_data, "_explicit_fields": set(init_data.keys())} + return init_data + + # Create tracking wrapper with proper initialization + init_kwargs: dict[str, Any] = {} + if isinstance(init_settings, InitSettingsSource): + init_kwargs = init_settings.init_kwargs + tracking_init = TrackingInitSource(settings_cls=settings_cls, init_kwargs=init_kwargs) + + # Return sources in priority order: explicit init values, env vars, dotenv, file secrets + return tracking_init, env_settings, dotenv_settings, file_secret_settings + @model_validator(mode="before") @classmethod def validate_credentials_input(cls, values: dict[str, Any]) -> dict[str, Any]: @@ -105,8 +138,41 @@ def set_transport(cls, values: dict[str, Any]) -> dict[str, Any]: @model_validator(mode="before") @classmethod def validate_mix_authentication_schemes(cls, values: dict[str, Any]) -> dict[str, Any]: - if values.get("password") and values.get("api_token"): - raise ValueError("Unable to combine password with token based authentication") + """ + Handle conflicts between token and password authentication methods. + + When both methods are present (from explicit args or environment variables), + we prioritize the explicitly provided method. If we can determine which fields + were explicitly set, we use that; otherwise, we prefer password auth when both + username and password are present. 
+ """ + # Extract tracking information about explicitly provided fields + explicit_fields = values.pop("_explicit_fields", set()) + + has_password = values.get("password") and values.get("username") + has_token = values.get("api_token") + + # If both auth methods are present, decide which to use + if has_password and has_token: + # Check if one method was explicitly provided + token_explicit = "api_token" in explicit_fields + password_explicit = "username" in explicit_fields or "password" in explicit_fields + + if token_explicit and not password_explicit: + # User explicitly provided token, password came from env - use token + values["username"] = None + values["password"] = None + elif password_explicit and not token_explicit: + # User explicitly provided password, token came from env - use password + values["api_token"] = None + else: + # Both explicitly provided, or both from environment - ambiguous, raise error + raise ValueError("Cannot use both 'api_token' and 'username'/'password' authentication simultaneously") + elif has_token and not has_password: + # Only token auth present - clear any partial password credentials + values["username"] = None + values["password"] = None + return values @field_validator("address") diff --git a/infrahub_sdk/ctl/cli_commands.py b/infrahub_sdk/ctl/cli_commands.py index d7a636ed..66f2069f 100644 --- a/infrahub_sdk/ctl/cli_commands.py +++ b/infrahub_sdk/ctl/cli_commands.py @@ -35,6 +35,7 @@ from ..ctl.repository import find_repository_config_file, get_repository_config from ..ctl.schema import app as schema_app from ..ctl.task import app as task_app +from ..ctl.telemetry import app as telemetry_app from ..ctl.transform import list_transforms from ..ctl.utils import ( catch_exception, @@ -44,6 +45,7 @@ ) from ..ctl.validate import app as validate_app from ..exceptions import GraphQLError, ModuleImportError +from ..graphql.query_renderer import render_query from ..node import InfrahubNode from ..protocols_generator.generator 
import CodeGenerator from ..schema import MainSchemaTypesAll, SchemaRoot @@ -68,6 +70,7 @@ app.add_typer(object_app, name="object") app.add_typer(graphql_app, name="graphql") app.add_typer(task_app, name="task") +app.add_typer(telemetry_app, name="telemetry") app.command(name="dump")(dump) app.command(name="load")(load) @@ -342,7 +345,7 @@ def transform( convert_query_response=transform_config.convert_query_response, ) # Get data - query_str = repository_config.get_query(name=transform.query).load_query() + query_str = render_query(name=transform.query, config=repository_config) data = asyncio.run( transform.client.execute_graphql(query=query_str, variables=variables_dict, branch_name=transform.branch_name) ) diff --git a/infrahub_sdk/ctl/telemetry.py b/infrahub_sdk/ctl/telemetry.py new file mode 100644 index 00000000..070462a8 --- /dev/null +++ b/infrahub_sdk/ctl/telemetry.py @@ -0,0 +1,128 @@ +from __future__ import annotations + +import json +from datetime import datetime +from pathlib import Path +from typing import Any +from urllib.parse import urlencode + +import typer +from rich.console import Console +from rich.table import Table + +from ..async_typer import AsyncTyper +from .client import initialize_client +from .parameters import CONFIG_PARAM +from .utils import catch_exception + +app = AsyncTyper() +console = Console() + +EXPORT_PAGE_SIZE = 1000 + + +@app.command(name="list") +@catch_exception(console=console) +async def list_snapshots( + start_date: datetime | None = typer.Option( + None, help="Start date filter (ISO 8601)", formats=["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%dT%H:%M:%S%z"] + ), + end_date: datetime | None = typer.Option( + None, help="End date filter (ISO 8601)", formats=["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%dT%H:%M:%S%z"] + ), + limit: int = typer.Option(50, help="Maximum number of results"), + _: str = CONFIG_PARAM, +) -> None: + """List telemetry snapshots with summary information.""" + client = initialize_client() + + params: 
dict[str, str | int] = {"limit": limit} + if start_date: + params["start_date"] = start_date.isoformat() + if end_date: + params["end_date"] = end_date.isoformat() + + url = f"{client.address}/api/telemetry/snapshots?{urlencode(params)}" + response = await client._get(url=url, timeout=client.default_timeout) + response.raise_for_status() + data = response.json() + + snapshots = data.get("snapshots", []) + if not snapshots: + console.print("No telemetry snapshots found.") + return + + table = Table() + table.add_column("Date") + table.add_column("Version") + table.add_column("Type") + table.add_column("Deployment") + table.add_column("Remote Status") + + for snap in snapshots: + table.add_row( + snap.get("created_at", ""), + snap.get("infrahub_version", ""), + snap.get("kind", ""), + snap.get("deployment_id", ""), + snap.get("remote_send_status", ""), + ) + + console.print(table) + console.print(f"Showing {len(snapshots)} of {data.get('count', len(snapshots))} total snapshots") + + +@app.command(name="export") +@catch_exception(console=console) +async def export_snapshots( + output: str = typer.Option("telemetry-export.json", help="Output file path"), + start_date: datetime | None = typer.Option( + None, help="Start date filter (ISO 8601)", formats=["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%dT%H:%M:%S%z"] + ), + end_date: datetime | None = typer.Option( + None, help="End date filter (ISO 8601)", formats=["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%dT%H:%M:%S%z"] + ), + _: str = CONFIG_PARAM, +) -> None: + """Export telemetry snapshots to a JSON file. + + Pages through the API automatically so that all matching snapshots are exported, + not just the first page. 
+ """ + client = initialize_client() + + base_params: dict[str, str | int] = {} + if start_date: + base_params["start_date"] = start_date.isoformat() + if end_date: + base_params["end_date"] = end_date.isoformat() + + snapshots: list[dict[str, Any]] = [] + offset = 0 + total: int | None = None + + while True: + params: dict[str, str | int] = {**base_params, "limit": EXPORT_PAGE_SIZE, "offset": offset} + url = f"{client.address}/api/telemetry/snapshots?{urlencode(params)}" + response = await client._get(url=url, timeout=client.default_timeout) + response.raise_for_status() + data = response.json() + + page: list[dict[str, Any]] = data.get("snapshots", []) + snapshots.extend(page) + + if total is None: + total = int(data.get("count", len(page))) + + if len(page) < EXPORT_PAGE_SIZE or len(snapshots) >= total: + break + + offset += EXPORT_PAGE_SIZE + + if not snapshots: + console.print("No telemetry snapshots found.") + raise typer.Exit(code=2) + + output_path = Path(output) + output_path.write_text(json.dumps(snapshots, indent=2), encoding="utf-8") + console.print(f"Exported {len(snapshots)} snapshots to {output_path}") diff --git a/infrahub_sdk/ctl/utils.py b/infrahub_sdk/ctl/utils.py index 7130ea80..230b0093 100644 --- a/infrahub_sdk/ctl/utils.py +++ b/infrahub_sdk/ctl/utils.py @@ -20,6 +20,7 @@ Error, FileNotValidError, GraphQLError, + GraphQLQueryError, NodeNotFoundError, ResourceNotDefinedError, SchemaNotFoundError, @@ -27,6 +28,7 @@ ServerNotResponsiveError, ValidationError, ) +from ..graphql.query_renderer import render_query from ..yaml import YamlFile from .client import initialize_client_sync from .exceptions import QueryNotFoundError @@ -66,7 +68,7 @@ def handle_exception(exc: Exception, console: Console, exit_code: int) -> NoRetu if isinstance(exc, GraphQLError): print_graphql_errors(console=console, errors=exc.errors) raise typer.Exit(code=exit_code) - if isinstance(exc, (SchemaNotFoundError, NodeNotFoundError, ResourceNotDefinedError)): + if 
isinstance(exc, (SchemaNotFoundError, NodeNotFoundError, ResourceNotDefinedError, GraphQLQueryError)): console.print(f"[red]Error: {exc!s}") raise typer.Exit(code=exit_code) @@ -114,8 +116,7 @@ def execute_graphql_query( debug: bool = False, ) -> dict: console = Console() - query_object = repository_config.get_query(name=query) - query_str = query_object.load_query() + query_str = render_query(name=query, config=repository_config) client = initialize_client_sync() @@ -126,7 +127,6 @@ def execute_graphql_query( query=query_str, branch_name=branch, variables=variables_dict, - raise_for_error=False, ) if debug: diff --git a/infrahub_sdk/ctl/validate.py b/infrahub_sdk/ctl/validate.py index 07256faf..9b6f9d6a 100644 --- a/infrahub_sdk/ctl/validate.py +++ b/infrahub_sdk/ctl/validate.py @@ -87,7 +87,6 @@ def validate_graphql( query=query_str, branch_name=branch, variables=variables_dict, - raise_for_error=False, ) except GraphQLError as exc: console.print(f"[red]{len(exc.errors)} error(s) occurred while executing the query") diff --git a/infrahub_sdk/exceptions.py b/infrahub_sdk/exceptions.py index 727239bf..6401d289 100644 --- a/infrahub_sdk/exceptions.py +++ b/infrahub_sdk/exceptions.py @@ -173,3 +173,49 @@ class TimestampFormatError(Error): def __init__(self, message: str | None = None) -> None: self.message = message or "Invalid timestamp format" super().__init__(self.message) + + +class GraphQLQueryError(Error): + """Base class for errors raised during GraphQL query rendering (fragment resolution).""" + + +class QuerySyntaxError(GraphQLQueryError): + def __init__(self, syntax_error: str) -> None: + self.message = f"GraphQL syntax error: {syntax_error}" + super().__init__(self.message) + + +class FragmentNotFoundError(GraphQLQueryError): + def __init__(self, fragment_name: str, query_file: str | None = None, message: str | None = None) -> None: + self.fragment_name = fragment_name + self.query_file = query_file + if message: + self.message = message + elif query_file: 
+ self.message = f"Fragment '{fragment_name}' not found (referenced in '{query_file}')." + else: + self.message = f"Fragment '{fragment_name}' not found." + super().__init__(self.message) + + +class DuplicateFragmentError(GraphQLQueryError): + def __init__(self, fragment_name: str, message: str | None = None) -> None: + self.fragment_name = fragment_name + self.message = ( + message or f"Fragment '{fragment_name}' is defined more than once across declared fragment files." + ) + super().__init__(self.message) + + +class CircularFragmentError(GraphQLQueryError): + def __init__(self, cycle: list[str], message: str | None = None) -> None: + self.cycle = cycle + self.message = message or f"Circular fragment dependency detected: {' -> '.join(cycle)}." + super().__init__(self.message) + + +class FragmentFileNotFoundError(GraphQLQueryError): + def __init__(self, file_path: str, message: str | None = None) -> None: + self.file_path = file_path + self.message = message or f"Fragment file '{file_path}' declared in graphql_fragments does not exist." 
+ super().__init__(self.message) diff --git a/infrahub_sdk/graphql/query_renderer.py b/infrahub_sdk/graphql/query_renderer.py new file mode 100644 index 00000000..8ee64e83 --- /dev/null +++ b/infrahub_sdk/graphql/query_renderer.py @@ -0,0 +1,179 @@ +"""GraphQL query rendering: fragment parsing, inlining, and loading from InfrahubRepositoryConfig.""" + +from __future__ import annotations + +from collections.abc import Iterator +from typing import TYPE_CHECKING + +from graphql import DocumentNode, FragmentDefinitionNode, FragmentSpreadNode, parse, print_ast +from graphql.error import GraphQLSyntaxError +from graphql.language.ast import Node as ASTNode + +from ..exceptions import CircularFragmentError, DuplicateFragmentError, FragmentNotFoundError, QuerySyntaxError + +if TYPE_CHECKING: + from ..schema.repository import InfrahubRepositoryConfig + + +def _iter_nodes(node: ASTNode) -> Iterator[ASTNode]: + """Yield node and every descendant in depth-first order.""" + stack: list[ASTNode] = [node] + while stack: + current = stack.pop() + yield current + for key in reversed(current.keys): + child = getattr(current, key, None) + if isinstance(child, ASTNode): + stack.append(child) + elif isinstance(child, tuple): + stack.extend(item for item in reversed(child) if isinstance(item, ASTNode)) + + +def _collect_spread_names(node: ASTNode) -> list[str]: + """Return the names of all fragment spreads within node.""" + return [n.name.value for n in _iter_nodes(node) if isinstance(n, FragmentSpreadNode)] + + +def build_fragment_index(fragment_files: list[str]) -> dict[str, FragmentDefinitionNode]: + """Parse all fragment file contents and return a mapping from fragment name to its AST node. + + Raises DuplicateFragmentError if the same fragment name appears more than once. 
+ """ + index: dict[str, FragmentDefinitionNode] = {} + for content in fragment_files: + try: + doc = parse(content) + except GraphQLSyntaxError as exc: + raise QuerySyntaxError(syntax_error=str(exc)) from exc + for definition in doc.definitions: + if isinstance(definition, FragmentDefinitionNode): + name = definition.name.value + if name in index: + raise DuplicateFragmentError(fragment_name=name) + index[name] = definition + return index + + +def collect_required_fragments( + query_doc: DocumentNode, + fragment_index: dict[str, FragmentDefinitionNode], +) -> list[str]: + """Walk query_doc and collect all fragment names required (transitively). + + Returns a topologically ordered list of unique fragment names. + Raises FragmentNotFoundError for any unresolved name. + Raises CircularFragmentError for cyclic dependencies. + """ + # Collect spreads only from operation definitions — any fragment definitions already + # present in the query document are self-contained and do not need external resolution. 
+ top_level_spreads = [ + node.name.value + for definition in query_doc.definitions + if not isinstance(definition, FragmentDefinitionNode) + for node in _iter_nodes(definition) + if isinstance(node, FragmentSpreadNode) + ] + + local_fragments = { + definition.name.value for definition in query_doc.definitions if isinstance(definition, FragmentDefinitionNode) + } + + ordered: list[str] = [] + visited: set[str] = set() + + def resolve(name: str, stack: list[str]) -> None: + if name in stack: + cycle = [*stack[stack.index(name) :], name] + raise CircularFragmentError(cycle=cycle) + if name in visited: + return + if name in local_fragments: + return + if name not in fragment_index: + raise FragmentNotFoundError(fragment_name=name) + stack.append(name) + for dep in _collect_spread_names(fragment_index[name]): + resolve(dep, stack) + stack.pop() + visited.add(name) + ordered.append(name) + + for spread_name in top_level_spreads: + resolve(spread_name, []) + + return ordered + + +def render_query_with_fragments(query_str: str, fragment_files: list[str]) -> str: + """Return a self-contained GraphQL document with required fragment definitions inlined. + + If the query contains no fragment spreads, query_str is returned unchanged. + + Raises: + QuerySyntaxError: Query string or a fragment file contains invalid GraphQL syntax. + DuplicateFragmentError: Same fragment name declared in multiple files. + FragmentNotFoundError: Query references a fragment not found in any declared file. + CircularFragmentError: Circular dependency detected among fragments. + """ + try: + query_doc = parse(query_str) + except GraphQLSyntaxError as exc: + raise QuerySyntaxError(syntax_error=str(exc)) from exc + + return _render_doc_with_fragments(query_doc, query_str, fragment_files) + + +def _render_doc_with_fragments(query_doc: DocumentNode, query_str: str, fragment_files: list[str]) -> str: + """Inline fragments into an already-parsed query document. 
+ + query_str is returned unchanged when the document contains no fragment spreads. + """ + if not _has_fragment_spread(query_doc): + return query_str + + fragment_index = build_fragment_index(fragment_files) + required_names = collect_required_fragments(query_doc, fragment_index) + + query_definitions = list(query_doc.definitions) + fragment_definitions = [fragment_index[name] for name in required_names] + + output_doc = DocumentNode(definitions=tuple(query_definitions + fragment_definitions)) + return print_ast(output_doc) + + +def _has_fragment_spread(doc: DocumentNode) -> bool: + """Return True if the document contains any fragment spread in an operation definition.""" + return any( + isinstance(node, FragmentSpreadNode) + for definition in doc.definitions + if not isinstance(definition, FragmentDefinitionNode) + for node in _iter_nodes(definition) + ) + + +def render_query(name: str, config: InfrahubRepositoryConfig, relative_path: str = ".") -> str: + """Return a self-contained GraphQL document for the named query, with fragment definitions inlined. + + Fragment files are only loaded from disk when the query actually uses fragment spreads. + + Raises: + ResourceNotDefinedError: Query name not found in config. + QuerySyntaxError: Query string contains invalid GraphQL syntax. + FragmentFileNotFoundError: A declared fragment file path does not exist. + DuplicateFragmentError: Same fragment name declared in multiple files. + FragmentNotFoundError: Query references a fragment not found in any declared file. + CircularFragmentError: Circular dependency detected among fragments. 
+ """ + raw = config.get_query(name).load_query(relative_path=relative_path) + try: + query_doc = parse(raw) + except GraphQLSyntaxError as exc: + raise QuerySyntaxError(syntax_error=str(exc)) from exc + + if not _has_fragment_spread(query_doc) or not config.graphql_fragments: + return raw + + fragment_contents: list[str] = [] + for frag in config.graphql_fragments: + fragment_contents.extend(frag.load_fragments(relative_path=relative_path)) + return _render_doc_with_fragments(query_doc, raw, fragment_contents) diff --git a/infrahub_sdk/node/node.py b/infrahub_sdk/node/node.py index a47209dc..24185886 100644 --- a/infrahub_sdk/node/node.py +++ b/infrahub_sdk/node/node.py @@ -11,11 +11,9 @@ from ..graphql import Mutation, Query from ..schema import ( GenericSchemaAPI, - ProfileSchemaAPI, RelationshipCardinality, RelationshipKind, RelationshipSchemaAPI, - TemplateSchemaAPI, ) from ..utils import compare_lists, generate_short_id from .attribute import Attribute @@ -68,21 +66,14 @@ def __init__(self, schema: MainSchemaTypesAPI, branch: str, data: dict | None = self._attributes = [item.name for item in self._schema.attributes] self._relationships = [item.name for item in self._schema.relationships] - # GenericSchemaAPI doesn't have inherit_from - inherit_from: list[str] = getattr(schema, "inherit_from", None) or [] - self._artifact_support = "CoreArtifactTarget" in inherit_from - self._file_object_support = "CoreFileObject" in inherit_from - self._artifact_definition_support = schema.kind == "CoreArtifactDefinition" + self._artifact_support = schema.supports_artifacts + self._file_object_support = schema.supports_file_object + self._hierarchy_support = schema.supports_hierarchy + self._artifact_definition_support = schema.supports_artifact_definition self._file_content: bytes | Path | BinaryIO | None = None self._file_name: str | None = None - # Check if this node is hierarchical (supports parent/children and ancestors/descendants) - if not isinstance(schema, 
(ProfileSchemaAPI, GenericSchemaAPI, TemplateSchemaAPI)): - self._hierarchy_support = getattr(schema, "hierarchy", None) is not None - else: - self._hierarchy_support = False - if not self.id: self._existing = False @@ -567,6 +558,32 @@ def _get_attribute(self, name: str) -> Attribute: raise ResourceNotDefinedError(message=f"The node doesn't have an attribute for {name}") + @staticmethod + def _build_rel_query_data( + rel_schema: RelationshipSchemaAPI, + peer_data: dict[str, Any], + property: bool, + include_metadata: bool, + ) -> dict[str, Any] | None: + if rel_schema.cardinality == RelationshipCardinality.ONE: + rel_data = RelatedNodeBase._generate_query_data( + peer_data=peer_data, property=property, include_metadata=include_metadata + ) + # Nodes involved in a hierarchy are required to inherit from a common ancestor node, and graphql + # tries to resolve attributes in this ancestor instead of the actual node. To avoid + # invalid query issues when an attribute is missing in the common ancestor, we use a fragment + # to make explicit the actual node kind we are querying.
+ if rel_schema.kind == RelationshipKind.HIERARCHY: + data_node = rel_data["node"] + rel_data["node"] = {} + rel_data["node"][f"...on {rel_schema.peer}"] = data_node + return rel_data + if rel_schema.cardinality == RelationshipCardinality.MANY: + return RelationshipManagerBase._generate_query_data( + peer_data=peer_data, property=property, include_metadata=include_metadata + ) + return None + class InfrahubNode(InfrahubNodeBase): """Represents a Infrahub node in an asynchronous context.""" @@ -626,7 +643,7 @@ def _init_relationships(self, data: dict | RelatedNode | None = None) -> None: for rel_schema in self._schema.relationships: rel_data = data.get(rel_schema.name, None) if isinstance(data, dict) else None - if rel_schema.cardinality == "one": + if rel_schema.cardinality == RelationshipCardinality.ONE: if isinstance(rel_data, RelatedNode): peer_id_data: dict[str, Any] = { key: value @@ -653,74 +670,25 @@ def _init_relationships(self, data: dict | RelatedNode | None = None) -> None: data=rel_data, ) # Initialize parent, children, ancestors and descendants for hierarchical nodes - if self._hierarchy_support: - # Create pseudo-schema for parent (cardinality one) - parent_schema = RelationshipSchemaAPI( - name="parent", - peer=self._schema.hierarchy, # type: ignore[union-attr, arg-type] - kind=RelationshipKind.HIERARCHY, - cardinality="one", - optional=True, - ) - parent_data = data.get("parent", None) if isinstance(data, dict) else None - self._hierarchical_data["parent"] = RelatedNode( - name="parent", - client=self._client, - branch=self._branch, - schema=parent_schema, - data=parent_data, - ) - # Create pseudo-schema for children (many cardinality) - children_schema = RelationshipSchemaAPI( - name="children", - peer=self._schema.hierarchy, # type: ignore[union-attr, arg-type] - kind=RelationshipKind.HIERARCHY, - cardinality="many", - optional=True, - ) - children_data = data.get("children", None) if isinstance(data, dict) else None - 
self._hierarchical_data["children"] = RelationshipManager( - name="children", - client=self._client, - node=self, - branch=self._branch, - schema=children_schema, - data=children_data, - ) - # Create pseudo-schema for ancestors (read-only, many cardinality) - ancestors_schema = RelationshipSchemaAPI( - name="ancestors", - peer=self._schema.hierarchy, # type: ignore[union-attr, arg-type] - cardinality="many", - read_only=True, - optional=True, - ) - ancestors_data = data.get("ancestors", None) if isinstance(data, dict) else None - self._hierarchical_data["ancestors"] = RelationshipManager( - name="ancestors", - client=self._client, - node=self, - branch=self._branch, - schema=ancestors_schema, - data=ancestors_data, - ) - # Create pseudo-schema for descendants (read-only, many cardinality) - descendants_schema = RelationshipSchemaAPI( - name="descendants", - peer=self._schema.hierarchy, # type: ignore[union-attr, arg-type] - cardinality="many", - read_only=True, - optional=True, - ) - descendants_data = data.get("descendants", None) if isinstance(data, dict) else None - self._hierarchical_data["descendants"] = RelationshipManager( - name="descendants", - client=self._client, - node=self, - branch=self._branch, - schema=descendants_schema, - data=descendants_data, - ) + for rel_schema in self._schema.hierarchical_relationship_schemas: + rel_data = data.get(rel_schema.name, None) if isinstance(data, dict) else None + if rel_schema.cardinality_is_one: + self._hierarchical_data[rel_schema.name] = RelatedNode( + name=rel_schema.name, + client=self._client, + branch=self._branch, + schema=rel_schema, + data=rel_data, + ) + else: + self._hierarchical_data[rel_schema.name] = RelationshipManager( + name=rel_schema.name, + client=self._client, + node=self, + branch=self._branch, + schema=rel_schema, + data=rel_data, + ) def __getattr__(self, name: str) -> Attribute | RelationshipManager | RelatedNode: if "_attribute_data" in self.__dict__ and name in self._attribute_data: @@ 
-1010,8 +978,7 @@ async def generate_query_data_node( if insert_alias: data[attr_name]["@alias"] = f"__alias__{self._schema.kind}__{attr_name}" elif insert_alias: - if insert_alias: - data[attr_name] = {"@alias": f"__alias__{self._schema.kind}__{attr_name}"} + data[attr_name] = {"@alias": f"__alias__{self._schema.kind}__{attr_name}"} for rel_name in self._relationships: if exclude and rel_name in exclude: @@ -1039,24 +1006,8 @@ async def generate_query_data_node( include_metadata=include_metadata, ) - rel_data: dict[str, Any] - if rel_schema and rel_schema.cardinality == "one": - rel_data = RelatedNode._generate_query_data( - peer_data=peer_data, property=property, include_metadata=include_metadata - ) - # Nodes involved in a hierarchy are required to inherit from a common ancestor node, and graphql - # tries to resolve attributes in this ancestor instead of actual node. To avoid - # invalid queries issues when attribute is missing in the common ancestor, we use a fragment - # to explicit actual node kind we are querying. 
- if rel_schema.kind == RelationshipKind.HIERARCHY: - data_node = rel_data["node"] - rel_data["node"] = {} - rel_data["node"][f"...on {rel_schema.peer}"] = data_node - elif rel_schema and rel_schema.cardinality == "many": - rel_data = RelationshipManager._generate_query_data( - peer_data=peer_data, property=property, include_metadata=include_metadata - ) - else: + rel_data = self._build_rel_query_data(rel_schema, peer_data, property, include_metadata) + if rel_data is None: continue data[rel_name] = rel_data @@ -1509,7 +1460,7 @@ def _init_relationships(self, data: dict | None = None) -> None: for rel_schema in self._schema.relationships: rel_data = data.get(rel_schema.name, None) if isinstance(data, dict) else None - if rel_schema.cardinality == "one": + if rel_schema.cardinality == RelationshipCardinality.ONE: if isinstance(rel_data, RelatedNodeSync): peer_id_data: dict[str, Any] = { key: value @@ -1537,77 +1488,25 @@ def _init_relationships(self, data: dict | None = None) -> None: ) # Initialize parent, children, ancestors and descendants for hierarchical nodes - if self._hierarchy_support: - # Create pseudo-schema for parent (cardinality one) - parent_schema = RelationshipSchemaAPI( - name="parent", - peer=self._schema.hierarchy, # type: ignore[union-attr, arg-type] - kind=RelationshipKind.HIERARCHY, - cardinality="one", - optional=True, - ) - parent_data = data.get("parent", None) if isinstance(data, dict) else None - self._hierarchical_data["parent"] = RelatedNodeSync( - name="parent", - client=self._client, - branch=self._branch, - schema=parent_schema, - data=parent_data, - ) - - # Create pseudo-schema for children (many cardinality) - children_schema = RelationshipSchemaAPI( - name="children", - peer=self._schema.hierarchy, # type: ignore[union-attr, arg-type] - kind=RelationshipKind.HIERARCHY, - cardinality="many", - optional=True, - ) - children_data = data.get("children", None) if isinstance(data, dict) else None - self._hierarchical_data["children"] = 
RelationshipManagerSync( - name="children", - client=self._client, - node=self, - branch=self._branch, - schema=children_schema, - data=children_data, - ) - - # Create pseudo-schema for ancestors (read-only, many cardinality) - ancestors_schema = RelationshipSchemaAPI( - name="ancestors", - peer=self._schema.hierarchy, # type: ignore[union-attr, arg-type] - cardinality="many", - read_only=True, - optional=True, - ) - ancestors_data = data.get("ancestors", None) if isinstance(data, dict) else None - self._hierarchical_data["ancestors"] = RelationshipManagerSync( - name="ancestors", - client=self._client, - node=self, - branch=self._branch, - schema=ancestors_schema, - data=ancestors_data, - ) - - # Create pseudo-schema for descendants (read-only, many cardinality) - descendants_schema = RelationshipSchemaAPI( - name="descendants", - peer=self._schema.hierarchy, # type: ignore[union-attr, arg-type] - cardinality="many", - read_only=True, - optional=True, - ) - descendants_data = data.get("descendants", None) if isinstance(data, dict) else None - self._hierarchical_data["descendants"] = RelationshipManagerSync( - name="descendants", - client=self._client, - node=self, - branch=self._branch, - schema=descendants_schema, - data=descendants_data, - ) + for rel_schema in self._schema.hierarchical_relationship_schemas: + rel_data = data.get(rel_schema.name, None) if isinstance(data, dict) else None + if rel_schema.cardinality_is_one: + self._hierarchical_data[rel_schema.name] = RelatedNodeSync( + name=rel_schema.name, + client=self._client, + branch=self._branch, + schema=rel_schema, + data=rel_data, + ) + else: + self._hierarchical_data[rel_schema.name] = RelationshipManagerSync( + name=rel_schema.name, + client=self._client, + node=self, + branch=self._branch, + schema=rel_schema, + data=rel_data, + ) def __getattr__(self, name: str) -> Attribute | RelationshipManagerSync | RelatedNodeSync: if "_attribute_data" in self.__dict__ and name in self._attribute_data: @@ 
-1889,8 +1788,7 @@ def generate_query_data_node( if insert_alias: data[attr_name]["@alias"] = f"__alias__{self._schema.kind}__{attr_name}" elif insert_alias: - if insert_alias: - data[attr_name] = {"@alias": f"__alias__{self._schema.kind}__{attr_name}"} + data[attr_name] = {"@alias": f"__alias__{self._schema.kind}__{attr_name}"} for rel_name in self._relationships: if exclude and rel_name in exclude: @@ -1918,24 +1816,8 @@ def generate_query_data_node( include_metadata=include_metadata, ) - rel_data: dict[str, Any] - if rel_schema and rel_schema.cardinality == "one": - rel_data = RelatedNodeSync._generate_query_data( - peer_data=peer_data, property=property, include_metadata=include_metadata - ) - # Nodes involved in a hierarchy are required to inherit from a common ancestor node, and graphql - # tries to resolve attributes in this ancestor instead of actual node. To avoid - # invalid queries issues when attribute is missing in the common ancestor, we use a fragment - # to explicit actual node kind we are querying. 
- if rel_schema.kind == RelationshipKind.HIERARCHY: - data_node = rel_data["node"] - rel_data["node"] = {} - rel_data["node"][f"...on {rel_schema.peer}"] = data_node - elif rel_schema and rel_schema.cardinality == "many": - rel_data = RelationshipManagerSync._generate_query_data( - peer_data=peer_data, property=property, include_metadata=include_metadata - ) - else: + rel_data = self._build_rel_query_data(rel_schema, peer_data, property, include_metadata) + if rel_data is None: continue data[rel_name] = rel_data diff --git a/infrahub_sdk/object_store.py b/infrahub_sdk/object_store.py index bf5fc862..4841564a 100644 --- a/infrahub_sdk/object_store.py +++ b/infrahub_sdk/object_store.py @@ -11,8 +11,24 @@ from .client import InfrahubClient, InfrahubClientSync +ALLOWED_TEXT_CONTENT_TYPES = {"application/json", "application/yaml", "application/x-yaml"} + + +def _extract_content_type(response: httpx.Response) -> str: + """Extract and normalize the content-type from an HTTP response, stripping parameters.""" + return response.headers.get("content-type", "").split(";")[0].strip().lower() + + class ObjectStoreBase: - pass + @staticmethod + def _validate_text_content(response: httpx.Response, identifier: str) -> str: + """Validate that a file response has a text-based content-type and return the text.""" + content_type = _extract_content_type(response) + if not content_type.startswith("text/") and content_type not in ALLOWED_TEXT_CONTENT_TYPES: + raise ValueError( + f"Binary content not supported: content-type '{content_type}' for identifier '{identifier}'" + ) + return response.text class ObjectStore(ObjectStoreBase): @@ -62,6 +78,44 @@ async def upload(self, content: str, tracker: str | None = None) -> dict[str, st return resp.json() + async def _get_file(self, url: str, identifier: str, tracker: str | None = None) -> str: + """Fetch a file endpoint and validate that the response is text-based.""" + headers = copy.copy(self.client.headers or {}) + if 
self.client.insert_tracker and tracker: + headers["X-Infrahub-Tracker"] = tracker + + try: + resp = await self.client._get(url=url, headers=headers) + resp.raise_for_status() + except ServerNotReachableError: + self.client.log.error(f"Unable to connect to {self.client.address} .. ") + raise + except httpx.HTTPStatusError as exc: + if exc.response.status_code in {401, 403}: + response = exc.response.json() + errors = response.get("errors") + messages = [error.get("message") for error in errors] + raise AuthenticationError(" | ".join(messages)) from exc + raise + + return self._validate_text_content(response=resp, identifier=identifier) + + async def get_file_by_storage_id(self, storage_id: str, tracker: str | None = None) -> str: + """Retrieve file object content by storage_id.""" + url = f"{self.client.address}/api/files/by-storage-id/{storage_id}" + return await self._get_file(url=url, identifier=storage_id, tracker=tracker) + + async def get_file_by_id(self, node_id: str, tracker: str | None = None) -> str: + """Retrieve file object content by node UUID.""" + url = f"{self.client.address}/api/files/{node_id}" + return await self._get_file(url=url, identifier=node_id, tracker=tracker) + + async def get_file_by_hfid(self, kind: str, hfid: list[str], tracker: str | None = None) -> str: + """Retrieve file object content by Human-Friendly ID.""" + params = "&".join(f"hfid={h}" for h in hfid) + url = f"{self.client.address}/api/files/by-hfid/{kind}?{params}" + return await self._get_file(url=url, identifier=f"{kind}:{'/'.join(hfid)}", tracker=tracker) + class ObjectStoreSync(ObjectStoreBase): def __init__(self, client: InfrahubClientSync) -> None: @@ -109,3 +163,41 @@ def upload(self, content: str, tracker: str | None = None) -> dict[str, str]: raise AuthenticationError(" | ".join(messages)) from exc return resp.json() + + def _get_file(self, url: str, identifier: str, tracker: str | None = None) -> str: + """Fetch a file endpoint and validate that the response is 
text-based.""" + headers = copy.copy(self.client.headers or {}) + if self.client.insert_tracker and tracker: + headers["X-Infrahub-Tracker"] = tracker + + try: + resp = self.client._get(url=url, headers=headers) + resp.raise_for_status() + except ServerNotReachableError: + self.client.log.error(f"Unable to connect to {self.client.address} .. ") + raise + except httpx.HTTPStatusError as exc: + if exc.response.status_code in {401, 403}: + response = exc.response.json() + errors = response.get("errors") + messages = [error.get("message") for error in errors] + raise AuthenticationError(" | ".join(messages)) from exc + raise + + return self._validate_text_content(resp, identifier) + + def get_file_by_storage_id(self, storage_id: str, tracker: str | None = None) -> str: + """Retrieve file object content by storage_id.""" + url = f"{self.client.address}/api/files/by-storage-id/{storage_id}" + return self._get_file(url=url, identifier=storage_id, tracker=tracker) + + def get_file_by_id(self, node_id: str, tracker: str | None = None) -> str: + """Retrieve file object content by node UUID.""" + url = f"{self.client.address}/api/files/{node_id}" + return self._get_file(url=url, identifier=node_id, tracker=tracker) + + def get_file_by_hfid(self, kind: str, hfid: list[str], tracker: str | None = None) -> str: + """Retrieve file object content by Human-Friendly ID.""" + params = "&".join(f"hfid={h}" for h in hfid) + url = f"{self.client.address}/api/files/by-hfid/{kind}?{params}" + return self._get_file(url=url, identifier=f"{kind}:{'/'.join(hfid)}", tracker=tracker) diff --git a/infrahub_sdk/protocols.py b/infrahub_sdk/protocols.py index c359ad5c..426c9923 100644 --- a/infrahub_sdk/protocols.py +++ b/infrahub_sdk/protocols.py @@ -97,8 +97,6 @@ class CoreCheck(CoreNode): class CoreComment(CoreNode): text: String - created_at: DateTimeOptional - created_by: RelatedNode class CoreCredential(CoreNode): @@ -152,6 +150,13 @@ class CoreGroup(CoreNode): children: RelationshipManager 
+class CoreKeyValue(CoreNode): + name: String + key: String + description: StringOptional + value: String + + class CoreMenu(CoreNode): namespace: String name: String @@ -197,10 +202,8 @@ class CoreTaskTarget(CoreNode): class CoreThread(CoreNode): label: StringOptional resolved: Boolean - created_at: DateTimeOptional change: RelatedNode comments: RelationshipManager - created_by: RelatedNode class CoreTransformation(CoreNode): @@ -240,6 +243,7 @@ class CoreWebhook(CoreNode): description: StringOptional url: URL validate_certificates: BooleanOptional + headers: RelationshipManager class CoreWeightedPoolResource(CoreNode): @@ -348,6 +352,10 @@ class CoreDataValidator(CoreValidator): pass +class CoreEnvKeyValue(CoreKeyValue): + pass + + class CoreFileCheck(CoreCheck): files: ListAttributeOptional commit: StringOptional @@ -552,6 +560,10 @@ class CoreStandardWebhook(CoreWebhook, CoreTaskTarget): shared_key: String +class CoreStaticKeyValue(CoreKeyValue): + pass + + class CoreThreadComment(CoreComment): thread: RelatedNode @@ -662,8 +674,6 @@ class CoreCheckSync(CoreNodeSync): class CoreCommentSync(CoreNodeSync): text: String - created_at: DateTimeOptional - created_by: RelatedNodeSync class CoreCredentialSync(CoreNodeSync): @@ -717,6 +727,13 @@ class CoreGroupSync(CoreNodeSync): children: RelationshipManagerSync +class CoreKeyValueSync(CoreNodeSync): + name: String + key: String + description: StringOptional + value: String + + class CoreMenuSync(CoreNodeSync): namespace: String name: String @@ -762,10 +779,8 @@ class CoreTaskTargetSync(CoreNodeSync): class CoreThreadSync(CoreNodeSync): label: StringOptional resolved: Boolean - created_at: DateTimeOptional change: RelatedNodeSync comments: RelationshipManagerSync - created_by: RelatedNodeSync class CoreTransformationSync(CoreNodeSync): @@ -805,6 +820,7 @@ class CoreWebhookSync(CoreNodeSync): description: StringOptional url: URL validate_certificates: BooleanOptional + headers: RelationshipManagerSync class 
CoreWeightedPoolResourceSync(CoreNodeSync): @@ -913,6 +929,10 @@ class CoreDataValidatorSync(CoreValidatorSync): pass +class CoreEnvKeyValueSync(CoreKeyValueSync): + pass + + class CoreFileCheckSync(CoreCheckSync): files: ListAttributeOptional commit: StringOptional @@ -1117,6 +1137,10 @@ class CoreStandardWebhookSync(CoreWebhookSync, CoreTaskTargetSync): shared_key: String +class CoreStaticKeyValueSync(CoreKeyValueSync): + pass + + class CoreThreadCommentSync(CoreCommentSync): thread: RelatedNodeSync diff --git a/infrahub_sdk/protocols_generator/generator.py b/infrahub_sdk/protocols_generator/generator.py index 14c8438f..16439838 100644 --- a/infrahub_sdk/protocols_generator/generator.py +++ b/infrahub_sdk/protocols_generator/generator.py @@ -14,6 +14,7 @@ NodeSchema, NodeSchemaAPI, ProfileSchemaAPI, + RelationshipCardinality, RelationshipSchemaAPI, TemplateSchemaAPI, ) @@ -128,7 +129,7 @@ def _jinja2_filter_render_relationship(value: RelationshipSchemaAPI, sync: bool cardinality = value.cardinality type_ = "RelatedNode" - if cardinality == "many": + if cardinality == RelationshipCardinality.MANY: type_ = "RelationshipManager" if sync: diff --git a/infrahub_sdk/schema/__init__.py b/infrahub_sdk/schema/__init__.py index 557e76f3..f4a67b1d 100644 --- a/infrahub_sdk/schema/__init__.py +++ b/infrahub_sdk/schema/__init__.py @@ -217,10 +217,10 @@ def generate_payload_create( elif key in schema.relationship_names: rel = schema.get_relationship(name=key) if rel: - if rel.cardinality == "one": + if rel.cardinality == RelationshipCardinality.ONE: obj_data[key] = {"id": str(value)} obj_data[key].update(item_metadata) - elif rel.cardinality == "many": + elif rel.cardinality == RelationshipCardinality.MANY: obj_data[key] = [{"id": str(item)} for item in value] for item in obj_data[key]: item.update(item_metadata) diff --git a/infrahub_sdk/schema/main.py b/infrahub_sdk/schema/main.py index 34a35177..849ffca5 100644 --- a/infrahub_sdk/schema/main.py +++ 
b/infrahub_sdk/schema/main.py @@ -150,6 +150,14 @@ class RelationshipSchemaAPI(RelationshipSchema): hierarchical: str | None = None allow_override: AllowOverrideType = AllowOverrideType.ANY + @property + def cardinality_is_one(self) -> bool: + return self.cardinality == RelationshipCardinality.ONE + + @property + def cardinality_is_many(self) -> bool: + return self.cardinality == RelationshipCardinality.MANY + class BaseSchemaAttrRel(BaseModel): attributes: list[AttributeSchema] = Field(default_factory=list) @@ -279,6 +287,36 @@ class BaseSchema(BaseModel): def kind(self) -> str: return self.namespace + self.name + @property + def supports_artifact_definition(self) -> bool: + """Returns True if this schema represents CoreArtifactDefinition. Only meaningful for NodeSchemaAPI.""" + return self.kind == "CoreArtifactDefinition" + + @property + def supports_artifacts(self) -> bool: + """Returns True if this schema supports artifact operations via CoreArtifactTarget inheritance. + Only NodeSchemaAPI overrides this; all other schema types return False by design because + artifact capability is tied to node inheritance, not profiles, templates, or generics.""" + return False + + @property + def supports_file_object(self) -> bool: + """Returns True if this schema supports file object operations via CoreFileObject inheritance. + Only NodeSchemaAPI overrides this; all other schema types return False by design because + file object capability is tied to node inheritance, not profiles, templates, or generics.""" + return False + + @property + def supports_hierarchy(self) -> bool: + """Returns True if this schema participates in a hierarchy. Only NodeSchemaAPI overrides this.""" + return False + + @property + def hierarchical_relationship_schemas(self) -> list[RelationshipSchemaAPI]: + """Returns pseudo-schemas for parent/children/ancestors/descendants if hierarchy is set. 
+ Only NodeSchemaAPI overrides this; all other schema types return an empty list.""" + return [] + class GenericSchema(BaseSchema, BaseSchemaAttrRel): def convert_api(self) -> GenericSchemaAPI: @@ -289,7 +327,9 @@ class GenericSchemaAPI(BaseSchema, BaseSchemaAttrRelAPI): """A Generic can be either an Interface or a Union depending if there are some Attributes or Relationships defined.""" hash: str | None = None + hierarchical: bool | None = None used_by: list[str] = Field(default_factory=list) + restricted_namespaces: list[str] | None = None class BaseNodeSchema(BaseSchema): @@ -313,6 +353,37 @@ class NodeSchemaAPI(BaseNodeSchema, BaseSchemaAttrRelAPI): hash: str | None = None hierarchy: str | None = None + @property + def supports_artifacts(self) -> bool: + return "CoreArtifactTarget" in self.inherit_from + + @property + def supports_file_object(self) -> bool: + return "CoreFileObject" in self.inherit_from + + @property + def supports_hierarchy(self) -> bool: + return self.hierarchy is not None + + @property + def hierarchical_relationship_schemas(self) -> list[RelationshipSchemaAPI]: + if self.hierarchy is None: + return [] + return [ + RelationshipSchemaAPI( + name="parent", peer=self.hierarchy, kind=RelationshipKind.HIERARCHY, cardinality="one", optional=True + ), + RelationshipSchemaAPI( + name="children", peer=self.hierarchy, kind=RelationshipKind.HIERARCHY, cardinality="many", optional=True + ), + RelationshipSchemaAPI( + name="ancestors", peer=self.hierarchy, cardinality="many", read_only=True, optional=True + ), + RelationshipSchemaAPI( + name="descendants", peer=self.hierarchy, cardinality="many", read_only=True, optional=True + ), + ] + class ProfileSchemaAPI(BaseSchema, BaseSchemaAttrRelAPI): inherit_from: list[str] = Field(default_factory=list) diff --git a/infrahub_sdk/schema/repository.py b/infrahub_sdk/schema/repository.py index 0ecdbe35..91354459 100644 --- a/infrahub_sdk/schema/repository.py +++ b/infrahub_sdk/schema/repository.py @@ -1,13 +1,14 
@@ from __future__ import annotations from pathlib import Path -from typing import TYPE_CHECKING, Any, TypeVar +from typing import TYPE_CHECKING, Any from pydantic import BaseModel, ConfigDict, Field, field_validator from .._importer import import_module from ..checks import InfrahubCheck from ..exceptions import ( + FragmentFileNotFoundError, ModuleImportError, ResourceNotDefinedError, ) @@ -20,8 +21,6 @@ InfrahubNodeTypes = InfrahubNode | InfrahubNodeSync -ResourceClass = TypeVar("ResourceClass") - class InfrahubRepositoryConfigElement(BaseModel): """Class to regroup all elements of the Infrahub configuration for a repository for typing purpose.""" @@ -161,6 +160,28 @@ def load_query(self, relative_path: str = ".") -> str: return file_name.read_text(encoding="UTF-8") +class InfrahubRepositoryFragmentConfig(InfrahubRepositoryConfigElement): + model_config = ConfigDict(extra="forbid") + name: str = Field(..., description="Logical name for this fragment file or directory") + file_path: Path = Field( + ..., description="Path to a .gql fragment file or a directory of .gql files, relative to repo root" + ) + + def load_fragments(self, relative_path: str = ".") -> list[str]: + """Return raw content of all fragment files at file_path. + + If file_path is a .gql file, returns a single-element list. + If file_path is a directory, returns one entry per .gql file found (sorted alphabetically). + Raises FragmentFileNotFoundError if file_path does not exist. 
+ """ + resolved = Path(f"{relative_path}/{self.file_path}") + if not resolved.exists(): + raise FragmentFileNotFoundError(file_path=str(self.file_path)) + if resolved.is_dir(): + return [f.read_text(encoding="UTF-8") for f in sorted(resolved.glob("*.gql"))] + return [resolved.read_text(encoding="UTF-8")] + + class InfrahubObjectConfig(InfrahubRepositoryConfigElement): model_config = ConfigDict(extra="forbid") name: str = Field(..., description="The name associated to the object file") @@ -173,18 +194,6 @@ class InfrahubMenuConfig(InfrahubRepositoryConfigElement): file_path: Path = Field(..., description="The file within the repository containing menu data.") -RESOURCE_MAP: dict[Any, str] = { - InfrahubJinja2TransformConfig: "jinja2_transforms", - InfrahubCheckDefinitionConfig: "check_definitions", - InfrahubRepositoryArtifactDefinitionConfig: "artifact_definitions", - InfrahubPythonTransformConfig: "python_transforms", - InfrahubGeneratorDefinitionConfig: "generator_definitions", - InfrahubRepositoryGraphQLConfig: "queries", - InfrahubObjectConfig: "objects", - InfrahubMenuConfig: "menus", -} - - class InfrahubRepositoryConfig(BaseModel): model_config = ConfigDict(extra="forbid") check_definitions: list[InfrahubCheckDefinitionConfig] = Field( @@ -204,6 +213,9 @@ class InfrahubRepositoryConfig(BaseModel): default_factory=list, description="Generator definitions" ) queries: list[InfrahubRepositoryGraphQLConfig] = Field(default_factory=list, description="GraphQL Queries") + graphql_fragments: list[InfrahubRepositoryFragmentConfig] = Field( + default_factory=list, description="GraphQL fragment files declared for this repository" + ) objects: list[Path] = Field(default_factory=list, description="Objects") menus: list[Path] = Field(default_factory=list, description="Menus") @@ -214,6 +226,7 @@ class InfrahubRepositoryConfig(BaseModel): "python_transforms", "generator_definitions", "queries", + "graphql_fragments", ) @classmethod def unique_items(cls, v: list[Any]) -> 
list[Any]: @@ -222,49 +235,65 @@ def unique_items(cls, v: list[Any]) -> list[Any]: raise ValueError(f"Found multiples element with the same names: {dups}") return v - def _has_resource(self, resource_id: str, resource_type: type[ResourceClass], resource_field: str = "name") -> bool: - return any(getattr(item, resource_field) == resource_id for item in getattr(self, RESOURCE_MAP[resource_type])) - - def _get_resource( - self, resource_id: str, resource_type: type[ResourceClass], resource_field: str = "name" - ) -> ResourceClass: - for item in getattr(self, RESOURCE_MAP[resource_type]): - if getattr(item, resource_field) == resource_id: - return item - raise ResourceNotDefinedError(f"Unable to find {resource_id!r} in {RESOURCE_MAP[resource_type]!r}") - def has_jinja2_transform(self, name: str) -> bool: - return self._has_resource(resource_id=name, resource_type=InfrahubJinja2TransformConfig) + return any(item.name == name for item in self.jinja2_transforms) def get_jinja2_transform(self, name: str) -> InfrahubJinja2TransformConfig: - return self._get_resource(resource_id=name, resource_type=InfrahubJinja2TransformConfig) + for item in self.jinja2_transforms: + if item.name == name: + return item + raise ResourceNotDefinedError(f"Unable to find {name!r} in 'jinja2_transforms'") def has_check_definition(self, name: str) -> bool: - return self._has_resource(resource_id=name, resource_type=InfrahubCheckDefinitionConfig) + return any(item.name == name for item in self.check_definitions) def get_check_definition(self, name: str) -> InfrahubCheckDefinitionConfig: - return self._get_resource(resource_id=name, resource_type=InfrahubCheckDefinitionConfig) + for item in self.check_definitions: + if item.name == name: + return item + raise ResourceNotDefinedError(f"Unable to find {name!r} in 'check_definitions'") def has_artifact_definition(self, name: str) -> bool: - return self._has_resource(resource_id=name, resource_type=InfrahubRepositoryArtifactDefinitionConfig) + return 
any(item.name == name for item in self.artifact_definitions) def get_artifact_definition(self, name: str) -> InfrahubRepositoryArtifactDefinitionConfig: - return self._get_resource(resource_id=name, resource_type=InfrahubRepositoryArtifactDefinitionConfig) + for item in self.artifact_definitions: + if item.name == name: + return item + raise ResourceNotDefinedError(f"Unable to find {name!r} in 'artifact_definitions'") def has_generator_definition(self, name: str) -> bool: - return self._has_resource(resource_id=name, resource_type=InfrahubGeneratorDefinitionConfig) + return any(item.name == name for item in self.generator_definitions) def get_generator_definition(self, name: str) -> InfrahubGeneratorDefinitionConfig: - return self._get_resource(resource_id=name, resource_type=InfrahubGeneratorDefinitionConfig) + for item in self.generator_definitions: + if item.name == name: + return item + raise ResourceNotDefinedError(f"Unable to find {name!r} in 'generator_definitions'") def has_python_transform(self, name: str) -> bool: - return self._has_resource(resource_id=name, resource_type=InfrahubPythonTransformConfig) + return any(item.name == name for item in self.python_transforms) def get_python_transform(self, name: str) -> InfrahubPythonTransformConfig: - return self._get_resource(resource_id=name, resource_type=InfrahubPythonTransformConfig) + for item in self.python_transforms: + if item.name == name: + return item + raise ResourceNotDefinedError(f"Unable to find {name!r} in 'python_transforms'") def has_query(self, name: str) -> bool: - return self._has_resource(resource_id=name, resource_type=InfrahubRepositoryGraphQLConfig) + return any(item.name == name for item in self.queries) def get_query(self, name: str) -> InfrahubRepositoryGraphQLConfig: - return self._get_resource(resource_id=name, resource_type=InfrahubRepositoryGraphQLConfig) + for item in self.queries: + if item.name == name: + return item + raise ResourceNotDefinedError(f"Unable to find {name!r} 
in 'queries'") + + def has_fragment(self, name: str) -> bool: + return any(item.name == name for item in self.graphql_fragments) + + def get_fragment(self, name: str) -> InfrahubRepositoryFragmentConfig: + for item in self.graphql_fragments: + if item.name == name: + return item + raise ResourceNotDefinedError(f"Unable to find {name!r} in 'graphql_fragments'") diff --git a/infrahub_sdk/spec/object.py b/infrahub_sdk/spec/object.py index 4eb2b882..a4d6b79b 100644 --- a/infrahub_sdk/spec/object.py +++ b/infrahub_sdk/spec/object.py @@ -7,7 +7,7 @@ from pydantic import BaseModel, Field from ..exceptions import ObjectValidationError, ValidationError -from ..schema import GenericSchemaAPI, RelationshipKind, RelationshipSchema +from ..schema import GenericSchemaAPI, RelationshipCardinality, RelationshipKind, RelationshipSchema from ..utils import is_valid_uuid from ..yaml import InfrahubFile, InfrahubFileKind from .models import InfrahubObjectParameters @@ -102,7 +102,10 @@ def is_mandatory(self) -> bool: # For hierarchical node, currently the relationship to the parent is always optional in the schema even if it's mandatory # In order to build the tree from top to bottom, we need to consider it as mandatory # While it should technically work bottom-up, it created some unexpected behavior while loading the menu - if self.peer_rel.cardinality == "one" and self.peer_rel.kind == RelationshipKind.HIERARCHY: + if ( + self.peer_rel.cardinality == RelationshipCardinality.ONE + and self.peer_rel.kind == RelationshipKind.HIERARCHY + ): return True return not self.peer_rel.optional @@ -116,9 +119,9 @@ def is_reference(self) -> bool: def get_context(self, value: Any) -> dict: """Return a dict to insert to the context if the relationship is mandatory""" - if self.peer_rel and self.is_mandatory and self.peer_rel.cardinality == "one": + if self.peer_rel and self.is_mandatory and self.peer_rel.cardinality == RelationshipCardinality.ONE: return {self.peer_rel.name: value} - if 
self.peer_rel and self.is_mandatory and self.peer_rel.cardinality == "many": + if self.peer_rel and self.is_mandatory and self.peer_rel.cardinality == RelationshipCardinality.MANY: return {self.peer_rel.name: [value]} return {} @@ -162,21 +165,21 @@ async def get_relationship_info( id=rel_schema.identifier or "", direction=rel_schema.direction ) - if rel_schema.cardinality == "one" and isinstance(value, list): + if rel_schema.cardinality == RelationshipCardinality.ONE and isinstance(value, list): # validate the list is composed of string if validate_list_of_scalars(value): info.format = RelationshipDataFormat.ONE_REF else: info.reason_relationship_not_valid = "Too many objects provided for a relationship of cardinality one" - elif rel_schema.cardinality == "one" and isinstance(value, str): + elif rel_schema.cardinality == RelationshipCardinality.ONE and isinstance(value, str): info.format = RelationshipDataFormat.ONE_REF - elif rel_schema.cardinality == "one" and isinstance(value, dict) and "data" in value: + elif rel_schema.cardinality == RelationshipCardinality.ONE and isinstance(value, dict) and "data" in value: info.format = RelationshipDataFormat.ONE_OBJ elif ( - rel_schema.cardinality == "many" + rel_schema.cardinality == RelationshipCardinality.MANY and isinstance(value, dict) and "data" in value and validate_list_of_objects(value["data"]) @@ -185,11 +188,11 @@ async def get_relationship_info( # it's helpful if there is only one type of object to manage info.format = RelationshipDataFormat.MANY_OBJ_DICT_LIST - elif rel_schema.cardinality == "many" and isinstance(value, dict) and "data" not in value: + elif rel_schema.cardinality == RelationshipCardinality.MANY and isinstance(value, dict) and "data" not in value: info.reason_relationship_not_valid = "Invalid structure for a relationship of cardinality many," " either provide a dict with data as a list or a list of objects" - elif rel_schema.cardinality == "many" and isinstance(value, list): + elif 
rel_schema.cardinality == RelationshipCardinality.MANY and isinstance(value, list): if validate_list_of_data_dicts(value): info.format = RelationshipDataFormat.MANY_OBJ_LIST_DICT elif validate_list_of_hfids(value): diff --git a/infrahub_sdk/template/__init__.py b/infrahub_sdk/template/__init__.py index 6a7f7fe2..d46be687 100644 --- a/infrahub_sdk/template/__init__.py +++ b/infrahub_sdk/template/__init__.py @@ -3,7 +3,7 @@ import linecache from collections.abc import Callable from pathlib import Path -from typing import Any, NoReturn +from typing import TYPE_CHECKING, Any, NoReturn import jinja2 from jinja2 import meta, nodes @@ -19,9 +19,13 @@ JinjaTemplateSyntaxError, JinjaTemplateUndefinedError, ) -from .filters import AVAILABLE_FILTERS +from .filters import AVAILABLE_FILTERS, ExecutionContext +from .infrahub_filters import InfrahubFilters, from_json, from_yaml from .models import UndefinedJinja2Error +if TYPE_CHECKING: + from infrahub_sdk.client import InfrahubClient + netutils_filters = jinja2_convenience_function() @@ -31,6 +35,7 @@ def __init__( template: str | Path, template_directory: Path | None = None, filters: dict[str, Callable] | None = None, + client: InfrahubClient | None = None, ) -> None: self.is_string_based = isinstance(template, str) self.is_file_based = isinstance(template, Path) @@ -39,17 +44,35 @@ def __init__( self._environment: jinja2.Environment | None = None self._available_filters = [filter_definition.name for filter_definition in AVAILABLE_FILTERS] - self._trusted_filters = [ - filter_definition.name for filter_definition in AVAILABLE_FILTERS if filter_definition.trusted - ] self._filters = filters or {} + self._user_filter_names: set[str] = set(self._filters.keys()) for user_filter in self._filters: self._available_filters.append(user_filter) - self._trusted_filters.append(user_filter) + + self._register_client_filters(client=client) + self._register_filter("from_json", from_json) + self._register_filter("from_yaml", from_yaml) 
self._template_definition: jinja2.Template | None = None + def set_client(self, client: InfrahubClient) -> None: + """Set or replace the InfrahubClient used by client-dependent filters.""" + self._infrahub_filters.set_client(client=client) + if self._environment: + for name in InfrahubFilters.get_filter_names(): + self._environment.filters[name] = self._filters[name] + + def _register_filter(self, name: str, func: Callable) -> None: + self._filters[name] = func + if name not in self._available_filters: + self._available_filters.append(name) + + def _register_client_filters(self, client: InfrahubClient | None) -> None: + self._infrahub_filters = InfrahubFilters(client=client) + for name in InfrahubFilters.get_filter_names(): + self._register_filter(name, getattr(self._infrahub_filters, name)) + def get_environment(self) -> jinja2.Environment: if self._environment: return self._environment @@ -86,10 +109,16 @@ def get_variables(self) -> list[str]: return sorted(meta.find_undeclared_variables(template)) - def validate(self, restricted: bool = True) -> None: - allowed_list = self._available_filters - if restricted: - allowed_list = self._trusted_filters + def validate(self, restricted: bool = True, context: ExecutionContext | None = None) -> None: + effective_context = ( + context if context is not None else ExecutionContext.CORE if restricted else ExecutionContext.LOCAL + ) + + allowed_list = [fd.name for fd in AVAILABLE_FILTERS if fd.allowed_contexts & effective_context] + # User-supplied filters are always allowed (but not SDK-injected ones) + for user_filter in self._user_filter_names: + if user_filter not in allowed_list: + allowed_list.append(user_filter) env = self.get_environment() template_source = self._template diff --git a/infrahub_sdk/template/exceptions.py b/infrahub_sdk/template/exceptions.py index 6ef60b43..1b45c7cb 100644 --- a/infrahub_sdk/template/exceptions.py +++ b/infrahub_sdk/template/exceptions.py @@ -39,3 +39,13 @@ def __init__(self, message: 
str | None, errors: list[UndefinedJinja2Error]) -> N class JinjaTemplateOperationViolationError(JinjaTemplateError): def __init__(self, message: str | None = None) -> None: self.message = message or "Forbidden code found in the template" + + +class JinjaFilterError(JinjaTemplateError): + def __init__(self, filter_name: str, message: str, hint: str | None = None) -> None: + self.filter_name = filter_name + self.hint = hint + full_message = f"Filter '{filter_name}': {message}" + if hint is not None: + full_message += f" — {hint}" + super().__init__(full_message) diff --git a/infrahub_sdk/template/filters.py b/infrahub_sdk/template/filters.py index 1d082b39..622cffd1 100644 --- a/infrahub_sdk/template/filters.py +++ b/infrahub_sdk/template/filters.py @@ -1,151 +1,184 @@ from dataclasses import dataclass +from enum import Flag, auto + + +class ExecutionContext(Flag): + CORE = auto() + WORKER = auto() + LOCAL = auto() + ALL = CORE | WORKER | LOCAL @dataclass class FilterDefinition: name: str - trusted: bool + allowed_contexts: ExecutionContext source: str + @property + def trusted(self) -> bool: + """Backward compatibility: trusted means allowed in all contexts.""" + return self.allowed_contexts == ExecutionContext.ALL + BUILTIN_FILTERS = [ - FilterDefinition(name="abs", trusted=True, source="jinja2"), - FilterDefinition(name="attr", trusted=False, source="jinja2"), - FilterDefinition(name="batch", trusted=False, source="jinja2"), - FilterDefinition(name="capitalize", trusted=True, source="jinja2"), - FilterDefinition(name="center", trusted=True, source="jinja2"), - FilterDefinition(name="count", trusted=True, source="jinja2"), - FilterDefinition(name="d", trusted=True, source="jinja2"), - FilterDefinition(name="default", trusted=True, source="jinja2"), - FilterDefinition(name="dictsort", trusted=False, source="jinja2"), - FilterDefinition(name="e", trusted=True, source="jinja2"), - FilterDefinition(name="escape", trusted=True, source="jinja2"), - 
FilterDefinition(name="filesizeformat", trusted=True, source="jinja2"), - FilterDefinition(name="first", trusted=True, source="jinja2"), - FilterDefinition(name="float", trusted=True, source="jinja2"), - FilterDefinition(name="forceescape", trusted=True, source="jinja2"), - FilterDefinition(name="format", trusted=True, source="jinja2"), - FilterDefinition(name="groupby", trusted=False, source="jinja2"), - FilterDefinition(name="indent", trusted=True, source="jinja2"), - FilterDefinition(name="int", trusted=True, source="jinja2"), - FilterDefinition(name="items", trusted=False, source="jinja2"), - FilterDefinition(name="join", trusted=True, source="jinja2"), - FilterDefinition(name="last", trusted=True, source="jinja2"), - FilterDefinition(name="length", trusted=True, source="jinja2"), - FilterDefinition(name="list", trusted=True, source="jinja2"), - FilterDefinition(name="lower", trusted=True, source="jinja2"), - FilterDefinition(name="map", trusted=False, source="jinja2"), - FilterDefinition(name="max", trusted=True, source="jinja2"), - FilterDefinition(name="min", trusted=True, source="jinja2"), - FilterDefinition(name="pprint", trusted=False, source="jinja2"), - FilterDefinition(name="random", trusted=False, source="jinja2"), - FilterDefinition(name="reject", trusted=False, source="jinja2"), - FilterDefinition(name="rejectattr", trusted=False, source="jinja2"), - FilterDefinition(name="replace", trusted=True, source="jinja2"), - FilterDefinition(name="reverse", trusted=True, source="jinja2"), - FilterDefinition(name="round", trusted=True, source="jinja2"), - FilterDefinition(name="safe", trusted=False, source="jinja2"), - FilterDefinition(name="select", trusted=False, source="jinja2"), - FilterDefinition(name="selectattr", trusted=False, source="jinja2"), - FilterDefinition(name="slice", trusted=True, source="jinja2"), - FilterDefinition(name="sort", trusted=False, source="jinja2"), - FilterDefinition(name="string", trusted=True, source="jinja2"), - 
FilterDefinition(name="striptags", trusted=True, source="jinja2"), - FilterDefinition(name="sum", trusted=True, source="jinja2"), - FilterDefinition(name="title", trusted=True, source="jinja2"), - FilterDefinition(name="tojson", trusted=False, source="jinja2"), - FilterDefinition(name="trim", trusted=True, source="jinja2"), - FilterDefinition(name="truncate", trusted=True, source="jinja2"), - FilterDefinition(name="unique", trusted=False, source="jinja2"), - FilterDefinition(name="upper", trusted=True, source="jinja2"), - FilterDefinition(name="urlencode", trusted=True, source="jinja2"), - FilterDefinition(name="urlize", trusted=False, source="jinja2"), - FilterDefinition(name="wordcount", trusted=True, source="jinja2"), - FilterDefinition(name="wordwrap", trusted=True, source="jinja2"), - FilterDefinition(name="xmlattr", trusted=False, source="jinja2"), + FilterDefinition(name="abs", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="attr", allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL, source="jinja2"), + FilterDefinition(name="batch", allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL, source="jinja2"), + FilterDefinition(name="capitalize", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="center", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="count", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="d", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="default", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition( + name="dictsort", allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL, source="jinja2" + ), + FilterDefinition(name="e", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="escape", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="filesizeformat", 
allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="first", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="float", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="forceescape", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="format", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition( + name="groupby", allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL, source="jinja2" + ), + FilterDefinition(name="indent", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="int", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="items", allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL, source="jinja2"), + FilterDefinition(name="join", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="last", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="length", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="list", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="lower", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="map", allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL, source="jinja2"), + FilterDefinition(name="max", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="min", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="pprint", allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL, source="jinja2"), + FilterDefinition(name="random", allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL, source="jinja2"), + FilterDefinition(name="reject", allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL, source="jinja2"), + FilterDefinition( + name="rejectattr", 
allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL, source="jinja2" + ), + FilterDefinition(name="replace", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="reverse", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="round", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="safe", allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL, source="jinja2"), + FilterDefinition(name="select", allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL, source="jinja2"), + FilterDefinition( + name="selectattr", allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL, source="jinja2" + ), + FilterDefinition(name="slice", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="sort", allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL, source="jinja2"), + FilterDefinition(name="string", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="striptags", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="sum", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="title", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="tojson", allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL, source="jinja2"), + FilterDefinition(name="trim", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="truncate", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="unique", allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL, source="jinja2"), + FilterDefinition(name="upper", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="urlencode", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="urlize", allowed_contexts=ExecutionContext.WORKER | 
ExecutionContext.LOCAL, source="jinja2"), + FilterDefinition(name="wordcount", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition(name="wordwrap", allowed_contexts=ExecutionContext.ALL, source="jinja2"), + FilterDefinition( + name="xmlattr", allowed_contexts=ExecutionContext.WORKER | ExecutionContext.LOCAL, source="jinja2" + ), ] NETUTILS_FILTERS = [ - FilterDefinition(name="abbreviated_interface_name", trusted=True, source="netutils"), - FilterDefinition(name="abbreviated_interface_name_list", trusted=True, source="netutils"), - FilterDefinition(name="asn_to_int", trusted=True, source="netutils"), - FilterDefinition(name="bits_to_name", trusted=True, source="netutils"), - FilterDefinition(name="bytes_to_name", trusted=True, source="netutils"), - FilterDefinition(name="canonical_interface_name", trusted=True, source="netutils"), - FilterDefinition(name="canonical_interface_name_list", trusted=True, source="netutils"), - FilterDefinition(name="cidr_to_netmask", trusted=True, source="netutils"), - FilterDefinition(name="cidr_to_netmaskv6", trusted=True, source="netutils"), - FilterDefinition(name="clean_config", trusted=True, source="netutils"), - FilterDefinition(name="compare_version_loose", trusted=True, source="netutils"), - FilterDefinition(name="compare_version_strict", trusted=True, source="netutils"), - FilterDefinition(name="config_compliance", trusted=True, source="netutils"), - FilterDefinition(name="config_section_not_parsed", trusted=True, source="netutils"), - FilterDefinition(name="delimiter_change", trusted=True, source="netutils"), - FilterDefinition(name="diff_network_config", trusted=True, source="netutils"), - FilterDefinition(name="feature_compliance", trusted=True, source="netutils"), - FilterDefinition(name="find_unordered_cfg_lines", trusted=True, source="netutils"), - FilterDefinition(name="fqdn_to_ip", trusted=False, source="netutils"), - FilterDefinition(name="get_all_host", trusted=False, source="netutils"), - 
FilterDefinition(name="get_broadcast_address", trusted=True, source="netutils"), - FilterDefinition(name="get_first_usable", trusted=True, source="netutils"), - FilterDefinition(name="get_ips_sorted", trusted=True, source="netutils"), - FilterDefinition(name="get_nist_urls", trusted=True, source="netutils"), - FilterDefinition(name="get_nist_vendor_platform_urls", trusted=True, source="netutils"), - FilterDefinition(name="get_oui", trusted=True, source="netutils"), - FilterDefinition(name="get_peer_ip", trusted=True, source="netutils"), - FilterDefinition(name="get_range_ips", trusted=True, source="netutils"), - FilterDefinition(name="get_upgrade_path", trusted=True, source="netutils"), - FilterDefinition(name="get_usable_range", trusted=True, source="netutils"), - FilterDefinition(name="hash_data", trusted=True, source="netutils"), - FilterDefinition(name="int_to_asdot", trusted=True, source="netutils"), - FilterDefinition(name="interface_range_compress", trusted=True, source="netutils"), - FilterDefinition(name="interface_range_expansion", trusted=True, source="netutils"), - FilterDefinition(name="ip_addition", trusted=True, source="netutils"), - FilterDefinition(name="ip_subtract", trusted=True, source="netutils"), - FilterDefinition(name="ip_to_bin", trusted=True, source="netutils"), - FilterDefinition(name="ip_to_hex", trusted=True, source="netutils"), - FilterDefinition(name="ipaddress_address", trusted=True, source="netutils"), - FilterDefinition(name="ipaddress_interface", trusted=True, source="netutils"), - FilterDefinition(name="ipaddress_network", trusted=True, source="netutils"), - FilterDefinition(name="is_classful", trusted=True, source="netutils"), - FilterDefinition(name="is_fqdn_resolvable", trusted=False, source="netutils"), - FilterDefinition(name="is_ip", trusted=True, source="netutils"), - FilterDefinition(name="is_ip_range", trusted=True, source="netutils"), - FilterDefinition(name="is_ip_within", trusted=True, source="netutils"), - 
FilterDefinition(name="is_netmask", trusted=True, source="netutils"), - FilterDefinition(name="is_network", trusted=True, source="netutils"), - FilterDefinition(name="is_reversible_wildcardmask", trusted=True, source="netutils"), - FilterDefinition(name="is_valid_mac", trusted=True, source="netutils"), - FilterDefinition(name="longest_prefix_match", trusted=True, source="netutils"), - FilterDefinition(name="mac_normalize", trusted=True, source="netutils"), - FilterDefinition(name="mac_to_format", trusted=True, source="netutils"), - FilterDefinition(name="mac_to_int", trusted=True, source="netutils"), - FilterDefinition(name="mac_type", trusted=True, source="netutils"), - FilterDefinition(name="name_to_bits", trusted=True, source="netutils"), - FilterDefinition(name="name_to_bytes", trusted=True, source="netutils"), - FilterDefinition(name="name_to_name", trusted=True, source="netutils"), - FilterDefinition(name="netmask_to_cidr", trusted=True, source="netutils"), - FilterDefinition(name="netmask_to_wildcardmask", trusted=True, source="netutils"), - FilterDefinition(name="normalise_delimiter_caret_c", trusted=True, source="netutils"), - FilterDefinition(name="paloalto_panos_brace_to_set", trusted=True, source="netutils"), - FilterDefinition(name="paloalto_panos_clean_newlines", trusted=True, source="netutils"), - FilterDefinition(name="regex_findall", trusted=False, source="netutils"), - FilterDefinition(name="regex_match", trusted=False, source="netutils"), - FilterDefinition(name="regex_search", trusted=False, source="netutils"), - FilterDefinition(name="regex_split", trusted=False, source="netutils"), - FilterDefinition(name="regex_sub", trusted=False, source="netutils"), - FilterDefinition(name="sanitize_config", trusted=True, source="netutils"), - FilterDefinition(name="section_config", trusted=True, source="netutils"), - FilterDefinition(name="sort_interface_list", trusted=True, source="netutils"), - FilterDefinition(name="split_interface", trusted=True, 
source="netutils"), - FilterDefinition(name="uptime_seconds_to_string", trusted=True, source="netutils"), - FilterDefinition(name="uptime_string_to_seconds", trusted=True, source="netutils"), - FilterDefinition(name="version_metadata", trusted=True, source="netutils"), - FilterDefinition(name="vlanconfig_to_list", trusted=True, source="netutils"), - FilterDefinition(name="vlanlist_to_config", trusted=True, source="netutils"), - FilterDefinition(name="wildcardmask_to_netmask", trusted=True, source="netutils"), + FilterDefinition(name="abbreviated_interface_name", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="abbreviated_interface_name_list", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="asn_to_int", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="bits_to_name", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="bytes_to_name", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="canonical_interface_name", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="canonical_interface_name_list", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="cidr_to_netmask", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="cidr_to_netmaskv6", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="clean_config", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="compare_version_loose", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="compare_version_strict", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="config_compliance", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="config_section_not_parsed", allowed_contexts=ExecutionContext.ALL, 
source="netutils"), + FilterDefinition(name="delimiter_change", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="diff_network_config", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="feature_compliance", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="find_unordered_cfg_lines", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="fqdn_to_ip", allowed_contexts=ExecutionContext.LOCAL, source="netutils"), + FilterDefinition(name="get_all_host", allowed_contexts=ExecutionContext.LOCAL, source="netutils"), + FilterDefinition(name="get_broadcast_address", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="get_first_usable", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="get_ips_sorted", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="get_nist_urls", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="get_nist_vendor_platform_urls", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="get_oui", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="get_peer_ip", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="get_range_ips", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="get_upgrade_path", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="get_usable_range", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="hash_data", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="int_to_asdot", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="interface_range_compress", allowed_contexts=ExecutionContext.ALL, source="netutils"), + 
FilterDefinition(name="interface_range_expansion", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="ip_addition", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="ip_subtract", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="ip_to_bin", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="ip_to_hex", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="ipaddress_address", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="ipaddress_interface", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="ipaddress_network", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="is_classful", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="is_fqdn_resolvable", allowed_contexts=ExecutionContext.LOCAL, source="netutils"), + FilterDefinition(name="is_ip", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="is_ip_range", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="is_ip_within", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="is_netmask", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="is_network", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="is_reversible_wildcardmask", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="is_valid_mac", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="longest_prefix_match", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="mac_normalize", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="mac_to_format", allowed_contexts=ExecutionContext.ALL, 
source="netutils"), + FilterDefinition(name="mac_to_int", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="mac_type", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="name_to_bits", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="name_to_bytes", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="name_to_name", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="netmask_to_cidr", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="netmask_to_wildcardmask", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="normalise_delimiter_caret_c", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="paloalto_panos_brace_to_set", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="paloalto_panos_clean_newlines", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="regex_findall", allowed_contexts=ExecutionContext.LOCAL, source="netutils"), + FilterDefinition(name="regex_match", allowed_contexts=ExecutionContext.LOCAL, source="netutils"), + FilterDefinition(name="regex_search", allowed_contexts=ExecutionContext.LOCAL, source="netutils"), + FilterDefinition(name="regex_split", allowed_contexts=ExecutionContext.LOCAL, source="netutils"), + FilterDefinition(name="regex_sub", allowed_contexts=ExecutionContext.LOCAL, source="netutils"), + FilterDefinition(name="sanitize_config", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="section_config", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="sort_interface_list", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="split_interface", allowed_contexts=ExecutionContext.ALL, source="netutils"), + 
FilterDefinition(name="uptime_seconds_to_string", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="uptime_string_to_seconds", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="version_metadata", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="vlanconfig_to_list", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="vlanlist_to_config", allowed_contexts=ExecutionContext.ALL, source="netutils"), + FilterDefinition(name="wildcardmask_to_netmask", allowed_contexts=ExecutionContext.ALL, source="netutils"), +] + + +INFRAHUB_FILTERS = [ + FilterDefinition(name="artifact_content", allowed_contexts=ExecutionContext.WORKER, source="infrahub"), + FilterDefinition(name="file_object_content", allowed_contexts=ExecutionContext.WORKER, source="infrahub"), + FilterDefinition(name="file_object_content_by_hfid", allowed_contexts=ExecutionContext.WORKER, source="infrahub"), + FilterDefinition(name="file_object_content_by_id", allowed_contexts=ExecutionContext.WORKER, source="infrahub"), + FilterDefinition(name="from_json", allowed_contexts=ExecutionContext.ALL, source="infrahub"), + FilterDefinition(name="from_yaml", allowed_contexts=ExecutionContext.ALL, source="infrahub"), ] -AVAILABLE_FILTERS = BUILTIN_FILTERS + NETUTILS_FILTERS +AVAILABLE_FILTERS = BUILTIN_FILTERS + NETUTILS_FILTERS + INFRAHUB_FILTERS diff --git a/infrahub_sdk/template/infrahub_filters.py b/infrahub_sdk/template/infrahub_filters.py new file mode 100644 index 00000000..6279177d --- /dev/null +++ b/infrahub_sdk/template/infrahub_filters.py @@ -0,0 +1,167 @@ +from __future__ import annotations + +import inspect +import json +from collections.abc import Callable, Coroutine +from typing import TYPE_CHECKING, Any + +import yaml + +from infrahub_sdk.exceptions import AuthenticationError +from infrahub_sdk.template.exceptions import JinjaFilterError + +if TYPE_CHECKING: + from 
infrahub_sdk.client import InfrahubClient + + +class InfrahubFilters: + """Holds an optional InfrahubClient and exposes async filter methods for Jinja2 templates.""" + + @classmethod + def get_filter_names(cls) -> tuple[str, ...]: + """Return all public async filter method names by convention.""" + return tuple( + name + for name in sorted(vars(cls)) + if not name.startswith("_") and inspect.iscoroutinefunction(vars(cls)[name]) + ) + + def __init__(self, client: InfrahubClient | None = None) -> None: + self._client = client + + def set_client(self, client: InfrahubClient) -> None: + self._client = client + + def _require_client(self, filter_name: str) -> InfrahubClient: + if self._client is None: + raise JinjaFilterError( + filter_name=filter_name, + message="requires an InfrahubClient", + hint="pass a client via Jinja2Template(client=...)", + ) + return self._client + + async def artifact_content(self, storage_id: str) -> str: + """Retrieve artifact content by storage_id.""" + client = self._require_client(filter_name="artifact_content") + if storage_id is None: + raise JinjaFilterError( + filter_name="artifact_content", + message="storage_id is null", + hint="ensure the GraphQL query returns a valid storage_id value", + ) + if not storage_id: + raise JinjaFilterError( + filter_name="artifact_content", + message="storage_id is empty", + hint="ensure the GraphQL query returns a non-empty storage_id value", + ) + try: + return await client.object_store.get(identifier=storage_id) + except AuthenticationError as exc: + raise JinjaFilterError( + filter_name="artifact_content", message=f"permission denied for storage_id: {storage_id}" + ) from exc + except Exception as exc: + raise JinjaFilterError( + filter_name="artifact_content", + message=f"failed to retrieve content for storage_id: {storage_id}", + hint=str(exc), + ) from exc + + async def _fetch_file_object( + self, filter_name: str, identifier: str | list[str], label: str, fetch: Callable[[], Coroutine[Any, Any, 
str]] + ) -> str: + if identifier is None: + raise JinjaFilterError( + filter_name=filter_name, + message=f"{label} is null", + hint=f"ensure the GraphQL query returns a valid {label} value", + ) + if not identifier: + raise JinjaFilterError( + filter_name=filter_name, + message=f"{label} is empty", + hint=f"ensure the GraphQL query returns a non-empty {label} value", + ) + try: + return await fetch() + except AuthenticationError as exc: + raise JinjaFilterError( + filter_name=filter_name, message=f"permission denied for {label}: {identifier}" + ) from exc + except ValueError as exc: + raise JinjaFilterError(filter_name=filter_name, message=str(exc)) from exc + except JinjaFilterError: + raise + except Exception as exc: + raise JinjaFilterError( + filter_name=filter_name, message=f"failed to retrieve content for {label}: {identifier}", hint=str(exc) + ) from exc + + async def file_object_content(self, storage_id: str) -> str: + """Retrieve file object content by storage_id.""" + client = self._require_client(filter_name="file_object_content") + return await self._fetch_file_object( + filter_name="file_object_content", + identifier=storage_id, + label="storage_id", + fetch=lambda: client.object_store.get_file_by_storage_id(storage_id=storage_id), + ) + + async def file_object_content_by_id(self, node_id: str) -> str: + """Retrieve file object content by node UUID.""" + client = self._require_client(filter_name="file_object_content_by_id") + return await self._fetch_file_object( + filter_name="file_object_content_by_id", + identifier=node_id, + label="node_id", + fetch=lambda: client.object_store.get_file_by_id(node_id=node_id), + ) + + async def file_object_content_by_hfid(self, hfid: str | list[str], kind: str = "") -> str: + """Retrieve file object content by Human-Friendly ID.""" + client = self._require_client(filter_name="file_object_content_by_hfid") + if not kind: + raise JinjaFilterError( + filter_name="file_object_content_by_hfid", + message="'kind' 
argument is required", + hint='use {{ hfid | file_object_content_by_hfid(kind="MyKind") }}', + ) + hfid_list = hfid if isinstance(hfid, list) else [hfid] + if not all(hfid_list): + raise JinjaFilterError( + filter_name="file_object_content_by_hfid", + message="hfid contains empty elements", + hint="ensure all HFID components are non-empty strings", + ) + return await self._fetch_file_object( + filter_name="file_object_content_by_hfid", + identifier=hfid, + label="hfid", + fetch=lambda: client.object_store.get_file_by_hfid(kind=kind, hfid=hfid_list), + ) + + +def from_json(value: str) -> dict | list: + """Parse a JSON string into a Python dict or list.""" + if not value: + return {} + try: + return json.loads(value) + except (json.JSONDecodeError, TypeError) as exc: + raise JinjaFilterError(filter_name="from_json", message=f"invalid JSON: {exc}") from exc + + +def from_yaml(value: str) -> dict | list: + """Parse a YAML string into a Python dict or list.""" + if not value: + return {} + try: + result = yaml.safe_load(value) + # yaml.safe_load("") returns None, normalize to {} + if result is None: + return {} + return result + except yaml.YAMLError as exc: + raise JinjaFilterError(filter_name="from_yaml", message=f"invalid YAML: {exc}") from exc diff --git a/infrahub_sdk/transfer/importer/json.py b/infrahub_sdk/transfer/importer/json.py index 9c0b7ab9..32928df9 100644 --- a/infrahub_sdk/transfer/importer/json.py +++ b/infrahub_sdk/transfer/importer/json.py @@ -12,6 +12,7 @@ from ...exceptions import GraphQLError from ...node import InfrahubNode, RelatedNode, RelationshipManager +from ...schema import RelationshipCardinality from ..exceptions import TransferFileNotFoundError from .interface import ImporterInterface @@ -130,11 +131,14 @@ async def update_optional_relationships(self) -> None: relationship_schema = self.optional_relationships_schemas_by_node_kind[node_kind][relationship_attr] # Check if we are in a many-many relationship, ignore importing it if it is - if 
relationship_schema.cardinality == "many": + if relationship_schema.cardinality == RelationshipCardinality.MANY: if relationship_schema.peer not in self.schemas_by_kind: continue for peer_relationship in self.schemas_by_kind[relationship_schema.peer].relationships: - if peer_relationship.cardinality == "many" and peer_relationship.peer == node_kind: + if ( + peer_relationship.cardinality == RelationshipCardinality.MANY + and peer_relationship.peer == node_kind + ): ignore = True if not ignore: diff --git a/pyproject.toml b/pyproject.toml index 4e0716a6..15afc396 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -123,6 +123,9 @@ addopts = "-vs --cov-report term-missing --cov-report xml --dist loadscope" [tool.ty.environment] python-version = "3.10" +[tool.ty.rules] +possibly-missing-attribute = "error" + [[tool.ty.overrides]] include = ["infrahub_sdk/**"] @@ -361,7 +364,7 @@ max-line-length = 150 [tool.ruff.lint.mccabe] # Target max-complexity=10 -max-complexity = 17 +max-complexity = 14 [tool.ruff.lint.per-file-ignores] diff --git a/specs b/specs new file mode 120000 index 00000000..cc917769 --- /dev/null +++ b/specs @@ -0,0 +1 @@ +dev/specs \ No newline at end of file diff --git a/tasks.py b/tasks.py index fe1a0808..dbd4ca32 100644 --- a/tasks.py +++ b/tasks.py @@ -145,7 +145,18 @@ def _generate_infrahub_sdk_template_documentation() -> None: from docs.docs_generation.content_gen_methods import Jinja2DocContentGenMethod from docs.docs_generation.pages import DocPage, MDXDocPage from infrahub_sdk.template import Jinja2Template - from infrahub_sdk.template.filters import BUILTIN_FILTERS, NETUTILS_FILTERS + from infrahub_sdk.template.filters import BUILTIN_FILTERS, INFRAHUB_FILTERS, NETUTILS_FILTERS, ExecutionContext + + def _filters_with_contexts(filters: list) -> list[dict]: + return [ + { + "name": f.name, + "core": bool(f.allowed_contexts & ExecutionContext.CORE), + "worker": bool(f.allowed_contexts & ExecutionContext.WORKER), + "local": bool(f.allowed_contexts 
& ExecutionContext.LOCAL), + } + for f in filters + ] print(" - Generate Infrahub SDK template documentation") # Generating one documentation page for template documentation @@ -155,7 +166,11 @@ def _generate_infrahub_sdk_template_documentation() -> None: template=Path("sdk_template_reference.j2"), template_directory=DOCUMENTATION_DIRECTORY / "_templates", ), - template_variables={"builtin": BUILTIN_FILTERS, "netutils": NETUTILS_FILTERS}, + template_variables={ + "builtin": _filters_with_contexts(BUILTIN_FILTERS), + "netutils": _filters_with_contexts(NETUTILS_FILTERS), + "infrahub": _filters_with_contexts(INFRAHUB_FILTERS), + }, ), ) output_path = DOCUMENTATION_DIRECTORY / "docs" / "python-sdk" / "reference" / "templating.mdx" diff --git a/tests/constants.py b/tests/constants.py index 1c64b631..c41bd318 100644 --- a/tests/constants.py +++ b/tests/constants.py @@ -1,3 +1,9 @@ +from pathlib import Path + CLIENT_TYPE_ASYNC = "standard" CLIENT_TYPE_SYNC = "sync" CLIENT_TYPES = [CLIENT_TYPE_ASYNC, CLIENT_TYPE_SYNC] + +TEST_DIR = Path(__file__).parent +FIXTURES_DIR = TEST_DIR / "fixtures" +FIXTURE_REPOS_DIR = FIXTURES_DIR / "repos" diff --git a/tests/fixtures/models/valid_generic_schema.json b/tests/fixtures/models/valid_generic_schema.json new file mode 100644 index 00000000..82b1d0be --- /dev/null +++ b/tests/fixtures/models/valid_generic_schema.json @@ -0,0 +1,30 @@ +{ + "version": "1.0", + "generics": [ + { + "name": "Animal", + "namespace": "Testing", + "attributes": [ + { + "name": "name", + "kind": "Text" + } + ], + "restricted_namespaces": ["Dog"] + } + ], + "nodes": [ + { + "name": "Dog", + "namespace": "Dog", + "inherit_from": ["TestingAnimal"], + "attributes": [ + { + "name": "breed", + "kind": "Text", + "optional": true + } + ] + } + ] +} diff --git a/tests/fixtures/repos/fragment_inlining/.infrahub.yml b/tests/fixtures/repos/fragment_inlining/.infrahub.yml new file mode 100644 index 00000000..ac1edc08 --- /dev/null +++ 
b/tests/fixtures/repos/fragment_inlining/.infrahub.yml @@ -0,0 +1,16 @@ +--- +graphql_fragments: + - name: interfaces + file_path: fragments/interfaces.gql + - name: devices + file_path: fragments/devices.gql + +graphql_queries: + - name: query_two_files + file_path: queries/query_two_files.gql + - name: query_no_fragments + file_path: queries/query_no_fragments.gql + - name: query_transitive + file_path: queries/query_transitive.gql + - name: query_missing_fragment + file_path: queries/query_missing_fragment.gql diff --git a/tests/fixtures/repos/fragment_inlining/fragments/devices.gql b/tests/fixtures/repos/fragment_inlining/fragments/devices.gql new file mode 100644 index 00000000..73e631f7 --- /dev/null +++ b/tests/fixtures/repos/fragment_inlining/fragments/devices.gql @@ -0,0 +1,24 @@ +fragment deviceFragment on InfraDevice { + id + name { + value + } + interfaces { + edges { + node { + ...interfaceFragment + } + } + } +} + +fragment chassisFragment on InfraDevice { + id + platform { + node { + name { + value + } + } + } +} diff --git a/tests/fixtures/repos/fragment_inlining/fragments/interfaces.gql b/tests/fixtures/repos/fragment_inlining/fragments/interfaces.gql new file mode 100644 index 00000000..6080a414 --- /dev/null +++ b/tests/fixtures/repos/fragment_inlining/fragments/interfaces.gql @@ -0,0 +1,26 @@ +fragment interfaceFragment on InterfaceL3 { + id + name { + value + } + ip_addresses { + edges { + node { + id + address { + value + } + } + } + } +} + +fragment portFragment on InterfaceL2 { + id + name { + value + } + enabled { + value + } +} diff --git a/tests/fixtures/repos/fragment_inlining/queries/query_missing_fragment.gql b/tests/fixtures/repos/fragment_inlining/queries/query_missing_fragment.gql new file mode 100644 index 00000000..c8b33a90 --- /dev/null +++ b/tests/fixtures/repos/fragment_inlining/queries/query_missing_fragment.gql @@ -0,0 +1,9 @@ +query QueryMissingFragment { + InfraDevice { + edges { + node { + ...undeclaredFragment + } + } + } 
+} diff --git a/tests/fixtures/repos/fragment_inlining/queries/query_no_fragments.gql b/tests/fixtures/repos/fragment_inlining/queries/query_no_fragments.gql new file mode 100644 index 00000000..98d0253b --- /dev/null +++ b/tests/fixtures/repos/fragment_inlining/queries/query_no_fragments.gql @@ -0,0 +1,12 @@ +query QueryNoFragments { + InfraDevice { + edges { + node { + id + name { + value + } + } + } + } +} diff --git a/tests/fixtures/repos/fragment_inlining/queries/query_transitive.gql b/tests/fixtures/repos/fragment_inlining/queries/query_transitive.gql new file mode 100644 index 00000000..fe69a696 --- /dev/null +++ b/tests/fixtures/repos/fragment_inlining/queries/query_transitive.gql @@ -0,0 +1,9 @@ +query QueryTransitive { + InfraDevice { + edges { + node { + ...deviceFragment + } + } + } +} diff --git a/tests/fixtures/repos/fragment_inlining/queries/query_two_files.gql b/tests/fixtures/repos/fragment_inlining/queries/query_two_files.gql new file mode 100644 index 00000000..1e222276 --- /dev/null +++ b/tests/fixtures/repos/fragment_inlining/queries/query_two_files.gql @@ -0,0 +1,10 @@ +query QueryTwoFiles { + InfraDevice { + edges { + node { + ...interfaceFragment + ...deviceFragment + } + } + } +} diff --git a/tests/integration/test_infrahubctl.py b/tests/integration/test_infrahubctl.py index f9b47c42..e2fa8171 100644 --- a/tests/integration/test_infrahubctl.py +++ b/tests/integration/test_infrahubctl.py @@ -24,7 +24,9 @@ from infrahub_sdk import InfrahubClient from infrahub_sdk.node import InfrahubNode -FIXTURE_BASE_DIR = Path(Path(Path(__file__).resolve()).parent / ".." 
/ "fixtures") +from tests.constants import FIXTURES_DIR + +FIXTURE_BASE_DIR = FIXTURES_DIR runner = CliRunner() diff --git a/tests/unit/ctl/test_graphql_app.py b/tests/unit/ctl/test_graphql_app.py index aca9ee95..fc3ea72f 100644 --- a/tests/unit/ctl/test_graphql_app.py +++ b/tests/unit/ctl/test_graphql_app.py @@ -10,11 +10,12 @@ from typer.testing import CliRunner from infrahub_sdk.ctl.graphql import app, find_gql_files, get_graphql_query +from tests.constants import FIXTURES_DIR as _FIXTURES_DIR from tests.helpers.cli import remove_ansi_color runner = CliRunner() -FIXTURES_DIR = Path(__file__).parent.parent.parent / "fixtures" / "unit" / "test_infrahubctl" / "graphql" +FIXTURES_DIR = _FIXTURES_DIR / "unit" / "test_infrahubctl" / "graphql" class TestFindGqlFiles: diff --git a/tests/unit/ctl/test_render_app.py b/tests/unit/ctl/test_render_app.py index f159dfa0..f399af00 100644 --- a/tests/unit/ctl/test_render_app.py +++ b/tests/unit/ctl/test_render_app.py @@ -1,19 +1,18 @@ import json from dataclasses import dataclass -from pathlib import Path import pytest from pytest_httpx import HTTPXMock from typer.testing import CliRunner from infrahub_sdk.ctl.cli_commands import app +from tests.constants import FIXTURE_REPOS_DIR from tests.helpers.fixtures import read_fixture from tests.helpers.utils import strip_color, temp_repo_and_cd runner = CliRunner() - -FIXTURE_BASE_DIR = Path(Path(Path(__file__).resolve()).parent / ".." / ".." 
/ "fixtures" / "repos") +FIXTURE_BASE_DIR = FIXTURE_REPOS_DIR @dataclass diff --git a/tests/unit/ctl/test_schema_app.py b/tests/unit/ctl/test_schema_app.py index 1fcfe62b..11fe51a2 100644 --- a/tests/unit/ctl/test_schema_app.py +++ b/tests/unit/ctl/test_schema_app.py @@ -128,3 +128,48 @@ def test_schema_load_notvalid_namespace(httpx_mock: HTTPXMock) -> None: fixture_file.read_text(encoding="utf-8"), ) assert content_json == {"schemas": [fixture_file_content]} + + +def test_load_valid_generic_schema(httpx_mock: HTTPXMock) -> None: + """A test which ensures that a generic schema is correctly loaded when loaded from infrahubctl command""" + + # Arrange + fixture_file = get_fixtures_dir() / "models" / "valid_generic_schema.json" + + httpx_mock.add_response( + method="POST", + url="http://mock/api/schema/load?branch=main", + status_code=200, + json={ + "hash": "a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4", + "previous_hash": "d3f7f4e7161f0ae6538a01d5a42dc661", + "diff": { + "added": { + "TestingAnimal": {"added": {}, "changed": {}, "removed": {}}, + "DogDog": {"added": {}, "changed": {}, "removed": {}}, + }, + "changed": {}, + "removed": {}, + }, + "schema_updated": True, + }, + ) + + # Act + result = runner.invoke(app=app, args=["load", str(fixture_file)]) + + # Assert + assert result.exit_code == 0 + assert f"schema '{fixture_file}' loaded successfully" in remove_ansi_color(result.stdout.replace("\n", "")) + + content = httpx_mock.get_requests()[0].content.decode("utf8") + content_json = yaml.safe_load(content) + fixture_file_content = yaml.safe_load( + fixture_file.read_text(encoding="utf-8"), + ) + assert content_json == {"schemas": [fixture_file_content]} + + # Verify restricted_namespaces is present in the payload sent to the API + sent_generics = content_json["schemas"][0]["generics"] + assert len(sent_generics) == 1 + assert sent_generics[0]["restricted_namespaces"] == ["Dog"] diff --git a/tests/unit/ctl/test_transform_app.py b/tests/unit/ctl/test_transform_app.py index 
5b7cb4f1..b05be5c6 100644 --- a/tests/unit/ctl/test_transform_app.py +++ b/tests/unit/ctl/test_transform_app.py @@ -12,15 +12,13 @@ from infrahub_sdk.ctl.cli_commands import app from infrahub_sdk.repository import GitRepoManager +from tests.constants import FIXTURES_DIR from tests.helpers.fixtures import read_fixture from tests.helpers.utils import change_directory, strip_color runner = CliRunner() - -FIXTURE_BASE_DIR = Path( - Path(Path(__file__).resolve()).parent / ".." / ".." / "fixtures" / "integration" / "test_infrahubctl" -) +FIXTURE_BASE_DIR = FIXTURES_DIR / "integration" / "test_infrahubctl" @pytest.fixture diff --git a/tests/unit/sdk/graphql/test_fragment_renderer.py b/tests/unit/sdk/graphql/test_fragment_renderer.py new file mode 100644 index 00000000..7d622112 --- /dev/null +++ b/tests/unit/sdk/graphql/test_fragment_renderer.py @@ -0,0 +1,338 @@ +"""Unit tests for fragment rendering functions in infrahub_sdk.graphql.query_renderer.""" + +from __future__ import annotations + +import pytest +from graphql import parse + +from infrahub_sdk.exceptions import ( + CircularFragmentError, + DuplicateFragmentError, + FragmentNotFoundError, + QuerySyntaxError, +) +from infrahub_sdk.graphql.query_renderer import ( + build_fragment_index, + collect_required_fragments, + render_query_with_fragments, +) + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +FRAG_INTERFACE = """ +fragment interfaceFragment on InterfaceL3 { + id + name { value } +} +""" + +FRAG_DEVICE = """ +fragment deviceFragment on InfraDevice { + id + interfaces { + edges { + node { + ...interfaceFragment + } + } + } +} +""" + +FRAG_PORT = """ +fragment portFragment on InterfaceL2 { + id + enabled { value } +} +""" + +QUERY_USE_INTERFACE = """ +query Q { + Devices { + edges { + node { + ...interfaceFragment + } + } + } +} +""" + +QUERY_NO_SPREADS = """ +query Q { + Devices { + edges 
{ + node { + id + name { value } + } + } + } +} +""" + +QUERY_USE_DEVICE = """ +query Q { + Devices { + edges { + node { + ...deviceFragment + } + } + } +} +""" + +QUERY_USE_BOTH = """ +query Q { + Devices { + edges { + node { + ...interfaceFragment + ...deviceFragment + } + } + } +} +""" + +QUERY_USE_INTERFACE_TWICE = """ +query Q { + A: Devices { + edges { + node { + ...interfaceFragment + } + } + } + B: Devices { + edges { + node { + ...interfaceFragment + } + } + } +} +""" + +QUERY_MISSING_FRAGMENT = """ +query Q { + Devices { + edges { + node { + ...undeclaredFragment + } + } + } +} +""" + + +# --------------------------------------------------------------------------- +# build_fragment_index tests +# --------------------------------------------------------------------------- + + +def test_build_fragment_index_invalid_syntax_raises() -> None: + with pytest.raises(QuerySyntaxError): + build_fragment_index(["this is not @@ valid graphql"]) + + +def test_build_fragment_index_single_file() -> None: + index = build_fragment_index([FRAG_INTERFACE]) + assert "interfaceFragment" in index + + +def test_build_fragment_index_skips_non_fragment_definitions() -> None: + # A file that mixes an operation definition with a fragment definition — + # the operation is not a FragmentDefinitionNode and must be skipped. 
+ mixed = "query Q { id }\nfragment mixedFragment on T { id }" + index = build_fragment_index([mixed]) + assert "mixedFragment" in index + assert len(index) == 1 + + +def test_build_fragment_index_multiple_files() -> None: + index = build_fragment_index([FRAG_INTERFACE, FRAG_DEVICE]) + assert "interfaceFragment" in index + assert "deviceFragment" in index + + +def test_build_fragment_index_duplicate_same_name_two_files() -> None: + with pytest.raises(DuplicateFragmentError) as exc_info: + build_fragment_index([FRAG_INTERFACE, FRAG_INTERFACE]) + assert exc_info.value.fragment_name == "interfaceFragment" + + +def test_build_fragment_index_duplicate_same_name_within_file() -> None: + combined = FRAG_INTERFACE + FRAG_INTERFACE + with pytest.raises(DuplicateFragmentError) as exc_info: + build_fragment_index([combined]) + assert exc_info.value.fragment_name == "interfaceFragment" + + +# --------------------------------------------------------------------------- +# collect_required_fragments tests +# --------------------------------------------------------------------------- + + +def test_collect_required_fragments_direct_single() -> None: + index = build_fragment_index([FRAG_INTERFACE]) + doc = parse(QUERY_USE_INTERFACE) + required = collect_required_fragments(doc, index) + assert required == ["interfaceFragment"] + + +def test_collect_required_fragments_transitive() -> None: + """deviceFragment spreads interfaceFragment — both must be collected.""" + index = build_fragment_index([FRAG_INTERFACE, FRAG_DEVICE]) + doc = parse(QUERY_USE_DEVICE) + required = collect_required_fragments(doc, index) + assert "deviceFragment" in required + assert "interfaceFragment" in required + # interfaceFragment must appear before deviceFragment (dependency first) + assert required.index("interfaceFragment") < required.index("deviceFragment") + + +def test_collect_required_fragments_deduplication() -> None: + """Fragment used twice in query must appear only once in output.""" + index = 
build_fragment_index([FRAG_INTERFACE]) + doc = parse(QUERY_USE_INTERFACE_TWICE) + required = collect_required_fragments(doc, index) + assert required.count("interfaceFragment") == 1 + + +def test_collect_required_fragments_missing_raises() -> None: + index = build_fragment_index([FRAG_INTERFACE]) + doc = parse(QUERY_MISSING_FRAGMENT) + with pytest.raises(FragmentNotFoundError) as exc_info: + collect_required_fragments(doc, index) + assert exc_info.value.fragment_name == "undeclaredFragment" + + +def test_collect_required_fragments_circular_raises() -> None: + frag_a = "fragment FragA on Foo { ...FragB }" + frag_b = "fragment FragB on Foo { ...FragA }" + query = "query Q { foo { ...FragA } }" + index = build_fragment_index([frag_a, frag_b]) + doc = parse(query) + with pytest.raises(CircularFragmentError) as exc_info: + collect_required_fragments(doc, index) + assert "FragA" in exc_info.value.cycle + assert "FragB" in exc_info.value.cycle + + +# --------------------------------------------------------------------------- +# render_query_with_fragments tests +# --------------------------------------------------------------------------- + + +def test_render_no_fragment_files_raises_when_spreads_present() -> None: + with pytest.raises(FragmentNotFoundError): + render_query_with_fragments(QUERY_USE_INTERFACE, []) + + +def test_render_no_spreads_returns_unchanged() -> None: + result = render_query_with_fragments(QUERY_NO_SPREADS, [FRAG_INTERFACE]) + # Content should be semantically equivalent (re-printed by graphql-core) + assert "interfaceFragment" not in result + + +def test_render_single_spread_from_one_file() -> None: + result = render_query_with_fragments(QUERY_USE_INTERFACE, [FRAG_INTERFACE]) + assert "fragment interfaceFragment" in result + assert "fragment deviceFragment" not in result + + +def test_render_spreads_across_two_files() -> None: + result = render_query_with_fragments(QUERY_USE_BOTH, [FRAG_INTERFACE, FRAG_DEVICE]) + assert "fragment interfaceFragment" 
in result + assert "fragment deviceFragment" in result + + +def test_render_transitive_dependency_included() -> None: + """Query uses ...deviceFragment only; interfaceFragment must be inlined transitively.""" + result = render_query_with_fragments(QUERY_USE_DEVICE, [FRAG_INTERFACE, FRAG_DEVICE]) + assert "fragment deviceFragment" in result + assert "fragment interfaceFragment" in result + + +def test_render_surplus_fragment_excluded() -> None: + """portFragment is not referenced — it must not appear in output.""" + result = render_query_with_fragments(QUERY_USE_INTERFACE, [FRAG_INTERFACE, FRAG_PORT]) + assert "fragment interfaceFragment" in result + assert "fragment portFragment" not in result + + +def test_render_deduplication_definition_appears_once() -> None: + """Fragment spread twice in query; definition must appear exactly once.""" + result = render_query_with_fragments(QUERY_USE_INTERFACE_TWICE, [FRAG_INTERFACE]) + assert result.count("fragment interfaceFragment") == 1 + + +def test_render_missing_fragment_raises() -> None: + with pytest.raises(FragmentNotFoundError) as exc_info: + render_query_with_fragments(QUERY_MISSING_FRAGMENT, [FRAG_INTERFACE]) + assert exc_info.value.fragment_name == "undeclaredFragment" + + +def test_render_duplicate_fragment_raises() -> None: + with pytest.raises(DuplicateFragmentError): + render_query_with_fragments(QUERY_USE_INTERFACE, [FRAG_INTERFACE, FRAG_INTERFACE]) + + +def test_render_circular_fragment_raises() -> None: + frag_a = "fragment FragA on Foo { ...FragB }" + frag_b = "fragment FragB on Foo { ...FragA }" + query = "query Q { foo { ...FragA } }" + with pytest.raises(CircularFragmentError): + render_query_with_fragments(query, [frag_a, frag_b]) + + +def test_render_invalid_query_syntax_raises() -> None: + with pytest.raises(QuerySyntaxError): + render_query_with_fragments("this is not @@ valid graphql", []) + + +def test_render_invalid_fragment_file_syntax_raises() -> None: + with pytest.raises(QuerySyntaxError): + 
render_query_with_fragments(QUERY_USE_INTERFACE, ["this is not @@ valid graphql"]) + + +# --------------------------------------------------------------------------- +# Inline (query-local) fragment tests +# --------------------------------------------------------------------------- + +QUERY_WITH_INLINE_FRAGMENT = """ +query Q { + Devices { + edges { + node { + ...deviceFields + } + } + } +} + +fragment deviceFields on InfraDevice { + id + name { value } +} +""" + + +def test_collect_required_fragments_inline_fragment_not_raised() -> None: + """A fragment defined inside the query document must not raise FragmentNotFoundError.""" + doc = parse(QUERY_WITH_INLINE_FRAGMENT) + # Empty index — no external fragment files + required = collect_required_fragments(doc, {}) + # The inline fragment is self-contained; nothing needs to be appended from external files + assert required == [] + + +def test_render_inline_fragment_not_raised() -> None: + """render_query_with_fragments must not raise when the query already defines its own fragment.""" + result = render_query_with_fragments(QUERY_WITH_INLINE_FRAGMENT, []) + assert "fragment deviceFields" in result diff --git a/tests/unit/sdk/graphql/test_query_renderer.py b/tests/unit/sdk/graphql/test_query_renderer.py new file mode 100644 index 00000000..bf71552a --- /dev/null +++ b/tests/unit/sdk/graphql/test_query_renderer.py @@ -0,0 +1,54 @@ +"""Unit tests for infrahub_sdk.graphql.query_renderer.""" + +from __future__ import annotations + +from pathlib import Path + +import pytest + +from infrahub_sdk.exceptions import FragmentNotFoundError +from infrahub_sdk.graphql.query_renderer import render_query +from infrahub_sdk.schema.repository import ( + InfrahubRepositoryConfig, + InfrahubRepositoryFragmentConfig, + InfrahubRepositoryGraphQLConfig, +) +from tests.constants import FIXTURE_REPOS_DIR + +FIXTURE_REPO = str(FIXTURE_REPOS_DIR / "fragment_inlining") + + +@pytest.fixture +def repo_config() -> InfrahubRepositoryConfig: + return 
InfrahubRepositoryConfig( + graphql_fragments=[ + InfrahubRepositoryFragmentConfig(name="interfaces", file_path=Path("fragments/interfaces.gql")), + InfrahubRepositoryFragmentConfig(name="devices", file_path=Path("fragments/devices.gql")), + ], + queries=[ + InfrahubRepositoryGraphQLConfig(name="query_two_files", file_path=Path("queries/query_two_files.gql")), + InfrahubRepositoryGraphQLConfig( + name="query_no_fragments", file_path=Path("queries/query_no_fragments.gql") + ), + InfrahubRepositoryGraphQLConfig( + name="query_missing_fragment", file_path=Path("queries/query_missing_fragment.gql") + ), + ], + ) + + +def test_render_query_inlines_fragments(repo_config: InfrahubRepositoryConfig) -> None: + result = render_query(name="query_two_files", config=repo_config, relative_path=FIXTURE_REPO) + assert "interfaceFragment" in result + assert "deviceFragment" in result + + +def test_render_query_no_fragments_unchanged(repo_config: InfrahubRepositoryConfig) -> None: + original = (Path(FIXTURE_REPO) / "queries" / "query_no_fragments.gql").read_text(encoding="UTF-8") + result = render_query(name="query_no_fragments", config=repo_config, relative_path=FIXTURE_REPO) + assert result.count("fragment ") == original.count("fragment ") + + +def test_render_query_missing_fragment_raises(repo_config: InfrahubRepositoryConfig) -> None: + with pytest.raises(FragmentNotFoundError): + render_query(name="query_missing_fragment", config=repo_config, relative_path=FIXTURE_REPO) diff --git a/tests/unit/sdk/pool/test_allocate.py b/tests/unit/sdk/pool/test_allocate.py index eacc1a7b..26a63b52 100644 --- a/tests/unit/sdk/pool/test_allocate.py +++ b/tests/unit/sdk/pool/test_allocate.py @@ -11,6 +11,7 @@ from pytest_httpx import HTTPXMock + from infrahub_sdk.protocols import BuiltinIPAddressSync from infrahub_sdk.protocols_base import CoreNode, CoreNodeSync from infrahub_sdk.schema import NodeSchemaAPI from tests.unit.sdk.conftest import BothClients @@ -115,8 +116,8 @@ async def 
test_allocate_next_ip_address( ) assert ip_address - assert str(cast("InfrahubNodeSync", ip_address).address.value) == "192.0.2.0/32" - assert cast("InfrahubNodeSync", ip_address).description.value == "test" + assert str(cast("BuiltinIPAddressSync", ip_address).address.value) == "192.0.2.0/32" + assert cast("BuiltinIPAddressSync", ip_address).description.value == "test" @pytest.mark.parametrize("client_type", client_types) diff --git a/tests/unit/sdk/test_config.py b/tests/unit/sdk/test_config.py index bc7b538d..fb735f78 100644 --- a/tests/unit/sdk/test_config.py +++ b/tests/unit/sdk/test_config.py @@ -5,10 +5,11 @@ def test_combine_authentications() -> None: + # When both username/password and api_token are explicitly provided, raise an error with pytest.raises(ValidationError) as exc: Config(api_token="testing", username="test", password="testpassword") - assert "Unable to combine password with token based authentication" in str(exc.value) + assert "Cannot use both 'api_token' and 'username'/'password' authentication simultaneously" in str(exc.value) def test_missing_password() -> None: @@ -39,3 +40,54 @@ def test_config_address() -> None: config = Config(address=address) assert config.address == address + + +def test_password_auth_overrides_env_token(monkeypatch: pytest.MonkeyPatch) -> None: + """Test that explicit username/password overrides INFRAHUB_API_TOKEN from environment""" + # Set environment variable for api_token + monkeypatch.setenv("INFRAHUB_API_TOKEN", "token-from-env") + + # Create configuration with explicit username/password + config = Config(address="https://sandbox.infrahub.app", username="testuser", password="testpass") + + # Password auth should be active and api_token should be cleared + assert config.username == "testuser" + assert config.password == "testpass" + assert config.api_token is None + assert config.password_authentication is True + + +def test_token_auth_overrides_env_password(monkeypatch: pytest.MonkeyPatch) -> None: + """Test 
that explicit api_token overrides INFRAHUB_USERNAME and INFRAHUB_PASSWORD from environment""" + # Set environment variables for username/password + monkeypatch.setenv("INFRAHUB_USERNAME", "user-from-env") + monkeypatch.setenv("INFRAHUB_PASSWORD", "pass-from-env") + + # Create configuration with explicit api_token + config = Config(address="https://sandbox.infrahub.app", api_token="explicit-token") + + # Token auth should be active and username/password should be cleared + assert config.api_token == "explicit-token" + assert config.username is None + assert config.password is None + assert config.password_authentication is False + + +def test_password_auth_overrides_env_token_when_password_env_var_and_username_explicit( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Test that explicit username/password overrides INFRAHUB_API_TOKEN from environment when only username is provided + through Config object and password is provided through environment variable""" + + # Set environment variable for api_token and password + monkeypatch.setenv("INFRAHUB_API_TOKEN", "token-from-env") + monkeypatch.setenv("INFRAHUB_PASSWORD", "testpass") + + # Create configuration with explicit username + config = Config(address="https://sandbox.infrahub.app", username="testuser") + + # Password auth should be active and api_token should be cleared + assert config.username == "testuser" + assert config.password == "testpass" + assert config.api_token is None + assert config.password_authentication is True diff --git a/tests/unit/sdk/test_infrahub_filters.py b/tests/unit/sdk/test_infrahub_filters.py new file mode 100644 index 00000000..4b361cd5 --- /dev/null +++ b/tests/unit/sdk/test_infrahub_filters.py @@ -0,0 +1,342 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import httpx +import pytest + +from infrahub_sdk.template import Jinja2Template +from infrahub_sdk.template.exceptions import JinjaFilterError, JinjaTemplateError, 
JinjaTemplateOperationViolationError +from infrahub_sdk.template.filters import INFRAHUB_FILTERS, ExecutionContext, FilterDefinition +from infrahub_sdk.template.infrahub_filters import InfrahubFilters, from_json, from_yaml + +if TYPE_CHECKING: + from pytest_httpx import HTTPXMock + + from infrahub_sdk import InfrahubClient + + +pytestmark = pytest.mark.httpx_mock(can_send_already_matched_responses=True) + +ARTIFACT_CONTENT_URL = "http://mock/api/storage/object" +FILE_BY_STORAGE_ID_URL = "http://mock/api/files/by-storage-id" + +CLIENT_FILTER_PARAMS = [ + pytest.param( + "artifact_content", + "{{ storage_id | artifact_content }}", + f"{ARTIFACT_CONTENT_URL}/test-id", + {"content-type": "text/plain"}, + id="artifact_content", + ), + pytest.param( + "file_object_content", + "{{ storage_id | file_object_content }}", + f"{FILE_BY_STORAGE_ID_URL}/test-id", + {"content-type": "text/plain"}, + id="file_object_content", + ), +] + + +class TestJinjaFilterError: + def test_instantiation_without_hint(self) -> None: + exc = JinjaFilterError(filter_name="my_filter", message="something broke") + assert exc.filter_name == "my_filter" + assert exc.hint is None + assert exc.message == "Filter 'my_filter': something broke" + + def test_instantiation_with_hint(self) -> None: + exc = JinjaFilterError(filter_name="my_filter", message="something broke", hint="try harder") + assert exc.filter_name == "my_filter" + assert exc.hint == "try harder" + assert exc.message == "Filter 'my_filter': something broke — try harder" + + +class TestFilterDefinition: + def test_trusted_when_all_contexts(self) -> None: + fd = FilterDefinition(name="abs", allowed_contexts=ExecutionContext.ALL, source="jinja2") + assert fd.trusted is True + + def test_not_trusted_when_local_only(self) -> None: + fd = FilterDefinition(name="safe", allowed_contexts=ExecutionContext.LOCAL, source="jinja2") + assert fd.trusted is False + + def test_not_trusted_when_worker_only(self) -> None: + fd = 
FilterDefinition(name="artifact_content", allowed_contexts=ExecutionContext.WORKER, source="infrahub") + assert fd.trusted is False + + def test_not_trusted_when_core_only(self) -> None: + fd = FilterDefinition(name="custom", allowed_contexts=ExecutionContext.CORE, source="test") + assert fd.trusted is False + + def test_infrahub_filters_list_sorted(self) -> None: + """Infrahub filter names should be in alphabetical order.""" + names = [fd.name for fd in INFRAHUB_FILTERS] + assert names == sorted(names) + + +class TestValidateContext: + def test_restricted_true_blocks_untrusted_filters(self) -> None: + """restricted=True behaves like ExecutionContext.CORE -- blocks LOCAL-only filters.""" + jinja = Jinja2Template(template="{{ network | get_all_host }}") + with pytest.raises(JinjaTemplateOperationViolationError) as exc: + jinja.validate(restricted=True) + assert exc.value.message == "The 'get_all_host' filter isn't allowed to be used" + + def test_restricted_false_allows_all_filters(self) -> None: + """restricted=False behaves like ExecutionContext.LOCAL -- allows everything.""" + jinja = Jinja2Template(template="{{ network | get_all_host }}") + jinja.validate(restricted=False) + + def test_context_core_blocks_artifact_content(self) -> None: + jinja = Jinja2Template(template="{{ sid | artifact_content }}") + with pytest.raises(JinjaTemplateOperationViolationError) as exc: + jinja.validate(context=ExecutionContext.CORE) + assert exc.value.message == "The 'artifact_content' filter isn't allowed to be used" + + def test_context_worker_allows_artifact_content(self) -> None: + jinja = Jinja2Template(template="{{ sid | artifact_content }}") + jinja.validate(context=ExecutionContext.WORKER) + + def test_context_worker_blocks_local_only_filters(self) -> None: + """WORKER context should still block LOCAL-only filters like 'fqdn_to_ip'.""" + jinja = Jinja2Template(template="{{ data | fqdn_to_ip }}") + with pytest.raises(JinjaTemplateOperationViolationError) as exc: + 
jinja.validate(context=ExecutionContext.WORKER) + assert exc.value.message == "The 'fqdn_to_ip' filter isn't allowed to be used" + + def test_context_local_allows_local_only_filters(self) -> None: + jinja = Jinja2Template(template="{{ data | fqdn_to_ip }}") + jinja.validate(context=ExecutionContext.LOCAL) + + def test_context_local_blocks_artifact_content(self) -> None: + """LOCAL context blocks artifact_content (WORKER only) — these filters require a worker.""" + jinja = Jinja2Template(template="{{ sid | artifact_content }}") + with pytest.raises(JinjaTemplateOperationViolationError): + jinja.validate(context=ExecutionContext.LOCAL) + + @pytest.mark.parametrize("context", [ExecutionContext.CORE, ExecutionContext.WORKER]) + def test_user_filters_always_allowed(self, context: ExecutionContext) -> None: + def my_custom_filter(value: str) -> str: + return value.upper() + + jinja = Jinja2Template(template="{{ name | my_custom }}", filters={"my_custom": my_custom_filter}) + jinja.validate(context=context) + + def test_context_core_allows_from_json(self) -> None: + jinja = Jinja2Template(template="{{ '{\"a\":1}' | from_json }}") + jinja.validate(context=ExecutionContext.CORE) + + +class TestClientDependentFilters: + @pytest.mark.parametrize(("filter_name", "template", "url", "headers"), CLIENT_FILTER_PARAMS) + async def test_happy_path( + self, + filter_name: str, + template: str, + url: str, + headers: dict[str, str], + client: InfrahubClient, + httpx_mock: HTTPXMock, + ) -> None: + httpx_mock.add_response(method="GET", url=url, text="rendered content", headers=headers) + jinja = Jinja2Template(template=template, client=client) + result = await jinja.render(variables={"storage_id": "test-id"}) + assert result == "rendered content" + + @pytest.mark.parametrize(("filter_name", "template", "url", "headers"), CLIENT_FILTER_PARAMS) + @pytest.mark.parametrize( + ("storage_id_value", "expected_message"), + [ + pytest.param( + None, + "Filter '{filter_name}': storage_id is 
null" + " — ensure the GraphQL query returns a valid storage_id value", + id="null", + ), + pytest.param( + "", + "Filter '{filter_name}': storage_id is empty" + " — ensure the GraphQL query returns a non-empty storage_id value", + id="empty", + ), + ], + ) + async def test_invalid_storage_id( + self, + filter_name: str, + template: str, + url: str, + headers: dict[str, str], + storage_id_value: str | None, + expected_message: str, + client: InfrahubClient, + ) -> None: + jinja = Jinja2Template(template=template, client=client) + with pytest.raises(JinjaTemplateError) as exc: + await jinja.render(variables={"storage_id": storage_id_value}) + assert exc.value.message == expected_message.format(filter_name=filter_name) + + @pytest.mark.parametrize( + ("template", "url"), + [ + pytest.param( + "{{ storage_id | artifact_content }}", f"{ARTIFACT_CONTENT_URL}/abc-123", id="artifact_content" + ), + pytest.param( + "{{ storage_id | file_object_content }}", f"{FILE_BY_STORAGE_ID_URL}/abc-123", id="file_object_content" + ), + ], + ) + async def test_store_exception_is_wrapped( + self, template: str, url: str, client: InfrahubClient, httpx_mock: HTTPXMock + ) -> None: + httpx_mock.add_exception(httpx.ConnectError("connection timeout"), method="GET", url=url) + jinja = Jinja2Template(template=template, client=client) + with pytest.raises(JinjaTemplateError): + await jinja.render(variables={"storage_id": "abc-123"}) + + @pytest.mark.parametrize( + ("template", "url", "storage_id", "expected_message"), + [ + pytest.param( + "{{ storage_id | artifact_content }}", + f"{ARTIFACT_CONTENT_URL}/sid-x", + "sid-x", + "Filter 'artifact_content': permission denied for storage_id: sid-x", + id="artifact_content", + ), + pytest.param( + "{{ storage_id | file_object_content }}", + f"{FILE_BY_STORAGE_ID_URL}/fid-x", + "fid-x", + "Filter 'file_object_content': permission denied for storage_id: fid-x", + id="file_object_content", + ), + ], + ) + async def test_auth_error( + self, + template: 
str, + url: str, + storage_id: str, + expected_message: str, + client: InfrahubClient, + httpx_mock: HTTPXMock, + ) -> None: + httpx_mock.add_response(method="GET", url=url, status_code=403, json={"errors": [{"message": "forbidden"}]}) + jinja = Jinja2Template(template=template, client=client) + with pytest.raises(JinjaTemplateError) as exc: + await jinja.render(variables={"storage_id": storage_id}) + assert exc.value.message == expected_message + + async def test_file_object_content_binary_content_rejected( + self, client: InfrahubClient, httpx_mock: HTTPXMock + ) -> None: + httpx_mock.add_response( + method="GET", + url=f"{FILE_BY_STORAGE_ID_URL}/fid-bin", + content=b"\x00\x01\x02", + headers={"content-type": "application/octet-stream"}, + ) + jinja = Jinja2Template(template="{{ storage_id | file_object_content }}", client=client) + with pytest.raises(JinjaTemplateError) as exc: + await jinja.render(variables={"storage_id": "fid-bin"}) + assert ( + exc.value.message == "Filter 'file_object_content': Binary content not supported:" + " content-type 'application/octet-stream' for identifier 'fid-bin'" + ) + + async def test_file_object_content_by_hfid_missing_kind(self, client: InfrahubClient) -> None: + jinja = Jinja2Template(template="{{ hfid | file_object_content_by_hfid }}", client=client) + with pytest.raises(JinjaTemplateError) as exc: + await jinja.render(variables={"hfid": ["contract-2024"]}) + assert exc.value.message == ( + "Filter 'file_object_content_by_hfid': 'kind' argument is required" + ' — use {{ hfid | file_object_content_by_hfid(kind="MyKind") }}' + ) + + +class TestFromJsonFilter: + def test_valid_json(self) -> None: + result = from_json('{"key": "value", "num": 42}') + assert result == {"key": "value", "num": 42} + + def test_valid_json_list(self) -> None: + result = from_json("[1, 2, 3]") + assert result == [1, 2, 3] + + def test_empty_string_returns_empty_dict(self) -> None: + assert from_json("") == {} + + def 
test_malformed_json_raises_error(self) -> None: + with pytest.raises(JinjaFilterError) as exc: + from_json("{not valid json}") + assert exc.value.filter_name == "from_json" + assert exc.value.message is not None + assert exc.value.message.startswith("Filter 'from_json': invalid JSON: Expecting property name") + + async def test_render_through_template(self) -> None: + jinja = Jinja2Template(template="{{ data | from_json }}") + result = await jinja.render(variables={"data": '{"a": 1}'}) + assert result == "{'a': 1}" + + +class TestFromYamlFilter: + def test_valid_yaml(self) -> None: + result = from_yaml("key: value\nnum: 42") + assert result == {"key": "value", "num": 42} + + def test_valid_yaml_list(self) -> None: + result = from_yaml("- one\n- two\n- three") + assert result == ["one", "two", "three"] + + def test_empty_string_returns_empty_dict(self) -> None: + assert from_yaml("") == {} + + def test_malformed_yaml_raises_error(self) -> None: + with pytest.raises(JinjaFilterError) as exc: + from_yaml("key:\n\t- broken: [unclosed") + assert exc.value.filter_name == "from_yaml" + assert exc.value.message is not None + assert exc.value.message.startswith("Filter 'from_yaml': invalid YAML: while scanning for the next token") + + async def test_render_through_template(self) -> None: + jinja = Jinja2Template(template="{{ data | from_yaml }}") + result = await jinja.render(variables={"data": "key: value"}) + assert result == "{'key': 'value'}" + + +class TestFilterChaining: + async def test_artifact_content_piped_to_from_json(self, client: InfrahubClient, httpx_mock: HTTPXMock) -> None: + json_payload = '{"hostname": "router1", "interfaces": ["eth0", "eth1"]}' + httpx_mock.add_response(method="GET", url=f"{ARTIFACT_CONTENT_URL}/store-789", text=json_payload) + jinja = Jinja2Template(template="{{ storage_id | artifact_content | from_json }}", client=client) + result = await jinja.render(variables={"storage_id": "store-789"}) + assert result == "{'hostname': 'router1', 
'interfaces': ['eth0', 'eth1']}" + + +class TestClientFilter: + @pytest.mark.parametrize("filter_name", InfrahubFilters.get_filter_names()) + async def test_no_client_raises(self, filter_name: str) -> None: + filters = InfrahubFilters(client=None) + method = getattr(filters, filter_name) + with pytest.raises(JinjaFilterError) as exc: + await method("some-id") + assert ( + exc.value.message + == f"Filter '{filter_name}': requires an InfrahubClient — pass a client via Jinja2Template(client=...)" + ) + assert exc.value.filter_name == filter_name + + async def test_set_client_enables_artifact_content(self, client: InfrahubClient, httpx_mock: HTTPXMock) -> None: + httpx_mock.add_response(method="GET", url=f"{ARTIFACT_CONTENT_URL}/abc", text="deferred content") + tpl = Jinja2Template(template="{{ sid | artifact_content }}") + + with pytest.raises(JinjaTemplateError): + await tpl.render(variables={"sid": "abc"}) + + tpl.set_client(client=client) + result = await tpl.render(variables={"sid": "abc"}) + assert result == "deferred content" diff --git a/tests/unit/sdk/test_schema.py b/tests/unit/sdk/test_schema.py index 149b584c..c6c01372 100644 --- a/tests/unit/sdk/test_schema.py +++ b/tests/unit/sdk/test_schema.py @@ -476,3 +476,28 @@ def test_schema_base__get_schema_name__returns_correct_schema_name_for_protocols assert InfrahubSchemaBase._get_schema_name(schema=BuiltinIPAddressSync) == "BuiltinIPAddress" assert InfrahubSchemaBase._get_schema_name(schema=BuiltinIPAddress) == "BuiltinIPAddress" assert InfrahubSchemaBase._get_schema_name(schema="BuiltinIPAddress") == "BuiltinIPAddress" + + +async def test_schema_load_surfaces_api_error_on_422(client: InfrahubClient, httpx_mock: HTTPXMock) -> None: + """Validate that schema.load surfaces API error responses to the caller.""" + # Arrange + schema_payload = {"version": "1.0", "nodes": [{"name": "Dummy", "namespace": "Test"}]} + api_error_message = "Something went wrong on the server side." 
+ + httpx_mock.add_response( + method="POST", + url="http://mock/api/schema/load?branch=main", + status_code=422, + json={ + "data": None, + "errors": [{"message": api_error_message, "extensions": {"code": 422}}], + }, + ) + + # Act + response = await client.schema.load(schemas=[schema_payload]) + + # Assert + assert response.errors + assert response.errors["errors"][0]["message"] == api_error_message + assert not response.schema_updated diff --git a/tests/unit/sdk/test_schema_repository.py b/tests/unit/sdk/test_schema_repository.py new file mode 100644 index 00000000..87578a28 --- /dev/null +++ b/tests/unit/sdk/test_schema_repository.py @@ -0,0 +1,318 @@ +import tempfile +from pathlib import Path + +import pytest + +from infrahub_sdk.exceptions import FragmentFileNotFoundError, ResourceNotDefinedError +from infrahub_sdk.schema.repository import InfrahubRepositoryConfig, InfrahubRepositoryFragmentConfig + + +@pytest.fixture +def repo_config() -> InfrahubRepositoryConfig: + return InfrahubRepositoryConfig.model_validate( + { + "jinja2_transforms": [{"name": "j2_transform", "query": "q1", "template_path": "templates/foo.j2"}], + "check_definitions": [{"name": "my_check", "file_path": "check.py"}], + "artifact_definitions": [ + { + "name": "my_artifact", + "parameters": {}, + "content_type": "text/plain", + "targets": "group", + "transformation": "t", + } + ], + "generator_definitions": [{"name": "my_generator", "file_path": "g.py", "query": "q", "targets": "grp"}], + "python_transforms": [{"name": "my_python_transform", "file_path": "pt.py"}], + "queries": [{"name": "my_query", "file_path": "q.gql"}], + } + ) + + +# --- Duplicate name validation --- + + +def test_duplicate_jinja2_transforms_raises() -> None: + with pytest.raises(ValueError, match="same names"): + InfrahubRepositoryConfig.model_validate( + { + "jinja2_transforms": [ + {"name": "dup", "query": "q", "template_path": "t.j2"}, + {"name": "dup", "query": "q2", "template_path": "t2.j2"}, + ] + } + ) + + 
+def test_duplicate_check_definitions_raises() -> None: + with pytest.raises(ValueError, match="same names"): + InfrahubRepositoryConfig.model_validate( + { + "check_definitions": [ + {"name": "dup", "file_path": "check.py"}, + {"name": "dup", "file_path": "check2.py"}, + ] + } + ) + + +def test_duplicate_artifact_definitions_raises() -> None: + with pytest.raises(ValueError, match="same names"): + InfrahubRepositoryConfig.model_validate( + { + "artifact_definitions": [ + { + "name": "dup", + "parameters": {}, + "content_type": "text/plain", + "targets": "g", + "transformation": "t", + }, + { + "name": "dup", + "parameters": {}, + "content_type": "text/plain", + "targets": "g", + "transformation": "t", + }, + ] + } + ) + + +def test_duplicate_python_transforms_raises() -> None: + with pytest.raises(ValueError, match="same names"): + InfrahubRepositoryConfig.model_validate( + { + "python_transforms": [ + {"name": "dup", "file_path": "t.py"}, + {"name": "dup", "file_path": "t2.py"}, + ] + } + ) + + +def test_duplicate_generator_definitions_raises() -> None: + with pytest.raises(ValueError, match="same names"): + InfrahubRepositoryConfig.model_validate( + { + "generator_definitions": [ + {"name": "dup", "file_path": "g.py", "query": "q", "targets": "grp"}, + {"name": "dup", "file_path": "g2.py", "query": "q", "targets": "grp"}, + ] + } + ) + + +def test_duplicate_queries_raises() -> None: + with pytest.raises(ValueError, match="same names"): + InfrahubRepositoryConfig.model_validate( + { + "queries": [ + {"name": "dup", "file_path": "q.gql"}, + {"name": "dup", "file_path": "q2.gql"}, + ] + } + ) + + +# --- has_jinja2_transform / get_jinja2_transform --- + + +def test_has_jinja2_transform_found(repo_config: InfrahubRepositoryConfig) -> None: + assert repo_config.has_jinja2_transform("j2_transform") is True + + +def test_has_jinja2_transform_not_found(repo_config: InfrahubRepositoryConfig) -> None: + assert repo_config.has_jinja2_transform("missing") is False + + +def 
test_get_jinja2_transform_found(repo_config: InfrahubRepositoryConfig) -> None: + result = repo_config.get_jinja2_transform("j2_transform") + assert result.name == "j2_transform" + + +def test_get_jinja2_transform_not_found(repo_config: InfrahubRepositoryConfig) -> None: + with pytest.raises(ResourceNotDefinedError): + repo_config.get_jinja2_transform("missing") + + +# --- has_check_definition / get_check_definition --- + + +def test_has_check_definition_found(repo_config: InfrahubRepositoryConfig) -> None: + assert repo_config.has_check_definition("my_check") is True + + +def test_has_check_definition_not_found(repo_config: InfrahubRepositoryConfig) -> None: + assert repo_config.has_check_definition("missing") is False + + +def test_get_check_definition_found(repo_config: InfrahubRepositoryConfig) -> None: + result = repo_config.get_check_definition("my_check") + assert result.name == "my_check" + + +def test_get_check_definition_not_found(repo_config: InfrahubRepositoryConfig) -> None: + with pytest.raises(ResourceNotDefinedError): + repo_config.get_check_definition("missing") + + +# --- has_artifact_definition / get_artifact_definition --- + + +def test_has_artifact_definition_found(repo_config: InfrahubRepositoryConfig) -> None: + assert repo_config.has_artifact_definition("my_artifact") is True + + +def test_has_artifact_definition_not_found(repo_config: InfrahubRepositoryConfig) -> None: + assert repo_config.has_artifact_definition("missing") is False + + +def test_get_artifact_definition_found(repo_config: InfrahubRepositoryConfig) -> None: + result = repo_config.get_artifact_definition("my_artifact") + assert result.name == "my_artifact" + + +def test_get_artifact_definition_not_found(repo_config: InfrahubRepositoryConfig) -> None: + with pytest.raises(ResourceNotDefinedError): + repo_config.get_artifact_definition("missing") + + +# --- has_generator_definition / get_generator_definition --- + + +def test_has_generator_definition_found(repo_config: 
InfrahubRepositoryConfig) -> None: + assert repo_config.has_generator_definition("my_generator") is True + + +def test_has_generator_definition_not_found(repo_config: InfrahubRepositoryConfig) -> None: + assert repo_config.has_generator_definition("missing") is False + + +def test_get_generator_definition_found(repo_config: InfrahubRepositoryConfig) -> None: + result = repo_config.get_generator_definition("my_generator") + assert result.name == "my_generator" + + +def test_get_generator_definition_not_found(repo_config: InfrahubRepositoryConfig) -> None: + with pytest.raises(ResourceNotDefinedError): + repo_config.get_generator_definition("missing") + + +# --- has_python_transform / get_python_transform --- + + +def test_has_python_transform_found(repo_config: InfrahubRepositoryConfig) -> None: + assert repo_config.has_python_transform("my_python_transform") is True + + +def test_has_python_transform_not_found(repo_config: InfrahubRepositoryConfig) -> None: + assert repo_config.has_python_transform("missing") is False + + +def test_get_python_transform_found(repo_config: InfrahubRepositoryConfig) -> None: + result = repo_config.get_python_transform("my_python_transform") + assert result.name == "my_python_transform" + + +def test_get_python_transform_not_found(repo_config: InfrahubRepositoryConfig) -> None: + with pytest.raises(ResourceNotDefinedError): + repo_config.get_python_transform("missing") + + +# --- has_query / get_query --- + + +def test_has_query_found(repo_config: InfrahubRepositoryConfig) -> None: + assert repo_config.has_query("my_query") is True + + +def test_has_query_not_found(repo_config: InfrahubRepositoryConfig) -> None: + assert repo_config.has_query("missing") is False + + +def test_get_query_found(repo_config: InfrahubRepositoryConfig) -> None: + result = repo_config.get_query("my_query") + assert result.name == "my_query" + + +def test_get_query_not_found(repo_config: InfrahubRepositoryConfig) -> None: + with 
pytest.raises(ResourceNotDefinedError): + repo_config.get_query("missing") + + +# --- InfrahubRepositoryFragmentConfig / graphql_fragments --- + + +def test_parse_infrahub_yml_with_graphql_fragments() -> None: + config = InfrahubRepositoryConfig( + graphql_fragments=[ + InfrahubRepositoryFragmentConfig(name="interfaces", file_path=Path("fragments/interfaces.gql")), + InfrahubRepositoryFragmentConfig(name="devices", file_path=Path("fragments/devices.gql")), + ] + ) + assert len(config.graphql_fragments) == 2 + assert config.graphql_fragments[0].name == "interfaces" + assert str(config.graphql_fragments[0].file_path) == "fragments/interfaces.gql" + + +def test_graphql_fragments_defaults_to_empty() -> None: + config = InfrahubRepositoryConfig() + assert config.graphql_fragments == [] + + +def test_has_fragment_found() -> None: + config = InfrahubRepositoryConfig( + graphql_fragments=[InfrahubRepositoryFragmentConfig(name="ifaces", file_path=Path("frags/ifaces.gql"))] + ) + assert config.has_fragment("ifaces") is True + + +def test_has_fragment_not_found() -> None: + config = InfrahubRepositoryConfig() + assert config.has_fragment("missing") is False + + +def test_get_fragment_found() -> None: + config = InfrahubRepositoryConfig( + graphql_fragments=[InfrahubRepositoryFragmentConfig(name="ifaces", file_path=Path("frags/ifaces.gql"))] + ) + result = config.get_fragment("ifaces") + assert result.name == "ifaces" + + +def test_get_fragment_not_found() -> None: + config = InfrahubRepositoryConfig() + with pytest.raises(ResourceNotDefinedError): + config.get_fragment("missing") + + +def test_load_fragments_single_file() -> None: + with tempfile.TemporaryDirectory() as tmp: + frag_file = Path(tmp) / "ifaces.gql" + frag_file.write_text("fragment F on T { id }", encoding="UTF-8") + cfg = InfrahubRepositoryFragmentConfig(name="ifaces", file_path=Path("ifaces.gql")) + result = cfg.load_fragments(relative_path=tmp) + assert len(result) == 1 + assert "fragment F on T" in result[0] 
+ + +def test_load_fragments_directory() -> None: + with tempfile.TemporaryDirectory() as tmp: + (Path(tmp) / "a.gql").write_text("fragment A on T { id }", encoding="UTF-8") + (Path(tmp) / "b.gql").write_text("fragment B on T { id }", encoding="UTF-8") + (Path(tmp) / "not_a_gql.txt").write_text("ignored", encoding="UTF-8") + cfg = InfrahubRepositoryFragmentConfig(name="all", file_path=Path()) + result = cfg.load_fragments(relative_path=tmp) + assert len(result) == 2 + combined = "".join(result) + assert "fragment A" in combined + assert "fragment B" in combined + + +def test_load_fragments_missing_file_raises() -> None: + cfg = InfrahubRepositoryFragmentConfig(name="ifaces", file_path=Path("does_not_exist.gql")) + with tempfile.TemporaryDirectory() as tmp, pytest.raises(FragmentFileNotFoundError) as exc_info: + cfg.load_fragments(relative_path=tmp) + assert "does_not_exist.gql" in exc_info.value.file_path