diff --git a/robosystems_client/api/backup/create_backup.py b/robosystems_client/api/backup/create_backup.py deleted file mode 100644 index ca57030..0000000 --- a/robosystems_client/api/backup/create_backup.py +++ /dev/null @@ -1,445 +0,0 @@ -from http import HTTPStatus -from typing import Any -from urllib.parse import quote - -import httpx - -from ... import errors -from ...client import AuthenticatedClient, Client -from ...models.backup_create_request import BackupCreateRequest -from ...models.error_response import ErrorResponse -from ...models.http_validation_error import HTTPValidationError -from ...types import Response - - -def _get_kwargs( - graph_id: str, - *, - body: BackupCreateRequest, -) -> dict[str, Any]: - headers: dict[str, Any] = {} - - _kwargs: dict[str, Any] = { - "method": "post", - "url": "/v1/graphs/{graph_id}/backups".format( - graph_id=quote(str(graph_id), safe=""), - ), - } - - _kwargs["json"] = body.to_dict() - - headers["Content-Type"] = "application/json" - - _kwargs["headers"] = headers - return _kwargs - - -def _parse_response( - *, client: AuthenticatedClient | Client, response: httpx.Response -) -> Any | ErrorResponse | HTTPValidationError | None: - if response.status_code == 202: - response_202 = response.json() - return response_202 - - if response.status_code == 400: - response_400 = ErrorResponse.from_dict(response.json()) - - return response_400 - - if response.status_code == 403: - response_403 = ErrorResponse.from_dict(response.json()) - - return response_403 - - if response.status_code == 404: - response_404 = ErrorResponse.from_dict(response.json()) - - return response_404 - - if response.status_code == 422: - response_422 = HTTPValidationError.from_dict(response.json()) - - return response_422 - - if response.status_code == 500: - response_500 = ErrorResponse.from_dict(response.json()) - - return response_500 - - if client.raise_on_unexpected_status: - raise errors.UnexpectedStatus(response.status_code, response.content) - 
else: - return None - - -def _build_response( - *, client: AuthenticatedClient | Client, response: httpx.Response -) -> Response[Any | ErrorResponse | HTTPValidationError]: - return Response( - status_code=HTTPStatus(response.status_code), - content=response.content, - headers=response.headers, - parsed=_parse_response(client=client, response=response), - ) - - -def sync_detailed( - graph_id: str, - *, - client: AuthenticatedClient, - body: BackupCreateRequest, -) -> Response[Any | ErrorResponse | HTTPValidationError]: - """Create Backup - - Create a backup of the graph database. - - Creates a complete backup of the graph database (.lbug file) with: - - **Format**: Full database backup only (complete .lbug file) - - **Compression**: Always enabled for optimal storage - - **Encryption**: Optional AES-256 encryption for security - - **Retention**: Configurable retention period (1-2555 days) - - **Backup Features:** - - **Complete Backup**: Full database file backup - - **Consistency**: Point-in-time consistent snapshot - - **Download Support**: Unencrypted backups can be downloaded - - **Restore Support**: Future support for encrypted backup restoration - - **Operation State Machine:** - ``` - pending → processing → completed - ↘ failed - ``` - - **pending**: Backup queued, waiting to start - - **processing**: Actively backing up database - - **completed**: Backup successfully created and stored - - **failed**: Backup failed (check error message) - - **Expected Durations:** - Operation times vary by database size: - - **Small** (<1GB): 30 seconds - 2 minutes - - **Medium** (1-10GB): 2-10 minutes - - **Large** (10-100GB): 10-30 minutes - - **Very Large** (>100GB): 30+ minutes - - **Progress Monitoring:** - Use the returned operation_id to connect to the SSE stream: - ```javascript - const eventSource = new EventSource('/v1/operations/{operation_id}/stream'); - eventSource.addEventListener('operation_progress', (event) => { - const data = JSON.parse(event.data); - 
console.log('Backup progress:', data.progress_percent + '%'); - console.log('Status:', data.status); // pending, processing, completed, failed - }); - ``` - - **SSE Connection Limits:** - - Maximum 5 concurrent SSE connections per user - - Rate limited to 10 new connections per minute - - Automatic circuit breaker for Redis failures - - Graceful degradation if event system unavailable - - **Important Notes:** - - Only full_dump format is supported (no CSV/JSON exports) - - Compression is always enabled - - Encrypted backups cannot be downloaded (security measure) - - All backups are stored securely in cloud storage - - **Credit Consumption:** - - Base cost: 25.0 credits - - Large databases (>10GB): 50.0 credits - - Multiplied by graph tier - - Returns operation details for SSE monitoring. - - Args: - graph_id (str): - body (BackupCreateRequest): Request model for creating a backup. - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. - - Returns: - Response[Any | ErrorResponse | HTTPValidationError] - """ - - kwargs = _get_kwargs( - graph_id=graph_id, - body=body, - ) - - response = client.get_httpx_client().request( - **kwargs, - ) - - return _build_response(client=client, response=response) - - -def sync( - graph_id: str, - *, - client: AuthenticatedClient, - body: BackupCreateRequest, -) -> Any | ErrorResponse | HTTPValidationError | None: - """Create Backup - - Create a backup of the graph database. 
- - Creates a complete backup of the graph database (.lbug file) with: - - **Format**: Full database backup only (complete .lbug file) - - **Compression**: Always enabled for optimal storage - - **Encryption**: Optional AES-256 encryption for security - - **Retention**: Configurable retention period (1-2555 days) - - **Backup Features:** - - **Complete Backup**: Full database file backup - - **Consistency**: Point-in-time consistent snapshot - - **Download Support**: Unencrypted backups can be downloaded - - **Restore Support**: Future support for encrypted backup restoration - - **Operation State Machine:** - ``` - pending → processing → completed - ↘ failed - ``` - - **pending**: Backup queued, waiting to start - - **processing**: Actively backing up database - - **completed**: Backup successfully created and stored - - **failed**: Backup failed (check error message) - - **Expected Durations:** - Operation times vary by database size: - - **Small** (<1GB): 30 seconds - 2 minutes - - **Medium** (1-10GB): 2-10 minutes - - **Large** (10-100GB): 10-30 minutes - - **Very Large** (>100GB): 30+ minutes - - **Progress Monitoring:** - Use the returned operation_id to connect to the SSE stream: - ```javascript - const eventSource = new EventSource('/v1/operations/{operation_id}/stream'); - eventSource.addEventListener('operation_progress', (event) => { - const data = JSON.parse(event.data); - console.log('Backup progress:', data.progress_percent + '%'); - console.log('Status:', data.status); // pending, processing, completed, failed - }); - ``` - - **SSE Connection Limits:** - - Maximum 5 concurrent SSE connections per user - - Rate limited to 10 new connections per minute - - Automatic circuit breaker for Redis failures - - Graceful degradation if event system unavailable - - **Important Notes:** - - Only full_dump format is supported (no CSV/JSON exports) - - Compression is always enabled - - Encrypted backups cannot be downloaded (security measure) - - All backups are 
stored securely in cloud storage - - **Credit Consumption:** - - Base cost: 25.0 credits - - Large databases (>10GB): 50.0 credits - - Multiplied by graph tier - - Returns operation details for SSE monitoring. - - Args: - graph_id (str): - body (BackupCreateRequest): Request model for creating a backup. - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. - - Returns: - Any | ErrorResponse | HTTPValidationError - """ - - return sync_detailed( - graph_id=graph_id, - client=client, - body=body, - ).parsed - - -async def asyncio_detailed( - graph_id: str, - *, - client: AuthenticatedClient, - body: BackupCreateRequest, -) -> Response[Any | ErrorResponse | HTTPValidationError]: - """Create Backup - - Create a backup of the graph database. - - Creates a complete backup of the graph database (.lbug file) with: - - **Format**: Full database backup only (complete .lbug file) - - **Compression**: Always enabled for optimal storage - - **Encryption**: Optional AES-256 encryption for security - - **Retention**: Configurable retention period (1-2555 days) - - **Backup Features:** - - **Complete Backup**: Full database file backup - - **Consistency**: Point-in-time consistent snapshot - - **Download Support**: Unencrypted backups can be downloaded - - **Restore Support**: Future support for encrypted backup restoration - - **Operation State Machine:** - ``` - pending → processing → completed - ↘ failed - ``` - - **pending**: Backup queued, waiting to start - - **processing**: Actively backing up database - - **completed**: Backup successfully created and stored - - **failed**: Backup failed (check error message) - - **Expected Durations:** - Operation times vary by database size: - - **Small** (<1GB): 30 seconds - 2 minutes - - **Medium** (1-10GB): 2-10 minutes - - **Large** (10-100GB): 10-30 minutes - - **Very Large** 
(>100GB): 30+ minutes - - **Progress Monitoring:** - Use the returned operation_id to connect to the SSE stream: - ```javascript - const eventSource = new EventSource('/v1/operations/{operation_id}/stream'); - eventSource.addEventListener('operation_progress', (event) => { - const data = JSON.parse(event.data); - console.log('Backup progress:', data.progress_percent + '%'); - console.log('Status:', data.status); // pending, processing, completed, failed - }); - ``` - - **SSE Connection Limits:** - - Maximum 5 concurrent SSE connections per user - - Rate limited to 10 new connections per minute - - Automatic circuit breaker for Redis failures - - Graceful degradation if event system unavailable - - **Important Notes:** - - Only full_dump format is supported (no CSV/JSON exports) - - Compression is always enabled - - Encrypted backups cannot be downloaded (security measure) - - All backups are stored securely in cloud storage - - **Credit Consumption:** - - Base cost: 25.0 credits - - Large databases (>10GB): 50.0 credits - - Multiplied by graph tier - - Returns operation details for SSE monitoring. - - Args: - graph_id (str): - body (BackupCreateRequest): Request model for creating a backup. - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. - - Returns: - Response[Any | ErrorResponse | HTTPValidationError] - """ - - kwargs = _get_kwargs( - graph_id=graph_id, - body=body, - ) - - response = await client.get_async_httpx_client().request(**kwargs) - - return _build_response(client=client, response=response) - - -async def asyncio( - graph_id: str, - *, - client: AuthenticatedClient, - body: BackupCreateRequest, -) -> Any | ErrorResponse | HTTPValidationError | None: - """Create Backup - - Create a backup of the graph database. 
- - Creates a complete backup of the graph database (.lbug file) with: - - **Format**: Full database backup only (complete .lbug file) - - **Compression**: Always enabled for optimal storage - - **Encryption**: Optional AES-256 encryption for security - - **Retention**: Configurable retention period (1-2555 days) - - **Backup Features:** - - **Complete Backup**: Full database file backup - - **Consistency**: Point-in-time consistent snapshot - - **Download Support**: Unencrypted backups can be downloaded - - **Restore Support**: Future support for encrypted backup restoration - - **Operation State Machine:** - ``` - pending → processing → completed - ↘ failed - ``` - - **pending**: Backup queued, waiting to start - - **processing**: Actively backing up database - - **completed**: Backup successfully created and stored - - **failed**: Backup failed (check error message) - - **Expected Durations:** - Operation times vary by database size: - - **Small** (<1GB): 30 seconds - 2 minutes - - **Medium** (1-10GB): 2-10 minutes - - **Large** (10-100GB): 10-30 minutes - - **Very Large** (>100GB): 30+ minutes - - **Progress Monitoring:** - Use the returned operation_id to connect to the SSE stream: - ```javascript - const eventSource = new EventSource('/v1/operations/{operation_id}/stream'); - eventSource.addEventListener('operation_progress', (event) => { - const data = JSON.parse(event.data); - console.log('Backup progress:', data.progress_percent + '%'); - console.log('Status:', data.status); // pending, processing, completed, failed - }); - ``` - - **SSE Connection Limits:** - - Maximum 5 concurrent SSE connections per user - - Rate limited to 10 new connections per minute - - Automatic circuit breaker for Redis failures - - Graceful degradation if event system unavailable - - **Important Notes:** - - Only full_dump format is supported (no CSV/JSON exports) - - Compression is always enabled - - Encrypted backups cannot be downloaded (security measure) - - All backups are 
stored securely in cloud storage - - **Credit Consumption:** - - Base cost: 25.0 credits - - Large databases (>10GB): 50.0 credits - - Multiplied by graph tier - - Returns operation details for SSE monitoring. - - Args: - graph_id (str): - body (BackupCreateRequest): Request model for creating a backup. - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. - - Returns: - Any | ErrorResponse | HTTPValidationError - """ - - return ( - await asyncio_detailed( - graph_id=graph_id, - client=client, - body=body, - ) - ).parsed diff --git a/robosystems_client/api/backup/restore_backup.py b/robosystems_client/api/backup/restore_backup.py deleted file mode 100644 index 4a05625..0000000 --- a/robosystems_client/api/backup/restore_backup.py +++ /dev/null @@ -1,479 +0,0 @@ -from http import HTTPStatus -from typing import Any -from urllib.parse import quote - -import httpx - -from ... 
import errors -from ...client import AuthenticatedClient, Client -from ...models.backup_restore_request import BackupRestoreRequest -from ...models.error_response import ErrorResponse -from ...models.http_validation_error import HTTPValidationError -from ...types import Response - - -def _get_kwargs( - graph_id: str, - backup_id: str, - *, - body: BackupRestoreRequest, -) -> dict[str, Any]: - headers: dict[str, Any] = {} - - _kwargs: dict[str, Any] = { - "method": "post", - "url": "/v1/graphs/{graph_id}/backups/{backup_id}/restore".format( - graph_id=quote(str(graph_id), safe=""), - backup_id=quote(str(backup_id), safe=""), - ), - } - - _kwargs["json"] = body.to_dict() - - headers["Content-Type"] = "application/json" - - _kwargs["headers"] = headers - return _kwargs - - -def _parse_response( - *, client: AuthenticatedClient | Client, response: httpx.Response -) -> Any | ErrorResponse | HTTPValidationError | None: - if response.status_code == 202: - response_202 = response.json() - return response_202 - - if response.status_code == 400: - response_400 = ErrorResponse.from_dict(response.json()) - - return response_400 - - if response.status_code == 403: - response_403 = ErrorResponse.from_dict(response.json()) - - return response_403 - - if response.status_code == 404: - response_404 = ErrorResponse.from_dict(response.json()) - - return response_404 - - if response.status_code == 422: - response_422 = HTTPValidationError.from_dict(response.json()) - - return response_422 - - if response.status_code == 500: - response_500 = ErrorResponse.from_dict(response.json()) - - return response_500 - - if client.raise_on_unexpected_status: - raise errors.UnexpectedStatus(response.status_code, response.content) - else: - return None - - -def _build_response( - *, client: AuthenticatedClient | Client, response: httpx.Response -) -> Response[Any | ErrorResponse | HTTPValidationError]: - return Response( - status_code=HTTPStatus(response.status_code), - content=response.content, - 
headers=response.headers, - parsed=_parse_response(client=client, response=response), - ) - - -def sync_detailed( - graph_id: str, - backup_id: str, - *, - client: AuthenticatedClient, - body: BackupRestoreRequest, -) -> Response[Any | ErrorResponse | HTTPValidationError]: - """Restore Encrypted Backup - - Restore a graph database from an encrypted backup. - - Restores a complete graph database from an encrypted backup: - - **Format**: Only full_dump backups can be restored - - **Encryption**: Only encrypted backups can be restored (security requirement) - - **System Backup**: Creates automatic backup of existing database before restore - - **Verification**: Optionally verifies database integrity after restore - - **Restore Features:** - - **Atomic Operation**: Complete replacement of database - - **Rollback Protection**: System backup created before restore - - **Data Integrity**: Verification ensures successful restore - - **Security**: Only encrypted backups to prevent data tampering - - **Operation State Machine:** - ``` - pending → backing_up_current → downloading → restoring → verifying → completed - ↘ failed - ``` - - **pending**: Restore queued, waiting to start - - **backing_up_current**: Creating safety backup of existing database - - **downloading**: Downloading backup from storage - - **restoring**: Replacing database with backup contents - - **verifying**: Verifying database integrity (if enabled) - - **completed**: Restore successful, database operational - - **failed**: Restore failed (rollback may be available) - - **Expected Durations:** - Operation times vary by database size (includes backup + restore): - - **Small** (<1GB): 1-3 minutes - - **Medium** (1-10GB): 5-15 minutes - - **Large** (10-100GB): 20-45 minutes - - **Very Large** (>100GB): 45+ minutes - - Note: Restore operations take longer than backups due to safety backup step. 
- - **Progress Monitoring:** - Use the returned operation_id to connect to the SSE stream: - ```javascript - const eventSource = new EventSource('/v1/operations/{operation_id}/stream'); - eventSource.addEventListener('operation_progress', (event) => { - const data = JSON.parse(event.data); - console.log('Restore progress:', data.message); - console.log('Status:', data.status); // Shows current state - }); - ``` - - **SSE Connection Limits:** - - Maximum 5 concurrent SSE connections per user - - Rate limited to 10 new connections per minute - - Automatic circuit breaker for Redis failures - - Graceful degradation if event system unavailable - - **Important Notes:** - - Only encrypted backups can be restored (security measure) - - Existing database is backed up to S3 before restore - - Restore is a destructive operation - existing data is replaced - - System backups are stored separately for recovery - - **Credit Consumption:** - - Base cost: 100.0 credits - - Large databases (>10GB): 200.0 credits - - Multiplied by graph tier - - Returns operation details for SSE monitoring. - - Args: - graph_id (str): - backup_id (str): Backup identifier - body (BackupRestoreRequest): Request model for restoring from a backup. - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. - - Returns: - Response[Any | ErrorResponse | HTTPValidationError] - """ - - kwargs = _get_kwargs( - graph_id=graph_id, - backup_id=backup_id, - body=body, - ) - - response = client.get_httpx_client().request( - **kwargs, - ) - - return _build_response(client=client, response=response) - - -def sync( - graph_id: str, - backup_id: str, - *, - client: AuthenticatedClient, - body: BackupRestoreRequest, -) -> Any | ErrorResponse | HTTPValidationError | None: - """Restore Encrypted Backup - - Restore a graph database from an encrypted backup. 
- - Restores a complete graph database from an encrypted backup: - - **Format**: Only full_dump backups can be restored - - **Encryption**: Only encrypted backups can be restored (security requirement) - - **System Backup**: Creates automatic backup of existing database before restore - - **Verification**: Optionally verifies database integrity after restore - - **Restore Features:** - - **Atomic Operation**: Complete replacement of database - - **Rollback Protection**: System backup created before restore - - **Data Integrity**: Verification ensures successful restore - - **Security**: Only encrypted backups to prevent data tampering - - **Operation State Machine:** - ``` - pending → backing_up_current → downloading → restoring → verifying → completed - ↘ failed - ``` - - **pending**: Restore queued, waiting to start - - **backing_up_current**: Creating safety backup of existing database - - **downloading**: Downloading backup from storage - - **restoring**: Replacing database with backup contents - - **verifying**: Verifying database integrity (if enabled) - - **completed**: Restore successful, database operational - - **failed**: Restore failed (rollback may be available) - - **Expected Durations:** - Operation times vary by database size (includes backup + restore): - - **Small** (<1GB): 1-3 minutes - - **Medium** (1-10GB): 5-15 minutes - - **Large** (10-100GB): 20-45 minutes - - **Very Large** (>100GB): 45+ minutes - - Note: Restore operations take longer than backups due to safety backup step. 
- - **Progress Monitoring:** - Use the returned operation_id to connect to the SSE stream: - ```javascript - const eventSource = new EventSource('/v1/operations/{operation_id}/stream'); - eventSource.addEventListener('operation_progress', (event) => { - const data = JSON.parse(event.data); - console.log('Restore progress:', data.message); - console.log('Status:', data.status); // Shows current state - }); - ``` - - **SSE Connection Limits:** - - Maximum 5 concurrent SSE connections per user - - Rate limited to 10 new connections per minute - - Automatic circuit breaker for Redis failures - - Graceful degradation if event system unavailable - - **Important Notes:** - - Only encrypted backups can be restored (security measure) - - Existing database is backed up to S3 before restore - - Restore is a destructive operation - existing data is replaced - - System backups are stored separately for recovery - - **Credit Consumption:** - - Base cost: 100.0 credits - - Large databases (>10GB): 200.0 credits - - Multiplied by graph tier - - Returns operation details for SSE monitoring. - - Args: - graph_id (str): - backup_id (str): Backup identifier - body (BackupRestoreRequest): Request model for restoring from a backup. - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. - - Returns: - Any | ErrorResponse | HTTPValidationError - """ - - return sync_detailed( - graph_id=graph_id, - backup_id=backup_id, - client=client, - body=body, - ).parsed - - -async def asyncio_detailed( - graph_id: str, - backup_id: str, - *, - client: AuthenticatedClient, - body: BackupRestoreRequest, -) -> Response[Any | ErrorResponse | HTTPValidationError]: - """Restore Encrypted Backup - - Restore a graph database from an encrypted backup. 
- - Restores a complete graph database from an encrypted backup: - - **Format**: Only full_dump backups can be restored - - **Encryption**: Only encrypted backups can be restored (security requirement) - - **System Backup**: Creates automatic backup of existing database before restore - - **Verification**: Optionally verifies database integrity after restore - - **Restore Features:** - - **Atomic Operation**: Complete replacement of database - - **Rollback Protection**: System backup created before restore - - **Data Integrity**: Verification ensures successful restore - - **Security**: Only encrypted backups to prevent data tampering - - **Operation State Machine:** - ``` - pending → backing_up_current → downloading → restoring → verifying → completed - ↘ failed - ``` - - **pending**: Restore queued, waiting to start - - **backing_up_current**: Creating safety backup of existing database - - **downloading**: Downloading backup from storage - - **restoring**: Replacing database with backup contents - - **verifying**: Verifying database integrity (if enabled) - - **completed**: Restore successful, database operational - - **failed**: Restore failed (rollback may be available) - - **Expected Durations:** - Operation times vary by database size (includes backup + restore): - - **Small** (<1GB): 1-3 minutes - - **Medium** (1-10GB): 5-15 minutes - - **Large** (10-100GB): 20-45 minutes - - **Very Large** (>100GB): 45+ minutes - - Note: Restore operations take longer than backups due to safety backup step. 
- - **Progress Monitoring:** - Use the returned operation_id to connect to the SSE stream: - ```javascript - const eventSource = new EventSource('/v1/operations/{operation_id}/stream'); - eventSource.addEventListener('operation_progress', (event) => { - const data = JSON.parse(event.data); - console.log('Restore progress:', data.message); - console.log('Status:', data.status); // Shows current state - }); - ``` - - **SSE Connection Limits:** - - Maximum 5 concurrent SSE connections per user - - Rate limited to 10 new connections per minute - - Automatic circuit breaker for Redis failures - - Graceful degradation if event system unavailable - - **Important Notes:** - - Only encrypted backups can be restored (security measure) - - Existing database is backed up to S3 before restore - - Restore is a destructive operation - existing data is replaced - - System backups are stored separately for recovery - - **Credit Consumption:** - - Base cost: 100.0 credits - - Large databases (>10GB): 200.0 credits - - Multiplied by graph tier - - Returns operation details for SSE monitoring. - - Args: - graph_id (str): - backup_id (str): Backup identifier - body (BackupRestoreRequest): Request model for restoring from a backup. - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. - - Returns: - Response[Any | ErrorResponse | HTTPValidationError] - """ - - kwargs = _get_kwargs( - graph_id=graph_id, - backup_id=backup_id, - body=body, - ) - - response = await client.get_async_httpx_client().request(**kwargs) - - return _build_response(client=client, response=response) - - -async def asyncio( - graph_id: str, - backup_id: str, - *, - client: AuthenticatedClient, - body: BackupRestoreRequest, -) -> Any | ErrorResponse | HTTPValidationError | None: - """Restore Encrypted Backup - - Restore a graph database from an encrypted backup. 
- - Restores a complete graph database from an encrypted backup: - - **Format**: Only full_dump backups can be restored - - **Encryption**: Only encrypted backups can be restored (security requirement) - - **System Backup**: Creates automatic backup of existing database before restore - - **Verification**: Optionally verifies database integrity after restore - - **Restore Features:** - - **Atomic Operation**: Complete replacement of database - - **Rollback Protection**: System backup created before restore - - **Data Integrity**: Verification ensures successful restore - - **Security**: Only encrypted backups to prevent data tampering - - **Operation State Machine:** - ``` - pending → backing_up_current → downloading → restoring → verifying → completed - ↘ failed - ``` - - **pending**: Restore queued, waiting to start - - **backing_up_current**: Creating safety backup of existing database - - **downloading**: Downloading backup from storage - - **restoring**: Replacing database with backup contents - - **verifying**: Verifying database integrity (if enabled) - - **completed**: Restore successful, database operational - - **failed**: Restore failed (rollback may be available) - - **Expected Durations:** - Operation times vary by database size (includes backup + restore): - - **Small** (<1GB): 1-3 minutes - - **Medium** (1-10GB): 5-15 minutes - - **Large** (10-100GB): 20-45 minutes - - **Very Large** (>100GB): 45+ minutes - - Note: Restore operations take longer than backups due to safety backup step. 
- - **Progress Monitoring:** - Use the returned operation_id to connect to the SSE stream: - ```javascript - const eventSource = new EventSource('/v1/operations/{operation_id}/stream'); - eventSource.addEventListener('operation_progress', (event) => { - const data = JSON.parse(event.data); - console.log('Restore progress:', data.message); - console.log('Status:', data.status); // Shows current state - }); - ``` - - **SSE Connection Limits:** - - Maximum 5 concurrent SSE connections per user - - Rate limited to 10 new connections per minute - - Automatic circuit breaker for Redis failures - - Graceful degradation if event system unavailable - - **Important Notes:** - - Only encrypted backups can be restored (security measure) - - Existing database is backed up to S3 before restore - - Restore is a destructive operation - existing data is replaced - - System backups are stored separately for recovery - - **Credit Consumption:** - - Base cost: 100.0 credits - - Large databases (>10GB): 200.0 credits - - Multiplied by graph tier - - Returns operation details for SSE monitoring. - - Args: - graph_id (str): - backup_id (str): Backup identifier - body (BackupRestoreRequest): Request model for restoring from a backup. - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. 
# Generated API client module for POST /v1/graphs/{graph_id}/operations/create-backup.
# Submits an asynchronous backup operation; on HTTP 202 the server returns an
# OperationEnvelope describing the queued operation.
from http import HTTPStatus
from typing import Any, cast
from urllib.parse import quote

import httpx

from ... import errors
from ...client import AuthenticatedClient, Client
from ...models.backup_create_request import BackupCreateRequest
from ...models.http_validation_error import HTTPValidationError
from ...models.operation_envelope import OperationEnvelope
from ...models.operation_error import OperationError
from ...types import UNSET, Response, Unset


def _get_kwargs(
    graph_id: str,
    *,
    body: BackupCreateRequest,
    idempotency_key: None | str | Unset = UNSET,
) -> dict[str, Any]:
    # Build the httpx request kwargs. The Idempotency-Key header is only sent
    # when the caller supplied one (Unset means "omit the header").
    headers: dict[str, Any] = {}
    if not isinstance(idempotency_key, Unset):
        headers["Idempotency-Key"] = idempotency_key

    _kwargs: dict[str, Any] = {
        "method": "post",
        "url": "/v1/graphs/{graph_id}/operations/create-backup".format(
            graph_id=quote(str(graph_id), safe=""),
        ),
    }

    _kwargs["json"] = body.to_dict()

    headers["Content-Type"] = "application/json"

    _kwargs["headers"] = headers
    return _kwargs


def _parse_response(
    *, client: AuthenticatedClient | Client, response: httpx.Response
) -> Any | HTTPValidationError | OperationEnvelope | OperationError | None:
    # Map documented status codes to typed models; 401/403/429/500 carry no
    # parseable body and are returned as None cast to Any.
    if response.status_code == 202:
        response_202 = OperationEnvelope.from_dict(response.json())

        return response_202

    if response.status_code == 400:
        response_400 = OperationError.from_dict(response.json())

        return response_400

    if response.status_code == 401:
        response_401 = cast(Any, None)
        return response_401

    if response.status_code == 403:
        response_403 = cast(Any, None)
        return response_403

    if response.status_code == 404:
        response_404 = OperationError.from_dict(response.json())

        return response_404

    if response.status_code == 409:
        response_409 = OperationError.from_dict(response.json())

        return response_409

    if response.status_code == 422:
        response_422 = HTTPValidationError.from_dict(response.json())

        return response_422

    if response.status_code == 429:
        response_429 = cast(Any, None)
        return response_429

    if response.status_code == 500:
        response_500 = cast(Any, None)
        return response_500

    if client.raise_on_unexpected_status:
        raise errors.UnexpectedStatus(response.status_code, response.content)
    else:
        return None


def _build_response(
    *, client: AuthenticatedClient | Client, response: httpx.Response
) -> Response[Any | HTTPValidationError | OperationEnvelope | OperationError]:
    # Wrap the raw httpx response plus the parsed payload into the generic
    # Response envelope used by every endpoint in this client.
    return Response(
        status_code=HTTPStatus(response.status_code),
        content=response.content,
        headers=response.headers,
        parsed=_parse_response(client=client, response=response),
    )


def sync_detailed(
    graph_id: str,
    *,
    client: AuthenticatedClient,
    body: BackupCreateRequest,
    idempotency_key: None | str | Unset = UNSET,
) -> Response[Any | HTTPValidationError | OperationEnvelope | OperationError]:
    """Create Backup

    Create a backup of the graph database (async).

    **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours
    return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict.

    Args:
        graph_id (str):
        idempotency_key (None | str | Unset):
        body (BackupCreateRequest): Request model for creating a backup.

    Raises:
        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
        httpx.TimeoutException: If the request takes longer than Client.timeout.

    Returns:
        Response[Any | HTTPValidationError | OperationEnvelope | OperationError]
    """

    kwargs = _get_kwargs(
        graph_id=graph_id,
        body=body,
        idempotency_key=idempotency_key,
    )

    response = client.get_httpx_client().request(
        **kwargs,
    )

    return _build_response(client=client, response=response)


def sync(
    graph_id: str,
    *,
    client: AuthenticatedClient,
    body: BackupCreateRequest,
    idempotency_key: None | str | Unset = UNSET,
) -> Any | HTTPValidationError | OperationEnvelope | OperationError | None:
    """Create Backup

    Create a backup of the graph database (async).

    **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours
    return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict.

    Args:
        graph_id (str):
        idempotency_key (None | str | Unset):
        body (BackupCreateRequest): Request model for creating a backup.

    Raises:
        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
        httpx.TimeoutException: If the request takes longer than Client.timeout.

    Returns:
        Any | HTTPValidationError | OperationEnvelope | OperationError
    """

    return sync_detailed(
        graph_id=graph_id,
        client=client,
        body=body,
        idempotency_key=idempotency_key,
    ).parsed


async def asyncio_detailed(
    graph_id: str,
    *,
    client: AuthenticatedClient,
    body: BackupCreateRequest,
    idempotency_key: None | str | Unset = UNSET,
) -> Response[Any | HTTPValidationError | OperationEnvelope | OperationError]:
    """Create Backup

    Create a backup of the graph database (async).

    **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours
    return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict.

    Args:
        graph_id (str):
        idempotency_key (None | str | Unset):
        body (BackupCreateRequest): Request model for creating a backup.

    Raises:
        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
        httpx.TimeoutException: If the request takes longer than Client.timeout.

    Returns:
        Response[Any | HTTPValidationError | OperationEnvelope | OperationError]
    """

    kwargs = _get_kwargs(
        graph_id=graph_id,
        body=body,
        idempotency_key=idempotency_key,
    )

    response = await client.get_async_httpx_client().request(**kwargs)

    return _build_response(client=client, response=response)


async def asyncio(
    graph_id: str,
    *,
    client: AuthenticatedClient,
    body: BackupCreateRequest,
    idempotency_key: None | str | Unset = UNSET,
) -> Any | HTTPValidationError | OperationEnvelope | OperationError | None:
    """Create Backup

    Create a backup of the graph database (async).

    **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours
    return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict.

    Args:
        graph_id (str):
        idempotency_key (None | str | Unset):
        body (BackupCreateRequest): Request model for creating a backup.

    Raises:
        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
        httpx.TimeoutException: If the request takes longer than Client.timeout.

    Returns:
        Any | HTTPValidationError | OperationEnvelope | OperationError
    """

    return (
        await asyncio_detailed(
            graph_id=graph_id,
            client=client,
            body=body,
            idempotency_key=idempotency_key,
        )
    ).parsed
# Generated API client module for POST /v1/graphs/{graph_id}/operations/create-subgraph.
# Creates a subgraph (optionally forking parent data); success is HTTP 200 with
# an OperationEnvelope.
from http import HTTPStatus
from typing import Any, cast
from urllib.parse import quote

import httpx

from ... import errors
from ...client import AuthenticatedClient, Client
from ...models.create_subgraph_request import CreateSubgraphRequest
from ...models.http_validation_error import HTTPValidationError
from ...models.operation_envelope import OperationEnvelope
from ...models.operation_error import OperationError
from ...types import UNSET, Response, Unset


def _get_kwargs(
    graph_id: str,
    *,
    body: CreateSubgraphRequest,
    idempotency_key: None | str | Unset = UNSET,
) -> dict[str, Any]:
    # Build the httpx request kwargs. The Idempotency-Key header is only sent
    # when the caller supplied one (Unset means "omit the header").
    headers: dict[str, Any] = {}
    if not isinstance(idempotency_key, Unset):
        headers["Idempotency-Key"] = idempotency_key

    _kwargs: dict[str, Any] = {
        "method": "post",
        "url": "/v1/graphs/{graph_id}/operations/create-subgraph".format(
            graph_id=quote(str(graph_id), safe=""),
        ),
    }

    _kwargs["json"] = body.to_dict()

    headers["Content-Type"] = "application/json"

    _kwargs["headers"] = headers
    return _kwargs


def _parse_response(
    *, client: AuthenticatedClient | Client, response: httpx.Response
) -> Any | HTTPValidationError | OperationEnvelope | OperationError | None:
    # Map documented status codes to typed models; 401/403/429/500 carry no
    # parseable body and are returned as None cast to Any.
    if response.status_code == 200:
        response_200 = OperationEnvelope.from_dict(response.json())

        return response_200

    if response.status_code == 400:
        response_400 = OperationError.from_dict(response.json())

        return response_400

    if response.status_code == 401:
        response_401 = cast(Any, None)
        return response_401

    if response.status_code == 403:
        response_403 = cast(Any, None)
        return response_403

    if response.status_code == 404:
        response_404 = OperationError.from_dict(response.json())

        return response_404

    if response.status_code == 409:
        response_409 = OperationError.from_dict(response.json())

        return response_409

    if response.status_code == 422:
        response_422 = HTTPValidationError.from_dict(response.json())

        return response_422

    if response.status_code == 429:
        response_429 = cast(Any, None)
        return response_429

    if response.status_code == 500:
        response_500 = cast(Any, None)
        return response_500

    if client.raise_on_unexpected_status:
        raise errors.UnexpectedStatus(response.status_code, response.content)
    else:
        return None


def _build_response(
    *, client: AuthenticatedClient | Client, response: httpx.Response
) -> Response[Any | HTTPValidationError | OperationEnvelope | OperationError]:
    # Wrap the raw httpx response plus the parsed payload into the generic
    # Response envelope used by every endpoint in this client.
    return Response(
        status_code=HTTPStatus(response.status_code),
        content=response.content,
        headers=response.headers,
        parsed=_parse_response(client=client, response=response),
    )


def sync_detailed(
    graph_id: str,
    *,
    client: AuthenticatedClient,
    body: CreateSubgraphRequest,
    idempotency_key: None | str | Unset = UNSET,
) -> Response[Any | HTTPValidationError | OperationEnvelope | OperationError]:
    """Create Subgraph

    Create a new subgraph, optionally forking parent data.

    **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours
    return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict.

    Args:
        graph_id (str):
        idempotency_key (None | str | Unset):
        body (CreateSubgraphRequest): Request model for creating a subgraph.

    Raises:
        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
        httpx.TimeoutException: If the request takes longer than Client.timeout.

    Returns:
        Response[Any | HTTPValidationError | OperationEnvelope | OperationError]
    """

    kwargs = _get_kwargs(
        graph_id=graph_id,
        body=body,
        idempotency_key=idempotency_key,
    )

    response = client.get_httpx_client().request(
        **kwargs,
    )

    return _build_response(client=client, response=response)


def sync(
    graph_id: str,
    *,
    client: AuthenticatedClient,
    body: CreateSubgraphRequest,
    idempotency_key: None | str | Unset = UNSET,
) -> Any | HTTPValidationError | OperationEnvelope | OperationError | None:
    """Create Subgraph

    Create a new subgraph, optionally forking parent data.

    **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours
    return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict.

    Args:
        graph_id (str):
        idempotency_key (None | str | Unset):
        body (CreateSubgraphRequest): Request model for creating a subgraph.

    Raises:
        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
        httpx.TimeoutException: If the request takes longer than Client.timeout.

    Returns:
        Any | HTTPValidationError | OperationEnvelope | OperationError
    """

    return sync_detailed(
        graph_id=graph_id,
        client=client,
        body=body,
        idempotency_key=idempotency_key,
    ).parsed


async def asyncio_detailed(
    graph_id: str,
    *,
    client: AuthenticatedClient,
    body: CreateSubgraphRequest,
    idempotency_key: None | str | Unset = UNSET,
) -> Response[Any | HTTPValidationError | OperationEnvelope | OperationError]:
    """Create Subgraph

    Create a new subgraph, optionally forking parent data.

    **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours
    return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict.

    Args:
        graph_id (str):
        idempotency_key (None | str | Unset):
        body (CreateSubgraphRequest): Request model for creating a subgraph.

    Raises:
        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
        httpx.TimeoutException: If the request takes longer than Client.timeout.

    Returns:
        Response[Any | HTTPValidationError | OperationEnvelope | OperationError]
    """

    kwargs = _get_kwargs(
        graph_id=graph_id,
        body=body,
        idempotency_key=idempotency_key,
    )

    response = await client.get_async_httpx_client().request(**kwargs)

    return _build_response(client=client, response=response)


async def asyncio(
    graph_id: str,
    *,
    client: AuthenticatedClient,
    body: CreateSubgraphRequest,
    idempotency_key: None | str | Unset = UNSET,
) -> Any | HTTPValidationError | OperationEnvelope | OperationError | None:
    """Create Subgraph

    Create a new subgraph, optionally forking parent data.

    **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours
    return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict.

    Args:
        graph_id (str):
        idempotency_key (None | str | Unset):
        body (CreateSubgraphRequest): Request model for creating a subgraph.

    Raises:
        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
        httpx.TimeoutException: If the request takes longer than Client.timeout.

    Returns:
        Any | HTTPValidationError | OperationEnvelope | OperationError
    """

    return (
        await asyncio_detailed(
            graph_id=graph_id,
            client=client,
            body=body,
            idempotency_key=idempotency_key,
        )
    ).parsed
# Generated API client module for POST /v1/graphs/{graph_id}/operations/delete-subgraph.
# Deletes a subgraph database; success is HTTP 200 with an OperationEnvelope.
from http import HTTPStatus
from typing import Any, cast
from urllib.parse import quote

import httpx

from ... import errors
from ...client import AuthenticatedClient, Client
from ...models.delete_subgraph_op import DeleteSubgraphOp
from ...models.http_validation_error import HTTPValidationError
from ...models.operation_envelope import OperationEnvelope
from ...models.operation_error import OperationError
from ...types import UNSET, Response, Unset


def _get_kwargs(
    graph_id: str,
    *,
    body: DeleteSubgraphOp,
    idempotency_key: None | str | Unset = UNSET,
) -> dict[str, Any]:
    # Build the httpx request kwargs. The Idempotency-Key header is only sent
    # when the caller supplied one (Unset means "omit the header").
    headers: dict[str, Any] = {}
    if not isinstance(idempotency_key, Unset):
        headers["Idempotency-Key"] = idempotency_key

    _kwargs: dict[str, Any] = {
        "method": "post",
        "url": "/v1/graphs/{graph_id}/operations/delete-subgraph".format(
            graph_id=quote(str(graph_id), safe=""),
        ),
    }

    _kwargs["json"] = body.to_dict()

    headers["Content-Type"] = "application/json"

    _kwargs["headers"] = headers
    return _kwargs


def _parse_response(
    *, client: AuthenticatedClient | Client, response: httpx.Response
) -> Any | HTTPValidationError | OperationEnvelope | OperationError | None:
    # Map documented status codes to typed models; 401/403/429/500 carry no
    # parseable body and are returned as None cast to Any.
    if response.status_code == 200:
        response_200 = OperationEnvelope.from_dict(response.json())

        return response_200

    if response.status_code == 400:
        response_400 = OperationError.from_dict(response.json())

        return response_400

    if response.status_code == 401:
        response_401 = cast(Any, None)
        return response_401

    if response.status_code == 403:
        response_403 = cast(Any, None)
        return response_403

    if response.status_code == 404:
        response_404 = OperationError.from_dict(response.json())

        return response_404

    if response.status_code == 409:
        response_409 = OperationError.from_dict(response.json())

        return response_409

    if response.status_code == 422:
        response_422 = HTTPValidationError.from_dict(response.json())

        return response_422

    if response.status_code == 429:
        response_429 = cast(Any, None)
        return response_429

    if response.status_code == 500:
        response_500 = cast(Any, None)
        return response_500

    if client.raise_on_unexpected_status:
        raise errors.UnexpectedStatus(response.status_code, response.content)
    else:
        return None


def _build_response(
    *, client: AuthenticatedClient | Client, response: httpx.Response
) -> Response[Any | HTTPValidationError | OperationEnvelope | OperationError]:
    # Wrap the raw httpx response plus the parsed payload into the generic
    # Response envelope used by every endpoint in this client.
    return Response(
        status_code=HTTPStatus(response.status_code),
        content=response.content,
        headers=response.headers,
        parsed=_parse_response(client=client, response=response),
    )


def sync_detailed(
    graph_id: str,
    *,
    client: AuthenticatedClient,
    body: DeleteSubgraphOp,
    idempotency_key: None | str | Unset = UNSET,
) -> Response[Any | HTTPValidationError | OperationEnvelope | OperationError]:
    """Delete Subgraph

    Delete a subgraph database.

    **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours
    return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict.

    Args:
        graph_id (str):
        idempotency_key (None | str | Unset):
        body (DeleteSubgraphOp): Body for the delete-subgraph operation.

    Raises:
        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
        httpx.TimeoutException: If the request takes longer than Client.timeout.

    Returns:
        Response[Any | HTTPValidationError | OperationEnvelope | OperationError]
    """

    kwargs = _get_kwargs(
        graph_id=graph_id,
        body=body,
        idempotency_key=idempotency_key,
    )

    response = client.get_httpx_client().request(
        **kwargs,
    )

    return _build_response(client=client, response=response)


def sync(
    graph_id: str,
    *,
    client: AuthenticatedClient,
    body: DeleteSubgraphOp,
    idempotency_key: None | str | Unset = UNSET,
) -> Any | HTTPValidationError | OperationEnvelope | OperationError | None:
    """Delete Subgraph

    Delete a subgraph database.

    **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours
    return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict.

    Args:
        graph_id (str):
        idempotency_key (None | str | Unset):
        body (DeleteSubgraphOp): Body for the delete-subgraph operation.

    Raises:
        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
        httpx.TimeoutException: If the request takes longer than Client.timeout.

    Returns:
        Any | HTTPValidationError | OperationEnvelope | OperationError
    """

    return sync_detailed(
        graph_id=graph_id,
        client=client,
        body=body,
        idempotency_key=idempotency_key,
    ).parsed


async def asyncio_detailed(
    graph_id: str,
    *,
    client: AuthenticatedClient,
    body: DeleteSubgraphOp,
    idempotency_key: None | str | Unset = UNSET,
) -> Response[Any | HTTPValidationError | OperationEnvelope | OperationError]:
    """Delete Subgraph

    Delete a subgraph database.

    **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours
    return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict.

    Args:
        graph_id (str):
        idempotency_key (None | str | Unset):
        body (DeleteSubgraphOp): Body for the delete-subgraph operation.

    Raises:
        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
        httpx.TimeoutException: If the request takes longer than Client.timeout.

    Returns:
        Response[Any | HTTPValidationError | OperationEnvelope | OperationError]
    """

    kwargs = _get_kwargs(
        graph_id=graph_id,
        body=body,
        idempotency_key=idempotency_key,
    )

    response = await client.get_async_httpx_client().request(**kwargs)

    return _build_response(client=client, response=response)


async def asyncio(
    graph_id: str,
    *,
    client: AuthenticatedClient,
    body: DeleteSubgraphOp,
    idempotency_key: None | str | Unset = UNSET,
) -> Any | HTTPValidationError | OperationEnvelope | OperationError | None:
    """Delete Subgraph

    Delete a subgraph database.

    **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours
    return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict.

    Args:
        graph_id (str):
        idempotency_key (None | str | Unset):
        body (DeleteSubgraphOp): Body for the delete-subgraph operation.

    Raises:
        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
        httpx.TimeoutException: If the request takes longer than Client.timeout.

    Returns:
        Any | HTTPValidationError | OperationEnvelope | OperationError
    """

    return (
        await asyncio_detailed(
            graph_id=graph_id,
            client=client,
            body=body,
            idempotency_key=idempotency_key,
        )
    ).parsed
# Generated API client module for POST /v1/graphs/{graph_id}/operations/materialize.
# Triggers graph materialization; all options are query parameters (no JSON body).
# Success is HTTP 202 with an OperationEnvelope.
from http import HTTPStatus
from typing import Any, cast
from urllib.parse import quote

import httpx

from ... import errors
from ...client import AuthenticatedClient, Client
from ...models.http_validation_error import HTTPValidationError
from ...models.operation_envelope import OperationEnvelope
from ...models.operation_error import OperationError
from ...types import UNSET, Response, Unset


def _get_kwargs(
    graph_id: str,
    *,
    force: bool | Unset = False,
    rebuild: bool | Unset = False,
    ignore_errors: bool | Unset = True,
    dry_run: bool | Unset = False,
    source: None | str | Unset = UNSET,
    materialize_embeddings: bool | Unset = False,
    idempotency_key: None | str | Unset = UNSET,
) -> dict[str, Any]:
    # Build the httpx request kwargs. The Idempotency-Key header is only sent
    # when the caller supplied one (Unset means "omit the header").
    headers: dict[str, Any] = {}
    if not isinstance(idempotency_key, Unset):
        headers["Idempotency-Key"] = idempotency_key

    params: dict[str, Any] = {}

    params["force"] = force

    params["rebuild"] = rebuild

    params["ignore_errors"] = ignore_errors

    params["dry_run"] = dry_run

    json_source: None | str | Unset
    if isinstance(source, Unset):
        json_source = UNSET
    else:
        json_source = source
    params["source"] = json_source

    params["materialize_embeddings"] = materialize_embeddings

    # Drop unset/None query parameters so they are omitted from the URL.
    params = {k: v for k, v in params.items() if v is not UNSET and v is not None}

    _kwargs: dict[str, Any] = {
        "method": "post",
        "url": "/v1/graphs/{graph_id}/operations/materialize".format(
            graph_id=quote(str(graph_id), safe=""),
        ),
        "params": params,
    }

    _kwargs["headers"] = headers
    return _kwargs


def _parse_response(
    *, client: AuthenticatedClient | Client, response: httpx.Response
) -> Any | HTTPValidationError | OperationEnvelope | OperationError | None:
    # Map documented status codes to typed models; 401/403/429/500 carry no
    # parseable body and are returned as None cast to Any.
    if response.status_code == 202:
        response_202 = OperationEnvelope.from_dict(response.json())

        return response_202

    if response.status_code == 400:
        response_400 = OperationError.from_dict(response.json())

        return response_400

    if response.status_code == 401:
        response_401 = cast(Any, None)
        return response_401

    if response.status_code == 403:
        response_403 = cast(Any, None)
        return response_403

    if response.status_code == 404:
        response_404 = OperationError.from_dict(response.json())

        return response_404

    if response.status_code == 409:
        response_409 = OperationError.from_dict(response.json())

        return response_409

    if response.status_code == 422:
        response_422 = HTTPValidationError.from_dict(response.json())

        return response_422

    if response.status_code == 429:
        response_429 = cast(Any, None)
        return response_429

    if response.status_code == 500:
        response_500 = cast(Any, None)
        return response_500

    if client.raise_on_unexpected_status:
        raise errors.UnexpectedStatus(response.status_code, response.content)
    else:
        return None


def _build_response(
    *, client: AuthenticatedClient | Client, response: httpx.Response
) -> Response[Any | HTTPValidationError | OperationEnvelope | OperationError]:
    # Wrap the raw httpx response plus the parsed payload into the generic
    # Response envelope used by every endpoint in this client.
    return Response(
        status_code=HTTPStatus(response.status_code),
        content=response.content,
        headers=response.headers,
        parsed=_parse_response(client=client, response=response),
    )


def sync_detailed(
    graph_id: str,
    *,
    client: AuthenticatedClient,
    force: bool | Unset = False,
    rebuild: bool | Unset = False,
    ignore_errors: bool | Unset = True,
    dry_run: bool | Unset = False,
    source: None | str | Unset = UNSET,
    materialize_embeddings: bool | Unset = False,
    idempotency_key: None | str | Unset = UNSET,
) -> Response[Any | HTTPValidationError | OperationEnvelope | OperationError]:
    """Materialize Graph

    Materialize graph from staging tables or extensions OLTP.

    Delegates to the existing materialize_graph handler which handles
    distributed locking, source routing, and Dagster/direct dispatch.

    **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours
    return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict.

    Args:
        graph_id (str):
        force (bool | Unset): Default: False.
        rebuild (bool | Unset): Default: False.
        ignore_errors (bool | Unset): Default: True.
        dry_run (bool | Unset): Default: False.
        source (None | str | Unset):
        materialize_embeddings (bool | Unset): Default: False.
        idempotency_key (None | str | Unset):

    Raises:
        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
        httpx.TimeoutException: If the request takes longer than Client.timeout.

    Returns:
        Response[Any | HTTPValidationError | OperationEnvelope | OperationError]
    """

    kwargs = _get_kwargs(
        graph_id=graph_id,
        force=force,
        rebuild=rebuild,
        ignore_errors=ignore_errors,
        dry_run=dry_run,
        source=source,
        materialize_embeddings=materialize_embeddings,
        idempotency_key=idempotency_key,
    )

    response = client.get_httpx_client().request(
        **kwargs,
    )

    return _build_response(client=client, response=response)


def sync(
    graph_id: str,
    *,
    client: AuthenticatedClient,
    force: bool | Unset = False,
    rebuild: bool | Unset = False,
    ignore_errors: bool | Unset = True,
    dry_run: bool | Unset = False,
    source: None | str | Unset = UNSET,
    materialize_embeddings: bool | Unset = False,
    idempotency_key: None | str | Unset = UNSET,
) -> Any | HTTPValidationError | OperationEnvelope | OperationError | None:
    """Materialize Graph

    Materialize graph from staging tables or extensions OLTP.

    Delegates to the existing materialize_graph handler which handles
    distributed locking, source routing, and Dagster/direct dispatch.

    **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours
    return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict.

    Args:
        graph_id (str):
        force (bool | Unset): Default: False.
        rebuild (bool | Unset): Default: False.
        ignore_errors (bool | Unset): Default: True.
        dry_run (bool | Unset): Default: False.
        source (None | str | Unset):
        materialize_embeddings (bool | Unset): Default: False.
        idempotency_key (None | str | Unset):

    Raises:
        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
        httpx.TimeoutException: If the request takes longer than Client.timeout.

    Returns:
        Any | HTTPValidationError | OperationEnvelope | OperationError
    """

    return sync_detailed(
        graph_id=graph_id,
        client=client,
        force=force,
        rebuild=rebuild,
        ignore_errors=ignore_errors,
        dry_run=dry_run,
        source=source,
        materialize_embeddings=materialize_embeddings,
        idempotency_key=idempotency_key,
    ).parsed


async def asyncio_detailed(
    graph_id: str,
    *,
    client: AuthenticatedClient,
    force: bool | Unset = False,
    rebuild: bool | Unset = False,
    ignore_errors: bool | Unset = True,
    dry_run: bool | Unset = False,
    source: None | str | Unset = UNSET,
    materialize_embeddings: bool | Unset = False,
    idempotency_key: None | str | Unset = UNSET,
) -> Response[Any | HTTPValidationError | OperationEnvelope | OperationError]:
    """Materialize Graph

    Materialize graph from staging tables or extensions OLTP.

    Delegates to the existing materialize_graph handler which handles
    distributed locking, source routing, and Dagster/direct dispatch.

    **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours
    return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict.

    Args:
        graph_id (str):
        force (bool | Unset): Default: False.
        rebuild (bool | Unset): Default: False.
        ignore_errors (bool | Unset): Default: True.
        dry_run (bool | Unset): Default: False.
        source (None | str | Unset):
        materialize_embeddings (bool | Unset): Default: False.
        idempotency_key (None | str | Unset):

    Raises:
        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
        httpx.TimeoutException: If the request takes longer than Client.timeout.

    Returns:
        Response[Any | HTTPValidationError | OperationEnvelope | OperationError]
    """

    kwargs = _get_kwargs(
        graph_id=graph_id,
        force=force,
        rebuild=rebuild,
        ignore_errors=ignore_errors,
        dry_run=dry_run,
        source=source,
        materialize_embeddings=materialize_embeddings,
        idempotency_key=idempotency_key,
    )

    response = await client.get_async_httpx_client().request(**kwargs)

    return _build_response(client=client, response=response)


async def asyncio(
    graph_id: str,
    *,
    client: AuthenticatedClient,
    force: bool | Unset = False,
    rebuild: bool | Unset = False,
    ignore_errors: bool | Unset = True,
    dry_run: bool | Unset = False,
    source: None | str | Unset = UNSET,
    materialize_embeddings: bool | Unset = False,
    idempotency_key: None | str | Unset = UNSET,
) -> Any | HTTPValidationError | OperationEnvelope | OperationError | None:
    """Materialize Graph

    Materialize graph from staging tables or extensions OLTP.

    Delegates to the existing materialize_graph handler which handles
    distributed locking, source routing, and Dagster/direct dispatch.

    **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours
    return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict.

    Args:
        graph_id (str):
        force (bool | Unset): Default: False.
        rebuild (bool | Unset): Default: False.
        ignore_errors (bool | Unset): Default: True.
        dry_run (bool | Unset): Default: False.
        source (None | str | Unset):
        materialize_embeddings (bool | Unset): Default: False.
        idempotency_key (None | str | Unset):

    Raises:
        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
        httpx.TimeoutException: If the request takes longer than Client.timeout.

    Returns:
        Any | HTTPValidationError | OperationEnvelope | OperationError
    """

    return (
        await asyncio_detailed(
            graph_id=graph_id,
            client=client,
            force=force,
            rebuild=rebuild,
            ignore_errors=ignore_errors,
            dry_run=dry_run,
            source=source,
            materialize_embeddings=materialize_embeddings,
            idempotency_key=idempotency_key,
        )
    ).parsed
+ response_400 = OperationError.from_dict(response.json()) + + return response_400 + + if response.status_code == 401: + response_401 = cast(Any, None) + return response_401 + + if response.status_code == 403: + response_403 = cast(Any, None) + return response_403 + + if response.status_code == 404: + response_404 = OperationError.from_dict(response.json()) + + return response_404 + + if response.status_code == 409: + response_409 = OperationError.from_dict(response.json()) + + return response_409 + + if response.status_code == 422: + response_422 = HTTPValidationError.from_dict(response.json()) + + return response_422 + + if response.status_code == 429: + response_429 = cast(Any, None) + return response_429 + + if response.status_code == 500: + response_500 = cast(Any, None) + return response_500 + + if client.raise_on_unexpected_status: + raise errors.UnexpectedStatus(response.status_code, response.content) + else: + return None + + +def _build_response( + *, client: AuthenticatedClient | Client, response: httpx.Response +) -> Response[Any | HTTPValidationError | OperationEnvelope | OperationError]: + return Response( + status_code=HTTPStatus(response.status_code), + content=response.content, + headers=response.headers, + parsed=_parse_response(client=client, response=response), + ) + + +def sync_detailed( + graph_id: str, + *, + client: AuthenticatedClient, + body: RestoreBackupOp, + idempotency_key: None | str | Unset = UNSET, +) -> Response[Any | HTTPValidationError | OperationEnvelope | OperationError]: + """Restore Backup + + Restore a graph database from an encrypted backup. + + Blocked for entity graphs — OLTP is the source of truth, use + the materialize operation instead. + + **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours + return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict. 
+ + Args: + graph_id (str): + idempotency_key (None | str | Unset): + body (RestoreBackupOp): Body for the restore-backup operation. + + Raises: + errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. + httpx.TimeoutException: If the request takes longer than Client.timeout. + + Returns: + Response[Any | HTTPValidationError | OperationEnvelope | OperationError] + """ + + kwargs = _get_kwargs( + graph_id=graph_id, + body=body, + idempotency_key=idempotency_key, + ) + + response = client.get_httpx_client().request( + **kwargs, + ) + + return _build_response(client=client, response=response) + + +def sync( + graph_id: str, + *, + client: AuthenticatedClient, + body: RestoreBackupOp, + idempotency_key: None | str | Unset = UNSET, +) -> Any | HTTPValidationError | OperationEnvelope | OperationError | None: + """Restore Backup + + Restore a graph database from an encrypted backup. + + Blocked for entity graphs — OLTP is the source of truth, use + the materialize operation instead. + + **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours + return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict. + + Args: + graph_id (str): + idempotency_key (None | str | Unset): + body (RestoreBackupOp): Body for the restore-backup operation. + + Raises: + errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. + httpx.TimeoutException: If the request takes longer than Client.timeout. 
+ + Returns: + Any | HTTPValidationError | OperationEnvelope | OperationError + """ + + return sync_detailed( + graph_id=graph_id, + client=client, + body=body, + idempotency_key=idempotency_key, + ).parsed + + +async def asyncio_detailed( + graph_id: str, + *, + client: AuthenticatedClient, + body: RestoreBackupOp, + idempotency_key: None | str | Unset = UNSET, +) -> Response[Any | HTTPValidationError | OperationEnvelope | OperationError]: + """Restore Backup + + Restore a graph database from an encrypted backup. + + Blocked for entity graphs — OLTP is the source of truth, use + the materialize operation instead. + + **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours + return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict. + + Args: + graph_id (str): + idempotency_key (None | str | Unset): + body (RestoreBackupOp): Body for the restore-backup operation. + + Raises: + errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. + httpx.TimeoutException: If the request takes longer than Client.timeout. + + Returns: + Response[Any | HTTPValidationError | OperationEnvelope | OperationError] + """ + + kwargs = _get_kwargs( + graph_id=graph_id, + body=body, + idempotency_key=idempotency_key, + ) + + response = await client.get_async_httpx_client().request(**kwargs) + + return _build_response(client=client, response=response) + + +async def asyncio( + graph_id: str, + *, + client: AuthenticatedClient, + body: RestoreBackupOp, + idempotency_key: None | str | Unset = UNSET, +) -> Any | HTTPValidationError | OperationEnvelope | OperationError | None: + """Restore Backup + + Restore a graph database from an encrypted backup. + + Blocked for entity graphs — OLTP is the source of truth, use + the materialize operation instead. 
+ + **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours + return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict. + + Args: + graph_id (str): + idempotency_key (None | str | Unset): + body (RestoreBackupOp): Body for the restore-backup operation. + + Raises: + errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. + httpx.TimeoutException: If the request takes longer than Client.timeout. + + Returns: + Any | HTTPValidationError | OperationEnvelope | OperationError + """ + + return ( + await asyncio_detailed( + graph_id=graph_id, + client=client, + body=body, + idempotency_key=idempotency_key, + ) + ).parsed diff --git a/robosystems_client/api/graph_operations/op_upgrade_tier.py b/robosystems_client/api/graph_operations/op_upgrade_tier.py new file mode 100644 index 0000000..58d2a2d --- /dev/null +++ b/robosystems_client/api/graph_operations/op_upgrade_tier.py @@ -0,0 +1,249 @@ +from http import HTTPStatus +from typing import Any, cast +from urllib.parse import quote + +import httpx + +from ... 
import errors +from ...client import AuthenticatedClient, Client +from ...models.http_validation_error import HTTPValidationError +from ...models.operation_envelope import OperationEnvelope +from ...models.operation_error import OperationError +from ...models.upgrade_tier_op import UpgradeTierOp +from ...types import UNSET, Response, Unset + + +def _get_kwargs( + graph_id: str, + *, + body: UpgradeTierOp, + idempotency_key: None | str | Unset = UNSET, +) -> dict[str, Any]: + headers: dict[str, Any] = {} + if not isinstance(idempotency_key, Unset): + headers["Idempotency-Key"] = idempotency_key + + _kwargs: dict[str, Any] = { + "method": "post", + "url": "/v1/graphs/{graph_id}/operations/upgrade-tier".format( + graph_id=quote(str(graph_id), safe=""), + ), + } + + _kwargs["json"] = body.to_dict() + + headers["Content-Type"] = "application/json" + + _kwargs["headers"] = headers + return _kwargs + + +def _parse_response( + *, client: AuthenticatedClient | Client, response: httpx.Response +) -> Any | HTTPValidationError | OperationEnvelope | OperationError | None: + if response.status_code == 202: + response_202 = OperationEnvelope.from_dict(response.json()) + + return response_202 + + if response.status_code == 400: + response_400 = OperationError.from_dict(response.json()) + + return response_400 + + if response.status_code == 401: + response_401 = cast(Any, None) + return response_401 + + if response.status_code == 403: + response_403 = cast(Any, None) + return response_403 + + if response.status_code == 404: + response_404 = OperationError.from_dict(response.json()) + + return response_404 + + if response.status_code == 409: + response_409 = OperationError.from_dict(response.json()) + + return response_409 + + if response.status_code == 422: + response_422 = HTTPValidationError.from_dict(response.json()) + + return response_422 + + if response.status_code == 429: + response_429 = cast(Any, None) + return response_429 + + if response.status_code == 500: + 
response_500 = cast(Any, None) + return response_500 + + if client.raise_on_unexpected_status: + raise errors.UnexpectedStatus(response.status_code, response.content) + else: + return None + + +def _build_response( + *, client: AuthenticatedClient | Client, response: httpx.Response +) -> Response[Any | HTTPValidationError | OperationEnvelope | OperationError]: + return Response( + status_code=HTTPStatus(response.status_code), + content=response.content, + headers=response.headers, + parsed=_parse_response(client=client, response=response), + ) + + +def sync_detailed( + graph_id: str, + *, + client: AuthenticatedClient, + body: UpgradeTierOp, + idempotency_key: None | str | Unset = UNSET, +) -> Response[Any | HTTPValidationError | OperationEnvelope | OperationError]: + """Upgrade Tier + + Change the infrastructure tier on a graph (async EBS migration). + + **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours + return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict. + + Args: + graph_id (str): + idempotency_key (None | str | Unset): + body (UpgradeTierOp): Body for the upgrade-tier operation. + + Raises: + errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. + httpx.TimeoutException: If the request takes longer than Client.timeout. 
+ + Returns: + Response[Any | HTTPValidationError | OperationEnvelope | OperationError] + """ + + kwargs = _get_kwargs( + graph_id=graph_id, + body=body, + idempotency_key=idempotency_key, + ) + + response = client.get_httpx_client().request( + **kwargs, + ) + + return _build_response(client=client, response=response) + + +def sync( + graph_id: str, + *, + client: AuthenticatedClient, + body: UpgradeTierOp, + idempotency_key: None | str | Unset = UNSET, +) -> Any | HTTPValidationError | OperationEnvelope | OperationError | None: + """Upgrade Tier + + Change the infrastructure tier on a graph (async EBS migration). + + **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours + return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict. + + Args: + graph_id (str): + idempotency_key (None | str | Unset): + body (UpgradeTierOp): Body for the upgrade-tier operation. + + Raises: + errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. + httpx.TimeoutException: If the request takes longer than Client.timeout. + + Returns: + Any | HTTPValidationError | OperationEnvelope | OperationError + """ + + return sync_detailed( + graph_id=graph_id, + client=client, + body=body, + idempotency_key=idempotency_key, + ).parsed + + +async def asyncio_detailed( + graph_id: str, + *, + client: AuthenticatedClient, + body: UpgradeTierOp, + idempotency_key: None | str | Unset = UNSET, +) -> Response[Any | HTTPValidationError | OperationEnvelope | OperationError]: + """Upgrade Tier + + Change the infrastructure tier on a graph (async EBS migration). + + **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours + return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict. 
+ + Args: + graph_id (str): + idempotency_key (None | str | Unset): + body (UpgradeTierOp): Body for the upgrade-tier operation. + + Raises: + errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. + httpx.TimeoutException: If the request takes longer than Client.timeout. + + Returns: + Response[Any | HTTPValidationError | OperationEnvelope | OperationError] + """ + + kwargs = _get_kwargs( + graph_id=graph_id, + body=body, + idempotency_key=idempotency_key, + ) + + response = await client.get_async_httpx_client().request(**kwargs) + + return _build_response(client=client, response=response) + + +async def asyncio( + graph_id: str, + *, + client: AuthenticatedClient, + body: UpgradeTierOp, + idempotency_key: None | str | Unset = UNSET, +) -> Any | HTTPValidationError | OperationEnvelope | OperationError | None: + """Upgrade Tier + + Change the infrastructure tier on a graph (async EBS migration). + + **Idempotency**: supply an `Idempotency-Key` header to make safe retries; replays within 24 hours + return the same envelope. Reusing the key with a different body returns HTTP 409 Conflict. + + Args: + graph_id (str): + idempotency_key (None | str | Unset): + body (UpgradeTierOp): Body for the upgrade-tier operation. + + Raises: + errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. + httpx.TimeoutException: If the request takes longer than Client.timeout. 
+ + Returns: + Any | HTTPValidationError | OperationEnvelope | OperationError + """ + + return ( + await asyncio_detailed( + graph_id=graph_id, + client=client, + body=body, + idempotency_key=idempotency_key, + ) + ).parsed diff --git a/robosystems_client/api/materialize/get_materialization_status.py b/robosystems_client/api/materialize/get_materialization_status.py deleted file mode 100644 index 93d74f8..0000000 --- a/robosystems_client/api/materialize/get_materialization_status.py +++ /dev/null @@ -1,275 +0,0 @@ -from http import HTTPStatus -from typing import Any, cast -from urllib.parse import quote - -import httpx - -from ... import errors -from ...client import AuthenticatedClient, Client -from ...models.error_response import ErrorResponse -from ...models.http_validation_error import HTTPValidationError -from ...models.materialize_status_response import MaterializeStatusResponse -from ...types import Response - - -def _get_kwargs( - graph_id: str, -) -> dict[str, Any]: - _kwargs: dict[str, Any] = { - "method": "get", - "url": "/v1/graphs/{graph_id}/materialize".format( - graph_id=quote(str(graph_id), safe=""), - ), - } - - return _kwargs - - -def _parse_response( - *, client: AuthenticatedClient | Client, response: httpx.Response -) -> Any | ErrorResponse | HTTPValidationError | MaterializeStatusResponse | None: - if response.status_code == 200: - response_200 = MaterializeStatusResponse.from_dict(response.json()) - - return response_200 - - if response.status_code == 401: - response_401 = cast(Any, None) - return response_401 - - if response.status_code == 403: - response_403 = ErrorResponse.from_dict(response.json()) - - return response_403 - - if response.status_code == 404: - response_404 = ErrorResponse.from_dict(response.json()) - - return response_404 - - if response.status_code == 422: - response_422 = HTTPValidationError.from_dict(response.json()) - - return response_422 - - if client.raise_on_unexpected_status: - raise 
errors.UnexpectedStatus(response.status_code, response.content) - else: - return None - - -def _build_response( - *, client: AuthenticatedClient | Client, response: httpx.Response -) -> Response[Any | ErrorResponse | HTTPValidationError | MaterializeStatusResponse]: - return Response( - status_code=HTTPStatus(response.status_code), - content=response.content, - headers=response.headers, - parsed=_parse_response(client=client, response=response), - ) - - -def sync_detailed( - graph_id: str, - *, - client: AuthenticatedClient, -) -> Response[Any | ErrorResponse | HTTPValidationError | MaterializeStatusResponse]: - """Get Materialization Status - - Get current materialization status for the graph. - - Shows whether the graph is stale (DuckDB has changes not yet in graph database), - when it was last materialized, and how long since last materialization. - - **Status Information:** - - Whether graph is currently stale - - Reason for staleness if applicable - - When graph became stale - - When graph was last materialized - - Total materialization count - - Hours since last materialization - - **Use Cases:** - - Decide if materialization is needed - - Monitor graph freshness - - Track materialization history - - Understand data pipeline state - - **Important Notes:** - - Stale graph means DuckDB has changes not in graph - - Graph becomes stale after file deletions - - Materialization clears staleness - - Status retrieval is included - no credit consumption - - Args: - graph_id (str): - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. 
- - Returns: - Response[Any | ErrorResponse | HTTPValidationError | MaterializeStatusResponse] - """ - - kwargs = _get_kwargs( - graph_id=graph_id, - ) - - response = client.get_httpx_client().request( - **kwargs, - ) - - return _build_response(client=client, response=response) - - -def sync( - graph_id: str, - *, - client: AuthenticatedClient, -) -> Any | ErrorResponse | HTTPValidationError | MaterializeStatusResponse | None: - """Get Materialization Status - - Get current materialization status for the graph. - - Shows whether the graph is stale (DuckDB has changes not yet in graph database), - when it was last materialized, and how long since last materialization. - - **Status Information:** - - Whether graph is currently stale - - Reason for staleness if applicable - - When graph became stale - - When graph was last materialized - - Total materialization count - - Hours since last materialization - - **Use Cases:** - - Decide if materialization is needed - - Monitor graph freshness - - Track materialization history - - Understand data pipeline state - - **Important Notes:** - - Stale graph means DuckDB has changes not in graph - - Graph becomes stale after file deletions - - Materialization clears staleness - - Status retrieval is included - no credit consumption - - Args: - graph_id (str): - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. - - Returns: - Any | ErrorResponse | HTTPValidationError | MaterializeStatusResponse - """ - - return sync_detailed( - graph_id=graph_id, - client=client, - ).parsed - - -async def asyncio_detailed( - graph_id: str, - *, - client: AuthenticatedClient, -) -> Response[Any | ErrorResponse | HTTPValidationError | MaterializeStatusResponse]: - """Get Materialization Status - - Get current materialization status for the graph. 
- - Shows whether the graph is stale (DuckDB has changes not yet in graph database), - when it was last materialized, and how long since last materialization. - - **Status Information:** - - Whether graph is currently stale - - Reason for staleness if applicable - - When graph became stale - - When graph was last materialized - - Total materialization count - - Hours since last materialization - - **Use Cases:** - - Decide if materialization is needed - - Monitor graph freshness - - Track materialization history - - Understand data pipeline state - - **Important Notes:** - - Stale graph means DuckDB has changes not in graph - - Graph becomes stale after file deletions - - Materialization clears staleness - - Status retrieval is included - no credit consumption - - Args: - graph_id (str): - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. - - Returns: - Response[Any | ErrorResponse | HTTPValidationError | MaterializeStatusResponse] - """ - - kwargs = _get_kwargs( - graph_id=graph_id, - ) - - response = await client.get_async_httpx_client().request(**kwargs) - - return _build_response(client=client, response=response) - - -async def asyncio( - graph_id: str, - *, - client: AuthenticatedClient, -) -> Any | ErrorResponse | HTTPValidationError | MaterializeStatusResponse | None: - """Get Materialization Status - - Get current materialization status for the graph. - - Shows whether the graph is stale (DuckDB has changes not yet in graph database), - when it was last materialized, and how long since last materialization. 
- - **Status Information:** - - Whether graph is currently stale - - Reason for staleness if applicable - - When graph became stale - - When graph was last materialized - - Total materialization count - - Hours since last materialization - - **Use Cases:** - - Decide if materialization is needed - - Monitor graph freshness - - Track materialization history - - Understand data pipeline state - - **Important Notes:** - - Stale graph means DuckDB has changes not in graph - - Graph becomes stale after file deletions - - Materialization clears staleness - - Status retrieval is included - no credit consumption - - Args: - graph_id (str): - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. - - Returns: - Any | ErrorResponse | HTTPValidationError | MaterializeStatusResponse - """ - - return ( - await asyncio_detailed( - graph_id=graph_id, - client=client, - ) - ).parsed diff --git a/robosystems_client/api/materialize/materialize_graph.py b/robosystems_client/api/materialize/materialize_graph.py deleted file mode 100644 index 2b4bf7a..0000000 --- a/robosystems_client/api/materialize/materialize_graph.py +++ /dev/null @@ -1,440 +0,0 @@ -from http import HTTPStatus -from typing import Any, cast -from urllib.parse import quote - -import httpx - -from ... 
import errors -from ...client import AuthenticatedClient, Client -from ...models.error_response import ErrorResponse -from ...models.http_validation_error import HTTPValidationError -from ...models.materialize_request import MaterializeRequest -from ...models.materialize_response import MaterializeResponse -from ...types import Response - - -def _get_kwargs( - graph_id: str, - *, - body: MaterializeRequest, -) -> dict[str, Any]: - headers: dict[str, Any] = {} - - _kwargs: dict[str, Any] = { - "method": "post", - "url": "/v1/graphs/{graph_id}/materialize".format( - graph_id=quote(str(graph_id), safe=""), - ), - } - - _kwargs["json"] = body.to_dict() - - headers["Content-Type"] = "application/json" - - _kwargs["headers"] = headers - return _kwargs - - -def _parse_response( - *, client: AuthenticatedClient | Client, response: httpx.Response -) -> Any | ErrorResponse | HTTPValidationError | MaterializeResponse | None: - if response.status_code == 200: - response_200 = MaterializeResponse.from_dict(response.json()) - - return response_200 - - if response.status_code == 400: - response_400 = ErrorResponse.from_dict(response.json()) - - return response_400 - - if response.status_code == 401: - response_401 = cast(Any, None) - return response_401 - - if response.status_code == 403: - response_403 = ErrorResponse.from_dict(response.json()) - - return response_403 - - if response.status_code == 404: - response_404 = ErrorResponse.from_dict(response.json()) - - return response_404 - - if response.status_code == 409: - response_409 = ErrorResponse.from_dict(response.json()) - - return response_409 - - if response.status_code == 413: - response_413 = ErrorResponse.from_dict(response.json()) - - return response_413 - - if response.status_code == 422: - response_422 = HTTPValidationError.from_dict(response.json()) - - return response_422 - - if response.status_code == 500: - response_500 = cast(Any, None) - return response_500 - - if client.raise_on_unexpected_status: - raise 
errors.UnexpectedStatus(response.status_code, response.content) - else: - return None - - -def _build_response( - *, client: AuthenticatedClient | Client, response: httpx.Response -) -> Response[Any | ErrorResponse | HTTPValidationError | MaterializeResponse]: - return Response( - status_code=HTTPStatus(response.status_code), - content=response.content, - headers=response.headers, - parsed=_parse_response(client=client, response=response), - ) - - -def sync_detailed( - graph_id: str, - *, - client: AuthenticatedClient, - body: MaterializeRequest, -) -> Response[Any | ErrorResponse | HTTPValidationError | MaterializeResponse]: - """Materialize Graph from DuckDB - - Rebuild entire graph from DuckDB staging tables (materialized view pattern). - - This endpoint rebuilds the complete graph database from the current state of DuckDB - staging tables. It automatically discovers all tables, ingests them in the correct - order (nodes before relationships), and clears the staleness flag. - - **When to Use:** - - After batch uploads (files uploaded with ingest_to_graph=false) - - After cascade file deletions (graph marked stale) - - To ensure graph consistency with DuckDB state - - Periodic full refresh - - **What Happens:** - 1. Discovers all tables for the graph from PostgreSQL registry - 2. Sorts tables (nodes before relationships) - 3. Ingests all tables from DuckDB to graph in order - 4. Clears staleness flag on success - 5. Returns detailed materialization report - - **Staleness Check:** - By default, only materializes if graph is stale (after deletions or missed ingestions). - Use `force=true` to rebuild regardless of staleness. 
- - **Rebuild Feature:** - Setting `rebuild=true` regenerates the entire graph database from scratch: - - Deletes existing graph database - - Recreates with fresh schema from active GraphSchema - - Ingests all data files - - Safe operation - DuckDB is source of truth - - Useful for schema changes or data corrections - - Graph marked as 'rebuilding' during process - - **Table Ordering:** - Node tables (PascalCase) are ingested before relationship tables (UPPERCASE) to - ensure referential integrity. - - **Error Handling:** - With `ignore_errors=true` (default), continues materializing even if individual - rows fail. Failed rows are logged but don't stop the process. - - **Concurrency Control:** - Only one materialization can run per graph at a time. If another materialization is in progress, - you'll receive a 409 Conflict error. The distributed lock automatically expires after - the configured TTL (default: 1 hour) to prevent deadlocks from failed materializations. - - **Performance:** - Full graph materialization can take minutes for large datasets. Consider running - during off-peak hours for production systems. - - **Dry Run:** - Set `dry_run=true` to validate limits without executing. Returns current usage, tier limits, - and any warnings or errors. No lock is acquired, no SSE operation is created. - - **Credits:** - Materialization is included - no credit consumption - - Args: - graph_id (str): - body (MaterializeRequest): - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. 
- - Returns: - Response[Any | ErrorResponse | HTTPValidationError | MaterializeResponse] - """ - - kwargs = _get_kwargs( - graph_id=graph_id, - body=body, - ) - - response = client.get_httpx_client().request( - **kwargs, - ) - - return _build_response(client=client, response=response) - - -def sync( - graph_id: str, - *, - client: AuthenticatedClient, - body: MaterializeRequest, -) -> Any | ErrorResponse | HTTPValidationError | MaterializeResponse | None: - """Materialize Graph from DuckDB - - Rebuild entire graph from DuckDB staging tables (materialized view pattern). - - This endpoint rebuilds the complete graph database from the current state of DuckDB - staging tables. It automatically discovers all tables, ingests them in the correct - order (nodes before relationships), and clears the staleness flag. - - **When to Use:** - - After batch uploads (files uploaded with ingest_to_graph=false) - - After cascade file deletions (graph marked stale) - - To ensure graph consistency with DuckDB state - - Periodic full refresh - - **What Happens:** - 1. Discovers all tables for the graph from PostgreSQL registry - 2. Sorts tables (nodes before relationships) - 3. Ingests all tables from DuckDB to graph in order - 4. Clears staleness flag on success - 5. Returns detailed materialization report - - **Staleness Check:** - By default, only materializes if graph is stale (after deletions or missed ingestions). - Use `force=true` to rebuild regardless of staleness. - - **Rebuild Feature:** - Setting `rebuild=true` regenerates the entire graph database from scratch: - - Deletes existing graph database - - Recreates with fresh schema from active GraphSchema - - Ingests all data files - - Safe operation - DuckDB is source of truth - - Useful for schema changes or data corrections - - Graph marked as 'rebuilding' during process - - **Table Ordering:** - Node tables (PascalCase) are ingested before relationship tables (UPPERCASE) to - ensure referential integrity. 
- - **Error Handling:** - With `ignore_errors=true` (default), continues materializing even if individual - rows fail. Failed rows are logged but don't stop the process. - - **Concurrency Control:** - Only one materialization can run per graph at a time. If another materialization is in progress, - you'll receive a 409 Conflict error. The distributed lock automatically expires after - the configured TTL (default: 1 hour) to prevent deadlocks from failed materializations. - - **Performance:** - Full graph materialization can take minutes for large datasets. Consider running - during off-peak hours for production systems. - - **Dry Run:** - Set `dry_run=true` to validate limits without executing. Returns current usage, tier limits, - and any warnings or errors. No lock is acquired, no SSE operation is created. - - **Credits:** - Materialization is included - no credit consumption - - Args: - graph_id (str): - body (MaterializeRequest): - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. - - Returns: - Any | ErrorResponse | HTTPValidationError | MaterializeResponse - """ - - return sync_detailed( - graph_id=graph_id, - client=client, - body=body, - ).parsed - - -async def asyncio_detailed( - graph_id: str, - *, - client: AuthenticatedClient, - body: MaterializeRequest, -) -> Response[Any | ErrorResponse | HTTPValidationError | MaterializeResponse]: - """Materialize Graph from DuckDB - - Rebuild entire graph from DuckDB staging tables (materialized view pattern). - - This endpoint rebuilds the complete graph database from the current state of DuckDB - staging tables. It automatically discovers all tables, ingests them in the correct - order (nodes before relationships), and clears the staleness flag. 
- - **When to Use:** - - After batch uploads (files uploaded with ingest_to_graph=false) - - After cascade file deletions (graph marked stale) - - To ensure graph consistency with DuckDB state - - Periodic full refresh - - **What Happens:** - 1. Discovers all tables for the graph from PostgreSQL registry - 2. Sorts tables (nodes before relationships) - 3. Ingests all tables from DuckDB to graph in order - 4. Clears staleness flag on success - 5. Returns detailed materialization report - - **Staleness Check:** - By default, only materializes if graph is stale (after deletions or missed ingestions). - Use `force=true` to rebuild regardless of staleness. - - **Rebuild Feature:** - Setting `rebuild=true` regenerates the entire graph database from scratch: - - Deletes existing graph database - - Recreates with fresh schema from active GraphSchema - - Ingests all data files - - Safe operation - DuckDB is source of truth - - Useful for schema changes or data corrections - - Graph marked as 'rebuilding' during process - - **Table Ordering:** - Node tables (PascalCase) are ingested before relationship tables (UPPERCASE) to - ensure referential integrity. - - **Error Handling:** - With `ignore_errors=true` (default), continues materializing even if individual - rows fail. Failed rows are logged but don't stop the process. - - **Concurrency Control:** - Only one materialization can run per graph at a time. If another materialization is in progress, - you'll receive a 409 Conflict error. The distributed lock automatically expires after - the configured TTL (default: 1 hour) to prevent deadlocks from failed materializations. - - **Performance:** - Full graph materialization can take minutes for large datasets. Consider running - during off-peak hours for production systems. - - **Dry Run:** - Set `dry_run=true` to validate limits without executing. Returns current usage, tier limits, - and any warnings or errors. No lock is acquired, no SSE operation is created. 
- - **Credits:** - Materialization is included - no credit consumption - - Args: - graph_id (str): - body (MaterializeRequest): - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. - - Returns: - Response[Any | ErrorResponse | HTTPValidationError | MaterializeResponse] - """ - - kwargs = _get_kwargs( - graph_id=graph_id, - body=body, - ) - - response = await client.get_async_httpx_client().request(**kwargs) - - return _build_response(client=client, response=response) - - -async def asyncio( - graph_id: str, - *, - client: AuthenticatedClient, - body: MaterializeRequest, -) -> Any | ErrorResponse | HTTPValidationError | MaterializeResponse | None: - """Materialize Graph from DuckDB - - Rebuild entire graph from DuckDB staging tables (materialized view pattern). - - This endpoint rebuilds the complete graph database from the current state of DuckDB - staging tables. It automatically discovers all tables, ingests them in the correct - order (nodes before relationships), and clears the staleness flag. - - **When to Use:** - - After batch uploads (files uploaded with ingest_to_graph=false) - - After cascade file deletions (graph marked stale) - - To ensure graph consistency with DuckDB state - - Periodic full refresh - - **What Happens:** - 1. Discovers all tables for the graph from PostgreSQL registry - 2. Sorts tables (nodes before relationships) - 3. Ingests all tables from DuckDB to graph in order - 4. Clears staleness flag on success - 5. Returns detailed materialization report - - **Staleness Check:** - By default, only materializes if graph is stale (after deletions or missed ingestions). - Use `force=true` to rebuild regardless of staleness. 
- - **Rebuild Feature:** - Setting `rebuild=true` regenerates the entire graph database from scratch: - - Deletes existing graph database - - Recreates with fresh schema from active GraphSchema - - Ingests all data files - - Safe operation - DuckDB is source of truth - - Useful for schema changes or data corrections - - Graph marked as 'rebuilding' during process - - **Table Ordering:** - Node tables (PascalCase) are ingested before relationship tables (UPPERCASE) to - ensure referential integrity. - - **Error Handling:** - With `ignore_errors=true` (default), continues materializing even if individual - rows fail. Failed rows are logged but don't stop the process. - - **Concurrency Control:** - Only one materialization can run per graph at a time. If another materialization is in progress, - you'll receive a 409 Conflict error. The distributed lock automatically expires after - the configured TTL (default: 1 hour) to prevent deadlocks from failed materializations. - - **Performance:** - Full graph materialization can take minutes for large datasets. Consider running - during off-peak hours for production systems. - - **Dry Run:** - Set `dry_run=true` to validate limits without executing. Returns current usage, tier limits, - and any warnings or errors. No lock is acquired, no SSE operation is created. - - **Credits:** - Materialization is included - no credit consumption - - Args: - graph_id (str): - body (MaterializeRequest): - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. 
- - Returns: - Any | ErrorResponse | HTTPValidationError | MaterializeResponse - """ - - return ( - await asyncio_detailed( - graph_id=graph_id, - client=client, - body=body, - ) - ).parsed diff --git a/robosystems_client/api/subgraphs/create_subgraph.py b/robosystems_client/api/subgraphs/create_subgraph.py deleted file mode 100644 index f164ef6..0000000 --- a/robosystems_client/api/subgraphs/create_subgraph.py +++ /dev/null @@ -1,292 +0,0 @@ -from http import HTTPStatus -from typing import Any -from urllib.parse import quote - -import httpx - -from ... import errors -from ...client import AuthenticatedClient, Client -from ...models.create_subgraph_request import CreateSubgraphRequest -from ...models.http_validation_error import HTTPValidationError -from ...types import Response - - -def _get_kwargs( - graph_id: str, - *, - body: CreateSubgraphRequest, -) -> dict[str, Any]: - headers: dict[str, Any] = {} - - _kwargs: dict[str, Any] = { - "method": "post", - "url": "/v1/graphs/{graph_id}/subgraphs".format( - graph_id=quote(str(graph_id), safe=""), - ), - } - - _kwargs["json"] = body.to_dict() - - headers["Content-Type"] = "application/json" - - _kwargs["headers"] = headers - return _kwargs - - -def _parse_response( - *, client: AuthenticatedClient | Client, response: httpx.Response -) -> Any | HTTPValidationError | None: - if response.status_code == 200: - response_200 = response.json() - return response_200 - - if response.status_code == 422: - response_422 = HTTPValidationError.from_dict(response.json()) - - return response_422 - - if client.raise_on_unexpected_status: - raise errors.UnexpectedStatus(response.status_code, response.content) - else: - return None - - -def _build_response( - *, client: AuthenticatedClient | Client, response: httpx.Response -) -> Response[Any | HTTPValidationError]: - return Response( - status_code=HTTPStatus(response.status_code), - content=response.content, - headers=response.headers, - parsed=_parse_response(client=client, 
response=response), - ) - - -def sync_detailed( - graph_id: str, - *, - client: AuthenticatedClient, - body: CreateSubgraphRequest, -) -> Response[Any | HTTPValidationError]: - """Create Subgraph - - Create a new subgraph within a parent graph, with optional data forking. - - **Requirements:** - - Valid authentication - - Parent graph must exist and be accessible to the user - - User must have 'admin' permission on the parent graph - - Parent graph tier must support subgraphs (LadybugDB Large/XLarge) - - Must be within subgraph quota limits - - Subgraph name must be unique within the parent graph - - **Fork Mode:** - When `fork_parent=true`, the operation: - - Returns immediately with an operation_id for SSE monitoring - - Copies data from parent graph to the new subgraph - - Supports selective forking via metadata.fork_options - - Tracks progress in real-time via SSE - - **Returns:** - - Without fork: Immediate SubgraphResponse with created subgraph details - - With fork: Operation response with SSE monitoring endpoint - - **Subgraph ID format:** `{parent_id}_{subgraph_name}` (e.g., kg1234567890abcdef_dev) - - **Usage:** - - Subgraphs share parent's credit pool - - Subgraph ID can be used in all standard `/v1/graphs/{graph_id}/*` endpoints - - Permissions inherited from parent graph - - Args: - graph_id (str): - body (CreateSubgraphRequest): Request model for creating a subgraph. - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. 
- - Returns: - Response[Any | HTTPValidationError] - """ - - kwargs = _get_kwargs( - graph_id=graph_id, - body=body, - ) - - response = client.get_httpx_client().request( - **kwargs, - ) - - return _build_response(client=client, response=response) - - -def sync( - graph_id: str, - *, - client: AuthenticatedClient, - body: CreateSubgraphRequest, -) -> Any | HTTPValidationError | None: - """Create Subgraph - - Create a new subgraph within a parent graph, with optional data forking. - - **Requirements:** - - Valid authentication - - Parent graph must exist and be accessible to the user - - User must have 'admin' permission on the parent graph - - Parent graph tier must support subgraphs (LadybugDB Large/XLarge) - - Must be within subgraph quota limits - - Subgraph name must be unique within the parent graph - - **Fork Mode:** - When `fork_parent=true`, the operation: - - Returns immediately with an operation_id for SSE monitoring - - Copies data from parent graph to the new subgraph - - Supports selective forking via metadata.fork_options - - Tracks progress in real-time via SSE - - **Returns:** - - Without fork: Immediate SubgraphResponse with created subgraph details - - With fork: Operation response with SSE monitoring endpoint - - **Subgraph ID format:** `{parent_id}_{subgraph_name}` (e.g., kg1234567890abcdef_dev) - - **Usage:** - - Subgraphs share parent's credit pool - - Subgraph ID can be used in all standard `/v1/graphs/{graph_id}/*` endpoints - - Permissions inherited from parent graph - - Args: - graph_id (str): - body (CreateSubgraphRequest): Request model for creating a subgraph. - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. 
- - Returns: - Any | HTTPValidationError - """ - - return sync_detailed( - graph_id=graph_id, - client=client, - body=body, - ).parsed - - -async def asyncio_detailed( - graph_id: str, - *, - client: AuthenticatedClient, - body: CreateSubgraphRequest, -) -> Response[Any | HTTPValidationError]: - """Create Subgraph - - Create a new subgraph within a parent graph, with optional data forking. - - **Requirements:** - - Valid authentication - - Parent graph must exist and be accessible to the user - - User must have 'admin' permission on the parent graph - - Parent graph tier must support subgraphs (LadybugDB Large/XLarge) - - Must be within subgraph quota limits - - Subgraph name must be unique within the parent graph - - **Fork Mode:** - When `fork_parent=true`, the operation: - - Returns immediately with an operation_id for SSE monitoring - - Copies data from parent graph to the new subgraph - - Supports selective forking via metadata.fork_options - - Tracks progress in real-time via SSE - - **Returns:** - - Without fork: Immediate SubgraphResponse with created subgraph details - - With fork: Operation response with SSE monitoring endpoint - - **Subgraph ID format:** `{parent_id}_{subgraph_name}` (e.g., kg1234567890abcdef_dev) - - **Usage:** - - Subgraphs share parent's credit pool - - Subgraph ID can be used in all standard `/v1/graphs/{graph_id}/*` endpoints - - Permissions inherited from parent graph - - Args: - graph_id (str): - body (CreateSubgraphRequest): Request model for creating a subgraph. - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. 
- - Returns: - Response[Any | HTTPValidationError] - """ - - kwargs = _get_kwargs( - graph_id=graph_id, - body=body, - ) - - response = await client.get_async_httpx_client().request(**kwargs) - - return _build_response(client=client, response=response) - - -async def asyncio( - graph_id: str, - *, - client: AuthenticatedClient, - body: CreateSubgraphRequest, -) -> Any | HTTPValidationError | None: - """Create Subgraph - - Create a new subgraph within a parent graph, with optional data forking. - - **Requirements:** - - Valid authentication - - Parent graph must exist and be accessible to the user - - User must have 'admin' permission on the parent graph - - Parent graph tier must support subgraphs (LadybugDB Large/XLarge) - - Must be within subgraph quota limits - - Subgraph name must be unique within the parent graph - - **Fork Mode:** - When `fork_parent=true`, the operation: - - Returns immediately with an operation_id for SSE monitoring - - Copies data from parent graph to the new subgraph - - Supports selective forking via metadata.fork_options - - Tracks progress in real-time via SSE - - **Returns:** - - Without fork: Immediate SubgraphResponse with created subgraph details - - With fork: Operation response with SSE monitoring endpoint - - **Subgraph ID format:** `{parent_id}_{subgraph_name}` (e.g., kg1234567890abcdef_dev) - - **Usage:** - - Subgraphs share parent's credit pool - - Subgraph ID can be used in all standard `/v1/graphs/{graph_id}/*` endpoints - - Permissions inherited from parent graph - - Args: - graph_id (str): - body (CreateSubgraphRequest): Request model for creating a subgraph. - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. 
- - Returns: - Any | HTTPValidationError - """ - - return ( - await asyncio_detailed( - graph_id=graph_id, - client=client, - body=body, - ) - ).parsed diff --git a/robosystems_client/api/subgraphs/delete_subgraph.py b/robosystems_client/api/subgraphs/delete_subgraph.py deleted file mode 100644 index 16c6bd9..0000000 --- a/robosystems_client/api/subgraphs/delete_subgraph.py +++ /dev/null @@ -1,320 +0,0 @@ -from http import HTTPStatus -from typing import Any, cast -from urllib.parse import quote - -import httpx - -from ... import errors -from ...client import AuthenticatedClient, Client -from ...models.delete_subgraph_request import DeleteSubgraphRequest -from ...models.delete_subgraph_response import DeleteSubgraphResponse -from ...models.http_validation_error import HTTPValidationError -from ...types import Response - - -def _get_kwargs( - graph_id: str, - subgraph_name: str, - *, - body: DeleteSubgraphRequest, -) -> dict[str, Any]: - headers: dict[str, Any] = {} - - _kwargs: dict[str, Any] = { - "method": "delete", - "url": "/v1/graphs/{graph_id}/subgraphs/{subgraph_name}".format( - graph_id=quote(str(graph_id), safe=""), - subgraph_name=quote(str(subgraph_name), safe=""), - ), - } - - _kwargs["json"] = body.to_dict() - - headers["Content-Type"] = "application/json" - - _kwargs["headers"] = headers - return _kwargs - - -def _parse_response( - *, client: AuthenticatedClient | Client, response: httpx.Response -) -> Any | DeleteSubgraphResponse | HTTPValidationError | None: - if response.status_code == 200: - response_200 = DeleteSubgraphResponse.from_dict(response.json()) - - return response_200 - - if response.status_code == 400: - response_400 = cast(Any, None) - return response_400 - - if response.status_code == 401: - response_401 = cast(Any, None) - return response_401 - - if response.status_code == 403: - response_403 = cast(Any, None) - return response_403 - - if response.status_code == 404: - response_404 = cast(Any, None) - return response_404 - - if 
response.status_code == 409: - response_409 = cast(Any, None) - return response_409 - - if response.status_code == 422: - response_422 = HTTPValidationError.from_dict(response.json()) - - return response_422 - - if response.status_code == 500: - response_500 = cast(Any, None) - return response_500 - - if client.raise_on_unexpected_status: - raise errors.UnexpectedStatus(response.status_code, response.content) - else: - return None - - -def _build_response( - *, client: AuthenticatedClient | Client, response: httpx.Response -) -> Response[Any | DeleteSubgraphResponse | HTTPValidationError]: - return Response( - status_code=HTTPStatus(response.status_code), - content=response.content, - headers=response.headers, - parsed=_parse_response(client=client, response=response), - ) - - -def sync_detailed( - graph_id: str, - subgraph_name: str, - *, - client: AuthenticatedClient, - body: DeleteSubgraphRequest, -) -> Response[Any | DeleteSubgraphResponse | HTTPValidationError]: - """Delete Subgraph - - Delete a subgraph database. - - **Requirements:** - - Must be a valid subgraph (not parent graph) - - User must have admin access to parent graph - - Subgraph name must be alphanumeric (1-20 characters) - - Optional backup before deletion - - **Deletion Options:** - - `force`: Delete even if contains data - - `backup_first`: Create backup before deletion - - **Warning:** - Deletion is permanent unless backup is created. - All data in the subgraph will be lost. 
- - **Backup Location:** - If backup requested, stored in S3 graph database bucket at: - `s3://{graph_s3_bucket}/{instance_id}/{database_name}_{timestamp}.backup` - - **Notes:** - - Use the subgraph name (e.g., 'dev', 'staging') not the full subgraph ID - - Deletion does not affect parent graph's credit pool or permissions - - Backup creation consumes credits from parent graph's allocation - - Args: - graph_id (str): - subgraph_name (str): Subgraph name to delete (e.g., 'dev', 'staging') - body (DeleteSubgraphRequest): Request model for deleting a subgraph. - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. - - Returns: - Response[Any | DeleteSubgraphResponse | HTTPValidationError] - """ - - kwargs = _get_kwargs( - graph_id=graph_id, - subgraph_name=subgraph_name, - body=body, - ) - - response = client.get_httpx_client().request( - **kwargs, - ) - - return _build_response(client=client, response=response) - - -def sync( - graph_id: str, - subgraph_name: str, - *, - client: AuthenticatedClient, - body: DeleteSubgraphRequest, -) -> Any | DeleteSubgraphResponse | HTTPValidationError | None: - """Delete Subgraph - - Delete a subgraph database. - - **Requirements:** - - Must be a valid subgraph (not parent graph) - - User must have admin access to parent graph - - Subgraph name must be alphanumeric (1-20 characters) - - Optional backup before deletion - - **Deletion Options:** - - `force`: Delete even if contains data - - `backup_first`: Create backup before deletion - - **Warning:** - Deletion is permanent unless backup is created. - All data in the subgraph will be lost. 
- - **Backup Location:** - If backup requested, stored in S3 graph database bucket at: - `s3://{graph_s3_bucket}/{instance_id}/{database_name}_{timestamp}.backup` - - **Notes:** - - Use the subgraph name (e.g., 'dev', 'staging') not the full subgraph ID - - Deletion does not affect parent graph's credit pool or permissions - - Backup creation consumes credits from parent graph's allocation - - Args: - graph_id (str): - subgraph_name (str): Subgraph name to delete (e.g., 'dev', 'staging') - body (DeleteSubgraphRequest): Request model for deleting a subgraph. - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. - - Returns: - Any | DeleteSubgraphResponse | HTTPValidationError - """ - - return sync_detailed( - graph_id=graph_id, - subgraph_name=subgraph_name, - client=client, - body=body, - ).parsed - - -async def asyncio_detailed( - graph_id: str, - subgraph_name: str, - *, - client: AuthenticatedClient, - body: DeleteSubgraphRequest, -) -> Response[Any | DeleteSubgraphResponse | HTTPValidationError]: - """Delete Subgraph - - Delete a subgraph database. - - **Requirements:** - - Must be a valid subgraph (not parent graph) - - User must have admin access to parent graph - - Subgraph name must be alphanumeric (1-20 characters) - - Optional backup before deletion - - **Deletion Options:** - - `force`: Delete even if contains data - - `backup_first`: Create backup before deletion - - **Warning:** - Deletion is permanent unless backup is created. - All data in the subgraph will be lost. 
- - **Backup Location:** - If backup requested, stored in S3 graph database bucket at: - `s3://{graph_s3_bucket}/{instance_id}/{database_name}_{timestamp}.backup` - - **Notes:** - - Use the subgraph name (e.g., 'dev', 'staging') not the full subgraph ID - - Deletion does not affect parent graph's credit pool or permissions - - Backup creation consumes credits from parent graph's allocation - - Args: - graph_id (str): - subgraph_name (str): Subgraph name to delete (e.g., 'dev', 'staging') - body (DeleteSubgraphRequest): Request model for deleting a subgraph. - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. - - Returns: - Response[Any | DeleteSubgraphResponse | HTTPValidationError] - """ - - kwargs = _get_kwargs( - graph_id=graph_id, - subgraph_name=subgraph_name, - body=body, - ) - - response = await client.get_async_httpx_client().request(**kwargs) - - return _build_response(client=client, response=response) - - -async def asyncio( - graph_id: str, - subgraph_name: str, - *, - client: AuthenticatedClient, - body: DeleteSubgraphRequest, -) -> Any | DeleteSubgraphResponse | HTTPValidationError | None: - """Delete Subgraph - - Delete a subgraph database. - - **Requirements:** - - Must be a valid subgraph (not parent graph) - - User must have admin access to parent graph - - Subgraph name must be alphanumeric (1-20 characters) - - Optional backup before deletion - - **Deletion Options:** - - `force`: Delete even if contains data - - `backup_first`: Create backup before deletion - - **Warning:** - Deletion is permanent unless backup is created. - All data in the subgraph will be lost. 
- - **Backup Location:** - If backup requested, stored in S3 graph database bucket at: - `s3://{graph_s3_bucket}/{instance_id}/{database_name}_{timestamp}.backup` - - **Notes:** - - Use the subgraph name (e.g., 'dev', 'staging') not the full subgraph ID - - Deletion does not affect parent graph's credit pool or permissions - - Backup creation consumes credits from parent graph's allocation - - Args: - graph_id (str): - subgraph_name (str): Subgraph name to delete (e.g., 'dev', 'staging') - body (DeleteSubgraphRequest): Request model for deleting a subgraph. - - Raises: - errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. - httpx.TimeoutException: If the request takes longer than Client.timeout. - - Returns: - Any | DeleteSubgraphResponse | HTTPValidationError - """ - - return ( - await asyncio_detailed( - graph_id=graph_id, - subgraph_name=subgraph_name, - client=client, - body=body, - ) - ).parsed diff --git a/robosystems_client/api/subgraphs/get_subgraph_info.py b/robosystems_client/api/subgraphs/get_subgraph_info.py index a40a6da..4516b0e 100644 --- a/robosystems_client/api/subgraphs/get_subgraph_info.py +++ b/robosystems_client/api/subgraphs/get_subgraph_info.py @@ -17,7 +17,7 @@ def _get_kwargs( ) -> dict[str, Any]: _kwargs: dict[str, Any] = { "method": "get", - "url": "/v1/graphs/{graph_id}/subgraphs/{subgraph_name}/info".format( + "url": "/v1/graphs/{graph_id}/subgraphs/{subgraph_name}".format( graph_id=quote(str(graph_id), safe=""), subgraph_name=quote(str(subgraph_name), safe=""), ), diff --git a/robosystems_client/clients/__init__.py b/robosystems_client/clients/__init__.py index 9ae0a62..8af9891 100644 --- a/robosystems_client/clients/__init__.py +++ b/robosystems_client/clients/__init__.py @@ -34,12 +34,6 @@ FileUploadResult, FileInfo, ) -from .materialization_client import ( - MaterializationClient, - MaterializationOptions, - MaterializationResult, - MaterializationStatus, -) from 
.table_client import ( TableClient, TableInfo, @@ -47,13 +41,14 @@ ) from .graph_client import ( GraphClient, + MaterializationOptions, + MaterializationResult, GraphMetadata, InitialEntityData, GraphInfo, ) from .investor_client import InvestorClient from .ledger_client import LedgerClient -from .report_client import ReportClient from .facade import ( RoboSystemsClients, RoboSystemsClientConfig, @@ -156,11 +151,6 @@ "FileUploadOptions", "FileUploadResult", "FileInfo", - # Materialization Client - "MaterializationClient", - "MaterializationOptions", - "MaterializationResult", - "MaterializationStatus", # Table Client "TableClient", "TableInfo", @@ -170,12 +160,13 @@ "GraphMetadata", "InitialEntityData", "GraphInfo", + "MaterializationOptions", + "MaterializationResult", # Ledger Client "LedgerClient", # Investor Client "InvestorClient", # Report Client - "ReportClient", # Utilities "QueryBuilder", "ResultProcessor", diff --git a/robosystems_client/clients/facade.py b/robosystems_client/clients/facade.py index 4599cdd..1ee7650 100644 --- a/robosystems_client/clients/facade.py +++ b/robosystems_client/clients/facade.py @@ -11,12 +11,10 @@ from .operation_client import OperationClient from .file_client import FileClient from .document_client import DocumentClient -from .materialization_client import MaterializationClient from .table_client import TableClient from .graph_client import GraphClient from .investor_client import InvestorClient from .ledger_client import LedgerClient -from .report_client import ReportClient from .sse_client import SSEClient @@ -70,13 +68,12 @@ def __init__(self, config: RoboSystemsClientConfig = None): self.agent = AgentClient(self.config) self.operations = OperationClient(self.config) self.files = FileClient(self.config) - self.materialization = MaterializationClient(self.config) self.tables = TableClient(self.config) self.documents = DocumentClient(self.config) self.graphs = GraphClient(self.config) self.ledger = LedgerClient(self.config) 
self.investor = InvestorClient(self.config) - self.reports = ReportClient(self.config) + self.reports = self.ledger # backward compat alias def monitor_operation( self, operation_id: str, on_progress: Optional[Callable] = None @@ -108,8 +105,6 @@ def close(self): self.operations.close_all() if hasattr(self.files, "close"): self.files.close() - if hasattr(self.materialization, "close"): - self.materialization.close() if hasattr(self.tables, "close"): self.tables.close() if hasattr(self.documents, "close"): diff --git a/robosystems_client/clients/graph_client.py b/robosystems_client/clients/graph_client.py index 4f35b21..1535711 100644 --- a/robosystems_client/clients/graph_client.py +++ b/robosystems_client/clients/graph_client.py @@ -1,7 +1,13 @@ """Graph Management Client -Provides high-level graph management operations with automatic operation monitoring. -Supports both SSE (Server-Sent Events) for real-time updates and polling fallback. +Provides high-level graph management and lifecycle operations with +automatic operation monitoring. Supports both SSE (Server-Sent Events) +for real-time updates and polling fallback. + +Graph lifecycle operations (create-subgraph, delete-subgraph, create-backup, +restore-backup, upgrade-tier, materialize) all go through the operations +surface at ``POST /v1/graphs/{graph_id}/operations/{op_name}`` and return +an ``OperationEnvelope``. 
""" from dataclasses import dataclass @@ -12,9 +18,16 @@ import httpx +from .operation_client import OperationClient, OperationProgress, MonitorOptions + logger = logging.getLogger(__name__) +# --------------------------------------------------------------------------- +# Data classes +# --------------------------------------------------------------------------- + + @dataclass class GraphMetadata: """Graph metadata for creation""" @@ -49,14 +62,72 @@ class GraphInfo: status: Optional[str] = None +@dataclass +class MaterializationOptions: + """Options for graph materialization operations""" + + ignore_errors: bool = True + rebuild: bool = False + force: bool = False + materialize_embeddings: bool = False + on_progress: Optional[Callable[[str], None]] = None + timeout: Optional[int] = 600 # 10 minute default timeout + + +@dataclass +class MaterializationResult: + """Result from materialization operation""" + + status: str + was_stale: bool + stale_reason: Optional[str] + tables_materialized: list[str] + total_rows: int + execution_time_ms: float + message: str + success: bool = True + error: Optional[str] = None + + class GraphClient: - """Client for graph management operations""" + """Client for graph management and lifecycle operations. + + Covers graph creation, info retrieval, and all graph lifecycle + operations (materialize, subgraphs, backups, tier changes). 
+ """ def __init__(self, config: Dict[str, Any]): self.config = config self.base_url = config["base_url"] self.headers = config.get("headers", {}) self.token = config.get("token") + self._operation_client = None + + @property + def operation_client(self) -> OperationClient: + """Get or create the operation client for SSE monitoring.""" + if self._operation_client is None: + self._operation_client = OperationClient(self.config) + return self._operation_client + + def _get_authenticated_client(self): + """Build an AuthenticatedClient for API calls.""" + from ..client import AuthenticatedClient + + if not self.token: + raise ValueError("No API key provided. Set X-API-Key in headers.") + + return AuthenticatedClient( + base_url=self.base_url, + token=self.token, + prefix="", + auth_header_name="X-API-Key", + headers=self.headers, + ) + + # --------------------------------------------------------------------------- + # Graph creation + # --------------------------------------------------------------------------- def create_graph_and_wait( self, @@ -78,36 +149,20 @@ def create_graph_and_wait( metadata: Graph metadata initial_entity: Optional initial entity data create_entity: Whether to create the entity node and upload initial data. - Only applies when initial_entity is provided. Set to False to create - graph without populating entity data (useful for file-based ingestion). timeout: Maximum time to wait in seconds poll_interval: Time between status checks in seconds (for polling fallback) on_progress: Callback for progress updates - use_sse: Whether to try SSE first (default True). Falls back to polling on failure. + use_sse: Whether to try SSE first (default True). 
Returns: graph_id when creation completes - - Raises: - Exception: If creation fails or times out """ - from ..client import AuthenticatedClient from ..api.graphs.create_graph import sync_detailed as create_graph from ..models.create_graph_request import CreateGraphRequest from ..models.graph_metadata import GraphMetadata as APIGraphMetadata - if not self.token: - raise ValueError("No API key provided. Set X-API-Key in headers.") - - client = AuthenticatedClient( - base_url=self.base_url, - token=self.token, - prefix="", - auth_header_name="X-API-Key", - headers=self.headers, - ) + client = self._get_authenticated_client() - # Build API metadata api_metadata = APIGraphMetadata( graph_name=metadata.graph_name, description=metadata.description, @@ -115,7 +170,6 @@ def create_graph_and_wait( tags=metadata.tags or [], ) - # Build initial entity if provided initial_entity_dict = None if initial_entity: initial_entity_dict = { @@ -129,7 +183,6 @@ def create_graph_and_wait( if initial_entity.sic_description: initial_entity_dict["sic_description"] = initial_entity.sic_description - # Create graph request graph_create = CreateGraphRequest( metadata=api_metadata, initial_entity=initial_entity_dict, @@ -139,13 +192,11 @@ def create_graph_and_wait( if on_progress: on_progress(f"Creating graph: {metadata.graph_name}") - # Execute create request response = create_graph(client=client, body=graph_create) if not response.parsed: raise RuntimeError(f"Failed to create graph: {response.status_code}") - # Extract graph_id or operation_id if isinstance(response.parsed, dict): graph_id = response.parsed.get("graph_id") operation_id = response.parsed.get("operation_id") @@ -153,20 +204,17 @@ def create_graph_and_wait( graph_id = getattr(response.parsed, "graph_id", None) operation_id = getattr(response.parsed, "operation_id", None) - # If graph_id returned immediately, we're done if graph_id: if on_progress: on_progress(f"Graph created: {graph_id}") return graph_id - # Otherwise, wait for 
operation to complete if not operation_id: raise RuntimeError("No graph_id or operation_id in response") if on_progress: on_progress(f"Graph creation queued (operation: {operation_id})") - # Try SSE first, fall back to polling if use_sse: try: return self._wait_with_sse(operation_id, timeout, on_progress) @@ -175,32 +223,231 @@ def create_graph_and_wait( if on_progress: on_progress("SSE unavailable, using polling...") - # Fallback to polling return self._wait_with_polling( operation_id, timeout, poll_interval, on_progress, client ) - def _wait_with_sse( - self, - operation_id: str, - timeout: int, - on_progress: Optional[Callable[[str], None]], - ) -> str: + # --------------------------------------------------------------------------- + # Graph info + # --------------------------------------------------------------------------- + + def get_graph_info(self, graph_id: str) -> GraphInfo: """ - Wait for operation completion using SSE stream. + Get information about a graph. Args: - operation_id: Operation ID to monitor - timeout: Maximum time to wait in seconds - on_progress: Callback for progress updates + graph_id: The graph ID Returns: - graph_id when operation completes + GraphInfo with graph details + """ + from ..api.graphs.get_graphs import sync_detailed as get_graphs + + client = self._get_authenticated_client() + response = get_graphs(client=client) + + if not response.parsed: + raise RuntimeError(f"Failed to get graphs: {response.status_code}") + + data = response.parsed + graphs = None + + if isinstance(data, dict): + graphs = data.get("graphs", []) + elif hasattr(data, "additional_properties"): + graphs = data.additional_properties.get("graphs", []) + elif hasattr(data, "graphs"): + graphs = data.graphs + else: + raise RuntimeError("Unexpected response format from get_graphs") + + graph_data = None + for graph in graphs: + if isinstance(graph, dict): + if graph.get("graph_id") == graph_id or graph.get("id") == graph_id: + graph_data = graph + break + elif 
hasattr(graph, "graph_id"): + if graph.graph_id == graph_id or getattr(graph, "id", None) == graph_id: + graph_data = graph + break + + if not graph_data: + raise ValueError(f"Graph not found: {graph_id}") + + if isinstance(graph_data, dict): + return GraphInfo( + graph_id=graph_data.get("graph_id") or graph_data.get("id", graph_id), + graph_name=graph_data.get("graph_name") or graph_data.get("name", ""), + description=graph_data.get("description"), + schema_extensions=graph_data.get("schema_extensions"), + tags=graph_data.get("tags"), + created_at=graph_data.get("created_at"), + status=graph_data.get("status"), + ) + else: + return GraphInfo( + graph_id=getattr(graph_data, "graph_id", None) + or getattr(graph_data, "id", graph_id), + graph_name=getattr(graph_data, "graph_name", None) + or getattr(graph_data, "name", ""), + description=getattr(graph_data, "description", None), + schema_extensions=getattr(graph_data, "schema_extensions", None), + tags=getattr(graph_data, "tags", None), + created_at=getattr(graph_data, "created_at", None), + status=getattr(graph_data, "status", None), + ) + + # --------------------------------------------------------------------------- + # Materialize + # --------------------------------------------------------------------------- + + def materialize( + self, + graph_id: str, + options: Optional[MaterializationOptions] = None, + ) -> MaterializationResult: + """ + Materialize graph from staging tables or extensions OLTP. + + Submits a materialization job and monitors progress via SSE. + The operation runs asynchronously on the server but this method waits + for completion and returns the final result. 
+ + Args: + graph_id: Graph database identifier + options: Materialization options (ignore_errors, rebuild, force, timeout) - Raises: - RuntimeError: If operation fails - TimeoutError: If operation times out + Returns: + MaterializationResult with detailed execution information """ + from ..api.graph_operations.op_materialize import ( + sync_detailed as materialize_graph, + ) + + options = options or MaterializationOptions() + + try: + if options.on_progress: + options.on_progress("Submitting materialization job...") + + client = self._get_authenticated_client() + + response = materialize_graph( + graph_id=graph_id, + client=client, + ignore_errors=options.ignore_errors, + rebuild=options.rebuild, + force=options.force, + materialize_embeddings=options.materialize_embeddings, + ) + + # Handle non-success status codes + if response.status_code not in (200, 202) or not response.parsed: + error_msg = f"Materialization failed: {response.status_code}" + if hasattr(response, "content"): + try: + error_data = json.loads(response.content) + error_msg = error_data.get("detail", error_msg) + except Exception: + pass + + return MaterializationResult( + status="failed", + was_stale=False, + stale_reason=None, + tables_materialized=[], + total_rows=0, + execution_time_ms=0, + message=error_msg, + success=False, + error=error_msg, + ) + + # Get the operation_id from the envelope + result_data = response.parsed + operation_id = getattr(result_data, "operation_id", None) + + if options.on_progress: + options.on_progress(f"Materialization queued (operation: {operation_id})") + + # Monitor the operation via SSE until completion + def on_sse_progress(progress: OperationProgress): + if options.on_progress: + msg = progress.message + if progress.percentage is not None: + msg += f" ({progress.percentage:.0f}%)" + options.on_progress(msg) + + monitor_options = MonitorOptions( + on_progress=on_sse_progress, + timeout=options.timeout, + ) + + op_result = 
self.operation_client.monitor_operation(operation_id, monitor_options) + + if op_result.status.value == "completed": + sse_result = op_result.result or {} + + if options.on_progress: + tables = sse_result.get("tables_materialized", []) + rows = sse_result.get("total_rows", 0) + time_ms = sse_result.get("execution_time_ms", 0) + options.on_progress( + f"Materialization complete: {len(tables)} tables, " + f"{rows:,} rows in {time_ms:.2f}ms" + ) + + return MaterializationResult( + status="success", + was_stale=sse_result.get("was_stale", False), + stale_reason=sse_result.get("stale_reason"), + tables_materialized=sse_result.get("tables_materialized", []), + total_rows=sse_result.get("total_rows", 0), + execution_time_ms=sse_result.get( + "execution_time_ms", op_result.execution_time_ms or 0 + ), + message=sse_result.get("message", "Graph materialized successfully"), + success=True, + ) + else: + return MaterializationResult( + status=op_result.status.value, + was_stale=False, + stale_reason=None, + tables_materialized=[], + total_rows=0, + execution_time_ms=op_result.execution_time_ms or 0, + message=op_result.error or f"Operation {op_result.status.value}", + success=False, + error=op_result.error, + ) + + except Exception as e: + logger.error(f"Materialization failed: {e}") + return MaterializationResult( + status="failed", + was_stale=False, + stale_reason=None, + tables_materialized=[], + total_rows=0, + execution_time_ms=0, + message=str(e), + success=False, + error=str(e), + ) + + # --------------------------------------------------------------------------- + # SSE / polling helpers (used by create_graph_and_wait) + # --------------------------------------------------------------------------- + + def _wait_with_sse( + self, + operation_id: str, + timeout: int, + on_progress: Optional[Callable[[str], None]], + ) -> str: + """Wait for operation completion using SSE stream.""" stream_url = f"{self.base_url}/v1/operations/{operation_id}/stream" headers = 
{"X-API-Key": self.token, "Accept": "text/event-stream"} @@ -214,14 +461,12 @@ def _wait_with_sse( event_data = "" for line in response.iter_lines(): - # Check timeout if time.time() - start_time > timeout: raise TimeoutError(f"Graph creation timed out after {timeout}s") line = line.strip() if not line: - # Empty line = end of event, process it if event_type and event_data: result = self._process_sse_event(event_type, event_data, on_progress) if result is not None: @@ -234,7 +479,6 @@ def _wait_with_sse( event_type = line[6:].strip() elif line.startswith("data:"): event_data = line[5:].strip() - # Ignore other lines (comments, id, retry, etc.) raise TimeoutError(f"SSE stream ended without completion after {timeout}s") @@ -244,15 +488,7 @@ def _process_sse_event( event_data: str, on_progress: Optional[Callable[[str], None]], ) -> Optional[str]: - """ - Process a single SSE event. - - Returns: - graph_id if operation completed, None to continue waiting - - Raises: - RuntimeError: If operation failed - """ + """Process a single SSE event. Returns graph_id if completed.""" try: data = json.loads(event_data) except json.JSONDecodeError: @@ -288,7 +524,6 @@ def _process_sse_event( reason = data.get("reason", "Operation was cancelled") raise RuntimeError(f"Graph creation cancelled: {reason}") - # Ignore other event types (keepalive, etc.) return None def _wait_with_polling( @@ -299,23 +534,7 @@ def _wait_with_polling( on_progress: Optional[Callable[[str], None]], client: Any, ) -> str: - """ - Wait for operation completion using polling. 
- - Args: - operation_id: Operation ID to monitor - timeout: Maximum time to wait in seconds - poll_interval: Time between status checks - on_progress: Callback for progress updates - client: Authenticated HTTP client - - Returns: - graph_id when operation completes - - Raises: - RuntimeError: If operation fails - TimeoutError: If operation times out - """ + """Wait for operation completion using polling.""" from ..api.operations.get_operation_status import sync_detailed as get_status max_attempts = timeout // poll_interval @@ -327,12 +546,10 @@ def _wait_with_polling( if not status_response.parsed: continue - # Handle both dict and object responses status_data = status_response.parsed if isinstance(status_data, dict): status = status_data.get("status") else: - # Check for additional_properties first (common in generated clients) if hasattr(status_data, "additional_properties"): status = status_data.additional_properties.get("status") else: @@ -342,7 +559,6 @@ def _wait_with_polling( on_progress(f"Status: {status} (attempt {attempt + 1}/{max_attempts})") if status == "completed": - # Extract graph_id from result if isinstance(status_data, dict): result = status_data.get("result", {}) elif hasattr(status_data, "additional_properties"): @@ -363,7 +579,6 @@ def _wait_with_polling( raise RuntimeError("Operation completed but no graph_id in result") elif status == "failed": - # Extract error message if isinstance(status_data, dict): error = ( status_data.get("error") or status_data.get("message") or "Unknown error" @@ -377,104 +592,12 @@ def _wait_with_polling( raise TimeoutError(f"Graph creation timed out after {timeout}s") - def get_graph_info(self, graph_id: str) -> GraphInfo: - """ - Get information about a graph. 
- - Args: - graph_id: The graph ID - - Returns: - GraphInfo with graph details - - Raises: - ValueError: If graph not found - """ - from ..client import AuthenticatedClient - from ..api.graphs.get_graphs import sync_detailed as get_graphs - - if not self.token: - raise ValueError("No API key provided. Set X-API-Key in headers.") - - client = AuthenticatedClient( - base_url=self.base_url, - token=self.token, - prefix="", - auth_header_name="X-API-Key", - headers=self.headers, - ) - - # Use get_graphs and filter for the specific graph - response = get_graphs(client=client) - - if not response.parsed: - raise RuntimeError(f"Failed to get graphs: {response.status_code}") - - data = response.parsed - graphs = None - - # Extract graphs list from response - if isinstance(data, dict): - graphs = data.get("graphs", []) - elif hasattr(data, "additional_properties"): - graphs = data.additional_properties.get("graphs", []) - elif hasattr(data, "graphs"): - graphs = data.graphs - else: - raise RuntimeError("Unexpected response format from get_graphs") - - # Find the specific graph by ID - graph_data = None - for graph in graphs: - if isinstance(graph, dict): - if graph.get("graph_id") == graph_id or graph.get("id") == graph_id: - graph_data = graph - break - elif hasattr(graph, "graph_id"): - if graph.graph_id == graph_id or getattr(graph, "id", None) == graph_id: - graph_data = graph - break - - if not graph_data: - raise ValueError(f"Graph not found: {graph_id}") - - # Build GraphInfo from the found graph - if isinstance(graph_data, dict): - return GraphInfo( - graph_id=graph_data.get("graph_id") or graph_data.get("id", graph_id), - graph_name=graph_data.get("graph_name") or graph_data.get("name", ""), - description=graph_data.get("description"), - schema_extensions=graph_data.get("schema_extensions"), - tags=graph_data.get("tags"), - created_at=graph_data.get("created_at"), - status=graph_data.get("status"), - ) - else: - return GraphInfo( - graph_id=getattr(graph_data, 
"graph_id", None) - or getattr(graph_data, "id", graph_id), - graph_name=getattr(graph_data, "graph_name", None) - or getattr(graph_data, "name", ""), - description=getattr(graph_data, "description", None), - schema_extensions=getattr(graph_data, "schema_extensions", None), - tags=getattr(graph_data, "tags", None), - created_at=getattr(graph_data, "created_at", None), - status=getattr(graph_data, "status", None), - ) - def delete_graph(self, graph_id: str) -> None: """ Delete a graph. Note: This method is not yet available as the delete_graph endpoint - is not included in the generated SDK. This will be implemented when - the endpoint is added to the API specification. - - Args: - graph_id: The graph ID to delete - - Raises: - NotImplementedError: This feature is not yet available + is not included in the generated SDK. """ raise NotImplementedError( "Graph deletion is not yet available. " @@ -482,5 +605,5 @@ def delete_graph(self, graph_id: str) -> None: ) def close(self): - """Clean up resources (placeholder for consistency)""" + """Clean up resources.""" pass diff --git a/robosystems_client/clients/ledger_client.py b/robosystems_client/clients/ledger_client.py index 896e5cb..580bd26 100644 --- a/robosystems_client/clients/ledger_client.py +++ b/robosystems_client/clients/ledger_client.py @@ -16,8 +16,8 @@ `envelope.result` and returns either a dict or, for async dispatches (e.g. auto-map, create-report), a small ack dict. -Reports + publish lists + statements live on `ReportClient` — they -belong to the same backend surface but deserve their own facade. +Reports, statements, and publish lists are included on this client — +same backend surface as the ledger operations. 
""" from __future__ import annotations @@ -86,6 +86,33 @@ from ..api.extensions_robo_ledger.op_update_taxonomy import ( sync_detailed as op_update_taxonomy, ) +from ..api.extensions_robo_ledger.op_add_publish_list_members import ( + sync_detailed as op_add_publish_list_members, +) +from ..api.extensions_robo_ledger.op_create_publish_list import ( + sync_detailed as op_create_publish_list, +) +from ..api.extensions_robo_ledger.op_create_report import ( + sync_detailed as op_create_report, +) +from ..api.extensions_robo_ledger.op_delete_publish_list import ( + sync_detailed as op_delete_publish_list, +) +from ..api.extensions_robo_ledger.op_delete_report import ( + sync_detailed as op_delete_report, +) +from ..api.extensions_robo_ledger.op_regenerate_report import ( + sync_detailed as op_regenerate_report, +) +from ..api.extensions_robo_ledger.op_remove_publish_list_member import ( + sync_detailed as op_remove_publish_list_member, +) +from ..api.extensions_robo_ledger.op_share_report import ( + sync_detailed as op_share_report, +) +from ..api.extensions_robo_ledger.op_update_publish_list import ( + sync_detailed as op_update_publish_list, +) from ..api.extensions_robo_ledger.op_create_associations import ( sync_detailed as op_create_associations, ) @@ -174,6 +201,19 @@ parse_trial_balance, parse_unmapped_elements, ) +from ..graphql.queries.ledger import ( + GET_PUBLISH_LIST_QUERY, + GET_REPORT_QUERY, + GET_STATEMENT_QUERY, + LIST_PUBLISH_LISTS_QUERY, + LIST_REPORTS_QUERY, + parse_publish_list, + parse_publish_lists, + parse_report, + parse_reports, + parse_statement, +) +from ..models.add_publish_list_members_operation import AddPublishListMembersOperation from ..models.auto_map_elements_operation import AutoMapElementsOperation from ..models.bulk_association_item import BulkAssociationItem from ..models.bulk_create_associations_request import BulkCreateAssociationsRequest @@ -214,7 +254,17 @@ ) from ..models.initialize_ledger_request import InitializeLedgerRequest 
from ..models.manual_line_item_request import ManualLineItemRequest +from ..models.create_publish_list_request import CreatePublishListRequest +from ..models.create_report_request import CreateReportRequest +from ..models.delete_publish_list_operation import DeletePublishListOperation +from ..models.delete_report_operation import DeleteReportOperation from ..models.operation_envelope import OperationEnvelope +from ..models.regenerate_report_operation import RegenerateReportOperation +from ..models.remove_publish_list_member_operation import ( + RemovePublishListMemberOperation, +) +from ..models.share_report_operation import ShareReportOperation +from ..models.update_publish_list_operation import UpdatePublishListOperation from ..models.reopen_period_operation import ReopenPeriodOperation from ..models.set_close_target_operation import SetCloseTargetOperation from ..models.truncate_schedule_operation import TruncateScheduleOperation @@ -1142,3 +1192,175 @@ def reopen_period( response = op_reopen_period(graph_id=graph_id, body=body, client=self._get_client()) envelope = self._call_op("Reopen period", response) return envelope.result or {} + + # ── Reports ───────────────────────────────────────────────────────── + + def create_report( + self, + graph_id: str, + name: str, + mapping_id: str, + period_start: str, + period_end: str, + taxonomy_id: str = "tax_usgaap_reporting", + period_type: str = "quarterly", + comparative: bool = True, + ) -> dict[str, Any]: + """Kick off report creation (async). 
Returns an operation ack.""" + body = CreateReportRequest( + name=name, + mapping_id=mapping_id, + period_start=period_start, + period_end=period_end, + taxonomy_id=taxonomy_id, + period_type=period_type, + comparative=comparative, + ) + response = op_create_report(graph_id=graph_id, body=body, client=self._get_client()) + envelope = self._call_op("Create report", response) + return {"operation_id": envelope.operation_id, "status": envelope.status} + + def list_reports(self, graph_id: str) -> list[dict[str, Any]]: + """List all reports for a graph (includes received shared reports).""" + data = self._query(graph_id, LIST_REPORTS_QUERY) + return parse_reports(data) + + def get_report(self, graph_id: str, report_id: str) -> dict[str, Any] | None: + """Get a single report with its period list + available structures.""" + data = self._query(graph_id, GET_REPORT_QUERY, {"reportId": report_id}) + return parse_report(data) + + def get_statement( + self, graph_id: str, report_id: str, structure_type: str + ) -> dict[str, Any] | None: + """Render a financial statement — facts viewed through a structure. + + `structure_type`: income_statement, balance_sheet, cash_flow_statement, ... + """ + data = self._query( + graph_id, + GET_STATEMENT_QUERY, + {"reportId": report_id, "structureType": structure_type}, + ) + return parse_statement(data) + + def regenerate_report( + self, + graph_id: str, + report_id: str, + period_start: str | None = None, + period_end: str | None = None, + ) -> dict[str, Any]: + """Regenerate an existing report (async). 
Returns an operation ack.""" + body = RegenerateReportOperation( + report_id=report_id, + period_start=period_start if period_start is not None else UNSET, + period_end=period_end if period_end is not None else UNSET, + ) + response = op_regenerate_report( + graph_id=graph_id, body=body, client=self._get_client() + ) + envelope = self._call_op("Regenerate report", response) + return {"operation_id": envelope.operation_id, "status": envelope.status} + + def delete_report(self, graph_id: str, report_id: str) -> None: + """Delete a report and its generated facts.""" + body = DeleteReportOperation(report_id=report_id) + response = op_delete_report(graph_id=graph_id, body=body, client=self._get_client()) + self._call_op("Delete report", response) + + def share_report( + self, graph_id: str, report_id: str, publish_list_id: str + ) -> dict[str, Any]: + """Share a published report to every member of a publish list (async).""" + body = ShareReportOperation(report_id=report_id, publish_list_id=publish_list_id) + response = op_share_report(graph_id=graph_id, body=body, client=self._get_client()) + envelope = self._call_op("Share report", response) + return {"operation_id": envelope.operation_id, "status": envelope.status} + + def is_shared_report(self, report: dict[str, Any] | Any) -> bool: + """Check if a report was received via sharing (vs locally created).""" + if isinstance(report, dict): + return report.get("source_graph_id") is not None + return getattr(report, "source_graph_id", None) is not None + + # ── Publish Lists ──────────────────────────────────────────────────── + + def list_publish_lists( + self, graph_id: str, limit: int = 100, offset: int = 0 + ) -> dict[str, Any] | None: + """List publish lists with pagination.""" + data = self._query( + graph_id, LIST_PUBLISH_LISTS_QUERY, {"limit": limit, "offset": offset} + ) + return parse_publish_lists(data) + + def get_publish_list(self, graph_id: str, list_id: str) -> dict[str, Any] | None: + """Get a single publish 
list with its full member list.""" + data = self._query(graph_id, GET_PUBLISH_LIST_QUERY, {"listId": list_id}) + return parse_publish_list(data) + + def create_publish_list( + self, graph_id: str, name: str, description: str | None = None + ) -> dict[str, Any]: + """Create a new publish list.""" + body = CreatePublishListRequest( + name=name, + description=description if description is not None else UNSET, + ) + response = op_create_publish_list( + graph_id=graph_id, body=body, client=self._get_client() + ) + envelope = self._call_op("Create publish list", response) + return envelope.result or {} + + def update_publish_list( + self, + graph_id: str, + list_id: str, + name: str | None = None, + description: str | None = None, + ) -> dict[str, Any]: + """Update a publish list's name or description.""" + body = UpdatePublishListOperation( + list_id=list_id, + name=name if name is not None else UNSET, + description=description if description is not None else UNSET, + ) + response = op_update_publish_list( + graph_id=graph_id, body=body, client=self._get_client() + ) + envelope = self._call_op("Update publish list", response) + return envelope.result or {} + + def delete_publish_list(self, graph_id: str, list_id: str) -> None: + """Delete a publish list.""" + body = DeletePublishListOperation(list_id=list_id) + response = op_delete_publish_list( + graph_id=graph_id, body=body, client=self._get_client() + ) + self._call_op("Delete publish list", response) + + def add_publish_list_members( + self, graph_id: str, list_id: str, target_graph_ids: list[str] + ) -> dict[str, Any]: + """Add target graphs as members of a publish list.""" + body = AddPublishListMembersOperation( + list_id=list_id, target_graph_ids=target_graph_ids + ) + response = op_add_publish_list_members( + graph_id=graph_id, body=body, client=self._get_client() + ) + envelope = self._call_op("Add publish list members", response) + return envelope.result or {} + + def remove_publish_list_member( + self, 
graph_id: str, list_id: str, member_id: str + ) -> dict[str, Any]: + """Remove a single member from a publish list.""" + body = RemovePublishListMemberOperation(list_id=list_id, member_id=member_id) + response = op_remove_publish_list_member( + graph_id=graph_id, body=body, client=self._get_client() + ) + envelope = self._call_op("Remove publish list member", response) + return envelope.result if envelope.result is not None else {"deleted": True} diff --git a/robosystems_client/clients/materialization_client.py b/robosystems_client/clients/materialization_client.py deleted file mode 100644 index 6e5ff3c..0000000 --- a/robosystems_client/clients/materialization_client.py +++ /dev/null @@ -1,293 +0,0 @@ -"""Materialization Client for RoboSystems API - -Manages graph materialization from DuckDB staging tables. -Treats the graph database as a materialized view of the mutable DuckDB data lake. -""" - -from dataclasses import dataclass -from typing import Dict, Any, Optional, Callable -import logging - -from ..api.materialize.materialize_graph import ( - sync_detailed as materialize_graph, -) -from ..api.materialize.get_materialization_status import ( - sync_detailed as get_materialization_status, -) -from ..models.materialize_request import MaterializeRequest -from .operation_client import OperationClient, OperationProgress, MonitorOptions - -logger = logging.getLogger(__name__) - - -@dataclass -class MaterializationOptions: - """Options for graph materialization operations""" - - ignore_errors: bool = True - rebuild: bool = False - force: bool = False - materialize_embeddings: bool = False - on_progress: Optional[Callable[[str], None]] = None - timeout: Optional[int] = 600 # 10 minute default timeout - - -@dataclass -class MaterializationResult: - """Result from materialization operation""" - - status: str - was_stale: bool - stale_reason: Optional[str] - tables_materialized: list[str] - total_rows: int - execution_time_ms: float - message: str - success: bool = True - 
error: Optional[str] = None - - -@dataclass -class MaterializationStatus: - """Status information about graph materialization""" - - graph_id: str - is_stale: bool - stale_reason: Optional[str] - stale_since: Optional[str] - last_materialized_at: Optional[str] - materialization_count: int - hours_since_materialization: Optional[float] - message: str - - -class MaterializationClient: - """Client for managing graph materialization operations""" - - def __init__(self, config: Dict[str, Any]): - self.config = config - self.base_url = config["base_url"] - self.headers = config.get("headers", {}) - self.token = config.get("token") - self._operation_client = None - - @property - def operation_client(self) -> OperationClient: - """Get or create the operation client for SSE monitoring.""" - if self._operation_client is None: - self._operation_client = OperationClient(self.config) - return self._operation_client - - def materialize( - self, - graph_id: str, - options: Optional[MaterializationOptions] = None, - ) -> MaterializationResult: - """ - Materialize graph from DuckDB staging tables. - - Submits a materialization job to Dagster and monitors progress via SSE. - The operation runs asynchronously on the server but this method waits - for completion and returns the final result. 
- - Args: - graph_id: Graph database identifier - options: Materialization options (ignore_errors, rebuild, force, timeout) - - Returns: - MaterializationResult with detailed execution information - - When to use: - - After batch uploads (files uploaded with ingest_to_graph=false) - - After cascade file deletions (graph marked stale) - - Periodic full refresh to ensure consistency - - Recovery from partial materialization failures - """ - options = options or MaterializationOptions() - - try: - if options.on_progress: - options.on_progress("Submitting materialization job...") - - request = MaterializeRequest( - ignore_errors=options.ignore_errors, - rebuild=options.rebuild, - force=options.force, - materialize_embeddings=options.materialize_embeddings, - ) - - from ..client import AuthenticatedClient - - if not self.token: - raise Exception("No API key provided. Set X-API-Key in headers.") - - client = AuthenticatedClient( - base_url=self.base_url, - token=self.token, - prefix="", - auth_header_name="X-API-Key", - headers=self.headers, - ) - - kwargs = { - "graph_id": graph_id, - "client": client, - "body": request, - } - - response = materialize_graph(**kwargs) - - # Handle non-200 status codes - if response.status_code != 200 or not response.parsed: - error_msg = f"Materialization failed: {response.status_code}" - if hasattr(response, "content"): - try: - import json - - error_data = json.loads(response.content) - error_msg = error_data.get("detail", error_msg) - except Exception: - pass - - return MaterializationResult( - status="failed", - was_stale=False, - stale_reason=None, - tables_materialized=[], - total_rows=0, - execution_time_ms=0, - message=error_msg, - success=False, - error=error_msg, - ) - - # Get the operation_id from the queued response - result_data = response.parsed - operation_id = result_data.operation_id - - if options.on_progress: - options.on_progress(f"Materialization queued (operation: {operation_id})") - - # Monitor the operation via 
SSE until completion - def on_sse_progress(progress: OperationProgress): - if options.on_progress: - msg = progress.message - if progress.percentage is not None: - msg += f" ({progress.percentage:.0f}%)" - options.on_progress(msg) - - monitor_options = MonitorOptions( - on_progress=on_sse_progress, - timeout=options.timeout, - ) - - op_result = self.operation_client.monitor_operation(operation_id, monitor_options) - - # Convert operation result to materialization result - if op_result.status.value == "completed": - # Extract details from SSE completion event result - sse_result = op_result.result or {} - - if options.on_progress: - tables = sse_result.get("tables_materialized", []) - rows = sse_result.get("total_rows", 0) - time_ms = sse_result.get("execution_time_ms", 0) - options.on_progress( - f"✅ Materialization complete: {len(tables)} tables, " - f"{rows:,} rows in {time_ms:.2f}ms" - ) - - return MaterializationResult( - status="success", - was_stale=sse_result.get("was_stale", False), - stale_reason=sse_result.get("stale_reason"), - tables_materialized=sse_result.get("tables_materialized", []), - total_rows=sse_result.get("total_rows", 0), - execution_time_ms=sse_result.get( - "execution_time_ms", op_result.execution_time_ms or 0 - ), - message=sse_result.get("message", "Graph materialized successfully"), - success=True, - ) - else: - # Operation failed or was cancelled - return MaterializationResult( - status=op_result.status.value, - was_stale=False, - stale_reason=None, - tables_materialized=[], - total_rows=0, - execution_time_ms=op_result.execution_time_ms or 0, - message=op_result.error or f"Operation {op_result.status.value}", - success=False, - error=op_result.error, - ) - - except Exception as e: - logger.error(f"Materialization failed: {e}") - return MaterializationResult( - status="failed", - was_stale=False, - stale_reason=None, - tables_materialized=[], - total_rows=0, - execution_time_ms=0, - message=str(e), - success=False, - error=str(e), - ) 
- - def status(self, graph_id: str) -> Optional[MaterializationStatus]: - """ - Get current materialization status for the graph. - - Shows whether the graph is stale (DuckDB has changes not yet in graph database), - when it was last materialized, and how long since last materialization. - - Args: - graph_id: Graph database identifier - - Returns: - MaterializationStatus with staleness and timing information - """ - try: - from ..client import AuthenticatedClient - - if not self.token: - raise Exception("No API key provided. Set X-API-Key in headers.") - - client = AuthenticatedClient( - base_url=self.base_url, - token=self.token, - prefix="", - auth_header_name="X-API-Key", - headers=self.headers, - ) - - kwargs = { - "graph_id": graph_id, - "client": client, - } - - response = get_materialization_status(**kwargs) - - if response.status_code != 200 or not response.parsed: - logger.error(f"Failed to get materialization status: {response.status_code}") - return None - - status_data = response.parsed - - return MaterializationStatus( - graph_id=status_data.graph_id, - is_stale=status_data.is_stale, - stale_reason=status_data.stale_reason, - stale_since=status_data.stale_since, - last_materialized_at=status_data.last_materialized_at, - materialization_count=status_data.materialization_count, - hours_since_materialization=status_data.hours_since_materialization, - message=status_data.message, - ) - - except Exception as e: - logger.error(f"Failed to get materialization status: {e}") - return None diff --git a/robosystems_client/clients/report_client.py b/robosystems_client/clients/report_client.py deleted file mode 100644 index 387d578..0000000 --- a/robosystems_client/clients/report_client.py +++ /dev/null @@ -1,299 +0,0 @@ -"""Report Client for RoboSystems API. - -High-level facade for the report + publish-list surface: create/list/ -view/regenerate/share/delete reports, render financial statements, and -manage publish lists (distribution lists for shared reports). 
- -**Transport split:** -- **Reads** (list, get, statement, list_publish_lists, get_publish_list) - go through GraphQL at `/extensions/{graph_id}/graphql`. -- **Writes** (create, regenerate, delete, share, publish-list CRUD) go - through named operations at - `/extensions/roboledger/{graph_id}/operations/{operation_name}`. - -Async dispatches (create, regenerate, share) return a small ack dict -`{operation_id, status}`; consumers can subscribe to progress via -`/v1/operations/{operation_id}/stream`. -""" - -from __future__ import annotations - -from http import HTTPStatus -from typing import Any - -from ..api.extensions_robo_ledger.op_add_publish_list_members import ( - sync_detailed as op_add_publish_list_members, -) -from ..api.extensions_robo_ledger.op_create_publish_list import ( - sync_detailed as op_create_publish_list, -) -from ..api.extensions_robo_ledger.op_create_report import ( - sync_detailed as op_create_report, -) -from ..api.extensions_robo_ledger.op_delete_publish_list import ( - sync_detailed as op_delete_publish_list, -) -from ..api.extensions_robo_ledger.op_delete_report import ( - sync_detailed as op_delete_report, -) -from ..api.extensions_robo_ledger.op_regenerate_report import ( - sync_detailed as op_regenerate_report, -) -from ..api.extensions_robo_ledger.op_remove_publish_list_member import ( - sync_detailed as op_remove_publish_list_member, -) -from ..api.extensions_robo_ledger.op_share_report import ( - sync_detailed as op_share_report, -) -from ..api.extensions_robo_ledger.op_update_publish_list import ( - sync_detailed as op_update_publish_list, -) -from ..client import AuthenticatedClient -from ..graphql.client import GraphQLClient -from ..graphql.queries.ledger import ( - GET_PUBLISH_LIST_QUERY, - GET_REPORT_QUERY, - GET_STATEMENT_QUERY, - LIST_PUBLISH_LISTS_QUERY, - LIST_REPORTS_QUERY, - parse_publish_list, - parse_publish_lists, - parse_report, - parse_reports, - parse_statement, -) -from ..models.add_publish_list_members_operation 
import AddPublishListMembersOperation -from ..models.create_publish_list_request import CreatePublishListRequest -from ..models.create_report_request import CreateReportRequest -from ..models.delete_publish_list_operation import DeletePublishListOperation -from ..models.delete_report_operation import DeleteReportOperation -from ..models.operation_envelope import OperationEnvelope -from ..models.regenerate_report_operation import RegenerateReportOperation -from ..models.remove_publish_list_member_operation import ( - RemovePublishListMemberOperation, -) -from ..models.share_report_operation import ShareReportOperation -from ..models.update_publish_list_operation import UpdatePublishListOperation -from ..types import UNSET - - -class ReportClient: - """High-level facade for reports + publish lists + statements.""" - - def __init__(self, config: dict[str, Any]): - self.config = config - self.base_url = config["base_url"] - self.headers = config.get("headers", {}) - self.token = config.get("token") - self.timeout = config.get("timeout", 60) - - def _get_client(self) -> AuthenticatedClient: - if not self.token: - raise RuntimeError("No API key provided. Set X-API-Key in headers.") - return AuthenticatedClient( - base_url=self.base_url, - token=self.token, - prefix="", - auth_header_name="X-API-Key", - headers=self.headers, - ) - - def _get_graphql_client(self) -> GraphQLClient: - if not self.token: - raise RuntimeError("No API key provided. 
Set X-API-Key in headers.") - return GraphQLClient( - base_url=self.base_url, - token=self.token, - headers=self.headers, - timeout=self.timeout, - ) - - def _query( - self, - graph_id: str, - query: str, - variables: dict[str, Any] | None = None, - ) -> dict[str, Any]: - return self._get_graphql_client().execute(graph_id, query, variables) - - def _call_op(self, label: str, response: Any) -> OperationEnvelope: - if response.status_code not in (HTTPStatus.OK, HTTPStatus.ACCEPTED): - raise RuntimeError( - f"{label} failed: {response.status_code}: {response.content!r}" - ) - envelope = response.parsed - if not isinstance(envelope, OperationEnvelope): - raise RuntimeError(f"{label} failed: unexpected response shape: {envelope!r}") - return envelope - - # ── Reports ───────────────────────────────────────────────────────── - - def create( - self, - graph_id: str, - name: str, - mapping_id: str, - period_start: str, - period_end: str, - taxonomy_id: str = "tax_usgaap_reporting", - period_type: str = "quarterly", - comparative: bool = True, - ) -> dict[str, Any]: - """Kick off report creation (async). 
Returns an operation ack.""" - body = CreateReportRequest( - name=name, - mapping_id=mapping_id, - period_start=period_start, - period_end=period_end, - taxonomy_id=taxonomy_id, - period_type=period_type, - comparative=comparative, - ) - response = op_create_report(graph_id=graph_id, body=body, client=self._get_client()) - envelope = self._call_op("Create report", response) - return {"operation_id": envelope.operation_id, "status": envelope.status} - - def list(self, graph_id: str) -> list[dict[str, Any]]: - """List all reports for a graph (includes received shared reports).""" - data = self._query(graph_id, LIST_REPORTS_QUERY) - return parse_reports(data) - - def get(self, graph_id: str, report_id: str) -> dict[str, Any] | None: - """Get a single report with its period list + available structures.""" - data = self._query(graph_id, GET_REPORT_QUERY, {"reportId": report_id}) - return parse_report(data) - - def statement( - self, graph_id: str, report_id: str, structure_type: str - ) -> dict[str, Any] | None: - """Render a financial statement — facts viewed through a structure. - - `structure_type`: income_statement, balance_sheet, cash_flow_statement, … - """ - data = self._query( - graph_id, - GET_STATEMENT_QUERY, - {"reportId": report_id, "structureType": structure_type}, - ) - return parse_statement(data) - - def regenerate( - self, - graph_id: str, - report_id: str, - period_start: str | None = None, - period_end: str | None = None, - ) -> dict[str, Any]: - """Regenerate an existing report (async). 
Returns an operation ack.""" - body = RegenerateReportOperation( - report_id=report_id, - period_start=period_start if period_start is not None else UNSET, - period_end=period_end if period_end is not None else UNSET, - ) - response = op_regenerate_report( - graph_id=graph_id, body=body, client=self._get_client() - ) - envelope = self._call_op("Regenerate report", response) - return {"operation_id": envelope.operation_id, "status": envelope.status} - - def delete(self, graph_id: str, report_id: str) -> None: - """Delete a report and its generated facts.""" - body = DeleteReportOperation(report_id=report_id) - response = op_delete_report(graph_id=graph_id, body=body, client=self._get_client()) - self._call_op("Delete report", response) - - def share( - self, graph_id: str, report_id: str, publish_list_id: str - ) -> dict[str, Any]: - """Share a published report to every member of a publish list (async).""" - body = ShareReportOperation(report_id=report_id, publish_list_id=publish_list_id) - response = op_share_report(graph_id=graph_id, body=body, client=self._get_client()) - envelope = self._call_op("Share report", response) - return {"operation_id": envelope.operation_id, "status": envelope.status} - - def is_shared_report(self, report: dict[str, Any] | Any) -> bool: - """Check if a report was received via sharing (vs locally created).""" - if isinstance(report, dict): - return report.get("source_graph_id") is not None - return getattr(report, "source_graph_id", None) is not None - - # ── Publish Lists ──────────────────────────────────────────────────── - - def list_publish_lists( - self, graph_id: str, limit: int = 100, offset: int = 0 - ) -> dict[str, Any] | None: - """List publish lists with pagination.""" - data = self._query( - graph_id, LIST_PUBLISH_LISTS_QUERY, {"limit": limit, "offset": offset} - ) - return parse_publish_lists(data) - - def get_publish_list(self, graph_id: str, list_id: str) -> dict[str, Any] | None: - """Get a single publish list with its 
full member list.""" - data = self._query(graph_id, GET_PUBLISH_LIST_QUERY, {"listId": list_id}) - return parse_publish_list(data) - - def create_publish_list( - self, graph_id: str, name: str, description: str | None = None - ) -> dict[str, Any]: - """Create a new publish list.""" - body = CreatePublishListRequest( - name=name, - description=description if description is not None else UNSET, - ) - response = op_create_publish_list( - graph_id=graph_id, body=body, client=self._get_client() - ) - envelope = self._call_op("Create publish list", response) - return envelope.result or {} - - def update_publish_list( - self, - graph_id: str, - list_id: str, - name: str | None = None, - description: str | None = None, - ) -> dict[str, Any]: - """Update a publish list's name or description.""" - body = UpdatePublishListOperation( - list_id=list_id, - name=name if name is not None else UNSET, - description=description if description is not None else UNSET, - ) - response = op_update_publish_list( - graph_id=graph_id, body=body, client=self._get_client() - ) - envelope = self._call_op("Update publish list", response) - return envelope.result or {} - - def delete_publish_list(self, graph_id: str, list_id: str) -> None: - """Delete a publish list.""" - body = DeletePublishListOperation(list_id=list_id) - response = op_delete_publish_list( - graph_id=graph_id, body=body, client=self._get_client() - ) - self._call_op("Delete publish list", response) - - def add_members( - self, graph_id: str, list_id: str, target_graph_ids: list[str] - ) -> dict[str, Any]: - """Add target graphs as members of a publish list.""" - body = AddPublishListMembersOperation( - list_id=list_id, target_graph_ids=target_graph_ids - ) - response = op_add_publish_list_members( - graph_id=graph_id, body=body, client=self._get_client() - ) - envelope = self._call_op("Add publish list members", response) - return envelope.result or {} - - def remove_member( - self, graph_id: str, list_id: str, member_id: str - 
) -> dict[str, Any]: - """Remove a single member from a publish list.""" - body = RemovePublishListMemberOperation(list_id=list_id, member_id=member_id) - response = op_remove_publish_list_member( - graph_id=graph_id, body=body, client=self._get_client() - ) - envelope = self._call_op("Remove publish list member", response) - return envelope.result if envelope.result is not None else {"deleted": True} diff --git a/robosystems_client/models/__init__.py b/robosystems_client/models/__init__.py index 6f3ca70..5d3170a 100644 --- a/robosystems_client/models/__init__.py +++ b/robosystems_client/models/__init__.py @@ -36,7 +36,6 @@ from .backup_limits import BackupLimits from .backup_list_response import BackupListResponse from .backup_response import BackupResponse -from .backup_restore_request import BackupRestoreRequest from .backup_stats_response import BackupStatsResponse from .backup_stats_response_backup_formats import BackupStatsResponseBackupFormats from .batch_agent_request import BatchAgentRequest @@ -131,8 +130,7 @@ from .delete_schedule_request import DeleteScheduleRequest from .delete_security_operation import DeleteSecurityOperation from .delete_structure_request import DeleteStructureRequest -from .delete_subgraph_request import DeleteSubgraphRequest -from .delete_subgraph_response import DeleteSubgraphResponse +from .delete_subgraph_op import DeleteSubgraphOp from .delete_taxonomy_request import DeleteTaxonomyRequest from .detailed_transactions_response import DetailedTransactionsResponse from .detailed_transactions_response_date_range import ( @@ -217,10 +215,6 @@ from .login_request import LoginRequest from .logout_user_response_logoutuser import LogoutUserResponseLogoutuser from .manual_line_item_request import ManualLineItemRequest -from .materialize_request import MaterializeRequest -from .materialize_response import MaterializeResponse -from .materialize_response_limit_check_type_0 import MaterializeResponseLimitCheckType0 -from 
.materialize_status_response import MaterializeStatusResponse from .mcp_tool_call import MCPToolCall from .mcp_tool_call_arguments import MCPToolCallArguments from .mcp_tools_response import MCPToolsResponse @@ -285,6 +279,7 @@ from .reset_password_request import ResetPasswordRequest from .reset_password_validate_response import ResetPasswordValidateResponse from .response_mode import ResponseMode +from .restore_backup_op import RestoreBackupOp from .reverse_journal_entry_request import ReverseJournalEntryRequest from .schedule_metadata_request import ScheduleMetadataRequest from .schema_export_response import SchemaExportResponse @@ -370,6 +365,7 @@ from .update_taxonomy_request import UpdateTaxonomyRequest from .update_user_request import UpdateUserRequest from .upgrade_subscription_request import UpgradeSubscriptionRequest +from .upgrade_tier_op import UpgradeTierOp from .user_graphs_response import UserGraphsResponse from .user_response import UserResponse from .validation_error import ValidationError @@ -412,7 +408,6 @@ "BackupLimits", "BackupListResponse", "BackupResponse", - "BackupRestoreRequest", "BackupStatsResponse", "BackupStatsResponseBackupFormats", "BatchAgentRequest", @@ -497,8 +492,7 @@ "DeleteScheduleRequest", "DeleteSecurityOperation", "DeleteStructureRequest", - "DeleteSubgraphRequest", - "DeleteSubgraphResponse", + "DeleteSubgraphOp", "DeleteTaxonomyRequest", "DetailedTransactionsResponse", "DetailedTransactionsResponseDateRange", @@ -569,10 +563,6 @@ "LoginRequest", "LogoutUserResponseLogoutuser", "ManualLineItemRequest", - "MaterializeRequest", - "MaterializeResponse", - "MaterializeResponseLimitCheckType0", - "MaterializeStatusResponse", "MCPToolCall", "MCPToolCallArguments", "MCPToolsResponse", @@ -631,6 +621,7 @@ "ResetPasswordRequest", "ResetPasswordValidateResponse", "ResponseMode", + "RestoreBackupOp", "ReverseJournalEntryRequest", "ScheduleMetadataRequest", "SchemaExportResponse", @@ -700,6 +691,7 @@ "UpdateTaxonomyRequest", 
"UpdateUserRequest", "UpgradeSubscriptionRequest", + "UpgradeTierOp", "UserGraphsResponse", "UserResponse", "ValidationError", diff --git a/robosystems_client/models/delete_subgraph_request.py b/robosystems_client/models/delete_subgraph_op.py similarity index 56% rename from robosystems_client/models/delete_subgraph_request.py rename to robosystems_client/models/delete_subgraph_op.py index 1fb0263..e27de6e 100644 --- a/robosystems_client/models/delete_subgraph_request.py +++ b/robosystems_client/models/delete_subgraph_op.py @@ -1,78 +1,69 @@ from __future__ import annotations from collections.abc import Mapping -from typing import Any, TypeVar, cast +from typing import Any, TypeVar from attrs import define as _attrs_define from attrs import field as _attrs_field from ..types import UNSET, Unset -T = TypeVar("T", bound="DeleteSubgraphRequest") +T = TypeVar("T", bound="DeleteSubgraphOp") @_attrs_define -class DeleteSubgraphRequest: - """Request model for deleting a subgraph. +class DeleteSubgraphOp: + """Body for the delete-subgraph operation. Attributes: - force (bool | Unset): Force deletion even if subgraph contains data Default: False. - backup_first (bool | Unset): Create a backup before deletion Default: True. - backup_location (None | str | Unset): S3 location for backup (uses default if not specified) + subgraph_name (str): Subgraph name to delete (e.g., 'dev', 'staging') + force (bool | Unset): Delete even if subgraph contains data Default: False. + backup_first (bool | Unset): Create a backup before deleting Default: True. 
""" + subgraph_name: str force: bool | Unset = False backup_first: bool | Unset = True - backup_location: None | str | Unset = UNSET additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict) def to_dict(self) -> dict[str, Any]: + subgraph_name = self.subgraph_name + force = self.force backup_first = self.backup_first - backup_location: None | str | Unset - if isinstance(self.backup_location, Unset): - backup_location = UNSET - else: - backup_location = self.backup_location - field_dict: dict[str, Any] = {} field_dict.update(self.additional_properties) - field_dict.update({}) + field_dict.update( + { + "subgraph_name": subgraph_name, + } + ) if force is not UNSET: field_dict["force"] = force if backup_first is not UNSET: field_dict["backup_first"] = backup_first - if backup_location is not UNSET: - field_dict["backup_location"] = backup_location return field_dict @classmethod def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T: d = dict(src_dict) + subgraph_name = d.pop("subgraph_name") + force = d.pop("force", UNSET) backup_first = d.pop("backup_first", UNSET) - def _parse_backup_location(data: object) -> None | str | Unset: - if data is None: - return data - if isinstance(data, Unset): - return data - return cast(None | str | Unset, data) - - backup_location = _parse_backup_location(d.pop("backup_location", UNSET)) - - delete_subgraph_request = cls( + delete_subgraph_op = cls( + subgraph_name=subgraph_name, force=force, backup_first=backup_first, - backup_location=backup_location, ) - delete_subgraph_request.additional_properties = d - return delete_subgraph_request + delete_subgraph_op.additional_properties = d + return delete_subgraph_op @property def additional_keys(self) -> list[str]: diff --git a/robosystems_client/models/delete_subgraph_response.py b/robosystems_client/models/delete_subgraph_response.py deleted file mode 100644 index 649cc51..0000000 --- a/robosystems_client/models/delete_subgraph_response.py +++ /dev/null @@ 
-1,122 +0,0 @@ -from __future__ import annotations - -import datetime -from collections.abc import Mapping -from typing import Any, TypeVar, cast - -from attrs import define as _attrs_define -from attrs import field as _attrs_field -from dateutil.parser import isoparse - -from ..types import UNSET, Unset - -T = TypeVar("T", bound="DeleteSubgraphResponse") - - -@_attrs_define -class DeleteSubgraphResponse: - """Response model for subgraph deletion. - - Attributes: - graph_id (str): Deleted subgraph identifier - status (str): Deletion status - deleted_at (datetime.datetime): When deletion occurred - backup_location (None | str | Unset): Location of backup if created - message (None | str | Unset): Additional information about the deletion - """ - - graph_id: str - status: str - deleted_at: datetime.datetime - backup_location: None | str | Unset = UNSET - message: None | str | Unset = UNSET - additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict) - - def to_dict(self) -> dict[str, Any]: - graph_id = self.graph_id - - status = self.status - - deleted_at = self.deleted_at.isoformat() - - backup_location: None | str | Unset - if isinstance(self.backup_location, Unset): - backup_location = UNSET - else: - backup_location = self.backup_location - - message: None | str | Unset - if isinstance(self.message, Unset): - message = UNSET - else: - message = self.message - - field_dict: dict[str, Any] = {} - field_dict.update(self.additional_properties) - field_dict.update( - { - "graph_id": graph_id, - "status": status, - "deleted_at": deleted_at, - } - ) - if backup_location is not UNSET: - field_dict["backup_location"] = backup_location - if message is not UNSET: - field_dict["message"] = message - - return field_dict - - @classmethod - def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T: - d = dict(src_dict) - graph_id = d.pop("graph_id") - - status = d.pop("status") - - deleted_at = isoparse(d.pop("deleted_at")) - - def 
_parse_backup_location(data: object) -> None | str | Unset: - if data is None: - return data - if isinstance(data, Unset): - return data - return cast(None | str | Unset, data) - - backup_location = _parse_backup_location(d.pop("backup_location", UNSET)) - - def _parse_message(data: object) -> None | str | Unset: - if data is None: - return data - if isinstance(data, Unset): - return data - return cast(None | str | Unset, data) - - message = _parse_message(d.pop("message", UNSET)) - - delete_subgraph_response = cls( - graph_id=graph_id, - status=status, - deleted_at=deleted_at, - backup_location=backup_location, - message=message, - ) - - delete_subgraph_response.additional_properties = d - return delete_subgraph_response - - @property - def additional_keys(self) -> list[str]: - return list(self.additional_properties.keys()) - - def __getitem__(self, key: str) -> Any: - return self.additional_properties[key] - - def __setitem__(self, key: str, value: Any) -> None: - self.additional_properties[key] = value - - def __delitem__(self, key: str) -> None: - del self.additional_properties[key] - - def __contains__(self, key: str) -> bool: - return key in self.additional_properties diff --git a/robosystems_client/models/materialize_request.py b/robosystems_client/models/materialize_request.py deleted file mode 100644 index c7d3dc2..0000000 --- a/robosystems_client/models/materialize_request.py +++ /dev/null @@ -1,103 +0,0 @@ -from __future__ import annotations - -from collections.abc import Mapping -from typing import Any, TypeVar, cast - -from attrs import define as _attrs_define - -from ..types import UNSET, Unset - -T = TypeVar("T", bound="MaterializeRequest") - - -@_attrs_define -class MaterializeRequest: - """ - Attributes: - force (bool | Unset): Force materialization even if graph is not stale Default: False. - rebuild (bool | Unset): Delete and recreate graph database before materialization Default: False. 
- ignore_errors (bool | Unset): Continue ingestion on row errors Default: True. - dry_run (bool | Unset): Validate limits without executing materialization. Returns usage, limits, and warnings. - Default: False. - source (None | str | Unset): Data source for materialization. Auto-detected from graph type if not specified. - 'staged' materializes from uploaded files (generic graphs). 'extensions' materializes from the extensions OLTP - database (entity graphs). - materialize_embeddings (bool | Unset): Include embedding columns in materialization and build HNSW vector - indexes in the graph database. When false (default), embedding columns are NULLed out to save space. Set to true - for graphs that need vector search. Default: False. - """ - - force: bool | Unset = False - rebuild: bool | Unset = False - ignore_errors: bool | Unset = True - dry_run: bool | Unset = False - source: None | str | Unset = UNSET - materialize_embeddings: bool | Unset = False - - def to_dict(self) -> dict[str, Any]: - force = self.force - - rebuild = self.rebuild - - ignore_errors = self.ignore_errors - - dry_run = self.dry_run - - source: None | str | Unset - if isinstance(self.source, Unset): - source = UNSET - else: - source = self.source - - materialize_embeddings = self.materialize_embeddings - - field_dict: dict[str, Any] = {} - - field_dict.update({}) - if force is not UNSET: - field_dict["force"] = force - if rebuild is not UNSET: - field_dict["rebuild"] = rebuild - if ignore_errors is not UNSET: - field_dict["ignore_errors"] = ignore_errors - if dry_run is not UNSET: - field_dict["dry_run"] = dry_run - if source is not UNSET: - field_dict["source"] = source - if materialize_embeddings is not UNSET: - field_dict["materialize_embeddings"] = materialize_embeddings - - return field_dict - - @classmethod - def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T: - d = dict(src_dict) - force = d.pop("force", UNSET) - - rebuild = d.pop("rebuild", UNSET) - - ignore_errors = 
d.pop("ignore_errors", UNSET) - - dry_run = d.pop("dry_run", UNSET) - - def _parse_source(data: object) -> None | str | Unset: - if data is None: - return data - if isinstance(data, Unset): - return data - return cast(None | str | Unset, data) - - source = _parse_source(d.pop("source", UNSET)) - - materialize_embeddings = d.pop("materialize_embeddings", UNSET) - - materialize_request = cls( - force=force, - rebuild=rebuild, - ignore_errors=ignore_errors, - dry_run=dry_run, - source=source, - materialize_embeddings=materialize_embeddings, - ) - - return materialize_request diff --git a/robosystems_client/models/materialize_response.py b/robosystems_client/models/materialize_response.py deleted file mode 100644 index f84b958..0000000 --- a/robosystems_client/models/materialize_response.py +++ /dev/null @@ -1,140 +0,0 @@ -from __future__ import annotations - -from collections.abc import Mapping -from typing import TYPE_CHECKING, Any, TypeVar, cast - -from attrs import define as _attrs_define -from attrs import field as _attrs_field - -from ..types import UNSET, Unset - -if TYPE_CHECKING: - from ..models.materialize_response_limit_check_type_0 import ( - MaterializeResponseLimitCheckType0, - ) - - -T = TypeVar("T", bound="MaterializeResponse") - - -@_attrs_define -class MaterializeResponse: - """Response for queued materialization operation. - - Example: - {'graph_id': 'kg_abc123', 'message': 'Materialization queued. Monitor via SSE stream.', 'operation_id': - '550e8400-e29b-41d4-a716-446655440000', 'status': 'queued'} - - Attributes: - graph_id (str): Graph database identifier - operation_id (str): SSE operation ID for progress tracking - message (str): Human-readable status message - status (str | Unset): Operation status Default: 'queued'. 
- limit_check (MaterializeResponseLimitCheckType0 | None | Unset): Limit check results (only present for dry_run - requests) - """ - - graph_id: str - operation_id: str - message: str - status: str | Unset = "queued" - limit_check: MaterializeResponseLimitCheckType0 | None | Unset = UNSET - additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict) - - def to_dict(self) -> dict[str, Any]: - from ..models.materialize_response_limit_check_type_0 import ( - MaterializeResponseLimitCheckType0, - ) - - graph_id = self.graph_id - - operation_id = self.operation_id - - message = self.message - - status = self.status - - limit_check: dict[str, Any] | None | Unset - if isinstance(self.limit_check, Unset): - limit_check = UNSET - elif isinstance(self.limit_check, MaterializeResponseLimitCheckType0): - limit_check = self.limit_check.to_dict() - else: - limit_check = self.limit_check - - field_dict: dict[str, Any] = {} - field_dict.update(self.additional_properties) - field_dict.update( - { - "graph_id": graph_id, - "operation_id": operation_id, - "message": message, - } - ) - if status is not UNSET: - field_dict["status"] = status - if limit_check is not UNSET: - field_dict["limit_check"] = limit_check - - return field_dict - - @classmethod - def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T: - from ..models.materialize_response_limit_check_type_0 import ( - MaterializeResponseLimitCheckType0, - ) - - d = dict(src_dict) - graph_id = d.pop("graph_id") - - operation_id = d.pop("operation_id") - - message = d.pop("message") - - status = d.pop("status", UNSET) - - def _parse_limit_check( - data: object, - ) -> MaterializeResponseLimitCheckType0 | None | Unset: - if data is None: - return data - if isinstance(data, Unset): - return data - try: - if not isinstance(data, dict): - raise TypeError() - limit_check_type_0 = MaterializeResponseLimitCheckType0.from_dict(data) - - return limit_check_type_0 - except (TypeError, ValueError, AttributeError, 
KeyError): - pass - return cast(MaterializeResponseLimitCheckType0 | None | Unset, data) - - limit_check = _parse_limit_check(d.pop("limit_check", UNSET)) - - materialize_response = cls( - graph_id=graph_id, - operation_id=operation_id, - message=message, - status=status, - limit_check=limit_check, - ) - - materialize_response.additional_properties = d - return materialize_response - - @property - def additional_keys(self) -> list[str]: - return list(self.additional_properties.keys()) - - def __getitem__(self, key: str) -> Any: - return self.additional_properties[key] - - def __setitem__(self, key: str, value: Any) -> None: - self.additional_properties[key] = value - - def __delitem__(self, key: str) -> None: - del self.additional_properties[key] - - def __contains__(self, key: str) -> bool: - return key in self.additional_properties diff --git a/robosystems_client/models/materialize_status_response.py b/robosystems_client/models/materialize_status_response.py deleted file mode 100644 index 072a45b..0000000 --- a/robosystems_client/models/materialize_status_response.py +++ /dev/null @@ -1,172 +0,0 @@ -from __future__ import annotations - -from collections.abc import Mapping -from typing import Any, TypeVar, cast - -from attrs import define as _attrs_define -from attrs import field as _attrs_field - -from ..types import UNSET, Unset - -T = TypeVar("T", bound="MaterializeStatusResponse") - - -@_attrs_define -class MaterializeStatusResponse: - """ - Attributes: - graph_id (str): Graph database identifier - is_stale (bool): Whether graph is currently stale - message (str): Human-readable status summary - stale_reason (None | str | Unset): Reason for staleness if applicable - stale_since (None | str | Unset): When graph became stale (ISO timestamp) - last_materialized_at (None | str | Unset): When graph was last materialized (ISO timestamp) - materialization_count (int | Unset): Total number of materializations performed Default: 0. 
- hours_since_materialization (float | None | Unset): Hours since last materialization - """ - - graph_id: str - is_stale: bool - message: str - stale_reason: None | str | Unset = UNSET - stale_since: None | str | Unset = UNSET - last_materialized_at: None | str | Unset = UNSET - materialization_count: int | Unset = 0 - hours_since_materialization: float | None | Unset = UNSET - additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict) - - def to_dict(self) -> dict[str, Any]: - graph_id = self.graph_id - - is_stale = self.is_stale - - message = self.message - - stale_reason: None | str | Unset - if isinstance(self.stale_reason, Unset): - stale_reason = UNSET - else: - stale_reason = self.stale_reason - - stale_since: None | str | Unset - if isinstance(self.stale_since, Unset): - stale_since = UNSET - else: - stale_since = self.stale_since - - last_materialized_at: None | str | Unset - if isinstance(self.last_materialized_at, Unset): - last_materialized_at = UNSET - else: - last_materialized_at = self.last_materialized_at - - materialization_count = self.materialization_count - - hours_since_materialization: float | None | Unset - if isinstance(self.hours_since_materialization, Unset): - hours_since_materialization = UNSET - else: - hours_since_materialization = self.hours_since_materialization - - field_dict: dict[str, Any] = {} - field_dict.update(self.additional_properties) - field_dict.update( - { - "graph_id": graph_id, - "is_stale": is_stale, - "message": message, - } - ) - if stale_reason is not UNSET: - field_dict["stale_reason"] = stale_reason - if stale_since is not UNSET: - field_dict["stale_since"] = stale_since - if last_materialized_at is not UNSET: - field_dict["last_materialized_at"] = last_materialized_at - if materialization_count is not UNSET: - field_dict["materialization_count"] = materialization_count - if hours_since_materialization is not UNSET: - field_dict["hours_since_materialization"] = hours_since_materialization - - 
return field_dict - - @classmethod - def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T: - d = dict(src_dict) - graph_id = d.pop("graph_id") - - is_stale = d.pop("is_stale") - - message = d.pop("message") - - def _parse_stale_reason(data: object) -> None | str | Unset: - if data is None: - return data - if isinstance(data, Unset): - return data - return cast(None | str | Unset, data) - - stale_reason = _parse_stale_reason(d.pop("stale_reason", UNSET)) - - def _parse_stale_since(data: object) -> None | str | Unset: - if data is None: - return data - if isinstance(data, Unset): - return data - return cast(None | str | Unset, data) - - stale_since = _parse_stale_since(d.pop("stale_since", UNSET)) - - def _parse_last_materialized_at(data: object) -> None | str | Unset: - if data is None: - return data - if isinstance(data, Unset): - return data - return cast(None | str | Unset, data) - - last_materialized_at = _parse_last_materialized_at( - d.pop("last_materialized_at", UNSET) - ) - - materialization_count = d.pop("materialization_count", UNSET) - - def _parse_hours_since_materialization(data: object) -> float | None | Unset: - if data is None: - return data - if isinstance(data, Unset): - return data - return cast(float | None | Unset, data) - - hours_since_materialization = _parse_hours_since_materialization( - d.pop("hours_since_materialization", UNSET) - ) - - materialize_status_response = cls( - graph_id=graph_id, - is_stale=is_stale, - message=message, - stale_reason=stale_reason, - stale_since=stale_since, - last_materialized_at=last_materialized_at, - materialization_count=materialization_count, - hours_since_materialization=hours_since_materialization, - ) - - materialize_status_response.additional_properties = d - return materialize_status_response - - @property - def additional_keys(self) -> list[str]: - return list(self.additional_properties.keys()) - - def __getitem__(self, key: str) -> Any: - return self.additional_properties[key] - - def 
__setitem__(self, key: str, value: Any) -> None: - self.additional_properties[key] = value - - def __delitem__(self, key: str) -> None: - del self.additional_properties[key] - - def __contains__(self, key: str) -> bool: - return key in self.additional_properties diff --git a/robosystems_client/models/operation_envelope.py b/robosystems_client/models/operation_envelope.py index 5f4b077..41a2b0f 100644 --- a/robosystems_client/models/operation_envelope.py +++ b/robosystems_client/models/operation_envelope.py @@ -18,40 +18,33 @@ @_attrs_define class OperationEnvelope: - """Uniform response shape for every extensions operation endpoint. - - Every dispatch through `/extensions/{domain}/{graph_id}/operations/{op}` - returns an envelope carrying an `op_` operation_id. That id is the - bridge to the platform's monitoring surface: pass it to - `GET /v1/operations/{operation_id}/stream` (see `routers/operations.py`) - to subscribe to SSE progress events. Sync commands complete in the - envelope itself; async commands (`status: "pending"`, HTTP 202) hand off - to a background worker and stream their tail through the same SSE - endpoint until completion. Failed dispatches still mint an `operation_id` - so the audit log and any partial SSE events stay correlatable. + """Uniform response shape for every operation endpoint. + + Every dispatch through an operation surface returns an envelope carrying + an ``op_`` operation_id. That id is the bridge to the platform's + monitoring surface: pass it to + ``GET /v1/operations/{operation_id}/stream`` (see ``routers/operations.py``) + to subscribe to SSE progress events. Sync commands complete in the + envelope itself; async commands (``status: "pending"``, HTTP 202) hand + off to a background worker and stream their tail through the same SSE + endpoint until completion. Failed dispatches still mint an + ``operation_id`` so the audit log and any partial SSE events stay + correlatable. 
Fields: - - `operation`: kebab-case command name (e.g. `close-period`) - - `operation_id`: `op_`-prefixed ULID; always present, usable for audit - correlation and — for async commands — SSE subscription via - `/v1/operations/{operation_id}/stream` - - `status`: `"completed"` (sync, HTTP 200), `"pending"` (async, HTTP 202), - or `"failed"` (error responses) - - `result`: the domain-specific payload (the original Pydantic response) - or `None` for async/failed cases - - `at`: ISO-8601 UTC timestamp of when the envelope was minted (for sync - ops this is the completion time; for async/pending it's the enqueue time) - - `created_by`: user ID of the caller who initiated this operation, for - audit correlation without having to cross-reference the audit log. - Always populated for dispatcher-routed calls; may be `None` for legacy - direct `wrap_completed(...)` callers. - - `idempotent_replay`: `True` when the dispatcher returned this envelope - from the idempotency cache (the underlying command did NOT execute - again). `False` on every fresh execution. Clients can use this to - distinguish "my retry succeeded" from "the server re-ran the command" - without having to track their own request identity. The metrics - decorator also reads this attribute to suppress business-event counter - increments on replays so dashboards stay honest. + - ``operation``: kebab-case command name (e.g. 
``close-period``) + - ``operation_id``: ``op_``-prefixed ULID; always present, usable for + audit correlation and — for async commands — SSE subscription via + ``/v1/operations/{operation_id}/stream`` + - ``status``: ``"completed"`` (sync, HTTP 200), ``"pending"`` + (async, HTTP 202), or ``"failed"`` (error responses) + - ``result``: the domain-specific payload (the original Pydantic + response) or ``None`` for async/failed cases + - ``at``: ISO-8601 UTC timestamp of when the envelope was minted + - ``created_by``: user ID of the caller who initiated this operation + - ``idempotent_replay``: ``True`` when the dispatcher returned this + envelope from the idempotency cache (the underlying command did NOT + execute again) Attributes: operation (str): Kebab-case operation name diff --git a/robosystems_client/models/backup_restore_request.py b/robosystems_client/models/restore_backup_op.py similarity index 80% rename from robosystems_client/models/backup_restore_request.py rename to robosystems_client/models/restore_backup_op.py index 0447166..a29cf31 100644 --- a/robosystems_client/models/backup_restore_request.py +++ b/robosystems_client/models/restore_backup_op.py @@ -8,30 +8,38 @@ from ..types import UNSET, Unset -T = TypeVar("T", bound="BackupRestoreRequest") +T = TypeVar("T", bound="RestoreBackupOp") @_attrs_define -class BackupRestoreRequest: - """Request model for restoring from a backup. +class RestoreBackupOp: + """Body for the restore-backup operation. Attributes: + backup_id (str): Backup identifier to restore from create_system_backup (bool | Unset): Create a system backup of existing database before restore Default: True. verify_after_restore (bool | Unset): Verify database integrity after restore Default: True. 
""" + backup_id: str create_system_backup: bool | Unset = True verify_after_restore: bool | Unset = True additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict) def to_dict(self) -> dict[str, Any]: + backup_id = self.backup_id + create_system_backup = self.create_system_backup verify_after_restore = self.verify_after_restore field_dict: dict[str, Any] = {} field_dict.update(self.additional_properties) - field_dict.update({}) + field_dict.update( + { + "backup_id": backup_id, + } + ) if create_system_backup is not UNSET: field_dict["create_system_backup"] = create_system_backup if verify_after_restore is not UNSET: @@ -42,17 +50,20 @@ def to_dict(self) -> dict[str, Any]: @classmethod def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T: d = dict(src_dict) + backup_id = d.pop("backup_id") + create_system_backup = d.pop("create_system_backup", UNSET) verify_after_restore = d.pop("verify_after_restore", UNSET) - backup_restore_request = cls( + restore_backup_op = cls( + backup_id=backup_id, create_system_backup=create_system_backup, verify_after_restore=verify_after_restore, ) - backup_restore_request.additional_properties = d - return backup_restore_request + restore_backup_op.additional_properties = d + return restore_backup_op @property def additional_keys(self) -> list[str]: diff --git a/robosystems_client/models/materialize_response_limit_check_type_0.py b/robosystems_client/models/upgrade_tier_op.py similarity index 67% rename from robosystems_client/models/materialize_response_limit_check_type_0.py rename to robosystems_client/models/upgrade_tier_op.py index 5d34fd7..512082a 100644 --- a/robosystems_client/models/materialize_response_limit_check_type_0.py +++ b/robosystems_client/models/upgrade_tier_op.py @@ -6,28 +6,44 @@ from attrs import define as _attrs_define from attrs import field as _attrs_field -T = TypeVar("T", bound="MaterializeResponseLimitCheckType0") +T = TypeVar("T", bound="UpgradeTierOp") @_attrs_define -class 
MaterializeResponseLimitCheckType0: - """ """ +class UpgradeTierOp: + """Body for the upgrade-tier operation. + Attributes: + new_tier (str): Target tier: ladybug-standard, ladybug-large, ladybug-xlarge + """ + + new_tier: str additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict) def to_dict(self) -> dict[str, Any]: + new_tier = self.new_tier + field_dict: dict[str, Any] = {} field_dict.update(self.additional_properties) + field_dict.update( + { + "new_tier": new_tier, + } + ) return field_dict @classmethod def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T: d = dict(src_dict) - materialize_response_limit_check_type_0 = cls() + new_tier = d.pop("new_tier") + + upgrade_tier_op = cls( + new_tier=new_tier, + ) - materialize_response_limit_check_type_0.additional_properties = d - return materialize_response_limit_check_type_0 + upgrade_tier_op.additional_properties = d + return upgrade_tier_op @property def additional_keys(self) -> list[str]: diff --git a/tests/test_extensions.py b/tests/test_extensions.py index 2131b84..1a5503f 100644 --- a/tests/test_extensions.py +++ b/tests/test_extensions.py @@ -5,7 +5,6 @@ RoboSystemsClients, RoboSystemsClientConfig, FileClient, - MaterializationClient, TableClient, QueryClient, OperationClient, @@ -25,7 +24,6 @@ def test_extensions_initialization_default(self): assert isinstance(extensions.query, QueryClient) assert isinstance(extensions.operations, OperationClient) assert isinstance(extensions.files, FileClient) - assert isinstance(extensions.materialization, MaterializationClient) assert isinstance(extensions.tables, TableClient) assert isinstance(extensions.graphs, GraphClient) @@ -121,7 +119,7 @@ def test_file_client_receives_config(self): extensions.close() def test_materialization_client_receives_config(self): - """Test that MaterializationClient receives proper config.""" + """Test that GraphClient (which handles materialization) receives proper config.""" config = 
RoboSystemsClientConfig( base_url="https://api.robosystems.ai", headers={"X-API-Key": "test-token"}, @@ -129,8 +127,8 @@ def test_materialization_client_receives_config(self): extensions = RoboSystemsClients(config) - assert extensions.materialization.base_url == "https://api.robosystems.ai" - assert "X-API-Key" in extensions.materialization.headers + assert extensions.graphs.base_url == "https://api.robosystems.ai" + assert "X-API-Key" in extensions.graphs.headers extensions.close() diff --git a/tests/test_materialization_client.py b/tests/test_materialization_client.py index ad76ec1..9bdd0c0 100644 --- a/tests/test_materialization_client.py +++ b/tests/test_materialization_client.py @@ -1,12 +1,11 @@ -"""Unit tests for MaterializationClient.""" +"""Unit tests for GraphClient materialization.""" import pytest from unittest.mock import Mock, patch -from robosystems_client.clients.materialization_client import ( - MaterializationClient, +from robosystems_client.clients.graph_client import ( + GraphClient, MaterializationOptions, MaterializationResult, - MaterializationStatus, ) from robosystems_client.clients.operation_client import ( OperationResult, @@ -86,32 +85,14 @@ def test_materialization_result_with_error(self): assert result.success is False assert result.error == "Connection timeout" - def test_materialization_status(self): - """Test MaterializationStatus dataclass.""" - status = MaterializationStatus( - graph_id="graph-123", - is_stale=True, - stale_reason="Files uploaded since last materialization", - stale_since="2025-01-15T10:00:00Z", - last_materialized_at="2025-01-14T08:00:00Z", - materialization_count=5, - hours_since_materialization=26.0, - message="Graph is stale", - ) - - assert status.graph_id == "graph-123" - assert status.is_stale is True - assert status.materialization_count == 5 - assert status.hours_since_materialization == 26.0 - @pytest.mark.unit -class TestMaterializationClientInit: - """Test suite for MaterializationClient 
initialization.""" +class TestGraphClientMaterializeInit: + """Test suite for GraphClient initialization relevant to materialization.""" def test_client_initialization(self, mock_config): """Test that client initializes correctly with config.""" - client = MaterializationClient(mock_config) + client = GraphClient(mock_config) assert client.base_url == "http://localhost:8000" assert client.token == "test-api-key" @@ -120,7 +101,7 @@ def test_client_initialization(self, mock_config): def test_operation_client_lazy_creation(self, mock_config): """Test that operation client is created lazily.""" - client = MaterializationClient(mock_config) + client = GraphClient(mock_config) assert client._operation_client is None op_client = client.operation_client @@ -131,18 +112,19 @@ def test_operation_client_lazy_creation(self, mock_config): @pytest.mark.unit class TestMaterialize: - """Test suite for MaterializationClient.materialize method.""" + """Test suite for GraphClient.materialize method.""" - @patch("robosystems_client.clients.materialization_client.materialize_graph") - def test_materialize_success(self, mock_mat, mock_config, graph_id): + @patch("robosystems_client.api.graph_operations.op_materialize.sync_detailed") + @patch.object(GraphClient, "_get_authenticated_client") + def test_materialize_success(self, mock_auth, mock_mat, mock_config, graph_id): """Test successful materialization.""" - # Mock initial response with operation_id + mock_auth.return_value = Mock() + mock_resp = Mock() - mock_resp.status_code = 200 + mock_resp.status_code = 202 mock_resp.parsed = Mock(operation_id="op-mat-123") mock_mat.return_value = mock_resp - # Mock the operation client monitoring op_result = OperationResult( operation_id="op-mat-123", status=OperationStatus.COMPLETED, @@ -157,7 +139,7 @@ def test_materialize_success(self, mock_mat, mock_config, graph_id): execution_time_ms=2000.0, ) - client = MaterializationClient(mock_config) + client = GraphClient(mock_config) 
client._operation_client = Mock() client._operation_client.monitor_operation.return_value = op_result @@ -168,26 +150,34 @@ def test_materialize_success(self, mock_mat, mock_config, graph_id): assert result.tables_materialized == ["Entity"] assert result.total_rows == 100 - @patch("robosystems_client.clients.materialization_client.materialize_graph") - def test_materialize_api_failure(self, mock_mat, mock_config, graph_id): + @patch("robosystems_client.api.graph_operations.op_materialize.sync_detailed") + @patch.object(GraphClient, "_get_authenticated_client") + def test_materialize_api_failure(self, mock_auth, mock_mat, mock_config, graph_id): """Test materialization when API returns error.""" + mock_auth.return_value = Mock() + mock_resp = Mock() mock_resp.status_code = 500 mock_resp.parsed = None mock_resp.content = b'{"detail": "Internal error"}' mock_mat.return_value = mock_resp - client = MaterializationClient(mock_config) + client = GraphClient(mock_config) result = client.materialize(graph_id) assert result.success is False assert result.status == "failed" - @patch("robosystems_client.clients.materialization_client.materialize_graph") - def test_materialize_operation_failed(self, mock_mat, mock_config, graph_id): + @patch("robosystems_client.api.graph_operations.op_materialize.sync_detailed") + @patch.object(GraphClient, "_get_authenticated_client") + def test_materialize_operation_failed( + self, mock_auth, mock_mat, mock_config, graph_id + ): """Test materialization when operation fails.""" + mock_auth.return_value = Mock() + mock_resp = Mock() - mock_resp.status_code = 200 + mock_resp.status_code = 202 mock_resp.parsed = Mock(operation_id="op-fail") mock_mat.return_value = mock_resp @@ -198,7 +188,7 @@ def test_materialize_operation_failed(self, mock_mat, mock_config, graph_id): execution_time_ms=1000.0, ) - client = MaterializationClient(mock_config) + client = GraphClient(mock_config) client._operation_client = Mock() 
client._operation_client.monitor_operation.return_value = op_result @@ -210,18 +200,21 @@ def test_materialize_operation_failed(self, mock_mat, mock_config, graph_id): def test_materialize_no_token(self, mock_config, graph_id): """Test materialize fails without API key.""" mock_config["token"] = None - client = MaterializationClient(mock_config) + client = GraphClient(mock_config) result = client.materialize(graph_id) assert result.success is False assert "No API key" in result.error - @patch("robosystems_client.clients.materialization_client.materialize_graph") - def test_materialize_with_progress(self, mock_mat, mock_config, graph_id): + @patch("robosystems_client.api.graph_operations.op_materialize.sync_detailed") + @patch.object(GraphClient, "_get_authenticated_client") + def test_materialize_with_progress(self, mock_auth, mock_mat, mock_config, graph_id): """Test materialize calls progress callback.""" + mock_auth.return_value = Mock() + mock_resp = Mock() - mock_resp.status_code = 200 + mock_resp.status_code = 202 mock_resp.parsed = Mock(operation_id="op-progress") mock_mat.return_value = mock_resp @@ -243,7 +236,7 @@ def test_materialize_with_progress(self, mock_mat, mock_config, graph_id): on_progress=lambda msg: progress_messages.append(msg) ) - client = MaterializationClient(mock_config) + client = GraphClient(mock_config) client._operation_client = Mock() client._operation_client.monitor_operation.return_value = op_result @@ -251,11 +244,14 @@ def test_materialize_with_progress(self, mock_mat, mock_config, graph_id): assert len(progress_messages) >= 2 # At least "Submitting" and "queued" - @patch("robosystems_client.clients.materialization_client.materialize_graph") - def test_materialize_with_rebuild(self, mock_mat, mock_config, graph_id): + @patch("robosystems_client.api.graph_operations.op_materialize.sync_detailed") + @patch.object(GraphClient, "_get_authenticated_client") + def test_materialize_with_rebuild(self, mock_auth, mock_mat, mock_config, 
graph_id): """Test materialize passes rebuild option.""" + mock_auth.return_value = Mock() + mock_resp = Mock() - mock_resp.status_code = 200 + mock_resp.status_code = 202 mock_resp.parsed = Mock(operation_id="op-rebuild") mock_mat.return_value = mock_resp @@ -272,7 +268,7 @@ def test_materialize_with_rebuild(self, mock_mat, mock_config, graph_id): execution_time_ms=5000.0, ) - client = MaterializationClient(mock_config) + client = GraphClient(mock_config) client._operation_client = Mock() client._operation_client.monitor_operation.return_value = op_result @@ -280,57 +276,7 @@ def test_materialize_with_rebuild(self, mock_mat, mock_config, graph_id): result = client.materialize(graph_id, options) assert result.success is True - # Verify the request body had rebuild=True + # Verify the call had rebuild=True and force=True call_kwargs = mock_mat.call_args[1] - assert call_kwargs["body"].rebuild is True - assert call_kwargs["body"].force is True - - -@pytest.mark.unit -class TestMaterializationStatus: - """Test suite for MaterializationClient.status method.""" - - @patch("robosystems_client.clients.materialization_client.get_materialization_status") - def test_get_status(self, mock_status, mock_config, graph_id): - """Test getting materialization status.""" - mock_resp = Mock() - mock_resp.status_code = 200 - mock_resp.parsed = Mock( - graph_id=graph_id, - is_stale=True, - stale_reason="New files uploaded", - stale_since="2025-01-15T10:00:00Z", - last_materialized_at="2025-01-14T08:00:00Z", - materialization_count=3, - hours_since_materialization=26.0, - message="Graph is stale", - ) - mock_status.return_value = mock_resp - - client = MaterializationClient(mock_config) - status = client.status(graph_id) - - assert status is not None - assert status.is_stale is True - assert status.materialization_count == 3 - - @patch("robosystems_client.clients.materialization_client.get_materialization_status") - def test_get_status_failure(self, mock_status, mock_config, graph_id): 
- """Test status returns None on failure.""" - mock_resp = Mock() - mock_resp.status_code = 500 - mock_resp.parsed = None - mock_status.return_value = mock_resp - - client = MaterializationClient(mock_config) - status = client.status(graph_id) - - assert status is None - - def test_status_no_token(self, mock_config, graph_id): - """Test status returns None without token.""" - mock_config["token"] = None - client = MaterializationClient(mock_config) - status = client.status(graph_id) - - assert status is None + assert call_kwargs["rebuild"] is True + assert call_kwargs["force"] is True diff --git a/tests/test_report_client.py b/tests/test_report_client.py deleted file mode 100644 index 1505cf2..0000000 --- a/tests/test_report_client.py +++ /dev/null @@ -1,266 +0,0 @@ -"""Unit tests for ReportClient. - -Reports + publish lists + statements ride the same transport split as -LedgerClient: GraphQL for reads, operation envelopes for writes. -""" - -from __future__ import annotations - -from http import HTTPStatus -from unittest.mock import Mock, patch - -import pytest - -from robosystems_client.clients.report_client import ReportClient -from robosystems_client.models.operation_envelope import OperationEnvelope -from robosystems_client.models.operation_envelope_status import OperationEnvelopeStatus - - -def _envelope( - operation: str, - result: dict | list | None, - status: OperationEnvelopeStatus = OperationEnvelopeStatus.COMPLETED, -) -> OperationEnvelope: - return OperationEnvelope( - operation=operation, - operation_id=f"op_{operation.upper()}_01", - status=status, - result=result, - at="2026-04-14T12:00:00Z", - ) - - -def _mock_response( - envelope: OperationEnvelope, status_code: int = HTTPStatus.OK -) -> Mock: - resp = Mock() - resp.status_code = status_code - resp.parsed = envelope - resp.content = b"" - return resp - - -@pytest.mark.unit -class TestReportReads: - @patch("robosystems_client.graphql.client.GraphQLClient.execute") - def test_list(self, 
mock_execute, mock_config, graph_id): - mock_execute.return_value = { - "reports": { - "reports": [ - { - "id": "rep_1", - "name": "Q1 2026", - "taxonomyId": "tax_usgaap", - "generationStatus": "completed", - "periodType": "quarterly", - "periodStart": "2026-01-01", - "periodEnd": "2026-03-31", - "comparative": True, - "mappingId": "map_1", - "aiGenerated": False, - "createdAt": "2026-04-01T00:00:00Z", - "lastGenerated": "2026-04-01T00:00:00Z", - "entityName": "ACME", - "sourceGraphId": None, - "sourceReportId": None, - "sharedAt": None, - "periods": [], - "structures": [], - } - ] - } - } - client = ReportClient(mock_config) - reports = client.list(graph_id) - assert len(reports) == 1 - assert reports[0]["id"] == "rep_1" - assert reports[0]["generation_status"] == "completed" - - @patch("robosystems_client.graphql.client.GraphQLClient.execute") - def test_get_returns_none_when_missing(self, mock_execute, mock_config, graph_id): - mock_execute.return_value = {"report": None} - client = ReportClient(mock_config) - assert client.get(graph_id, "rep_x") is None - - @patch("robosystems_client.graphql.client.GraphQLClient.execute") - def test_statement(self, mock_execute, mock_config, graph_id): - mock_execute.return_value = { - "statement": { - "reportId": "rep_1", - "structureId": "str_1", - "structureName": "Income Statement", - "structureType": "income_statement", - "unmappedCount": 0, - "periods": [{"start": "2026-01-01", "end": "2026-03-31", "label": "Q1"}], - "rows": [ - { - "elementId": "elem_rev", - "elementQname": "us-gaap:Revenues", - "elementName": "Revenues", - "classification": "revenue", - "values": [1000000], - "isSubtotal": False, - "depth": 0, - } - ], - "validation": { - "passed": True, - "checks": [], - "failures": [], - "warnings": [], - }, - } - } - client = ReportClient(mock_config) - stmt = client.statement(graph_id, "rep_1", "income_statement") - assert stmt is not None - assert stmt["rows"][0]["values"][0] == 1000000 - assert 
stmt["validation"]["passed"] is True - - @patch("robosystems_client.graphql.client.GraphQLClient.execute") - def test_list_publish_lists(self, mock_execute, mock_config, graph_id): - mock_execute.return_value = { - "publishLists": { - "publishLists": [ - { - "id": "pl_1", - "name": "Investors", - "description": None, - "memberCount": 2, - "createdBy": "user_1", - "createdAt": "2026-01-01T00:00:00Z", - "updatedAt": "2026-01-01T00:00:00Z", - } - ], - "pagination": {"total": 1, "limit": 100, "offset": 0, "hasMore": False}, - } - } - client = ReportClient(mock_config) - result = client.list_publish_lists(graph_id) - assert result is not None - assert len(result["publish_lists"]) == 1 - assert result["publish_lists"][0]["member_count"] == 2 - - @patch("robosystems_client.graphql.client.GraphQLClient.execute") - def test_get_publish_list_with_members(self, mock_execute, mock_config, graph_id): - mock_execute.return_value = { - "publishList": { - "id": "pl_1", - "name": "Investors", - "description": "VC distribution list", - "memberCount": 1, - "createdBy": "user_1", - "createdAt": "2026-01-01T00:00:00Z", - "updatedAt": "2026-01-01T00:00:00Z", - "members": [ - { - "id": "mem_1", - "targetGraphId": "graph_investor", - "targetGraphName": "Acme Ventures", - "targetOrgName": None, - "addedBy": "user_1", - "addedAt": "2026-01-01T00:00:00Z", - } - ], - } - } - client = ReportClient(mock_config) - detail = client.get_publish_list(graph_id, "pl_1") - assert detail is not None - assert len(detail["members"]) == 1 - assert detail["members"][0]["target_graph_id"] == "graph_investor" - - -@pytest.mark.unit -class TestReportWrites: - @patch("robosystems_client.clients.report_client.op_create_report") - def test_create_returns_ack(self, mock_op, mock_config, graph_id): - envelope = _envelope("create-report", None, status=OperationEnvelopeStatus.PENDING) - mock_op.return_value = _mock_response(envelope, status_code=HTTPStatus.ACCEPTED) - client = ReportClient(mock_config) - ack = 
client.create( - graph_id, - name="Q2 2026", - mapping_id="map_1", - period_start="2026-04-01", - period_end="2026-06-30", - ) - assert ack["status"] == OperationEnvelopeStatus.PENDING - assert ack["operation_id"].startswith("op_") - - @patch("robosystems_client.clients.report_client.op_regenerate_report") - def test_regenerate_returns_ack(self, mock_op, mock_config, graph_id): - envelope = _envelope( - "regenerate-report", None, status=OperationEnvelopeStatus.PENDING - ) - mock_op.return_value = _mock_response(envelope, status_code=HTTPStatus.ACCEPTED) - client = ReportClient(mock_config) - ack = client.regenerate( - graph_id, "rep_1", period_start="2026-04-01", period_end="2026-06-30" - ) - assert ack["status"] == OperationEnvelopeStatus.PENDING - - @patch("robosystems_client.clients.report_client.op_delete_report") - def test_delete(self, mock_op, mock_config, graph_id): - envelope = _envelope("delete-report", {"deleted": True}) - mock_op.return_value = _mock_response(envelope) - client = ReportClient(mock_config) - client.delete(graph_id, "rep_1") - assert mock_op.called - - @patch("robosystems_client.clients.report_client.op_share_report") - def test_share_returns_ack(self, mock_op, mock_config, graph_id): - envelope = _envelope("share-report", None, status=OperationEnvelopeStatus.PENDING) - mock_op.return_value = _mock_response(envelope, status_code=HTTPStatus.ACCEPTED) - client = ReportClient(mock_config) - ack = client.share(graph_id, "rep_1", "pl_1") - assert ack["operation_id"].startswith("op_") - - @patch("robosystems_client.clients.report_client.op_create_publish_list") - def test_create_publish_list(self, mock_op, mock_config, graph_id): - envelope = _envelope( - "create-publish-list", - { - "id": "pl_new", - "name": "New List", - "description": None, - "member_count": 0, - }, - ) - mock_op.return_value = _mock_response(envelope) - client = ReportClient(mock_config) - result = client.create_publish_list(graph_id, "New List") - assert result["id"] == 
"pl_new" - - @patch("robosystems_client.clients.report_client.op_add_publish_list_members") - def test_add_members(self, mock_op, mock_config, graph_id): - envelope = _envelope("add-publish-list-members", {"added": 2}) - mock_op.return_value = _mock_response(envelope) - client = ReportClient(mock_config) - result = client.add_members(graph_id, "pl_1", ["graph_a", "graph_b"]) - assert result["added"] == 2 - body = mock_op.call_args.kwargs["body"] - assert body.list_id == "pl_1" - assert body.target_graph_ids == ["graph_a", "graph_b"] - - @patch("robosystems_client.clients.report_client.op_remove_publish_list_member") - def test_remove_member(self, mock_op, mock_config, graph_id): - envelope = _envelope("remove-publish-list-member", {"deleted": True}) - mock_op.return_value = _mock_response(envelope) - client = ReportClient(mock_config) - result = client.remove_member(graph_id, "pl_1", "mem_1") - assert result["deleted"] is True - - -@pytest.mark.unit -class TestReportHelpers: - def test_is_shared_report_with_dict(self, mock_config): - client = ReportClient(mock_config) - assert client.is_shared_report({"source_graph_id": "graph_src"}) is True - assert client.is_shared_report({"source_graph_id": None}) is False - - def test_is_shared_report_with_object(self, mock_config): - client = ReportClient(mock_config) - obj = Mock() - obj.source_graph_id = "graph_src" - assert client.is_shared_report(obj) is True