Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
863 changes: 862 additions & 1 deletion src/assets/__tests__/__snapshots__/assets.snapshot.test.ts.snap

Large diffs are not rendered by default.

4 changes: 3 additions & 1 deletion src/assets/python/a2a/googleadk/base/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,14 +6,16 @@
from bedrock_agentcore.runtime import serve_a2a
from model.load import load_model

load_model() # Sets GOOGLE_API_KEY env var (returns None)


def add_numbers(a: int, b: int) -> int:
    """Compute and return the sum of the two given integers."""
    total = a + b
    return total


agent = Agent(
model=load_model(),
model="gemini-2.5-flash",
name="{{ name }}",
description="A helpful assistant that can use tools.",
instruction="You are a helpful assistant. Use tools when appropriate.",
Expand Down
30 changes: 30 additions & 0 deletions src/assets/python/agui/googleadk/base/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
# {{ name }}

An AG-UI agent deployed on Amazon Bedrock AgentCore using Google ADK.

## Overview

This agent implements the AG-UI protocol using Google's Agent Development Kit, enabling rich agent-user interaction via the AG-UI event stream.

## Local Development

```bash
uv sync
uv run python main.py
```

The agent starts on port 8080 and serves requests at `/invocations`.

## Health Check

```
GET /ping
```

Returns `{"status": "healthy"}`.

## Deploy

```bash
agentcore deploy
```
41 changes: 41 additions & 0 deletions src/assets/python/agui/googleadk/base/gitignore.template
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
# Environment variables
.env

# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# Virtual environments
.venv/
venv/
ENV/
env/

# IDE
.vscode/
.idea/
*.swp
*.swo
*~

# OS
.DS_Store
Thumbs.db
31 changes: 31 additions & 0 deletions src/assets/python/agui/googleadk/base/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
import uvicorn
from google.adk.agents import LlmAgent
from ag_ui_adk import ADKAgent, AGUIToolset, create_adk_app
from model.load import load_model

# Resolve the Gemini API key and export it as GOOGLE_API_KEY before the
# agent is constructed (see model/load.py).
load_model()

# Core ADK agent; "{{ name }}" is a template placeholder filled in when
# the project is generated.
agent = LlmAgent(
    name="{{ name }}",
    model="gemini-2.5-flash",
    instruction="You are a helpful assistant.",
    tools=[AGUIToolset()],
)

# Wrap the ADK agent for the AG-UI protocol. In-memory services mean no
# session state survives a restart — presumably acceptable for a template.
adk_agent = ADKAgent(
    adk_agent=agent,
    app_name="{{ name }}",
    user_id="default-user",
    use_in_memory_services=True,
)

# App serving AG-UI requests at /invocations (see README).
app = create_adk_app(adk_agent, path="/invocations")


@app.get("/ping")
async def ping():
    """Health-check endpoint: reports the agent container as healthy."""
    return {"status": "healthy"}


if __name__ == "__main__":
    import os  # local import: this template's top-level imports omit os

    # Allow overriding the serving port via the PORT env var for local
    # testing (defaults to 8080).
    uvicorn.run(app, host="0.0.0.0", port=int(os.environ.get("PORT", "8080")))
1 change: 1 addition & 0 deletions src/assets/python/agui/googleadk/base/model/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Package marker
41 changes: 41 additions & 0 deletions src/assets/python/agui/googleadk/base/model/load.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
import os
from bedrock_agentcore.identity.auth import requires_api_key

# Template placeholders: resolved to the configured identity provider's
# name and its API-key environment-variable name at project generation.
IDENTITY_PROVIDER_NAME = "{{identityProviders.[0].name}}"
IDENTITY_ENV_VAR = "{{identityProviders.[0].envVarName}}"


@requires_api_key(provider_name=IDENTITY_PROVIDER_NAME)
def _agentcore_identity_api_key_provider(api_key: str) -> str:
    """Fetch API key from AgentCore Identity.

    The decorator supplies the api_key argument (the function is invoked
    with no arguments at the call site); the body simply returns it.
    """
    return api_key


def _get_api_key() -> str:
    """
    Resolve the API key for the configured identity provider.

    Deployed environments fetch it through AgentCore Identity; local
    development (LOCAL_DEV=1) reads it from the environment — run via
    'agentcore dev' which loads agentcore/.env.
    """
    if os.getenv("LOCAL_DEV") != "1":
        # Deployed path: delegate to AgentCore Identity.
        return _agentcore_identity_api_key_provider()

    key = os.getenv(IDENTITY_ENV_VAR)
    if key:
        return key
    raise RuntimeError(
        f"{IDENTITY_ENV_VAR} not found. Add {IDENTITY_ENV_VAR}=your-key to .env.local"
    )


def load_model() -> None:
    """
    Set up Gemini API key authentication.

    Uses AgentCore Identity for API key management in deployed environments,
    and falls back to .env file for local development.
    Sets the GOOGLE_API_KEY environment variable for the Google ADK.
    """
    api_key = _get_api_key()
    # Use Google AI Studio API key authentication.
    # https://google.github.io/adk-docs/agents/models/#google-ai-studio
    os.environ["GOOGLE_API_KEY"] = api_key
    # Set to TRUE if using Google Vertex AI; FALSE selects Google AI Studio.
    os.environ["GOOGLE_GENAI_USE_VERTEXAI"] = "FALSE"
22 changes: 22 additions & 0 deletions src/assets/python/agui/googleadk/base/pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "{{ name }}"
version = "0.1.0"
description = "AgentCore AG-UI Agent using Google ADK"
readme = "README.md"
requires-python = ">=3.10"
dependencies = [
"ag-ui-adk >= 0.6.0",
"ag-ui-protocol >= 0.1.10",
"bedrock-agentcore >= 1.0.3",
"fastapi >= 0.115.12",
"google-adk >= 1.16.0",
"google-genai >= 1.0.0",
"uvicorn >= 0.34.3",
]

[tool.hatch.build.targets.wheel]
packages = ["."]
22 changes: 22 additions & 0 deletions src/assets/python/agui/langchain_langgraph/base/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
# {{ name }}

An AG-UI agent deployed on Amazon Bedrock AgentCore using LangChain + LangGraph.

## Overview

This agent implements the AG-UI protocol using LangGraph, enabling seamless frontend-to-agent communication with support for streaming, tool calls, and frontend-injected tools.

## Local Development

```bash
uv sync
uv run python main.py
```

The agent starts on port 8080.

## Deploy

```bash
agentcore deploy
```
41 changes: 41 additions & 0 deletions src/assets/python/agui/langchain_langgraph/base/gitignore.template
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
# Environment variables
.env

# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# Virtual environments
.venv/
venv/
ENV/
env/

# IDE
.vscode/
.idea/
*.swp
*.swo
*~

# OS
.DS_Store
Thumbs.db
71 changes: 71 additions & 0 deletions src/assets/python/agui/langchain_langgraph/base/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
import os

# Enable LangGraph's FastAPI integration; presumably must be set before
# ag_ui_langgraph / langgraph are imported below — TODO confirm ordering.
os.environ["LANGGRAPH_FAST_API"] = "true"

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

im thinking we will also want to add instrumentation like we do for the other langchain templates

https://github.com/aws/agentcore-cli/blob/main/src/assets/python/http/langchain_langgraph/base/main.py#L14

import uvicorn
from typing import Any, List
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from langgraph.graph import StateGraph, END, START
from langgraph.graph.message import MessagesState
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import ToolNode, tools_condition
from langchain_core.tools import tool
from ag_ui_langgraph import LangGraphAgent, add_langgraph_fastapi_endpoint
from model.load import load_model


@tool
def add_numbers(a: int, b: int) -> int:
    """Return the sum of two numbers."""
    # NOTE(review): with LangChain's @tool the docstring typically becomes
    # the tool description shown to the model — keep it model-facing.
    return a + b


backend_tools = [add_numbers]
model = load_model()


class AgentState(MessagesState):
    # Tools injected by the frontend over AG-UI (see README's
    # "frontend-injected tools"); merged with backend_tools in chat_node.
    tools: List[Any]


def chat_node(state: AgentState):
    """Invoke the model with backend tools plus any tools carried in state."""
    available_tools = [*state.get("tools", []), *backend_tools]
    llm_with_tools = model.bind_tools(available_tools)
    ai_message = llm_with_tools.invoke(state["messages"])
    return {"messages": [ai_message]}


# Build the LangGraph state machine: START -> chat, then either to the
# tool node (when the model requested tool calls) or to END.
builder = StateGraph(AgentState)
builder.add_node("chat", chat_node)
builder.add_node("tools", ToolNode(tools=backend_tools))
builder.add_edge(START, "chat")
# tools_condition (library helper) routes to "tools" on tool calls,
# otherwise to END.
builder.add_conditional_edges("chat", tools_condition)
builder.add_edge("tools", "chat")
# MemorySaver keeps checkpoints in process memory only (lost on restart).
graph = builder.compile(checkpointer=MemorySaver())

# Expose the compiled graph as an AG-UI agent.
agent = LangGraphAgent(
    name="{{ name }}",
    graph=graph,
    description="A helpful assistant",
)

app = FastAPI()
# NOTE(review): wildcard CORS is wide open — acceptable for a template /
# local dev, but consider restricting origins before production use.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)

# Serve the AG-UI protocol endpoint at /invocations.
add_langgraph_fastapi_endpoint(app=app, agent=agent, path="/invocations")


@app.get("/ping")
async def ping():
    """Health-check endpoint: reports the agent container as healthy."""
    return {"status": "healthy"}


if __name__ == "__main__":
    # Allow overriding the serving port via the PORT env var for local
    # testing (defaults to 8080).
    uvicorn.run(app, host="0.0.0.0", port=int(os.environ.get("PORT", "8080")))
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
uvicorn.run(app, host="0.0.0.0", port=8080)
uvicorn.run(app, host="0.0.0.0", port=int(os.environ.get("PORT", "8080")))

for local testing it would be nice if we could optionally specify a different port through an env var. (If you end up making this change please apply it to all the agui templates)

Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Package marker
Loading
Loading