diff --git a/.github/plugin/marketplace.json b/.github/plugin/marketplace.json index c37cdaf5f..04eb01d65 100644 --- a/.github/plugin/marketplace.json +++ b/.github/plugin/marketplace.json @@ -218,6 +218,12 @@ "description": "An AI partner, not a tool. Ember carries fire from person to person — helping humans discover that AI partnership isn't something you learn, it's something you find.", "version": "1.0.0" }, + { + "name": "fabric-data-agent", + "source": "fabric-data-agent", + "description": "Create, test, and tune Microsoft Fabric Data Agents from VS Code using natural language. Includes MCP tools for full lifecycle management — lakehouse connection, table selection, few-shot generation with SQL validation, CSV accuracy testing, and query tuning.", + "version": "1.0.0" + }, { "name": "fastah-ip-geo-tools", "source": "fastah-ip-geo-tools", diff --git a/agents/fabric-data-agent-manager.agent.md b/agents/fabric-data-agent-manager.agent.md new file mode 100644 index 000000000..7d1396a7b --- /dev/null +++ b/agents/fabric-data-agent-manager.agent.md @@ -0,0 +1,48 @@ +--- +name: "Fabric Data Agent Manager" +description: "Full lifecycle management of Microsoft Fabric Data Agents — create, configure, test, tune, and publish agents using natural language through MCP tools" +model: "gpt-4o" +tools: ["mcp"] +--- + +You are a specialist in managing Microsoft Fabric Data Agents. You help users through the full agent lifecycle — from creation to production — using MCP tools that connect to Fabric APIs. 
+ +## Your Expertise + +- Creating and configuring Fabric Data Agents +- Connecting lakehouses and selecting tables from schemas +- Writing domain-specific AI instructions from semantic models (TMDL files) +- Generating and validating few-shot Q→SQL examples +- Running CSV-based accuracy tests +- Diagnosing and fixing failing queries (case sensitivity, missing filters, wrong tables) +- Publishing agents and testing with sample questions + +## Your Approach + +- Always ask for workspace and agent name before starting +- Confirm with the user before destructive operations (delete, replace instructions) +- After publishing, suggest testing with a sample question +- Show SQL queries alongside answers for transparency +- Validate all SQL against the database before adding as few-shots +- Use LOWER() for case-insensitive string matching in SQL + +## Workflow + +1. **Create** agent with name and workspace +2. **Connect** lakehouse datasource +3. **Select tables** — verify with get_agent_config (must show non-zero table count) +4. **Write instructions** — from semantic models, TMDL files, or domain knowledge +5. **Add few-shots** — generate Q→SQL pairs, validate each against SQL endpoint +6. **Publish** agent +7. **Test** with sample questions +8. 
**Tune** — diagnose failures, add corrective few-shots, re-publish, re-test + +## Guidelines + +- Never invent column names — always query INFORMATION_SCHEMA.COLUMNS first +- Always validate SQL by running it before adding as a few-shot +- Use `select_tables` (safe GET→modify→PUT) instead of `configure_agent_tables` (risky delete+recreate) +- After table selection, verify with `get_agent_config` — must show Selected tables > 0 +- For string filters, use `LOWER()` to handle case-sensitive SQL endpoints +- Default to last month when user doesn't specify a date range +- Always qualify tables with schema name (e.g., `TCA.table_name`) diff --git a/docs/README.agents.md b/docs/README.agents.md index 27b088b14..371e24caf 100644 --- a/docs/README.agents.md +++ b/docs/README.agents.md @@ -84,6 +84,7 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-agents) for guidelines on how to | [Expert Nuxt Developer](../agents/nuxt-expert.agent.md)
[![Install in VS Code](https://img.shields.io/badge/VS_Code-Install-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fnuxt-expert.agent.md)
[![Install in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Install-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fnuxt-expert.agent.md) | Expert Nuxt developer specializing in Nuxt 3, Nitro, server routes, data fetching strategies, and performance optimization with Vue 3 and TypeScript | | | [Expert React Frontend Engineer](../agents/expert-react-frontend-engineer.agent.md)
[![Install in VS Code](https://img.shields.io/badge/VS_Code-Install-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fexpert-react-frontend-engineer.agent.md)
[![Install in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Install-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fexpert-react-frontend-engineer.agent.md) | Expert React 19.2 frontend engineer specializing in modern hooks, Server Components, Actions, TypeScript, and performance optimization | | | [Expert Vue.js Frontend Engineer](../agents/vuejs-expert.agent.md)
[![Install in VS Code](https://img.shields.io/badge/VS_Code-Install-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fvuejs-expert.agent.md)
[![Install in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Install-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fvuejs-expert.agent.md) | Expert Vue.js frontend engineer specializing in Vue 3 Composition API, reactivity, state management, testing, and performance with TypeScript | | +| [Fabric Data Agent Manager](../agents/fabric-data-agent-manager.agent.md)
[![Install in VS Code](https://img.shields.io/badge/VS_Code-Install-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Ffabric-data-agent-manager.agent.md)
[![Install in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Install-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Ffabric-data-agent-manager.agent.md) | Full lifecycle management of Microsoft Fabric Data Agents — create, configure, test, tune, and publish agents using natural language through MCP tools | | | [Fedora Linux Expert](../agents/fedora-linux-expert.agent.md)
[![Install in VS Code](https://img.shields.io/badge/VS_Code-Install-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Ffedora-linux-expert.agent.md)
[![Install in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Install-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Ffedora-linux-expert.agent.md) | Fedora (Red Hat family) Linux specialist focused on dnf, SELinux, and modern systemd-based workflows. | | | [Frontend Performance Investigator](../agents/frontend-performance-investigator.agent.md)
[![Install in VS Code](https://img.shields.io/badge/VS_Code-Install-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Ffrontend-performance-investigator.agent.md)
[![Install in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Install-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Ffrontend-performance-investigator.agent.md) | Runtime web-performance specialist for diagnosing Core Web Vitals, Lighthouse regressions, layout shifts, long tasks, and slow network paths with Chrome DevTools MCP. | | | [Gem Browser Tester](../agents/gem-browser-tester.agent.md)
[![Install in VS Code](https://img.shields.io/badge/VS_Code-Install-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-browser-tester.agent.md)
[![Install in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Install-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-browser-tester.agent.md) | E2E browser testing, UI/UX validation, visual regression with browser. | | diff --git a/docs/README.plugins.md b/docs/README.plugins.md index fa6dbf7b0..83148e82b 100644 --- a/docs/README.plugins.md +++ b/docs/README.plugins.md @@ -42,6 +42,7 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-plugins) for guidelines on how t | [doublecheck](../plugins/doublecheck/README.md) | Three-layer verification pipeline for AI output. Extracts claims, finds sources, and flags hallucination risks so humans can verify before acting. | 2 items | verification, hallucination, fact-check, source-citation, trust, safety | | [edge-ai-tasks](../plugins/edge-ai-tasks/README.md) | Task Researcher and Task Planner for intermediate to expert users and large codebases - Brought to you by microsoft/edge-ai | 2 items | architecture, planning, research, tasks, implementation | | [ember](../plugins/ember/README.md) | An AI partner, not a tool. Ember carries fire from person to person — helping humans discover that AI partnership isn't something you learn, it's something you find. | 2 items | ai-partnership, coaching, onboarding, collaboration, storytelling, developer-experience | +| [fabric-data-agent](../plugins/fabric-data-agent/README.md) | Create, test, and tune Microsoft Fabric Data Agents from VS Code using natural language. Includes MCP tools for full lifecycle management — lakehouse connection, table selection, few-shot generation with SQL validation, CSV accuracy testing, and query tuning. 
| 4 items | fabric, data-agent, mcp, microsoft, sql, accuracy-testing, few-shot, lakehouse | | [fastah-ip-geo-tools](../plugins/fastah-ip-geo-tools/README.md) | This plugin is for network operations engineers who wish to tune and publish IP geolocation feeds in RFC 8805 format. It consists of an AI Skill and an associated MCP server that geocodes geolocation place names to real cities for accuracy. | 1 items | geofeed, ip-geolocation, rfc-8805, rfc-9632, network-operations, isp, cloud, hosting, ixp | | [flowstudio-power-automate](../plugins/flowstudio-power-automate/README.md) | Give your AI agent full visibility into Power Automate cloud flows via the FlowStudio MCP server. Connect, debug, build, monitor health, and govern flows at scale — action-level inputs and outputs, not just status codes. | 5 items | power-automate, power-platform, flowstudio, mcp, model-context-protocol, cloud-flows, workflow-automation, monitoring, governance | | [frontend-web-dev](../plugins/frontend-web-dev/README.md) | Essential prompts, instructions, and chat modes for modern frontend web development including React, Angular, Vue, TypeScript, and CSS frameworks. | 4 items | frontend, web, react, typescript, javascript, css, html, angular, vue | diff --git a/docs/README.skills.md b/docs/README.skills.md index 780afeb8f..78772d190 100644 --- a/docs/README.skills.md +++ b/docs/README.skills.md @@ -133,6 +133,9 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-skills) for guidelines on how to | [entra-agent-user](../skills/entra-agent-user/SKILL.md) | Create Agent Users in Microsoft Entra ID from Agent Identities, enabling AI agents to act as digital workers with user identity capabilities in Microsoft 365 and Azure environments. | None | | [eval-driven-dev](../skills/eval-driven-dev/SKILL.md) | Set up eval-based QA for Python LLM applications: instrument the app, build golden datasets, write and run eval tests, and iterate on failures. 
ALWAYS USE THIS SKILL when the user asks to set up QA, add tests, add evals, evaluate, benchmark, fix wrong behaviors, improve quality, or do quality assurance for any Python project that calls an LLM model. | `references/1-a-entry-point.md`
`references/1-b-eval-criteria.md`
`references/2-wrap-and-trace.md`
`references/3-define-evaluators.md`
`references/4-build-dataset.md`
`references/5-run-tests.md`
`references/6-investigate.md`
`references/evaluators.md`
`references/testing-api.md`
`references/wrap-api.md`
`resources` | | [excalidraw-diagram-generator](../skills/excalidraw-diagram-generator/SKILL.md) | Generate Excalidraw diagrams from natural language descriptions. Use when asked to "create a diagram", "make a flowchart", "visualize a process", "draw a system architecture", "create a mind map", or "generate an Excalidraw file". Supports flowcharts, relationship diagrams, mind maps, and system architecture diagrams. Outputs .excalidraw JSON files that can be opened directly in Excalidraw. | `references/element-types.md`
`references/excalidraw-schema.md`
`scripts/.gitignore`
`scripts/README.md`
`scripts/add-arrow.py`
`scripts/add-icon-to-diagram.py`
`scripts/split-excalidraw-library.py`
`templates` | +| [fabric-data-agent-create](../skills/fabric-data-agent-create/SKILL.md) | Step-by-step skill for creating a Microsoft Fabric Data Agent end-to-end — connect lakehouse, select tables, write instructions from semantic models, add validated few-shots, publish and test | None | +| [fabric-data-agent-test](../skills/fabric-data-agent-test/SKILL.md) | CSV-based accuracy testing for Fabric Data Agents — run test prompts, compare agent answers against expected values, report pass/fail with tolerance | None | +| [fabric-data-agent-tune](../skills/fabric-data-agent-tune/SKILL.md) | Diagnose and fix failing queries on Fabric Data Agents — reproduce issues, identify root causes, add corrective few-shots, re-publish and verify | None | | [fabric-lakehouse](../skills/fabric-lakehouse/SKILL.md) | Use this skill to get context about Fabric Lakehouse and its features for software systems and AI-powered functions. It offers descriptions of Lakehouse data components, organization with schemas and shortcuts, access control, and code examples. This skill supports users in designing, building, and optimizing Lakehouse solutions using best practices. | `references/getdata.md`
`references/pyspark.md` | | [fedora-linux-triage](../skills/fedora-linux-triage/SKILL.md) | Triage and resolve Fedora issues with dnf, systemd, and SELinux-aware guidance. | None | | [finalize-agent-prompt](../skills/finalize-agent-prompt/SKILL.md) | Finalize prompt file using the role of an AI agent to polish the prompt for the end user. | None | diff --git a/plugins/fabric-data-agent/.github/plugin/plugin.json b/plugins/fabric-data-agent/.github/plugin/plugin.json new file mode 100644 index 000000000..ac5571dce --- /dev/null +++ b/plugins/fabric-data-agent/.github/plugin/plugin.json @@ -0,0 +1,28 @@ +{ + "name": "fabric-data-agent", + "description": "Create, test, and tune Microsoft Fabric Data Agents from VS Code using natural language. Includes MCP tools for full lifecycle management — lakehouse connection, table selection, few-shot generation with SQL validation, CSV accuracy testing, and query tuning.", + "version": "1.0.0", + "keywords": [ + "fabric", + "data-agent", + "mcp", + "microsoft", + "sql", + "accuracy-testing", + "few-shot", + "lakehouse" + ], + "author": { + "name": "Hari Gouthami Narravula" + }, + "repository": "https://github.com/github/awesome-copilot", + "license": "MIT", + "agents": [ + "./agents/fabric-data-agent-manager.agent.md" + ], + "skills": [ + "./skills/fabric-data-agent-create/", + "./skills/fabric-data-agent-test/", + "./skills/fabric-data-agent-tune/" + ] +} diff --git a/plugins/fabric-data-agent/README.md b/plugins/fabric-data-agent/README.md new file mode 100644 index 000000000..3dcfd64dd --- /dev/null +++ b/plugins/fabric-data-agent/README.md @@ -0,0 +1,64 @@ +# Fabric Data Agent Plugin + +Create, test, and tune Microsoft Fabric Data Agents from VS Code using natural language. 
+ +## What It Does + +This plugin provides an agent and three skills for managing Fabric Data Agents through GitHub Copilot: + +- **Fabric Data Agent Manager** (agent) — Full lifecycle: create → configure → publish → query → tune +- **#fabric-data-agent-create** (skill) — Guided end-to-end agent setup with SQL validation +- **#fabric-data-agent-test** (skill) — CSV-based accuracy testing with tolerance matching +- **#fabric-data-agent-tune** (skill) — Diagnose and fix failing queries + +## Prerequisites + +- Azure CLI (`az login`) for Fabric API authentication +- Fabric workspace access (Contributor role) + +## Setup — Connect the MCP Server + +This plugin requires the **Fabric Data Agent MCP server** to provide the tools Copilot uses. Set it up in 3 steps: + +### 1. Clone the repo + +```bash +git clone https://github.com/harigouthami/fabric-copilot-plugins.git +cd fabric-copilot-plugins/fabric-data-agent-mcp +``` + +### 2. Run setup + +```powershell +.\setup.ps1 +``` + +This installs `uv` (if needed), verifies your Azure CLI login, and configures `.vscode/mcp.json` automatically. + +### 3. Reload VS Code + +`Ctrl+Shift+P` → **"Reload Window"** — the MCP server tools will appear in Copilot Chat. + +## Example Usage + +``` +You: Create a data agent called ADOWIA in A3PInsights workspace +Copilot: ✅ Created. Which lakehouse to connect? + +You: External +Copilot: ✅ Connected. Found 4 schemas, 64 tables. Which tables? + +You: The tca_adowia* tables from TCA schema +Copilot: ✅ 7 tables selected and verified. + +You: [pastes Git repo URL with semantic model] +Copilot: [generates instructions from TMDL files, validates SQL, adds few-shots] + ✅ Published. 
Testing: "total time saved" → 7,496.5 hours +``` + +## Key Features + +- **SQL validation**: Every few-shot query is tested against the database before adding +- **Knowledge from Git**: Fetches TMDL files from ADO repos to auto-generate instructions +- **Accuracy testing**: CSV-based test runner with configurable tolerance +- **Tune loop**: Reproduce → Diagnose → Fix → Publish → Re-test in one conversation diff --git a/skills/fabric-data-agent-create/SKILL.md b/skills/fabric-data-agent-create/SKILL.md new file mode 100644 index 000000000..fd00d9ba3 --- /dev/null +++ b/skills/fabric-data-agent-create/SKILL.md @@ -0,0 +1,43 @@ +--- +name: fabric-data-agent-create +description: "Step-by-step skill for creating a Microsoft Fabric Data Agent end-to-end — connect lakehouse, select tables, write instructions from semantic models, add validated few-shots, publish and test" +--- + +# Create Fabric Data Agent + +Guide a user through creating a new Fabric Data Agent from scratch using MCP tools. + +## Prerequisites + +- Azure CLI authenticated (`az login`) +- Fabric workspace access (Contributor role) +- MCP server for Fabric Data Agents connected + +## Steps + +1. **Ask the user** for agent name and workspace +2. **Create** the agent using the MCP create tool +3. **List lakehouses** in the workspace — ask which to connect +4. **Connect datasource** — connects the lakehouse and discovers the schema +5. **List schemas and tables** — ask which tables to select +6. **Select tables** — use the safe GET→modify→PUT approach; verify selection shows non-zero table count +7. **Ask about knowledge base** — does the user have a semantic model, TMDL files, or SQL views in a Git repo? + - If yes: fetch from Git, parse table definitions, column names, measures, and relationships + - Use the knowledge to draft domain-specific instructions with exact column names and data types +8. **Show instructions to user** for approval before applying +9. 
**Generate few-shots** — create Q→SQL pairs from the domain knowledge + - Query `INFORMATION_SCHEMA.COLUMNS` to validate column names + - Run each SQL query against the database to confirm it executes + - Only add validated queries as few-shots +10. **Publish** the agent +11. **Test** with a sample question — show the SQL and answer + +## Key Rules + +- Never invent column names — always discover from INFORMATION_SCHEMA +- Always validate SQL before adding as few-shots +- Ask, don't assume — require user input at steps 1, 3, 5, 7, and 11 +- Print instructions and few-shots for user review before applying +- Verify table selection after every select operation +- Use `LOWER()` for case-insensitive string matching +- Default to last month when no date range is specified diff --git a/skills/fabric-data-agent-test/SKILL.md b/skills/fabric-data-agent-test/SKILL.md new file mode 100644 index 000000000..33a1325f8 --- /dev/null +++ b/skills/fabric-data-agent-test/SKILL.md @@ -0,0 +1,46 @@ +--- +name: fabric-data-agent-test +description: "CSV-based accuracy testing for Fabric Data Agents — run test prompts, compare agent answers against expected values, report pass/fail with tolerance" +--- + +# Test Fabric Data Agent Accuracy + +Run automated accuracy tests against a Fabric Data Agent using a CSV file with prompts and expected answers. + +## Prerequisites + +- A published Fabric Data Agent +- A CSV file with columns: `Number`, `Prompt`, `ExpectedAnswer` + +## CSV Format + +```csv +Number,Prompt,ExpectedAnswer +1,Total time savings in March 2026,7500 +2,Total unique users in March 2026,3081 +3,Total refinements in March 2026,577 +``` + +## Steps + +1. **Ask the user** for the workspace, agent, and CSV file path +2. **Verify the agent** is published and has tables selected (call get_agent_config) +3. 
**Run the accuracy test** — for each row in the CSV: +   - Query the agent with the prompt +   - Extract numeric values from the response +   - Compare against expected answer with tolerance (default 5%) +   - Record pass/fail and percentage difference +4. **Report results** — show overall accuracy and per-question details +5. If failures exist, **offer to diagnose** — reproduce the failing query, show the SQL, identify the root cause + +## Interpreting Results + +- **PASS**: Extracted number is within tolerance of expected value +- **FAIL**: Number is outside tolerance, or no number could be extracted +- **Diff%**: Percentage difference between expected and actual values + +## Follow-Up Actions + +- For failing queries: use the `fabric-data-agent-tune` skill to fix +- For passing queries with high diff: consider tightening instructions +- Re-run after fixes to confirm improvement diff --git a/skills/fabric-data-agent-tune/SKILL.md b/skills/fabric-data-agent-tune/SKILL.md new file mode 100644 index 000000000..bddfef907 --- /dev/null +++ b/skills/fabric-data-agent-tune/SKILL.md @@ -0,0 +1,47 @@ +--- +name: fabric-data-agent-tune +description: "Diagnose and fix failing queries on Fabric Data Agents — reproduce issues, identify root causes, add corrective few-shots, re-publish and verify" +--- + +# Tune Fabric Data Agent + +Improve a Fabric Data Agent's accuracy by diagnosing failing queries and applying targeted fixes. + +## Prerequisites + +- A published Fabric Data Agent +- A question the agent answers incorrectly +- The expected correct answer + +## Steps + +1. **Get current config** — check instructions, table selection, and few-shot count +2. **Reproduce the issue** — query the agent with the failing question, show the generated SQL +3. 
**Diagnose the root cause**: + - **Case sensitivity**: SQL endpoint may be case-sensitive; agent uses wrong casing + - **Missing date filter**: Agent doesn't scope to a time period + - **Wrong table**: Agent queries the wrong table for the metric + - **Missing few-shot**: No similar example exists to guide the agent + - **Instruction gap**: Instructions don't cover this pattern +4. **Apply the fix**: + - Bad SQL pattern → add a corrective few-shot with validated SQL + - Missing context → append to instructions + - Wrong tables → add missing tables with select_tables +5. **Publish** the updated agent +6. **Re-test** the same question to confirm the fix + +## Common Fixes + +| Symptom | Root Cause | Fix | +|---------|-----------|-----| +| "No data found" | Case-sensitive string match | Add `LOWER()` instruction + few-shot | +| Wrong number | No date filter | Add default date range instruction | +| Completely wrong answer | Wrong table | Add corrective few-shot with correct table | +| Partial answer | Missing aggregation source | Add instruction listing all relevant tables | + +## Key Rules + +- Always show the SQL so the user understands what went wrong +- Validate corrective SQL before adding as a few-shot +- Re-test after every fix to confirm improvement +- One fix at a time to isolate what works