diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 000000000..9d4fbe2a0
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,5 @@
+[report]
+# Glob pattern(s) of files to omit from the report.
+omit =
+ */devops/compose/*
+ */helpers/test/outcomes/*/tmp.scratch/*
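+# E.g., generated compose files like `devops/compose/docker-compose.yml` and
+# per-test scratch dirs like `helpers/test/outcomes/<TestName>/tmp.scratch/`
+# (the `<TestName>` segment is illustrative) are dropped from coverage.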
diff --git a/.github/gh_requirements.txt b/.github/gh_requirements.txt
index b6eb64ad6..6a8e6997b 100644
--- a/.github/gh_requirements.txt
+++ b/.github/gh_requirements.txt
@@ -1,4 +1,5 @@
+boto3 >= 1.20.17
+coverage
invoke
-tqdm
s3fs
-boto3 >= 1.20.17
+tqdm
diff --git a/.github/workflows/coverage_tests.yml b/.github/workflows/coverage_tests.yml
new file mode 100644
index 000000000..1f6e9f2ae
--- /dev/null
+++ b/.github/workflows/coverage_tests.yml
@@ -0,0 +1,173 @@
+name: Test coverage
+
+on:
+ workflow_dispatch: {}
+  # Every day at 00:00 UTC.
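+  # Cron fields: minute hour day-of-month month day-of-week.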
+ schedule:
+ - cron: '0 0 * * *'
+
+env:
+ CSFY_CI: true
+
+permissions:
+ # Required to authenticate and retrieve temporary AWS credentials via OIDC.
+ id-token: write
+ # Required to fetch and check out code from the repository.
+ contents: read
+ # Required to authenticate and pull Docker images from GitHub Container Registry (GHCR).
+ packages: read
+
+jobs:
+ run_test_coverage:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Configure AWS credentials
+ uses: aws-actions/configure-aws-credentials@v1
+ with:
+ role-to-assume: ${{ vars.GH_ACTION_AWS_ROLE_ARN }}
+ role-session-name: ${{ vars.GH_ACTION_AWS_SESSION_NAME }}
+ aws-region: ${{ vars.CSFY_AWS_DEFAULT_REGION }}
+
+ - name: Login to GHCR
+        run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u gpsaggese --password-stdin
+
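+      # Make the workspace writable: earlier Docker-based runs may have left
+      # root-owned files that would otherwise block checkout.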
+ - name: Cleanup
+ run: sudo chmod 777 -R .
+
+ - name: Checkout code
+ uses: actions/checkout@v3
+ with:
+ submodules: recursive
+ fetch-depth: 0
+ token: ${{ secrets.GITHUB_TOKEN }}
+
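+      # Make both the repo root and the `helpers` directory importable.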
+ - name: Update PYTHONPATH
+ run: echo "PYTHONPATH=.:helpers" >> $GITHUB_ENV
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r .github/gh_requirements.txt
+
+ - name: Pull image from GHCR
+ run: docker pull ghcr.io/${{ github.repository }}:dev
+
+ # Only on scheduled runs, capture ISO weekday (1=Mon … 7=Sun).
+ - name: Set DAY_OF_WEEK
+ if: github.event_name == 'schedule'
+ run: echo "DAY_OF_WEEK=$(date -u +'%u')" >> $GITHUB_ENV
+
+      # This step normally generates the fast test coverage via the invoke
+      # task; for now it simulates a failure, and the real command is kept
+      # commented out below.
+ - name: Run Fast test and generate report
+ id: run_fast
+ continue-on-error: true
+ run: |
+ echo "Simulating fast test failure"
+ exit 1
+ # env:
+ # GH_ACTION_ACCESS_TOKEN: ${{ secrets.GH_ACTION_ACCESS_TOKEN }}
+ # CSFY_AWS_ACCESS_KEY_ID: ${{ env.AWS_ACCESS_KEY_ID }}
+ # CSFY_AWS_SECRET_ACCESS_KEY: ${{ env.AWS_SECRET_ACCESS_KEY }}
+ # CSFY_AWS_SESSION_TOKEN: ${{ env.AWS_SESSION_TOKEN }}
+ # CSFY_AWS_DEFAULT_REGION: ${{ env.AWS_DEFAULT_REGION }}
+ # CSFY_ECR_BASE_PATH: ghcr.io/${{ github.repository_owner }}
+ # CSFY_AWS_S3_BUCKET: ${{ vars.CSFY_AWS_S3_BUCKET }}
+ # run: invoke run_coverage --suite fast
+
+ - name: Upload Fast Test Coverage to Codecov
+ id: upload_fast
+        # Only upload if the fast test run step succeeded (i.e., a report was
+        # generated); a failed run doesn't produce a coverage report, so there
+        # is nothing to upload.
+ if: steps.run_fast.outcome == 'success'
+ continue-on-error: true
+ uses: codecov/codecov-action@v5
+ with:
+ token: ${{ secrets.CODECOV_TOKEN }}
+ files: ./coverage.xml
+ # Specify the Codecov flag name associated with this test suite.
+ # Required to separate coverage reports by type (e.g., fast, slow, superslow) inside the Codecov UI.
+ flags: fast
+ name: fast-test-coverage
+
+ - name: Run Slow test and generate report
+ id: run_slow
+ continue-on-error: true
+ env:
+ GH_ACTION_ACCESS_TOKEN: ${{ secrets.GH_ACTION_ACCESS_TOKEN }}
+ CSFY_AWS_ACCESS_KEY_ID: ${{ env.AWS_ACCESS_KEY_ID }}
+ CSFY_AWS_SECRET_ACCESS_KEY: ${{ env.AWS_SECRET_ACCESS_KEY }}
+ CSFY_AWS_SESSION_TOKEN: ${{ env.AWS_SESSION_TOKEN }}
+ CSFY_AWS_DEFAULT_REGION: ${{ env.AWS_DEFAULT_REGION }}
+ CSFY_ECR_BASE_PATH: ghcr.io/${{ github.repository_owner }}
+ CSFY_AWS_S3_BUCKET: ${{ vars.CSFY_AWS_S3_BUCKET }}
+ run: invoke run_coverage --suite slow
+
+ - name: Upload Slow Test Coverage to Codecov
+ id: upload_slow
+        # Only upload if the slow test run step succeeded (i.e., a report was generated).
+ if: steps.run_slow.outcome == 'success'
+ continue-on-error: true
+ uses: codecov/codecov-action@v5
+ with:
+ token: ${{ secrets.CODECOV_TOKEN }}
+ files: ./coverage.xml
+ flags: slow
+ name: slow-test-coverage
+
+      - name: Run Superslow test and generate report
+        id: run_superslow
+ # Run only on scheduled jobs or manual trigger
+ if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
+ env:
+ GH_ACTION_ACCESS_TOKEN: ${{ secrets.GH_ACTION_ACCESS_TOKEN }}
+ CSFY_AWS_ACCESS_KEY_ID: ${{ env.AWS_ACCESS_KEY_ID }}
+ CSFY_AWS_SECRET_ACCESS_KEY: ${{ env.AWS_SECRET_ACCESS_KEY }}
+ CSFY_AWS_SESSION_TOKEN: ${{ env.AWS_SESSION_TOKEN }}
+ CSFY_AWS_DEFAULT_REGION: ${{ env.AWS_DEFAULT_REGION }}
+ CSFY_ECR_BASE_PATH: ghcr.io/${{ github.repository_owner }}
+ CSFY_AWS_S3_BUCKET: ${{ vars.CSFY_AWS_S3_BUCKET }}
+ run: |
+          # Only run superslow tests if it's Monday (UTC) or a manual dispatch.
+          day_of_week="${DAY_OF_WEEK:-$(date -u +'%u')}"
+ if [ "$day_of_week" = "1" ] || [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
+ invoke run_coverage --suite superslow
+ else
+ echo "Skipping superslow tests — not Monday and not manually triggered"
+ exit 0
+ fi
+
+ - name: Upload Superslow Test Coverage to Codecov
+        # TODO(Shaunak): Consider removing this step when we turn this workflow into a reusable one.
+ if: steps.run_superslow.outcome == 'success'
+ uses: codecov/codecov-action@v5
+ with:
+ token: ${{ secrets.CODECOV_TOKEN }}
+ files: ./coverage.xml
+ flags: superslow
+ name: superslow-test-coverage
+
+      # Fail the job if any of the fast/slow run or upload steps above failed.
+ - name: Fail if fast/slow test or upload failed
+ run: |
+ failed=""
+ if [ "${{ steps.run_fast.outcome }}" != "success" ]; then
+ echo "Fast test run failed"
+ failed="true"
+ fi
+ if [ "${{ steps.upload_fast.outcome }}" != "success" ]; then
+ echo "Fast test coverage upload failed"
+ failed="true"
+ fi
+ if [ "${{ steps.run_slow.outcome }}" != "success" ]; then
+ echo "Slow test run failed"
+ failed="true"
+ fi
+ if [ "${{ steps.upload_slow.outcome }}" != "success" ]; then
+ echo "Slow test coverage upload failed"
+ failed="true"
+ fi
+ if [ "$failed" = "true" ]; then
+ echo "At least one fast/slow test or upload step failed."
+ exit 1
+ fi
diff --git a/dev_scripts_helpers/update_devops_packages/notebooks/Master_buildmeister_dashboard.ipynb b/dev_scripts_helpers/update_devops_packages/notebooks/Master_buildmeister_dashboard.ipynb
index 091e2595e..48f2b4c28 100644
--- a/dev_scripts_helpers/update_devops_packages/notebooks/Master_buildmeister_dashboard.ipynb
+++ b/dev_scripts_helpers/update_devops_packages/notebooks/Master_buildmeister_dashboard.ipynb
@@ -1,338 +1,377 @@
{
- "cells": [
- {
- "cell_type": "markdown",
- "id": "e3103ff3",
- "metadata": {},
- "source": [
- " TODO(Grisha): does it belong to the `devops` dir?"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "9d992fed",
- "metadata": {},
- "source": [
- "# Description"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "3e381a7d",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-02-02T08:02:05.889049Z",
- "start_time": "2024-02-02T08:02:05.883420Z"
- }
- },
- "source": [
- "The notebook reports the latest build status for multiple repos."
- ]
- },
- {
- "cell_type": "markdown",
- "id": "982f47f1",
- "metadata": {},
- "source": [
- "# Imports"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "97bbec36",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-02-07T17:59:42.038091Z",
- "start_time": "2024-02-07T17:59:42.002068Z"
- }
- },
- "outputs": [],
- "source": [
- "%load_ext autoreload\n",
- "%autoreload 2\n",
- "%matplotlib inline"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "518df056",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-02-07T17:59:42.078514Z",
- "start_time": "2024-02-07T17:59:42.041301Z"
- }
- },
- "outputs": [],
- "source": [
- "import logging\n",
- "from typing import Dict\n",
- "\n",
- "import pandas as pd\n",
- "from IPython.display import Markdown, display\n",
- "\n",
- "import helpers.hdbg as hdbg\n",
- "import helpers.henv as henv\n",
- "import helpers.hpandas as hpandas\n",
- "import helpers.hprint as hprint\n",
- "import helpers.lib_tasks_gh as hlitagh"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "f0793aa5",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-02-07T17:59:42.268049Z",
- "start_time": "2024-02-07T17:59:42.081426Z"
- }
- },
- "outputs": [],
- "source": [
- "hdbg.init_logger(verbosity=logging.INFO)\n",
- "_LOG = logging.getLogger(__name__)\n",
- "_LOG.info(\"%s\", henv.get_system_signature()[0])\n",
- "hprint.config_notebook()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "93c2d39f",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-02-07T17:59:42.338614Z",
- "start_time": "2024-02-07T17:59:42.271472Z"
- }
- },
- "outputs": [],
- "source": [
- "# Set the display options to print the full table.\n",
- "pd.set_option(\"display.max_colwidth\", None)\n",
- "pd.set_option(\"display.max_columns\", None)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "14f379d5",
- "metadata": {
- "lines_to_next_cell": 2
- },
- "source": [
- "# Utils"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "1f41a8dd",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-02-07T17:59:42.380319Z",
- "start_time": "2024-02-07T17:59:42.343492Z"
- }
- },
- "outputs": [],
- "source": [
- "def make_clickable(url: str) -> str:\n",
- " \"\"\"\n",
- " Wrapper to make the URL value clickable.\n",
- "\n",
- " :param url: URL value to convert\n",
- " :return: clickable URL link\n",
- " \"\"\"\n",
- " return f'{url}'\n",
- "\n",
- "\n",
- "def color_format(val: str, status_color_mapping: Dict[str, str]) -> str:\n",
- " \"\"\"\n",
- " Return the color depends on status.\n",
- "\n",
- " :param val: value of the status e.g. `failure`\n",
- " :param status_color_mapping: mapping statuses to the colors e.g.:\n",
- " ```\n",
- " {\n",
- " \"success\": \"green\",\n",
- " \"failure\": \"red\",\n",
- " }\n",
- " ```\n",
- " \"\"\"\n",
- " if val in status_color_mapping:\n",
- " color = status_color_mapping[val]\n",
- " else:\n",
- " color = \"grey\"\n",
- " return f\"background-color: {color}\""
- ]
- },
- {
- "cell_type": "markdown",
- "id": "189f2c75",
- "metadata": {},
- "source": [
- "# GH workflows state"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "865bc9f2",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-02-07T17:59:57.513155Z",
- "start_time": "2024-02-07T17:59:42.383039Z"
- }
- },
- "outputs": [],
- "source": [
- "repo_list = [\n",
- " \"cryptokaizen/cmamp\",\n",
- " \"cryptokaizen/orange\",\n",
- " \"cryptokaizen/lemonade\",\n",
- " \"causify-ai/kaizenflow\",\n",
- "]\n",
- "workflow_df = hlitagh.gh_get_details_for_all_workflows(repo_list)\n",
- "# Reorder columns.\n",
- "columns_order = [\"repo_name\", \"workflow_name\", \"conclusion\", \"url\"]\n",
- "workflow_df = workflow_df[columns_order]\n",
- "# Make URL values clickable.\n",
- "workflow_df[\"url\"] = workflow_df[\"url\"].apply(make_clickable)\n",
- "_LOG.info(hpandas.df_to_str(workflow_df, log_level=logging.INFO))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "f7e999ce",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-02-07T17:59:57.585606Z",
- "start_time": "2024-02-07T17:59:57.515915Z"
- }
- },
- "outputs": [],
- "source": [
- "status_color_mapping = {\n",
- " \"success\": \"green\",\n",
- " \"failure\": \"red\",\n",
- "}\n",
- "repos = workflow_df[\"repo_name\"].unique()\n",
- "display(Markdown(\"## Overall Status\"))\n",
- "current_timestamp = pd.Timestamp.now(tz=\"America/New_York\")\n",
- "display(Markdown(f\"**Last run: {current_timestamp}**\"))\n",
- "for repo in repos:\n",
- " # Calculate the overall status.\n",
- " repo_df = workflow_df[workflow_df[\"repo_name\"] == repo]\n",
- " overall_status = hlitagh.gh_get_overall_build_status_for_repo(repo_df)\n",
- " display(Markdown(f\"## {repo}: {overall_status}\"))\n",
- " repo_df = repo_df.drop(columns=[\"repo_name\"])\n",
- " display(\n",
- " repo_df.style.map(\n",
- " color_format,\n",
- " status_color_mapping=status_color_mapping,\n",
- " subset=[\"conclusion\"],\n",
- " )\n",
- " )"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "a00870a9",
- "metadata": {},
- "source": [
- "# Allure reports"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "36e93fca",
- "metadata": {},
- "source": [
- "- fast tests: http://172.30.2.44/allure_reports/cmamp/fast/latest/index.html\n",
- "- slow tests: http://172.30.2.44/allure_reports/cmamp/slow/latest/index.html\n",
- "- superslow tests: http://172.30.2.44/allure_reports/cmamp/superslow/latest/index.html"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "bb8ed505",
- "metadata": {},
- "source": [
- "# Number of open pull requests"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "69dbda1d",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-02-07T17:59:59.309022Z",
- "start_time": "2024-02-07T17:59:57.588291Z"
- }
- },
- "outputs": [],
- "source": [
- "for repo in repo_list:\n",
- " number_prs = len(hlitagh.gh_get_open_prs(repo))\n",
- " _LOG.info(\"%s: %s\", repo, number_prs)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "ec63cb5e",
- "metadata": {},
- "source": [
- "# Code coverage HTML-page"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "569f9404",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-01-24T14:40:31.379819Z",
- "start_time": "2024-01-24T14:40:31.327151Z"
- }
- },
- "source": [
- "http://172.30.2.44/html_coverage/runner_master/"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.12.3"
- },
- "toc": {
- "base_numbering": 1,
- "nav_menu": {},
- "number_sections": true,
- "sideBar": true,
- "skip_h1_title": false,
- "title_cell": "Table of Contents",
- "title_sidebar": "Contents",
- "toc_cell": false,
- "toc_position": {},
- "toc_section_display": true,
- "toc_window_display": false
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "CONTENTS:\n",
+ "- [Description](#description)\n",
+ "- [Imports](#imports)\n",
+ "- [Utils](#utils)\n",
+ "- [GH workflows state](#gh-workflows-state)\n",
+ "- [Allure reports](#allure-reports)\n",
+ "- [Number of open pull requests](#number-of-open-pull-requests)\n",
+ "- [Code coverage HTML-page](#code-coverage-html-page)\n",
+ "- [Code Coverage Page - CodeCov](#code-coverage-page---codecov)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "e3103ff3",
+ "metadata": {},
+ "source": [
+ " TODO(Grisha): does it belong to the `devops` dir?"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9d992fed",
+ "metadata": {},
+ "source": [
+ "\n",
+ "# Description"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "3e381a7d",
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2024-02-02T08:02:05.889049Z",
+ "start_time": "2024-02-02T08:02:05.883420Z"
+ }
+ },
+ "source": [
+ "The notebook reports the latest build status for multiple repos."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "982f47f1",
+ "metadata": {},
+ "source": [
+ "\n",
+ "# Imports"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "97bbec36",
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2024-02-07T17:59:42.038091Z",
+ "start_time": "2024-02-07T17:59:42.002068Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "%load_ext autoreload\n",
+ "%autoreload 2\n",
+ "%matplotlib inline"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "518df056",
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2024-02-07T17:59:42.078514Z",
+ "start_time": "2024-02-07T17:59:42.041301Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "import logging\n",
+ "from typing import Dict\n",
+ "\n",
+ "import pandas as pd\n",
+ "from IPython.display import Markdown, display\n",
+ "\n",
+ "import helpers.hdbg as hdbg\n",
+ "import helpers.henv as henv\n",
+ "import helpers.hpandas as hpandas\n",
+ "import helpers.hprint as hprint\n",
+ "import helpers.lib_tasks_gh as hlitagh\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f0793aa5",
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2024-02-07T17:59:42.268049Z",
+ "start_time": "2024-02-07T17:59:42.081426Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "hdbg.init_logger(verbosity=logging.INFO)\n",
+ "_LOG = logging.getLogger(__name__)\n",
+ "_LOG.info(\"%s\", henv.get_system_signature()[0])\n",
+ "hprint.config_notebook()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "93c2d39f",
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2024-02-07T17:59:42.338614Z",
+ "start_time": "2024-02-07T17:59:42.271472Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "# Set the display options to print the full table.\n",
+ "pd.set_option(\"display.max_colwidth\", None)\n",
+ "pd.set_option(\"display.max_columns\", None)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "14f379d5",
+ "metadata": {
+ "lines_to_next_cell": 2
+ },
+ "source": [
+ "\n",
+ "# Utils"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1f41a8dd",
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2024-02-07T17:59:42.380319Z",
+ "start_time": "2024-02-07T17:59:42.343492Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "def make_clickable(url: str) -> str:\n",
+ " \"\"\"\n",
+ " Wrapper to make the URL value clickable.\n",
+ "\n",
+ " :param url: URL value to convert\n",
+ " :return: clickable URL link\n",
+ " \"\"\"\n",
+ " return f'{url}'\n",
+ "\n",
+ "\n",
+ "def color_format(val: str, status_color_mapping: Dict[str, str]) -> str:\n",
+ " \"\"\"\n",
+ " Return the color depends on status.\n",
+ "\n",
+ " :param val: value of the status e.g. `failure`\n",
+ " :param status_color_mapping: mapping statuses to the colors e.g.:\n",
+ " ```\n",
+ " {\n",
+ " \"success\": \"green\",\n",
+ " \"failure\": \"red\",\n",
+ " }\n",
+ " ```\n",
+ " \"\"\"\n",
+ " if val in status_color_mapping:\n",
+ " color = status_color_mapping[val]\n",
+ " else:\n",
+ " color = \"grey\"\n",
+ " return f\"background-color: {color}\""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "189f2c75",
+ "metadata": {},
+ "source": [
+ "\n",
+ "# GH workflows state"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "865bc9f2",
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2024-02-07T17:59:57.513155Z",
+ "start_time": "2024-02-07T17:59:42.383039Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "repo_list = [\n",
+ " \"cryptokaizen/cmamp\",\n",
+ " \"cryptokaizen/orange\",\n",
+ " \"cryptokaizen/lemonade\",\n",
+ " \"causify-ai/kaizenflow\",\n",
+ "]\n",
+ "workflow_df = hlitagh.gh_get_details_for_all_workflows(repo_list)\n",
+ "# Reorder columns.\n",
+ "columns_order = [\"repo_name\", \"workflow_name\", \"conclusion\", \"url\"]\n",
+ "workflow_df = workflow_df[columns_order]\n",
+ "# Make URL values clickable.\n",
+ "workflow_df[\"url\"] = workflow_df[\"url\"].apply(make_clickable)\n",
+ "_LOG.info(hpandas.df_to_str(workflow_df, log_level=logging.INFO))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f7e999ce",
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2024-02-07T17:59:57.585606Z",
+ "start_time": "2024-02-07T17:59:57.515915Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "status_color_mapping = {\n",
+ " \"success\": \"green\",\n",
+ " \"failure\": \"red\",\n",
+ "}\n",
+ "repos = workflow_df[\"repo_name\"].unique()\n",
+ "display(Markdown(\"## Overall Status\"))\n",
+ "current_timestamp = pd.Timestamp.now(tz=\"America/New_York\")\n",
+ "display(Markdown(f\"**Last run: {current_timestamp}**\"))\n",
+ "for repo in repos:\n",
+ " # Calculate the overall status.\n",
+ " repo_df = workflow_df[workflow_df[\"repo_name\"] == repo]\n",
+ " overall_status = hlitagh.gh_get_overall_build_status_for_repo(repo_df)\n",
+ " display(Markdown(f\"## {repo}: {overall_status}\"))\n",
+ " repo_df = repo_df.drop(columns=[\"repo_name\"])\n",
+ " display(\n",
+ " repo_df.style.map(\n",
+ " color_format,\n",
+ " status_color_mapping=status_color_mapping,\n",
+ " subset=[\"conclusion\"],\n",
+ " )\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a00870a9",
+ "metadata": {},
+ "source": [
+ "\n",
+ "# Allure reports"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "36e93fca",
+ "metadata": {},
+ "source": [
+ "- fast tests: http://172.30.2.44/allure_reports/cmamp/fast/latest/index.html\n",
+ "- slow tests: http://172.30.2.44/allure_reports/cmamp/slow/latest/index.html\n",
+ "- superslow tests: http://172.30.2.44/allure_reports/cmamp/superslow/latest/index.html"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "bb8ed505",
+ "metadata": {},
+ "source": [
+ "\n",
+ "# Number of open pull requests"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "69dbda1d",
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2024-02-07T17:59:59.309022Z",
+ "start_time": "2024-02-07T17:59:57.588291Z"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "for repo in repo_list:\n",
+ " number_prs = len(hlitagh.gh_get_open_prs(repo))\n",
+ " _LOG.info(\"%s: %s\", repo, number_prs)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "ec63cb5e",
+ "metadata": {},
+ "source": [
+ "\n",
+ "# Code coverage HTML-page"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "569f9404",
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2024-01-24T14:40:31.379819Z",
+ "start_time": "2024-01-24T14:40:31.327151Z"
+ }
+ },
+ "source": [
+ "http://172.30.2.44/html_coverage/runner_master/"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "027d1b3d",
+ "metadata": {},
+ "source": [
+ "\n",
+ "# Code Coverage Page - CodeCov"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "6c9219e1",
+ "metadata": {},
+ "source": [
+ "- Helpers: https://app.codecov.io/gh/causify-ai/helpers"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.3"
+ },
+ "toc": {
+ "base_numbering": 1,
+ "nav_menu": {},
+ "number_sections": true,
+ "sideBar": true,
+ "skip_h1_title": false,
+ "title_cell": "Table of Contents",
+ "title_sidebar": "Contents",
+ "toc_cell": false,
+ "toc_position": {},
+ "toc_section_display": true,
+ "toc_window_display": false
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
}
diff --git a/dev_scripts_helpers/update_devops_packages/notebooks/Master_buildmeister_dashboard.py b/dev_scripts_helpers/update_devops_packages/notebooks/Master_buildmeister_dashboard.py
index c7940be98..d0095209c 100644
--- a/dev_scripts_helpers/update_devops_packages/notebooks/Master_buildmeister_dashboard.py
+++ b/dev_scripts_helpers/update_devops_packages/notebooks/Master_buildmeister_dashboard.py
@@ -3,33 +3,40 @@
# jupytext:
# text_representation:
# extension: .py
-# format_name: percent
-# format_version: '1.3'
-# jupytext_version: 1.15.2
+# format_name: light
+# format_version: '1.5'
+# jupytext_version: 1.16.7
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
-# %% [markdown]
+# CONTENTS:
+# - [Description](#description)
+# - [Imports](#imports)
+# - [Utils](#utils)
+# - [GH workflows state](#gh-workflows-state)
+# - [Allure reports](#allure-reports)
+# - [Number of open pull requests](#number-of-open-pull-requests)
+# - [Code coverage HTML-page](#code-coverage-html-page)
+# - [Code Coverage Page - CodeCov](#code-coverage-page---codecov)
+
# TODO(Grisha): does it belong to the `devops` dir?
-# %% [markdown]
+#
# # Description
-# %% [markdown]
# The notebook reports the latest build status for multiple repos.
-# %% [markdown]
+#
# # Imports
-# %%
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
-# %%
+# +
import logging
from typing import Dict
@@ -42,22 +49,22 @@
import helpers.hprint as hprint
import helpers.lib_tasks_gh as hlitagh
-# %%
+# -
+
hdbg.init_logger(verbosity=logging.INFO)
_LOG = logging.getLogger(__name__)
_LOG.info("%s", henv.get_system_signature()[0])
hprint.config_notebook()
-# %%
# Set the display options to print the full table.
pd.set_option("display.max_colwidth", None)
pd.set_option("display.max_columns", None)
-# %% [markdown]
+#
# # Utils
-# %%
+# +
def make_clickable(url: str) -> str:
"""
Wrapper to make the URL value clickable.
@@ -88,10 +95,11 @@ def color_format(val: str, status_color_mapping: Dict[str, str]) -> str:
return f"background-color: {color}"
-# %% [markdown]
+# -
+
+#
# # GH workflows state
-# %%
repo_list = [
"cryptokaizen/cmamp",
"cryptokaizen/orange",
@@ -106,7 +114,6 @@ def color_format(val: str, status_color_mapping: Dict[str, str]) -> str:
workflow_df["url"] = workflow_df["url"].apply(make_clickable)
_LOG.info(hpandas.df_to_str(workflow_df, log_level=logging.INFO))
-# %%
status_color_mapping = {
"success": "green",
"failure": "red",
@@ -129,24 +136,26 @@ def color_format(val: str, status_color_mapping: Dict[str, str]) -> str:
)
)
-# %% [markdown]
+#
# # Allure reports
-# %% [markdown]
# - fast tests: http://172.30.2.44/allure_reports/cmamp/fast/latest/index.html
# - slow tests: http://172.30.2.44/allure_reports/cmamp/slow/latest/index.html
# - superslow tests: http://172.30.2.44/allure_reports/cmamp/superslow/latest/index.html
-# %% [markdown]
+#
# # Number of open pull requests
-# %%
for repo in repo_list:
number_prs = len(hlitagh.gh_get_open_prs(repo))
_LOG.info("%s: %s", repo, number_prs)
-# %% [markdown]
+#
# # Code coverage HTML-page
-# %% [markdown]
# http://172.30.2.44/html_coverage/runner_master/
+
+#
+# # Code Coverage Page - CodeCov
+
+# - Helpers: https://app.codecov.io/gh/causify-ai/helpers
diff --git a/helpers/lib_tasks_pytest.py b/helpers/lib_tasks_pytest.py
index 3ad73449a..6c9047be9 100644
--- a/helpers/lib_tasks_pytest.py
+++ b/helpers/lib_tasks_pytest.py
@@ -790,7 +790,7 @@ def _publish_html_coverage_report_on_s3(aws_profile: str) -> None:
def run_coverage_report( # type: ignore
ctx,
target_dir,
- generate_html_report=True,
+ generate_html_report=False,
publish_html_on_s3=True,
aws_profile="ck",
):
@@ -874,6 +874,111 @@ def run_coverage_report( # type: ignore
_publish_html_coverage_report_on_s3(aws_profile)
+def _get_inclusion_settings(target_dir: str) -> Tuple[str, Optional[str]]:
+ """
+    Determine the include/omit glob patterns for the text and HTML coverage
+    reports.
+
+ :param target_dir: directory for coverage stats; use "." to indicate all directories
+ :return: glob pattern to include and a comma-separated glob pattern to omit
+
+    Examples:
+    1. Cover everything (no submodules to omit):
+    ```
+    _get_inclusion_settings(".")
+    # Returns ("*", "").
+    ```
+    2. Only cover code under a specific directory:
+    ```
+    _get_inclusion_settings("helpers")
+    # Returns ("*/helpers/*", None).
+    ```
+
+    Usage in `run_coverage`:
+    ```
+    # Entire repo coverage (e.g., the `helpers` project root):
+    include, omit = _get_inclusion_settings(".")
+    # coverage report --include=* --sort=Cover
+    # coverage html --include=* [--omit=submodule1/*,submodule2/*]
+    # Single-directory coverage:
+    include, omit = _get_inclusion_settings("helpers")
+    # coverage report --include=*/helpers/* --sort=Cover
+    # coverage html --include=*/helpers/* [--omit=...]
+    ```
+    """
+ if target_dir == ".":
+ include_in_report = "*"
+ exclude_from_report = ""
+ if hserver.skip_submodules_test():
+ submodule_paths: List[str] = hgit.get_submodule_paths()
+ exclude_from_report = ",".join(
+ f"{path}/*" for path in submodule_paths
+ )
+ else:
+ include_in_report = f"*/{target_dir}/*"
+ exclude_from_report = None
+ return include_in_report, exclude_from_report
+
+
+@task
+def run_coverage(
+ ctx, suite: str, target_dir: str = ".", generate_html_report: bool = False
+):
+ """
+ Unified task to run coverage for any test suite.
+
+ :param ctx: invoke context
+ :param suite: suite to run ("fast", "slow", "superslow")
+ :param target_dir: directory to measure coverage
+ """
+ hdbg.dassert_in(suite, ("fast", "slow", "superslow"))
+ # Build the command line.
+ test_cmd_parts = [
+ # Invoke the "_tests" task.
+ "invoke",
+ f"run_{suite}_tests",
+ # Enable coverage computation.
+ "--coverage",
+ # Specify which directory to test.
+ "-p",
+ target_dir,
+ ]
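+    # E.g., for the fast suite this builds `invoke run_fast_tests --coverage -p .`.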
+ test_cmd = hlitauti.to_multi_line_cmd(test_cmd_parts)
+ # Run the tests under coverage.
+ hlitauti.run(ctx, test_cmd, use_system=False)
+ hdbg.dassert_file_exists(".coverage")
+ # Compute which files/dirs to include and omit in the report.
+ include_in_report, exclude_from_report = _get_inclusion_settings(target_dir)
+    report_cmd: List[str] = []
+ # Generate a text report, including only our target paths.
+ report_stats_cmd: str = (
+ f"coverage report --include={include_in_report} --sort=Cover"
+ )
+ if exclude_from_report:
+ report_stats_cmd += f" --omit={exclude_from_report}"
+ report_cmd.append(report_stats_cmd)
+ # Produce HTML output for interactive browsing.
+ if generate_html_report:
+ report_html_cmd: str = f"coverage html --include={include_in_report}"
+ if exclude_from_report:
+ report_html_cmd += f" --omit={exclude_from_report}"
+ report_cmd.append(report_html_cmd)
+    # Export XML coverage report to integrate with Codecov.
+    report_cmd.append("coverage xml -o coverage.xml")
+    # Erase the intermediate `.coverage` data only after all reports are
+    # built; erasing it first would leave nothing to report.
+    report_cmd.append("coverage erase")
+ full_report_cmd: str = " && ".join(report_cmd)
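+    # E.g., a whole-repo run without HTML produces:
+    #   `coverage report --include=* --sort=Cover && coverage xml -o coverage.xml && coverage erase`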
+ docker_cmd_ = f"invoke docker_cmd --use-bash --cmd '{full_report_cmd}'"
+ # Execute the full coverage/Docker pipeline.
+ hlitauti.run(ctx, docker_cmd_)
+
+
# #############################################################################
# Traceback.
# #############################################################################
@@ -998,10 +1103,6 @@ def pytest_repro( # type: ignore
pytest helpers/test/test_cache.py::TestCachingOnS3_2
```
- "files": files with the failed tests, e.g.,
- ```
- pytest helpers/test/test_cache.py
- pytest helpers/test/test_lib_tasks.py
- ```
:param file_name: the name of the file containing the pytest output file to parse
:param show_stacktrace: whether to show the stacktrace of the failed tests
- only if it is available in the pytest output file
@@ -1151,10 +1252,6 @@ def pytest_rename_test(ctx, old_test_class_name, new_test_class_name): # type:
Rename the test and move its golden outcome.
E.g., to rename a test class and all the test methods:
- ```
- > i pytest_rename_test TestCacheUpdateFunction1 \
- TestCacheUpdateFunction_new
- ```
:param old_test_class_name: old class name
:param new_test_class_name: new class name
@@ -1385,9 +1482,6 @@ def pytest_buildmeister( # type: ignore
Run the regression tests.
- Run updating all the tests
- ```
- > pytest_buildmeister --pytest-opts "--update_outcomes"
- ```
:param docker_clean: remove all dead Docker instances
:param opts: options to pass to the invoke (e.g., `--version 1.2.0` to test
diff --git a/tasks.py b/tasks.py
index 86b2e0643..72f35fb6c 100644
--- a/tasks.py
+++ b/tasks.py
@@ -92,6 +92,7 @@
pytest_repro,
run_blank_tests,
+    run_coverage,
run_coverage_report,
run_fast_slow_superslow_tests,
run_fast_slow_tests,
run_fast_tests,