From 20ad56de68120ad539beeaeeac215f0437ce09fc Mon Sep 17 00:00:00 2001 From: dazzling-no-more <278675588+dazzling-no-more@users.noreply.github.com> Date: Thu, 30 Apr 2026 22:14:54 +0400 Subject: [PATCH] feat: added auto updater --- .github/workflows/release.yml | 202 +++- .gitignore | 1 + Cargo.lock | 93 +- Cargo.toml | 14 +- android/app/src/main/AndroidManifest.xml | 9 + .../main/java/com/therealaleph/mhrv/Native.kt | 18 +- .../com/therealaleph/mhrv/UpdateInstaller.kt | 214 ++++ .../com/therealaleph/mhrv/ui/HomeScreen.kt | 126 ++- .../app/src/main/res/values-fa/strings.xml | 9 + android/app/src/main/res/values/strings.xml | 9 + docs/maintainer/README.md | 1 + docs/maintainer/references/update-signing.md | 159 +++ src/android_jni.rs | 125 ++- src/bin/ui.rs | 221 +++- src/lib.rs | 6 + src/main.rs | 6 + src/update_apply.rs | 998 ++++++++++++++++++ src/update_check.rs | 70 +- 18 files changed, 2228 insertions(+), 53 deletions(-) create mode 100644 android/app/src/main/java/com/therealaleph/mhrv/UpdateInstaller.kt create mode 100644 docs/maintainer/references/update-signing.md create mode 100644 src/update_apply.rs diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8068b30..83793f7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -30,6 +30,31 @@ permissions: # well-scoped permissions block. packages: write +# Auto-updater public key, embedded into every release binary at compile +# time via `option_env!("MHRV_UPDATE_PUBKEY")` (see src/update_apply.rs). +# Read from the repo variable `MINISIGN_PUBLIC_KEY` — the bare base64 line +# from a `minisign -G` .pub file (the one *after* the `untrusted comment`). +# +# This env var is populated only when `MINISIGN_SIGNING_ENABLED == 'true'`. +# Empty/whitespace values are treated as "unset" by `src/update_apply.rs`: +# binaries log a runtime warning + apply updates without a sig check. 
When +# it IS non-empty, desktop and Android update flows refuse to apply an asset +# that doesn't have a matching `.minisig` next to it on the release page. +# +# To enable signed updates end-to-end: +# 1. Generate a keypair (one-time, offline): +# rsign generate -p mhrv-update.pub -s mhrv-update.key +# 2. `gh variable set MINISIGN_PUBLIC_KEY --body "$(tail -1 mhrv-update.pub)"` +# 3. `gh secret set MINISIGN_SECRET_KEY < mhrv-update.key` +# 4. Optional, for passphrased keys: +# `gh secret set MINISIGN_KEY_PASSWORD --body 'your-passphrase'` +# 5. `gh variable set MINISIGN_SIGNING_ENABLED --body true` +# +# Once those are set, the next tag push produces signed artifacts +# and the freshly-built binaries enforce verification on the next update. +env: + MHRV_UPDATE_PUBKEY: ${{ vars.MINISIGN_SIGNING_ENABLED == 'true' && vars.MINISIGN_PUBLIC_KEY || '' }} + # Runner strategy: # - Linux + Android + mipsel: self-hosted (mhrv-hetzner-*, Hetzner # 8-core / 31 GB Ubuntu 24.04 box with @@ -248,6 +273,7 @@ jobs: if: matrix.target == 'x86_64-unknown-linux-musl' run: | docker run --rm -v "$PWD":/src -w /src \ + -e MHRV_UPDATE_PUBKEY \ messense/rust-musl-cross:x86_64-musl \ cargo build --release --target x86_64-unknown-linux-musl --bin mhrv-rs sudo chown -R "$(id -u):$(id -g)" target @@ -256,6 +282,7 @@ jobs: if: matrix.target == 'aarch64-unknown-linux-musl' run: | docker run --rm -v "$PWD":/src -w /src \ + -e MHRV_UPDATE_PUBKEY \ messense/rust-musl-cross:aarch64-musl \ cargo build --release --target aarch64-unknown-linux-musl --bin mhrv-rs sudo chown -R "$(id -u):$(id -g)" target @@ -293,6 +320,7 @@ jobs: trap 'sudo chown -R "$(id -u):$(id -g)" target 2>/dev/null || true' EXIT docker run --rm -v "$PWD":/src -w /src \ -e RUSTFLAGS='-C target-feature=+soft-float' \ + -e MHRV_UPDATE_PUBKEY \ messense/rust-musl-cross:mipsel-musl \ bash -c ' set -eux @@ -514,6 +542,136 @@ jobs: path: dist/*.apk if-no-files-found: error + # Sign every release artifact with minisign — see 
src/update_apply.rs + # for the threat model. Produces `.minisig` files alongside the + # build artifacts so the auto-updater can verify provenance before + # swapping the running binary. + # + # Gracefully no-ops when `vars.MINISIGN_SIGNING_ENABLED != 'true'` so + # the workflow keeps shipping releases until the maintainer sets up + # the keypair (see workflow-level `env` block at the top for the + # one-time setup commands). + # + # Tool: rsign2 (Frank Denis, Rust port of minisign). Produces signatures + # binary-compatible with the OG `minisign` and verifiable by the + # `minisign-verify` crate the updater uses. Picked over apt-installing + # `minisign` because rsign2 cross-installs the same way on every runner + # we have (Linux self-hosted, Linux GH-hosted) via `cargo install`. + sign: + needs: [build, android] + if: ${{ vars.MINISIGN_SIGNING_ENABLED == 'true' }} + runs-on: ubuntu-latest + env: + RSIGN2_VERSION: 0.6.5 + permissions: + contents: read + steps: + - uses: actions/checkout@v4 + + - uses: dtolnay/rust-toolchain@stable + + # Cache the pinned `cargo install rsign2` output so we're not + # rebuilding it from scratch every release. The key includes the + # rsign2 version so tool upgrades invalidate the cache deliberately. + - uses: Swatinem/rust-cache@v2 + with: + key: rsign2-${{ env.RSIGN2_VERSION }}-stable + cache-bin: "false" + + - name: Install rsign2 + run: | + # `--version` pins the signing tool itself; `--locked` uses that + # crate release's Cargo.lock so transitive bumps are deliberate too. + cargo install --quiet --locked --version "${RSIGN2_VERSION}" rsign2 + + - name: Download all build artifacts + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + mkdir -p dist + # Same retry pattern as the `release` job — the artifacts API + # has been intermittently 5-retries-exhausted on this workflow, + # `gh run download` against the current run ID is more reliable. 
+ for attempt in 1 2 3; do + if gh run download "${GITHUB_RUN_ID}" --dir dist --repo "${GITHUB_REPOSITORY}"; then + echo "downloaded all artifacts on attempt $attempt" + # `gh run download` puts each artifact in its own subdir; + # flatten so the sign loop sees `dist/` directly. + find dist -type f -mindepth 2 -exec mv -f {} dist/ \; + find dist -type d -empty -delete + ls -la dist/ + exit 0 + fi + echo "download attempt $attempt failed; retrying in 30s..." + sleep 30 + done + echo "::error::failed to download artifacts after 3 attempts" + exit 1 + + - name: Sign artifacts + env: + # Whole secret-key file content (multi-line — the `untrusted + # comment` line plus the base64 key line). Pass it via env to + # avoid quoting issues inside a heredoc. + MINISIGN_SECRET_KEY: ${{ secrets.MINISIGN_SECRET_KEY }} + # rsign2 reads the key passphrase from RSIGN_PASSWORD when set + # (so we can sign non-interactively in CI). For passwordless + # keys (generated with `rsign generate -p ... -s ... -W`), an + # empty string here is correct. + RSIGN_PASSWORD: ${{ secrets.MINISIGN_KEY_PASSWORD }} + run: | + set -euo pipefail + if [ -z "${MHRV_UPDATE_PUBKEY:-}" ]; then + echo "::error::MINISIGN_SIGNING_ENABLED is true but MINISIGN_PUBLIC_KEY repo variable is empty" + exit 1 + fi + if [ -z "${MINISIGN_SECRET_KEY:-}" ]; then + echo "::error::MINISIGN_SIGNING_ENABLED is true but MINISIGN_SECRET_KEY secret is empty" + exit 1 + fi + # Write the key to a temp file. Use a strict umask so a stray + # `set -x` later doesn't expose it to other steps' logs (the + # file path is fine; the contents aren't). + umask 077 + KEY_FILE="$(mktemp -t mhrv-sign-XXXXXX.key)" + # `printf` rather than `echo`: preserves the secret body without + # shell-specific escapes, while ensuring the file ends with a + # newline for tools that expect line-oriented minisign keys. + printf '%s\n' "${MINISIGN_SECRET_KEY}" > "${KEY_FILE}" + + # Trap to wipe the key on any exit (success, failure, signal). 
+ trap 'rm -f "${KEY_FILE}"' EXIT + + shopt -s nullglob + signed=0 + for f in dist/*.tar.gz dist/*.zip dist/*.apk; do + [ -f "$f" ] || continue + echo "::group::sign $(basename "$f")" + # `-W` = no password prompt (read from RSIGN_PASSWORD env) + # `-x ` = write the signature to a specific path so it + # lands as `.minisig` instead of rsign's default + # next-to-binary location. + rsign sign \ + -W \ + -s "${KEY_FILE}" \ + -x "${f}.minisig" \ + "$f" + ls -la "${f}.minisig" + echo "::endgroup::" + signed=$((signed + 1)) + done + echo "signed ${signed} artifacts" + if [ "${signed}" -eq 0 ]; then + echo "::warning::no artifacts matched dist/*.{tar.gz,zip,apk}" + fi + + - name: Upload signatures + uses: actions/upload-artifact@v4 + with: + name: minisign-signatures + path: dist/*.minisig + if-no-files-found: error + # Build + publish the tunnel-node Docker image to GHCR. Issue: every # full-mode user has to set up tunnel-node on a VPS, and "rustup + # cargo build --release" on a 1GB VPS is non-trivial — fails on memory, @@ -589,7 +747,17 @@ jobs: # off the self-hosted runners avoids contention with Linux build jobs from # the next tag if two releases overlap. release: - needs: [build, android] + needs: [build, android, sign] + # When `sign` is skipped (signing not yet enabled in repo vars) we + # still want the release to proceed with unsigned artifacts. GH + # Actions' default behaviour skips dependent jobs whenever ANY needed + # job is skipped — this `if:` overrides that so a skipped `sign` + # doesn't block release, but a `sign` *failure* still does. + if: | + always() + && needs.build.result == 'success' + && needs.android.result == 'success' + && (needs.sign.result == 'success' || needs.sign.result == 'skipped') runs-on: ubuntu-latest permissions: contents: write @@ -706,7 +874,16 @@ jobs: # `https://github.com/.../releases/tag/v...` for users who can reach # that URL — this in-repo folder is the fallback for users who can't. 
commit-releases: - needs: [build, android, release] + needs: [build, android, sign, release] + # Same skipped-sign escape hatch as the `release` job: `always()` keeps + # this job evaluable when `sign` is skipped, while the explicit success + # checks still block cancellations/failures from build/android/release. + if: | + always() + && needs.build.result == 'success' + && needs.android.result == 'success' + && needs.release.result == 'success' + && (needs.sign.result == 'success' || needs.sign.result == 'skipped') runs-on: ubuntu-latest permissions: contents: write @@ -748,7 +925,8 @@ jobs: --dir artifacts \ --pattern '*.tar.gz' \ --pattern '*.zip' \ - --pattern '*.apk' + --pattern '*.apk' \ + --pattern '*.minisig' echo "--- artifacts/ contents ---" ls -la artifacts/ @@ -760,12 +938,12 @@ jobs: mkdir -p releases - # Wipe old binary artifacts (.apk, .tar.gz, .zip) but keep - # README.md and .gitattributes — those are folder-level docs - # that stay constant across versions and shouldn't be + # Wipe old binary artifacts (.apk, .tar.gz, .zip, .minisig) but + # keep README.md and .gitattributes — those are folder-level + # docs that stay constant across versions and shouldn't be # regenerated on every release. find releases -maxdepth 1 -type f \ - \( -name '*.apk' -o -name '*.tar.gz' -o -name '*.zip' \) \ + \( -name '*.apk' -o -name '*.tar.gz' -o -name '*.zip' -o -name '*.minisig' \) \ -delete # Copy desktop archives. Their names already include the @@ -785,6 +963,16 @@ jobs: cp "$f" "releases/$(basename "$f")" done + # Minisign signatures, when present (signing is opt-in via the + # MINISIGN_SIGNING_ENABLED repo variable). Naming follows the + # `.minisig` convention the auto-updater expects, so a + # user who hits the in-repo `releases/` fallback path + # (GitHub-Releases-page filtered ISP) gets verified updates too. 
+ for f in artifacts/*.minisig; do + [ -f "$f" ] || continue + cp "$f" "releases/$(basename "$f")" + done + # Update the "Current version" line in releases/README.md # (both English and Persian copies) and APK filename refs so # the doc stays accurate. `sed -i` BSD/GNU compatibility is diff --git a/.gitignore b/.gitignore index 1c844db..51199c6 100644 --- a/.gitignore +++ b/.gitignore @@ -2,5 +2,6 @@ /dist /ca /config.json +/android/.kotlin/ .DS_Store /SCR-*.png diff --git a/Cargo.lock b/Cargo.lock index b7fd1c7..68db04e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -166,6 +166,15 @@ version = "1.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" +[[package]] +name = "arbitrary" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1" +dependencies = [ + "derive_arbitrary", +] + [[package]] name = "arboard" version = "3.6.1" @@ -865,6 +874,17 @@ dependencies = [ "powerfmt", ] +[[package]] +name = "derive_arbitrary" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + [[package]] name = "digest" version = "0.10.7" @@ -1224,6 +1244,17 @@ dependencies = [ "simd-adler32", ] +[[package]] +name = "filetime" +version = "0.2.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f98844151eee8917efc50bd9e8318cb963ae8b297431495d3f758616ea5c57db" +dependencies = [ + "cfg-if", + "libc", + "libredox", +] + [[package]] name = "find-msvc-tools" version = "0.1.9" @@ -2235,6 +2266,7 @@ dependencies = [ "httparse", "jni 0.21.1", "libc", + "minisign-verify", "portable-atomic", "rand 0.8.6", "rcgen", @@ -2243,6 +2275,8 @@ dependencies = [ "rustls-pki-types", "serde", "serde_json", 
+ "tar", + "tempfile", "thiserror 2.0.18", "time", "tokio", @@ -2253,6 +2287,7 @@ dependencies = [ "url", "webpki-roots 0.26.11", "x509-parser", + "zip", ] [[package]] @@ -2261,6 +2296,12 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" +[[package]] +name = "minisign-verify" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22f9645cb765ea72b8111f36c522475d2daa0d22c957a9826437e97534bc4e9e" + [[package]] name = "miniz_oxide" version = "0.8.9" @@ -3629,6 +3670,17 @@ dependencies = [ "libc", ] +[[package]] +name = "tar" +version = "0.4.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22692a6476a21fa75fdfc11d452fda482af402c008cdbaf3476414e122040973" +dependencies = [ + "filetime", + "libc", + "xattr", +] + [[package]] name = "tempfile" version = "3.27.0" @@ -4579,7 +4631,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.61.2", ] [[package]] @@ -5207,6 +5259,16 @@ dependencies = [ "time", ] +[[package]] +name = "xattr" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32e45ad4206f6d2479085147f02bc2ef834ac85886624a23575ae137c8aa8156" +dependencies = [ + "libc", + "rustix 1.1.4", +] + [[package]] name = "xcursor" version = "0.3.10" @@ -5350,8 +5412,37 @@ dependencies = [ "syn 2.0.117", ] +[[package]] +name = "zip" +version = "2.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fabe6324e908f85a1c52063ce7aa26b68dcb7eb6dbc83a2d148403c9bc3eba50" +dependencies = [ + "arbitrary", + "crc32fast", + "crossbeam-utils", + "displaydoc", + "flate2", + "indexmap", + "memchr", + "thiserror 2.0.18", + "zopfli", +] + [[package]] name = "zmij" 
version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" + +[[package]] +name = "zopfli" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f05cd8797d63865425ff89b5c4a48804f35ba0ce8d125800027ad6017d2b5249" +dependencies = [ + "bumpalo", + "crc32fast", + "log", + "simd-adler32", +] diff --git a/Cargo.toml b/Cargo.toml index 77fa7cc..dd39b74 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,7 +27,7 @@ default = [] ui = ["dep:eframe"] [dependencies] -tokio = { version = "1", features = ["rt-multi-thread", "macros", "net", "time", "io-util", "signal", "sync"] } +tokio = { version = "1", features = ["rt-multi-thread", "macros", "net", "time", "io-util", "signal", "sync", "fs"] } tokio-rustls = { version = "0.26", default-features = false, features = ["ring", "tls12"] } rustls = { version = "0.23", default-features = false, features = ["ring", "std", "tls12"] } rustls-pemfile = "2" @@ -49,6 +49,7 @@ http = "1" flate2 = "1" directories = "5" futures-util = { version = "0.3", default-features = false, features = ["std"] } +minisign-verify = "0.2" # 64-bit atomics on 32-bit MIPS/ARMv5 targets. Rust's std AtomicU64 is # only available on targets that expose native 64-bit atomics, which # mipsel-unknown-linux-musl does not — `AtomicU64` resolves to "no @@ -83,6 +84,17 @@ url = "2.5.8" [target.'cfg(unix)'.dependencies] libc = "0.2" +# Desktop auto-updater deps. Android uses Android's PackageInstaller for +# APK swap, so it doesn't need any of this — keeping these scoped to +# non-android keeps the Android cdylib lean. 
+# zip → unpack windows-amd64.zip / macos-*-app.zip release assets +# tar → unpack linux/macos *.tar.gz release assets +# tempfile → scratch dir for download/extract before staging +[target.'cfg(not(target_os = "android"))'.dependencies] +zip = { version = "2", default-features = false, features = ["deflate"] } +tar = "0.4.45" +tempfile = "3" + # Android-only deps: jni gives us the extern "system" wrappers used in # src/android_jni.rs; zero cost on any other platform because the whole # module is `#[cfg(target_os = "android")]`. diff --git a/android/app/src/main/AndroidManifest.xml b/android/app/src/main/AndroidManifest.xml index 4d74ca5..37fd432 100644 --- a/android/app/src/main/AndroidManifest.xml +++ b/android/app/src/main/AndroidManifest.xml @@ -18,6 +18,15 @@ --> + + + مصرف امروز (تخمینی) diff --git a/android/app/src/main/res/values/strings.xml b/android/app/src/main/res/values/strings.xml index 5f4d637..faf8945 100644 --- a/android/app/src/main/res/values/strings.xml +++ b/android/app/src/main/res/values/strings.xml @@ -96,6 +96,15 @@ google_ip already current (%1$s) DNS lookup failed — check network Logs copied to clipboard + Update v%1$s → v%2$s %3$s + Update available: v%1$s → v%2$s + Enable "Install unknown apps" for mhrv-rs, then tap the version label to retry. + Downloading and verifying update… (%1$.1f MB) + Couldn\'t open installer: %1$s + Download failed: %1$s + Download crashed: %1$s + Downloaded APK was empty. + Downloaded APK size mismatch (%1$d of %2$d bytes). Usage today (estimated) diff --git a/docs/maintainer/README.md b/docs/maintainer/README.md index 5639af4..093040b 100644 --- a/docs/maintainer/README.md +++ b/docs/maintainer/README.md @@ -11,6 +11,7 @@ Start with `SKILL.md` for orientation, conventions, and pointers. 
Then read refe - `references/diagnostic-taxonomy.md` — six candidate causes for the placeholder body, DIAGNOSTIC_MODE disambiguator - `references/workflow-conventions.md` — reply marker, Persian/English match rule, changelog format, commit messages, close reasons - `references/release-workflow.md` — Cargo.toml → tag → Telegram pipeline +- `references/update-signing.md` — one-time maintainer setup for minisign-signed auto-updates (CI gates on repo vars, defaults to off) - `references/contributors.md` — core contributor roles + their substantive PRs - `references/roadmap.md` — current and upcoming release batches - `references/persian-templates.md` — adaptable Persian reply templates and standardized phrasings diff --git a/docs/maintainer/references/update-signing.md b/docs/maintainer/references/update-signing.md new file mode 100644 index 0000000..d5a7363 --- /dev/null +++ b/docs/maintainer/references/update-signing.md @@ -0,0 +1,159 @@ +# Auto-updater signing — one-time setup + +The desktop auto-updater (`src/update_apply.rs`) verifies a minisign +signature against an embedded public key before swapping the running +binary. The Android sideload updater uses the same embedded public key to +verify the downloaded APK's sibling `.minisig` before handing it to the +OS installer. Android's PackageInstaller still performs the normal APK +same-key check against the app's signing certificate; that prevents a +different package identity from replacing the app, but minisign is the +release-provenance check. + +Until you complete the steps below the updater is in **rollout mode**: +it still applies updates, but it logs `MHRV_UPDATE_PUBKEY was not set +at build time — applying update without signature check (insecure)`. The +CI workflow's `sign` job is also skipped (it gates on +`vars.MINISIGN_SIGNING_ENABLED == 'true'`), and the build env var is left +empty. Both are fully backward-compatible — flipping the switch is what +turns verification on for new releases. 
+ +## What you're setting up + +- **Public key** → committed to the repo as a GH Actions repo *variable* + (it's public; doesn't need to be a secret), embedded into every release + binary at compile time via `option_env!("MHRV_UPDATE_PUBKEY")`. +- **Secret key** → stored as a GH Actions *secret*, used by the `sign` + job in `release.yml` to produce `.minisig` files for every + release artifact. +- **Toggle** → repo variable that flips the `sign` job from skipped to + active, so you can stage everything ahead of time and turn it on in a + separate commit if you want. + +## Step 1 — Generate the keypair (offline, one time) + +Pick a machine that's not also a CI runner. Anywhere with `cargo` works: + +```bash +cargo install --locked --version 0.6.5 rsign2 +rsign generate -p mhrv-update.pub -s mhrv-update.key +# Choose a strong passphrase or hit enter for passwordless. +``` + +You now have: + +- `mhrv-update.pub` — two lines: a comment, then the base64 public key +- `mhrv-update.key` — multi-line: comment, base64 secret key + +Prefer a strong passphrase when you can. GitHub Actions secrets are +encrypted at rest, but a passwordless key is immediately usable if the +secret body ever leaks through a workflow-log mistake, compromised +runner, or maintainer-machine compromise. A passphrase adds +defense-in-depth; the CI workflow already reads it from the optional +`MINISIGN_KEY_PASSWORD` secret. Passwordless is still simpler and may be +acceptable for low-friction rollout, but treat that as a conscious +trade-off rather than extra security from Actions storage alone. + +**Back up `mhrv-update.key` somewhere offline.** If you lose it, you +cannot sign future releases against the same public key, and existing +installs won't accept updates until you ship a new build with a new +embedded public key (which will then refuse to update from the old +build because *it* didn't have the new key embedded — you'd have to +break the update chain and ask users to manually reinstall). 
Don't lose +the key. + +## Step 2 — Wire it into GitHub Actions + +```bash +# Public key — bare base64 line (the one AFTER `untrusted comment:`). +gh variable set MINISIGN_PUBLIC_KEY --body "$(tail -1 mhrv-update.pub)" + +# Secret key — full file content (multi-line). `gh secret set` reads +# stdin, which preserves newlines correctly. +gh secret set MINISIGN_SECRET_KEY < mhrv-update.key + +# Passphrase, if you set one in step 1. Skip this command if the key is +# passwordless. +gh secret set MINISIGN_KEY_PASSWORD --body 'your-passphrase' + +# Flip the switch — until this is `true`, the `sign` job is skipped and +# binaries embed no public key (rollout-mode behaviour). +gh variable set MINISIGN_SIGNING_ENABLED --body true +``` + +Sanity-check (run from the repo dir, or pass +`--repo therealaleph/MasterHttpRelayVPN-RUST` if you're elsewhere): + +```bash +gh variable list # MINISIGN_PUBLIC_KEY, MINISIGN_SIGNING_ENABLED +gh secret list # MINISIGN_SECRET_KEY (+ MINISIGN_KEY_PASSWORD if set) +``` + +## Step 3 — Cut a release + +Push a tag as you normally would. The `release` workflow now: + +1. Builds binaries with `MHRV_UPDATE_PUBKEY` set, so the embedded key + becomes the `option_env!` value at compile time. +2. Runs the `sign` job after build + android, which first fails fast if + `MINISIGN_PUBLIC_KEY` or `MINISIGN_SECRET_KEY` is missing, then downloads every + artifact, runs `rsign sign -W -s key -x out.minisig file` against + each, and uploads the `.minisig` files as a workflow artifact. +3. The `release` job picks up everything (originals + `.minisig`s) and + uploads them to the GitHub Release page. +4. The `commit-releases` job copies them all to the in-repo `releases/` + folder so the GitHub-Releases-page-blocked fallback works for signed + updates too. 
+ +## Step 4 — Verify a downloaded asset (manual sanity check) + +```bash +rsign verify -P "$(tail -1 mhrv-update.pub)" \ + -x mhrv-rs-linux-amd64.tar.gz.minisig \ + mhrv-rs-linux-amd64.tar.gz +``` + +If you see `Signature and comment signature verified`, the chain works +end-to-end. Same check the `minisign-verify` crate runs at apply time. + +## Rotating the keypair + +Don't, unless the secret key is compromised. The cost is high: every +already-installed copy will refuse the update that ships the new public +key, because the *current* binary's embedded key won't match the new +signature. Recovery is "users manually reinstall from the GitHub Release +page" — the same UX as a Play-Store-less Android sideload. + +If you must rotate: + +1. Generate the new pair as in step 1. +2. **Sign the new binaries with BOTH the old key AND the new key.** Ship + one `.minisig` per key (`.minisig` and `.minisig.new`, + say) for at least one transitional release. The currently-deployed + binary verifies against the old `.minisig` and applies cleanly; the + newly-installed binary then has the new public key embedded and + verifies against the new `.minisig` from then on. +3. After most users have advanced past the transitional release, drop + the old signature. + +This dual-sign step is not currently implemented in the workflow — it'd +need a small extension to the `sign` job. Add it then, not now. + +## Threat model recap + +What signing prevents: + +- A compromised maintainer GitHub account or release pipeline pushing a + malicious binary to the Releases page. Even though TLS proves "GitHub + served this", without minisign the updater has no way to know whether + the file came from a legitimate release process or a hijacked one. + This applies to desktop archives and Android APK assets. + +What signing does *not* prevent: + +- A user downloading and running an unsigned binary manually from + somewhere other than the auto-updater path (the launcher script, a + fresh install). 
The signing scope is "updates to a running install", + not "first-install verification". For first-install, users still rely + on the GitHub repo identity and the HTTPS path. +- A compromise of *the offline machine where you keep the secret key*. + Treat that key like an offline crypto wallet seed phrase. diff --git a/src/android_jni.rs b/src/android_jni.rs index 91b4fa5..c726af5 100644 --- a/src/android_jni.rs +++ b/src/android_jni.rs @@ -357,10 +357,16 @@ pub extern "system" fn Java_com_therealaleph_mhrv_Native_drainLogs<'a>( /// /// Returned shape, one of: /// {"kind":"upToDate","current":"1.0.0","latest":"1.0.0"} -/// {"kind":"updateAvailable","current":"1.0.0","latest":"1.1.0","url":"https://..."} +/// {"kind":"updateAvailable","current":"1.0.0","latest":"1.1.0","url":"https://...", +/// "assetName":"mhrv-rs-android-arm64-v8a-v1.1.0.apk", +/// "assetUrl":"https://...","assetSize":12345678} /// {"kind":"offline","reason":"..."} /// {"kind":"error","reason":"..."} /// +/// `assetName/Url/Size` are only present on `updateAvailable` and only when +/// the picker matched a per-ABI APK in the release. The Kotlin updater +/// uses these fields to fetch the right APK and hand it to PackageInstaller. +/// /// Blocking — hit from a background dispatcher. #[no_mangle] pub extern "system" fn Java_com_therealaleph_mhrv_Native_checkUpdate<'a>( @@ -398,10 +404,19 @@ fn update_check_to_json(u: &crate::update_check::UpdateCheck) -> String { r#"{{"kind":"upToDate","current":"{}","latest":"{}"}}"#, esc(current), esc(latest), ), - crate::update_check::UpdateCheck::UpdateAvailable { current, latest, release_url, .. 
} => format!( - r#"{{"kind":"updateAvailable","current":"{}","latest":"{}","url":"{}"}}"#, - esc(current), esc(latest), esc(release_url), - ), + crate::update_check::UpdateCheck::UpdateAvailable { current, latest, release_url, asset } => { + let asset_fields = match asset { + Some(a) => format!( + r#","assetName":"{}","assetUrl":"{}","assetSize":{}"#, + esc(&a.name), esc(&a.download_url), a.size_bytes, + ), + None => String::new(), + }; + format!( + r#"{{"kind":"updateAvailable","current":"{}","latest":"{}","url":"{}"{}}}"#, + esc(current), esc(latest), esc(release_url), asset_fields, + ) + } crate::update_check::UpdateCheck::Offline(reason) => format!( r#"{{"kind":"offline","reason":"{}"}}"#, esc(reason), @@ -413,6 +428,106 @@ fn update_check_to_json(u: &crate::update_check::UpdateCheck) -> String { } } +/// `Native.downloadAsset(url, destPath)` -> String. Downloads a release +/// asset to `destPath` using the same rustls + redirect-following client +/// the desktop UI uses (so we go through CA-pinned TLS, no Java/OkHttp +/// dependency on the Kotlin side). When this build embeds +/// `MHRV_UPDATE_PUBKEY`, also downloads `.minisig` and verifies the +/// asset before returning success. BLOCKS — call from IO dispatcher. +/// +/// Returns a JSON blob: +/// {"ok":true,"bytes":12345678} +/// {"ok":false,"error":"..."} +/// +/// Always uses Route::Direct on Android — the proxy-route trick that +/// helps shared-NAT desktop users isn't needed here (Android users +/// generally have working clear-net to GitHub for the asset CDN, which +/// `objects.githubusercontent.com` redirects to). Can be revisited if +/// users on Iranian networks report the asset host blocked. 
+#[no_mangle] +pub extern "system" fn Java_com_therealaleph_mhrv_Native_downloadAsset<'a>( + mut env: JNIEnv<'a>, + _class: JClass, + url: JString, + dest: JString, +) -> jstring { + let result_json = safe( + r#"{"ok":false,"error":"panic"}"#.to_string(), + AssertUnwindSafe(|| { + install_logging_once(); + let url_s = jstring_to_string(&mut env, &url); + let dest_s = jstring_to_string(&mut env, &dest); + if url_s.is_empty() || dest_s.is_empty() { + return r#"{"ok":false,"error":"empty url or dest"}"#.to_string(); + } + let Some(rt) = one_shot_runtime() else { + return r#"{"ok":false,"error":"tokio init failed"}"#.to_string(); + }; + let dest_path = std::path::PathBuf::from(&dest_s); + let res = rt.block_on(async { + let bytes = crate::update_check::download_asset( + crate::update_check::Route::Direct, + &url_s, + &dest_path, + ) + .await?; + + if let Some(pubkey) = crate::update_apply::embedded_update_pubkey() { + let sig_url = crate::update_apply::signature_url_for_asset(&url_s); + let sig_path = { + let Some(file_name) = dest_path.file_name() else { + return Err("dest path has no filename".to_string()); + }; + let mut sig_name = file_name.to_os_string(); + sig_name.push(".minisig"); + dest_path.with_file_name(sig_name) + }; + crate::update_check::download_asset( + crate::update_check::Route::Direct, + &sig_url, + &sig_path, + ) + .await + .map_err(|e| format!("signature missing: {}", e))?; + let sig_text = tokio::fs::read_to_string(&sig_path) + .await + .map_err(|e| format!("read signature: {}", e))?; + crate::update_apply::verify_minisign_signature( + pubkey, + &dest_path, + &sig_text, + ) + .map_err(|e| format!("signature invalid: {}", e))?; + let _ = tokio::fs::remove_file(&sig_path).await; + tracing::info!("android: minisign signature verified for {}", dest_s); + } else { + tracing::warn!( + "android: MHRV_UPDATE_PUBKEY was not set at build time — \ + installing update without minisign check (rollout mode)." 
+ );
+ }
+
+ Ok::<u64, String>(bytes)
+ });
+ match res {
+ Ok(bytes) => {
+ tracing::info!("android: downloadAsset {} -> {} ({} bytes)", url_s, dest_s, bytes);
+ format!(r#"{{"ok":true,"bytes":{}}}"#, bytes)
+ }
+ Err(e) => {
+ let _ = std::fs::remove_file(&dest_path);
+ tracing::warn!("android: downloadAsset failed: {}", e);
+ let cleaned = e.replace('\\', "\\\\").replace('"', "\\\"");
+ format!(r#"{{"ok":false,"error":"{}"}}"#, cleaned)
+ }
+ }
+ }),
+ );
+ env.new_string(result_json)
+ .map(|s| s.into_raw())
+ .unwrap_or(std::ptr::null_mut())
+}
+
 /// `Native.testSni(googleIp, sni)` -> String. Returns a small JSON blob
 /// like `{"ok":true,"latencyMs":123}` or `{"ok":false,"error":"..."}`.
 /// Blocking call — Kotlin side should invoke on a background coroutine.
diff --git a/src/bin/ui.rs b/src/bin/ui.rs
index 5da1203..b1b9533 100644
--- a/src/bin/ui.rs
+++ b/src/bin/ui.rs
@@ -23,6 +23,12 @@ const WIN_HEIGHT: f32 = 680.0;
 const LOG_MAX: usize = 200;
 
 fn main() -> eframe::Result<()> {
+ // Auto-updater finalize step — must run before *anything* else
+ // because on Windows a staged `.new` is what got launched, and we
+ // need to rename it back to the canonical exe and re-exec before
+ // touching state, opening windows, etc.
+ mhrv_rs::update_apply::finalize_pending_at_startup();
+
 let _ = rustls::crypto::ring::default_provider().install_default();
 // Re-point HOME at the invoking user if this binary was launched
 // under sudo (see cert_installer::reconcile_sudo_environment). Must
@@ -153,6 +159,16 @@ struct UiState {
 /// One-line status of the most recent download (Ok(path) or Err(msg)).
 last_download: Option<Result<PathBuf, String>>,
 last_download_at: Option<Instant>,
+ /// Set while a stage-update (download + verify + extract + stage) is
+ /// in flight. Used to disable the Install button so a double-click
+ /// doesn't kick off two parallel downloads.
+ install_in_progress: bool, + /// Result of the most recent staging: + /// - Ok(StagedUpdate) → ready, show "Restart now" button + /// - Err(msg) → show the error inline + /// Cleared on next install attempt. + last_install: Option>, + last_install_at: Option, } #[derive(Clone, Debug)] @@ -204,6 +220,18 @@ enum Cmd { url: String, name: String, }, + /// Download + verify + extract + stage a release asset, ready to swap + /// in on next launch (or via restart_to_apply). Fires when the user + /// clicks the "Install update" button after a successful CheckUpdate + /// surfaces an UpdateAvailable with a matching platform asset. + InstallUpdate { + route: mhrv_rs::update_check::Route, + url: String, + name: String, + }, + /// Perform the binary swap and re-launch. Fires when the user clicks + /// "Restart now" after staging completed. + RestartToApply, } struct App { @@ -1452,7 +1480,7 @@ impl eframe::App for App { // Priority: update-check in flight > fresh test msg > fresh CA // result > update-check result. Old/expired entries are dropped. const TRANSIENT_TTL: Duration = Duration::from_secs(10); - let (test_msg_fresh, ca_trusted_fresh, update_check_fresh, download_fresh) = { + let (test_msg_fresh, ca_trusted_fresh, update_check_fresh, download_fresh, install_fresh) = { let s = self.shared.state.lock().unwrap(); ( s.last_test_msg_at @@ -1463,6 +1491,13 @@ impl eframe::App for App { .map_or(false, |t| t.elapsed() < TRANSIENT_TTL), s.last_download_at .map_or(false, |t| t.elapsed() < TRANSIENT_TTL), + // Install state stays "fresh" for as long as a successful + // staging is parked — TTL only applies to errors. We need + // the "Restart now" button to remain visible until the + // user acts on it. 
+ s.install_in_progress + || matches!(s.last_install, Some(Ok(_))) + || s.last_install_at.map_or(false, |t| t.elapsed() < TRANSIENT_TTL), ) }; @@ -1496,24 +1531,49 @@ impl eframe::App for App { { ui.hyperlink_to("open release", release_url); if let Some(a) = asset { - let dl_in_flight = self.shared.state.lock().unwrap().download_in_progress; + let (dl_in_flight, install_in_flight) = { + let s = self.shared.state.lock().unwrap(); + (s.download_in_progress, s.install_in_progress) + }; if dl_in_flight { ui.small( egui::RichText::new("downloading…") .color(egui::Color32::GRAY), ); + } else if install_in_flight { + ui.small( + egui::RichText::new("installing…") + .color(egui::Color32::GRAY), + ); } else { - let btn = egui::Button::new( + // Primary action: Install (download + verify + // + extract + stage + restart). Secondary: + // plain download, for users who'd rather + // place the asset in Downloads and apply it + // by hand. + let install_btn = egui::Button::new( egui::RichText::new(format!( - "⤓ Download {} ({:.1} MB)", - a.name, + "⟳ Install update ({:.1} MB)", a.size_bytes as f64 / 1_048_576.0 )) .color(egui::Color32::WHITE), ) .fill(ACCENT) .rounding(4.0); - if ui.add(btn).clicked() { + if ui.add(install_btn).clicked() { + let route = self.update_check_route(); + let _ = self.cmd_tx.send(Cmd::InstallUpdate { + route, + url: a.download_url.clone(), + name: a.name.clone(), + }); + } + if ui.small_button(format!( + "download only ({:.1} MB)", + a.size_bytes as f64 / 1_048_576.0 + )) + .clicked() + { let route = self.update_check_route(); let _ = self.cmd_tx.send(Cmd::DownloadUpdate { route, @@ -1535,6 +1595,47 @@ impl eframe::App for App { }; ui.small(egui::RichText::new(last_test_msg).color(color)); shown_any = true; + } else if install_fresh { + let install_state = { + let s = self.shared.state.lock().unwrap(); + (s.install_in_progress, s.last_install.clone()) + }; + match install_state { + (true, _) => { + ui.small( + egui::RichText::new("Installing update… 
(downloading + verifying)") + .color(egui::Color32::GRAY), + ); + } + (false, Some(Ok(staged))) => { + ui.horizontal(|ui| { + ui.small( + egui::RichText::new(format!( + "Update staged → {}", + staged.staged_path.display() + )) + .color(OK_GREEN), + ); + let restart_btn = egui::Button::new( + egui::RichText::new("⟳ Restart now to apply") + .color(egui::Color32::WHITE), + ) + .fill(ACCENT) + .rounding(4.0); + if ui.add(restart_btn).clicked() { + let _ = self.cmd_tx.send(Cmd::RestartToApply); + } + }); + } + (false, Some(Err(msg))) => { + ui.small( + egui::RichText::new(format!("Install failed: {}", msg)) + .color(ERR_RED), + ); + } + (false, None) => {} + } + shown_any = true; } else if download_fresh { let dl = self.shared.state.lock().unwrap().last_download.clone(); match dl { @@ -2275,29 +2376,111 @@ fn background_thread(shared: Arc, rx: Receiver) { let dir = downloads_dir(); let out = dir.join(&name); let result = mhrv_rs::update_check::download_asset(route, &url, &out).await; - let mut st = shared2.state.lock().unwrap(); - st.download_in_progress = false; - st.last_download_at = Some(Instant::now()); - match result { + let log_msg = match result { Ok(bytes) => { - push_log( - &shared2, - &format!( - "[ui] download ok: {} ({} bytes) -> {}", - name, - bytes, - out.display() - ), + let log_msg = format!( + "[ui] download ok: {} ({} bytes) -> {}", + name, + bytes, + out.display() ); + let mut st = shared2.state.lock().unwrap(); + st.download_in_progress = false; + st.last_download_at = Some(Instant::now()); st.last_download = Some(Ok(out)); + log_msg } Err(e) => { - push_log(&shared2, &format!("[ui] download failed: {}", e)); + let log_msg = format!("[ui] download failed: {}", e); + let mut st = shared2.state.lock().unwrap(); + st.download_in_progress = false; + st.last_download_at = Some(Instant::now()); st.last_download = Some(Err(e)); + log_msg } + }; + push_log(&shared2, &log_msg); + }); + } + Ok(Cmd::InstallUpdate { route, url, name }) => { + let shared2 = 
shared.clone(); + let already_in_progress = { + let mut st = shared2.state.lock().unwrap(); + if st.install_in_progress { + true + } else { + st.install_in_progress = true; + st.last_install = None; + st.last_install_at = Some(Instant::now()); + false } + }; + if already_in_progress { + push_log( + &shared, + "[ui] install already in progress; ignoring duplicate request", + ); + continue; + } + push_log(&shared, &format!("[ui] installing {}", name)); + rt.spawn(async move { + let result = + mhrv_rs::update_apply::download_and_stage(route, &url, &name).await; + let log_msg = match result { + Ok(staged) => { + let log_msg = format!( + "[ui] update staged → {} (restart to apply)", + staged.staged_path.display() + ); + let mut st = shared2.state.lock().unwrap(); + st.install_in_progress = false; + st.last_install_at = Some(Instant::now()); + st.last_install = Some(Ok(staged)); + log_msg + } + Err(e) => { + let log_msg = format!("[ui] install failed: {}", e); + let mut st = shared2.state.lock().unwrap(); + st.install_in_progress = false; + st.last_install_at = Some(Instant::now()); + st.last_install = Some(Err(e.to_string())); + log_msg + } + }; + push_log(&shared2, &log_msg); }); } + Ok(Cmd::RestartToApply) => { + // Pull the staged update out of UiState. If it's missing + // we have nothing to do — the user shouldn't have been able + // to click Restart in that case, but a UI race could let it + // through. Also need to do the swap on this thread so the + // process can exec/exit cleanly without the egui loop + // continuing afterwards. 
+ let staged = shared + .state + .lock() + .unwrap() + .last_install + .as_ref() + .and_then(|r| r.as_ref().ok().cloned()); + if let Some(staged) = staged { + push_log(&shared, "[ui] restarting to apply update"); + if let Err(e) = mhrv_rs::update_apply::restart_to_apply(&staged) { + push_log(&shared, &format!("[ui] restart failed: {}", e)); + let mut st = shared.state.lock().unwrap(); + st.last_install = Some(Err(format!("restart failed: {}", e))); + st.last_install_at = Some(Instant::now()); + } + // restart_to_apply doesn't return on success — control + // never reaches here in the happy path. + } else { + push_log( + &shared, + "[ui] restart requested but no staged update is available", + ); + } + } Err(_) => {} } diff --git a/src/lib.rs b/src/lib.rs index 1c62a5b..8ac9a71 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -14,5 +14,11 @@ pub mod scan_sni; pub mod test_cmd; pub mod update_check; +// Desktop-only — Android delegates APK install to PackageInstaller, doesn't +// need the extract / sig-verify / binary-swap machinery. On Android the +// module still exists as a stub so `main.rs` can call +// `update_apply::finalize_pending_at_startup()` unconditionally. +pub mod update_apply; + #[cfg(target_os = "android")] pub mod android_jni; diff --git a/src/main.rs b/src/main.rs index 202c7ec..e344ba0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -136,6 +136,12 @@ fn init_logging(level: &str) { #[tokio::main] async fn main() -> ExitCode { + // Auto-updater finalize step. If a previous run staged a `.new` + // next to us, finish the swap (Windows: rename + re-exec; Unix: + // late-apply rename) before any other init touches state. Best-effort: + // a failure logs and falls through. + mhrv_rs::update_apply::finalize_pending_at_startup(); + // Install default rustls crypto provider (ring). 
let _ = rustls::crypto::ring::default_provider().install_default(); diff --git a/src/update_apply.rs b/src/update_apply.rs new file mode 100644 index 0000000..156fa9d --- /dev/null +++ b/src/update_apply.rs @@ -0,0 +1,998 @@ +//! Apply a downloaded release asset: extract → verify signature → stage +//! `.new` → swap → re-launch. Pairs with `update_check.rs`, which +//! handles discovery and the network download. +//! +//! ## Signing & threat model +//! +//! TLS proves "GitHub's CDN served this", not provenance. If the release +//! pipeline or a maintainer GitHub account is compromised, an updater that +//! ships unsigned is a malware vector — the binary it pulls would +//! happily install. To close that gap we verify a minisign signature +//! against an embedded public key. +//! +//! Setup, one time: +//! +//! ```text +//! minisign -G -p mhrv.pub -s mhrv.key # keep mhrv.key offline +//! ``` +//! +//! Build with: +//! +//! ```text +//! MHRV_UPDATE_PUBKEY="$(tail -n1 mhrv.pub)" cargo build --release +//! ``` +//! +//! Per release, in CI: +//! +//! ```text +//! minisign -Sm -s mhrv.key +//! ``` +//! +//! Upload `.minisig` next to the asset in the release. +//! +//! Until the public key is set (or when the build env var is empty), the +//! updater still works but logs a loud warning and applies updates +//! without a signature check. Intentional: ship the feature first, layer +//! signing on once the keypair is generated. +//! +//! ## Binary swap, per platform +//! +//! - **Unix**: `rename` of the new binary over the running exe is +//! permitted while the process is alive (the kernel keeps the old +//! inode for the running process). After rename we `execv` self with +//! the original argv — single seamless restart. +//! - **Windows**: cannot `replace` a running .exe, but **can** rename +//! one. So `stage_update_*` writes `.new`; `restart_to_apply` +//! spawns `.new`, exits. The new process detects it is running +//! 
from a `.new` path, renames the old `` → `.old`, renames +//! itself (the `.new`) → ``, re-execs. Brief flash, one swap. +//! +//! Android is not handled here — APK install goes through +//! `PackageInstaller` on the Kotlin side. On Android this module compiles +//! to a single no-op `finalize_pending_at_startup` stub so callers in +//! `main.rs` don't need a `cfg` gate. + +/// Compile-time public key for verifying release assets. Set via +/// `MHRV_UPDATE_PUBKEY` env var at build time. The expected format is the +/// base64 line from a minisign `.pub` file (the line *after* the `untrusted +/// comment:` line). +const PUBKEY_B64_RAW: Option<&str> = option_env!("MHRV_UPDATE_PUBKEY"); + +fn normalize_embedded_pubkey(raw: Option<&'static str>) -> Option<&'static str> { + raw.and_then(|s| { + let trimmed = s.trim(); + if trimmed.is_empty() { + None + } else { + Some(trimmed) + } + }) +} + +/// Embedded minisign public key, if this build enforces signature checks. +/// Empty/whitespace env vars are treated as unset so GitHub Actions repo +/// variables can default cleanly to rollout mode. +pub fn embedded_update_pubkey() -> Option<&'static str> { + normalize_embedded_pubkey(PUBKEY_B64_RAW) +} + +/// `true` if the build embedded a minisign public key. UI can use this to +/// distinguish "verified update" from "rollout-mode update". +pub fn signature_verification_enabled() -> bool { + embedded_update_pubkey().is_some() +} + +/// Verify `archive` against minisign signature text and a base64 public +/// key. Shared by the desktop self-updater and Android sideload flow. 
+pub fn verify_minisign_signature( + pubkey_b64: &str, + archive: &std::path::Path, + sig_text: &str, +) -> Result<(), String> { + let pk = minisign_verify::PublicKey::from_base64(pubkey_b64.trim()) + .map_err(|e| format!("bad pubkey: {}", e))?; + let sig = minisign_verify::Signature::decode(sig_text) + .map_err(|e| format!("decode sig: {}", e))?; + let mut f = std::fs::File::open(archive) + .map_err(|e| format!("open archive: {}", e))?; + let mut buf = Vec::new(); + std::io::Read::read_to_end(&mut f, &mut buf) + .map_err(|e| format!("read archive: {}", e))?; + pk.verify(&buf, &sig, false) + .map_err(|e| e.to_string()) +} + +/// Return the sibling minisign URL for a release asset URL. +/// +/// GitHub's current `browser_download_url` values have no query string, but +/// signed/CDN URLs can. In that case the signature asset path still gets the +/// `.minisig` suffix before the query parameters. +pub fn signature_url_for_asset(asset_url: &str) -> String { + if let Some((base, query)) = asset_url.split_once('?') { + format!("{}.minisig?{}", base, query) + } else { + format!("{}.minisig", asset_url) + } +} + +#[cfg(target_os = "android")] +pub fn finalize_pending_at_startup() {} + +#[cfg(test)] +mod shared_tests { + use super::*; + + #[test] + fn embedded_pubkey_normalization_treats_empty_as_unset() { + assert_eq!(normalize_embedded_pubkey(None), None); + assert_eq!(normalize_embedded_pubkey(Some("")), None); + assert_eq!(normalize_embedded_pubkey(Some(" \n\t")), None); + assert_eq!(normalize_embedded_pubkey(Some(" abc123\n")), Some("abc123")); + } + + #[test] + fn signature_url_keeps_query_on_signature_asset() { + assert_eq!( + signature_url_for_asset("https://x/y/archive.tar.gz"), + "https://x/y/archive.tar.gz.minisig" + ); + assert_eq!( + signature_url_for_asset("https://x/y/archive.tar.gz?token=abc"), + "https://x/y/archive.tar.gz.minisig?token=abc" + ); + } +} + +#[cfg(not(target_os = "android"))] +mod desktop { + +use std::path::{Path, PathBuf}; + +use 
super::{embedded_update_pubkey, signature_url_for_asset, verify_minisign_signature}; + +#[derive(Debug, thiserror::Error)] +pub enum ApplyError { + #[error("io: {0}")] + Io(#[from] std::io::Error), + #[error("download: {0}")] + Download(String), + #[error("extract: {0}")] + Extract(String), + #[error("signature missing — refusing to apply unsigned update (rebuild without MHRV_UPDATE_PUBKEY to allow this)")] + SignatureMissing, + #[error("signature invalid: {0}")] + SignatureInvalid(String), + #[error("no compatible binary found in archive")] + BinaryNotFound, + #[error("ambiguous archive: more than one binary matched {0}")] + AmbiguousBinary(String), + #[error("staging: {0}")] + Staging(String), +} + +/// Result of staging an update. `staged_path` always ends in `.new` and +/// is the path that gets renamed at apply time. `relaunch_path` is the +/// exe to `execv` after the swap completes. +/// +/// For binary-only updates `staged_path` is a regular file (e.g. +/// `.new`) and `relaunch_path == swap_target()`. +/// +/// For macOS `.app` bundle updates `staged_path` is a directory (e.g. +/// `.new`) and `relaunch_path` points at the new exe inside +/// (`/Contents/MacOS/`). +#[derive(Debug, Clone)] +pub struct StagedUpdate { + pub staged_path: PathBuf, + pub relaunch_path: PathBuf, +} + +impl StagedUpdate { + /// The path the staged content swaps into — i.e. `staged_path` with + /// the trailing `.new` stripped. + pub fn swap_target(&self) -> PathBuf { + let s = self.staged_path.to_string_lossy(); + let stripped = s.strip_suffix(".new").unwrap_or(&s); + PathBuf::from(stripped.to_string()) + } +} + +/// Download a release archive into a temp dir, fetch its `.minisig`, +/// verify (if a pubkey is embedded), extract, and stage the new binary +/// next to the current exe as `.new`. On Ok, call `restart_to_apply` +/// to perform the swap. 
+pub async fn download_and_stage(
+ route: crate::update_check::Route,
+ archive_url: &str,
+ archive_name: &str,
+) -> Result<StagedUpdate, ApplyError> {
+ let scratch = tempfile::tempdir()
+ .map_err(|e| ApplyError::Staging(format!("tempdir: {}", e)))?;
+ let archive_path = scratch.path().join(archive_name);
+ crate::update_check::download_asset(route.clone(), archive_url, &archive_path)
+ .await
+ .map_err(ApplyError::Download)?;
+
+ // Try to fetch the matching `.minisig` alongside. We tolerate a missing
+ // sig file only when no pubkey was embedded at build time; with a
+ // pubkey, missing sig is a hard failure.
+ let sig_url = signature_url_for_asset(archive_url);
+ let sig_path = scratch.path().join(format!("{}.minisig", archive_name));
+ let sig_result =
+ crate::update_check::download_asset(route, &sig_url, &sig_path).await;
+
+ match (embedded_update_pubkey(), &sig_result) {
+ (Some(pubkey), Ok(_)) => {
+ let sig_text = std::fs::read_to_string(&sig_path).map_err(|e| {
+ ApplyError::SignatureInvalid(format!("read sig: {}", e))
+ })?;
+ verify_minisign_signature(pubkey, &archive_path, &sig_text)
+ .map_err(ApplyError::SignatureInvalid)?;
+ tracing::info!(
+ "update_apply: minisign signature verified for {}",
+ archive_name
+ );
+ }
+ (Some(_), Err(e)) => {
+ tracing::error!("update_apply: missing .minisig for {}: {}", archive_name, e);
+ return Err(ApplyError::SignatureMissing);
+ }
+ (None, _) => {
+ tracing::warn!(
+ "update_apply: MHRV_UPDATE_PUBKEY was not set at build time — \
+ applying update without signature check (insecure)."
+ );
+ }
+ }
+
+ stage_from_archive(&archive_path)
+}
+
+/// Extract `archive` to a scratch dir, find the binary (or `.app`
+/// bundle) that matches our running install, stage it as `.new` next
+/// to the existing exe/bundle.
+///
+/// Three modes:
+///
+/// 1. 
**macOS `.app` bundle**: if the running exe lives inside
+/// `Foo.app/Contents/MacOS/` AND the archive contains a `.app`,
+/// we swap the whole bundle (so `Info.plist`, future framework
+/// additions, etc. all come along). Staged path is `.new`,
+/// a directory.
+/// 2. **macOS bare binary**: running outside any `.app`, archive
+/// contains the bare binary at the top level. Single-file swap.
+/// 3. **Linux / Windows / etc.**: single-file swap.
+pub fn stage_from_archive(archive: &Path) -> Result<StagedUpdate, ApplyError> {
+ let current_exe = std::env::current_exe()
+ .map_err(|e| ApplyError::Staging(format!("current_exe: {}", e)))?;
+ let exe_name = current_exe
+ .file_name()
+ .ok_or_else(|| ApplyError::Staging("current_exe has no filename".into()))?
+ .to_string_lossy()
+ .into_owned();
+
+ let scratch = tempfile::tempdir()
+ .map_err(|e| ApplyError::Staging(format!("scratch tempdir: {}", e)))?;
+ extract_archive(archive, scratch.path())?;
+
+ // macOS .app bundle case — only attempted when both the running
+ // install AND the archive have a bundle. Otherwise fall through to
+ // the binary-only path (which still works on macOS for users who
+ // unpacked the .tar.gz onto e.g. /usr/local/bin).
+ #[cfg(target_os = "macos")]
+ {
+ if let Some(target_bundle) = macos_bundle_for_exe(&current_exe) {
+ if let Some(extracted_bundle) = find_app_bundle(scratch.path()) {
+ let staged = staged_path(&target_bundle);
+ cleanup_path(&staged);
+ copy_dir_all(&extracted_bundle, &staged)?;
+ let staged_inner_exe = staged
+ .join("Contents/MacOS")
+ .join(&exe_name);
+ if !staged_inner_exe.exists() {
+ return Err(ApplyError::BinaryNotFound);
+ }
+ use std::os::unix::fs::PermissionsExt;
+ if let Ok(meta) = std::fs::metadata(&staged_inner_exe) {
+ let mut p = meta.permissions();
+ p.set_mode(0o755);
+ let _ = std::fs::set_permissions(&staged_inner_exe, p);
+ }
+ let relaunch_path = target_bundle
+ .join("Contents/MacOS")
+ .join(&exe_name);
+ tracing::info!(
+ "update_apply: staged macOS bundle → {}",
+ staged.display()
+ );
+ return Ok(StagedUpdate {
+ staged_path: staged,
+ relaunch_path,
+ });
+ }
+ }
+ }
+
+ // Binary-only path.
+ let extracted = find_binary(scratch.path(), &exe_name)?;
+
+ let staged = staged_path(&current_exe);
+ let _ = std::fs::remove_file(&staged);
+ std::fs::copy(&extracted, &staged)
+ .map_err(|e| ApplyError::Staging(format!("copy staged: {}", e)))?;
+
+ #[cfg(unix)]
+ {
+ use std::os::unix::fs::PermissionsExt;
+ let mut perm = std::fs::metadata(&staged)?.permissions();
+ perm.set_mode(0o755);
+ std::fs::set_permissions(&staged, perm)?;
+ }
+
+ tracing::info!("update_apply: staged → {}", staged.display());
+ Ok(StagedUpdate {
+ staged_path: staged,
+ relaunch_path: current_exe,
+ })
+}
+
+fn staged_path(current: &Path) -> PathBuf {
+ let mut name = current.file_name().unwrap().to_owned();
+ name.push(".new");
+ current.with_file_name(name)
+}
+
+/// Walk up from a binary path looking for an enclosing `Foo.app` —
+/// specifically the layout `Foo.app/Contents/MacOS/` that macOS
+/// app bundles use. Returns the bundle root (`.../Foo.app`) on match.
+#[cfg(target_os = "macos")]
+fn macos_bundle_for_exe(exe: &Path) -> Option<PathBuf> {
+ let macos_dir = exe.parent()?; // Foo.app/Contents/MacOS
+ if macos_dir.file_name()? != "MacOS" {
+ return None;
+ }
+ let contents = macos_dir.parent()?; // Foo.app/Contents
+ if contents.file_name()? != "Contents" {
+ return None;
+ }
+ let app = contents.parent()?; // Foo.app
+ if app.extension().map(|e| e == "app").unwrap_or(false) {
+ Some(app.to_path_buf())
+ } else {
+ None
+ }
+}
+
+/// Locate the first `*.app` directory under `root`. Returns None if the
+/// archive isn't a bundle archive (e.g. `.tar.gz` of bare binaries).
+#[cfg(target_os = "macos")]
+fn find_app_bundle(root: &Path) -> Option<PathBuf> {
+ let mut stack = vec![root.to_path_buf()];
+ while let Some(d) = stack.pop() {
+ let Ok(rd) = std::fs::read_dir(&d) else {
+ continue;
+ };
+ for e in rd.flatten() {
+ let p = e.path();
+ if p.is_dir() {
+ if p.extension().map(|x| x == "app").unwrap_or(false) {
+ return Some(p);
+ }
+ stack.push(p);
+ }
+ }
+ }
+ None
+}
+
+#[cfg(target_os = "macos")]
+fn copy_dir_all(src: &Path, dst: &Path) -> Result<(), ApplyError> {
+ std::fs::create_dir_all(dst)?;
+ for entry in std::fs::read_dir(src)? {
+ let entry = entry?;
+ let ft = entry.file_type()?;
+ let from = entry.path();
+ let to = dst.join(entry.file_name());
+ if ft.is_dir() {
+ copy_dir_all(&from, &to)?;
+ } else if ft.is_symlink() {
+ let target = std::fs::read_link(&from)?;
+ std::os::unix::fs::symlink(&target, &to)?;
+ } else {
+ std::fs::copy(&from, &to)?;
+ // Preserve mode (rcgen / build-app.sh chmod +x's the inner
+ // binary; we want that to survive the copy).
+ use std::os::unix::fs::PermissionsExt; + let mode = std::fs::metadata(&from)?.permissions().mode(); + let _ = std::fs::set_permissions(&to, std::fs::Permissions::from_mode(mode)); + } + } + Ok(()) +} + +#[cfg(target_os = "macos")] +fn cleanup_path(p: &Path) { + if p.is_dir() { + let _ = std::fs::remove_dir_all(p); + } else if p.exists() { + let _ = std::fs::remove_file(p); + } +} + +fn extract_archive(archive: &Path, dest: &Path) -> Result<(), ApplyError> { + let lower = archive + .file_name() + .map(|s| s.to_string_lossy().to_lowercase()) + .unwrap_or_default(); + if lower.ends_with(".zip") { + extract_zip(archive, dest) + } else if lower.ends_with(".tar.gz") || lower.ends_with(".tgz") { + extract_tar_gz(archive, dest) + } else { + Err(ApplyError::Extract(format!( + "unsupported archive type: {}", + lower + ))) + } +} + +fn extract_zip(path: &Path, dest: &Path) -> Result<(), ApplyError> { + let f = std::fs::File::open(path) + .map_err(|e| ApplyError::Extract(format!("open zip: {}", e)))?; + let mut zip = zip::ZipArchive::new(f) + .map_err(|e| ApplyError::Extract(format!("zip: {}", e)))?; + for i in 0..zip.len() { + let mut entry = zip + .by_index(i) + .map_err(|e| ApplyError::Extract(format!("zip entry {}: {}", i, e)))?; + // `enclosed_name` rejects path-traversal (`..`) entries. 
+ let Some(rel) = entry.enclosed_name() else { + continue; + }; + let out_path = dest.join(rel); + if entry.is_dir() { + std::fs::create_dir_all(&out_path)?; + continue; + } + if let Some(p) = out_path.parent() { + std::fs::create_dir_all(p)?; + } + let mut out_f = std::fs::File::create(&out_path)?; + std::io::copy(&mut entry, &mut out_f) + .map_err(|e| ApplyError::Extract(format!("zip copy: {}", e)))?; + #[cfg(unix)] + { + use std::os::unix::fs::PermissionsExt; + if let Some(mode) = entry.unix_mode() { + let _ = std::fs::set_permissions( + &out_path, + std::fs::Permissions::from_mode(mode), + ); + } + } + } + Ok(()) +} + +fn extract_tar_gz(path: &Path, dest: &Path) -> Result<(), ApplyError> { + let f = std::fs::File::open(path) + .map_err(|e| ApplyError::Extract(format!("open tar.gz: {}", e)))?; + let gz = flate2::read::GzDecoder::new(f); + let mut archive = tar::Archive::new(gz); + std::fs::create_dir_all(dest) + .map_err(|e| ApplyError::Extract(format!("create tar.gz destination: {}", e)))?; + for entry in archive + .entries() + .map_err(|e| ApplyError::Extract(format!("tar.gz entries: {}", e)))? + { + let mut entry = entry.map_err(|e| ApplyError::Extract(format!("tar.gz entry: {}", e)))?; + let entry_type = entry.header().entry_type(); + if entry_type.is_symlink() || entry_type.is_hard_link() { + let path = entry + .path() + .map(|p| p.display().to_string()) + .unwrap_or_else(|_| "".to_string()); + return Err(ApplyError::Extract(format!( + "tar.gz link entries are not supported: {}", + path + ))); + } + entry + .unpack_in(dest) + .map_err(|e| ApplyError::Extract(format!("tar.gz unpack: {}", e)))?; + } + Ok(()) +} + +/// Walk `root` for regular files whose name matches `target_name` or +/// its stem (handles archives that ship the binary without a `.exe` +/// extension, or with one when current_exe doesn't). Errors if more +/// than one match — defensive against future multi-binary archives +/// where multiple files could plausibly satisfy the same name (e.g. 
+/// `mhrv-rs-ui` shipped at root AND inside an `extras/` dir would
+/// otherwise pick whichever `read_dir` returned first).
+fn find_binary(root: &Path, target_name: &str) -> Result<PathBuf, ApplyError> {
+ let stem = Path::new(target_name)
+ .file_stem()
+ .map(|s| s.to_string_lossy().into_owned())
+ .unwrap_or_else(|| target_name.to_string());
+ let with_exe = format!("{}.exe", stem);
+
+ let mut matches = Vec::new();
+ let mut stack = vec![root.to_path_buf()];
+ while let Some(d) = stack.pop() {
+ let Ok(rd) = std::fs::read_dir(&d) else {
+ continue;
+ };
+ for entry in rd.flatten() {
+ let path = entry.path();
+ let Ok(ft) = entry.file_type() else { continue };
+ if ft.is_dir() {
+ stack.push(path);
+ continue;
+ }
+ if ft.is_file() {
+ let n = entry.file_name().to_string_lossy().into_owned();
+ if n.eq_ignore_ascii_case(target_name)
+ || n.eq_ignore_ascii_case(&stem)
+ || n.eq_ignore_ascii_case(&with_exe)
+ {
+ matches.push(path);
+ }
+ }
+ }
+ }
+ match matches.len() {
+ 0 => Err(ApplyError::BinaryNotFound),
+ 1 => Ok(matches.into_iter().next().unwrap()),
+ _ => {
+ tracing::error!(
+ "update_apply: multiple binaries in archive matched {}: {:?}",
+ target_name,
+ matches
+ );
+ Err(ApplyError::AmbiguousBinary(target_name.to_string()))
+ }
+ }
+}
+
+/// Perform the swap and re-launch. Does not return on success.
+///
+/// Unix:
+/// - **File swap**: a single `rename` over the running exe is the
+/// atomic POSIX trick — the running process keeps its open inode so
+/// it doesn't crash, the new content takes the path. No `.old`
+/// bookkeeping needed.
+/// - **Directory swap (.app bundle)**: a directory rename can't
+/// atomically replace a non-empty target, so we do a 2-step: move
+/// the existing bundle to `.old`, then rename `.new` →
+/// target. The `.old` is cleaned up on next launch by
+/// `cleanup_stale_old`.
+///
+/// Windows: stages `.new` and spawns it; the new process detects it's
+/// running from a `.new` path and finalizes the swap in
+/// `finalize_pending_at_startup`.
+pub fn restart_to_apply(staged: &StagedUpdate) -> Result<(), ApplyError> {
+ let args: Vec<String> = std::env::args().skip(1).collect();
+
+ #[cfg(unix)]
+ {
+ let swap_target = staged.swap_target();
+ if staged.staged_path.is_dir() {
+ // Directory swap (macOS .app bundle). Two-step: backup
+ // existing, rename new into place. Both renames are
+ // POSIX-allowed even while the running process is execed
+ // out of the old bundle (open file descriptors keep the
+ // mapping alive across the rename).
+ let backup_name = format!(
+ "{}.old",
+ swap_target.file_name().unwrap().to_string_lossy()
+ );
+ let backup = swap_target.with_file_name(backup_name);
+ // Stale .old from a previous half-applied swap, if any.
+ if backup.is_dir() {
+ let _ = std::fs::remove_dir_all(&backup);
+ } else if backup.exists() {
+ let _ = std::fs::remove_file(&backup);
+ }
+ if swap_target.exists() {
+ std::fs::rename(&swap_target, &backup).map_err(|e| {
+ ApplyError::Staging(format!(
+ "backup {} → {}: {}",
+ swap_target.display(),
+ backup.display(),
+ e
+ ))
+ })?;
+ }
+ std::fs::rename(&staged.staged_path, &swap_target).map_err(|e| {
+ ApplyError::Staging(format!(
+ "rename {} → {}: {}",
+ staged.staged_path.display(),
+ swap_target.display(),
+ e
+ ))
+ })?;
+ } else {
+ // Atomic file swap.
+ std::fs::rename(&staged.staged_path, &swap_target).map_err(|e| {
+ ApplyError::Staging(format!(
+ "rename {} → {}: {}",
+ staged.staged_path.display(),
+ swap_target.display(),
+ e
+ ))
+ })?;
+ use std::os::unix::fs::PermissionsExt;
+ let mut p = std::fs::metadata(&swap_target)?.permissions();
+ p.set_mode(0o755);
+ std::fs::set_permissions(&swap_target, p)?;
+ }
+
+ use std::os::unix::process::CommandExt;
+ let err = std::process::Command::new(&staged.relaunch_path)
+ .args(&args)
+ .exec();
+ // exec returns only on failure.
+ Err(ApplyError::Staging(format!("execv: {}", err)))
+ }
+
+ #[cfg(windows)]
+ {
+ // Windows binary-only path. (No .app bundles on Windows so we
+ // never reach the directory branch here.)
+ std::process::Command::new(&staged.staged_path)
+ .args(&args)
+ .spawn()
+ .map_err(|e| {
+ ApplyError::Staging(format!("spawn .new: {}", e))
+ })?;
+ // Give the new process a moment so it's past startup before we
+ // exit and free our exe lock. Not strictly required because the
+ // .new code retries the rename, but smoother UX.
+ std::thread::sleep(std::time::Duration::from_millis(150));
+ std::process::exit(0);
+ }
+}
+
+/// Run as the very first thing in `main()`. Two responsibilities:
+///
+/// 1. **Windows finalize**: if we're running from a `.new` path it
+/// means the old process exited and we need to complete the swap —
+/// rename old → `.old`, rename ourselves → target, re-exec.
+/// 2. **Unix late apply**: if a previous `restart_to_apply` failed
+/// before the final rename, a stale `.new` file or macOS
+/// `.new` directory may be sitting next to us. Pick it up now.
+///
+/// Always best-effort. A swap failure here logs and falls through so the
+/// app still starts (running the old version) rather than hard-failing
+/// at boot.
+pub fn finalize_pending_at_startup() {
+ let Ok(current) = std::env::current_exe() else {
+ return;
+ };
+ let Some(name_os) = current.file_name() else {
+ return;
+ };
+ let name = name_os.to_string_lossy().into_owned();
+
+ cleanup_stale_old(&current, &name);
+
+ #[cfg(windows)]
+ {
+ if let Some(target_name) = name.strip_suffix(".new") {
+ let target = current.with_file_name(target_name);
+ let backup =
+ current.with_file_name(format!("{}.old", target_name));
+ let _ = std::fs::remove_file(&backup);
+ // Old process may not have fully exited yet; brief retry loop.
+            for _ in 0..30 {
+                if !target.exists() {
+                    break;
+                }
+                if std::fs::rename(&target, &backup).is_ok() {
+                    break;
+                }
+                std::thread::sleep(std::time::Duration::from_millis(150));
+            }
+            // Rename self (.new) → target. Allowed on Windows even while
+            // running.
+            match std::fs::rename(&current, &target) {
+                Ok(_) => {
+                    tracing::info!(
+                        "update_apply: finalized swap → {}",
+                        target.display()
+                    );
+                    let args: Vec<String> = std::env::args().skip(1).collect();
+                    let _ = std::process::Command::new(&target)
+                        .args(args)
+                        .spawn();
+                    std::process::exit(0);
+                }
+                Err(e) => {
+                    tracing::error!(
+                        "update_apply: failed to finalize swap {} → {}: {}",
+                        current.display(),
+                        target.display(),
+                        e
+                    );
+                }
+            }
+        }
+    }
+
+    #[cfg(unix)]
+    {
+        #[cfg(target_os = "macos")]
+        if late_apply_macos_bundle(&current) {
+            return;
+        }
+
+        let staged = current.with_file_name(format!("{}.new", name));
+        if staged.exists() {
+            match std::fs::rename(&staged, &current) {
+                Ok(_) => tracing::info!(
+                    "update_apply: late-applied staged update → {}",
+                    current.display()
+                ),
+                Err(e) => tracing::warn!(
+                    "update_apply: late-apply rename failed: {}",
+                    e
+                ),
+            }
+        }
+    }
+}
+
+#[cfg(target_os = "macos")]
+fn late_apply_macos_bundle(current: &Path) -> bool {
+    let Some(target_bundle) = macos_bundle_for_exe(current) else {
+        return false;
+    };
+    let staged_bundle = staged_path(&target_bundle);
+    if !staged_bundle.exists() {
+        return false;
+    }
+
+    let backup_name = format!(
+        "{}.old",
+        target_bundle.file_name().unwrap().to_string_lossy()
+    );
+    let backup = target_bundle.with_file_name(backup_name);
+    cleanup_path(&backup);
+
+    if target_bundle.exists() {
+        if let Err(e) = std::fs::rename(&target_bundle, &backup) {
+            tracing::warn!(
+                "update_apply: late-apply bundle backup failed {} → {}: {}",
+                target_bundle.display(),
+                backup.display(),
+                e
+            );
+            return true;
+        }
+    }
+
+    match std::fs::rename(&staged_bundle, &target_bundle) {
+        Ok(_) => tracing::info!(
+            "update_apply: late-applied staged macOS bundle → {}",
target_bundle.display() + ), + Err(e) => { + tracing::warn!( + "update_apply: late-apply bundle rename failed {} → {}: {}", + staged_bundle.display(), + target_bundle.display(), + e + ); + if backup.exists() && !target_bundle.exists() { + let _ = std::fs::rename(&backup, &target_bundle); + } + } + } + true +} + +/// Wipe a stale `.old` file from a previous swap, if any. Scoped to +/// our specific name — earlier versions deleted *every* `.old` in the +/// parent dir, which would blast away unrelated `.old` backups when the +/// binary lived in a shared dir like `~/Downloads`. +fn cleanup_stale_old(current: &Path, current_name: &str) { + let stale_name = format!("{}.old", current_name); + let stale = current.with_file_name(&stale_name); + if stale.is_dir() { + // macOS .app bundle backup — directory. + let _ = std::fs::remove_dir_all(&stale); + } else if stale.exists() { + let _ = std::fs::remove_file(&stale); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::io::Write; + + #[test] + fn staged_path_appends_new() { + let p = Path::new("/tmp/foo/mhrv-rs-ui"); + assert_eq!( + staged_path(p), + PathBuf::from("/tmp/foo/mhrv-rs-ui.new") + ); + let p = Path::new("C:/x/mhrv-rs-ui.exe"); + assert_eq!( + staged_path(p).file_name().unwrap().to_string_lossy(), + "mhrv-rs-ui.exe.new" + ); + } + + #[test] + fn find_binary_matches_with_or_without_exe() { + let dir = tempfile::tempdir().unwrap(); + let nested = dir.path().join("mhrv-rs-1.0"); + std::fs::create_dir_all(&nested).unwrap(); + let bin = nested.join("mhrv-rs-ui"); + std::fs::write(&bin, b"#!/bin/sh\n").unwrap(); + // current_exe has .exe, archive has bare name → still match by stem. + let found = find_binary(dir.path(), "mhrv-rs-ui.exe").unwrap(); + assert_eq!(found, bin); + // current_exe has bare name, archive has bare name → match. 
+ let found = find_binary(dir.path(), "mhrv-rs-ui").unwrap(); + assert_eq!(found, bin); + } + + #[test] + fn find_binary_errors_on_ambiguous_match() { + let dir = tempfile::tempdir().unwrap(); + // Two files would both satisfy the stem `mhrv-rs-ui`: one at root + // and one inside a subdir. We want the function to refuse rather + // than silently pick by `read_dir` order. + std::fs::write(dir.path().join("mhrv-rs-ui"), b"a").unwrap(); + let sub = dir.path().join("inner"); + std::fs::create_dir_all(&sub).unwrap(); + std::fs::write(sub.join("mhrv-rs-ui"), b"b").unwrap(); + let res = find_binary(dir.path(), "mhrv-rs-ui"); + assert!(matches!(res, Err(ApplyError::AmbiguousBinary(_)))); + } + + #[test] + fn find_binary_skips_unrelated_names() { + let dir = tempfile::tempdir().unwrap(); + std::fs::write(dir.path().join("mhrv-rs"), b"cli").unwrap(); + std::fs::write(dir.path().join("mhrv-rs-ui-extras"), b"x").unwrap(); + std::fs::write(dir.path().join("mhrv-rs-ui"), b"ui").unwrap(); + let found = find_binary(dir.path(), "mhrv-rs-ui").unwrap(); + assert_eq!(found.file_name().unwrap(), "mhrv-rs-ui"); + } + + #[test] + fn extract_zip_rejects_path_traversal() { + // zip-rs `enclosed_name` returns None for `..` entries, which we + // then `continue` past. Build an archive that includes one + // traversal entry and one safe one — only the safe one should + // land on disk. 
+ let tmp = tempfile::tempdir().unwrap(); + let zip_path = tmp.path().join("evil.zip"); + let f = std::fs::File::create(&zip_path).unwrap(); + let mut zw = zip::ZipWriter::new(f); + let opts: zip::write::SimpleFileOptions = zip::write::SimpleFileOptions::default() + .compression_method(zip::CompressionMethod::Deflated); + zw.start_file("../escape.txt", opts).unwrap(); + zw.write_all(b"would-be-traversal").unwrap(); + zw.start_file("safe.txt", opts).unwrap(); + zw.write_all(b"ok").unwrap(); + zw.finish().unwrap(); + + let dest = tmp.path().join("out"); + std::fs::create_dir_all(&dest).unwrap(); + extract_zip(&zip_path, &dest).unwrap(); + // The safe file lands; the traversal entry is silently skipped. + assert!(dest.join("safe.txt").is_file()); + // Walk the parent of `dest` to verify no `escape.txt` leaked + // upward — i.e. the path-traversal didn't write outside `dest`. + let leaked = tmp.path().join("escape.txt"); + assert!( + !leaked.exists(), + "path traversal wrote outside dest: {}", + leaked.display() + ); + } + + #[test] + fn extract_tar_gz_unpacks_files() { + let tmp = tempfile::tempdir().unwrap(); + let tgz = tmp.path().join("a.tar.gz"); + // Build a minimal tar.gz containing one file. 
+ let f = std::fs::File::create(&tgz).unwrap(); + let gz = flate2::write::GzEncoder::new(f, flate2::Compression::default()); + let mut tar_w = tar::Builder::new(gz); + let mut header = tar::Header::new_gnu(); + let payload = b"hello-world"; + header.set_size(payload.len() as u64); + header.set_mode(0o644); + header.set_cksum(); + tar_w.append_data(&mut header, "greeting.txt", &payload[..]).unwrap(); + tar_w.into_inner().unwrap().finish().unwrap(); + + let dest = tmp.path().join("out"); + extract_tar_gz(&tgz, &dest).unwrap(); + let contents = std::fs::read_to_string(dest.join("greeting.txt")).unwrap(); + assert_eq!(contents, "hello-world"); + } + + #[test] + fn extract_tar_gz_rejects_link_entries() { + for (entry_type, name) in [ + (tar::EntryType::Symlink, "symlink"), + (tar::EntryType::Link, "hardlink"), + ] { + let tmp = tempfile::tempdir().unwrap(); + let tgz = tmp.path().join(format!("{name}.tar.gz")); + let f = std::fs::File::create(&tgz).unwrap(); + let gz = flate2::write::GzEncoder::new(f, flate2::Compression::default()); + let mut tar_w = tar::Builder::new(gz); + let mut header = tar::Header::new_gnu(); + header.set_entry_type(entry_type); + header.set_size(0); + tar_w + .append_link(&mut header, format!("{name}.txt"), "target.txt") + .unwrap(); + tar_w.into_inner().unwrap().finish().unwrap(); + + let dest = tmp.path().join("out"); + let res = extract_tar_gz(&tgz, &dest); + assert!( + matches!(res, Err(ApplyError::Extract(ref msg)) if msg.contains("link entries")), + "{name} archive should be rejected, got {res:?}" + ); + } + } + + #[test] + fn cleanup_stale_old_only_touches_our_name() { + let dir = tempfile::tempdir().unwrap(); + let our = dir.path().join("mhrv-rs-ui.old"); + let theirs = dir.path().join("someone-elses.old"); + std::fs::write(&our, b"x").unwrap(); + std::fs::write(&theirs, b"y").unwrap(); + // current would normally be the actual exe path; we simulate by + // pointing at a name in this dir. 
+        let current = dir.path().join("mhrv-rs-ui");
+        cleanup_stale_old(&current, "mhrv-rs-ui");
+        assert!(!our.exists(), "ours should be removed");
+        assert!(theirs.exists(), "unrelated .old must NOT be removed");
+    }
+
+    #[test]
+    fn staged_update_swap_target_strips_new() {
+        let s = StagedUpdate {
+            staged_path: PathBuf::from("/p/foo.exe.new"),
+            relaunch_path: PathBuf::from("/p/foo.exe"),
+        };
+        assert_eq!(s.swap_target(), PathBuf::from("/p/foo.exe"));
+        let s = StagedUpdate {
+            staged_path: PathBuf::from("/Apps/Mhrv.app.new"),
+            relaunch_path: PathBuf::from("/Apps/Mhrv.app/Contents/MacOS/mhrv-rs-ui"),
+        };
+        assert_eq!(s.swap_target(), PathBuf::from("/Apps/Mhrv.app"));
+    }
+
+    #[cfg(target_os = "macos")]
+    #[test]
+    fn macos_bundle_for_exe_detects_layout() {
+        let inside = Path::new("/Applications/Mhrv.app/Contents/MacOS/mhrv-rs-ui");
+        assert_eq!(
+            macos_bundle_for_exe(inside),
+            Some(PathBuf::from("/Applications/Mhrv.app"))
+        );
+        let outside = Path::new("/usr/local/bin/mhrv-rs-ui");
+        assert!(macos_bundle_for_exe(outside).is_none());
+        let near_miss = Path::new("/X/NotAnApp/Contents/MacOS/foo");
+        assert!(macos_bundle_for_exe(near_miss).is_none());
+    }
+}
+
+} // mod desktop
+
+#[cfg(not(target_os = "android"))]
+pub use desktop::*;
diff --git a/src/update_check.rs b/src/update_check.rs
index f382ff8..f23bfb9 100644
--- a/src/update_check.rs
+++ b/src/update_check.rs
@@ -28,6 +28,8 @@ const REPO_NAME: &str = "MasterHttpRelayVPN-RUST";
 const GITHUB_API_HOST: &str = "api.github.com";
 const GITHUB_HOST: &str = "github.com";
 const CURRENT_VERSION: &str = env!("CARGO_PKG_VERSION");
+const API_READ_LIMIT_BYTES: usize = 512 * 1024;
+const BINARY_READ_LIMIT_BYTES: usize = 256 * 1024 * 1024;
 
 /// Where to route the HTTPS GET. Direct = straight rustls to the target.
 /// Proxy = HTTP CONNECT through our local MITM proxy (so GitHub sees
@@ -137,6 +139,9 @@ pub async fn check(route: Route) -> UpdateCheck {
 }
 
 /// Download a release asset to `out_path`. 
Returns Ok(bytes written) or Err(reason). +/// The body is currently buffered in memory and then written directly to +/// `out_path`; callers that expose the path to users should stage into a +/// scratch location first. pub async fn download_asset( route: Route, asset_url: &str, @@ -148,7 +153,10 @@ pub async fn download_asset( let (host, path) = split_url(asset_url) .ok_or_else(|| format!("bad asset URL: {}", asset_url))?; let body = https_raw_get(&route, &host, &path, true).await?; - std::fs::write(out_path, &body).map_err(|e| format!("write {}: {}", out_path.display(), e))?; + // Async write so we don't stall the executor on a 50 MB-class spool. + tokio::fs::write(out_path, &body) + .await + .map_err(|e| format!("write {}: {}", out_path.display(), e))?; Ok(body.len() as u64) } @@ -272,7 +280,11 @@ async fn https_raw_get( tls.flush().await.ok(); let mut buf = Vec::with_capacity(if binary { 1024 * 1024 } else { 16 * 1024 }); - let read_limit: usize = if binary { 128 * 1024 * 1024 } else { 512 * 1024 }; + let read_limit: usize = if binary { + BINARY_READ_LIMIT_BYTES + } else { + API_READ_LIMIT_BYTES + }; let read_fut = async { let mut chunk = [0u8; 8192]; loop { @@ -282,7 +294,15 @@ async fn https_raw_get( Err(e) => return Err(format!("read: {}", e)), } if buf.len() > read_limit { - return Err("response too large".into()); + let limit_label = if read_limit >= 1_048_576 { + format!("{:.0} MiB", read_limit as f64 / 1_048_576.0) + } else { + format!("{} KiB", read_limit / 1024) + }; + return Err(format!( + "response too large (>{} limit)", + limit_label + )); } } Ok::<(), String>(()) @@ -403,13 +423,14 @@ fn split_url(url: &str) -> Option<(String, String)> { /// Given the GitHub API's `assets` array, pick the one that best matches /// this platform + arch. Returns None if nothing reasonable matched. 
fn pick_asset_for_platform(assets: &[serde_json::Value]) -> Option { - let os = std::env::consts::OS; - let arch = std::env::consts::ARCH; + pick_asset_for_target(assets, std::env::consts::OS, std::env::consts::ARCH) +} +fn asset_preferences(os: &str, arch: &str) -> &'static [&'static [&'static str]] { // Priority-ordered preference list of name *patterns* — first pattern // that matches any asset wins. All matches are case-insensitive // substrings. - let prefs: &[&[&str]] = match (os, arch) { + match (os, arch) { // macOS: .app.zip is the nicest user experience (double-click). ("macos", "aarch64") => &[&["macos-arm64-app", ".zip"], &["macos-arm64", ".tar.gz"]], ("macos", "x86_64") => &[&["macos-amd64-app", ".zip"], &["macos-amd64", ".tar.gz"]], @@ -417,10 +438,21 @@ fn pick_asset_for_platform(assets: &[serde_json::Value]) -> Option ("linux", "aarch64") => &[&["linux-arm64", ".tar.gz"], &["linux-musl-arm64", ".tar.gz"]], ("linux", "arm") => &[&["raspbian-armhf", ".tar.gz"]], ("linux", "x86_64") => &[&["linux-amd64", ".tar.gz"], &["linux-musl-amd64", ".tar.gz"]], + // Android: each per-arch APK matches its ABI. Universal is the + // fallback when no per-arch build is published. The running + // process's target_arch picks the right one — `Build.SUPPORTED_ABIS[0]` + // and `target_arch` agree because the Rust cdylib was built for + // exactly the ABI the device loaded. 
+ ("android", "aarch64") => &[&["android-arm64-v8a", ".apk"], &["android-universal", ".apk"]], + ("android", "arm") => &[&["android-armeabi-v7a", ".apk"], &["android-universal", ".apk"]], + ("android", "x86_64") => &[&["android-x86_64", ".apk"], &["android-universal", ".apk"]], + ("android", "x86") => &[&["android-x86-", ".apk"], &["android-universal", ".apk"]], _ => &[], - }; + } +} - for needles in prefs { +fn pick_asset_for_target(assets: &[serde_json::Value], os: &str, arch: &str) -> Option { + for needles in asset_preferences(os, arch) { for a in assets { let name = a.get("name").and_then(|v| v.as_str()).unwrap_or(""); let lower = name.to_ascii_lowercase(); @@ -499,6 +531,28 @@ mod tests { assert!(pick_asset_for_platform(arr).is_none()); } + #[test] + fn pick_asset_android_picks_per_abi_apk_over_universal() { + let assets = serde_json::json!([ + {"name": "mhrv-rs-android-universal-v1.9.1.apk", "browser_download_url": "https://x/universal", "size": 1}, + {"name": "mhrv-rs-android-arm64-v8a-v1.9.1.apk", "browser_download_url": "https://x/arm64", "size": 2}, + {"name": "mhrv-rs-android-armeabi-v7a-v1.9.1.apk", "browser_download_url": "https://x/armv7", "size": 3}, + {"name": "mhrv-rs-android-x86_64-v1.9.1.apk", "browser_download_url": "https://x/x86_64", "size": 4}, + {"name": "mhrv-rs-android-x86-v1.9.1.apk", "browser_download_url": "https://x/x86", "size": 5}, + ]); + let arr = assets.as_array().unwrap(); + let cases = [ + ("aarch64", "mhrv-rs-android-arm64-v8a-v1.9.1.apk"), + ("arm", "mhrv-rs-android-armeabi-v7a-v1.9.1.apk"), + ("x86_64", "mhrv-rs-android-x86_64-v1.9.1.apk"), + ("x86", "mhrv-rs-android-x86-v1.9.1.apk"), + ]; + for (arch, expected) in cases { + let picked = pick_asset_for_target(arr, "android", arch).expect("should pick"); + assert_eq!(picked.name, expected, "arch={arch}"); + } + } + #[test] fn is_newer_mixed_length() { assert!(is_newer("1.2.3.4", "1.2.3"));