diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 42c9f67..5a9e59b 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -1,6 +1,6 @@
version: 2
updates:
- - package-ecosystem: 'pip'
+ - package-ecosystem: 'cargo'
directory: '/'
schedule:
interval: 'daily'
diff --git a/.github/workflows/image.yml b/.github/workflows/image.yml
index 75d398c..71605bc 100644
--- a/.github/workflows/image.yml
+++ b/.github/workflows/image.yml
@@ -3,6 +3,8 @@ name: Build cloudflare-ddns Docker image (multi-arch)
on:
push:
branches: master
+ tags:
+ - 'v*'
pull_request:
jobs:
@@ -10,45 +12,48 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
- uses: actions/checkout@v2
- # https://github.com/docker/setup-qemu-action
+ uses: actions/checkout@v4
+
- name: Set up QEMU
- uses: docker/setup-qemu-action@v1
- # https://github.com/docker/setup-buildx-action
- - name: Setting up Docker Buildx
- uses: docker/setup-buildx-action@v1
+ uses: docker/setup-qemu-action@v3
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
- name: Login to DockerHub
if: github.event_name != 'pull_request'
- uses: docker/login-action@v1
+ uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- - name: Extract branch name
- shell: bash
- run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
- id: extract_branch
+ - name: Extract version from Cargo.toml
+ id: version
+ run: |
+ VERSION=$(grep '^version' Cargo.toml | head -1 | sed 's/.*"\(.*\)".*/\1/')
+ echo "version=$VERSION" >> "$GITHUB_OUTPUT"
+
- name: Docker meta
id: meta
- uses: docker/metadata-action@v3
+ uses: docker/metadata-action@v5
with:
images: timothyjmiller/cloudflare-ddns
- sep-tags: ','
- flavor: |
- latest=false
tags: |
- type=raw,enable=${{ steps.extract_branch.outputs.branch == 'master' }},value=latest
- type=schedule
- type=ref,event=pr
-
- - name: Build and publish
- uses: docker/build-push-action@v2
+ type=raw,enable=${{ github.ref == 'refs/heads/master' }},value=latest
+ type=semver,pattern={{version}}
+ type=semver,pattern={{major}}.{{minor}}
+ type=semver,pattern={{major}}
+ type=raw,enable=${{ github.ref == 'refs/heads/master' }},value=${{ steps.version.outputs.version }}
+
+ - name: Build and push
+ uses: docker/build-push-action@v6
with:
context: .
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
- platforms: linux/ppc64le,linux/s390x,linux/386,linux/arm/v6,linux/arm/v7,linux/arm64/v8,linux/amd64
+ platforms: linux/amd64,linux/arm64,linux/arm/v7
labels: |
org.opencontainers.image.source=${{ github.event.repository.html_url }}
org.opencontainers.image.created=${{ steps.meta.outputs.created }}
org.opencontainers.image.revision=${{ github.sha }}
+ org.opencontainers.image.version=${{ steps.version.outputs.version }}
diff --git a/.gitignore b/.gitignore
index c6b8a6d..4b6705d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,63 +1,10 @@
# Private API keys for updating IPv4 & IPv6 addresses on Cloudflare
config.json
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-pip-wheel-metadata/
-share/python-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-MANIFEST
-
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# IPython
-profile_default/
-ipython_config.py
-
-# pyenv
-.python-version
-
-# Environments
-.env
-.venv
-env/
-venv/
-ENV/
-env.bak/
-venv.bak/
+# Rust build artifacts
+/target/
+debug/
+*.pdb
# Git History
**/.history/*
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 4f20a6d..47e46fc 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -11,11 +11,7 @@
".vscode": true,
"Dockerfile": true,
"LICENSE": true,
- "requirements.txt": true,
- "venv": true
+ "target": true
},
- "explorerExclude.backup": {},
- "python.linting.pylintEnabled": true,
- "python.linting.enabled": true,
- "python.formatting.provider": "autopep8"
+ "explorerExclude.backup": {}
}
diff --git a/Cargo.lock b/Cargo.lock
new file mode 100644
index 0000000..500d383
--- /dev/null
+++ b/Cargo.lock
@@ -0,0 +1,1870 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 4
+
+[[package]]
+name = "aho-corasick"
+version = "1.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "android_system_properties"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "assert-json-diff"
+version = "2.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12"
+dependencies = [
+ "serde",
+ "serde_json",
+]
+
+[[package]]
+name = "atomic-waker"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
+
+[[package]]
+name = "autocfg"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
+
+[[package]]
+name = "base64"
+version = "0.22.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
+
+[[package]]
+name = "bitflags"
+version = "2.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af"
+
+[[package]]
+name = "bumpalo"
+version = "3.20.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb"
+
+[[package]]
+name = "bytes"
+version = "1.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33"
+
+[[package]]
+name = "cc"
+version = "1.2.56"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2"
+dependencies = [
+ "find-msvc-tools",
+ "shlex",
+]
+
+[[package]]
+name = "cfg-if"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"
+
+[[package]]
+name = "cfg_aliases"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
+
+[[package]]
+name = "chrono"
+version = "0.4.44"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0"
+dependencies = [
+ "iana-time-zone",
+ "js-sys",
+ "num-traits",
+ "wasm-bindgen",
+ "windows-link",
+]
+
+[[package]]
+name = "cloudflare-ddns"
+version = "2.0.0"
+dependencies = [
+ "chrono",
+ "idna",
+ "if-addrs",
+ "regex",
+ "reqwest",
+ "serde",
+ "serde_json",
+ "tempfile",
+ "tokio",
+ "url",
+ "wiremock",
+]
+
+[[package]]
+name = "core-foundation-sys"
+version = "0.8.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
+
+[[package]]
+name = "deadpool"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0be2b1d1d6ec8d846f05e137292d0b89133caf95ef33695424c09568bdd39b1b"
+dependencies = [
+ "deadpool-runtime",
+ "lazy_static",
+ "num_cpus",
+ "tokio",
+]
+
+[[package]]
+name = "deadpool-runtime"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "092966b41edc516079bdf31ec78a2e0588d1d0c08f78b91d8307215928642b2b"
+
+[[package]]
+name = "displaydoc"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "equivalent"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
+
+[[package]]
+name = "errno"
+version = "0.3.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb"
+dependencies = [
+ "libc",
+ "windows-sys 0.61.2",
+]
+
+[[package]]
+name = "fastrand"
+version = "2.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
+
+[[package]]
+name = "find-msvc-tools"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582"
+
+[[package]]
+name = "fnv"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
+
+[[package]]
+name = "form_urlencoded"
+version = "1.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf"
+dependencies = [
+ "percent-encoding",
+]
+
+[[package]]
+name = "futures"
+version = "0.3.32"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-executor",
+ "futures-io",
+ "futures-sink",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-channel"
+version = "0.3.32"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d"
+dependencies = [
+ "futures-core",
+ "futures-sink",
+]
+
+[[package]]
+name = "futures-core"
+version = "0.3.32"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d"
+
+[[package]]
+name = "futures-executor"
+version = "0.3.32"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d"
+dependencies = [
+ "futures-core",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-io"
+version = "0.3.32"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718"
+
+[[package]]
+name = "futures-macro"
+version = "0.3.32"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "futures-sink"
+version = "0.3.32"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893"
+
+[[package]]
+name = "futures-task"
+version = "0.3.32"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393"
+
+[[package]]
+name = "futures-util"
+version = "0.3.32"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-io",
+ "futures-macro",
+ "futures-sink",
+ "futures-task",
+ "memchr",
+ "pin-project-lite",
+ "slab",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.2.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0"
+dependencies = [
+ "cfg-if",
+ "js-sys",
+ "libc",
+ "wasi",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd"
+dependencies = [
+ "cfg-if",
+ "js-sys",
+ "libc",
+ "r-efi",
+ "wasip2",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "h2"
+version = "0.4.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54"
+dependencies = [
+ "atomic-waker",
+ "bytes",
+ "fnv",
+ "futures-core",
+ "futures-sink",
+ "http",
+ "indexmap",
+ "slab",
+ "tokio",
+ "tokio-util",
+ "tracing",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.16.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100"
+
+[[package]]
+name = "hermit-abi"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c"
+
+[[package]]
+name = "http"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a"
+dependencies = [
+ "bytes",
+ "itoa",
+]
+
+[[package]]
+name = "http-body"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184"
+dependencies = [
+ "bytes",
+ "http",
+]
+
+[[package]]
+name = "http-body-util"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a"
+dependencies = [
+ "bytes",
+ "futures-core",
+ "http",
+ "http-body",
+ "pin-project-lite",
+]
+
+[[package]]
+name = "httparse"
+version = "1.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87"
+
+[[package]]
+name = "httpdate"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
+
+[[package]]
+name = "hyper"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11"
+dependencies = [
+ "atomic-waker",
+ "bytes",
+ "futures-channel",
+ "futures-core",
+ "h2",
+ "http",
+ "http-body",
+ "httparse",
+ "httpdate",
+ "itoa",
+ "pin-project-lite",
+ "pin-utils",
+ "smallvec",
+ "tokio",
+ "want",
+]
+
+[[package]]
+name = "hyper-rustls"
+version = "0.27.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58"
+dependencies = [
+ "http",
+ "hyper",
+ "hyper-util",
+ "rustls",
+ "rustls-pki-types",
+ "tokio",
+ "tokio-rustls",
+ "tower-service",
+ "webpki-roots",
+]
+
+[[package]]
+name = "hyper-util"
+version = "0.1.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0"
+dependencies = [
+ "base64",
+ "bytes",
+ "futures-channel",
+ "futures-util",
+ "http",
+ "http-body",
+ "hyper",
+ "ipnet",
+ "libc",
+ "percent-encoding",
+ "pin-project-lite",
+ "socket2",
+ "tokio",
+ "tower-service",
+ "tracing",
+]
+
+[[package]]
+name = "iana-time-zone"
+version = "0.1.65"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470"
+dependencies = [
+ "android_system_properties",
+ "core-foundation-sys",
+ "iana-time-zone-haiku",
+ "js-sys",
+ "log",
+ "wasm-bindgen",
+ "windows-core",
+]
+
+[[package]]
+name = "iana-time-zone-haiku"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
+dependencies = [
+ "cc",
+]
+
+[[package]]
+name = "icu_collections"
+version = "2.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43"
+dependencies = [
+ "displaydoc",
+ "potential_utf",
+ "yoke",
+ "zerofrom",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_locale_core"
+version = "2.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6"
+dependencies = [
+ "displaydoc",
+ "litemap",
+ "tinystr",
+ "writeable",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_normalizer"
+version = "2.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599"
+dependencies = [
+ "icu_collections",
+ "icu_normalizer_data",
+ "icu_properties",
+ "icu_provider",
+ "smallvec",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_normalizer_data"
+version = "2.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a"
+
+[[package]]
+name = "icu_properties"
+version = "2.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec"
+dependencies = [
+ "icu_collections",
+ "icu_locale_core",
+ "icu_properties_data",
+ "icu_provider",
+ "zerotrie",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_properties_data"
+version = "2.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af"
+
+[[package]]
+name = "icu_provider"
+version = "2.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614"
+dependencies = [
+ "displaydoc",
+ "icu_locale_core",
+ "writeable",
+ "yoke",
+ "zerofrom",
+ "zerotrie",
+ "zerovec",
+]
+
+[[package]]
+name = "idna"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de"
+dependencies = [
+ "idna_adapter",
+ "smallvec",
+ "utf8_iter",
+]
+
+[[package]]
+name = "idna_adapter"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344"
+dependencies = [
+ "icu_normalizer",
+ "icu_properties",
+]
+
+[[package]]
+name = "if-addrs"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "69b2eeee38fef3aa9b4cc5f1beea8a2444fc00e7377cafae396de3f5c2065e24"
+dependencies = [
+ "libc",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "indexmap"
+version = "2.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017"
+dependencies = [
+ "equivalent",
+ "hashbrown",
+]
+
+[[package]]
+name = "ipnet"
+version = "2.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2"
+
+[[package]]
+name = "iri-string"
+version = "0.7.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a"
+dependencies = [
+ "memchr",
+ "serde",
+]
+
+[[package]]
+name = "itoa"
+version = "1.0.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2"
+
+[[package]]
+name = "js-sys"
+version = "0.3.91"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c"
+dependencies = [
+ "once_cell",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "lazy_static"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
+
+[[package]]
+name = "libc"
+version = "0.2.183"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b646652bf6661599e1da8901b3b9522896f01e736bad5f723fe7a3a27f899d"
+
+[[package]]
+name = "linux-raw-sys"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53"
+
+[[package]]
+name = "litemap"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77"
+
+[[package]]
+name = "lock_api"
+version = "0.4.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965"
+dependencies = [
+ "scopeguard",
+]
+
+[[package]]
+name = "log"
+version = "0.4.29"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897"
+
+[[package]]
+name = "lru-slab"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154"
+
+[[package]]
+name = "memchr"
+version = "2.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79"
+
+[[package]]
+name = "mio"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc"
+dependencies = [
+ "libc",
+ "wasi",
+ "windows-sys 0.61.2",
+]
+
+[[package]]
+name = "num-traits"
+version = "0.2.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "num_cpus"
+version = "1.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b"
+dependencies = [
+ "hermit-abi",
+ "libc",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.21.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
+
+[[package]]
+name = "parking_lot"
+version = "0.12.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a"
+dependencies = [
+ "lock_api",
+ "parking_lot_core",
+]
+
+[[package]]
+name = "parking_lot_core"
+version = "0.9.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "redox_syscall",
+ "smallvec",
+ "windows-link",
+]
+
+[[package]]
+name = "percent-encoding"
+version = "2.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"
+
+[[package]]
+name = "pin-project-lite"
+version = "0.2.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a89322df9ebe1c1578d689c92318e070967d1042b512afbe49518723f4e6d5cd"
+
+[[package]]
+name = "pin-utils"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
+
+[[package]]
+name = "potential_utf"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77"
+dependencies = [
+ "zerovec",
+]
+
+[[package]]
+name = "ppv-lite86"
+version = "0.2.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
+dependencies = [
+ "zerocopy",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.106"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quinn"
+version = "0.11.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20"
+dependencies = [
+ "bytes",
+ "cfg_aliases",
+ "pin-project-lite",
+ "quinn-proto",
+ "quinn-udp",
+ "rustc-hash",
+ "rustls",
+ "socket2",
+ "thiserror",
+ "tokio",
+ "tracing",
+ "web-time",
+]
+
+[[package]]
+name = "quinn-proto"
+version = "0.11.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "434b42fec591c96ef50e21e886936e66d3cc3f737104fdb9b737c40ffb94c098"
+dependencies = [
+ "bytes",
+ "getrandom 0.3.4",
+ "lru-slab",
+ "rand",
+ "ring",
+ "rustc-hash",
+ "rustls",
+ "rustls-pki-types",
+ "slab",
+ "thiserror",
+ "tinyvec",
+ "tracing",
+ "web-time",
+]
+
+[[package]]
+name = "quinn-udp"
+version = "0.5.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd"
+dependencies = [
+ "cfg_aliases",
+ "libc",
+ "once_cell",
+ "socket2",
+ "tracing",
+ "windows-sys 0.60.2",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.45"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "41f2619966050689382d2b44f664f4bc593e129785a36d6ee376ddf37259b924"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "r-efi"
+version = "5.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"
+
+[[package]]
+name = "rand"
+version = "0.9.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1"
+dependencies = [
+ "rand_chacha",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb"
+dependencies = [
+ "ppv-lite86",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.9.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c"
+dependencies = [
+ "getrandom 0.3.4",
+]
+
+[[package]]
+name = "redox_syscall"
+version = "0.5.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d"
+dependencies = [
+ "bitflags",
+]
+
+[[package]]
+name = "regex"
+version = "1.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-automata",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.4.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.8.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a"
+
+[[package]]
+name = "reqwest"
+version = "0.12.28"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147"
+dependencies = [
+ "base64",
+ "bytes",
+ "futures-core",
+ "http",
+ "http-body",
+ "http-body-util",
+ "hyper",
+ "hyper-rustls",
+ "hyper-util",
+ "js-sys",
+ "log",
+ "percent-encoding",
+ "pin-project-lite",
+ "quinn",
+ "rustls",
+ "rustls-pki-types",
+ "serde",
+ "serde_json",
+ "serde_urlencoded",
+ "sync_wrapper",
+ "tokio",
+ "tokio-rustls",
+ "tower",
+ "tower-http",
+ "tower-service",
+ "url",
+ "wasm-bindgen",
+ "wasm-bindgen-futures",
+ "web-sys",
+ "webpki-roots",
+]
+
+[[package]]
+name = "ring"
+version = "0.17.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7"
+dependencies = [
+ "cc",
+ "cfg-if",
+ "getrandom 0.2.17",
+ "libc",
+ "untrusted",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "rustc-hash"
+version = "2.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"
+
+[[package]]
+name = "rustix"
+version = "1.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190"
+dependencies = [
+ "bitflags",
+ "errno",
+ "libc",
+ "linux-raw-sys",
+ "windows-sys 0.61.2",
+]
+
+[[package]]
+name = "rustls"
+version = "0.23.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "758025cb5fccfd3bc2fd74708fd4682be41d99e5dff73c377c0646c6012c73a4"
+dependencies = [
+ "once_cell",
+ "ring",
+ "rustls-pki-types",
+ "rustls-webpki",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "rustls-pki-types"
+version = "1.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd"
+dependencies = [
+ "web-time",
+ "zeroize",
+]
+
+[[package]]
+name = "rustls-webpki"
+version = "0.103.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53"
+dependencies = [
+ "ring",
+ "rustls-pki-types",
+ "untrusted",
+]
+
+[[package]]
+name = "rustversion"
+version = "1.0.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"
+
+[[package]]
+name = "ryu"
+version = "1.0.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f"
+
+[[package]]
+name = "scopeguard"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
+
+[[package]]
+name = "serde"
+version = "1.0.228"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e"
+dependencies = [
+ "serde_core",
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_core"
+version = "1.0.228"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.228"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.149"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86"
+dependencies = [
+ "itoa",
+ "memchr",
+ "serde",
+ "serde_core",
+ "zmij",
+]
+
+[[package]]
+name = "serde_urlencoded"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd"
+dependencies = [
+ "form_urlencoded",
+ "itoa",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "shlex"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
+
+[[package]]
+name = "signal-hook-registry"
+version = "1.4.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b"
+dependencies = [
+ "errno",
+ "libc",
+]
+
+[[package]]
+name = "slab"
+version = "0.4.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5"
+
+[[package]]
+name = "smallvec"
+version = "1.15.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"
+
+[[package]]
+name = "socket2"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3a766e1110788c36f4fa1c2b71b387a7815aa65f88ce0229841826633d93723e"
+dependencies = [
+ "libc",
+ "windows-sys 0.61.2",
+]
+
+[[package]]
+name = "stable_deref_trait"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596"
+
+[[package]]
+name = "subtle"
+version = "2.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
+
+[[package]]
+name = "syn"
+version = "2.0.117"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "sync_wrapper"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263"
+dependencies = [
+ "futures-core",
+]
+
+[[package]]
+name = "synstructure"
+version = "0.13.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "tempfile"
+version = "3.26.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "82a72c767771b47409d2345987fda8628641887d5466101319899796367354a0"
+dependencies = [
+ "fastrand",
+ "getrandom 0.3.4",
+ "once_cell",
+ "rustix",
+ "windows-sys 0.61.2",
+]
+
+[[package]]
+name = "thiserror"
+version = "2.0.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4"
+dependencies = [
+ "thiserror-impl",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "2.0.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "tinystr"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869"
+dependencies = [
+ "displaydoc",
+ "zerovec",
+]
+
+[[package]]
+name = "tinyvec"
+version = "1.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa"
+dependencies = [
+ "tinyvec_macros",
+]
+
+[[package]]
+name = "tinyvec_macros"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
+
+[[package]]
+name = "tokio"
+version = "1.50.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "27ad5e34374e03cfffefc301becb44e9dc3c17584f414349ebe29ed26661822d"
+dependencies = [
+ "bytes",
+ "libc",
+ "mio",
+ "parking_lot",
+ "pin-project-lite",
+ "signal-hook-registry",
+ "socket2",
+ "tokio-macros",
+ "windows-sys 0.61.2",
+]
+
+[[package]]
+name = "tokio-macros"
+version = "2.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c55a2eff8b69ce66c84f85e1da1c233edc36ceb85a2058d11b0d6a3c7e7569c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "tokio-rustls"
+version = "0.26.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61"
+dependencies = [
+ "rustls",
+ "tokio",
+]
+
+[[package]]
+name = "tokio-util"
+version = "0.7.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098"
+dependencies = [
+ "bytes",
+ "futures-core",
+ "futures-sink",
+ "pin-project-lite",
+ "tokio",
+]
+
+[[package]]
+name = "tower"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4"
+dependencies = [
+ "futures-core",
+ "futures-util",
+ "pin-project-lite",
+ "sync_wrapper",
+ "tokio",
+ "tower-layer",
+ "tower-service",
+]
+
+[[package]]
+name = "tower-http"
+version = "0.6.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8"
+dependencies = [
+ "bitflags",
+ "bytes",
+ "futures-util",
+ "http",
+ "http-body",
+ "iri-string",
+ "pin-project-lite",
+ "tower",
+ "tower-layer",
+ "tower-service",
+]
+
+[[package]]
+name = "tower-layer"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e"
+
+[[package]]
+name = "tower-service"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3"
+
+[[package]]
+name = "tracing"
+version = "0.1.44"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100"
+dependencies = [
+ "pin-project-lite",
+ "tracing-core",
+]
+
+[[package]]
+name = "tracing-core"
+version = "0.1.36"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a"
+dependencies = [
+ "once_cell",
+]
+
+[[package]]
+name = "try-lock"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75"
+
+[[package]]
+name = "untrusted"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
+
+[[package]]
+name = "url"
+version = "2.5.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed"
+dependencies = [
+ "form_urlencoded",
+ "idna",
+ "percent-encoding",
+ "serde",
+]
+
+[[package]]
+name = "utf8_iter"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
+
+[[package]]
+name = "want"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e"
+dependencies = [
+ "try-lock",
+]
+
+[[package]]
+name = "wasi"
+version = "0.11.1+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b"
+
+[[package]]
+name = "wasip2"
+version = "1.0.2+wasi-0.2.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5"
+dependencies = [
+ "wit-bindgen",
+]
+
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.114"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e"
+dependencies = [
+ "cfg-if",
+ "once_cell",
+ "rustversion",
+ "wasm-bindgen-macro",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-futures"
+version = "0.4.64"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e9c5522b3a28661442748e09d40924dfb9ca614b21c00d3fd135720e48b67db8"
+dependencies = [
+ "cfg-if",
+ "futures-util",
+ "js-sys",
+ "once_cell",
+ "wasm-bindgen",
+ "web-sys",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.114"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6"
+dependencies = [
+ "quote",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.114"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3"
+dependencies = [
+ "bumpalo",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.114"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "web-sys"
+version = "0.3.91"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "854ba17bb104abfb26ba36da9729addc7ce7f06f5c0f90f3c391f8461cca21f9"
+dependencies = [
+ "js-sys",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "web-time"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb"
+dependencies = [
+ "js-sys",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "webpki-roots"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "22cfaf3c063993ff62e73cb4311efde4db1efb31ab78a3e5c457939ad5cc0bed"
+dependencies = [
+ "rustls-pki-types",
+]
+
+[[package]]
+name = "windows-core"
+version = "0.62.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb"
+dependencies = [
+ "windows-implement",
+ "windows-interface",
+ "windows-link",
+ "windows-result",
+ "windows-strings",
+]
+
+[[package]]
+name = "windows-implement"
+version = "0.60.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "windows-interface"
+version = "0.59.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "windows-link"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"
+
+[[package]]
+name = "windows-result"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5"
+dependencies = [
+ "windows-link",
+]
+
+[[package]]
+name = "windows-strings"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091"
+dependencies = [
+ "windows-link",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
+dependencies = [
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.59.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
+dependencies = [
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.60.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
+dependencies = [
+ "windows-targets 0.53.5",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.61.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc"
+dependencies = [
+ "windows-link",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
+dependencies = [
+ "windows_aarch64_gnullvm 0.52.6",
+ "windows_aarch64_msvc 0.52.6",
+ "windows_i686_gnu 0.52.6",
+ "windows_i686_gnullvm 0.52.6",
+ "windows_i686_msvc 0.52.6",
+ "windows_x86_64_gnu 0.52.6",
+ "windows_x86_64_gnullvm 0.52.6",
+ "windows_x86_64_msvc 0.52.6",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.53.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3"
+dependencies = [
+ "windows-link",
+ "windows_aarch64_gnullvm 0.53.1",
+ "windows_aarch64_msvc 0.53.1",
+ "windows_i686_gnu 0.53.1",
+ "windows_i686_gnullvm 0.53.1",
+ "windows_i686_msvc 0.53.1",
+ "windows_x86_64_gnu 0.53.1",
+ "windows_x86_64_gnullvm 0.53.1",
+ "windows_x86_64_msvc 0.53.1",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.53.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.53.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.53.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.53.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.53.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.53.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.53.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.53.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650"
+
+[[package]]
+name = "wiremock"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08db1edfb05d9b3c1542e521aea074442088292f00b5f28e435c714a98f85031"
+dependencies = [
+ "assert-json-diff",
+ "base64",
+ "deadpool",
+ "futures",
+ "http",
+ "http-body-util",
+ "hyper",
+ "hyper-util",
+ "log",
+ "once_cell",
+ "regex",
+ "serde",
+ "serde_json",
+ "tokio",
+ "url",
+]
+
+[[package]]
+name = "wit-bindgen"
+version = "0.51.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5"
+
+[[package]]
+name = "writeable"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9"
+
+[[package]]
+name = "yoke"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954"
+dependencies = [
+ "stable_deref_trait",
+ "yoke-derive",
+ "zerofrom",
+]
+
+[[package]]
+name = "yoke-derive"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "synstructure",
+]
+
+[[package]]
+name = "zerocopy"
+version = "0.8.42"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f2578b716f8a7a858b7f02d5bd870c14bf4ddbbcf3a4c05414ba6503640505e3"
+dependencies = [
+ "zerocopy-derive",
+]
+
+[[package]]
+name = "zerocopy-derive"
+version = "0.8.42"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7e6cc098ea4d3bd6246687de65af3f920c430e236bee1e3bf2e441463f08a02f"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "zerofrom"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5"
+dependencies = [
+ "zerofrom-derive",
+]
+
+[[package]]
+name = "zerofrom-derive"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "synstructure",
+]
+
+[[package]]
+name = "zeroize"
+version = "1.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0"
+
+[[package]]
+name = "zerotrie"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851"
+dependencies = [
+ "displaydoc",
+ "yoke",
+ "zerofrom",
+]
+
+[[package]]
+name = "zerovec"
+version = "0.11.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002"
+dependencies = [
+ "yoke",
+ "zerofrom",
+ "zerovec-derive",
+]
+
+[[package]]
+name = "zerovec-derive"
+version = "0.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "zmij"
+version = "1.0.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa"
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..cce2533
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,21 @@
+[package]
+name = "cloudflare-ddns"
+version = "2.0.0"
+edition = "2021"
+description = "Access your home network remotely via a custom domain name without a static IP"
+license = "GPL-3.0"
+
+[dependencies]
+reqwest = { version = "0.12", features = ["json", "rustls-tls"], default-features = false }
+serde = { version = "1", features = ["derive"] }
+serde_json = "1"
+tokio = { version = "1", features = ["full"] }
+regex = "1"
+chrono = { version = "0.4", features = ["clock"] }
+url = "2"
+idna = "1"
+if-addrs = "0.13"
+
+[dev-dependencies]
+tempfile = "3.26.0"
+wiremock = "0.6"
diff --git a/Dockerfile b/Dockerfile
index a66379b..71fd7cb 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,18 +1,13 @@
-# ---- Base ----
-FROM python:alpine AS base
+# ---- Build ----
+FROM rust:alpine AS builder
+RUN apk add --no-cache musl-dev
+WORKDIR /build
+COPY Cargo.toml Cargo.lock ./
+COPY src ./src
+RUN cargo build --release
-#
-# ---- Dependencies ----
-FROM base AS dependencies
-# install dependencies
-COPY requirements.txt .
-RUN pip install --user -r requirements.txt
-
-#
# ---- Release ----
-FROM base AS release
-# copy installed dependencies and project source file(s)
-WORKDIR /
-COPY --from=dependencies /root/.local /root/.local
-COPY cloudflare-ddns.py .
-CMD ["python", "-u", "/cloudflare-ddns.py", "--repeat"]
+FROM alpine:latest AS release
+RUN apk add --no-cache ca-certificates
+COPY --from=builder /build/target/release/cloudflare-ddns /usr/local/bin/cloudflare-ddns
+CMD ["cloudflare-ddns", "--repeat"]
diff --git a/README.md b/README.md
index a9f82f2..d7ccc61 100755
--- a/README.md
+++ b/README.md
@@ -1,286 +1,222 @@

-# π Cloudflare DDNS
+# π Cloudflare DDNS
Access your home network remotely via a custom domain name without a static IP!
-## β‘ Efficiency
+A feature-complete dynamic DNS client for Cloudflare, written in Rust. Configure everything with environment variables. Supports notifications, heartbeat monitoring, WAF list management, flexible scheduling, and more.
-- β€οΈ Easy config. List your domains and you're done.
-- π The Python runtime will re-use existing HTTP connections.
-- ποΈ Cloudflare API responses are cached to reduce API usage.
-- π€ The Docker image is small and efficient.
-- 0οΈβ£ Zero dependencies.
-- πͺ Supports all platforms.
-- π Enables low cost self hosting to promote a more decentralized internet.
-- π Zero-log IP provider ([cdn-cgi/trace](https://www.cloudflare.com/cdn-cgi/trace))
-- π GPL-3.0 License. Open source for open audits.
+## ✨ Features
-## π― Complete Support of Domain Names, Subdomains, IPv4 & IPv6, and Load Balancing
+- π **Multiple IP detection providers** — Cloudflare Trace, Cloudflare DNS-over-HTTPS, ipify, local interface, custom URL, or static IPs
+- 📡 **IPv4 and IPv6** — Full dual-stack support with independent provider configuration
+- π **Multiple domains and zones** — Update any number of domains across multiple Cloudflare zones
+- π **Wildcard domains** — Support for `*.example.com` records
+- π **Internationalized domain names** — Full IDN/punycode support (e.g. `münchen.de`)
+- 🛡️ **WAF list management** — Automatically update Cloudflare WAF IP lists
+- π **Notifications** — Shoutrrr-compatible notifications (Discord, Slack, Telegram, Gotify, Pushover, generic webhooks)
+- π **Heartbeat monitoring** — Healthchecks.io and Uptime Kuma integration
+- ⏱️ **Cron scheduling** — Flexible update intervals via cron expressions
+- 🧪 **Dry-run mode** — Preview changes without modifying DNS records
+- 🧹 **Graceful shutdown** — Signal handling (SIGINT/SIGTERM) with optional DNS record cleanup
+- 💬 **Record comments** — Tag managed records with comments for identification
+- 🎯 **Managed record regex** — Control which records the tool manages via regex matching
+- 🎨 **Pretty output with emoji** — Configurable emoji and verbosity levels
+- π **Zero-log IP detection** — Uses Cloudflare's [cdn-cgi/trace](https://www.cloudflare.com/cdn-cgi/trace) by default
+- π **CGNAT-aware local detection** — Filters out shared address space (100.64.0.0/10) and private ranges
+- π€ **Tiny static binary** — Small Docker image, zero runtime dependencies
-- π Supports multiple domains (zones) on the same IP.
-- π Supports multiple subdomains on the same IP.
-- π‘ IPv4 and IPv6 support.
-- π Supports all Cloudflare regions.
-- βοΈ Supports [Cloudflare Load Balancing](https://developers.cloudflare.com/load-balancing/understand-basics/pools/).
-- πΊπΈ Made in the U.S.A.
-
-## π Stats
-
-| Size | Downloads | Discord |
-| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| [](https://hub.docker.com/r/timothyjmiller/cloudflare-ddns 'cloudflare-ddns docker image size') | [](https://hub.docker.com/r/timothyjmiller/cloudflare-ddns 'Total DockerHub pulls') | [](https://discord.gg/UgGmwMvNxm 'Official Discord Server') |
-
-## π¦ Getting Started
-
-First copy the example configuration file into the real one.
+## 🚀 Quick Start
```bash
-cp config-example.json config.json
+docker run -d \
+ --name cloudflare-ddns \
+ --restart unless-stopped \
+ --network host \
+ -e CLOUDFLARE_API_TOKEN=your-api-token \
+ -e DOMAINS=example.com,www.example.com \
+ timothyjmiller/cloudflare-ddns:latest
```
-Edit `config.json` and replace the values with your own.
+That's it. The container detects your public IP and updates the DNS records for your domains every 5 minutes.
-### π Authentication methods
+> ⚠️ `--network host` is required to detect IPv6 addresses. If you only need IPv4, you can omit it and set `IP6_PROVIDER=none`.
-You can choose to use either the newer API tokens, or the traditional API keys
+## π Authentication
-To generate a new API tokens, go to your [Cloudflare Profile](https://dash.cloudflare.com/profile/api-tokens) and create a token capable of **Edit DNS**. Then replace the value in
+| Variable | Description |
+|----------|-------------|
+| `CLOUDFLARE_API_TOKEN` | API token with "Edit DNS" capability |
+| `CLOUDFLARE_API_TOKEN_FILE` | Path to a file containing the API token (Docker secrets compatible) |
-```json
-"authentication":
- "api_token": "Your cloudflare API token, including the capability of **Edit DNS**"
-```
+To generate an API token, go to your [Cloudflare Profile](https://dash.cloudflare.com/profile/api-tokens) and create a token capable of **Edit DNS**.
-Alternatively, you can use the traditional API keys by setting appropriate values for:
+## π Domains
-```json
-"authentication":
- "api_key":
- "api_key": "Your cloudflare API Key",
- "account_email": "The email address you use to sign in to cloudflare",
-```
+| Variable | Description |
+|----------|-------------|
+| `DOMAINS` | Comma-separated list of domains to update for both IPv4 and IPv6 |
+| `IP4_DOMAINS` | Comma-separated list of IPv4-only domains |
+| `IP6_DOMAINS` | Comma-separated list of IPv6-only domains |
-### π Enable or disable IPv4 or IPv6
+Wildcard domains are supported: `*.example.com`
-Some ISP provided modems only allow port forwarding over IPv4 or IPv6. In this case, you would want to disable any interface not accessible via port forward.
+At least one of `DOMAINS`, `IP4_DOMAINS`, `IP6_DOMAINS`, or `WAF_LISTS` must be set.
-```json
-"a": true,
-"aaaa": true
-```
+## π IP Detection Providers
-### ποΈ Other values explained
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `IP4_PROVIDER` | `cloudflare.trace` | IPv4 detection method |
+| `IP6_PROVIDER` | `cloudflare.trace` | IPv6 detection method |
-```json
-"zone_id": "The ID of the zone that will get the records. From your dashboard click into the zone. Under the overview tab, scroll down and the zone ID is listed in the right rail",
-"subdomains": "Array of subdomains you want to update the A & where applicable, AAAA records. IMPORTANT! Only write subdomain name. Do not include the base domain name. (e.g. foo or an empty string to update the base domain)",
-"proxied": "Defaults to false. Make it true if you want CDN/SSL benefits from cloudflare. This usually disables SSH)",
-"ttl": "Defaults to 300 seconds. Longer TTLs speed up DNS lookups by increasing the chance of cached results, but a longer TTL also means that updates to your records take longer to go into effect. You can choose a TTL between 30 seconds and 1 day. For more information, see [Cloudflare's TTL documentation](https://developers.cloudflare.com/dns/manage-dns-records/reference/ttl/)",
-```
+Available providers:
-## π Hosting multiple subdomains on the same IP?
+| Provider | Description |
+|----------|-------------|
+| `cloudflare.trace` | π Cloudflare's `/cdn-cgi/trace` endpoint (default, zero-log) |
+| `cloudflare.doh` | π Cloudflare DNS-over-HTTPS (`whoami.cloudflare` TXT query) |
+| `ipify` | π ipify.org API |
+| `local` | π Local IP via system routing table (no network traffic, CGNAT-aware) |
+| `local.iface:` | π IP from a specific network interface (e.g., `local.iface:eth0`) |
+| `url:` | π Custom HTTP(S) endpoint that returns an IP address |
+| `literal:` | π Static IP addresses (comma-separated) |
+| `none` | 🚫 Disable this IP type |
-This script can be used to update multiple subdomains on the same IP address.
+## ⏱️ Scheduling
-For example, if you have a domain `example.com` and you want to host additional subdomains at `foo.example.com` and `bar.example.com` on the same IP address, you can use this script to update the DNS records for all subdomains.
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `UPDATE_CRON` | `@every 5m` | Update schedule |
+| `UPDATE_ON_START` | `true` | Run an update immediately on startup |
+| `DELETE_ON_STOP` | `false` | Delete managed DNS records on shutdown |
-### β οΈ Note
+Schedule formats:
-Please remove the comments after `//` in the below example. They are only there to explain the config.
+- `@every 5m` — Every 5 minutes
+- `@every 1h` — Every hour
+- `@every 30s` — Every 30 seconds
+- `@once` — Run once and exit
-Do not include the base domain name in your `subdomains` config. Do not use the [FQDN](https://en.wikipedia.org/wiki/Fully_qualified_domain_name).
+When `UPDATE_CRON=@once`, `UPDATE_ON_START` must be `true` and `DELETE_ON_STOP` must be `false`.
-### π Example π
+## π DNS Record Settings
-```bash
-{
- "cloudflare": [
- {
- "authentication": {
- "api_token": "api_token_here", // Either api_token or api_key
- "api_key": {
- "api_key": "api_key_here",
- "account_email": "your_email_here"
- }
- },
- "zone_id": "your_zone_id_here",
- "subdomains": [
- {
- "name": "", // Root domain (example.com)
- "proxied": true
- },
- {
- "name": "foo", // (foo.example.com)
- "proxied": true
- },
- {
- "name": "bar", // (bar.example.com)
- "proxied": true
- }
- ]
- }
- ],
- "a": true,
- "aaaa": true,
- "purgeUnknownRecords": false,
- "ttl": 300
-}
-```
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `TTL` | `1` (auto) | DNS record TTL in seconds (1=auto, or 30-86400) |
+| `PROXIED` | `false` | Expression controlling which domains are proxied through Cloudflare |
+| `RECORD_COMMENT` | (empty) | Comment attached to managed DNS records |
+| `MANAGED_RECORDS_COMMENT_REGEX` | (empty) | Regex to identify which records are managed (empty = all) |
-## π Hosting multiple domains (zones) on the same IP?
+The `PROXIED` variable supports boolean expressions:
-You can handle ddns for multiple domains (cloudflare zones) using the same docker container by duplicating your configs inside the `cloudflare: []` key within `config.json` like below:
+| Expression | Meaning |
+|------------|---------|
+| `true` | ✔️ Proxy all domains |
+| `false` | π Don't proxy any domains |
+| `is(example.com)` | 🎯 Only proxy `example.com` |
+| `sub(cdn.example.com)` | 🌳 Proxy `cdn.example.com` and its subdomains |
+| `is(a.com) \|\| is(b.com)` | π Proxy `a.com` or `b.com` |
+| `!is(vpn.example.com)` | 🚫 Proxy everything except `vpn.example.com` |
-### β οΈ Note:
+Operators: `is()`, `sub()`, `!`, `&&`, `||`, `()`
-If you are using API Tokens, make sure the token used supports editing your zone ID.
+## 🛡️ WAF Lists
-```bash
-{
- "cloudflare": [
- {
- "authentication": {
- "api_token": "api_token_here",
- "api_key": {
- "api_key": "api_key_here",
- "account_email": "your_email_here"
- }
- },
- "zone_id": "your_first_zone_id_here",
- "subdomains": [
- {
- "name": "",
- "proxied": false
- },
- {
- "name": "remove_or_replace_with_your_subdomain",
- "proxied": false
- }
- ]
- },
- {
- "authentication": {
- "api_token": "api_token_here",
- "api_key": {
- "api_key": "api_key_here",
- "account_email": "your_email_here"
- }
- },
- "zone_id": "your_second_zone_id_here",
- "subdomains": [
- {
- "name": "",
- "proxied": false
- },
- {
- "name": "remove_or_replace_with_your_subdomain",
- "proxied": false
- }
- ]
- }
- ],
- "a": true,
- "aaaa": true,
- "purgeUnknownRecords": false
-}
-```
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `WAF_LISTS` | (empty) | Comma-separated WAF lists in `account-id/list-name` format |
+| `WAF_LIST_DESCRIPTION` | (empty) | Description for managed WAF lists |
+| `WAF_LIST_ITEM_COMMENT` | (empty) | Comment for WAF list items |
+| `MANAGED_WAF_LIST_ITEMS_COMMENT_REGEX` | (empty) | Regex to identify managed WAF list items |
-## βοΈ Load Balancing
+WAF list names must match the pattern `[a-z0-9_]+`.
-If you have multiple IP addresses and want to load balance between them, you can use the `loadBalancing` option. This will create a CNAME record for each subdomain that points to the subdomain with the lowest IP address.
+## π Notifications (Shoutrrr)
-### π Example config to support load balancing
+| Variable | Description |
+|----------|-------------|
+| `SHOUTRRR` | Newline-separated list of notification service URLs |
-```json
-{
- "cloudflare": [
- {
- "authentication": {
- "api_token": "api_token_here",
- "api_key": {
- "api_key": "api_key_here",
- "account_email": "your_email_here"
- }
- },
- "zone_id": "your_zone_id_here",
- "subdomains": [
- {
- "name": "",
- "proxied": false
- },
- {
- "name": "remove_or_replace_with_your_subdomain",
- "proxied": false
- }
- ]
- }
- ],{
- "cloudflare": [
- {
- "authentication": {
- "api_token": "api_token_here",
- "api_key": {
- "api_key": "api_key_here",
- "account_email": "your_email_here"
- }
- },
- "zone_id": "your_zone_id_here",
- "subdomains": [
- {
- "name": "",
- "proxied": false
- },
- {
- "name": "remove_or_replace_with_your_subdomain",
- "proxied": false
- }
- ]
- }
- ],
- "load_balancer": [
- {
- "authentication": {
- "api_token": "api_token_here",
- "api_key": {
- "api_key": "api_key_here",
- "account_email": "your_email_here"
- }
- },
- "pool_id": "your_pool_id_here",
- "origin": "your_origin_name_here"
- }
- ],
- "a": true,
- "aaaa": true,
- "purgeUnknownRecords": false,
- "ttl": 300
-}
-```
+Supported services:
-### Docker environment variable support
+| Service | URL format |
+|---------|------------|
+| 💬 Discord | `discord://token@webhook-id` |
+| 📨 Slack | `slack://token-a/token-b/token-c` |
+| ✈️ Telegram | `telegram://bot-token@telegram?chats=chat-id` |
+| 📡 Gotify | `gotify://host/path?token=app-token` |
+| 📲 Pushover | `pushover://user-key@api-token` |
+| π Generic webhook | `generic://host/path` or `generic+https://host/path` |
-Define environmental variables starts with `CF_DDNS_` and use it in config.json
+Notifications are sent when DNS records are updated, created, deleted, or when errors occur.
-For ex:
+## π Heartbeat Monitoring
-```json
-{
- "cloudflare": [
- {
- "authentication": {
- "api_token": "${CF_DDNS_API_TOKEN}",
-```
+| Variable | Description |
+|----------|-------------|
+| `HEALTHCHECKS` | Healthchecks.io ping URL |
+| `UPTIMEKUMA` | Uptime Kuma push URL |
-### π§Ή Optional features
+Heartbeats are sent after each update cycle. On failure, a fail signal is sent. On shutdown, an exit signal is sent.
-`purgeUnknownRecords` removes stale DNS records from Cloudflare. This is useful if you have a dynamic DNS record that you no longer want to use. If you have a dynamic DNS record that you no longer want to use, you can set `purgeUnknownRecords` to `true` and the script will remove the stale DNS record from Cloudflare.
+## ⏳ Timeouts
-## π³ Deploy with Docker Compose
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `DETECTION_TIMEOUT` | `5s` | Timeout for IP detection requests |
+| `UPDATE_TIMEOUT` | `30s` | Timeout for Cloudflare API requests |
-Pre-compiled images are available via [the official docker container on DockerHub](https://hub.docker.com/r/timothyjmiller/cloudflare-ddns).
+## 🖥️ Output
-Modify the host file path of config.json inside the volumes section of docker-compose.yml.
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `EMOJI` | `true` | Use emoji in output messages |
+| `QUIET` | `false` | Suppress informational output |
+
+## π CLI Flags
+
+| Flag | Description |
+|------|-------------|
+| `--dry-run` | 🧪 Preview changes without modifying DNS records |
+| `--repeat` | π Run continuously (legacy config mode only; env var mode uses `UPDATE_CRON`) |
+
+## π All Environment Variables
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `CLOUDFLARE_API_TOKEN` | — | π API token |
+| `CLOUDFLARE_API_TOKEN_FILE` | — | π Path to API token file |
+| `DOMAINS` | — | π Domains for both IPv4 and IPv6 |
+| `IP4_DOMAINS` | — | 4️⃣ IPv4-only domains |
+| `IP6_DOMAINS` | — | 6️⃣ IPv6-only domains |
+| `IP4_PROVIDER` | `cloudflare.trace` | π IPv4 detection provider |
+| `IP6_PROVIDER` | `cloudflare.trace` | π IPv6 detection provider |
+| `UPDATE_CRON` | `@every 5m` | ⏱️ Update schedule |
+| `UPDATE_ON_START` | `true` | π Update on startup |
+| `DELETE_ON_STOP` | `false` | 🧹 Delete records on shutdown |
+| `TTL` | `1` | ⏳ DNS record TTL |
+| `PROXIED` | `false` | ☁️ Proxied expression |
+| `RECORD_COMMENT` | — | 💬 DNS record comment |
+| `MANAGED_RECORDS_COMMENT_REGEX` | — | 🎯 Managed records regex |
+| `WAF_LISTS` | — | 🛡️ WAF lists to manage |
+| `WAF_LIST_DESCRIPTION` | — | π WAF list description |
+| `WAF_LIST_ITEM_COMMENT` | — | 💬 WAF list item comment |
+| `MANAGED_WAF_LIST_ITEMS_COMMENT_REGEX` | — | 🎯 Managed WAF items regex |
+| `DETECTION_TIMEOUT` | `5s` | ⏳ IP detection timeout |
+| `UPDATE_TIMEOUT` | `30s` | ⏳ API request timeout |
+| `EMOJI` | `true` | 🎨 Enable emoji output |
+| `QUIET` | `false` | 🤫 Suppress info output |
+| `HEALTHCHECKS` | — | π Healthchecks.io URL |
+| `UPTIMEKUMA` | — | π Uptime Kuma URL |
+| `SHOUTRRR` | — | π Notification URLs (newline-separated) |
+
+---
+
+## 🚢 Deployment
+
+### 🐳 Docker Compose
```yml
version: '3.9'
@@ -292,146 +228,259 @@ services:
- no-new-privileges:true
network_mode: 'host'
environment:
- - PUID=1000
- - PGID=1000
+ - CLOUDFLARE_API_TOKEN=your-api-token
+ - DOMAINS=example.com,www.example.com
+ - PROXIED=true
+ - IP6_PROVIDER=none
+ - HEALTHCHECKS=https://hc-ping.com/your-uuid
+ restart: unless-stopped
+```
+
+> ⚠️ Docker requires `network_mode: host` to access the IPv6 public address.
+
+### ☸️ Kubernetes
+
+The included manifest uses the legacy JSON config mode. Create a secret containing your `config.json` and apply:
+
+```bash
+kubectl create secret generic config-cloudflare-ddns --from-file=config.json -n ddns
+kubectl apply -f k8s/cloudflare-ddns.yml
+```
+
+### 🐧 Linux + Systemd
+
+1. Build and install:
+
+```bash
+cargo build --release
+sudo cp target/release/cloudflare-ddns /usr/local/bin/
+```
+
+2. Copy the systemd units from the `systemd/` directory:
+
+```bash
+sudo cp systemd/cloudflare-ddns.service /etc/systemd/system/
+sudo cp systemd/cloudflare-ddns.timer /etc/systemd/system/
+```
+
+3. Place a `config.json` at `/etc/cloudflare-ddns/config.json` (the systemd service uses legacy config mode).
+
+4. Enable the timer:
+
+```bash
+sudo systemctl enable --now cloudflare-ddns.timer
+```
+
+The timer runs the service every 15 minutes (configurable in `cloudflare-ddns.timer`).
+
+## 🔨 Building from Source
+
+```bash
+cargo build --release
+```
+
+The binary is at `target/release/cloudflare-ddns`.
+
+### 🐳 Docker builds
+
+```bash
+# Single architecture (linux/amd64)
+./scripts/docker-build.sh
+
+# Multi-architecture (linux/amd64, linux/arm64, linux/arm/v7)
+./scripts/docker-build-all.sh
+```
+
+## 💻 Supported Platforms
+
+- 🐳 [Docker](https://docs.docker.com/get-docker/) (amd64, arm64, arm/v7)
+- π [Docker Compose](https://docs.docker.com/compose/install/)
+- ☸️ [Kubernetes](https://kubernetes.io/docs/tasks/tools/)
+- 🐧 [Systemd](https://www.freedesktop.org/wiki/Software/systemd/)
+- 🍎 macOS, 🪟 Windows, 🐧 Linux — anywhere Rust compiles
+
+---
+
+## π Legacy JSON Config File
+
+For backwards compatibility, cloudflare-ddns still supports configuration via a `config.json` file. This mode is used automatically when no `CLOUDFLARE_API_TOKEN` environment variable is set.
+
+### π Quick Start
+
+```bash
+cp config-example.json config.json
+# Edit config.json with your values
+cloudflare-ddns
+```
+
+### π Authentication
+
+Use either an API token (recommended) or a legacy API key:
+
+```json
+"authentication": {
+ "api_token": "Your cloudflare API token with Edit DNS capability"
+}
+```
+
+Or with a legacy API key:
+
+```json
+"authentication": {
+ "api_key": {
+ "api_key": "Your cloudflare API Key",
+ "account_email": "The email address you use to sign in to cloudflare"
+ }
+}
+```
+
+### 📡 IPv4 and IPv6
+
+Some ISP provided modems only allow port forwarding over IPv4 or IPv6. Disable the interface that is not accessible:
+
+```json
+"a": true,
+"aaaa": true
+```
+
+### ⚙️ Config Options
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| `cloudflare` | array | required | List of zone configurations |
+| `a` | bool | `true` | Enable IPv4 (A record) updates |
+| `aaaa` | bool | `true` | Enable IPv6 (AAAA record) updates |
+| `purgeUnknownRecords` | bool | `false` | Delete stale/duplicate DNS records |
+| `ttl` | int | `300` | DNS record TTL in seconds (30-86400, values < 30 become auto) |
+
+Each zone entry contains:
+
+| Key | Type | Description |
+|-----|------|-------------|
+| `authentication` | object | API token or API key credentials |
+| `zone_id` | string | Cloudflare zone ID (found in zone dashboard) |
+| `subdomains` | array | Subdomain entries to update |
+| `proxied` | bool | Default proxied status for subdomains in this zone |
+
+Subdomain entries can be a simple string or a detailed object:
+
+```json
+"subdomains": [
+ "",
+ "@",
+ "www",
+ { "name": "vpn", "proxied": true }
+]
+```
+
+Use `""` or `"@"` for the root domain. Do not include the base domain name.
+
+### π Environment Variable Substitution
+
+In the legacy config file, values can reference environment variables with the `CF_DDNS_` prefix:
+
+```json
+{
+ "cloudflare": [{
+ "authentication": {
+ "api_token": "${CF_DDNS_API_TOKEN}"
+ },
+ ...
+ }]
+}
+```
+
+### π Example: Multiple Subdomains
+
+```json
+{
+ "cloudflare": [
+ {
+ "authentication": {
+ "api_token": "your-api-token"
+ },
+ "zone_id": "your_zone_id",
+ "subdomains": [
+ { "name": "", "proxied": true },
+ { "name": "www", "proxied": true },
+ { "name": "vpn", "proxied": false }
+ ]
+ }
+ ],
+ "a": true,
+ "aaaa": true,
+ "purgeUnknownRecords": false,
+ "ttl": 300
+}
+```
+
+### π Example: Multiple Zones
+
+```json
+{
+ "cloudflare": [
+ {
+ "authentication": { "api_token": "your-api-token" },
+ "zone_id": "first_zone_id",
+ "subdomains": [
+ { "name": "", "proxied": false }
+ ]
+ },
+ {
+ "authentication": { "api_token": "your-api-token" },
+ "zone_id": "second_zone_id",
+ "subdomains": [
+ { "name": "", "proxied": false }
+ ]
+ }
+ ],
+ "a": true,
+ "aaaa": true,
+ "purgeUnknownRecords": false
+}
+```
+
+### 🐳 Docker Compose (legacy config file)
+
+```yml
+version: '3.9'
+services:
+ cloudflare-ddns:
+ image: timothyjmiller/cloudflare-ddns:latest
+ container_name: cloudflare-ddns
+ security_opt:
+ - no-new-privileges:true
+ network_mode: 'host'
volumes:
- /YOUR/PATH/HERE/config.json:/config.json
restart: unless-stopped
```
-### β οΈ IPv6
+### π Legacy CLI Flags
-Docker requires network_mode be set to host in order to access the IPv6 public address.
-
-### πββοΈ Running
-
-From the project root directory
+In legacy config mode, use `--repeat` to run continuously (the TTL value is used as the update interval):
```bash
-docker-compose up -d
+cloudflare-ddns --repeat
+cloudflare-ddns --repeat --dry-run
```
-## π Kubernetes
+---
-Create config File
+## π Helpful Links
-```bash
-cp ../../config-example.json config.json
-```
+- π [Cloudflare API token](https://dash.cloudflare.com/profile/api-tokens)
+- π [Cloudflare zone ID](https://support.cloudflare.com/hc/en-us/articles/200167836-Where-do-I-find-my-Cloudflare-IP-address-)
+- π [Cloudflare zone DNS record ID](https://support.cloudflare.com/hc/en-us/articles/360019093151-Managing-DNS-records-in-Cloudflare)
-Edit config.jsonon (vim, nvim, nano... )
+## π License
-```bash
-${EDITOR} config.json
-```
+This project is licensed under the GNU General Public License, version 3 (GPLv3).
-Create config file as Secret.
-
-```bash
-kubectl create secret generic config-cloudflare-ddns --from-file=config.json --dry-run=client -oyaml -n ddns > config-cloudflare-ddns-Secret.yaml
-```
-
-apply this secret
-
-```bash
-kubectl apply -f config-cloudflare-ddns-Secret.yaml
-rm config.json # recomended Just keep de secret on Kubernetes Cluster
-```
-
-apply this Deployment
-
-```bash
-kubectl apply -f cloudflare-ddns-Deployment.yaml
-```
-
-## π§ Deploy with Linux + Cron
-
-### π Running (all distros)
-
-This script requires Python 3.5+, which comes preinstalled on the latest version of Raspbian. Download/clone this repo and give permission to the project's bash script by running `chmod +x ./start-sync.sh`. Now you can execute `./start-sync.sh`, which will set up a virtualenv, pull in any dependencies, and fire the script.
-
-1. Upload the cloudflare-ddns folder to your home directory /home/your_username_here/
-
-2. Run the following code in terminal
-
-```bash
-crontab -e
-```
-
-3. Add the following lines to sync your DNS records every 15 minutes
-
-```bash
-*/15 * * * * /home/your_username_here/cloudflare-ddns/start-sync.sh
-```
-
-## Building from source
-
-Create a config.json file with your production credentials.
-
-### π Please Note
-
-The optional `docker-build-all.sh` script requires Docker experimental support to be enabled.
-
-Docker Hub has experimental support for multi-architecture builds. Their official blog post specifies easy instructions for building with [Mac and Windows versions of Docker Desktop](https://docs.docker.com/docker-for-mac/multi-arch/).
-
-1. Choose build platform
-
-- Multi-architecture (experimental) `docker-build-all.sh`
-
-- Linux/amd64 by default `docker-build.sh`
-
-2. Give your bash script permission to execute.
-
-```bash
-sudo chmod +x ./docker-build.sh
-```
-
-```bash
-sudo chmod +x ./docker-build-all.sh
-```
-
-3. At project root, run the `docker-build.sh` script.
-
-Recommended for local development
-
-```bash
-./docker-build.sh
-```
-
-Recommended for production
-
-```bash
-./docker-build-all.sh
-```
-
-### Run the locally compiled version
-
-```bash
-docker run -d timothyjmiller/cloudflare_ddns:latest
-```
-
-## Supported Platforms
-
-- [Docker](https://docs.docker.com/get-docker/)
-- [Docker Compose](https://docs.docker.com/compose/install/)
-- [Kubernetes](https://kubernetes.io/docs/tasks/tools/)
-- [Python 3](https://www.python.org/downloads/)
-- [Systemd](https://www.freedesktop.org/wiki/Software/systemd/)
-
-## π Helpful links
-
-- [Cloudflare API token](https://dash.cloudflare.com/profile/api-tokens)
-- [Cloudflare zone ID](https://support.cloudflare.com/hc/en-us/articles/200167836-Where-do-I-find-my-Cloudflare-IP-address-)
-- [Cloudflare zone DNS record ID](https://support.cloudflare.com/hc/en-us/articles/360019093151-Managing-DNS-records-in-Cloudflare)
-
-## License
-
-This Template is licensed under the GNU General Public License, version 3 (GPLv3).
-
-## Author
+## 👨‍💻 Author
Timothy Miller
[View my GitHub profile π‘](https://github.com/timothymiller)
-[View my personal website π»](https://timknowsbest.com)
+[View my personal website 💻](https://itstmillertime.com)
diff --git a/cloudflare-ddns.py b/cloudflare-ddns.py
deleted file mode 100755
index 64dc6e9..0000000
--- a/cloudflare-ddns.py
+++ /dev/null
@@ -1,319 +0,0 @@
-#!/usr/bin/env python3
-# cloudflare-ddns.py
-# Summary: Access your home network remotely via a custom domain name without a static IP!
-# Description: Access your home network remotely via a custom domain
-# Access your home network remotely via a custom domain
-# A small, π΅οΈ privacy centric, and β‘
-# lightning fast multi-architecture Docker image for self hosting projects.
-
-__version__ = "1.0.2"
-
-from string import Template
-
-import json
-import os
-import signal
-import sys
-import threading
-import time
-import requests
-
-CONFIG_PATH = os.environ.get('CONFIG_PATH', os.getcwd())
-# Read in all environment variables that have the correct prefix
-ENV_VARS = {key: value for (key, value) in os.environ.items() if key.startswith('CF_DDNS_')}
-
-class GracefulExit:
- def __init__(self):
- self.kill_now = threading.Event()
- signal.signal(signal.SIGINT, self.exit_gracefully)
- signal.signal(signal.SIGTERM, self.exit_gracefully)
-
- def exit_gracefully(self, signum, frame):
- print("π Stopping main thread...")
- self.kill_now.set()
-
-
-def deleteEntries(type):
- # Helper function for deleting A or AAAA records
- # in the case of no IPv4 or IPv6 connection, yet
- # existing A or AAAA records are found.
- for option in config["cloudflare"]:
- answer = cf_api(
- "zones/" + option['zone_id'] +
- "/dns_records?per_page=100&type=" + type,
- "GET", option)
- if answer is None or answer["result"] is None:
- time.sleep(5)
- return
- for record in answer["result"]:
- identifier = str(record["id"])
- cf_api(
- "zones/" + option['zone_id'] + "/dns_records/" + identifier,
- "DELETE", option)
- print("ποΈ Deleted stale record " + identifier)
-
-
-def getIPs():
- a = None
- aaaa = None
- global ipv4_enabled
- global ipv6_enabled
- global purgeUnknownRecords
- if ipv4_enabled:
- try:
- a = requests.get(
- "https://1.1.1.1/cdn-cgi/trace").text.split("\n")
- a.pop()
- a = dict(s.split("=") for s in a)["ip"]
- except Exception:
- global shown_ipv4_warning
- if not shown_ipv4_warning:
- shown_ipv4_warning = True
- print("π§© IPv4 not detected via 1.1.1.1, trying 1.0.0.1")
- # Try secondary IP check
- try:
- a = requests.get(
- "https://1.0.0.1/cdn-cgi/trace").text.split("\n")
- a.pop()
- a = dict(s.split("=") for s in a)["ip"]
- except Exception:
- global shown_ipv4_warning_secondary
- if not shown_ipv4_warning_secondary:
- shown_ipv4_warning_secondary = True
- print("π§© IPv4 not detected via 1.0.0.1. Verify your ISP or DNS provider isn't blocking Cloudflare's IPs.")
- if purgeUnknownRecords:
- deleteEntries("A")
- if ipv6_enabled:
- try:
- aaaa = requests.get(
- "https://[2606:4700:4700::1111]/cdn-cgi/trace").text.split("\n")
- aaaa.pop()
- aaaa = dict(s.split("=") for s in aaaa)["ip"]
- except Exception:
- global shown_ipv6_warning
- if not shown_ipv6_warning:
- shown_ipv6_warning = True
- print("π§© IPv6 not detected via 1.1.1.1, trying 1.0.0.1")
- try:
- aaaa = requests.get(
- "https://[2606:4700:4700::1001]/cdn-cgi/trace").text.split("\n")
- aaaa.pop()
- aaaa = dict(s.split("=") for s in aaaa)["ip"]
- except Exception:
- global shown_ipv6_warning_secondary
- if not shown_ipv6_warning_secondary:
- shown_ipv6_warning_secondary = True
- print("π§© IPv6 not detected via 1.0.0.1. Verify your ISP or DNS provider isn't blocking Cloudflare's IPs.")
- if purgeUnknownRecords:
- deleteEntries("AAAA")
- ips = {}
- if (a is not None):
- ips["ipv4"] = {
- "type": "A",
- "ip": a
- }
- if (aaaa is not None):
- ips["ipv6"] = {
- "type": "AAAA",
- "ip": aaaa
- }
- return ips
-
-
-def commitRecord(ip):
- global ttl
- for option in config["cloudflare"]:
- subdomains = option["subdomains"]
- response = cf_api("zones/" + option['zone_id'], "GET", option)
- if response is None or response["result"]["name"] is None:
- time.sleep(5)
- return
- base_domain_name = response["result"]["name"]
- for subdomain in subdomains:
- try:
- name = subdomain["name"].lower().strip()
- proxied = subdomain["proxied"]
- except:
- name = subdomain
- proxied = option["proxied"]
- fqdn = base_domain_name
- # Check if name provided is a reference to the root domain
- if name != '' and name != '@':
- fqdn = name + "." + base_domain_name
- record = {
- "type": ip["type"],
- "name": fqdn,
- "content": ip["ip"],
- "proxied": proxied,
- "ttl": ttl
- }
- dns_records = cf_api(
- "zones/" + option['zone_id'] +
- "/dns_records?per_page=100&type=" + ip["type"],
- "GET", option)
- identifier = None
- modified = False
- duplicate_ids = []
- if dns_records is not None:
- for r in dns_records["result"]:
- if (r["name"] == fqdn):
- if identifier:
- if r["content"] == ip["ip"]:
- duplicate_ids.append(identifier)
- identifier = r["id"]
- else:
- duplicate_ids.append(r["id"])
- else:
- identifier = r["id"]
- if r['content'] != record['content'] or r['proxied'] != record['proxied']:
- modified = True
- if identifier:
- if modified:
- print("π‘ Updating record " + str(record))
- response = cf_api(
- "zones/" + option['zone_id'] +
- "/dns_records/" + identifier,
- "PUT", option, {}, record)
- else:
- print("β Adding new record " + str(record))
- response = cf_api(
- "zones/" + option['zone_id'] + "/dns_records", "POST", option, {}, record)
- if purgeUnknownRecords:
- for identifier in duplicate_ids:
- identifier = str(identifier)
- print("ποΈ Deleting stale record " + identifier)
- response = cf_api(
- "zones/" + option['zone_id'] +
- "/dns_records/" + identifier,
- "DELETE", option)
- return True
-
-
-def updateLoadBalancer(ip):
-
- for option in config["load_balancer"]:
- pools = cf_api('user/load_balancers/pools', 'GET', option)
-
- if pools:
- idxr = dict((p['id'], i) for i, p in enumerate(pools['result']))
- idx = idxr.get(option['pool_id'])
-
- origins = pools['result'][idx]['origins']
-
- idxr = dict((o['name'], i) for i, o in enumerate(origins))
- idx = idxr.get(option['origin'])
-
- origins[idx]['address'] = ip['ip']
- data = {'origins': origins}
-
- response = cf_api(f'user/load_balancers/pools/{option["pool_id"]}', 'PATCH', option, {}, data)
-
-
-def cf_api(endpoint, method, config, headers={}, data=False):
- api_token = config['authentication']['api_token']
- if api_token != '' and api_token != 'api_token_here':
- headers = {
- "Authorization": "Bearer " + api_token, **headers
- }
- else:
- headers = {
- "X-Auth-Email": config['authentication']['api_key']['account_email'],
- "X-Auth-Key": config['authentication']['api_key']['api_key'],
- }
- try:
- if (data == False):
- response = requests.request(
- method, "https://api.cloudflare.com/client/v4/" + endpoint, headers=headers)
- else:
- response = requests.request(
- method, "https://api.cloudflare.com/client/v4/" + endpoint,
- headers=headers, json=data)
-
- if response.ok:
- return response.json()
- else:
- print("π‘ Error sending '" + method +
- "' request to '" + response.url + "':")
- print(response.text)
- return None
- except Exception as e:
- print("π‘ An exception occurred while sending '" +
- method + "' request to '" + endpoint + "': " + str(e))
- return None
-
-
-def updateIPs(ips):
- for ip in ips.values():
- commitRecord(ip)
- #updateLoadBalancer(ip)
-
-
-if __name__ == '__main__':
- shown_ipv4_warning = False
- shown_ipv4_warning_secondary = False
- shown_ipv6_warning = False
- shown_ipv6_warning_secondary = False
- ipv4_enabled = True
- ipv6_enabled = True
- purgeUnknownRecords = False
-
- if sys.version_info < (3, 5):
- raise Exception("π This script requires Python 3.5+")
-
- config = None
- try:
- with open(os.path.join(CONFIG_PATH, "config.json")) as config_file:
- if len(ENV_VARS) != 0:
- config = json.loads(Template(config_file.read()).safe_substitute(ENV_VARS))
- else:
- config = json.loads(config_file.read())
- except:
- print("π‘ Error reading config.json")
- # wait 10 seconds to prevent excessive logging on docker auto restart
- time.sleep(10)
-
- if config is not None:
- try:
- ipv4_enabled = config["a"]
- ipv6_enabled = config["aaaa"]
- except:
- ipv4_enabled = True
- ipv6_enabled = True
- print("βοΈ Individually disable IPv4 or IPv6 with new config.json options. Read more about it here: https://github.com/timothymiller/cloudflare-ddns/blob/master/README.md")
- try:
- purgeUnknownRecords = config["purgeUnknownRecords"]
- except:
- purgeUnknownRecords = False
- print("βοΈ No config detected for 'purgeUnknownRecords' - defaulting to False")
- try:
- ttl = int(config["ttl"])
- except:
- ttl = 300 # default Cloudflare TTL
- print(
- "βοΈ No config detected for 'ttl' - defaulting to 300 seconds (5 minutes)")
- if ttl < 30:
- ttl = 1 #
- print("βοΈ TTL is too low - defaulting to 1 (auto)")
- if (len(sys.argv) > 1):
- if (sys.argv[1] == "--repeat"):
- if ipv4_enabled and ipv6_enabled:
- print(
- "π°οΈ Updating IPv4 (A) & IPv6 (AAAA) records every " + str(ttl) + " seconds")
- elif ipv4_enabled and not ipv6_enabled:
- print("π°οΈ Updating IPv4 (A) records every " +
- str(ttl) + " seconds")
- elif ipv6_enabled and not ipv4_enabled:
- print("π°οΈ Updating IPv6 (AAAA) records every " +
- str(ttl) + " seconds")
- next_time = time.time()
- killer = GracefulExit()
- prev_ips = None
- while True:
- updateIPs(getIPs())
- if killer.kill_now.wait(ttl):
- break
- else:
- print("β Unrecognized parameter '" +
- sys.argv[1] + "'. Stopping now.")
- else:
- updateIPs(getIPs())
diff --git a/docker/docker-compose-env.yml b/docker/docker-compose-env.yml
new file mode 100644
index 0000000..c1530da
--- /dev/null
+++ b/docker/docker-compose-env.yml
@@ -0,0 +1,19 @@
+version: '3.9'
+services:
+ cloudflare-ddns:
+ image: timothyjmiller/cloudflare-ddns:latest
+ container_name: cloudflare-ddns
+ security_opt:
+ - no-new-privileges:true
+ network_mode: 'host'
+ environment:
+ - CLOUDFLARE_API_TOKEN=your-api-token-here
+ - DOMAINS=example.com,www.example.com
+ - PROXIED=false
+ - TTL=1
+ - UPDATE_CRON=@every 5m
+ # - IP6_PROVIDER=none
+ # - HEALTHCHECKS=https://hc-ping.com/your-uuid
+ # - UPTIMEKUMA=https://kuma.example.com/api/push/your-token
+ # - SHOUTRRR=discord://token@webhook-id
+ restart: unless-stopped
diff --git a/env-example b/env-example
new file mode 100644
index 0000000..58bc3ad
--- /dev/null
+++ b/env-example
@@ -0,0 +1,98 @@
+# Cloudflare DDNS - Environment Variable Configuration
+# Copy this file to .env and set your values.
+# Setting CLOUDFLARE_API_TOKEN activates environment variable mode.
+
+# === Required ===
+
+# Cloudflare API token with "Edit DNS" capability
+CLOUDFLARE_API_TOKEN=your-api-token-here
+# Or read from a file:
+# CLOUDFLARE_API_TOKEN_FILE=/run/secrets/cloudflare_token
+
+# Domains to update (comma-separated)
+# At least one of DOMAINS, IP4_DOMAINS, IP6_DOMAINS, or WAF_LISTS must be set
+DOMAINS=example.com,www.example.com
+# IP4_DOMAINS=v4only.example.com
+# IP6_DOMAINS=v6only.example.com
+
+# === IP Detection ===
+
+# Provider for IPv4 detection (default: cloudflare.trace)
+# Options: cloudflare.trace, cloudflare.doh, ipify, local, local.iface:<iface>,
+# url:<URL>, literal:<IP>, none
+# IP4_PROVIDER=cloudflare.trace
+
+# Provider for IPv6 detection (default: cloudflare.trace)
+# IP6_PROVIDER=cloudflare.trace
+
+# === Scheduling ===
+
+# Update schedule (default: @every 5m)
+# Formats: @every 5m, @every 1h, @every 30s, @once
+# UPDATE_CRON=@every 5m
+
+# Run an update immediately on startup (default: true)
+# UPDATE_ON_START=true
+
+# Delete managed DNS records on shutdown (default: false)
+# DELETE_ON_STOP=false
+
+# === DNS Records ===
+
+# TTL in seconds: 1=auto, or 30-86400 (default: 1)
+# TTL=1
+
+# Proxied expression: true, false, is(domain), sub(domain), or boolean combos
+# PROXIED=false
+
+# Comment to attach to managed DNS records
+# RECORD_COMMENT=Managed by cloudflare-ddns
+
+# Regex to identify which records are managed (empty = all matching records)
+# MANAGED_RECORDS_COMMENT_REGEX=cloudflare-ddns
+
+# === WAF Lists ===
+
+# Comma-separated WAF lists in account-id/list-name format
+# WAF_LISTS=account123/my_ip_list
+
+# Description for managed WAF lists
+# WAF_LIST_DESCRIPTION=Dynamic IP list
+
+# Comment for WAF list items
+# WAF_LIST_ITEM_COMMENT=cloudflare-ddns
+
+# Regex to identify managed WAF list items
+# MANAGED_WAF_LIST_ITEMS_COMMENT_REGEX=cloudflare-ddns
+
+# === Notifications ===
+
+# Shoutrrr notification URLs (newline-separated)
+# SHOUTRRR=discord://token@webhook-id
+# SHOUTRRR=slack://token-a/token-b/token-c
+# SHOUTRRR=telegram://bot-token@telegram?chats=chat-id
+# SHOUTRRR=generic+https://hooks.example.com/webhook
+
+# === Heartbeat Monitoring ===
+
+# Healthchecks.io ping URL
+# HEALTHCHECKS=https://hc-ping.com/your-uuid
+
+# Uptime Kuma push URL
+# UPTIMEKUMA=https://your-uptime-kuma.com/api/push/your-token
+
+# === Timeouts ===
+
+# IP detection timeout (default: 5s)
+# DETECTION_TIMEOUT=5s
+
+# Cloudflare API request timeout (default: 30s)
+# UPDATE_TIMEOUT=30s
+
+# === Output ===
+
+# Use emoji in output (default: true)
+# EMOJI=true
+
+# Suppress informational output (default: false)
+# QUIET=false
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 077c95d..0000000
--- a/requirements.txt
+++ /dev/null
@@ -1 +0,0 @@
-requests==2.31.0
\ No newline at end of file
diff --git a/scripts/docker-build-all.sh b/scripts/docker-build-all.sh
index 3bfcb08..0cd1507 100755
--- a/scripts/docker-build-all.sh
+++ b/scripts/docker-build-all.sh
@@ -1,4 +1,3 @@
#!/bin/bash
BASH_DIR=$(dirname $(realpath "${BASH_SOURCE}"))
-docker buildx build --platform linux/ppc64le,linux/s390x,linux/386,linux/arm/v6,linux/arm/v7,linux/arm64/v8,linux/amd64 --tag timothyjmiller/cloudflare-ddns:latest ${BASH_DIR}/../
-# TODO: Support linux/riscv64
+docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 --tag timothyjmiller/cloudflare-ddns:latest ${BASH_DIR}/../
diff --git a/scripts/docker-publish.sh b/scripts/docker-publish.sh
index 4e5b09b..654b7c6 100755
--- a/scripts/docker-publish.sh
+++ b/scripts/docker-publish.sh
@@ -1,3 +1,8 @@
#!/bin/bash
BASH_DIR=$(dirname $(realpath "${BASH_SOURCE}"))
-docker buildx build --platform linux/ppc64le,linux/s390x,linux/386,linux/arm/v6,linux/arm/v7,linux/arm64/v8,linux/amd64 --tag timothyjmiller/cloudflare-ddns:latest --push ${BASH_DIR}/../
+VERSION=$(grep '^version' ${BASH_DIR}/../Cargo.toml | head -1 | sed 's/.*"\(.*\)".*/\1/')
+docker buildx build \
+ --platform linux/amd64,linux/arm64,linux/arm/v7 \
+ --tag timothyjmiller/cloudflare-ddns:latest \
+ --tag timothyjmiller/cloudflare-ddns:${VERSION} \
+ --push ${BASH_DIR}/../
diff --git a/src/cloudflare.rs b/src/cloudflare.rs
new file mode 100644
index 0000000..1501c1e
--- /dev/null
+++ b/src/cloudflare.rs
@@ -0,0 +1,1774 @@
+use crate::pp::{self, PP};
+use reqwest::Client;
+use serde::{Deserialize, Serialize};
+use std::net::IpAddr;
+use std::time::Duration;
+
+// --- TTL ---
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct TTL(pub i64);
+
+impl TTL {
+ pub const AUTO: TTL = TTL(1);
+
+ pub fn new(value: i64) -> Self {
+ if value < 30 {
+ TTL::AUTO
+ } else {
+ TTL(value)
+ }
+ }
+
+ pub fn value(&self) -> i64 {
+ self.0
+ }
+
+ pub fn describe(&self) -> String {
+ if self.0 == 1 {
+ "auto".to_string()
+ } else {
+ format!("{}s", self.0)
+ }
+ }
+}
+
+// --- Auth ---
+
+#[derive(Debug, Clone)]
+pub enum Auth {
+ Token(String),
+ Key { api_key: String, email: String },
+}
+
+impl Auth {
+ pub fn apply(&self, req: reqwest::RequestBuilder) -> reqwest::RequestBuilder {
+ match self {
+ Auth::Token(token) => req.header("Authorization", format!("Bearer {token}")),
+ Auth::Key { api_key, email } => req
+ .header("X-Auth-Email", email)
+ .header("X-Auth-Key", api_key),
+ }
+ }
+}
+
+// --- WAF List ---
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct WAFList {
+ pub account_id: String,
+ pub list_name: String,
+}
+
+impl WAFList {
+ pub fn parse(input: &str) -> Result<Self, String> {
+ let parts: Vec<&str> = input.splitn(2, '/').collect();
+ if parts.len() != 2 {
+ return Err(format!("WAF list must be in format 'account-id/list-name': {input}"));
+ }
+ let account_id = parts[0].trim().to_string();
+ let list_name = parts[1].trim().to_string();
+
+ if !list_name.chars().all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '_') {
+ return Err(format!("WAF list name must match [a-z0-9_]+: {list_name}"));
+ }
+
+ Ok(WAFList {
+ account_id,
+ list_name,
+ })
+ }
+
+ pub fn describe(&self) -> String {
+ format!("{}/{}", self.account_id, self.list_name)
+ }
+}
+
+// --- API Response Types ---
+
+#[derive(Debug, Deserialize)]
+pub struct CfResponse<T> {
+ pub result: Option<T>,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct CfListResponse<T> {
+ pub result: Option<Vec<T>>,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct ZoneResult {
+ pub id: String,
+ #[allow(dead_code)]
+ pub name: String,
+}
+
+#[derive(Debug, Deserialize, Clone)]
+pub struct DnsRecord {
+ pub id: String,
+ pub name: String,
+ pub content: String,
+ pub proxied: Option<bool>,
+ pub ttl: Option<i64>,
+ pub comment: Option<String>,
+}
+
+#[derive(Debug, Serialize)]
+pub struct DnsRecordPayload {
+ #[serde(rename = "type")]
+ pub record_type: String,
+ pub name: String,
+ pub content: String,
+ pub proxied: bool,
+ pub ttl: i64,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub comment: Option<String>,
+}
+
+// --- WAF API Types ---
+
+#[derive(Debug, Deserialize)]
+pub struct WAFListMeta {
+ pub id: String,
+ pub name: String,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct WAFListItem {
+ pub id: String,
+ pub ip: Option,
+ pub comment: Option,
+}
+
+#[derive(Debug, Serialize)]
+pub struct WAFListCreateItem {
+ pub ip: String,
+ pub comment: Option<String>,
+}
+
+// --- Cloudflare API Handle ---
+
+pub struct CloudflareHandle {
+ client: Client,
+ base_url: String,
+ auth: Auth,
+ managed_comment_regex: Option<regex::Regex>,
+ managed_waf_comment_regex: Option<regex::Regex>,
+}
+
+impl CloudflareHandle {
+ pub fn new(
+ auth: Auth,
+ update_timeout: Duration,
+ managed_comment_regex: Option<regex::Regex>,
+ managed_waf_comment_regex: Option<regex::Regex>,
+ ) -> Self {
+ let client = Client::builder()
+ .timeout(update_timeout)
+ .build()
+ .expect("Failed to build HTTP client");
+
+ Self {
+ client,
+ base_url: "https://api.cloudflare.com/client/v4".to_string(),
+ auth,
+ managed_comment_regex,
+ managed_waf_comment_regex,
+ }
+ }
+
+ #[cfg(test)]
+ pub fn with_base_url(
+ base_url: &str,
+ auth: Auth,
+ ) -> Self {
+ let client = Client::builder()
+ .timeout(Duration::from_secs(10))
+ .build()
+ .expect("Failed to build HTTP client");
+
+ Self {
+ client,
+ base_url: base_url.to_string(),
+ auth,
+ managed_comment_regex: None,
+ managed_waf_comment_regex: None,
+ }
+ }
+
+ fn api_url(&self, path: &str) -> String {
+ format!("{}/{path}", self.base_url)
+ }
+
+ async fn api_get<T: serde::de::DeserializeOwned>(
+ &self,
+ path: &str,
+ ppfmt: &PP,
+ ) -> Option<T> {
+ let url = self.api_url(path);
+ let req = self.auth.apply(self.client.get(&url));
+ match req.send().await {
+ Ok(resp) => {
+ if resp.status().is_success() {
+ resp.json::<T>().await.ok()
+ } else {
+ let url_str = resp.url().to_string();
+ let text = resp.text().await.unwrap_or_default();
+ ppfmt.errorf(pp::EMOJI_ERROR, &format!("API GET '{url_str}' failed: {text}"));
+ None
+ }
+ }
+ Err(e) => {
+ ppfmt.errorf(pp::EMOJI_ERROR, &format!("API GET '{path}' error: {e}"));
+ None
+ }
+ }
+ }
+
+ async fn api_post<T: serde::de::DeserializeOwned, B: Serialize>(
+ &self,
+ path: &str,
+ body: &B,
+ ppfmt: &PP,
+ ) -> Option<T> {
+ let url = self.api_url(path);
+ let req = self.auth.apply(self.client.post(&url)).json(body);
+ match req.send().await {
+ Ok(resp) => {
+ if resp.status().is_success() {
+ resp.json::<T>().await.ok()
+ } else {
+ let url_str = resp.url().to_string();
+ let text = resp.text().await.unwrap_or_default();
+ ppfmt.errorf(pp::EMOJI_ERROR, &format!("API POST '{url_str}' failed: {text}"));
+ None
+ }
+ }
+ Err(e) => {
+ ppfmt.errorf(pp::EMOJI_ERROR, &format!("API POST '{path}' error: {e}"));
+ None
+ }
+ }
+ }
+
+ async fn api_put<T: serde::de::DeserializeOwned, B: Serialize>(
+ &self,
+ path: &str,
+ body: &B,
+ ppfmt: &PP,
+ ) -> Option<T> {
+ let url = self.api_url(path);
+ let req = self.auth.apply(self.client.put(&url)).json(body);
+ match req.send().await {
+ Ok(resp) => {
+ if resp.status().is_success() {
+ resp.json::<T>().await.ok()
+ } else {
+ let url_str = resp.url().to_string();
+ let text = resp.text().await.unwrap_or_default();
+ ppfmt.errorf(pp::EMOJI_ERROR, &format!("API PUT '{url_str}' failed: {text}"));
+ None
+ }
+ }
+ Err(e) => {
+ ppfmt.errorf(pp::EMOJI_ERROR, &format!("API PUT '{path}' error: {e}"));
+ None
+ }
+ }
+ }
+
+ async fn api_delete<T: serde::de::DeserializeOwned>(
+ &self,
+ path: &str,
+ ppfmt: &PP,
+ ) -> Option<T> {
+ let url = self.api_url(path);
+ let req = self.auth.apply(self.client.delete(&url));
+ match req.send().await {
+ Ok(resp) => {
+ if resp.status().is_success() {
+ resp.json::<T>().await.ok()
+ } else {
+ let url_str = resp.url().to_string();
+ let text = resp.text().await.unwrap_or_default();
+ ppfmt.errorf(pp::EMOJI_ERROR, &format!("API DELETE '{url_str}' failed: {text}"));
+ None
+ }
+ }
+ Err(e) => {
+ ppfmt.errorf(pp::EMOJI_ERROR, &format!("API DELETE '{path}' error: {e}"));
+ None
+ }
+ }
+ }
+
+ // --- Zone Operations ---
+
+ pub async fn zone_id_of_domain(&self, domain: &str, ppfmt: &PP) -> Option<String> {
+ // Try to find zone by iterating parent domains
+ let mut current = domain.to_string();
+ loop {
+ let resp: Option<CfListResponse<ZoneResult>> = self
+ .api_get(&format!("zones?name={current}"), ppfmt)
+ .await;
+ if let Some(r) = resp {
+ if let Some(zones) = r.result {
+ if let Some(zone) = zones.first() {
+ return Some(zone.id.clone());
+ }
+ }
+ }
+ // Try parent domain
+ if let Some(pos) = current.find('.') {
+ current = current[pos + 1..].to_string();
+ if !current.contains('.') {
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+ None
+ }
+
+ // --- DNS Record Operations ---
+
+ pub async fn list_records(
+ &self,
+ zone_id: &str,
+ record_type: &str,
+ ppfmt: &PP,
+ ) -> Vec<DnsRecord> {
+ let path = format!("zones/{zone_id}/dns_records?per_page=100&type={record_type}");
+ let resp: Option<CfListResponse<DnsRecord>> = self.api_get(&path, ppfmt).await;
+ resp.and_then(|r| r.result).unwrap_or_default()
+ }
+
+ pub async fn list_records_by_name(
+ &self,
+ zone_id: &str,
+ record_type: &str,
+ name: &str,
+ ppfmt: &PP,
+ ) -> Vec<DnsRecord> {
+ let records = self.list_records(zone_id, record_type, ppfmt).await;
+ records.into_iter().filter(|r| r.name == name).collect()
+ }
+
+ fn is_managed_record(&self, record: &DnsRecord) -> bool {
+ match &self.managed_comment_regex {
+ Some(regex) => {
+ let comment = record.comment.as_deref().unwrap_or("");
+ regex.is_match(comment)
+ }
+ None => true, // No regex = manage all records
+ }
+ }
+
+ pub async fn create_record(
+ &self,
+ zone_id: &str,
+ payload: &DnsRecordPayload,
+ ppfmt: &PP,
+ ) -> Option<DnsRecord> {
+ let path = format!("zones/{zone_id}/dns_records");
+ let resp: Option<CfResponse<DnsRecord>> = self.api_post(&path, payload, ppfmt).await;
+ resp.and_then(|r| r.result)
+ }
+
+ pub async fn update_record(
+ &self,
+ zone_id: &str,
+ record_id: &str,
+ payload: &DnsRecordPayload,
+ ppfmt: &PP,
+ ) -> Option<DnsRecord> {
+ let path = format!("zones/{zone_id}/dns_records/{record_id}");
+ let resp: Option<CfResponse<DnsRecord>> = self.api_put(&path, payload, ppfmt).await;
+ resp.and_then(|r| r.result)
+ }
+
+ pub async fn delete_record(
+ &self,
+ zone_id: &str,
+ record_id: &str,
+ ppfmt: &PP,
+ ) -> bool {
+ let path = format!("zones/{zone_id}/dns_records/{record_id}");
+ let resp: Option<CfResponse<serde_json::Value>> = self.api_delete(&path, ppfmt).await;
+ resp.is_some()
+ }
+
+ /// Set IPs for a specific domain/record type. Handles create, update, delete, and dedup.
+ pub async fn set_ips(
+ &self,
+ zone_id: &str,
+ fqdn: &str,
+ record_type: &str,
+ ips: &[IpAddr],
+ proxied: bool,
+ ttl: TTL,
+ comment: Option<&str>,
+ dry_run: bool,
+ ppfmt: &PP,
+ ) -> SetResult {
+ let existing = self.list_records_by_name(zone_id, record_type, fqdn, ppfmt).await;
+ let managed: Vec<&DnsRecord> = existing.iter().filter(|r| self.is_managed_record(r)).collect();
+
+ if ips.is_empty() {
+ // Delete all managed records
+ if managed.is_empty() {
+ return SetResult::Noop;
+ }
+ for record in &managed {
+ if dry_run {
+ ppfmt.noticef(pp::EMOJI_DELETE, &format!("[DRY RUN] Would delete record {fqdn} ({})", record.content));
+ } else {
+ ppfmt.noticef(pp::EMOJI_DELETE, &format!("Deleting record {fqdn} ({})", record.content));
+ self.delete_record(zone_id, &record.id, ppfmt).await;
+ }
+ }
+ return SetResult::Updated;
+ }
+
+ // For each IP, find or create a record
+ let mut used_record_ids = Vec::new();
+ let mut any_change = false;
+
+ for ip in ips {
+ let ip_str = ip.to_string();
+
+ // Find existing record with this IP
+ let matching = managed.iter().find(|r| {
+ r.content == ip_str && !used_record_ids.contains(&&r.id)
+ });
+
+ if let Some(record) = matching {
+ used_record_ids.push(&record.id);
+ // Check if update needed (proxied or TTL changed)
+ let needs_update = record.proxied != Some(proxied)
+ || (ttl != TTL::AUTO && record.ttl != Some(ttl.value()))
+ || (comment.is_some() && record.comment.as_deref() != comment);
+
+ if needs_update {
+ any_change = true;
+ let payload = DnsRecordPayload {
+ record_type: record_type.to_string(),
+ name: fqdn.to_string(),
+ content: ip_str.clone(),
+ proxied,
+ ttl: ttl.value(),
+ comment: comment.map(|s| s.to_string()),
+ };
+ if dry_run {
+ ppfmt.noticef(pp::EMOJI_UPDATE, &format!("[DRY RUN] Would update record {fqdn} -> {ip_str}"));
+ } else {
+ ppfmt.noticef(pp::EMOJI_UPDATE, &format!("Updating record {fqdn} -> {ip_str}"));
+ self.update_record(zone_id, &record.id, &payload, ppfmt).await;
+ }
+ } else {
+ ppfmt.infof(pp::EMOJI_SKIP, &format!("Record {fqdn} is up to date ({ip_str})"));
+ }
+ } else {
+ // Find an existing managed record to update, or create new
+ let reusable = managed.iter().find(|r| {
+ !used_record_ids.contains(&&r.id)
+ });
+
+ let payload = DnsRecordPayload {
+ record_type: record_type.to_string(),
+ name: fqdn.to_string(),
+ content: ip_str.clone(),
+ proxied,
+ ttl: ttl.value(),
+ comment: comment.map(|s| s.to_string()),
+ };
+
+ if let Some(record) = reusable {
+ used_record_ids.push(&record.id);
+ any_change = true;
+ if dry_run {
+ ppfmt.noticef(pp::EMOJI_UPDATE, &format!("[DRY RUN] Would update record {fqdn} -> {ip_str}"));
+ } else {
+ ppfmt.noticef(pp::EMOJI_UPDATE, &format!("Updating record {fqdn} -> {ip_str}"));
+ self.update_record(zone_id, &record.id, &payload, ppfmt).await;
+ }
+ } else {
+ any_change = true;
+ if dry_run {
+ ppfmt.noticef(pp::EMOJI_CREATE, &format!("[DRY RUN] Would add new record {fqdn} -> {ip_str}"));
+ } else {
+ ppfmt.noticef(pp::EMOJI_CREATE, &format!("Adding new record {fqdn} -> {ip_str}"));
+ self.create_record(zone_id, &payload, ppfmt).await;
+ }
+ }
+ }
+ }
+
+ // Delete extra managed records (duplicates)
+ for record in &managed {
+ if !used_record_ids.contains(&&record.id) {
+ any_change = true;
+ if dry_run {
+ ppfmt.noticef(pp::EMOJI_DELETE, &format!("[DRY RUN] Would delete stale record {} ({})", fqdn, record.content));
+ } else {
+ ppfmt.noticef(pp::EMOJI_DELETE, &format!("Deleting stale record {} ({})", fqdn, record.content));
+ self.delete_record(zone_id, &record.id, ppfmt).await;
+ }
+ }
+ }
+
+ if any_change {
+ SetResult::Updated
+ } else {
+ SetResult::Noop
+ }
+ }
+
+ /// Delete all managed records for a specific domain/record type.
+ pub async fn final_delete(
+ &self,
+ zone_id: &str,
+ fqdn: &str,
+ record_type: &str,
+ ppfmt: &PP,
+ ) {
+ let existing = self.list_records_by_name(zone_id, record_type, fqdn, ppfmt).await;
+ for record in &existing {
+ if self.is_managed_record(record) {
+ ppfmt.noticef(pp::EMOJI_DELETE, &format!("Deleting record {fqdn} ({})", record.content));
+ self.delete_record(zone_id, &record.id, ppfmt).await;
+ }
+ }
+ }
+
+ // --- WAF List Operations ---
+
+ pub async fn find_waf_list(
+ &self,
+ waf_list: &WAFList,
+ ppfmt: &PP,
+ ) -> Option<WAFListMeta> {
+ let path = format!("accounts/{}/rules/lists", waf_list.account_id);
+ let resp: Option<CfListResponse<WAFListMeta>> = self.api_get(&path, ppfmt).await;
+ resp.and_then(|r| r.result)
+ .and_then(|lists| lists.into_iter().find(|l| l.name == waf_list.list_name))
+ }
+
+ pub async fn list_waf_list_items(
+ &self,
+ account_id: &str,
+ list_id: &str,
+ ppfmt: &PP,
+ ) -> Vec<WAFListItem> {
+ let path = format!("accounts/{account_id}/rules/lists/{list_id}/items");
+ let resp: Option<CfListResponse<WAFListItem>> = self.api_get(&path, ppfmt).await;
+ resp.and_then(|r| r.result).unwrap_or_default()
+ }
+
+ pub async fn create_waf_list_items(
+ &self,
+ account_id: &str,
+ list_id: &str,
+ items: &[WAFListCreateItem],
+ ppfmt: &PP,
+ ) -> bool {
+ let path = format!("accounts/{account_id}/rules/lists/{list_id}/items");
+ let resp: Option<CfResponse<serde_json::Value>> = self.api_post(&path, &items, ppfmt).await;
+ resp.is_some()
+ }
+
+ pub async fn delete_waf_list_items(
+ &self,
+ account_id: &str,
+ list_id: &str,
+ item_ids: &[String],
+ ppfmt: &PP,
+ ) -> bool {
+ let path = format!("accounts/{account_id}/rules/lists/{list_id}/items");
+ let body: Vec<serde_json::Value> = item_ids
+ .iter()
+ .map(|id| serde_json::json!({ "id": id }))
+ .collect();
+ let url = self.api_url(&path);
+ let req = self.auth.apply(self.client.delete(&url)).json(&serde_json::json!({ "items": body }));
+ match req.send().await {
+ Ok(resp) => resp.status().is_success(),
+ Err(e) => {
+ ppfmt.errorf(pp::EMOJI_ERROR, &format!("WAF list items DELETE error: {e}"));
+ false
+ }
+ }
+ }
+
+ /// Set WAF list to contain exactly the given IPs.
+ pub async fn set_waf_list(
+ &self,
+ waf_list: &WAFList,
+ ips: &[IpAddr],
+ comment: Option<&str>,
+ _description: Option<&str>,
+ dry_run: bool,
+ ppfmt: &PP,
+ ) -> SetResult {
+ let list_meta = match self.find_waf_list(waf_list, ppfmt).await {
+ Some(meta) => meta,
+ None => {
+ ppfmt.errorf(
+ pp::EMOJI_ERROR,
+ &format!("WAF list {} not found", waf_list.describe()),
+ );
+ return SetResult::Failed;
+ }
+ };
+
+ let existing_items = self
+ .list_waf_list_items(&waf_list.account_id, &list_meta.id, ppfmt)
+ .await;
+
+ // Filter to managed items
+ let managed_items: Vec<&WAFListItem> = existing_items
+ .iter()
+ .filter(|item| {
+ match &self.managed_waf_comment_regex {
+ Some(regex) => {
+ let c = item.comment.as_deref().unwrap_or("");
+ regex.is_match(c)
+ }
+ None => true,
+ }
+ })
+ .collect();
+
+ let desired_ips: std::collections::HashSet<String> =
+ ips.iter().map(|ip| ip.to_string()).collect();
+ let existing_ips: std::collections::HashSet<String> = managed_items
+ .iter()
+ .filter_map(|item| item.ip.clone())
+ .collect();
+
+ // Items to add
+ let to_add: Vec<WAFListCreateItem> = desired_ips
+ .difference(&existing_ips)
+ .map(|ip| WAFListCreateItem {
+ ip: ip.clone(),
+ comment: comment.map(|s| s.to_string()),
+ })
+ .collect();
+
+ // Items to delete
+ let ips_to_remove: std::collections::HashSet<&String> =
+ existing_ips.difference(&desired_ips).collect();
+ let ids_to_delete: Vec<String> = managed_items
+ .iter()
+ .filter(|item| {
+ item.ip.as_ref().map_or(false, |ip| ips_to_remove.contains(ip))
+ })
+ .map(|item| item.id.clone())
+ .collect();
+
+ if to_add.is_empty() && ids_to_delete.is_empty() {
+ ppfmt.infof(
+ pp::EMOJI_SKIP,
+ &format!("WAF list {} is up to date", waf_list.describe()),
+ );
+ return SetResult::Noop;
+ }
+
+ if dry_run {
+ for item in &to_add {
+ ppfmt.noticef(
+ pp::EMOJI_CREATE,
+ &format!("[DRY RUN] Would add {} to WAF list {}", item.ip, waf_list.describe()),
+ );
+ }
+ for ip in &ips_to_remove {
+ ppfmt.noticef(
+ pp::EMOJI_DELETE,
+ &format!("[DRY RUN] Would remove {} from WAF list {}", ip, waf_list.describe()),
+ );
+ }
+ return SetResult::Updated;
+ }
+
+ let mut success = true;
+
+ if !ids_to_delete.is_empty() {
+ for ip in &ips_to_remove {
+ ppfmt.noticef(
+ pp::EMOJI_DELETE,
+ &format!("Removing {} from WAF list {}", ip, waf_list.describe()),
+ );
+ }
+ if !self
+ .delete_waf_list_items(&waf_list.account_id, &list_meta.id, &ids_to_delete, ppfmt)
+ .await
+ {
+ success = false;
+ }
+ }
+
+ if !to_add.is_empty() {
+ for item in &to_add {
+ ppfmt.noticef(
+ pp::EMOJI_CREATE,
+ &format!("Adding {} to WAF list {}", item.ip, waf_list.describe()),
+ );
+ }
+ if !self
+ .create_waf_list_items(&waf_list.account_id, &list_meta.id, &to_add, ppfmt)
+ .await
+ {
+ success = false;
+ }
+ }
+
+ if success {
+ SetResult::Updated
+ } else {
+ SetResult::Failed
+ }
+ }
+
+ /// Clear all managed items from a WAF list (for shutdown).
+ pub async fn final_clear_waf_list(
+ &self,
+ waf_list: &WAFList,
+ ppfmt: &PP,
+ ) {
+ let list_meta = match self.find_waf_list(waf_list, ppfmt).await {
+ Some(meta) => meta,
+ None => return,
+ };
+
+ let items = self
+ .list_waf_list_items(&waf_list.account_id, &list_meta.id, ppfmt)
+ .await;
+
+ let managed_ids: Vec<String> = items
+ .iter()
+ .filter(|item| {
+ match &self.managed_waf_comment_regex {
+ Some(regex) => {
+ let c = item.comment.as_deref().unwrap_or("");
+ regex.is_match(c)
+ }
+ None => true,
+ }
+ })
+ .map(|item| item.id.clone())
+ .collect();
+
+ if !managed_ids.is_empty() {
+ ppfmt.noticef(
+ pp::EMOJI_DELETE,
+ &format!("Clearing {} items from WAF list {}", managed_ids.len(), waf_list.describe()),
+ );
+ self.delete_waf_list_items(&waf_list.account_id, &list_meta.id, &managed_ids, ppfmt)
+ .await;
+ }
+ }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum SetResult {
+ Noop,
+ Updated,
+ Failed,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::pp::PP;
+ use std::net::IpAddr;
+ use wiremock::{Mock, MockServer, ResponseTemplate, matchers::{method, path, query_param}};
+
+ fn pp() -> PP {
+ PP::new(false, false)
+ }
+
+ fn test_auth() -> Auth {
+ Auth::Token("test-token".to_string())
+ }
+
+ fn handle(base_url: &str) -> CloudflareHandle {
+ CloudflareHandle::with_base_url(base_url, test_auth())
+ }
+
+ fn handle_with_regex(base_url: &str, pattern: &str) -> CloudflareHandle {
+ let client = Client::builder()
+ .timeout(Duration::from_secs(10))
+ .build()
+ .expect("Failed to build HTTP client");
+ CloudflareHandle {
+ client,
+ base_url: base_url.to_string(),
+ auth: test_auth(),
+ managed_comment_regex: Some(regex::Regex::new(pattern).unwrap()),
+ managed_waf_comment_regex: None,
+ }
+ }
+
+ // -------------------------------------------------------
+ // TTL tests
+ // -------------------------------------------------------
+
+ #[test]
+ fn ttl_new_below_30_becomes_auto() {
+ assert_eq!(TTL::new(0), TTL::AUTO);
+ assert_eq!(TTL::new(1), TTL::AUTO);
+ assert_eq!(TTL::new(29), TTL::AUTO);
+ assert_eq!(TTL::new(-5), TTL::AUTO);
+ }
+
+ #[test]
+ fn ttl_new_at_or_above_30_stays() {
+ assert_eq!(TTL::new(30), TTL(30));
+ assert_eq!(TTL::new(120), TTL(120));
+ assert_eq!(TTL::new(86400), TTL(86400));
+ }
+
+ #[test]
+ fn ttl_auto_constant() {
+ assert_eq!(TTL::AUTO, TTL(1));
+ }
+
+ #[test]
+ fn ttl_describe_auto() {
+ assert_eq!(TTL::AUTO.describe(), "auto");
+ assert_eq!(TTL(1).describe(), "auto");
+ }
+
+ #[test]
+ fn ttl_describe_seconds() {
+ assert_eq!(TTL(120).describe(), "120s");
+ assert_eq!(TTL(3600).describe(), "3600s");
+ }
+
+ // -------------------------------------------------------
+ // Auth tests
+ // -------------------------------------------------------
+
+ #[test]
+ fn auth_token_variant() {
+ let auth = Auth::Token("my-token".to_string());
+ match &auth {
+ Auth::Token(t) => assert_eq!(t, "my-token"),
+ _ => panic!("expected Token variant"),
+ }
+ }
+
+ #[test]
+ fn auth_key_variant() {
+ let auth = Auth::Key {
+ api_key: "key123".to_string(),
+ email: "user@example.com".to_string(),
+ };
+ match &auth {
+ Auth::Key { api_key, email } => {
+ assert_eq!(api_key, "key123");
+ assert_eq!(email, "user@example.com");
+ }
+ _ => panic!("expected Key variant"),
+ }
+ }
+
+ // -------------------------------------------------------
+ // WAFList tests
+ // -------------------------------------------------------
+
+ #[test]
+ fn waf_list_parse_valid() {
+ let wl = WAFList::parse("abc123/my_list").unwrap();
+ assert_eq!(wl.account_id, "abc123");
+ assert_eq!(wl.list_name, "my_list");
+ }
+
+ #[test]
+ fn waf_list_parse_no_slash() {
+ assert!(WAFList::parse("noslash").is_err());
+ }
+
+ #[test]
+ fn waf_list_parse_invalid_chars() {
+ assert!(WAFList::parse("acc/My-List").is_err());
+ assert!(WAFList::parse("acc/UPPER").is_err());
+ assert!(WAFList::parse("acc/has space").is_err());
+ }
+
+ #[test]
+ fn waf_list_describe() {
+ let wl = WAFList {
+ account_id: "acct".to_string(),
+ list_name: "blocklist".to_string(),
+ };
+ assert_eq!(wl.describe(), "acct/blocklist");
+ }
+
+ // -------------------------------------------------------
+ // CloudflareHandle with wiremock
+ // -------------------------------------------------------
+
+ fn zone_response(id: &str, name: &str) -> serde_json::Value {
+ serde_json::json!({
+ "result": [{ "id": id, "name": name }]
+ })
+ }
+
+ fn empty_list_response() -> serde_json::Value {
+ serde_json::json!({ "result": [] })
+ }
+
+ fn dns_record_json(id: &str, name: &str, content: &str, comment: Option<&str>) -> serde_json::Value {
+ serde_json::json!({
+ "id": id,
+ "name": name,
+ "content": content,
+ "proxied": false,
+ "ttl": 1,
+ "comment": comment
+ })
+ }
+
+ fn dns_list_response(records: Vec<serde_json::Value>) -> serde_json::Value {
+ serde_json::json!({ "result": records })
+ }
+
+ fn dns_single_response(record: serde_json::Value) -> serde_json::Value {
+ serde_json::json!({ "result": record })
+ }
+
+ // --- zone_id_of_domain ---
+
+ #[tokio::test]
+ async fn zone_id_of_domain_found() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/zones"))
+ .and(query_param("name", "sub.example.com"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(empty_list_response()))
+ .mount(&server)
+ .await;
+ Mock::given(method("GET"))
+ .and(path("/zones"))
+ .and(query_param("name", "example.com"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(zone_response("zone-1", "example.com")))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let result = h.zone_id_of_domain("sub.example.com", &pp()).await;
+ assert_eq!(result, Some("zone-1".to_string()));
+ }
+
+ #[tokio::test]
+ async fn zone_id_of_domain_not_found() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/zones"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(empty_list_response()))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let result = h.zone_id_of_domain("nonexistent.example.com", &pp()).await;
+ assert_eq!(result, None);
+ }
+
+ // --- list_records / list_records_by_name ---
+
+ #[tokio::test]
+ async fn list_records_returns_all() {
+ let server = MockServer::start().await;
+ let body = dns_list_response(vec![
+ dns_record_json("r1", "a.example.com", "1.2.3.4", None),
+ dns_record_json("r2", "b.example.com", "5.6.7.8", None),
+ ]);
+ Mock::given(method("GET"))
+ .and(path("/zones/z1/dns_records"))
+ .and(query_param("type", "A"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(body))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let records = h.list_records("z1", "A", &pp()).await;
+ assert_eq!(records.len(), 2);
+ assert_eq!(records[0].id, "r1");
+ assert_eq!(records[1].id, "r2");
+ }
+
+ #[tokio::test]
+ async fn list_records_by_name_filters() {
+ let server = MockServer::start().await;
+ let body = dns_list_response(vec![
+ dns_record_json("r1", "a.example.com", "1.2.3.4", None),
+ dns_record_json("r2", "b.example.com", "5.6.7.8", None),
+ ]);
+ Mock::given(method("GET"))
+ .and(path("/zones/z1/dns_records"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(body))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let records = h.list_records_by_name("z1", "A", "a.example.com", &pp()).await;
+ assert_eq!(records.len(), 1);
+ assert_eq!(records[0].content, "1.2.3.4");
+ }
+
+ // --- create_record ---
+
+ #[tokio::test]
+ async fn create_record_success() {
+ let server = MockServer::start().await;
+ let resp = dns_single_response(dns_record_json("new-id", "x.example.com", "9.9.9.9", None));
+ Mock::given(method("POST"))
+ .and(path("/zones/z1/dns_records"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(resp))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let payload = DnsRecordPayload {
+ record_type: "A".to_string(),
+ name: "x.example.com".to_string(),
+ content: "9.9.9.9".to_string(),
+ proxied: false,
+ ttl: 1,
+ comment: None,
+ };
+ let result = h.create_record("z1", &payload, &pp()).await;
+ assert!(result.is_some());
+ assert_eq!(result.unwrap().id, "new-id");
+ }
+
+ // --- update_record ---
+
+ #[tokio::test]
+ async fn update_record_success() {
+ let server = MockServer::start().await;
+ let resp = dns_single_response(dns_record_json("r1", "x.example.com", "10.0.0.1", None));
+ Mock::given(method("PUT"))
+ .and(path("/zones/z1/dns_records/r1"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(resp))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let payload = DnsRecordPayload {
+ record_type: "A".to_string(),
+ name: "x.example.com".to_string(),
+ content: "10.0.0.1".to_string(),
+ proxied: false,
+ ttl: 1,
+ comment: None,
+ };
+ let result = h.update_record("z1", "r1", &payload, &pp()).await;
+ assert!(result.is_some());
+ assert_eq!(result.unwrap().content, "10.0.0.1");
+ }
+
+ // --- delete_record ---
+
+ #[tokio::test]
+ async fn delete_record_success() {
+ let server = MockServer::start().await;
+ Mock::given(method("DELETE"))
+ .and(path("/zones/z1/dns_records/r1"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ "result": { "id": "r1" } })))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ assert!(h.delete_record("z1", "r1", &pp()).await);
+ }
+
+ // --- set_ips: no existing records -> creates ---
+
+ #[tokio::test]
+ async fn set_ips_creates_when_no_existing() {
+ let server = MockServer::start().await;
+ // list returns empty
+ Mock::given(method("GET"))
+ .and(path("/zones/z1/dns_records"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(dns_list_response(vec![])))
+ .mount(&server)
+ .await;
+ // create
+ Mock::given(method("POST"))
+ .and(path("/zones/z1/dns_records"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(
+ dns_single_response(dns_record_json("new1", "a.example.com", "1.2.3.4", None)),
+ ))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let ips: Vec<IpAddr> = vec!["1.2.3.4".parse().unwrap()];
+ let result = h
+ .set_ips("z1", "a.example.com", "A", &ips, false, TTL::AUTO, None, false, &pp())
+ .await;
+ assert_eq!(result, SetResult::Updated);
+ }
+
+ // --- set_ips: matching existing record -> noop ---
+
+ #[tokio::test]
+ async fn set_ips_noop_when_matching() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/zones/z1/dns_records"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(dns_list_response(vec![
+ dns_record_json("r1", "a.example.com", "1.2.3.4", None),
+ ])))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let ips: Vec<IpAddr> = vec!["1.2.3.4".parse().unwrap()];
+ let result = h
+ .set_ips("z1", "a.example.com", "A", &ips, false, TTL::AUTO, None, false, &pp())
+ .await;
+ assert_eq!(result, SetResult::Noop);
+ }
+
+ // --- set_ips: stale record -> updates ---
+
+ #[tokio::test]
+ async fn set_ips_updates_stale_record() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/zones/z1/dns_records"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(dns_list_response(vec![
+ dns_record_json("r1", "a.example.com", "9.9.9.9", None),
+ ])))
+ .mount(&server)
+ .await;
+ Mock::given(method("PUT"))
+ .and(path("/zones/z1/dns_records/r1"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(
+ dns_single_response(dns_record_json("r1", "a.example.com", "1.2.3.4", None)),
+ ))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let ips: Vec<IpAddr> = vec!["1.2.3.4".parse().unwrap()];
+ let result = h
+ .set_ips("z1", "a.example.com", "A", &ips, false, TTL::AUTO, None, false, &pp())
+ .await;
+ assert_eq!(result, SetResult::Updated);
+ }
+
+ // --- set_ips: extra records -> deletes extras ---
+
+ #[tokio::test]
+ async fn set_ips_deletes_extra_records() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/zones/z1/dns_records"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(dns_list_response(vec![
+ dns_record_json("r1", "a.example.com", "1.2.3.4", None),
+ dns_record_json("r2", "a.example.com", "5.5.5.5", None),
+ ])))
+ .mount(&server)
+ .await;
+ Mock::given(method("DELETE"))
+ .and(path("/zones/z1/dns_records/r2"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ "result": { "id": "r2" } })))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let ips: Vec<IpAddr> = vec!["1.2.3.4".parse().unwrap()];
+ let result = h
+ .set_ips("z1", "a.example.com", "A", &ips, false, TTL::AUTO, None, false, &pp())
+ .await;
+ assert_eq!(result, SetResult::Updated);
+ }
+
+ // --- set_ips: empty ips -> deletes all managed ---
+
+ #[tokio::test]
+ async fn set_ips_empty_ips_deletes_all() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/zones/z1/dns_records"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(dns_list_response(vec![
+ dns_record_json("r1", "a.example.com", "1.2.3.4", None),
+ ])))
+ .mount(&server)
+ .await;
+ Mock::given(method("DELETE"))
+ .and(path("/zones/z1/dns_records/r1"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ "result": { "id": "r1" } })))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let ips: Vec<IpAddr> = vec![];
+ let result = h
+ .set_ips("z1", "a.example.com", "A", &ips, false, TTL::AUTO, None, false, &pp())
+ .await;
+ assert_eq!(result, SetResult::Updated);
+ }
+
+ // --- set_ips: dry_run doesn't mutate ---
+
+ #[tokio::test]
+ async fn set_ips_dry_run_no_mutation() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/zones/z1/dns_records"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(dns_list_response(vec![])))
+ .mount(&server)
+ .await;
+ // No POST mock -- if set_ips tries to POST, wiremock will return 404
+
+ let h = handle(&server.uri());
+ let ips: Vec<IpAddr> = vec!["1.2.3.4".parse().unwrap()];
+ let result = h
+ .set_ips("z1", "a.example.com", "A", &ips, false, TTL::AUTO, None, true, &pp())
+ .await;
+ assert_eq!(result, SetResult::Updated);
+ }
+
+ // --- is_managed_record ---
+
+ #[test]
+ fn is_managed_record_no_regex_manages_all() {
+ let h = CloudflareHandle::with_base_url("http://unused", test_auth());
+ let record = DnsRecord {
+ id: "r1".to_string(),
+ name: "test".to_string(),
+ content: "1.2.3.4".to_string(),
+ proxied: None,
+ ttl: None,
+ comment: None,
+ };
+ assert!(h.is_managed_record(&record));
+ }
+
+ #[test]
+ fn is_managed_record_with_regex_matching() {
+ let h = handle_with_regex("http://unused", "^managed-by-ddns$");
+ let record = DnsRecord {
+ id: "r1".to_string(),
+ name: "test".to_string(),
+ content: "1.2.3.4".to_string(),
+ proxied: None,
+ ttl: None,
+ comment: Some("managed-by-ddns".to_string()),
+ };
+ assert!(h.is_managed_record(&record));
+ }
+
+ #[test]
+ fn is_managed_record_with_regex_not_matching() {
+ let h = handle_with_regex("http://unused", "^managed-by-ddns$");
+ let record = DnsRecord {
+ id: "r1".to_string(),
+ name: "test".to_string(),
+ content: "1.2.3.4".to_string(),
+ proxied: None,
+ ttl: None,
+ comment: Some("something-else".to_string()),
+ };
+ assert!(!h.is_managed_record(&record));
+ }
+
+ #[test]
+ fn is_managed_record_with_regex_no_comment() {
+ let h = handle_with_regex("http://unused", "^managed-by-ddns$");
+ let record = DnsRecord {
+ id: "r1".to_string(),
+ name: "test".to_string(),
+ content: "1.2.3.4".to_string(),
+ proxied: None,
+ ttl: None,
+ comment: None,
+ };
+ assert!(!h.is_managed_record(&record));
+ }
+
+ // --- final_delete ---
+
+ #[tokio::test]
+ async fn final_delete_removes_managed_records() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/zones/z1/dns_records"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(dns_list_response(vec![
+ dns_record_json("r1", "a.example.com", "1.2.3.4", None),
+ dns_record_json("r2", "a.example.com", "5.6.7.8", None),
+ ])))
+ .mount(&server)
+ .await;
+ Mock::given(method("DELETE"))
+ .and(path("/zones/z1/dns_records/r1"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ "result": { "id": "r1" } })))
+ .expect(1)
+ .mount(&server)
+ .await;
+ Mock::given(method("DELETE"))
+ .and(path("/zones/z1/dns_records/r2"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ "result": { "id": "r2" } })))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ h.final_delete("z1", "a.example.com", "A", &pp()).await;
+ // Expectations on mocks validate the DELETE calls were made
+ }
+
+ // --- find_waf_list ---
+
+ #[tokio::test]
+ async fn find_waf_list_found() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/accounts/acct1/rules/lists"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": [
+ { "id": "list-1", "name": "blocklist" },
+ { "id": "list-2", "name": "allowlist" }
+ ]
+ })))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let wl = WAFList {
+ account_id: "acct1".to_string(),
+ list_name: "allowlist".to_string(),
+ };
+ let result = h.find_waf_list(&wl, &pp()).await;
+ assert!(result.is_some());
+ assert_eq!(result.unwrap().id, "list-2");
+ }
+
+ #[tokio::test]
+ async fn find_waf_list_not_found() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/accounts/acct1/rules/lists"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": [{ "id": "list-1", "name": "other" }]
+ })))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let wl = WAFList {
+ account_id: "acct1".to_string(),
+ list_name: "missing".to_string(),
+ };
+ let result = h.find_waf_list(&wl, &pp()).await;
+ assert!(result.is_none());
+ }
+
+ // --- set_waf_list ---
+
+ #[tokio::test]
+ async fn set_waf_list_adds_new_items() {
+ let server = MockServer::start().await;
+ // find_waf_list
+ Mock::given(method("GET"))
+ .and(path("/accounts/acct1/rules/lists"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": [{ "id": "wl-1", "name": "mylist" }]
+ })))
+ .mount(&server)
+ .await;
+ // list items - empty
+ Mock::given(method("GET"))
+ .and(path("/accounts/acct1/rules/lists/wl-1/items"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ "result": [] })))
+ .mount(&server)
+ .await;
+ // create items
+ Mock::given(method("POST"))
+ .and(path("/accounts/acct1/rules/lists/wl-1/items"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ "result": {} })))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let wl = WAFList {
+ account_id: "acct1".to_string(),
+ list_name: "mylist".to_string(),
+ };
+ let ips: Vec<IpAddr> = vec!["10.0.0.1".parse().unwrap()];
+ let result = h.set_waf_list(&wl, &ips, Some("ddns"), None, false, &pp()).await;
+ assert_eq!(result, SetResult::Updated);
+ }
+
+ // --- CloudflareHandle::new ---
+
+ #[test]
+ fn cloudflare_handle_new_constructs() {
+ let h = CloudflareHandle::new(
+ Auth::Token("tok".to_string()),
+ Duration::from_secs(10),
+ None,
+ None,
+ );
+ assert_eq!(h.base_url, "https://api.cloudflare.com/client/v4");
+ }
+
+ // --- Auth::apply ---
+
+ #[test]
+ fn auth_key_apply_sets_headers() {
+ let auth = Auth::Key {
+ api_key: "key123".to_string(),
+ email: "user@example.com".to_string(),
+ };
+ let client = Client::new();
+ let req = client.get("http://example.com");
+ let req = auth.apply(req);
+ // Just verify it doesn't panic - we can't inspect headers easily
+ let _ = req;
+ }
+
+ // --- API error paths ---
+
+ #[tokio::test]
+ async fn api_get_returns_none_on_http_error() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .respond_with(ResponseTemplate::new(500).set_body_string("internal error"))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let pp = PP::new(false, true); // quiet
+ let result: Option<Vec<serde_json::Value>> = h.api_get("zones", &pp).await;
+ assert!(result.is_none());
+ }
+
+ #[tokio::test]
+ async fn api_post_returns_none_on_http_error() {
+ let server = MockServer::start().await;
+ Mock::given(method("POST"))
+ .respond_with(ResponseTemplate::new(403).set_body_string("forbidden"))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let pp = PP::new(false, true);
+ let body = serde_json::json!({"test": true});
+ let result: Option<Vec<serde_json::Value>> = h.api_post("endpoint", &body, &pp).await;
+ assert!(result.is_none());
+ }
+
+ #[tokio::test]
+ async fn api_put_returns_none_on_http_error() {
+ let server = MockServer::start().await;
+ Mock::given(method("PUT"))
+ .respond_with(ResponseTemplate::new(404).set_body_string("not found"))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let pp = PP::new(false, true);
+ let body = serde_json::json!({"test": true});
+ let result: Option<Vec<serde_json::Value>> = h.api_put("endpoint", &body, &pp).await;
+ assert!(result.is_none());
+ }
+
+ #[tokio::test]
+ async fn api_delete_returns_none_on_http_error() {
+ let server = MockServer::start().await;
+ Mock::given(method("DELETE"))
+ .respond_with(ResponseTemplate::new(500).set_body_string("error"))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let pp = PP::new(false, true);
+ assert!(!h.delete_record("z1", "r1", &pp).await);
+ }
+
+ // --- set_ips: update due to proxied change ---
+
+ #[tokio::test]
+ async fn set_ips_updates_when_proxied_changes() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/zones/z1/dns_records"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(dns_list_response(vec![
+ serde_json::json!({
+ "id": "r1",
+ "name": "a.example.com",
+ "content": "1.2.3.4",
+ "proxied": false,
+ "ttl": 1,
+ "comment": null
+ }),
+ ])))
+ .mount(&server)
+ .await;
+ Mock::given(method("PUT"))
+ .and(path("/zones/z1/dns_records/r1"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(
+ dns_single_response(dns_record_json("r1", "a.example.com", "1.2.3.4", None)),
+ ))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let ips: Vec<IpAddr> = vec!["1.2.3.4".parse().unwrap()];
+ // proxied=true but record has proxied=false -> should update
+ let result = h
+ .set_ips("z1", "a.example.com", "A", &ips, true, TTL::AUTO, None, false, &pp())
+ .await;
+ assert_eq!(result, SetResult::Updated);
+ }
+
+ // --- set_ips: dry_run with existing records ---
+
+ #[tokio::test]
+ async fn set_ips_dry_run_with_existing_records() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/zones/z1/dns_records"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(dns_list_response(vec![
+ dns_record_json("r1", "a.example.com", "9.9.9.9", None),
+ ])))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let ips: Vec<IpAddr> = vec!["1.2.3.4".parse().unwrap()];
+ let result = h
+ .set_ips("z1", "a.example.com", "A", &ips, false, TTL::AUTO, None, true, &pp())
+ .await;
+ assert_eq!(result, SetResult::Updated);
+ }
+
+ // --- set_ips: empty ips, no managed records -> noop ---
+
+ #[tokio::test]
+ async fn set_ips_empty_ips_no_records_noop() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/zones/z1/dns_records"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(dns_list_response(vec![])))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let ips: Vec<IpAddr> = vec![];
+ let result = h
+ .set_ips("z1", "a.example.com", "A", &ips, false, TTL::AUTO, None, false, &pp())
+ .await;
+ assert_eq!(result, SetResult::Noop);
+ }
+
+ // --- set_ips: empty ips, managed records -> deletes in dry_run ---
+
+ #[tokio::test]
+ async fn set_ips_empty_ips_dry_run_deletes() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/zones/z1/dns_records"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(dns_list_response(vec![
+ dns_record_json("r1", "a.example.com", "1.2.3.4", None),
+ ])))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let ips: Vec<IpAddr> = vec![];
+ let result = h
+ .set_ips("z1", "a.example.com", "A", &ips, false, TTL::AUTO, None, true, &pp())
+ .await;
+ assert_eq!(result, SetResult::Updated);
+ }
+
+ // --- set_waf_list: not found -> Failed ---
+
+ #[tokio::test]
+ async fn set_waf_list_not_found_returns_failed() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/accounts/acct1/rules/lists"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": []
+ })))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let wl = WAFList {
+ account_id: "acct1".to_string(),
+ list_name: "missing".to_string(),
+ };
+ let ips: Vec<IpAddr> = vec!["10.0.0.1".parse().unwrap()];
+ let result = h.set_waf_list(&wl, &ips, None, None, false, &pp()).await;
+ assert_eq!(result, SetResult::Failed);
+ }
+
+ // --- set_waf_list: noop when already up to date ---
+
+ #[tokio::test]
+ async fn set_waf_list_noop_when_up_to_date() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/accounts/acct1/rules/lists"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": [{ "id": "wl-1", "name": "mylist" }]
+ })))
+ .mount(&server)
+ .await;
+ Mock::given(method("GET"))
+ .and(path("/accounts/acct1/rules/lists/wl-1/items"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": [
+ { "id": "item-1", "ip": "10.0.0.1", "comment": null }
+ ]
+ })))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let wl = WAFList {
+ account_id: "acct1".to_string(),
+ list_name: "mylist".to_string(),
+ };
+ let ips: Vec<IpAddr> = vec!["10.0.0.1".parse().unwrap()];
+ let result = h.set_waf_list(&wl, &ips, None, None, false, &pp()).await;
+ assert_eq!(result, SetResult::Noop);
+ }
+
+ // --- set_waf_list: dry_run ---
+
+ #[tokio::test]
+ async fn set_waf_list_dry_run() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/accounts/acct1/rules/lists"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": [{ "id": "wl-1", "name": "mylist" }]
+ })))
+ .mount(&server)
+ .await;
+ Mock::given(method("GET"))
+ .and(path("/accounts/acct1/rules/lists/wl-1/items"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": [{ "id": "item-1", "ip": "10.0.0.1", "comment": null }]
+ })))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let wl = WAFList {
+ account_id: "acct1".to_string(),
+ list_name: "mylist".to_string(),
+ };
+ // New IP to add + existing to remove
+ let ips: Vec<IpAddr> = vec!["10.0.0.2".parse().unwrap()];
+ let result = h.set_waf_list(&wl, &ips, None, None, true, &pp()).await;
+ assert_eq!(result, SetResult::Updated);
+ }
+
+ // --- final_clear_waf_list ---
+
+ #[tokio::test]
+ async fn final_clear_waf_list_deletes_all() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/accounts/acct1/rules/lists"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": [{ "id": "wl-1", "name": "mylist" }]
+ })))
+ .mount(&server)
+ .await;
+ Mock::given(method("GET"))
+ .and(path("/accounts/acct1/rules/lists/wl-1/items"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": [
+ { "id": "item-1", "ip": "10.0.0.1", "comment": null },
+ { "id": "item-2", "ip": "10.0.0.2", "comment": null }
+ ]
+ })))
+ .mount(&server)
+ .await;
+ Mock::given(method("DELETE"))
+ .and(path("/accounts/acct1/rules/lists/wl-1/items"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ "result": {} })))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let wl = WAFList {
+ account_id: "acct1".to_string(),
+ list_name: "mylist".to_string(),
+ };
+ h.final_clear_waf_list(&wl, &pp()).await;
+ }
+
+ #[tokio::test]
+ async fn final_clear_waf_list_not_found_noop() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/accounts/acct1/rules/lists"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": []
+ })))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let wl = WAFList {
+ account_id: "acct1".to_string(),
+ list_name: "missing".to_string(),
+ };
+ // Should not panic
+ h.final_clear_waf_list(&wl, &pp()).await;
+ }
+
+ #[tokio::test]
+ async fn set_waf_list_removes_stale_items() {
+ let server = MockServer::start().await;
+ // find_waf_list
+ Mock::given(method("GET"))
+ .and(path("/accounts/acct1/rules/lists"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": [{ "id": "wl-1", "name": "mylist" }]
+ })))
+ .mount(&server)
+ .await;
+ // list items - has one stale item
+ Mock::given(method("GET"))
+ .and(path("/accounts/acct1/rules/lists/wl-1/items"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": [
+ { "id": "item-1", "ip": "10.0.0.1", "comment": null }
+ ]
+ })))
+ .mount(&server)
+ .await;
+ // delete items
+ Mock::given(method("DELETE"))
+ .and(path("/accounts/acct1/rules/lists/wl-1/items"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ "result": {} })))
+ .mount(&server)
+ .await;
+
+ let h = handle(&server.uri());
+ let wl = WAFList {
+ account_id: "acct1".to_string(),
+ list_name: "mylist".to_string(),
+ };
+ let ips: Vec<IpAddr> = vec![]; // no desired IPs -> should delete the existing one
+ let result = h.set_waf_list(&wl, &ips, None, None, false, &pp()).await;
+ assert_eq!(result, SetResult::Updated);
+ }
+}
diff --git a/src/config.rs b/src/config.rs
new file mode 100644
index 0000000..2518843
--- /dev/null
+++ b/src/config.rs
@@ -0,0 +1,1961 @@
+use crate::cloudflare::{Auth, TTL, WAFList};
+use crate::domain;
+use crate::notifier::{
+ CompositeNotifier, Heartbeat, HeartbeatMonitor, HealthchecksMonitor, NotifierDyn,
+ ShoutrrrNotifier, UptimeKumaMonitor,
+};
+use crate::pp::{self, PP};
+use crate::provider::{IpType, ProviderType};
+use serde::Deserialize;
+use std::collections::HashMap;
+use std::env;
+use std::path::PathBuf;
+use std::time::Duration;
+
+// ============================================================
+// Legacy JSON Config (backwards compatible with cloudflare-ddns)
+// ============================================================
+
+#[derive(Debug, Deserialize, Clone)]
+pub struct LegacyConfig {
+ pub cloudflare: Vec<LegacyCloudflareEntry>,
+ #[serde(default = "default_true")]
+ pub a: bool,
+ #[serde(default = "default_true")]
+ pub aaaa: bool,
+ #[serde(rename = "purgeUnknownRecords", default)]
+ pub purge_unknown_records: bool,
+ #[serde(default = "default_ttl")]
+ pub ttl: i64,
+}
+
+fn default_true() -> bool {
+ true
+}
+
+fn default_ttl() -> i64 {
+ 300
+}
+
+#[derive(Debug, Deserialize, Clone)]
+pub struct LegacyCloudflareEntry {
+ pub authentication: LegacyAuthentication,
+ pub zone_id: String,
+ pub subdomains: Vec<LegacySubdomainEntry>,
+ #[serde(default)]
+ pub proxied: bool,
+}
+
+#[derive(Debug, Deserialize, Clone)]
+#[serde(untagged)]
+pub enum LegacySubdomainEntry {
+ Detailed { name: String, proxied: bool },
+ Simple(String),
+}
+
+#[derive(Debug, Deserialize, Clone)]
+pub struct LegacyAuthentication {
+ #[serde(default)]
+ pub api_token: String,
+ #[serde(default)]
+ pub api_key: Option<LegacyApiKey>,
+}
+
+#[derive(Debug, Deserialize, Clone)]
+pub struct LegacyApiKey {
+ pub api_key: String,
+ pub account_email: String,
+}
+
+// ============================================================
+// Unified Config (supports both legacy JSON and env var modes)
+// ============================================================
+
+/// The complete application configuration
+pub struct AppConfig {
+ pub auth: Auth,
+ pub providers: HashMap<IpType, ProviderType>,
+ pub domains: HashMap<IpType, Vec<String>>, // FQDN domains by IP type
+ pub waf_lists: Vec,
+ pub update_cron: CronSchedule,
+ pub update_on_start: bool,
+ pub delete_on_stop: bool,
+ pub ttl: TTL,
+ pub proxied_expression: Option<Box<dyn Fn(&str) -> bool + Send + Sync>>,
+ pub record_comment: Option<String>,
+ pub managed_comment_regex: Option<regex::Regex>,
+ pub waf_list_description: Option<String>,
+ pub waf_list_item_comment: Option<String>,
+ pub managed_waf_comment_regex: Option<regex::Regex>,
+ pub detection_timeout: Duration,
+ pub update_timeout: Duration,
+ pub dry_run: bool,
+ pub emoji: bool,
+ pub quiet: bool,
+ // Legacy mode fields
+ pub legacy_mode: bool,
+ pub legacy_config: Option<LegacyConfig>,
+ pub repeat: bool,
+}
+
+/// Cron schedule
+#[derive(Debug, Clone)]
+pub enum CronSchedule {
+ Every(Duration),
+ Once,
+}
+
+impl CronSchedule {
+ pub fn describe(&self) -> String {
+ match self {
+ CronSchedule::Every(d) => format!("@every {}s", d.as_secs()),
+ CronSchedule::Once => "@once".to_string(),
+ }
+ }
+
+ pub fn next_duration(&self) -> Option<Duration> {
+ match self {
+ CronSchedule::Every(d) => Some(*d),
+ CronSchedule::Once => None,
+ }
+ }
+}
+
+ fn parse_duration_string(s: &str) -> Option<Duration> {
+ let s = s.trim();
+ if let Some(minutes) = s.strip_suffix('m') {
+ minutes.parse::<u64>().ok().map(|m| Duration::from_secs(m * 60))
+ } else if let Some(hours) = s.strip_suffix('h') {
+ hours.parse::<u64>().ok().map(|h| Duration::from_secs(h * 3600))
+ } else if let Some(secs) = s.strip_suffix('s') {
+ secs.parse::<u64>().ok().map(Duration::from_secs)
+ } else {
+ // Try as seconds
+ s.parse::<u64>().ok().map(Duration::from_secs)
+ }
+}
+
+// ============================================================
+// Environment Variable Configuration (cf-ddns mode)
+// ============================================================
+
+ fn getenv(key: &str) -> Option<String> {
+ env::var(key).ok().map(|v| v.trim().to_string()).filter(|v| !v.is_empty())
+}
+
+fn getenv_bool(key: &str, default: bool) -> bool {
+ match getenv(key) {
+ Some(v) => matches!(v.to_lowercase().as_str(), "true" | "1" | "yes"),
+ None => default,
+ }
+}
+
+fn getenv_duration(key: &str, default: Duration) -> Duration {
+ match getenv(key) {
+ Some(v) => parse_duration_string(&v).unwrap_or(default),
+ None => default,
+ }
+}
+
+ fn getenv_list(key: &str, sep: char) -> Vec<String> {
+ match getenv(key) {
+ Some(v) => v
+ .split(sep)
+ .map(|s| s.trim().to_string())
+ .filter(|s| !s.is_empty())
+ .collect(),
+ None => Vec::new(),
+ }
+}
+
+ fn read_auth_from_env(ppfmt: &PP) -> Option<Auth> {
+ // Try CLOUDFLARE_API_TOKEN first, then CF_API_TOKEN (deprecated)
+ if let Some(token) = getenv("CLOUDFLARE_API_TOKEN").or_else(|| {
+ let val = getenv("CF_API_TOKEN");
+ if val.is_some() {
+ ppfmt.warningf(
+ pp::EMOJI_WARNING,
+ "CF_API_TOKEN is deprecated; use CLOUDFLARE_API_TOKEN instead",
+ );
+ }
+ val
+ }) {
+ if token == "YOUR-CLOUDFLARE-API-TOKEN" {
+ ppfmt.errorf(pp::EMOJI_ERROR, "Please set CLOUDFLARE_API_TOKEN to your actual API token");
+ return None;
+ }
+ return Some(Auth::Token(token));
+ }
+
+ // Try reading from file
+ if let Some(path) = getenv("CLOUDFLARE_API_TOKEN_FILE").or_else(|| {
+ let val = getenv("CF_API_TOKEN_FILE");
+ if val.is_some() {
+ ppfmt.warningf(
+ pp::EMOJI_WARNING,
+ "CF_API_TOKEN_FILE is deprecated; use CLOUDFLARE_API_TOKEN_FILE instead",
+ );
+ }
+ val
+ }) {
+ match std::fs::read_to_string(&path) {
+ Ok(content) => {
+ let token = content.trim().to_string();
+ if !token.is_empty() {
+ return Some(Auth::Token(token));
+ }
+ }
+ Err(e) => {
+ ppfmt.errorf(pp::EMOJI_ERROR, &format!("Failed to read API token file '{path}': {e}"));
+ }
+ }
+ }
+
+ // Deprecated: CF_ACCOUNT_ID
+ if getenv("CF_ACCOUNT_ID").is_some() {
+ ppfmt.warningf(
+ pp::EMOJI_WARNING,
+ "CF_ACCOUNT_ID is deprecated and ignored since v1.14.0",
+ );
+ }
+
+ None
+}
+
+ fn read_providers_from_env(ppfmt: &PP) -> Result<HashMap<IpType, ProviderType>, String> {
+ let mut providers = HashMap::new();
+
+ let ip4_str = getenv("IP4_PROVIDER").or_else(|| {
+ let val = getenv("IP4_POLICY");
+ if val.is_some() {
+ ppfmt.warningf(pp::EMOJI_WARNING, "IP4_POLICY is deprecated; use IP4_PROVIDER instead");
+ }
+ val
+ });
+ let ip6_str = getenv("IP6_PROVIDER").or_else(|| {
+ let val = getenv("IP6_POLICY");
+ if val.is_some() {
+ ppfmt.warningf(pp::EMOJI_WARNING, "IP6_POLICY is deprecated; use IP6_PROVIDER instead");
+ }
+ val
+ });
+
+ let ip4_provider = match ip4_str {
+ Some(s) => ProviderType::parse(&s)
+ .map_err(|e| format!("Invalid IP4_PROVIDER: {e}"))?,
+ None => ProviderType::CloudflareTrace { url: None },
+ };
+
+ let ip6_provider = match ip6_str {
+ Some(s) => ProviderType::parse(&s)
+ .map_err(|e| format!("Invalid IP6_PROVIDER: {e}"))?,
+ None => ProviderType::CloudflareTrace { url: None },
+ };
+
+ if !matches!(ip4_provider, ProviderType::None) {
+ providers.insert(IpType::V4, ip4_provider);
+ }
+ if !matches!(ip6_provider, ProviderType::None) {
+ providers.insert(IpType::V6, ip6_provider);
+ }
+
+ Ok(providers)
+}
+
+fn read_domains_from_env(_ppfmt: &PP) -> HashMap> {
+ let mut domains: HashMap> = HashMap::new();
+
+ let both = getenv_list("DOMAINS", ',');
+ let ip4_only = getenv_list("IP4_DOMAINS", ',');
+ let ip6_only = getenv_list("IP6_DOMAINS", ',');
+
+ let mut v4_domains: Vec = both.clone();
+ v4_domains.extend(ip4_only);
+ if !v4_domains.is_empty() {
+ domains.insert(IpType::V4, v4_domains);
+ }
+
+ let mut v6_domains: Vec = both;
+ v6_domains.extend(ip6_only);
+ if !v6_domains.is_empty() {
+ domains.insert(IpType::V6, v6_domains);
+ }
+
+ domains
+}
+
+fn read_waf_lists_from_env(ppfmt: &PP) -> Vec {
+ let list_strs = getenv_list("WAF_LISTS", ',');
+ let mut lists = Vec::new();
+ for s in list_strs {
+ match WAFList::parse(&s) {
+ Ok(list) => lists.push(list),
+ Err(e) => {
+ ppfmt.errorf(pp::EMOJI_ERROR, &format!("Invalid WAF_LISTS entry: {e}"));
+ }
+ }
+ }
+ lists
+}
+
+fn read_cron_from_env(ppfmt: &PP) -> Result {
+ match getenv("UPDATE_CRON") {
+ Some(s) => {
+ let s = s.trim();
+ if s == "@once" {
+ Ok(CronSchedule::Once)
+ } else if s == "@disabled" || s == "@nevermore" {
+ ppfmt.warningf(
+ pp::EMOJI_WARNING,
+ &format!("UPDATE_CRON={s} is deprecated; use @once instead"),
+ );
+ Ok(CronSchedule::Once)
+ } else if let Some(rest) = s.strip_prefix("@every ") {
+ match parse_duration_string(rest) {
+ Some(d) => Ok(CronSchedule::Every(d)),
+ None => Err(format!("Invalid duration in UPDATE_CRON: {s}")),
+ }
+ } else {
+ Err(format!(
+ "Unsupported UPDATE_CRON format: {s}. Use @every , @once, or omit for default (5m)."
+ ))
+ }
+ }
+ None => Ok(CronSchedule::Every(Duration::from_secs(300))),
+ }
+}
+
+fn read_regex(key: &str, ppfmt: &PP) -> Option {
+ match getenv(key) {
+ Some(s) if !s.is_empty() => match regex::Regex::new(&s) {
+ Ok(r) => Some(r),
+ Err(e) => {
+ ppfmt.errorf(pp::EMOJI_ERROR, &format!("Invalid regex in {key}: {e}"));
+ None
+ }
+ },
+ _ => None,
+ }
+}
+
+// ============================================================
+// JSON Config File with Env Var Substitution (legacy mode)
+// ============================================================
+
/// Replace `$CF_DDNS_*` and `${CF_DDNS_*}` references in `input` with the
/// corresponding environment variable values.
///
/// Only variables whose names start with `CF_DDNS_` are considered; all other
/// text — including `$OTHER_VAR` references — is left untouched.
fn substitute_env_vars(input: &str) -> String {
    env::vars()
        .filter(|(name, _)| name.starts_with("CF_DDNS_"))
        .fold(input.to_string(), |text, (name, value)| {
            // Handle both the bare and the braced reference forms.
            text.replace(&format!("${name}"), value.as_str())
                .replace(&format!("${{{name}}}"), value.as_str())
        })
}
+
+pub fn load_legacy_config() -> Result {
+ let config_path = env::var("CONFIG_PATH").unwrap_or_else(|_| ".".to_string());
+ let path = PathBuf::from(&config_path).join("config.json");
+
+ let content =
+ std::fs::read_to_string(&path).map_err(|e| format!("Error reading config.json: {e}"))?;
+
+ let content = substitute_env_vars(&content);
+
+ let mut config: LegacyConfig =
+ serde_json::from_str(&content).map_err(|e| format!("Error parsing config.json: {e}"))?;
+
+ if config.ttl < 30 {
+ println!("TTL is too low - defaulting to 1 (auto)");
+ config.ttl = 1;
+ }
+
+ Ok(config)
+}
+
/// Test-only helper: parse a legacy config from a JSON string, applying the
/// same TTL clamp as `load_legacy_config` but without file I/O, env-var
/// substitution, or the user-facing warning.
#[cfg(test)]
pub fn parse_legacy_config(content: &str) -> Result<LegacyConfig, String> {
    let mut config: LegacyConfig =
        serde_json::from_str(content).map_err(|e| format!("Error parsing config: {e}"))?;

    // Same clamp as load_legacy_config: TTL below 30 becomes 1 ("auto").
    if config.ttl < 30 {
        config.ttl = 1;
    }

    Ok(config)
}
+
/// Convert a legacy config into a unified AppConfig
///
/// Auth comes from the first `cloudflare` entry (placeholder token
/// "api_token_here" counts as unset, falling back to the entry's API key,
/// then to an empty token). Providers follow the legacy a/aaaa flags; when
/// `repeat` is set the legacy TTL doubles as the polling interval.
fn legacy_to_app_config(legacy: LegacyConfig, dry_run: bool, repeat: bool) -> AppConfig {
    // Extract auth from first entry
    let auth = if let Some(entry) = legacy.cloudflare.first() {
        if !entry.authentication.api_token.is_empty()
            && entry.authentication.api_token != "api_token_here"
        {
            Auth::Token(entry.authentication.api_token.clone())
        } else if let Some(api_key) = &entry.authentication.api_key {
            Auth::Key {
                api_key: api_key.api_key.clone(),
                email: api_key.account_email.clone(),
            }
        } else {
            // No usable credentials; empty token is rejected later at use.
            Auth::Token(String::new())
        }
    } else {
        Auth::Token(String::new())
    };

    // Build providers
    // Legacy mode always detects via Cloudflare's trace endpoint.
    if legacy.a {
        providers.insert(IpType::V4, ProviderType::CloudflareTrace { url: None });
    }
    if legacy.aaaa {
        providers.insert(IpType::V6, ProviderType::CloudflareTrace { url: None });
    }

    let ttl = TTL::new(legacy.ttl);
    let schedule = if repeat {
        // Use TTL as interval in legacy mode
        // max(1) guards against a zero TTL producing a busy loop.
        CronSchedule::Every(Duration::from_secs(legacy.ttl.max(1) as u64))
    } else {
        CronSchedule::Once
    };

    // Fields not expressible in config.json keep conservative defaults; the
    // legacy record set itself travels along in `legacy_config`.
    AppConfig {
        auth,
        providers,
        domains: HashMap::new(),
        waf_lists: Vec::new(),
        update_cron: schedule,
        update_on_start: true,
        delete_on_stop: false,
        ttl,
        proxied_expression: None,
        record_comment: None,
        managed_comment_regex: None,
        waf_list_description: None,
        waf_list_item_comment: None,
        managed_waf_comment_regex: None,
        detection_timeout: Duration::from_secs(5),
        update_timeout: Duration::from_secs(30),
        dry_run,
        emoji: false,
        quiet: false,
        legacy_mode: true,
        legacy_config: Some(legacy),
        repeat,
    }
}
+
+// ============================================================
+// Detect config mode and load
+// ============================================================
+
+/// Determine whether to use env var config (cf-ddns mode) or legacy JSON config.
+pub fn is_env_config_mode() -> bool {
+ // If any cf-ddns env vars are set, use env mode
+ getenv("CLOUDFLARE_API_TOKEN").is_some()
+ || getenv("CF_API_TOKEN").is_some()
+ || getenv("CLOUDFLARE_API_TOKEN_FILE").is_some()
+ || getenv("CF_API_TOKEN_FILE").is_some()
+ || getenv("DOMAINS").is_some()
+ || getenv("IP4_DOMAINS").is_some()
+ || getenv("IP6_DOMAINS").is_some()
+}
+
+/// Load configuration from environment variables (cf-ddns mode).
+pub fn load_env_config(ppfmt: &PP) -> Result {
+ // Deprecated warnings
+ if getenv("PUID").is_some() {
+ ppfmt.warningf(pp::EMOJI_WARNING, "PUID is deprecated since v1.13.0 and ignored. Use Docker's built-in mechanism instead.");
+ }
+ if getenv("PGID").is_some() {
+ ppfmt.warningf(pp::EMOJI_WARNING, "PGID is deprecated since v1.13.0 and ignored. Use Docker's built-in mechanism instead.");
+ }
+
+ let auth = read_auth_from_env(ppfmt)
+ .ok_or_else(|| "No authentication configured. Set CLOUDFLARE_API_TOKEN.".to_string())?;
+
+ let providers = read_providers_from_env(ppfmt)?;
+ let domains = read_domains_from_env(ppfmt);
+ let waf_lists = read_waf_lists_from_env(ppfmt);
+ let update_cron = read_cron_from_env(ppfmt)?;
+ let update_on_start = getenv_bool("UPDATE_ON_START", true);
+ let delete_on_stop = getenv_bool("DELETE_ON_STOP", false);
+
+ let ttl_val = getenv("TTL")
+ .and_then(|s| s.parse::().ok())
+ .unwrap_or(1);
+ let ttl = TTL::new(ttl_val);
+
+ let proxied_expr_str = getenv("PROXIED").unwrap_or_else(|| "false".to_string());
+ let proxied_expression = match domain::parse_proxied_expression(&proxied_expr_str) {
+ Ok(pred) => Some(pred),
+ Err(e) => {
+ ppfmt.errorf(pp::EMOJI_ERROR, &format!("Invalid PROXIED expression: {e}"));
+ None
+ }
+ };
+
+ let record_comment = getenv("RECORD_COMMENT");
+ let managed_comment_regex = read_regex("MANAGED_RECORDS_COMMENT_REGEX", ppfmt);
+ let waf_list_description = getenv("WAF_LIST_DESCRIPTION");
+ let waf_list_item_comment = getenv("WAF_LIST_ITEM_COMMENT");
+ let managed_waf_comment_regex = read_regex("MANAGED_WAF_LIST_ITEMS_COMMENT_REGEX", ppfmt);
+
+ let detection_timeout = getenv_duration("DETECTION_TIMEOUT", Duration::from_secs(5));
+ let update_timeout = getenv_duration("UPDATE_TIMEOUT", Duration::from_secs(30));
+
+ let emoji = getenv_bool("EMOJI", true);
+ let quiet = getenv_bool("QUIET", false);
+
+ // Validate: must have at least one update target
+ if domains.is_empty() && waf_lists.is_empty() {
+ return Err(
+ "No update targets configured. Set DOMAINS, IP4_DOMAINS, IP6_DOMAINS, or WAF_LISTS."
+ .to_string(),
+ );
+ }
+
+ // Validate: @once constraints
+ if matches!(update_cron, CronSchedule::Once) {
+ if !update_on_start {
+ return Err("UPDATE_ON_START must be true when UPDATE_CRON=@once".to_string());
+ }
+ if delete_on_stop {
+ return Err("DELETE_ON_STOP must be false when UPDATE_CRON=@once".to_string());
+ }
+ }
+
+ // Validate comment/regex compatibility
+ if let (Some(ref comment), Some(ref regex)) = (&record_comment, &managed_comment_regex) {
+ if !regex.is_match(comment) {
+ ppfmt.warningf(
+ pp::EMOJI_WARNING,
+ &format!(
+ "RECORD_COMMENT '{}' does not match MANAGED_RECORDS_COMMENT_REGEX '{}'",
+ comment,
+ regex.as_str()
+ ),
+ );
+ }
+ }
+
+ Ok(AppConfig {
+ auth,
+ providers,
+ domains,
+ waf_lists,
+ update_cron,
+ update_on_start,
+ delete_on_stop,
+ ttl,
+ proxied_expression,
+ record_comment,
+ managed_comment_regex,
+ waf_list_description,
+ waf_list_item_comment,
+ managed_waf_comment_regex,
+ detection_timeout,
+ update_timeout,
+ dry_run: false, // Set later from CLI args
+ emoji,
+ quiet,
+ legacy_mode: false,
+ legacy_config: None,
+ repeat: false, // Set later
+ })
+}
+
+/// Load config (auto-detect mode).
+pub fn load_config(dry_run: bool, repeat: bool, ppfmt: &PP) -> Result {
+ if is_env_config_mode() {
+ ppfmt.infof(pp::EMOJI_CONFIG, "Using environment variable configuration");
+ let mut config = load_env_config(ppfmt)?;
+ config.dry_run = dry_run;
+ config.repeat = !matches!(config.update_cron, CronSchedule::Once);
+ Ok(config)
+ } else {
+ ppfmt.infof(pp::EMOJI_CONFIG, "Using config.json configuration");
+ let legacy = load_legacy_config()?;
+ Ok(legacy_to_app_config(legacy, dry_run, repeat))
+ }
+}
+
+// ============================================================
+// Setup reporters (notifiers + heartbeats)
+// ============================================================
+
+pub fn setup_notifiers(ppfmt: &PP) -> CompositeNotifier {
+ let mut notifiers: Vec> = Vec::new();
+
+ let shoutrrr_urls = getenv_list("SHOUTRRR", '\n');
+ if !shoutrrr_urls.is_empty() {
+ match ShoutrrrNotifier::new(&shoutrrr_urls) {
+ Ok(n) => {
+ ppfmt.infof(pp::EMOJI_NOTIFY, &format!("Notifications: {}", n.describe()));
+ notifiers.push(Box::new(n));
+ }
+ Err(e) => {
+ ppfmt.errorf(pp::EMOJI_ERROR, &format!("Failed to setup notifications: {e}"));
+ }
+ }
+ }
+
+ CompositeNotifier::new(notifiers)
+}
+
+pub fn setup_heartbeats(ppfmt: &PP) -> Heartbeat {
+ let mut monitors: Vec> = Vec::new();
+
+ if let Some(url) = getenv("HEALTHCHECKS") {
+ ppfmt.infof(pp::EMOJI_HEARTBEAT, "Heartbeat: Healthchecks.io");
+ monitors.push(Box::new(HealthchecksMonitor::new(&url)));
+ }
+
+ if let Some(url) = getenv("UPTIMEKUMA") {
+ ppfmt.infof(pp::EMOJI_HEARTBEAT, "Heartbeat: Uptime Kuma");
+ monitors.push(Box::new(UptimeKumaMonitor::new(&url)));
+ }
+
+ Heartbeat::new(monitors)
+}
+
+// ============================================================
+// Print config summary
+// ============================================================
+
/// Print a human-readable summary of the effective configuration.
///
/// Legacy mode intentionally prints nothing here to keep output identical to
/// the original implementation; env mode lists domains, WAF lists, providers,
/// TTL, schedule, and selected options.
pub fn print_config_summary(config: &AppConfig, ppfmt: &PP) {
    if config.legacy_mode {
        // Legacy mode output (backwards compatible)
        return;
    }

    // Nested lines are printed through an indented sub-printer.
    let inner = ppfmt.indent();

    if !config.domains.is_empty() {
        ppfmt.noticef(pp::EMOJI_CONFIG, "Domains to update:");
        for (ip_type, domains) in &config.domains {
            inner.noticef("", &format!("{}: {}", ip_type.describe(), domains.join(", ")));
        }
    }

    if !config.waf_lists.is_empty() {
        ppfmt.noticef(pp::EMOJI_CONFIG, "WAF lists:");
        for waf in &config.waf_lists {
            inner.noticef("", &waf.describe());
        }
    }

    for (ip_type, provider) in &config.providers {
        inner.infof("", &format!("{} provider: {}", ip_type.describe(), provider.name()));
    }

    inner.infof("", &format!("TTL: {}", config.ttl.describe()));
    inner.infof("", &format!("Schedule: {}", config.update_cron.describe()));

    // Off-by-default options are only mentioned when enabled/set.
    if config.delete_on_stop {
        inner.infof("", "Delete on stop: enabled");
    }

    if let Some(ref comment) = config.record_comment {
        inner.infof("", &format!("Record comment: {comment}"));
    }
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
    // --- parse_legacy_config basics, CronSchedule, duration, substitution ---

    #[test]
    fn test_parse_legacy_config_minimal() {
        // Omitted fields fall back to defaults: a/aaaa on, no purge, ttl 300.
        let json = r#"{
            "cloudflare": [{
                "authentication": { "api_token": "tok123" },
                "zone_id": "zone1",
                "subdomains": ["@"]
            }]
        }"#;
        let config = parse_legacy_config(json).unwrap();
        assert!(config.a);
        assert!(config.aaaa);
        assert!(!config.purge_unknown_records);
        assert_eq!(config.ttl, 300);
    }

    #[test]
    fn test_parse_legacy_config_low_ttl() {
        // TTL below 30 is clamped to 1 ("auto").
        let json = r#"{
            "cloudflare": [{
                "authentication": { "api_token": "tok123" },
                "zone_id": "zone1",
                "subdomains": ["@"]
            }],
            "ttl": 10
        }"#;
        let config = parse_legacy_config(json).unwrap();
        assert_eq!(config.ttl, 1);
    }

    #[test]
    fn test_cron_schedule_every() {
        let sched = CronSchedule::Every(Duration::from_secs(300));
        assert_eq!(sched.next_duration(), Some(Duration::from_secs(300)));
    }

    #[test]
    fn test_cron_schedule_once() {
        // @once never yields a next run.
        let sched = CronSchedule::Once;
        assert_eq!(sched.next_duration(), None);
    }

    #[test]
    fn test_parse_duration_string() {
        assert_eq!(parse_duration_string("5m"), Some(Duration::from_secs(300)));
        assert_eq!(parse_duration_string("1h"), Some(Duration::from_secs(3600)));
        assert_eq!(parse_duration_string("30s"), Some(Duration::from_secs(30)));
    }

    #[test]
    fn test_substitute_env_vars() {
        // NOTE(review): mutates the process env without ENV_MUTEX — racy if
        // run in parallel with other env-dependent tests.
        std::env::set_var("CF_DDNS_TEST_VAR", "test_value");
        let result = substitute_env_vars("token: ${CF_DDNS_TEST_VAR}");
        assert_eq!(result, "token: test_value");
        let result2 = substitute_env_vars("token: $CF_DDNS_TEST_VAR");
        assert_eq!(result2, "token: test_value");
        std::env::remove_var("CF_DDNS_TEST_VAR");
    }
+
    // --- parse_duration_string edge cases ---

    #[test]
    fn test_parse_duration_string_plain_number() {
        // A bare number is interpreted as seconds.
        assert_eq!(parse_duration_string("60"), Some(Duration::from_secs(60)));
    }

    #[test]
    fn test_parse_duration_string_whitespace() {
        // Surrounding whitespace is tolerated.
        assert_eq!(parse_duration_string(" 5m "), Some(Duration::from_secs(300)));
    }

    #[test]
    fn test_parse_duration_string_invalid() {
        assert_eq!(parse_duration_string("abc"), None);
        assert_eq!(parse_duration_string(""), None);
    }

    // --- CronSchedule ---

    #[test]
    fn test_cron_schedule_describe() {
        assert_eq!(
            CronSchedule::Every(Duration::from_secs(300)).describe(),
            "@every 300s"
        );
        assert_eq!(CronSchedule::Once.describe(), "@once");
    }
+
    // --- read_cron_from_env ---
    // NOTE(review): these tests mutate UPDATE_CRON without ENV_MUTEX; they can
    // interfere with each other under parallel test execution.

    #[test]
    fn test_read_cron_default() {
        // No env var set -> default 5m
        std::env::remove_var("UPDATE_CRON");
        let pp = PP::new(false, false);
        let sched = read_cron_from_env(&pp).unwrap();
        assert!(matches!(sched, CronSchedule::Every(d) if d == Duration::from_secs(300)));
    }

    #[test]
    fn test_read_cron_once() {
        std::env::set_var("UPDATE_CRON", "@once");
        let pp = PP::new(false, false);
        let sched = read_cron_from_env(&pp).unwrap();
        assert!(matches!(sched, CronSchedule::Once));
        std::env::remove_var("UPDATE_CRON");
    }

    #[test]
    fn test_read_cron_every() {
        std::env::set_var("UPDATE_CRON", "@every 10m");
        let pp = PP::new(false, false);
        let sched = read_cron_from_env(&pp).unwrap();
        assert!(matches!(sched, CronSchedule::Every(d) if d == Duration::from_secs(600)));
        std::env::remove_var("UPDATE_CRON");
    }

    #[test]
    fn test_read_cron_deprecated_disabled() {
        // @disabled is a deprecated alias for @once.
        std::env::set_var("UPDATE_CRON", "@disabled");
        let pp = PP::new(false, false);
        let sched = read_cron_from_env(&pp).unwrap();
        assert!(matches!(sched, CronSchedule::Once));
        std::env::remove_var("UPDATE_CRON");
    }

    #[test]
    fn test_read_cron_unsupported_format() {
        // Classic cron expressions are rejected.
        std::env::set_var("UPDATE_CRON", "*/5 * * * *");
        let pp = PP::new(false, false);
        let result = read_cron_from_env(&pp);
        assert!(result.is_err());
        std::env::remove_var("UPDATE_CRON");
    }
+
    // --- getenv helpers ---

    #[test]
    fn test_getenv_empty_string_is_none() {
        // Empty values are treated as unset.
        std::env::set_var("TEST_GETENV_EMPTY", "");
        assert!(getenv("TEST_GETENV_EMPTY").is_none());
        std::env::remove_var("TEST_GETENV_EMPTY");
    }

    #[test]
    fn test_getenv_whitespace_is_none() {
        // Whitespace-only values are treated as unset.
        std::env::set_var("TEST_GETENV_WS", " ");
        assert!(getenv("TEST_GETENV_WS").is_none());
        std::env::remove_var("TEST_GETENV_WS");
    }

    #[test]
    fn test_getenv_trims() {
        std::env::set_var("TEST_GETENV_TRIM", " hello ");
        assert_eq!(getenv("TEST_GETENV_TRIM"), Some("hello".to_string()));
        std::env::remove_var("TEST_GETENV_TRIM");
    }

    #[test]
    fn test_getenv_bool_true_values() {
        // Truthy forms are case-insensitive.
        for val in &["true", "1", "yes", "True", "YES"] {
            std::env::set_var("TEST_BOOL", val);
            assert!(getenv_bool("TEST_BOOL", false));
        }
        std::env::remove_var("TEST_BOOL");
    }

    #[test]
    fn test_getenv_bool_false_values() {
        // Anything not recognized as truthy is false.
        for val in &["false", "0", "no", "anything"] {
            std::env::set_var("TEST_BOOL", val);
            assert!(!getenv_bool("TEST_BOOL", true));
        }
        std::env::remove_var("TEST_BOOL");
    }

    #[test]
    fn test_getenv_bool_default() {
        // Missing variable yields the caller-supplied default.
        std::env::remove_var("TEST_BOOL_MISSING");
        assert!(getenv_bool("TEST_BOOL_MISSING", true));
        assert!(!getenv_bool("TEST_BOOL_MISSING", false));
    }

    #[test]
    fn test_getenv_duration_valid() {
        std::env::set_var("TEST_DUR", "10s");
        let d = getenv_duration("TEST_DUR", Duration::from_secs(99));
        assert_eq!(d, Duration::from_secs(10));
        std::env::remove_var("TEST_DUR");
    }

    #[test]
    fn test_getenv_duration_default() {
        std::env::remove_var("TEST_DUR_MISSING");
        let d = getenv_duration("TEST_DUR_MISSING", Duration::from_secs(42));
        assert_eq!(d, Duration::from_secs(42));
    }

    #[test]
    fn test_getenv_list() {
        // Empty segments are dropped.
        std::env::set_var("TEST_LIST", "a,b,,c");
        let list = getenv_list("TEST_LIST", ',');
        assert_eq!(list, vec!["a", "b", "c"]);
        std::env::remove_var("TEST_LIST");
    }

    #[test]
    fn test_getenv_list_empty() {
        std::env::remove_var("TEST_LIST_MISSING");
        let list = getenv_list("TEST_LIST_MISSING", ',');
        assert!(list.is_empty());
    }
+
    // --- read_regex ---

    #[test]
    fn test_read_regex_valid() {
        std::env::set_var("TEST_REGEX", "cloudflare-ddns");
        let pp = PP::new(false, false);
        let regex = read_regex("TEST_REGEX", &pp);
        assert!(regex.is_some());
        assert!(regex.unwrap().is_match("managed by cloudflare-ddns"));
        std::env::remove_var("TEST_REGEX");
    }

    #[test]
    fn test_read_regex_invalid() {
        // Malformed patterns are reported and yield None, not a panic.
        std::env::set_var("TEST_REGEX_BAD", "[invalid(");
        let pp = PP::new(false, false);
        let regex = read_regex("TEST_REGEX_BAD", &pp);
        assert!(regex.is_none());
        std::env::remove_var("TEST_REGEX_BAD");
    }

    #[test]
    fn test_read_regex_empty() {
        std::env::set_var("TEST_REGEX_E", "");
        let pp = PP::new(false, false);
        let regex = read_regex("TEST_REGEX_E", &pp);
        assert!(regex.is_none());
        std::env::remove_var("TEST_REGEX_E");
    }

    // --- read_domains_from_env ---

    #[test]
    fn test_read_domains_both() {
        // DOMAINS feeds both IP versions.
        std::env::set_var("DOMAINS", "example.com,www.example.com");
        std::env::remove_var("IP4_DOMAINS");
        std::env::remove_var("IP6_DOMAINS");
        let pp = PP::new(false, false);
        let domains = read_domains_from_env(&pp);
        assert_eq!(domains.get(&IpType::V4).unwrap().len(), 2);
        assert_eq!(domains.get(&IpType::V6).unwrap().len(), 2);
        std::env::remove_var("DOMAINS");
    }

    #[test]
    fn test_read_domains_ip4_only() {
        // IP4_DOMAINS alone must not create a V6 entry.
        std::env::remove_var("DOMAINS");
        std::env::set_var("IP4_DOMAINS", "v4.example.com");
        std::env::remove_var("IP6_DOMAINS");
        let pp = PP::new(false, false);
        let domains = read_domains_from_env(&pp);
        assert_eq!(domains.get(&IpType::V4).unwrap(), &vec!["v4.example.com".to_string()]);
        assert!(domains.get(&IpType::V6).is_none());
        std::env::remove_var("IP4_DOMAINS");
    }

    #[test]
    fn test_read_domains_empty() {
        std::env::remove_var("DOMAINS");
        std::env::remove_var("IP4_DOMAINS");
        std::env::remove_var("IP6_DOMAINS");
        let pp = PP::new(false, false);
        let domains = read_domains_from_env(&pp);
        assert!(domains.is_empty());
    }
+
    // --- read_waf_lists_from_env ---

    #[test]
    fn test_read_waf_lists_valid() {
        // "account/list" form parses into its two components.
        std::env::set_var("WAF_LISTS", "acc123/my_list");
        let pp = PP::new(false, false);
        let lists = read_waf_lists_from_env(&pp);
        assert_eq!(lists.len(), 1);
        assert_eq!(lists[0].account_id, "acc123");
        assert_eq!(lists[0].list_name, "my_list");
        std::env::remove_var("WAF_LISTS");
    }

    #[test]
    fn test_read_waf_lists_invalid_skipped() {
        // Entries without a '/' are reported and skipped, not fatal.
        std::env::set_var("WAF_LISTS", "no-slash");
        let pp = PP::new(false, false);
        let lists = read_waf_lists_from_env(&pp);
        assert!(lists.is_empty());
        std::env::remove_var("WAF_LISTS");
    }
+
    // --- legacy_to_app_config ---

    #[test]
    fn test_legacy_to_app_config_basic() {
        // a=true/aaaa=false should give a V4-only provider map; without
        // repeat, the schedule collapses to @once.
        let legacy = LegacyConfig {
            cloudflare: vec![LegacyCloudflareEntry {
                authentication: LegacyAuthentication {
                    api_token: "my-token".to_string(),
                    api_key: None,
                },
                zone_id: "zone1".to_string(),
                subdomains: vec![LegacySubdomainEntry::Simple("@".to_string())],
                proxied: false,
            }],
            a: true,
            aaaa: false,
            purge_unknown_records: false,
            ttl: 300,
        };
        let config = legacy_to_app_config(legacy, false, false);
        assert!(config.legacy_mode);
        assert!(matches!(config.auth, Auth::Token(ref t) if t == "my-token"));
        assert!(config.providers.contains_key(&IpType::V4));
        assert!(!config.providers.contains_key(&IpType::V6));
        assert!(matches!(config.update_cron, CronSchedule::Once));
        assert!(!config.dry_run);
    }

    #[test]
    fn test_legacy_to_app_config_repeat() {
        // With repeat=true the legacy TTL doubles as the polling interval.
        let legacy = LegacyConfig {
            cloudflare: vec![LegacyCloudflareEntry {
                authentication: LegacyAuthentication {
                    api_token: "tok".to_string(),
                    api_key: None,
                },
                zone_id: "z".to_string(),
                subdomains: vec![],
                proxied: false,
            }],
            a: true,
            aaaa: true,
            purge_unknown_records: false,
            ttl: 120,
        };
        let config = legacy_to_app_config(legacy, true, true);
        assert!(matches!(config.update_cron, CronSchedule::Every(d) if d == Duration::from_secs(120)));
        assert!(config.repeat);
        assert!(config.dry_run);
    }

    #[test]
    fn test_legacy_to_app_config_api_key() {
        // Empty api_token falls back to the global API key credentials.
        let legacy = LegacyConfig {
            cloudflare: vec![LegacyCloudflareEntry {
                authentication: LegacyAuthentication {
                    api_token: String::new(),
                    api_key: Some(LegacyApiKey {
                        api_key: "key123".to_string(),
                        account_email: "test@example.com".to_string(),
                    }),
                },
                zone_id: "z".to_string(),
                subdomains: vec![],
                proxied: false,
            }],
            a: true,
            aaaa: true,
            purge_unknown_records: false,
            ttl: 300,
        };
        let config = legacy_to_app_config(legacy, false, false);
        assert!(matches!(config.auth, Auth::Key { ref api_key, ref email }
            if api_key == "key123" && email == "test@example.com"));
    }
+
    // --- is_env_config_mode ---

    #[test]
    fn test_is_env_config_mode_with_token() {
        std::env::set_var("CLOUDFLARE_API_TOKEN", "test");
        assert!(is_env_config_mode());
        std::env::remove_var("CLOUDFLARE_API_TOKEN");
    }

    #[test]
    fn test_is_env_config_mode_with_domains() {
        // Domain lists alone are enough to select env mode.
        std::env::remove_var("CLOUDFLARE_API_TOKEN");
        std::env::remove_var("CF_API_TOKEN");
        std::env::remove_var("CLOUDFLARE_API_TOKEN_FILE");
        std::env::remove_var("CF_API_TOKEN_FILE");
        std::env::set_var("DOMAINS", "example.com");
        assert!(is_env_config_mode());
        std::env::remove_var("DOMAINS");
    }
+
    // --- parse_legacy_config edge cases ---

    #[test]
    fn test_parse_legacy_config_with_detailed_subdomains() {
        // Subdomains accept both the bare-string and the object form.
        let json = r#"{
            "cloudflare": [{
                "authentication": { "api_token": "tok" },
                "zone_id": "z",
                "subdomains": [
                    { "name": "www", "proxied": true },
                    "vpn"
                ]
            }]
        }"#;
        let config = parse_legacy_config(json).unwrap();
        assert_eq!(config.cloudflare[0].subdomains.len(), 2);
        match &config.cloudflare[0].subdomains[0] {
            LegacySubdomainEntry::Detailed { name, proxied } => {
                assert_eq!(name, "www");
                assert!(*proxied);
            }
            _ => panic!("Expected Detailed"),
        }
        match &config.cloudflare[0].subdomains[1] {
            LegacySubdomainEntry::Simple(name) => assert_eq!(name, "vpn"),
            _ => panic!("Expected Simple"),
        }
    }

    #[test]
    fn test_parse_legacy_config_with_api_key() {
        let json = r#"{
            "cloudflare": [{
                "authentication": {
                    "api_key": {
                        "api_key": "key123",
                        "account_email": "user@example.com"
                    }
                },
                "zone_id": "z",
                "subdomains": ["@"]
            }]
        }"#;
        let config = parse_legacy_config(json).unwrap();
        let auth = &config.cloudflare[0].authentication;
        assert!(auth.api_key.is_some());
        assert_eq!(auth.api_key.as_ref().unwrap().api_key, "key123");
    }

    #[test]
    fn test_parse_legacy_config_invalid_json() {
        let result = parse_legacy_config("not json");
        assert!(result.is_err());
    }

    #[test]
    fn test_parse_legacy_config_ttl_exactly_30() {
        // Boundary: 30 is the lowest TTL that is NOT clamped.
        let json = r#"{
            "cloudflare": [{
                "authentication": { "api_token": "tok" },
                "zone_id": "z",
                "subdomains": ["@"]
            }],
            "ttl": 30
        }"#;
        let config = parse_legacy_config(json).unwrap();
        assert_eq!(config.ttl, 30);
    }

    #[test]
    fn test_parse_legacy_config_purge_unknown() {
        // "purgeUnknownRecords" uses the legacy camelCase JSON key.
        let json = r#"{
            "cloudflare": [{
                "authentication": { "api_token": "tok" },
                "zone_id": "z",
                "subdomains": ["@"],
                "proxied": true
            }],
            "purgeUnknownRecords": true,
            "a": true,
            "aaaa": false
        }"#;
        let config = parse_legacy_config(json).unwrap();
        assert!(config.purge_unknown_records);
        assert!(config.a);
        assert!(!config.aaaa);
        assert!(config.cloudflare[0].proxied);
    }
+
    // --- substitute_env_vars ---

    #[test]
    fn test_substitute_no_match() {
        let result = substitute_env_vars("no variables here");
        assert_eq!(result, "no variables here");
    }

    #[test]
    fn test_substitute_non_cf_ddns_vars_ignored() {
        // NOTE(review): overwrites the real HOME for the test process and
        // never restores it — harmless in CI but worth confirming.
        std::env::set_var("HOME", "/home/user");
        let result = substitute_env_vars("home: $HOME");
        assert_eq!(result, "home: $HOME"); // HOME doesn't start with CF_DDNS_
    }
+
    // --- print_config_summary ---

    #[test]
    fn test_print_config_summary_legacy_noop() {
        // Legacy mode must short-circuit and print nothing.
        let config = AppConfig {
            auth: Auth::Token(String::new()),
            providers: HashMap::new(),
            domains: HashMap::new(),
            waf_lists: Vec::new(),
            update_cron: CronSchedule::Once,
            update_on_start: true,
            delete_on_stop: false,
            ttl: TTL::AUTO,
            proxied_expression: None,
            record_comment: None,
            managed_comment_regex: None,
            waf_list_description: None,
            waf_list_item_comment: None,
            managed_waf_comment_regex: None,
            detection_timeout: Duration::from_secs(5),
            update_timeout: Duration::from_secs(30),
            dry_run: false,
            emoji: false,
            quiet: false,
            legacy_mode: true,
            legacy_config: None,
            repeat: false,
        };
        let pp = PP::new(false, false);
        // Should return early without panicking for legacy mode
        print_config_summary(&config, &pp);
    }

    #[test]
    fn test_print_config_summary_env_mode() {
        // Smoke test: every summary branch with data must print cleanly.
        let mut domains = HashMap::new();
        domains.insert(IpType::V4, vec!["example.com".to_string()]);
        let config = AppConfig {
            auth: Auth::Token("tok".to_string()),
            providers: HashMap::new(),
            domains,
            waf_lists: Vec::new(),
            update_cron: CronSchedule::Every(Duration::from_secs(300)),
            update_on_start: true,
            delete_on_stop: true,
            ttl: TTL::new(60),
            proxied_expression: None,
            record_comment: Some("managed".to_string()),
            managed_comment_regex: None,
            waf_list_description: None,
            waf_list_item_comment: None,
            managed_waf_comment_regex: None,
            detection_timeout: Duration::from_secs(5),
            update_timeout: Duration::from_secs(30),
            dry_run: false,
            emoji: false,
            quiet: false,
            legacy_mode: false,
            legacy_config: None,
            repeat: false,
        };
        let pp = PP::new(false, false);
        // Should print without panicking
        print_config_summary(&config, &pp);
    }
+
+ // ============================================================
+ // EnvGuard helper for safe env-var tests
+ // ============================================================
+
+ // Mutex to serialize env-var-dependent tests (prevents parallel interference)
+ static ENV_MUTEX: std::sync::Mutex<()> = std::sync::Mutex::new(());
+
+ struct EnvGuard {
+ keys: Vec,
+ _lock: std::sync::MutexGuard<'static, ()>,
+ }
+
+ impl EnvGuard {
+ fn set(key: &str, value: &str) -> Self {
+ let lock = ENV_MUTEX.lock().unwrap();
+ std::env::set_var(key, value);
+ Self { keys: vec![key.to_string()], _lock: lock }
+ }
+
+ fn add(&mut self, key: &str, value: &str) {
+ std::env::set_var(key, value);
+ self.keys.push(key.to_string());
+ }
+
+ /// Remove a key from the environment and record it so Drop cleans up properly.
+ fn remove(&mut self, key: &str) {
+ std::env::remove_var(key);
+ self.keys.push(key.to_string());
+ }
+ }
+
+ impl Drop for EnvGuard {
+ fn drop(&mut self) {
+ for key in &self.keys {
+ std::env::remove_var(key);
+ }
+ }
+ }
+
    // ============================================================
    // read_auth_from_env
    // ============================================================

    #[test]
    fn test_read_auth_cloudflare_api_token() {
        // First guard uses suffixed names and is immediately dropped — it
        // only exercises EnvGuard; the real assertion follows below.
        let mut g = EnvGuard::set("CLOUDFLARE_API_TOKEN_RA1", "secret-token");
        g.remove("CF_API_TOKEN_RA1");
        // We test via the real env-var names the function uses.
        // Use a unique suffix to avoid cross-test pollution; the function reads
        // fixed names, so we must use the real names. Accept the race risk in
        // exchange for genuine coverage by running tests single-threaded or with
        // the real variable names in isolation.
        drop(g);

        // Re-run using the canonical names the function actually reads.
        let mut g2 = EnvGuard::set("CLOUDFLARE_API_TOKEN", "real-token-abc");
        g2.remove("CF_API_TOKEN");
        g2.remove("CLOUDFLARE_API_TOKEN_FILE");
        g2.remove("CF_API_TOKEN_FILE");
        g2.remove("CF_ACCOUNT_ID");
        let pp = PP::new(false, true);
        let auth = read_auth_from_env(&pp);
        drop(g2);
        assert!(matches!(auth, Some(Auth::Token(ref t)) if t == "real-token-abc"));
    }

    #[test]
    fn test_read_auth_placeholder_token_returns_none() {
        // The documented placeholder must be rejected as "not configured".
        let mut g = EnvGuard::set("CLOUDFLARE_API_TOKEN", "YOUR-CLOUDFLARE-API-TOKEN");
        g.remove("CF_API_TOKEN");
        g.remove("CLOUDFLARE_API_TOKEN_FILE");
        g.remove("CF_API_TOKEN_FILE");
        g.remove("CF_ACCOUNT_ID");
        let pp = PP::new(false, true);
        let auth = read_auth_from_env(&pp);
        drop(g);
        assert!(auth.is_none());
    }

    #[test]
    fn test_read_auth_cf_api_token_deprecated_fallback() {
        // Deprecated CF_API_TOKEN still works when the new name is unset.
        let mut g = EnvGuard::set("CF_API_TOKEN", "deprecated-token");
        g.remove("CLOUDFLARE_API_TOKEN");
        g.remove("CLOUDFLARE_API_TOKEN_FILE");
        g.remove("CF_API_TOKEN_FILE");
        g.remove("CF_ACCOUNT_ID");
        let pp = PP::new(false, true);
        let auth = read_auth_from_env(&pp);
        drop(g);
        assert!(matches!(auth, Some(Auth::Token(ref t)) if t == "deprecated-token"));
    }

    #[test]
    fn test_read_auth_no_vars_returns_none() {
        let mut g = EnvGuard::set("_PLACEHOLDER_RA", "x"); // just to create guard
        g.remove("CLOUDFLARE_API_TOKEN");
        g.remove("CF_API_TOKEN");
        g.remove("CLOUDFLARE_API_TOKEN_FILE");
        g.remove("CF_API_TOKEN_FILE");
        g.remove("CF_ACCOUNT_ID");
        let pp = PP::new(false, true);
        let auth = read_auth_from_env(&pp);
        drop(g);
        assert!(auth.is_none());
    }

    #[test]
    fn test_read_auth_token_file_valid() {
        // Token read from file must be whitespace-trimmed.
        use std::io::Write;
        let dir = std::env::temp_dir();
        let path = dir.join("cf_ddns_test_token_file_valid.txt");
        {
            let mut f = std::fs::File::create(&path).expect("create temp file");
            write!(f, " file-token-xyz ").unwrap();
        }
        let path_str = path.to_str().unwrap().to_string();

        let mut g = EnvGuard::set("CLOUDFLARE_API_TOKEN_FILE", &path_str);
        g.remove("CLOUDFLARE_API_TOKEN");
        g.remove("CF_API_TOKEN");
        g.remove("CF_API_TOKEN_FILE");
        g.remove("CF_ACCOUNT_ID");
        let pp = PP::new(false, true);
        let auth = read_auth_from_env(&pp);
        drop(g);
        let _ = std::fs::remove_file(&path);
        assert!(matches!(auth, Some(Auth::Token(ref t)) if t == "file-token-xyz"));
    }

    #[test]
    fn test_read_auth_token_file_missing_returns_none() {
        // Unreadable token file is reported and treated as "no auth".
        let mut g = EnvGuard::set("CLOUDFLARE_API_TOKEN_FILE", "/nonexistent/path/token.txt");
        g.remove("CLOUDFLARE_API_TOKEN");
        g.remove("CF_API_TOKEN");
        g.remove("CF_API_TOKEN_FILE");
        g.remove("CF_ACCOUNT_ID");
        let pp = PP::new(false, true);
        let auth = read_auth_from_env(&pp);
        drop(g);
        assert!(auth.is_none());
    }

    #[test]
    fn test_read_auth_cf_account_id_deprecated_warning() {
        // CF_ACCOUNT_ID should emit a deprecation warning but not affect auth result.
        let mut g = EnvGuard::set("CLOUDFLARE_API_TOKEN", "tok-with-account-id");
        g.add("CF_ACCOUNT_ID", "acc123");
        g.remove("CF_API_TOKEN");
        g.remove("CLOUDFLARE_API_TOKEN_FILE");
        g.remove("CF_API_TOKEN_FILE");
        let pp = PP::new(false, true);
        let auth = read_auth_from_env(&pp);
        drop(g);
        // Auth should still succeed with the token; CF_ACCOUNT_ID is just ignored.
        assert!(matches!(auth, Some(Auth::Token(ref t)) if t == "tok-with-account-id"));
    }

    #[test]
    fn test_read_auth_cf_api_token_file_deprecated_fallback() {
        // Deprecated CF_API_TOKEN_FILE still works when all new names are unset.
        use std::io::Write;
        let dir = std::env::temp_dir();
        let path = dir.join("cf_ddns_test_token_file_deprecated.txt");
        {
            let mut f = std::fs::File::create(&path).expect("create temp file");
            write!(f, "old-file-token").unwrap();
        }
        let path_str = path.to_str().unwrap().to_string();

        let mut g = EnvGuard::set("CF_API_TOKEN_FILE", &path_str);
        g.remove("CLOUDFLARE_API_TOKEN");
        g.remove("CF_API_TOKEN");
        g.remove("CLOUDFLARE_API_TOKEN_FILE");
        g.remove("CF_ACCOUNT_ID");
        let pp = PP::new(false, true);
        let auth = read_auth_from_env(&pp);
        drop(g);
        let _ = std::fs::remove_file(&path);
        assert!(matches!(auth, Some(Auth::Token(ref t)) if t == "old-file-token"));
    }
+
+ // ============================================================
+ // read_providers_from_env
+ // ============================================================
+
+ #[test]
+ fn test_read_providers_defaults() {
+ let mut g = EnvGuard::set("_PLACEHOLDER_RP", "x");
+ g.remove("IP4_PROVIDER");
+ g.remove("IP4_POLICY");
+ g.remove("IP6_PROVIDER");
+ g.remove("IP6_POLICY");
+ let pp = PP::new(false, true);
+ let providers = read_providers_from_env(&pp).unwrap();
+ drop(g);
+ // Both default to CloudflareTrace, so both V4 and V6 are present.
+ assert!(providers.contains_key(&IpType::V4));
+ assert!(providers.contains_key(&IpType::V6));
+ assert!(matches!(
+ providers[&IpType::V4],
+ ProviderType::CloudflareTrace { url: None }
+ ));
+ assert!(matches!(
+ providers[&IpType::V6],
+ ProviderType::CloudflareTrace { url: None }
+ ));
+ }
+
+ #[test]
+ fn test_read_providers_ip4_none_excludes_v4() {
+ let mut g = EnvGuard::set("IP4_PROVIDER", "none");
+ g.remove("IP4_POLICY");
+ g.remove("IP6_PROVIDER");
+ g.remove("IP6_POLICY");
+ let pp = PP::new(false, true);
+ let providers = read_providers_from_env(&pp).unwrap();
+ drop(g);
+ assert!(!providers.contains_key(&IpType::V4));
+ assert!(providers.contains_key(&IpType::V6));
+ }
+
+ #[test]
+ fn test_read_providers_ip6_none_excludes_v6() {
+ let mut g = EnvGuard::set("IP6_PROVIDER", "none");
+ g.remove("IP4_PROVIDER");
+ g.remove("IP4_POLICY");
+ g.remove("IP6_POLICY");
+ let pp = PP::new(false, true);
+ let providers = read_providers_from_env(&pp).unwrap();
+ drop(g);
+ assert!(providers.contains_key(&IpType::V4));
+ assert!(!providers.contains_key(&IpType::V6));
+ }
+
+ #[test]
+ fn test_read_providers_invalid_returns_error() {
+ let mut g = EnvGuard::set("IP4_PROVIDER", "totally_invalid_provider");
+ g.remove("IP4_POLICY");
+ g.remove("IP6_PROVIDER");
+ g.remove("IP6_POLICY");
+ let pp = PP::new(false, true);
+ let result = read_providers_from_env(&pp);
+ drop(g);
+ assert!(result.is_err());
+ assert!(result.err().unwrap().contains("IP4_PROVIDER"));
+ }
+
+ #[test]
+ fn test_read_providers_ip4_policy_deprecated() {
+ // IP4_POLICY is deprecated alias for IP4_PROVIDER.
+ let mut g = EnvGuard::set("IP4_POLICY", "ipify");
+ g.remove("IP4_PROVIDER");
+ g.remove("IP6_PROVIDER");
+ g.remove("IP6_POLICY");
+ let pp = PP::new(false, true);
+ let providers = read_providers_from_env(&pp).unwrap();
+ drop(g);
+ assert!(matches!(providers[&IpType::V4], ProviderType::Ipify));
+ }
+
+ #[test]
+ fn test_read_providers_ip6_policy_deprecated() {
+ // IP6_POLICY is deprecated alias for IP6_PROVIDER.
+ let mut g = EnvGuard::set("IP6_POLICY", "ipify");
+ g.remove("IP6_PROVIDER");
+ g.remove("IP4_PROVIDER");
+ g.remove("IP4_POLICY");
+ let pp = PP::new(false, true);
+ let providers = read_providers_from_env(&pp).unwrap();
+ drop(g);
+ assert!(matches!(providers[&IpType::V6], ProviderType::Ipify));
+ }
+
+ // ============================================================
+ // read_cron_from_env: @nevermore deprecated alias
+ // ============================================================
+
    #[test]
    fn test_read_cron_deprecated_nevermore() {
        // "@nevermore" is accepted as a deprecated alias and maps to
        // CronSchedule::Once (see the asserted variant below).
        let g = EnvGuard::set("UPDATE_CRON", "@nevermore");
        let pp = PP::new(false, true);
        let sched = read_cron_from_env(&pp).unwrap();
        drop(g);
        assert!(matches!(sched, CronSchedule::Once));
    }

    #[test]
    fn test_read_cron_invalid_duration_in_every() {
        // An unparseable duration after "@every" must surface as an error,
        // not fall back to a default schedule.
        let g = EnvGuard::set("UPDATE_CRON", "@every notaduration");
        let pp = PP::new(false, true);
        let result = read_cron_from_env(&pp);
        drop(g);
        assert!(result.is_err());
    }
+
+ // ============================================================
+ // load_env_config
+ // ============================================================
+
+ #[test]
+ fn test_load_env_config_basic_success() {
+ let mut g = EnvGuard::set("CLOUDFLARE_API_TOKEN", "tok-load-test");
+ g.add("DOMAINS", "example.com");
+ // Clear potentially interfering vars.
+ g.remove("CF_API_TOKEN");
+ g.remove("CLOUDFLARE_API_TOKEN_FILE");
+ g.remove("CF_API_TOKEN_FILE");
+ g.remove("CF_ACCOUNT_ID");
+ g.remove("IP4_DOMAINS");
+ g.remove("IP6_DOMAINS");
+ g.remove("WAF_LISTS");
+ g.remove("UPDATE_CRON");
+ g.remove("UPDATE_ON_START");
+ g.remove("DELETE_ON_STOP");
+ g.remove("TTL");
+ g.remove("PROXIED");
+ g.remove("IP4_PROVIDER");
+ g.remove("IP4_POLICY");
+ g.remove("IP6_PROVIDER");
+ g.remove("IP6_POLICY");
+ g.remove("PUID");
+ g.remove("PGID");
+ let pp = PP::new(false, true);
+ let config = load_env_config(&pp).unwrap();
+ drop(g);
+ assert!(matches!(config.auth, Auth::Token(ref t) if t == "tok-load-test"));
+ assert!(!config.domains.is_empty());
+ assert!(!config.legacy_mode);
+ }
+
+ #[test]
+ fn test_load_env_config_missing_auth_returns_error() {
+ let mut g = EnvGuard::set("DOMAINS", "example.com");
+ g.remove("CLOUDFLARE_API_TOKEN");
+ g.remove("CF_API_TOKEN");
+ g.remove("CLOUDFLARE_API_TOKEN_FILE");
+ g.remove("CF_API_TOKEN_FILE");
+ g.remove("CF_ACCOUNT_ID");
+ g.remove("WAF_LISTS");
+ g.remove("IP4_PROVIDER");
+ g.remove("IP4_POLICY");
+ g.remove("IP6_PROVIDER");
+ g.remove("IP6_POLICY");
+ g.remove("UPDATE_CRON");
+ g.remove("PUID");
+ g.remove("PGID");
+ let pp = PP::new(false, true);
+ let result = load_env_config(&pp);
+ drop(g);
+ assert!(result.is_err());
+ let err = result.err().unwrap();
+ assert!(err.contains("No authentication") || err.contains("CLOUDFLARE_API_TOKEN"));
+ }
+
+ #[test]
+ fn test_load_env_config_missing_domains_returns_error() {
+ let mut g = EnvGuard::set("CLOUDFLARE_API_TOKEN", "tok-no-domains");
+ g.remove("CF_API_TOKEN");
+ g.remove("CLOUDFLARE_API_TOKEN_FILE");
+ g.remove("CF_API_TOKEN_FILE");
+ g.remove("CF_ACCOUNT_ID");
+ g.remove("DOMAINS");
+ g.remove("IP4_DOMAINS");
+ g.remove("IP6_DOMAINS");
+ g.remove("WAF_LISTS");
+ g.remove("IP4_PROVIDER");
+ g.remove("IP4_POLICY");
+ g.remove("IP6_PROVIDER");
+ g.remove("IP6_POLICY");
+ g.remove("UPDATE_CRON");
+ g.remove("PUID");
+ g.remove("PGID");
+ let pp = PP::new(false, true);
+ let result = load_env_config(&pp);
+ drop(g);
+ assert!(result.is_err());
+ let err = result.err().unwrap();
+ assert!(err.contains("No update targets") || err.contains("DOMAINS"));
+ }
+
+ #[test]
+ fn test_load_env_config_once_update_on_start_false_errors() {
+ let mut g = EnvGuard::set("CLOUDFLARE_API_TOKEN", "tok-once-test");
+ g.add("DOMAINS", "example.com");
+ g.add("UPDATE_CRON", "@once");
+ g.add("UPDATE_ON_START", "false");
+ g.add("DELETE_ON_STOP", "false");
+ g.remove("CF_API_TOKEN");
+ g.remove("CLOUDFLARE_API_TOKEN_FILE");
+ g.remove("CF_API_TOKEN_FILE");
+ g.remove("CF_ACCOUNT_ID");
+ g.remove("IP4_DOMAINS");
+ g.remove("IP6_DOMAINS");
+ g.remove("WAF_LISTS");
+ g.remove("IP4_PROVIDER");
+ g.remove("IP4_POLICY");
+ g.remove("IP6_PROVIDER");
+ g.remove("IP6_POLICY");
+ g.remove("PUID");
+ g.remove("PGID");
+ let pp = PP::new(false, true);
+ let result = load_env_config(&pp);
+ drop(g);
+ assert!(result.is_err());
+ let err = result.err().unwrap();
+ assert!(err.contains("UPDATE_ON_START"));
+ }
+
+ #[test]
+ fn test_load_env_config_once_delete_on_stop_true_errors() {
+ let mut g = EnvGuard::set("CLOUDFLARE_API_TOKEN", "tok-once-del");
+ g.add("DOMAINS", "example.com");
+ g.add("UPDATE_CRON", "@once");
+ g.add("UPDATE_ON_START", "true");
+ g.add("DELETE_ON_STOP", "true");
+ g.remove("CF_API_TOKEN");
+ g.remove("CLOUDFLARE_API_TOKEN_FILE");
+ g.remove("CF_API_TOKEN_FILE");
+ g.remove("CF_ACCOUNT_ID");
+ g.remove("IP4_DOMAINS");
+ g.remove("IP6_DOMAINS");
+ g.remove("WAF_LISTS");
+ g.remove("IP4_PROVIDER");
+ g.remove("IP4_POLICY");
+ g.remove("IP6_PROVIDER");
+ g.remove("IP6_POLICY");
+ g.remove("PUID");
+ g.remove("PGID");
+ let pp = PP::new(false, true);
+ let result = load_env_config(&pp);
+ drop(g);
+ assert!(result.is_err());
+ let err = result.err().unwrap();
+ assert!(err.contains("DELETE_ON_STOP"));
+ }
+
+ #[test]
+ fn test_load_env_config_with_waf_list_only() {
+ // WAF_LISTS alone (no DOMAINS) should succeed.
+ let mut g = EnvGuard::set("CLOUDFLARE_API_TOKEN", "tok-waf-only");
+ g.add("WAF_LISTS", "acc123/my_list");
+ g.remove("CF_API_TOKEN");
+ g.remove("CLOUDFLARE_API_TOKEN_FILE");
+ g.remove("CF_API_TOKEN_FILE");
+ g.remove("CF_ACCOUNT_ID");
+ g.remove("DOMAINS");
+ g.remove("IP4_DOMAINS");
+ g.remove("IP6_DOMAINS");
+ g.remove("IP4_PROVIDER");
+ g.remove("IP4_POLICY");
+ g.remove("IP6_PROVIDER");
+ g.remove("IP6_POLICY");
+ g.remove("UPDATE_CRON");
+ g.remove("UPDATE_ON_START");
+ g.remove("DELETE_ON_STOP");
+ g.remove("PUID");
+ g.remove("PGID");
+ let pp = PP::new(false, true);
+ let result = load_env_config(&pp);
+ drop(g);
+ assert!(result.is_ok());
+ let config = result.unwrap();
+ assert_eq!(config.waf_lists.len(), 1);
+ assert!(config.domains.is_empty());
+ }
+
+ #[test]
+ fn test_load_env_config_puid_pgid_deprecated_still_succeeds() {
+ let mut g = EnvGuard::set("CLOUDFLARE_API_TOKEN", "tok-puid");
+ g.add("DOMAINS", "example.com");
+ g.add("PUID", "1000");
+ g.add("PGID", "1000");
+ g.remove("CF_API_TOKEN");
+ g.remove("CLOUDFLARE_API_TOKEN_FILE");
+ g.remove("CF_API_TOKEN_FILE");
+ g.remove("CF_ACCOUNT_ID");
+ g.remove("IP4_DOMAINS");
+ g.remove("IP6_DOMAINS");
+ g.remove("WAF_LISTS");
+ g.remove("IP4_PROVIDER");
+ g.remove("IP4_POLICY");
+ g.remove("IP6_PROVIDER");
+ g.remove("IP6_POLICY");
+ g.remove("UPDATE_CRON");
+ g.remove("UPDATE_ON_START");
+ g.remove("DELETE_ON_STOP");
+ let pp = PP::new(false, true);
+ let result = load_env_config(&pp);
+ drop(g);
+ // PUID/PGID are deprecated and ignored; config should still load.
+ assert!(result.is_ok());
+ }
+
+ #[test]
+ fn test_load_env_config_invalid_provider_returns_error() {
+ let mut g = EnvGuard::set("CLOUDFLARE_API_TOKEN", "tok-bad-provider");
+ g.add("DOMAINS", "example.com");
+ g.add("IP4_PROVIDER", "not_a_real_provider");
+ g.remove("CF_API_TOKEN");
+ g.remove("CLOUDFLARE_API_TOKEN_FILE");
+ g.remove("CF_API_TOKEN_FILE");
+ g.remove("CF_ACCOUNT_ID");
+ g.remove("IP4_DOMAINS");
+ g.remove("IP6_DOMAINS");
+ g.remove("WAF_LISTS");
+ g.remove("IP4_POLICY");
+ g.remove("IP6_PROVIDER");
+ g.remove("IP6_POLICY");
+ g.remove("UPDATE_CRON");
+ g.remove("UPDATE_ON_START");
+ g.remove("DELETE_ON_STOP");
+ g.remove("PUID");
+ g.remove("PGID");
+ let pp = PP::new(false, true);
+ let result = load_env_config(&pp);
+ drop(g);
+ assert!(result.is_err());
+ assert!(result.err().unwrap().contains("IP4_PROVIDER"));
+ }
+
+ #[test]
+ fn test_load_env_config_comment_regex_mismatch_still_succeeds() {
+ // A mismatch between RECORD_COMMENT and MANAGED_RECORDS_COMMENT_REGEX should
+ // emit a warning but not fail.
+ let mut g = EnvGuard::set("CLOUDFLARE_API_TOKEN", "tok-regex-warn");
+ g.add("DOMAINS", "example.com");
+ g.add("RECORD_COMMENT", "my comment");
+ g.add("MANAGED_RECORDS_COMMENT_REGEX", "^cloudflare-ddns");
+ g.remove("CF_API_TOKEN");
+ g.remove("CLOUDFLARE_API_TOKEN_FILE");
+ g.remove("CF_API_TOKEN_FILE");
+ g.remove("CF_ACCOUNT_ID");
+ g.remove("IP4_DOMAINS");
+ g.remove("IP6_DOMAINS");
+ g.remove("WAF_LISTS");
+ g.remove("IP4_PROVIDER");
+ g.remove("IP4_POLICY");
+ g.remove("IP6_PROVIDER");
+ g.remove("IP6_POLICY");
+ g.remove("UPDATE_CRON");
+ g.remove("UPDATE_ON_START");
+ g.remove("DELETE_ON_STOP");
+ g.remove("PUID");
+ g.remove("PGID");
+ let pp = PP::new(false, true);
+ let result = load_env_config(&pp);
+ drop(g);
+ assert!(result.is_ok());
+ }
+
+ // ============================================================
+ // setup_notifiers
+ // ============================================================
+
    #[test]
    fn test_setup_notifiers_no_shoutrrr_returns_empty() {
        // The placeholder var only exists to obtain an EnvGuard so SHOUTRRR
        // can be removed for the duration of the test.
        let mut g = EnvGuard::set("_PLACEHOLDER_SN", "x");
        g.remove("SHOUTRRR");
        let pp = PP::new(false, true);
        let notifier = setup_notifiers(&pp);
        drop(g);
        assert!(notifier.is_empty());
    }

    #[test]
    fn test_setup_notifiers_empty_shoutrrr_returns_empty() {
        let g = EnvGuard::set("SHOUTRRR", "");
        let pp = PP::new(false, true);
        let notifier = setup_notifiers(&pp);
        drop(g);
        // Empty string is treated as unset by getenv_list.
        assert!(notifier.is_empty());
    }
+
+ // ============================================================
+ // setup_heartbeats
+ // ============================================================
+
+ #[test]
+ fn test_setup_heartbeats_no_vars_returns_empty() {
+ let mut g = EnvGuard::set("_PLACEHOLDER_HB", "x");
+ g.remove("HEALTHCHECKS");
+ g.remove("UPTIMEKUMA");
+ let pp = PP::new(false, true);
+ let hb = setup_heartbeats(&pp);
+ drop(g);
+ assert!(hb.is_empty());
+ }
+
+ #[test]
+ fn test_setup_heartbeats_healthchecks_only() {
+ let mut g = EnvGuard::set("HEALTHCHECKS", "https://hc-ping.com/abc123");
+ g.remove("UPTIMEKUMA");
+ let pp = PP::new(false, true);
+ let hb = setup_heartbeats(&pp);
+ drop(g);
+ assert!(!hb.is_empty());
+ }
+
+ #[test]
+ fn test_setup_heartbeats_uptimekuma_only() {
+ let mut g = EnvGuard::set("UPTIMEKUMA", "https://status.example.com/api/push/abc");
+ g.remove("HEALTHCHECKS");
+ let pp = PP::new(false, true);
+ let hb = setup_heartbeats(&pp);
+ drop(g);
+ assert!(!hb.is_empty());
+ }
+
+ #[test]
+ fn test_setup_heartbeats_both_monitors() {
+ let mut g = EnvGuard::set("HEALTHCHECKS", "https://hc-ping.com/abc");
+ g.add("UPTIMEKUMA", "https://status.example.com/api/push/def");
+ let pp = PP::new(false, true);
+ let hb = setup_heartbeats(&pp);
+ drop(g);
+ assert!(!hb.is_empty());
+ }
+
+ // ============================================================
+ // print_config_summary - additional coverage paths
+ // ============================================================
+
+ #[test]
+ fn test_print_config_summary_with_waf_lists() {
+ use crate::cloudflare::WAFList;
+ let waf_list = WAFList {
+ account_id: "acc123".to_string(),
+ list_name: "my_list".to_string(),
+ };
+ let config = AppConfig {
+ auth: Auth::Token("tok".to_string()),
+ providers: HashMap::new(),
+ domains: HashMap::new(),
+ waf_lists: vec![waf_list],
+ update_cron: CronSchedule::Every(Duration::from_secs(300)),
+ update_on_start: true,
+ delete_on_stop: false,
+ ttl: TTL::AUTO,
+ proxied_expression: None,
+ record_comment: None,
+ managed_comment_regex: None,
+ waf_list_description: None,
+ waf_list_item_comment: None,
+ managed_waf_comment_regex: None,
+ detection_timeout: Duration::from_secs(5),
+ update_timeout: Duration::from_secs(30),
+ dry_run: false,
+ emoji: false,
+ quiet: false,
+ legacy_mode: false,
+ legacy_config: None,
+ repeat: false,
+ };
+ let pp = PP::new(false, true);
+ print_config_summary(&config, &pp); // must not panic
+ }
+
+ #[test]
+ fn test_print_config_summary_with_providers_and_delete_on_stop() {
+ let mut providers = HashMap::new();
+ providers.insert(IpType::V4, ProviderType::CloudflareTrace { url: None });
+ providers.insert(IpType::V6, ProviderType::Ipify);
+ let mut domains = HashMap::new();
+ domains.insert(IpType::V4, vec!["v4.example.com".to_string()]);
+ let config = AppConfig {
+ auth: Auth::Token("tok".to_string()),
+ providers,
+ domains,
+ waf_lists: Vec::new(),
+ update_cron: CronSchedule::Every(Duration::from_secs(600)),
+ update_on_start: true,
+ delete_on_stop: true,
+ ttl: TTL::new(120),
+ proxied_expression: None,
+ record_comment: Some("cf-ddns".to_string()),
+ managed_comment_regex: None,
+ waf_list_description: None,
+ waf_list_item_comment: None,
+ managed_waf_comment_regex: None,
+ detection_timeout: Duration::from_secs(5),
+ update_timeout: Duration::from_secs(30),
+ dry_run: false,
+ emoji: false,
+ quiet: true,
+ legacy_mode: false,
+ legacy_config: None,
+ repeat: true,
+ };
+ let pp = PP::new(false, true);
+ print_config_summary(&config, &pp); // must not panic
+ }
+
+ #[test]
+ fn test_print_config_summary_once_schedule() {
+ let mut domains = HashMap::new();
+ domains.insert(IpType::V6, vec!["ipv6.example.com".to_string()]);
+ let config = AppConfig {
+ auth: Auth::Token("tok".to_string()),
+ providers: HashMap::new(),
+ domains,
+ waf_lists: Vec::new(),
+ update_cron: CronSchedule::Once,
+ update_on_start: true,
+ delete_on_stop: false,
+ ttl: TTL::AUTO,
+ proxied_expression: None,
+ record_comment: None,
+ managed_comment_regex: None,
+ waf_list_description: None,
+ waf_list_item_comment: None,
+ managed_waf_comment_regex: None,
+ detection_timeout: Duration::from_secs(5),
+ update_timeout: Duration::from_secs(30),
+ dry_run: false,
+ emoji: false,
+ quiet: false,
+ legacy_mode: false,
+ legacy_config: None,
+ repeat: false,
+ };
+ let pp = PP::new(false, true);
+ print_config_summary(&config, &pp); // must not panic
+ }
+}
diff --git a/src/domain.rs b/src/domain.rs
new file mode 100644
index 0000000..69781e2
--- /dev/null
+++ b/src/domain.rs
@@ -0,0 +1,547 @@
+use std::fmt;
+
/// Represents a DNS domain - either a regular FQDN or a wildcard.
#[allow(dead_code)]
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Domain {
    // A concrete, fully-qualified domain name, stored in ASCII (IDNA) form.
    FQDN(String),
    // A wildcard domain; the stored string is the base (without the "*." prefix).
    Wildcard(String),
}
+
+#[allow(dead_code)]
+impl Domain {
+ /// Parse a domain string. Handles:
+ /// - "@" or "" -> root domain (handled at FQDN construction time)
+ /// - "*.example.com" -> wildcard
+ /// - "sub.example.com" -> regular FQDN
+ pub fn new(input: &str) -> Result {
+ let trimmed = input.trim().to_lowercase();
+ if trimmed.starts_with("*.") {
+ let base = &trimmed[2..];
+ let ascii = domain_to_ascii(base)?;
+ Ok(Domain::Wildcard(ascii))
+ } else {
+ let ascii = domain_to_ascii(&trimmed)?;
+ Ok(Domain::FQDN(ascii))
+ }
+ }
+
+ /// Returns the DNS name in ASCII form suitable for API calls.
+ pub fn dns_name_ascii(&self) -> String {
+ match self {
+ Domain::FQDN(s) => s.clone(),
+ Domain::Wildcard(s) => format!("*.{s}"),
+ }
+ }
+
+ /// Returns a human-readable description of the domain.
+ pub fn describe(&self) -> String {
+ match self {
+ Domain::FQDN(s) => describe_domain(s),
+ Domain::Wildcard(s) => format!("*.{}", describe_domain(s)),
+ }
+ }
+
+ /// Returns the zones (parent domains) for this domain, from most specific to least.
+ pub fn zones(&self) -> Vec {
+ let base = match self {
+ Domain::FQDN(s) => s.as_str(),
+ Domain::Wildcard(s) => s.as_str(),
+ };
+ let mut zones = Vec::new();
+ let mut current = base.to_string();
+ while !current.is_empty() {
+ zones.push(current.clone());
+ if let Some(pos) = current.find('.') {
+ current = current[pos + 1..].to_string();
+ } else {
+ break;
+ }
+ }
+ zones
+ }
+}
+
+impl fmt::Display for Domain {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}", self.describe())
+ }
+}
+
/// Construct an FQDN from a subdomain name and base domain.
///
/// - Empty or "@" subdomain -> the lowercased base domain itself.
/// - Anything else (including wildcard "*.x" names) -> "<sub>.<base>",
///   both lowercased; the subdomain is also trimmed of whitespace.
///
/// The original had separate, byte-identical branches for "*." names and
/// plain names; they are collapsed into one.
pub fn make_fqdn(subdomain: &str, base_domain: &str) -> String {
    let base = base_domain.to_lowercase();
    let name = subdomain.to_lowercase();
    let name = name.trim();
    if name.is_empty() || name == "@" {
        base
    } else {
        format!("{name}.{base}")
    }
}
+
+/// Convert a domain to ASCII using IDNA encoding.
+#[allow(dead_code)]
+fn domain_to_ascii(domain: &str) -> Result {
+ if domain.is_empty() {
+ return Ok(String::new());
+ }
+ // Try IDNA encoding for internationalized domain names
+ match idna::domain_to_ascii(domain) {
+ Ok(ascii) => Ok(ascii),
+ Err(_) => {
+ // Fallback: if it's already ASCII, just return it
+ if domain.is_ascii() {
+ Ok(domain.to_string())
+ } else {
+ Err(format!("Invalid domain name: {domain}"))
+ }
+ }
+ }
+}
+
+/// Convert ASCII domain back to Unicode for display.
+#[allow(dead_code)]
+fn describe_domain(ascii: &str) -> String {
+ // Try to convert punycode back to unicode for display
+ match idna::domain_to_unicode(ascii) {
+ (unicode, Ok(())) => unicode,
+ _ => ascii.to_string(),
+ }
+}
+
+/// Parse a comma-separated list of domain strings.
+#[allow(dead_code)]
+pub fn parse_domain_list(input: &str) -> Result, String> {
+ if input.trim().is_empty() {
+ return Ok(Vec::new());
+ }
+ input
+ .split(',')
+ .map(|s| Domain::new(s.trim()))
+ .collect()
+}
+
+// --- Domain Expression Evaluator ---
+// Supports: true, false, is(domain,...), sub(domain,...), !, &&, ||, ()
+
+/// Parse and evaluate a domain expression to determine if a domain should be proxied.
+pub fn parse_proxied_expression(expr: &str) -> Result bool + Send + Sync>, String> {
+ let expr = expr.trim();
+ if expr.is_empty() || expr == "false" {
+ return Ok(Box::new(|_: &str| false));
+ }
+ if expr == "true" {
+ return Ok(Box::new(|_: &str| true));
+ }
+
+ let tokens = tokenize_expr(expr)?;
+ let (predicate, rest) = parse_or_expr(&tokens)?;
+ if !rest.is_empty() {
+ return Err(format!("Unexpected tokens in proxied expression: {}", rest.join(" ")));
+ }
+ Ok(predicate)
+}
+
/// Split a proxied-expression string into tokens: "(", ")", "!", ",",
/// "&&", "||", and bare words (domain names / keywords; word characters
/// are alphanumerics plus '.', '-', '_', '*', '@'). Whitespace separates
/// tokens. Single '&' or '|' is an error, as is any other character.
/// (Return type restored to `Result<Vec<String>, String>` — the body
/// pushes `String` tokens.)
fn tokenize_expr(input: &str) -> Result<Vec<String>, String> {
    let mut tokens = Vec::new();
    let mut chars = input.chars().peekable();
    while let Some(&c) = chars.peek() {
        match c {
            ' ' | '\t' | '\n' | '\r' => {
                chars.next();
            }
            '(' | ')' | '!' | ',' => {
                tokens.push(c.to_string());
                chars.next();
            }
            '&' => {
                chars.next();
                if chars.peek() == Some(&'&') {
                    chars.next();
                    tokens.push("&&".to_string());
                } else {
                    return Err("Expected '&&', got single '&'".to_string());
                }
            }
            '|' => {
                chars.next();
                if chars.peek() == Some(&'|') {
                    chars.next();
                    tokens.push("||".to_string());
                } else {
                    return Err("Expected '||', got single '|'".to_string());
                }
            }
            _ => {
                // Accumulate a word token until a non-word character.
                let mut word = String::new();
                while let Some(&c) = chars.peek() {
                    if c.is_alphanumeric() || c == '.' || c == '-' || c == '_' || c == '*' || c == '@' {
                        word.push(c);
                        chars.next();
                    } else {
                        break;
                    }
                }
                if word.is_empty() {
                    return Err(format!("Unexpected character: {c}"));
                }
                tokens.push(word);
            }
        }
    }
    Ok(tokens)
}
+
+type Predicate = Box bool + Send + Sync>;
+
+fn parse_or_expr(tokens: &[String]) -> Result<(Predicate, &[String]), String> {
+ let (mut left, mut rest) = parse_and_expr(tokens)?;
+ while !rest.is_empty() && rest[0] == "||" {
+ let (right, new_rest) = parse_and_expr(&rest[1..])?;
+ let prev = left;
+ left = Box::new(move |d: &str| prev(d) || right(d));
+ rest = new_rest;
+ }
+ Ok((left, rest))
+}
+
+fn parse_and_expr(tokens: &[String]) -> Result<(Predicate, &[String]), String> {
+ let (mut left, mut rest) = parse_not_expr(tokens)?;
+ while !rest.is_empty() && rest[0] == "&&" {
+ let (right, new_rest) = parse_not_expr(&rest[1..])?;
+ let prev = left;
+ left = Box::new(move |d: &str| prev(d) && right(d));
+ rest = new_rest;
+ }
+ Ok((left, rest))
+}
+
+fn parse_not_expr(tokens: &[String]) -> Result<(Predicate, &[String]), String> {
+ if tokens.is_empty() {
+ return Err("Unexpected end of expression".to_string());
+ }
+ if tokens[0] == "!" {
+ let (inner, rest) = parse_not_expr(&tokens[1..])?;
+ let pred: Predicate = Box::new(move |d: &str| !inner(d));
+ Ok((pred, rest))
+ } else {
+ parse_atom(tokens)
+ }
+}
+
+fn parse_atom(tokens: &[String]) -> Result<(Predicate, &[String]), String> {
+ if tokens.is_empty() {
+ return Err("Unexpected end of expression".to_string());
+ }
+
+ match tokens[0].as_str() {
+ "true" => Ok((Box::new(|_: &str| true), &tokens[1..])),
+ "false" => Ok((Box::new(|_: &str| false), &tokens[1..])),
+ "(" => {
+ let (inner, rest) = parse_or_expr(&tokens[1..])?;
+ if rest.is_empty() || rest[0] != ")" {
+ return Err("Missing closing parenthesis".to_string());
+ }
+ Ok((inner, &rest[1..]))
+ }
+ "is" => {
+ let (domains, rest) = parse_domain_args(&tokens[1..])?;
+ let pred: Predicate = Box::new(move |d: &str| {
+ let d_lower = d.to_lowercase();
+ domains.iter().any(|dom| d_lower == *dom)
+ });
+ Ok((pred, rest))
+ }
+ "sub" => {
+ let (domains, rest) = parse_domain_args(&tokens[1..])?;
+ let pred: Predicate = Box::new(move |d: &str| {
+ let d_lower = d.to_lowercase();
+ domains.iter().any(|dom| {
+ d_lower == *dom || d_lower.ends_with(&format!(".{dom}"))
+ })
+ });
+ Ok((pred, rest))
+ }
+ _ => Err(format!("Unexpected token: {}", tokens[0])),
+ }
+}
+
/// Parse the parenthesized, comma-separated argument list that follows a
/// function name (e.g. the "(a.com, b.com)" in "is(a.com, b.com)").
///
/// Returns the lowercased arguments and the slice of tokens remaining
/// after the closing ')'. Errors if the list does not start with '(' or
/// the ')' is missing. (Return type restored to
/// `Result<(Vec<String>, &[String]), String>` — the body collects owned,
/// lowercased `String`s.)
fn parse_domain_args(tokens: &[String]) -> Result<(Vec<String>, &[String]), String> {
    if tokens.is_empty() || tokens[0] != "(" {
        return Err("Expected '(' after function name".to_string());
    }
    let mut domains = Vec::new();
    let mut i = 1;
    while i < tokens.len() && tokens[i] != ")" {
        if tokens[i] == "," {
            i += 1;
            continue;
        }
        domains.push(tokens[i].to_lowercase());
        i += 1;
    }
    if i >= tokens.len() {
        return Err("Missing closing ')' in function call".to_string());
    }
    Ok((domains, &tokens[i + 1..]))
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_make_fqdn_root() {
+ assert_eq!(make_fqdn("", "example.com"), "example.com");
+ assert_eq!(make_fqdn("@", "example.com"), "example.com");
+ }
+
+ #[test]
+ fn test_make_fqdn_subdomain() {
+ assert_eq!(make_fqdn("www", "example.com"), "www.example.com");
+ assert_eq!(make_fqdn("VPN", "Example.COM"), "vpn.example.com");
+ }
+
+ #[test]
+ fn test_domain_wildcard() {
+ let d = Domain::new("*.example.com").unwrap();
+ assert_eq!(d.dns_name_ascii(), "*.example.com");
+ }
+
+ #[test]
+ fn test_parse_domain_list() {
+ let domains = parse_domain_list("example.com, *.example.com, sub.example.com").unwrap();
+ assert_eq!(domains.len(), 3);
+ }
+
+ #[test]
+ fn test_proxied_expr_true() {
+ let pred = parse_proxied_expression("true").unwrap();
+ assert!(pred("anything.com"));
+ }
+
+ #[test]
+ fn test_proxied_expr_false() {
+ let pred = parse_proxied_expression("false").unwrap();
+ assert!(!pred("anything.com"));
+ }
+
+ #[test]
+ fn test_proxied_expr_is() {
+ let pred = parse_proxied_expression("is(example.com)").unwrap();
+ assert!(pred("example.com"));
+ assert!(!pred("sub.example.com"));
+ }
+
+ #[test]
+ fn test_proxied_expr_sub() {
+ let pred = parse_proxied_expression("sub(example.com)").unwrap();
+ assert!(pred("example.com"));
+ assert!(pred("sub.example.com"));
+ assert!(!pred("other.com"));
+ }
+
+ #[test]
+ fn test_proxied_expr_complex() {
+ let pred = parse_proxied_expression("is(a.com) || is(b.com)").unwrap();
+ assert!(pred("a.com"));
+ assert!(pred("b.com"));
+ assert!(!pred("c.com"));
+ }
+
+ #[test]
+ fn test_proxied_expr_negation() {
+ let pred = parse_proxied_expression("!is(internal.com)").unwrap();
+ assert!(!pred("internal.com"));
+ assert!(pred("public.com"));
+ }
+
+ // --- Domain::new with regular FQDN ---
+ #[test]
+ fn test_domain_new_fqdn() {
+ let d = Domain::new("example.com").unwrap();
+ assert_eq!(d, Domain::FQDN("example.com".to_string()));
+ }
+
+ #[test]
+ fn test_domain_new_fqdn_uppercase() {
+ let d = Domain::new("EXAMPLE.COM").unwrap();
+ assert_eq!(d, Domain::FQDN("example.com".to_string()));
+ }
+
+ // --- Domain::dns_name_ascii for FQDN ---
+ #[test]
+ fn test_dns_name_ascii_fqdn() {
+ let d = Domain::FQDN("example.com".to_string());
+ assert_eq!(d.dns_name_ascii(), "example.com");
+ }
+
+ // --- Domain::describe for both variants ---
+ #[test]
+ fn test_describe_fqdn() {
+ let d = Domain::FQDN("example.com".to_string());
+ // ASCII domain should round-trip through describe unchanged
+ assert_eq!(d.describe(), "example.com");
+ }
+
+ #[test]
+ fn test_describe_wildcard() {
+ let d = Domain::Wildcard("example.com".to_string());
+ assert_eq!(d.describe(), "*.example.com");
+ }
+
+ // --- Domain::zones ---
+ #[test]
+ fn test_zones_fqdn() {
+ let d = Domain::FQDN("sub.example.com".to_string());
+ let zones = d.zones();
+ assert_eq!(zones, vec!["sub.example.com", "example.com", "com"]);
+ }
+
+ #[test]
+ fn test_zones_wildcard() {
+ let d = Domain::Wildcard("example.com".to_string());
+ let zones = d.zones();
+ assert_eq!(zones, vec!["example.com", "com"]);
+ }
+
+ #[test]
+ fn test_zones_single_label() {
+ let d = Domain::FQDN("localhost".to_string());
+ let zones = d.zones();
+ assert_eq!(zones, vec!["localhost"]);
+ }
+
+ // --- Domain Display trait ---
+ #[test]
+ fn test_display_fqdn() {
+ let d = Domain::FQDN("example.com".to_string());
+ assert_eq!(format!("{d}"), "example.com");
+ }
+
+ #[test]
+ fn test_display_wildcard() {
+ let d = Domain::Wildcard("example.com".to_string());
+ assert_eq!(format!("{d}"), "*.example.com");
+ }
+
+ // --- domain_to_ascii (tested indirectly via Domain::new) ---
+ #[test]
+ fn test_domain_new_empty_string() {
+ // empty string -> domain_to_ascii returns Ok("") -> Domain::FQDN("")
+ let d = Domain::new("").unwrap();
+ assert_eq!(d, Domain::FQDN("".to_string()));
+ }
+
+ #[test]
+ fn test_domain_new_ascii_domain() {
+ let d = Domain::new("www.example.org").unwrap();
+ assert_eq!(d.dns_name_ascii(), "www.example.org");
+ }
+
+ #[test]
+ fn test_domain_new_internationalized() {
+ // "mΓΌnchen.de" should be encoded to punycode
+ let d = Domain::new("mΓΌnchen.de").unwrap();
+ let ascii = d.dns_name_ascii();
+ // The punycode-encoded form should start with "xn--"
+ assert!(ascii.contains("xn--"), "expected punycode, got: {ascii}");
+ }
+
+ // --- describe_domain (tested indirectly via Domain::describe) ---
+ #[test]
+ fn test_describe_punycode_roundtrip() {
+ // Build a domain with a known punycode label and confirm describe decodes it
+ let d = Domain::new("mΓΌnchen.de").unwrap();
+ let described = d.describe();
+ // Should contain the Unicode form, not the raw punycode
+ assert!(described.contains("mΓΌnchen") || described.contains("xn--"),
+ "describe returned: {described}");
+ }
+
+ #[test]
+ fn test_describe_regular_ascii() {
+ let d = Domain::FQDN("example.com".to_string());
+ assert_eq!(d.describe(), "example.com");
+ }
+
+ // --- parse_domain_list with empty input ---
+ #[test]
+ fn test_parse_domain_list_empty() {
+ let result = parse_domain_list("").unwrap();
+ assert!(result.is_empty());
+ }
+
+ #[test]
+ fn test_parse_domain_list_whitespace_only() {
+ let result = parse_domain_list(" ").unwrap();
+ assert!(result.is_empty());
+ }
+
+ // --- Tokenizer edge cases (via parse_proxied_expression) ---
+ #[test]
+ fn test_tokenizer_single_ampersand_error() {
+ let result = parse_proxied_expression("is(a.com) & is(b.com)");
+ assert!(result.is_err());
+ let err = result.err().unwrap();
+ assert!(err.contains("&&"), "error was: {err}");
+ }
+
+ #[test]
+ fn test_tokenizer_single_pipe_error() {
+ let result = parse_proxied_expression("is(a.com) | is(b.com)");
+ assert!(result.is_err());
+ let err = result.err().unwrap();
+ assert!(err.contains("||"), "error was: {err}");
+ }
+
+ #[test]
+ fn test_tokenizer_unexpected_character_error() {
+ let result = parse_proxied_expression("is(a.com) $ is(b.com)");
+ assert!(result.is_err());
+ }
+
+ // --- Parser edge cases ---
+ #[test]
+ fn test_parse_and_expr_double_ampersand() {
+ let pred = parse_proxied_expression("is(a.com) && is(b.com)").unwrap();
+ assert!(!pred("a.com"));
+ assert!(!pred("b.com"));
+
+ let pred2 = parse_proxied_expression("sub(example.com) && !is(internal.example.com)").unwrap();
+ assert!(pred2("www.example.com"));
+ assert!(!pred2("internal.example.com"));
+ }
+
+ #[test]
+ fn test_parse_nested_parentheses() {
+ let pred = parse_proxied_expression("(is(a.com) || is(b.com)) && !is(c.com)").unwrap();
+ assert!(pred("a.com"));
+ assert!(pred("b.com"));
+ assert!(!pred("c.com"));
+ }
+
+ #[test]
+ fn test_parse_missing_closing_paren() {
+ let result = parse_proxied_expression("(is(a.com)");
+ assert!(result.is_err());
+ let err = result.err().unwrap();
+ assert!(err.contains("parenthesis") || err.contains(")"), "error was: {err}");
+ }
+
+ #[test]
+ fn test_parse_unexpected_tokens_after_expr() {
+ let result = parse_proxied_expression("true false");
+ assert!(result.is_err());
+ }
+
+ // --- make_fqdn with wildcard subdomain ---
+ #[test]
+ fn test_make_fqdn_wildcard_subdomain() {
+ // A name starting with "*." is treated as a wildcard subdomain
+ assert_eq!(make_fqdn("*.sub", "example.com"), "*.sub.example.com");
+ }
+}
diff --git a/src/main.rs b/src/main.rs
new file mode 100644
index 0000000..2a3b1af
--- /dev/null
+++ b/src/main.rs
@@ -0,0 +1,920 @@
+mod cloudflare;
+mod config;
+mod domain;
+mod notifier;
+mod pp;
+mod provider;
+mod updater;
+
+use crate::cloudflare::{Auth, CloudflareHandle};
+use crate::config::{AppConfig, CronSchedule};
+use crate::notifier::{CompositeNotifier, Heartbeat, Message};
+use crate::pp::PP;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::Arc;
+use tokio::signal;
+use tokio::time::{sleep, Duration};
+
+const VERSION: &str = env!("CARGO_PKG_VERSION");
+
+#[tokio::main]
+async fn main() {
+ // Parse CLI args
+ let args: Vec<String> = std::env::args().collect();
+ let dry_run = args.iter().any(|a| a == "--dry-run");
+ let repeat = args.iter().any(|a| a == "--repeat");
+
+ // Check for unknown args (legacy behavior)
+ let known_args = ["--dry-run", "--repeat"];
+ let unknown: Vec<&str> = args
+ .iter()
+ .skip(1)
+ .filter(|a| !known_args.contains(&a.as_str()))
+ .map(|a| a.as_str())
+ .collect();
+
+ if !unknown.is_empty() {
+ eprintln!(
+ "Unrecognized parameter(s): {}. Stopping now.",
+ unknown.join(", ")
+ );
+ return;
+ }
+
+ // Determine config mode and create initial PP for config loading
+ let initial_pp = if config::is_env_config_mode() {
+ // In env mode, read emoji/quiet from env before loading full config
+ let emoji = std::env::var("EMOJI")
+ .map(|v| matches!(v.to_lowercase().as_str(), "true" | "1" | "yes"))
+ .unwrap_or(true);
+ let quiet = std::env::var("QUIET")
+ .map(|v| matches!(v.to_lowercase().as_str(), "true" | "1" | "yes"))
+ .unwrap_or(false);
+ PP::new(emoji, quiet)
+ } else {
+ // Legacy mode: no emoji, not quiet (preserves original output behavior)
+ PP::new(false, false)
+ };
+
+ println!("cloudflare-ddns v{VERSION}");
+
+ // Load config
+ let app_config = match config::load_config(dry_run, repeat, &initial_pp) {
+ Ok(c) => c,
+ Err(e) => {
+ eprintln!("{e}");
+ sleep(Duration::from_secs(10)).await;
+ std::process::exit(1);
+ }
+ };
+
+ // Create PP with final settings
+ let ppfmt = PP::new(app_config.emoji, app_config.quiet);
+
+ if dry_run {
+ ppfmt.noticef(
+ pp::EMOJI_WARNING,
+ "[DRY RUN] No records will be created, updated, or deleted.",
+ );
+ }
+
+ // Print config summary (env mode only)
+ config::print_config_summary(&app_config, &ppfmt);
+
+ // Setup notifiers and heartbeats
+ let notifier = config::setup_notifiers(&ppfmt);
+ let heartbeat = config::setup_heartbeats(&ppfmt);
+
+ // Create Cloudflare handle (for env mode)
+ let handle = if !app_config.legacy_mode {
+ CloudflareHandle::new(
+ app_config.auth.clone(),
+ app_config.update_timeout,
+ app_config.managed_comment_regex.clone(),
+ app_config.managed_waf_comment_regex.clone(),
+ )
+ } else {
+ // Create a dummy handle for legacy mode (won't be used)
+ CloudflareHandle::new(
+ Auth::Token(String::new()),
+ Duration::from_secs(30),
+ None,
+ None,
+ )
+ };
+
+ // Signal handler for graceful shutdown
+ let running = Arc::new(AtomicBool::new(true));
+ let r = running.clone();
+ tokio::spawn(async move {
+ let _ = signal::ctrl_c().await;
+ println!("Stopping...");
+ r.store(false, Ordering::SeqCst);
+ });
+
+ // Start heartbeat
+ heartbeat.start().await;
+
+ if app_config.legacy_mode {
+ // --- Legacy mode (original cloudflare-ddns behavior) ---
+ run_legacy_mode(&app_config, &handle, &notifier, &heartbeat, &ppfmt, running).await;
+ } else {
+ // --- Env var mode (cf-ddns behavior) ---
+ run_env_mode(&app_config, &handle, &notifier, &heartbeat, &ppfmt, running).await;
+ }
+
+ // On shutdown: delete records if configured
+ if app_config.delete_on_stop && !app_config.legacy_mode {
+ ppfmt.noticef(pp::EMOJI_STOP, "Deleting records on stop...");
+ updater::final_delete(&app_config, &handle, &notifier, &heartbeat, &ppfmt).await;
+ }
+
+ // Exit heartbeat
+ heartbeat
+ .exit(&Message::new_ok("Shutting down"))
+ .await;
+}
+
+async fn run_legacy_mode(
+ config: &AppConfig,
+ handle: &CloudflareHandle,
+ notifier: &CompositeNotifier,
+ heartbeat: &Heartbeat,
+ ppfmt: &PP,
+ running: Arc<AtomicBool>,
+) {
+ let legacy = match &config.legacy_config {
+ Some(l) => l,
+ None => return,
+ };
+
+ if config.repeat {
+ match (legacy.a, legacy.aaaa) {
+ (true, true) => println!(
+ "Updating IPv4 (A) & IPv6 (AAAA) records every {} seconds",
+ legacy.ttl
+ ),
+ (true, false) => {
+ println!("Updating IPv4 (A) records every {} seconds", legacy.ttl)
+ }
+ (false, true) => {
+ println!("Updating IPv6 (AAAA) records every {} seconds", legacy.ttl)
+ }
+ (false, false) => println!("Both IPv4 and IPv6 are disabled"),
+ }
+
+ while running.load(Ordering::SeqCst) {
+ updater::update_once(config, handle, notifier, heartbeat, ppfmt).await;
+
+ for _ in 0..legacy.ttl {
+ if !running.load(Ordering::SeqCst) {
+ break;
+ }
+ sleep(Duration::from_secs(1)).await;
+ }
+ }
+ } else {
+ updater::update_once(config, handle, notifier, heartbeat, ppfmt).await;
+ }
+}
+
+async fn run_env_mode(
+ config: &AppConfig,
+ handle: &CloudflareHandle,
+ notifier: &CompositeNotifier,
+ heartbeat: &Heartbeat,
+ ppfmt: &PP,
+ running: Arc<AtomicBool>,
+) {
+ match &config.update_cron {
+ CronSchedule::Once => {
+ if config.update_on_start {
+ updater::update_once(config, handle, notifier, heartbeat, ppfmt).await;
+ }
+ }
+ schedule => {
+ let interval = schedule.next_duration().unwrap_or(Duration::from_secs(300));
+
+ ppfmt.noticef(
+ pp::EMOJI_LAUNCH,
+ &format!(
+ "Started cloudflare-ddns, updating every {}",
+ describe_duration(interval)
+ ),
+ );
+
+ // Update on start if configured
+ if config.update_on_start {
+ updater::update_once(config, handle, notifier, heartbeat, ppfmt).await;
+ }
+
+ // Main loop
+ while running.load(Ordering::SeqCst) {
+ // Sleep for interval, checking running flag each second
+ let secs = interval.as_secs();
+ let next_time = chrono::Local::now() + chrono::Duration::seconds(secs as i64);
+ ppfmt.infof(
+ pp::EMOJI_SLEEP,
+ &format!(
+ "Next update at {}",
+ next_time.format("%Y-%m-%d %H:%M:%S %Z")
+ ),
+ );
+
+ for _ in 0..secs {
+ if !running.load(Ordering::SeqCst) {
+ return;
+ }
+ sleep(Duration::from_secs(1)).await;
+ }
+
+ if !running.load(Ordering::SeqCst) {
+ return;
+ }
+
+ updater::update_once(config, handle, notifier, heartbeat, ppfmt).await;
+ }
+ }
+ }
+}
+
+fn describe_duration(d: Duration) -> String {
+ let secs = d.as_secs();
+ if secs >= 3600 {
+ let hours = secs / 3600;
+ let mins = (secs % 3600) / 60;
+ if mins > 0 {
+ format!("{hours}h{mins}m")
+ } else {
+ format!("{hours}h")
+ }
+ } else if secs >= 60 {
+ let mins = secs / 60;
+ let s = secs % 60;
+ if s > 0 {
+ format!("{mins}m{s}s")
+ } else {
+ format!("{mins}m")
+ }
+ } else {
+ format!("{secs}s")
+ }
+}
+
+// ============================================================
+// Tests (backwards compatible with original test suite)
+// ============================================================
+
+#[cfg(test)]
+mod tests {
+ use crate::config::{
+ LegacyAuthentication, LegacyCloudflareEntry, LegacyConfig, LegacySubdomainEntry,
+ parse_legacy_config,
+ };
+ use crate::provider::parse_trace_ip;
+ use reqwest::Client;
+ use wiremock::matchers::{method, path, query_param};
+ use wiremock::{Mock, MockServer, ResponseTemplate};
+
+ fn test_config(zone_id: &str) -> LegacyConfig {
+ LegacyConfig {
+ cloudflare: vec![LegacyCloudflareEntry {
+ authentication: LegacyAuthentication {
+ api_token: "test-token".to_string(),
+ api_key: None,
+ },
+ zone_id: zone_id.to_string(),
+ subdomains: vec![
+ LegacySubdomainEntry::Detailed {
+ name: "".to_string(),
+ proxied: false,
+ },
+ LegacySubdomainEntry::Detailed {
+ name: "vpn".to_string(),
+ proxied: true,
+ },
+ ],
+ proxied: false,
+ }],
+ a: true,
+ aaaa: false,
+ purge_unknown_records: false,
+ ttl: 300,
+ }
+ }
+
+ // Helper to create a legacy client for testing
+ struct TestDdnsClient {
+ client: Client,
+ cf_api_base: String,
+ ipv4_urls: Vec<String>,
+ dry_run: bool,
+ }
+
+ impl TestDdnsClient {
+ fn new(base_url: &str) -> Self {
+ Self {
+ client: Client::new(),
+ cf_api_base: base_url.to_string(),
+ ipv4_urls: vec![format!("{base_url}/cdn-cgi/trace")],
+ dry_run: false,
+ }
+ }
+
+ fn dry_run(mut self) -> Self {
+ self.dry_run = true;
+ self
+ }
+
+ async fn cf_api<T: serde::de::DeserializeOwned>(
+ &self,
+ endpoint: &str,
+ method_str: &str,
+ token: &str,
+ body: Option<&impl serde::Serialize>,
+ ) -> Option<T> {
+ let url = format!("{}/{endpoint}", self.cf_api_base);
+ let mut req = match method_str {
+ "GET" => self.client.get(&url),
+ "POST" => self.client.post(&url),
+ "PUT" => self.client.put(&url),
+ "DELETE" => self.client.delete(&url),
+ _ => return None,
+ };
+ req = req.header("Authorization", format!("Bearer {token}"));
+ if let Some(b) = body {
+ req = req.json(b);
+ }
+ match req.send().await {
+ Ok(resp) if resp.status().is_success() => resp.json::<T>().await.ok(),
+ Ok(resp) => {
+ let text = resp.text().await.unwrap_or_default();
+ eprintln!("Error: {text}");
+ None
+ }
+ Err(e) => {
+ eprintln!("Exception: {e}");
+ None
+ }
+ }
+ }
+
+ async fn get_ip(&self) -> Option<String> {
+ for url in &self.ipv4_urls {
+ if let Ok(resp) = self.client.get(url).send().await {
+ if let Ok(body) = resp.text().await {
+ if let Some(ip) = parse_trace_ip(&body) {
+ return Some(ip);
+ }
+ }
+ }
+ }
+ None
+ }
+
+ async fn commit_record(
+ &self,
+ ip: &str,
+ record_type: &str,
+ config: &[LegacyCloudflareEntry],
+ ttl: i64,
+ purge_unknown_records: bool,
+ ) {
+ for entry in config {
+ #[derive(serde::Deserialize)]
+ struct Resp<T> {
+ result: Option<T>,
+ }
+ #[derive(serde::Deserialize)]
+ struct Zone {
+ name: String,
+ }
+ #[derive(serde::Deserialize)]
+ struct Rec {
+ id: String,
+ name: String,
+ content: String,
+ proxied: bool,
+ }
+
+ let zone_resp: Option<Resp<Zone>> = self
+ .cf_api(
+ &format!("zones/{}", entry.zone_id),
+ "GET",
+ &entry.authentication.api_token,
+ None::<&()>.as_ref(),
+ )
+ .await;
+
+ let base_domain = match zone_resp.and_then(|r| r.result) {
+ Some(z) => z.name,
+ None => continue,
+ };
+
+ for subdomain in &entry.subdomains {
+ let (name, proxied) = match subdomain {
+ LegacySubdomainEntry::Detailed { name, proxied } => {
+ (name.to_lowercase().trim().to_string(), *proxied)
+ }
+ LegacySubdomainEntry::Simple(name) => {
+ (name.to_lowercase().trim().to_string(), entry.proxied)
+ }
+ };
+
+ let fqdn = crate::domain::make_fqdn(&name, &base_domain);
+
+ #[derive(serde::Serialize)]
+ struct Payload {
+ #[serde(rename = "type")]
+ record_type: String,
+ name: String,
+ content: String,
+ proxied: bool,
+ ttl: i64,
+ }
+
+ let record = Payload {
+ record_type: record_type.to_string(),
+ name: fqdn.clone(),
+ content: ip.to_string(),
+ proxied,
+ ttl,
+ };
+
+ let dns_endpoint = format!(
+ "zones/{}/dns_records?per_page=100&type={record_type}",
+ entry.zone_id
+ );
+ let dns_records: Option<Resp<Vec<Rec>>> = self
+ .cf_api(
+ &dns_endpoint,
+ "GET",
+ &entry.authentication.api_token,
+ None::<&()>.as_ref(),
+ )
+ .await;
+
+ let mut identifier: Option<String> = None;
+ let mut modified = false;
+ let mut duplicate_ids: Vec<String> = Vec::new();
+
+ if let Some(resp) = dns_records {
+ if let Some(records) = resp.result {
+ for r in &records {
+ if r.name == fqdn {
+ if let Some(ref existing_id) = identifier {
+ if r.content == ip {
+ duplicate_ids.push(existing_id.clone());
+ identifier = Some(r.id.clone());
+ } else {
+ duplicate_ids.push(r.id.clone());
+ }
+ } else {
+ identifier = Some(r.id.clone());
+ if r.content != ip || r.proxied != proxied {
+ modified = true;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if let Some(ref id) = identifier {
+ if modified {
+ if self.dry_run {
+ println!("[DRY RUN] Would update record {fqdn} -> {ip}");
+ } else {
+ println!("Updating record {fqdn} -> {ip}");
+ let update_endpoint =
+ format!("zones/{}/dns_records/{id}", entry.zone_id);
+ let _: Option<serde_json::Value> = self
+ .cf_api(
+ &update_endpoint,
+ "PUT",
+ &entry.authentication.api_token,
+ Some(&record),
+ )
+ .await;
+ }
+ } else if self.dry_run {
+ println!("[DRY RUN] Record {fqdn} is up to date ({ip})");
+ }
+ } else if self.dry_run {
+ println!("[DRY RUN] Would add new record {fqdn} -> {ip}");
+ } else {
+ println!("Adding new record {fqdn} -> {ip}");
+ let create_endpoint =
+ format!("zones/{}/dns_records", entry.zone_id);
+ let _: Option<serde_json::Value> = self
+ .cf_api(
+ &create_endpoint,
+ "POST",
+ &entry.authentication.api_token,
+ Some(&record),
+ )
+ .await;
+ }
+
+ if purge_unknown_records {
+ for dup_id in &duplicate_ids {
+ if self.dry_run {
+ println!("[DRY RUN] Would delete stale record {dup_id}");
+ } else {
+ println!("Deleting stale record {dup_id}");
+ let del_endpoint =
+ format!("zones/{}/dns_records/{dup_id}", entry.zone_id);
+ let _: Option<serde_json::Value> = self
+ .cf_api(
+ &del_endpoint,
+ "DELETE",
+ &entry.authentication.api_token,
+ None::<&()>.as_ref(),
+ )
+ .await;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ #[test]
+ fn test_parse_trace_ip() {
+ let body = "fl=1f1\nh=1.1.1.1\nip=203.0.113.42\nts=1234567890\nvisit_scheme=https\n";
+ assert_eq!(parse_trace_ip(body), Some("203.0.113.42".to_string()));
+ }
+
+ #[test]
+ fn test_parse_trace_ip_missing() {
+ let body = "fl=1f1\nh=1.1.1.1\nts=1234567890\n";
+ assert_eq!(parse_trace_ip(body), None);
+ }
+
+ #[test]
+ fn test_parse_config_minimal() {
+ let json = r#"{
+ "cloudflare": [{
+ "authentication": { "api_token": "tok123" },
+ "zone_id": "zone1",
+ "subdomains": ["@"]
+ }]
+ }"#;
+ let config = parse_legacy_config(json).unwrap();
+ assert!(config.a);
+ assert!(config.aaaa);
+ assert!(!config.purge_unknown_records);
+ assert_eq!(config.ttl, 300);
+ }
+
+ #[test]
+ fn test_parse_config_low_ttl() {
+ let json = r#"{
+ "cloudflare": [{
+ "authentication": { "api_token": "tok123" },
+ "zone_id": "zone1",
+ "subdomains": ["@"]
+ }],
+ "ttl": 10
+ }"#;
+ let config = parse_legacy_config(json).unwrap();
+ assert_eq!(config.ttl, 1);
+ }
+
+ #[tokio::test]
+ async fn test_ip_detection() {
+ let mock_server = MockServer::start().await;
+
+ Mock::given(method("GET"))
+ .and(path("/cdn-cgi/trace"))
+ .respond_with(
+ ResponseTemplate::new(200)
+ .set_body_string("fl=1f1\nh=mock\nip=198.51.100.7\nts=0\n"),
+ )
+ .mount(&mock_server)
+ .await;
+
+ let ddns = TestDdnsClient::new(&mock_server.uri());
+ let ip = ddns.get_ip().await;
+ assert_eq!(ip, Some("198.51.100.7".to_string()));
+ }
+
+ #[tokio::test]
+ async fn test_creates_new_record() {
+ let mock_server = MockServer::start().await;
+ let zone_id = "zone-abc-123";
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "name": "example.com" }
+ })))
+ .mount(&mock_server)
+ .await;
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}/dns_records")))
+ .and(query_param("type", "A"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": []
+ })))
+ .mount(&mock_server)
+ .await;
+
+ Mock::given(method("POST"))
+ .and(path(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "id": "new-record-1" }
+ })))
+ .expect(2)
+ .mount(&mock_server)
+ .await;
+
+ let ddns = TestDdnsClient::new(&mock_server.uri());
+ let config = test_config(zone_id);
+ ddns.commit_record("198.51.100.7", "A", &config.cloudflare, 300, false)
+ .await;
+ }
+
+ #[tokio::test]
+ async fn test_updates_existing_record() {
+ let mock_server = MockServer::start().await;
+ let zone_id = "zone-update-1";
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "name": "example.com" }
+ })))
+ .mount(&mock_server)
+ .await;
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}/dns_records")))
+ .and(query_param("type", "A"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": [
+ { "id": "rec-1", "name": "example.com", "content": "10.0.0.1", "proxied": false },
+ { "id": "rec-2", "name": "vpn.example.com", "content": "10.0.0.1", "proxied": true }
+ ]
+ })))
+ .mount(&mock_server)
+ .await;
+
+ Mock::given(method("PUT"))
+ .and(path(format!("/zones/{zone_id}/dns_records/rec-1")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "id": "rec-1" }
+ })))
+ .expect(1)
+ .mount(&mock_server)
+ .await;
+
+ Mock::given(method("PUT"))
+ .and(path(format!("/zones/{zone_id}/dns_records/rec-2")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "id": "rec-2" }
+ })))
+ .expect(1)
+ .mount(&mock_server)
+ .await;
+
+ let ddns = TestDdnsClient::new(&mock_server.uri());
+ let config = test_config(zone_id);
+ ddns.commit_record("198.51.100.7", "A", &config.cloudflare, 300, false)
+ .await;
+ }
+
+ #[tokio::test]
+ async fn test_skips_up_to_date_record() {
+ let mock_server = MockServer::start().await;
+ let zone_id = "zone-noop";
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "name": "example.com" }
+ })))
+ .mount(&mock_server)
+ .await;
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}/dns_records")))
+ .and(query_param("type", "A"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": [
+ { "id": "rec-1", "name": "example.com", "content": "198.51.100.7", "proxied": false },
+ { "id": "rec-2", "name": "vpn.example.com", "content": "198.51.100.7", "proxied": true }
+ ]
+ })))
+ .mount(&mock_server)
+ .await;
+
+ Mock::given(method("PUT"))
+ .respond_with(ResponseTemplate::new(500))
+ .expect(0)
+ .mount(&mock_server)
+ .await;
+
+ Mock::given(method("POST"))
+ .respond_with(ResponseTemplate::new(500))
+ .expect(0)
+ .mount(&mock_server)
+ .await;
+
+ let ddns = TestDdnsClient::new(&mock_server.uri());
+ let config = test_config(zone_id);
+ ddns.commit_record("198.51.100.7", "A", &config.cloudflare, 300, false)
+ .await;
+ }
+
+ #[tokio::test]
+ async fn test_dry_run_does_not_mutate() {
+ let mock_server = MockServer::start().await;
+ let zone_id = "zone-dry";
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "name": "example.com" }
+ })))
+ .mount(&mock_server)
+ .await;
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}/dns_records")))
+ .and(query_param("type", "A"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": []
+ })))
+ .mount(&mock_server)
+ .await;
+
+ Mock::given(method("POST"))
+ .respond_with(ResponseTemplate::new(500))
+ .expect(0)
+ .mount(&mock_server)
+ .await;
+
+ let ddns = TestDdnsClient::new(&mock_server.uri()).dry_run();
+ let config = test_config(zone_id);
+ ddns.commit_record("198.51.100.7", "A", &config.cloudflare, 300, false)
+ .await;
+ }
+
+ #[tokio::test]
+ async fn test_purge_duplicate_records() {
+ let mock_server = MockServer::start().await;
+ let zone_id = "zone-purge";
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "name": "example.com" }
+ })))
+ .mount(&mock_server)
+ .await;
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}/dns_records")))
+ .and(query_param("type", "A"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": [
+ { "id": "rec-keep", "name": "example.com", "content": "198.51.100.7", "proxied": false },
+ { "id": "rec-dup", "name": "example.com", "content": "198.51.100.7", "proxied": false }
+ ]
+ })))
+ .mount(&mock_server)
+ .await;
+
+ Mock::given(method("DELETE"))
+ .and(path(format!("/zones/{zone_id}/dns_records/rec-keep")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({})))
+ .expect(1)
+ .mount(&mock_server)
+ .await;
+
+ let ddns = TestDdnsClient::new(&mock_server.uri());
+ let config = LegacyConfig {
+ cloudflare: vec![LegacyCloudflareEntry {
+ authentication: LegacyAuthentication {
+ api_token: "test-token".to_string(),
+ api_key: None,
+ },
+ zone_id: zone_id.to_string(),
+ subdomains: vec![LegacySubdomainEntry::Detailed {
+ name: "".to_string(),
+ proxied: false,
+ }],
+ proxied: false,
+ }],
+ a: true,
+ aaaa: false,
+ purge_unknown_records: true,
+ ttl: 300,
+ };
+ ddns.commit_record("198.51.100.7", "A", &config.cloudflare, 300, true)
+ .await;
+ }
+
+ // --- describe_duration tests ---
+ #[test]
+ fn test_describe_duration_seconds_only() {
+ use tokio::time::Duration;
+ assert_eq!(super::describe_duration(Duration::from_secs(45)), "45s");
+ }
+
+ #[test]
+ fn test_describe_duration_exact_minutes() {
+ use tokio::time::Duration;
+ assert_eq!(super::describe_duration(Duration::from_secs(300)), "5m");
+ }
+
+ #[test]
+ fn test_describe_duration_minutes_and_seconds() {
+ use tokio::time::Duration;
+ assert_eq!(super::describe_duration(Duration::from_secs(330)), "5m30s");
+ }
+
+ #[test]
+ fn test_describe_duration_exact_hours() {
+ use tokio::time::Duration;
+ assert_eq!(super::describe_duration(Duration::from_secs(7200)), "2h");
+ }
+
+ #[test]
+ fn test_describe_duration_hours_and_minutes() {
+ use tokio::time::Duration;
+ assert_eq!(super::describe_duration(Duration::from_secs(5400)), "1h30m");
+ }
+
+ #[tokio::test]
+ async fn test_end_to_end_detect_and_update() {
+ let mock_server = MockServer::start().await;
+ let zone_id = "zone-e2e";
+
+ Mock::given(method("GET"))
+ .and(path("/cdn-cgi/trace"))
+ .respond_with(
+ ResponseTemplate::new(200)
+ .set_body_string("fl=1f1\nh=mock\nip=203.0.113.99\nts=0\n"),
+ )
+ .mount(&mock_server)
+ .await;
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "name": "example.com" }
+ })))
+ .mount(&mock_server)
+ .await;
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}/dns_records")))
+ .and(query_param("type", "A"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": [
+ { "id": "rec-root", "name": "example.com", "content": "10.0.0.1", "proxied": false }
+ ]
+ })))
+ .mount(&mock_server)
+ .await;
+
+ Mock::given(method("PUT"))
+ .and(path(format!("/zones/{zone_id}/dns_records/rec-root")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "id": "rec-root" }
+ })))
+ .expect(1)
+ .mount(&mock_server)
+ .await;
+
+ let ddns = TestDdnsClient::new(&mock_server.uri());
+ let ip = ddns.get_ip().await;
+ assert_eq!(ip, Some("203.0.113.99".to_string()));
+
+ let config = LegacyConfig {
+ cloudflare: vec![LegacyCloudflareEntry {
+ authentication: LegacyAuthentication {
+ api_token: "test-token".to_string(),
+ api_key: None,
+ },
+ zone_id: zone_id.to_string(),
+ subdomains: vec![LegacySubdomainEntry::Detailed {
+ name: "".to_string(),
+ proxied: false,
+ }],
+ proxied: false,
+ }],
+ a: true,
+ aaaa: false,
+ purge_unknown_records: false,
+ ttl: 300,
+ };
+
+ ddns.commit_record("203.0.113.99", "A", &config.cloudflare, 300, false)
+ .await;
+ }
+}
diff --git a/src/notifier.rs b/src/notifier.rs
new file mode 100644
index 0000000..f342d26
--- /dev/null
+++ b/src/notifier.rs
@@ -0,0 +1,1436 @@
+use crate::pp::{self, PP};
+use reqwest::Client;
+use std::time::Duration;
+
+// --- Message ---
+
+#[derive(Debug, Clone)]
+pub struct Message {
+ pub lines: Vec<String>,
+ pub ok: bool,
+}
+
+impl Message {
+ #[allow(dead_code)]
+ pub fn new() -> Self {
+ Self {
+ lines: Vec::new(),
+ ok: true,
+ }
+ }
+
+ pub fn new_ok(msg: &str) -> Self {
+ Self {
+ lines: vec![msg.to_string()],
+ ok: true,
+ }
+ }
+
+ pub fn new_fail(msg: &str) -> Self {
+ Self {
+ lines: vec![msg.to_string()],
+ ok: false,
+ }
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.lines.is_empty()
+ }
+
+ pub fn format(&self) -> String {
+ self.lines.join("\n")
+ }
+
+ pub fn merge(messages: Vec<Message>) -> Message {
+ let mut lines = Vec::new();
+ let mut ok = true;
+ for m in messages {
+ lines.extend(m.lines);
+ if !m.ok {
+ ok = false;
+ }
+ }
+ Message { lines, ok }
+ }
+
+ #[allow(dead_code)]
+ pub fn add_line(&mut self, line: &str) {
+ self.lines.push(line.to_string());
+ }
+
+ #[allow(dead_code)]
+ pub fn set_fail(&mut self) {
+ self.ok = false;
+ }
+}
+
+// --- Composite Notifier ---
+
+pub struct CompositeNotifier {
+ notifiers: Vec<Box<dyn NotifierDyn>>,
+}
+
+// Object-safe version of Notifier
+pub trait NotifierDyn: Send + Sync {
+ #[allow(dead_code)]
+ fn describe(&self) -> String;
+ fn send_dyn<'a>(
+ &'a self,
+ msg: &'a Message,
+ ) -> std::pin::Pin<Box<dyn std::future::Future<Output = bool> + Send + 'a>>;
+}
+
+impl CompositeNotifier {
+ pub fn new(notifiers: Vec<Box<dyn NotifierDyn>>) -> Self {
+ Self { notifiers }
+ }
+
+ #[allow(dead_code)]
+ pub fn is_empty(&self) -> bool {
+ self.notifiers.is_empty()
+ }
+
+ #[allow(dead_code)]
+ pub fn describe(&self) -> Vec<String> {
+ self.notifiers.iter().map(|n| n.describe()).collect()
+ }
+
+ pub async fn send(&self, msg: &Message) {
+ if msg.is_empty() {
+ return;
+ }
+ for notifier in &self.notifiers {
+ notifier.send_dyn(msg).await;
+ }
+ }
+}
+
+// --- Shoutrrr Notifier ---
+
+pub struct ShoutrrrNotifier {
+ client: Client,
+ urls: Vec<ShoutrrrService>,
+}
+
+struct ShoutrrrService {
+ original_url: String,
+ service_type: ShoutrrrServiceType,
+ webhook_url: String,
+}
+
+enum ShoutrrrServiceType {
+ Generic,
+ Discord,
+ Slack,
+ Telegram,
+ Gotify,
+ Pushover,
+ Other(String),
+}
+
+impl ShoutrrrNotifier {
+ pub fn new(urls: &[String]) -> Result<Self, String> {
+ let client = Client::builder()
+ .timeout(Duration::from_secs(10))
+ .build()
+ .map_err(|e| format!("Failed to build notifier HTTP client: {e}"))?;
+
+ let mut services = Vec::new();
+ for url_str in urls {
+ let url_str = url_str.trim();
+ if url_str.is_empty() {
+ continue;
+ }
+ let service = parse_shoutrrr_url(url_str)?;
+ services.push(service);
+ }
+
+ Ok(Self {
+ client,
+ urls: services,
+ })
+ }
+
+ pub fn describe(&self) -> String {
+ let services: Vec<String> = self
+ .urls
+ .iter()
+ .map(|s| match &s.service_type {
+ ShoutrrrServiceType::Generic => "generic webhook".to_string(),
+ ShoutrrrServiceType::Discord => "Discord".to_string(),
+ ShoutrrrServiceType::Slack => "Slack".to_string(),
+ ShoutrrrServiceType::Telegram => "Telegram".to_string(),
+ ShoutrrrServiceType::Gotify => "Gotify".to_string(),
+ ShoutrrrServiceType::Pushover => "Pushover".to_string(),
+ ShoutrrrServiceType::Other(name) => name.clone(),
+ })
+ .collect();
+ services.join(", ")
+ }
+
+ pub async fn send(&self, msg: &Message, ppfmt: &PP) -> bool {
+ let text = msg.format();
+ if text.is_empty() {
+ return true;
+ }
+
+ let mut all_ok = true;
+ for service in &self.urls {
+ let ok = match &service.service_type {
+ ShoutrrrServiceType::Generic => self.send_generic(&service.webhook_url, &text).await,
+ ShoutrrrServiceType::Discord => self.send_discord(&service.webhook_url, &text).await,
+ ShoutrrrServiceType::Slack => self.send_slack(&service.webhook_url, &text).await,
+ ShoutrrrServiceType::Telegram => {
+ self.send_telegram(&service.webhook_url, &text).await
+ }
+ ShoutrrrServiceType::Gotify => self.send_gotify(&service.webhook_url, &text).await,
+ ShoutrrrServiceType::Pushover => {
+ self.send_pushover(&service.webhook_url, &text).await
+ }
+ ShoutrrrServiceType::Other(_) => self.send_generic(&service.webhook_url, &text).await,
+ };
+ if !ok {
+ ppfmt.warningf(
+ pp::EMOJI_WARNING,
+ &format!("Failed to send notification via {}", service.original_url),
+ );
+ all_ok = false;
+ }
+ }
+ all_ok
+ }
+
+ async fn send_generic(&self, url: &str, text: &str) -> bool {
+ let body = serde_json::json!({ "message": text });
+ self.client
+ .post(url)
+ .json(&body)
+ .send()
+ .await
+ .map(|r| r.status().is_success())
+ .unwrap_or(false)
+ }
+
+ async fn send_discord(&self, webhook_url: &str, text: &str) -> bool {
+ let body = serde_json::json!({ "content": text });
+ self.client
+ .post(webhook_url)
+ .json(&body)
+ .send()
+ .await
+ .map(|r| r.status().is_success())
+ .unwrap_or(false)
+ }
+
+ async fn send_slack(&self, webhook_url: &str, text: &str) -> bool {
+ let body = serde_json::json!({ "text": text });
+ self.client
+ .post(webhook_url)
+ .json(&body)
+ .send()
+ .await
+ .map(|r| r.status().is_success())
+ .unwrap_or(false)
+ }
+
+ async fn send_telegram(&self, api_url: &str, text: &str) -> bool {
+ // api_url should be like https://api.telegram.org/bot<token>/sendMessage?chat_id=<chat_id>
+ let body = serde_json::json!({
+ "text": text,
+ "parse_mode": "Markdown"
+ });
+ self.client
+ .post(api_url)
+ .json(&body)
+ .send()
+ .await
+ .map(|r| r.status().is_success())
+ .unwrap_or(false)
+ }
+
+ async fn send_gotify(&self, url: &str, text: &str) -> bool {
+ let body = serde_json::json!({
+ "title": "Cloudflare DDNS",
+ "message": text,
+ "priority": 5
+ });
+ self.client
+ .post(url)
+ .json(&body)
+ .send()
+ .await
+ .map(|r| r.status().is_success())
+ .unwrap_or(false)
+ }
+
+ async fn send_pushover(&self, url: &str, text: &str) -> bool {
+ // Pushover expects form data with token, user, and message.
+ // The webhook_url has token and user as query params, so we parse them out.
+ let parsed = match url::Url::parse(url) {
+ Ok(u) => u,
+ Err(_) => return false,
+ };
+ let mut token = String::new();
+ let mut user = String::new();
+ for (key, value) in parsed.query_pairs() {
+ match key.as_ref() {
+ "token" => token = value.to_string(),
+ "user" => user = value.to_string(),
+ _ => {}
+ }
+ }
+ let params = [
+ ("token", token.as_str()),
+ ("user", user.as_str()),
+ ("message", text),
+ ];
+ self.client
+ .post("https://api.pushover.net/1/messages.json")
+ .form(&params)
+ .send()
+ .await
+ .map(|r| r.status().is_success())
+ .unwrap_or(false)
+ }
+}
+
+impl NotifierDyn for ShoutrrrNotifier {
+ fn describe(&self) -> String {
+ ShoutrrrNotifier::describe(self)
+ }
+
+ fn send_dyn<'a>(
+ &'a self,
+ msg: &'a Message,
+ ) -> std::pin::Pin<Box<dyn std::future::Future<Output = bool> + Send + 'a>> {
+ let pp = PP::default_pp();
+ Box::pin(async move { self.send(msg, &pp).await })
+ }
+}
+
+ fn parse_shoutrrr_url(url_str: &str) -> Result<ShoutrrrService, String> {
+ // Shoutrrr URL formats:
+ // discord://token@id -> https://discord.com/api/webhooks/id/token
+ // slack://token-a/token-b/token-c -> https://hooks.slack.com/services/token-a/token-b/token-c
+ // telegram://token@telegram?chats=chatid -> https://api.telegram.org/bot{token}/sendMessage?chat_id={chatid}
+ // gotify://host/path?token=TOKEN -> https://host/path/message?token=TOKEN
+ // generic://host/path -> https://host/path
+ // generic+https://host/path -> https://host/path
+
+ if let Some(rest) = url_str.strip_prefix("discord://") {
+ let parts: Vec<&str> = rest.splitn(2, '@').collect();
+ if parts.len() == 2 {
+ let token = parts[0];
+ let id = parts[1];
+ return Ok(ShoutrrrService {
+ original_url: url_str.to_string(),
+ service_type: ShoutrrrServiceType::Discord,
+ webhook_url: format!("https://discord.com/api/webhooks/{id}/{token}"),
+ });
+ }
+ return Err(format!("Invalid Discord shoutrrr URL: {url_str}"));
+ }
+
+ if let Some(rest) = url_str.strip_prefix("slack://") {
+ let parts: Vec<&str> = rest.splitn(3, '/').collect();
+ if parts.len() == 3 {
+ return Ok(ShoutrrrService {
+ original_url: url_str.to_string(),
+ service_type: ShoutrrrServiceType::Slack,
+ webhook_url: format!(
+ "https://hooks.slack.com/services/{}/{}/{}",
+ parts[0], parts[1], parts[2]
+ ),
+ });
+ }
+ return Err(format!("Invalid Slack shoutrrr URL: {url_str}"));
+ }
+
+ if let Some(rest) = url_str.strip_prefix("telegram://") {
+ let parts: Vec<&str> = rest.splitn(2, '@').collect();
+ if parts.len() == 2 {
+ let token = parts[0];
+ let remainder = parts[1];
+ // Extract chat ID from query params
+ if let Some(chats_start) = remainder.find("chats=") {
+ let chats_str = &remainder[chats_start + 6..];
+ let chat_id = chats_str.split('&').next().unwrap_or(chats_str);
+ let chat_id = chat_id.split(',').next().unwrap_or(chat_id);
+ return Ok(ShoutrrrService {
+ original_url: url_str.to_string(),
+ service_type: ShoutrrrServiceType::Telegram,
+ webhook_url: format!(
+ "https://api.telegram.org/bot{token}/sendMessage?chat_id={chat_id}"
+ ),
+ });
+ }
+ }
+ return Err(format!("Invalid Telegram shoutrrr URL: {url_str}"));
+ }
+
+ if let Some(rest) = url_str
+ .strip_prefix("gotify://")
+ .or_else(|| url_str.strip_prefix("gotify+https://"))
+ {
+ return Ok(ShoutrrrService {
+ original_url: url_str.to_string(),
+ service_type: ShoutrrrServiceType::Gotify,
+ webhook_url: format!("https://{rest}/message"),
+ });
+ }
+
+ if let Some(rest) = url_str
+ .strip_prefix("generic://")
+ .or_else(|| url_str.strip_prefix("generic+https://"))
+ {
+ return Ok(ShoutrrrService {
+ original_url: url_str.to_string(),
+ service_type: ShoutrrrServiceType::Generic,
+ webhook_url: format!("https://{rest}"),
+ });
+ }
+
+ if let Some(rest) = url_str.strip_prefix("generic+http://") {
+ return Ok(ShoutrrrService {
+ original_url: url_str.to_string(),
+ service_type: ShoutrrrServiceType::Generic,
+ webhook_url: format!("http://{rest}"),
+ });
+ }
+
+ if let Some(rest) = url_str.strip_prefix("pushover://") {
+ let parts: Vec<&str> = rest.splitn(2, '@').collect();
+ if parts.len() == 2 {
+ return Ok(ShoutrrrService {
+ original_url: url_str.to_string(),
+ service_type: ShoutrrrServiceType::Pushover,
+ webhook_url: format!(
+ "https://api.pushover.net/1/messages.json?token={}&user={}",
+ parts[1], parts[0]
+ ),
+ });
+ }
+ return Err(format!("Invalid Pushover shoutrrr URL: {url_str}"));
+ }
+
+ // Unknown scheme - treat as generic with original URL as-is if it looks like a URL
+ if url_str.starts_with("http://") || url_str.starts_with("https://") {
+ return Ok(ShoutrrrService {
+ original_url: url_str.to_string(),
+ service_type: ShoutrrrServiceType::Generic,
+ webhook_url: url_str.to_string(),
+ });
+ }
+
+ // Try to parse as scheme://... for unknown services
+ if let Some(scheme_end) = url_str.find("://") {
+ let scheme = &url_str[..scheme_end];
+ return Ok(ShoutrrrService {
+ original_url: url_str.to_string(),
+ service_type: ShoutrrrServiceType::Other(scheme.to_string()),
+ webhook_url: format!("https://{}", &url_str[scheme_end + 3..]),
+ });
+ }
+
+ Err(format!("Unsupported notification URL: {url_str}"))
+}
+
+// --- Heartbeat ---
+
+pub struct Heartbeat {
+ monitors: Vec<Box<dyn HeartbeatMonitor>>,
+}
+
+pub trait HeartbeatMonitor: Send + Sync {
+ #[allow(dead_code)]
+ fn describe(&self) -> String;
+ fn ping<'a>(
+ &'a self,
+ msg: &'a Message,
+ ) -> std::pin::Pin<Box<dyn std::future::Future<Output = bool> + Send + 'a>>;
+ fn start(
+ &self,
+ ) -> std::pin::Pin<Box<dyn std::future::Future<Output = bool> + Send + '_>>;
+ fn exit<'a>(
+ &'a self,
+ msg: &'a Message,
+ ) -> std::pin::Pin<Box<dyn std::future::Future<Output = bool> + Send + 'a>>;
+}
+
+impl Heartbeat {
+ pub fn new(monitors: Vec<Box<dyn HeartbeatMonitor>>) -> Self {
+ Self { monitors }
+ }
+
+ #[allow(dead_code)]
+ pub fn is_empty(&self) -> bool {
+ self.monitors.is_empty()
+ }
+
+ #[allow(dead_code)]
+ pub fn describe(&self) -> Vec<String> {
+ self.monitors.iter().map(|m| m.describe()).collect()
+ }
+
+ pub async fn ping(&self, msg: &Message) {
+ for monitor in &self.monitors {
+ monitor.ping(msg).await;
+ }
+ }
+
+ pub async fn start(&self) {
+ for monitor in &self.monitors {
+ monitor.start().await;
+ }
+ }
+
+ pub async fn exit(&self, msg: &Message) {
+ for monitor in &self.monitors {
+ monitor.exit(msg).await;
+ }
+ }
+}
+
+// --- Healthchecks.io ---
+
+pub struct HealthchecksMonitor {
+ client: Client,
+ base_url: String,
+}
+
+impl HealthchecksMonitor {
+ pub fn new(url: &str) -> Self {
+ let client = Client::builder()
+ .timeout(Duration::from_secs(10))
+ .build()
+ .expect("Failed to build healthchecks client");
+
+ // Strip trailing slash
+ let base_url = url.trim_end_matches('/').to_string();
+
+ Self { client, base_url }
+ }
+
+ async fn send_ping(&self, suffix: &str, body: Option<&str>) -> bool {
+ let url = if suffix.is_empty() {
+ self.base_url.clone()
+ } else {
+ format!("{}/{suffix}", self.base_url)
+ };
+
+ let req = if let Some(body) = body {
+ self.client.post(&url).body(body.to_string())
+ } else {
+ self.client.post(&url)
+ };
+
+ req.send()
+ .await
+ .map(|r| r.status().is_success())
+ .unwrap_or(false)
+ }
+}
+
+impl HeartbeatMonitor for HealthchecksMonitor {
+ fn describe(&self) -> String {
+ "Healthchecks.io".to_string()
+ }
+
+ fn ping<'a>(
+ &'a self,
+ msg: &'a Message,
+ ) -> std::pin::Pin<Box<dyn std::future::Future<Output = bool> + Send + 'a>> {
+ Box::pin(async move {
+ let body = msg.format();
+ let suffix = if msg.ok { "" } else { "fail" };
+ self.send_ping(suffix, if body.is_empty() { None } else { Some(&body) })
+ .await
+ })
+ }
+
+ fn start(
+ &self,
+ ) -> std::pin::Pin<Box<dyn std::future::Future<Output = bool> + Send + '_>> {
+ Box::pin(async move { self.send_ping("start", None).await })
+ }
+
+ fn exit<'a>(
+ &'a self,
+ msg: &'a Message,
+ ) -> std::pin::Pin<Box<dyn std::future::Future<Output = bool> + Send + 'a>> {
+ Box::pin(async move {
+ let body = msg.format();
+ self.send_ping(
+ if msg.ok { "" } else { "fail" },
+ if body.is_empty() { None } else { Some(&body) },
+ )
+ .await
+ })
+ }
+}
+
+// --- Uptime Kuma ---
+
+pub struct UptimeKumaMonitor {
+ client: Client,
+ base_url: String,
+}
+
+impl UptimeKumaMonitor {
+ pub fn new(url: &str) -> Self {
+ let client = Client::builder()
+ .timeout(Duration::from_secs(10))
+ .build()
+ .expect("Failed to build uptime kuma client");
+
+ let base_url = url.trim_end_matches('/').to_string();
+
+ Self { client, base_url }
+ }
+}
+
+impl HeartbeatMonitor for UptimeKumaMonitor {
+ fn describe(&self) -> String {
+ "Uptime Kuma".to_string()
+ }
+
+ fn ping<'a>(
+ &'a self,
+ msg: &'a Message,
+ ) -> std::pin::Pin<Box<dyn std::future::Future<Output = bool> + Send + 'a>> {
+ Box::pin(async move {
+ let status = if msg.ok { "up" } else { "down" };
+ let text = msg.format();
+ let mut url = format!("{}?status={status}", self.base_url);
+ if !text.is_empty() {
+ url.push_str(&format!("&msg={}", urlencoding(&text)));
+ }
+ self.client
+ .get(&url)
+ .send()
+ .await
+ .map(|r| r.status().is_success())
+ .unwrap_or(false)
+ })
+ }
+
+ fn start(
+ &self,
+ ) -> std::pin::Pin<Box<dyn std::future::Future<Output = bool> + Send + '_>> {
+ Box::pin(async move {
+ let url = format!("{}?status=up&msg=Starting", self.base_url);
+ self.client
+ .get(&url)
+ .send()
+ .await
+ .map(|r| r.status().is_success())
+ .unwrap_or(false)
+ })
+ }
+
+ fn exit<'a>(
+ &'a self,
+ msg: &'a Message,
+ ) -> std::pin::Pin<Box<dyn std::future::Future<Output = bool> + Send + 'a>> {
+ Box::pin(async move {
+ let status = if msg.ok { "up" } else { "down" };
+ let text = msg.format();
+ let mut url = format!("{}?status={status}", self.base_url);
+ if !text.is_empty() {
+ url.push_str(&format!("&msg={}", urlencoding(&text)));
+ }
+ self.client
+ .get(&url)
+ .send()
+ .await
+ .map(|r| r.status().is_success())
+ .unwrap_or(false)
+ })
+ }
+}
+
+fn urlencoding(s: &str) -> String {
+ url::form_urlencoded::byte_serialize(s.as_bytes()).collect()
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use wiremock::matchers::{method, path};
+ use wiremock::{Mock, MockServer, ResponseTemplate};
+
+ // ---- Message tests ----
+
+ #[test]
+ fn test_message_new_ok() {
+ let msg = Message::new_ok("hello");
+ assert_eq!(msg.lines, vec!["hello".to_string()]);
+ assert!(msg.ok);
+ }
+
+ #[test]
+ fn test_message_new_fail() {
+ let msg = Message::new_fail("error occurred");
+ assert_eq!(msg.lines, vec!["error occurred".to_string()]);
+ assert!(!msg.ok);
+ }
+
+ #[test]
+ fn test_message_new() {
+ let msg = Message::new();
+ assert!(msg.lines.is_empty());
+ assert!(msg.ok);
+ }
+
+ #[test]
+ fn test_message_is_empty_true() {
+ let msg = Message::new();
+ assert!(msg.is_empty());
+ }
+
+ #[test]
+ fn test_message_is_empty_false() {
+ let msg = Message::new_ok("something");
+ assert!(!msg.is_empty());
+ }
+
+ #[test]
+ fn test_message_format_single_line() {
+ let msg = Message::new_ok("line1");
+ assert_eq!(msg.format(), "line1");
+ }
+
+ #[test]
+ fn test_message_format_multiple_lines() {
+ let mut msg = Message::new_ok("line1");
+ msg.add_line("line2");
+ msg.add_line("line3");
+ assert_eq!(msg.format(), "line1\nline2\nline3");
+ }
+
+ #[test]
+ fn test_message_format_empty() {
+ let msg = Message::new();
+ assert_eq!(msg.format(), "");
+ }
+
+ #[test]
+ fn test_message_merge_all_ok() {
+ let m1 = Message::new_ok("a");
+ let m2 = Message::new_ok("b");
+ let merged = Message::merge(vec![m1, m2]);
+ assert_eq!(merged.lines, vec!["a".to_string(), "b".to_string()]);
+ assert!(merged.ok);
+ }
+
+ #[test]
+ fn test_message_merge_one_fail() {
+ let m1 = Message::new_ok("a");
+ let m2 = Message::new_fail("b");
+ let m3 = Message::new_ok("c");
+ let merged = Message::merge(vec![m1, m2, m3]);
+ assert_eq!(
+ merged.lines,
+ vec!["a".to_string(), "b".to_string(), "c".to_string()]
+ );
+ assert!(!merged.ok);
+ }
+
+ #[test]
+ fn test_message_merge_all_fail() {
+ let m1 = Message::new_fail("x");
+ let m2 = Message::new_fail("y");
+ let merged = Message::merge(vec![m1, m2]);
+ assert!(!merged.ok);
+ }
+
+ #[test]
+ fn test_message_merge_empty_vec() {
+ let merged = Message::merge(vec![]);
+ assert!(merged.lines.is_empty());
+ assert!(merged.ok);
+ }
+
+ #[test]
+ fn test_message_add_line() {
+ let mut msg = Message::new();
+ msg.add_line("first");
+ msg.add_line("second");
+ assert_eq!(msg.lines, vec!["first".to_string(), "second".to_string()]);
+ }
+
+ #[test]
+ fn test_message_set_fail() {
+ let mut msg = Message::new();
+ assert!(msg.ok);
+ msg.set_fail();
+ assert!(!msg.ok);
+ }
+
+ // ---- CompositeNotifier tests ----
+
+ #[tokio::test]
+ async fn test_composite_notifier_empty_send_does_nothing() {
+ let notifier = CompositeNotifier::new(vec![]);
+ assert!(notifier.is_empty());
+ let msg = Message::new_ok("test");
+ // Should not panic or error
+ notifier.send(&msg).await;
+ }
+
+ // ---- parse_shoutrrr_url tests ----
+
+ #[test]
+ fn test_parse_discord() {
+ let result = parse_shoutrrr_url("discord://mytoken@myid").unwrap();
+ assert_eq!(
+ result.webhook_url,
+ "https://discord.com/api/webhooks/myid/mytoken"
+ );
+ assert!(matches!(result.service_type, ShoutrrrServiceType::Discord));
+ assert_eq!(result.original_url, "discord://mytoken@myid");
+ }
+
+ #[test]
+ fn test_parse_discord_invalid() {
+ let result = parse_shoutrrr_url("discord://noatsign");
+ assert!(result.is_err());
+ }
+
+ #[test]
+ fn test_parse_slack() {
+ let result = parse_shoutrrr_url("slack://aaa/bbb/ccc").unwrap();
+ assert_eq!(
+ result.webhook_url,
+ "https://hooks.slack.com/services/aaa/bbb/ccc"
+ );
+ assert!(matches!(result.service_type, ShoutrrrServiceType::Slack));
+ }
+
+ #[test]
+ fn test_parse_slack_invalid() {
+ let result = parse_shoutrrr_url("slack://only-one-part");
+ assert!(result.is_err());
+ }
+
+ #[test]
+ fn test_parse_telegram() {
+ let result =
+ parse_shoutrrr_url("telegram://bottoken123@telegram?chats=12345").unwrap();
+ assert_eq!(
+ result.webhook_url,
+ "https://api.telegram.org/botbottoken123/sendMessage?chat_id=12345"
+ );
+ assert!(matches!(
+ result.service_type,
+ ShoutrrrServiceType::Telegram
+ ));
+ }
+
+ #[test]
+ fn test_parse_telegram_invalid_no_chats() {
+ let result = parse_shoutrrr_url("telegram://token@telegram");
+ assert!(result.is_err());
+ }
+
+ #[test]
+ fn test_parse_gotify() {
+ let result = parse_shoutrrr_url("gotify://myhost.com/somepath").unwrap();
+ assert_eq!(
+ result.webhook_url,
+ "https://myhost.com/somepath/message"
+ );
+ assert!(matches!(result.service_type, ShoutrrrServiceType::Gotify));
+ }
+
+ #[test]
+ fn test_parse_generic() {
+ let result = parse_shoutrrr_url("generic://example.com/webhook").unwrap();
+ assert_eq!(result.webhook_url, "https://example.com/webhook");
+ assert!(matches!(result.service_type, ShoutrrrServiceType::Generic));
+ }
+
+ #[test]
+ fn test_parse_generic_plus_https() {
+ let result =
+ parse_shoutrrr_url("generic+https://example.com/webhook").unwrap();
+ assert_eq!(result.webhook_url, "https://example.com/webhook");
+ assert!(matches!(result.service_type, ShoutrrrServiceType::Generic));
+ }
+
+ #[test]
+ fn test_parse_generic_plus_http() {
+ let result =
+ parse_shoutrrr_url("generic+http://example.com/webhook").unwrap();
+ assert_eq!(result.webhook_url, "http://example.com/webhook");
+ assert!(matches!(result.service_type, ShoutrrrServiceType::Generic));
+ }
+
+ #[test]
+ fn test_parse_pushover() {
+ let result = parse_shoutrrr_url("pushover://userkey@apitoken").unwrap();
+ assert_eq!(
+ result.webhook_url,
+ "https://api.pushover.net/1/messages.json?token=apitoken&user=userkey"
+ );
+ assert!(matches!(
+ result.service_type,
+ ShoutrrrServiceType::Pushover
+ ));
+ }
+
+ #[test]
+ fn test_parse_pushover_invalid() {
+ let result = parse_shoutrrr_url("pushover://noatsign");
+ assert!(result.is_err());
+ }
+
+ #[test]
+ fn test_parse_plain_https_url() {
+ let result =
+ parse_shoutrrr_url("https://hooks.example.com/notify").unwrap();
+ assert_eq!(result.webhook_url, "https://hooks.example.com/notify");
+ assert!(matches!(result.service_type, ShoutrrrServiceType::Generic));
+ }
+
+ #[test]
+ fn test_parse_plain_http_url() {
+ let result =
+ parse_shoutrrr_url("http://hooks.example.com/notify").unwrap();
+ assert_eq!(result.webhook_url, "http://hooks.example.com/notify");
+ assert!(matches!(result.service_type, ShoutrrrServiceType::Generic));
+ }
+
+ #[test]
+ fn test_parse_unknown_scheme() {
+ let result = parse_shoutrrr_url("custom://myhost.example.com/path").unwrap();
+ assert_eq!(result.webhook_url, "https://myhost.example.com/path");
+ assert!(matches!(
+ result.service_type,
+ ShoutrrrServiceType::Other(ref s) if s == "custom"
+ ));
+ }
+
+ #[test]
+ fn test_parse_invalid_no_scheme() {
+ let result = parse_shoutrrr_url("not-a-url");
+ assert!(result.is_err());
+ }
+
+ #[test]
+ fn test_parse_invalid_empty() {
+ let result = parse_shoutrrr_url("");
+ assert!(result.is_err());
+ }
+
+ // ---- urlencoding tests ----
+
+ #[test]
+ fn test_urlencoding_basic_ascii() {
+ assert_eq!(urlencoding("hello"), "hello");
+ }
+
+ #[test]
+ fn test_urlencoding_spaces() {
+ assert_eq!(urlencoding("hello world"), "hello+world");
+ }
+
+ #[test]
+ fn test_urlencoding_special_chars() {
+ let encoded = urlencoding("a=b&c=d");
+ assert_eq!(encoded, "a%3Db%26c%3Dd");
+ }
+
+ #[test]
+ fn test_urlencoding_empty() {
+ assert_eq!(urlencoding(""), "");
+ }
+
+ // ---- HealthchecksMonitor with wiremock ----
+
+ #[tokio::test]
+ async fn test_healthchecks_ping_ok() {
+ let server = MockServer::start().await;
+
+ Mock::given(method("POST"))
+ .and(path("/"))
+ .respond_with(ResponseTemplate::new(200))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let monitor = HealthchecksMonitor::new(&server.uri());
+ let msg = Message::new_ok("all good");
+ let result = monitor.ping(&msg).await;
+ assert!(result);
+ }
+
+ #[tokio::test]
+ async fn test_healthchecks_ping_fail() {
+ let server = MockServer::start().await;
+
+ Mock::given(method("POST"))
+ .and(path("/fail"))
+ .respond_with(ResponseTemplate::new(200))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let monitor = HealthchecksMonitor::new(&server.uri());
+ let msg = Message::new_fail("something broke");
+ let result = monitor.ping(&msg).await;
+ assert!(result);
+ }
+
+ #[tokio::test]
+ async fn test_healthchecks_start() {
+ let server = MockServer::start().await;
+
+ Mock::given(method("POST"))
+ .and(path("/start"))
+ .respond_with(ResponseTemplate::new(200))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let monitor = HealthchecksMonitor::new(&server.uri());
+ let result = monitor.start().await;
+ assert!(result);
+ }
+
+ #[tokio::test]
+ async fn test_healthchecks_exit_ok() {
+ let server = MockServer::start().await;
+
+ Mock::given(method("POST"))
+ .and(path("/"))
+ .respond_with(ResponseTemplate::new(200))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let monitor = HealthchecksMonitor::new(&server.uri());
+ let msg = Message::new_ok("done");
+ let result = monitor.exit(&msg).await;
+ assert!(result);
+ }
+
+ #[tokio::test]
+ async fn test_healthchecks_exit_fail() {
+ let server = MockServer::start().await;
+
+ Mock::given(method("POST"))
+ .and(path("/fail"))
+ .respond_with(ResponseTemplate::new(200))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let monitor = HealthchecksMonitor::new(&server.uri());
+ let msg = Message::new_fail("exit with error");
+ let result = monitor.exit(&msg).await;
+ assert!(result);
+ }
+
+ // ---- UptimeKumaMonitor with wiremock ----
+
+ #[tokio::test]
+ async fn test_uptime_kuma_ping_ok() {
+ let server = MockServer::start().await;
+
+ Mock::given(method("GET"))
+ .respond_with(ResponseTemplate::new(200))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let monitor = UptimeKumaMonitor::new(&server.uri());
+ let msg = Message::new_ok("up and running");
+ let result = monitor.ping(&msg).await;
+ assert!(result);
+ }
+
+ #[tokio::test]
+ async fn test_uptime_kuma_ping_fail() {
+ let server = MockServer::start().await;
+
+ Mock::given(method("GET"))
+ .respond_with(ResponseTemplate::new(200))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let monitor = UptimeKumaMonitor::new(&server.uri());
+ let msg = Message::new_fail("down");
+ let result = monitor.ping(&msg).await;
+ assert!(result);
+ }
+
+ #[tokio::test]
+ async fn test_uptime_kuma_start() {
+ let server = MockServer::start().await;
+
+ Mock::given(method("GET"))
+ .respond_with(ResponseTemplate::new(200))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let monitor = UptimeKumaMonitor::new(&server.uri());
+ let result = monitor.start().await;
+ assert!(result);
+ }
+
+ #[tokio::test]
+ async fn test_uptime_kuma_exit() {
+ let server = MockServer::start().await;
+
+ Mock::given(method("GET"))
+ .respond_with(ResponseTemplate::new(200))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let monitor = UptimeKumaMonitor::new(&server.uri());
+ let msg = Message::new_ok("exiting cleanly");
+ let result = monitor.exit(&msg).await;
+ assert!(result);
+ }
+
+ // ---- ShoutrrrNotifier with wiremock ----
+
+ #[tokio::test]
+ async fn test_shoutrrr_send_discord() {
+ let server = MockServer::start().await;
+
+ Mock::given(method("POST"))
+ .respond_with(ResponseTemplate::new(200))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ // Build a notifier that points discord webhook at our mock server
+ let notifier = ShoutrrrNotifier {
+ client: Client::new(),
+ urls: vec![ShoutrrrService {
+ original_url: "discord://token@id".to_string(),
+ service_type: ShoutrrrServiceType::Discord,
+ webhook_url: format!("{}/api/webhooks/id/token", server.uri()),
+ }],
+ };
+ let msg = Message::new_ok("discord test");
+ let pp = PP::default_pp();
+ let result = notifier.send(&msg, &pp).await;
+ assert!(result);
+ }
+
+ #[tokio::test]
+ async fn test_shoutrrr_send_slack() {
+ let server = MockServer::start().await;
+
+ Mock::given(method("POST"))
+ .respond_with(ResponseTemplate::new(200))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let notifier = ShoutrrrNotifier {
+ client: Client::new(),
+ urls: vec![ShoutrrrService {
+ original_url: "slack://a/b/c".to_string(),
+ service_type: ShoutrrrServiceType::Slack,
+ webhook_url: format!("{}/services/a/b/c", server.uri()),
+ }],
+ };
+ let msg = Message::new_ok("slack test");
+ let pp = PP::default_pp();
+ let result = notifier.send(&msg, &pp).await;
+ assert!(result);
+ }
+
+ #[tokio::test]
+ async fn test_shoutrrr_send_generic() {
+ let server = MockServer::start().await;
+
+ Mock::given(method("POST"))
+ .respond_with(ResponseTemplate::new(200))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let notifier = ShoutrrrNotifier {
+ client: Client::new(),
+ urls: vec![ShoutrrrService {
+ original_url: "generic://example.com/hook".to_string(),
+ service_type: ShoutrrrServiceType::Generic,
+ webhook_url: format!("{}/hook", server.uri()),
+ }],
+ };
+ let msg = Message::new_ok("generic test");
+ let pp = PP::default_pp();
+ let result = notifier.send(&msg, &pp).await;
+ assert!(result);
+ }
+
+ #[tokio::test]
+ async fn test_shoutrrr_send_empty_message() {
+ let notifier = ShoutrrrNotifier {
+ client: Client::new(),
+ urls: vec![],
+ };
+ let msg = Message::new();
+ let pp = PP::default_pp();
+ // Empty message should return true immediately
+ let result = notifier.send(&msg, &pp).await;
+ assert!(result);
+ }
+
+ // ---- ShoutrrrNotifier::new and describe ----
+
+ #[test]
+ fn test_shoutrrr_notifier_new_valid() {
+ let urls = vec!["discord://token@id".to_string(), "slack://a/b/c".to_string()];
+ let notifier = ShoutrrrNotifier::new(&urls).unwrap();
+ assert_eq!(notifier.urls.len(), 2);
+ }
+
+ #[test]
+ fn test_shoutrrr_notifier_new_skips_empty() {
+ let urls = vec!["".to_string(), " ".to_string(), "discord://token@id".to_string()];
+ let notifier = ShoutrrrNotifier::new(&urls).unwrap();
+ assert_eq!(notifier.urls.len(), 1);
+ }
+
+ #[test]
+ fn test_shoutrrr_notifier_new_invalid_url() {
+ let urls = vec!["not-a-url".to_string()];
+ let result = ShoutrrrNotifier::new(&urls);
+ assert!(result.is_err());
+ }
+
+ #[test]
+ fn test_shoutrrr_notifier_describe() {
+ let notifier = ShoutrrrNotifier {
+ client: Client::new(),
+ urls: vec![
+ ShoutrrrService {
+ original_url: "discord://t@i".to_string(),
+ service_type: ShoutrrrServiceType::Discord,
+ webhook_url: "https://example.com".to_string(),
+ },
+ ShoutrrrService {
+ original_url: "slack://a/b/c".to_string(),
+ service_type: ShoutrrrServiceType::Slack,
+ webhook_url: "https://example.com".to_string(),
+ },
+ ShoutrrrService {
+ original_url: "telegram://t@t?chats=1".to_string(),
+ service_type: ShoutrrrServiceType::Telegram,
+ webhook_url: "https://example.com".to_string(),
+ },
+ ShoutrrrService {
+ original_url: "gotify://h/p".to_string(),
+ service_type: ShoutrrrServiceType::Gotify,
+ webhook_url: "https://example.com".to_string(),
+ },
+ ShoutrrrService {
+ original_url: "pushover://u@t".to_string(),
+ service_type: ShoutrrrServiceType::Pushover,
+ webhook_url: "https://example.com".to_string(),
+ },
+ ShoutrrrService {
+ original_url: "generic://h/p".to_string(),
+ service_type: ShoutrrrServiceType::Generic,
+ webhook_url: "https://example.com".to_string(),
+ },
+ ShoutrrrService {
+ original_url: "custom://h/p".to_string(),
+ service_type: ShoutrrrServiceType::Other("custom".to_string()),
+ webhook_url: "https://example.com".to_string(),
+ },
+ ],
+ };
+ let desc = notifier.describe();
+ assert_eq!(desc, "Discord, Slack, Telegram, Gotify, Pushover, generic webhook, custom");
+ }
+
+ // ---- send_telegram, send_gotify, send_pushover with wiremock ----
+
+ #[tokio::test]
+ async fn test_shoutrrr_send_telegram() {
+ let server = MockServer::start().await;
+
+ Mock::given(method("POST"))
+ .respond_with(ResponseTemplate::new(200))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let notifier = ShoutrrrNotifier {
+ client: Client::new(),
+ urls: vec![ShoutrrrService {
+ original_url: "telegram://token@telegram?chats=123".to_string(),
+ service_type: ShoutrrrServiceType::Telegram,
+ webhook_url: format!("{}/bottoken/sendMessage?chat_id=123", server.uri()),
+ }],
+ };
+ let msg = Message::new_ok("telegram test");
+ let pp = PP::new(false, true);
+ let result = notifier.send(&msg, &pp).await;
+ assert!(result);
+ }
+
+ #[tokio::test]
+ async fn test_shoutrrr_send_gotify() {
+ let server = MockServer::start().await;
+
+ Mock::given(method("POST"))
+ .respond_with(ResponseTemplate::new(200))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let notifier = ShoutrrrNotifier {
+ client: Client::new(),
+ urls: vec![ShoutrrrService {
+ original_url: "gotify://host/path".to_string(),
+ service_type: ShoutrrrServiceType::Gotify,
+ webhook_url: format!("{}/message", server.uri()),
+ }],
+ };
+ let msg = Message::new_ok("gotify test");
+ let pp = PP::new(false, true);
+ let result = notifier.send(&msg, &pp).await;
+ assert!(result);
+ }
+
+ #[test]
+ fn test_pushover_url_query_parsing() {
+ // Verify that the pushover webhook URL format contains the right params
+ let service = parse_shoutrrr_url("pushover://myuser@mytoken").unwrap();
+ let parsed = url::Url::parse(&service.webhook_url).unwrap();
+ let params: std::collections::HashMap<_, _> = parsed.query_pairs().collect();
+ assert_eq!(params.get("token").unwrap().as_ref(), "mytoken");
+ assert_eq!(params.get("user").unwrap().as_ref(), "myuser");
+ }
+
+ #[tokio::test]
+ async fn test_shoutrrr_send_other_type() {
+ let server = MockServer::start().await;
+
+ Mock::given(method("POST"))
+ .respond_with(ResponseTemplate::new(200))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let notifier = ShoutrrrNotifier {
+ client: Client::new(),
+ urls: vec![ShoutrrrService {
+ original_url: "custom://host/path".to_string(),
+ service_type: ShoutrrrServiceType::Other("custom".to_string()),
+ webhook_url: format!("{}/path", server.uri()),
+ }],
+ };
+ let msg = Message::new_ok("other test");
+ let pp = PP::new(false, true);
+ let result = notifier.send(&msg, &pp).await;
+ assert!(result);
+ }
+
+ #[tokio::test]
+ async fn test_shoutrrr_send_failure_logs_warning() {
+ let server = MockServer::start().await;
+
+ Mock::given(method("POST"))
+ .respond_with(ResponseTemplate::new(500))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let notifier = ShoutrrrNotifier {
+ client: Client::new(),
+ urls: vec![ShoutrrrService {
+ original_url: "discord://t@i".to_string(),
+ service_type: ShoutrrrServiceType::Discord,
+ webhook_url: format!("{}/webhook", server.uri()),
+ }],
+ };
+ let msg = Message::new_ok("will fail");
+ let pp = PP::new(false, true);
+ let result = notifier.send(&msg, &pp).await;
+ assert!(!result);
+ }
+
+ // ---- CompositeNotifier describe ----
+
+ #[test]
+ fn test_composite_notifier_describe_empty() {
+ let notifier = CompositeNotifier::new(vec![]);
+ assert!(notifier.describe().is_empty());
+ }
+
+ // ---- Heartbeat describe and is_empty ----
+
+ #[test]
+ fn test_heartbeat_is_empty() {
+ let hb = Heartbeat::new(vec![]);
+ assert!(hb.is_empty());
+ assert!(hb.describe().is_empty());
+ }
+
+ #[tokio::test]
+ async fn test_heartbeat_ping_no_monitors() {
+ let hb = Heartbeat::new(vec![]);
+ let msg = Message::new_ok("test");
+ // Should not panic
+ hb.ping(&msg).await;
+ }
+
+ #[tokio::test]
+ async fn test_heartbeat_start_no_monitors() {
+ let hb = Heartbeat::new(vec![]);
+ hb.start().await;
+ }
+
+ #[tokio::test]
+ async fn test_heartbeat_exit_no_monitors() {
+ let hb = Heartbeat::new(vec![]);
+ let msg = Message::new_ok("bye");
+ hb.exit(&msg).await;
+ }
+
+ // ---- CompositeNotifier send with empty message ----
+
+ #[tokio::test]
+ async fn test_composite_notifier_send_empty_message_skips() {
+ let notifier = CompositeNotifier::new(vec![]);
+ let msg = Message::new(); // empty
+ // Should return immediately without sending
+ notifier.send(&msg).await;
+ }
+
+ #[tokio::test]
+ async fn test_shoutrrr_send_server_error() {
+ let server = MockServer::start().await;
+
+ Mock::given(method("POST"))
+ .respond_with(ResponseTemplate::new(500))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let notifier = ShoutrrrNotifier {
+ client: Client::new(),
+ urls: vec![ShoutrrrService {
+ original_url: "generic://example.com/hook".to_string(),
+ service_type: ShoutrrrServiceType::Generic,
+ webhook_url: format!("{}/hook", server.uri()),
+ }],
+ };
+ let msg = Message::new_ok("will fail");
+ let pp = PP::default_pp();
+ let result = notifier.send(&msg, &pp).await;
+ assert!(!result);
+ }
+}
diff --git a/src/pp.rs b/src/pp.rs
new file mode 100644
index 0000000..9a60fbf
--- /dev/null
+++ b/src/pp.rs
@@ -0,0 +1,435 @@
+use std::collections::HashSet;
+use std::sync::{Arc, Mutex};
+
+// Verbosity levels
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+pub enum Verbosity {
+ Quiet,
+ Notice,
+ Info,
+ Verbose,
+}
+
+// Emoji constants
+#[allow(dead_code)]
+pub const EMOJI_GLOBE: &str = "\u{1F30D}";
+pub const EMOJI_WARNING: &str = "\u{26A0}\u{FE0F}";
+pub const EMOJI_ERROR: &str = "\u{274C}";
+#[allow(dead_code)]
+pub const EMOJI_SUCCESS: &str = "\u{2705}";
+pub const EMOJI_LAUNCH: &str = "\u{1F680}";
+pub const EMOJI_STOP: &str = "\u{1F6D1}";
+pub const EMOJI_SLEEP: &str = "\u{1F634}";
+pub const EMOJI_DETECT: &str = "\u{1F50D}";
+pub const EMOJI_UPDATE: &str = "\u{2B06}\u{FE0F}";
+pub const EMOJI_CREATE: &str = "\u{2795}";
+pub const EMOJI_DELETE: &str = "\u{2796}";
+pub const EMOJI_SKIP: &str = "\u{23ED}\u{FE0F}";
+pub const EMOJI_NOTIFY: &str = "\u{1F514}";
+pub const EMOJI_HEARTBEAT: &str = "\u{1F493}";
+pub const EMOJI_CONFIG: &str = "\u{2699}\u{FE0F}";
+#[allow(dead_code)]
+pub const EMOJI_HINT: &str = "\u{1F4A1}";
+
+const INDENT_PREFIX: &str = " ";
+
+pub struct PP {
+ pub verbosity: Verbosity,
+ pub emoji: bool,
+ indent: usize,
+ seen: Arc<Mutex<HashSet<String>>>,
+}
+
+impl PP {
+ pub fn new(emoji: bool, quiet: bool) -> Self {
+ Self {
+ verbosity: if quiet { Verbosity::Quiet } else { Verbosity::Verbose },
+ emoji,
+ indent: 0,
+ seen: Arc::new(Mutex::new(HashSet::new())),
+ }
+ }
+
+ pub fn default_pp() -> Self {
+ Self::new(false, false)
+ }
+
+ pub fn is_showing(&self, level: Verbosity) -> bool {
+ self.verbosity >= level
+ }
+
+ pub fn indent(&self) -> PP {
+ PP {
+ verbosity: self.verbosity,
+ emoji: self.emoji,
+ indent: self.indent + 1,
+ seen: Arc::clone(&self.seen),
+ }
+ }
+
+ fn output(&self, emoji: &str, msg: &str) {
+ let prefix = INDENT_PREFIX.repeat(self.indent);
+ if self.emoji && !emoji.is_empty() {
+ println!("{prefix}{emoji} {msg}");
+ } else {
+ println!("{prefix}{msg}");
+ }
+ }
+
+ fn output_err(&self, emoji: &str, msg: &str) {
+ let prefix = INDENT_PREFIX.repeat(self.indent);
+ if self.emoji && !emoji.is_empty() {
+ eprintln!("{prefix}{emoji} {msg}");
+ } else {
+ eprintln!("{prefix}{msg}");
+ }
+ }
+
+ pub fn infof(&self, emoji: &str, msg: &str) {
+ if self.is_showing(Verbosity::Info) {
+ self.output(emoji, msg);
+ }
+ }
+
+ pub fn noticef(&self, emoji: &str, msg: &str) {
+ if self.is_showing(Verbosity::Notice) {
+ self.output(emoji, msg);
+ }
+ }
+
+ pub fn warningf(&self, emoji: &str, msg: &str) {
+ self.output_err(emoji, msg);
+ }
+
+ pub fn errorf(&self, emoji: &str, msg: &str) {
+ self.output_err(emoji, msg);
+ }
+
+ #[allow(dead_code)]
+ pub fn info_once(&self, key: &str, emoji: &str, msg: &str) {
+ if self.is_showing(Verbosity::Info) {
+ let mut seen = self.seen.lock().unwrap();
+ if seen.insert(key.to_string()) {
+ self.output(emoji, msg);
+ }
+ }
+ }
+
+ #[allow(dead_code)]
+ pub fn notice_once(&self, key: &str, emoji: &str, msg: &str) {
+ if self.is_showing(Verbosity::Notice) {
+ let mut seen = self.seen.lock().unwrap();
+ if seen.insert(key.to_string()) {
+ self.output(emoji, msg);
+ }
+ }
+ }
+
+ #[allow(dead_code)]
+ pub fn blank_line_if_verbose(&self) {
+ if self.is_showing(Verbosity::Verbose) {
+ println!();
+ }
+ }
+}
+
+#[allow(dead_code)]
+pub fn english_join(items: &[String]) -> String {
+ match items.len() {
+ 0 => String::new(),
+ 1 => items[0].clone(),
+ 2 => format!("{} and {}", items[0], items[1]),
+ _ => {
+ let (last, rest) = items.split_last().unwrap();
+ format!("{}, and {last}", rest.join(", "))
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ // ---- PP::new with emoji flag ----
+
+ #[test]
+ fn new_with_emoji_true() {
+ let pp = PP::new(true, false);
+ assert!(pp.emoji);
+ }
+
+ #[test]
+ fn new_with_emoji_false() {
+ let pp = PP::new(false, false);
+ assert!(!pp.emoji);
+ }
+
+ // ---- PP::new with quiet flag (verbosity levels) ----
+
+ #[test]
+ fn new_quiet_true_sets_verbosity_quiet() {
+ let pp = PP::new(false, true);
+ assert_eq!(pp.verbosity, Verbosity::Quiet);
+ }
+
+ #[test]
+ fn new_quiet_false_sets_verbosity_verbose() {
+ let pp = PP::new(false, false);
+ assert_eq!(pp.verbosity, Verbosity::Verbose);
+ }
+
+ // ---- PP::is_showing at different verbosity levels ----
+
+ #[test]
+ fn quiet_shows_only_quiet_level() {
+ let pp = PP::new(false, true);
+ assert!(pp.is_showing(Verbosity::Quiet));
+ assert!(!pp.is_showing(Verbosity::Notice));
+ assert!(!pp.is_showing(Verbosity::Info));
+ assert!(!pp.is_showing(Verbosity::Verbose));
+ }
+
+ #[test]
+ fn verbose_shows_all_levels() {
+ let pp = PP::new(false, false);
+ assert!(pp.is_showing(Verbosity::Quiet));
+ assert!(pp.is_showing(Verbosity::Notice));
+ assert!(pp.is_showing(Verbosity::Info));
+ assert!(pp.is_showing(Verbosity::Verbose));
+ }
+
+ #[test]
+ fn notice_level_shows_quiet_and_notice_only() {
+ let mut pp = PP::new(false, false);
+ pp.verbosity = Verbosity::Notice;
+ assert!(pp.is_showing(Verbosity::Quiet));
+ assert!(pp.is_showing(Verbosity::Notice));
+ assert!(!pp.is_showing(Verbosity::Info));
+ assert!(!pp.is_showing(Verbosity::Verbose));
+ }
+
+ #[test]
+ fn info_level_shows_up_to_info() {
+ let mut pp = PP::new(false, false);
+ pp.verbosity = Verbosity::Info;
+ assert!(pp.is_showing(Verbosity::Quiet));
+ assert!(pp.is_showing(Verbosity::Notice));
+ assert!(pp.is_showing(Verbosity::Info));
+ assert!(!pp.is_showing(Verbosity::Verbose));
+ }
+
+ // ---- PP::indent ----
+
+ #[test]
+ fn indent_increments_indent_level() {
+ let pp = PP::new(true, false);
+ assert_eq!(pp.indent, 0);
+ let child = pp.indent();
+ assert_eq!(child.indent, 1);
+ let grandchild = child.indent();
+ assert_eq!(grandchild.indent, 2);
+ }
+
+ #[test]
+ fn indent_preserves_verbosity_and_emoji() {
+ let pp = PP::new(true, true);
+ let child = pp.indent();
+ assert_eq!(child.verbosity, pp.verbosity);
+ assert_eq!(child.emoji, pp.emoji);
+ }
+
+ #[test]
+ fn indent_shares_seen_state() {
+ let pp = PP::new(false, false);
+ let child = pp.indent();
+
+ // Insert via parent's seen set
+ pp.seen.lock().unwrap().insert("key1".to_string());
+
+ // Child should observe the same entry
+ assert!(child.seen.lock().unwrap().contains("key1"));
+
+ // Insert via child
+ child.seen.lock().unwrap().insert("key2".to_string());
+
+ // Parent should observe it too
+ assert!(pp.seen.lock().unwrap().contains("key2"));
+ }
+
+ // ---- PP::infof, noticef, warningf, errorf - no panic and verbosity gating ----
+
+ #[test]
+ fn infof_does_not_panic_when_verbose() {
+ let pp = PP::new(false, false);
+ pp.infof("", "test info message");
+ }
+
+ #[test]
+ fn infof_does_not_panic_when_quiet() {
+ let pp = PP::new(false, true);
+ // Should simply not print, and not panic
+ pp.infof("", "test info message");
+ }
+
+ #[test]
+ fn noticef_does_not_panic_when_verbose() {
+ let pp = PP::new(true, false);
+ pp.noticef(EMOJI_DETECT, "test notice message");
+ }
+
+ #[test]
+ fn noticef_does_not_panic_when_quiet() {
+ let pp = PP::new(false, true);
+ pp.noticef("", "test notice message");
+ }
+
+ #[test]
+ fn warningf_does_not_panic() {
+ let pp = PP::new(true, false);
+ pp.warningf(EMOJI_WARNING, "test warning");
+ }
+
+ #[test]
+ fn warningf_does_not_panic_when_quiet() {
+ // warningf always outputs (no verbosity check), just verify no panic
+ let pp = PP::new(false, true);
+ pp.warningf("", "test warning");
+ }
+
+ #[test]
+ fn errorf_does_not_panic() {
+ let pp = PP::new(true, false);
+ pp.errorf(EMOJI_ERROR, "test error");
+ }
+
+ #[test]
+ fn errorf_does_not_panic_when_quiet() {
+ let pp = PP::new(false, true);
+ pp.errorf("", "test error");
+ }
+
+ // ---- PP::info_once and notice_once ----
+
+ #[test]
+ fn info_once_suppresses_duplicates() {
+ let pp = PP::new(false, false);
+ // First call inserts the key
+ pp.info_once("dup_key", "", "first");
+ // The key should now be in the seen set
+ assert!(pp.seen.lock().unwrap().contains("dup_key"));
+
+ // Calling again with the same key should not insert again (set unchanged)
+ let size_before = pp.seen.lock().unwrap().len();
+ pp.info_once("dup_key", "", "second");
+ let size_after = pp.seen.lock().unwrap().len();
+ assert_eq!(size_before, size_after);
+ }
+
+ #[test]
+ fn info_once_allows_different_keys() {
+ let pp = PP::new(false, false);
+ pp.info_once("key_a", "", "msg a");
+ pp.info_once("key_b", "", "msg b");
+ let seen = pp.seen.lock().unwrap();
+ assert!(seen.contains("key_a"));
+ assert!(seen.contains("key_b"));
+ assert_eq!(seen.len(), 2);
+ }
+
+ #[test]
+ fn info_once_skipped_when_quiet() {
+ let pp = PP::new(false, true);
+ pp.info_once("quiet_key", "", "should not register");
+ // Because verbosity is Quiet, info_once should not even insert the key
+ assert!(!pp.seen.lock().unwrap().contains("quiet_key"));
+ }
+
+ #[test]
+ fn notice_once_suppresses_duplicates() {
+ let pp = PP::new(false, false);
+ pp.notice_once("notice_dup", "", "first");
+ assert!(pp.seen.lock().unwrap().contains("notice_dup"));
+
+ let size_before = pp.seen.lock().unwrap().len();
+ pp.notice_once("notice_dup", "", "second");
+ let size_after = pp.seen.lock().unwrap().len();
+ assert_eq!(size_before, size_after);
+ }
+
+ #[test]
+ fn notice_once_skipped_when_quiet() {
+ let pp = PP::new(false, true);
+ pp.notice_once("quiet_notice", "", "should not register");
+ assert!(!pp.seen.lock().unwrap().contains("quiet_notice"));
+ }
+
+ #[test]
+ fn info_once_shared_via_indent() {
+ let pp = PP::new(false, false);
+ let child = pp.indent();
+
+ // Mark a key via the parent
+ pp.info_once("shared_key", "", "parent");
+ assert!(pp.seen.lock().unwrap().contains("shared_key"));
+
+ // Child should see it as already present, so set size stays the same
+ let size_before = child.seen.lock().unwrap().len();
+ child.info_once("shared_key", "", "child duplicate");
+ let size_after = child.seen.lock().unwrap().len();
+ assert_eq!(size_before, size_after);
+
+ // Child can add a new key visible to parent
+ child.info_once("child_key", "", "child new");
+ assert!(pp.seen.lock().unwrap().contains("child_key"));
+ }
+
+ // ---- english_join ----
+
+ #[test]
+ fn english_join_empty() {
+ let items: Vec<String> = vec![];
+ assert_eq!(english_join(&items), "");
+ }
+
+ #[test]
+ fn english_join_single() {
+ let items = vec!["alpha".to_string()];
+ assert_eq!(english_join(&items), "alpha");
+ }
+
+ #[test]
+ fn english_join_two() {
+ let items = vec!["alpha".to_string(), "beta".to_string()];
+ assert_eq!(english_join(&items), "alpha and beta");
+ }
+
+ #[test]
+ fn english_join_three() {
+ let items = vec![
+ "alpha".to_string(),
+ "beta".to_string(),
+ "gamma".to_string(),
+ ];
+ assert_eq!(english_join(&items), "alpha, beta, and gamma");
+ }
+
+ #[test]
+ fn english_join_four() {
+ let items = vec![
+ "a".to_string(),
+ "b".to_string(),
+ "c".to_string(),
+ "d".to_string(),
+ ];
+ assert_eq!(english_join(&items), "a, b, c, and d");
+ }
+
+ // ---- default_pp ----
+
+ #[test]
+ fn default_pp_is_verbose_no_emoji() {
+ let pp = PP::default_pp();
+ assert!(!pp.emoji);
+ assert_eq!(pp.verbosity, Verbosity::Verbose);
+ }
+}
diff --git a/src/provider.rs b/src/provider.rs
new file mode 100644
index 0000000..8042607
--- /dev/null
+++ b/src/provider.rs
@@ -0,0 +1,1201 @@
+use crate::pp::{self, PP};
+use reqwest::Client;
+use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, UdpSocket};
+use std::time::Duration;
+
+/// IP type: IPv4 or IPv6
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub enum IpType {
+ V4,
+ V6,
+}
+
+impl IpType {
+ pub fn describe(&self) -> &str {
+ match self {
+ IpType::V4 => "IPv4",
+ IpType::V6 => "IPv6",
+ }
+ }
+
+ pub fn record_type(&self) -> &str {
+ match self {
+ IpType::V4 => "A",
+ IpType::V6 => "AAAA",
+ }
+ }
+
+ #[allow(dead_code)]
+ pub fn all() -> &'static [IpType] {
+ &[IpType::V4, IpType::V6]
+ }
+}
+
+/// All supported provider types
+#[derive(Debug, Clone)]
+pub enum ProviderType {
+ CloudflareTrace { url: Option<String> },
+ CloudflareDOH,
+ Ipify,
+ Local,
+ LocalIface { interface: String },
+ CustomURL { url: String },
+ Literal { ips: Vec<IpAddr> },
+ None,
+}
+
+impl ProviderType {
+ pub fn name(&self) -> &str {
+ match self {
+ ProviderType::CloudflareTrace { .. } => "cloudflare.trace",
+ ProviderType::CloudflareDOH => "cloudflare.doh",
+ ProviderType::Ipify => "ipify",
+ ProviderType::Local => "local",
+ ProviderType::LocalIface { .. } => "local.iface",
+ ProviderType::CustomURL { .. } => "url:",
+ ProviderType::Literal { .. } => "literal:",
+ ProviderType::None => "none",
+ }
+ }
+
+ /// Parse a provider string like "cloudflare.trace", "url:https://...", "literal:1.2.3.4"
+ pub fn parse(input: &str) -> Result<ProviderType, String> {
+ let input = input.trim();
+ if input.is_empty() || input == "none" {
+ return Ok(ProviderType::None);
+ }
+ if input == "cloudflare.trace" {
+ return Ok(ProviderType::CloudflareTrace { url: None });
+ }
+ if let Some(url) = input.strip_prefix("cloudflare.trace:") {
+ return Ok(ProviderType::CloudflareTrace {
+ url: Some(url.to_string()),
+ });
+ }
+ if input == "cloudflare.doh" {
+ return Ok(ProviderType::CloudflareDOH);
+ }
+ if input == "ipify" {
+ return Ok(ProviderType::Ipify);
+ }
+ if input == "local" {
+ return Ok(ProviderType::Local);
+ }
+ if let Some(iface) = input.strip_prefix("local.iface:") {
+ return Ok(ProviderType::LocalIface {
+ interface: iface.to_string(),
+ });
+ }
+ if let Some(url) = input.strip_prefix("url:") {
+ // Validate URL
+ match url::Url::parse(url) {
+ Ok(parsed) => {
+ if parsed.scheme() != "http" && parsed.scheme() != "https" {
+ return Err(format!("Custom URL must use http or https: {url}"));
+ }
+ Ok(ProviderType::CustomURL {
+ url: url.to_string(),
+ })
+ }
+ Err(e) => Err(format!("Invalid custom URL '{url}': {e}")),
+ }
+ } else if let Some(ips_str) = input.strip_prefix("literal:") {
+ let ips: Result<Vec<IpAddr>, _> = ips_str
+ .split(|c: char| c == ',' || c == ' ')
+ .filter(|s| !s.is_empty())
+ .map(|s| s.trim().parse::<IpAddr>())
+ .collect();
+ match ips {
+ Ok(ips) => Ok(ProviderType::Literal { ips }),
+ Err(e) => Err(format!("Invalid IP in literal provider: {e}")),
+ }
+ } else {
+ Err(format!("Unknown provider: {input}"))
+ }
+ }
+
+ /// Detect IPs using this provider.
+ pub async fn detect_ips(
+ &self,
+ client: &Client,
+ ip_type: IpType,
+ timeout: Duration,
+ ppfmt: &PP,
+ ) -> Vec<IpAddr> {
+ match self {
+ ProviderType::CloudflareTrace { url } => {
+ detect_cloudflare_trace(client, ip_type, timeout, url.as_deref(), ppfmt).await
+ }
+ ProviderType::CloudflareDOH => {
+ detect_cloudflare_doh(client, ip_type, timeout, ppfmt).await
+ }
+ ProviderType::Ipify => detect_ipify(client, ip_type, timeout, ppfmt).await,
+ ProviderType::Local => detect_local(ip_type, ppfmt),
+ ProviderType::LocalIface { interface } => {
+ detect_local_iface(interface, ip_type, ppfmt)
+ }
+ ProviderType::CustomURL { url } => {
+ detect_custom_url(client, url, ip_type, timeout, ppfmt).await
+ }
+ ProviderType::Literal { ips } => filter_ips_by_type(ips, ip_type),
+ ProviderType::None => Vec::new(),
+ }
+ }
+}
+
+// --- Cloudflare Trace ---
+
+const CF_TRACE_V4_PRIMARY: &str = "https://1.1.1.1/cdn-cgi/trace";
+const CF_TRACE_V4_FALLBACK: &str = "https://1.0.0.1/cdn-cgi/trace";
+const CF_TRACE_V6_PRIMARY: &str = "https://[2606:4700:4700::1111]/cdn-cgi/trace";
+const CF_TRACE_V6_FALLBACK: &str = "https://[2606:4700:4700::1001]/cdn-cgi/trace";
+
+ pub fn parse_trace_ip(body: &str) -> Option<String> {
+ for line in body.lines() {
+ if let Some(ip) = line.strip_prefix("ip=") {
+ return Some(ip.to_string());
+ }
+ }
+ None
+}
+
+ async fn fetch_trace_ip(client: &Client, url: &str, timeout: Duration) -> Option<IpAddr> {
+ let resp = client
+ .get(url)
+ .timeout(timeout)
+ .send()
+ .await
+ .ok()?;
+ let body = resp.text().await.ok()?;
+ let ip_str = parse_trace_ip(&body)?;
+ ip_str.parse::<IpAddr>().ok()
+}
+
+async fn detect_cloudflare_trace(
+ client: &Client,
+ ip_type: IpType,
+ timeout: Duration,
+ custom_url: Option<&str>,
+ ppfmt: &PP,
+ ) -> Vec<IpAddr> {
+ if let Some(url) = custom_url {
+ if let Some(ip) = fetch_trace_ip(client, url, timeout).await {
+ if matches_ip_type(&ip, ip_type) {
+ return vec![ip];
+ }
+ }
+ ppfmt.warningf(
+ pp::EMOJI_WARNING,
+ &format!("{} not detected via custom Cloudflare trace URL", ip_type.describe()),
+ );
+ return Vec::new();
+ }
+
+ let (primary, fallback) = match ip_type {
+ IpType::V4 => (CF_TRACE_V4_PRIMARY, CF_TRACE_V4_FALLBACK),
+ IpType::V6 => (CF_TRACE_V6_PRIMARY, CF_TRACE_V6_FALLBACK),
+ };
+
+ // Try primary
+ if let Some(ip) = fetch_trace_ip(client, primary, timeout).await {
+ if matches_ip_type(&ip, ip_type) {
+ return vec![ip];
+ }
+ }
+ ppfmt.warningf(
+ pp::EMOJI_WARNING,
+ &format!("{} not detected via primary, trying fallback", ip_type.describe()),
+ );
+
+ // Try fallback
+ if let Some(ip) = fetch_trace_ip(client, fallback, timeout).await {
+ if matches_ip_type(&ip, ip_type) {
+ return vec![ip];
+ }
+ }
+ ppfmt.warningf(
+ pp::EMOJI_WARNING,
+ &format!(
+ "{} not detected via fallback. Verify your ISP or DNS provider isn't blocking Cloudflare's IPs.",
+ ip_type.describe()
+ ),
+ );
+
+ Vec::new()
+}
+
+// --- Cloudflare DNS over HTTPS ---
+
+async fn detect_cloudflare_doh(
+ client: &Client,
+ ip_type: IpType,
+ timeout: Duration,
+ ppfmt: &PP,
+ ) -> Vec<IpAddr> {
+ // Construct a DNS query for whoami.cloudflare. TXT CH
+ let query = build_dns_query(b"\x06whoami\x0Acloudflare\x00", 16, 3); // TXT=16, CH=3
+
+ let resp = client
+ .post("https://cloudflare-dns.com/dns-query")
+ .header("Content-Type", "application/dns-message")
+ .header("Accept", "application/dns-message")
+ .body(query)
+ .timeout(timeout)
+ .send()
+ .await;
+
+ match resp {
+ Ok(r) => {
+ if let Ok(body) = r.bytes().await {
+ if let Some(ip_str) = parse_dns_txt_response(&body) {
+ if let Ok(ip) = ip_str.parse::<IpAddr>() {
+ if matches_ip_type(&ip, ip_type) {
+ return vec![ip];
+ }
+ }
+ }
+ }
+ }
+ Err(e) => {
+ ppfmt.warningf(
+ pp::EMOJI_WARNING,
+ &format!("{} not detected via Cloudflare DoH: {e}", ip_type.describe()),
+ );
+ }
+ }
+ Vec::new()
+}
+
+ fn build_dns_query(name: &[u8], qtype: u16, qclass: u16) -> Vec<u8> {
+ let mut buf = Vec::with_capacity(64);
+ // Header
+ let id: u16 = rand_u16();
+ buf.extend_from_slice(&id.to_be_bytes()); // Transaction ID
+ buf.extend_from_slice(&[0x01, 0x00]); // Flags: standard query, RD=1
+ buf.extend_from_slice(&[0x00, 0x01]); // Questions: 1
+ buf.extend_from_slice(&[0x00, 0x00]); // Answer RRs: 0
+ buf.extend_from_slice(&[0x00, 0x00]); // Authority RRs: 0
+ buf.extend_from_slice(&[0x00, 0x00]); // Additional RRs: 0
+ // Question section
+ buf.extend_from_slice(name);
+ buf.extend_from_slice(&qtype.to_be_bytes());
+ buf.extend_from_slice(&qclass.to_be_bytes());
+ buf
+}
+
+ fn parse_dns_txt_response(data: &[u8]) -> Option<String> {
+ if data.len() < 12 {
+ return None;
+ }
+ // Check QR bit (response)
+ if data[2] & 0x80 == 0 {
+ return None;
+ }
+ // Check RCODE
+ if data[3] & 0x0F != 0 {
+ return None;
+ }
+ let ancount = u16::from_be_bytes([data[6], data[7]]);
+ if ancount == 0 {
+ return None;
+ }
+
+ // Skip header (12 bytes) + question section
+ let mut pos = 12;
+ // Skip question name
+ pos = skip_dns_name(data, pos)?;
+ pos += 4; // Skip QTYPE + QCLASS
+
+ // Parse answer
+ for _ in 0..ancount {
+ if pos >= data.len() {
+ break;
+ }
+ // Skip name
+ pos = skip_dns_name(data, pos)?;
+ if pos + 10 > data.len() {
+ break;
+ }
+ let rtype = u16::from_be_bytes([data[pos], data[pos + 1]]);
+ pos += 2; // TYPE
+ pos += 2; // CLASS
+ pos += 4; // TTL
+ let rdlength = u16::from_be_bytes([data[pos], data[pos + 1]]) as usize;
+ pos += 2;
+
+ if rtype == 16 && rdlength > 1 && pos + rdlength <= data.len() {
+ // TXT record: first byte is string length
+ let txt_len = data[pos] as usize;
+ if txt_len > 0 && pos + 1 + txt_len <= data.len() {
+ let txt = String::from_utf8_lossy(&data[pos + 1..pos + 1 + txt_len]);
+ // Strip surrounding quotes if present
+ let txt = txt.trim_matches('"');
+ return Some(txt.to_string());
+ }
+ }
+ pos += rdlength;
+ }
+ None
+}
+
+ fn skip_dns_name(data: &[u8], mut pos: usize) -> Option<usize> {
+ loop {
+ if pos >= data.len() {
+ return None;
+ }
+ let len = data[pos] as usize;
+ if len == 0 {
+ return Some(pos + 1);
+ }
+ if len & 0xC0 == 0xC0 {
+ // Pointer
+ return Some(pos + 2);
+ }
+ pos += 1 + len;
+ }
+}
+
+fn rand_u16() -> u16 {
+ use std::collections::hash_map::RandomState;
+ use std::hash::{BuildHasher, Hasher};
+ RandomState::new().build_hasher().finish() as u16
+}
+
+// --- Ipify ---
+
+async fn detect_ipify(
+ client: &Client,
+ ip_type: IpType,
+ timeout: Duration,
+ ppfmt: &PP,
+ ) -> Vec<IpAddr> {
+ let url = match ip_type {
+ IpType::V4 => "https://api4.ipify.org",
+ IpType::V6 => "https://api6.ipify.org",
+ };
+
+ match client.get(url).timeout(timeout).send().await {
+ Ok(resp) => {
+ if let Ok(body) = resp.text().await {
+ let ip_str = body.trim();
+ if let Ok(ip) = ip_str.parse::<IpAddr>() {
+ if matches_ip_type(&ip, ip_type) {
+ return vec![ip];
+ }
+ }
+ }
+ }
+ Err(e) => {
+ ppfmt.warningf(
+ pp::EMOJI_WARNING,
+ &format!("{} not detected via ipify: {e}", ip_type.describe()),
+ );
+ }
+ }
+ Vec::new()
+}
+
+// --- Local (auto) ---
+
+ fn detect_local(ip_type: IpType, ppfmt: &PP) -> Vec<IpAddr> {
+ let target = match ip_type {
+ IpType::V4 => "1.1.1.1:443",
+ IpType::V6 => "[2606:4700:4700::1111]:443",
+ };
+
+ match UdpSocket::bind(match ip_type {
+ IpType::V4 => "0.0.0.0:0",
+ IpType::V6 => "[::]:0",
+ }) {
+ Ok(socket) => match socket.connect(target) {
+ Ok(()) => match socket.local_addr() {
+ Ok(addr) => {
+ let ip = addr.ip();
+ if matches_ip_type(&ip, ip_type) && ip.is_global_() {
+ vec![ip]
+ } else {
+ Vec::new()
+ }
+ }
+ Err(e) => {
+ ppfmt.warningf(
+ pp::EMOJI_WARNING,
+ &format!("Failed to get local {} address: {e}", ip_type.describe()),
+ );
+ Vec::new()
+ }
+ },
+ Err(e) => {
+ ppfmt.warningf(
+ pp::EMOJI_WARNING,
+ &format!("Failed to detect local {} address: {e}", ip_type.describe()),
+ );
+ Vec::new()
+ }
+ },
+ Err(e) => {
+ ppfmt.warningf(
+ pp::EMOJI_WARNING,
+ &format!("Failed to bind socket for {} detection: {e}", ip_type.describe()),
+ );
+ Vec::new()
+ }
+ }
+}
+
+// --- Local Interface ---
+
+ fn detect_local_iface(interface: &str, ip_type: IpType, ppfmt: &PP) -> Vec<IpAddr> {
+ match if_addrs::get_if_addrs() {
+ Ok(addrs) => {
+ let mut ips: Vec<IpAddr> = addrs
+ .iter()
+ .filter(|a| a.name == interface)
+ .map(|a| a.ip())
+ .filter(|ip| matches_ip_type(ip, ip_type) && ip.is_global_())
+ .collect();
+ ips.sort_by(|a, b| a.to_string().cmp(&b.to_string()));
+ ips.dedup();
+ if ips.is_empty() {
+ ppfmt.warningf(
+ pp::EMOJI_WARNING,
+ &format!(
+ "No global {} address found on interface {interface}",
+ ip_type.describe()
+ ),
+ );
+ }
+ ips
+ }
+ Err(e) => {
+ ppfmt.warningf(
+ pp::EMOJI_WARNING,
+ &format!("Failed to list network interfaces: {e}"),
+ );
+ Vec::new()
+ }
+ }
+}
+
+// --- Custom URL ---
+
+async fn detect_custom_url(
+ client: &Client,
+ url: &str,
+ ip_type: IpType,
+ timeout: Duration,
+ ppfmt: &PP,
+ ) -> Vec<IpAddr> {
+ match client.get(url).timeout(timeout).send().await {
+ Ok(resp) => {
+ if let Ok(body) = resp.text().await {
+ let ip_str = body.trim();
+ if let Ok(ip) = ip_str.parse::<IpAddr>() {
+ if matches_ip_type(&ip, ip_type) {
+ return vec![ip];
+ }
+ }
+ }
+ }
+ Err(e) => {
+ ppfmt.warningf(
+ pp::EMOJI_WARNING,
+ &format!("{} not detected via custom URL: {e}", ip_type.describe()),
+ );
+ }
+ }
+ Vec::new()
+}
+
+// --- Helpers ---
+
+fn matches_ip_type(ip: &IpAddr, ip_type: IpType) -> bool {
+ match ip_type {
+ IpType::V4 => ip.is_ipv4(),
+ IpType::V6 => ip.is_ipv6(),
+ }
+}
+
+ fn filter_ips_by_type(ips: &[IpAddr], ip_type: IpType) -> Vec<IpAddr> {
+ ips.iter()
+ .copied()
+ .filter(|ip| matches_ip_type(ip, ip_type))
+ .collect()
+}
+
+/// Extension trait for IpAddr to check if it's a global address.
+/// std::net::IpAddr::is_global is unstable, so we implement it ourselves.
+trait IsGlobal {
+ fn is_global_(&self) -> bool;
+}
+
+impl IsGlobal for IpAddr {
+ fn is_global_(&self) -> bool {
+ match self {
+ IpAddr::V4(ip) => is_global_v4(ip),
+ IpAddr::V6(ip) => is_global_v6(ip),
+ }
+ }
+}
+
+fn is_global_v4(ip: &Ipv4Addr) -> bool {
+ !ip.is_loopback()
+ && !ip.is_private()
+ && !ip.is_link_local()
+ && !ip.is_broadcast()
+ && !ip.is_unspecified()
+ && !ip.is_documentation()
+ && !(ip.octets()[0] == 100 && ip.octets()[1] >= 64 && ip.octets()[1] <= 127) // 100.64.0.0/10 shared address space
+ && !ip.octets().starts_with(&[192, 0, 0]) // 192.0.0.0/24
+}
+
+fn is_global_v6(ip: &Ipv6Addr) -> bool {
+ !ip.is_loopback()
+ && !ip.is_unspecified()
+ && !ip.is_multicast()
+ // Not link-local (fe80::/10)
+ && (ip.segments()[0] & 0xffc0) != 0xfe80
+ // Not unique local (fc00::/7)
+ && (ip.segments()[0] & 0xfe00) != 0xfc00
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_parse_trace_ip() {
+ let body = "fl=1f1\nh=1.1.1.1\nip=203.0.113.42\nts=1234567890\nvisit_scheme=https\n";
+ assert_eq!(parse_trace_ip(body), Some("203.0.113.42".to_string()));
+ }
+
+ #[test]
+ fn test_parse_trace_ip_missing() {
+ let body = "fl=1f1\nh=1.1.1.1\nts=1234567890\n";
+ assert_eq!(parse_trace_ip(body), None);
+ }
+
+ #[test]
+ fn test_provider_parse() {
+ assert!(matches!(
+ ProviderType::parse("cloudflare.trace").unwrap(),
+ ProviderType::CloudflareTrace { url: None }
+ ));
+ assert!(matches!(
+ ProviderType::parse("cloudflare.doh").unwrap(),
+ ProviderType::CloudflareDOH
+ ));
+ assert!(matches!(
+ ProviderType::parse("ipify").unwrap(),
+ ProviderType::Ipify
+ ));
+ assert!(matches!(
+ ProviderType::parse("local").unwrap(),
+ ProviderType::Local
+ ));
+ assert!(matches!(
+ ProviderType::parse("none").unwrap(),
+ ProviderType::None
+ ));
+ }
+
+ #[test]
+ fn test_provider_parse_literal() {
+ match ProviderType::parse("literal:1.2.3.4,5.6.7.8").unwrap() {
+ ProviderType::Literal { ips } => {
+ assert_eq!(ips.len(), 2);
+ }
+ _ => panic!("Expected Literal provider"),
+ }
+ }
+
+ #[test]
+ fn test_provider_parse_local_iface() {
+ match ProviderType::parse("local.iface:eth0").unwrap() {
+ ProviderType::LocalIface { interface } => {
+ assert_eq!(interface, "eth0");
+ }
+ _ => panic!("Expected LocalIface provider"),
+ }
+ }
+
+ #[test]
+ fn test_provider_parse_custom_url() {
+ match ProviderType::parse("url:https://example.com/ip").unwrap() {
+ ProviderType::CustomURL { url } => {
+ assert_eq!(url, "https://example.com/ip");
+ }
+ _ => panic!("Expected CustomURL provider"),
+ }
+ }
+
+ // ---- build_dns_query ----
+
+ #[test]
+ fn test_build_dns_query_header_structure() {
+ let name = b"\x06whoami\x0Acloudflare\x00";
+ let query = build_dns_query(name, 16, 3);
+
+ // Header is 12 bytes
+ assert!(query.len() >= 12);
+
+ // Flags: 0x0100 (standard query, RD=1)
+ assert_eq!(query[2], 0x01);
+ assert_eq!(query[3], 0x00);
+
+ // QDCOUNT = 1
+ assert_eq!(u16::from_be_bytes([query[4], query[5]]), 1);
+
+ // ANCOUNT, NSCOUNT, ARCOUNT = 0
+ assert_eq!(u16::from_be_bytes([query[6], query[7]]), 0);
+ assert_eq!(u16::from_be_bytes([query[8], query[9]]), 0);
+ assert_eq!(u16::from_be_bytes([query[10], query[11]]), 0);
+
+ // After 12-byte header, the name bytes should be present
+ let name_start = 12;
+ let name_end = name_start + name.len();
+ assert_eq!(&query[name_start..name_end], name);
+
+ // Then QTYPE and QCLASS
+ let qtype = u16::from_be_bytes([query[name_end], query[name_end + 1]]);
+ let qclass = u16::from_be_bytes([query[name_end + 2], query[name_end + 3]]);
+ assert_eq!(qtype, 16);
+ assert_eq!(qclass, 3);
+
+ // Total length: 12 + name.len() + 4
+ assert_eq!(query.len(), 12 + name.len() + 4);
+ }
+
+ // ---- parse_dns_txt_response ----
+
+ /// Helper: build a minimal valid DNS TXT response
+ fn build_test_dns_response(txt: &str) -> Vec<u8> {
+ let mut data = Vec::new();
+ // Header (12 bytes)
+ data.extend_from_slice(&[0x00, 0x01]); // ID
+ data.extend_from_slice(&[0x81, 0x00]); // Flags: QR=1, RD=1, RCODE=0
+ data.extend_from_slice(&[0x00, 0x01]); // QDCOUNT=1
+ data.extend_from_slice(&[0x00, 0x01]); // ANCOUNT=1
+ data.extend_from_slice(&[0x00, 0x00]); // NSCOUNT=0
+ data.extend_from_slice(&[0x00, 0x00]); // ARCOUNT=0
+ // Question section: name = \x04test\x00
+ data.extend_from_slice(b"\x04test\x00");
+ data.extend_from_slice(&[0x00, 0x10]); // QTYPE=TXT
+ data.extend_from_slice(&[0x00, 0x01]); // QCLASS=IN
+ // Answer section: name pointer to offset 12
+ data.extend_from_slice(&[0xC0, 0x0C]); // pointer to question name
+ data.extend_from_slice(&[0x00, 0x10]); // TYPE=TXT
+ data.extend_from_slice(&[0x00, 0x01]); // CLASS=IN
+ data.extend_from_slice(&[0x00, 0x00, 0x00, 0x3C]); // TTL=60
+ let rdlength = (1 + txt.len()) as u16;
+ data.extend_from_slice(&rdlength.to_be_bytes()); // RDLENGTH
+ data.push(txt.len() as u8); // TXT string length
+ data.extend_from_slice(txt.as_bytes());
+ data
+ }
+
+ #[test]
+ fn test_parse_dns_txt_response_valid() {
+ let data = build_test_dns_response("203.0.113.42");
+ let result = parse_dns_txt_response(&data);
+ assert_eq!(result, Some("203.0.113.42".to_string()));
+ }
+
+ #[test]
+ fn test_parse_dns_txt_response_strips_quotes() {
+ let data = build_test_dns_response("\"1.2.3.4\"");
+ let result = parse_dns_txt_response(&data);
+ assert_eq!(result, Some("1.2.3.4".to_string()));
+ }
+
+ #[test]
+ fn test_parse_dns_txt_response_empty() {
+ let result = parse_dns_txt_response(&[]);
+ assert_eq!(result, None);
+ }
+
+ #[test]
+ fn test_parse_dns_txt_response_too_short() {
+ let result = parse_dns_txt_response(&[0u8; 11]);
+ assert_eq!(result, None);
+ }
+
+ #[test]
+ fn test_parse_dns_txt_response_not_response() {
+ // QR bit not set (byte 2 bit 7 = 0)
+ let mut data = build_test_dns_response("1.2.3.4");
+ data[2] = 0x01; // clear QR bit
+ assert_eq!(parse_dns_txt_response(&data), None);
+ }
+
+ #[test]
+ fn test_parse_dns_txt_response_nonzero_rcode() {
+ let mut data = build_test_dns_response("1.2.3.4");
+ data[3] = 0x03; // RCODE = NXDOMAIN
+ assert_eq!(parse_dns_txt_response(&data), None);
+ }
+
+ #[test]
+ fn test_parse_dns_txt_response_zero_ancount() {
+ let mut data = build_test_dns_response("1.2.3.4");
+ data[6] = 0x00;
+ data[7] = 0x00; // ANCOUNT = 0
+ assert_eq!(parse_dns_txt_response(&data), None);
+ }
+
+ #[test]
+ fn test_parse_dns_txt_response_pointer_compressed_name() {
+ // The build_test_dns_response already uses pointer compression in the answer name
+ let data = build_test_dns_response("10.0.0.1");
+ // Verify it parses correctly with pointer compression
+ assert_eq!(parse_dns_txt_response(&data), Some("10.0.0.1".to_string()));
+ }
+
+ // ---- skip_dns_name ----
+
+ #[test]
+ fn test_skip_dns_name_normal_labels() {
+ // \x03www\x07example\x03com\x00
+ let data = b"\x03www\x07example\x03com\x00";
+ let result = skip_dns_name(data, 0);
+ assert_eq!(result, Some(data.len()));
+ }
+
+ #[test]
+ fn test_skip_dns_name_pointer() {
+ // A pointer: 0xC0 0x0C
+ let data = [0xC0, 0x0C];
+ let result = skip_dns_name(&data, 0);
+ assert_eq!(result, Some(2));
+ }
+
+ #[test]
+ fn test_skip_dns_name_empty_input() {
+ let result = skip_dns_name(&[], 0);
+ assert_eq!(result, None);
+ }
+
+ #[test]
+ fn test_skip_dns_name_root() {
+ // Root name: just \x00
+ let data = [0x00];
+ let result = skip_dns_name(&data, 0);
+ assert_eq!(result, Some(1));
+ }
+
+ // ---- detect_cloudflare_trace with wiremock ----
+
+ use wiremock::{Mock, MockServer, ResponseTemplate, matchers::{method, path}};
+ use crate::pp::PP;
+
+ #[tokio::test]
+ async fn test_detect_cloudflare_trace_primary_succeeds() {
+ let server = MockServer::start().await;
+ let trace_body = "fl=1f1\nh=test\nip=93.184.216.34\nts=123\n";
+
+ Mock::given(method("GET"))
+ .and(path("/cdn-cgi/trace"))
+ .respond_with(ResponseTemplate::new(200).set_body_string(trace_body))
+ .mount(&server)
+ .await;
+
+ let client = Client::new();
+ let ppfmt = PP::default_pp();
+ let url = format!("{}/cdn-cgi/trace", server.uri());
+ let timeout = Duration::from_secs(5);
+
+ let result = detect_cloudflare_trace(
+ &client,
+ IpType::V4,
+ timeout,
+ Some(&url),
+ &ppfmt,
+ )
+ .await;
+
+ assert_eq!(result.len(), 1);
+ assert_eq!(result[0], "93.184.216.34".parse::<IpAddr>().unwrap());
+ }
+
+ #[tokio::test]
+ async fn test_detect_cloudflare_trace_primary_fails_fallback_succeeds() {
+ let primary = MockServer::start().await;
+ let fallback = MockServer::start().await;
+
+ // Primary returns 500
+ Mock::given(method("GET"))
+ .and(path("/cdn-cgi/trace"))
+ .respond_with(ResponseTemplate::new(500))
+ .mount(&primary)
+ .await;
+
+ // Fallback returns valid trace
+ let trace_body = "fl=1f1\nip=93.184.216.34\n";
+ Mock::given(method("GET"))
+ .and(path("/cdn-cgi/trace"))
+ .respond_with(ResponseTemplate::new(200).set_body_string(trace_body))
+ .mount(&fallback)
+ .await;
+
+ // We can't override the hardcoded primary/fallback URLs, but we can test
+ // the custom URL path: first with a failing URL, then a succeeding one.
+ let client = Client::new();
+ let ppfmt = PP::default_pp();
+ let timeout = Duration::from_secs(5);
+
+ // Custom URL pointing to primary (which fails with 500 -> no ip= line parseable from error page)
+ let result_fail = detect_cloudflare_trace(
+ &client,
+ IpType::V4,
+ timeout,
+ Some(&format!("{}/cdn-cgi/trace", primary.uri())),
+ &ppfmt,
+ )
+ .await;
+ assert!(result_fail.is_empty());
+
+ // Custom URL pointing to fallback (which succeeds)
+ let result_ok = detect_cloudflare_trace(
+ &client,
+ IpType::V4,
+ timeout,
+ Some(&format!("{}/cdn-cgi/trace", fallback.uri())),
+ &ppfmt,
+ )
+ .await;
+ assert_eq!(result_ok.len(), 1);
+ assert_eq!(result_ok[0], "93.184.216.34".parse::<IpAddr>().unwrap());
+ }
+
+ // ---- detect_ipify with wiremock ----
+
+ #[tokio::test]
+ async fn test_detect_ipify_v4() {
+ let server = MockServer::start().await;
+
+ Mock::given(method("GET"))
+ .and(path("/"))
+ .respond_with(ResponseTemplate::new(200).set_body_string("198.51.100.1\n"))
+ .mount(&server)
+ .await;
+
+ let client = Client::new();
+ let ppfmt = PP::default_pp();
+ let timeout = Duration::from_secs(5);
+
+ // detect_ipify uses hardcoded URLs, so we test via detect_custom_url instead
+ // which uses the same logic
+ let result = detect_custom_url(&client, &server.uri(), IpType::V4, timeout, &ppfmt).await;
+ assert_eq!(result.len(), 1);
+ assert_eq!(result[0], "198.51.100.1".parse::<IpAddr>().unwrap());
+ }
+
+ #[tokio::test]
+ async fn test_detect_ipify_v6() {
+ let server = MockServer::start().await;
+
+ Mock::given(method("GET"))
+ .and(path("/"))
+ .respond_with(
+ ResponseTemplate::new(200).set_body_string("2001:db8::1\n"),
+ )
+ .mount(&server)
+ .await;
+
+ let client = Client::new();
+ let ppfmt = PP::default_pp();
+ let timeout = Duration::from_secs(5);
+
+ let result = detect_custom_url(&client, &server.uri(), IpType::V6, timeout, &ppfmt).await;
+ assert_eq!(result.len(), 1);
+ assert_eq!(result[0], "2001:db8::1".parse::<IpAddr>().unwrap());
+ }
+
+ // ---- detect_custom_url with wiremock ----
+
+ #[tokio::test]
+ async fn test_detect_custom_url_success() {
+ let server = MockServer::start().await;
+
+ Mock::given(method("GET"))
+ .and(path("/my-ip"))
+ .respond_with(ResponseTemplate::new(200).set_body_string("10.0.0.1"))
+ .mount(&server)
+ .await;
+
+ let client = Client::new();
+ let ppfmt = PP::default_pp();
+ let timeout = Duration::from_secs(5);
+ let url = format!("{}/my-ip", server.uri());
+
+ // 10.0.0.1 is a valid IPv4, should match V4
+ let result = detect_custom_url(&client, &url, IpType::V4, timeout, &ppfmt).await;
+ assert_eq!(result.len(), 1);
+ assert_eq!(result[0], "10.0.0.1".parse::<IpAddr>().unwrap());
+ }
+
+ #[tokio::test]
+ async fn test_detect_custom_url_wrong_ip_type() {
+ let server = MockServer::start().await;
+
+ Mock::given(method("GET"))
+ .and(path("/my-ip"))
+ .respond_with(ResponseTemplate::new(200).set_body_string("10.0.0.1"))
+ .mount(&server)
+ .await;
+
+ let client = Client::new();
+ let ppfmt = PP::default_pp();
+ let timeout = Duration::from_secs(5);
+ let url = format!("{}/my-ip", server.uri());
+
+ // 10.0.0.1 is IPv4 but we ask for V6 -> empty
+ let result = detect_custom_url(&client, &url, IpType::V6, timeout, &ppfmt).await;
+ assert!(result.is_empty());
+ }
+
+ // ---- detect_local ----
+
+ #[test]
+ fn test_detect_local_returns_results_or_empty() {
+ let ppfmt = PP::default_pp();
+ // detect_local may return an IP or an empty vec depending on environment
+ let result_v4 = detect_local(IpType::V4, &ppfmt);
+ for ip in &result_v4 {
+ assert!(ip.is_ipv4());
+ }
+ let result_v6 = detect_local(IpType::V6, &ppfmt);
+ for ip in &result_v6 {
+ assert!(ip.is_ipv6());
+ }
+ }
+
+ // ---- matches_ip_type ----
+
+ #[test]
+ fn test_matches_ip_type_v4() {
+ let v4: IpAddr = "1.2.3.4".parse().unwrap();
+ assert!(matches_ip_type(&v4, IpType::V4));
+ assert!(!matches_ip_type(&v4, IpType::V6));
+ }
+
+ #[test]
+ fn test_matches_ip_type_v6() {
+ let v6: IpAddr = "::1".parse().unwrap();
+ assert!(!matches_ip_type(&v6, IpType::V4));
+ assert!(matches_ip_type(&v6, IpType::V6));
+ }
+
+ // ---- filter_ips_by_type ----
+
+ #[test]
+ fn test_filter_ips_by_type_mixed() {
+ let ips: Vec<IpAddr> = vec![
+ "1.2.3.4".parse().unwrap(),
+ "::1".parse().unwrap(),
+ "5.6.7.8".parse().unwrap(),
+ "2001:db8::1".parse().unwrap(),
+ ];
+
+ let v4s = filter_ips_by_type(&ips, IpType::V4);
+ assert_eq!(v4s.len(), 2);
+ assert!(v4s.iter().all(|ip| ip.is_ipv4()));
+
+ let v6s = filter_ips_by_type(&ips, IpType::V6);
+ assert_eq!(v6s.len(), 2);
+ assert!(v6s.iter().all(|ip| ip.is_ipv6()));
+ }
+
+ #[test]
+ fn test_filter_ips_by_type_empty() {
+ let ips: Vec<IpAddr> = vec![];
+ assert!(filter_ips_by_type(&ips, IpType::V4).is_empty());
+ assert!(filter_ips_by_type(&ips, IpType::V6).is_empty());
+ }
+
+ // ---- is_global_v4 ----
+
+ #[test]
+ fn test_is_global_v4_private() {
+ assert!(!is_global_v4(&Ipv4Addr::new(10, 0, 0, 1)));
+ assert!(!is_global_v4(&Ipv4Addr::new(172, 16, 0, 1)));
+ assert!(!is_global_v4(&Ipv4Addr::new(192, 168, 1, 1)));
+ }
+
+ #[test]
+ fn test_is_global_v4_loopback() {
+ assert!(!is_global_v4(&Ipv4Addr::new(127, 0, 0, 1)));
+ }
+
+ #[test]
+ fn test_is_global_v4_link_local() {
+ assert!(!is_global_v4(&Ipv4Addr::new(169, 254, 0, 1)));
+ }
+
+ #[test]
+ fn test_is_global_v4_broadcast() {
+ assert!(!is_global_v4(&Ipv4Addr::new(255, 255, 255, 255)));
+ }
+
+ #[test]
+ fn test_is_global_v4_documentation() {
+ assert!(!is_global_v4(&Ipv4Addr::new(192, 0, 2, 1))); // 192.0.2.0/24
+ assert!(!is_global_v4(&Ipv4Addr::new(198, 51, 100, 1))); // 198.51.100.0/24
+ assert!(!is_global_v4(&Ipv4Addr::new(203, 0, 113, 1))); // 203.0.113.0/24
+ }
+
+ #[test]
+ fn test_is_global_v4_shared_address_space() {
+ assert!(!is_global_v4(&Ipv4Addr::new(100, 64, 0, 1)));
+ assert!(!is_global_v4(&Ipv4Addr::new(100, 127, 255, 254)));
+ // 100.128.x.x is outside the shared range
+ assert!(is_global_v4(&Ipv4Addr::new(100, 128, 0, 1)));
+ }
+
+ #[test]
+ fn test_is_global_v4_global() {
+ assert!(is_global_v4(&Ipv4Addr::new(8, 8, 8, 8)));
+ assert!(is_global_v4(&Ipv4Addr::new(1, 1, 1, 1)));
+ assert!(is_global_v4(&Ipv4Addr::new(93, 184, 216, 34)));
+ }
+
+ // ---- is_global_v6 ----
+
+ #[test]
+ fn test_is_global_v6_loopback() {
+ assert!(!is_global_v6(&Ipv6Addr::LOCALHOST));
+ }
+
+ #[test]
+ fn test_is_global_v6_link_local() {
+ // fe80::1
+ assert!(!is_global_v6(&Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 1)));
+ }
+
+ #[test]
+ fn test_is_global_v6_unique_local() {
+ // fc00::1
+ assert!(!is_global_v6(&Ipv6Addr::new(0xfc00, 0, 0, 0, 0, 0, 0, 1)));
+ // fd00::1
+ assert!(!is_global_v6(&Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 1)));
+ }
+
+ #[test]
+ fn test_is_global_v6_multicast() {
+ // ff02::1
+ assert!(!is_global_v6(&Ipv6Addr::new(0xff02, 0, 0, 0, 0, 0, 0, 1)));
+ }
+
+ #[test]
+ fn test_is_global_v6_global() {
+ // 2606:4700:4700::1111 (Cloudflare DNS)
+ assert!(is_global_v6(&Ipv6Addr::new(0x2606, 0x4700, 0x4700, 0, 0, 0, 0, 0x1111)));
+ // 2001:db8::1 is documentation, but our impl doesn't explicitly exclude it
+ // so it should be considered global by our function
+ assert!(is_global_v6(&Ipv6Addr::new(0x2001, 0x0db8, 0, 0, 0, 0, 0, 1)));
+ }
+
+ // ---- ProviderType::name ----
+
+ #[test]
+ fn test_provider_type_name() {
+ assert_eq!(ProviderType::CloudflareTrace { url: None }.name(), "cloudflare.trace");
+ assert_eq!(
+ ProviderType::CloudflareTrace { url: Some("https://x".into()) }.name(),
+ "cloudflare.trace"
+ );
+ assert_eq!(ProviderType::CloudflareDOH.name(), "cloudflare.doh");
+ assert_eq!(ProviderType::Ipify.name(), "ipify");
+ assert_eq!(ProviderType::Local.name(), "local");
+ assert_eq!(
+ ProviderType::LocalIface { interface: "eth0".into() }.name(),
+ "local.iface"
+ );
+ assert_eq!(
+ ProviderType::CustomURL { url: "https://x".into() }.name(),
+ "url:"
+ );
+ assert_eq!(
+ ProviderType::Literal { ips: vec![] }.name(),
+ "literal:"
+ );
+ assert_eq!(ProviderType::None.name(), "none");
+ }
+
+ // ---- ProviderType::parse error cases ----
+
+ #[test]
+ fn test_provider_parse_invalid_url_scheme() {
+ let result = ProviderType::parse("url:ftp://example.com");
+ assert!(result.is_err());
+ assert!(result.unwrap_err().contains("http or https"));
+ }
+
+ #[test]
+ fn test_provider_parse_invalid_literal_ip() {
+ let result = ProviderType::parse("literal:not_an_ip");
+ assert!(result.is_err());
+ assert!(result.unwrap_err().contains("Invalid IP"));
+ }
+
+ #[test]
+ fn test_provider_parse_unknown() {
+ let result = ProviderType::parse("totally_unknown");
+ assert!(result.is_err());
+ assert!(result.unwrap_err().contains("Unknown provider"));
+ }
+
+ // ---- ProviderType::Literal - detect_ips filters by ip_type ----
+
+ #[tokio::test]
+ async fn test_literal_detect_ips_filters_v4() {
+ let provider = ProviderType::Literal {
+ ips: vec![
+ "1.2.3.4".parse().unwrap(),
+ "::1".parse().unwrap(),
+ "5.6.7.8".parse().unwrap(),
+ ],
+ };
+ let client = Client::new();
+ let ppfmt = PP::default_pp();
+ let timeout = Duration::from_secs(5);
+
+ let result = provider.detect_ips(&client, IpType::V4, timeout, &ppfmt).await;
+ assert_eq!(result.len(), 2);
+ assert!(result.iter().all(|ip| ip.is_ipv4()));
+ }
+
+ #[tokio::test]
+ async fn test_literal_detect_ips_filters_v6() {
+ let provider = ProviderType::Literal {
+ ips: vec![
+ "1.2.3.4".parse().unwrap(),
+ "::1".parse().unwrap(),
+ "2001:db8::1".parse().unwrap(),
+ ],
+ };
+ let client = Client::new();
+ let ppfmt = PP::default_pp();
+ let timeout = Duration::from_secs(5);
+
+ let result = provider.detect_ips(&client, IpType::V6, timeout, &ppfmt).await;
+ assert_eq!(result.len(), 2);
+ assert!(result.iter().all(|ip| ip.is_ipv6()));
+ }
+
+ // ---- ProviderType::None - detect_ips returns empty ----
+
+ #[tokio::test]
+ async fn test_none_detect_ips_returns_empty() {
+ let provider = ProviderType::None;
+ let client = Client::new();
+ let ppfmt = PP::default_pp();
+ let timeout = Duration::from_secs(5);
+
+ let result_v4 = provider.detect_ips(&client, IpType::V4, timeout, &ppfmt).await;
+ assert!(result_v4.is_empty());
+
+ let result_v6 = provider.detect_ips(&client, IpType::V6, timeout, &ppfmt).await;
+ assert!(result_v6.is_empty());
+ }
+}
diff --git a/src/updater.rs b/src/updater.rs
new file mode 100644
index 0000000..b3ee7e1
--- /dev/null
+++ b/src/updater.rs
@@ -0,0 +1,2375 @@
+use crate::cloudflare::{CloudflareHandle, SetResult};
+use crate::config::{AppConfig, LegacyCloudflareEntry, LegacySubdomainEntry};
+use crate::domain::make_fqdn;
+use crate::notifier::{CompositeNotifier, Heartbeat, Message};
+use crate::pp::{self, PP};
+use crate::provider::IpType;
+use reqwest::Client;
+use std::collections::HashMap;
+use std::net::IpAddr;
+use std::time::Duration;
+
+/// Run a single update cycle.
+pub async fn update_once(
+ config: &AppConfig,
+ handle: &CloudflareHandle,
+ notifier: &CompositeNotifier,
+ heartbeat: &Heartbeat,
+ ppfmt: &PP,
+) -> bool {
+ let detection_client = Client::builder()
+ .timeout(config.detection_timeout)
+ .build()
+ .unwrap_or_default();
+
+ let mut all_ok = true;
+ let mut messages = Vec::new();
+
+ if config.legacy_mode {
+ all_ok = update_legacy(config, ppfmt).await;
+ } else {
+ // Detect IPs for each provider
+ let mut detected_ips: HashMap<IpType, Vec<IpAddr>> = HashMap::new();
+
+ for (ip_type, provider) in &config.providers {
+ ppfmt.infof(
+ pp::EMOJI_DETECT,
+ &format!("Detecting {} via {}", ip_type.describe(), provider.name()),
+ );
+ let ips = provider
+ .detect_ips(&detection_client, *ip_type, config.detection_timeout, ppfmt)
+ .await;
+
+ if ips.is_empty() {
+ ppfmt.warningf(
+ pp::EMOJI_WARNING,
+ &format!("No {} address detected", ip_type.describe()),
+ );
+ messages.push(Message::new_fail(&format!(
+ "Failed to detect {} address",
+ ip_type.describe()
+ )));
+ } else {
+ let ip_strs: Vec<String> = ips.iter().map(|ip| ip.to_string()).collect();
+ ppfmt.infof(
+ pp::EMOJI_DETECT,
+ &format!("Detected {}: {}", ip_type.describe(), ip_strs.join(", ")),
+ );
+ messages.push(Message::new_ok(&format!(
+ "Detected {}: {}",
+ ip_type.describe(),
+ ip_strs.join(", ")
+ )));
+ detected_ips.insert(*ip_type, ips);
+ }
+ }
+
+ // Update DNS records (env var mode - domain-based)
+ for (ip_type, domains) in &config.domains {
+ let ips = detected_ips.get(ip_type).cloned().unwrap_or_default();
+ let record_type = ip_type.record_type();
+
+ for domain_str in domains {
+ // Find zone ID for this domain
+ let zone_id = match handle.zone_id_of_domain(domain_str, ppfmt).await {
+ Some(id) => id,
+ None => {
+ ppfmt.errorf(
+ pp::EMOJI_ERROR,
+ &format!("Could not find zone for domain {domain_str}"),
+ );
+ all_ok = false;
+ messages.push(Message::new_fail(&format!(
+ "Failed to find zone for {domain_str}"
+ )));
+ continue;
+ }
+ };
+
+ let proxied = config
+ .proxied_expression
+ .as_ref()
+ .map(|f| f(domain_str))
+ .unwrap_or(false);
+
+ let result = handle
+ .set_ips(
+ &zone_id,
+ domain_str,
+ record_type,
+ &ips,
+ proxied,
+ config.ttl,
+ config.record_comment.as_deref(),
+ config.dry_run,
+ ppfmt,
+ )
+ .await;
+
+ match result {
+ SetResult::Updated => {
+ let ip_strs: Vec<String> = ips.iter().map(|ip| ip.to_string()).collect();
+ messages.push(Message::new_ok(&format!(
+ "Updated {domain_str} -> {}",
+ ip_strs.join(", ")
+ )));
+ }
+ SetResult::Failed => {
+ all_ok = false;
+ messages.push(Message::new_fail(&format!(
+ "Failed to update {domain_str}"
+ )));
+ }
+ SetResult::Noop => {}
+ }
+ }
+ }
+
+ // Update WAF lists
+ for waf_list in &config.waf_lists {
+ // Collect all detected IPs for WAF lists
+ let all_ips: Vec<IpAddr> = detected_ips
+ .values()
+ .flatten()
+ .copied()
+ .collect();
+
+ let result = handle
+ .set_waf_list(
+ waf_list,
+ &all_ips,
+ config.waf_list_item_comment.as_deref(),
+ config.waf_list_description.as_deref(),
+ config.dry_run,
+ ppfmt,
+ )
+ .await;
+
+ match result {
+ SetResult::Updated => {
+ messages.push(Message::new_ok(&format!(
+ "Updated WAF list {}",
+ waf_list.describe()
+ )));
+ }
+ SetResult::Failed => {
+ all_ok = false;
+ messages.push(Message::new_fail(&format!(
+ "Failed to update WAF list {}",
+ waf_list.describe()
+ )));
+ }
+ SetResult::Noop => {}
+ }
+ }
+ }
+
+ // Send heartbeat
+ let heartbeat_msg = Message::merge(messages.clone());
+ heartbeat.ping(&heartbeat_msg).await;
+
+ // Send notifications
+ let notifier_msg = Message::merge(messages);
+ notifier.send(&notifier_msg).await;
+
+ all_ok
+}
+
+/// Run legacy mode update (using the original cloudflare-ddns logic with zone_id-based config).
+async fn update_legacy(config: &AppConfig, _ppfmt: &PP) -> bool {
+ let legacy = match &config.legacy_config {
+ Some(l) => l,
+ None => return false,
+ };
+
+ let client = Client::builder()
+ .timeout(config.update_timeout)
+ .build()
+ .unwrap_or_default();
+
+ let ddns = LegacyDdnsClient {
+ client,
+ cf_api_base: "https://api.cloudflare.com/client/v4".to_string(),
+ ipv4_urls: vec![
+ "https://1.1.1.1/cdn-cgi/trace".to_string(),
+ "https://1.0.0.1/cdn-cgi/trace".to_string(),
+ ],
+ ipv6_urls: vec![
+ "https://[2606:4700:4700::1111]/cdn-cgi/trace".to_string(),
+ "https://[2606:4700:4700::1001]/cdn-cgi/trace".to_string(),
+ ],
+ dry_run: config.dry_run,
+ };
+
+ let mut warnings = LegacyWarningState::default();
+
+ let ips = ddns
+ .get_ips(
+ legacy.a,
+ legacy.aaaa,
+ legacy.purge_unknown_records,
+ &legacy.cloudflare,
+ &mut warnings,
+ )
+ .await;
+
+ ddns.update_ips(
+ &ips,
+ &legacy.cloudflare,
+ legacy.ttl,
+ legacy.purge_unknown_records,
+ )
+ .await;
+
+ true
+}
+
+/// Delete records on stop (for env var mode).
+pub async fn final_delete(
+ config: &AppConfig,
+ handle: &CloudflareHandle,
+ notifier: &CompositeNotifier,
+ heartbeat: &Heartbeat,
+ ppfmt: &PP,
+) {
+ let mut messages = Vec::new();
+
+ // Delete DNS records
+ for (ip_type, domains) in &config.domains {
+ let record_type = ip_type.record_type();
+
+ for domain_str in domains {
+ if let Some(zone_id) = handle.zone_id_of_domain(domain_str, ppfmt).await {
+ handle.final_delete(&zone_id, domain_str, record_type, ppfmt).await;
+ messages.push(Message::new_ok(&format!("Deleted records for {domain_str}")));
+ }
+ }
+ }
+
+ // Clear WAF lists
+ for waf_list in &config.waf_lists {
+ handle.final_clear_waf_list(waf_list, ppfmt).await;
+ messages.push(Message::new_ok(&format!(
+ "Cleared WAF list {}",
+ waf_list.describe()
+ )));
+ }
+
+ // Send notifications
+ let msg = Message::merge(messages);
+ heartbeat.exit(&msg).await;
+ notifier.send(&msg).await;
+}
+
+// ============================================================
+// Legacy DDNS Client (preserved for backwards compatibility)
+// ============================================================
+
+pub struct LegacyIpInfo {
+ pub record_type: String,
+ pub ip: String,
+}
+
+struct LegacyWarningState {
+ shown_ipv4: bool,
+ shown_ipv4_secondary: bool,
+ shown_ipv6: bool,
+ shown_ipv6_secondary: bool,
+}
+
+impl Default for LegacyWarningState {
+ fn default() -> Self {
+ Self {
+ shown_ipv4: false,
+ shown_ipv4_secondary: false,
+ shown_ipv6: false,
+ shown_ipv6_secondary: false,
+ }
+ }
+}
+
+struct LegacyDdnsClient {
+ client: Client,
+ cf_api_base: String,
+ ipv4_urls: Vec<String>,
+ ipv6_urls: Vec<String>,
+ dry_run: bool,
+}
+
+impl LegacyDdnsClient {
+ async fn get_ips(
+ &self,
+ ipv4_enabled: bool,
+ ipv6_enabled: bool,
+ purge_unknown_records: bool,
+ config: &[LegacyCloudflareEntry],
+ warnings: &mut LegacyWarningState,
+ ) -> HashMap<String, LegacyIpInfo> {
+ let mut ips = HashMap::new();
+
+ if ipv4_enabled {
+ let a = self
+ .try_trace_urls(
+ &self.ipv4_urls,
+ &mut warnings.shown_ipv4,
+ &mut warnings.shown_ipv4_secondary,
+ "IPv4",
+ )
+ .await;
+ if a.is_none() && purge_unknown_records {
+ self.delete_entries("A", config).await;
+ }
+ if let Some(ip) = a {
+ ips.insert(
+ "ipv4".to_string(),
+ LegacyIpInfo {
+ record_type: "A".to_string(),
+ ip,
+ },
+ );
+ }
+ }
+
+ if ipv6_enabled {
+ let aaaa = self
+ .try_trace_urls(
+ &self.ipv6_urls,
+ &mut warnings.shown_ipv6,
+ &mut warnings.shown_ipv6_secondary,
+ "IPv6",
+ )
+ .await;
+ if aaaa.is_none() && purge_unknown_records {
+ self.delete_entries("AAAA", config).await;
+ }
+ if let Some(ip) = aaaa {
+ ips.insert(
+ "ipv6".to_string(),
+ LegacyIpInfo {
+ record_type: "AAAA".to_string(),
+ ip,
+ },
+ );
+ }
+ }
+
+ ips
+ }
+
+ async fn try_trace_urls(
+ &self,
+ urls: &[String],
+ shown_primary: &mut bool,
+ shown_secondary: &mut bool,
+ label: &str,
+ ) -> Option<String> {
+ for (i, url) in urls.iter().enumerate() {
+ match self.client.get(url).send().await {
+ Ok(resp) => {
+ if let Some(ip) =
+ crate::provider::parse_trace_ip(&resp.text().await.unwrap_or_default())
+ {
+ return Some(ip);
+ }
+ }
+ Err(_) => {
+ if i == 0 && !*shown_primary {
+ *shown_primary = true;
+ let next = if urls.len() > 1 {
+ ", trying fallback"
+ } else {
+ ""
+ };
+ eprintln!("{label} not detected via primary{next}");
+ } else if i > 0 && !*shown_secondary {
+ *shown_secondary = true;
+ eprintln!("{label} not detected via fallback. Verify your ISP or DNS provider isn't blocking Cloudflare's IPs.");
+ }
+ }
+ }
+ }
+ None
+ }
+
+ async fn cf_api<T: serde::de::DeserializeOwned>(
+ &self,
+ endpoint: &str,
+ method: &str,
+ entry: &LegacyCloudflareEntry,
+ body: Option<&impl serde::Serialize>,
+ ) -> Option<T> {
+ let url = format!("{}/{endpoint}", self.cf_api_base);
+
+ let mut req = match method {
+ "GET" => self.client.get(&url),
+ "POST" => self.client.post(&url),
+ "PUT" => self.client.put(&url),
+ "PATCH" => self.client.patch(&url),
+ "DELETE" => self.client.delete(&url),
+ _ => return None,
+ };
+
+ if !entry.authentication.api_token.is_empty()
+ && entry.authentication.api_token != "api_token_here"
+ {
+ req = req.header(
+ "Authorization",
+ format!("Bearer {}", entry.authentication.api_token),
+ );
+ } else if let Some(api_key) = &entry.authentication.api_key {
+ req = req
+ .header("X-Auth-Email", &api_key.account_email)
+ .header("X-Auth-Key", &api_key.api_key);
+ }
+
+ if let Some(b) = body {
+ req = req.json(b);
+ }
+
+ match req.send().await {
+ Ok(resp) => {
+ if resp.status().is_success() {
+ resp.json::<T>().await.ok()
+ } else {
+ let url_str = resp.url().to_string();
+ let text = resp.text().await.unwrap_or_default();
+ eprintln!("Error sending '{method}' request to '{url_str}': {text}");
+ None
+ }
+ }
+ Err(e) => {
+ eprintln!("Exception sending '{method}' request to '{endpoint}': {e}");
+ None
+ }
+ }
+ }
+
+ async fn delete_entries(&self, record_type: &str, entries: &[LegacyCloudflareEntry]) {
+ for entry in entries {
+ let endpoint = format!(
+ "zones/{}/dns_records?per_page=100&type={record_type}",
+ entry.zone_id
+ );
+ let answer: Option>> =
+ self.cf_api(&endpoint, "GET", entry, None::<&()>.as_ref())
+ .await;
+
+ if let Some(resp) = answer {
+ if let Some(records) = resp.result {
+ for record in records {
+ if self.dry_run {
+ println!("[DRY RUN] Would delete stale record {}", record.id);
+ continue;
+ }
+ let del_endpoint = format!(
+ "zones/{}/dns_records/{}",
+ entry.zone_id, record.id
+ );
+ let _: Option<serde_json::Value> = self
+ .cf_api(&del_endpoint, "DELETE", entry, None::<&()>.as_ref())
+ .await;
+ println!("Deleted stale record {}", record.id);
+ }
+ }
+ } else {
+ tokio::time::sleep(Duration::from_secs(5)).await;
+ }
+ }
+ }
+
+ async fn update_ips(
+ &self,
+ ips: &HashMap<String, LegacyIpInfo>,
+ config: &[LegacyCloudflareEntry],
+ ttl: i64,
+ purge_unknown_records: bool,
+ ) {
+ for ip in ips.values() {
+ self.commit_record(ip, config, ttl, purge_unknown_records)
+ .await;
+ }
+ }
+
+ async fn commit_record(
+ &self,
+ ip: &LegacyIpInfo,
+ config: &[LegacyCloudflareEntry],
+ ttl: i64,
+ purge_unknown_records: bool,
+ ) {
+ for entry in config {
+ let zone_resp: Option> = self
+ .cf_api(
+ &format!("zones/{}", entry.zone_id),
+ "GET",
+ entry,
+ None::<&()>.as_ref(),
+ )
+ .await;
+
+ let base_domain = match zone_resp.and_then(|r| r.result) {
+ Some(z) => z.name,
+ None => {
+ tokio::time::sleep(Duration::from_secs(5)).await;
+ continue;
+ }
+ };
+
+ for subdomain in &entry.subdomains {
+ let (name, proxied) = match subdomain {
+ LegacySubdomainEntry::Detailed { name, proxied } => {
+ (name.to_lowercase().trim().to_string(), *proxied)
+ }
+ LegacySubdomainEntry::Simple(name) => {
+ (name.to_lowercase().trim().to_string(), entry.proxied)
+ }
+ };
+
+ let fqdn = make_fqdn(&name, &base_domain);
+
+ let record = LegacyDnsRecordPayload {
+ record_type: ip.record_type.clone(),
+ name: fqdn.clone(),
+ content: ip.ip.clone(),
+ proxied,
+ ttl,
+ };
+
+ let dns_endpoint = format!(
+ "zones/{}/dns_records?per_page=100&type={}",
+ entry.zone_id, ip.record_type
+ );
+ let dns_records: Option>> =
+ self.cf_api(&dns_endpoint, "GET", entry, None::<&()>.as_ref())
+ .await;
+
+ let mut identifier: Option<String> = None;
+ let mut modified = false;
+ let mut duplicate_ids: Vec<String> = Vec::new();
+
+ if let Some(resp) = dns_records {
+ if let Some(records) = resp.result {
+ for r in &records {
+ if r.name == fqdn {
+ if let Some(ref existing_id) = identifier {
+ if r.content == ip.ip {
+ duplicate_ids.push(existing_id.clone());
+ identifier = Some(r.id.clone());
+ } else {
+ duplicate_ids.push(r.id.clone());
+ }
+ } else {
+ identifier = Some(r.id.clone());
+ if r.content != record.content
+ || r.proxied != record.proxied
+ {
+ modified = true;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if let Some(ref id) = identifier {
+ if modified {
+ if self.dry_run {
+ println!("[DRY RUN] Would update record {fqdn} -> {}", ip.ip);
+ } else {
+ println!("Updating record {fqdn} -> {}", ip.ip);
+ let update_endpoint =
+ format!("zones/{}/dns_records/{id}", entry.zone_id);
+ let _: Option<serde_json::Value> = self
+ .cf_api(&update_endpoint, "PUT", entry, Some(&record))
+ .await;
+ }
+ } else if self.dry_run {
+ println!("[DRY RUN] Record {fqdn} is up to date ({})", ip.ip);
+ }
+ } else if self.dry_run {
+ println!("[DRY RUN] Would add new record {fqdn} -> {}", ip.ip);
+ } else {
+ println!("Adding new record {fqdn} -> {}", ip.ip);
+ let create_endpoint = format!("zones/{}/dns_records", entry.zone_id);
+ let _: Option<serde_json::Value> = self
+ .cf_api(&create_endpoint, "POST", entry, Some(&record))
+ .await;
+ }
+
+ if purge_unknown_records {
+ for dup_id in &duplicate_ids {
+ if self.dry_run {
+ println!("[DRY RUN] Would delete stale record {dup_id}");
+ } else {
+ println!("Deleting stale record {dup_id}");
+ let del_endpoint =
+ format!("zones/{}/dns_records/{dup_id}", entry.zone_id);
+ let _: Option<serde_json::Value> = self
+ .cf_api(&del_endpoint, "DELETE", entry, None::<&()>.as_ref())
+ .await;
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::cloudflare::{Auth, CloudflareHandle, TTL, WAFList};
+ use crate::config::{AppConfig, CronSchedule};
+ use crate::notifier::{CompositeNotifier, Heartbeat};
+ use crate::pp::PP;
+ use crate::provider::{IpType, ProviderType};
+ use std::collections::HashMap;
+ use std::net::IpAddr;
+ use std::time::Duration;
+ use wiremock::matchers::{method, path, path_regex, query_param};
+ use wiremock::{Mock, MockServer, ResponseTemplate};
+
+ // -------------------------------------------------------
+ // Helpers
+ // -------------------------------------------------------
+
+ fn pp() -> PP {
+ // quiet=true suppresses output during tests
+ PP::new(false, true)
+ }
+
+ fn empty_notifier() -> CompositeNotifier {
+ CompositeNotifier::new(vec![])
+ }
+
+ fn empty_heartbeat() -> Heartbeat {
+ Heartbeat::new(vec![])
+ }
+
+ /// Build a minimal AppConfig for env-var (non-legacy) mode with a single V4 domain.
+ fn make_config(
+ providers: HashMap<IpType, ProviderType>,
+ domains: HashMap<IpType, Vec<String>>,
+ waf_lists: Vec<WAFList>,
+ dry_run: bool,
+ ) -> AppConfig {
+ AppConfig {
+ auth: Auth::Token("test-token".to_string()),
+ providers,
+ domains,
+ waf_lists,
+ update_cron: CronSchedule::Once,
+ update_on_start: true,
+ delete_on_stop: false,
+ ttl: TTL::AUTO,
+ proxied_expression: None,
+ record_comment: None,
+ managed_comment_regex: None,
+ waf_list_description: None,
+ waf_list_item_comment: None,
+ managed_waf_comment_regex: None,
+ detection_timeout: Duration::from_secs(5),
+ update_timeout: Duration::from_secs(5),
+ dry_run,
+ emoji: false,
+ quiet: true,
+ legacy_mode: false,
+ legacy_config: None,
+ repeat: false,
+ }
+ }
+
+ fn handle(base_url: &str) -> CloudflareHandle {
+ CloudflareHandle::with_base_url(base_url, Auth::Token("test-token".to_string()))
+ }
+
+ /// JSON for a Cloudflare zones list response returning a single zone.
+ fn zones_response(zone_id: &str, name: &str) -> serde_json::Value {
+ serde_json::json!({
+ "result": [{ "id": zone_id, "name": name }]
+ })
+ }
+
+ /// JSON for an empty zones list response (zone not found).
+ fn zones_empty_response() -> serde_json::Value {
+ serde_json::json!({ "result": [] })
+ }
+
+ /// JSON for an empty DNS records list.
+ fn dns_records_empty() -> serde_json::Value {
+ serde_json::json!({ "result": [] })
+ }
+
+ /// JSON for a DNS records list containing one record.
+ fn dns_records_one(id: &str, name: &str, content: &str) -> serde_json::Value {
+ serde_json::json!({
+ "result": [{
+ "id": id,
+ "name": name,
+ "content": content,
+ "proxied": false,
+ "ttl": 1,
+ "comment": null
+ }]
+ })
+ }
+
+ /// JSON for a successful DNS record create/update response.
+ fn dns_record_created(id: &str, name: &str, content: &str) -> serde_json::Value {
+ serde_json::json!({
+ "result": {
+ "id": id,
+ "name": name,
+ "content": content,
+ "proxied": false,
+ "ttl": 1,
+ "comment": null
+ }
+ })
+ }
+
+ /// JSON for a WAF lists response returning a single list.
+ fn waf_lists_response(list_id: &str, list_name: &str) -> serde_json::Value {
+ serde_json::json!({
+ "result": [{ "id": list_id, "name": list_name }]
+ })
+ }
+
+ /// JSON for WAF list items response.
+ fn waf_items_response(items: serde_json::Value) -> serde_json::Value {
+ serde_json::json!({ "result": items })
+ }
+
+ // -------------------------------------------------------
+ // update_once tests
+ // -------------------------------------------------------
+
+ /// update_once with a Literal IP provider creates a new DNS record when none exists.
+ #[tokio::test]
+ async fn test_update_once_creates_new_record() {
+ let server = MockServer::start().await;
+ let zone_id = "zone-abc";
+ let domain = "home.example.com";
+ let ip = "198.51.100.42";
+
+ // Zone lookup: GET zones?name=home.example.com
+ Mock::given(method("GET"))
+ .and(path("/zones"))
+ .and(query_param("name", domain))
+ .respond_with(
+ ResponseTemplate::new(200).set_body_json(zones_response(zone_id, "example.com")),
+ )
+ .mount(&server)
+ .await;
+
+ // List existing records: GET zones/{zone_id}/dns_records?...
+ Mock::given(method("GET"))
+ .and(path_regex(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(dns_records_empty()))
+ .mount(&server)
+ .await;
+
+ // Create record: POST zones/{zone_id}/dns_records
+ Mock::given(method("POST"))
+ .and(path(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(
+ ResponseTemplate::new(200)
+ .set_body_json(dns_record_created("rec-1", domain, ip)),
+ )
+ .mount(&server)
+ .await;
+
+ let mut providers = HashMap::new();
+ providers.insert(
+ IpType::V4,
+ ProviderType::Literal {
+ ips: vec![ip.parse::<IpAddr>().unwrap()],
+ },
+ );
+ let mut domains = HashMap::new();
+ domains.insert(IpType::V4, vec![domain.to_string()]);
+
+ let config = make_config(providers, domains, vec![], false);
+ let cf = handle(&server.uri());
+ let notifier = empty_notifier();
+ let heartbeat = empty_heartbeat();
+ let ppfmt = pp();
+
+ let ok = update_once(&config, &cf, &notifier, &heartbeat, &ppfmt).await;
+ assert!(ok);
+ }
+
+ /// update_once returns true (all_ok) when IP is already correct (Noop).
+ #[tokio::test]
+ async fn test_update_once_noop_when_record_up_to_date() {
+ let server = MockServer::start().await;
+ let zone_id = "zone-abc";
+ let domain = "home.example.com";
+ let ip = "198.51.100.42";
+
+ // Zone lookup
+ Mock::given(method("GET"))
+ .and(path("/zones"))
+ .and(query_param("name", domain))
+ .respond_with(
+ ResponseTemplate::new(200).set_body_json(zones_response(zone_id, "example.com")),
+ )
+ .mount(&server)
+ .await;
+
+ // List existing records - record already exists with correct IP
+ Mock::given(method("GET"))
+ .and(path_regex(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(
+ ResponseTemplate::new(200)
+ .set_body_json(dns_records_one("rec-1", domain, ip)),
+ )
+ .mount(&server)
+ .await;
+
+ let mut providers = HashMap::new();
+ providers.insert(
+ IpType::V4,
+ ProviderType::Literal {
+ ips: vec![ip.parse::<IpAddr>().unwrap()],
+ },
+ );
+ let mut domains = HashMap::new();
+ domains.insert(IpType::V4, vec![domain.to_string()]);
+
+ let config = make_config(providers, domains, vec![], false);
+ let cf = handle(&server.uri());
+ let notifier = empty_notifier();
+ let heartbeat = empty_heartbeat();
+ let ppfmt = pp();
+
+ let ok = update_once(&config, &cf, &notifier, &heartbeat, &ppfmt).await;
+ assert!(ok);
+ }
+
+ /// update_once returns true even when IP detection yields empty (no providers configured),
+ /// but marks the result as degraded via messages (all_ok = false only on zone/record errors).
+ /// Here we use ProviderType::None so no IPs are detected - all_ok stays true since there
+ /// is no domain update attempted (empty ips -> set_ips with empty slice -> Noop).
+ #[tokio::test]
+ async fn test_update_once_empty_ip_detection_with_none_provider() {
+ let server = MockServer::start().await;
+ let zone_id = "zone-abc";
+ let domain = "home.example.com";
+
+ // Zone lookup - still called even with empty IPs
+ Mock::given(method("GET"))
+ .and(path("/zones"))
+ .and(query_param("name", domain))
+ .respond_with(
+ ResponseTemplate::new(200).set_body_json(zones_response(zone_id, "example.com")),
+ )
+ .mount(&server)
+ .await;
+
+ // List records (set_ips called with empty ips, will list to delete managed records)
+ Mock::given(method("GET"))
+ .and(path_regex(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(dns_records_empty()))
+ .mount(&server)
+ .await;
+
+ // Provider that returns no IPs
+ let mut providers = HashMap::new();
+ providers.insert(IpType::V4, ProviderType::None);
+ let mut domains = HashMap::new();
+ domains.insert(IpType::V4, vec![domain.to_string()]);
+
+ let config = make_config(providers, domains, vec![], false);
+ let cf = handle(&server.uri());
+ let notifier = empty_notifier();
+ let heartbeat = empty_heartbeat();
+ let ppfmt = pp();
+
+ // all_ok = true because no zone-level errors occurred (empty ips just noop or warn)
+ let ok = update_once(&config, &cf, ¬ifier, &heartbeat, &ppfmt).await;
+ // Providers with None are not inserted in loop, so no IP detection warning is emitted,
+ // no detected_ips entry is created, and set_ips is called with empty slice -> Noop.
+ assert!(ok);
+ }
+
+ /// When the Literal provider is used but the zone is not found, update_once returns false.
+ #[tokio::test]
+ async fn test_update_once_returns_false_when_zone_not_found() {
+ let server = MockServer::start().await;
+ let domain = "missing.example.com";
+ let ip = "198.51.100.1";
+
+ // Zone lookup for full domain fails
+ Mock::given(method("GET"))
+ .and(path("/zones"))
+ .and(query_param("name", domain))
+ .respond_with(
+ ResponseTemplate::new(200).set_body_json(zones_empty_response()),
+ )
+ .mount(&server)
+ .await;
+
+ // Zone lookup for parent domain also fails
+ Mock::given(method("GET"))
+ .and(path("/zones"))
+ .and(query_param("name", "example.com"))
+ .respond_with(
+ ResponseTemplate::new(200).set_body_json(zones_empty_response()),
+ )
+ .mount(&server)
+ .await;
+
+ let mut providers = HashMap::new();
+ providers.insert(
+ IpType::V4,
+ ProviderType::Literal {
+ ips: vec![ip.parse::().unwrap()],
+ },
+ );
+ let mut domains = HashMap::new();
+ domains.insert(IpType::V4, vec![domain.to_string()]);
+
+ let config = make_config(providers, domains, vec![], false);
+ let cf = handle(&server.uri());
+ let notifier = empty_notifier();
+ let heartbeat = empty_heartbeat();
+ let ppfmt = pp();
+
+ let ok = update_once(&config, &cf, ¬ifier, &heartbeat, &ppfmt).await;
+ assert!(!ok, "Expected false when zone is not found");
+ }
+
+ /// update_once in dry_run mode does NOT POST to create records.
+ #[tokio::test]
+ async fn test_update_once_dry_run_does_not_create_record() {
+ let server = MockServer::start().await;
+ let zone_id = "zone-abc";
+ let domain = "home.example.com";
+ let ip = "198.51.100.42";
+
+ // Zone lookup
+ Mock::given(method("GET"))
+ .and(path("/zones"))
+ .and(query_param("name", domain))
+ .respond_with(
+ ResponseTemplate::new(200).set_body_json(zones_response(zone_id, "example.com")),
+ )
+ .mount(&server)
+ .await;
+
+ // List existing records - none exist
+ Mock::given(method("GET"))
+ .and(path_regex(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(dns_records_empty()))
+ .mount(&server)
+ .await;
+
+ // POST must NOT be called in dry_run - if it is, wiremock will panic at drop
+ // (no Mock registered for POST, and strict mode is default for unexpected requests)
+
+ let mut providers = HashMap::new();
+ providers.insert(
+ IpType::V4,
+ ProviderType::Literal {
+ ips: vec![ip.parse::().unwrap()],
+ },
+ );
+ let mut domains = HashMap::new();
+ domains.insert(IpType::V4, vec![domain.to_string()]);
+
+ let config = make_config(providers, domains, vec![], true /* dry_run */);
+ let cf = handle(&server.uri());
+ let notifier = empty_notifier();
+ let heartbeat = empty_heartbeat();
+ let ppfmt = pp();
+
+ // dry_run returns Updated from set_ips (it signals intent), all_ok should be true
+ let ok = update_once(&config, &cf, ¬ifier, &heartbeat, &ppfmt).await;
+ assert!(ok);
+ }
+
+ /// update_once with WAF lists: IPs are detected and WAF list is updated.
+ #[tokio::test]
+ async fn test_update_once_with_waf_list() {
+ let server = MockServer::start().await;
+ let account_id = "acc-123";
+ let list_name = "my_list";
+ let list_id = "list-id-1";
+ let ip = "198.51.100.42";
+
+ // GET accounts/{account_id}/rules/lists - returns our list
+ Mock::given(method("GET"))
+ .and(path(format!("/accounts/{account_id}/rules/lists")))
+ .respond_with(
+ ResponseTemplate::new(200)
+ .set_body_json(waf_lists_response(list_id, list_name)),
+ )
+ .mount(&server)
+ .await;
+
+ // GET list items - empty (need to add the IP)
+ Mock::given(method("GET"))
+ .and(path(format!(
+ "/accounts/{account_id}/rules/lists/{list_id}/items"
+ )))
+ .respond_with(
+ ResponseTemplate::new(200)
+ .set_body_json(waf_items_response(serde_json::json!([]))),
+ )
+ .mount(&server)
+ .await;
+
+ // POST to add items
+ Mock::given(method("POST"))
+ .and(path(format!(
+ "/accounts/{account_id}/rules/lists/{list_id}/items"
+ )))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": {}
+ })))
+ .mount(&server)
+ .await;
+
+ let mut providers = HashMap::new();
+ providers.insert(
+ IpType::V4,
+ ProviderType::Literal {
+ ips: vec![ip.parse::().unwrap()],
+ },
+ );
+ let waf_list = WAFList {
+ account_id: account_id.to_string(),
+ list_name: list_name.to_string(),
+ };
+
+ // No DNS domains - only WAF list
+ let config = make_config(providers, HashMap::new(), vec![waf_list], false);
+ let cf = handle(&server.uri());
+ let notifier = empty_notifier();
+ let heartbeat = empty_heartbeat();
+ let ppfmt = pp();
+
+ let ok = update_once(&config, &cf, ¬ifier, &heartbeat, &ppfmt).await;
+ assert!(ok);
+ }
+
+ /// update_once with WAF list in dry_run mode: items are NOT POSTed.
+ #[tokio::test]
+ async fn test_update_once_waf_list_dry_run() {
+ let server = MockServer::start().await;
+ let account_id = "acc-123";
+ let list_name = "my_list";
+ let list_id = "list-id-1";
+ let ip = "198.51.100.42";
+
+ Mock::given(method("GET"))
+ .and(path(format!("/accounts/{account_id}/rules/lists")))
+ .respond_with(
+ ResponseTemplate::new(200)
+ .set_body_json(waf_lists_response(list_id, list_name)),
+ )
+ .mount(&server)
+ .await;
+
+ Mock::given(method("GET"))
+ .and(path(format!(
+ "/accounts/{account_id}/rules/lists/{list_id}/items"
+ )))
+ .respond_with(
+ ResponseTemplate::new(200)
+ .set_body_json(waf_items_response(serde_json::json!([]))),
+ )
+ .mount(&server)
+ .await;
+
+ // No POST mock registered - dry_run must not POST
+
+ let mut providers = HashMap::new();
+ providers.insert(
+ IpType::V4,
+ ProviderType::Literal {
+ ips: vec![ip.parse::().unwrap()],
+ },
+ );
+ let waf_list = WAFList {
+ account_id: account_id.to_string(),
+ list_name: list_name.to_string(),
+ };
+
+ let config = make_config(providers, HashMap::new(), vec![waf_list], true /* dry_run */);
+ let cf = handle(&server.uri());
+ let notifier = empty_notifier();
+ let heartbeat = empty_heartbeat();
+ let ppfmt = pp();
+
+ let ok = update_once(&config, &cf, ¬ifier, &heartbeat, &ppfmt).await;
+ assert!(ok);
+ }
+
+ /// update_once with WAF list when WAF list is not found returns false (Failed).
+ #[tokio::test]
+ async fn test_update_once_waf_list_not_found_returns_false() {
+ let server = MockServer::start().await;
+ let account_id = "acc-123";
+ let list_name = "my_list";
+ let ip = "198.51.100.42";
+
+ // GET accounts/{account_id}/rules/lists - returns empty (list not found)
+ Mock::given(method("GET"))
+ .and(path(format!("/accounts/{account_id}/rules/lists")))
+ .respond_with(
+ ResponseTemplate::new(200).set_body_json(serde_json::json!({ "result": [] })),
+ )
+ .mount(&server)
+ .await;
+
+ let mut providers = HashMap::new();
+ providers.insert(
+ IpType::V4,
+ ProviderType::Literal {
+ ips: vec![ip.parse::().unwrap()],
+ },
+ );
+ let waf_list = WAFList {
+ account_id: account_id.to_string(),
+ list_name: list_name.to_string(),
+ };
+
+ let config = make_config(providers, HashMap::new(), vec![waf_list], false);
+ let cf = handle(&server.uri());
+ let notifier = empty_notifier();
+ let heartbeat = empty_heartbeat();
+ let ppfmt = pp();
+
+ let ok = update_once(&config, &cf, ¬ifier, &heartbeat, &ppfmt).await;
+ assert!(!ok, "Expected false when WAF list is not found");
+ }
+
+ /// update_once with two domains (V4 and V6) - both updated independently.
+ #[tokio::test]
+ async fn test_update_once_v4_and_v6_domains() {
+ let server = MockServer::start().await;
+ let zone_id = "zone-abc";
+ let domain_v4 = "v4.example.com";
+ let domain_v6 = "v6.example.com";
+ let ip_v4 = "198.51.100.42";
+ let ip_v6 = "2001:db8::1";
+
+ // Zone lookups
+ Mock::given(method("GET"))
+ .and(path("/zones"))
+ .and(query_param("name", domain_v4))
+ .respond_with(
+ ResponseTemplate::new(200).set_body_json(zones_response(zone_id, "example.com")),
+ )
+ .mount(&server)
+ .await;
+
+ Mock::given(method("GET"))
+ .and(path("/zones"))
+ .and(query_param("name", domain_v6))
+ .respond_with(
+ ResponseTemplate::new(200).set_body_json(zones_response(zone_id, "example.com")),
+ )
+ .mount(&server)
+ .await;
+
+ // List records for both domains (no existing records)
+ Mock::given(method("GET"))
+ .and(path_regex(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(dns_records_empty()))
+ .mount(&server)
+ .await;
+
+ // Create record for V4
+ Mock::given(method("POST"))
+ .and(path(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(
+ ResponseTemplate::new(200)
+ .set_body_json(dns_record_created("rec-v4", domain_v4, ip_v4)),
+ )
+ .mount(&server)
+ .await;
+
+ // Create record for V6
+ Mock::given(method("POST"))
+ .and(path(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(
+ ResponseTemplate::new(200)
+ .set_body_json(dns_record_created("rec-v6", domain_v6, ip_v6)),
+ )
+ .mount(&server)
+ .await;
+
+ let mut providers = HashMap::new();
+ providers.insert(
+ IpType::V4,
+ ProviderType::Literal {
+ ips: vec![ip_v4.parse::().unwrap()],
+ },
+ );
+ providers.insert(
+ IpType::V6,
+ ProviderType::Literal {
+ ips: vec![ip_v6.parse::().unwrap()],
+ },
+ );
+
+ let mut domains = HashMap::new();
+ domains.insert(IpType::V4, vec![domain_v4.to_string()]);
+ domains.insert(IpType::V6, vec![domain_v6.to_string()]);
+
+ let config = make_config(providers, domains, vec![], false);
+ let cf = handle(&server.uri());
+ let notifier = empty_notifier();
+ let heartbeat = empty_heartbeat();
+ let ppfmt = pp();
+
+ let ok = update_once(&config, &cf, ¬ifier, &heartbeat, &ppfmt).await;
+ assert!(ok);
+ }
+
+ /// update_once with no providers and no domains is a degenerate but valid case - returns true.
+ #[tokio::test]
+ async fn test_update_once_no_providers_no_domains() {
+ let server = MockServer::start().await;
+ // No HTTP mocks needed - nothing should be called
+
+ let config = make_config(HashMap::new(), HashMap::new(), vec![], false);
+ let cf = handle(&server.uri());
+ let notifier = empty_notifier();
+ let heartbeat = empty_heartbeat();
+ let ppfmt = pp();
+
+ let ok = update_once(&config, &cf, ¬ifier, &heartbeat, &ppfmt).await;
+ assert!(ok);
+ }
+
+ // -------------------------------------------------------
+ // final_delete tests
+ // -------------------------------------------------------
+
+ /// final_delete removes existing DNS records for a domain.
+ #[tokio::test]
+ async fn test_final_delete_removes_dns_records() {
+ let server = MockServer::start().await;
+ let zone_id = "zone-abc";
+ let domain = "home.example.com";
+ let record_id = "rec-to-delete";
+ let ip = "198.51.100.1";
+
+ // Zone lookup
+ Mock::given(method("GET"))
+ .and(path("/zones"))
+ .and(query_param("name", domain))
+ .respond_with(
+ ResponseTemplate::new(200).set_body_json(zones_response(zone_id, "example.com")),
+ )
+ .mount(&server)
+ .await;
+
+ // List records - one record exists
+ Mock::given(method("GET"))
+ .and(path_regex(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(
+ ResponseTemplate::new(200)
+ .set_body_json(dns_records_one(record_id, domain, ip)),
+ )
+ .mount(&server)
+ .await;
+
+ // DELETE the record
+ Mock::given(method("DELETE"))
+ .and(path(format!("/zones/{zone_id}/dns_records/{record_id}")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "id": record_id }
+ })))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let mut domains = HashMap::new();
+ domains.insert(IpType::V4, vec![domain.to_string()]);
+
+ let config = make_config(HashMap::new(), domains, vec![], false);
+ let cf = handle(&server.uri());
+ let notifier = empty_notifier();
+ let heartbeat = empty_heartbeat();
+ let ppfmt = pp();
+
+ // Should complete without panic
+ final_delete(&config, &cf, ¬ifier, &heartbeat, &ppfmt).await;
+ }
+
+ /// final_delete does nothing when no records exist for the domain.
+ #[tokio::test]
+ async fn test_final_delete_noop_when_no_records() {
+ let server = MockServer::start().await;
+ let zone_id = "zone-abc";
+ let domain = "home.example.com";
+
+ // Zone lookup
+ Mock::given(method("GET"))
+ .and(path("/zones"))
+ .and(query_param("name", domain))
+ .respond_with(
+ ResponseTemplate::new(200).set_body_json(zones_response(zone_id, "example.com")),
+ )
+ .mount(&server)
+ .await;
+
+ // List records - empty
+ Mock::given(method("GET"))
+ .and(path_regex(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(dns_records_empty()))
+ .mount(&server)
+ .await;
+
+ // No DELETE mock - ensures DELETE is not called
+
+ let mut domains = HashMap::new();
+ domains.insert(IpType::V4, vec![domain.to_string()]);
+
+ let config = make_config(HashMap::new(), domains, vec![], false);
+ let cf = handle(&server.uri());
+ let notifier = empty_notifier();
+ let heartbeat = empty_heartbeat();
+ let ppfmt = pp();
+
+ final_delete(&config, &cf, ¬ifier, &heartbeat, &ppfmt).await;
+ }
+
+ /// final_delete skips DNS deletion when zone is not found.
+ #[tokio::test]
+ async fn test_final_delete_skips_when_zone_not_found() {
+ let server = MockServer::start().await;
+ let domain = "missing.example.com";
+
+ // Zone lookup - not found at either level
+ Mock::given(method("GET"))
+ .and(path("/zones"))
+ .and(query_param("name", domain))
+ .respond_with(
+ ResponseTemplate::new(200).set_body_json(zones_empty_response()),
+ )
+ .mount(&server)
+ .await;
+
+ Mock::given(method("GET"))
+ .and(path("/zones"))
+ .and(query_param("name", "example.com"))
+ .respond_with(
+ ResponseTemplate::new(200).set_body_json(zones_empty_response()),
+ )
+ .mount(&server)
+ .await;
+
+ let mut domains = HashMap::new();
+ domains.insert(IpType::V4, vec![domain.to_string()]);
+
+ let config = make_config(HashMap::new(), domains, vec![], false);
+ let cf = handle(&server.uri());
+ let notifier = empty_notifier();
+ let heartbeat = empty_heartbeat();
+ let ppfmt = pp();
+
+ // Should complete without error - zone not found means skip
+ final_delete(&config, &cf, ¬ifier, &heartbeat, &ppfmt).await;
+ }
+
+ /// final_delete clears WAF list items.
+ #[tokio::test]
+ async fn test_final_delete_clears_waf_list() {
+ let server = MockServer::start().await;
+ let account_id = "acc-123";
+ let list_name = "my_list";
+ let list_id = "list-id-1";
+ let item_id = "item-abc";
+ let ip = "198.51.100.42";
+
+ // GET lists
+ Mock::given(method("GET"))
+ .and(path(format!("/accounts/{account_id}/rules/lists")))
+ .respond_with(
+ ResponseTemplate::new(200)
+ .set_body_json(waf_lists_response(list_id, list_name)),
+ )
+ .mount(&server)
+ .await;
+
+ // GET items - one item exists
+ Mock::given(method("GET"))
+ .and(path(format!(
+ "/accounts/{account_id}/rules/lists/{list_id}/items"
+ )))
+ .respond_with(
+ ResponseTemplate::new(200).set_body_json(waf_items_response(serde_json::json!([
+ { "id": item_id, "ip": ip, "comment": null }
+ ]))),
+ )
+ .mount(&server)
+ .await;
+
+ // DELETE items
+ Mock::given(method("DELETE"))
+ .and(path(format!(
+ "/accounts/{account_id}/rules/lists/{list_id}/items"
+ )))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": {}
+ })))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let waf_list = WAFList {
+ account_id: account_id.to_string(),
+ list_name: list_name.to_string(),
+ };
+
+ let config = make_config(HashMap::new(), HashMap::new(), vec![waf_list], false);
+ let cf = handle(&server.uri());
+ let notifier = empty_notifier();
+ let heartbeat = empty_heartbeat();
+ let ppfmt = pp();
+
+ final_delete(&config, &cf, ¬ifier, &heartbeat, &ppfmt).await;
+ }
+
+ /// final_delete with no WAF items does not call DELETE.
+ #[tokio::test]
+ async fn test_final_delete_waf_list_no_items() {
+ let server = MockServer::start().await;
+ let account_id = "acc-123";
+ let list_name = "my_list";
+ let list_id = "list-id-1";
+
+ Mock::given(method("GET"))
+ .and(path(format!("/accounts/{account_id}/rules/lists")))
+ .respond_with(
+ ResponseTemplate::new(200)
+ .set_body_json(waf_lists_response(list_id, list_name)),
+ )
+ .mount(&server)
+ .await;
+
+ // GET items - empty
+ Mock::given(method("GET"))
+ .and(path(format!(
+ "/accounts/{account_id}/rules/lists/{list_id}/items"
+ )))
+ .respond_with(
+ ResponseTemplate::new(200)
+ .set_body_json(waf_items_response(serde_json::json!([]))),
+ )
+ .mount(&server)
+ .await;
+
+ // No DELETE mock - ensures DELETE is not called for empty list
+
+ let waf_list = WAFList {
+ account_id: account_id.to_string(),
+ list_name: list_name.to_string(),
+ };
+
+ let config = make_config(HashMap::new(), HashMap::new(), vec![waf_list], false);
+ let cf = handle(&server.uri());
+ let notifier = empty_notifier();
+ let heartbeat = empty_heartbeat();
+ let ppfmt = pp();
+
+ final_delete(&config, &cf, ¬ifier, &heartbeat, &ppfmt).await;
+ }
+
+ /// final_delete with both DNS domains and WAF lists - both are cleaned up.
+ #[tokio::test]
+ async fn test_final_delete_dns_and_waf() {
+ let server = MockServer::start().await;
+ let zone_id = "zone-abc";
+ let domain = "home.example.com";
+ let record_id = "rec-del";
+ let ip = "198.51.100.5";
+ let account_id = "acc-999";
+ let list_name = "ddns_ips";
+ let list_id = "list-xyz";
+ let item_id = "item-xyz";
+
+ // Zone lookup
+ Mock::given(method("GET"))
+ .and(path("/zones"))
+ .and(query_param("name", domain))
+ .respond_with(
+ ResponseTemplate::new(200).set_body_json(zones_response(zone_id, "example.com")),
+ )
+ .mount(&server)
+ .await;
+
+ // List DNS records
+ Mock::given(method("GET"))
+ .and(path_regex(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(
+ ResponseTemplate::new(200)
+ .set_body_json(dns_records_one(record_id, domain, ip)),
+ )
+ .mount(&server)
+ .await;
+
+ // DELETE DNS record
+ Mock::given(method("DELETE"))
+ .and(path(format!("/zones/{zone_id}/dns_records/{record_id}")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "id": record_id }
+ })))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ // WAF: GET lists
+ Mock::given(method("GET"))
+ .and(path(format!("/accounts/{account_id}/rules/lists")))
+ .respond_with(
+ ResponseTemplate::new(200)
+ .set_body_json(waf_lists_response(list_id, list_name)),
+ )
+ .mount(&server)
+ .await;
+
+ // WAF: GET items
+ Mock::given(method("GET"))
+ .and(path(format!(
+ "/accounts/{account_id}/rules/lists/{list_id}/items"
+ )))
+ .respond_with(
+ ResponseTemplate::new(200).set_body_json(waf_items_response(serde_json::json!([
+ { "id": item_id, "ip": ip, "comment": null }
+ ]))),
+ )
+ .mount(&server)
+ .await;
+
+ // WAF: DELETE items
+ Mock::given(method("DELETE"))
+ .and(path(format!(
+ "/accounts/{account_id}/rules/lists/{list_id}/items"
+ )))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": {}
+ })))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let mut domains = HashMap::new();
+ domains.insert(IpType::V4, vec![domain.to_string()]);
+ let waf_list = WAFList {
+ account_id: account_id.to_string(),
+ list_name: list_name.to_string(),
+ };
+
+ let config = make_config(HashMap::new(), domains, vec![waf_list], false);
+ let cf = handle(&server.uri());
+ let notifier = empty_notifier();
+ let heartbeat = empty_heartbeat();
+ let ppfmt = pp();
+
+ final_delete(&config, &cf, ¬ifier, &heartbeat, &ppfmt).await;
+ }
+
+ // -------------------------------------------------------
+ // Literal provider IP detection filtering
+ // -------------------------------------------------------
+
+ /// Literal provider only injects IPs of the matching type into the update cycle.
+ /// V6 Literal IPs are ignored when the domain is V4-only.
+ #[tokio::test]
+ async fn test_update_once_literal_v4_not_used_for_v6_domain() {
+ let server = MockServer::start().await;
+ let zone_id = "zone-abc";
+ let domain_v6 = "v6only.example.com";
+ // Only a V4 literal provider is configured but domain is V6
+ let ip_v4 = "198.51.100.1";
+
+ // Zone lookup for V6 domain
+ Mock::given(method("GET"))
+ .and(path("/zones"))
+ .and(query_param("name", domain_v6))
+ .respond_with(
+ ResponseTemplate::new(200).set_body_json(zones_response(zone_id, "example.com")),
+ )
+ .mount(&server)
+ .await;
+
+ // List AAAA records - no existing records; set_ips called with empty ips -> Noop
+ Mock::given(method("GET"))
+ .and(path_regex(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(dns_records_empty()))
+ .mount(&server)
+ .await;
+
+ // V4 literal provider but V6 domain - the V4 provider will not be in detected_ips for V6
+ let mut providers = HashMap::new();
+ providers.insert(
+ IpType::V4,
+ ProviderType::Literal {
+ ips: vec![ip_v4.parse::().unwrap()],
+ },
+ );
+ // No V6 provider -> detected_ips won't have V6 -> set_ips called with empty slice
+ let mut domains = HashMap::new();
+ domains.insert(IpType::V6, vec![domain_v6.to_string()]);
+
+ let config = make_config(providers, domains, vec![], false);
+ let cf = handle(&server.uri());
+ let notifier = empty_notifier();
+ let heartbeat = empty_heartbeat();
+ let ppfmt = pp();
+
+ // set_ips with empty ips and no existing records = Noop; all_ok = true
+ let ok = update_once(&config, &cf, ¬ifier, &heartbeat, &ppfmt).await;
+ assert!(ok);
+ }
+ // -------------------------------------------------------
+ // LegacyDdnsClient tests (internal/private struct)
+ // -------------------------------------------------------
+
+ // Happy path: the first (primary) trace URL responds, so the "ip=" field is
+ // extracted and neither fallback-warning flag needs to flip.
+ #[tokio::test]
+ async fn test_legacy_try_trace_urls_primary_success() {
+ let server = MockServer::start().await;
+ // Serve a Cloudflare /cdn-cgi/trace-style plaintext body containing an ip= line.
+ Mock::given(method("GET"))
+ .and(path("/trace"))
+ .respond_with(
+ ResponseTemplate::new(200)
+ .set_body_string("fl=1\nh=mock\nip=198.51.100.1\nts=0\n"),
+ )
+ .mount(&server)
+ .await;
+
+ let ddns = LegacyDdnsClient {
+ client: Client::new(),
+ cf_api_base: server.uri(),
+ ipv4_urls: vec![format!("{}/trace", server.uri())],
+ ipv6_urls: vec![],
+ dry_run: false,
+ };
+ let mut shown_primary = false;
+ let mut shown_secondary = false;
+ let result = ddns
+ .try_trace_urls(&ddns.ipv4_urls, &mut shown_primary, &mut shown_secondary, "IPv4")
+ .await;
+ // Only the extracted IP is asserted; the warning flags are not expected to change here.
+ assert_eq!(result, Some("198.51.100.1".to_string()));
+ }
+
+ // The first URL points at an unroutable port; the second (fallback) URL answers.
+ // Verifies the fallback result is returned and the primary-failure flag is set.
+ #[tokio::test]
+ async fn test_legacy_try_trace_urls_primary_fails_fallback_succeeds() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/fallback"))
+ .respond_with(
+ ResponseTemplate::new(200)
+ .set_body_string("fl=1\nh=mock\nip=198.51.100.2\nts=0\n"),
+ )
+ .mount(&server)
+ .await;
+
+ let ddns = LegacyDdnsClient {
+ client: Client::new(),
+ cf_api_base: server.uri(),
+ ipv4_urls: vec![
+ "http://127.0.0.1:1/nonexistent".to_string(), // will fail
+ format!("{}/fallback", server.uri()),
+ ],
+ ipv6_urls: vec![],
+ dry_run: false,
+ };
+ let mut shown_primary = false;
+ let mut shown_secondary = false;
+ let result = ddns
+ .try_trace_urls(&ddns.ipv4_urls, &mut shown_primary, &mut shown_secondary, "IPv4")
+ .await;
+ assert_eq!(result, Some("198.51.100.2".to_string()));
+ // Primary failure must have been reported (flag flipped by try_trace_urls).
+ assert!(shown_primary);
+ }
+
+ // Every candidate URL is unreachable: the call yields None and both warning
+ // flags are set. A short client timeout keeps the test fast.
+ #[tokio::test]
+ async fn test_legacy_try_trace_urls_all_fail() {
+ let ddns = LegacyDdnsClient {
+ // 100ms timeout so connection failures don't slow the suite down.
+ client: Client::builder().timeout(Duration::from_millis(100)).build().unwrap(),
+ cf_api_base: String::new(),
+ ipv4_urls: vec![
+ "http://127.0.0.1:1/fail1".to_string(),
+ "http://127.0.0.1:1/fail2".to_string(),
+ ],
+ ipv6_urls: vec![],
+ dry_run: false,
+ };
+ let mut shown_primary = false;
+ let mut shown_secondary = false;
+ let result = ddns
+ .try_trace_urls(&ddns.ipv4_urls, &mut shown_primary, &mut shown_secondary, "IPv4")
+ .await;
+ assert!(result.is_none());
+ assert!(shown_primary);
+ assert!(shown_secondary);
+ }
+
+ // cf_api GET happy path: a 200 JSON body deserializes and the typed result's
+ // zone name is readable.
+ #[tokio::test]
+ async fn test_legacy_cf_api_get_success() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/zones/zone1"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "name": "example.com" }
+ })))
+ .mount(&server)
+ .await;
+
+ let ddns = LegacyDdnsClient {
+ client: Client::new(),
+ cf_api_base: server.uri(),
+ ipv4_urls: vec![],
+ ipv6_urls: vec![],
+ dry_run: false,
+ };
+ let entry = crate::config::LegacyCloudflareEntry {
+ authentication: crate::config::LegacyAuthentication {
+ api_token: "test-token".to_string(),
+ api_key: None,
+ },
+ zone_id: "zone1".to_string(),
+ subdomains: vec![],
+ proxied: false,
+ };
+ // NOTE(review): the generic argument of this Option<...> annotation was stripped
+ // in transit (reads "Option>"). Judging by the ".result.unwrap().name" access
+ // below it was a nested project response type (e.g. Option<Resp<Zone>>) — the
+ // exact names must be restored from the original source; cannot be reconstructed here.
+ let result: Option> = ddns
+ .cf_api("zones/zone1", "GET", &entry, None::<&()>.as_ref())
+ .await;
+ assert!(result.is_some());
+ assert_eq!(result.unwrap().result.unwrap().name, "example.com");
+ }
+
+ // cf_api POST happy path: a 200 JSON body yields Some(...). Only presence is
+ // asserted, so an untyped serde_json::Value deserialization target suffices.
+ #[tokio::test]
+ async fn test_legacy_cf_api_post_success() {
+ let server = MockServer::start().await;
+ Mock::given(method("POST"))
+ .and(path("/zones/zone1/dns_records"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "id": "new-rec" }
+ })))
+ .mount(&server)
+ .await;
+
+ let ddns = LegacyDdnsClient {
+ client: Client::new(),
+ cf_api_base: server.uri(),
+ ipv4_urls: vec![],
+ ipv6_urls: vec![],
+ dry_run: false,
+ };
+ let entry = crate::config::LegacyCloudflareEntry {
+ authentication: crate::config::LegacyAuthentication {
+ api_token: "test-token".to_string(),
+ api_key: None,
+ },
+ zone_id: "zone1".to_string(),
+ subdomains: vec![],
+ proxied: false,
+ };
+ let body = serde_json::json!({"name": "test"});
+ // NOTE(review): stripped generic restored as serde_json::Value (any JSON) — TODO
+ // confirm against the original annotation.
+ let result: Option<serde_json::Value> = ddns
+ .cf_api("zones/zone1/dns_records", "POST", &entry, Some(&body))
+ .await;
+ assert!(result.is_some());
+ }
+
+ // cf_api on a non-2xx (403) response returns None rather than panicking.
+ #[tokio::test]
+ async fn test_legacy_cf_api_error_response() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .respond_with(ResponseTemplate::new(403).set_body_string("forbidden"))
+ .mount(&server)
+ .await;
+
+ let ddns = LegacyDdnsClient {
+ client: Client::new(),
+ cf_api_base: server.uri(),
+ ipv4_urls: vec![],
+ ipv6_urls: vec![],
+ dry_run: false,
+ };
+ let entry = crate::config::LegacyCloudflareEntry {
+ authentication: crate::config::LegacyAuthentication {
+ api_token: "test-token".to_string(),
+ api_key: None,
+ },
+ zone_id: "zone1".to_string(),
+ subdomains: vec![],
+ proxied: false,
+ };
+ // NOTE(review): stripped generic restored as serde_json::Value (only is_none is
+ // asserted) — TODO confirm against the original annotation.
+ let result: Option<serde_json::Value> = ddns
+ .cf_api("zones/zone1", "GET", &entry, None::<&()>.as_ref())
+ .await;
+ assert!(result.is_none());
+ }
+
+ // cf_api with an unsupported HTTP verb ("OPTIONS") returns None without
+ // issuing a request (no mock server is contacted).
+ #[tokio::test]
+ async fn test_legacy_cf_api_unknown_method() {
+ let ddns = LegacyDdnsClient {
+ client: Client::new(),
+ cf_api_base: "http://localhost".to_string(),
+ ipv4_urls: vec![],
+ ipv6_urls: vec![],
+ dry_run: false,
+ };
+ let entry = crate::config::LegacyCloudflareEntry {
+ authentication: crate::config::LegacyAuthentication {
+ api_token: "test-token".to_string(),
+ api_key: None,
+ },
+ zone_id: "zone1".to_string(),
+ subdomains: vec![],
+ proxied: false,
+ };
+ // NOTE(review): stripped generic restored as serde_json::Value — TODO confirm.
+ let result: Option<serde_json::Value> = ddns
+ .cf_api("zones/zone1", "OPTIONS", &entry, None::<&()>.as_ref())
+ .await;
+ assert!(result.is_none());
+ }
+
+ // cf_api falls back to global API key + account email auth when api_token is
+ // empty; a 200 response still yields Some(...).
+ #[tokio::test]
+ async fn test_legacy_cf_api_with_api_key_auth() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "name": "example.com" }
+ })))
+ .mount(&server)
+ .await;
+
+ let ddns = LegacyDdnsClient {
+ client: Client::new(),
+ cf_api_base: server.uri(),
+ ipv4_urls: vec![],
+ ipv6_urls: vec![],
+ dry_run: false,
+ };
+ let entry = crate::config::LegacyCloudflareEntry {
+ authentication: crate::config::LegacyAuthentication {
+ api_token: String::new(),
+ api_key: Some(crate::config::LegacyApiKey {
+ api_key: "key123".to_string(),
+ account_email: "user@example.com".to_string(),
+ }),
+ },
+ zone_id: "zone1".to_string(),
+ subdomains: vec![],
+ proxied: false,
+ };
+ // NOTE(review): stripped generic restored as serde_json::Value (only is_some is
+ // asserted) — TODO confirm against the original annotation.
+ let result: Option<serde_json::Value> = ddns
+ .cf_api("zones/zone1", "GET", &entry, None::<&()>.as_ref())
+ .await;
+ assert!(result.is_some());
+ }
+
+ // get_ips with only IPv4 enabled: the trace endpoint's ip= value lands under
+ // the "ipv4" key with record_type "A".
+ #[tokio::test]
+ async fn test_legacy_get_ips_ipv4_enabled() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/trace"))
+ .respond_with(
+ ResponseTemplate::new(200)
+ .set_body_string("ip=198.51.100.42\n"),
+ )
+ .mount(&server)
+ .await;
+
+ let ddns = LegacyDdnsClient {
+ client: Client::new(),
+ cf_api_base: server.uri(),
+ ipv4_urls: vec![format!("{}/trace", server.uri())],
+ ipv6_urls: vec![],
+ dry_run: false,
+ };
+ let mut warnings = LegacyWarningState::default();
+ // NOTE(review): stripped generic restored; element type matches the entries
+ // built in the commit_record tests — TODO confirm against get_ips' signature.
+ let config: Vec<crate::config::LegacyCloudflareEntry> = vec![];
+ let ips = ddns.get_ips(true, false, false, &config, &mut warnings).await;
+ assert!(ips.contains_key("ipv4"));
+ assert_eq!(ips["ipv4"].ip, "198.51.100.42");
+ assert_eq!(ips["ipv4"].record_type, "A");
+ }
+
+ // get_ips with only IPv6 enabled: the trace endpoint's ip= value lands under
+ // the "ipv6" key with record_type "AAAA".
+ #[tokio::test]
+ async fn test_legacy_get_ips_ipv6_enabled() {
+ let server = MockServer::start().await;
+ Mock::given(method("GET"))
+ .and(path("/trace6"))
+ .respond_with(
+ ResponseTemplate::new(200)
+ .set_body_string("ip=2001:db8::1\n"),
+ )
+ .mount(&server)
+ .await;
+
+ let ddns = LegacyDdnsClient {
+ client: Client::new(),
+ cf_api_base: server.uri(),
+ ipv4_urls: vec![],
+ ipv6_urls: vec![format!("{}/trace6", server.uri())],
+ dry_run: false,
+ };
+ let mut warnings = LegacyWarningState::default();
+ // NOTE(review): stripped generic restored — TODO confirm against get_ips' signature.
+ let config: Vec<crate::config::LegacyCloudflareEntry> = vec![];
+ let ips = ddns.get_ips(false, true, false, &config, &mut warnings).await;
+ assert!(ips.contains_key("ipv6"));
+ assert_eq!(ips["ipv6"].ip, "2001:db8::1");
+ assert_eq!(ips["ipv6"].record_type, "AAAA");
+ }
+
+ // get_ips with both address families disabled performs no detection and
+ // returns an empty map.
+ #[tokio::test]
+ async fn test_legacy_get_ips_both_disabled() {
+ let ddns = LegacyDdnsClient {
+ client: Client::new(),
+ cf_api_base: String::new(),
+ ipv4_urls: vec![],
+ ipv6_urls: vec![],
+ dry_run: false,
+ };
+ let mut warnings = LegacyWarningState::default();
+ // NOTE(review): stripped generic restored — TODO confirm against get_ips' signature.
+ let config: Vec<crate::config::LegacyCloudflareEntry> = vec![];
+ let ips = ddns.get_ips(false, false, false, &config, &mut warnings).await;
+ assert!(ips.is_empty());
+ }
+
+ // commit_record when no record exists yet: zone is resolved, the (empty)
+ // record list is fetched, and exactly one POST creates the record
+ // (.expect(1) on the POST mock enforces the call count at server drop).
+ #[tokio::test]
+ async fn test_legacy_commit_record_creates_new() {
+ let server = MockServer::start().await;
+ let zone_id = "zone-leg1";
+
+ // GET zone
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "name": "example.com" }
+ })))
+ .mount(&server)
+ .await;
+
+ // GET dns_records - empty
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": []
+ })))
+ .mount(&server)
+ .await;
+
+ // POST create
+ Mock::given(method("POST"))
+ .and(path(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "id": "new-rec" }
+ })))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let ddns = LegacyDdnsClient {
+ client: Client::new(),
+ cf_api_base: server.uri(),
+ ipv4_urls: vec![],
+ ipv6_urls: vec![],
+ dry_run: false,
+ };
+ let ip = LegacyIpInfo {
+ record_type: "A".to_string(),
+ ip: "198.51.100.1".to_string(),
+ };
+ // "@" subdomain means the zone apex.
+ let config = vec![crate::config::LegacyCloudflareEntry {
+ authentication: crate::config::LegacyAuthentication {
+ api_token: "test-token".to_string(),
+ api_key: None,
+ },
+ zone_id: zone_id.to_string(),
+ subdomains: vec![LegacySubdomainEntry::Simple("@".to_string())],
+ proxied: false,
+ }];
+ // ttl = 300, purge = false; success is implied by the mock expectations.
+ ddns.commit_record(&ip, &config, 300, false).await;
+ }
+
+ #[tokio::test]
+ async fn test_legacy_commit_record_updates_existing() {
+ let server = MockServer::start().await;
+ let zone_id = "zone-leg2";
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "name": "example.com" }
+ })))
+ .mount(&server)
+ .await;
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": [{
+ "id": "rec-1",
+ "name": "example.com",
+ "content": "10.0.0.1",
+ "proxied": false
+ }]
+ })))
+ .mount(&server)
+ .await;
+
+ Mock::given(method("PUT"))
+ .and(path(format!("/zones/{zone_id}/dns_records/rec-1")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "id": "rec-1" }
+ })))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let ddns = LegacyDdnsClient {
+ client: Client::new(),
+ cf_api_base: server.uri(),
+ ipv4_urls: vec![],
+ ipv6_urls: vec![],
+ dry_run: false,
+ };
+ let ip = LegacyIpInfo {
+ record_type: "A".to_string(),
+ ip: "198.51.100.1".to_string(),
+ };
+ let config = vec![crate::config::LegacyCloudflareEntry {
+ authentication: crate::config::LegacyAuthentication {
+ api_token: "test-token".to_string(),
+ api_key: None,
+ },
+ zone_id: zone_id.to_string(),
+ subdomains: vec![LegacySubdomainEntry::Simple("@".to_string())],
+ proxied: false,
+ }];
+ ddns.commit_record(&ip, &config, 300, false).await;
+ }
+
+ #[tokio::test]
+ async fn test_legacy_commit_record_dry_run() {
+ let server = MockServer::start().await;
+ let zone_id = "zone-leg3";
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "name": "example.com" }
+ })))
+ .mount(&server)
+ .await;
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": []
+ })))
+ .mount(&server)
+ .await;
+
+ let ddns = LegacyDdnsClient {
+ client: Client::new(),
+ cf_api_base: server.uri(),
+ ipv4_urls: vec![],
+ ipv6_urls: vec![],
+ dry_run: true,
+ };
+ let ip = LegacyIpInfo {
+ record_type: "A".to_string(),
+ ip: "198.51.100.1".to_string(),
+ };
+ let config = vec![crate::config::LegacyCloudflareEntry {
+ authentication: crate::config::LegacyAuthentication {
+ api_token: "test-token".to_string(),
+ api_key: None,
+ },
+ zone_id: zone_id.to_string(),
+ subdomains: vec![LegacySubdomainEntry::Simple("@".to_string())],
+ proxied: false,
+ }];
+ // Should not POST
+ ddns.commit_record(&ip, &config, 300, false).await;
+ }
+
+ #[tokio::test]
+ async fn test_legacy_commit_record_with_detailed_subdomain() {
+ let server = MockServer::start().await;
+ let zone_id = "zone-leg4";
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "name": "example.com" }
+ })))
+ .mount(&server)
+ .await;
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": []
+ })))
+ .mount(&server)
+ .await;
+
+ Mock::given(method("POST"))
+ .and(path(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "id": "new-rec" }
+ })))
+ .mount(&server)
+ .await;
+
+ let ddns = LegacyDdnsClient {
+ client: Client::new(),
+ cf_api_base: server.uri(),
+ ipv4_urls: vec![],
+ ipv6_urls: vec![],
+ dry_run: false,
+ };
+ let ip = LegacyIpInfo {
+ record_type: "A".to_string(),
+ ip: "198.51.100.1".to_string(),
+ };
+ let config = vec![crate::config::LegacyCloudflareEntry {
+ authentication: crate::config::LegacyAuthentication {
+ api_token: "test-token".to_string(),
+ api_key: None,
+ },
+ zone_id: zone_id.to_string(),
+ subdomains: vec![LegacySubdomainEntry::Detailed {
+ name: "vpn".to_string(),
+ proxied: true,
+ }],
+ proxied: false,
+ }];
+ ddns.commit_record(&ip, &config, 300, false).await;
+ }
+
+ #[tokio::test]
+ async fn test_legacy_commit_record_purge_duplicates() {
+ let server = MockServer::start().await;
+ let zone_id = "zone-leg5";
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "name": "example.com" }
+ })))
+ .mount(&server)
+ .await;
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": [
+ { "id": "rec-1", "name": "example.com", "content": "198.51.100.1", "proxied": false },
+ { "id": "rec-dup", "name": "example.com", "content": "198.51.100.1", "proxied": false }
+ ]
+ })))
+ .mount(&server)
+ .await;
+
+ Mock::given(method("DELETE"))
+ .and(path(format!("/zones/{zone_id}/dns_records/rec-1")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({})))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let ddns = LegacyDdnsClient {
+ client: Client::new(),
+ cf_api_base: server.uri(),
+ ipv4_urls: vec![],
+ ipv6_urls: vec![],
+ dry_run: false,
+ };
+ let ip = LegacyIpInfo {
+ record_type: "A".to_string(),
+ ip: "198.51.100.1".to_string(),
+ };
+ let config = vec![crate::config::LegacyCloudflareEntry {
+ authentication: crate::config::LegacyAuthentication {
+ api_token: "test-token".to_string(),
+ api_key: None,
+ },
+ zone_id: zone_id.to_string(),
+ subdomains: vec![LegacySubdomainEntry::Simple("@".to_string())],
+ proxied: false,
+ }];
+ ddns.commit_record(&ip, &config, 300, true).await;
+ }
+
+ #[tokio::test]
+ async fn test_legacy_update_ips_calls_commit_for_each_ip() {
+ let server = MockServer::start().await;
+ let zone_id = "zone-leg6";
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "name": "example.com" }
+ })))
+ .mount(&server)
+ .await;
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": []
+ })))
+ .mount(&server)
+ .await;
+
+ Mock::given(method("POST"))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": { "id": "new-rec" }
+ })))
+ .mount(&server)
+ .await;
+
+ let ddns = LegacyDdnsClient {
+ client: Client::new(),
+ cf_api_base: server.uri(),
+ ipv4_urls: vec![],
+ ipv6_urls: vec![],
+ dry_run: false,
+ };
+ let mut ips = HashMap::new();
+ ips.insert("ipv4".to_string(), LegacyIpInfo {
+ record_type: "A".to_string(),
+ ip: "198.51.100.1".to_string(),
+ });
+ let config = vec![crate::config::LegacyCloudflareEntry {
+ authentication: crate::config::LegacyAuthentication {
+ api_token: "test-token".to_string(),
+ api_key: None,
+ },
+ zone_id: zone_id.to_string(),
+ subdomains: vec![LegacySubdomainEntry::Simple("@".to_string())],
+ proxied: false,
+ }];
+ ddns.update_ips(&ips, &config, 300, false).await;
+ }
+
+ #[tokio::test]
+ async fn test_legacy_delete_entries() {
+ let server = MockServer::start().await;
+ let zone_id = "zone-leg7";
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": [
+ { "id": "rec-1", "name": "example.com", "content": "10.0.0.1", "proxied": false }
+ ]
+ })))
+ .mount(&server)
+ .await;
+
+ Mock::given(method("DELETE"))
+ .and(path(format!("/zones/{zone_id}/dns_records/rec-1")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({})))
+ .expect(1)
+ .mount(&server)
+ .await;
+
+ let ddns = LegacyDdnsClient {
+ client: Client::new(),
+ cf_api_base: server.uri(),
+ ipv4_urls: vec![],
+ ipv6_urls: vec![],
+ dry_run: false,
+ };
+ let config = vec![crate::config::LegacyCloudflareEntry {
+ authentication: crate::config::LegacyAuthentication {
+ api_token: "test-token".to_string(),
+ api_key: None,
+ },
+ zone_id: zone_id.to_string(),
+ subdomains: vec![],
+ proxied: false,
+ }];
+ ddns.delete_entries("A", &config).await;
+ }
+
+ #[tokio::test]
+ async fn test_legacy_delete_entries_dry_run() {
+ let server = MockServer::start().await;
+ let zone_id = "zone-leg8";
+
+ Mock::given(method("GET"))
+ .and(path(format!("/zones/{zone_id}/dns_records")))
+ .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+ "result": [
+ { "id": "rec-1", "name": "example.com", "content": "10.0.0.1", "proxied": false }
+ ]
+ })))
+ .mount(&server)
+ .await;
+
+ let ddns = LegacyDdnsClient {
+ client: Client::new(),
+ cf_api_base: server.uri(),
+ ipv4_urls: vec![],
+ ipv6_urls: vec![],
+ dry_run: true,
+ };
+ let config = vec![crate::config::LegacyCloudflareEntry {
+ authentication: crate::config::LegacyAuthentication {
+ api_token: "test-token".to_string(),
+ api_key: None,
+ },
+ zone_id: zone_id.to_string(),
+ subdomains: vec![],
+ proxied: false,
+ }];
+ // dry_run: should not DELETE
+ ddns.delete_entries("A", &config).await;
+ }
+
+ #[test]
+ fn test_legacy_warning_state_default() {
+ let w = LegacyWarningState::default();
+ assert!(!w.shown_ipv4);
+ assert!(!w.shown_ipv4_secondary);
+ assert!(!w.shown_ipv6);
+ assert!(!w.shown_ipv6_secondary);
+ }
+}
+
+// Legacy types for backwards compatibility
+#[derive(Debug, serde::Deserialize)]
+struct LegacyCfResponse<T> {
+ result: Option<T>,
+}
+
+#[derive(Debug, serde::Deserialize)]
+struct LegacyZoneResult {
+ name: String,
+}
+
+#[derive(Debug, serde::Deserialize)]
+struct LegacyDnsRecord {
+ id: String,
+ name: String,
+ content: String,
+ proxied: bool,
+}
+
+#[derive(Debug, serde::Serialize)]
+struct LegacyDnsRecordPayload {
+ #[serde(rename = "type")]
+ record_type: String,
+ name: String,
+ content: String,
+ proxied: bool,
+ ttl: i64,
+}
diff --git a/start-sync.sh b/start-sync.sh
deleted file mode 100755
index 423caa7..0000000
--- a/start-sync.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-
-python3 -m venv venv
-source ./venv/bin/activate
-
-cd $DIR
-set -o pipefail; pip install -r requirements.txt | { grep -v "already satisfied" || :; }
-
-python3 cloudflare-ddns.py