mirror of
https://github.com/timothymiller/cloudflare-ddns.git
synced 2026-05-06 09:53:40 -03:00
Compare commits
51 Commits
2.0.1
...
fddabc7a3d
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
fddabc7a3d | ||
|
|
548d89dacf | ||
|
|
22320bea79 | ||
|
|
1bb347bea7 | ||
|
|
1d5ad2738c | ||
|
|
08ff76f443 | ||
|
|
199bbae2bd | ||
|
|
591f3e4905 | ||
|
|
687d299bda | ||
|
|
25122d2ce3 | ||
|
|
64c971b198 | ||
|
|
b1d8721e8d | ||
|
|
278f8ae629 | ||
|
|
896e08e38e | ||
|
|
85d060678d | ||
|
|
8501a35c82 | ||
|
|
0f2b772ecb | ||
|
|
b748e80592 | ||
|
|
714ec4f11f | ||
|
|
d344ae0174 | ||
|
|
c76a141f58 | ||
|
|
5eb93b45d1 | ||
|
|
e816cce5a8 | ||
|
|
7b20b7a477 | ||
|
|
38d7023987 | ||
|
|
3e2b8a3a40 | ||
|
|
9b140d2350 | ||
|
|
2913ce379c | ||
|
|
697089b43d | ||
|
|
766e1ac0d4 | ||
|
|
8c7af02698 | ||
|
|
245ac0b061 | ||
|
|
2446c1d6a0 | ||
|
|
9b8aba5e20 | ||
|
|
83dd454c42 | ||
|
|
f8d5b5cb7e | ||
|
|
bb5cc43651 | ||
|
|
7ff8379cfb | ||
|
|
943e38d70c | ||
|
|
ac982a208e | ||
|
|
4b1875b0cd | ||
|
|
54ca4a5eae | ||
|
|
94ce10fccc | ||
|
|
7e96816740 | ||
|
|
8a4b57c163 | ||
|
|
3c7072f4b6 | ||
|
|
3d796d470c | ||
|
|
36bdbea568 | ||
|
|
6085ba0cc2 | ||
|
|
560a3b7b28 | ||
|
|
1b3928865b |
6
.dockerignore
Normal file
6
.dockerignore
Normal file
@@ -0,0 +1,6 @@
|
||||
target/
|
||||
.git/
|
||||
.github/
|
||||
.gitignore
|
||||
*.md
|
||||
LICENSE
|
||||
12
.github/workflows/image.yml
vendored
12
.github/workflows/image.yml
vendored
@@ -9,20 +9,22 @@ on:
|
||||
|
||||
jobs:
|
||||
build:
|
||||
permissions:
|
||||
contents: read
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
uses: docker/setup-qemu-action@v4
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
uses: docker/setup-buildx-action@v4
|
||||
|
||||
- name: Login to DockerHub
|
||||
if: github.event_name != 'pull_request'
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@v4
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
@@ -35,7 +37,7 @@ jobs:
|
||||
|
||||
- name: Docker meta
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
uses: docker/metadata-action@v6
|
||||
with:
|
||||
images: timothyjmiller/cloudflare-ddns
|
||||
tags: |
|
||||
@@ -46,7 +48,7 @@ jobs:
|
||||
type=raw,enable=${{ github.ref == 'refs/heads/master' }},value=${{ steps.version.outputs.version }}
|
||||
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v6
|
||||
uses: docker/build-push-action@v7
|
||||
with:
|
||||
context: .
|
||||
push: ${{ github.event_name != 'pull_request' }}
|
||||
|
||||
808
Cargo.lock
generated
808
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
16
Cargo.toml
16
Cargo.toml
@@ -1,23 +1,23 @@
|
||||
[package]
|
||||
name = "cloudflare-ddns"
|
||||
version = "2.0.1"
|
||||
version = "2.1.2"
|
||||
edition = "2021"
|
||||
description = "Access your home network remotely via a custom domain name without a static IP"
|
||||
license = "GPL-3.0"
|
||||
|
||||
[dependencies]
|
||||
reqwest = { version = "0.12", features = ["json", "rustls-tls"], default-features = false }
|
||||
reqwest = { version = "0.13", features = ["json", "form", "rustls-no-provider"], default-features = false }
|
||||
rustls = { version = "0.23", features = ["ring"], default-features = false }
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
tokio = { version = "1", features = ["rt-multi-thread", "macros", "time", "signal"] }
|
||||
regex = "1"
|
||||
chrono = { version = "0.4", features = ["clock"] }
|
||||
tokio = { version = "1", features = ["rt", "macros", "time", "signal", "net"] }
|
||||
regex-lite = "0.1"
|
||||
url = "2"
|
||||
idna = "1"
|
||||
if-addrs = "0.13"
|
||||
if-addrs = "0.15"
|
||||
rand = "0.10"
|
||||
|
||||
[profile.release]
|
||||
opt-level = "s"
|
||||
opt-level = "z"
|
||||
lto = true
|
||||
codegen-units = 1
|
||||
strip = true
|
||||
|
||||
@@ -5,6 +5,7 @@ WORKDIR /build
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
COPY src ./src
|
||||
RUN cargo build --release
|
||||
RUN apk add --no-cache upx && upx --best --lzma target/release/cloudflare-ddns
|
||||
|
||||
# ---- Release ----
|
||||
FROM scratch AS release
|
||||
|
||||
71
README.md
71
README.md
@@ -4,7 +4,7 @@
|
||||
|
||||
Access your home network remotely via a custom domain name without a static IP!
|
||||
|
||||
A feature-complete dynamic DNS client for Cloudflare, written in Rust. The **smallest and most memory-efficient** open-source Cloudflare DDNS Docker image available — **~1.9 MB image size** and **~3.5 MB RAM** at runtime, smaller and leaner than Go-based alternatives. Built as a fully static binary from scratch with zero runtime dependencies.
|
||||
A feature-complete dynamic DNS client for Cloudflare, written in Rust. The **smallest and most memory-efficient** open-source Cloudflare DDNS Docker image available — **~1.1 MB image size** and **~3.5 MB RAM** at runtime, smaller and leaner than Go-based alternatives. Built as a fully static binary from scratch with zero runtime dependencies.
|
||||
|
||||
Configure everything with environment variables. Supports notifications, heartbeat monitoring, WAF list management, flexible scheduling, and more.
|
||||
|
||||
@@ -28,7 +28,8 @@ Configure everything with environment variables. Supports notifications, heartbe
|
||||
- 🎨 **Pretty output with emoji** — Configurable emoji and verbosity levels
|
||||
- 🔒 **Zero-log IP detection** — Uses Cloudflare's [cdn-cgi/trace](https://www.cloudflare.com/cdn-cgi/trace) by default
|
||||
- 🏠 **CGNAT-aware local detection** — Filters out shared address space (100.64.0.0/10) and private ranges
|
||||
- 🤏 **Tiny static binary** — ~1.9 MB Docker image built from scratch, zero runtime dependencies
|
||||
- 🚫 **Cloudflare IP rejection** — Automatically rejects Cloudflare anycast IPs to prevent incorrect DNS updates
|
||||
- 🤏 **Tiny static binary** — ~1.1 MB Docker image built from scratch, zero runtime dependencies
|
||||
|
||||
## 🚀 Quick Start
|
||||
|
||||
@@ -87,6 +88,18 @@ Available providers:
|
||||
| `literal:<ips>` | 📌 Static IP addresses (comma-separated) |
|
||||
| `none` | 🚫 Disable this IP type |
|
||||
|
||||
## 🚫 Cloudflare IP Rejection
|
||||
|
||||
| Variable | Default | Description |
|
||||
|----------|---------|-------------|
|
||||
| `REJECT_CLOUDFLARE_IPS` | `true` | Reject detected IPs that fall within Cloudflare's IP ranges |
|
||||
|
||||
Some IP detection providers occasionally return a Cloudflare anycast IP instead of your real public IP. When this happens, your DNS record gets updated to point at Cloudflare infrastructure rather than your actual address.
|
||||
|
||||
By default, each update cycle fetches [Cloudflare's published IP ranges](https://www.cloudflare.com/ips/) and skips any detected IP that falls within them. A warning is logged for every rejected IP. If the ranges cannot be fetched, the update is skipped entirely to prevent writing a Cloudflare IP.
|
||||
|
||||
To disable this protection, set `REJECT_CLOUDFLARE_IPS=false`.
|
||||
|
||||
## ⏱️ Scheduling
|
||||
|
||||
| Variable | Default | Description |
|
||||
@@ -94,6 +107,7 @@ Available providers:
|
||||
| `UPDATE_CRON` | `@every 5m` | Update schedule |
|
||||
| `UPDATE_ON_START` | `true` | Run an update immediately on startup |
|
||||
| `DELETE_ON_STOP` | `false` | Delete managed DNS records on shutdown |
|
||||
| `DELETE_ON_FAILURE` | `true` | Delete managed DNS records when failed to obtain IP from provider |
|
||||
|
||||
Schedule formats:
|
||||
|
||||
@@ -200,6 +214,7 @@ Heartbeats are sent after each update cycle. On failure, a fail signal is sent.
|
||||
| `UPDATE_CRON` | `@every 5m` | ⏱️ Update schedule |
|
||||
| `UPDATE_ON_START` | `true` | 🚀 Update on startup |
|
||||
| `DELETE_ON_STOP` | `false` | 🧹 Delete records on shutdown |
|
||||
| `DELETE_ON_FAILURE` | `true` | 🧹 Delete records if failed to obtain new records |
|
||||
| `TTL` | `1` | ⏳ DNS record TTL |
|
||||
| `PROXIED` | `false` | ☁️ Proxied expression |
|
||||
| `RECORD_COMMENT` | — | 💬 DNS record comment |
|
||||
@@ -210,6 +225,7 @@ Heartbeats are sent after each update cycle. On failure, a fail signal is sent.
|
||||
| `MANAGED_WAF_LIST_ITEMS_COMMENT_REGEX` | — | 🎯 Managed WAF items regex |
|
||||
| `DETECTION_TIMEOUT` | `5s` | ⏳ IP detection timeout |
|
||||
| `UPDATE_TIMEOUT` | `30s` | ⏳ API request timeout |
|
||||
| `REJECT_CLOUDFLARE_IPS` | `true` | 🚫 Reject Cloudflare anycast IPs |
|
||||
| `EMOJI` | `true` | 🎨 Enable emoji output |
|
||||
| `QUIET` | `false` | 🤫 Suppress info output |
|
||||
| `HEALTHCHECKS` | — | 💓 Healthchecks.io URL |
|
||||
@@ -349,6 +365,21 @@ Some ISP provided modems only allow port forwarding over IPv4 or IPv6. Disable t
|
||||
|
||||
### ⚙️ Config Options
|
||||
|
||||
By default, the legacy config file is loaded from `./config.json`. Set the `CONFIG_PATH` environment variable to change the directory:
|
||||
|
||||
```bash
|
||||
CONFIG_PATH=/etc/cloudflare-ddns cloudflare-ddns
|
||||
```
|
||||
|
||||
Or in Docker Compose:
|
||||
|
||||
```yml
|
||||
environment:
|
||||
- CONFIG_PATH=/config
|
||||
volumes:
|
||||
- /your/path/config.json:/config/config.json
|
||||
```
|
||||
|
||||
| Key | Type | Default | Description |
|
||||
|-----|------|---------|-------------|
|
||||
| `cloudflare` | array | required | List of zone configurations |
|
||||
@@ -356,6 +387,42 @@ Some ISP provided modems only allow port forwarding over IPv4 or IPv6. Disable t
|
||||
| `aaaa` | bool | `true` | Enable IPv6 (AAAA record) updates |
|
||||
| `purgeUnknownRecords` | bool | `false` | Delete stale/duplicate DNS records |
|
||||
| `ttl` | int | `300` | DNS record TTL in seconds (30-86400, values < 30 become auto) |
|
||||
| `ip4_provider` | string | `"cloudflare.trace"` | IPv4 detection provider (same values as `IP4_PROVIDER` env var) |
|
||||
| `ip6_provider` | string | `"cloudflare.trace"` | IPv6 detection provider (same values as `IP6_PROVIDER` env var) |
|
||||
|
||||
### 🚫 Cloudflare IP Rejection (Legacy Mode)
|
||||
|
||||
Cloudflare IP rejection is enabled by default in legacy mode too. To disable it, set `REJECT_CLOUDFLARE_IPS=false` alongside your `config.json`:
|
||||
|
||||
```bash
|
||||
REJECT_CLOUDFLARE_IPS=false cloudflare-ddns
|
||||
```
|
||||
|
||||
Or in Docker Compose:
|
||||
|
||||
```yml
|
||||
environment:
|
||||
- REJECT_CLOUDFLARE_IPS=false
|
||||
volumes:
|
||||
- ./config.json:/config.json
|
||||
```
|
||||
|
||||
### 🔍 IP Detection (Legacy Mode)
|
||||
|
||||
Legacy mode now uses the same shared provider abstraction as environment variable mode. By default it uses the `cloudflare.trace` provider, which builds an IP-family-bound HTTP client (`0.0.0.0` for IPv4, `[::]` for IPv6) to guarantee the correct address family on dual-stack hosts.
|
||||
|
||||
You can override the detection method per address family with `ip4_provider` and `ip6_provider` in your `config.json`. Supported values are the same as the `IP4_PROVIDER` / `IP6_PROVIDER` environment variables: `cloudflare.trace`, `cloudflare.doh`, `ipify`, `local`, `local.iface:<name>`, `url:<https://...>`, `none`.
|
||||
|
||||
Set a provider to `"none"` to disable detection for that address family (overrides `a`/`aaaa`):
|
||||
|
||||
```json
|
||||
{
|
||||
"a": true,
|
||||
"aaaa": true,
|
||||
"ip4_provider": "cloudflare.trace",
|
||||
"ip6_provider": "none"
|
||||
}
|
||||
```
|
||||
|
||||
Each zone entry contains:
|
||||
|
||||
|
||||
49
RELEASE_NOTES_2.1.1.md
Normal file
49
RELEASE_NOTES_2.1.1.md
Normal file
@@ -0,0 +1,49 @@
|
||||
# cloudflare-ddns v2.1.1
|
||||
|
||||
Maintenance release. Bug fix for `rand` 0.10 API change, plus opt-in failure-safe deletion behavior contributed in the v2.1.0 → v2.1.1 window, dependency refresh, and proportional jitter for IP detection.
|
||||
|
||||
## Highlights
|
||||
|
||||
- **Fix:** Restore the build under `rand` 0.10 — `random_range` moved to the `RngExt` trait, and the unconditional jitter sleep in `--repeat` mode no longer fails to compile.
|
||||
- **New:** `DELETE_ON_FAILURE` (env-var mode) controls whether DNS records are removed when an IP detection or update fails. Defaults to `true` to preserve existing behavior; set `DELETE_ON_FAILURE=false` to keep stale records on transient failures instead of yanking them.
|
||||
- **Improvement:** Proportional jitter (up to 20% of the update interval) is added before each scheduled update to spread requests across clients and reduce synchronized spikes against the Cloudflare API.
|
||||
|
||||
## Changes since v2.1.0
|
||||
|
||||
### Features
|
||||
- `DELETE_ON_FAILURE` env var to prevent DNS record deletion on failed updates (#263, thanks @DMaxter)
|
||||
- Proportional jitter on update intervals to desynchronize API traffic (#253, thanks @jhutchings1)
|
||||
|
||||
### Fixes
|
||||
- Compile fix for `rand` 0.10: import `RngExt` so `random_range` resolves
|
||||
- `delete_on_failure` regression test coverage added
|
||||
|
||||
### Dependencies
|
||||
- `rustls` 0.23.37 → 0.23.40
|
||||
- `rustls-webpki` 0.103.10 → 0.103.13
|
||||
- `tokio` 1.50.0 → 1.52.1
|
||||
- `reqwest` 0.13.2 → 0.13.3
|
||||
- `rand` 0.9.2 → 0.10.1
|
||||
|
||||
### Docs
|
||||
- Document `DELETE_ON_FAILURE` in the README
|
||||
|
||||
## Upgrade notes
|
||||
|
||||
- **Default behavior unchanged.** `DELETE_ON_FAILURE` defaults to `true`, matching pre-2.1.1 behavior. Set it to `false` if you want stale records preserved during outages.
|
||||
- No config file schema changes. Existing `config.json` deployments continue to work without edits.
|
||||
|
||||
## Docker
|
||||
|
||||
```sh
|
||||
docker pull timothyjmiller/cloudflare-ddns:2.1.1
|
||||
docker pull timothyjmiller/cloudflare-ddns:latest
|
||||
```
|
||||
|
||||
Multi-arch: `linux/amd64`, `linux/arm64`, `linux/ppc64le`.
|
||||
|
||||
## Verification
|
||||
|
||||
- `cargo test` — 352 tests pass
|
||||
- Release build succeeds, binary size ~1.7 MiB (pre-UPX)
|
||||
- Smoke tested in both legacy `config.json` mode and env-var mode against the live Cloudflare API
|
||||
48
RELEASE_NOTES_2.1.2.md
Normal file
48
RELEASE_NOTES_2.1.2.md
Normal file
@@ -0,0 +1,48 @@
|
||||
# cloudflare-ddns v2.1.2 — Notification & Domain Casing Fixes
|
||||
|
||||
This patch release fixes three bugs reported on GitHub.
|
||||
|
||||
## Bug fixes
|
||||
|
||||
- **Mixed-case domains now match existing DNS records (#255).**
|
||||
In env-var mode, configuring a domain with mixed casing (for example
|
||||
`ExaMple.com`) caused every update cycle to attempt a duplicate record
|
||||
create and fail with Cloudflare error `81058: An identical record already
|
||||
exists.` Cloudflare normalizes record names to lowercase server-side, so
|
||||
the lookup is now case-insensitive.
|
||||
|
||||
- **Pushover notifications work again (#258).**
|
||||
The shoutrrr-style URL `pushover://shoutrrr:TOKEN@USER` (the canonical form
|
||||
from `containrrr/shoutrrr`) was being parsed with the literal `shoutrrr:`
|
||||
username included in the API token, which Pushover rejected. The parser
|
||||
now strips the optional `<user>:` prefix from the token segment, restoring
|
||||
the v2.0.7 behavior. Optional shoutrrr query parameters (`?devices=...`,
|
||||
`?priority=...`) are tolerated.
|
||||
|
||||
- **Gotify notifications now produce a valid request URL (#262).**
|
||||
The Gotify URL parser blindly appended `/message` after any query string,
|
||||
producing malformed webhook URLs like
|
||||
`https://host:9090?token=XYZ/message`. The parser now follows shoutrrr's
|
||||
canonical layout — token as the final path segment or `?token=` query —
|
||||
and supports `?disabletls=yes` to switch the resulting webhook from HTTPS
|
||||
to HTTP for typical home-LAN setups, plus the `gotify+http://` /
|
||||
`gotify+https://` aliases.
|
||||
|
||||
## Already addressed (closing #257)
|
||||
|
||||
The robust public-IP discovery enhancements requested in #257 (multi-endpoint
|
||||
trace fallback, strict address-family validation, API request timeouts,
|
||||
duplicate record cleanup) were already folded into the Rust port shipped in
|
||||
v2.0.8 — see `src/provider.rs` (`CF_TRACE_PRIMARY` / `CF_TRACE_FALLBACK`,
|
||||
`validate_detected_ip`, `build_split_client`) and `src/cloudflare.rs`
|
||||
(`set_ips` dedup behavior, per-request `timeout`).
|
||||
|
||||
## Upgrade
|
||||
|
||||
```bash
|
||||
docker pull timothyjmiller/cloudflare-ddns:2.1.2
|
||||
# or
|
||||
docker pull timothyjmiller/cloudflare-ddns:latest
|
||||
```
|
||||
|
||||
No configuration changes are required.
|
||||
78
SECURITY.md
Normal file
78
SECURITY.md
Normal file
@@ -0,0 +1,78 @@
|
||||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
| Version | Supported |
|
||||
| ------- | ------------------ |
|
||||
| 2.1.x | :white_check_mark: |
| < 2.1 | :x: |

Only the latest release in the `2.1.x` series receives security updates. The legacy Python codebase and all `1.x` releases are **end-of-life** and will not be patched. Users on older versions should upgrade to the latest release immediately.
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
**Please do not open a public GitHub issue for security vulnerabilities.**
|
||||
|
||||
Instead, report vulnerabilities privately using one of the following methods:
|
||||
|
||||
1. **GitHub Private Vulnerability Reporting** — Use the [Security Advisories](https://github.com/timothymiller/cloudflare-ddns/security/advisories/new) page to submit a private report directly on GitHub.
|
||||
2. **Email** — Contact the maintainer directly at the email address listed on the [GitHub profile](https://github.com/timothymiller).
|
||||
|
||||
### What to Include
|
||||
|
||||
- A clear description of the vulnerability and its potential impact
|
||||
- Steps to reproduce or a proof-of-concept
|
||||
- Affected version(s)
|
||||
- Any suggested fix or mitigation, if applicable
|
||||
|
||||
### What to Expect
|
||||
|
||||
- **Acknowledgment** within 72 hours of your report
|
||||
- **Status updates** at least every 7 days while the issue is being investigated
|
||||
- A coordinated disclosure timeline — we aim to release a fix within 30 days of a confirmed vulnerability, and will credit reporters (unless anonymity is preferred) in the release notes
|
||||
|
||||
If a report is declined (e.g., out of scope or not reproducible), you will receive an explanation.
|
||||
|
||||
## Security Considerations
|
||||
|
||||
This project handles **Cloudflare API tokens** that grant DNS editing privileges. Users should be aware of the following:
|
||||
|
||||
### API Token Handling
|
||||
|
||||
- **Never commit your API token** to version control or include it in Docker images.
|
||||
- Use `CLOUDFLARE_API_TOKEN_FILE` or Docker secrets to inject tokens at runtime rather than passing them as plain environment variables where possible.
|
||||
- Create a **scoped API token** with only "Edit DNS" permission on the specific zones you need — avoid using Global API Keys.
|
||||
|
||||
### Container Security
|
||||
|
||||
- The Docker image runs as a **static binary from scratch** with zero runtime dependencies, which minimizes the attack surface.
|
||||
- Use `security_opt: no-new-privileges:true` in Docker Compose deployments.
|
||||
- Pin image tags to a specific version (e.g., `timothyjmiller/cloudflare-ddns:v2.0.10`) rather than using `latest` in production.
|
||||
|
||||
### Network Security
|
||||
|
||||
- The default IP detection provider (`cloudflare.trace`) communicates directly with Cloudflare's infrastructure over HTTPS and does not log your IP.
|
||||
- All Cloudflare API calls are made over HTTPS/TLS.
|
||||
- `--network host` mode is required for IPv6 detection — be aware this gives the container access to the host's full network stack.
|
||||
|
||||
### Supply Chain
|
||||
|
||||
- The project is built with `cargo` and all dependencies are declared in `Cargo.lock` for reproducible builds.
|
||||
- Docker images are built via GitHub Actions and published to Docker Hub. Multi-arch builds cover `linux/amd64`, `linux/arm64`, and `linux/ppc64le`.
|
||||
|
||||
## Scope
|
||||
|
||||
The following are considered **in scope** for security reports:
|
||||
|
||||
- Authentication or authorization flaws (e.g., token leakage, insufficient credential protection)
|
||||
- Injection vulnerabilities in configuration parsing
|
||||
- Vulnerabilities in DNS record handling that could lead to record hijacking or poisoning
|
||||
- Dependency vulnerabilities with a demonstrable exploit path
|
||||
- Container escape or privilege escalation
|
||||
|
||||
The following are **out of scope**:
|
||||
|
||||
- Denial of service against the user's own instance
|
||||
- Vulnerabilities in Cloudflare's API or infrastructure (report those to [Cloudflare](https://hackerone.com/cloudflare))
|
||||
- Social engineering attacks
|
||||
- Issues requiring physical access to the host machine
|
||||
@@ -24,5 +24,7 @@
|
||||
"a": true,
|
||||
"aaaa": true,
|
||||
"purgeUnknownRecords": false,
|
||||
"ttl": 300
|
||||
"ttl": 300,
|
||||
"ip4_provider": "cloudflare.trace",
|
||||
"ip6_provider": "cloudflare.trace"
|
||||
}
|
||||
|
||||
421
src/cf_ip_filter.rs
Normal file
421
src/cf_ip_filter.rs
Normal file
@@ -0,0 +1,421 @@
|
||||
use crate::pp::{self, PP};
|
||||
use reqwest::Client;
|
||||
use std::net::IpAddr;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
// Cloudflare publishes its anycast CIDR ranges as plain text, one range per line.
const CF_IPV4_URL: &str = "https://www.cloudflare.com/ips-v4";
const CF_IPV6_URL: &str = "https://www.cloudflare.com/ips-v6";
|
||||
|
||||
/// A CIDR range parsed from "address/prefix" notation.
struct CidrRange {
    addr: IpAddr,
    prefix_len: u8,
}

impl CidrRange {
    /// Parse `"a.b.c.d/NN"` or `"x::/NN"`.
    ///
    /// Returns `None` when the slash is missing, the address or prefix is
    /// malformed, or the prefix exceeds the address family's bit width.
    fn parse(s: &str) -> Option<Self> {
        let (addr_str, prefix_str) = s.split_once('/')?;
        let addr: IpAddr = addr_str.parse().ok()?;
        let prefix_len: u8 = prefix_str.parse().ok()?;
        let max_prefix: u8 = match addr {
            IpAddr::V4(_) => 32,
            IpAddr::V6(_) => 128,
        };
        (prefix_len <= max_prefix).then_some(Self { addr, prefix_len })
    }

    /// True when `ip` falls inside this range.
    ///
    /// An address of a different family than the range never matches.
    fn contains(&self, ip: &IpAddr) -> bool {
        match (self.addr, ip) {
            (IpAddr::V4(net), IpAddr::V4(ip)) => {
                // A /0 prefix matches everything; handled up front because
                // shifting a u32 by 32 would overflow.
                if self.prefix_len == 0 {
                    return true;
                }
                let mask = !0u32 << (32 - self.prefix_len);
                (u32::from(net) & mask) == (u32::from(*ip) & mask)
            }
            (IpAddr::V6(net), IpAddr::V6(ip)) => {
                // Same /0 guard as above, for the 128-bit shift.
                if self.prefix_len == 0 {
                    return true;
                }
                let mask = !0u128 << (128 - self.prefix_len);
                (u128::from(net) & mask) == (u128::from(*ip) & mask)
            }
            _ => false,
        }
    }
}
|
||||
|
||||
/// Holds parsed Cloudflare CIDR ranges for IP filtering.
|
||||
pub struct CloudflareIpFilter {
|
||||
ranges: Vec<CidrRange>,
|
||||
}
|
||||
|
||||
impl CloudflareIpFilter {
|
||||
/// Fetch Cloudflare IP ranges from their published URLs and parse them.
|
||||
pub async fn fetch(client: &Client, timeout: Duration, ppfmt: &PP) -> Option<Self> {
|
||||
let mut ranges = Vec::new();
|
||||
|
||||
let (v4_result, v6_result) = tokio::join!(
|
||||
client.get(CF_IPV4_URL).timeout(timeout).send(),
|
||||
client.get(CF_IPV6_URL).timeout(timeout).send(),
|
||||
);
|
||||
|
||||
for (url, result) in [(CF_IPV4_URL, v4_result), (CF_IPV6_URL, v6_result)] {
|
||||
match result {
|
||||
Ok(resp) if resp.status().is_success() => match resp.text().await {
|
||||
Ok(body) => {
|
||||
for line in body.lines() {
|
||||
let line = line.trim();
|
||||
if line.is_empty() {
|
||||
continue;
|
||||
}
|
||||
match CidrRange::parse(line) {
|
||||
Some(range) => ranges.push(range),
|
||||
None => {
|
||||
ppfmt.warningf(
|
||||
pp::EMOJI_WARNING,
|
||||
&format!(
|
||||
"Failed to parse Cloudflare IP range '{line}'"
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
ppfmt.warningf(
|
||||
pp::EMOJI_WARNING,
|
||||
&format!("Failed to read Cloudflare IP ranges from {url}: {e}"),
|
||||
);
|
||||
return None;
|
||||
}
|
||||
},
|
||||
Ok(resp) => {
|
||||
ppfmt.warningf(
|
||||
pp::EMOJI_WARNING,
|
||||
&format!(
|
||||
"Failed to fetch Cloudflare IP ranges from {url}: HTTP {}",
|
||||
resp.status()
|
||||
),
|
||||
);
|
||||
return None;
|
||||
}
|
||||
Err(e) => {
|
||||
ppfmt.warningf(
|
||||
pp::EMOJI_WARNING,
|
||||
&format!("Failed to fetch Cloudflare IP ranges from {url}: {e}"),
|
||||
);
|
||||
return None;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ranges.is_empty() {
|
||||
ppfmt.warningf(
|
||||
pp::EMOJI_WARNING,
|
||||
"No Cloudflare IP ranges loaded; skipping filter",
|
||||
);
|
||||
return None;
|
||||
}
|
||||
|
||||
ppfmt.infof(
|
||||
pp::EMOJI_DETECT,
|
||||
&format!("Loaded {} Cloudflare IP ranges for filtering", ranges.len()),
|
||||
);
|
||||
|
||||
Some(Self { ranges })
|
||||
}
|
||||
|
||||
/// Parse ranges from raw text lines (for testing).
|
||||
#[cfg(test)]
|
||||
pub fn from_lines(lines: &str) -> Option<Self> {
|
||||
let ranges: Vec<CidrRange> = lines
|
||||
.lines()
|
||||
.filter_map(|l| {
|
||||
let l = l.trim();
|
||||
if l.is_empty() {
|
||||
None
|
||||
} else {
|
||||
CidrRange::parse(l)
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
if ranges.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(Self { ranges })
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if an IP address falls within any Cloudflare range.
|
||||
pub fn contains(&self, ip: &IpAddr) -> bool {
|
||||
self.ranges.iter().any(|net| net.contains(ip))
|
||||
}
|
||||
}
|
||||
|
||||
/// Refresh interval for Cloudflare IP ranges (24 hours).
|
||||
const CF_RANGE_REFRESH: Duration = Duration::from_secs(24 * 60 * 60);
|
||||
|
||||
/// Cached wrapper around [`CloudflareIpFilter`].
|
||||
///
|
||||
/// Fetches once, then re-uses the cached ranges for [`CF_RANGE_REFRESH`].
|
||||
/// If a refresh fails, the previously cached ranges are kept.
|
||||
pub struct CachedCloudflareFilter {
|
||||
filter: Option<CloudflareIpFilter>,
|
||||
fetched_at: Option<Instant>,
|
||||
}
|
||||
|
||||
impl CachedCloudflareFilter {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
filter: None,
|
||||
fetched_at: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return a reference to the current filter, refreshing if stale or absent.
|
||||
pub async fn get(
|
||||
&mut self,
|
||||
client: &Client,
|
||||
timeout: Duration,
|
||||
ppfmt: &PP,
|
||||
) -> Option<&CloudflareIpFilter> {
|
||||
let stale = match self.fetched_at {
|
||||
Some(t) => t.elapsed() >= CF_RANGE_REFRESH,
|
||||
None => true,
|
||||
};
|
||||
|
||||
if stale {
|
||||
match CloudflareIpFilter::fetch(client, timeout, ppfmt).await {
|
||||
Some(new_filter) => {
|
||||
self.filter = Some(new_filter);
|
||||
self.fetched_at = Some(Instant::now());
|
||||
}
|
||||
None => {
|
||||
if self.filter.is_some() {
|
||||
ppfmt.warningf(
|
||||
pp::EMOJI_WARNING,
|
||||
"Failed to refresh Cloudflare IP ranges; using cached version",
|
||||
);
|
||||
// Keep using cached filter, but don't update fetched_at
|
||||
// so we retry next cycle.
|
||||
}
|
||||
// If no cached filter exists, return None (caller handles fail-safe).
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.filter.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::net::{Ipv4Addr, Ipv6Addr};
|
||||
|
||||
const SAMPLE_RANGES: &str = "\
|
||||
173.245.48.0/20
|
||||
103.21.244.0/22
|
||||
103.22.200.0/22
|
||||
104.16.0.0/13
|
||||
2400:cb00::/32
|
||||
2606:4700::/32
|
||||
";
|
||||
|
||||
#[test]
|
||||
fn test_parse_ranges() {
|
||||
let filter = CloudflareIpFilter::from_lines(SAMPLE_RANGES).unwrap();
|
||||
assert_eq!(filter.ranges.len(), 6);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_contains_cloudflare_ipv4() {
|
||||
let filter = CloudflareIpFilter::from_lines(SAMPLE_RANGES).unwrap();
|
||||
// 104.16.0.1 is within 104.16.0.0/13
|
||||
let ip: IpAddr = IpAddr::V4(Ipv4Addr::new(104, 16, 0, 1));
|
||||
assert!(filter.contains(&ip));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rejects_non_cloudflare_ipv4() {
|
||||
let filter = CloudflareIpFilter::from_lines(SAMPLE_RANGES).unwrap();
|
||||
// 203.0.113.42 is a documentation IP, not Cloudflare
|
||||
let ip: IpAddr = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 42));
|
||||
assert!(!filter.contains(&ip));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_contains_cloudflare_ipv6() {
|
||||
let filter = CloudflareIpFilter::from_lines(SAMPLE_RANGES).unwrap();
|
||||
// 2606:4700::1 is within 2606:4700::/32
|
||||
let ip: IpAddr = IpAddr::V6(Ipv6Addr::new(0x2606, 0x4700, 0, 0, 0, 0, 0, 1));
|
||||
assert!(filter.contains(&ip));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rejects_non_cloudflare_ipv6() {
|
||||
let filter = CloudflareIpFilter::from_lines(SAMPLE_RANGES).unwrap();
|
||||
// 2001:db8::1 is a documentation address, not Cloudflare
|
||||
let ip: IpAddr = IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1));
|
||||
assert!(!filter.contains(&ip));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty_input() {
|
||||
assert!(CloudflareIpFilter::from_lines("").is_none());
|
||||
assert!(CloudflareIpFilter::from_lines(" \n \n").is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_edge_of_range() {
|
||||
let filter = CloudflareIpFilter::from_lines("104.16.0.0/13").unwrap();
|
||||
// First IP in range
|
||||
assert!(filter.contains(&IpAddr::V4(Ipv4Addr::new(104, 16, 0, 0))));
|
||||
// Last IP in range (104.23.255.255)
|
||||
assert!(filter.contains(&IpAddr::V4(Ipv4Addr::new(104, 23, 255, 255))));
|
||||
// Just outside range (104.24.0.0)
|
||||
assert!(!filter.contains(&IpAddr::V4(Ipv4Addr::new(104, 24, 0, 0))));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_prefix_rejected() {
|
||||
assert!(CidrRange::parse("10.0.0.0/33").is_none());
|
||||
assert!(CidrRange::parse("::1/129").is_none());
|
||||
assert!(CidrRange::parse("not-an-ip/24").is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_v4_does_not_match_v6() {
|
||||
let filter = CloudflareIpFilter::from_lines("104.16.0.0/13").unwrap();
|
||||
let ip: IpAddr = IpAddr::V6(Ipv6Addr::new(0x2606, 0x4700, 0, 0, 0, 0, 0, 1));
|
||||
assert!(!filter.contains(&ip));
|
||||
}
|
||||
|
||||
/// All real Cloudflare ranges as of 2026-03. Verifies every range parses
|
||||
/// and that the first and last IP in each range is matched while the
|
||||
/// address just past the end is not.
|
||||
const ALL_CF_RANGES: &str = "\
|
||||
173.245.48.0/20
|
||||
103.21.244.0/22
|
||||
103.22.200.0/22
|
||||
103.31.4.0/22
|
||||
141.101.64.0/18
|
||||
108.162.192.0/18
|
||||
190.93.240.0/20
|
||||
188.114.96.0/20
|
||||
197.234.240.0/22
|
||||
198.41.128.0/17
|
||||
162.158.0.0/15
|
||||
104.16.0.0/13
|
||||
104.24.0.0/14
|
||||
172.64.0.0/13
|
||||
131.0.72.0/22
|
||||
2400:cb00::/32
|
||||
2606:4700::/32
|
||||
2803:f800::/32
|
||||
2405:b500::/32
|
||||
2405:8100::/32
|
||||
2a06:98c0::/29
|
||||
2c0f:f248::/32
|
||||
";
|
||||
|
||||
#[test]
|
||||
fn test_all_real_ranges_parse() {
|
||||
let filter = CloudflareIpFilter::from_lines(ALL_CF_RANGES).unwrap();
|
||||
assert_eq!(filter.ranges.len(), 22);
|
||||
}
|
||||
|
||||
/// For a /N IPv4 range starting at `base`, return (first, last, just_outside).
|
||||
fn v4_range_bounds(a: u8, b: u8, c: u8, d: u8, prefix: u8) -> (Ipv4Addr, Ipv4Addr, Ipv4Addr) {
|
||||
let base = u32::from(Ipv4Addr::new(a, b, c, d));
|
||||
let size = 1u32 << (32 - prefix);
|
||||
let first = Ipv4Addr::from(base);
|
||||
let last = Ipv4Addr::from(base + size - 1);
|
||||
let outside = Ipv4Addr::from(base + size);
|
||||
(first, last, outside)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_all_real_ipv4_ranges_match() {
|
||||
// Test each range individually so adjacent ranges (e.g. 104.16.0.0/13
|
||||
// and 104.24.0.0/14) don't cause false failures on boundary checks.
|
||||
let ranges: &[(u8, u8, u8, u8, u8)] = &[
|
||||
(173, 245, 48, 0, 20),
|
||||
(103, 21, 244, 0, 22),
|
||||
(103, 22, 200, 0, 22),
|
||||
(103, 31, 4, 0, 22),
|
||||
(141, 101, 64, 0, 18),
|
||||
(108, 162, 192, 0, 18),
|
||||
(190, 93, 240, 0, 20),
|
||||
(188, 114, 96, 0, 20),
|
||||
(197, 234, 240, 0, 22),
|
||||
(198, 41, 128, 0, 17),
|
||||
(162, 158, 0, 0, 15),
|
||||
(104, 16, 0, 0, 13),
|
||||
(104, 24, 0, 0, 14),
|
||||
(172, 64, 0, 0, 13),
|
||||
(131, 0, 72, 0, 22),
|
||||
];
|
||||
|
||||
for &(a, b, c, d, prefix) in ranges {
|
||||
let cidr = format!("{a}.{b}.{c}.{d}/{prefix}");
|
||||
let filter = CloudflareIpFilter::from_lines(&cidr).unwrap();
|
||||
let (first, last, outside) = v4_range_bounds(a, b, c, d, prefix);
|
||||
assert!(
|
||||
filter.contains(&IpAddr::V4(first)),
|
||||
"First IP {first} should be in {cidr}"
|
||||
);
|
||||
assert!(
|
||||
filter.contains(&IpAddr::V4(last)),
|
||||
"Last IP {last} should be in {cidr}"
|
||||
);
|
||||
assert!(
|
||||
!filter.contains(&IpAddr::V4(outside)),
|
||||
"IP {outside} should NOT be in {cidr}"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_all_real_ipv6_ranges_match() {
|
||||
let filter = CloudflareIpFilter::from_lines(ALL_CF_RANGES).unwrap();
|
||||
|
||||
// (base high 16-bit segment, prefix len)
|
||||
let ranges: &[(u16, u16, u8)] = &[
|
||||
(0x2400, 0xcb00, 32),
|
||||
(0x2606, 0x4700, 32),
|
||||
(0x2803, 0xf800, 32),
|
||||
(0x2405, 0xb500, 32),
|
||||
(0x2405, 0x8100, 32),
|
||||
(0x2a06, 0x98c0, 29),
|
||||
(0x2c0f, 0xf248, 32),
|
||||
];
|
||||
|
||||
for &(seg0, seg1, prefix) in ranges {
|
||||
let base = u128::from(Ipv6Addr::new(seg0, seg1, 0, 0, 0, 0, 0, 0));
|
||||
let size = 1u128 << (128 - prefix);
|
||||
|
||||
let first = Ipv6Addr::from(base);
|
||||
let last = Ipv6Addr::from(base + size - 1);
|
||||
let outside = Ipv6Addr::from(base + size);
|
||||
|
||||
assert!(
|
||||
filter.contains(&IpAddr::V6(first)),
|
||||
"First IP {first} should be in {seg0:x}:{seg1:x}::/{prefix}"
|
||||
);
|
||||
assert!(
|
||||
filter.contains(&IpAddr::V6(last)),
|
||||
"Last IP {last} should be in {seg0:x}:{seg1:x}::/{prefix}"
|
||||
);
|
||||
assert!(
|
||||
!filter.contains(&IpAddr::V6(outside)),
|
||||
"IP {outside} should NOT be in {seg0:x}:{seg1:x}::/{prefix}"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -152,16 +152,16 @@ pub struct CloudflareHandle {
|
||||
client: Client,
|
||||
base_url: String,
|
||||
auth: Auth,
|
||||
managed_comment_regex: Option<regex::Regex>,
|
||||
managed_waf_comment_regex: Option<regex::Regex>,
|
||||
managed_comment_regex: Option<regex_lite::Regex>,
|
||||
managed_waf_comment_regex: Option<regex_lite::Regex>,
|
||||
}
|
||||
|
||||
impl CloudflareHandle {
|
||||
pub fn new(
|
||||
auth: Auth,
|
||||
update_timeout: Duration,
|
||||
managed_comment_regex: Option<regex::Regex>,
|
||||
managed_waf_comment_regex: Option<regex::Regex>,
|
||||
managed_comment_regex: Option<regex_lite::Regex>,
|
||||
managed_waf_comment_regex: Option<regex_lite::Regex>,
|
||||
) -> Self {
|
||||
let client = Client::builder()
|
||||
.timeout(update_timeout)
|
||||
@@ -182,6 +182,7 @@ impl CloudflareHandle {
|
||||
base_url: &str,
|
||||
auth: Auth,
|
||||
) -> Self {
|
||||
crate::init_crypto();
|
||||
let client = Client::builder()
|
||||
.timeout(Duration::from_secs(10))
|
||||
.build()
|
||||
@@ -200,39 +201,18 @@ impl CloudflareHandle {
|
||||
format!("{}/{path}", self.base_url)
|
||||
}
|
||||
|
||||
async fn api_get<T: serde::de::DeserializeOwned>(
|
||||
async fn api_request<T: serde::de::DeserializeOwned>(
|
||||
&self,
|
||||
method: reqwest::Method,
|
||||
path: &str,
|
||||
body: Option<&impl Serialize>,
|
||||
ppfmt: &PP,
|
||||
) -> Option<T> {
|
||||
let url = self.api_url(path);
|
||||
let req = self.auth.apply(self.client.get(&url));
|
||||
match req.send().await {
|
||||
Ok(resp) => {
|
||||
if resp.status().is_success() {
|
||||
resp.json::<T>().await.ok()
|
||||
} else {
|
||||
let url_str = resp.url().to_string();
|
||||
let text = resp.text().await.unwrap_or_default();
|
||||
ppfmt.errorf(pp::EMOJI_ERROR, &format!("API GET '{url_str}' failed: {text}"));
|
||||
None
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
ppfmt.errorf(pp::EMOJI_ERROR, &format!("API GET '{path}' error: {e}"));
|
||||
None
|
||||
}
|
||||
let mut req = self.auth.apply(self.client.request(method.clone(), &url));
|
||||
if let Some(b) = body {
|
||||
req = req.json(b);
|
||||
}
|
||||
}
|
||||
|
||||
async fn api_post<T: serde::de::DeserializeOwned, B: Serialize>(
|
||||
&self,
|
||||
path: &str,
|
||||
body: &B,
|
||||
ppfmt: &PP,
|
||||
) -> Option<T> {
|
||||
let url = self.api_url(path);
|
||||
let req = self.auth.apply(self.client.post(&url)).json(body);
|
||||
match req.send().await {
|
||||
Ok(resp) => {
|
||||
if resp.status().is_success() {
|
||||
@@ -240,63 +220,12 @@ impl CloudflareHandle {
|
||||
} else {
|
||||
let url_str = resp.url().to_string();
|
||||
let text = resp.text().await.unwrap_or_default();
|
||||
ppfmt.errorf(pp::EMOJI_ERROR, &format!("API POST '{url_str}' failed: {text}"));
|
||||
ppfmt.errorf(pp::EMOJI_ERROR, &format!("API {method} '{url_str}' failed: {text}"));
|
||||
None
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
ppfmt.errorf(pp::EMOJI_ERROR, &format!("API POST '{path}' error: {e}"));
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn api_put<T: serde::de::DeserializeOwned, B: Serialize>(
|
||||
&self,
|
||||
path: &str,
|
||||
body: &B,
|
||||
ppfmt: &PP,
|
||||
) -> Option<T> {
|
||||
let url = self.api_url(path);
|
||||
let req = self.auth.apply(self.client.put(&url)).json(body);
|
||||
match req.send().await {
|
||||
Ok(resp) => {
|
||||
if resp.status().is_success() {
|
||||
resp.json::<T>().await.ok()
|
||||
} else {
|
||||
let url_str = resp.url().to_string();
|
||||
let text = resp.text().await.unwrap_or_default();
|
||||
ppfmt.errorf(pp::EMOJI_ERROR, &format!("API PUT '{url_str}' failed: {text}"));
|
||||
None
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
ppfmt.errorf(pp::EMOJI_ERROR, &format!("API PUT '{path}' error: {e}"));
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn api_delete<T: serde::de::DeserializeOwned>(
|
||||
&self,
|
||||
path: &str,
|
||||
ppfmt: &PP,
|
||||
) -> Option<T> {
|
||||
let url = self.api_url(path);
|
||||
let req = self.auth.apply(self.client.delete(&url));
|
||||
match req.send().await {
|
||||
Ok(resp) => {
|
||||
if resp.status().is_success() {
|
||||
resp.json::<T>().await.ok()
|
||||
} else {
|
||||
let url_str = resp.url().to_string();
|
||||
let text = resp.text().await.unwrap_or_default();
|
||||
ppfmt.errorf(pp::EMOJI_ERROR, &format!("API DELETE '{url_str}' failed: {text}"));
|
||||
None
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
ppfmt.errorf(pp::EMOJI_ERROR, &format!("API DELETE '{path}' error: {e}"));
|
||||
ppfmt.errorf(pp::EMOJI_ERROR, &format!("API {method} '{path}' error: {e}"));
|
||||
None
|
||||
}
|
||||
}
|
||||
@@ -309,7 +238,7 @@ impl CloudflareHandle {
|
||||
let mut current = domain.to_string();
|
||||
loop {
|
||||
let resp: Option<CfListResponse<ZoneResult>> = self
|
||||
.api_get(&format!("zones?name={current}"), ppfmt)
|
||||
.api_request(reqwest::Method::GET, &format!("zones?name={current}"), None::<&()>, ppfmt)
|
||||
.await;
|
||||
if let Some(r) = resp {
|
||||
if let Some(zones) = r.result {
|
||||
@@ -340,7 +269,7 @@ impl CloudflareHandle {
|
||||
ppfmt: &PP,
|
||||
) -> Vec<DnsRecord> {
|
||||
let path = format!("zones/{zone_id}/dns_records?per_page=100&type={record_type}");
|
||||
let resp: Option<CfListResponse<DnsRecord>> = self.api_get(&path, ppfmt).await;
|
||||
let resp: Option<CfListResponse<DnsRecord>> = self.api_request(reqwest::Method::GET, &path, None::<&()>, ppfmt).await;
|
||||
resp.and_then(|r| r.result).unwrap_or_default()
|
||||
}
|
||||
|
||||
@@ -351,8 +280,16 @@ impl CloudflareHandle {
|
||||
name: &str,
|
||||
ppfmt: &PP,
|
||||
) -> Vec<DnsRecord> {
|
||||
// Cloudflare normalizes DNS record names to lowercase server-side, so a
|
||||
// case-sensitive match against the user-supplied name (e.g. ExaMple.com)
|
||||
// would never find existing records and trigger 81058 duplicate-create
|
||||
// errors on every cycle. Match case-insensitively to mirror Cloudflare's
|
||||
// own comparison rules.
|
||||
let records = self.list_records(zone_id, record_type, ppfmt).await;
|
||||
records.into_iter().filter(|r| r.name == name).collect()
|
||||
records
|
||||
.into_iter()
|
||||
.filter(|r| r.name.eq_ignore_ascii_case(name))
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn is_managed_record(&self, record: &DnsRecord) -> bool {
|
||||
@@ -372,7 +309,7 @@ impl CloudflareHandle {
|
||||
ppfmt: &PP,
|
||||
) -> Option<DnsRecord> {
|
||||
let path = format!("zones/{zone_id}/dns_records");
|
||||
let resp: Option<CfResponse<DnsRecord>> = self.api_post(&path, payload, ppfmt).await;
|
||||
let resp: Option<CfResponse<DnsRecord>> = self.api_request(reqwest::Method::POST, &path, Some(payload), ppfmt).await;
|
||||
resp.and_then(|r| r.result)
|
||||
}
|
||||
|
||||
@@ -384,7 +321,7 @@ impl CloudflareHandle {
|
||||
ppfmt: &PP,
|
||||
) -> Option<DnsRecord> {
|
||||
let path = format!("zones/{zone_id}/dns_records/{record_id}");
|
||||
let resp: Option<CfResponse<DnsRecord>> = self.api_put(&path, payload, ppfmt).await;
|
||||
let resp: Option<CfResponse<DnsRecord>> = self.api_request(reqwest::Method::PUT, &path, Some(payload), ppfmt).await;
|
||||
resp.and_then(|r| r.result)
|
||||
}
|
||||
|
||||
@@ -395,7 +332,7 @@ impl CloudflareHandle {
|
||||
ppfmt: &PP,
|
||||
) -> bool {
|
||||
let path = format!("zones/{zone_id}/dns_records/{record_id}");
|
||||
let resp: Option<CfResponse<serde_json::Value>> = self.api_delete(&path, ppfmt).await;
|
||||
let resp: Option<CfResponse<serde_json::Value>> = self.api_request(reqwest::Method::DELETE, &path, None::<&()>, ppfmt).await;
|
||||
resp.is_some()
|
||||
}
|
||||
|
||||
@@ -467,7 +404,7 @@ impl CloudflareHandle {
|
||||
self.update_record(zone_id, &record.id, &payload, ppfmt).await;
|
||||
}
|
||||
} else {
|
||||
ppfmt.infof(pp::EMOJI_SKIP, &format!("Record {fqdn} is up to date ({ip_str})"));
|
||||
// Caller handles "up to date" logging based on SetResult::Noop
|
||||
}
|
||||
} else {
|
||||
// Find an existing managed record to update, or create new
|
||||
@@ -550,7 +487,7 @@ impl CloudflareHandle {
|
||||
ppfmt: &PP,
|
||||
) -> Option<WAFListMeta> {
|
||||
let path = format!("accounts/{}/rules/lists", waf_list.account_id);
|
||||
let resp: Option<CfListResponse<WAFListMeta>> = self.api_get(&path, ppfmt).await;
|
||||
let resp: Option<CfListResponse<WAFListMeta>> = self.api_request(reqwest::Method::GET, &path, None::<&()>, ppfmt).await;
|
||||
resp.and_then(|r| r.result)
|
||||
.and_then(|lists| lists.into_iter().find(|l| l.name == waf_list.list_name))
|
||||
}
|
||||
@@ -562,7 +499,7 @@ impl CloudflareHandle {
|
||||
ppfmt: &PP,
|
||||
) -> Vec<WAFListItem> {
|
||||
let path = format!("accounts/{account_id}/rules/lists/{list_id}/items");
|
||||
let resp: Option<CfListResponse<WAFListItem>> = self.api_get(&path, ppfmt).await;
|
||||
let resp: Option<CfListResponse<WAFListItem>> = self.api_request(reqwest::Method::GET, &path, None::<&()>, ppfmt).await;
|
||||
resp.and_then(|r| r.result).unwrap_or_default()
|
||||
}
|
||||
|
||||
@@ -574,7 +511,7 @@ impl CloudflareHandle {
|
||||
ppfmt: &PP,
|
||||
) -> bool {
|
||||
let path = format!("accounts/{account_id}/rules/lists/{list_id}/items");
|
||||
let resp: Option<CfResponse<serde_json::Value>> = self.api_post(&path, &items, ppfmt).await;
|
||||
let resp: Option<CfResponse<serde_json::Value>> = self.api_request(reqwest::Method::POST, &path, Some(&items), ppfmt).await;
|
||||
resp.is_some()
|
||||
}
|
||||
|
||||
@@ -668,10 +605,7 @@ impl CloudflareHandle {
|
||||
.collect();
|
||||
|
||||
if to_add.is_empty() && ids_to_delete.is_empty() {
|
||||
ppfmt.infof(
|
||||
pp::EMOJI_SKIP,
|
||||
&format!("WAF list {} is up to date", waf_list.describe()),
|
||||
);
|
||||
// Caller handles "up to date" logging based on SetResult::Noop
|
||||
return SetResult::Noop;
|
||||
}
|
||||
|
||||
@@ -797,6 +731,7 @@ mod tests {
|
||||
}
|
||||
|
||||
fn handle_with_regex(base_url: &str, pattern: &str) -> CloudflareHandle {
|
||||
crate::init_crypto();
|
||||
let client = Client::builder()
|
||||
.timeout(Duration::from_secs(10))
|
||||
.build()
|
||||
@@ -805,7 +740,7 @@ mod tests {
|
||||
client,
|
||||
base_url: base_url.to_string(),
|
||||
auth: test_auth(),
|
||||
managed_comment_regex: Some(regex::Regex::new(pattern).unwrap()),
|
||||
managed_comment_regex: Some(regex_lite::Regex::new(pattern).unwrap()),
|
||||
managed_waf_comment_regex: None,
|
||||
}
|
||||
}
|
||||
@@ -999,6 +934,29 @@ mod tests {
|
||||
assert_eq!(records[1].id, "r2");
|
||||
}
|
||||
|
||||
// Issue #255: Cloudflare normalizes record names to lowercase, so a
|
||||
// case-sensitive match against the user-supplied name (e.g. ExaMple.com)
|
||||
// would loop forever creating duplicates. Verify match is case-insensitive.
|
||||
#[tokio::test]
|
||||
async fn list_records_by_name_case_insensitive() {
|
||||
let server = MockServer::start().await;
|
||||
let body = dns_list_response(vec![
|
||||
dns_record_json("r1", "example.com", "1.2.3.4", None),
|
||||
]);
|
||||
Mock::given(method("GET"))
|
||||
.and(path("/zones/z1/dns_records"))
|
||||
.respond_with(ResponseTemplate::new(200).set_body_json(body))
|
||||
.mount(&server)
|
||||
.await;
|
||||
|
||||
let h = handle(&server.uri());
|
||||
let records = h
|
||||
.list_records_by_name("z1", "A", "ExaMple.com", &pp())
|
||||
.await;
|
||||
assert_eq!(records.len(), 1);
|
||||
assert_eq!(records[0].id, "r1");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn list_records_by_name_filters() {
|
||||
let server = MockServer::start().await;
|
||||
@@ -1427,7 +1385,7 @@ mod tests {
|
||||
api_key: "key123".to_string(),
|
||||
email: "user@example.com".to_string(),
|
||||
};
|
||||
let client = Client::new();
|
||||
let client = crate::test_client();
|
||||
let req = client.get("http://example.com");
|
||||
let req = auth.apply(req);
|
||||
// Just verify it doesn't panic - we can't inspect headers easily
|
||||
@@ -1446,7 +1404,7 @@ mod tests {
|
||||
|
||||
let h = handle(&server.uri());
|
||||
let pp = PP::new(false, true); // quiet
|
||||
let result: Option<CfListResponse<ZoneResult>> = h.api_get("zones", &pp).await;
|
||||
let result: Option<CfListResponse<ZoneResult>> = h.api_request(reqwest::Method::GET, "zones", None::<&()>, &pp).await;
|
||||
assert!(result.is_none());
|
||||
}
|
||||
|
||||
@@ -1461,7 +1419,7 @@ mod tests {
|
||||
let h = handle(&server.uri());
|
||||
let pp = PP::new(false, true);
|
||||
let body = serde_json::json!({"test": true});
|
||||
let result: Option<CfResponse<serde_json::Value>> = h.api_post("endpoint", &body, &pp).await;
|
||||
let result: Option<CfResponse<serde_json::Value>> = h.api_request(reqwest::Method::POST, "endpoint", Some(&body), &pp).await;
|
||||
assert!(result.is_none());
|
||||
}
|
||||
|
||||
@@ -1476,7 +1434,7 @@ mod tests {
|
||||
let h = handle(&server.uri());
|
||||
let pp = PP::new(false, true);
|
||||
let body = serde_json::json!({"test": true});
|
||||
let result: Option<CfResponse<serde_json::Value>> = h.api_put("endpoint", &body, &pp).await;
|
||||
let result: Option<CfResponse<serde_json::Value>> = h.api_request(reqwest::Method::PUT, "endpoint", Some(&body), &pp).await;
|
||||
assert!(result.is_none());
|
||||
}
|
||||
|
||||
|
||||
197
src/config.rs
197
src/config.rs
@@ -27,6 +27,10 @@ pub struct LegacyConfig {
|
||||
pub purge_unknown_records: bool,
|
||||
#[serde(default = "default_ttl")]
|
||||
pub ttl: i64,
|
||||
#[serde(default)]
|
||||
pub ip4_provider: Option<String>,
|
||||
#[serde(default)]
|
||||
pub ip6_provider: Option<String>,
|
||||
}
|
||||
|
||||
fn default_true() -> bool {
|
||||
@@ -80,15 +84,17 @@ pub struct AppConfig {
|
||||
pub update_cron: CronSchedule,
|
||||
pub update_on_start: bool,
|
||||
pub delete_on_stop: bool,
|
||||
pub delete_on_failure: bool,
|
||||
pub ttl: TTL,
|
||||
pub proxied_expression: Option<Box<dyn Fn(&str) -> bool + Send + Sync>>,
|
||||
pub record_comment: Option<String>,
|
||||
pub managed_comment_regex: Option<regex::Regex>,
|
||||
pub managed_comment_regex: Option<regex_lite::Regex>,
|
||||
pub waf_list_description: Option<String>,
|
||||
pub waf_list_item_comment: Option<String>,
|
||||
pub managed_waf_comment_regex: Option<regex::Regex>,
|
||||
pub managed_waf_comment_regex: Option<regex_lite::Regex>,
|
||||
pub detection_timeout: Duration,
|
||||
pub update_timeout: Duration,
|
||||
pub reject_cloudflare_ips: bool,
|
||||
pub dry_run: bool,
|
||||
pub emoji: bool,
|
||||
pub quiet: bool,
|
||||
@@ -325,9 +331,9 @@ fn read_cron_from_env(ppfmt: &PP) -> Result<CronSchedule, String> {
|
||||
}
|
||||
}
|
||||
|
||||
fn read_regex(key: &str, ppfmt: &PP) -> Option<regex::Regex> {
|
||||
fn read_regex(key: &str, ppfmt: &PP) -> Option<regex_lite::Regex> {
|
||||
match getenv(key) {
|
||||
Some(s) if !s.is_empty() => match regex::Regex::new(&s) {
|
||||
Some(s) if !s.is_empty() => match regex_lite::Regex::new(&s) {
|
||||
Ok(r) => Some(r),
|
||||
Err(e) => {
|
||||
ppfmt.errorf(pp::EMOJI_ERROR, &format!("Invalid regex in {key}: {e}"));
|
||||
@@ -386,7 +392,7 @@ pub fn parse_legacy_config(content: &str) -> Result<LegacyConfig, String> {
|
||||
}
|
||||
|
||||
/// Convert a legacy config into a unified AppConfig
|
||||
fn legacy_to_app_config(legacy: LegacyConfig, dry_run: bool, repeat: bool) -> AppConfig {
|
||||
fn legacy_to_app_config(legacy: LegacyConfig, dry_run: bool, repeat: bool) -> Result<AppConfig, String> {
|
||||
// Extract auth from first entry
|
||||
let auth = if let Some(entry) = legacy.cloudflare.first() {
|
||||
if !entry.authentication.api_token.is_empty()
|
||||
@@ -405,13 +411,27 @@ fn legacy_to_app_config(legacy: LegacyConfig, dry_run: bool, repeat: bool) -> Ap
|
||||
Auth::Token(String::new())
|
||||
};
|
||||
|
||||
// Build providers
|
||||
// Build providers — ip4_provider/ip6_provider override the default cloudflare.trace
|
||||
let mut providers = HashMap::new();
|
||||
if legacy.a {
|
||||
providers.insert(IpType::V4, ProviderType::CloudflareTrace { url: None });
|
||||
let provider = match &legacy.ip4_provider {
|
||||
Some(s) => ProviderType::parse(s)
|
||||
.map_err(|e| format!("Invalid ip4_provider in config.json: {e}"))?,
|
||||
None => ProviderType::CloudflareTrace { url: None },
|
||||
};
|
||||
if !matches!(provider, ProviderType::None) {
|
||||
providers.insert(IpType::V4, provider);
|
||||
}
|
||||
}
|
||||
if legacy.aaaa {
|
||||
providers.insert(IpType::V6, ProviderType::CloudflareTrace { url: None });
|
||||
let provider = match &legacy.ip6_provider {
|
||||
Some(s) => ProviderType::parse(s)
|
||||
.map_err(|e| format!("Invalid ip6_provider in config.json: {e}"))?,
|
||||
None => ProviderType::CloudflareTrace { url: None },
|
||||
};
|
||||
if !matches!(provider, ProviderType::None) {
|
||||
providers.insert(IpType::V6, provider);
|
||||
}
|
||||
}
|
||||
|
||||
let ttl = TTL::new(legacy.ttl);
|
||||
@@ -422,7 +442,7 @@ fn legacy_to_app_config(legacy: LegacyConfig, dry_run: bool, repeat: bool) -> Ap
|
||||
CronSchedule::Once
|
||||
};
|
||||
|
||||
AppConfig {
|
||||
Ok(AppConfig {
|
||||
auth,
|
||||
providers,
|
||||
domains: HashMap::new(),
|
||||
@@ -430,6 +450,7 @@ fn legacy_to_app_config(legacy: LegacyConfig, dry_run: bool, repeat: bool) -> Ap
|
||||
update_cron: schedule,
|
||||
update_on_start: true,
|
||||
delete_on_stop: false,
|
||||
delete_on_failure: true,
|
||||
ttl,
|
||||
proxied_expression: None,
|
||||
record_comment: None,
|
||||
@@ -439,13 +460,14 @@ fn legacy_to_app_config(legacy: LegacyConfig, dry_run: bool, repeat: bool) -> Ap
|
||||
managed_waf_comment_regex: None,
|
||||
detection_timeout: Duration::from_secs(5),
|
||||
update_timeout: Duration::from_secs(30),
|
||||
reject_cloudflare_ips: getenv_bool("REJECT_CLOUDFLARE_IPS", true),
|
||||
dry_run,
|
||||
emoji: false,
|
||||
quiet: false,
|
||||
legacy_mode: true,
|
||||
legacy_config: Some(legacy),
|
||||
repeat,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// ============================================================
|
||||
@@ -483,6 +505,7 @@ pub fn load_env_config(ppfmt: &PP) -> Result<AppConfig, String> {
|
||||
let update_cron = read_cron_from_env(ppfmt)?;
|
||||
let update_on_start = getenv_bool("UPDATE_ON_START", true);
|
||||
let delete_on_stop = getenv_bool("DELETE_ON_STOP", false);
|
||||
let delete_on_failure = getenv_bool("DELETE_ON_FAILURE", true);
|
||||
|
||||
let ttl_val = getenv("TTL")
|
||||
.and_then(|s| s.parse::<i64>().ok())
|
||||
@@ -509,6 +532,7 @@ pub fn load_env_config(ppfmt: &PP) -> Result<AppConfig, String> {
|
||||
|
||||
let emoji = getenv_bool("EMOJI", true);
|
||||
let quiet = getenv_bool("QUIET", false);
|
||||
let reject_cloudflare_ips = getenv_bool("REJECT_CLOUDFLARE_IPS", true);
|
||||
|
||||
// Validate: must have at least one update target
|
||||
if domains.is_empty() && waf_lists.is_empty() {
|
||||
@@ -550,6 +574,7 @@ pub fn load_env_config(ppfmt: &PP) -> Result<AppConfig, String> {
|
||||
update_cron,
|
||||
update_on_start,
|
||||
delete_on_stop,
|
||||
delete_on_failure,
|
||||
ttl,
|
||||
proxied_expression,
|
||||
record_comment,
|
||||
@@ -559,6 +584,7 @@ pub fn load_env_config(ppfmt: &PP) -> Result<AppConfig, String> {
|
||||
managed_waf_comment_regex,
|
||||
detection_timeout,
|
||||
update_timeout,
|
||||
reject_cloudflare_ips,
|
||||
dry_run: false, // Set later from CLI args
|
||||
emoji,
|
||||
quiet,
|
||||
@@ -579,7 +605,7 @@ pub fn load_config(dry_run: bool, repeat: bool, ppfmt: &PP) -> Result<AppConfig,
|
||||
} else {
|
||||
ppfmt.infof(pp::EMOJI_CONFIG, "Using config.json configuration");
|
||||
let legacy = load_legacy_config()?;
|
||||
Ok(legacy_to_app_config(legacy, dry_run, repeat))
|
||||
legacy_to_app_config(legacy, dry_run, repeat)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -659,6 +685,10 @@ pub fn print_config_summary(config: &AppConfig, ppfmt: &PP) {
|
||||
inner.infof("", "Delete on stop: enabled");
|
||||
}
|
||||
|
||||
if !config.reject_cloudflare_ips {
|
||||
inner.warningf("", "Cloudflare IP rejection: DISABLED (REJECT_CLOUDFLARE_IPS=false)");
|
||||
}
|
||||
|
||||
if let Some(ref comment) = config.record_comment {
|
||||
inner.infof("", &format!("Record comment: {comment}"));
|
||||
}
|
||||
@@ -987,8 +1017,10 @@ mod tests {
|
||||
aaaa: false,
|
||||
purge_unknown_records: false,
|
||||
ttl: 300,
|
||||
ip4_provider: None,
|
||||
ip6_provider: None,
|
||||
};
|
||||
let config = legacy_to_app_config(legacy, false, false);
|
||||
let config = legacy_to_app_config(legacy, false, false).unwrap();
|
||||
assert!(config.legacy_mode);
|
||||
assert!(matches!(config.auth, Auth::Token(ref t) if t == "my-token"));
|
||||
assert!(config.providers.contains_key(&IpType::V4));
|
||||
@@ -1013,8 +1045,10 @@ mod tests {
|
||||
aaaa: true,
|
||||
purge_unknown_records: false,
|
||||
ttl: 120,
|
||||
ip4_provider: None,
|
||||
ip6_provider: None,
|
||||
};
|
||||
let config = legacy_to_app_config(legacy, true, true);
|
||||
let config = legacy_to_app_config(legacy, true, true).unwrap();
|
||||
assert!(matches!(config.update_cron, CronSchedule::Every(d) if d == Duration::from_secs(120)));
|
||||
assert!(config.repeat);
|
||||
assert!(config.dry_run);
|
||||
@@ -1039,12 +1073,118 @@ mod tests {
|
||||
aaaa: true,
|
||||
purge_unknown_records: false,
|
||||
ttl: 300,
|
||||
ip4_provider: None,
|
||||
ip6_provider: None,
|
||||
};
|
||||
let config = legacy_to_app_config(legacy, false, false);
|
||||
let config = legacy_to_app_config(legacy, false, false).unwrap();
|
||||
assert!(matches!(config.auth, Auth::Key { ref api_key, ref email }
|
||||
if api_key == "key123" && email == "test@example.com"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_legacy_to_app_config_custom_providers() {
|
||||
let legacy = LegacyConfig {
|
||||
cloudflare: vec![LegacyCloudflareEntry {
|
||||
authentication: LegacyAuthentication {
|
||||
api_token: "tok".to_string(),
|
||||
api_key: None,
|
||||
},
|
||||
zone_id: "z".to_string(),
|
||||
subdomains: vec![],
|
||||
proxied: false,
|
||||
}],
|
||||
a: true,
|
||||
aaaa: true,
|
||||
purge_unknown_records: false,
|
||||
ttl: 300,
|
||||
ip4_provider: Some("ipify".to_string()),
|
||||
ip6_provider: Some("cloudflare.doh".to_string()),
|
||||
};
|
||||
let config = legacy_to_app_config(legacy, false, false).unwrap();
|
||||
assert!(matches!(config.providers[&IpType::V4], ProviderType::Ipify));
|
||||
assert!(matches!(config.providers[&IpType::V6], ProviderType::CloudflareDOH));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_legacy_to_app_config_provider_none_overrides_a_flag() {
|
||||
let legacy = LegacyConfig {
|
||||
cloudflare: vec![LegacyCloudflareEntry {
|
||||
authentication: LegacyAuthentication {
|
||||
api_token: "tok".to_string(),
|
||||
api_key: None,
|
||||
},
|
||||
zone_id: "z".to_string(),
|
||||
subdomains: vec![],
|
||||
proxied: false,
|
||||
}],
|
||||
a: true,
|
||||
aaaa: true,
|
||||
purge_unknown_records: false,
|
||||
ttl: 300,
|
||||
ip4_provider: Some("none".to_string()),
|
||||
ip6_provider: None,
|
||||
};
|
||||
let config = legacy_to_app_config(legacy, false, false).unwrap();
|
||||
// ip4_provider=none should exclude V4 even though a=true
|
||||
assert!(!config.providers.contains_key(&IpType::V4));
|
||||
assert!(config.providers.contains_key(&IpType::V6));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_legacy_to_app_config_invalid_provider_returns_error() {
|
||||
let legacy = LegacyConfig {
|
||||
cloudflare: vec![LegacyCloudflareEntry {
|
||||
authentication: LegacyAuthentication {
|
||||
api_token: "tok".to_string(),
|
||||
api_key: None,
|
||||
},
|
||||
zone_id: "z".to_string(),
|
||||
subdomains: vec![],
|
||||
proxied: false,
|
||||
}],
|
||||
a: true,
|
||||
aaaa: false,
|
||||
purge_unknown_records: false,
|
||||
ttl: 300,
|
||||
ip4_provider: Some("totally_invalid".to_string()),
|
||||
ip6_provider: None,
|
||||
};
|
||||
let result = legacy_to_app_config(legacy, false, false);
|
||||
assert!(result.is_err());
|
||||
let err = result.err().unwrap();
|
||||
assert!(err.contains("ip4_provider"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_legacy_config_deserializes_providers() {
|
||||
let json = r#"{
|
||||
"cloudflare": [{
|
||||
"authentication": { "api_token": "tok" },
|
||||
"zone_id": "z",
|
||||
"subdomains": ["@"]
|
||||
}],
|
||||
"ip4_provider": "ipify",
|
||||
"ip6_provider": "none"
|
||||
}"#;
|
||||
let config = parse_legacy_config(json).unwrap();
|
||||
assert_eq!(config.ip4_provider, Some("ipify".to_string()));
|
||||
assert_eq!(config.ip6_provider, Some("none".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_legacy_config_deserializes_without_providers() {
|
||||
let json = r#"{
|
||||
"cloudflare": [{
|
||||
"authentication": { "api_token": "tok" },
|
||||
"zone_id": "z",
|
||||
"subdomains": ["@"]
|
||||
}]
|
||||
}"#;
|
||||
let config = parse_legacy_config(json).unwrap();
|
||||
assert!(config.ip4_provider.is_none());
|
||||
assert!(config.ip6_provider.is_none());
|
||||
}
|
||||
|
||||
// --- is_env_config_mode ---
|
||||
|
||||
#[test]
|
||||
@@ -1181,6 +1321,7 @@ mod tests {
|
||||
update_cron: CronSchedule::Once,
|
||||
update_on_start: true,
|
||||
delete_on_stop: false,
|
||||
delete_on_failure: true,
|
||||
ttl: TTL::AUTO,
|
||||
proxied_expression: None,
|
||||
record_comment: None,
|
||||
@@ -1190,6 +1331,7 @@ mod tests {
|
||||
managed_waf_comment_regex: None,
|
||||
detection_timeout: Duration::from_secs(5),
|
||||
update_timeout: Duration::from_secs(30),
|
||||
reject_cloudflare_ips: false,
|
||||
dry_run: false,
|
||||
emoji: false,
|
||||
quiet: false,
|
||||
@@ -1214,6 +1356,7 @@ mod tests {
|
||||
update_cron: CronSchedule::Every(Duration::from_secs(300)),
|
||||
update_on_start: true,
|
||||
delete_on_stop: true,
|
||||
delete_on_failure: true,
|
||||
ttl: TTL::new(60),
|
||||
proxied_expression: None,
|
||||
record_comment: Some("managed".to_string()),
|
||||
@@ -1223,6 +1366,7 @@ mod tests {
|
||||
managed_waf_comment_regex: None,
|
||||
detection_timeout: Duration::from_secs(5),
|
||||
update_timeout: Duration::from_secs(30),
|
||||
reject_cloudflare_ips: false,
|
||||
dry_run: false,
|
||||
emoji: false,
|
||||
quiet: false,
|
||||
@@ -1793,19 +1937,16 @@ mod tests {
|
||||
let mut g = EnvGuard::set("_PLACEHOLDER_SN", "x");
|
||||
g.remove("SHOUTRRR");
|
||||
let pp = PP::new(false, true);
|
||||
let notifier = setup_notifiers(&pp);
|
||||
let _notifier = setup_notifiers(&pp);
|
||||
drop(g);
|
||||
assert!(notifier.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_setup_notifiers_empty_shoutrrr_returns_empty() {
|
||||
let g = EnvGuard::set("SHOUTRRR", "");
|
||||
let pp = PP::new(false, true);
|
||||
let notifier = setup_notifiers(&pp);
|
||||
let _notifier = setup_notifiers(&pp);
|
||||
drop(g);
|
||||
// Empty string is treated as unset by getenv_list.
|
||||
assert!(notifier.is_empty());
|
||||
}
|
||||
|
||||
// ============================================================
|
||||
@@ -1818,9 +1959,8 @@ mod tests {
|
||||
g.remove("HEALTHCHECKS");
|
||||
g.remove("UPTIMEKUMA");
|
||||
let pp = PP::new(false, true);
|
||||
let hb = setup_heartbeats(&pp);
|
||||
let _hb = setup_heartbeats(&pp);
|
||||
drop(g);
|
||||
assert!(hb.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1828,9 +1968,8 @@ mod tests {
|
||||
let mut g = EnvGuard::set("HEALTHCHECKS", "https://hc-ping.com/abc123");
|
||||
g.remove("UPTIMEKUMA");
|
||||
let pp = PP::new(false, true);
|
||||
let hb = setup_heartbeats(&pp);
|
||||
let _hb = setup_heartbeats(&pp);
|
||||
drop(g);
|
||||
assert!(!hb.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1838,9 +1977,8 @@ mod tests {
|
||||
let mut g = EnvGuard::set("UPTIMEKUMA", "https://status.example.com/api/push/abc");
|
||||
g.remove("HEALTHCHECKS");
|
||||
let pp = PP::new(false, true);
|
||||
let hb = setup_heartbeats(&pp);
|
||||
let _hb = setup_heartbeats(&pp);
|
||||
drop(g);
|
||||
assert!(!hb.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1848,9 +1986,8 @@ mod tests {
|
||||
let mut g = EnvGuard::set("HEALTHCHECKS", "https://hc-ping.com/abc");
|
||||
g.add("UPTIMEKUMA", "https://status.example.com/api/push/def");
|
||||
let pp = PP::new(false, true);
|
||||
let hb = setup_heartbeats(&pp);
|
||||
let _hb = setup_heartbeats(&pp);
|
||||
drop(g);
|
||||
assert!(!hb.is_empty());
|
||||
}
|
||||
|
||||
// ============================================================
|
||||
@@ -1872,6 +2009,7 @@ mod tests {
|
||||
update_cron: CronSchedule::Every(Duration::from_secs(300)),
|
||||
update_on_start: true,
|
||||
delete_on_stop: false,
|
||||
delete_on_failure: true,
|
||||
ttl: TTL::AUTO,
|
||||
proxied_expression: None,
|
||||
record_comment: None,
|
||||
@@ -1881,6 +2019,7 @@ mod tests {
|
||||
managed_waf_comment_regex: None,
|
||||
detection_timeout: Duration::from_secs(5),
|
||||
update_timeout: Duration::from_secs(30),
|
||||
reject_cloudflare_ips: false,
|
||||
dry_run: false,
|
||||
emoji: false,
|
||||
quiet: false,
|
||||
@@ -1907,6 +2046,7 @@ mod tests {
|
||||
update_cron: CronSchedule::Every(Duration::from_secs(600)),
|
||||
update_on_start: true,
|
||||
delete_on_stop: true,
|
||||
delete_on_failure: true,
|
||||
ttl: TTL::new(120),
|
||||
proxied_expression: None,
|
||||
record_comment: Some("cf-ddns".to_string()),
|
||||
@@ -1916,6 +2056,7 @@ mod tests {
|
||||
managed_waf_comment_regex: None,
|
||||
detection_timeout: Duration::from_secs(5),
|
||||
update_timeout: Duration::from_secs(30),
|
||||
reject_cloudflare_ips: false,
|
||||
dry_run: false,
|
||||
emoji: false,
|
||||
quiet: true,
|
||||
@@ -1939,6 +2080,7 @@ mod tests {
|
||||
update_cron: CronSchedule::Once,
|
||||
update_on_start: true,
|
||||
delete_on_stop: false,
|
||||
delete_on_failure: true,
|
||||
ttl: TTL::AUTO,
|
||||
proxied_expression: None,
|
||||
record_comment: None,
|
||||
@@ -1948,6 +2090,7 @@ mod tests {
|
||||
managed_waf_comment_regex: None,
|
||||
detection_timeout: Duration::from_secs(5),
|
||||
update_timeout: Duration::from_secs(30),
|
||||
reject_cloudflare_ips: false,
|
||||
dry_run: false,
|
||||
emoji: false,
|
||||
quiet: false,
|
||||
|
||||
253
src/domain.rs
253
src/domain.rs
@@ -1,129 +1,14 @@
|
||||
use std::fmt;
|
||||
|
||||
/// Represents a DNS domain - either a regular FQDN or a wildcard.
|
||||
#[allow(dead_code)]
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
|
||||
pub enum Domain {
|
||||
FQDN(String),
|
||||
Wildcard(String),
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
impl Domain {
|
||||
/// Parse a domain string. Handles:
|
||||
/// - "@" or "" -> root domain (handled at FQDN construction time)
|
||||
/// - "*.example.com" -> wildcard
|
||||
/// - "sub.example.com" -> regular FQDN
|
||||
pub fn new(input: &str) -> Result<Self, String> {
|
||||
let trimmed = input.trim().to_lowercase();
|
||||
if trimmed.starts_with("*.") {
|
||||
let base = &trimmed[2..];
|
||||
let ascii = domain_to_ascii(base)?;
|
||||
Ok(Domain::Wildcard(ascii))
|
||||
} else {
|
||||
let ascii = domain_to_ascii(&trimmed)?;
|
||||
Ok(Domain::FQDN(ascii))
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the DNS name in ASCII form suitable for API calls.
|
||||
pub fn dns_name_ascii(&self) -> String {
|
||||
match self {
|
||||
Domain::FQDN(s) => s.clone(),
|
||||
Domain::Wildcard(s) => format!("*.{s}"),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a human-readable description of the domain.
|
||||
pub fn describe(&self) -> String {
|
||||
match self {
|
||||
Domain::FQDN(s) => describe_domain(s),
|
||||
Domain::Wildcard(s) => format!("*.{}", describe_domain(s)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the zones (parent domains) for this domain, from most specific to least.
|
||||
pub fn zones(&self) -> Vec<String> {
|
||||
let base = match self {
|
||||
Domain::FQDN(s) => s.as_str(),
|
||||
Domain::Wildcard(s) => s.as_str(),
|
||||
};
|
||||
let mut zones = Vec::new();
|
||||
let mut current = base.to_string();
|
||||
while !current.is_empty() {
|
||||
zones.push(current.clone());
|
||||
if let Some(pos) = current.find('.') {
|
||||
current = current[pos + 1..].to_string();
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
zones
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Domain {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.describe())
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct an FQDN from a subdomain name and base domain.
|
||||
pub fn make_fqdn(subdomain: &str, base_domain: &str) -> String {
|
||||
let name = subdomain.to_lowercase();
|
||||
let name = name.trim();
|
||||
if name.is_empty() || name == "@" {
|
||||
base_domain.to_lowercase()
|
||||
} else if name.starts_with("*.") {
|
||||
// Wildcard subdomain
|
||||
format!("{name}.{}", base_domain.to_lowercase())
|
||||
} else {
|
||||
format!("{name}.{}", base_domain.to_lowercase())
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert a domain to ASCII using IDNA encoding.
|
||||
#[allow(dead_code)]
|
||||
fn domain_to_ascii(domain: &str) -> Result<String, String> {
|
||||
if domain.is_empty() {
|
||||
return Ok(String::new());
|
||||
}
|
||||
// Try IDNA encoding for internationalized domain names
|
||||
match idna::domain_to_ascii(domain) {
|
||||
Ok(ascii) => Ok(ascii),
|
||||
Err(_) => {
|
||||
// Fallback: if it's already ASCII, just return it
|
||||
if domain.is_ascii() {
|
||||
Ok(domain.to_string())
|
||||
} else {
|
||||
Err(format!("Invalid domain name: {domain}"))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert ASCII domain back to Unicode for display.
|
||||
#[allow(dead_code)]
|
||||
fn describe_domain(ascii: &str) -> String {
|
||||
// Try to convert punycode back to unicode for display
|
||||
match idna::domain_to_unicode(ascii) {
|
||||
(unicode, Ok(())) => unicode,
|
||||
_ => ascii.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse a comma-separated list of domain strings.
|
||||
#[allow(dead_code)]
|
||||
pub fn parse_domain_list(input: &str) -> Result<Vec<Domain>, String> {
|
||||
if input.trim().is_empty() {
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
input
|
||||
.split(',')
|
||||
.map(|s| Domain::new(s.trim()))
|
||||
.collect()
|
||||
}
|
||||
|
||||
// --- Domain Expression Evaluator ---
|
||||
// Supports: true, false, is(domain,...), sub(domain,...), !, &&, ||, ()
|
||||
|
||||
@@ -305,18 +190,6 @@ mod tests {
|
||||
assert_eq!(make_fqdn("VPN", "Example.COM"), "vpn.example.com");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_domain_wildcard() {
|
||||
let d = Domain::new("*.example.com").unwrap();
|
||||
assert_eq!(d.dns_name_ascii(), "*.example.com");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_domain_list() {
|
||||
let domains = parse_domain_list("example.com, *.example.com, sub.example.com").unwrap();
|
||||
assert_eq!(domains.len(), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_proxied_expr_true() {
|
||||
let pred = parse_proxied_expression("true").unwrap();
|
||||
@@ -359,129 +232,6 @@ mod tests {
|
||||
assert!(pred("public.com"));
|
||||
}
|
||||
|
||||
// --- Domain::new with regular FQDN ---
|
||||
#[test]
|
||||
fn test_domain_new_fqdn() {
|
||||
let d = Domain::new("example.com").unwrap();
|
||||
assert_eq!(d, Domain::FQDN("example.com".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_domain_new_fqdn_uppercase() {
|
||||
let d = Domain::new("EXAMPLE.COM").unwrap();
|
||||
assert_eq!(d, Domain::FQDN("example.com".to_string()));
|
||||
}
|
||||
|
||||
// --- Domain::dns_name_ascii for FQDN ---
|
||||
#[test]
|
||||
fn test_dns_name_ascii_fqdn() {
|
||||
let d = Domain::FQDN("example.com".to_string());
|
||||
assert_eq!(d.dns_name_ascii(), "example.com");
|
||||
}
|
||||
|
||||
// --- Domain::describe for both variants ---
|
||||
#[test]
|
||||
fn test_describe_fqdn() {
|
||||
let d = Domain::FQDN("example.com".to_string());
|
||||
// ASCII domain should round-trip through describe unchanged
|
||||
assert_eq!(d.describe(), "example.com");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_describe_wildcard() {
|
||||
let d = Domain::Wildcard("example.com".to_string());
|
||||
assert_eq!(d.describe(), "*.example.com");
|
||||
}
|
||||
|
||||
// --- Domain::zones ---
|
||||
#[test]
|
||||
fn test_zones_fqdn() {
|
||||
let d = Domain::FQDN("sub.example.com".to_string());
|
||||
let zones = d.zones();
|
||||
assert_eq!(zones, vec!["sub.example.com", "example.com", "com"]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_zones_wildcard() {
|
||||
let d = Domain::Wildcard("example.com".to_string());
|
||||
let zones = d.zones();
|
||||
assert_eq!(zones, vec!["example.com", "com"]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_zones_single_label() {
|
||||
let d = Domain::FQDN("localhost".to_string());
|
||||
let zones = d.zones();
|
||||
assert_eq!(zones, vec!["localhost"]);
|
||||
}
|
||||
|
||||
// --- Domain Display trait ---
|
||||
#[test]
|
||||
fn test_display_fqdn() {
|
||||
let d = Domain::FQDN("example.com".to_string());
|
||||
assert_eq!(format!("{d}"), "example.com");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_display_wildcard() {
|
||||
let d = Domain::Wildcard("example.com".to_string());
|
||||
assert_eq!(format!("{d}"), "*.example.com");
|
||||
}
|
||||
|
||||
// --- domain_to_ascii (tested indirectly via Domain::new) ---
|
||||
#[test]
|
||||
fn test_domain_new_empty_string() {
|
||||
// empty string -> domain_to_ascii returns Ok("") -> Domain::FQDN("")
|
||||
let d = Domain::new("").unwrap();
|
||||
assert_eq!(d, Domain::FQDN("".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_domain_new_ascii_domain() {
|
||||
let d = Domain::new("www.example.org").unwrap();
|
||||
assert_eq!(d.dns_name_ascii(), "www.example.org");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_domain_new_internationalized() {
|
||||
// "münchen.de" should be encoded to punycode
|
||||
let d = Domain::new("münchen.de").unwrap();
|
||||
let ascii = d.dns_name_ascii();
|
||||
// The punycode-encoded form should start with "xn--"
|
||||
assert!(ascii.contains("xn--"), "expected punycode, got: {ascii}");
|
||||
}
|
||||
|
||||
// --- describe_domain (tested indirectly via Domain::describe) ---
|
||||
#[test]
|
||||
fn test_describe_punycode_roundtrip() {
|
||||
// Build a domain with a known punycode label and confirm describe decodes it
|
||||
let d = Domain::new("münchen.de").unwrap();
|
||||
let described = d.describe();
|
||||
// Should contain the Unicode form, not the raw punycode
|
||||
assert!(described.contains("münchen") || described.contains("xn--"),
|
||||
"describe returned: {described}");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_describe_regular_ascii() {
|
||||
let d = Domain::FQDN("example.com".to_string());
|
||||
assert_eq!(d.describe(), "example.com");
|
||||
}
|
||||
|
||||
// --- parse_domain_list with empty input ---
|
||||
#[test]
|
||||
fn test_parse_domain_list_empty() {
|
||||
let result = parse_domain_list("").unwrap();
|
||||
assert!(result.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_domain_list_whitespace_only() {
|
||||
let result = parse_domain_list(" ").unwrap();
|
||||
assert!(result.is_empty());
|
||||
}
|
||||
|
||||
// --- Tokenizer edge cases (via parse_proxied_expression) ---
|
||||
#[test]
|
||||
fn test_tokenizer_single_ampersand_error() {
|
||||
let result = parse_proxied_expression("is(a.com) & is(b.com)");
|
||||
@@ -504,7 +254,6 @@ mod tests {
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
// --- Parser edge cases ---
|
||||
#[test]
|
||||
fn test_parse_and_expr_double_ampersand() {
|
||||
let pred = parse_proxied_expression("is(a.com) && is(b.com)").unwrap();
|
||||
@@ -538,10 +287,8 @@ mod tests {
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
// --- make_fqdn with wildcard subdomain ---
|
||||
#[test]
|
||||
fn test_make_fqdn_wildcard_subdomain() {
|
||||
// A name starting with "*." is treated as a wildcard subdomain
|
||||
assert_eq!(make_fqdn("*.sub", "example.com"), "*.sub.example.com");
|
||||
}
|
||||
}
|
||||
|
||||
160
src/main.rs
160
src/main.rs
@@ -1,3 +1,4 @@
|
||||
mod cf_ip_filter;
|
||||
mod cloudflare;
|
||||
mod config;
|
||||
mod domain;
|
||||
@@ -10,15 +11,22 @@ use crate::cloudflare::{Auth, CloudflareHandle};
|
||||
use crate::config::{AppConfig, CronSchedule};
|
||||
use crate::notifier::{CompositeNotifier, Heartbeat, Message};
|
||||
use crate::pp::PP;
|
||||
use std::collections::HashSet;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use rand::RngExt;
|
||||
use reqwest::Client;
|
||||
use tokio::signal;
|
||||
use tokio::time::{sleep, Duration};
|
||||
|
||||
const VERSION: &str = env!("CARGO_PKG_VERSION");
|
||||
|
||||
#[tokio::main]
|
||||
#[tokio::main(flavor = "current_thread")]
|
||||
async fn main() {
|
||||
rustls::crypto::ring::default_provider()
|
||||
.install_default()
|
||||
.expect("Failed to install rustls crypto provider");
|
||||
|
||||
// Parse CLI args
|
||||
let args: Vec<String> = std::env::args().collect();
|
||||
let dry_run = args.iter().any(|a| a == "--dry-run");
|
||||
@@ -115,12 +123,18 @@ async fn main() {
|
||||
// Start heartbeat
|
||||
heartbeat.start().await;
|
||||
|
||||
let mut cf_cache = cf_ip_filter::CachedCloudflareFilter::new();
|
||||
let detection_client = Client::builder()
|
||||
.timeout(app_config.detection_timeout)
|
||||
.build()
|
||||
.unwrap_or_default();
|
||||
|
||||
if app_config.legacy_mode {
|
||||
// --- Legacy mode (original cloudflare-ddns behavior) ---
|
||||
run_legacy_mode(&app_config, &handle, ¬ifier, &heartbeat, &ppfmt, running).await;
|
||||
run_legacy_mode(&app_config, &handle, ¬ifier, &heartbeat, &ppfmt, running, &mut cf_cache, &detection_client).await;
|
||||
} else {
|
||||
// --- Env var mode (cf-ddns behavior) ---
|
||||
run_env_mode(&app_config, &handle, ¬ifier, &heartbeat, &ppfmt, running).await;
|
||||
run_env_mode(&app_config, &handle, ¬ifier, &heartbeat, &ppfmt, running, &mut cf_cache, &detection_client).await;
|
||||
}
|
||||
|
||||
// On shutdown: delete records if configured
|
||||
@@ -142,12 +156,16 @@ async fn run_legacy_mode(
|
||||
heartbeat: &Heartbeat,
|
||||
ppfmt: &PP,
|
||||
running: Arc<AtomicBool>,
|
||||
cf_cache: &mut cf_ip_filter::CachedCloudflareFilter,
|
||||
detection_client: &Client,
|
||||
) {
|
||||
let legacy = match &config.legacy_config {
|
||||
Some(l) => l,
|
||||
None => return,
|
||||
};
|
||||
|
||||
let mut noop_reported = HashSet::new();
|
||||
|
||||
if config.repeat {
|
||||
match (legacy.a, legacy.aaaa) {
|
||||
(true, true) => println!(
|
||||
@@ -164,7 +182,7 @@ async fn run_legacy_mode(
|
||||
}
|
||||
|
||||
while running.load(Ordering::SeqCst) {
|
||||
updater::update_once(config, handle, notifier, heartbeat, ppfmt).await;
|
||||
updater::update_once(config, handle, notifier, heartbeat, cf_cache, ppfmt, &mut noop_reported, detection_client).await;
|
||||
|
||||
for _ in 0..legacy.ttl {
|
||||
if !running.load(Ordering::SeqCst) {
|
||||
@@ -174,7 +192,7 @@ async fn run_legacy_mode(
|
||||
}
|
||||
}
|
||||
} else {
|
||||
updater::update_once(config, handle, notifier, heartbeat, ppfmt).await;
|
||||
updater::update_once(config, handle, notifier, heartbeat, cf_cache, ppfmt, &mut noop_reported, detection_client).await;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -185,11 +203,15 @@ async fn run_env_mode(
|
||||
heartbeat: &Heartbeat,
|
||||
ppfmt: &PP,
|
||||
running: Arc<AtomicBool>,
|
||||
cf_cache: &mut cf_ip_filter::CachedCloudflareFilter,
|
||||
detection_client: &Client,
|
||||
) {
|
||||
let mut noop_reported = HashSet::new();
|
||||
|
||||
match &config.update_cron {
|
||||
CronSchedule::Once => {
|
||||
if config.update_on_start {
|
||||
updater::update_once(config, handle, notifier, heartbeat, ppfmt).await;
|
||||
updater::update_once(config, handle, notifier, heartbeat, cf_cache, ppfmt, &mut noop_reported, detection_client).await;
|
||||
}
|
||||
}
|
||||
schedule => {
|
||||
@@ -205,20 +227,18 @@ async fn run_env_mode(
|
||||
|
||||
// Update on start if configured
|
||||
if config.update_on_start {
|
||||
updater::update_once(config, handle, notifier, heartbeat, ppfmt).await;
|
||||
updater::update_once(config, handle, notifier, heartbeat, cf_cache, ppfmt, &mut noop_reported, detection_client).await;
|
||||
}
|
||||
|
||||
// Main loop
|
||||
while running.load(Ordering::SeqCst) {
|
||||
// Sleep for interval, checking running flag each second
|
||||
let secs = interval.as_secs();
|
||||
let next_time = chrono::Local::now() + chrono::Duration::seconds(secs as i64);
|
||||
let mins = secs / 60;
|
||||
let rem_secs = secs % 60;
|
||||
ppfmt.infof(
|
||||
pp::EMOJI_SLEEP,
|
||||
&format!(
|
||||
"Next update at {}",
|
||||
next_time.format("%Y-%m-%d %H:%M:%S %Z")
|
||||
),
|
||||
&format!("Next update in {}m {}s", mins, rem_secs),
|
||||
);
|
||||
|
||||
for _ in 0..secs {
|
||||
@@ -232,12 +252,28 @@ async fn run_env_mode(
|
||||
return;
|
||||
}
|
||||
|
||||
updater::update_once(config, handle, notifier, heartbeat, ppfmt).await;
|
||||
// Apply proportional jitter before each update to spread API calls
|
||||
// across clients and reduce synchronized traffic spikes at Cloudflare.
|
||||
let max_jitter = interval.as_secs() / 5;
|
||||
if max_jitter > 0 {
|
||||
let jitter_secs = rand::rng().random_range(0..=max_jitter);
|
||||
sleep(std::time::Duration::from_secs(jitter_secs)).await;
|
||||
}
|
||||
|
||||
updater::update_once(config, handle, notifier, heartbeat, cf_cache, ppfmt, &mut noop_reported, detection_client).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn jitter_duration(interval_secs: u64, rand_val: u64) -> std::time::Duration {
|
||||
let max_jitter = interval_secs / 5;
|
||||
if max_jitter == 0 {
|
||||
return std::time::Duration::ZERO;
|
||||
}
|
||||
std::time::Duration::from_secs(rand_val % (max_jitter + 1))
|
||||
}
|
||||
|
||||
fn describe_duration(d: Duration) -> String {
|
||||
let secs = d.as_secs();
|
||||
if secs >= 3600 {
|
||||
@@ -265,6 +301,21 @@ fn describe_duration(d: Duration) -> String {
|
||||
// Tests (backwards compatible with original test suite)
|
||||
// ============================================================
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn init_crypto() {
|
||||
use std::sync::Once;
|
||||
static INIT: Once = Once::new();
|
||||
INIT.call_once(|| {
|
||||
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||
});
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn test_client() -> reqwest::Client {
|
||||
init_crypto();
|
||||
reqwest::Client::new()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::config::{
|
||||
@@ -300,6 +351,8 @@ mod tests {
|
||||
aaaa: false,
|
||||
purge_unknown_records: false,
|
||||
ttl: 300,
|
||||
ip4_provider: None,
|
||||
ip6_provider: None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -314,7 +367,7 @@ mod tests {
|
||||
impl TestDdnsClient {
|
||||
fn new(base_url: &str) -> Self {
|
||||
Self {
|
||||
client: Client::new(),
|
||||
client: crate::test_client(),
|
||||
cf_api_base: base_url.to_string(),
|
||||
ipv4_urls: vec![format!("{base_url}/cdn-cgi/trace")],
|
||||
dry_run: false,
|
||||
@@ -379,6 +432,7 @@ mod tests {
|
||||
config: &[LegacyCloudflareEntry],
|
||||
ttl: i64,
|
||||
purge_unknown_records: bool,
|
||||
noop_reported: &mut std::collections::HashSet<String>,
|
||||
) {
|
||||
for entry in config {
|
||||
#[derive(serde::Deserialize)]
|
||||
@@ -480,8 +534,10 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
let noop_key = format!("{fqdn}:{record_type}");
|
||||
if let Some(ref id) = identifier {
|
||||
if modified {
|
||||
noop_reported.remove(&noop_key);
|
||||
if self.dry_run {
|
||||
println!("[DRY RUN] Would update record {fqdn} -> {ip}");
|
||||
} else {
|
||||
@@ -497,23 +553,30 @@ mod tests {
|
||||
)
|
||||
.await;
|
||||
}
|
||||
} else if self.dry_run {
|
||||
println!("[DRY RUN] Record {fqdn} is up to date ({ip})");
|
||||
} else if noop_reported.insert(noop_key) {
|
||||
if self.dry_run {
|
||||
println!("[DRY RUN] Record {fqdn} is up to date");
|
||||
} else {
|
||||
println!("Record {fqdn} is up to date");
|
||||
}
|
||||
}
|
||||
} else if self.dry_run {
|
||||
println!("[DRY RUN] Would add new record {fqdn} -> {ip}");
|
||||
} else {
|
||||
println!("Adding new record {fqdn} -> {ip}");
|
||||
let create_endpoint =
|
||||
format!("zones/{}/dns_records", entry.zone_id);
|
||||
let _: Option<serde_json::Value> = self
|
||||
.cf_api(
|
||||
&create_endpoint,
|
||||
"POST",
|
||||
&entry.authentication.api_token,
|
||||
Some(&record),
|
||||
)
|
||||
.await;
|
||||
noop_reported.remove(&noop_key);
|
||||
if self.dry_run {
|
||||
println!("[DRY RUN] Would add new record {fqdn} -> {ip}");
|
||||
} else {
|
||||
println!("Adding new record {fqdn} -> {ip}");
|
||||
let create_endpoint =
|
||||
format!("zones/{}/dns_records", entry.zone_id);
|
||||
let _: Option<serde_json::Value> = self
|
||||
.cf_api(
|
||||
&create_endpoint,
|
||||
"POST",
|
||||
&entry.authentication.api_token,
|
||||
Some(&record),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
|
||||
if purge_unknown_records {
|
||||
@@ -633,7 +696,7 @@ mod tests {
|
||||
|
||||
let ddns = TestDdnsClient::new(&mock_server.uri());
|
||||
let config = test_config(zone_id);
|
||||
ddns.commit_record("198.51.100.7", "A", &config.cloudflare, 300, false)
|
||||
ddns.commit_record("198.51.100.7", "A", &config.cloudflare, 300, false, &mut std::collections::HashSet::new())
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -682,7 +745,7 @@ mod tests {
|
||||
|
||||
let ddns = TestDdnsClient::new(&mock_server.uri());
|
||||
let config = test_config(zone_id);
|
||||
ddns.commit_record("198.51.100.7", "A", &config.cloudflare, 300, false)
|
||||
ddns.commit_record("198.51.100.7", "A", &config.cloudflare, 300, false, &mut std::collections::HashSet::new())
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -725,7 +788,7 @@ mod tests {
|
||||
|
||||
let ddns = TestDdnsClient::new(&mock_server.uri());
|
||||
let config = test_config(zone_id);
|
||||
ddns.commit_record("198.51.100.7", "A", &config.cloudflare, 300, false)
|
||||
ddns.commit_record("198.51.100.7", "A", &config.cloudflare, 300, false, &mut std::collections::HashSet::new())
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -759,7 +822,7 @@ mod tests {
|
||||
|
||||
let ddns = TestDdnsClient::new(&mock_server.uri()).dry_run();
|
||||
let config = test_config(zone_id);
|
||||
ddns.commit_record("198.51.100.7", "A", &config.cloudflare, 300, false)
|
||||
ddns.commit_record("198.51.100.7", "A", &config.cloudflare, 300, false, &mut std::collections::HashSet::new())
|
||||
.await;
|
||||
}
|
||||
|
||||
@@ -813,11 +876,36 @@ mod tests {
|
||||
aaaa: false,
|
||||
purge_unknown_records: true,
|
||||
ttl: 300,
|
||||
ip4_provider: None,
|
||||
ip6_provider: None,
|
||||
};
|
||||
ddns.commit_record("198.51.100.7", "A", &config.cloudflare, 300, true)
|
||||
ddns.commit_record("198.51.100.7", "A", &config.cloudflare, 300, true, &mut std::collections::HashSet::new())
|
||||
.await;
|
||||
}
|
||||
|
||||
// --- jitter_duration tests ---
|
||||
#[test]
|
||||
fn test_jitter_duration_standard() {
|
||||
// 5-minute interval: max jitter = 60s
|
||||
let d = super::jitter_duration(300, 30);
|
||||
assert_eq!(d, std::time::Duration::from_secs(30));
|
||||
let d = super::jitter_duration(300, 61);
|
||||
assert_eq!(d, std::time::Duration::from_secs(61 % 61)); // wraps within [0, 60]
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_jitter_duration_short_interval() {
|
||||
// interval < 5s: must return zero
|
||||
assert_eq!(super::jitter_duration(4, 99), std::time::Duration::ZERO);
|
||||
assert_eq!(super::jitter_duration(0, 99), std::time::Duration::ZERO);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_jitter_duration_deterministic() {
|
||||
// rand_val=0 always returns zero duration
|
||||
assert_eq!(super::jitter_duration(300, 0), std::time::Duration::ZERO);
|
||||
}
|
||||
|
||||
// --- describe_duration tests ---
|
||||
#[test]
|
||||
fn test_describe_duration_seconds_only() {
|
||||
@@ -912,9 +1000,11 @@ mod tests {
|
||||
aaaa: false,
|
||||
purge_unknown_records: false,
|
||||
ttl: 300,
|
||||
ip4_provider: None,
|
||||
ip6_provider: None,
|
||||
};
|
||||
|
||||
ddns.commit_record("203.0.113.99", "A", &config.cloudflare, 300, false)
|
||||
ddns.commit_record("203.0.113.99", "A", &config.cloudflare, 300, false, &mut std::collections::HashSet::new())
|
||||
.await;
|
||||
}
|
||||
}
|
||||
|
||||
345
src/notifier.rs
345
src/notifier.rs
@@ -11,14 +11,6 @@ pub struct Message {
|
||||
}
|
||||
|
||||
impl Message {
|
||||
#[allow(dead_code)]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
lines: Vec::new(),
|
||||
ok: true,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_ok(msg: &str) -> Self {
|
||||
Self {
|
||||
lines: vec![msg.to_string()],
|
||||
@@ -52,16 +44,6 @@ impl Message {
|
||||
}
|
||||
Message { lines, ok }
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn add_line(&mut self, line: &str) {
|
||||
self.lines.push(line.to_string());
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn set_fail(&mut self) {
|
||||
self.ok = false;
|
||||
}
|
||||
}
|
||||
|
||||
// --- Composite Notifier ---
|
||||
@@ -72,8 +54,6 @@ pub struct CompositeNotifier {
|
||||
|
||||
// Object-safe version of Notifier
|
||||
pub trait NotifierDyn: Send + Sync {
|
||||
#[allow(dead_code)]
|
||||
fn describe(&self) -> String;
|
||||
fn send_dyn<'a>(
|
||||
&'a self,
|
||||
msg: &'a Message,
|
||||
@@ -85,16 +65,6 @@ impl CompositeNotifier {
|
||||
Self { notifiers }
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.notifiers.is_empty()
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn describe(&self) -> Vec<String> {
|
||||
self.notifiers.iter().map(|n| n.describe()).collect()
|
||||
}
|
||||
|
||||
pub async fn send(&self, msg: &Message) {
|
||||
if msg.is_empty() {
|
||||
return;
|
||||
@@ -295,10 +265,6 @@ impl ShoutrrrNotifier {
|
||||
}
|
||||
|
||||
impl NotifierDyn for ShoutrrrNotifier {
|
||||
fn describe(&self) -> String {
|
||||
ShoutrrrNotifier::describe(self)
|
||||
}
|
||||
|
||||
fn send_dyn<'a>(
|
||||
&'a self,
|
||||
msg: &'a Message,
|
||||
@@ -308,6 +274,90 @@ impl NotifierDyn for ShoutrrrNotifier {
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a Gotify webhook URL from a shoutrrr-style URL.
|
||||
///
|
||||
/// Accepted forms:
|
||||
/// gotify://host[:port]/TOKEN[?disabletls=yes]
|
||||
/// gotify://host[:port]/path/?token=TOKEN[&disabletls=yes]
|
||||
/// gotify+http://host[:port]/TOKEN
|
||||
/// gotify+https://host[:port]/TOKEN
|
||||
///
|
||||
/// `disabletls=yes` switches the resulting webhook to plain HTTP, which is
|
||||
/// required for typical home-LAN deployments where Gotify is reachable on a
|
||||
/// private IP without TLS.
|
||||
fn parse_gotify_url(
|
||||
original: &str,
|
||||
rest: &str,
|
||||
default_scheme: &str,
|
||||
) -> Result<ShoutrrrService, String> {
|
||||
// Split off the query string (if any) before path manipulation.
|
||||
let (path_part, query_part) = match rest.split_once('?') {
|
||||
Some((p, q)) => (p, q),
|
||||
None => (rest, ""),
|
||||
};
|
||||
|
||||
let mut token: Option<String> = None;
|
||||
let mut scheme = default_scheme;
|
||||
if !query_part.is_empty() {
|
||||
for pair in query_part.split('&') {
|
||||
let (k, v) = match pair.split_once('=') {
|
||||
Some(kv) => kv,
|
||||
None => continue,
|
||||
};
|
||||
match k {
|
||||
"token" => token = Some(v.to_string()),
|
||||
"disabletls" if v.eq_ignore_ascii_case("yes") => scheme = "http",
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// host[:port][/extra/path]/TOKEN -- token is the last non-empty path segment.
|
||||
let trimmed = path_part.trim_end_matches('/');
|
||||
let (host_path, last_segment) = match trimmed.rsplit_once('/') {
|
||||
Some((h, t)) => (h, t),
|
||||
None => (trimmed, ""),
|
||||
};
|
||||
|
||||
if token.is_none() && !last_segment.is_empty() {
|
||||
token = Some(last_segment.to_string());
|
||||
}
|
||||
|
||||
let token = match token {
|
||||
Some(t) if !t.is_empty() => t,
|
||||
_ => {
|
||||
return Err(format!(
|
||||
"Invalid Gotify shoutrrr URL (missing token): {original}"
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
// host_path is either "host[:port]" or "host[:port]/extra/path" if user
|
||||
// had additional path segments before the token.
|
||||
let host_and_path = if host_path.is_empty() {
|
||||
// No slash before token -> token *was* the only segment, host is path_part minus token.
|
||||
path_part
|
||||
.trim_end_matches('/')
|
||||
.trim_end_matches(&token[..])
|
||||
.trim_end_matches('/')
|
||||
.to_string()
|
||||
} else {
|
||||
host_path.to_string()
|
||||
};
|
||||
|
||||
if host_and_path.is_empty() {
|
||||
return Err(format!(
|
||||
"Invalid Gotify shoutrrr URL (missing host): {original}"
|
||||
));
|
||||
}
|
||||
|
||||
Ok(ShoutrrrService {
|
||||
original_url: original.to_string(),
|
||||
service_type: ShoutrrrServiceType::Gotify,
|
||||
webhook_url: format!("{scheme}://{host_and_path}/message?token={token}"),
|
||||
})
|
||||
}
|
||||
|
||||
fn parse_shoutrrr_url(url_str: &str) -> Result<ShoutrrrService, String> {
|
||||
// Shoutrrr URL formats:
|
||||
// discord://token@id -> https://discord.com/api/webhooks/id/token
|
||||
@@ -368,15 +418,13 @@ fn parse_shoutrrr_url(url_str: &str) -> Result<ShoutrrrService, String> {
|
||||
return Err(format!("Invalid Telegram shoutrrr URL: {url_str}"));
|
||||
}
|
||||
|
||||
if let Some(rest) = url_str
|
||||
.strip_prefix("gotify://")
|
||||
.or_else(|| url_str.strip_prefix("gotify+https://"))
|
||||
if let Some((rest, default_scheme)) = url_str
|
||||
.strip_prefix("gotify+https://")
|
||||
.map(|r| (r, "https"))
|
||||
.or_else(|| url_str.strip_prefix("gotify+http://").map(|r| (r, "http")))
|
||||
.or_else(|| url_str.strip_prefix("gotify://").map(|r| (r, "https")))
|
||||
{
|
||||
return Ok(ShoutrrrService {
|
||||
original_url: url_str.to_string(),
|
||||
service_type: ShoutrrrServiceType::Gotify,
|
||||
webhook_url: format!("https://{rest}/message"),
|
||||
});
|
||||
return parse_gotify_url(url_str, rest, default_scheme);
|
||||
}
|
||||
|
||||
if let Some(rest) = url_str
|
||||
@@ -399,14 +447,28 @@ fn parse_shoutrrr_url(url_str: &str) -> Result<ShoutrrrService, String> {
|
||||
}
|
||||
|
||||
if let Some(rest) = url_str.strip_prefix("pushover://") {
|
||||
let parts: Vec<&str> = rest.splitn(2, '@').collect();
|
||||
// Strip query string (devices, priority, title) — not yet supported.
|
||||
let body = rest.split('?').next().unwrap_or(rest).trim_end_matches('/');
|
||||
let parts: Vec<&str> = body.splitn(2, '@').collect();
|
||||
if parts.len() == 2 {
|
||||
// Shoutrrr's canonical pushover URL is
|
||||
// pushover://shoutrrr:APIToken@UserKey
|
||||
// where the literal "shoutrrr:" username is required. Strip an
|
||||
// optional "<user>:" prefix from the token portion so both the
|
||||
// canonical form and the bare "pushover://TOKEN@USER" form work.
|
||||
let token = parts[0]
|
||||
.rsplit_once(':')
|
||||
.map(|(_, t)| t)
|
||||
.unwrap_or(parts[0]);
|
||||
let user = parts[1];
|
||||
if token.is_empty() || user.is_empty() {
|
||||
return Err(format!("Invalid Pushover shoutrrr URL: {url_str}"));
|
||||
}
|
||||
return Ok(ShoutrrrService {
|
||||
original_url: url_str.to_string(),
|
||||
service_type: ShoutrrrServiceType::Pushover,
|
||||
webhook_url: format!(
|
||||
"https://api.pushover.net/1/messages.json?token={}&user={}",
|
||||
parts[1], parts[0]
|
||||
"https://api.pushover.net/1/messages.json?token={token}&user={user}"
|
||||
),
|
||||
});
|
||||
}
|
||||
@@ -442,8 +504,6 @@ pub struct Heartbeat {
|
||||
}
|
||||
|
||||
pub trait HeartbeatMonitor: Send + Sync {
|
||||
#[allow(dead_code)]
|
||||
fn describe(&self) -> String;
|
||||
fn ping<'a>(
|
||||
&'a self,
|
||||
msg: &'a Message,
|
||||
@@ -462,16 +522,6 @@ impl Heartbeat {
|
||||
Self { monitors }
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.monitors.is_empty()
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn describe(&self) -> Vec<String> {
|
||||
self.monitors.iter().map(|m| m.describe()).collect()
|
||||
}
|
||||
|
||||
pub async fn ping(&self, msg: &Message) {
|
||||
for monitor in &self.monitors {
|
||||
monitor.ping(msg).await;
|
||||
@@ -532,10 +582,6 @@ impl HealthchecksMonitor {
|
||||
}
|
||||
|
||||
impl HeartbeatMonitor for HealthchecksMonitor {
|
||||
fn describe(&self) -> String {
|
||||
"Healthchecks.io".to_string()
|
||||
}
|
||||
|
||||
fn ping<'a>(
|
||||
&'a self,
|
||||
msg: &'a Message,
|
||||
@@ -590,10 +636,6 @@ impl UptimeKumaMonitor {
|
||||
}
|
||||
|
||||
impl HeartbeatMonitor for UptimeKumaMonitor {
|
||||
fn describe(&self) -> String {
|
||||
"Uptime Kuma".to_string()
|
||||
}
|
||||
|
||||
fn ping<'a>(
|
||||
&'a self,
|
||||
msg: &'a Message,
|
||||
@@ -675,19 +717,6 @@ mod tests {
|
||||
assert!(!msg.ok);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_message_new() {
|
||||
let msg = Message::new();
|
||||
assert!(msg.lines.is_empty());
|
||||
assert!(msg.ok);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_message_is_empty_true() {
|
||||
let msg = Message::new();
|
||||
assert!(msg.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_message_is_empty_false() {
|
||||
let msg = Message::new_ok("something");
|
||||
@@ -700,20 +729,6 @@ mod tests {
|
||||
assert_eq!(msg.format(), "line1");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_message_format_multiple_lines() {
|
||||
let mut msg = Message::new_ok("line1");
|
||||
msg.add_line("line2");
|
||||
msg.add_line("line3");
|
||||
assert_eq!(msg.format(), "line1\nline2\nline3");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_message_format_empty() {
|
||||
let msg = Message::new();
|
||||
assert_eq!(msg.format(), "");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_message_merge_all_ok() {
|
||||
let m1 = Message::new_ok("a");
|
||||
@@ -751,30 +766,12 @@ mod tests {
|
||||
assert!(merged.ok);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_message_add_line() {
|
||||
let mut msg = Message::new();
|
||||
msg.add_line("first");
|
||||
msg.add_line("second");
|
||||
assert_eq!(msg.lines, vec!["first".to_string(), "second".to_string()]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_message_set_fail() {
|
||||
let mut msg = Message::new();
|
||||
assert!(msg.ok);
|
||||
msg.set_fail();
|
||||
assert!(!msg.ok);
|
||||
}
|
||||
|
||||
// ---- CompositeNotifier tests ----
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_composite_notifier_empty_send_does_nothing() {
|
||||
let notifier = CompositeNotifier::new(vec![]);
|
||||
assert!(notifier.is_empty());
|
||||
let msg = Message::new_ok("test");
|
||||
// Should not panic or error
|
||||
notifier.send(&msg).await;
|
||||
}
|
||||
|
||||
@@ -834,15 +831,53 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_gotify() {
|
||||
let result = parse_shoutrrr_url("gotify://myhost.com/somepath").unwrap();
|
||||
fn test_parse_gotify_token_as_path_segment() {
|
||||
// Shoutrrr canonical format: token is the final path segment.
|
||||
let result = parse_shoutrrr_url("gotify://myhost.com/MYTOKEN").unwrap();
|
||||
assert_eq!(
|
||||
result.webhook_url,
|
||||
"https://myhost.com/somepath/message"
|
||||
"https://myhost.com/message?token=MYTOKEN"
|
||||
);
|
||||
assert!(matches!(result.service_type, ShoutrrrServiceType::Gotify));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_gotify_token_query_param() {
|
||||
// Older "gotify://host?token=..." form (issue #262).
|
||||
let result =
|
||||
parse_shoutrrr_url("gotify://192.168.178.222:9090?token=AtE2tUGQig67b0J&disabletls=yes")
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
result.webhook_url,
|
||||
"http://192.168.178.222:9090/message?token=AtE2tUGQig67b0J"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_gotify_disabletls_switches_to_http() {
|
||||
let result =
|
||||
parse_shoutrrr_url("gotify://10.0.0.1:8080/TOKEN123?disabletls=yes").unwrap();
|
||||
assert_eq!(
|
||||
result.webhook_url,
|
||||
"http://10.0.0.1:8080/message?token=TOKEN123"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_gotify_plus_http_scheme() {
|
||||
let result = parse_shoutrrr_url("gotify+http://10.0.0.1:8080/TOKEN").unwrap();
|
||||
assert_eq!(
|
||||
result.webhook_url,
|
||||
"http://10.0.0.1:8080/message?token=TOKEN"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_gotify_missing_token_errors() {
|
||||
assert!(parse_shoutrrr_url("gotify://myhost.com/").is_err());
|
||||
assert!(parse_shoutrrr_url("gotify://myhost.com").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_generic() {
|
||||
let result = parse_shoutrrr_url("generic://example.com/webhook").unwrap();
|
||||
@@ -868,7 +903,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_parse_pushover() {
|
||||
let result = parse_shoutrrr_url("pushover://userkey@apitoken").unwrap();
|
||||
let result = parse_shoutrrr_url("pushover://apitoken@userkey").unwrap();
|
||||
assert_eq!(
|
||||
result.webhook_url,
|
||||
"https://api.pushover.net/1/messages.json?token=apitoken&user=userkey"
|
||||
@@ -879,12 +914,42 @@ mod tests {
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_pushover_shoutrrr_canonical_form() {
|
||||
// Shoutrrr's canonical URL has a literal "shoutrrr:" username.
|
||||
// Issue #258: parser must strip this prefix or Pushover rejects the token.
|
||||
let result =
|
||||
parse_shoutrrr_url("pushover://shoutrrr:apitoken@userkey").unwrap();
|
||||
assert_eq!(
|
||||
result.webhook_url,
|
||||
"https://api.pushover.net/1/messages.json?token=apitoken&user=userkey"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_pushover_strips_query_params() {
|
||||
// Optional shoutrrr query params (devices, priority) should not break parsing.
|
||||
let result =
|
||||
parse_shoutrrr_url("pushover://shoutrrr:tok@user/?devices=phone&priority=1")
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
result.webhook_url,
|
||||
"https://api.pushover.net/1/messages.json?token=tok&user=user"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_pushover_invalid() {
|
||||
let result = parse_shoutrrr_url("pushover://noatsign");
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_pushover_empty_token_errors() {
|
||||
assert!(parse_shoutrrr_url("pushover://shoutrrr:@user").is_err());
|
||||
assert!(parse_shoutrrr_url("pushover://tok@").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_plain_https_url() {
|
||||
let result =
|
||||
@@ -1111,7 +1176,7 @@ mod tests {
|
||||
|
||||
// Build a notifier that points discord webhook at our mock server
|
||||
let notifier = ShoutrrrNotifier {
|
||||
client: Client::new(),
|
||||
client: crate::test_client(),
|
||||
urls: vec![ShoutrrrService {
|
||||
original_url: "discord://token@id".to_string(),
|
||||
service_type: ShoutrrrServiceType::Discord,
|
||||
@@ -1135,7 +1200,7 @@ mod tests {
|
||||
.await;
|
||||
|
||||
let notifier = ShoutrrrNotifier {
|
||||
client: Client::new(),
|
||||
client: crate::test_client(),
|
||||
urls: vec![ShoutrrrService {
|
||||
original_url: "slack://a/b/c".to_string(),
|
||||
service_type: ShoutrrrServiceType::Slack,
|
||||
@@ -1159,7 +1224,7 @@ mod tests {
|
||||
.await;
|
||||
|
||||
let notifier = ShoutrrrNotifier {
|
||||
client: Client::new(),
|
||||
client: crate::test_client(),
|
||||
urls: vec![ShoutrrrService {
|
||||
original_url: "generic://example.com/hook".to_string(),
|
||||
service_type: ShoutrrrServiceType::Generic,
|
||||
@@ -1175,10 +1240,10 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn test_shoutrrr_send_empty_message() {
|
||||
let notifier = ShoutrrrNotifier {
|
||||
client: Client::new(),
|
||||
client: crate::test_client(),
|
||||
urls: vec![],
|
||||
};
|
||||
let msg = Message::new();
|
||||
let msg = Message { lines: Vec::new(), ok: true };
|
||||
let pp = PP::default_pp();
|
||||
// Empty message should return true immediately
|
||||
let result = notifier.send(&msg, &pp).await;
|
||||
@@ -1211,7 +1276,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_shoutrrr_notifier_describe() {
|
||||
let notifier = ShoutrrrNotifier {
|
||||
client: Client::new(),
|
||||
client: crate::test_client(),
|
||||
urls: vec![
|
||||
ShoutrrrService {
|
||||
original_url: "discord://t@i".to_string(),
|
||||
@@ -1267,7 +1332,7 @@ mod tests {
|
||||
.await;
|
||||
|
||||
let notifier = ShoutrrrNotifier {
|
||||
client: Client::new(),
|
||||
client: crate::test_client(),
|
||||
urls: vec![ShoutrrrService {
|
||||
original_url: "telegram://token@telegram?chats=123".to_string(),
|
||||
service_type: ShoutrrrServiceType::Telegram,
|
||||
@@ -1291,7 +1356,7 @@ mod tests {
|
||||
.await;
|
||||
|
||||
let notifier = ShoutrrrNotifier {
|
||||
client: Client::new(),
|
||||
client: crate::test_client(),
|
||||
urls: vec![ShoutrrrService {
|
||||
original_url: "gotify://host/path".to_string(),
|
||||
service_type: ShoutrrrServiceType::Gotify,
|
||||
@@ -1307,7 +1372,8 @@ mod tests {
|
||||
#[test]
|
||||
fn test_pushover_url_query_parsing() {
|
||||
// Verify that the pushover webhook URL format contains the right params
|
||||
let service = parse_shoutrrr_url("pushover://myuser@mytoken").unwrap();
|
||||
// shoutrrr format: pushover://token@user
|
||||
let service = parse_shoutrrr_url("pushover://mytoken@myuser").unwrap();
|
||||
let parsed = url::Url::parse(&service.webhook_url).unwrap();
|
||||
let params: std::collections::HashMap<_, _> = parsed.query_pairs().collect();
|
||||
assert_eq!(params.get("token").unwrap().as_ref(), "mytoken");
|
||||
@@ -1325,7 +1391,7 @@ mod tests {
|
||||
.await;
|
||||
|
||||
let notifier = ShoutrrrNotifier {
|
||||
client: Client::new(),
|
||||
client: crate::test_client(),
|
||||
urls: vec![ShoutrrrService {
|
||||
original_url: "custom://host/path".to_string(),
|
||||
service_type: ShoutrrrServiceType::Other("custom".to_string()),
|
||||
@@ -1349,7 +1415,7 @@ mod tests {
|
||||
.await;
|
||||
|
||||
let notifier = ShoutrrrNotifier {
|
||||
client: Client::new(),
|
||||
client: crate::test_client(),
|
||||
urls: vec![ShoutrrrService {
|
||||
original_url: "discord://t@i".to_string(),
|
||||
service_type: ShoutrrrServiceType::Discord,
|
||||
@@ -1362,23 +1428,6 @@ mod tests {
|
||||
assert!(!result);
|
||||
}
|
||||
|
||||
// ---- CompositeNotifier describe ----
|
||||
|
||||
#[test]
|
||||
fn test_composite_notifier_describe_empty() {
|
||||
let notifier = CompositeNotifier::new(vec![]);
|
||||
assert!(notifier.describe().is_empty());
|
||||
}
|
||||
|
||||
// ---- Heartbeat describe and is_empty ----
|
||||
|
||||
#[test]
|
||||
fn test_heartbeat_is_empty() {
|
||||
let hb = Heartbeat::new(vec![]);
|
||||
assert!(hb.is_empty());
|
||||
assert!(hb.describe().is_empty());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_heartbeat_ping_no_monitors() {
|
||||
let hb = Heartbeat::new(vec![]);
|
||||
@@ -1400,16 +1449,6 @@ mod tests {
|
||||
hb.exit(&msg).await;
|
||||
}
|
||||
|
||||
// ---- CompositeNotifier send with empty message ----
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_composite_notifier_send_empty_message_skips() {
|
||||
let notifier = CompositeNotifier::new(vec![]);
|
||||
let msg = Message::new(); // empty
|
||||
// Should return immediately without sending
|
||||
notifier.send(&msg).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_shoutrrr_send_server_error() {
|
||||
let server = MockServer::start().await;
|
||||
@@ -1421,7 +1460,7 @@ mod tests {
|
||||
.await;
|
||||
|
||||
let notifier = ShoutrrrNotifier {
|
||||
client: Client::new(),
|
||||
client: crate::test_client(),
|
||||
urls: vec![ShoutrrrService {
|
||||
original_url: "generic://example.com/hook".to_string(),
|
||||
service_type: ShoutrrrServiceType::Generic,
|
||||
|
||||
200
src/pp.rs
200
src/pp.rs
@@ -1,6 +1,3 @@
|
||||
use std::collections::HashSet;
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
// Verbosity levels
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
|
||||
pub enum Verbosity {
|
||||
@@ -11,12 +8,8 @@ pub enum Verbosity {
|
||||
}
|
||||
|
||||
// Emoji constants
|
||||
#[allow(dead_code)]
|
||||
pub const EMOJI_GLOBE: &str = "\u{1F30D}";
|
||||
pub const EMOJI_WARNING: &str = "\u{26A0}\u{FE0F}";
|
||||
pub const EMOJI_ERROR: &str = "\u{274C}";
|
||||
#[allow(dead_code)]
|
||||
pub const EMOJI_SUCCESS: &str = "\u{2705}";
|
||||
pub const EMOJI_LAUNCH: &str = "\u{1F680}";
|
||||
pub const EMOJI_STOP: &str = "\u{1F6D1}";
|
||||
pub const EMOJI_SLEEP: &str = "\u{1F634}";
|
||||
@@ -28,8 +21,6 @@ pub const EMOJI_SKIP: &str = "\u{23ED}\u{FE0F}";
|
||||
pub const EMOJI_NOTIFY: &str = "\u{1F514}";
|
||||
pub const EMOJI_HEARTBEAT: &str = "\u{1F493}";
|
||||
pub const EMOJI_CONFIG: &str = "\u{2699}\u{FE0F}";
|
||||
#[allow(dead_code)]
|
||||
pub const EMOJI_HINT: &str = "\u{1F4A1}";
|
||||
|
||||
const INDENT_PREFIX: &str = " ";
|
||||
|
||||
@@ -37,7 +28,6 @@ pub struct PP {
|
||||
pub verbosity: Verbosity,
|
||||
pub emoji: bool,
|
||||
indent: usize,
|
||||
seen: Arc<Mutex<HashSet<String>>>,
|
||||
}
|
||||
|
||||
impl PP {
|
||||
@@ -46,7 +36,6 @@ impl PP {
|
||||
verbosity: if quiet { Verbosity::Quiet } else { Verbosity::Verbose },
|
||||
emoji,
|
||||
indent: 0,
|
||||
seen: Arc::new(Mutex::new(HashSet::new())),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -63,7 +52,6 @@ impl PP {
|
||||
verbosity: self.verbosity,
|
||||
emoji: self.emoji,
|
||||
indent: self.indent + 1,
|
||||
seen: Arc::clone(&self.seen),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -104,54 +92,12 @@ impl PP {
|
||||
pub fn errorf(&self, emoji: &str, msg: &str) {
|
||||
self.output_err(emoji, msg);
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn info_once(&self, key: &str, emoji: &str, msg: &str) {
|
||||
if self.is_showing(Verbosity::Info) {
|
||||
let mut seen = self.seen.lock().unwrap();
|
||||
if seen.insert(key.to_string()) {
|
||||
self.output(emoji, msg);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn notice_once(&self, key: &str, emoji: &str, msg: &str) {
|
||||
if self.is_showing(Verbosity::Notice) {
|
||||
let mut seen = self.seen.lock().unwrap();
|
||||
if seen.insert(key.to_string()) {
|
||||
self.output(emoji, msg);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn blank_line_if_verbose(&self) {
|
||||
if self.is_showing(Verbosity::Verbose) {
|
||||
println!();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn english_join(items: &[String]) -> String {
|
||||
match items.len() {
|
||||
0 => String::new(),
|
||||
1 => items[0].clone(),
|
||||
2 => format!("{} and {}", items[0], items[1]),
|
||||
_ => {
|
||||
let (last, rest) = items.split_last().unwrap();
|
||||
format!("{}, and {last}", rest.join(", "))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
// ---- PP::new with emoji flag ----
|
||||
|
||||
#[test]
|
||||
fn new_with_emoji_true() {
|
||||
let pp = PP::new(true, false);
|
||||
@@ -164,8 +110,6 @@ mod tests {
|
||||
assert!(!pp.emoji);
|
||||
}
|
||||
|
||||
// ---- PP::new with quiet flag (verbosity levels) ----
|
||||
|
||||
#[test]
|
||||
fn new_quiet_true_sets_verbosity_quiet() {
|
||||
let pp = PP::new(false, true);
|
||||
@@ -178,8 +122,6 @@ mod tests {
|
||||
assert_eq!(pp.verbosity, Verbosity::Verbose);
|
||||
}
|
||||
|
||||
// ---- PP::is_showing at different verbosity levels ----
|
||||
|
||||
#[test]
|
||||
fn quiet_shows_only_quiet_level() {
|
||||
let pp = PP::new(false, true);
|
||||
@@ -218,8 +160,6 @@ mod tests {
|
||||
assert!(!pp.is_showing(Verbosity::Verbose));
|
||||
}
|
||||
|
||||
// ---- PP::indent ----
|
||||
|
||||
#[test]
|
||||
fn indent_increments_indent_level() {
|
||||
let pp = PP::new(true, false);
|
||||
@@ -238,26 +178,6 @@ mod tests {
|
||||
assert_eq!(child.emoji, pp.emoji);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn indent_shares_seen_state() {
|
||||
let pp = PP::new(false, false);
|
||||
let child = pp.indent();
|
||||
|
||||
// Insert via parent's seen set
|
||||
pp.seen.lock().unwrap().insert("key1".to_string());
|
||||
|
||||
// Child should observe the same entry
|
||||
assert!(child.seen.lock().unwrap().contains("key1"));
|
||||
|
||||
// Insert via child
|
||||
child.seen.lock().unwrap().insert("key2".to_string());
|
||||
|
||||
// Parent should observe it too
|
||||
assert!(pp.seen.lock().unwrap().contains("key2"));
|
||||
}
|
||||
|
||||
// ---- PP::infof, noticef, warningf, errorf - no panic and verbosity gating ----
|
||||
|
||||
#[test]
|
||||
fn infof_does_not_panic_when_verbose() {
|
||||
let pp = PP::new(false, false);
|
||||
@@ -267,7 +187,6 @@ mod tests {
|
||||
#[test]
|
||||
fn infof_does_not_panic_when_quiet() {
|
||||
let pp = PP::new(false, true);
|
||||
// Should simply not print, and not panic
|
||||
pp.infof("", "test info message");
|
||||
}
|
||||
|
||||
@@ -291,7 +210,6 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn warningf_does_not_panic_when_quiet() {
|
||||
// warningf always outputs (no verbosity check), just verify no panic
|
||||
let pp = PP::new(false, true);
|
||||
pp.warningf("", "test warning");
|
||||
}
|
||||
@@ -308,124 +226,6 @@ mod tests {
|
||||
pp.errorf("", "test error");
|
||||
}
|
||||
|
||||
// ---- PP::info_once and notice_once ----
|
||||
|
||||
#[test]
|
||||
fn info_once_suppresses_duplicates() {
|
||||
let pp = PP::new(false, false);
|
||||
// First call inserts the key
|
||||
pp.info_once("dup_key", "", "first");
|
||||
// The key should now be in the seen set
|
||||
assert!(pp.seen.lock().unwrap().contains("dup_key"));
|
||||
|
||||
// Calling again with the same key should not insert again (set unchanged)
|
||||
let size_before = pp.seen.lock().unwrap().len();
|
||||
pp.info_once("dup_key", "", "second");
|
||||
let size_after = pp.seen.lock().unwrap().len();
|
||||
assert_eq!(size_before, size_after);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn info_once_allows_different_keys() {
|
||||
let pp = PP::new(false, false);
|
||||
pp.info_once("key_a", "", "msg a");
|
||||
pp.info_once("key_b", "", "msg b");
|
||||
let seen = pp.seen.lock().unwrap();
|
||||
assert!(seen.contains("key_a"));
|
||||
assert!(seen.contains("key_b"));
|
||||
assert_eq!(seen.len(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn info_once_skipped_when_quiet() {
|
||||
let pp = PP::new(false, true);
|
||||
pp.info_once("quiet_key", "", "should not register");
|
||||
// Because verbosity is Quiet, info_once should not even insert the key
|
||||
assert!(!pp.seen.lock().unwrap().contains("quiet_key"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn notice_once_suppresses_duplicates() {
|
||||
let pp = PP::new(false, false);
|
||||
pp.notice_once("notice_dup", "", "first");
|
||||
assert!(pp.seen.lock().unwrap().contains("notice_dup"));
|
||||
|
||||
let size_before = pp.seen.lock().unwrap().len();
|
||||
pp.notice_once("notice_dup", "", "second");
|
||||
let size_after = pp.seen.lock().unwrap().len();
|
||||
assert_eq!(size_before, size_after);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn notice_once_skipped_when_quiet() {
|
||||
let pp = PP::new(false, true);
|
||||
pp.notice_once("quiet_notice", "", "should not register");
|
||||
assert!(!pp.seen.lock().unwrap().contains("quiet_notice"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn info_once_shared_via_indent() {
|
||||
let pp = PP::new(false, false);
|
||||
let child = pp.indent();
|
||||
|
||||
// Mark a key via the parent
|
||||
pp.info_once("shared_key", "", "parent");
|
||||
assert!(pp.seen.lock().unwrap().contains("shared_key"));
|
||||
|
||||
// Child should see it as already present, so set size stays the same
|
||||
let size_before = child.seen.lock().unwrap().len();
|
||||
child.info_once("shared_key", "", "child duplicate");
|
||||
let size_after = child.seen.lock().unwrap().len();
|
||||
assert_eq!(size_before, size_after);
|
||||
|
||||
// Child can add a new key visible to parent
|
||||
child.info_once("child_key", "", "child new");
|
||||
assert!(pp.seen.lock().unwrap().contains("child_key"));
|
||||
}
|
||||
|
||||
// ---- english_join ----
|
||||
|
||||
#[test]
|
||||
fn english_join_empty() {
|
||||
let items: Vec<String> = vec![];
|
||||
assert_eq!(english_join(&items), "");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn english_join_single() {
|
||||
let items = vec!["alpha".to_string()];
|
||||
assert_eq!(english_join(&items), "alpha");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn english_join_two() {
|
||||
let items = vec!["alpha".to_string(), "beta".to_string()];
|
||||
assert_eq!(english_join(&items), "alpha and beta");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn english_join_three() {
|
||||
let items = vec![
|
||||
"alpha".to_string(),
|
||||
"beta".to_string(),
|
||||
"gamma".to_string(),
|
||||
];
|
||||
assert_eq!(english_join(&items), "alpha, beta, and gamma");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn english_join_four() {
|
||||
let items = vec![
|
||||
"a".to_string(),
|
||||
"b".to_string(),
|
||||
"c".to_string(),
|
||||
"d".to_string(),
|
||||
];
|
||||
assert_eq!(english_join(&items), "a, b, c, and d");
|
||||
}
|
||||
|
||||
// ---- default_pp ----
|
||||
|
||||
#[test]
|
||||
fn default_pp_is_verbose_no_emoji() {
|
||||
let pp = PP::default_pp();
|
||||
|
||||
166
src/provider.rs
166
src/provider.rs
@@ -1,6 +1,7 @@
|
||||
use crate::pp::{self, PP};
|
||||
use reqwest::dns::{Addrs, Name, Resolve, Resolving};
|
||||
use reqwest::Client;
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, UdpSocket};
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
|
||||
use std::time::Duration;
|
||||
|
||||
/// IP type: IPv4 or IPv6
|
||||
@@ -25,10 +26,6 @@ impl IpType {
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn all() -> &'static [IpType] {
|
||||
&[IpType::V4, IpType::V6]
|
||||
}
|
||||
}
|
||||
|
||||
/// All supported provider types
|
||||
@@ -145,12 +142,15 @@ impl ProviderType {
|
||||
|
||||
// --- Cloudflare Trace ---
|
||||
|
||||
/// Primary trace URL uses a hostname so DNS resolves normally, avoiding the
|
||||
/// problem where WARP/Zero Trust intercepts requests to literal 1.1.1.1.
|
||||
const CF_TRACE_PRIMARY: &str = "https://api.cloudflare.com/cdn-cgi/trace";
|
||||
/// Fallback URLs use literal IPs for when api.cloudflare.com is unreachable.
|
||||
const CF_TRACE_V4_FALLBACK: &str = "https://1.0.0.1/cdn-cgi/trace";
|
||||
const CF_TRACE_V6_FALLBACK: &str = "https://[2606:4700:4700::1001]/cdn-cgi/trace";
|
||||
/// Primary trace URL uses cloudflare.com (the CDN endpoint, not the DNS
|
||||
/// resolver). The `build_split_client` forces the correct address family by
|
||||
/// filtering DNS results, so a dual-stack hostname is safe.
|
||||
/// Using literal DNS-resolver IPs (1.0.0.1 / [2606:4700:4700::1001]) caused
|
||||
/// TLS SNI mismatches and returned Cloudflare proxy IPs for some users.
|
||||
const CF_TRACE_PRIMARY: &str = "https://cloudflare.com/cdn-cgi/trace";
|
||||
/// Fallback uses api.cloudflare.com, which works when cloudflare.com is
|
||||
/// intercepted (e.g. Cloudflare WARP/Zero Trust).
|
||||
const CF_TRACE_FALLBACK: &str = "https://api.cloudflare.com/cdn-cgi/trace";
|
||||
|
||||
pub fn parse_trace_ip(body: &str) -> Option<String> {
|
||||
for line in body.lines() {
|
||||
@@ -161,28 +161,61 @@ pub fn parse_trace_ip(body: &str) -> Option<String> {
|
||||
None
|
||||
}
|
||||
|
||||
async fn fetch_trace_ip(client: &Client, url: &str, timeout: Duration) -> Option<IpAddr> {
|
||||
let resp = client
|
||||
.get(url)
|
||||
.timeout(timeout)
|
||||
.send()
|
||||
.await
|
||||
.ok()?;
|
||||
async fn fetch_trace_ip(
|
||||
client: &Client,
|
||||
url: &str,
|
||||
timeout: Duration,
|
||||
host_override: Option<&str>,
|
||||
) -> Option<IpAddr> {
|
||||
let mut req = client.get(url).timeout(timeout);
|
||||
if let Some(host) = host_override {
|
||||
req = req.header("Host", host);
|
||||
}
|
||||
let resp = req.send().await.ok()?;
|
||||
let body = resp.text().await.ok()?;
|
||||
let ip_str = parse_trace_ip(&body)?;
|
||||
ip_str.parse::<IpAddr>().ok()
|
||||
}
|
||||
|
||||
/// A DNS resolver that filters lookup results to a single address family.
|
||||
/// This is the Rust equivalent of favonia/cloudflare-ddns's "split dialer"
|
||||
/// pattern: by removing addresses of the wrong family *before* the HTTP
|
||||
/// client sees them, we guarantee it can only establish connections over the
|
||||
/// desired protocol — no happy-eyeballs race, no fallback to the wrong family.
|
||||
struct FilteredResolver {
|
||||
ip_type: IpType,
|
||||
}
|
||||
|
||||
impl Resolve for FilteredResolver {
|
||||
fn resolve(&self, name: Name) -> Resolving {
|
||||
let ip_type = self.ip_type;
|
||||
Box::pin(async move {
|
||||
let addrs: Vec<SocketAddr> = tokio::net::lookup_host((name.as_str(), 0))
|
||||
.await
|
||||
.map_err(|e| -> Box<dyn std::error::Error + Send + Sync> { Box::new(e) })?
|
||||
.filter(|addr| match ip_type {
|
||||
IpType::V4 => addr.is_ipv4(),
|
||||
IpType::V6 => addr.is_ipv6(),
|
||||
})
|
||||
.collect();
|
||||
if addrs.is_empty() {
|
||||
return Err(Box::new(std::io::Error::new(
|
||||
std::io::ErrorKind::AddrNotAvailable,
|
||||
format!("no {} addresses found", ip_type.describe()),
|
||||
)) as Box<dyn std::error::Error + Send + Sync>);
|
||||
}
|
||||
Ok(Box::new(addrs.into_iter()) as Addrs)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Build an HTTP client that only connects via the given IP family.
|
||||
/// Binding to 0.0.0.0 forces IPv4-only; binding to [::] forces IPv6-only.
|
||||
/// This ensures the trace endpoint sees the correct address family.
|
||||
fn build_split_client(ip_type: IpType, timeout: Duration) -> Client {
|
||||
let local_addr: IpAddr = match ip_type {
|
||||
IpType::V4 => Ipv4Addr::UNSPECIFIED.into(),
|
||||
IpType::V6 => Ipv6Addr::UNSPECIFIED.into(),
|
||||
};
|
||||
/// Uses a DNS-level filter to strip addresses of the wrong family from
|
||||
/// resolution results, ensuring the client never attempts a connection
|
||||
/// over the wrong protocol.
|
||||
pub fn build_split_client(ip_type: IpType, timeout: Duration) -> Client {
|
||||
Client::builder()
|
||||
.local_address(local_addr)
|
||||
.dns_resolver(FilteredResolver { ip_type })
|
||||
.timeout(timeout)
|
||||
.build()
|
||||
.unwrap_or_default()
|
||||
@@ -199,7 +232,7 @@ async fn detect_cloudflare_trace(
|
||||
let client = build_split_client(ip_type, timeout);
|
||||
|
||||
if let Some(url) = custom_url {
|
||||
if let Some(ip) = fetch_trace_ip(&client, url, timeout).await {
|
||||
if let Some(ip) = fetch_trace_ip(&client, url, timeout, None).await {
|
||||
if validate_detected_ip(&ip, ip_type, ppfmt) {
|
||||
return vec![ip];
|
||||
}
|
||||
@@ -211,13 +244,8 @@ async fn detect_cloudflare_trace(
|
||||
return Vec::new();
|
||||
}
|
||||
|
||||
let fallback = match ip_type {
|
||||
IpType::V4 => CF_TRACE_V4_FALLBACK,
|
||||
IpType::V6 => CF_TRACE_V6_FALLBACK,
|
||||
};
|
||||
|
||||
// Try primary (api.cloudflare.com — resolves via DNS, avoids literal-IP interception)
|
||||
if let Some(ip) = fetch_trace_ip(&client, CF_TRACE_PRIMARY, timeout).await {
|
||||
// Try primary (cloudflare.com — the CDN trace endpoint)
|
||||
if let Some(ip) = fetch_trace_ip(&client, CF_TRACE_PRIMARY, timeout, None).await {
|
||||
if validate_detected_ip(&ip, ip_type, ppfmt) {
|
||||
return vec![ip];
|
||||
}
|
||||
@@ -227,8 +255,8 @@ async fn detect_cloudflare_trace(
|
||||
&format!("{} not detected via primary, trying fallback", ip_type.describe()),
|
||||
);
|
||||
|
||||
// Try fallback (literal IP — useful when DNS is broken)
|
||||
if let Some(ip) = fetch_trace_ip(&client, fallback, timeout).await {
|
||||
// Try fallback (hostname-based — works when literal IPs are intercepted by WARP/Zero Trust)
|
||||
if let Some(ip) = fetch_trace_ip(&client, CF_TRACE_FALLBACK, timeout, None).await {
|
||||
if validate_detected_ip(&ip, ip_type, ppfmt) {
|
||||
return vec![ip];
|
||||
}
|
||||
@@ -847,7 +875,7 @@ mod tests {
|
||||
.mount(&server)
|
||||
.await;
|
||||
|
||||
let client = Client::new();
|
||||
let client = crate::test_client();
|
||||
let ppfmt = PP::default_pp();
|
||||
let url = format!("{}/cdn-cgi/trace", server.uri());
|
||||
let timeout = Duration::from_secs(5);
|
||||
@@ -887,7 +915,7 @@ mod tests {
|
||||
|
||||
// We can't override the hardcoded primary/fallback URLs, but we can test
|
||||
// the custom URL path: first with a failing URL, then a succeeding one.
|
||||
let client = Client::new();
|
||||
let client = crate::test_client();
|
||||
let ppfmt = PP::default_pp();
|
||||
let timeout = Duration::from_secs(5);
|
||||
|
||||
@@ -918,23 +946,47 @@ mod tests {
|
||||
// ---- trace URL constants ----
|
||||
|
||||
#[test]
|
||||
fn test_trace_primary_uses_hostname_not_ip() {
|
||||
// Primary must use a hostname (api.cloudflare.com) so DNS resolves normally
|
||||
// and WARP/Zero Trust doesn't intercept the request.
|
||||
assert_eq!(CF_TRACE_PRIMARY, "https://api.cloudflare.com/cdn-cgi/trace");
|
||||
assert!(CF_TRACE_PRIMARY.contains("api.cloudflare.com"));
|
||||
// Fallbacks use literal IPs for when DNS is broken.
|
||||
assert!(CF_TRACE_V4_FALLBACK.contains("1.0.0.1"));
|
||||
assert!(CF_TRACE_V6_FALLBACK.contains("2606:4700:4700::1001"));
|
||||
fn test_trace_urls() {
|
||||
// Primary uses cloudflare.com CDN endpoint (not DNS resolver IPs).
|
||||
assert_eq!(CF_TRACE_PRIMARY, "https://cloudflare.com/cdn-cgi/trace");
|
||||
// Fallback uses api.cloudflare.com for when cloudflare.com is intercepted (WARP/Zero Trust).
|
||||
assert_eq!(CF_TRACE_FALLBACK, "https://api.cloudflare.com/cdn-cgi/trace");
|
||||
}
|
||||
|
||||
// ---- build_split_client ----
|
||||
// ---- FilteredResolver + build_split_client ----
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_filtered_resolver_v4() {
|
||||
let resolver = FilteredResolver { ip_type: IpType::V4 };
|
||||
let name: Name = "cloudflare.com".parse().unwrap();
|
||||
let addrs: Vec<SocketAddr> = resolver
|
||||
.resolve(name)
|
||||
.await
|
||||
.expect("DNS lookup failed")
|
||||
.collect();
|
||||
assert!(!addrs.is_empty(), "should resolve at least one address");
|
||||
for addr in &addrs {
|
||||
assert!(addr.is_ipv4(), "all addresses should be IPv4, got {addr}");
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_filtered_resolver_v6() {
|
||||
let resolver = FilteredResolver { ip_type: IpType::V6 };
|
||||
let name: Name = "cloudflare.com".parse().unwrap();
|
||||
// IPv6 may not be available in all test environments, so we just
|
||||
// verify the resolver doesn't panic and returns only v6 if any.
|
||||
if let Ok(addrs) = resolver.resolve(name).await {
|
||||
for addr in addrs {
|
||||
assert!(addr.is_ipv6(), "all addresses should be IPv6, got {addr}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_build_split_client_v4() {
|
||||
let client = build_split_client(IpType::V4, Duration::from_secs(5));
|
||||
// Client should build successfully — we can't inspect local_address,
|
||||
// but we verify it doesn't panic.
|
||||
// Client should build successfully with filtered resolver.
|
||||
drop(client);
|
||||
}
|
||||
|
||||
@@ -956,7 +1008,7 @@ mod tests {
|
||||
.mount(&server)
|
||||
.await;
|
||||
|
||||
let client = Client::new();
|
||||
let client = crate::test_client();
|
||||
let ppfmt = PP::default_pp();
|
||||
let timeout = Duration::from_secs(5);
|
||||
|
||||
@@ -979,7 +1031,7 @@ mod tests {
|
||||
.mount(&server)
|
||||
.await;
|
||||
|
||||
let client = Client::new();
|
||||
let client = crate::test_client();
|
||||
let ppfmt = PP::default_pp();
|
||||
let timeout = Duration::from_secs(5);
|
||||
|
||||
@@ -1000,7 +1052,7 @@ mod tests {
|
||||
.mount(&server)
|
||||
.await;
|
||||
|
||||
let client = Client::new();
|
||||
let client = crate::test_client();
|
||||
let ppfmt = PP::default_pp();
|
||||
let timeout = Duration::from_secs(5);
|
||||
let url = format!("{}/my-ip", server.uri());
|
||||
@@ -1020,7 +1072,7 @@ mod tests {
|
||||
.mount(&server)
|
||||
.await;
|
||||
|
||||
let client = Client::new();
|
||||
let client = crate::test_client();
|
||||
let ppfmt = PP::default_pp();
|
||||
let timeout = Duration::from_secs(5);
|
||||
let url = format!("{}/my-ip", server.uri());
|
||||
@@ -1084,7 +1136,7 @@ mod tests {
|
||||
.mount(&server)
|
||||
.await;
|
||||
|
||||
let client = Client::new();
|
||||
let client = crate::test_client();
|
||||
let ppfmt = PP::default_pp();
|
||||
let timeout = Duration::from_secs(5);
|
||||
let url = format!("{}/my-ip", server.uri());
|
||||
@@ -1295,7 +1347,7 @@ mod tests {
|
||||
"5.6.7.8".parse().unwrap(),
|
||||
],
|
||||
};
|
||||
let client = Client::new();
|
||||
let client = crate::test_client();
|
||||
let ppfmt = PP::default_pp();
|
||||
let timeout = Duration::from_secs(5);
|
||||
|
||||
@@ -1313,7 +1365,7 @@ mod tests {
|
||||
"2001:db8::1".parse().unwrap(),
|
||||
],
|
||||
};
|
||||
let client = Client::new();
|
||||
let client = crate::test_client();
|
||||
let ppfmt = PP::default_pp();
|
||||
let timeout = Duration::from_secs(5);
|
||||
|
||||
@@ -1327,7 +1379,7 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn test_none_detect_ips_returns_empty() {
|
||||
let provider = ProviderType::None;
|
||||
let client = Client::new();
|
||||
let client = crate::test_client();
|
||||
let ppfmt = PP::default_pp();
|
||||
let timeout = Duration::from_secs(5);
|
||||
|
||||
|
||||
1026
src/updater.rs
1026
src/updater.rs
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user