14 Commits

Author SHA1 Message Date
dependabot[bot]
ae752f69ac Bump tokio from 1.52.1 to 1.52.2
Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.52.1 to 1.52.2.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.52.1...tokio-1.52.2)

---
updated-dependencies:
- dependency-name: tokio
  dependency-version: 1.52.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-05-05 07:36:59 +00:00
Timothy Miller
fddabc7a3d Release v2.1.2
Patch release: case-insensitive Cloudflare DNS record matching (#255),
Pushover URL parsing fix for canonical shoutrrr format (#258), and
Gotify URL parsing fix for ?token= query and ?disabletls=yes (#262).

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-29 20:04:28 -04:00
Timothy Miller
548d89dacf Make Cloudflare lookups case-insensitive
Improve shoutrrr URL parsing for Gotify and Pushover

- Add parse_gotify_url to handle gotify://, gotify+http(s)://, token in
  final path segment or ?token=, and ?disabletls=yes to force http
- Accept canonical pushover URLs by stripping an optional 'shoutrrr:'
  user
  prefix and ignoring query params
- Add tests for Gotify, Pushover, and Cloudflare parsing/lookup behavior
2026-04-29 20:03:30 -04:00
Timothy Miller
22320bea79 Release v2.1.1
Fix rand 0.10 compile error (RngExt trait import) and ship version
bump alongside DELETE_ON_FAILURE, proportional jitter, and dependency
refresh changes already merged on master.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-29 18:57:53 -04:00
Timothy Miller
1bb347bea7 Merge pull request #263 from DMaxter/master
Allow not deleting domains if the IP list is empty
2026-04-29 18:51:59 -04:00
Timothy Miller
1d5ad2738c Merge pull request #265 from timothymiller/dependabot/cargo/reqwest-0.13.3
Bump reqwest from 0.13.2 to 0.13.3
2026-04-29 18:51:16 -04:00
Timothy Miller
08ff76f443 Merge pull request #266 from timothymiller/dependabot/cargo/rand-0.10.1
Bump rand from 0.9.3 to 0.10.1
2026-04-29 18:51:08 -04:00
Timothy Miller
199bbae2bd Merge pull request #267 from timothymiller/dependabot/cargo/rustls-0.23.40
Bump rustls from 0.23.39 to 0.23.40
2026-04-29 18:50:56 -04:00
dependabot[bot]
591f3e4905 Bump rustls from 0.23.39 to 0.23.40
Bumps [rustls](https://github.com/rustls/rustls) from 0.23.39 to 0.23.40.
- [Release notes](https://github.com/rustls/rustls/releases)
- [Changelog](https://github.com/rustls/rustls/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rustls/rustls/compare/v/0.23.39...v/0.23.40)

---
updated-dependencies:
- dependency-name: rustls
  dependency-version: 0.23.40
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-04-29 07:36:47 +00:00
DMaxter
687d299bda docs: document the variable in the README 2026-04-28 23:56:25 +01:00
dependabot[bot]
25122d2ce3 Bump rand from 0.9.3 to 0.10.1
Bumps [rand](https://github.com/rust-random/rand) from 0.9.3 to 0.10.1.
- [Release notes](https://github.com/rust-random/rand/releases)
- [Changelog](https://github.com/rust-random/rand/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-random/rand/compare/0.9.3...0.10.1)

---
updated-dependencies:
- dependency-name: rand
  dependency-version: 0.10.1
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-04-28 07:37:29 +00:00
dependabot[bot]
64c971b198 Bump reqwest from 0.13.2 to 0.13.3
Bumps [reqwest](https://github.com/seanmonstar/reqwest) from 0.13.2 to 0.13.3.
- [Release notes](https://github.com/seanmonstar/reqwest/releases)
- [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md)
- [Commits](https://github.com/seanmonstar/reqwest/compare/v0.13.2...v0.13.3)

---
updated-dependencies:
- dependency-name: reqwest
  dependency-version: 0.13.3
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-04-28 07:37:21 +00:00
DMaxter
b748e80592 tests: added tests for delete_on_failure 2026-04-26 01:06:49 +01:00
DMaxter
714ec4f11f feat: prevent deletion on failure 2026-04-26 00:46:34 +01:00
10 changed files with 636 additions and 91 deletions

108
Cargo.lock generated
View File

@@ -79,9 +79,20 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"
[[package]]
name = "chacha20"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f8d983286843e49675a4b7a2d174efe136dc93a18d69130dd18198a6c167601"
dependencies = [
"cfg-if",
"cpufeatures",
"rand_core",
]
[[package]]
name = "cloudflare-ddns"
version = "2.1.0"
version = "2.1.2"
dependencies = [
"if-addrs",
"rand",
@@ -122,6 +133,15 @@ version = "0.8.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
[[package]]
name = "cpufeatures"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b2a41393f66f16b0823bb79094d54ac5fbd34ab292ddafb9a0456ac9f87d201"
dependencies = [
"libc",
]
[[package]]
name = "deadpool"
version = "0.12.3"
@@ -299,18 +319,6 @@ dependencies = [
"wasi",
]
[[package]]
name = "getrandom"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd"
dependencies = [
"cfg-if",
"libc",
"r-efi 5.3.0",
"wasip2",
]
[[package]]
name = "getrandom"
version = "0.4.2"
@@ -319,7 +327,8 @@ checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555"
dependencies = [
"cfg-if",
"libc",
"r-efi 6.0.0",
"r-efi",
"rand_core",
"wasip2",
"wasip3",
]
@@ -785,15 +794,6 @@ dependencies = [
"zerovec",
]
[[package]]
name = "ppv-lite86"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
dependencies = [
"zerocopy",
]
[[package]]
name = "prettyplease"
version = "0.2.37"
@@ -822,12 +822,6 @@ dependencies = [
"proc-macro2",
]
[[package]]
name = "r-efi"
version = "5.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"
[[package]]
name = "r-efi"
version = "6.0.0"
@@ -836,32 +830,20 @@ checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf"
[[package]]
name = "rand"
version = "0.9.3"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ec095654a25171c2124e9e3393a930bddbffdc939556c914957a4c3e0a87166"
checksum = "d2e8e8bcc7961af1fdac401278c6a831614941f6164ee3bf4ce61b7edb162207"
dependencies = [
"rand_chacha",
"rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb"
dependencies = [
"ppv-lite86",
"chacha20",
"getrandom 0.4.2",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.9.5"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c"
dependencies = [
"getrandom 0.3.4",
]
checksum = "63b8176103e19a2643978565ca18b50549f6101881c443590420e4dc998a3c69"
[[package]]
name = "regex"
@@ -900,9 +882,9 @@ checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a"
[[package]]
name = "reqwest"
version = "0.13.2"
version = "0.13.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab3f43e3283ab1488b624b44b0e988d0acea0b3214e694730a055cb6b2efa801"
checksum = "62e0021ea2c22aed41653bc7e1419abb2c97e038ff2c33d0e1309e49a97deec0"
dependencies = [
"base64",
"bytes",
@@ -964,9 +946,9 @@ dependencies = [
[[package]]
name = "rustls"
version = "0.23.39"
version = "0.23.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c2c118cb077cca2822033836dfb1b975355dfb784b5e8da48f7b6c5db74e60e"
checksum = "ef86cd5876211988985292b91c96a8f2d298df24e75989a43a3c73f2d4d8168b"
dependencies = [
"once_cell",
"ring",
@@ -1275,9 +1257,9 @@ dependencies = [
[[package]]
name = "tokio"
version = "1.52.1"
version = "1.52.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b67dee974fe86fd92cc45b7a95fdd2f99a36a6d7b0d431a231178d3d670bbcc6"
checksum = "110a78583f19d5cdb2c5ccf321d1290344e71313c6c37d43520d386027d18386"
dependencies = [
"bytes",
"libc",
@@ -1887,26 +1869,6 @@ dependencies = [
"synstructure",
]
[[package]]
name = "zerocopy"
version = "0.8.48"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eed437bf9d6692032087e337407a86f04cd8d6a16a37199ed57949d415bd68e9"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.8.48"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70e3cd084b1788766f53af483dd21f93881ff30d7320490ec3ef7526d203bad4"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "zerofrom"
version = "0.1.6"

View File

@@ -1,6 +1,6 @@
[package]
name = "cloudflare-ddns"
version = "2.1.0"
version = "2.1.2"
edition = "2021"
description = "Access your home network remotely via a custom domain name without a static IP"
license = "GPL-3.0"
@@ -14,7 +14,7 @@ tokio = { version = "1", features = ["rt", "macros", "time", "signal", "net"] }
regex-lite = "0.1"
url = "2"
if-addrs = "0.15"
rand = "0.9"
rand = "0.10"
[profile.release]
opt-level = "z"

View File

@@ -107,6 +107,7 @@ To disable this protection, set `REJECT_CLOUDFLARE_IPS=false`.
| `UPDATE_CRON` | `@every 5m` | Update schedule |
| `UPDATE_ON_START` | `true` | Run an update immediately on startup |
| `DELETE_ON_STOP` | `false` | Delete managed DNS records on shutdown |
| `DELETE_ON_FAILURE` | `true` | Delete managed DNS records when the IP cannot be obtained from the provider |
Schedule formats:
@@ -213,6 +214,7 @@ Heartbeats are sent after each update cycle. On failure, a fail signal is sent.
| `UPDATE_CRON` | `@every 5m` | ⏱️ Update schedule |
| `UPDATE_ON_START` | `true` | 🚀 Update on startup |
| `DELETE_ON_STOP` | `false` | 🧹 Delete records on shutdown |
| `DELETE_ON_FAILURE` | `true` | 🧹 Delete managed records when IP detection fails |
| `TTL` | `1` | ⏳ DNS record TTL |
| `PROXIED` | `false` | ☁️ Proxied expression |
| `RECORD_COMMENT` | — | 💬 DNS record comment |

49
RELEASE_NOTES_2.1.1.md Normal file
View File

@@ -0,0 +1,49 @@
# cloudflare-ddns v2.1.1
Maintenance release. Bug fix for `rand` 0.10 API change, plus an opt-out for failure-triggered record deletion (`DELETE_ON_FAILURE=false`) contributed in the v2.1.0 → v2.1.1 window, dependency refresh, and proportional jitter for IP detection.
## Highlights
- **Fix:** Restore the build under `rand` 0.10 — `random_range` moved to the `RngExt` trait, and the unconditional jitter sleep in `--repeat` mode no longer fails to compile.
- **New:** `DELETE_ON_FAILURE` (env-var mode) controls whether DNS records are removed when an IP detection or update fails. Defaults to `true` to preserve existing behavior; set `DELETE_ON_FAILURE=false` to keep stale records on transient failures instead of yanking them.
- **Improvement:** Proportional jitter (up to 20% of the update interval) is added before each scheduled update to spread requests across clients and reduce synchronized spikes against the Cloudflare API.
## Changes since v2.1.0
### Features
- `DELETE_ON_FAILURE` env var to prevent DNS record deletion on failed updates (#263, thanks @DMaxter)
- Proportional jitter on update intervals to desynchronize API traffic (#253, thanks @jhutchings1)
### Fixes
- Compile fix for `rand` 0.10: import `RngExt` so `random_range` resolves
- `delete_on_failure` regression test coverage added
### Dependencies
- `rustls` 0.23.37 → 0.23.40
- `rustls-webpki` 0.103.10 → 0.103.13
- `tokio` 1.50.0 → 1.52.1
- `reqwest` 0.13.2 → 0.13.3
- `rand` 0.9.2 → 0.10.1
### Docs
- Document `DELETE_ON_FAILURE` in the README
## Upgrade notes
- **Default behavior unchanged.** `DELETE_ON_FAILURE` defaults to `true`, matching pre-2.1.1 behavior. Set it to `false` if you want stale records preserved during outages.
- No config file schema changes. Existing `config.json` deployments continue to work without edits.
## Docker
```sh
docker pull timothyjmiller/cloudflare-ddns:2.1.1
docker pull timothyjmiller/cloudflare-ddns:latest
```
Multi-arch: `linux/amd64`, `linux/arm64`, `linux/ppc64le`.
## Verification
- `cargo test` — 352 tests pass
- Release build succeeds, binary size ~1.7 MiB (pre-UPX)
- Smoke tested in both legacy `config.json` mode and env-var mode against the live Cloudflare API

48
RELEASE_NOTES_2.1.2.md Normal file
View File

@@ -0,0 +1,48 @@
# cloudflare-ddns v2.1.2 — Notification & Domain Casing Fixes
This patch release fixes three bugs reported on GitHub.
## Bug fixes
- **Mixed-case domains now match existing DNS records (#255).**
In env-var mode, configuring a domain with mixed casing (for example
`ExaMple.com`) caused every update cycle to attempt a duplicate record
create and fail with Cloudflare error `81058: An identical record already
exists.` Cloudflare normalizes record names to lowercase server-side, so
the lookup is now case-insensitive.
- **Pushover notifications work again (#258).**
The shoutrrr-style URL `pushover://shoutrrr:TOKEN@USER` (the canonical form
from `containrrr/shoutrrr`) was being parsed with the literal `shoutrrr:`
username included in the API token, which Pushover rejected. The parser
now strips the optional `<user>:` prefix from the token segment, restoring
the v2.0.7 behavior. Optional shoutrrr query parameters (`?devices=...`,
`?priority=...`) are tolerated.
- **Gotify notifications now produce a valid request URL (#262).**
The Gotify URL parser blindly appended `/message` after any query string,
producing malformed webhook URLs like
`https://host:9090?token=XYZ/message`. The parser now follows shoutrrr's
canonical layout — token as the final path segment or `?token=` query —
and supports `?disabletls=yes` to switch the resulting webhook from HTTPS
to HTTP for typical home-LAN setups, plus the `gotify+http://` /
`gotify+https://` aliases.
## Already addressed (closing #257)
The robust public-IP discovery enhancements requested in #257 (multi-endpoint
trace fallback, strict address-family validation, API request timeouts,
duplicate record cleanup) were already folded into the Rust port shipped in
v2.0.8 — see `src/provider.rs` (`CF_TRACE_PRIMARY` / `CF_TRACE_FALLBACK`,
`validate_detected_ip`, `build_split_client`) and `src/cloudflare.rs`
(`set_ips` dedup behavior, per-request `timeout`).
## Upgrade
```bash
docker pull timothyjmiller/cloudflare-ddns:2.1.2
# or
docker pull timothyjmiller/cloudflare-ddns:latest
```
No configuration changes are required.

View File

@@ -280,8 +280,16 @@ impl CloudflareHandle {
name: &str,
ppfmt: &PP,
) -> Vec<DnsRecord> {
// Cloudflare normalizes DNS record names to lowercase server-side, so a
// case-sensitive match against the user-supplied name (e.g. ExaMple.com)
// would never find existing records and trigger 81058 duplicate-create
// errors on every cycle. Match case-insensitively to mirror Cloudflare's
// own comparison rules.
let records = self.list_records(zone_id, record_type, ppfmt).await;
records.into_iter().filter(|r| r.name == name).collect()
records
.into_iter()
.filter(|r| r.name.eq_ignore_ascii_case(name))
.collect()
}
fn is_managed_record(&self, record: &DnsRecord) -> bool {
@@ -926,6 +934,29 @@ mod tests {
assert_eq!(records[1].id, "r2");
}
// Issue #255: Cloudflare normalizes record names to lowercase, so a
// case-sensitive match against the user-supplied name (e.g. ExaMple.com)
// would loop forever creating duplicates. Verify match is case-insensitive.
#[tokio::test]
async fn list_records_by_name_case_insensitive() {
let server = MockServer::start().await;
let body = dns_list_response(vec![
dns_record_json("r1", "example.com", "1.2.3.4", None),
]);
Mock::given(method("GET"))
.and(path("/zones/z1/dns_records"))
.respond_with(ResponseTemplate::new(200).set_body_json(body))
.mount(&server)
.await;
let h = handle(&server.uri());
let records = h
.list_records_by_name("z1", "A", "ExaMple.com", &pp())
.await;
assert_eq!(records.len(), 1);
assert_eq!(records[0].id, "r1");
}
#[tokio::test]
async fn list_records_by_name_filters() {
let server = MockServer::start().await;

View File

@@ -84,6 +84,7 @@ pub struct AppConfig {
pub update_cron: CronSchedule,
pub update_on_start: bool,
pub delete_on_stop: bool,
pub delete_on_failure: bool,
pub ttl: TTL,
pub proxied_expression: Option<Box<dyn Fn(&str) -> bool + Send + Sync>>,
pub record_comment: Option<String>,
@@ -449,6 +450,7 @@ fn legacy_to_app_config(legacy: LegacyConfig, dry_run: bool, repeat: bool) -> Re
update_cron: schedule,
update_on_start: true,
delete_on_stop: false,
delete_on_failure: true,
ttl,
proxied_expression: None,
record_comment: None,
@@ -503,6 +505,7 @@ pub fn load_env_config(ppfmt: &PP) -> Result<AppConfig, String> {
let update_cron = read_cron_from_env(ppfmt)?;
let update_on_start = getenv_bool("UPDATE_ON_START", true);
let delete_on_stop = getenv_bool("DELETE_ON_STOP", false);
let delete_on_failure = getenv_bool("DELETE_ON_FAILURE", true);
let ttl_val = getenv("TTL")
.and_then(|s| s.parse::<i64>().ok())
@@ -571,6 +574,7 @@ pub fn load_env_config(ppfmt: &PP) -> Result<AppConfig, String> {
update_cron,
update_on_start,
delete_on_stop,
delete_on_failure,
ttl,
proxied_expression,
record_comment,
@@ -1317,6 +1321,7 @@ mod tests {
update_cron: CronSchedule::Once,
update_on_start: true,
delete_on_stop: false,
delete_on_failure: true,
ttl: TTL::AUTO,
proxied_expression: None,
record_comment: None,
@@ -1351,6 +1356,7 @@ mod tests {
update_cron: CronSchedule::Every(Duration::from_secs(300)),
update_on_start: true,
delete_on_stop: true,
delete_on_failure: true,
ttl: TTL::new(60),
proxied_expression: None,
record_comment: Some("managed".to_string()),
@@ -2003,6 +2009,7 @@ mod tests {
update_cron: CronSchedule::Every(Duration::from_secs(300)),
update_on_start: true,
delete_on_stop: false,
delete_on_failure: true,
ttl: TTL::AUTO,
proxied_expression: None,
record_comment: None,
@@ -2039,6 +2046,7 @@ mod tests {
update_cron: CronSchedule::Every(Duration::from_secs(600)),
update_on_start: true,
delete_on_stop: true,
delete_on_failure: true,
ttl: TTL::new(120),
proxied_expression: None,
record_comment: Some("cf-ddns".to_string()),
@@ -2072,6 +2080,7 @@ mod tests {
update_cron: CronSchedule::Once,
update_on_start: true,
delete_on_stop: false,
delete_on_failure: true,
ttl: TTL::AUTO,
proxied_expression: None,
record_comment: None,

View File

@@ -14,7 +14,7 @@ use crate::pp::PP;
use std::collections::HashSet;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use rand::Rng;
use rand::RngExt;
use reqwest::Client;
use tokio::signal;
use tokio::time::{sleep, Duration};

View File

@@ -274,6 +274,90 @@ impl NotifierDyn for ShoutrrrNotifier {
}
}
/// Build a Gotify webhook URL from a shoutrrr-style URL.
///
/// Accepted forms:
/// gotify://host[:port]/TOKEN[?disabletls=yes]
/// gotify://host[:port]/path/?token=TOKEN[&disabletls=yes]
/// gotify+http://host[:port]/TOKEN
/// gotify+https://host[:port]/TOKEN
///
/// `disabletls=yes` switches the resulting webhook to plain HTTP, which is
/// required for typical home-LAN deployments where Gotify is reachable on a
/// private IP without TLS.
fn parse_gotify_url(
original: &str,
rest: &str,
default_scheme: &str,
) -> Result<ShoutrrrService, String> {
// Split off the query string (if any) before path manipulation.
let (path_part, query_part) = match rest.split_once('?') {
Some((p, q)) => (p, q),
None => (rest, ""),
};
let mut token: Option<String> = None;
let mut scheme = default_scheme;
if !query_part.is_empty() {
for pair in query_part.split('&') {
let (k, v) = match pair.split_once('=') {
Some(kv) => kv,
None => continue,
};
match k {
"token" => token = Some(v.to_string()),
"disabletls" if v.eq_ignore_ascii_case("yes") => scheme = "http",
_ => {}
}
}
}
// host[:port][/extra/path]/TOKEN -- token is the last non-empty path segment.
let trimmed = path_part.trim_end_matches('/');
let (host_path, last_segment) = match trimmed.rsplit_once('/') {
Some((h, t)) => (h, t),
None => (trimmed, ""),
};
if token.is_none() && !last_segment.is_empty() {
token = Some(last_segment.to_string());
}
let token = match token {
Some(t) if !t.is_empty() => t,
_ => {
return Err(format!(
"Invalid Gotify shoutrrr URL (missing token): {original}"
));
}
};
// host_path is either "host[:port]" or "host[:port]/extra/path" if user
// had additional path segments before the token.
let host_and_path = if host_path.is_empty() {
// No slash before token -> token *was* the only segment, host is path_part minus token.
path_part
.trim_end_matches('/')
.trim_end_matches(&token[..])
.trim_end_matches('/')
.to_string()
} else {
host_path.to_string()
};
if host_and_path.is_empty() {
return Err(format!(
"Invalid Gotify shoutrrr URL (missing host): {original}"
));
}
Ok(ShoutrrrService {
original_url: original.to_string(),
service_type: ShoutrrrServiceType::Gotify,
webhook_url: format!("{scheme}://{host_and_path}/message?token={token}"),
})
}
fn parse_shoutrrr_url(url_str: &str) -> Result<ShoutrrrService, String> {
// Shoutrrr URL formats:
// discord://token@id -> https://discord.com/api/webhooks/id/token
@@ -334,15 +418,13 @@ fn parse_shoutrrr_url(url_str: &str) -> Result<ShoutrrrService, String> {
return Err(format!("Invalid Telegram shoutrrr URL: {url_str}"));
}
if let Some(rest) = url_str
.strip_prefix("gotify://")
.or_else(|| url_str.strip_prefix("gotify+https://"))
if let Some((rest, default_scheme)) = url_str
.strip_prefix("gotify+https://")
.map(|r| (r, "https"))
.or_else(|| url_str.strip_prefix("gotify+http://").map(|r| (r, "http")))
.or_else(|| url_str.strip_prefix("gotify://").map(|r| (r, "https")))
{
return Ok(ShoutrrrService {
original_url: url_str.to_string(),
service_type: ShoutrrrServiceType::Gotify,
webhook_url: format!("https://{rest}/message"),
});
return parse_gotify_url(url_str, rest, default_scheme);
}
if let Some(rest) = url_str
@@ -365,14 +447,28 @@ fn parse_shoutrrr_url(url_str: &str) -> Result<ShoutrrrService, String> {
}
if let Some(rest) = url_str.strip_prefix("pushover://") {
let parts: Vec<&str> = rest.splitn(2, '@').collect();
// Strip query string (devices, priority, title) — not yet supported.
let body = rest.split('?').next().unwrap_or(rest).trim_end_matches('/');
let parts: Vec<&str> = body.splitn(2, '@').collect();
if parts.len() == 2 {
// Shoutrrr's canonical pushover URL is
// pushover://shoutrrr:APIToken@UserKey
// where the literal "shoutrrr:" username is required. Strip an
// optional "<user>:" prefix from the token portion so both the
// canonical form and the bare "pushover://TOKEN@USER" form work.
let token = parts[0]
.rsplit_once(':')
.map(|(_, t)| t)
.unwrap_or(parts[0]);
let user = parts[1];
if token.is_empty() || user.is_empty() {
return Err(format!("Invalid Pushover shoutrrr URL: {url_str}"));
}
return Ok(ShoutrrrService {
original_url: url_str.to_string(),
service_type: ShoutrrrServiceType::Pushover,
webhook_url: format!(
"https://api.pushover.net/1/messages.json?token={}&user={}",
parts[0], parts[1]
"https://api.pushover.net/1/messages.json?token={token}&user={user}"
),
});
}
@@ -735,15 +831,53 @@ mod tests {
}
#[test]
fn test_parse_gotify() {
let result = parse_shoutrrr_url("gotify://myhost.com/somepath").unwrap();
fn test_parse_gotify_token_as_path_segment() {
// Shoutrrr canonical format: token is the final path segment.
let result = parse_shoutrrr_url("gotify://myhost.com/MYTOKEN").unwrap();
assert_eq!(
result.webhook_url,
"https://myhost.com/somepath/message"
"https://myhost.com/message?token=MYTOKEN"
);
assert!(matches!(result.service_type, ShoutrrrServiceType::Gotify));
}
#[test]
fn test_parse_gotify_token_query_param() {
// Older "gotify://host?token=..." form (issue #262).
let result =
parse_shoutrrr_url("gotify://192.168.178.222:9090?token=AtE2tUGQig67b0J&disabletls=yes")
.unwrap();
assert_eq!(
result.webhook_url,
"http://192.168.178.222:9090/message?token=AtE2tUGQig67b0J"
);
}
#[test]
fn test_parse_gotify_disabletls_switches_to_http() {
let result =
parse_shoutrrr_url("gotify://10.0.0.1:8080/TOKEN123?disabletls=yes").unwrap();
assert_eq!(
result.webhook_url,
"http://10.0.0.1:8080/message?token=TOKEN123"
);
}
#[test]
fn test_parse_gotify_plus_http_scheme() {
let result = parse_shoutrrr_url("gotify+http://10.0.0.1:8080/TOKEN").unwrap();
assert_eq!(
result.webhook_url,
"http://10.0.0.1:8080/message?token=TOKEN"
);
}
#[test]
fn test_parse_gotify_missing_token_errors() {
assert!(parse_shoutrrr_url("gotify://myhost.com/").is_err());
assert!(parse_shoutrrr_url("gotify://myhost.com").is_err());
}
#[test]
fn test_parse_generic() {
let result = parse_shoutrrr_url("generic://example.com/webhook").unwrap();
@@ -780,12 +914,42 @@ mod tests {
));
}
#[test]
fn test_parse_pushover_shoutrrr_canonical_form() {
// Shoutrrr's canonical URL has a literal "shoutrrr:" username.
// Issue #258: parser must strip this prefix or Pushover rejects the token.
let result =
parse_shoutrrr_url("pushover://shoutrrr:apitoken@userkey").unwrap();
assert_eq!(
result.webhook_url,
"https://api.pushover.net/1/messages.json?token=apitoken&user=userkey"
);
}
#[test]
fn test_parse_pushover_strips_query_params() {
// Optional shoutrrr query params (devices, priority) should not break parsing.
let result =
parse_shoutrrr_url("pushover://shoutrrr:tok@user/?devices=phone&priority=1")
.unwrap();
assert_eq!(
result.webhook_url,
"https://api.pushover.net/1/messages.json?token=tok&user=user"
);
}
#[test]
fn test_parse_pushover_invalid() {
let result = parse_shoutrrr_url("pushover://noatsign");
assert!(result.is_err());
}
#[test]
fn test_parse_pushover_empty_token_errors() {
assert!(parse_shoutrrr_url("pushover://shoutrrr:@user").is_err());
assert!(parse_shoutrrr_url("pushover://tok@").is_err());
}
#[test]
fn test_parse_plain_https_url() {
let result =

View File

@@ -115,6 +115,19 @@ pub async fn update_once(
// Update DNS records (env var mode - domain-based)
for (ip_type, domains) in &config.domains {
let ips = detected_ips.get(ip_type).cloned().unwrap_or_default();
if ips.is_empty() && !config.delete_on_failure {
ppfmt.warningf(
pp::EMOJI_WARNING,
&format!(
"Skipping {} domain update for {}",
ip_type.describe(),
domains.join(", ")
),
);
continue;
}
let record_type = ip_type.record_type();
for domain_str in domains {
@@ -713,6 +726,7 @@ mod tests {
update_cron: CronSchedule::Once,
update_on_start: true,
delete_on_stop: false,
delete_on_failure: true,
ttl: TTL::AUTO,
proxied_expression: None,
record_comment: None,
@@ -2307,6 +2321,272 @@ mod tests {
ddns.delete_entries("A", &config).await;
}
// -------------------------------------------------------
// delete_on_failure tests
// -------------------------------------------------------
/// When IPv4 detection fails but IPv6 succeeds, and delete_on_failure=false, skip V4 domains but update V6
#[tokio::test]
async fn test_skip_v4_domains_when_v4_detection_fails() {
let server = MockServer::start().await;
let zone_id = "zone-abc";
let ip_v6 = "2001:db8::1";
// Zone lookup for V6 domain
Mock::given(method("GET"))
.and(path("/zones"))
.and(query_param("name", "v6.example.com"))
.respond_with(
ResponseTemplate::new(200).set_body_json(zones_response(zone_id, "example.com")),
)
.mount(&server)
.await;
// LIST existing records for V6
Mock::given(method("GET"))
.and(path_regex(format!("/zones/{zone_id}/dns_records")))
.respond_with(ResponseTemplate::new(200).set_body_json(dns_records_empty()))
.mount(&server)
.await;
// POST for V6 should be called (V6 succeeds)
Mock::given(method("POST"))
.and(path(format!("/zones/{zone_id}/dns_records")))
.respond_with(ResponseTemplate::new(200).set_body_json(dns_record_created(
"rec-1",
"v6.example.com",
"2001:db8::1",
)))
.expect(1)
.mount(&server)
.await;
// Providers: V4 fails (None), V6 succeeds
let mut providers = HashMap::new();
providers.insert(IpType::V4, ProviderType::None);
providers.insert(
IpType::V6,
ProviderType::Literal {
ips: vec![ip_v6.parse().unwrap()],
},
);
let mut domains = HashMap::new();
domains.insert(IpType::V4, vec!["v4.example.com".to_string()]);
domains.insert(IpType::V6, vec!["v6.example.com".to_string()]);
let mut config = make_config(providers, domains, vec![], false);
config.delete_on_failure = false;
let cf = handle(&server.uri());
let notifier = empty_notifier();
let heartbeat = empty_heartbeat();
let ppfmt = pp();
let mut cf_cache = CachedCloudflareFilter::new();
let ok = update_once(
&config,
&cf,
&notifier,
&heartbeat,
&mut cf_cache,
&ppfmt,
&mut HashSet::new(),
&crate::test_client(),
)
.await;
assert!(ok, "Should succeed with partial detection");
}
/// When IPv6 detection fails but IPv4 succeeds, and delete_on_failure=false, skip V6 domains but update V4
#[tokio::test]
async fn test_skip_v6_domains_when_v6_detection_fails() {
let server = MockServer::start().await;
let zone_id = "zone-abc";
let ip_v4 = "198.51.100.42";
// Zone lookup for V4 domain
Mock::given(method("GET"))
.and(path("/zones"))
.and(query_param("name", "v4.example.com"))
.respond_with(
ResponseTemplate::new(200).set_body_json(zones_response(zone_id, "example.com")),
)
.mount(&server)
.await;
// LIST existing records for V4
Mock::given(method("GET"))
.and(path_regex(format!("/zones/{zone_id}/dns_records")))
.respond_with(ResponseTemplate::new(200).set_body_json(dns_records_empty()))
.mount(&server)
.await;
// POST for V4 should be called (V4 succeeds)
Mock::given(method("POST"))
.and(path(format!("/zones/{zone_id}/dns_records")))
.respond_with(ResponseTemplate::new(200).set_body_json(dns_record_created(
"rec-1",
"v4.example.com",
"198.51.100.42",
)))
.expect(1)
.mount(&server)
.await;
// Providers: V4 succeeds, V6 fails (None)
let mut providers = HashMap::new();
providers.insert(
IpType::V4,
ProviderType::Literal {
ips: vec![ip_v4.parse().unwrap()],
},
);
providers.insert(IpType::V6, ProviderType::None);
let mut domains = HashMap::new();
domains.insert(IpType::V4, vec!["v4.example.com".to_string()]);
domains.insert(IpType::V6, vec!["v6.example.com".to_string()]);
let mut config = make_config(providers, domains, vec![], false);
config.delete_on_failure = false;
let cf = handle(&server.uri());
let notifier = empty_notifier();
let heartbeat = empty_heartbeat();
let ppfmt = pp();
let mut cf_cache = CachedCloudflareFilter::new();
let ok = update_once(
&config,
&cf,
&notifier,
&heartbeat,
&mut cf_cache,
&ppfmt,
&mut HashSet::new(),
&crate::test_client(),
)
.await;
assert!(ok, "Should succeed with partial detection");
}
/// When both IPv4 and IPv6 detection fail, and delete_on_failure=false, skip all domains
#[tokio::test]
async fn test_skip_all_domains_when_both_detect_fail() {
let server = MockServer::start().await;
// No POST/DELETE should be called at all
// Providers: both fail (None)
let mut providers = HashMap::new();
providers.insert(IpType::V4, ProviderType::None);
providers.insert(IpType::V6, ProviderType::None);
let mut domains = HashMap::new();
domains.insert(IpType::V4, vec!["v4.example.com".to_string()]);
domains.insert(IpType::V6, vec!["v6.example.com".to_string()]);
let mut config = make_config(providers, domains, vec![], false);
config.delete_on_failure = false;
let cf = handle(&server.uri());
let notifier = empty_notifier();
let heartbeat = empty_heartbeat();
let ppfmt = pp();
let mut cf_cache = CachedCloudflareFilter::new();
let ok = update_once(
&config,
&cf,
&notifier,
&heartbeat,
&mut cf_cache,
&ppfmt,
&mut HashSet::new(),
&crate::test_client(),
)
.await;
assert!(ok, "Should succeed (no updates, no failures)");
}
/// When both IPv4 and IPv6 detection succeed, and delete_on_failure=false, update all domains
#[tokio::test]
async fn test_update_all_domains_when_both_detect() {
let server = MockServer::start().await;
let zone_id = "zone-abc";
let ip_v4 = "198.51.100.42";
let ip_v6 = "2001:db8::1";
// Zone lookups for both domains
Mock::given(method("GET"))
.and(path("/zones"))
.respond_with(
ResponseTemplate::new(200).set_body_json(zones_response(zone_id, "example.com")),
)
.mount(&server)
.await;
// LIST existing records (empty for both)
Mock::given(method("GET"))
.and(path_regex(format!("/zones/{zone_id}/dns_records")))
.respond_with(ResponseTemplate::new(200).set_body_json(dns_records_empty()))
.mount(&server)
.await;
// POST for both should be called
Mock::given(method("POST"))
.and(path(format!("/zones/{zone_id}/dns_records")))
.respond_with(ResponseTemplate::new(200).set_body_json(dns_record_created(
"rec-new",
"example.com",
"198.51.100.42",
)))
.expect(2) // Two POSTs: one for V4, one for V6
.mount(&server)
.await;
// Providers: both succeed
let mut providers = HashMap::new();
providers.insert(
IpType::V4,
ProviderType::Literal {
ips: vec![ip_v4.parse().unwrap()],
},
);
providers.insert(
IpType::V6,
ProviderType::Literal {
ips: vec![ip_v6.parse().unwrap()],
},
);
let mut domains = HashMap::new();
domains.insert(IpType::V4, vec!["v4.example.com".to_string()]);
domains.insert(IpType::V6, vec!["v6.example.com".to_string()]);
let mut config = make_config(providers, domains, vec![], false);
config.delete_on_failure = false;
let cf = handle(&server.uri());
let notifier = empty_notifier();
let heartbeat = empty_heartbeat();
let ppfmt = pp();
let mut cf_cache = CachedCloudflareFilter::new();
let ok = update_once(
&config,
&cf,
&notifier,
&heartbeat,
&mut cf_cache,
&ppfmt,
&mut HashSet::new(),
&crate::test_client(),
)
.await;
assert!(ok, "Should succeed with both detections");
}
}
// Legacy types for backwards compatibility