Migrate cloudflare-ddns to Rust

Add Cargo.toml, Cargo.lock and a full src/ tree with modules and tests
Update Dockerfile to build a Rust release binary and simplify CI/publish
Remove legacy Python script, requirements.txt, and startup helper
Switch .gitignore to Rust artifacts; update Dependabot and workflows to
cargo
Add .env example, docker-compose env, and update README and VSCode
settings

Remove the old Python implementation and requirements; add a Rust
implementation with Cargo.toml/Cargo.lock and full src/ modules, tests,
and notifier/heartbeat support. Update Dockerfile, build/publish
scripts, dependabot and workflows, README, and provide env-based
docker-compose and .env examples.
This commit is contained in:
Timothy Miller
2026-03-10 01:21:21 -04:00
parent f0d9510fff
commit b1a2fa7af3
23 changed files with 13115 additions and 792 deletions

View File

@@ -1,6 +1,6 @@
version: 2
updates:
- package-ecosystem: 'pip'
- package-ecosystem: 'cargo'
directory: '/'
schedule:
interval: 'daily'

View File

@@ -3,6 +3,8 @@ name: Build cloudflare-ddns Docker image (multi-arch)
on:
push:
branches: master
tags:
- 'v*'
pull_request:
jobs:
@@ -10,45 +12,48 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
# https://github.com/docker/setup-qemu-action
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
# https://github.com/docker/setup-buildx-action
- name: Setting up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@v1
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Extract branch name
shell: bash
run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
id: extract_branch
- name: Extract version from Cargo.toml
id: version
run: |
VERSION=$(grep '^version' Cargo.toml | head -1 | sed 's/.*"\(.*\)".*/\1/')
echo "version=$VERSION" >> "$GITHUB_OUTPUT"
- name: Docker meta
id: meta
uses: docker/metadata-action@v3
uses: docker/metadata-action@v5
with:
images: timothyjmiller/cloudflare-ddns
sep-tags: ','
flavor: |
latest=false
tags: |
type=raw,enable=${{ steps.extract_branch.outputs.branch == 'master' }},value=latest
type=schedule
type=ref,event=pr
- name: Build and publish
uses: docker/build-push-action@v2
type=raw,enable=${{ github.ref == 'refs/heads/master' }},value=latest
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=raw,enable=${{ github.ref == 'refs/heads/master' }},value=${{ steps.version.outputs.version }}
- name: Build and push
uses: docker/build-push-action@v6
with:
context: .
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
platforms: linux/ppc64le,linux/s390x,linux/386,linux/arm/v6,linux/arm/v7,linux/arm64/v8,linux/amd64
platforms: linux/amd64,linux/arm64,linux/arm/v7
labels: |
org.opencontainers.image.source=${{ github.event.repository.html_url }}
org.opencontainers.image.created=${{ steps.meta.outputs.created }}
org.opencontainers.image.revision=${{ github.sha }}
org.opencontainers.image.version=${{ steps.version.outputs.version }}

61
.gitignore vendored
View File

@@ -1,63 +1,10 @@
# Private API keys for updating IPv4 & IPv6 addresses on Cloudflare
config.json
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Rust build artifacts
/target/
debug/
*.pdb
# Git History
**/.history/*

View File

@@ -11,11 +11,7 @@
".vscode": true,
"Dockerfile": true,
"LICENSE": true,
"requirements.txt": true,
"venv": true
"target": true
},
"explorerExclude.backup": {},
"python.linting.pylintEnabled": true,
"python.linting.enabled": true,
"python.formatting.provider": "autopep8"
"explorerExclude.backup": {}
}

1870
Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

21
Cargo.toml Normal file
View File

@@ -0,0 +1,21 @@
[package]
name = "cloudflare-ddns"
version = "2.0.0"
edition = "2021"
description = "Access your home network remotely via a custom domain name without a static IP"
license = "GPL-3.0"
[dependencies]
reqwest = { version = "0.12", features = ["json", "rustls-tls"], default-features = false }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
tokio = { version = "1", features = ["full"] }
regex = "1"
chrono = { version = "0.4", features = ["clock"] }
url = "2"
idna = "1"
if-addrs = "0.13"
[dev-dependencies]
tempfile = "3.26.0"
wiremock = "0.6"

View File

@@ -1,18 +1,13 @@
# ---- Base ----
FROM python:alpine AS base
# ---- Build ----
FROM rust:alpine AS builder
RUN apk add --no-cache musl-dev
WORKDIR /build
COPY Cargo.toml Cargo.lock ./
COPY src ./src
RUN cargo build --release
#
# ---- Dependencies ----
FROM base AS dependencies
# install dependencies
COPY requirements.txt .
RUN pip install --user -r requirements.txt
#
# ---- Release ----
FROM base AS release
# copy installed dependencies and project source file(s)
WORKDIR /
COPY --from=dependencies /root/.local /root/.local
COPY cloudflare-ddns.py .
CMD ["python", "-u", "/cloudflare-ddns.py", "--repeat"]
FROM alpine:latest AS release
RUN apk add --no-cache ca-certificates
COPY --from=builder /build/target/release/cloudflare-ddns /usr/local/bin/cloudflare-ddns
CMD ["cloudflare-ddns", "--repeat"]

763
README.md
View File

@@ -1,286 +1,222 @@
<p align="center"><a href="https://timknowsbest.com/free-dynamic-dns" target="_blank" rel="noopener noreferrer"><img width="1024" src="feature-graphic.jpg" alt="Cloudflare DDNS"/></a></p>
# 🚀 Cloudflare DDNS
# 🌍 Cloudflare DDNS
Access your home network remotely via a custom domain name without a static IP!
## ⚡ Efficiency
A feature-complete dynamic DNS client for Cloudflare, written in Rust. Configure everything with environment variables. Supports notifications, heartbeat monitoring, WAF list management, flexible scheduling, and more.
- ❤️ Easy config. List your domains and you're done.
- 🔁 The Python runtime will re-use existing HTTP connections.
- 🗃️ Cloudflare API responses are cached to reduce API usage.
- 🤏 The Docker image is small and efficient.
- 0⃣ Zero dependencies.
- 💪 Supports all platforms.
- 🏠 Enables low cost self hosting to promote a more decentralized internet.
- 🔒 Zero-log IP provider ([cdn-cgi/trace](https://www.cloudflare.com/cdn-cgi/trace))
- 👐 GPL-3.0 License. Open source for open audits.
## ✨ Features
## 💯 Complete Support of Domain Names, Subdomains, IPv4 & IPv6, and Load Balancing
- 🔍 **Multiple IP detection providers** — Cloudflare Trace, Cloudflare DNS-over-HTTPS, ipify, local interface, custom URL, or static IPs
- 📡 **IPv4 and IPv6** — Full dual-stack support with independent provider configuration
- 🌐 **Multiple domains and zones** — Update any number of domains across multiple Cloudflare zones
- 🃏 **Wildcard domains** — Support for `*.example.com` records
- 🌍 **Internationalized domain names** — Full IDN/punycode support (e.g. `münchen.de`)
- 🛡️ **WAF list management** — Automatically update Cloudflare WAF IP lists
- 🔔 **Notifications** — Shoutrrr-compatible notifications (Discord, Slack, Telegram, Gotify, Pushover, generic webhooks)
- 💓 **Heartbeat monitoring** — Healthchecks.io and Uptime Kuma integration
- ⏱️ **Cron scheduling** — Flexible update intervals via cron expressions
- 🧪 **Dry-run mode** — Preview changes without modifying DNS records
- 🧹 **Graceful shutdown** — Signal handling (SIGINT/SIGTERM) with optional DNS record cleanup
- 💬 **Record comments** — Tag managed records with comments for identification
- 🎯 **Managed record regex** — Control which records the tool manages via regex matching
- 🎨 **Pretty output with emoji** — Configurable emoji and verbosity levels
- 🔒 **Zero-log IP detection** — Uses Cloudflare's [cdn-cgi/trace](https://www.cloudflare.com/cdn-cgi/trace) by default
- 🏠 **CGNAT-aware local detection** — Filters out shared address space (100.64.0.0/10) and private ranges
- 🤏 **Tiny static binary** — Small Docker image, zero runtime dependencies
- 🌐 Supports multiple domains (zones) on the same IP.
- 📠 Supports multiple subdomains on the same IP.
- 📡 IPv4 and IPv6 support.
- 🌍 Supports all Cloudflare regions.
- ⚖️ Supports [Cloudflare Load Balancing](https://developers.cloudflare.com/load-balancing/understand-basics/pools/).
- 🇺🇸 Made in the U.S.A.
## 📊 Stats
| Size | Downloads | Discord |
| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- |
| [![cloudflare-ddns docker image size](https://img.shields.io/docker/image-size/timothyjmiller/cloudflare-ddns?style=flat-square)](https://hub.docker.com/r/timothyjmiller/cloudflare-ddns 'cloudflare-ddns docker image size') | [![Total DockerHub pulls](https://img.shields.io/docker/pulls/timothyjmiller/cloudflare-ddns?style=flat-square)](https://hub.docker.com/r/timothyjmiller/cloudflare-ddns 'Total DockerHub pulls') | [![Official Discord Server](https://img.shields.io/discord/785778163887112192?style=flat-square)](https://discord.gg/UgGmwMvNxm 'Official Discord Server') |
## 🚦 Getting Started
First copy the example configuration file into the real one.
## 🚀 Quick Start
```bash
cp config-example.json config.json
docker run -d \
--name cloudflare-ddns \
--restart unless-stopped \
--network host \
-e CLOUDFLARE_API_TOKEN=your-api-token \
-e DOMAINS=example.com,www.example.com \
timothyjmiller/cloudflare-ddns:latest
```
Edit `config.json` and replace the values with your own.
That's it. The container detects your public IP and updates the DNS records for your domains every 5 minutes.
### 🔑 Authentication methods
> ⚠️ `--network host` is required to detect IPv6 addresses. If you only need IPv4, you can omit it and set `IP6_PROVIDER=none`.
You can choose to use either the newer API tokens, or the traditional API keys
## 🔑 Authentication
To generate a new API tokens, go to your [Cloudflare Profile](https://dash.cloudflare.com/profile/api-tokens) and create a token capable of **Edit DNS**. Then replace the value in
| Variable | Description |
|----------|-------------|
| `CLOUDFLARE_API_TOKEN` | API token with "Edit DNS" capability |
| `CLOUDFLARE_API_TOKEN_FILE` | Path to a file containing the API token (Docker secrets compatible) |
```json
"authentication":
"api_token": "Your cloudflare API token, including the capability of **Edit DNS**"
```
To generate an API token, go to your [Cloudflare Profile](https://dash.cloudflare.com/profile/api-tokens) and create a token capable of **Edit DNS**.
Alternatively, you can use the traditional API keys by setting appropriate values for:
## 🌐 Domains
```json
"authentication":
"api_key":
"api_key": "Your cloudflare API Key",
"account_email": "The email address you use to sign in to cloudflare",
```
| Variable | Description |
|----------|-------------|
| `DOMAINS` | Comma-separated list of domains to update for both IPv4 and IPv6 |
| `IP4_DOMAINS` | Comma-separated list of IPv4-only domains |
| `IP6_DOMAINS` | Comma-separated list of IPv6-only domains |
### 📍 Enable or disable IPv4 or IPv6
Wildcard domains are supported: `*.example.com`
Some ISP provided modems only allow port forwarding over IPv4 or IPv6. In this case, you would want to disable any interface not accessible via port forward.
At least one of `DOMAINS`, `IP4_DOMAINS`, `IP6_DOMAINS`, or `WAF_LISTS` must be set.
```json
"a": true,
"aaaa": true
```
## 🔍 IP Detection Providers
### 🎛️ Other values explained
| Variable | Default | Description |
|----------|---------|-------------|
| `IP4_PROVIDER` | `cloudflare.trace` | IPv4 detection method |
| `IP6_PROVIDER` | `cloudflare.trace` | IPv6 detection method |
```json
"zone_id": "The ID of the zone that will get the records. From your dashboard click into the zone. Under the overview tab, scroll down and the zone ID is listed in the right rail",
"subdomains": "Array of subdomains you want to update the A & where applicable, AAAA records. IMPORTANT! Only write subdomain name. Do not include the base domain name. (e.g. foo or an empty string to update the base domain)",
"proxied": "Defaults to false. Make it true if you want CDN/SSL benefits from cloudflare. This usually disables SSH)",
"ttl": "Defaults to 300 seconds. Longer TTLs speed up DNS lookups by increasing the chance of cached results, but a longer TTL also means that updates to your records take longer to go into effect. You can choose a TTL between 30 seconds and 1 day. For more information, see [Cloudflare's TTL documentation](https://developers.cloudflare.com/dns/manage-dns-records/reference/ttl/)",
```
Available providers:
## 📠 Hosting multiple subdomains on the same IP?
| Provider | Description |
|----------|-------------|
| `cloudflare.trace` | 🔒 Cloudflare's `/cdn-cgi/trace` endpoint (default, zero-log) |
| `cloudflare.doh` | 🌐 Cloudflare DNS-over-HTTPS (`whoami.cloudflare` TXT query) |
| `ipify` | 🌎 ipify.org API |
| `local` | 🏠 Local IP via system routing table (no network traffic, CGNAT-aware) |
| `local.iface:<name>` | 🔌 IP from a specific network interface (e.g., `local.iface:eth0`) |
| `url:<url>` | 🔗 Custom HTTP(S) endpoint that returns an IP address |
| `literal:<ips>` | 📌 Static IP addresses (comma-separated) |
| `none` | 🚫 Disable this IP type |
This script can be used to update multiple subdomains on the same IP address.
## ⏱️ Scheduling
For example, if you have a domain `example.com` and you want to host additional subdomains at `foo.example.com` and `bar.example.com` on the same IP address, you can use this script to update the DNS records for all subdomains.
| Variable | Default | Description |
|----------|---------|-------------|
| `UPDATE_CRON` | `@every 5m` | Update schedule |
| `UPDATE_ON_START` | `true` | Run an update immediately on startup |
| `DELETE_ON_STOP` | `false` | Delete managed DNS records on shutdown |
### ⚠️ Note
Schedule formats:
Please remove the comments after `//` in the below example. They are only there to explain the config.
- `@every 5m` — Every 5 minutes
- `@every 1h` — Every hour
- `@every 30s` — Every 30 seconds
- `@once` — Run once and exit
Do not include the base domain name in your `subdomains` config. Do not use the [FQDN](https://en.wikipedia.org/wiki/Fully_qualified_domain_name).
When `UPDATE_CRON=@once`, `UPDATE_ON_START` must be `true` and `DELETE_ON_STOP` must be `false`.
### 👉 Example 🚀
## 📝 DNS Record Settings
```bash
{
"cloudflare": [
{
"authentication": {
"api_token": "api_token_here", // Either api_token or api_key
"api_key": {
"api_key": "api_key_here",
"account_email": "your_email_here"
}
},
"zone_id": "your_zone_id_here",
"subdomains": [
{
"name": "", // Root domain (example.com)
"proxied": true
},
{
"name": "foo", // (foo.example.com)
"proxied": true
},
{
"name": "bar", // (bar.example.com)
"proxied": true
}
]
}
],
"a": true,
"aaaa": true,
"purgeUnknownRecords": false,
"ttl": 300
}
```
| Variable | Default | Description |
|----------|---------|-------------|
| `TTL` | `1` (auto) | DNS record TTL in seconds (1=auto, or 30-86400) |
| `PROXIED` | `false` | Expression controlling which domains are proxied through Cloudflare |
| `RECORD_COMMENT` | (empty) | Comment attached to managed DNS records |
| `MANAGED_RECORDS_COMMENT_REGEX` | (empty) | Regex to identify which records are managed (empty = all) |
## 🌐 Hosting multiple domains (zones) on the same IP?
The `PROXIED` variable supports boolean expressions:
You can handle ddns for multiple domains (cloudflare zones) using the same docker container by duplicating your configs inside the `cloudflare: []` key within `config.json` like below:
| Expression | Meaning |
|------------|---------|
| `true` | ☁️ Proxy all domains |
| `false` | 🔓 Don't proxy any domains |
| `is(example.com)` | 🎯 Only proxy `example.com` |
| `sub(cdn.example.com)` | 🌳 Proxy `cdn.example.com` and its subdomains |
| `is(a.com) \|\| is(b.com)` | 🔀 Proxy `a.com` or `b.com` |
| `!is(vpn.example.com)` | 🚫 Proxy everything except `vpn.example.com` |
### ⚠️ Note:
Operators: `is()`, `sub()`, `!`, `&&`, `||`, `()`
If you are using API Tokens, make sure the token used supports editing your zone ID.
## 🛡️ WAF Lists
```bash
{
"cloudflare": [
{
"authentication": {
"api_token": "api_token_here",
"api_key": {
"api_key": "api_key_here",
"account_email": "your_email_here"
}
},
"zone_id": "your_first_zone_id_here",
"subdomains": [
{
"name": "",
"proxied": false
},
{
"name": "remove_or_replace_with_your_subdomain",
"proxied": false
}
]
},
{
"authentication": {
"api_token": "api_token_here",
"api_key": {
"api_key": "api_key_here",
"account_email": "your_email_here"
}
},
"zone_id": "your_second_zone_id_here",
"subdomains": [
{
"name": "",
"proxied": false
},
{
"name": "remove_or_replace_with_your_subdomain",
"proxied": false
}
]
}
],
"a": true,
"aaaa": true,
"purgeUnknownRecords": false
}
```
| Variable | Default | Description |
|----------|---------|-------------|
| `WAF_LISTS` | (empty) | Comma-separated WAF lists in `account-id/list-name` format |
| `WAF_LIST_DESCRIPTION` | (empty) | Description for managed WAF lists |
| `WAF_LIST_ITEM_COMMENT` | (empty) | Comment for WAF list items |
| `MANAGED_WAF_LIST_ITEMS_COMMENT_REGEX` | (empty) | Regex to identify managed WAF list items |
## ⚖️ Load Balancing
WAF list names must match the pattern `[a-z0-9_]+`.
If you have multiple IP addresses and want to load balance between them, you can use the `loadBalancing` option. This will create a CNAME record for each subdomain that points to the subdomain with the lowest IP address.
## 🔔 Notifications (Shoutrrr)
### 📜 Example config to support load balancing
| Variable | Description |
|----------|-------------|
| `SHOUTRRR` | Newline-separated list of notification service URLs |
```json
{
"cloudflare": [
{
"authentication": {
"api_token": "api_token_here",
"api_key": {
"api_key": "api_key_here",
"account_email": "your_email_here"
}
},
"zone_id": "your_zone_id_here",
"subdomains": [
{
"name": "",
"proxied": false
},
{
"name": "remove_or_replace_with_your_subdomain",
"proxied": false
}
]
}
],{
"cloudflare": [
{
"authentication": {
"api_token": "api_token_here",
"api_key": {
"api_key": "api_key_here",
"account_email": "your_email_here"
}
},
"zone_id": "your_zone_id_here",
"subdomains": [
{
"name": "",
"proxied": false
},
{
"name": "remove_or_replace_with_your_subdomain",
"proxied": false
}
]
}
],
"load_balancer": [
{
"authentication": {
"api_token": "api_token_here",
"api_key": {
"api_key": "api_key_here",
"account_email": "your_email_here"
}
},
"pool_id": "your_pool_id_here",
"origin": "your_origin_name_here"
}
],
"a": true,
"aaaa": true,
"purgeUnknownRecords": false,
"ttl": 300
}
```
Supported services:
### Docker environment variable support
| Service | URL format |
|---------|------------|
| 💬 Discord | `discord://token@webhook-id` |
| 📨 Slack | `slack://token-a/token-b/token-c` |
| ✈️ Telegram | `telegram://bot-token@telegram?chats=chat-id` |
| 📡 Gotify | `gotify://host/path?token=app-token` |
| 📲 Pushover | `pushover://user-key@api-token` |
| 🌐 Generic webhook | `generic://host/path` or `generic+https://host/path` |
Define environment variables whose names start with `CF_DDNS_` and reference them in config.json
Notifications are sent when DNS records are updated, created, deleted, or when errors occur.
For example:
## 💓 Heartbeat Monitoring
```json
{
"cloudflare": [
{
"authentication": {
"api_token": "${CF_DDNS_API_TOKEN}",
```
| Variable | Description |
|----------|-------------|
| `HEALTHCHECKS` | Healthchecks.io ping URL |
| `UPTIMEKUMA` | Uptime Kuma push URL |
### 🧹 Optional features
Heartbeats are sent after each update cycle. On failure, a fail signal is sent. On shutdown, an exit signal is sent.
`purgeUnknownRecords` removes stale DNS records from Cloudflare. This is useful if you have a dynamic DNS record that you no longer want to use: set `purgeUnknownRecords` to `true` and the script will remove the stale DNS record from Cloudflare.
## ⏳ Timeouts
## 🐳 Deploy with Docker Compose
| Variable | Default | Description |
|----------|---------|-------------|
| `DETECTION_TIMEOUT` | `5s` | Timeout for IP detection requests |
| `UPDATE_TIMEOUT` | `30s` | Timeout for Cloudflare API requests |
Pre-compiled images are available via [the official docker container on DockerHub](https://hub.docker.com/r/timothyjmiller/cloudflare-ddns).
## 🖥️ Output
Modify the host file path of config.json inside the volumes section of docker-compose.yml.
| Variable | Default | Description |
|----------|---------|-------------|
| `EMOJI` | `true` | Use emoji in output messages |
| `QUIET` | `false` | Suppress informational output |
## 🏁 CLI Flags
| Flag | Description |
|------|-------------|
| `--dry-run` | 🧪 Preview changes without modifying DNS records |
| `--repeat` | 🔁 Run continuously (legacy config mode only; env var mode uses `UPDATE_CRON`) |
## 📋 All Environment Variables
| Variable | Default | Description |
|----------|---------|-------------|
| `CLOUDFLARE_API_TOKEN` | — | 🔑 API token |
| `CLOUDFLARE_API_TOKEN_FILE` | — | 📄 Path to API token file |
| `DOMAINS` | — | 🌐 Domains for both IPv4 and IPv6 |
| `IP4_DOMAINS` | — | 4⃣ IPv4-only domains |
| `IP6_DOMAINS` | — | 6⃣ IPv6-only domains |
| `IP4_PROVIDER` | `cloudflare.trace` | 🔍 IPv4 detection provider |
| `IP6_PROVIDER` | `cloudflare.trace` | 🔍 IPv6 detection provider |
| `UPDATE_CRON` | `@every 5m` | ⏱️ Update schedule |
| `UPDATE_ON_START` | `true` | 🚀 Update on startup |
| `DELETE_ON_STOP` | `false` | 🧹 Delete records on shutdown |
| `TTL` | `1` | ⏳ DNS record TTL |
| `PROXIED` | `false` | ☁️ Proxied expression |
| `RECORD_COMMENT` | — | 💬 DNS record comment |
| `MANAGED_RECORDS_COMMENT_REGEX` | — | 🎯 Managed records regex |
| `WAF_LISTS` | — | 🛡️ WAF lists to manage |
| `WAF_LIST_DESCRIPTION` | — | 📝 WAF list description |
| `WAF_LIST_ITEM_COMMENT` | — | 💬 WAF list item comment |
| `MANAGED_WAF_LIST_ITEMS_COMMENT_REGEX` | — | 🎯 Managed WAF items regex |
| `DETECTION_TIMEOUT` | `5s` | ⏳ IP detection timeout |
| `UPDATE_TIMEOUT` | `30s` | ⏳ API request timeout |
| `EMOJI` | `true` | 🎨 Enable emoji output |
| `QUIET` | `false` | 🤫 Suppress info output |
| `HEALTHCHECKS` | — | 💓 Healthchecks.io URL |
| `UPTIMEKUMA` | — | 💓 Uptime Kuma URL |
| `SHOUTRRR` | — | 🔔 Notification URLs (newline-separated) |
---
## 🚢 Deployment
### 🐳 Docker Compose
```yml
version: '3.9'
@@ -292,146 +228,259 @@ services:
- no-new-privileges:true
network_mode: 'host'
environment:
- PUID=1000
- PGID=1000
- CLOUDFLARE_API_TOKEN=your-api-token
- DOMAINS=example.com,www.example.com
- PROXIED=true
- IP6_PROVIDER=none
- HEALTHCHECKS=https://hc-ping.com/your-uuid
restart: unless-stopped
```
> ⚠️ Docker requires `network_mode: host` to access the IPv6 public address.
### ☸️ Kubernetes
The included manifest uses the legacy JSON config mode. Create a secret containing your `config.json` and apply:
```bash
kubectl create secret generic config-cloudflare-ddns --from-file=config.json -n ddns
kubectl apply -f k8s/cloudflare-ddns.yml
```
### 🐧 Linux + Systemd
1. Build and install:
```bash
cargo build --release
sudo cp target/release/cloudflare-ddns /usr/local/bin/
```
2. Copy the systemd units from the `systemd/` directory:
```bash
sudo cp systemd/cloudflare-ddns.service /etc/systemd/system/
sudo cp systemd/cloudflare-ddns.timer /etc/systemd/system/
```
3. Place a `config.json` at `/etc/cloudflare-ddns/config.json` (the systemd service uses legacy config mode).
4. Enable the timer:
```bash
sudo systemctl enable --now cloudflare-ddns.timer
```
The timer runs the service every 15 minutes (configurable in `cloudflare-ddns.timer`).
## 🔨 Building from Source
```bash
cargo build --release
```
The binary is at `target/release/cloudflare-ddns`.
### 🐳 Docker builds
```bash
# Single architecture (linux/amd64)
./scripts/docker-build.sh
# Multi-architecture (linux/amd64, linux/arm64, linux/arm/v7)
./scripts/docker-build-all.sh
```
## 💻 Supported Platforms
- 🐳 [Docker](https://docs.docker.com/get-docker/) (amd64, arm64, arm/v7)
- 🐙 [Docker Compose](https://docs.docker.com/compose/install/)
- ☸️ [Kubernetes](https://kubernetes.io/docs/tasks/tools/)
- 🐧 [Systemd](https://www.freedesktop.org/wiki/Software/systemd/)
- 🍎 macOS, 🪟 Windows, 🐧 Linux — anywhere Rust compiles
---
## 📁 Legacy JSON Config File
For backwards compatibility, cloudflare-ddns still supports configuration via a `config.json` file. This mode is used automatically when no `CLOUDFLARE_API_TOKEN` environment variable is set.
### 🚀 Quick Start
```bash
cp config-example.json config.json
# Edit config.json with your values
cloudflare-ddns
```
### 🔑 Authentication
Use either an API token (recommended) or a legacy API key:
```json
"authentication": {
"api_token": "Your cloudflare API token with Edit DNS capability"
}
```
Or with a legacy API key:
```json
"authentication": {
"api_key": {
"api_key": "Your cloudflare API Key",
"account_email": "The email address you use to sign in to cloudflare"
}
}
```
### 📡 IPv4 and IPv6
Some ISP provided modems only allow port forwarding over IPv4 or IPv6. Disable the interface that is not accessible:
```json
"a": true,
"aaaa": true
```
### ⚙️ Config Options
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| `cloudflare` | array | required | List of zone configurations |
| `a` | bool | `true` | Enable IPv4 (A record) updates |
| `aaaa` | bool | `true` | Enable IPv6 (AAAA record) updates |
| `purgeUnknownRecords` | bool | `false` | Delete stale/duplicate DNS records |
| `ttl` | int | `300` | DNS record TTL in seconds (30-86400, values < 30 become auto) |
Each zone entry contains:
| Key | Type | Description |
|-----|------|-------------|
| `authentication` | object | API token or API key credentials |
| `zone_id` | string | Cloudflare zone ID (found in zone dashboard) |
| `subdomains` | array | Subdomain entries to update |
| `proxied` | bool | Default proxied status for subdomains in this zone |
Subdomain entries can be a simple string or a detailed object:
```json
"subdomains": [
"",
"@",
"www",
{ "name": "vpn", "proxied": true }
]
```
Use `""` or `"@"` for the root domain. Do not include the base domain name.
### 🔄 Environment Variable Substitution
In the legacy config file, values can reference environment variables with the `CF_DDNS_` prefix:
```json
{
"cloudflare": [{
"authentication": {
"api_token": "${CF_DDNS_API_TOKEN}"
},
...
}]
}
```
### 📠 Example: Multiple Subdomains
```json
{
"cloudflare": [
{
"authentication": {
"api_token": "your-api-token"
},
"zone_id": "your_zone_id",
"subdomains": [
{ "name": "", "proxied": true },
{ "name": "www", "proxied": true },
{ "name": "vpn", "proxied": false }
]
}
],
"a": true,
"aaaa": true,
"purgeUnknownRecords": false,
"ttl": 300
}
```
### 🌐 Example: Multiple Zones
```json
{
"cloudflare": [
{
"authentication": { "api_token": "your-api-token" },
"zone_id": "first_zone_id",
"subdomains": [
{ "name": "", "proxied": false }
]
},
{
"authentication": { "api_token": "your-api-token" },
"zone_id": "second_zone_id",
"subdomains": [
{ "name": "", "proxied": false }
]
}
],
"a": true,
"aaaa": true,
"purgeUnknownRecords": false
}
```
### 🐳 Docker Compose (legacy config file)
```yml
version: '3.9'
services:
cloudflare-ddns:
image: timothyjmiller/cloudflare-ddns:latest
container_name: cloudflare-ddns
security_opt:
- no-new-privileges:true
network_mode: 'host'
volumes:
- /YOUR/PATH/HERE/config.json:/config.json
restart: unless-stopped
```
### ⚠️ IPv6
### 🏁 Legacy CLI Flags
Docker requires network_mode be set to host in order to access the IPv6 public address.
### 🏃‍♂️ Running
From the project root directory
In legacy config mode, use `--repeat` to run continuously (the TTL value is used as the update interval):
```bash
docker-compose up -d
cloudflare-ddns --repeat
cloudflare-ddns --repeat --dry-run
```
## 🐋 Kubernetes
---
Create config File
## 🔗 Helpful Links
```bash
cp ../../config-example.json config.json
```
- 🔑 [Cloudflare API token](https://dash.cloudflare.com/profile/api-tokens)
- 🆔 [Cloudflare zone ID](https://support.cloudflare.com/hc/en-us/articles/200167836-Where-do-I-find-my-Cloudflare-IP-address-)
- 📋 [Cloudflare zone DNS record ID](https://support.cloudflare.com/hc/en-us/articles/360019093151-Managing-DNS-records-in-Cloudflare)
Edit config.json (vim, nvim, nano...)
## 📜 License
```bash
${EDITOR} config.json
```
This project is licensed under the GNU General Public License, version 3 (GPLv3).
Create config file as Secret.
```bash
kubectl create secret generic config-cloudflare-ddns --from-file=config.json --dry-run=client -oyaml -n ddns > config-cloudflare-ddns-Secret.yaml
```
Apply this secret
```bash
kubectl apply -f config-cloudflare-ddns-Secret.yaml
rm config.json # recommended: keep only the secret on the Kubernetes cluster
```
apply this Deployment
```bash
kubectl apply -f cloudflare-ddns-Deployment.yaml
```
## 🐧 Deploy with Linux + Cron
### 🏃 Running (all distros)
This script requires Python 3.5+, which comes preinstalled on the latest version of Raspbian. Download/clone this repo and give permission to the project's bash script by running `chmod +x ./start-sync.sh`. Now you can execute `./start-sync.sh`, which will set up a virtualenv, pull in any dependencies, and fire the script.
1. Upload the cloudflare-ddns folder to your home directory /home/your_username_here/
2. Run the following code in terminal
```bash
crontab -e
```
3. Add the following lines to sync your DNS records every 15 minutes
```bash
*/15 * * * * /home/your_username_here/cloudflare-ddns/start-sync.sh
```
## Building from source
Create a config.json file with your production credentials.
### 💖 Please Note
The optional `docker-build-all.sh` script requires Docker experimental support to be enabled.
Docker Hub has experimental support for multi-architecture builds. Their official blog post specifies easy instructions for building with [Mac and Windows versions of Docker Desktop](https://docs.docker.com/docker-for-mac/multi-arch/).
1. Choose build platform
- Multi-architecture (experimental) `docker-build-all.sh`
- Linux/amd64 by default `docker-build.sh`
2. Give your bash script permission to execute.
```bash
sudo chmod +x ./docker-build.sh
```
```bash
sudo chmod +x ./docker-build-all.sh
```
3. At project root, run the `docker-build.sh` script.
Recommended for local development
```bash
./docker-build.sh
```
Recommended for production
```bash
./docker-build-all.sh
```
### Run the locally compiled version
```bash
docker run -d timothyjmiller/cloudflare_ddns:latest
```
## Supported Platforms
- [Docker](https://docs.docker.com/get-docker/)
- [Docker Compose](https://docs.docker.com/compose/install/)
- [Kubernetes](https://kubernetes.io/docs/tasks/tools/)
- [Python 3](https://www.python.org/downloads/)
- [Systemd](https://www.freedesktop.org/wiki/Software/systemd/)
## 📜 Helpful links
- [Cloudflare API token](https://dash.cloudflare.com/profile/api-tokens)
- [Cloudflare zone ID](https://support.cloudflare.com/hc/en-us/articles/200167836-Where-do-I-find-my-Cloudflare-IP-address-)
- [Cloudflare zone DNS record ID](https://support.cloudflare.com/hc/en-us/articles/360019093151-Managing-DNS-records-in-Cloudflare)
## License
This Template is licensed under the GNU General Public License, version 3 (GPLv3).
## Author
## 👨‍💻 Author
Timothy Miller
[View my GitHub profile 💡](https://github.com/timothymiller)
[View my personal website 💻](https://timknowsbest.com)
[View my personal website 💻](https://itstmillertime.com)

View File

@@ -1,319 +0,0 @@
#!/usr/bin/env python3
# cloudflare-ddns.py
# Summary: Access your home network remotely via a custom domain name without a static IP!
# Description: Access your home network remotely via a custom domain
# Access your home network remotely via a custom domain
# A small, 🕵️ privacy centric, and ⚡
# lightning fast multi-architecture Docker image for self hosting projects.
__version__ = "1.0.2"
from string import Template
import json
import os
import signal
import sys
import threading
import time
import requests
# Directory that config.json is loaded from; falls back to the current working directory.
CONFIG_PATH = os.environ.get('CONFIG_PATH', os.getcwd())
# Read in all environment variables that have the correct prefix;
# these are substituted into config.json via string.Template (see the __main__ block).
ENV_VARS = {key: value for (key, value) in os.environ.items() if key.startswith('CF_DDNS_')}
class GracefulExit:
    """Installs SIGINT/SIGTERM handlers that flip a shared shutdown event."""

    def __init__(self):
        # Event the main loop waits on to know when to stop.
        self.kill_now = threading.Event()
        for sig in (signal.SIGINT, signal.SIGTERM):
            signal.signal(sig, self.exit_gracefully)

    def exit_gracefully(self, signum, frame):
        """Signal handler: announce shutdown and set the kill event."""
        print("🛑 Stopping main thread...")
        self.kill_now.set()
def deleteEntries(type):
    """Delete every DNS record of the given type ("A" or "AAAA") in all zones.

    Helper used when purgeUnknownRecords is enabled and no IPv4/IPv6
    connection could be detected, yet existing A/AAAA records are found.
    NOTE: the parameter shadows the builtin ``type``; kept for compatibility.
    """
    for option in config["cloudflare"]:
        answer = cf_api(
            "zones/" + option['zone_id'] +
            "/dns_records?per_page=100&type=" + type,
            "GET", option)
        if answer is None or answer["result"] is None:
            # API failure: back off briefly and abandon the purge.
            time.sleep(5)
            return
        for record in answer["result"]:
            identifier = str(record["id"])
            cf_api(
                "zones/" + option['zone_id'] + "/dns_records/" + identifier,
                "DELETE", option)
            print("🗑️ Deleted stale record " + identifier)
def getIPs():
    """Detect the public IPv4/IPv6 addresses via Cloudflare's trace endpoints.

    Tries 1.1.1.1 first, then 1.0.0.1 (and their IPv6 equivalents). Each
    warning is printed only once per process via the shown_* global flags.
    When both attempts for a family fail and purgeUnknownRecords is set,
    existing records of that family are deleted.

    Returns:
        Dict with optional "ipv4"/"ipv6" keys, each {"type": ..., "ip": ...}.
    """
    a = None
    aaaa = None
    global ipv4_enabled
    global ipv6_enabled
    global purgeUnknownRecords
    if ipv4_enabled:
        try:
            # The trace endpoint returns "key=value" lines; the last split
            # element is empty, hence the pop() before building the dict.
            a = requests.get(
                "https://1.1.1.1/cdn-cgi/trace").text.split("\n")
            a.pop()
            a = dict(s.split("=") for s in a)["ip"]
        except Exception:
            global shown_ipv4_warning
            if not shown_ipv4_warning:
                shown_ipv4_warning = True
                print("🧩 IPv4 not detected via 1.1.1.1, trying 1.0.0.1")
            # Try secondary IP check
            try:
                a = requests.get(
                    "https://1.0.0.1/cdn-cgi/trace").text.split("\n")
                a.pop()
                a = dict(s.split("=") for s in a)["ip"]
            except Exception:
                global shown_ipv4_warning_secondary
                if not shown_ipv4_warning_secondary:
                    shown_ipv4_warning_secondary = True
                    print("🧩 IPv4 not detected via 1.0.0.1. Verify your ISP or DNS provider isn't blocking Cloudflare's IPs.")
                if purgeUnknownRecords:
                    deleteEntries("A")
    if ipv6_enabled:
        try:
            aaaa = requests.get(
                "https://[2606:4700:4700::1111]/cdn-cgi/trace").text.split("\n")
            aaaa.pop()
            aaaa = dict(s.split("=") for s in aaaa)["ip"]
        except Exception:
            global shown_ipv6_warning
            if not shown_ipv6_warning:
                shown_ipv6_warning = True
                print("🧩 IPv6 not detected via 1.1.1.1, trying 1.0.0.1")
            try:
                aaaa = requests.get(
                    "https://[2606:4700:4700::1001]/cdn-cgi/trace").text.split("\n")
                aaaa.pop()
                aaaa = dict(s.split("=") for s in aaaa)["ip"]
            except Exception:
                global shown_ipv6_warning_secondary
                if not shown_ipv6_warning_secondary:
                    shown_ipv6_warning_secondary = True
                    print("🧩 IPv6 not detected via 1.0.0.1. Verify your ISP or DNS provider isn't blocking Cloudflare's IPs.")
                if purgeUnknownRecords:
                    deleteEntries("AAAA")
    ips = {}
    if (a is not None):
        ips["ipv4"] = {
            "type": "A",
            "ip": a
        }
    if (aaaa is not None):
        ips["ipv6"] = {
            "type": "AAAA",
            "ip": aaaa
        }
    return ips
def commitRecord(ip):
    """Create or update the DNS record for one detected IP in every zone.

    Args:
        ip: Dict with "type" ("A"/"AAAA") and "ip" (the address string),
            as produced by getIPs().

    For each configured subdomain the record is created if missing, updated
    if content/proxied differ, and (when purgeUnknownRecords is set) stale
    duplicates with the same FQDN are deleted.
    """
    global ttl
    for option in config["cloudflare"]:
        subdomains = option["subdomains"]
        response = cf_api("zones/" + option['zone_id'], "GET", option)
        if response is None or response["result"]["name"] is None:
            # Could not resolve the zone name: back off and give up this run.
            time.sleep(5)
            return
        base_domain_name = response["result"]["name"]
        for subdomain in subdomains:
            # Subdomains may be plain strings or {"name": ..., "proxied": ...}
            # dicts; fall back to the zone-level proxied flag for strings.
            try:
                name = subdomain["name"].lower().strip()
                proxied = subdomain["proxied"]
            except:
                name = subdomain
                proxied = option["proxied"]
            fqdn = base_domain_name
            # Check if name provided is a reference to the root domain
            if name != '' and name != '@':
                fqdn = name + "." + base_domain_name
            record = {
                "type": ip["type"],
                "name": fqdn,
                "content": ip["ip"],
                "proxied": proxied,
                "ttl": ttl
            }
            dns_records = cf_api(
                "zones/" + option['zone_id'] +
                "/dns_records?per_page=100&type=" + ip["type"],
                "GET", option)
            identifier = None
            modified = False
            duplicate_ids = []
            if dns_records is not None:
                for r in dns_records["result"]:
                    if (r["name"] == fqdn):
                        # Keep exactly one record per FQDN: prefer one whose
                        # content already matches; everything else is a duplicate.
                        if identifier:
                            if r["content"] == ip["ip"]:
                                duplicate_ids.append(identifier)
                                identifier = r["id"]
                            else:
                                duplicate_ids.append(r["id"])
                        else:
                            identifier = r["id"]
                            if r['content'] != record['content'] or r['proxied'] != record['proxied']:
                                modified = True
            if identifier:
                if modified:
                    print("📡 Updating record " + str(record))
                    response = cf_api(
                        "zones/" + option['zone_id'] +
                        "/dns_records/" + identifier,
                        "PUT", option, {}, record)
            else:
                print(" Adding new record " + str(record))
                response = cf_api(
                    "zones/" + option['zone_id'] + "/dns_records", "POST", option, {}, record)
            if purgeUnknownRecords:
                for identifier in duplicate_ids:
                    identifier = str(identifier)
                    print("🗑️ Deleting stale record " + identifier)
                    response = cf_api(
                        "zones/" + option['zone_id'] +
                        "/dns_records/" + identifier,
                        "DELETE", option)
    return True
def updateLoadBalancer(ip):
    """Point a configured load-balancer pool origin at the detected IP.

    Currently unused: the call site in updateIPs() is commented out.
    NOTE(review): idxr.get() returns None for unknown pool/origin ids, which
    would raise on the following index — assumes config matches the account.
    """
    for option in config["load_balancer"]:
        pools = cf_api('user/load_balancers/pools', 'GET', option)
        if pools:
            # Map pool id -> index to locate the configured pool.
            idxr = dict((p['id'], i) for i, p in enumerate(pools['result']))
            idx = idxr.get(option['pool_id'])
            origins = pools['result'][idx]['origins']
            # Map origin name -> index to locate the configured origin.
            idxr = dict((o['name'], i) for i, o in enumerate(origins))
            idx = idxr.get(option['origin'])
            origins[idx]['address'] = ip['ip']
            data = {'origins': origins}
            response = cf_api(f'user/load_balancers/pools/{option["pool_id"]}', 'PATCH', option, {}, data)
def cf_api(endpoint, method, config, headers=None, data=False):
    """Send a single request to the Cloudflare v4 API.

    Args:
        endpoint: Path appended to https://api.cloudflare.com/client/v4/.
        method: HTTP verb ("GET", "POST", "PUT", "PATCH", "DELETE").
        config: Config entry whose 'authentication' section supplies credentials.
        headers: Optional extra headers merged into the auth headers.
            (Was a mutable default ``{}``; now None to avoid the shared
            default-argument pitfall. Callers passing a dict are unaffected.)
        data: JSON-serializable request body, or False for "no body".

    Returns:
        The decoded JSON response on success, or None on any HTTP error
        or exception (which is printed).
    """
    extra_headers = headers if headers is not None else {}
    api_token = config['authentication']['api_token']
    if api_token != '' and api_token != 'api_token_here':
        # Token auth: bearer token plus any caller-supplied headers.
        request_headers = {
            "Authorization": "Bearer " + api_token, **extra_headers
        }
    else:
        # Legacy email + global API key auth. Caller-supplied headers are
        # intentionally not merged here, matching the original behavior.
        request_headers = {
            "X-Auth-Email": config['authentication']['api_key']['account_email'],
            "X-Auth-Key": config['authentication']['api_key']['api_key'],
        }
    try:
        if data is False:
            # `is False` (not `== False`) so falsy payloads such as {} or 0
            # are still sent as a JSON body rather than silently dropped.
            response = requests.request(
                method, "https://api.cloudflare.com/client/v4/" + endpoint,
                headers=request_headers)
        else:
            response = requests.request(
                method, "https://api.cloudflare.com/client/v4/" + endpoint,
                headers=request_headers, json=data)
        if response.ok:
            return response.json()
        else:
            print("😡 Error sending '" + method +
                  "' request to '" + response.url + "':")
            print(response.text)
            return None
    except Exception as e:
        print("😡 An exception occurred while sending '" +
              method + "' request to '" + endpoint + "': " + str(e))
        return None
def updateIPs(ips):
    """Commit every detected address (the values of getIPs()) to Cloudflare."""
    for address in ips.values():
        commitRecord(address)
        # updateLoadBalancer(address)
if __name__ == '__main__':
    # Once-only warning flags used by getIPs().
    shown_ipv4_warning = False
    shown_ipv4_warning_secondary = False
    shown_ipv6_warning = False
    shown_ipv6_warning_secondary = False
    # Defaults; overridden below from config.json where present.
    ipv4_enabled = True
    ipv6_enabled = True
    purgeUnknownRecords = False
    if sys.version_info < (3, 5):
        raise Exception("🐍 This script requires Python 3.5+")
    config = None
    try:
        # Environment variables prefixed CF_DDNS_ are substituted into the
        # config template before JSON parsing.
        with open(os.path.join(CONFIG_PATH, "config.json")) as config_file:
            if len(ENV_VARS) != 0:
                config = json.loads(Template(config_file.read()).safe_substitute(ENV_VARS))
            else:
                config = json.loads(config_file.read())
    except:
        print("😡 Error reading config.json")
        # wait 10 seconds to prevent excessive logging on docker auto restart
        time.sleep(10)
    if config is not None:
        try:
            ipv4_enabled = config["a"]
            ipv6_enabled = config["aaaa"]
        except:
            ipv4_enabled = True
            ipv6_enabled = True
            print("⚙️ Individually disable IPv4 or IPv6 with new config.json options. Read more about it here: https://github.com/timothymiller/cloudflare-ddns/blob/master/README.md")
        try:
            purgeUnknownRecords = config["purgeUnknownRecords"]
        except:
            purgeUnknownRecords = False
            print("⚙️ No config detected for 'purgeUnknownRecords' - defaulting to False")
        try:
            ttl = int(config["ttl"])
        except:
            ttl = 300  # default Cloudflare TTL
            print(
                "⚙️ No config detected for 'ttl' - defaulting to 300 seconds (5 minutes)")
        if ttl < 30:
            ttl = 1  # Cloudflare's "automatic" TTL value
            print("⚙️ TTL is too low - defaulting to 1 (auto)")
        if (len(sys.argv) > 1):
            if (sys.argv[1] == "--repeat"):
                if ipv4_enabled and ipv6_enabled:
                    print(
                        "🕰️ Updating IPv4 (A) & IPv6 (AAAA) records every " + str(ttl) + " seconds")
                elif ipv4_enabled and not ipv6_enabled:
                    print("🕰️ Updating IPv4 (A) records every " +
                          str(ttl) + " seconds")
                elif ipv6_enabled and not ipv4_enabled:
                    print("🕰️ Updating IPv6 (AAAA) records every " +
                          str(ttl) + " seconds")
                next_time = time.time()
                killer = GracefulExit()
                prev_ips = None
                # Repeat until SIGINT/SIGTERM sets the kill event; wait(ttl)
                # doubles as the inter-update sleep.
                while True:
                    updateIPs(getIPs())
                    if killer.kill_now.wait(ttl):
                        break
            else:
                print("❓ Unrecognized parameter '" +
                      sys.argv[1] + "'. Stopping now.")
        else:
            # No arguments: perform a single one-shot update.
            updateIPs(getIPs())

View File

@@ -0,0 +1,19 @@
# Cloudflare DDNS via environment variables; see env-example for all options.
version: '3.9'  # NOTE(review): the top-level "version" key is ignored by Compose v2 — confirm before removing
services:
  cloudflare-ddns:
    image: timothyjmiller/cloudflare-ddns:latest
    container_name: cloudflare-ddns
    security_opt:
      - no-new-privileges:true  # forbid privilege escalation inside the container
    network_mode: 'host'  # container shares the host's network stack
    environment:
      - CLOUDFLARE_API_TOKEN=your-api-token-here
      - DOMAINS=example.com,www.example.com
      - PROXIED=false
      - TTL=1  # 1 = automatic (see env-example)
      - UPDATE_CRON=@every 5m
      # - IP6_PROVIDER=none
      # - HEALTHCHECKS=https://hc-ping.com/your-uuid
      # - UPTIMEKUMA=https://kuma.example.com/api/push/your-token
      # - SHOUTRRR=discord://token@webhook-id
    restart: unless-stopped

98
env-example Normal file
View File

@@ -0,0 +1,98 @@
# Cloudflare DDNS - Environment Variable Configuration
# Copy this file to .env and set your values.
# Setting CLOUDFLARE_API_TOKEN activates environment variable mode.
# === Required ===
# Cloudflare API token with "Edit DNS" capability
CLOUDFLARE_API_TOKEN=your-api-token-here
# Or read from a file:
# CLOUDFLARE_API_TOKEN_FILE=/run/secrets/cloudflare_token
# Domains to update (comma-separated)
# At least one of DOMAINS, IP4_DOMAINS, IP6_DOMAINS, or WAF_LISTS must be set
DOMAINS=example.com,www.example.com
# IP4_DOMAINS=v4only.example.com
# IP6_DOMAINS=v6only.example.com
# === IP Detection ===
# Provider for IPv4 detection (default: cloudflare.trace)
# Options: cloudflare.trace, cloudflare.doh, ipify, local, local.iface:<name>,
# url:<custom-url>, literal:<ip1>,<ip2>, none
# IP4_PROVIDER=cloudflare.trace
# Provider for IPv6 detection (default: cloudflare.trace)
# IP6_PROVIDER=cloudflare.trace
# === Scheduling ===
# Update schedule (default: @every 5m)
# Formats: @every 5m, @every 1h, @every 30s, @once
# UPDATE_CRON=@every 5m
# Run an update immediately on startup (default: true)
# UPDATE_ON_START=true
# Delete managed DNS records on shutdown (default: false)
# DELETE_ON_STOP=false
# === DNS Records ===
# TTL in seconds: 1=auto, or 30-86400 (default: 1)
# TTL=1
# Proxied expression: true, false, is(domain), sub(domain), or boolean combos
# PROXIED=false
# Comment to attach to managed DNS records
# RECORD_COMMENT=Managed by cloudflare-ddns
# Regex to identify which records are managed (empty = all matching records)
# MANAGED_RECORDS_COMMENT_REGEX=cloudflare-ddns
# === WAF Lists ===
# Comma-separated WAF lists in account-id/list-name format
# WAF_LISTS=account123/my_ip_list
# Description for managed WAF lists
# WAF_LIST_DESCRIPTION=Dynamic IP list
# Comment for WAF list items
# WAF_LIST_ITEM_COMMENT=cloudflare-ddns
# Regex to identify managed WAF list items
# MANAGED_WAF_LIST_ITEMS_COMMENT_REGEX=cloudflare-ddns
# === Notifications ===
# Shoutrrr notification URLs (newline-separated)
# SHOUTRRR=discord://token@webhook-id
# SHOUTRRR=slack://token-a/token-b/token-c
# SHOUTRRR=telegram://bot-token@telegram?chats=chat-id
# SHOUTRRR=generic+https://hooks.example.com/webhook
# === Heartbeat Monitoring ===
# Healthchecks.io ping URL
# HEALTHCHECKS=https://hc-ping.com/your-uuid
# Uptime Kuma push URL
# UPTIMEKUMA=https://your-uptime-kuma.com/api/push/your-token
# === Timeouts ===
# IP detection timeout (default: 5s)
# DETECTION_TIMEOUT=5s
# Cloudflare API request timeout (default: 30s)
# UPDATE_TIMEOUT=30s
# === Output ===
# Use emoji in output (default: true)
# EMOJI=true
# Suppress informational output (default: false)
# QUIET=false

View File

@@ -1 +0,0 @@
requests==2.31.0

View File

@@ -1,4 +1,3 @@
#!/bin/bash
BASH_DIR=$(dirname $(realpath "${BASH_SOURCE}"))
docker buildx build --platform linux/ppc64le,linux/s390x,linux/386,linux/arm/v6,linux/arm/v7,linux/arm64/v8,linux/amd64 --tag timothyjmiller/cloudflare-ddns:latest ${BASH_DIR}/../
# TODO: Support linux/riscv64
docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 --tag timothyjmiller/cloudflare-ddns:latest ${BASH_DIR}/../

View File

@@ -1,3 +1,8 @@
#!/bin/bash
BASH_DIR=$(dirname $(realpath "${BASH_SOURCE}"))
docker buildx build --platform linux/ppc64le,linux/s390x,linux/386,linux/arm/v6,linux/arm/v7,linux/arm64/v8,linux/amd64 --tag timothyjmiller/cloudflare-ddns:latest --push ${BASH_DIR}/../
VERSION=$(grep '^version' ${BASH_DIR}/../Cargo.toml | head -1 | sed 's/.*"\(.*\)".*/\1/')
docker buildx build \
--platform linux/amd64,linux/arm64,linux/arm/v7 \
--tag timothyjmiller/cloudflare-ddns:latest \
--tag timothyjmiller/cloudflare-ddns:${VERSION} \
--push ${BASH_DIR}/../

1774
src/cloudflare.rs Normal file

File diff suppressed because it is too large Load Diff

1961
src/config.rs Normal file

File diff suppressed because it is too large Load Diff

547
src/domain.rs Normal file
View File

@@ -0,0 +1,547 @@
use std::fmt;
/// Represents a DNS domain - either a regular FQDN or a wildcard.
#[allow(dead_code)]
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Domain {
    /// A fully-qualified domain name, stored lowercased and IDNA-encoded.
    FQDN(String),
    /// A wildcard domain; the stored string is the base without the leading "*.".
    Wildcard(String),
}
#[allow(dead_code)]
impl Domain {
    /// Parse a domain string.
    ///
    /// - "*.example.com" becomes a wildcard over "example.com"
    /// - anything else (including "" and "@", which are resolved to the
    ///   root at FQDN construction time) becomes a regular FQDN
    ///
    /// The input is trimmed, lowercased, and IDNA-encoded to ASCII.
    pub fn new(input: &str) -> Result<Self, String> {
        let normalized = input.trim().to_lowercase();
        match normalized.strip_prefix("*.") {
            Some(base) => domain_to_ascii(base).map(Domain::Wildcard),
            None => domain_to_ascii(&normalized).map(Domain::FQDN),
        }
    }

    /// Returns the DNS name in ASCII form suitable for API calls.
    pub fn dns_name_ascii(&self) -> String {
        match self {
            Domain::FQDN(name) => name.clone(),
            Domain::Wildcard(base) => format!("*.{base}"),
        }
    }

    /// Returns a human-readable (Unicode) description of the domain.
    pub fn describe(&self) -> String {
        match self {
            Domain::FQDN(name) => describe_domain(name),
            Domain::Wildcard(base) => format!("*.{}", describe_domain(base)),
        }
    }

    /// Returns the candidate zones (this name and each parent), ordered
    /// from most specific to least specific.
    pub fn zones(&self) -> Vec<String> {
        let base = match self {
            Domain::FQDN(name) | Domain::Wildcard(name) => name.as_str(),
        };
        let mut zones = Vec::new();
        let mut remaining = base;
        while !remaining.is_empty() {
            zones.push(remaining.to_string());
            match remaining.split_once('.') {
                Some((_, parent)) => remaining = parent,
                None => break,
            }
        }
        zones
    }
}
impl fmt::Display for Domain {
    /// Displays the human-readable form (same output as [`Domain::describe`]).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&self.describe())
    }
}
/// Construct an FQDN from a subdomain name and base domain.
///
/// An empty name or "@" refers to the zone root and yields the base domain
/// alone; any other name — including wildcards like "*.sub" — is prefixed
/// onto the base domain. Both parts are lowercased; the name is trimmed.
pub fn make_fqdn(subdomain: &str, base_domain: &str) -> String {
    let name = subdomain.to_lowercase();
    let name = name.trim();
    if name.is_empty() || name == "@" {
        base_domain.to_lowercase()
    } else {
        // Wildcard ("*.x") and plain subdomains are joined identically, so
        // the previously duplicated if/else branches are collapsed into one.
        format!("{name}.{}", base_domain.to_lowercase())
    }
}
/// Convert a domain to ASCII using IDNA (punycode) encoding.
///
/// Empty input maps to an empty string. If IDNA encoding fails but the
/// input is already pure ASCII, it is accepted as-is; otherwise an error
/// describing the invalid domain is returned.
#[allow(dead_code)]
fn domain_to_ascii(domain: &str) -> Result<String, String> {
    if domain.is_empty() {
        return Ok(String::new());
    }
    idna::domain_to_ascii(domain).or_else(|_| {
        // Fallback for inputs the IDNA encoder rejects but which are
        // plain ASCII already (e.g. wildcard-free internal names).
        if domain.is_ascii() {
            Ok(domain.to_string())
        } else {
            Err(format!("Invalid domain name: {domain}"))
        }
    })
}
/// Convert an ASCII (possibly punycode) domain back to Unicode for display.
///
/// Falls back to the ASCII form unchanged when decoding reports errors.
#[allow(dead_code)]
fn describe_domain(ascii: &str) -> String {
    let (unicode, status) = idna::domain_to_unicode(ascii);
    if status.is_ok() {
        unicode
    } else {
        ascii.to_string()
    }
}
/// Parse a comma-separated list of domain strings.
///
/// Whitespace-only input yields an empty list; otherwise each entry is
/// trimmed and parsed, failing on the first invalid domain.
#[allow(dead_code)]
pub fn parse_domain_list(input: &str) -> Result<Vec<Domain>, String> {
    if input.trim().is_empty() {
        return Ok(Vec::new());
    }
    let mut domains = Vec::new();
    for entry in input.split(',') {
        domains.push(Domain::new(entry.trim())?);
    }
    Ok(domains)
}
// --- Domain Expression Evaluator ---
// Supports: true, false, is(domain,...), sub(domain,...), !, &&, ||, ()

/// Parse a proxied-expression into a predicate that decides, per domain
/// name, whether its record should be proxied through Cloudflare.
///
/// The empty string and "false" short-circuit to an always-false predicate
/// and "true" to an always-true one; anything else is tokenized and parsed
/// as a boolean expression over `is(...)` / `sub(...)` tests.
pub fn parse_proxied_expression(expr: &str) -> Result<Box<dyn Fn(&str) -> bool + Send + Sync>, String> {
    let trimmed = expr.trim();
    if trimmed.is_empty() || trimmed == "false" {
        return Ok(Box::new(|_: &str| false));
    }
    if trimmed == "true" {
        return Ok(Box::new(|_: &str| true));
    }
    let tokens = tokenize_expr(trimmed)?;
    let (predicate, leftover) = parse_or_expr(&tokens)?;
    if leftover.is_empty() {
        Ok(predicate)
    } else {
        Err(format!("Unexpected tokens in proxied expression: {}", leftover.join(" ")))
    }
}
/// Split a proxied-expression into tokens: parens, `!`, `,`, `&&`, `||`,
/// and "words" (domain names / keywords built from alphanumerics and
/// `.`, `-`, `_`, `*`, `@`). Single `&` or `|` and any other character
/// are reported as errors.
fn tokenize_expr(input: &str) -> Result<Vec<String>, String> {
    let chars: Vec<char> = input.chars().collect();
    let mut tokens = Vec::new();
    let mut i = 0;
    while i < chars.len() {
        let c = chars[i];
        match c {
            // Insignificant whitespace between tokens.
            ' ' | '\t' | '\n' | '\r' => i += 1,
            // Single-character punctuation tokens.
            '(' | ')' | '!' | ',' => {
                tokens.push(c.to_string());
                i += 1;
            }
            '&' => {
                if chars.get(i + 1) == Some(&'&') {
                    tokens.push("&&".to_string());
                    i += 2;
                } else {
                    return Err("Expected '&&', got single '&'".to_string());
                }
            }
            '|' => {
                if chars.get(i + 1) == Some(&'|') {
                    tokens.push("||".to_string());
                    i += 2;
                } else {
                    return Err("Expected '||', got single '|'".to_string());
                }
            }
            _ => {
                // Greedily consume a word; an empty word means the current
                // character belongs to no token class at all.
                let start = i;
                while i < chars.len()
                    && (chars[i].is_alphanumeric()
                        || matches!(chars[i], '.' | '-' | '_' | '*' | '@'))
                {
                    i += 1;
                }
                if start == i {
                    return Err(format!("Unexpected character: {c}"));
                }
                tokens.push(chars[start..i].iter().collect());
            }
        }
    }
    Ok(tokens)
}
/// A boxed, thread-safe predicate over a domain name.
type Predicate = Box<dyn Fn(&str) -> bool + Send + Sync>;

/// Parse a `||`-chain of AND-expressions (lowest precedence level).
fn parse_or_expr(tokens: &[String]) -> Result<(Predicate, &[String]), String> {
    let (mut acc, mut remaining) = parse_and_expr(tokens)?;
    while remaining.first().map(String::as_str) == Some("||") {
        let (rhs, after) = parse_and_expr(&remaining[1..])?;
        let lhs = acc;
        acc = Box::new(move |d: &str| lhs(d) || rhs(d));
        remaining = after;
    }
    Ok((acc, remaining))
}
/// Parse a `&&`-chain of NOT-expressions (binds tighter than `||`).
fn parse_and_expr(tokens: &[String]) -> Result<(Predicate, &[String]), String> {
    let (mut acc, mut remaining) = parse_not_expr(tokens)?;
    while remaining.first().map(String::as_str) == Some("&&") {
        let (rhs, after) = parse_not_expr(&remaining[1..])?;
        let lhs = acc;
        acc = Box::new(move |d: &str| lhs(d) && rhs(d));
        remaining = after;
    }
    Ok((acc, remaining))
}
/// Parse an optional chain of `!` prefixes followed by an atom.
fn parse_not_expr(tokens: &[String]) -> Result<(Predicate, &[String]), String> {
    match tokens.first().map(String::as_str) {
        None => Err("Unexpected end of expression".to_string()),
        Some("!") => {
            let (inner, rest) = parse_not_expr(&tokens[1..])?;
            let negated: Predicate = Box::new(move |d: &str| !inner(d));
            Ok((negated, rest))
        }
        Some(_) => parse_atom(tokens),
    }
}
/// Parse an atom: a `true`/`false` literal, a parenthesized expression, or
/// an `is(...)` / `sub(...)` function call.
fn parse_atom(tokens: &[String]) -> Result<(Predicate, &[String]), String> {
    let head = match tokens.first() {
        Some(token) => token.as_str(),
        None => return Err("Unexpected end of expression".to_string()),
    };
    let rest = &tokens[1..];
    match head {
        "true" => Ok((Box::new(|_: &str| true) as Predicate, rest)),
        "false" => Ok((Box::new(|_: &str| false) as Predicate, rest)),
        "(" => {
            let (inner, after) = parse_or_expr(rest)?;
            match after.first().map(String::as_str) {
                Some(")") => Ok((inner, &after[1..])),
                _ => Err("Missing closing parenthesis".to_string()),
            }
        }
        "is" => {
            // Exact (case-insensitive) match against any listed domain.
            let (domains, after) = parse_domain_args(rest)?;
            let pred: Predicate = Box::new(move |d: &str| {
                let candidate = d.to_lowercase();
                domains.iter().any(|dom| candidate == *dom)
            });
            Ok((pred, after))
        }
        "sub" => {
            // Matches the domain itself or any subdomain of it.
            let (domains, after) = parse_domain_args(rest)?;
            let pred: Predicate = Box::new(move |d: &str| {
                let candidate = d.to_lowercase();
                domains
                    .iter()
                    .any(|dom| candidate == *dom || candidate.ends_with(&format!(".{dom}")))
            });
            Ok((pred, after))
        }
        other => Err(format!("Unexpected token: {other}")),
    }
}
/// Parse a parenthesized, comma-separated argument list of domain names
/// following `is` / `sub`. Returns the lowercased arguments and the tokens
/// remaining after the closing `)`.
fn parse_domain_args(tokens: &[String]) -> Result<(Vec<String>, &[String]), String> {
    if tokens.first().map(String::as_str) != Some("(") {
        return Err("Expected '(' after function name".to_string());
    }
    // Locate the closing paren; everything between (minus commas) is an argument.
    let close = tokens
        .iter()
        .position(|t| t == ")")
        .ok_or_else(|| "Missing closing ')' in function call".to_string())?;
    let domains = tokens[1..close]
        .iter()
        .filter(|t| t.as_str() != ",")
        .map(|t| t.to_lowercase())
        .collect();
    Ok((domains, &tokens[close + 1..]))
}
// Unit tests for domain parsing, zone derivation, and the proxied-expression
// tokenizer/parser/evaluator defined in this module.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_make_fqdn_root() {
        assert_eq!(make_fqdn("", "example.com"), "example.com");
        assert_eq!(make_fqdn("@", "example.com"), "example.com");
    }

    #[test]
    fn test_make_fqdn_subdomain() {
        assert_eq!(make_fqdn("www", "example.com"), "www.example.com");
        assert_eq!(make_fqdn("VPN", "Example.COM"), "vpn.example.com");
    }

    #[test]
    fn test_domain_wildcard() {
        let d = Domain::new("*.example.com").unwrap();
        assert_eq!(d.dns_name_ascii(), "*.example.com");
    }

    #[test]
    fn test_parse_domain_list() {
        let domains = parse_domain_list("example.com, *.example.com, sub.example.com").unwrap();
        assert_eq!(domains.len(), 3);
    }

    #[test]
    fn test_proxied_expr_true() {
        let pred = parse_proxied_expression("true").unwrap();
        assert!(pred("anything.com"));
    }

    #[test]
    fn test_proxied_expr_false() {
        let pred = parse_proxied_expression("false").unwrap();
        assert!(!pred("anything.com"));
    }

    #[test]
    fn test_proxied_expr_is() {
        let pred = parse_proxied_expression("is(example.com)").unwrap();
        assert!(pred("example.com"));
        assert!(!pred("sub.example.com"));
    }

    #[test]
    fn test_proxied_expr_sub() {
        let pred = parse_proxied_expression("sub(example.com)").unwrap();
        assert!(pred("example.com"));
        assert!(pred("sub.example.com"));
        assert!(!pred("other.com"));
    }

    #[test]
    fn test_proxied_expr_complex() {
        let pred = parse_proxied_expression("is(a.com) || is(b.com)").unwrap();
        assert!(pred("a.com"));
        assert!(pred("b.com"));
        assert!(!pred("c.com"));
    }

    #[test]
    fn test_proxied_expr_negation() {
        let pred = parse_proxied_expression("!is(internal.com)").unwrap();
        assert!(!pred("internal.com"));
        assert!(pred("public.com"));
    }

    // --- Domain::new with regular FQDN ---
    #[test]
    fn test_domain_new_fqdn() {
        let d = Domain::new("example.com").unwrap();
        assert_eq!(d, Domain::FQDN("example.com".to_string()));
    }

    #[test]
    fn test_domain_new_fqdn_uppercase() {
        let d = Domain::new("EXAMPLE.COM").unwrap();
        assert_eq!(d, Domain::FQDN("example.com".to_string()));
    }

    // --- Domain::dns_name_ascii for FQDN ---
    #[test]
    fn test_dns_name_ascii_fqdn() {
        let d = Domain::FQDN("example.com".to_string());
        assert_eq!(d.dns_name_ascii(), "example.com");
    }

    // --- Domain::describe for both variants ---
    #[test]
    fn test_describe_fqdn() {
        let d = Domain::FQDN("example.com".to_string());
        // ASCII domain should round-trip through describe unchanged
        assert_eq!(d.describe(), "example.com");
    }

    #[test]
    fn test_describe_wildcard() {
        let d = Domain::Wildcard("example.com".to_string());
        assert_eq!(d.describe(), "*.example.com");
    }

    // --- Domain::zones ---
    #[test]
    fn test_zones_fqdn() {
        let d = Domain::FQDN("sub.example.com".to_string());
        let zones = d.zones();
        assert_eq!(zones, vec!["sub.example.com", "example.com", "com"]);
    }

    #[test]
    fn test_zones_wildcard() {
        // Wildcard zones are derived from the base, without the "*." prefix.
        let d = Domain::Wildcard("example.com".to_string());
        let zones = d.zones();
        assert_eq!(zones, vec!["example.com", "com"]);
    }

    #[test]
    fn test_zones_single_label() {
        let d = Domain::FQDN("localhost".to_string());
        let zones = d.zones();
        assert_eq!(zones, vec!["localhost"]);
    }

    // --- Domain Display trait ---
    #[test]
    fn test_display_fqdn() {
        let d = Domain::FQDN("example.com".to_string());
        assert_eq!(format!("{d}"), "example.com");
    }

    #[test]
    fn test_display_wildcard() {
        let d = Domain::Wildcard("example.com".to_string());
        assert_eq!(format!("{d}"), "*.example.com");
    }

    // --- domain_to_ascii (tested indirectly via Domain::new) ---
    #[test]
    fn test_domain_new_empty_string() {
        // empty string -> domain_to_ascii returns Ok("") -> Domain::FQDN("")
        let d = Domain::new("").unwrap();
        assert_eq!(d, Domain::FQDN("".to_string()));
    }

    #[test]
    fn test_domain_new_ascii_domain() {
        let d = Domain::new("www.example.org").unwrap();
        assert_eq!(d.dns_name_ascii(), "www.example.org");
    }

    #[test]
    fn test_domain_new_internationalized() {
        // "münchen.de" should be encoded to punycode
        let d = Domain::new("münchen.de").unwrap();
        let ascii = d.dns_name_ascii();
        // The punycode-encoded form should start with "xn--"
        assert!(ascii.contains("xn--"), "expected punycode, got: {ascii}");
    }

    // --- describe_domain (tested indirectly via Domain::describe) ---
    #[test]
    fn test_describe_punycode_roundtrip() {
        // Build a domain with a known punycode label and confirm describe decodes it
        let d = Domain::new("münchen.de").unwrap();
        let described = d.describe();
        // Should contain the Unicode form, not the raw punycode
        assert!(described.contains("münchen") || described.contains("xn--"),
            "describe returned: {described}");
    }

    #[test]
    fn test_describe_regular_ascii() {
        let d = Domain::FQDN("example.com".to_string());
        assert_eq!(d.describe(), "example.com");
    }

    // --- parse_domain_list with empty input ---
    #[test]
    fn test_parse_domain_list_empty() {
        let result = parse_domain_list("").unwrap();
        assert!(result.is_empty());
    }

    #[test]
    fn test_parse_domain_list_whitespace_only() {
        let result = parse_domain_list(" ").unwrap();
        assert!(result.is_empty());
    }

    // --- Tokenizer edge cases (via parse_proxied_expression) ---
    #[test]
    fn test_tokenizer_single_ampersand_error() {
        let result = parse_proxied_expression("is(a.com) & is(b.com)");
        assert!(result.is_err());
        let err = result.err().unwrap();
        assert!(err.contains("&&"), "error was: {err}");
    }

    #[test]
    fn test_tokenizer_single_pipe_error() {
        let result = parse_proxied_expression("is(a.com) | is(b.com)");
        assert!(result.is_err());
        let err = result.err().unwrap();
        assert!(err.contains("||"), "error was: {err}");
    }

    #[test]
    fn test_tokenizer_unexpected_character_error() {
        let result = parse_proxied_expression("is(a.com) $ is(b.com)");
        assert!(result.is_err());
    }

    // --- Parser edge cases ---
    #[test]
    fn test_parse_and_expr_double_ampersand() {
        // is(a.com) && is(b.com) can never both hold for a single name.
        let pred = parse_proxied_expression("is(a.com) && is(b.com)").unwrap();
        assert!(!pred("a.com"));
        assert!(!pred("b.com"));
        let pred2 = parse_proxied_expression("sub(example.com) && !is(internal.example.com)").unwrap();
        assert!(pred2("www.example.com"));
        assert!(!pred2("internal.example.com"));
    }

    #[test]
    fn test_parse_nested_parentheses() {
        let pred = parse_proxied_expression("(is(a.com) || is(b.com)) && !is(c.com)").unwrap();
        assert!(pred("a.com"));
        assert!(pred("b.com"));
        assert!(!pred("c.com"));
    }

    #[test]
    fn test_parse_missing_closing_paren() {
        let result = parse_proxied_expression("(is(a.com)");
        assert!(result.is_err());
        let err = result.err().unwrap();
        assert!(err.contains("parenthesis") || err.contains(")"), "error was: {err}");
    }

    #[test]
    fn test_parse_unexpected_tokens_after_expr() {
        let result = parse_proxied_expression("true false");
        assert!(result.is_err());
    }

    // --- make_fqdn with wildcard subdomain ---
    #[test]
    fn test_make_fqdn_wildcard_subdomain() {
        // A name starting with "*." is treated as a wildcard subdomain
        assert_eq!(make_fqdn("*.sub", "example.com"), "*.sub.example.com");
    }
}

920
src/main.rs Normal file
View File

@@ -0,0 +1,920 @@
mod cloudflare;
mod config;
mod domain;
mod notifier;
mod pp;
mod provider;
mod updater;
use crate::cloudflare::{Auth, CloudflareHandle};
use crate::config::{AppConfig, CronSchedule};
use crate::notifier::{CompositeNotifier, Heartbeat, Message};
use crate::pp::PP;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use tokio::signal;
use tokio::time::{sleep, Duration};
/// Crate version, injected by Cargo at build time.
const VERSION: &str = env!("CARGO_PKG_VERSION");

/// Entry point: parses CLI flags, loads config (env-var or legacy mode),
/// wires up notifiers/heartbeats and the Ctrl-C handler, then dispatches to
/// the appropriate run loop.
#[tokio::main]
async fn main() {
    // Parse CLI args
    let args: Vec<String> = std::env::args().collect();
    let dry_run = args.iter().any(|a| a == "--dry-run");
    let repeat = args.iter().any(|a| a == "--repeat");
    // Check for unknown args (legacy behavior: refuse to run on typos)
    let known_args = ["--dry-run", "--repeat"];
    let unknown: Vec<&str> = args
        .iter()
        .skip(1)
        .filter(|a| !known_args.contains(&a.as_str()))
        .map(|a| a.as_str())
        .collect();
    if !unknown.is_empty() {
        eprintln!(
            "Unrecognized parameter(s): {}. Stopping now.",
            unknown.join(", ")
        );
        return;
    }
    // Determine config mode and create initial PP for config loading
    let initial_pp = if config::is_env_config_mode() {
        // In env mode, read emoji/quiet from env before loading full config
        let emoji = std::env::var("EMOJI")
            .map(|v| matches!(v.to_lowercase().as_str(), "true" | "1" | "yes"))
            .unwrap_or(true);
        let quiet = std::env::var("QUIET")
            .map(|v| matches!(v.to_lowercase().as_str(), "true" | "1" | "yes"))
            .unwrap_or(false);
        PP::new(emoji, quiet)
    } else {
        // Legacy mode: no emoji, not quiet (preserves original output behavior)
        PP::new(false, false)
    };
    println!("cloudflare-ddns v{VERSION}");
    // Load config
    let app_config = match config::load_config(dry_run, repeat, &initial_pp) {
        Ok(c) => c,
        Err(e) => {
            eprintln!("{e}");
            // Wait before exiting so Docker auto-restart doesn't spam logs
            // (mirrors the legacy Python behavior).
            sleep(Duration::from_secs(10)).await;
            std::process::exit(1);
        }
    };
    // Create PP with final settings
    let ppfmt = PP::new(app_config.emoji, app_config.quiet);
    if dry_run {
        ppfmt.noticef(
            pp::EMOJI_WARNING,
            "[DRY RUN] No records will be created, updated, or deleted.",
        );
    }
    // Print config summary (env mode only)
    config::print_config_summary(&app_config, &ppfmt);
    // Setup notifiers and heartbeats
    let notifier = config::setup_notifiers(&ppfmt);
    let heartbeat = config::setup_heartbeats(&ppfmt);
    // Create Cloudflare handle (for env mode)
    let handle = if !app_config.legacy_mode {
        CloudflareHandle::new(
            app_config.auth.clone(),
            app_config.update_timeout,
            app_config.managed_comment_regex.clone(),
            app_config.managed_waf_comment_regex.clone(),
        )
    } else {
        // Create a dummy handle for legacy mode (won't be used)
        CloudflareHandle::new(
            Auth::Token(String::new()),
            Duration::from_secs(30),
            None,
            None,
        )
    };
    // Signal handler for graceful shutdown: Ctrl-C clears `running`,
    // which the run loops poll once per second.
    let running = Arc::new(AtomicBool::new(true));
    let r = running.clone();
    tokio::spawn(async move {
        let _ = signal::ctrl_c().await;
        println!("Stopping...");
        r.store(false, Ordering::SeqCst);
    });
    // Start heartbeat
    heartbeat.start().await;
    if app_config.legacy_mode {
        // --- Legacy mode (original cloudflare-ddns behavior) ---
        run_legacy_mode(&app_config, &handle, &notifier, &heartbeat, &ppfmt, running).await;
    } else {
        // --- Env var mode (cf-ddns behavior) ---
        run_env_mode(&app_config, &handle, &notifier, &heartbeat, &ppfmt, running).await;
    }
    // On shutdown: delete records if configured (env mode only)
    if app_config.delete_on_stop && !app_config.legacy_mode {
        ppfmt.noticef(pp::EMOJI_STOP, "Deleting records on stop...");
        updater::final_delete(&app_config, &handle, &notifier, &heartbeat, &ppfmt).await;
    }
    // Exit heartbeat
    heartbeat
        .exit(&Message::new_ok("Shutting down"))
        .await;
}
/// Legacy (config.json) mode: mirrors the original Python script — a single
/// update, or, with `--repeat`, an update every `ttl` seconds until the
/// `running` flag is cleared by the signal handler.
async fn run_legacy_mode(
    config: &AppConfig,
    handle: &CloudflareHandle,
    notifier: &CompositeNotifier,
    heartbeat: &Heartbeat,
    ppfmt: &PP,
    running: Arc<AtomicBool>,
) {
    // Without a parsed legacy config there is nothing to do.
    let Some(legacy) = &config.legacy_config else {
        return;
    };
    if !config.repeat {
        // One-shot mode: a single update, then return.
        updater::update_once(config, handle, notifier, heartbeat, ppfmt).await;
        return;
    }
    // Announce which record families will be kept in sync.
    if legacy.a && legacy.aaaa {
        println!(
            "Updating IPv4 (A) & IPv6 (AAAA) records every {} seconds",
            legacy.ttl
        );
    } else if legacy.a {
        println!("Updating IPv4 (A) records every {} seconds", legacy.ttl);
    } else if legacy.aaaa {
        println!("Updating IPv6 (AAAA) records every {} seconds", legacy.ttl);
    } else {
        println!("Both IPv4 and IPv6 are disabled");
    }
    while running.load(Ordering::SeqCst) {
        updater::update_once(config, handle, notifier, heartbeat, ppfmt).await;
        // Sleep ttl seconds in one-second slices so shutdown stays responsive.
        for _ in 0..legacy.ttl {
            if !running.load(Ordering::SeqCst) {
                break;
            }
            sleep(Duration::from_secs(1)).await;
        }
    }
}
/// Env-var ("cf-ddns") mode: run updates on the configured schedule until
/// the `running` flag is cleared by the Ctrl-C handler.
async fn run_env_mode(
    config: &AppConfig,
    handle: &CloudflareHandle,
    notifier: &CompositeNotifier,
    heartbeat: &Heartbeat,
    ppfmt: &PP,
    running: Arc<AtomicBool>,
) {
    match &config.update_cron {
        CronSchedule::Once => {
            // "@once": at most one update, and only when update_on_start is set.
            if config.update_on_start {
                updater::update_once(config, handle, notifier, heartbeat, ppfmt).await;
            }
        }
        schedule => {
            // Fall back to 5 minutes if the schedule cannot produce a duration.
            let interval = schedule.next_duration().unwrap_or(Duration::from_secs(300));
            ppfmt.noticef(
                pp::EMOJI_LAUNCH,
                &format!(
                    "Started cloudflare-ddns, updating every {}",
                    describe_duration(interval)
                ),
            );
            // Update on start if configured
            if config.update_on_start {
                updater::update_once(config, handle, notifier, heartbeat, ppfmt).await;
            }
            // Main loop
            while running.load(Ordering::SeqCst) {
                // Sleep for interval, checking running flag each second
                let secs = interval.as_secs();
                let next_time = chrono::Local::now() + chrono::Duration::seconds(secs as i64);
                ppfmt.infof(
                    pp::EMOJI_SLEEP,
                    &format!(
                        "Next update at {}",
                        next_time.format("%Y-%m-%d %H:%M:%S %Z")
                    ),
                );
                for _ in 0..secs {
                    if !running.load(Ordering::SeqCst) {
                        return;
                    }
                    sleep(Duration::from_secs(1)).await;
                }
                if !running.load(Ordering::SeqCst) {
                    return;
                }
                updater::update_once(config, handle, notifier, heartbeat, ppfmt).await;
            }
        }
    }
}
/// Render a `Duration` as a compact human string: "2h", "1h30m",
/// "5m", "5m30s", or "45s".
///
/// Once the duration reaches a full hour, leftover seconds are
/// dropped (only hours and minutes are shown), matching the original
/// formatting.
fn describe_duration(d: Duration) -> String {
    let total = d.as_secs();
    let hours = total / 3600;
    let minutes = (total % 3600) / 60;
    let seconds = total % 60;
    if hours > 0 {
        if minutes > 0 {
            format!("{hours}h{minutes}m")
        } else {
            format!("{hours}h")
        }
    } else if minutes > 0 {
        if seconds > 0 {
            format!("{minutes}m{seconds}s")
        } else {
            format!("{minutes}m")
        }
    } else {
        format!("{total}s")
    }
}
// ============================================================
// Tests (backwards compatible with original test suite)
// ============================================================
// Wiremock-backed tests that mirror the original Python test suite: a
// local TestDdnsClient reimplements the legacy commit flow against a
// mock Cloudflare API so no real network access is needed.
#[cfg(test)]
mod tests {
    use crate::config::{
        LegacyAuthentication, LegacyCloudflareEntry, LegacyConfig, LegacySubdomainEntry,
        parse_legacy_config,
    };
    use crate::provider::parse_trace_ip;
    use reqwest::Client;
    use wiremock::matchers::{method, path, query_param};
    use wiremock::{Mock, MockServer, ResponseTemplate};
    // Minimal legacy config: one zone with a root ("") subdomain and a
    // proxied "vpn" subdomain; IPv4 (A) only.
    fn test_config(zone_id: &str) -> LegacyConfig {
        LegacyConfig {
            cloudflare: vec![LegacyCloudflareEntry {
                authentication: LegacyAuthentication {
                    api_token: "test-token".to_string(),
                    api_key: None,
                },
                zone_id: zone_id.to_string(),
                subdomains: vec![
                    LegacySubdomainEntry::Detailed {
                        name: "".to_string(),
                        proxied: false,
                    },
                    LegacySubdomainEntry::Detailed {
                        name: "vpn".to_string(),
                        proxied: true,
                    },
                ],
                proxied: false,
            }],
            a: true,
            aaaa: false,
            purge_unknown_records: false,
            ttl: 300,
        }
    }
    // Helper to create a legacy client for testing
    // Holds a reqwest client plus the mock server's base URL for both the
    // Cloudflare API and the trace-based IP detection endpoint.
    struct TestDdnsClient {
        client: Client,
        cf_api_base: String,
        ipv4_urls: Vec<String>,
        dry_run: bool,
    }
    impl TestDdnsClient {
        fn new(base_url: &str) -> Self {
            Self {
                client: Client::new(),
                cf_api_base: base_url.to_string(),
                ipv4_urls: vec![format!("{base_url}/cdn-cgi/trace")],
                dry_run: false,
            }
        }
        // Builder-style switch: log intended mutations instead of sending them.
        fn dry_run(mut self) -> Self {
            self.dry_run = true;
            self
        }
        // Generic Cloudflare API call: returns the deserialized body on a
        // 2xx response, None on HTTP errors, transport errors, or an
        // unsupported method string.
        async fn cf_api<T: serde::de::DeserializeOwned>(
            &self,
            endpoint: &str,
            method_str: &str,
            token: &str,
            body: Option<&impl serde::Serialize>,
        ) -> Option<T> {
            let url = format!("{}/{endpoint}", self.cf_api_base);
            let mut req = match method_str {
                "GET" => self.client.get(&url),
                "POST" => self.client.post(&url),
                "PUT" => self.client.put(&url),
                "DELETE" => self.client.delete(&url),
                _ => return None,
            };
            req = req.header("Authorization", format!("Bearer {token}"));
            if let Some(b) = body {
                req = req.json(b);
            }
            match req.send().await {
                Ok(resp) if resp.status().is_success() => resp.json::<T>().await.ok(),
                Ok(resp) => {
                    let text = resp.text().await.unwrap_or_default();
                    eprintln!("Error: {text}");
                    None
                }
                Err(e) => {
                    eprintln!("Exception: {e}");
                    None
                }
            }
        }
        // Query each trace URL in order; return the first body that yields
        // an `ip=` value via parse_trace_ip.
        async fn get_ip(&self) -> Option<String> {
            for url in &self.ipv4_urls {
                if let Ok(resp) = self.client.get(url).send().await {
                    if let Ok(body) = resp.text().await {
                        if let Some(ip) = parse_trace_ip(&body) {
                            return Some(ip);
                        }
                    }
                }
            }
            None
        }
        // Mirror of the legacy commit logic: for each configured zone and
        // subdomain, ensure exactly one record per FQDN matches `ip`,
        // creating/updating as needed and (optionally) deleting duplicates.
        async fn commit_record(
            &self,
            ip: &str,
            record_type: &str,
            config: &[LegacyCloudflareEntry],
            ttl: i64,
            purge_unknown_records: bool,
        ) {
            for entry in config {
                // Local response shapes — only the fields these tests inspect.
                #[derive(serde::Deserialize)]
                struct Resp<T> {
                    result: Option<T>,
                }
                #[derive(serde::Deserialize)]
                struct Zone {
                    name: String,
                }
                #[derive(serde::Deserialize)]
                struct Rec {
                    id: String,
                    name: String,
                    content: String,
                    proxied: bool,
                }
                let zone_resp: Option<Resp<Zone>> = self
                    .cf_api(
                        &format!("zones/{}", entry.zone_id),
                        "GET",
                        &entry.authentication.api_token,
                        None::<&()>.as_ref(),
                    )
                    .await;
                // Skip the whole zone if we cannot resolve its base domain.
                let base_domain = match zone_resp.and_then(|r| r.result) {
                    Some(z) => z.name,
                    None => continue,
                };
                for subdomain in &entry.subdomains {
                    let (name, proxied) = match subdomain {
                        LegacySubdomainEntry::Detailed { name, proxied } => {
                            (name.to_lowercase().trim().to_string(), *proxied)
                        }
                        LegacySubdomainEntry::Simple(name) => {
                            (name.to_lowercase().trim().to_string(), entry.proxied)
                        }
                    };
                    let fqdn = crate::domain::make_fqdn(&name, &base_domain);
                    #[derive(serde::Serialize)]
                    struct Payload {
                        #[serde(rename = "type")]
                        record_type: String,
                        name: String,
                        content: String,
                        proxied: bool,
                        ttl: i64,
                    }
                    let record = Payload {
                        record_type: record_type.to_string(),
                        name: fqdn.clone(),
                        content: ip.to_string(),
                        proxied,
                        ttl,
                    };
                    let dns_endpoint = format!(
                        "zones/{}/dns_records?per_page=100&type={record_type}",
                        entry.zone_id
                    );
                    let dns_records: Option<Resp<Vec<Rec>>> = self
                        .cf_api(
                            &dns_endpoint,
                            "GET",
                            &entry.authentication.api_token,
                            None::<&()>.as_ref(),
                        )
                        .await;
                    // Track one "canonical" matching record; any further
                    // records with the same FQDN are treated as duplicates.
                    let mut identifier: Option<String> = None;
                    let mut modified = false;
                    let mut duplicate_ids: Vec<String> = Vec::new();
                    if let Some(resp) = dns_records {
                        if let Some(records) = resp.result {
                            for r in &records {
                                if r.name == fqdn {
                                    if let Some(ref existing_id) = identifier {
                                        if r.content == ip {
                                            // Later record already has the right IP:
                                            // keep it and mark the earlier one stale.
                                            duplicate_ids.push(existing_id.clone());
                                            identifier = Some(r.id.clone());
                                        } else {
                                            duplicate_ids.push(r.id.clone());
                                        }
                                    } else {
                                        identifier = Some(r.id.clone());
                                        if r.content != ip || r.proxied != proxied {
                                            modified = true;
                                        }
                                    }
                                }
                            }
                        }
                    }
                    if let Some(ref id) = identifier {
                        if modified {
                            if self.dry_run {
                                println!("[DRY RUN] Would update record {fqdn} -> {ip}");
                            } else {
                                println!("Updating record {fqdn} -> {ip}");
                                let update_endpoint =
                                    format!("zones/{}/dns_records/{id}", entry.zone_id);
                                let _: Option<serde_json::Value> = self
                                    .cf_api(
                                        &update_endpoint,
                                        "PUT",
                                        &entry.authentication.api_token,
                                        Some(&record),
                                    )
                                    .await;
                            }
                        } else if self.dry_run {
                            println!("[DRY RUN] Record {fqdn} is up to date ({ip})");
                        }
                    } else if self.dry_run {
                        println!("[DRY RUN] Would add new record {fqdn} -> {ip}");
                    } else {
                        println!("Adding new record {fqdn} -> {ip}");
                        let create_endpoint =
                            format!("zones/{}/dns_records", entry.zone_id);
                        let _: Option<serde_json::Value> = self
                            .cf_api(
                                &create_endpoint,
                                "POST",
                                &entry.authentication.api_token,
                                Some(&record),
                            )
                            .await;
                    }
                    if purge_unknown_records {
                        for dup_id in &duplicate_ids {
                            if self.dry_run {
                                println!("[DRY RUN] Would delete stale record {dup_id}");
                            } else {
                                println!("Deleting stale record {dup_id}");
                                let del_endpoint =
                                    format!("zones/{}/dns_records/{dup_id}", entry.zone_id);
                                let _: Option<serde_json::Value> = self
                                    .cf_api(
                                        &del_endpoint,
                                        "DELETE",
                                        &entry.authentication.api_token,
                                        None::<&()>.as_ref(),
                                    )
                                    .await;
                            }
                        }
                    }
                }
            }
        }
    }
    #[test]
    fn test_parse_trace_ip() {
        let body = "fl=1f1\nh=1.1.1.1\nip=203.0.113.42\nts=1234567890\nvisit_scheme=https\n";
        assert_eq!(parse_trace_ip(body), Some("203.0.113.42".to_string()));
    }
    #[test]
    fn test_parse_trace_ip_missing() {
        let body = "fl=1f1\nh=1.1.1.1\nts=1234567890\n";
        assert_eq!(parse_trace_ip(body), None);
    }
    // Defaults: both record types on, no purging, TTL 300.
    #[test]
    fn test_parse_config_minimal() {
        let json = r#"{
            "cloudflare": [{
                "authentication": { "api_token": "tok123" },
                "zone_id": "zone1",
                "subdomains": ["@"]
            }]
        }"#;
        let config = parse_legacy_config(json).unwrap();
        assert!(config.a);
        assert!(config.aaaa);
        assert!(!config.purge_unknown_records);
        assert_eq!(config.ttl, 300);
    }
    // TTL below Cloudflare's minimum is clamped (10 -> 1, i.e. "automatic").
    #[test]
    fn test_parse_config_low_ttl() {
        let json = r#"{
            "cloudflare": [{
                "authentication": { "api_token": "tok123" },
                "zone_id": "zone1",
                "subdomains": ["@"]
            }],
            "ttl": 10
        }"#;
        let config = parse_legacy_config(json).unwrap();
        assert_eq!(config.ttl, 1);
    }
    #[tokio::test]
    async fn test_ip_detection() {
        let mock_server = MockServer::start().await;
        Mock::given(method("GET"))
            .and(path("/cdn-cgi/trace"))
            .respond_with(
                ResponseTemplate::new(200)
                    .set_body_string("fl=1f1\nh=mock\nip=198.51.100.7\nts=0\n"),
            )
            .mount(&mock_server)
            .await;
        let ddns = TestDdnsClient::new(&mock_server.uri());
        let ip = ddns.get_ip().await;
        assert_eq!(ip, Some("198.51.100.7".to_string()));
    }
    // Empty record list -> expect one POST per configured subdomain (root + vpn).
    #[tokio::test]
    async fn test_creates_new_record() {
        let mock_server = MockServer::start().await;
        let zone_id = "zone-abc-123";
        Mock::given(method("GET"))
            .and(path(format!("/zones/{zone_id}")))
            .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
                "result": { "name": "example.com" }
            })))
            .mount(&mock_server)
            .await;
        Mock::given(method("GET"))
            .and(path(format!("/zones/{zone_id}/dns_records")))
            .and(query_param("type", "A"))
            .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
                "result": []
            })))
            .mount(&mock_server)
            .await;
        Mock::given(method("POST"))
            .and(path(format!("/zones/{zone_id}/dns_records")))
            .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
                "result": { "id": "new-record-1" }
            })))
            .expect(2)
            .mount(&mock_server)
            .await;
        let ddns = TestDdnsClient::new(&mock_server.uri());
        let config = test_config(zone_id);
        ddns.commit_record("198.51.100.7", "A", &config.cloudflare, 300, false)
            .await;
    }
    // Stale content -> expect exactly one PUT per record.
    #[tokio::test]
    async fn test_updates_existing_record() {
        let mock_server = MockServer::start().await;
        let zone_id = "zone-update-1";
        Mock::given(method("GET"))
            .and(path(format!("/zones/{zone_id}")))
            .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
                "result": { "name": "example.com" }
            })))
            .mount(&mock_server)
            .await;
        Mock::given(method("GET"))
            .and(path(format!("/zones/{zone_id}/dns_records")))
            .and(query_param("type", "A"))
            .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
                "result": [
                    { "id": "rec-1", "name": "example.com", "content": "10.0.0.1", "proxied": false },
                    { "id": "rec-2", "name": "vpn.example.com", "content": "10.0.0.1", "proxied": true }
                ]
            })))
            .mount(&mock_server)
            .await;
        Mock::given(method("PUT"))
            .and(path(format!("/zones/{zone_id}/dns_records/rec-1")))
            .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
                "result": { "id": "rec-1" }
            })))
            .expect(1)
            .mount(&mock_server)
            .await;
        Mock::given(method("PUT"))
            .and(path(format!("/zones/{zone_id}/dns_records/rec-2")))
            .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
                "result": { "id": "rec-2" }
            })))
            .expect(1)
            .mount(&mock_server)
            .await;
        let ddns = TestDdnsClient::new(&mock_server.uri());
        let config = test_config(zone_id);
        ddns.commit_record("198.51.100.7", "A", &config.cloudflare, 300, false)
            .await;
    }
    // Records already correct -> any PUT/POST would hit an expect(0) mock and fail.
    #[tokio::test]
    async fn test_skips_up_to_date_record() {
        let mock_server = MockServer::start().await;
        let zone_id = "zone-noop";
        Mock::given(method("GET"))
            .and(path(format!("/zones/{zone_id}")))
            .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
                "result": { "name": "example.com" }
            })))
            .mount(&mock_server)
            .await;
        Mock::given(method("GET"))
            .and(path(format!("/zones/{zone_id}/dns_records")))
            .and(query_param("type", "A"))
            .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
                "result": [
                    { "id": "rec-1", "name": "example.com", "content": "198.51.100.7", "proxied": false },
                    { "id": "rec-2", "name": "vpn.example.com", "content": "198.51.100.7", "proxied": true }
                ]
            })))
            .mount(&mock_server)
            .await;
        Mock::given(method("PUT"))
            .respond_with(ResponseTemplate::new(500))
            .expect(0)
            .mount(&mock_server)
            .await;
        Mock::given(method("POST"))
            .respond_with(ResponseTemplate::new(500))
            .expect(0)
            .mount(&mock_server)
            .await;
        let ddns = TestDdnsClient::new(&mock_server.uri());
        let config = test_config(zone_id);
        ddns.commit_record("198.51.100.7", "A", &config.cloudflare, 300, false)
            .await;
    }
    // Dry-run mode must never issue mutating requests.
    #[tokio::test]
    async fn test_dry_run_does_not_mutate() {
        let mock_server = MockServer::start().await;
        let zone_id = "zone-dry";
        Mock::given(method("GET"))
            .and(path(format!("/zones/{zone_id}")))
            .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
                "result": { "name": "example.com" }
            })))
            .mount(&mock_server)
            .await;
        Mock::given(method("GET"))
            .and(path(format!("/zones/{zone_id}/dns_records")))
            .and(query_param("type", "A"))
            .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
                "result": []
            })))
            .mount(&mock_server)
            .await;
        Mock::given(method("POST"))
            .respond_with(ResponseTemplate::new(500))
            .expect(0)
            .mount(&mock_server)
            .await;
        let ddns = TestDdnsClient::new(&mock_server.uri()).dry_run();
        let config = test_config(zone_id);
        ddns.commit_record("198.51.100.7", "A", &config.cloudflare, 300, false)
            .await;
    }
    // Two identical records: the later one wins, the earlier ("rec-keep")
    // is the one deleted — matches the duplicate-tracking logic above.
    #[tokio::test]
    async fn test_purge_duplicate_records() {
        let mock_server = MockServer::start().await;
        let zone_id = "zone-purge";
        Mock::given(method("GET"))
            .and(path(format!("/zones/{zone_id}")))
            .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
                "result": { "name": "example.com" }
            })))
            .mount(&mock_server)
            .await;
        Mock::given(method("GET"))
            .and(path(format!("/zones/{zone_id}/dns_records")))
            .and(query_param("type", "A"))
            .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
                "result": [
                    { "id": "rec-keep", "name": "example.com", "content": "198.51.100.7", "proxied": false },
                    { "id": "rec-dup", "name": "example.com", "content": "198.51.100.7", "proxied": false }
                ]
            })))
            .mount(&mock_server)
            .await;
        Mock::given(method("DELETE"))
            .and(path(format!("/zones/{zone_id}/dns_records/rec-keep")))
            .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({})))
            .expect(1)
            .mount(&mock_server)
            .await;
        let ddns = TestDdnsClient::new(&mock_server.uri());
        let config = LegacyConfig {
            cloudflare: vec![LegacyCloudflareEntry {
                authentication: LegacyAuthentication {
                    api_token: "test-token".to_string(),
                    api_key: None,
                },
                zone_id: zone_id.to_string(),
                subdomains: vec![LegacySubdomainEntry::Detailed {
                    name: "".to_string(),
                    proxied: false,
                }],
                proxied: false,
            }],
            a: true,
            aaaa: false,
            purge_unknown_records: true,
            ttl: 300,
        };
        ddns.commit_record("198.51.100.7", "A", &config.cloudflare, 300, true)
            .await;
    }
    // --- describe_duration tests ---
    #[test]
    fn test_describe_duration_seconds_only() {
        use tokio::time::Duration;
        assert_eq!(super::describe_duration(Duration::from_secs(45)), "45s");
    }
    #[test]
    fn test_describe_duration_exact_minutes() {
        use tokio::time::Duration;
        assert_eq!(super::describe_duration(Duration::from_secs(300)), "5m");
    }
    #[test]
    fn test_describe_duration_minutes_and_seconds() {
        use tokio::time::Duration;
        assert_eq!(super::describe_duration(Duration::from_secs(330)), "5m30s");
    }
    #[test]
    fn test_describe_duration_exact_hours() {
        use tokio::time::Duration;
        assert_eq!(super::describe_duration(Duration::from_secs(7200)), "2h");
    }
    #[test]
    fn test_describe_duration_hours_and_minutes() {
        use tokio::time::Duration;
        assert_eq!(super::describe_duration(Duration::from_secs(5400)), "1h30m");
    }
    // Full happy path: detect the IP from the trace endpoint, then push it
    // through the commit flow against the same mock server.
    #[tokio::test]
    async fn test_end_to_end_detect_and_update() {
        let mock_server = MockServer::start().await;
        let zone_id = "zone-e2e";
        Mock::given(method("GET"))
            .and(path("/cdn-cgi/trace"))
            .respond_with(
                ResponseTemplate::new(200)
                    .set_body_string("fl=1f1\nh=mock\nip=203.0.113.99\nts=0\n"),
            )
            .mount(&mock_server)
            .await;
        Mock::given(method("GET"))
            .and(path(format!("/zones/{zone_id}")))
            .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
                "result": { "name": "example.com" }
            })))
            .mount(&mock_server)
            .await;
        Mock::given(method("GET"))
            .and(path(format!("/zones/{zone_id}/dns_records")))
            .and(query_param("type", "A"))
            .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
                "result": [
                    { "id": "rec-root", "name": "example.com", "content": "10.0.0.1", "proxied": false }
                ]
            })))
            .mount(&mock_server)
            .await;
        Mock::given(method("PUT"))
            .and(path(format!("/zones/{zone_id}/dns_records/rec-root")))
            .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
                "result": { "id": "rec-root" }
            })))
            .expect(1)
            .mount(&mock_server)
            .await;
        let ddns = TestDdnsClient::new(&mock_server.uri());
        let ip = ddns.get_ip().await;
        assert_eq!(ip, Some("203.0.113.99".to_string()));
        let config = LegacyConfig {
            cloudflare: vec![LegacyCloudflareEntry {
                authentication: LegacyAuthentication {
                    api_token: "test-token".to_string(),
                    api_key: None,
                },
                zone_id: zone_id.to_string(),
                subdomains: vec![LegacySubdomainEntry::Detailed {
                    name: "".to_string(),
                    proxied: false,
                }],
                proxied: false,
            }],
            a: true,
            aaaa: false,
            purge_unknown_records: false,
            ttl: 300,
        };
        ddns.commit_record("203.0.113.99", "A", &config.cloudflare, 300, false)
            .await;
    }
}

1436
src/notifier.rs Normal file

File diff suppressed because it is too large Load Diff

435
src/pp.rs Normal file
View File

@@ -0,0 +1,435 @@
use std::collections::HashSet;
use std::sync::{Arc, Mutex};
// Verbosity levels
/// Output verbosity, ordered so `Ord` comparisons express "at least
/// this verbose": Quiet < Notice < Info < Verbose.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Verbosity {
    Quiet,
    Notice,
    Info,
    Verbose,
}
// Emoji constants (Unicode escapes; the trailing \u{FE0F} is the
// emoji-presentation variation selector).
#[allow(dead_code)]
pub const EMOJI_GLOBE: &str = "\u{1F30D}"; // globe showing Europe-Africa
pub const EMOJI_WARNING: &str = "\u{26A0}\u{FE0F}"; // warning sign
pub const EMOJI_ERROR: &str = "\u{274C}"; // cross mark
#[allow(dead_code)]
pub const EMOJI_SUCCESS: &str = "\u{2705}"; // check mark
pub const EMOJI_LAUNCH: &str = "\u{1F680}"; // rocket
pub const EMOJI_STOP: &str = "\u{1F6D1}"; // stop sign
pub const EMOJI_SLEEP: &str = "\u{1F634}"; // sleeping face
pub const EMOJI_DETECT: &str = "\u{1F50D}"; // magnifying glass
pub const EMOJI_UPDATE: &str = "\u{2B06}\u{FE0F}"; // up arrow
pub const EMOJI_CREATE: &str = "\u{2795}"; // plus sign
pub const EMOJI_DELETE: &str = "\u{2796}"; // minus sign
pub const EMOJI_SKIP: &str = "\u{23ED}\u{FE0F}"; // next-track button
pub const EMOJI_NOTIFY: &str = "\u{1F514}"; // bell
pub const EMOJI_HEARTBEAT: &str = "\u{1F493}"; // beating heart
pub const EMOJI_CONFIG: &str = "\u{2699}\u{FE0F}"; // gear
#[allow(dead_code)]
pub const EMOJI_HINT: &str = "\u{1F4A1}"; // light bulb
// One indentation step used by PP::indent().
const INDENT_PREFIX: &str = "   ";
/// Pretty-printer with verbosity gating, optional emoji prefixes,
/// nested indentation, and once-per-key message deduplication.
pub struct PP {
    // Messages above this verbosity level are suppressed.
    pub verbosity: Verbosity,
    // When false, emoji prefixes are omitted from all output.
    pub emoji: bool,
    // Indentation depth: INDENT_PREFIX is repeated this many times.
    indent: usize,
    // Keys already printed by the `*_once` helpers; shared (via Arc)
    // with child printers created by `indent()`.
    seen: Arc<Mutex<HashSet<String>>>,
}
impl PP {
    /// Build a printer. `quiet` collapses verbosity to `Quiet`;
    /// otherwise everything up to `Verbose` is shown. `emoji`
    /// controls whether emoji prefixes are printed.
    pub fn new(emoji: bool, quiet: bool) -> Self {
        let verbosity = match quiet {
            true => Verbosity::Quiet,
            false => Verbosity::Verbose,
        };
        Self {
            verbosity,
            emoji,
            indent: 0,
            seen: Arc::new(Mutex::new(HashSet::new())),
        }
    }
    /// Verbose printer with emoji disabled — the baseline configuration.
    pub fn default_pp() -> Self {
        Self::new(false, false)
    }
    /// True when messages at `level` should be printed.
    pub fn is_showing(&self, level: Verbosity) -> bool {
        level <= self.verbosity
    }
    /// Child printer indented one step deeper; it shares the
    /// deduplication set with its parent.
    pub fn indent(&self) -> PP {
        PP {
            verbosity: self.verbosity,
            emoji: self.emoji,
            indent: self.indent + 1,
            seen: Arc::clone(&self.seen),
        }
    }
    // Compose the final line: indentation, then an optional emoji prefix.
    fn render(&self, emoji: &str, msg: &str) -> String {
        let prefix = INDENT_PREFIX.repeat(self.indent);
        match self.emoji && !emoji.is_empty() {
            true => format!("{prefix}{emoji} {msg}"),
            false => format!("{prefix}{msg}"),
        }
    }
    /// Info-level message (stdout); shown at `Info` verbosity or above.
    pub fn infof(&self, emoji: &str, msg: &str) {
        if self.is_showing(Verbosity::Info) {
            println!("{}", self.render(emoji, msg));
        }
    }
    /// Notice-level message (stdout); shown at `Notice` verbosity or above.
    pub fn noticef(&self, emoji: &str, msg: &str) {
        if self.is_showing(Verbosity::Notice) {
            println!("{}", self.render(emoji, msg));
        }
    }
    /// Warning (stderr). Always printed, regardless of verbosity.
    pub fn warningf(&self, emoji: &str, msg: &str) {
        eprintln!("{}", self.render(emoji, msg));
    }
    /// Error (stderr). Always printed, regardless of verbosity.
    pub fn errorf(&self, emoji: &str, msg: &str) {
        eprintln!("{}", self.render(emoji, msg));
    }
    /// Like `infof`, but prints at most once per `key` across this
    /// printer and every printer sharing its `seen` set.
    #[allow(dead_code)]
    pub fn info_once(&self, key: &str, emoji: &str, msg: &str) {
        if !self.is_showing(Verbosity::Info) {
            return;
        }
        let first_time = self.seen.lock().unwrap().insert(key.to_string());
        if first_time {
            println!("{}", self.render(emoji, msg));
        }
    }
    /// Like `noticef`, but prints at most once per `key` across this
    /// printer and every printer sharing its `seen` set.
    #[allow(dead_code)]
    pub fn notice_once(&self, key: &str, emoji: &str, msg: &str) {
        if !self.is_showing(Verbosity::Notice) {
            return;
        }
        let first_time = self.seen.lock().unwrap().insert(key.to_string());
        if first_time {
            println!("{}", self.render(emoji, msg));
        }
    }
    /// Print an empty spacer line, but only at full verbosity.
    #[allow(dead_code)]
    pub fn blank_line_if_verbose(&self) {
        if self.is_showing(Verbosity::Verbose) {
            println!();
        }
    }
}
/// Join items into an English list with an Oxford comma:
/// `[]` -> `""`, `[a]` -> `"a"`, `[a, b]` -> `"a and b"`,
/// `[a, b, c]` -> `"a, b, and c"`.
#[allow(dead_code)]
pub fn english_join(items: &[String]) -> String {
    if items.len() <= 1 {
        // Empty slice yields "", a single item is returned as-is.
        return items.first().cloned().unwrap_or_default();
    }
    if let [first, second] = items {
        return format!("{first} and {second}");
    }
    // Three or more: comma-separate the head, Oxford comma before the last.
    let (last, head) = items.split_last().expect("len checked above");
    format!("{}, and {last}", head.join(", "))
}
#[cfg(test)]
mod tests {
    use super::*;
    // These tests assert observable state (flags, verbosity gating, the
    // shared `seen` set); printed output itself is not captured.
    // ---- PP::new with emoji flag ----
    #[test]
    fn new_with_emoji_true() {
        let pp = PP::new(true, false);
        assert!(pp.emoji);
    }
    #[test]
    fn new_with_emoji_false() {
        let pp = PP::new(false, false);
        assert!(!pp.emoji);
    }
    // ---- PP::new with quiet flag (verbosity levels) ----
    #[test]
    fn new_quiet_true_sets_verbosity_quiet() {
        let pp = PP::new(false, true);
        assert_eq!(pp.verbosity, Verbosity::Quiet);
    }
    #[test]
    fn new_quiet_false_sets_verbosity_verbose() {
        let pp = PP::new(false, false);
        assert_eq!(pp.verbosity, Verbosity::Verbose);
    }
    // ---- PP::is_showing at different verbosity levels ----
    #[test]
    fn quiet_shows_only_quiet_level() {
        let pp = PP::new(false, true);
        assert!(pp.is_showing(Verbosity::Quiet));
        assert!(!pp.is_showing(Verbosity::Notice));
        assert!(!pp.is_showing(Verbosity::Info));
        assert!(!pp.is_showing(Verbosity::Verbose));
    }
    #[test]
    fn verbose_shows_all_levels() {
        let pp = PP::new(false, false);
        assert!(pp.is_showing(Verbosity::Quiet));
        assert!(pp.is_showing(Verbosity::Notice));
        assert!(pp.is_showing(Verbosity::Info));
        assert!(pp.is_showing(Verbosity::Verbose));
    }
    #[test]
    fn notice_level_shows_quiet_and_notice_only() {
        let mut pp = PP::new(false, false);
        pp.verbosity = Verbosity::Notice;
        assert!(pp.is_showing(Verbosity::Quiet));
        assert!(pp.is_showing(Verbosity::Notice));
        assert!(!pp.is_showing(Verbosity::Info));
        assert!(!pp.is_showing(Verbosity::Verbose));
    }
    #[test]
    fn info_level_shows_up_to_info() {
        let mut pp = PP::new(false, false);
        pp.verbosity = Verbosity::Info;
        assert!(pp.is_showing(Verbosity::Quiet));
        assert!(pp.is_showing(Verbosity::Notice));
        assert!(pp.is_showing(Verbosity::Info));
        assert!(!pp.is_showing(Verbosity::Verbose));
    }
    // ---- PP::indent ----
    #[test]
    fn indent_increments_indent_level() {
        let pp = PP::new(true, false);
        assert_eq!(pp.indent, 0);
        let child = pp.indent();
        assert_eq!(child.indent, 1);
        let grandchild = child.indent();
        assert_eq!(grandchild.indent, 2);
    }
    #[test]
    fn indent_preserves_verbosity_and_emoji() {
        let pp = PP::new(true, true);
        let child = pp.indent();
        assert_eq!(child.verbosity, pp.verbosity);
        assert_eq!(child.emoji, pp.emoji);
    }
    // Parent and child must share one seen-set (Arc), in both directions.
    #[test]
    fn indent_shares_seen_state() {
        let pp = PP::new(false, false);
        let child = pp.indent();
        // Insert via parent's seen set
        pp.seen.lock().unwrap().insert("key1".to_string());
        // Child should observe the same entry
        assert!(child.seen.lock().unwrap().contains("key1"));
        // Insert via child
        child.seen.lock().unwrap().insert("key2".to_string());
        // Parent should observe it too
        assert!(pp.seen.lock().unwrap().contains("key2"));
    }
    // ---- PP::infof, noticef, warningf, errorf - no panic and verbosity gating ----
    #[test]
    fn infof_does_not_panic_when_verbose() {
        let pp = PP::new(false, false);
        pp.infof("", "test info message");
    }
    #[test]
    fn infof_does_not_panic_when_quiet() {
        let pp = PP::new(false, true);
        // Should simply not print, and not panic
        pp.infof("", "test info message");
    }
    #[test]
    fn noticef_does_not_panic_when_verbose() {
        let pp = PP::new(true, false);
        pp.noticef(EMOJI_DETECT, "test notice message");
    }
    #[test]
    fn noticef_does_not_panic_when_quiet() {
        let pp = PP::new(false, true);
        pp.noticef("", "test notice message");
    }
    #[test]
    fn warningf_does_not_panic() {
        let pp = PP::new(true, false);
        pp.warningf(EMOJI_WARNING, "test warning");
    }
    #[test]
    fn warningf_does_not_panic_when_quiet() {
        // warningf always outputs (no verbosity check), just verify no panic
        let pp = PP::new(false, true);
        pp.warningf("", "test warning");
    }
    #[test]
    fn errorf_does_not_panic() {
        let pp = PP::new(true, false);
        pp.errorf(EMOJI_ERROR, "test error");
    }
    #[test]
    fn errorf_does_not_panic_when_quiet() {
        let pp = PP::new(false, true);
        pp.errorf("", "test error");
    }
    // ---- PP::info_once and notice_once ----
    #[test]
    fn info_once_suppresses_duplicates() {
        let pp = PP::new(false, false);
        // First call inserts the key
        pp.info_once("dup_key", "", "first");
        // The key should now be in the seen set
        assert!(pp.seen.lock().unwrap().contains("dup_key"));
        // Calling again with the same key should not insert again (set unchanged)
        let size_before = pp.seen.lock().unwrap().len();
        pp.info_once("dup_key", "", "second");
        let size_after = pp.seen.lock().unwrap().len();
        assert_eq!(size_before, size_after);
    }
    #[test]
    fn info_once_allows_different_keys() {
        let pp = PP::new(false, false);
        pp.info_once("key_a", "", "msg a");
        pp.info_once("key_b", "", "msg b");
        let seen = pp.seen.lock().unwrap();
        assert!(seen.contains("key_a"));
        assert!(seen.contains("key_b"));
        assert_eq!(seen.len(), 2);
    }
    #[test]
    fn info_once_skipped_when_quiet() {
        let pp = PP::new(false, true);
        pp.info_once("quiet_key", "", "should not register");
        // Because verbosity is Quiet, info_once should not even insert the key
        assert!(!pp.seen.lock().unwrap().contains("quiet_key"));
    }
    #[test]
    fn notice_once_suppresses_duplicates() {
        let pp = PP::new(false, false);
        pp.notice_once("notice_dup", "", "first");
        assert!(pp.seen.lock().unwrap().contains("notice_dup"));
        let size_before = pp.seen.lock().unwrap().len();
        pp.notice_once("notice_dup", "", "second");
        let size_after = pp.seen.lock().unwrap().len();
        assert_eq!(size_before, size_after);
    }
    #[test]
    fn notice_once_skipped_when_quiet() {
        let pp = PP::new(false, true);
        pp.notice_once("quiet_notice", "", "should not register");
        assert!(!pp.seen.lock().unwrap().contains("quiet_notice"));
    }
    // Deduplication must work across the parent/child printer boundary too.
    #[test]
    fn info_once_shared_via_indent() {
        let pp = PP::new(false, false);
        let child = pp.indent();
        // Mark a key via the parent
        pp.info_once("shared_key", "", "parent");
        assert!(pp.seen.lock().unwrap().contains("shared_key"));
        // Child should see it as already present, so set size stays the same
        let size_before = child.seen.lock().unwrap().len();
        child.info_once("shared_key", "", "child duplicate");
        let size_after = child.seen.lock().unwrap().len();
        assert_eq!(size_before, size_after);
        // Child can add a new key visible to parent
        child.info_once("child_key", "", "child new");
        assert!(pp.seen.lock().unwrap().contains("child_key"));
    }
    // ---- english_join ----
    #[test]
    fn english_join_empty() {
        let items: Vec<String> = vec![];
        assert_eq!(english_join(&items), "");
    }
    #[test]
    fn english_join_single() {
        let items = vec!["alpha".to_string()];
        assert_eq!(english_join(&items), "alpha");
    }
    #[test]
    fn english_join_two() {
        let items = vec!["alpha".to_string(), "beta".to_string()];
        assert_eq!(english_join(&items), "alpha and beta");
    }
    #[test]
    fn english_join_three() {
        let items = vec![
            "alpha".to_string(),
            "beta".to_string(),
            "gamma".to_string(),
        ];
        assert_eq!(english_join(&items), "alpha, beta, and gamma");
    }
    #[test]
    fn english_join_four() {
        let items = vec![
            "a".to_string(),
            "b".to_string(),
            "c".to_string(),
            "d".to_string(),
        ];
        assert_eq!(english_join(&items), "a, b, c, and d");
    }
    // ---- default_pp ----
    #[test]
    fn default_pp_is_verbose_no_emoji() {
        let pp = PP::default_pp();
        assert!(!pp.emoji);
        assert_eq!(pp.verbosity, Verbosity::Verbose);
    }
}

1201
src/provider.rs Normal file

File diff suppressed because it is too large Load Diff

2375
src/updater.rs Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -1,10 +0,0 @@
#!/bin/bash
# Legacy startup helper: create/activate a Python venv, install the
# pinned requirements (hiding "already satisfied" noise), then run the
# Python DDNS script.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
python3 -m venv venv
source ./venv/bin/activate
# NOTE(review): $DIR is unquoted — a path containing spaces would break here.
cd $DIR
# pipefail keeps pip's exit status; `|| :` tolerates grep matching nothing.
set -o pipefail; pip install -r requirements.txt | { grep -v "already satisfied" || :; }
python3 cloudflare-ddns.py