Update docs and fix bugs

This commit is contained in:
anonpenguin23 2026-02-17 08:49:16 +02:00
parent 0b5b6e68e3
commit 8aef779fcd
11 changed files with 129 additions and 82 deletions

View File

@ -63,7 +63,7 @@ test-e2e-quick:
.PHONY: build clean test deps tidy fmt vet lint install-hooks redeploy-devnet redeploy-testnet release health .PHONY: build clean test deps tidy fmt vet lint install-hooks redeploy-devnet redeploy-testnet release health
VERSION := 0.107.0 VERSION := 0.107.2
COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown) COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ) DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)' LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)'

View File

@ -41,7 +41,7 @@ Install nodes **one at a time**, waiting for each to complete before starting th
```bash ```bash
# SSH: <user>@<ns1-ip> # SSH: <user>@<ns1-ip>
sudo orama install \ sudo orama node install \
--vps-ip <ns1-ip> \ --vps-ip <ns1-ip> \
--domain <your-domain.com> \ --domain <your-domain.com> \
--base-domain <your-domain.com> \ --base-domain <your-domain.com> \
@ -50,7 +50,7 @@ sudo orama install \
After ns1 is installed, generate invite tokens: After ns1 is installed, generate invite tokens:
```bash ```bash
orama invite --expiry 24h sudo orama node invite --expiry 24h
``` ```
## ns2 - Nameserver + Relay ## ns2 - Nameserver + Relay
@ -58,7 +58,7 @@ orama invite --expiry 24h
```bash ```bash
# SSH: <user>@<ns2-ip> # SSH: <user>@<ns2-ip>
sudo orama install \ sudo orama node install \
--join http://<ns1-ip> --token <TOKEN> \ --join http://<ns1-ip> --token <TOKEN> \
--vps-ip <ns2-ip> \ --vps-ip <ns2-ip> \
--domain <your-domain.com> \ --domain <your-domain.com> \
@ -68,8 +68,7 @@ sudo orama install \
--anyone-nickname <relay-name> \ --anyone-nickname <relay-name> \
--anyone-wallet <wallet-address> \ --anyone-wallet <wallet-address> \
--anyone-contact "<contact-info>" \ --anyone-contact "<contact-info>" \
--anyone-family "<fingerprint1>,<fingerprint2>,..." \ --anyone-family "<fingerprint1>,<fingerprint2>,..."
--anyone-bandwidth 30
``` ```
## ns3 - Nameserver + Relay ## ns3 - Nameserver + Relay
@ -77,7 +76,7 @@ sudo orama install \
```bash ```bash
# SSH: <user>@<ns3-ip> # SSH: <user>@<ns3-ip>
sudo orama install \ sudo orama node install \
--join http://<ns1-ip> --token <TOKEN> \ --join http://<ns1-ip> --token <TOKEN> \
--vps-ip <ns3-ip> \ --vps-ip <ns3-ip> \
--domain <your-domain.com> \ --domain <your-domain.com> \
@ -87,27 +86,25 @@ sudo orama install \
--anyone-nickname <relay-name> \ --anyone-nickname <relay-name> \
--anyone-wallet <wallet-address> \ --anyone-wallet <wallet-address> \
--anyone-contact "<contact-info>" \ --anyone-contact "<contact-info>" \
--anyone-family "<fingerprint1>,<fingerprint2>,..." \ --anyone-family "<fingerprint1>,<fingerprint2>,..."
--anyone-bandwidth 30
``` ```
## node4 - Non-Nameserver + Relay ## node4 - Non-Nameserver + Relay
Domain is auto-generated (e.g., `node-a3f8k2.<your-domain.com>`). No `--domain` flag needed.
```bash ```bash
# SSH: <user>@<node4-ip> # SSH: <user>@<node4-ip>
sudo orama install \ sudo orama node install \
--join http://<ns1-ip> --token <TOKEN> \ --join http://<ns1-ip> --token <TOKEN> \
--vps-ip <node4-ip> \ --vps-ip <node4-ip> \
--domain node4.<your-domain.com> \
--base-domain <your-domain.com> \ --base-domain <your-domain.com> \
--skip-checks \
--anyone-relay --anyone-migrate \ --anyone-relay --anyone-migrate \
--anyone-nickname <relay-name> \ --anyone-nickname <relay-name> \
--anyone-wallet <wallet-address> \ --anyone-wallet <wallet-address> \
--anyone-contact "<contact-info>" \ --anyone-contact "<contact-info>" \
--anyone-family "<fingerprint1>,<fingerprint2>,..." \ --anyone-family "<fingerprint1>,<fingerprint2>,..."
--anyone-bandwidth 30
``` ```
## node5 - Non-Nameserver + Relay ## node5 - Non-Nameserver + Relay
@ -115,18 +112,15 @@ sudo orama install \
```bash ```bash
# SSH: <user>@<node5-ip> # SSH: <user>@<node5-ip>
sudo orama install \ sudo orama node install \
--join http://<ns1-ip> --token <TOKEN> \ --join http://<ns1-ip> --token <TOKEN> \
--vps-ip <node5-ip> \ --vps-ip <node5-ip> \
--domain node5.<your-domain.com> \
--base-domain <your-domain.com> \ --base-domain <your-domain.com> \
--skip-checks \
--anyone-relay --anyone-migrate \ --anyone-relay --anyone-migrate \
--anyone-nickname <relay-name> \ --anyone-nickname <relay-name> \
--anyone-wallet <wallet-address> \ --anyone-wallet <wallet-address> \
--anyone-contact "<contact-info>" \ --anyone-contact "<contact-info>" \
--anyone-family "<fingerprint1>,<fingerprint2>,..." \ --anyone-family "<fingerprint1>,<fingerprint2>,..."
--anyone-bandwidth 30
``` ```
## node6 - Non-Nameserver (No Anyone Relay) ## node6 - Non-Nameserver (No Anyone Relay)
@ -134,12 +128,10 @@ sudo orama install \
```bash ```bash
# SSH: <user>@<node6-ip> # SSH: <user>@<node6-ip>
sudo orama install \ sudo orama node install \
--join http://<ns1-ip> --token <TOKEN> \ --join http://<ns1-ip> --token <TOKEN> \
--vps-ip <node6-ip> \ --vps-ip <node6-ip> \
--domain node6.<your-domain.com> \ --base-domain <your-domain.com>
--base-domain <your-domain.com> \
--skip-checks
``` ```
## Verification ## Verification
@ -147,13 +139,14 @@ sudo orama install \
After all nodes are installed, verify cluster health: After all nodes are installed, verify cluster health:
```bash ```bash
# Check RQLite cluster (from any node) # Full cluster report (from local machine)
./bin/orama monitor report --env devnet
# Single node health
./bin/orama monitor report --env devnet --node <ip>
# Or manually from any VPS:
curl -s http://localhost:5001/status | jq -r '.store.raft.state, .store.raft.num_peers' curl -s http://localhost:5001/status | jq -r '.store.raft.state, .store.raft.num_peers'
# Should show: Leader (on one node) and N-1 peers
# Check gateway health
curl -s http://localhost:6001/health curl -s http://localhost:6001/health
# Check Anyone relay (on nodes with relays)
systemctl status orama-anyone-relay systemctl status orama-anyone-relay
``` ```

View File

@ -81,19 +81,33 @@ for ip in <ip1> <ip2> <ip3> <ip4> <ip5> <ip6>; do
done done
# 5. Find the RQLite leader (upgrade this one LAST) # 5. Find the RQLite leader (upgrade this one LAST)
ssh ubuntu@<any-node> 'curl -s http://localhost:5001/status | jq -r .store.raft.state' orama monitor report --env <env>
# Check "rqlite_leader" in summary output
# 6. Upgrade FOLLOWER nodes one at a time # 6. Upgrade FOLLOWER nodes one at a time
ssh ubuntu@<follower-ip> 'sudo orama node stop && sudo orama node upgrade --restart' ssh ubuntu@<follower-ip> 'sudo orama node stop && sudo orama node upgrade --restart'
# Wait for rejoin before proceeding to next node # IMPORTANT: Verify FULL health before proceeding to next node:
ssh ubuntu@<leader-ip> 'curl -s http://localhost:5001/status | jq -r .store.raft.num_peers' orama monitor report --env <env> --node <follower-ip>
# Should show expected number of peers (N-1) # Check:
# - All services active, 0 restart loops
# - RQLite: Follower state, applied_index matches cluster
# - All RQLite peers reachable (no partition alerts)
# - WireGuard peers connected with recent handshakes
# Only proceed to next node after ALL checks pass.
#
# NOTE: After restarting a node, other nodes may briefly report it as
# "unreachable" with "broken pipe" errors. This is normal — Raft TCP
# connections need ~1-2 minutes to re-establish. Wait and re-check
# before escalating.
# Repeat for each follower... # Repeat for each follower...
# 7. Upgrade the LEADER node last # 7. Upgrade the LEADER node last
ssh ubuntu@<leader-ip> 'sudo orama node stop && sudo orama node upgrade --restart' ssh ubuntu@<leader-ip> 'sudo orama node stop && sudo orama node upgrade --restart'
# Verify the new leader was elected and cluster is fully healthy:
orama monitor report --env <env>
``` ```
#### What NOT to Do #### What NOT to Do
@ -140,7 +154,7 @@ To deploy to all nodes, repeat steps 3-5 (dev) or 3-4 (production) for each VPS
| Flag | Description | | Flag | Description |
|------|-------------| |------|-------------|
| `--vps-ip <ip>` | VPS public IP address (required) | | `--vps-ip <ip>` | VPS public IP address (required) |
| `--domain <domain>` | Domain for HTTPS certificates. Nameserver nodes use the base domain (e.g., `example.com`); non-nameserver nodes use a subdomain (e.g., `node-4.example.com`) | | `--domain <domain>` | Domain for HTTPS certificates. Required for nameserver nodes (use the base domain, e.g., `example.com`). Auto-generated for non-nameserver nodes if omitted (e.g., `node-a3f8k2.example.com`) |
| `--base-domain <domain>` | Base domain for deployment routing (e.g., example.com) | | `--base-domain <domain>` | Base domain for deployment routing (e.g., example.com) |
| `--nameserver` | Configure this node as a nameserver (CoreDNS + Caddy) | | `--nameserver` | Configure this node as a nameserver (CoreDNS + Caddy) |
| `--join <url>` | Join existing cluster via HTTPS URL (e.g., `https://node1.example.com`) | | `--join <url>` | Join existing cluster via HTTPS URL (e.g., `https://node1.example.com`) |
@ -242,13 +256,16 @@ sudo orama node install --vps-ip 1.2.3.4 --domain example.com \
--base-domain example.com --nameserver --base-domain example.com --nameserver
# 2. On genesis node, generate an invite # 2. On genesis node, generate an invite
orama node invite orama node invite --expiry 24h
# Output: sudo orama node install --join https://example.com --token <TOKEN> --vps-ip <IP> # Output: sudo orama node install --join https://example.com --token <TOKEN> --vps-ip <IP>
# 3. On the new node, run the printed command # 3a. Join as nameserver (requires --domain set to base domain)
# Nameserver nodes use the base domain; non-nameserver nodes use subdomains (e.g., node-4.example.com) sudo orama node install --join http://1.2.3.4 --token abc123... \
sudo orama node install --join https://example.com --token abc123... \
--vps-ip 5.6.7.8 --domain example.com --base-domain example.com --nameserver --vps-ip 5.6.7.8 --domain example.com --base-domain example.com --nameserver
# 3b. Join as regular node (domain auto-generated, no --domain needed)
sudo orama node install --join http://1.2.3.4 --token abc123... \
--vps-ip 5.6.7.8 --base-domain example.com
``` ```
The join flow establishes a WireGuard VPN tunnel before starting cluster services. The join flow establishes a WireGuard VPN tunnel before starting cluster services.

View File

@ -80,7 +80,7 @@ func ShowHelp() {
fmt.Printf(" --interactive - Launch interactive TUI wizard\n") fmt.Printf(" --interactive - Launch interactive TUI wizard\n")
fmt.Printf(" --force - Reconfigure all settings\n") fmt.Printf(" --force - Reconfigure all settings\n")
fmt.Printf(" --vps-ip IP - VPS public IP address (required)\n") fmt.Printf(" --vps-ip IP - VPS public IP address (required)\n")
fmt.Printf(" --domain DOMAIN - Domain for this node (e.g., node-1.orama.network)\n") fmt.Printf(" --domain DOMAIN - Domain for HTTPS (auto-generated if omitted)\n")
fmt.Printf(" --peers ADDRS - Comma-separated peer multiaddrs (for joining cluster)\n") fmt.Printf(" --peers ADDRS - Comma-separated peer multiaddrs (for joining cluster)\n")
fmt.Printf(" --join ADDR - RQLite join address IP:port (for joining cluster)\n") fmt.Printf(" --join ADDR - RQLite join address IP:port (for joining cluster)\n")
fmt.Printf(" --cluster-secret HEX - 64-hex cluster secret (required when joining)\n") fmt.Printf(" --cluster-secret HEX - 64-hex cluster secret (required when joining)\n")

View File

@ -52,7 +52,7 @@ func ParseFlags(args []string) (*Flags, error) {
flags := &Flags{} flags := &Flags{}
fs.StringVar(&flags.VpsIP, "vps-ip", "", "Public IP of this VPS (required)") fs.StringVar(&flags.VpsIP, "vps-ip", "", "Public IP of this VPS (required)")
fs.StringVar(&flags.Domain, "domain", "", "Domain name for HTTPS (optional, e.g. gateway.example.com)") fs.StringVar(&flags.Domain, "domain", "", "Domain for HTTPS (auto-generated for non-nameserver nodes if omitted)")
fs.StringVar(&flags.BaseDomain, "base-domain", "", "Base domain for deployment routing (e.g., dbrs.space)") fs.StringVar(&flags.BaseDomain, "base-domain", "", "Base domain for deployment routing (e.g., dbrs.space)")
fs.BoolVar(&flags.Force, "force", false, "Force reconfiguration even if already installed") fs.BoolVar(&flags.Force, "force", false, "Force reconfiguration even if already installed")
fs.BoolVar(&flags.DryRun, "dry-run", false, "Show what would be done without making changes") fs.BoolVar(&flags.DryRun, "dry-run", false, "Show what would be done without making changes")

View File

@ -2,6 +2,7 @@ package install
import ( import (
"bufio" "bufio"
"crypto/rand"
"crypto/tls" "crypto/tls"
"encoding/json" "encoding/json"
"fmt" "fmt"
@ -295,6 +296,12 @@ func (o *Orchestrator) executeJoinFlow() error {
} }
fmt.Printf(" ✓ Secrets saved\n") fmt.Printf(" ✓ Secrets saved\n")
// Auto-generate domain for non-nameserver joining nodes
if o.flags.Domain == "" && !o.flags.Nameserver && joinResp.BaseDomain != "" {
o.flags.Domain = generateNodeDomain(joinResp.BaseDomain)
fmt.Printf("\n🌐 Auto-generated domain: %s\n", o.flags.Domain)
}
// Step 7: Generate configs using WG IP as advertise address // Step 7: Generate configs using WG IP as advertise address
// All inter-node communication uses WireGuard IPs, not public IPs // All inter-node communication uses WireGuard IPs, not public IPs
fmt.Printf("\n⚙ Generating configurations...\n") fmt.Printf("\n⚙ Generating configurations...\n")
@ -537,3 +544,17 @@ func (o *Orchestrator) installNamespaceTemplates() error {
return nil return nil
} }
// generateNodeDomain creates a random subdomain like "node-a3f8k2.example.com"
func generateNodeDomain(baseDomain string) string {
const chars = "abcdefghijklmnopqrstuvwxyz0123456789"
b := make([]byte, 6)
if _, err := rand.Read(b); err != nil {
// Fallback to timestamp-based
return fmt.Sprintf("node-%06x.%s", time.Now().UnixNano()%0xffffff, baseDomain)
}
for i := range b {
b[i] = chars[int(b[i])%len(chars)]
}
return fmt.Sprintf("node-%s.%s", string(b), baseDomain)
}

View File

@ -59,11 +59,13 @@ func HandleStopWithFlags(force bool) {
{"coredns", "caddy"}, // 5. Stop DNS/TLS last {"coredns", "caddy"}, // 5. Stop DNS/TLS last
} }
// First, disable all services to prevent auto-restart // Mask all services to immediately prevent Restart=always from reviving them.
disableArgs := []string{"disable"} // Unlike "disable" (which only removes boot symlinks), "mask" links the unit
disableArgs = append(disableArgs, services...) // to /dev/null so systemd cannot start it at all. Unmasked by "orama node start".
if err := exec.Command("systemctl", disableArgs...).Run(); err != nil { maskArgs := []string{"mask"}
fmt.Printf(" Warning: Failed to disable some services: %v\n", err) maskArgs = append(maskArgs, services...)
if err := exec.Command("systemctl", maskArgs...).Run(); err != nil {
fmt.Printf(" Warning: Failed to mask some services: %v\n", err)
} }
// Stop services in order with brief pauses between groups // Stop services in order with brief pauses between groups
@ -135,31 +137,16 @@ func HandleStopWithFlags(force bool) {
} }
} }
// Disable the service to prevent it from auto-starting on boot // Service is already masked (prevents both restart and boot start).
enabled, err := utils.IsServiceEnabled(svc) // No additional disable needed.
if err != nil {
fmt.Printf(" ⚠️ Unable to check if %s is enabled: %v\n", svc, err)
// Continue anyway - try to disable
}
if enabled {
if err := exec.Command("systemctl", "disable", svc).Run(); err != nil {
fmt.Printf(" ⚠️ Failed to disable %s: %v\n", svc, err)
hadError = true
} else {
fmt.Printf(" ✓ Disabled %s (will not auto-start on boot)\n", svc)
}
} else {
fmt.Printf(" %s already disabled\n", svc)
}
} }
if hadError { if hadError {
fmt.Fprintf(os.Stderr, "\n⚠ Some services may still be restarting due to Restart=always\n") fmt.Fprintf(os.Stderr, "\n⚠ Some services could not be stopped cleanly\n")
fmt.Fprintf(os.Stderr, " Check status with: systemctl list-units 'orama-*'\n") fmt.Fprintf(os.Stderr, " Check status with: systemctl list-units 'orama-*'\n")
fmt.Fprintf(os.Stderr, " If services are still restarting, they may need manual intervention\n")
} else { } else {
fmt.Printf("\n✅ All services stopped and disabled (will not auto-start on boot)\n") fmt.Printf("\n✅ All services stopped and masked (will not auto-start on boot)\n")
fmt.Printf(" Use 'orama node start' to start and re-enable services\n") fmt.Printf(" Use 'orama node start' to unmask and start services\n")
} }
} }

View File

@ -244,7 +244,8 @@ WantedBy=multi-user.target
`, ssg.oramaHome, ssg.oramaDir, logFile) `, ssg.oramaHome, ssg.oramaDir, logFile)
} }
// GenerateAnyoneClientService generates the Anyone Client SOCKS5 proxy systemd unit // GenerateAnyoneClientService generates the Anyone Client SOCKS5 proxy systemd unit.
// Uses the same anon binary as the relay, but with a client-only config (SocksPort only, no relay).
func (ssg *SystemdServiceGenerator) GenerateAnyoneClientService() string { func (ssg *SystemdServiceGenerator) GenerateAnyoneClientService() string {
logFile := filepath.Join(ssg.oramaDir, "logs", "anyone-client.log") logFile := filepath.Join(ssg.oramaDir, "logs", "anyone-client.log")
@ -255,14 +256,13 @@ Wants=network-online.target
[Service] [Service]
Type=simple Type=simple
Environment=HOME=%[1]s User=debian-anon
Environment=PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/lib/node_modules/.bin Group=debian-anon
WorkingDirectory=%[1]s ExecStart=/usr/bin/anon -f /etc/anon/anonrc
ExecStart=/usr/bin/npx anyone-client Restart=on-failure
Restart=always
RestartSec=5 RestartSec=5
StandardOutput=append:%[2]s StandardOutput=append:%[1]s
StandardError=append:%[2]s StandardError=append:%[1]s
SyslogIdentifier=anyone-client SyslogIdentifier=anyone-client
PrivateTmp=yes PrivateTmp=yes
@ -273,7 +273,7 @@ MemoryMax=1G
[Install] [Install]
WantedBy=multi-user.target WantedBy=multi-user.target
`, ssg.oramaHome, logFile, ssg.oramaDir) `, logFile)
} }
// GenerateAnyoneRelayService generates the Anyone Relay operator systemd unit // GenerateAnyoneRelayService generates the Anyone Relay operator systemd unit

View File

@ -11,6 +11,9 @@ Environment=HOME={{.HomeDir}}
ExecStart={{.HomeDir}}/bin/orama-node --config {{.OramaDir}}/configs/{{.ConfigFile}} ExecStart={{.HomeDir}}/bin/orama-node --config {{.OramaDir}}/configs/{{.ConfigFile}}
Restart=always Restart=always
RestartSec=5 RestartSec=5
TimeoutStopSec=45s
KillMode=mixed
KillSignal=SIGTERM
StandardOutput=journal StandardOutput=journal
StandardError=journal StandardError=journal
SyslogIdentifier=orama-node-{{.NodeType}} SyslogIdentifier=orama-node-{{.NodeType}}

View File

@ -1,11 +1,13 @@
#!/usr/bin/env bash #!/usr/bin/env bash
# #
# Clean all testnet nodes for fresh reinstall. # Clean testnet nodes for fresh reinstall.
# Preserves Anyone relay keys (/var/lib/anon/) for --anyone-migrate. # Preserves Anyone relay keys (/var/lib/anon/) for --anyone-migrate.
# DOES NOT TOUCH DEVNET NODES. # DOES NOT TOUCH DEVNET NODES.
# #
# Usage: scripts/clean-testnet.sh [--nuclear] # Usage: scripts/clean-testnet.sh [--nuclear] [IP ...]
# --nuclear Also remove shared binaries (rqlited, ipfs, coredns, caddy, etc.) # --nuclear Also remove shared binaries (rqlited, ipfs, coredns, caddy, etc.)
# IP ... Optional: only clean specific nodes by IP (e.g. 62.72.44.87 51.178.84.172)
# If no IPs given, cleans ALL testnet nodes.
# #
set -euo pipefail set -euo pipefail
@ -16,7 +18,14 @@ CONF="$ROOT_DIR/scripts/remote-nodes.conf"
command -v sshpass >/dev/null 2>&1 || { echo "ERROR: sshpass not installed (brew install sshpass / apt install sshpass)"; exit 1; } command -v sshpass >/dev/null 2>&1 || { echo "ERROR: sshpass not installed (brew install sshpass / apt install sshpass)"; exit 1; }
NUCLEAR=false NUCLEAR=false
[[ "${1:-}" == "--nuclear" ]] && NUCLEAR=true TARGET_IPS=()
# Split command-line args: the --nuclear flag toggles binary removal,
# every other argument is treated as a target node IP.
for arg in "$@"; do
  case "$arg" in
    --nuclear) NUCLEAR=true ;;
    *) TARGET_IPS+=("$arg") ;;
  esac
done
SSH_OPTS=(-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=10 -o LogLevel=ERROR -o PubkeyAuthentication=no) SSH_OPTS=(-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=10 -o LogLevel=ERROR -o PubkeyAuthentication=no)
@ -130,24 +139,41 @@ while IFS='|' read -r env hostspec pass role key; do
env="$(echo "$env" | xargs)" env="$(echo "$env" | xargs)"
[[ "$env" != "testnet" ]] && continue [[ "$env" != "testnet" ]] && continue
# If target IPs specified, only include matching nodes
if [[ ${#TARGET_IPS[@]} -gt 0 ]]; then
node_ip="${hostspec#*@}"
matched=false
for tip in "${TARGET_IPS[@]}"; do
[[ "$tip" == "$node_ip" ]] && matched=true && break
done
$matched || continue
fi
hosts+=("$hostspec") hosts+=("$hostspec")
passes+=("$pass") passes+=("$pass")
users+=("${hostspec%%@*}") users+=("${hostspec%%@*}")
done < "$CONF" done < "$CONF"
if [[ ${#hosts[@]} -eq 0 ]]; then if [[ ${#hosts[@]} -eq 0 ]]; then
if [[ ${#TARGET_IPS[@]} -gt 0 ]]; then
echo "ERROR: No testnet nodes found matching: ${TARGET_IPS[*]}"
else
echo "ERROR: No testnet nodes found in $CONF" echo "ERROR: No testnet nodes found in $CONF"
fi
exit 1 exit 1
fi fi
echo "== clean-testnet.sh — ${#hosts[@]} testnet nodes ==" if [[ ${#TARGET_IPS[@]} -gt 0 ]]; then
echo "== clean-testnet.sh — ${#hosts[@]} selected node(s) =="
else
echo "== clean-testnet.sh — ${#hosts[@]} testnet nodes (ALL) =="
fi
for i in "${!hosts[@]}"; do for i in "${!hosts[@]}"; do
echo " [$((i+1))] ${hosts[$i]}" echo " [$((i+1))] ${hosts[$i]}"
done done
echo "" echo ""
echo "This will CLEAN all testnet nodes (stop services, remove data)." echo "This will CLEAN the above node(s) (stop services, remove data)."
echo "Anyone relay keys (/var/lib/anon/) will be PRESERVED." echo "Anyone relay keys (/var/lib/anon/) will be PRESERVED."
echo "Devnet nodes will NOT be touched."
$NUCLEAR && echo "Nuclear mode: shared binaries will also be removed." $NUCLEAR && echo "Nuclear mode: shared binaries will also be removed."
echo "" echo ""
read -rp "Type 'yes' to continue: " confirm read -rp "Type 'yes' to continue: " confirm

View File

@ -22,7 +22,7 @@ ExecStart=/bin/sh -c 'exec /usr/local/bin/rqlited \
/opt/orama/.orama/data/namespaces/%i/rqlite/${NODE_ID}' /opt/orama/.orama/data/namespaces/%i/rqlite/${NODE_ID}'
# Graceful shutdown # Graceful shutdown
TimeoutStopSec=30s TimeoutStopSec=60s
KillMode=mixed KillMode=mixed
KillSignal=SIGTERM KillSignal=SIGTERM