Mirror of https://github.com/DeBrosOfficial/network.git, synced 2025-12-11 10:18:50 +00:00
refactor: streamline development and production command structure
- Consolidated development commands into a new `dev` command group for better organization.
- Introduced a `prod` command group to manage production environment operations.
- Updated Makefile to simplify the development environment setup and improve logging.
- Enhanced README to clarify the development process and health check requirements.
- Removed deprecated configuration and service management commands to streamline the CLI interface.
This commit is contained in:
parent c726dfc401
commit 0388c3a766

CHANGELOG.md (16 lines changed)
@@ -13,6 +13,22 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Deprecated

### Fixed

## [0.60.0] - 2025-11-09

### Added

- Introduced comprehensive `network-cli dev` commands for managing the local development environment (start, stop, status, logs).
- Added `network-cli prod` commands for streamlined production installation, upgrade, and service management on Linux systems (requires root).

### Changed

- Refactored `Makefile` targets (`dev` and `kill`) to use the new `network-cli dev up` and `network-cli dev down` commands, significantly simplifying the development workflow.
- Removed deprecated `network-cli config`, `network-cli setup`, `network-cli service`, and `network-cli rqlite` commands, consolidating functionality under `dev` and `prod`.

### Deprecated

### Removed

### Fixed

## [0.59.2] - 2025-11-08

### Added

Makefile (390 lines changed)
@@ -21,7 +21,7 @@ test-e2e:

.PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks kill

VERSION := 0.59.2
VERSION := 0.60.0
COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)'
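For context, `-X` flags like these only take effect if the binary declares matching package-level string variables. A minimal sketch of what `main.version`, `main.commit`, and `main.date` imply (the variable names come from the LDFLAGS above; the surrounding file is not shown in this diff):

```go
package main

import "fmt"

// Overridden at build time, e.g.:
//   go build -ldflags "-X 'main.version=0.60.0' -X 'main.commit=abc1234' -X 'main.date=2025-11-09T00:00:00Z'"
// The defaults below apply to a plain `go build` or `go run`.
var (
	version = "dev"
	commit  = "unknown"
	date    = "unknown"
)

func main() {
	fmt.Printf("version=%s commit=%s built=%s\n", version, commit, date)
}
```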
@@ -82,336 +82,14 @@ run-gateway:
	@echo "Generate it with: network-cli config init --type gateway"
	go run ./cmd/gateway

# One-command dev: Start bootstrap, node2, node3, gateway, and anon in background
# Requires: configs already exist in ~/.debros
# Development environment target
# Uses network-cli dev up to start full stack with dependency and port checking
dev: build
	@echo "🚀 Starting development network stack..."
	@mkdir -p .dev/pids
	@mkdir -p $$HOME/.debros/logs
	@echo "Starting Anyone client (anon proxy)..."
	@if [ "$$(uname)" = "Darwin" ]; then \
		echo " Detected macOS - using npx anyone-client"; \
		if command -v npx >/dev/null 2>&1; then \
			nohup npx anyone-client > $$HOME/.debros/logs/anon.log 2>&1 & echo $$! > .dev/pids/anon.pid; \
			echo " Anyone client started (PID: $$(cat .dev/pids/anon.pid))"; \
		else \
			echo " ⚠️ npx not found - skipping Anyone client"; \
			echo " Install with: npm install -g npm"; \
		fi; \
	elif [ "$$(uname)" = "Linux" ]; then \
		echo " Detected Linux - checking systemctl"; \
		if systemctl is-active --quiet anon 2>/dev/null; then \
			echo " ✓ Anon service already running"; \
		elif command -v systemctl >/dev/null 2>&1; then \
			echo " Starting anon service..."; \
			sudo systemctl start anon 2>/dev/null || echo " ⚠️ Failed to start anon service"; \
		else \
			echo " ⚠️ systemctl not found - skipping Anon"; \
		fi; \
	fi
	@echo "Initializing IPFS and Cluster for all nodes..."
	@if command -v ipfs >/dev/null 2>&1 && command -v ipfs-cluster-service >/dev/null 2>&1; then \
		CLUSTER_SECRET=$$HOME/.debros/cluster-secret; \
		if [ ! -f $$CLUSTER_SECRET ]; then \
			echo " Generating shared cluster secret..."; \
			ipfs-cluster-service --version >/dev/null 2>&1 && openssl rand -hex 32 > $$CLUSTER_SECRET || echo "0000000000000000000000000000000000000000000000000000000000000000" > $$CLUSTER_SECRET; \
		fi; \
		SECRET=$$(cat $$CLUSTER_SECRET); \
		SWARM_KEY=$$HOME/.debros/swarm.key; \
		if [ ! -f $$SWARM_KEY ]; then \
			echo " Generating private swarm key..."; \
			KEY_HEX=$$(openssl rand -hex 32 | tr '[:lower:]' '[:upper:]'); \
			printf "/key/swarm/psk/1.0.0/\n/base16/\n%s\n" "$$KEY_HEX" > $$SWARM_KEY; \
			chmod 600 $$SWARM_KEY; \
		fi; \
		echo " Setting up bootstrap node (IPFS: 5001, Cluster: 9094)..."; \
		if [ ! -d $$HOME/.debros/bootstrap/ipfs/repo ]; then \
			echo " Initializing IPFS..."; \
			mkdir -p $$HOME/.debros/bootstrap/ipfs; \
			IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs init --profile=server 2>&1 | grep -v "generating" | grep -v "peer identity" || true; \
			cp $$SWARM_KEY $$HOME/.debros/bootstrap/ipfs/repo/swarm.key; \
			IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.API '["/ip4/127.0.0.1/tcp/5001"]' 2>&1 | grep -v "generating" || true; \
			IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/127.0.0.1/tcp/8080"]' 2>&1 | grep -v "generating" || true; \
			IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4101","/ip6/::/tcp/4101"]' 2>&1 | grep -v "generating" || true; \
		else \
			if [ ! -f $$HOME/.debros/bootstrap/ipfs/repo/swarm.key ]; then \
				cp $$SWARM_KEY $$HOME/.debros/bootstrap/ipfs/repo/swarm.key; \
			fi; \
		fi; \
		echo " Creating IPFS Cluster directories (config will be managed by Go code)..."; \
		mkdir -p $$HOME/.debros/bootstrap/ipfs-cluster; \
		echo " Setting up node2 (IPFS: 5002, Cluster: 9104)..."; \
		if [ ! -d $$HOME/.debros/node2/ipfs/repo ]; then \
			echo " Initializing IPFS..."; \
			mkdir -p $$HOME/.debros/node2/ipfs; \
			IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs init --profile=server 2>&1 | grep -v "generating" | grep -v "peer identity" || true; \
			cp $$SWARM_KEY $$HOME/.debros/node2/ipfs/repo/swarm.key; \
			IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.API '["/ip4/127.0.0.1/tcp/5002"]' 2>&1 | grep -v "generating" || true; \
			IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/127.0.0.1/tcp/8081"]' 2>&1 | grep -v "generating" || true; \
			IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4102","/ip6/::/tcp/4102"]' 2>&1 | grep -v "generating" || true; \
		else \
			if [ ! -f $$HOME/.debros/node2/ipfs/repo/swarm.key ]; then \
				cp $$SWARM_KEY $$HOME/.debros/node2/ipfs/repo/swarm.key; \
			fi; \
		fi; \
		echo " Creating IPFS Cluster directories (config will be managed by Go code)..."; \
		mkdir -p $$HOME/.debros/node2/ipfs-cluster; \
		echo " Setting up node3 (IPFS: 5003, Cluster: 9114)..."; \
		if [ ! -d $$HOME/.debros/node3/ipfs/repo ]; then \
			echo " Initializing IPFS..."; \
			mkdir -p $$HOME/.debros/node3/ipfs; \
			IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs init --profile=server 2>&1 | grep -v "generating" | grep -v "peer identity" || true; \
			cp $$SWARM_KEY $$HOME/.debros/node3/ipfs/repo/swarm.key; \
			IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.API '["/ip4/127.0.0.1/tcp/5003"]' 2>&1 | grep -v "generating" || true; \
			IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/127.0.0.1/tcp/8082"]' 2>&1 | grep -v "generating" || true; \
			IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4103","/ip6/::/tcp/4103"]' 2>&1 | grep -v "generating" || true; \
		else \
			if [ ! -f $$HOME/.debros/node3/ipfs/repo/swarm.key ]; then \
				cp $$SWARM_KEY $$HOME/.debros/node3/ipfs/repo/swarm.key; \
			fi; \
		fi; \
		echo " Creating IPFS Cluster directories (config will be managed by Go code)..."; \
		mkdir -p $$HOME/.debros/node3/ipfs-cluster; \
		echo "Starting IPFS daemons..."; \
		if [ ! -f .dev/pids/ipfs-bootstrap.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-bootstrap.pid) 2>/dev/null; then \
			IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo nohup ipfs daemon --enable-pubsub-experiment > $$HOME/.debros/logs/ipfs-bootstrap.log 2>&1 & echo $$! > .dev/pids/ipfs-bootstrap.pid; \
			echo " Bootstrap IPFS started (PID: $$(cat .dev/pids/ipfs-bootstrap.pid), API: 5001)"; \
			sleep 3; \
		else \
			echo " ✓ Bootstrap IPFS already running"; \
		fi; \
		if [ ! -f .dev/pids/ipfs-node2.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-node2.pid) 2>/dev/null; then \
			IPFS_PATH=$$HOME/.debros/node2/ipfs/repo nohup ipfs daemon --enable-pubsub-experiment > $$HOME/.debros/logs/ipfs-node2.log 2>&1 & echo $$! > .dev/pids/ipfs-node2.pid; \
			echo " Node2 IPFS started (PID: $$(cat .dev/pids/ipfs-node2.pid), API: 5002)"; \
			sleep 3; \
		else \
			echo " ✓ Node2 IPFS already running"; \
		fi; \
		if [ ! -f .dev/pids/ipfs-node3.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-node3.pid) 2>/dev/null; then \
			IPFS_PATH=$$HOME/.debros/node3/ipfs/repo nohup ipfs daemon --enable-pubsub-experiment > $$HOME/.debros/logs/ipfs-node3.log 2>&1 & echo $$! > .dev/pids/ipfs-node3.pid; \
			echo " Node3 IPFS started (PID: $$(cat .dev/pids/ipfs-node3.pid), API: 5003)"; \
			sleep 3; \
		else \
			echo " ✓ Node3 IPFS already running"; \
		fi; \
	else \
		echo " ⚠️ ipfs or ipfs-cluster-service not found - skipping IPFS setup"; \
		echo " Install with: https://docs.ipfs.tech/install/ and https://ipfscluster.io/documentation/guides/install/"; \
	fi
	@sleep 2
	@echo "Starting bootstrap node..."
	@nohup ./bin/node --config bootstrap.yaml > $$HOME/.debros/logs/bootstrap.log 2>&1 & echo $$! > .dev/pids/bootstrap.pid
	@sleep 3
	@echo "Starting node2..."
	@nohup ./bin/node --config node2.yaml > $$HOME/.debros/logs/node2.log 2>&1 & echo $$! > .dev/pids/node2.pid
	@sleep 2
	@echo "Starting node3..."
	@nohup ./bin/node --config node3.yaml > $$HOME/.debros/logs/node3.log 2>&1 & echo $$! > .dev/pids/node3.pid
	@sleep 3
	@echo "Starting IPFS Cluster daemons (after Go nodes have configured them)..."
	@if command -v ipfs-cluster-service >/dev/null 2>&1; then \
		if [ ! -f .dev/pids/ipfs-cluster-bootstrap.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-cluster-bootstrap.pid) 2>/dev/null; then \
			if [ -f $$HOME/.debros/bootstrap/ipfs-cluster/service.json ]; then \
				env IPFS_CLUSTER_PATH=$$HOME/.debros/bootstrap/ipfs-cluster nohup ipfs-cluster-service daemon > $$HOME/.debros/logs/ipfs-cluster-bootstrap.log 2>&1 & echo $$! > .dev/pids/ipfs-cluster-bootstrap.pid; \
				echo " Bootstrap Cluster started (PID: $$(cat .dev/pids/ipfs-cluster-bootstrap.pid), API: 9094)"; \
				echo " Waiting for bootstrap cluster to be ready..."; \
				for i in $$(seq 1 30); do \
					if curl -s http://localhost:9094/peers >/dev/null 2>&1; then \
						break; \
					fi; \
					sleep 1; \
				done; \
				sleep 2; \
			else \
				echo " ⚠️ Bootstrap cluster config not ready yet"; \
			fi; \
		else \
			echo " ✓ Bootstrap Cluster already running"; \
		fi; \
		if [ ! -f .dev/pids/ipfs-cluster-node2.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-cluster-node2.pid) 2>/dev/null; then \
			if [ -f $$HOME/.debros/node2/ipfs-cluster/service.json ]; then \
				env IPFS_CLUSTER_PATH=$$HOME/.debros/node2/ipfs-cluster nohup ipfs-cluster-service daemon > $$HOME/.debros/logs/ipfs-cluster-node2.log 2>&1 & echo $$! > .dev/pids/ipfs-cluster-node2.pid; \
				echo " Node2 Cluster started (PID: $$(cat .dev/pids/ipfs-cluster-node2.pid), API: 9104)"; \
				sleep 3; \
			else \
				echo " ⚠️ Node2 cluster config not ready yet"; \
			fi; \
		else \
			echo " ✓ Node2 Cluster already running"; \
		fi; \
		if [ ! -f .dev/pids/ipfs-cluster-node3.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-cluster-node3.pid) 2>/dev/null; then \
			if [ -f $$HOME/.debros/node3/ipfs-cluster/service.json ]; then \
				env IPFS_CLUSTER_PATH=$$HOME/.debros/node3/ipfs-cluster nohup ipfs-cluster-service daemon > $$HOME/.debros/logs/ipfs-cluster-node3.log 2>&1 & echo $$! > .dev/pids/ipfs-cluster-node3.pid; \
				echo " Node3 Cluster started (PID: $$(cat .dev/pids/ipfs-cluster-node3.pid), API: 9114)"; \
				sleep 3; \
			else \
				echo " ⚠️ Node3 cluster config not ready yet"; \
			fi; \
		else \
			echo " ✓ Node3 Cluster already running"; \
		fi; \
	else \
		echo " ⚠️ ipfs-cluster-service not found - skipping cluster daemon startup"; \
	fi
	@sleep 1
	@echo "Starting Olric cache server..."
	@if command -v olric-server >/dev/null 2>&1; then \
		if [ ! -f $$HOME/.debros/olric-config.yaml ]; then \
			echo " Creating Olric config..."; \
			mkdir -p $$HOME/.debros; \
		fi; \
		if ! pgrep -f "olric-server" >/dev/null 2>&1; then \
			OLRIC_SERVER_CONFIG=$$HOME/.debros/olric-config.yaml nohup olric-server > $$HOME/.debros/logs/olric.log 2>&1 & echo $$! > .dev/pids/olric.pid; \
			echo " Olric cache server started (PID: $$(cat .dev/pids/olric.pid))"; \
			sleep 3; \
		else \
			echo " ✓ Olric cache server already running"; \
		fi; \
	else \
		echo " ⚠️ olric-server command not found - skipping Olric (cache endpoints will be disabled)"; \
		echo " Install with: go install github.com/olric-data/olric/cmd/olric-server@v0.7.0"; \
	fi
	@sleep 1
	@echo "Starting gateway..."
	@nohup ./bin/gateway --config gateway.yaml > $$HOME/.debros/logs/gateway.log 2>&1 & echo $$! > .dev/pids/gateway.pid
	@echo ""
	@echo "============================================================"
	@echo "✅ Development stack started!"
	@echo "============================================================"
	@echo ""
	@echo "Processes:"
	@if [ -f .dev/pids/anon.pid ]; then \
		echo " Anon: PID=$$(cat .dev/pids/anon.pid) (SOCKS: 9050)"; \
	fi
	@if [ -f .dev/pids/ipfs-bootstrap.pid ]; then \
		echo " Bootstrap IPFS: PID=$$(cat .dev/pids/ipfs-bootstrap.pid) (API: 5001)"; \
	fi
	@if [ -f .dev/pids/ipfs-node2.pid ]; then \
		echo " Node2 IPFS: PID=$$(cat .dev/pids/ipfs-node2.pid) (API: 5002)"; \
	fi
	@if [ -f .dev/pids/ipfs-node3.pid ]; then \
		echo " Node3 IPFS: PID=$$(cat .dev/pids/ipfs-node3.pid) (API: 5003)"; \
	fi
	@if [ -f .dev/pids/ipfs-cluster-bootstrap.pid ]; then \
		echo " Bootstrap Cluster: PID=$$(cat .dev/pids/ipfs-cluster-bootstrap.pid) (API: 9094)"; \
	fi
	@if [ -f .dev/pids/ipfs-cluster-node2.pid ]; then \
		echo " Node2 Cluster: PID=$$(cat .dev/pids/ipfs-cluster-node2.pid) (API: 9104)"; \
	fi
	@if [ -f .dev/pids/ipfs-cluster-node3.pid ]; then \
		echo " Node3 Cluster: PID=$$(cat .dev/pids/ipfs-cluster-node3.pid) (API: 9114)"; \
	fi
	@if [ -f .dev/pids/olric.pid ]; then \
		echo " Olric: PID=$$(cat .dev/pids/olric.pid) (API: 3320)"; \
	fi
	@echo " Bootstrap: PID=$$(cat .dev/pids/bootstrap.pid)"
	@echo " Node2: PID=$$(cat .dev/pids/node2.pid)"
	@echo " Node3: PID=$$(cat .dev/pids/node3.pid)"
	@echo " Gateway: PID=$$(cat .dev/pids/gateway.pid)"
	@echo ""
	@echo "Ports:"
	@echo " Anon SOCKS: 9050 (proxy endpoint: POST /v1/proxy/anon)"
	@if [ -f .dev/pids/ipfs-bootstrap.pid ]; then \
		echo " Bootstrap IPFS API: 5001"; \
		echo " Node2 IPFS API: 5002"; \
		echo " Node3 IPFS API: 5003"; \
		echo " Bootstrap Cluster: 9094 (pin management)"; \
		echo " Node2 Cluster: 9104 (pin management)"; \
		echo " Node3 Cluster: 9114 (pin management)"; \
	fi
	@if [ -f .dev/pids/olric.pid ]; then \
		echo " Olric: 3320 (cache API)"; \
	fi
	@echo " Bootstrap P2P: 4001, HTTP: 5001, Raft: 7001"
	@echo " Node2 P2P: 4002, HTTP: 5002, Raft: 7002"
	@echo " Node3 P2P: 4003, HTTP: 5003, Raft: 7003"
	@echo " Gateway: 6001"
	@echo ""
	@echo "Press Ctrl+C to stop all processes"
	@echo "============================================================"
	@echo ""
	@LOGS="$$HOME/.debros/logs/bootstrap.log $$HOME/.debros/logs/node2.log $$HOME/.debros/logs/node3.log $$HOME/.debros/logs/gateway.log"; \
	if [ -f .dev/pids/anon.pid ]; then \
		LOGS="$$LOGS $$HOME/.debros/logs/anon.log"; \
	fi; \
	if [ -f .dev/pids/ipfs-bootstrap.pid ]; then \
		LOGS="$$LOGS $$HOME/.debros/logs/ipfs-bootstrap.log $$HOME/.debros/logs/ipfs-node2.log $$HOME/.debros/logs/ipfs-node3.log"; \
	fi; \
	if [ -f .dev/pids/ipfs-cluster-bootstrap.pid ]; then \
		LOGS="$$LOGS $$HOME/.debros/logs/ipfs-cluster-bootstrap.log $$HOME/.debros/logs/ipfs-cluster-node2.log $$HOME/.debros/logs/ipfs-cluster-node3.log"; \
	fi; \
	if [ -f .dev/pids/olric.pid ]; then \
		LOGS="$$LOGS $$HOME/.debros/logs/olric.log"; \
	fi; \
	trap 'echo "Stopping all processes..."; kill $$(cat .dev/pids/*.pid) 2>/dev/null; rm -f .dev/pids/*.pid; exit 0' INT; \
	tail -f $$LOGS
	@./bin/network-cli dev up
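The removed recipe leans on one idiom throughout: a daemon counts as alive only if its PID file exists and `kill -0 $(cat pidfile)` succeeds. A minimal Go sketch of the same check (a hypothetical helper, Unix-only; not code from this repository):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
	"syscall"
)

// alive mirrors the Makefile guard `[ -f pidfile ] && kill -0 $(cat pidfile)`.
// Signal 0 delivers nothing; it only verifies the process exists and is signalable.
func alive(pidFile string) bool {
	data, err := os.ReadFile(pidFile)
	if err != nil {
		return false
	}
	pid, err := strconv.Atoi(strings.TrimSpace(string(data)))
	if err != nil {
		return false
	}
	return syscall.Kill(pid, syscall.Signal(0)) == nil
}

func main() {
	fmt.Println(alive(".dev/pids/ipfs-bootstrap.pid"))
}
```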

# Kill all processes
# Kill all processes using network-cli dev down
kill:
	@echo "🛑 Stopping all DeBros network services..."
	@echo ""
	@echo "Stopping DeBros nodes and gateway..."
	@if [ -f .dev/pids/gateway.pid ]; then \
		kill -TERM $$(cat .dev/pids/gateway.pid) 2>/dev/null && echo " ✓ Gateway stopped" || echo " ✗ Gateway not running"; \
		rm -f .dev/pids/gateway.pid; \
	fi
	@if [ -f .dev/pids/bootstrap.pid ]; then \
		kill -TERM $$(cat .dev/pids/bootstrap.pid) 2>/dev/null && echo " ✓ Bootstrap node stopped" || echo " ✗ Bootstrap not running"; \
		rm -f .dev/pids/bootstrap.pid; \
	fi
	@if [ -f .dev/pids/node2.pid ]; then \
		kill -TERM $$(cat .dev/pids/node2.pid) 2>/dev/null && echo " ✓ Node2 stopped" || echo " ✗ Node2 not running"; \
		rm -f .dev/pids/node2.pid; \
	fi
	@if [ -f .dev/pids/node3.pid ]; then \
		kill -TERM $$(cat .dev/pids/node3.pid) 2>/dev/null && echo " ✓ Node3 stopped" || echo " ✗ Node3 not running"; \
		rm -f .dev/pids/node3.pid; \
	fi
	@echo ""
	@echo "Stopping IPFS Cluster peers..."
	@if [ -f .dev/pids/ipfs-cluster-bootstrap.pid ]; then \
		kill -TERM $$(cat .dev/pids/ipfs-cluster-bootstrap.pid) 2>/dev/null && echo " ✓ Bootstrap Cluster stopped" || echo " ✗ Bootstrap Cluster not running"; \
		rm -f .dev/pids/ipfs-cluster-bootstrap.pid; \
	fi
	@if [ -f .dev/pids/ipfs-cluster-node2.pid ]; then \
		kill -TERM $$(cat .dev/pids/ipfs-cluster-node2.pid) 2>/dev/null && echo " ✓ Node2 Cluster stopped" || echo " ✗ Node2 Cluster not running"; \
		rm -f .dev/pids/ipfs-cluster-node2.pid; \
	fi
	@if [ -f .dev/pids/ipfs-cluster-node3.pid ]; then \
		kill -TERM $$(cat .dev/pids/ipfs-cluster-node3.pid) 2>/dev/null && echo " ✓ Node3 Cluster stopped" || echo " ✗ Node3 Cluster not running"; \
		rm -f .dev/pids/ipfs-cluster-node3.pid; \
	fi
	@echo ""
	@echo "Stopping IPFS daemons..."
	@if [ -f .dev/pids/ipfs-bootstrap.pid ]; then \
		kill -TERM $$(cat .dev/pids/ipfs-bootstrap.pid) 2>/dev/null && echo " ✓ Bootstrap IPFS stopped" || echo " ✗ Bootstrap IPFS not running"; \
		rm -f .dev/pids/ipfs-bootstrap.pid; \
	fi
	@if [ -f .dev/pids/ipfs-node2.pid ]; then \
		kill -TERM $$(cat .dev/pids/ipfs-node2.pid) 2>/dev/null && echo " ✓ Node2 IPFS stopped" || echo " ✗ Node2 IPFS not running"; \
		rm -f .dev/pids/ipfs-node2.pid; \
	fi
	@if [ -f .dev/pids/ipfs-node3.pid ]; then \
		kill -TERM $$(cat .dev/pids/ipfs-node3.pid) 2>/dev/null && echo " ✓ Node3 IPFS stopped" || echo " ✗ Node3 IPFS not running"; \
		rm -f .dev/pids/ipfs-node3.pid; \
	fi
	@echo ""
	@echo "Stopping Olric cache..."
	@if [ -f .dev/pids/olric.pid ]; then \
		kill -TERM $$(cat .dev/pids/olric.pid) 2>/dev/null && echo " ✓ Olric stopped" || echo " ✗ Olric not running"; \
		rm -f .dev/pids/olric.pid; \
	fi
	@echo ""
	@echo "Stopping Anon proxy..."
	@if [ -f .dev/pids/anyone.pid ]; then \
		kill -TERM $$(cat .dev/pids/anyone.pid) 2>/dev/null && echo " ✓ Anon proxy stopped" || echo " ✗ Anon proxy not running"; \
		rm -f .dev/pids/anyone.pid; \
	fi
	@echo ""
	@echo "Cleaning up any remaining processes on ports..."
	@lsof -ti:7001,7002,7003,5001,5002,5003,6001,4001,4002,4003,9050,3320,3322,9094,9095,9096,9097,9104,9105,9106,9107,9114,9115,9116,9117,8080,8081,8082 2>/dev/null | xargs kill -9 2>/dev/null && echo " ✓ Cleaned up remaining port bindings" || echo " ✓ No lingering processes found"
	@echo ""
	@echo "✅ All services stopped!"
	@./bin/network-cli dev down
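The kill target's `kill -TERM $(cat pidfile); rm -f pidfile` sequence translates directly to Go. A rough sketch of that idiom (hypothetical helper, Unix-only; the actual `dev down` implementation lives in pkg/environments/development and is not shown in this diff):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
	"syscall"
)

// stop sends SIGTERM to the PID recorded in pidFile, then removes the file,
// matching the recipe's `kill -TERM $(cat pidfile) ...; rm -f pidfile`.
func stop(pidFile string) error {
	data, err := os.ReadFile(pidFile)
	if err != nil {
		return err
	}
	pid, err := strconv.Atoi(strings.TrimSpace(string(data)))
	if err != nil {
		return err
	}
	defer os.Remove(pidFile)
	return syscall.Kill(pid, syscall.SIGTERM)
}

func main() {
	for _, f := range []string{".dev/pids/gateway.pid", ".dev/pids/bootstrap.pid"} {
		if err := stop(f); err != nil {
			fmt.Fprintf(os.Stderr, "%s: %v\n", f, err)
		}
	}
}
```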

# Help
help:
@@ -420,42 +98,25 @@ help:
	@echo " clean - Clean build artifacts"
	@echo " test - Run tests"
	@echo ""
	@echo "Development:"
	@echo " dev - Start full dev stack (bootstrap + 2 nodes + gateway)"
	@echo " Requires: configs in ~/.debros (run 'network-cli config init' first)"
	@echo "Local Development (Recommended):"
	@echo " make dev - Start full development stack with one command"
	@echo " - Checks dependencies and available ports"
	@echo " - Generates configs (bootstrap + node2 + node3 + gateway)"
	@echo " - Starts IPFS, RQLite, Olric, nodes, and gateway"
	@echo " - Validates cluster health (IPFS peers, RQLite, LibP2P)"
	@echo " - Stops all services if health checks fail"
	@echo " - Includes comprehensive logging"
	@echo " make kill - Stop all development services"
	@echo ""
	@echo "Configuration (NEW):"
	@echo " First, generate config files in ~/.debros with:"
	@echo " make build # Build CLI first"
	@echo " ./bin/network-cli config init # Generate full stack"
	@echo "Development Management (via network-cli):"
	@echo " ./bin/network-cli dev status - Show status of all dev services"
	@echo " ./bin/network-cli dev logs <component> [--follow]"
	@echo ""
	@echo "Network Targets (requires config files in ~/.debros):"
	@echo " run-node - Start bootstrap node"
	@echo " run-node2 - Start second node"
	@echo " run-node3 - Start third node"
	@echo " run-gateway - Start HTTP gateway"
	@echo " run-example - Run usage example"
	@echo ""
	@echo "Running Multiple Nodes:"
	@echo " Nodes use --config flag to select which YAML file in ~/.debros to load:"
	@echo " go run ./cmd/node --config bootstrap.yaml"
	@echo " go run ./cmd/node --config node.yaml"
	@echo " go run ./cmd/node --config node2.yaml"
	@echo " Generate configs with: ./bin/network-cli config init --name <filename.yaml>"
	@echo ""
	@echo "CLI Commands:"
	@echo " run-cli - Run network CLI help"
	@echo " cli-health - Check network health"
	@echo " cli-peers - List network peers"
	@echo " cli-status - Get network status"
	@echo " cli-storage-test - Test storage operations"
	@echo " cli-pubsub-test - Test pub/sub operations"
	@echo ""
	@echo "Development:"
	@echo " test-multinode - Full multi-node test with 1 bootstrap + 2 nodes"
	@echo " test-peer-discovery - Test peer discovery (requires running nodes)"
	@echo " test-replication - Test data replication (requires running nodes)"
	@echo " test-consensus - Test database consensus (requires running nodes)"
	@echo "Individual Node Targets (advanced):"
	@echo " run-node - Start bootstrap node directly"
	@echo " run-node2 - Start second node directly"
	@echo " run-node3 - Start third node directly"
	@echo " run-gateway - Start HTTP gateway directly"
	@echo ""
	@echo "Maintenance:"
	@echo " deps - Download dependencies"
@@ -463,9 +124,4 @@ help:
	@echo " fmt - Format code"
	@echo " vet - Vet code"
	@echo " lint - Lint code (fmt + vet)"
	@echo " clear-ports - Clear common dev ports"
	@echo " kill - Stop all running services (nodes, IPFS, cluster, gateway, olric)"
	@echo " dev-setup - Setup development environment"
	@echo " dev-cluster - Show cluster startup commands"
	@echo " dev - Full development workflow"
	@echo " help - Show this help"

@@ -43,7 +43,7 @@ DeBros Network is a decentralized peer-to-peer data platform built in Go. It com
   make dev
   ```

This starts three nodes and the HTTP gateway. Stop with `Ctrl+C`.
This starts three nodes and the HTTP gateway. **The command will not complete successfully until all services pass health checks** (IPFS peer connectivity, RQLite cluster formation, and LibP2P connectivity). If health checks fail, all services are stopped automatically. Stop with `Ctrl+C`.

4. Validate the network from another terminal:
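To make the health-check behavior concrete: such a gate can be scripted by polling an endpoint until it responds, the same pattern the old recipe used with `curl -s http://localhost:9094/peers`. A minimal Go sketch; the URL, path, and attempt count here are illustrative assumptions, not documented endpoints:

```go
package main

import (
	"fmt"
	"net/http"
	"os"
	"time"
)

// waitReady polls url once per second until it answers or attempts run out.
func waitReady(url string, attempts int) bool {
	for i := 0; i < attempts; i++ {
		if resp, err := http.Get(url); err == nil {
			resp.Body.Close()
			return true
		}
		time.Sleep(time.Second)
	}
	return false
}

func main() {
	// The gateway from `make dev` listens on :6001 (see the Makefile above).
	if !waitReady("http://localhost:6001/", 30) {
		fmt.Fprintln(os.Stderr, "gateway did not become ready")
		os.Exit(1)
	}
	fmt.Println("gateway is up")
}
```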
@@ -64,20 +64,18 @@ func main() {
		os.Exit(1)
	}

	// Setup and service commands
	case "setup":
		cli.HandleSetupCommand(args)
	case "service":
		cli.HandleServiceCommand(args)
	// Development environment commands
	case "dev":
		cli.HandleDevCommand(args)

	// Production environment commands
	case "prod":
		cli.HandleProdCommand(args)

	// Authentication commands
	case "auth":
		cli.HandleAuthCommand(args)

	// Config commands
	case "config":
		cli.HandleConfigCommand(args)

	// Basic network commands
	case "health":
		cli.HandleHealthCommand(format, timeout)
@@ -108,10 +106,6 @@ func main() {
	}
		cli.HandleConnectCommand(args[0], timeout)

	// RQLite commands
	case "rqlite":
		cli.HandleRQLiteCommand(args)

	// Help
	case "help", "--help", "-h":
		showHelp()
@@ -151,13 +145,18 @@ func showHelp() {
	fmt.Printf(" devnet enable - Shorthand for switching to devnet\n")
	fmt.Printf(" testnet enable - Shorthand for switching to testnet\n\n")

	fmt.Printf("🚀 Setup & Services:\n")
	fmt.Printf(" setup [--force] - Interactive VPS setup (Linux only, requires root)\n")
	fmt.Printf(" service start <target> - Start service (node, gateway, all)\n")
	fmt.Printf(" service stop <target> - Stop service\n")
	fmt.Printf(" service restart <target> - Restart service\n")
	fmt.Printf(" service status [target] - Show service status\n")
	fmt.Printf(" service logs <target> [opts] - View service logs (--follow, --since=1h)\n\n")
	fmt.Printf("💻 Local Development:\n")
	fmt.Printf(" dev up - Start full local dev environment\n")
	fmt.Printf(" dev down - Stop all dev services\n")
	fmt.Printf(" dev status - Show status of dev services\n")
	fmt.Printf(" dev logs <component> - View dev component logs\n\n")

	fmt.Printf("🚀 Production Deployment:\n")
	fmt.Printf(" prod install [--bootstrap] - Full production bootstrap (requires root)\n")
	fmt.Printf(" prod upgrade - Upgrade existing installation\n")
	fmt.Printf(" prod status - Show production service status\n")
	fmt.Printf(" prod logs <service> - View production service logs\n")
	fmt.Printf(" prod uninstall - Remove production services (preserves data)\n\n")

	fmt.Printf("🔐 Authentication:\n")
	fmt.Printf(" auth login - Authenticate with wallet\n")
@@ -165,10 +164,6 @@ func showHelp() {
	fmt.Printf(" auth whoami - Show current authentication\n")
	fmt.Printf(" auth status - Show detailed auth info\n\n")

	fmt.Printf("⚙️ Configuration:\n")
	fmt.Printf(" config init [--type <type>] - Generate configs (full stack or single)\n")
	fmt.Printf(" config validate --name <file> - Validate config file\n\n")

	fmt.Printf("🌐 Network Commands:\n")
	fmt.Printf(" health - Check network health\n")
	fmt.Printf(" peers - List connected peers\n")
@@ -179,9 +174,6 @@ func showHelp() {
	fmt.Printf("🗄️ Database:\n")
	fmt.Printf(" query <sql> 🔐 Execute database query\n\n")

	fmt.Printf("🔧 RQLite:\n")
	fmt.Printf(" rqlite fix 🔧 Fix misconfigured join address and clean raft state\n\n")

	fmt.Printf("📡 PubSub:\n")
	fmt.Printf(" pubsub publish <topic> <msg> 🔐 Publish message\n")
	fmt.Printf(" pubsub subscribe <topic> 🔐 Subscribe to topic\n")
@@ -1,552 +0,0 @@
package cli

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/DeBrosOfficial/network/pkg/config"
	"github.com/DeBrosOfficial/network/pkg/encryption"
)

// HandleConfigCommand handles config management commands
func HandleConfigCommand(args []string) {
	if len(args) == 0 {
		showConfigHelp()
		return
	}

	subcommand := args[0]
	subargs := args[1:]

	switch subcommand {
	case "init":
		handleConfigInit(subargs)
	case "validate":
		handleConfigValidate(subargs)
	case "help":
		showConfigHelp()
	default:
		fmt.Fprintf(os.Stderr, "Unknown config subcommand: %s\n", subcommand)
		showConfigHelp()
		os.Exit(1)
	}
}

func showConfigHelp() {
	fmt.Printf("Config Management Commands\n\n")
	fmt.Printf("Usage: network-cli config <subcommand> [options]\n\n")
	fmt.Printf("Subcommands:\n")
	fmt.Printf(" init - Generate full network stack in ~/.debros (bootstrap + 2 nodes + gateway)\n")
	fmt.Printf(" validate --name <file> - Validate a config file\n\n")
	fmt.Printf("Init Default Behavior (no --type):\n")
	fmt.Printf(" Generates bootstrap.yaml, node2.yaml, node3.yaml, gateway.yaml with:\n")
	fmt.Printf(" - Auto-generated identities for bootstrap, node2, node3\n")
	fmt.Printf(" - Correct bootstrap_peers and join addresses\n")
	fmt.Printf(" - Default ports: P2P 4001-4003, HTTP 5001-5003, Raft 7001-7003\n\n")
	fmt.Printf("Init Options:\n")
	fmt.Printf(" --type <type> - Single config type: node, bootstrap, gateway (skips stack generation)\n")
	fmt.Printf(" --name <file> - Output filename (default: depends on --type or 'stack' for full stack)\n")
	fmt.Printf(" --force - Overwrite existing config/stack files\n\n")
	fmt.Printf("Single Config Options (with --type):\n")
	fmt.Printf(" --id <id> - Node ID for bootstrap peers\n")
	fmt.Printf(" --listen-port <port> - LibP2P listen port (default: 4001)\n")
	fmt.Printf(" --rqlite-http-port <port> - RQLite HTTP port (default: 5001)\n")
	fmt.Printf(" --rqlite-raft-port <port> - RQLite Raft port (default: 7001)\n")
	fmt.Printf(" --join <host:port> - RQLite address to join (required for non-bootstrap)\n")
	fmt.Printf(" --bootstrap-peers <peers> - Comma-separated bootstrap peer multiaddrs\n\n")
	fmt.Printf("Examples:\n")
	fmt.Printf(" network-cli config init # Generate full stack\n")
	fmt.Printf(" network-cli config init --force # Overwrite existing stack\n")
	fmt.Printf(" network-cli config init --type bootstrap # Single bootstrap config (legacy)\n")
	fmt.Printf(" network-cli config validate --name node.yaml\n")
}

func handleConfigInit(args []string) {
	// Parse flags
	var (
		cfgType        = ""
		name           = "" // Will be set based on type if not provided
		id             string
		listenPort     = 4001
		rqliteHTTPPort = 5001
		rqliteRaftPort = 7001
		joinAddr       string
		bootstrapPeers string
		force          bool
	)

	for i := 0; i < len(args); i++ {
		switch args[i] {
		case "--type":
			if i+1 < len(args) {
				cfgType = args[i+1]
				i++
			}
		case "--name":
			if i+1 < len(args) {
				name = args[i+1]
				i++
			}
		case "--id":
			if i+1 < len(args) {
				id = args[i+1]
				i++
			}
		case "--listen-port":
			if i+1 < len(args) {
				if p, err := strconv.Atoi(args[i+1]); err == nil {
					listenPort = p
				}
				i++
			}
		case "--rqlite-http-port":
			if i+1 < len(args) {
				if p, err := strconv.Atoi(args[i+1]); err == nil {
					rqliteHTTPPort = p
				}
				i++
			}
		case "--rqlite-raft-port":
			if i+1 < len(args) {
				if p, err := strconv.Atoi(args[i+1]); err == nil {
					rqliteRaftPort = p
				}
				i++
			}
		case "--join":
			if i+1 < len(args) {
				joinAddr = args[i+1]
				i++
			}
		case "--bootstrap-peers":
			if i+1 < len(args) {
				bootstrapPeers = args[i+1]
				i++
			}
		case "--force":
			force = true
		}
	}

	// If --type is not specified, generate full stack
	if cfgType == "" {
		initFullStack(force)
		return
	}

	// Otherwise, continue with single-file generation
	// Validate type
	if cfgType != "node" && cfgType != "bootstrap" && cfgType != "gateway" {
		fmt.Fprintf(os.Stderr, "Invalid --type: %s (expected: node, bootstrap, or gateway)\n", cfgType)
		os.Exit(1)
	}

	// Set default name based on type if not provided
	if name == "" {
		switch cfgType {
		case "bootstrap":
			name = "bootstrap.yaml"
		case "gateway":
			name = "gateway.yaml"
		default:
			name = "node.yaml"
		}
	}

	// Ensure config directory exists
	configDir, err := config.EnsureConfigDir()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to ensure config directory: %v\n", err)
		os.Exit(1)
	}

	configPath := filepath.Join(configDir, name)

	// Check if file exists
	if !force {
		if _, err := os.Stat(configPath); err == nil {
			fmt.Fprintf(os.Stderr, "Config file already exists at %s (use --force to overwrite)\n", configPath)
			os.Exit(1)
		}
	}

	// Generate config based on type
	var configContent string
	switch cfgType {
	case "node":
		configContent = GenerateNodeConfig(name, id, listenPort, rqliteHTTPPort, rqliteRaftPort, joinAddr, bootstrapPeers)
	case "bootstrap":
		configContent = GenerateBootstrapConfig(name, id, listenPort, rqliteHTTPPort, rqliteRaftPort)
	case "gateway":
		configContent = GenerateGatewayConfig(bootstrapPeers)
	}

	// Write config file
	if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write config file: %v\n", err)
		os.Exit(1)
	}

	fmt.Printf("✅ Configuration file created: %s\n", configPath)
	fmt.Printf(" Type: %s\n", cfgType)
	fmt.Printf("\nYou can now start the %s using the generated config.\n", cfgType)
}

func handleConfigValidate(args []string) {
	var name string
	for i := 0; i < len(args); i++ {
		if args[i] == "--name" && i+1 < len(args) {
			name = args[i+1]
			i++
		}
	}

	if name == "" {
		fmt.Fprintf(os.Stderr, "Missing --name flag\n")
		showConfigHelp()
		os.Exit(1)
	}

	configDir, err := config.ConfigDir()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to get config directory: %v\n", err)
		os.Exit(1)
	}

	configPath := filepath.Join(configDir, name)
	file, err := os.Open(configPath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to open config file: %v\n", err)
		os.Exit(1)
	}
	defer file.Close()

	var cfg config.Config
	if err := config.DecodeStrict(file, &cfg); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to parse config: %v\n", err)
		os.Exit(1)
	}

	// Run validation
	errs := cfg.Validate()
	if len(errs) > 0 {
		fmt.Fprintf(os.Stderr, "\n❌ Configuration errors (%d):\n", len(errs))
		for _, err := range errs {
			fmt.Fprintf(os.Stderr, " - %s\n", err)
		}
		os.Exit(1)
	}

	fmt.Printf("✅ Config is valid: %s\n", configPath)
}

func initFullStack(force bool) {
	fmt.Printf("🚀 Initializing full network stack...\n")

	// Ensure ~/.debros directory exists
	homeDir, err := os.UserHomeDir()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to get home directory: %v\n", err)
		os.Exit(1)
	}
	debrosDir := filepath.Join(homeDir, ".debros")
	if err := os.MkdirAll(debrosDir, 0755); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to create ~/.debros directory: %v\n", err)
		os.Exit(1)
	}

	// Step 1: Generate bootstrap identity
	bootstrapIdentityDir := filepath.Join(debrosDir, "bootstrap")
	bootstrapIdentityPath := filepath.Join(bootstrapIdentityDir, "identity.key")

	if !force {
		if _, err := os.Stat(bootstrapIdentityPath); err == nil {
			fmt.Fprintf(os.Stderr, "Bootstrap identity already exists at %s (use --force to overwrite)\n", bootstrapIdentityPath)
			os.Exit(1)
		}
	}

	bootstrapInfo, err := encryption.GenerateIdentity()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to generate bootstrap identity: %v\n", err)
		os.Exit(1)
	}
	if err := os.MkdirAll(bootstrapIdentityDir, 0755); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to create bootstrap data directory: %v\n", err)
		os.Exit(1)
	}
	if err := encryption.SaveIdentity(bootstrapInfo, bootstrapIdentityPath); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to save bootstrap identity: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("✅ Generated bootstrap identity: %s (Peer ID: %s)\n", bootstrapIdentityPath, bootstrapInfo.PeerID.String())

	// Construct bootstrap multiaddr
	bootstrapMultiaddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/4001/p2p/%s", bootstrapInfo.PeerID.String())
	fmt.Printf(" Bootstrap multiaddr: %s\n", bootstrapMultiaddr)

	// Generate configs for all nodes...
	// (rest of the implementation - similar to what was in main.go)
	// I'll keep it similar to the original for consistency

	// Step 2: Generate bootstrap.yaml
	bootstrapName := "bootstrap.yaml"
	bootstrapPath := filepath.Join(debrosDir, bootstrapName)
	if !force {
		if _, err := os.Stat(bootstrapPath); err == nil {
			fmt.Fprintf(os.Stderr, "Bootstrap config already exists at %s (use --force to overwrite)\n", bootstrapPath)
			os.Exit(1)
		}
	}
	bootstrapContent := GenerateBootstrapConfig(bootstrapName, "", 4001, 5001, 7001)
	if err := os.WriteFile(bootstrapPath, []byte(bootstrapContent), 0644); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write bootstrap config: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("✅ Generated bootstrap config: %s\n", bootstrapPath)

	// Step 3: Generate node2.yaml
	node2Name := "node2.yaml"
	node2Path := filepath.Join(debrosDir, node2Name)
	if !force {
		if _, err := os.Stat(node2Path); err == nil {
			fmt.Fprintf(os.Stderr, "Node2 config already exists at %s (use --force to overwrite)\n", node2Path)
			os.Exit(1)
		}
	}
	node2Content := GenerateNodeConfig(node2Name, "", 4002, 5002, 7002, "localhost:5001", bootstrapMultiaddr)
	if err := os.WriteFile(node2Path, []byte(node2Content), 0644); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write node2 config: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("✅ Generated node2 config: %s\n", node2Path)

	// Step 4: Generate node3.yaml
	node3Name := "node3.yaml"
	node3Path := filepath.Join(debrosDir, node3Name)
	if !force {
		if _, err := os.Stat(node3Path); err == nil {
			fmt.Fprintf(os.Stderr, "Node3 config already exists at %s (use --force to overwrite)\n", node3Path)
			os.Exit(1)
		}
	}
	node3Content := GenerateNodeConfig(node3Name, "", 4003, 5003, 7003, "localhost:5001", bootstrapMultiaddr)
	if err := os.WriteFile(node3Path, []byte(node3Content), 0644); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write node3 config: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("✅ Generated node3 config: %s\n", node3Path)

	// Step 5: Generate gateway.yaml
	gatewayName := "gateway.yaml"
	gatewayPath := filepath.Join(debrosDir, gatewayName)
	if !force {
		if _, err := os.Stat(gatewayPath); err == nil {
			fmt.Fprintf(os.Stderr, "Gateway config already exists at %s (use --force to overwrite)\n", gatewayPath)
			os.Exit(1)
		}
	}
	gatewayContent := GenerateGatewayConfig(bootstrapMultiaddr)
	if err := os.WriteFile(gatewayPath, []byte(gatewayContent), 0644); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write gateway config: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("✅ Generated gateway config: %s\n", gatewayPath)

	fmt.Printf("\n" + strings.Repeat("=", 60) + "\n")
	fmt.Printf("✅ Full network stack initialized successfully!\n")
	fmt.Printf(strings.Repeat("=", 60) + "\n")
	fmt.Printf("\nBootstrap Peer ID: %s\n", bootstrapInfo.PeerID.String())
	fmt.Printf("Bootstrap Multiaddr: %s\n", bootstrapMultiaddr)
	fmt.Printf("\nGenerated configs:\n")
	fmt.Printf(" - %s\n", bootstrapPath)
	fmt.Printf(" - %s\n", node2Path)
	fmt.Printf(" - %s\n", node3Path)
	fmt.Printf(" - %s\n", gatewayPath)
	fmt.Printf("\nStart the network with: make dev\n")
}

// GenerateNodeConfig generates a node configuration
func GenerateNodeConfig(name, id string, listenPort, rqliteHTTPPort, rqliteRaftPort int, joinAddr, bootstrapPeers string) string {
	nodeID := id
	if nodeID == "" {
		nodeID = fmt.Sprintf("node-%d", time.Now().Unix())
	}

	// Parse bootstrap peers
	var peers []string
	if bootstrapPeers != "" {
		for _, p := range strings.Split(bootstrapPeers, ",") {
			if p = strings.TrimSpace(p); p != "" {
				peers = append(peers, p)
			}
		}
	}

	// Construct data_dir from name stem (remove .yaml)
	dataDir := strings.TrimSuffix(name, ".yaml")
	dataDir = filepath.Join(os.ExpandEnv("~"), ".debros", dataDir)

	var peersYAML strings.Builder
	if len(peers) == 0 {
		peersYAML.WriteString(" bootstrap_peers: []")
	} else {
		peersYAML.WriteString(" bootstrap_peers:\n")
		for _, p := range peers {
			fmt.Fprintf(&peersYAML, " - \"%s\"\n", p)
		}
	}

	if joinAddr == "" {
		joinAddr = "localhost:5001"
	}

	// Calculate IPFS cluster API port (9094 for bootstrap, 9104+ for nodes)
	// Pattern: Bootstrap (5001) -> 9094, Node2 (5002) -> 9104, Node3 (5003) -> 9114
	clusterAPIPort := 9094 + (rqliteHTTPPort-5001)*10
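	// Editor's note (added for clarity; not in the original source), working the formula through:
	//   bootstrap: rqliteHTTPPort 5001 -> 9094 + 0*10 = 9094
	//   node2:     rqliteHTTPPort 5002 -> 9094 + 1*10 = 9104
	//   node3:     rqliteHTTPPort 5003 -> 9094 + 2*10 = 9114
	// These match the Cluster API ports used throughout the Makefile above.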

	return fmt.Sprintf(`node:
  id: "%s"
  type: "node"
  listen_addresses:
    - "/ip4/0.0.0.0/tcp/%d"
  data_dir: "%s"
  max_connections: 50

database:
  data_dir: "%s/rqlite"
  replication_factor: 3
  shard_count: 16
  max_database_size: 1073741824
  backup_interval: "24h"
  rqlite_port: %d
  rqlite_raft_port: %d
  rqlite_join_address: "%s"
  cluster_sync_interval: "30s"
  peer_inactivity_limit: "24h"
  min_cluster_size: 1
ipfs:
  # IPFS Cluster API endpoint for pin management (leave empty to disable)
  cluster_api_url: "http://localhost:%d"
  # IPFS HTTP API endpoint for content retrieval
  api_url: "http://localhost:%d"
  # Timeout for IPFS operations
  timeout: "60s"
  # Replication factor for pinned content
  replication_factor: 3
  # Enable client-side encryption before upload
  enable_encryption: true

discovery:
%s
  discovery_interval: "15s"
  bootstrap_port: %d
  http_adv_address: "localhost:%d"
  raft_adv_address: "localhost:%d"
  node_namespace: "default"

security:
  enable_tls: false

logging:
  level: "info"
  format: "console"
`, nodeID, listenPort, dataDir, dataDir, rqliteHTTPPort, rqliteRaftPort, joinAddr, clusterAPIPort, rqliteHTTPPort, peersYAML.String(), 4001, rqliteHTTPPort, rqliteRaftPort)
}

// GenerateBootstrapConfig generates a bootstrap configuration
func GenerateBootstrapConfig(name, id string, listenPort, rqliteHTTPPort, rqliteRaftPort int) string {
	nodeID := id
	if nodeID == "" {
		nodeID = "bootstrap"
	}

	dataDir := filepath.Join(os.ExpandEnv("~"), ".debros", "bootstrap")

	return fmt.Sprintf(`node:
  id: "%s"
  type: "bootstrap"
  listen_addresses:
    - "/ip4/0.0.0.0/tcp/%d"
  data_dir: "%s"
  max_connections: 50

database:
  data_dir: "%s/rqlite"
  replication_factor: 3
  shard_count: 16
  max_database_size: 1073741824
  backup_interval: "24h"
  rqlite_port: %d
  rqlite_raft_port: %d
  rqlite_join_address: ""
  cluster_sync_interval: "30s"
  peer_inactivity_limit: "24h"
  min_cluster_size: 1
ipfs:
  # IPFS Cluster API endpoint for pin management (leave empty to disable)
  cluster_api_url: "http://localhost:9094"
  # IPFS HTTP API endpoint for content retrieval
  api_url: "http://localhost:%d"
  # Timeout for IPFS operations
  timeout: "60s"
  # Replication factor for pinned content
  replication_factor: 3
  # Enable client-side encryption before upload
  enable_encryption: true

discovery:
  bootstrap_peers: []
  discovery_interval: "15s"
  bootstrap_port: %d
  http_adv_address: "localhost:%d"
  raft_adv_address: "localhost:%d"
  node_namespace: "default"

security:
  enable_tls: false

logging:
  level: "info"
  format: "console"
`, nodeID, listenPort, dataDir, dataDir, rqliteHTTPPort, rqliteRaftPort, rqliteHTTPPort, 4001, rqliteHTTPPort, rqliteRaftPort)
}

// GenerateGatewayConfig generates a gateway configuration
func GenerateGatewayConfig(bootstrapPeers string) string {
	var peers []string
	if bootstrapPeers != "" {
		for _, p := range strings.Split(bootstrapPeers, ",") {
			if p = strings.TrimSpace(p); p != "" {
				peers = append(peers, p)
			}
		}
	}

	var peersYAML strings.Builder
	if len(peers) == 0 {
		peersYAML.WriteString("bootstrap_peers: []")
	} else {
		peersYAML.WriteString("bootstrap_peers:\n")
		for _, p := range peers {
			fmt.Fprintf(&peersYAML, " - \"%s\"\n", p)
		}
	}

	return fmt.Sprintf(`listen_addr: ":6001"
client_namespace: "default"
rqlite_dsn: ""
%s
olric_servers:
  - "127.0.0.1:3320"
olric_timeout: "10s"
ipfs_cluster_api_url: "http://localhost:9094"
ipfs_api_url: "http://localhost:5001"
ipfs_timeout: "60s"
ipfs_replication_factor: 3
`, peersYAML.String())
}

pkg/cli/dev_commands.go (new file, 191 lines)
@@ -0,0 +1,191 @@
package cli

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"

	"github.com/DeBrosOfficial/network/pkg/environments/development"
)

// HandleDevCommand handles the dev command group
func HandleDevCommand(args []string) {
	if len(args) == 0 {
		showDevHelp()
		return
	}

	subcommand := args[0]
	subargs := args[1:]

	switch subcommand {
	case "up":
		handleDevUp(subargs)
	case "down":
		handleDevDown(subargs)
	case "status":
		handleDevStatus(subargs)
	case "logs":
		handleDevLogs(subargs)
	case "help":
		showDevHelp()
	default:
		fmt.Fprintf(os.Stderr, "Unknown dev subcommand: %s\n", subcommand)
		showDevHelp()
		os.Exit(1)
	}
}

func showDevHelp() {
	fmt.Printf("🚀 Development Environment Commands\n\n")
	fmt.Printf("Usage: network-cli dev <subcommand> [options]\n\n")
	fmt.Printf("Subcommands:\n")
	fmt.Printf(" up - Start development environment (bootstrap + 2 nodes + gateway)\n")
	fmt.Printf(" down - Stop all development services\n")
	fmt.Printf(" status - Show status of running services\n")
	fmt.Printf(" logs <component> - Tail logs for a component\n")
	fmt.Printf(" help - Show this help\n\n")
	fmt.Printf("Examples:\n")
	fmt.Printf(" network-cli dev up\n")
	fmt.Printf(" network-cli dev down\n")
	fmt.Printf(" network-cli dev status\n")
	fmt.Printf(" network-cli dev logs bootstrap --follow\n")
}

func handleDevUp(args []string) {
	ctx := context.Background()

	// Get home directory and .debros path
	homeDir, err := os.UserHomeDir()
	if err != nil {
		fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
		os.Exit(1)
	}
	debrosDir := filepath.Join(homeDir, ".debros")

	// Step 1: Check dependencies
	fmt.Printf("📋 Checking dependencies...\n\n")
	checker := development.NewDependencyChecker()
	if _, err := checker.CheckAll(); err != nil {
		fmt.Fprintf(os.Stderr, "❌ %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("✓ All required dependencies available\n\n")

	// Step 2: Check ports
	fmt.Printf("🔌 Checking port availability...\n\n")
	portChecker := development.NewPortChecker()
	if _, err := portChecker.CheckAll(); err != nil {
		fmt.Fprintf(os.Stderr, "❌ %v\n\n", err)
		fmt.Fprintf(os.Stderr, "Port mapping:\n")
		for port, service := range development.PortMap() {
			fmt.Fprintf(os.Stderr, " %d - %s\n", port, service)
		}
		fmt.Fprintf(os.Stderr, "\n")
		os.Exit(1)
	}
	fmt.Printf("✓ All required ports available\n\n")

	// Step 3: Ensure configs
	fmt.Printf("⚙️ Preparing configuration files...\n\n")
	ensurer := development.NewConfigEnsurer(debrosDir)
	if err := ensurer.EnsureAll(); err != nil {
		fmt.Fprintf(os.Stderr, "❌ Failed to prepare configs: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("\n")

	// Step 4: Start services
	pm := development.NewProcessManager(debrosDir, os.Stdout)
	if err := pm.StartAll(ctx); err != nil {
		fmt.Fprintf(os.Stderr, "❌ Error starting services: %v\n", err)
		os.Exit(1)
	}

	// Step 5: Show summary
	fmt.Printf("🎉 Development environment is running!\n\n")
	fmt.Printf("Key endpoints:\n")
	fmt.Printf(" Gateway: http://localhost:6001\n")
	fmt.Printf(" Bootstrap IPFS: http://localhost:4501\n")
	fmt.Printf(" Node2 IPFS: http://localhost:4502\n")
	fmt.Printf(" Node3 IPFS: http://localhost:4503\n")
	fmt.Printf(" Anon SOCKS: 127.0.0.1:9050\n")
	fmt.Printf(" Olric Cache: http://localhost:3320\n\n")
	fmt.Printf("Useful commands:\n")
	fmt.Printf(" network-cli dev status - Show status\n")
	fmt.Printf(" network-cli dev logs bootstrap - Bootstrap logs\n")
	fmt.Printf(" network-cli dev down - Stop all services\n\n")
	fmt.Printf("Logs directory: %s/logs\n\n", debrosDir)
}
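Step 2 above delegates to development.NewPortChecker, whose implementation is not part of this diff. A plausible minimal version of such a probe, shown only to illustrate the idea (the real checker may differ):

```go
package main

import (
	"fmt"
	"net"
)

// portFree reports whether a TCP port can currently be bound on localhost.
func portFree(port int) bool {
	ln, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", port))
	if err != nil {
		return false
	}
	ln.Close()
	return true
}

func main() {
	// A few of the ports the dev stack uses (see the Makefile summary above).
	for _, p := range []int{4001, 5001, 6001, 7001, 9050, 9094} {
		fmt.Printf("port %d free: %v\n", p, portFree(p))
	}
}
```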

func handleDevDown(args []string) {
	homeDir, err := os.UserHomeDir()
	if err != nil {
		fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
		os.Exit(1)
	}
	debrosDir := filepath.Join(homeDir, ".debros")

	pm := development.NewProcessManager(debrosDir, os.Stdout)
	ctx := context.Background()

	if err := pm.StopAll(ctx); err != nil {
		fmt.Fprintf(os.Stderr, "⚠️ Error stopping services: %v\n", err)
	}
}

func handleDevStatus(args []string) {
	homeDir, err := os.UserHomeDir()
	if err != nil {
		fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
		os.Exit(1)
	}
	debrosDir := filepath.Join(homeDir, ".debros")

	pm := development.NewProcessManager(debrosDir, os.Stdout)
	ctx := context.Background()

	pm.Status(ctx)
}

func handleDevLogs(args []string) {
	if len(args) == 0 {
		fmt.Fprintf(os.Stderr, "Usage: network-cli dev logs <component> [--follow]\n")
		fmt.Fprintf(os.Stderr, "\nComponents: bootstrap, node2, node3, gateway, ipfs-bootstrap, ipfs-node2, ipfs-node3, olric, anon\n")
		os.Exit(1)
	}

	component := args[0]
	follow := len(args) > 1 && args[1] == "--follow"

	homeDir, err := os.UserHomeDir()
	if err != nil {
		fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
		os.Exit(1)
	}
	debrosDir := filepath.Join(homeDir, ".debros")

	logPath := filepath.Join(debrosDir, "logs", fmt.Sprintf("%s.log", component))
	if _, err := os.Stat(logPath); os.IsNotExist(err) {
		fmt.Fprintf(os.Stderr, "❌ Log file not found: %s\n", logPath)
		os.Exit(1)
	}

	if follow {
		// Run tail -f
		tailCmd := fmt.Sprintf("tail -f %s", logPath)
		fmt.Printf("Following %s (press Ctrl+C to stop)...\n\n", logPath)
		// syscall.Exec doesn't work in all environments, use exec.Command instead
		cmd := exec.Command("sh", "-c", tailCmd)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		cmd.Stdin = os.Stdin
		cmd.Run()
	} else {
		// Cat the file
		data, _ := os.ReadFile(logPath)
		fmt.Print(string(data))
	}
}
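The dev subcommands are thin wrappers over pkg/environments/development. Using only the calls visible in this file (NewProcessManager, StartAll, StopAll), a programmatic equivalent of `dev up` followed by `dev down` would look roughly like this sketch; treat it as illustrative, since the package's full API is not shown in the diff:

```go
package main

import (
	"context"
	"log"
	"os"
	"path/filepath"

	"github.com/DeBrosOfficial/network/pkg/environments/development"
)

func main() {
	home, err := os.UserHomeDir()
	if err != nil {
		log.Fatal(err)
	}
	pm := development.NewProcessManager(filepath.Join(home, ".debros"), os.Stdout)
	ctx := context.Background()

	if err := pm.StartAll(ctx); err != nil {
		log.Fatalf("start: %v", err) // `dev up` exits non-zero on the same error
	}
	defer func() {
		if err := pm.StopAll(ctx); err != nil {
			log.Printf("stop: %v", err)
		}
	}()

	// ... interact with the stack (gateway on :6001, etc.) ...
}
```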

pkg/cli/prod_commands.go (new file, 313 lines)
@ -0,0 +1,313 @@
package cli

import (
	"bufio"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"

	"github.com/DeBrosOfficial/network/pkg/environments/production"
)

// HandleProdCommand handles production environment commands
func HandleProdCommand(args []string) {
	if len(args) == 0 {
		showProdHelp()
		return
	}

	subcommand := args[0]
	subargs := args[1:]

	switch subcommand {
	case "install":
		handleProdInstall(subargs)
	case "upgrade":
		handleProdUpgrade(subargs)
	case "status":
		handleProdStatus()
	case "logs":
		handleProdLogs(subargs)
	case "uninstall":
		handleProdUninstall()
	case "help":
		showProdHelp()
	default:
		fmt.Fprintf(os.Stderr, "Unknown prod subcommand: %s\n", subcommand)
		showProdHelp()
		os.Exit(1)
	}
}

func showProdHelp() {
	fmt.Printf("Production Environment Commands\n\n")
	fmt.Printf("Usage: network-cli prod <subcommand> [options]\n\n")
	fmt.Printf("Subcommands:\n")
	fmt.Printf("  install          - Full production bootstrap (requires root/sudo)\n")
	fmt.Printf("    Options:\n")
	fmt.Printf("      --force         - Reconfigure all settings\n")
	fmt.Printf("      --bootstrap     - Install as bootstrap node\n")
	fmt.Printf("      --peers ADDRS   - Comma-separated bootstrap peers (for non-bootstrap)\n")
	fmt.Printf("      --vps-ip IP     - VPS public IP address\n")
	fmt.Printf("      --domain DOMAIN - Domain for HTTPS (optional)\n")
	fmt.Printf("  upgrade          - Upgrade existing installation (requires root/sudo)\n")
	fmt.Printf("  status           - Show status of production services\n")
	fmt.Printf("  logs <service>   - View production service logs\n")
	fmt.Printf("    Options:\n")
	fmt.Printf("      --follow        - Follow logs in real-time\n")
	fmt.Printf("  uninstall        - Remove production services (requires root/sudo)\n\n")
	fmt.Printf("Examples:\n")
	fmt.Printf("  sudo network-cli prod install --bootstrap\n")
	fmt.Printf("  sudo network-cli prod install --peers /ip4/1.2.3.4/tcp/4001/p2p/Qm...\n")
	fmt.Printf("  network-cli prod status\n")
	fmt.Printf("  network-cli prod logs node --follow\n")
}

func handleProdInstall(args []string) {
	// Parse arguments
	force := false
	isBootstrap := false
	var vpsIP, domain, peersStr string

	for i, arg := range args {
		switch arg {
		case "--force":
			force = true
		case "--bootstrap":
			isBootstrap = true
		case "--peers":
			if i+1 < len(args) {
				peersStr = args[i+1]
			}
		case "--vps-ip":
			if i+1 < len(args) {
				vpsIP = args[i+1]
			}
		case "--domain":
			if i+1 < len(args) {
				domain = args[i+1]
			}
		}
	}

	// Parse bootstrap peers if provided
	var bootstrapPeers []string
	if peersStr != "" {
		bootstrapPeers = strings.Split(peersStr, ",")
	}

	// Validate setup requirements
	if os.Geteuid() != 0 {
		fmt.Fprintf(os.Stderr, "❌ Production install must be run as root (use sudo)\n")
		os.Exit(1)
	}

	debrosHome := "/home/debros"
	setup := production.NewProductionSetup(debrosHome, os.Stdout, force)

	// Phase 1: Check prerequisites
	fmt.Printf("\n📋 Phase 1: Checking prerequisites...\n")
	if err := setup.Phase1CheckPrerequisites(); err != nil {
		fmt.Fprintf(os.Stderr, "❌ Prerequisites check failed: %v\n", err)
		os.Exit(1)
	}

	// Phase 2: Provision environment
	fmt.Printf("\n🛠️ Phase 2: Provisioning environment...\n")
	if err := setup.Phase2ProvisionEnvironment(); err != nil {
		fmt.Fprintf(os.Stderr, "❌ Environment provisioning failed: %v\n", err)
		os.Exit(1)
	}

	// Phase 2b: Install binaries
	fmt.Printf("\nPhase 2b: Installing binaries...\n")
	if err := setup.Phase2bInstallBinaries(); err != nil {
		fmt.Fprintf(os.Stderr, "❌ Binary installation failed: %v\n", err)
		os.Exit(1)
	}

	// Phase 2c: Initialize services
	nodeType := "node"
	if isBootstrap {
		nodeType = "bootstrap"
	}
	fmt.Printf("\nPhase 2c: Initializing services...\n")
	if err := setup.Phase2cInitializeServices(nodeType); err != nil {
		fmt.Fprintf(os.Stderr, "❌ Service initialization failed: %v\n", err)
		os.Exit(1)
	}

	// Phase 3: Generate secrets
	fmt.Printf("\n🔐 Phase 3: Generating secrets...\n")
	if err := setup.Phase3GenerateSecrets(isBootstrap); err != nil {
		fmt.Fprintf(os.Stderr, "❌ Secret generation failed: %v\n", err)
		os.Exit(1)
	}

	// Phase 4: Generate configs
	fmt.Printf("\n⚙️ Phase 4: Generating configurations...\n")
	enableHTTPS := domain != ""
	if err := setup.Phase4GenerateConfigs(isBootstrap, bootstrapPeers, vpsIP, enableHTTPS, domain); err != nil {
		fmt.Fprintf(os.Stderr, "❌ Configuration generation failed: %v\n", err)
		os.Exit(1)
	}

	// Phase 5: Create systemd services
	fmt.Printf("\n🔧 Phase 5: Creating systemd services...\n")
	if err := setup.Phase5CreateSystemdServices(nodeType); err != nil {
		fmt.Fprintf(os.Stderr, "❌ Service creation failed: %v\n", err)
		os.Exit(1)
	}

	// Log completion
	setup.LogSetupComplete("< peer ID from config >")
	fmt.Printf("✅ Production installation complete!\n\n")
}

func handleProdUpgrade(args []string) {
	// Parse arguments
	force := false
	for _, arg := range args {
		if arg == "--force" {
			force = true
		}
	}

	if os.Geteuid() != 0 {
		fmt.Fprintf(os.Stderr, "❌ Production upgrade must be run as root (use sudo)\n")
		os.Exit(1)
	}

	debrosHome := "/home/debros"
	fmt.Printf("🔄 Upgrading production installation...\n")
	fmt.Printf("   This will preserve existing configurations and data\n\n")

	// For now, just re-run the install with force flag
	setup := production.NewProductionSetup(debrosHome, os.Stdout, force)

	if err := setup.Phase1CheckPrerequisites(); err != nil {
		fmt.Fprintf(os.Stderr, "❌ Prerequisites check failed: %v\n", err)
		os.Exit(1)
	}

	if err := setup.Phase2ProvisionEnvironment(); err != nil {
		fmt.Fprintf(os.Stderr, "❌ Environment provisioning failed: %v\n", err)
		os.Exit(1)
	}

	fmt.Printf("✅ Upgrade complete!\n")
	fmt.Printf("   Services will use existing configurations\n")
	fmt.Printf("   To restart services: sudo systemctl restart debros-*\n\n")
}

func handleProdStatus() {
	fmt.Printf("Production Environment Status\n\n")

	servicesList := []struct {
		name string
		desc string
	}{
		{"debros-ipfs-bootstrap", "IPFS Daemon (Bootstrap)"},
		{"debros-ipfs-cluster-bootstrap", "IPFS Cluster (Bootstrap)"},
		{"debros-rqlite-bootstrap", "RQLite Database (Bootstrap)"},
		{"debros-olric", "Olric Cache Server"},
		{"debros-node-bootstrap", "DeBros Node (Bootstrap)"},
		{"debros-gateway", "DeBros Gateway"},
	}

	fmt.Printf("Services:\n")
	for _, svc := range servicesList {
		err := exec.Command("systemctl", "is-active", "--quiet", svc.name).Run()
		status := "❌ Inactive"
		if err == nil {
			status = "✅ Active"
		}
		fmt.Printf("  %s: %s\n", status, svc.desc)
	}

	fmt.Printf("\nDirectories:\n")
	debrosDir := "/home/debros/.debros"
	if _, err := os.Stat(debrosDir); err == nil {
		fmt.Printf("  ✅ %s exists\n", debrosDir)
	} else {
		fmt.Printf("  ❌ %s not found\n", debrosDir)
	}

	fmt.Printf("\nView logs with: network-cli prod logs <service>\n")
}

func handleProdLogs(args []string) {
	if len(args) == 0 {
		fmt.Fprintf(os.Stderr, "Usage: network-cli prod logs <service> [--follow]\n")
		os.Exit(1)
	}

	service := args[0]
	follow := false
	if len(args) > 1 && (args[1] == "--follow" || args[1] == "-f") {
		follow = true
	}

	if follow {
		fmt.Printf("Following logs for %s (press Ctrl+C to stop)...\n\n", service)
		cmd := exec.Command("journalctl", "-u", service, "-f")
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		cmd.Stdin = os.Stdin
		cmd.Run()
	} else {
		cmd := exec.Command("journalctl", "-u", service, "-n", "50")
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		cmd.Run()
	}
}

func handleProdUninstall() {
	if os.Geteuid() != 0 {
		fmt.Fprintf(os.Stderr, "❌ Production uninstall must be run as root (use sudo)\n")
		os.Exit(1)
	}

	fmt.Printf("⚠️ This will stop and remove all DeBros production services\n")
	fmt.Printf("⚠️ Configuration and data will be preserved in /home/debros/.debros\n\n")
	fmt.Printf("Continue? (yes/no): ")

	reader := bufio.NewReader(os.Stdin)
	response, _ := reader.ReadString('\n')
	response = strings.ToLower(strings.TrimSpace(response))

	if response != "yes" && response != "y" {
		fmt.Printf("Uninstall cancelled\n")
		return
	}

	services := []string{
		"debros-gateway",
		"debros-node-node",
		"debros-node-bootstrap",
		"debros-olric",
		"debros-rqlite-bootstrap",
		"debros-rqlite-node",
		"debros-ipfs-cluster-bootstrap",
		"debros-ipfs-cluster-node",
		"debros-ipfs-bootstrap",
		"debros-ipfs-node",
	}

	fmt.Printf("Stopping services...\n")
	for _, svc := range services {
		exec.Command("systemctl", "stop", svc).Run()
		exec.Command("systemctl", "disable", svc).Run()
		unitPath := filepath.Join("/etc/systemd/system", svc+".service")
		os.Remove(unitPath)
	}

	exec.Command("systemctl", "daemon-reload").Run()
	fmt.Printf("✅ Services uninstalled\n")
	fmt.Printf("   Configuration and data preserved in /home/debros/.debros\n")
	fmt.Printf("   To remove all data: rm -rf /home/debros/.debros\n\n")
}
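For contrast, the option parsing above can also be expressed with the standard library's `flag` package; a minimal sketch, assuming the same option names as `showProdHelp` (the hard-coded argument slice is illustrative only). Note the hand-rolled loop above also matches flag *values* against its switch, which a `FlagSet` avoids:

```go
package main

// Sketch: the same `prod install` options parsed with flag.FlagSet.
// Option names come from showProdHelp; the argument slice is made up.
import (
	"flag"
	"fmt"
	"strings"
)

func main() {
	fs := flag.NewFlagSet("prod install", flag.ExitOnError)
	force := fs.Bool("force", false, "reconfigure all settings")
	bootstrap := fs.Bool("bootstrap", false, "install as bootstrap node")
	peers := fs.String("peers", "", "comma-separated bootstrap peers")
	vpsIP := fs.String("vps-ip", "", "VPS public IP address")
	domain := fs.String("domain", "", "domain for HTTPS")

	fs.Parse([]string{"--bootstrap", "--peers", "/ip4/1.2.3.4/tcp/4001/p2p/Qm..."})

	var bootstrapPeers []string
	if *peers != "" {
		bootstrapPeers = strings.Split(*peers, ",")
	}
	fmt.Println(*force, *bootstrap, bootstrapPeers, *vpsIP, *domain)
}
```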
@ -1,327 +0,0 @@
package cli

import (
	"fmt"
	"net"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/DeBrosOfficial/network/pkg/config"
	"gopkg.in/yaml.v3"
)

// HandleRQLiteCommand handles rqlite-related commands
func HandleRQLiteCommand(args []string) {
	if len(args) == 0 {
		showRQLiteHelp()
		return
	}

	if runtime.GOOS != "linux" {
		fmt.Fprintf(os.Stderr, "❌ RQLite commands are only supported on Linux\n")
		os.Exit(1)
	}

	subcommand := args[0]
	subargs := args[1:]

	switch subcommand {
	case "fix":
		handleRQLiteFix(subargs)
	case "help":
		showRQLiteHelp()
	default:
		fmt.Fprintf(os.Stderr, "Unknown rqlite subcommand: %s\n", subcommand)
		showRQLiteHelp()
		os.Exit(1)
	}
}

func showRQLiteHelp() {
	fmt.Printf("🗄️ RQLite Commands\n\n")
	fmt.Printf("Usage: network-cli rqlite <subcommand> [options]\n\n")
	fmt.Printf("Subcommands:\n")
	fmt.Printf("  fix - Fix misconfigured join address and clean stale raft state\n\n")
	fmt.Printf("Description:\n")
	fmt.Printf("  The 'fix' command automatically repairs common rqlite cluster issues:\n")
	fmt.Printf("  - Corrects join address from HTTP port (5001) to Raft port (7001) if misconfigured\n")
	fmt.Printf("  - Cleans stale raft state that prevents proper cluster formation\n")
	fmt.Printf("  - Restarts the node service with corrected configuration\n\n")
	fmt.Printf("Requirements:\n")
	fmt.Printf("  - Must be run as root (use sudo)\n")
	fmt.Printf("  - Only works on non-bootstrap nodes (nodes with join_address configured)\n")
	fmt.Printf("  - Stops and restarts the debros-node service\n\n")
	fmt.Printf("Examples:\n")
	fmt.Printf("  sudo network-cli rqlite fix\n")
}

func handleRQLiteFix(args []string) {
	requireRoot()

	// Parse optional flags
	dryRun := false
	for _, arg := range args {
		if arg == "--dry-run" || arg == "-n" {
			dryRun = true
		}
	}

	if dryRun {
		fmt.Printf("🔍 Dry-run mode - no changes will be made\n\n")
	}

	fmt.Printf("🔧 RQLite Cluster Repair\n\n")

	// Load config
	configPath, err := config.DefaultPath("node.yaml")
	if err != nil {
		fmt.Fprintf(os.Stderr, "❌ Failed to determine config path: %v\n", err)
		os.Exit(1)
	}

	cfg, err := loadConfigForRepair(configPath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "❌ Failed to load config: %v\n", err)
		os.Exit(1)
	}

	// Check if this is a bootstrap node
	if cfg.Node.Type == "bootstrap" || cfg.Database.RQLiteJoinAddress == "" {
		fmt.Printf("ℹ️ This is a bootstrap node (no join address configured)\n")
		fmt.Printf("   Bootstrap nodes don't need repair - they are the cluster leader\n")
		fmt.Printf("   Run this command on follower nodes instead\n")
		return
	}

	joinAddr := cfg.Database.RQLiteJoinAddress

	// Check if join address needs fixing
	needsConfigFix := needsFix(joinAddr, cfg.Database.RQLiteRaftPort, cfg.Database.RQLitePort)
	var fixedAddr string

	if needsConfigFix {
		fmt.Printf("⚠️ Detected misconfigured join address: %s\n", joinAddr)
		fmt.Printf("   Expected Raft port (%d) but found HTTP port (%d)\n", cfg.Database.RQLiteRaftPort, cfg.Database.RQLitePort)

		// Extract host from join address
		host, _, err := parseJoinAddress(joinAddr)
		if err != nil {
			fmt.Fprintf(os.Stderr, "❌ Failed to parse join address: %v\n", err)
			os.Exit(1)
		}

		// Fix the join address - rqlite expects Raft port for -join
		fixedAddr = fmt.Sprintf("%s:%d", host, cfg.Database.RQLiteRaftPort)
		fmt.Printf("   Corrected address: %s\n\n", fixedAddr)
	} else {
		fmt.Printf("✅ Join address looks correct: %s\n", joinAddr)
		fmt.Printf("   Will clean stale raft state to ensure proper cluster formation\n\n")
		fixedAddr = joinAddr // No change needed
	}

	if dryRun {
		fmt.Printf("🔍 Dry-run: Would clean raft state")
		if needsConfigFix {
			fmt.Printf(" and fix config")
		}
		fmt.Printf("\n")
		return
	}

	// Stop the service
	fmt.Printf("⏹️ Stopping debros-node service...\n")
	if err := stopService("debros-node"); err != nil {
		fmt.Fprintf(os.Stderr, "❌ Failed to stop service: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("   ✓ Service stopped\n\n")

	// Update config file if needed
	if needsConfigFix {
		fmt.Printf("📝 Updating configuration file...\n")
		if err := updateConfigJoinAddress(configPath, fixedAddr); err != nil {
			fmt.Fprintf(os.Stderr, "❌ Failed to update config: %v\n", err)
			fmt.Fprintf(os.Stderr, "   Service is stopped - please fix manually and restart\n")
			os.Exit(1)
		}
		fmt.Printf("   ✓ Config updated: %s\n\n", configPath)
	}

	// Clean raft state
	fmt.Printf("🧹 Cleaning stale raft state...\n")
	dataDir := expandDataDir(cfg.Node.DataDir)
	raftDir := filepath.Join(dataDir, "rqlite", "raft")
	if err := cleanRaftState(raftDir); err != nil {
		fmt.Fprintf(os.Stderr, "⚠️ Failed to clean raft state: %v\n", err)
		fmt.Fprintf(os.Stderr, "   Continuing anyway - raft state may still exist\n")
	} else {
		fmt.Printf("   ✓ Raft state cleaned\n\n")
	}

	// Restart the service
	fmt.Printf("🚀 Restarting debros-node service...\n")
	if err := startService("debros-node"); err != nil {
		fmt.Fprintf(os.Stderr, "❌ Failed to start service: %v\n", err)
		fmt.Fprintf(os.Stderr, "   Config has been fixed - please restart manually:\n")
		fmt.Fprintf(os.Stderr, "   sudo systemctl start debros-node\n")
		os.Exit(1)
	}
	fmt.Printf("   ✓ Service started\n\n")

	fmt.Printf("✅ Repair complete!\n\n")
	fmt.Printf("The node should now join the cluster correctly.\n")
	fmt.Printf("Monitor logs with: sudo network-cli service logs node --follow\n")
}

func loadConfigForRepair(path string) (*config.Config, error) {
	file, err := os.Open(path)
	if err != nil {
		return nil, fmt.Errorf("failed to open config file: %w", err)
	}
	defer file.Close()

	var cfg config.Config
	if err := config.DecodeStrict(file, &cfg); err != nil {
		return nil, fmt.Errorf("failed to parse config: %w", err)
	}

	return &cfg, nil
}

func needsFix(joinAddr string, raftPort int, httpPort int) bool {
	if joinAddr == "" {
		return false
	}

	// Remove http:// or https:// prefix if present
	addr := joinAddr
	if strings.HasPrefix(addr, "http://") {
		addr = strings.TrimPrefix(addr, "http://")
	} else if strings.HasPrefix(addr, "https://") {
		addr = strings.TrimPrefix(addr, "https://")
	}

	// Parse host:port
	_, port, err := net.SplitHostPort(addr)
	if err != nil {
		return false // Can't parse, assume it's fine
	}

	// Check if port matches HTTP port (incorrect - should be Raft port)
	if port == fmt.Sprintf("%d", httpPort) {
		return true
	}

	// If it matches Raft port, it's correct
	if port == fmt.Sprintf("%d", raftPort) {
		return false
	}

	// Unknown port - assume it's fine
	return false
}

func parseJoinAddress(joinAddr string) (host, port string, err error) {
	// Remove http:// or https:// prefix if present
	addr := joinAddr
	if strings.HasPrefix(addr, "http://") {
		addr = strings.TrimPrefix(addr, "http://")
	} else if strings.HasPrefix(addr, "https://") {
		addr = strings.TrimPrefix(addr, "https://")
	}

	host, port, err = net.SplitHostPort(addr)
	if err != nil {
		return "", "", fmt.Errorf("invalid join address format: %w", err)
	}

	return host, port, nil
}

func updateConfigJoinAddress(configPath string, newJoinAddr string) error {
	// Read the file
	data, err := os.ReadFile(configPath)
	if err != nil {
		return fmt.Errorf("failed to read config file: %w", err)
	}

	// Parse YAML into a generic map to preserve structure
	var yamlData map[string]interface{}
	if err := yaml.Unmarshal(data, &yamlData); err != nil {
		return fmt.Errorf("failed to parse YAML: %w", err)
	}

	// Navigate to database.rqlite_join_address
	database, ok := yamlData["database"].(map[string]interface{})
	if !ok {
		return fmt.Errorf("database section not found in config")
	}

	database["rqlite_join_address"] = newJoinAddr

	// Write back to file
	updatedData, err := yaml.Marshal(yamlData)
	if err != nil {
		return fmt.Errorf("failed to marshal YAML: %w", err)
	}

	if err := os.WriteFile(configPath, updatedData, 0644); err != nil {
		return fmt.Errorf("failed to write config file: %w", err)
	}

	return nil
}

func expandDataDir(dataDir string) string {
	expanded := os.ExpandEnv(dataDir)
	if strings.HasPrefix(expanded, "~") {
		home, err := os.UserHomeDir()
		if err != nil {
			return expanded // Fallback to original
		}
		expanded = filepath.Join(home, expanded[1:])
	}
	return expanded
}

func cleanRaftState(raftDir string) error {
	if _, err := os.Stat(raftDir); os.IsNotExist(err) {
		return nil // Directory doesn't exist, nothing to clean
	}

	// Remove raft state files
	filesToRemove := []string{
		"peers.json",
		"peers.json.backup",
		"peers.info",
		"raft.db",
	}

	for _, file := range filesToRemove {
		filePath := filepath.Join(raftDir, file)
		if err := os.Remove(filePath); err != nil && !os.IsNotExist(err) {
			return fmt.Errorf("failed to remove %s: %w", filePath, err)
		}
	}

	return nil
}

func stopService(serviceName string) error {
	cmd := exec.Command("systemctl", "stop", serviceName)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("systemctl stop failed: %w", err)
	}
	return nil
}

func startService(serviceName string) error {
	cmd := exec.Command("systemctl", "start", serviceName)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("systemctl start failed: %w", err)
	}
	return nil
}
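For reference, the port check that drove the removed `fix` subcommand is easy to exercise in isolation. A self-contained sketch reproducing the deleted `needsFix` logic with the default ports from the help text (HTTP 5001, Raft 7001):

```go
package main

// Sketch of the port check performed by the deleted needsFix helper:
// strip an optional http(s):// prefix, split host:port, and flag the
// address only when it points at the HTTP port instead of the Raft port.
import (
	"fmt"
	"net"
	"strings"
)

func needsFix(joinAddr string, raftPort, httpPort int) bool {
	addr := strings.TrimPrefix(strings.TrimPrefix(joinAddr, "https://"), "http://")
	_, port, err := net.SplitHostPort(addr)
	if err != nil {
		return false // unparseable: assume it's fine
	}
	return port == fmt.Sprintf("%d", httpPort) // only the HTTP port is "wrong"
}

func main() {
	fmt.Println(needsFix("http://10.0.0.1:5001", 7001, 5001)) // true: HTTP port in -join
	fmt.Println(needsFix("10.0.0.1:7001", 7001, 5001))        // false: already the Raft port
	fmt.Println(needsFix("10.0.0.1:9999", 7001, 5001))        // false: unknown port assumed fine
}
```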
@ -1,243 +0,0 @@
package cli

import (
	"fmt"
	"os"
	"os/exec"
	"runtime"
	"strings"
)

// HandleServiceCommand handles systemd service management commands
func HandleServiceCommand(args []string) {
	if len(args) == 0 {
		showServiceHelp()
		return
	}

	if runtime.GOOS != "linux" {
		fmt.Fprintf(os.Stderr, "❌ Service commands are only supported on Linux with systemd\n")
		os.Exit(1)
	}

	subcommand := args[0]
	subargs := args[1:]

	switch subcommand {
	case "start":
		handleServiceStart(subargs)
	case "stop":
		handleServiceStop(subargs)
	case "restart":
		handleServiceRestart(subargs)
	case "status":
		handleServiceStatus(subargs)
	case "logs":
		handleServiceLogs(subargs)
	case "help":
		showServiceHelp()
	default:
		fmt.Fprintf(os.Stderr, "Unknown service subcommand: %s\n", subcommand)
		showServiceHelp()
		os.Exit(1)
	}
}

func showServiceHelp() {
	fmt.Printf("🔧 Service Management Commands\n\n")
	fmt.Printf("Usage: network-cli service <subcommand> <target> [options]\n\n")
	fmt.Printf("Subcommands:\n")
	fmt.Printf("  start <target>   - Start services\n")
	fmt.Printf("  stop <target>    - Stop services\n")
	fmt.Printf("  restart <target> - Restart services\n")
	fmt.Printf("  status <target>  - Show service status\n")
	fmt.Printf("  logs <target>    - View service logs\n\n")
	fmt.Printf("Targets:\n")
	fmt.Printf("  node    - DeBros node service\n")
	fmt.Printf("  gateway - DeBros gateway service\n")
	fmt.Printf("  all     - All DeBros services\n\n")
	fmt.Printf("Logs Options:\n")
	fmt.Printf("  --follow       - Follow logs in real-time (-f)\n")
	fmt.Printf("  --since=<time> - Show logs since time (e.g., '1h', '30m', '2d')\n")
	fmt.Printf("  -n <lines>     - Show last N lines\n\n")
	fmt.Printf("Examples:\n")
	fmt.Printf("  network-cli service start node\n")
	fmt.Printf("  network-cli service status all\n")
	fmt.Printf("  network-cli service restart gateway\n")
	fmt.Printf("  network-cli service logs node --follow\n")
	fmt.Printf("  network-cli service logs gateway --since=1h\n")
	fmt.Printf("  network-cli service logs node -n 100\n")
}

func getServices(target string) []string {
	switch target {
	case "node":
		return []string{"debros-node"}
	case "gateway":
		return []string{"debros-gateway"}
	case "all":
		return []string{"debros-node", "debros-gateway"}
	default:
		fmt.Fprintf(os.Stderr, "❌ Invalid target: %s (use: node, gateway, or all)\n", target)
		os.Exit(1)
		return nil
	}
}

func requireRoot() {
	if os.Geteuid() != 0 {
		fmt.Fprintf(os.Stderr, "❌ This command requires root privileges\n")
		fmt.Fprintf(os.Stderr, "   Run with: sudo network-cli service ...\n")
		os.Exit(1)
	}
}

func handleServiceStart(args []string) {
	if len(args) == 0 {
		fmt.Fprintf(os.Stderr, "Usage: network-cli service start <node|gateway|all>\n")
		os.Exit(1)
	}

	requireRoot()

	target := args[0]
	services := getServices(target)

	fmt.Printf("🚀 Starting services...\n")
	for _, service := range services {
		cmd := exec.Command("systemctl", "start", service)
		if err := cmd.Run(); err != nil {
			fmt.Fprintf(os.Stderr, "❌ Failed to start %s: %v\n", service, err)
			continue
		}
		fmt.Printf("   ✓ Started %s\n", service)
	}
}

func handleServiceStop(args []string) {
	if len(args) == 0 {
		fmt.Fprintf(os.Stderr, "Usage: network-cli service stop <node|gateway|all>\n")
		os.Exit(1)
	}

	requireRoot()

	target := args[0]
	services := getServices(target)

	fmt.Printf("⏹️ Stopping services...\n")
	for _, service := range services {
		cmd := exec.Command("systemctl", "stop", service)
		if err := cmd.Run(); err != nil {
			fmt.Fprintf(os.Stderr, "❌ Failed to stop %s: %v\n", service, err)
			continue
		}
		fmt.Printf("   ✓ Stopped %s\n", service)
	}
}

func handleServiceRestart(args []string) {
	if len(args) == 0 {
		fmt.Fprintf(os.Stderr, "Usage: network-cli service restart <node|gateway|all>\n")
		os.Exit(1)
	}

	requireRoot()

	target := args[0]
	services := getServices(target)

	fmt.Printf("🔄 Restarting services...\n")
	for _, service := range services {
		cmd := exec.Command("systemctl", "restart", service)
		if err := cmd.Run(); err != nil {
			fmt.Fprintf(os.Stderr, "❌ Failed to restart %s: %v\n", service, err)
			continue
		}
		fmt.Printf("   ✓ Restarted %s\n", service)
	}
}

func handleServiceStatus(args []string) {
	if len(args) == 0 {
		args = []string{"all"} // Default to all
	}

	target := args[0]
	services := getServices(target)

	fmt.Printf("📊 Service Status:\n\n")
	for _, service := range services {
		// Use systemctl is-active to get simple status
		cmd := exec.Command("systemctl", "is-active", service)
		output, _ := cmd.Output()
		status := strings.TrimSpace(string(output))

		emoji := "❌"
		if status == "active" {
			emoji = "✅"
		} else if status == "inactive" {
			emoji = "⚪"
		}

		fmt.Printf("%s %s: %s\n", emoji, service, status)

		// Show detailed status
		cmd = exec.Command("systemctl", "status", service, "--no-pager", "-l")
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		cmd.Run()
		fmt.Println()
	}
}

func handleServiceLogs(args []string) {
	if len(args) == 0 {
		fmt.Fprintf(os.Stderr, "Usage: network-cli service logs <node|gateway> [--follow] [--since=<time>] [-n <lines>]\n")
		os.Exit(1)
	}

	target := args[0]
	if target == "all" {
		fmt.Fprintf(os.Stderr, "❌ Cannot show logs for 'all' - specify 'node' or 'gateway'\n")
		os.Exit(1)
	}

	services := getServices(target)
	if len(services) == 0 {
		os.Exit(1)
	}

	service := services[0]

	// Parse options
	journalArgs := []string{"-u", service, "--no-pager"}

	for i := 1; i < len(args); i++ {
		arg := args[i]
		switch {
		case arg == "--follow" || arg == "-f":
			journalArgs = append(journalArgs, "-f")
		case strings.HasPrefix(arg, "--since="):
			since := strings.TrimPrefix(arg, "--since=")
			journalArgs = append(journalArgs, "--since="+since)
		case arg == "-n":
			if i+1 < len(args) {
				journalArgs = append(journalArgs, "-n", args[i+1])
				i++
			}
		}
	}

	fmt.Printf("📜 Logs for %s:\n\n", service)

	cmd := exec.Command("journalctl", journalArgs...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.Stdin = os.Stdin

	if err := cmd.Run(); err != nil {
		fmt.Fprintf(os.Stderr, "❌ Failed to show logs: %v\n", err)
		os.Exit(1)
	}
}
2529
pkg/cli/setup.go
2529
pkg/cli/setup.go
File diff suppressed because it is too large
173
pkg/environments/development/checks.go
Normal file
173
pkg/environments/development/checks.go
Normal file
@ -0,0 +1,173 @@
package development

import (
	"fmt"
	"net"
	"os/exec"
	"strings"
)

// Dependency represents an external binary dependency
type Dependency struct {
	Name        string
	Command     string
	MinVersion  string // Optional: if set, try to check version
	InstallHint string
}

// DependencyChecker handles dependency validation
type DependencyChecker struct {
	dependencies []Dependency
}

// NewDependencyChecker creates a new dependency checker
func NewDependencyChecker() *DependencyChecker {
	return &DependencyChecker{
		dependencies: []Dependency{
			{
				Name:        "IPFS",
				Command:     "ipfs",
				MinVersion:  "0.25.0",
				InstallHint: "Install with: brew install ipfs (macOS) or https://docs.ipfs.tech/install/command-line/",
			},
			{
				Name:        "IPFS Cluster Service",
				Command:     "ipfs-cluster-service",
				MinVersion:  "1.0.0",
				InstallHint: "Install with: go install github.com/ipfs-cluster/ipfs-cluster/cmd/ipfs-cluster-service@latest",
			},
			{
				Name:        "RQLite",
				Command:     "rqlited",
				InstallHint: "Install with: brew install rqlite (macOS) or https://github.com/rqlite/rqlite/releases",
			},
			{
				Name:        "Olric Server",
				Command:     "olric-server",
				InstallHint: "Install with: go install github.com/olric-data/olric/cmd/olric-server@v0.7.0",
			},
			{
				Name:        "npm (for Anyone)",
				Command:     "npm",
				InstallHint: "Install Node.js with: brew install node (macOS) or https://nodejs.org/",
			},
			{
				Name:        "OpenSSL",
				Command:     "openssl",
				InstallHint: "Install with: brew install openssl (macOS) - usually pre-installed on Linux",
			},
		},
	}
}

// CheckAll performs all dependency checks and returns a report
func (dc *DependencyChecker) CheckAll() ([]string, error) {
	var missing []string
	var hints []string

	for _, dep := range dc.dependencies {
		if _, err := exec.LookPath(dep.Command); err != nil {
			missing = append(missing, dep.Name)
			hints = append(hints, fmt.Sprintf("  %s: %s", dep.Name, dep.InstallHint))
		}
	}

	if len(missing) == 0 {
		return nil, nil // All OK
	}

	// Use a constant format string so stray '%' in hints can't garble the message
	return missing, fmt.Errorf("missing %d required dependencies:\n%s\n\nInstall them with:\n%s",
		len(missing), strings.Join(missing, ", "), strings.Join(hints, "\n"))
}

// PortChecker validates that required ports are available
type PortChecker struct {
	ports []int
}

// RequiredPorts defines all ports needed for dev environment
var RequiredPorts = []int{
	// LibP2P
	4001, 4002, 4003,
	// IPFS API
	4501, 4502, 4503,
	// RQLite HTTP
	5001, 5002, 5003,
	// RQLite Raft
	7001, 7002, 7003,
	// Gateway
	6001,
	// Olric
	3320, 3322,
	// Anon SOCKS
	9050,
	// IPFS Cluster
	9094, 9104, 9114,
	// IPFS Gateway
	8080, 8081, 8082,
}

// NewPortChecker creates a new port checker with required ports
func NewPortChecker() *PortChecker {
	return &PortChecker{
		ports: RequiredPorts,
	}
}

// CheckAll verifies all required ports are available
func (pc *PortChecker) CheckAll() ([]int, error) {
	var unavailable []int

	for _, port := range pc.ports {
		if !isPortAvailable(port) {
			unavailable = append(unavailable, port)
		}
	}

	if len(unavailable) == 0 {
		return nil, nil // All OK
	}

	return unavailable, fmt.Errorf("the following ports are unavailable: %v\n\nFree them or stop conflicting services and try again",
		unavailable)
}

// isPortAvailable checks if a TCP port is available for binding
func isPortAvailable(port int) bool {
	ln, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", port))
	if err != nil {
		return false
	}
	ln.Close()
	return true
}

// PortMap provides a human-readable mapping of ports to services
func PortMap() map[int]string {
	return map[int]string{
		4001: "Bootstrap P2P",
		4002: "Node2 P2P",
		4003: "Node3 P2P",
		4501: "Bootstrap IPFS API",
		4502: "Node2 IPFS API",
		4503: "Node3 IPFS API",
		5001: "Bootstrap RQLite HTTP",
		5002: "Node2 RQLite HTTP",
		5003: "Node3 RQLite HTTP",
		7001: "Bootstrap RQLite Raft",
		7002: "Node2 RQLite Raft",
		7003: "Node3 RQLite Raft",
		6001: "Gateway",
		3320: "Olric HTTP API",
		3322: "Olric Memberlist",
		9050: "Anon SOCKS Proxy",
		9094: "Bootstrap IPFS Cluster",
		9104: "Node2 IPFS Cluster",
		9114: "Node3 IPFS Cluster",
		8080: "Bootstrap IPFS Gateway",
		8081: "Node2 IPFS Gateway",
		8082: "Node3 IPFS Gateway",
	}
}
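A minimal sketch of how a caller such as `dev up` might run both preflight checks before starting anything. Only `NewDependencyChecker`, `NewPortChecker`, their `CheckAll` methods, and `PortMap` come from the file above; the surrounding wiring is assumed:

```go
package main

// Illustrative preflight wiring: fail fast on missing binaries, then name
// any busy ports using the human-readable port map from checks.go.
import (
	"fmt"
	"os"

	"github.com/DeBrosOfficial/network/pkg/environments/development"
)

func main() {
	if _, err := development.NewDependencyChecker().CheckAll(); err != nil {
		fmt.Fprintf(os.Stderr, "❌ %v\n", err)
		os.Exit(1)
	}

	busy, err := development.NewPortChecker().CheckAll()
	if err != nil {
		for _, port := range busy {
			fmt.Fprintf(os.Stderr, "  port %d in use (%s)\n", port, development.PortMap()[port])
		}
		os.Exit(1)
	}

	fmt.Println("✓ Preflight checks passed")
}
```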
91
pkg/environments/development/checks_test.go
Normal file
91
pkg/environments/development/checks_test.go
Normal file
@ -0,0 +1,91 @@
package development

import (
	"net"
	"testing"
)

func TestPortChecker(t *testing.T) {
	checker := NewPortChecker()

	if checker == nil {
		t.Fatal("NewPortChecker returned nil")
	}

	// Verify all required ports are defined
	if len(checker.ports) == 0 {
		t.Fatal("No ports defined in checker")
	}

	// Check that required port counts match expectations
	expectedPortCount := 22 // Based on RequiredPorts in checks.go
	if len(checker.ports) != expectedPortCount {
		t.Errorf("Expected %d ports, got %d", expectedPortCount, len(checker.ports))
	}
}

func TestPortMap(t *testing.T) {
	portMap := PortMap()

	if len(portMap) == 0 {
		t.Fatal("PortMap returned empty map")
	}

	// Check for key ports
	expectedPorts := []int{4001, 5001, 7001, 6001, 3320, 9050, 9094}
	for _, port := range expectedPorts {
		if _, exists := portMap[port]; !exists {
			t.Errorf("Expected port %d not found in PortMap", port)
		}
	}

	// Verify descriptions exist
	for port, desc := range portMap {
		if desc == "" {
			t.Errorf("Port %d has empty description", port)
		}
	}
}

func TestDependencyChecker(t *testing.T) {
	checker := NewDependencyChecker()

	if checker == nil {
		t.Fatal("NewDependencyChecker returned nil")
	}

	// Verify required dependencies are defined
	if len(checker.dependencies) == 0 {
		t.Fatal("No dependencies defined in checker")
	}

	// Expected minimum dependencies
	expectedDeps := []string{"ipfs", "rqlited", "olric-server", "npm"}
	for _, expected := range expectedDeps {
		found := false
		for _, dep := range checker.dependencies {
			if dep.Command == expected {
				found = true
				if dep.InstallHint == "" {
					t.Errorf("Dependency %s has no install hint", expected)
				}
				break
			}
		}
		if !found {
			t.Errorf("Expected dependency %s not found", expected)
		}
	}
}

func TestIsPortAvailable(t *testing.T) {
	// Test with a very high port that should be available
	highPort := 65432
	if !isPortAvailable(highPort) {
		t.Logf("Port %d may be in use (this is non-fatal for testing)", highPort)
	}

	// A port we already hold open must report as unavailable.
	// (Port 0 is not a valid negative case: the OS treats it as
	// "assign any free port", so listening on it succeeds.)
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("failed to reserve a port: %v", err)
	}
	defer ln.Close()
	busyPort := ln.Addr().(*net.TCPAddr).Port
	if isPortAvailable(busyPort) {
		t.Errorf("Port %d should not be available while held open", busyPort)
	}
}
270
pkg/environments/development/config.go
Normal file
270
pkg/environments/development/config.go
Normal file
@ -0,0 +1,270 @@
package development

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/DeBrosOfficial/network/pkg/encryption"
	"github.com/DeBrosOfficial/network/pkg/environments/templates"
)

// ConfigEnsurer handles all config file creation and validation
type ConfigEnsurer struct {
	debrosDir string
}

// NewConfigEnsurer creates a new config ensurer
func NewConfigEnsurer(debrosDir string) *ConfigEnsurer {
	return &ConfigEnsurer{
		debrosDir: debrosDir,
	}
}

// EnsureAll ensures all necessary config files and secrets exist
func (ce *ConfigEnsurer) EnsureAll() error {
	// Create directories
	if err := os.MkdirAll(ce.debrosDir, 0755); err != nil {
		return fmt.Errorf("failed to create .debros directory: %w", err)
	}

	if err := os.MkdirAll(filepath.Join(ce.debrosDir, "logs"), 0755); err != nil {
		return fmt.Errorf("failed to create logs directory: %w", err)
	}

	// Ensure shared secrets
	if err := ce.ensureSharedSecrets(); err != nil {
		return fmt.Errorf("failed to ensure shared secrets: %w", err)
	}

	// Ensure bootstrap config and identity
	if err := ce.ensureBootstrap(); err != nil {
		return fmt.Errorf("failed to ensure bootstrap: %w", err)
	}

	// Ensure node2 and node3 configs
	if err := ce.ensureNode2And3(); err != nil {
		return fmt.Errorf("failed to ensure nodes: %w", err)
	}

	// Ensure gateway config
	if err := ce.ensureGateway(); err != nil {
		return fmt.Errorf("failed to ensure gateway: %w", err)
	}

	// Ensure Olric config
	if err := ce.ensureOlric(); err != nil {
		return fmt.Errorf("failed to ensure olric: %w", err)
	}

	return nil
}

// ensureSharedSecrets creates cluster secret and swarm key if they don't exist
func (ce *ConfigEnsurer) ensureSharedSecrets() error {
	secretPath := filepath.Join(ce.debrosDir, "cluster-secret")
	if _, err := os.Stat(secretPath); os.IsNotExist(err) {
		secret := generateRandomHex(64) // 64 hex chars = 32 bytes
		if err := os.WriteFile(secretPath, []byte(secret), 0600); err != nil {
			return fmt.Errorf("failed to write cluster secret: %w", err)
		}
		fmt.Printf("✓ Generated cluster secret\n")
	}

	swarmKeyPath := filepath.Join(ce.debrosDir, "swarm.key")
	if _, err := os.Stat(swarmKeyPath); os.IsNotExist(err) {
		keyHex := strings.ToUpper(generateRandomHex(64))
		content := fmt.Sprintf("/key/swarm/psk/1.0.0/\n/base16/\n%s\n", keyHex)
		if err := os.WriteFile(swarmKeyPath, []byte(content), 0600); err != nil {
			return fmt.Errorf("failed to write swarm key: %w", err)
		}
		fmt.Printf("✓ Generated IPFS swarm key\n")
	}

	return nil
}

// ensureBootstrap creates bootstrap identity and config
func (ce *ConfigEnsurer) ensureBootstrap() error {
	bootstrapDir := filepath.Join(ce.debrosDir, "bootstrap")
	identityPath := filepath.Join(bootstrapDir, "identity.key")

	// Create identity if missing
	var bootstrapPeerID string
	if _, err := os.Stat(identityPath); os.IsNotExist(err) {
		if err := os.MkdirAll(bootstrapDir, 0755); err != nil {
			return fmt.Errorf("failed to create bootstrap directory: %w", err)
		}

		info, err := encryption.GenerateIdentity()
		if err != nil {
			return fmt.Errorf("failed to generate bootstrap identity: %w", err)
		}

		if err := encryption.SaveIdentity(info, identityPath); err != nil {
			return fmt.Errorf("failed to save bootstrap identity: %w", err)
		}

		bootstrapPeerID = info.PeerID.String()
		fmt.Printf("✓ Generated bootstrap identity (Peer ID: %s)\n", bootstrapPeerID)
	} else {
		info, err := encryption.LoadIdentity(identityPath)
		if err != nil {
			return fmt.Errorf("failed to load bootstrap identity: %w", err)
		}
		bootstrapPeerID = info.PeerID.String()
	}

	// Ensure bootstrap config - always regenerate to ensure template fixes are applied
	bootstrapConfigPath := filepath.Join(ce.debrosDir, "bootstrap.yaml")
	data := templates.BootstrapConfigData{
		NodeID:         "bootstrap",
		P2PPort:        4001,
		DataDir:        bootstrapDir,
		RQLiteHTTPPort: 5001,
		RQLiteRaftPort: 7001,
		ClusterAPIPort: 9094,
		IPFSAPIPort:    4501,
	}

	config, err := templates.RenderBootstrapConfig(data)
	if err != nil {
		return fmt.Errorf("failed to render bootstrap config: %w", err)
	}

	if err := os.WriteFile(bootstrapConfigPath, []byte(config), 0644); err != nil {
		return fmt.Errorf("failed to write bootstrap config: %w", err)
	}

	fmt.Printf("✓ Generated bootstrap.yaml\n")

	return nil
}

// ensureNode2And3 creates node2 and node3 configs
func (ce *ConfigEnsurer) ensureNode2And3() error {
	// Get bootstrap multiaddr for join
	bootstrapInfo, err := encryption.LoadIdentity(filepath.Join(ce.debrosDir, "bootstrap", "identity.key"))
	if err != nil {
		return fmt.Errorf("failed to load bootstrap identity: %w", err)
	}

	bootstrapMultiaddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/4001/p2p/%s", bootstrapInfo.PeerID.String())

	nodes := []struct {
		name           string
		p2pPort        int
		rqliteHTTPPort int
		rqliteRaftPort int
		clusterAPIPort int
		ipfsAPIPort    int
	}{
		{"node2", 4002, 5002, 7002, 9104, 4502},
		{"node3", 4003, 5003, 7003, 9114, 4503},
	}

	for _, node := range nodes {
		nodeDir := filepath.Join(ce.debrosDir, node.name)
		configPath := filepath.Join(ce.debrosDir, fmt.Sprintf("%s.yaml", node.name))

		// Always regenerate to ensure template fixes are applied
		if err := os.MkdirAll(nodeDir, 0755); err != nil {
			return fmt.Errorf("failed to create %s directory: %w", node.name, err)
		}

		data := templates.NodeConfigData{
			NodeID:            node.name,
			P2PPort:           node.p2pPort,
			DataDir:           nodeDir,
			RQLiteHTTPPort:    node.rqliteHTTPPort,
			RQLiteRaftPort:    node.rqliteRaftPort,
			RQLiteJoinAddress: "localhost:7001",
			BootstrapPeers:    []string{bootstrapMultiaddr},
			ClusterAPIPort:    node.clusterAPIPort,
			IPFSAPIPort:       node.ipfsAPIPort,
		}

		config, err := templates.RenderNodeConfig(data)
		if err != nil {
			return fmt.Errorf("failed to render %s config: %w", node.name, err)
		}

		if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
			return fmt.Errorf("failed to write %s config: %w", node.name, err)
		}

		fmt.Printf("✓ Generated %s.yaml\n", node.name)
	}

	return nil
}

// ensureGateway creates gateway config
func (ce *ConfigEnsurer) ensureGateway() error {
	configPath := filepath.Join(ce.debrosDir, "gateway.yaml")

	// Always regenerate to ensure template fixes are applied
	// Get bootstrap multiaddr
	bootstrapInfo, err := encryption.LoadIdentity(filepath.Join(ce.debrosDir, "bootstrap", "identity.key"))
	if err != nil {
		return fmt.Errorf("failed to load bootstrap identity: %w", err)
	}

	bootstrapMultiaddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/4001/p2p/%s", bootstrapInfo.PeerID.String())

	data := templates.GatewayConfigData{
		ListenPort:     6001,
		BootstrapPeers: []string{bootstrapMultiaddr},
		OlricServers:   []string{"127.0.0.1:3320"},
		ClusterAPIPort: 9094,
		IPFSAPIPort:    4501,
	}

	config, err := templates.RenderGatewayConfig(data)
	if err != nil {
		return fmt.Errorf("failed to render gateway config: %w", err)
	}

	if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
		return fmt.Errorf("failed to write gateway config: %w", err)
	}

	fmt.Printf("✓ Generated gateway.yaml\n")
	return nil
}

// ensureOlric creates Olric config
func (ce *ConfigEnsurer) ensureOlric() error {
	configPath := filepath.Join(ce.debrosDir, "olric-config.yaml")

	// Always regenerate to ensure template fixes are applied
	data := templates.OlricConfigData{
		BindAddr:       "127.0.0.1",
		HTTPPort:       3320,
		MemberlistPort: 3322,
	}

	config, err := templates.RenderOlricConfig(data)
	if err != nil {
		return fmt.Errorf("failed to render olric config: %w", err)
	}

	if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
		return fmt.Errorf("failed to write olric config: %w", err)
	}

	fmt.Printf("✓ Generated olric-config.yaml\n")
	return nil
}

// generateRandomHex generates a random hex string of specified length
func generateRandomHex(length int) string {
	bytes := make([]byte, length/2)
	if _, err := rand.Read(bytes); err != nil {
		panic(fmt.Sprintf("failed to generate random bytes: %v", err))
	}
	return hex.EncodeToString(bytes)
}
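A minimal sketch of a call site for the ensurer, assuming the same `~/.debros` layout used throughout this commit; only `NewConfigEnsurer` and `EnsureAll` come from the file above:

```go
package main

// Illustrative call site: regenerate dev configs and secrets under
// ~/.debros before starting any services.
import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/DeBrosOfficial/network/pkg/environments/development"
)

func main() {
	home, err := os.UserHomeDir()
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to get home directory: %v\n", err)
		os.Exit(1)
	}

	ce := development.NewConfigEnsurer(filepath.Join(home, ".debros"))
	if err := ce.EnsureAll(); err != nil {
		fmt.Fprintf(os.Stderr, "config generation failed: %v\n", err)
		os.Exit(1)
	}
}
```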
217
pkg/environments/development/health.go
Normal file
217
pkg/environments/development/health.go
Normal file
@ -0,0 +1,217 @@
package development

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os/exec"
	"strings"
	"time"
)

// HealthCheckResult represents the result of a health check
type HealthCheckResult struct {
	Name    string
	Healthy bool
	Details string
}

// IPFSHealthCheck verifies IPFS peer connectivity
func (pm *ProcessManager) IPFSHealthCheck(ctx context.Context, nodes []ipfsNodeInfo) HealthCheckResult {
	result := HealthCheckResult{Name: "IPFS Peers"}

	healthyCount := 0
	for _, node := range nodes {
		cmd := exec.CommandContext(ctx, "ipfs", "swarm", "peers", "--repo-dir="+node.ipfsPath)
		output, err := cmd.CombinedOutput()
		if err != nil {
			result.Details += fmt.Sprintf("%s: error getting peers (%v); ", node.name, err)
			continue
		}

		// Split by newlines and filter empty lines
		peerLines := strings.Split(strings.TrimSpace(string(output)), "\n")
		peerCount := 0
		for _, line := range peerLines {
			if strings.TrimSpace(line) != "" {
				peerCount++
			}
		}

		if peerCount < 2 {
			result.Details += fmt.Sprintf("%s: only %d peers (want 2+); ", node.name, peerCount)
		} else {
			result.Details += fmt.Sprintf("%s: %d peers; ", node.name, peerCount)
			healthyCount++
		}
	}

	result.Healthy = healthyCount == len(nodes)
	return result
}

// RQLiteHealthCheck verifies RQLite cluster formation
func (pm *ProcessManager) RQLiteHealthCheck(ctx context.Context) HealthCheckResult {
	result := HealthCheckResult{Name: "RQLite Cluster"}

	// Check bootstrap node
	bootstrapStatus := pm.checkRQLiteNode(ctx, "bootstrap", 5001)
	if !bootstrapStatus.Healthy {
		result.Details += fmt.Sprintf("bootstrap: %s; ", bootstrapStatus.Details)
		return result
	}

	// Check node2 and node3
	node2Status := pm.checkRQLiteNode(ctx, "node2", 5002)
	node3Status := pm.checkRQLiteNode(ctx, "node3", 5003)

	if node2Status.Healthy && node3Status.Healthy {
		result.Healthy = true
		result.Details = fmt.Sprintf("bootstrap: leader ok; node2: %s; node3: %s", node2Status.Details, node3Status.Details)
	} else {
		result.Details = fmt.Sprintf("bootstrap: ok; node2: %s; node3: %s", node2Status.Details, node3Status.Details)
	}

	return result
}

// checkRQLiteNode queries a single RQLite node's status
func (pm *ProcessManager) checkRQLiteNode(ctx context.Context, name string, httpPort int) HealthCheckResult {
	result := HealthCheckResult{Name: fmt.Sprintf("RQLite-%s", name)}

	urlStr := fmt.Sprintf("http://localhost:%d/status", httpPort)
	client := &http.Client{Timeout: 2 * time.Second}
	resp, err := client.Get(urlStr)
	if err != nil {
		result.Details = fmt.Sprintf("connection failed: %v", err)
		return result
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		result.Details = fmt.Sprintf("HTTP %d", resp.StatusCode)
		return result
	}

	var status map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
		result.Details = fmt.Sprintf("decode error: %v", err)
		return result
	}

	// Check the store.raft structure (RQLite 8 format)
	store, ok := status["store"].(map[string]interface{})
	if !ok {
		result.Details = "store data not found"
		return result
	}

	raft, ok := store["raft"].(map[string]interface{})
	if !ok {
		result.Details = "raft data not found"
		return result
	}

	// Check if we have a leader
	leader, hasLeader := raft["leader"].(string)
	if hasLeader && leader != "" {
		result.Healthy = true
		result.Details = "cluster member with leader elected"
		return result
	}

	// Check node state - accept both Leader and Follower
	if state, ok := raft["state"].(string); ok {
		if state == "Leader" {
			result.Healthy = true
			result.Details = "this node is leader"
			return result
		}
		if state == "Follower" {
			result.Healthy = true
			result.Details = "this node is follower in cluster"
			return result
		}
		result.Details = fmt.Sprintf("state: %s", state)
		return result
	}

	result.Details = "not yet connected"
	return result
}

// LibP2PHealthCheck verifies that network nodes have peer connections
func (pm *ProcessManager) LibP2PHealthCheck(ctx context.Context) HealthCheckResult {
	result := HealthCheckResult{Name: "LibP2P/Node Peers"}

	// Check that at least 2 nodes are part of the RQLite cluster (implies peer connectivity)
	// and that they can communicate via LibP2P (which they use for cluster discovery)
	healthyNodes := 0
	for i, name := range []string{"bootstrap", "node2", "node3"} {
		httpPort := 5001 + i
		status := pm.checkRQLiteNode(ctx, name, httpPort)
		if status.Healthy {
			healthyNodes++
			result.Details += fmt.Sprintf("%s: connected; ", name)
		} else {
			result.Details += fmt.Sprintf("%s: %s; ", name, status.Details)
		}
	}

	// Healthy if at least 2 nodes report connectivity (including bootstrap)
	result.Healthy = healthyNodes >= 2
	return result
}

// HealthCheckWithRetry performs a health check with retry logic
func (pm *ProcessManager) HealthCheckWithRetry(ctx context.Context, nodes []ipfsNodeInfo, retries int, retryInterval time.Duration, timeout time.Duration) bool {
	fmt.Fprintf(pm.logWriter, "\n⚕️ Validating cluster health...\n")

	deadlineCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	for attempt := 1; attempt <= retries; attempt++ {
		// Perform all checks
		ipfsResult := pm.IPFSHealthCheck(deadlineCtx, nodes)
		rqliteResult := pm.RQLiteHealthCheck(deadlineCtx)
		libp2pResult := pm.LibP2PHealthCheck(deadlineCtx)

		// Log results
		if attempt == 1 || attempt == retries || (attempt%3 == 0) {
			fmt.Fprintf(pm.logWriter, "  Attempt %d/%d:\n", attempt, retries)
			pm.logHealthCheckResult(pm.logWriter, "    ", ipfsResult)
			pm.logHealthCheckResult(pm.logWriter, "    ", rqliteResult)
			pm.logHealthCheckResult(pm.logWriter, "    ", libp2pResult)
		}

		// All checks must pass
		if ipfsResult.Healthy && rqliteResult.Healthy && libp2pResult.Healthy {
			fmt.Fprintf(pm.logWriter, "\n✓ All health checks passed!\n")
			return true
		}

		if attempt < retries {
			select {
			case <-time.After(retryInterval):
				continue
			case <-deadlineCtx.Done():
				fmt.Fprintf(pm.logWriter, "\n❌ Health check timeout reached\n")
				return false
			}
		}
	}

	fmt.Fprintf(pm.logWriter, "\n❌ Health checks failed after %d attempts\n", retries)
	return false
}

// logHealthCheckResult logs a single health check result
func (pm *ProcessManager) logHealthCheckResult(w io.Writer, indent string, result HealthCheckResult) {
	status := "❌"
	if result.Healthy {
		status = "✓"
	}
	fmt.Fprintf(w, "%s%s %s: %s\n", indent, status, result.Name, result.Details)
}
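For reference, `checkRQLiteNode` above navigates a nested `store.raft` object in rqlite's `/status` response. A trimmed sketch of that shape and the fields the checker reads (the payload is illustrative, not a complete `/status` response):

```go
package main

// Demonstrates the nested JSON shape checkRQLiteNode navigates:
// status["store"]["raft"]["leader"/"state"]. The payload is a trimmed
// illustration of the rqlite 8 format parsed above.
import (
	"encoding/json"
	"fmt"
)

func main() {
	payload := []byte(`{"store":{"raft":{"leader":"127.0.0.1:7001","state":"Follower"}}}`)

	var status map[string]interface{}
	if err := json.Unmarshal(payload, &status); err != nil {
		panic(err)
	}

	store := status["store"].(map[string]interface{})
	raft := store["raft"].(map[string]interface{})

	// A non-empty leader means the cluster has formed; "Leader" and
	// "Follower" states are both treated as healthy by the checker.
	fmt.Println("leader:", raft["leader"], "state:", raft["state"])
}
```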
716
pkg/environments/development/runner.go
Normal file
716
pkg/environments/development/runner.go
Normal file
@ -0,0 +1,716 @@
|
||||
package development
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ProcessManager manages all dev environment processes
|
||||
type ProcessManager struct {
|
||||
debrosDir string
|
||||
pidsDir string
|
||||
processes map[string]*ManagedProcess
|
||||
mutex sync.Mutex
|
||||
logWriter io.Writer
|
||||
}
|
||||
|
||||
// ManagedProcess tracks a running process
|
||||
type ManagedProcess struct {
|
||||
Name string
|
||||
PID int
|
||||
StartTime time.Time
|
||||
LogPath string
|
||||
}
|
||||
|
||||
// NewProcessManager creates a new process manager
|
||||
func NewProcessManager(debrosDir string, logWriter io.Writer) *ProcessManager {
|
||||
pidsDir := filepath.Join(debrosDir, ".pids")
|
||||
os.MkdirAll(pidsDir, 0755)
|
||||
|
||||
return &ProcessManager{
|
||||
debrosDir: debrosDir,
|
||||
pidsDir: pidsDir,
|
||||
processes: make(map[string]*ManagedProcess),
|
||||
logWriter: logWriter,
|
||||
}
|
||||
}
|
||||
|
||||
// StartAll starts all development services
|
||||
func (pm *ProcessManager) StartAll(ctx context.Context) error {
|
||||
fmt.Fprintf(pm.logWriter, "\n🚀 Starting development environment...\n\n")
|
||||
|
||||
// Define IPFS nodes for later use in health checks
|
||||
ipfsNodes := []ipfsNodeInfo{
|
||||
{"bootstrap", filepath.Join(pm.debrosDir, "bootstrap/ipfs/repo"), 4501, 4101, 7501, ""},
|
||||
{"node2", filepath.Join(pm.debrosDir, "node2/ipfs/repo"), 4502, 4102, 7502, ""},
|
||||
{"node3", filepath.Join(pm.debrosDir, "node3/ipfs/repo"), 4503, 4103, 7503, ""},
|
||||
}
|
||||
|
||||
// Start in order of dependencies
|
||||
services := []struct {
|
||||
name string
|
||||
fn func(context.Context) error
|
||||
}{
|
||||
{"IPFS", pm.startIPFS},
|
||||
{"IPFS Cluster", pm.startIPFSCluster},
|
||||
{"Olric", pm.startOlric},
|
||||
{"Anon", pm.startAnon},
|
||||
{"Bootstrap Node", pm.startBootstrapNode},
|
||||
{"Node2", pm.startNode2},
|
||||
{"Node3", pm.startNode3},
|
||||
{"Gateway", pm.startGateway},
|
||||
}
|
||||
|
||||
for _, svc := range services {
|
||||
if err := svc.fn(ctx); err != nil {
|
||||
fmt.Fprintf(pm.logWriter, "⚠️ Failed to start %s: %v\n", svc.name, err)
|
||||
// Continue starting others, don't fail
|
||||
}
|
||||
}
|
||||
|
||||
// Run health checks with retries before declaring success
|
||||
const (
|
||||
healthCheckRetries = 20
|
||||
healthCheckInterval = 3 * time.Second
|
||||
healthCheckTimeout = 70 * time.Second
|
||||
)
|
||||
|
||||
if !pm.HealthCheckWithRetry(ctx, ipfsNodes, healthCheckRetries, healthCheckInterval, healthCheckTimeout) {
|
||||
fmt.Fprintf(pm.logWriter, "\n❌ Development environment failed health checks - stopping all services\n")
|
||||
pm.StopAll(ctx)
|
||||
return fmt.Errorf("cluster health checks failed - services stopped")
|
||||
}
|
||||
|
||||
fmt.Fprintf(pm.logWriter, "\n✅ Development environment started!\n\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
// StopAll stops all running processes
func (pm *ProcessManager) StopAll(ctx context.Context) error {
    fmt.Fprintf(pm.logWriter, "\n🛑 Stopping development environment...\n")

    services := []string{
        "gateway",
        "node3",
        "node2",
        "bootstrap",
        "olric",
        "ipfs-cluster-node3",
        "ipfs-cluster-node2",
        "ipfs-cluster-bootstrap",
        "rqlite-node3",
        "rqlite-node2",
        "rqlite-bootstrap",
        "ipfs-node3",
        "ipfs-node2",
        "ipfs-bootstrap",
        "anon",
    }

    for _, svc := range services {
        pm.stopProcess(svc)
    }

    fmt.Fprintf(pm.logWriter, "✓ All services stopped\n\n")
    return nil
}
// Status reports the status of all services
func (pm *ProcessManager) Status(ctx context.Context) {
    fmt.Fprintf(pm.logWriter, "\n📊 Development Environment Status\n")
    fmt.Fprintf(pm.logWriter, "================================\n\n")

    // pidName must match the names the start functions use when writing PID
    // files, otherwise every service reports as stopped.
    services := []struct {
        name    string
        pidName string
        ports   []int
    }{
        {"Bootstrap IPFS", "ipfs-bootstrap", []int{4501, 4101}},
        {"Bootstrap RQLite", "rqlite-bootstrap", []int{5001, 7001}},
        {"Node2 IPFS", "ipfs-node2", []int{4502, 4102}},
        {"Node2 RQLite", "rqlite-node2", []int{5002, 7002}},
        {"Node3 IPFS", "ipfs-node3", []int{4503, 4103}},
        {"Node3 RQLite", "rqlite-node3", []int{5003, 7003}},
        {"Bootstrap Cluster", "ipfs-cluster-bootstrap", []int{9094}},
        {"Node2 Cluster", "ipfs-cluster-node2", []int{9104}},
        {"Node3 Cluster", "ipfs-cluster-node3", []int{9114}},
        {"Bootstrap Node (P2P)", "bootstrap", []int{4001}},
        {"Node2 (P2P)", "node2", []int{4002}},
        {"Node3 (P2P)", "node3", []int{4003}},
        {"Gateway", "gateway", []int{6001}},
        {"Olric", "olric", []int{3320, 3322}},
        {"Anon SOCKS", "anon", []int{9050}},
    }

    for _, svc := range services {
        pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("%s.pid", svc.pidName))
        running := false
        if pidBytes, err := os.ReadFile(pidPath); err == nil {
            pid, _ := strconv.Atoi(strings.TrimSpace(string(pidBytes)))
            if checkProcessRunning(pid) {
                running = true
            }
        }

        status := "❌ stopped"
        if running {
            status = "✅ running"
        }

        portStr := fmt.Sprintf("ports: %v", svc.ports)
        fmt.Fprintf(pm.logWriter, "  %-25s %s (%s)\n", svc.name, status, portStr)
    }

    fmt.Fprintf(pm.logWriter, "\nConfiguration files in %s:\n", pm.debrosDir)
    files := []string{"bootstrap.yaml", "node2.yaml", "node3.yaml", "gateway.yaml", "olric-config.yaml"}
    for _, f := range files {
        path := filepath.Join(pm.debrosDir, f)
        if _, err := os.Stat(path); err == nil {
            fmt.Fprintf(pm.logWriter, "  ✓ %s\n", f)
        } else {
            fmt.Fprintf(pm.logWriter, "  ✗ %s\n", f)
        }
    }

    fmt.Fprintf(pm.logWriter, "\nLogs directory: %s/logs\n\n", pm.debrosDir)
}
// Helper functions for starting individual services

// ipfsNodeInfo holds information about an IPFS node for peer discovery
type ipfsNodeInfo struct {
    name        string
    ipfsPath    string
    apiPort     int
    swarmPort   int
    gatewayPort int
    peerID      string
}

// readIPFSConfigValue reads a single config value from an IPFS repo without the daemon running
func readIPFSConfigValue(ctx context.Context, repoPath string, key string) (string, error) {
    configPath := filepath.Join(repoPath, "config")
    data, err := os.ReadFile(configPath)
    if err != nil {
        return "", fmt.Errorf("failed to read IPFS config: %w", err)
    }

    // Simple line-based scan of the JSON config - only works for string values
    lines := strings.Split(string(data), "\n")
    for _, line := range lines {
        line = strings.TrimSpace(line)
        if strings.Contains(line, key) {
            // Extract the value after the colon
            parts := strings.SplitN(line, ":", 2)
            if len(parts) == 2 {
                value := strings.TrimSpace(parts[1])
                value = strings.Trim(value, `",`)
                if value != "" {
                    return value, nil
                }
            }
        }
    }

    return "", fmt.Errorf("key %s not found in IPFS config", key)
}

// seedIPFSPeersWithHTTP configures each IPFS node to bootstrap with its local peers using the HTTP API
func (pm *ProcessManager) seedIPFSPeersWithHTTP(ctx context.Context, nodes []ipfsNodeInfo) error {
    fmt.Fprintf(pm.logWriter, "  Seeding IPFS local bootstrap peers via HTTP API...\n")

    // Wait for all IPFS daemons to be ready before trying to configure them
    for _, node := range nodes {
        if err := pm.waitIPFSReady(ctx, node); err != nil {
            fmt.Fprintf(pm.logWriter, "  Warning: failed to wait for IPFS readiness for %s: %v\n", node.name, err)
        }
    }

    // For each node, clear default bootstrap and add local peers via HTTP
    for i, node := range nodes {
        // Clear bootstrap peers
        httpURL := fmt.Sprintf("http://127.0.0.1:%d/api/v0/bootstrap/rm?all=true", node.apiPort)
        if err := pm.ipfsHTTPCall(ctx, httpURL, "POST"); err != nil {
            fmt.Fprintf(pm.logWriter, "  Warning: failed to clear bootstrap for %s: %v\n", node.name, err)
        }

        // Add other nodes as bootstrap peers
        for j, otherNode := range nodes {
            if i == j {
                continue // Skip self
            }

            multiaddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d/p2p/%s", otherNode.swarmPort, otherNode.peerID)
            httpURL := fmt.Sprintf("http://127.0.0.1:%d/api/v0/bootstrap/add?arg=%s", node.apiPort, url.QueryEscape(multiaddr))
            if err := pm.ipfsHTTPCall(ctx, httpURL, "POST"); err != nil {
                fmt.Fprintf(pm.logWriter, "  Warning: failed to add bootstrap peer for %s: %v\n", node.name, err)
            }
        }
    }

    return nil
}
// waitIPFSReady polls the IPFS daemon's HTTP API until it's ready
func (pm *ProcessManager) waitIPFSReady(ctx context.Context, node ipfsNodeInfo) error {
    maxRetries := 30
    retryInterval := 500 * time.Millisecond

    for attempt := 0; attempt < maxRetries; attempt++ {
        httpURL := fmt.Sprintf("http://127.0.0.1:%d/api/v0/version", node.apiPort)
        if err := pm.ipfsHTTPCall(ctx, httpURL, "POST"); err == nil {
            return nil // IPFS is ready
        }

        select {
        case <-time.After(retryInterval):
            continue
        case <-ctx.Done():
            return ctx.Err()
        }
    }

    return fmt.Errorf("IPFS daemon %s did not become ready after %s", node.name, time.Duration(maxRetries)*retryInterval)
}
// ipfsHTTPCall makes an HTTP call to the IPFS API
func (pm *ProcessManager) ipfsHTTPCall(ctx context.Context, urlStr string, method string) error {
    client := &http.Client{Timeout: 5 * time.Second}
    req, err := http.NewRequestWithContext(ctx, method, urlStr, nil)
    if err != nil {
        return fmt.Errorf("failed to create request: %w", err)
    }

    resp, err := client.Do(req)
    if err != nil {
        return fmt.Errorf("HTTP call failed: %w", err)
    }
    defer resp.Body.Close()

    if resp.StatusCode >= 400 {
        body, _ := io.ReadAll(resp.Body)
        return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body))
    }

    return nil
}
func (pm *ProcessManager) startIPFS(ctx context.Context) error {
    nodes := []ipfsNodeInfo{
        {"bootstrap", filepath.Join(pm.debrosDir, "bootstrap/ipfs/repo"), 4501, 4101, 7501, ""},
        {"node2", filepath.Join(pm.debrosDir, "node2/ipfs/repo"), 4502, 4102, 7502, ""},
        {"node3", filepath.Join(pm.debrosDir, "node3/ipfs/repo"), 4503, 4103, 7503, ""},
    }

    // Phase 1: Initialize repos and configure addresses
    for i := range nodes {
        os.MkdirAll(nodes[i].ipfsPath, 0755)

        // Initialize IPFS if needed
        if _, err := os.Stat(filepath.Join(nodes[i].ipfsPath, "config")); os.IsNotExist(err) {
            fmt.Fprintf(pm.logWriter, "  Initializing IPFS (%s)...\n", nodes[i].name)
            cmd := exec.CommandContext(ctx, "ipfs", "init", "--profile=server", "--repo-dir="+nodes[i].ipfsPath)
            if _, err := cmd.CombinedOutput(); err != nil {
                fmt.Fprintf(pm.logWriter, "  Warning: ipfs init failed: %v\n", err)
            }

            // Copy swarm key
            swarmKeyPath := filepath.Join(pm.debrosDir, "swarm.key")
            if data, err := os.ReadFile(swarmKeyPath); err == nil {
                os.WriteFile(filepath.Join(nodes[i].ipfsPath, "swarm.key"), data, 0600)
            }
        }

        // Always reapply address settings to ensure correct ports (before daemon starts)
        apiAddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", nodes[i].apiPort)
        gatewayAddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", nodes[i].gatewayPort)
        swarmAddrs := fmt.Sprintf("[\"/ip4/0.0.0.0/tcp/%d\", \"/ip6/::/tcp/%d\"]", nodes[i].swarmPort, nodes[i].swarmPort)

        if err := exec.CommandContext(ctx, "ipfs", "config", "--repo-dir="+nodes[i].ipfsPath, "Addresses.API", apiAddr).Run(); err != nil {
            fmt.Fprintf(pm.logWriter, "  Warning: failed to set API address: %v\n", err)
        }
        if err := exec.CommandContext(ctx, "ipfs", "config", "--repo-dir="+nodes[i].ipfsPath, "Addresses.Gateway", gatewayAddr).Run(); err != nil {
            fmt.Fprintf(pm.logWriter, "  Warning: failed to set Gateway address: %v\n", err)
        }
        if err := exec.CommandContext(ctx, "ipfs", "config", "--repo-dir="+nodes[i].ipfsPath, "--json", "Addresses.Swarm", swarmAddrs).Run(); err != nil {
            fmt.Fprintf(pm.logWriter, "  Warning: failed to set Swarm addresses: %v\n", err)
        }

        // Read peer ID from config BEFORE daemon starts
        peerID, err := readIPFSConfigValue(ctx, nodes[i].ipfsPath, "PeerID")
        if err != nil {
            fmt.Fprintf(pm.logWriter, "  Warning: failed to read peer ID for %s: %v\n", nodes[i].name, err)
        } else {
            nodes[i].peerID = peerID
            fmt.Fprintf(pm.logWriter, "  Peer ID for %s: %s\n", nodes[i].name, peerID)
        }
    }

    // Phase 2: Start all IPFS daemons
    for i := range nodes {
        pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("ipfs-%s.pid", nodes[i].name))
        logPath := filepath.Join(pm.debrosDir, "logs", fmt.Sprintf("ipfs-%s.log", nodes[i].name))

        cmd := exec.CommandContext(ctx, "ipfs", "daemon", "--enable-pubsub-experiment", "--repo-dir="+nodes[i].ipfsPath)
        logFile, _ := os.Create(logPath)
        cmd.Stdout = logFile
        cmd.Stderr = logFile

        if err := cmd.Start(); err != nil {
            return fmt.Errorf("failed to start ipfs-%s: %w", nodes[i].name, err)
        }

        os.WriteFile(pidPath, []byte(fmt.Sprintf("%d", cmd.Process.Pid)), 0644)
        pm.processes[fmt.Sprintf("ipfs-%s", nodes[i].name)] = &ManagedProcess{
            Name:      fmt.Sprintf("ipfs-%s", nodes[i].name),
            PID:       cmd.Process.Pid,
            StartTime: time.Now(),
            LogPath:   logPath,
        }

        fmt.Fprintf(pm.logWriter, "✓ IPFS (%s) started (PID: %d, API: %d, Swarm: %d)\n", nodes[i].name, cmd.Process.Pid, nodes[i].apiPort, nodes[i].swarmPort)
    }

    time.Sleep(2 * time.Second)

    // Phase 3: Seed IPFS peers via HTTP API after all daemons are running
    if err := pm.seedIPFSPeersWithHTTP(ctx, nodes); err != nil {
        fmt.Fprintf(pm.logWriter, "⚠️ Failed to seed IPFS peers: %v\n", err)
    }

    return nil
}
func (pm *ProcessManager) startIPFSCluster(ctx context.Context) error {
    nodes := []struct {
        name        string
        clusterPath string
        restAPIPort int
        clusterPort int
        ipfsPort    int
    }{
        {"bootstrap", filepath.Join(pm.debrosDir, "bootstrap/ipfs-cluster"), 9094, 9096, 4501},
        {"node2", filepath.Join(pm.debrosDir, "node2/ipfs-cluster"), 9104, 9106, 4502},
        {"node3", filepath.Join(pm.debrosDir, "node3/ipfs-cluster"), 9114, 9116, 4503},
    }

    // Wait for all IPFS daemons to be ready before starting cluster services
    fmt.Fprintf(pm.logWriter, "  Waiting for IPFS daemons to be ready...\n")
    ipfsNodes := []ipfsNodeInfo{
        {"bootstrap", filepath.Join(pm.debrosDir, "bootstrap/ipfs/repo"), 4501, 4101, 7501, ""},
        {"node2", filepath.Join(pm.debrosDir, "node2/ipfs/repo"), 4502, 4102, 7502, ""},
        {"node3", filepath.Join(pm.debrosDir, "node3/ipfs/repo"), 4503, 4103, 7503, ""},
    }
    for _, ipfsNode := range ipfsNodes {
        if err := pm.waitIPFSReady(ctx, ipfsNode); err != nil {
            fmt.Fprintf(pm.logWriter, "  Warning: IPFS %s did not become ready: %v\n", ipfsNode.name, err)
        }
    }

    for _, node := range nodes {
        serviceJSON := filepath.Join(node.clusterPath, "service.json")
        if _, err := os.Stat(serviceJSON); os.IsNotExist(err) {
            os.MkdirAll(node.clusterPath, 0755)
            fmt.Fprintf(pm.logWriter, "  Initializing IPFS Cluster (%s)...\n", node.name)
            cmd := exec.CommandContext(ctx, "ipfs-cluster-service", "init", "--force")
            cmd.Env = append(os.Environ(), fmt.Sprintf("IPFS_CLUSTER_PATH=%s", node.clusterPath))
            cmd.Run()
        }

        // Ensure correct ports in service.json BEFORE starting the daemon
        if err := pm.ensureIPFSClusterPorts(node.clusterPath, node.restAPIPort, node.clusterPort); err != nil {
            fmt.Fprintf(pm.logWriter, "  Warning: failed to update IPFS Cluster config for %s: %v\n", node.name, err)
        }

        // Start cluster service
        pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("ipfs-cluster-%s.pid", node.name))
        logPath := filepath.Join(pm.debrosDir, "logs", fmt.Sprintf("ipfs-cluster-%s.log", node.name))

        cmd := exec.CommandContext(ctx, "ipfs-cluster-service", "daemon")
        cmd.Env = append(os.Environ(), fmt.Sprintf("IPFS_CLUSTER_PATH=%s", node.clusterPath))
        logFile, _ := os.Create(logPath)
        cmd.Stdout = logFile
        cmd.Stderr = logFile

        if err := cmd.Start(); err != nil {
            fmt.Fprintf(pm.logWriter, "  ⚠️ Failed to start ipfs-cluster-%s: %v\n", node.name, err)
            continue
        }

        os.WriteFile(pidPath, []byte(fmt.Sprintf("%d", cmd.Process.Pid)), 0644)
        fmt.Fprintf(pm.logWriter, "✓ IPFS Cluster (%s) started (PID: %d, API: %d)\n", node.name, cmd.Process.Pid, node.restAPIPort)
    }

    time.Sleep(1 * time.Second)
    return nil
}
// ensureIPFSClusterPorts updates service.json with correct per-node ports and IPFS connector settings
func (pm *ProcessManager) ensureIPFSClusterPorts(clusterPath string, restAPIPort int, clusterPort int) error {
    serviceJSONPath := filepath.Join(clusterPath, "service.json")

    // Read existing config
    data, err := os.ReadFile(serviceJSONPath)
    if err != nil {
        return fmt.Errorf("failed to read service.json: %w", err)
    }

    var config map[string]interface{}
    if err := json.Unmarshal(data, &config); err != nil {
        return fmt.Errorf("failed to unmarshal service.json: %w", err)
    }

    // Calculate unique ports for this node based on the restAPIPort offset:
    //   bootstrap=9094 -> proxy=9095, cluster=9096, pinsvc=9097
    //   node2=9104     -> proxy=9105, cluster=9106, pinsvc=9107
    //   node3=9114     -> proxy=9115, cluster=9116, pinsvc=9117
    portOffset := restAPIPort - 9094
    proxyPort := 9095 + portOffset
    pinsvcPort := 9097 + portOffset

    // Infer the IPFS API port from the REST API port:
    //   9094 -> 4501 (bootstrap), 9104 -> 4502 (node2), 9114 -> 4503 (node3)
    ipfsPort := 4501 + (portOffset / 10)

    // Update API settings
    if api, ok := config["api"].(map[string]interface{}); ok {
        // Update REST API listen address
        if restapi, ok := api["restapi"].(map[string]interface{}); ok {
            restapi["http_listen_multiaddress"] = fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", restAPIPort)
        }

        // Update IPFS Proxy settings
        if proxy, ok := api["ipfsproxy"].(map[string]interface{}); ok {
            proxy["listen_multiaddress"] = fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", proxyPort)
            proxy["node_multiaddress"] = fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", ipfsPort)
        }

        // Update Pinning Service API port
        if pinsvc, ok := api["pinsvcapi"].(map[string]interface{}); ok {
            pinsvc["http_listen_multiaddress"] = fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", pinsvcPort)
        }
    }

    // Update cluster listen multiaddress
    if cluster, ok := config["cluster"].(map[string]interface{}); ok {
        cluster["listen_multiaddress"] = []string{fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", clusterPort)}
    }

    // Update IPFS connector settings to point to the correct IPFS API port
    if connector, ok := config["ipfs_connector"].(map[string]interface{}); ok {
        if ipfshttp, ok := connector["ipfshttp"].(map[string]interface{}); ok {
            ipfshttp["node_multiaddress"] = fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", ipfsPort)
        }
    }

    // Write updated config
    updatedData, err := json.MarshalIndent(config, "", "  ")
    if err != nil {
        return fmt.Errorf("failed to marshal updated config: %w", err)
    }

    if err := os.WriteFile(serviceJSONPath, updatedData, 0644); err != nil {
        return fmt.Errorf("failed to write service.json: %w", err)
    }

    return nil
}
func (pm *ProcessManager) startRQLite(ctx context.Context) error {
    nodes := []struct {
        name     string
        dataDir  string
        httpPort int
        raftPort int
        joinAddr string
    }{
        {"bootstrap", filepath.Join(pm.debrosDir, "bootstrap/rqlite"), 5001, 7001, ""},
        {"node2", filepath.Join(pm.debrosDir, "node2/rqlite"), 5002, 7002, "localhost:7001"},
        {"node3", filepath.Join(pm.debrosDir, "node3/rqlite"), 5003, 7003, "localhost:7001"},
    }

    for _, node := range nodes {
        os.MkdirAll(node.dataDir, 0755)

        pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("rqlite-%s.pid", node.name))
        logPath := filepath.Join(pm.debrosDir, "logs", fmt.Sprintf("rqlite-%s.log", node.name))

        var args []string
        args = append(args, fmt.Sprintf("-http-addr=0.0.0.0:%d", node.httpPort))
        args = append(args, fmt.Sprintf("-http-adv-addr=localhost:%d", node.httpPort))
        args = append(args, fmt.Sprintf("-raft-addr=0.0.0.0:%d", node.raftPort))
        args = append(args, fmt.Sprintf("-raft-adv-addr=localhost:%d", node.raftPort))
        if node.joinAddr != "" {
            args = append(args, "-join", node.joinAddr, "-join-attempts", "30", "-join-interval", "10s")
        }
        args = append(args, node.dataDir)
        cmd := exec.CommandContext(ctx, "rqlited", args...)

        logFile, _ := os.Create(logPath)
        cmd.Stdout = logFile
        cmd.Stderr = logFile

        if err := cmd.Start(); err != nil {
            return fmt.Errorf("failed to start rqlite-%s: %w", node.name, err)
        }

        os.WriteFile(pidPath, []byte(fmt.Sprintf("%d", cmd.Process.Pid)), 0644)
        pm.processes[fmt.Sprintf("rqlite-%s", node.name)] = &ManagedProcess{
            Name:      fmt.Sprintf("rqlite-%s", node.name),
            PID:       cmd.Process.Pid,
            StartTime: time.Now(),
            LogPath:   logPath,
        }

        fmt.Fprintf(pm.logWriter, "✓ RQLite (%s) started (PID: %d, HTTP: %d, Raft: %d)\n", node.name, cmd.Process.Pid, node.httpPort, node.raftPort)
    }

    time.Sleep(2 * time.Second)
    return nil
}

func (pm *ProcessManager) startOlric(ctx context.Context) error {
    pidPath := filepath.Join(pm.pidsDir, "olric.pid")
    logPath := filepath.Join(pm.debrosDir, "logs", "olric.log")
    configPath := filepath.Join(pm.debrosDir, "olric-config.yaml")

    cmd := exec.CommandContext(ctx, "olric-server")
    cmd.Env = append(os.Environ(), fmt.Sprintf("OLRIC_SERVER_CONFIG=%s", configPath))
    logFile, _ := os.Create(logPath)
    cmd.Stdout = logFile
    cmd.Stderr = logFile

    if err := cmd.Start(); err != nil {
        return fmt.Errorf("failed to start olric: %w", err)
    }

    os.WriteFile(pidPath, []byte(fmt.Sprintf("%d", cmd.Process.Pid)), 0644)
    fmt.Fprintf(pm.logWriter, "✓ Olric started (PID: %d)\n", cmd.Process.Pid)

    time.Sleep(1 * time.Second)
    return nil
}

func (pm *ProcessManager) startAnon(ctx context.Context) error {
    if runtime.GOOS != "darwin" {
        return nil // Skip on non-macOS for now
    }

    pidPath := filepath.Join(pm.pidsDir, "anon.pid")
    logPath := filepath.Join(pm.debrosDir, "logs", "anon.log")

    cmd := exec.CommandContext(ctx, "npx", "anyone-client")
    logFile, _ := os.Create(logPath)
    cmd.Stdout = logFile
    cmd.Stderr = logFile

    if err := cmd.Start(); err != nil {
        fmt.Fprintf(pm.logWriter, "  ⚠️ Failed to start Anon: %v\n", err)
        return nil
    }

    os.WriteFile(pidPath, []byte(fmt.Sprintf("%d", cmd.Process.Pid)), 0644)
    fmt.Fprintf(pm.logWriter, "✓ Anon proxy started (PID: %d, SOCKS: 9050)\n", cmd.Process.Pid)

    return nil
}
func (pm *ProcessManager) startBootstrapNode(ctx context.Context) error {
    return pm.startNode("bootstrap", "bootstrap.yaml", filepath.Join(pm.debrosDir, "logs", "bootstrap.log"))
}

func (pm *ProcessManager) startNode2(ctx context.Context) error {
    return pm.startNode("node2", "node2.yaml", filepath.Join(pm.debrosDir, "logs", "node2.log"))
}

func (pm *ProcessManager) startNode3(ctx context.Context) error {
    return pm.startNode("node3", "node3.yaml", filepath.Join(pm.debrosDir, "logs", "node3.log"))
}

func (pm *ProcessManager) startNode(name, configFile, logPath string) error {
    pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("%s.pid", name))
    cmd := exec.Command("./bin/node", "--config", configFile)
    logFile, _ := os.Create(logPath)
    cmd.Stdout = logFile
    cmd.Stderr = logFile

    if err := cmd.Start(); err != nil {
        return fmt.Errorf("failed to start %s: %w", name, err)
    }

    os.WriteFile(pidPath, []byte(fmt.Sprintf("%d", cmd.Process.Pid)), 0644)
    fmt.Fprintf(pm.logWriter, "✓ %s started (PID: %d)\n", strings.Title(name), cmd.Process.Pid)

    time.Sleep(1 * time.Second)
    return nil
}

func (pm *ProcessManager) startGateway(ctx context.Context) error {
    pidPath := filepath.Join(pm.pidsDir, "gateway.pid")
    logPath := filepath.Join(pm.debrosDir, "logs", "gateway.log")

    cmd := exec.Command("./bin/gateway", "--config", "gateway.yaml")
    logFile, _ := os.Create(logPath)
    cmd.Stdout = logFile
    cmd.Stderr = logFile

    if err := cmd.Start(); err != nil {
        return fmt.Errorf("failed to start gateway: %w", err)
    }

    os.WriteFile(pidPath, []byte(fmt.Sprintf("%d", cmd.Process.Pid)), 0644)
    fmt.Fprintf(pm.logWriter, "✓ Gateway started (PID: %d, listen: 6001)\n", cmd.Process.Pid)

    return nil
}
// stopProcess terminates a managed process
func (pm *ProcessManager) stopProcess(name string) error {
    pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("%s.pid", name))
    pidBytes, err := os.ReadFile(pidPath)
    if err != nil {
        return nil // Process not running or PID not found
    }

    pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes)))
    if err != nil {
        return nil
    }

    proc, err := os.FindProcess(pid)
    if err != nil {
        os.Remove(pidPath)
        return nil
    }

    proc.Signal(os.Interrupt)
    os.Remove(pidPath)

    fmt.Fprintf(pm.logWriter, "✓ %s stopped\n", name)
    return nil
}
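stopProcess sends a single interrupt and assumes the service exits on its own. One possible hardening (an assumption, not something this commit does) is to escalate to a hard kill after a grace period, reusing checkProcessRunning from this file:

// Hypothetical graceful-stop helper (illustrative only): interrupt first,
// then force-kill if the process is still alive after the grace period.
func stopWithGrace(pid int, grace time.Duration) {
    proc, err := os.FindProcess(pid)
    if err != nil {
        return
    }
    proc.Signal(os.Interrupt)
    deadline := time.Now().Add(grace)
    for time.Now().Before(deadline) {
        if !checkProcessRunning(pid) {
            return // exited cleanly
        }
        time.Sleep(100 * time.Millisecond)
    }
    proc.Kill()
}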
// checkProcessRunning checks if a process with given PID is running
func checkProcessRunning(pid int) bool {
    proc, err := os.FindProcess(pid)
    if err != nil {
        return false
    }

    // Probe with signal 0: it performs the existence/permission checks
    // without actually delivering a signal.
    err = proc.Signal(syscall.Signal(0))
    return err == nil
}
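The signal-0 probe is easy to sanity-check: on Unix, os.FindProcess never fails, so the Signal call is what actually verifies liveness. A quick test sketch (illustrative only, not part of this commit; assumes the standard "testing" package):

func TestCheckProcessRunning(t *testing.T) {
    if !checkProcessRunning(os.Getpid()) {
        t.Fatal("expected the current process to be reported as running")
    }
}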
215
pkg/environments/production/checks.go
Normal file
@ -0,0 +1,215 @@
package production

import (
    "fmt"
    "os"
    "os/exec"
    "runtime"
    "strings"
)

// OSInfo contains detected operating system information
type OSInfo struct {
    ID      string // ubuntu, debian, etc.
    Version string // 22.04, 24.04, 12, etc.
    Name    string // Full name: "ubuntu 24.04"
}

// PrivilegeChecker validates root access and user context
type PrivilegeChecker struct{}

// CheckRoot verifies the process is running as root
func (pc *PrivilegeChecker) CheckRoot() error {
    if os.Geteuid() != 0 {
        return fmt.Errorf("this command must be run as root (use sudo)")
    }
    return nil
}

// CheckLinuxOS verifies the process is running on Linux
func (pc *PrivilegeChecker) CheckLinuxOS() error {
    if runtime.GOOS != "linux" {
        return fmt.Errorf("production setup is only supported on Linux (detected: %s)", runtime.GOOS)
    }
    return nil
}

// OSDetector detects the Linux distribution
type OSDetector struct{}

// Detect returns information about the detected OS
func (od *OSDetector) Detect() (*OSInfo, error) {
    data, err := os.ReadFile("/etc/os-release")
    if err != nil {
        return nil, fmt.Errorf("cannot detect operating system: %w", err)
    }

    lines := strings.Split(string(data), "\n")
    var id, version string

    for _, line := range lines {
        line = strings.TrimSpace(line)
        if strings.HasPrefix(line, "ID=") {
            id = strings.Trim(strings.TrimPrefix(line, "ID="), "\"")
        }
        if strings.HasPrefix(line, "VERSION_ID=") {
            version = strings.Trim(strings.TrimPrefix(line, "VERSION_ID="), "\"")
        }
    }

    if id == "" {
        return nil, fmt.Errorf("could not detect OS ID from /etc/os-release")
    }

    name := id
    if version != "" {
        name = fmt.Sprintf("%s %s", id, version)
    }

    return &OSInfo{
        ID:      id,
        Version: version,
        Name:    name,
    }, nil
}

// IsSupportedOS checks if the OS is supported for production deployment
func (od *OSDetector) IsSupportedOS(info *OSInfo) bool {
    supported := map[string][]string{
        "ubuntu": {"22.04", "24.04", "25.04"},
        "debian": {"12"},
    }

    versions, ok := supported[info.ID]
    if !ok {
        return false
    }

    for _, v := range versions {
        if info.Version == v {
            return true
        }
    }

    return false
}

// ArchitectureDetector detects the system architecture
type ArchitectureDetector struct{}

// Detect returns the detected architecture as a string usable for downloads
func (ad *ArchitectureDetector) Detect() (string, error) {
    arch := runtime.GOARCH
    switch arch {
    case "amd64":
        return "amd64", nil
    case "arm64":
        return "arm64", nil
    case "arm":
        return "arm", nil
    default:
        return "", fmt.Errorf("unsupported architecture: %s", arch)
    }
}
// DependencyChecker validates external tool availability
type DependencyChecker struct {
    skipOptional bool
}

// NewDependencyChecker creates a new checker
func NewDependencyChecker(skipOptional bool) *DependencyChecker {
    return &DependencyChecker{
        skipOptional: skipOptional,
    }
}

// Dependency represents an external binary dependency
type Dependency struct {
    Name        string
    Command     string
    Optional    bool
    InstallHint string
}

// CheckAll validates all required dependencies
func (dc *DependencyChecker) CheckAll() ([]Dependency, error) {
    dependencies := []Dependency{
        {
            Name:        "curl",
            Command:     "curl",
            Optional:    false,
            InstallHint: "Usually pre-installed; if missing: apt-get install curl",
        },
        {
            Name:        "git",
            Command:     "git",
            Optional:    false,
            InstallHint: "Install with: apt-get install git",
        },
        {
            Name:        "make",
            Command:     "make",
            Optional:    false,
            InstallHint: "Install with: apt-get install make",
        },
    }

    var missing []Dependency
    for _, dep := range dependencies {
        if _, err := exec.LookPath(dep.Command); err != nil {
            if !dep.Optional || !dc.skipOptional {
                missing = append(missing, dep)
            }
        }
    }

    if len(missing) > 0 {
        errMsg := "missing required dependencies:\n"
        for _, dep := range missing {
            errMsg += fmt.Sprintf("  - %s (%s): %s\n", dep.Name, dep.Command, dep.InstallHint)
        }
        return missing, fmt.Errorf("%s", errMsg)
    }

    return nil, nil
}

// ExternalToolChecker validates external tool versions and availability
type ExternalToolChecker struct{}

// CheckIPFSAvailable checks if IPFS is available in PATH
func (etc *ExternalToolChecker) CheckIPFSAvailable() bool {
    _, err := exec.LookPath("ipfs")
    return err == nil
}

// CheckIPFSClusterAvailable checks if IPFS Cluster Service is available
func (etc *ExternalToolChecker) CheckIPFSClusterAvailable() bool {
    _, err := exec.LookPath("ipfs-cluster-service")
    return err == nil
}

// CheckRQLiteAvailable checks if RQLite is available
func (etc *ExternalToolChecker) CheckRQLiteAvailable() bool {
    _, err := exec.LookPath("rqlited")
    return err == nil
}

// CheckOlricAvailable checks if Olric Server is available
func (etc *ExternalToolChecker) CheckOlricAvailable() bool {
    _, err := exec.LookPath("olric-server")
    return err == nil
}

// CheckAnonAvailable checks if Anon is available (optional)
func (etc *ExternalToolChecker) CheckAnonAvailable() bool {
    _, err := exec.LookPath("anon")
    return err == nil
}

// CheckGoAvailable checks if Go is installed
func (etc *ExternalToolChecker) CheckGoAvailable() bool {
    _, err := exec.LookPath("go")
    return err == nil
}
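Taken together, these checkers compose into a short preflight routine. The standalone composition below is illustrative; the real call sites live in the orchestrator later in this commit:

// Illustrative preflight sketch (not part of this commit), in package production.
func preflight() error {
    pc := &PrivilegeChecker{}
    if err := pc.CheckRoot(); err != nil {
        return err
    }
    if err := pc.CheckLinuxOS(); err != nil {
        return err
    }

    info, err := (&OSDetector{}).Detect()
    if err != nil {
        return err
    }
    if !(&OSDetector{}).IsSupportedOS(info) {
        fmt.Printf("warning: %s is not an officially supported OS\n", info.Name)
    }

    _, err = NewDependencyChecker(false).CheckAll()
    return err
}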
238
pkg/environments/production/config.go
Normal file
@ -0,0 +1,238 @@
package production

import (
    "crypto/rand"
    "encoding/hex"
    "fmt"
    "os"
    "os/exec"
    "path/filepath"
    "strings"

    "github.com/DeBrosOfficial/network/pkg/environments/templates"
    "github.com/libp2p/go-libp2p/core/crypto"
    "github.com/libp2p/go-libp2p/core/peer"
)

// ConfigGenerator manages generation of node, gateway, and service configs
type ConfigGenerator struct {
    debrosDir string
}

// NewConfigGenerator creates a new config generator
func NewConfigGenerator(debrosDir string) *ConfigGenerator {
    return &ConfigGenerator{
        debrosDir: debrosDir,
    }
}

// GenerateNodeConfig generates node.yaml configuration
func (cg *ConfigGenerator) GenerateNodeConfig(isBootstrap bool, bootstrapPeers []string, vpsIP string) (string, error) {
    if isBootstrap {
        data := templates.BootstrapConfigData{
            NodeID:         "bootstrap",
            P2PPort:        4001,
            DataDir:        filepath.Join(cg.debrosDir, "data", "bootstrap"),
            RQLiteHTTPPort: 5001,
            RQLiteRaftPort: 7001,
            ClusterAPIPort: 9094,
            IPFSAPIPort:    4501,
        }
        return templates.RenderBootstrapConfig(data)
    }

    // Regular node
    rqliteJoinAddr := "localhost:7001"
    if vpsIP != "" {
        rqliteJoinAddr = vpsIP + ":7001"
    }

    data := templates.NodeConfigData{
        NodeID:            "node",
        P2PPort:           4001,
        DataDir:           filepath.Join(cg.debrosDir, "data", "node"),
        RQLiteHTTPPort:    5001,
        RQLiteRaftPort:    7001,
        RQLiteJoinAddress: rqliteJoinAddr,
        BootstrapPeers:    bootstrapPeers,
        ClusterAPIPort:    9094,
        IPFSAPIPort:       4501,
    }
    return templates.RenderNodeConfig(data)
}

// GenerateGatewayConfig generates gateway.yaml configuration
func (cg *ConfigGenerator) GenerateGatewayConfig(bootstrapPeers []string, enableHTTPS bool, domain string, olricServers []string) (string, error) {
    tlsCacheDir := ""
    if enableHTTPS {
        tlsCacheDir = filepath.Join(cg.debrosDir, "tls-cache")
    }

    data := templates.GatewayConfigData{
        ListenPort:     6001,
        BootstrapPeers: bootstrapPeers,
        OlricServers:   olricServers,
        ClusterAPIPort: 9094,
        IPFSAPIPort:    4501,
        EnableHTTPS:    enableHTTPS,
        DomainName:     domain,
        TLSCacheDir:    tlsCacheDir,
        RQLiteDSN:      "", // Empty for now, can be configured later
    }
    return templates.RenderGatewayConfig(data)
}

// GenerateOlricConfig generates Olric configuration
func (cg *ConfigGenerator) GenerateOlricConfig(bindAddr string, httpPort, memberlistPort int) (string, error) {
    data := templates.OlricConfigData{
        BindAddr:       bindAddr,
        HTTPPort:       httpPort,
        MemberlistPort: memberlistPort,
    }
    return templates.RenderOlricConfig(data)
}
// SecretGenerator manages generation of shared secrets and keys
type SecretGenerator struct {
    debrosDir string
}

// NewSecretGenerator creates a new secret generator
func NewSecretGenerator(debrosDir string) *SecretGenerator {
    return &SecretGenerator{
        debrosDir: debrosDir,
    }
}

// EnsureClusterSecret gets or generates the IPFS Cluster secret
func (sg *SecretGenerator) EnsureClusterSecret() (string, error) {
    secretPath := filepath.Join(sg.debrosDir, "secrets", "cluster-secret")
    secretDir := filepath.Dir(secretPath)

    // Ensure secrets directory exists
    if err := os.MkdirAll(secretDir, 0755); err != nil {
        return "", fmt.Errorf("failed to create secrets directory: %w", err)
    }

    // Try to read existing secret
    if data, err := os.ReadFile(secretPath); err == nil {
        secret := strings.TrimSpace(string(data))
        if len(secret) == 64 {
            return secret, nil
        }
    }

    // Generate new secret (32 bytes = 64 hex chars)
    bytes := make([]byte, 32)
    if _, err := rand.Read(bytes); err != nil {
        return "", fmt.Errorf("failed to generate cluster secret: %w", err)
    }
    secret := hex.EncodeToString(bytes)

    // Write and protect
    if err := os.WriteFile(secretPath, []byte(secret), 0600); err != nil {
        return "", fmt.Errorf("failed to save cluster secret: %w", err)
    }

    return secret, nil
}
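ipfs-cluster expects the shared secret as 32 bytes of hex, which is why the reader above only trusts a 64-character file. An illustrative validator capturing that invariant (not in this commit; hex and strings are already imported in this file):

// Hypothetical validator (illustrative only): a usable cluster secret
// must decode to exactly 32 bytes.
func validClusterSecret(s string) bool {
    b, err := hex.DecodeString(strings.TrimSpace(s))
    return err == nil && len(b) == 32
}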
|
||||
// EnsureSwarmKey gets or generates the IPFS private swarm key
|
||||
func (sg *SecretGenerator) EnsureSwarmKey() ([]byte, error) {
|
||||
swarmKeyPath := filepath.Join(sg.debrosDir, "secrets", "swarm.key")
|
||||
secretDir := filepath.Dir(swarmKeyPath)
|
||||
|
||||
// Ensure secrets directory exists
|
||||
if err := os.MkdirAll(secretDir, 0755); err != nil {
|
||||
return nil, fmt.Errorf("failed to create secrets directory: %w", err)
|
||||
}
|
||||
|
||||
// Try to read existing key
|
||||
if data, err := os.ReadFile(swarmKeyPath); err == nil {
|
||||
if strings.Contains(string(data), "/key/swarm/psk/1.0.0/") {
|
||||
return data, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Generate new key (32 bytes)
|
||||
keyBytes := make([]byte, 32)
|
||||
if _, err := rand.Read(keyBytes); err != nil {
|
||||
return nil, fmt.Errorf("failed to generate swarm key: %w", err)
|
||||
}
|
||||
|
||||
keyHex := strings.ToUpper(hex.EncodeToString(keyBytes))
|
||||
content := fmt.Sprintf("/key/swarm/psk/1.0.0/\n/base16/\n%s\n", keyHex)
|
||||
|
||||
// Write and protect
|
||||
if err := os.WriteFile(swarmKeyPath, []byte(content), 0600); err != nil {
|
||||
return nil, fmt.Errorf("failed to save swarm key: %w", err)
|
||||
}
|
||||
|
||||
return []byte(content), nil
|
||||
}
|
||||
|
||||
// EnsureNodeIdentity gets or generates the node's LibP2P identity
func (sg *SecretGenerator) EnsureNodeIdentity(nodeType string) (peer.ID, error) {
    keyDir := filepath.Join(sg.debrosDir, "data", nodeType)
    keyPath := filepath.Join(keyDir, "identity.key")

    // Ensure data directory exists
    if err := os.MkdirAll(keyDir, 0755); err != nil {
        return "", fmt.Errorf("failed to create data directory: %w", err)
    }

    // Try to read existing key
    if data, err := os.ReadFile(keyPath); err == nil {
        priv, err := crypto.UnmarshalPrivateKey(data)
        if err == nil {
            pub := priv.GetPublic()
            peerID, _ := peer.IDFromPublicKey(pub)
            return peerID, nil
        }
    }

    // Generate new identity (the key-size argument is ignored for Ed25519)
    priv, pub, err := crypto.GenerateKeyPair(crypto.Ed25519, -1)
    if err != nil {
        return "", fmt.Errorf("failed to generate identity: %w", err)
    }

    peerID, _ := peer.IDFromPublicKey(pub)

    // Marshal and save private key
    keyData, err := crypto.MarshalPrivateKey(priv)
    if err != nil {
        return "", fmt.Errorf("failed to marshal private key: %w", err)
    }

    if err := os.WriteFile(keyPath, keyData, 0600); err != nil {
        return "", fmt.Errorf("failed to save identity key: %w", err)
    }

    return peerID, nil
}
// SaveConfig writes a configuration file to disk
func (sg *SecretGenerator) SaveConfig(filename string, content string) error {
    configDir := filepath.Join(sg.debrosDir, "configs")
    if err := os.MkdirAll(configDir, 0755); err != nil {
        return fmt.Errorf("failed to create configs directory: %w", err)
    }

    configPath := filepath.Join(configDir, filename)
    if err := os.WriteFile(configPath, []byte(content), 0644); err != nil {
        return fmt.Errorf("failed to write config %s: %w", filename, err)
    }

    // Fix ownership
    exec.Command("chown", "debros:debros", configPath).Run()

    return nil
}
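A sketch of how the two generators in this file are meant to compose during provisioning. The path and the fatal-on-error handling are assumptions; the real call sites are in the orchestrator:

// Illustrative provisioning flow (not part of this commit).
// Assumes import: log.
func provisionBootstrapConfigs(debrosDir string) {
    cg := NewConfigGenerator(debrosDir)
    sg := NewSecretGenerator(debrosDir)

    if _, err := sg.EnsureClusterSecret(); err != nil {
        log.Fatal(err)
    }
    if _, err := sg.EnsureSwarmKey(); err != nil {
        log.Fatal(err)
    }

    nodeYAML, err := cg.GenerateNodeConfig(true, nil, "")
    if err != nil {
        log.Fatal(err)
    }
    if err := sg.SaveConfig("node.yaml", nodeYAML); err != nil {
        log.Fatal(err)
    }
}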
307
pkg/environments/production/installers.go
Normal file
@ -0,0 +1,307 @@
package production

import (
    "fmt"
    "io"
    "os"
    "os/exec"
    "path/filepath"
    "strings"
)

// BinaryInstaller handles downloading and installing external binaries
type BinaryInstaller struct {
    arch      string
    logWriter io.Writer
}

// NewBinaryInstaller creates a new binary installer
func NewBinaryInstaller(arch string, logWriter io.Writer) *BinaryInstaller {
    return &BinaryInstaller{
        arch:      arch,
        logWriter: logWriter,
    }
}
// InstallRQLite downloads and installs RQLite
func (bi *BinaryInstaller) InstallRQLite() error {
    if _, err := exec.LookPath("rqlited"); err == nil {
        fmt.Fprintf(bi.logWriter, "  ✓ RQLite already installed\n")
        return nil
    }

    fmt.Fprintf(bi.logWriter, "  Installing RQLite...\n")

    version := "8.43.0"
    tarball := fmt.Sprintf("rqlite-v%s-linux-%s.tar.gz", version, bi.arch)
    url := fmt.Sprintf("https://github.com/rqlite/rqlite/releases/download/v%s/%s", version, tarball)

    // Download
    cmd := exec.Command("wget", "-q", url, "-O", "/tmp/"+tarball)
    if err := cmd.Run(); err != nil {
        return fmt.Errorf("failed to download RQLite: %w", err)
    }

    // Extract
    cmd = exec.Command("tar", "-C", "/tmp", "-xzf", "/tmp/"+tarball)
    if err := cmd.Run(); err != nil {
        return fmt.Errorf("failed to extract RQLite: %w", err)
    }

    // Copy binaries
    dir := fmt.Sprintf("/tmp/rqlite-v%s-linux-%s", version, bi.arch)
    exec.Command("cp", dir+"/rqlited", "/usr/local/bin/").Run()
    exec.Command("chmod", "+x", "/usr/local/bin/rqlited").Run()

    fmt.Fprintf(bi.logWriter, "  ✓ RQLite installed\n")
    return nil
}

// InstallIPFS downloads and installs IPFS (Kubo)
func (bi *BinaryInstaller) InstallIPFS() error {
    if _, err := exec.LookPath("ipfs"); err == nil {
        fmt.Fprintf(bi.logWriter, "  ✓ IPFS already installed\n")
        return nil
    }

    fmt.Fprintf(bi.logWriter, "  Installing IPFS (Kubo)...\n")

    // Use official install script
    cmd := exec.Command("bash", "-c", "curl -fsSL https://dist.ipfs.tech/kubo/v0.27.0/install.sh | bash")
    if err := cmd.Run(); err != nil {
        return fmt.Errorf("failed to install IPFS: %w", err)
    }

    fmt.Fprintf(bi.logWriter, "  ✓ IPFS installed\n")
    return nil
}

// InstallIPFSCluster downloads and installs IPFS Cluster Service
func (bi *BinaryInstaller) InstallIPFSCluster() error {
    if _, err := exec.LookPath("ipfs-cluster-service"); err == nil {
        fmt.Fprintf(bi.logWriter, "  ✓ IPFS Cluster already installed\n")
        return nil
    }

    fmt.Fprintf(bi.logWriter, "  Installing IPFS Cluster Service...\n")

    // Check if Go is available
    if _, err := exec.LookPath("go"); err != nil {
        return fmt.Errorf("go not found - required to install IPFS Cluster; please install Go first")
    }

    cmd := exec.Command("go", "install", "github.com/ipfs-cluster/ipfs-cluster/cmd/ipfs-cluster-service@latest")
    cmd.Env = append(os.Environ(), "GOBIN=/usr/local/bin")
    if err := cmd.Run(); err != nil {
        return fmt.Errorf("failed to install IPFS Cluster: %w", err)
    }

    fmt.Fprintf(bi.logWriter, "  ✓ IPFS Cluster installed\n")
    return nil
}

// InstallOlric downloads and installs Olric server
func (bi *BinaryInstaller) InstallOlric() error {
    if _, err := exec.LookPath("olric-server"); err == nil {
        fmt.Fprintf(bi.logWriter, "  ✓ Olric already installed\n")
        return nil
    }

    fmt.Fprintf(bi.logWriter, "  Installing Olric...\n")

    // Check if Go is available
    if _, err := exec.LookPath("go"); err != nil {
        return fmt.Errorf("go not found - required to install Olric; please install Go first")
    }

    cmd := exec.Command("go", "install", "github.com/olric-data/olric/cmd/olric-server@v0.7.0")
    cmd.Env = append(os.Environ(), "GOBIN=/usr/local/bin")
    if err := cmd.Run(); err != nil {
        return fmt.Errorf("failed to install Olric: %w", err)
    }

    fmt.Fprintf(bi.logWriter, "  ✓ Olric installed\n")
    return nil
}

// InstallGo downloads and installs the Go toolchain
func (bi *BinaryInstaller) InstallGo() error {
    if _, err := exec.LookPath("go"); err == nil {
        fmt.Fprintf(bi.logWriter, "  ✓ Go already installed\n")
        return nil
    }

    fmt.Fprintf(bi.logWriter, "  Installing Go...\n")

    goTarball := fmt.Sprintf("go1.21.6.linux-%s.tar.gz", bi.arch)
    goURL := fmt.Sprintf("https://go.dev/dl/%s", goTarball)

    // Download
    cmd := exec.Command("wget", "-q", goURL, "-O", "/tmp/"+goTarball)
    if err := cmd.Run(); err != nil {
        return fmt.Errorf("failed to download Go: %w", err)
    }

    // Extract
    cmd = exec.Command("tar", "-C", "/usr/local", "-xzf", "/tmp/"+goTarball)
    if err := cmd.Run(); err != nil {
        return fmt.Errorf("failed to extract Go: %w", err)
    }

    // Add to PATH for this process
    os.Setenv("PATH", os.Getenv("PATH")+":/usr/local/go/bin")

    fmt.Fprintf(bi.logWriter, "  ✓ Go installed\n")
    return nil
}
// InstallDeBrosBinaries clones and builds DeBros binaries
func (bi *BinaryInstaller) InstallDeBrosBinaries(branch string, debrosHome string) error {
    fmt.Fprintf(bi.logWriter, "  Building DeBros binaries...\n")

    srcDir := filepath.Join(debrosHome, "src")
    binDir := filepath.Join(debrosHome, "bin")

    // Ensure directories exist
    os.MkdirAll(srcDir, 0755)
    os.MkdirAll(binDir, 0755)

    // Clone repository if not present
    if _, err := os.Stat(filepath.Join(srcDir, "Makefile")); os.IsNotExist(err) {
        fmt.Fprintf(bi.logWriter, "  Cloning repository...\n")
        cmd := exec.Command("git", "clone", "--branch", branch, "--depth", "1", "https://github.com/DeBrosOfficial/network.git", srcDir)
        if err := cmd.Run(); err != nil {
            return fmt.Errorf("failed to clone repository: %w", err)
        }
    }

    // Build binaries
    fmt.Fprintf(bi.logWriter, "  Building binaries...\n")
    cmd := exec.Command("make", "build")
    cmd.Dir = srcDir
    cmd.Env = append(os.Environ(), "HOME="+debrosHome, "PATH="+os.Getenv("PATH")+":/usr/local/go/bin")
    if output, err := cmd.CombinedOutput(); err != nil {
        return fmt.Errorf("failed to build: %v\n%s", err, string(output))
    }

    // Copy binaries
    fmt.Fprintf(bi.logWriter, "  Copying binaries...\n")
    cmd = exec.Command("sh", "-c", fmt.Sprintf("cp -r %s/bin/* %s/", srcDir, binDir))
    if err := cmd.Run(); err != nil {
        return fmt.Errorf("failed to copy binaries: %w", err)
    }

    exec.Command("chmod", "-R", "755", binDir).Run()
    exec.Command("chown", "-R", "debros:debros", binDir).Run()

    fmt.Fprintf(bi.logWriter, "  ✓ DeBros binaries installed\n")
    return nil
}

// InstallSystemDependencies installs system-level dependencies via apt
func (bi *BinaryInstaller) InstallSystemDependencies() error {
    fmt.Fprintf(bi.logWriter, "  Installing system dependencies...\n")

    // Update package list
    cmd := exec.Command("apt-get", "update")
    if err := cmd.Run(); err != nil {
        fmt.Fprintf(bi.logWriter, "  Warning: apt update failed\n")
    }

    // Install dependencies
    cmd = exec.Command("apt-get", "install", "-y", "curl", "git", "make", "build-essential", "wget")
    if err := cmd.Run(); err != nil {
        return fmt.Errorf("failed to install dependencies: %w", err)
    }

    fmt.Fprintf(bi.logWriter, "  ✓ System dependencies installed\n")
    return nil
}

// InitializeIPFSRepo initializes an IPFS repository for a node
func (bi *BinaryInstaller) InitializeIPFSRepo(nodeType, ipfsRepoPath string, swarmKeyPath string) error {
    configPath := filepath.Join(ipfsRepoPath, "config")
    if _, err := os.Stat(configPath); err == nil {
        // Already initialized
        return nil
    }

    fmt.Fprintf(bi.logWriter, "  Initializing IPFS repo for %s...\n", nodeType)

    os.MkdirAll(ipfsRepoPath, 0755)

    // Initialize IPFS
    cmd := exec.Command("ipfs", "init", "--profile=server", "--repo-dir="+ipfsRepoPath)
    if output, err := cmd.CombinedOutput(); err != nil {
        return fmt.Errorf("failed to initialize IPFS: %v\n%s", err, string(output))
    }

    // Copy swarm key if present
    if data, err := os.ReadFile(swarmKeyPath); err == nil {
        os.WriteFile(filepath.Join(ipfsRepoPath, "swarm.key"), data, 0600)
    }

    return nil
}

// InitializeIPFSClusterConfig initializes IPFS Cluster configuration
func (bi *BinaryInstaller) InitializeIPFSClusterConfig(nodeType, clusterPath, clusterSecret string, ipfsAPIPort int) error {
    serviceJSONPath := filepath.Join(clusterPath, "service.json")
    if _, err := os.Stat(serviceJSONPath); err == nil {
        // Already initialized
        return nil
    }

    fmt.Fprintf(bi.logWriter, "  Initializing IPFS Cluster config for %s...\n", nodeType)

    os.MkdirAll(clusterPath, 0755)

    // For now, just create a minimal service.json.
    // This will be properly configured during service startup.
    cfgContent := fmt.Sprintf(`{
  "cluster": {
    "peername": "%s",
    "secret": "%s",
    "listen_multiaddress": ["/ip4/0.0.0.0/tcp/9096"],
    "leave_on_shutdown": false
  },
  "api": {
    "restapi": {
      "http_listen_multiaddress": "/ip4/0.0.0.0/tcp/9094"
    }
  },
  "ipfs_connector": {
    "ipfshttp": {
      "node_multiaddress": "/ip4/127.0.0.1/tcp/%d"
    }
  },
  "consensus": {
    "crdt": {
      "cluster_name": "debros",
      "trusted_peers": ["*"]
    }
  },
  "datastore": {
    "type": "badger",
    "path": "%s/badger"
  }
}`, nodeType, strings.TrimSpace(clusterSecret), ipfsAPIPort, clusterPath)

    if err := os.WriteFile(serviceJSONPath, []byte(cfgContent), 0644); err != nil {
        return fmt.Errorf("failed to write cluster config: %w", err)
    }

    exec.Command("chown", "-R", "debros:debros", clusterPath).Run()
    return nil
}

// InitializeRQLiteDataDir initializes RQLite data directory
func (bi *BinaryInstaller) InitializeRQLiteDataDir(nodeType, dataDir string) error {
    fmt.Fprintf(bi.logWriter, "  Initializing RQLite data dir for %s...\n", nodeType)

    if err := os.MkdirAll(dataDir, 0755); err != nil {
        return fmt.Errorf("failed to create RQLite data directory: %w", err)
    }

    exec.Command("chown", "-R", "debros:debros", dataDir).Run()
    return nil
}
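Every installer above repeats the same idempotency guard: resolve the binary with exec.LookPath and skip the work if it is already on PATH. One way that pattern could be factored out (an illustrative refactor, not something this commit does):

// Hypothetical helper (illustrative only) capturing the shared
// check-then-install pattern used by the installers above.
func ensureInstalled(w io.Writer, binary string, install func() error) error {
    if _, err := exec.LookPath(binary); err == nil {
        fmt.Fprintf(w, "  ✓ %s already installed\n", binary)
        return nil
    }
    return install()
}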
436
pkg/environments/production/orchestrator.go
Normal file
@ -0,0 +1,436 @@
package production

import (
    "fmt"
    "io"
    "os"
    "os/exec"
    "strings"
)

// ProductionSetup orchestrates the entire production deployment
type ProductionSetup struct {
    osInfo            *OSInfo
    arch              string
    debrosHome        string
    debrosDir         string
    logWriter         io.Writer
    forceReconfigure  bool
    skipOptionalDeps  bool
    privChecker       *PrivilegeChecker
    osDetector        *OSDetector
    archDetector      *ArchitectureDetector
    fsProvisioner     *FilesystemProvisioner
    userProvisioner   *UserProvisioner
    stateDetector     *StateDetector
    configGenerator   *ConfigGenerator
    secretGenerator   *SecretGenerator
    serviceGenerator  *SystemdServiceGenerator
    serviceController *SystemdController
    binaryInstaller   *BinaryInstaller
    branch            string
}

// NewProductionSetup creates a new production setup orchestrator
func NewProductionSetup(debrosHome string, logWriter io.Writer, forceReconfigure bool) *ProductionSetup {
    debrosDir := debrosHome + "/.debros"
    arch, _ := (&ArchitectureDetector{}).Detect()

    return &ProductionSetup{
        debrosHome:        debrosHome,
        debrosDir:         debrosDir,
        logWriter:         logWriter,
        forceReconfigure:  forceReconfigure,
        arch:              arch,
        branch:            "main",
        privChecker:       &PrivilegeChecker{},
        osDetector:        &OSDetector{},
        archDetector:      &ArchitectureDetector{},
        fsProvisioner:     NewFilesystemProvisioner(debrosHome),
        userProvisioner:   NewUserProvisioner("debros", debrosHome, "/bin/bash"),
        stateDetector:     NewStateDetector(debrosDir),
        configGenerator:   NewConfigGenerator(debrosDir),
        secretGenerator:   NewSecretGenerator(debrosDir),
        serviceGenerator:  NewSystemdServiceGenerator(debrosHome, debrosDir),
        serviceController: NewSystemdController(),
        binaryInstaller:   NewBinaryInstaller(arch, logWriter),
    }
}

// logf writes a formatted message to the log writer
func (ps *ProductionSetup) logf(format string, args ...interface{}) {
    if ps.logWriter != nil {
        fmt.Fprintf(ps.logWriter, format+"\n", args...)
    }
}
// Phase1CheckPrerequisites performs initial environment validation
func (ps *ProductionSetup) Phase1CheckPrerequisites() error {
    ps.logf("Phase 1: Checking prerequisites...")

    // Check root
    if err := ps.privChecker.CheckRoot(); err != nil {
        return fmt.Errorf("privilege check failed: %w", err)
    }
    ps.logf("  ✓ Running as root")

    // Check Linux OS
    if err := ps.privChecker.CheckLinuxOS(); err != nil {
        return fmt.Errorf("OS check failed: %w", err)
    }
    ps.logf("  ✓ Running on Linux")

    // Detect OS
    osInfo, err := ps.osDetector.Detect()
    if err != nil {
        return fmt.Errorf("failed to detect OS: %w", err)
    }
    ps.osInfo = osInfo
    ps.logf("  ✓ Detected OS: %s", osInfo.Name)

    // Check if supported
    if !ps.osDetector.IsSupportedOS(osInfo) {
        ps.logf("  ⚠️ OS %s is not officially supported (Ubuntu 22/24/25, Debian 12)", osInfo.Name)
        ps.logf("     Proceeding anyway, but issues may occur")
    }

    // Detect architecture
    arch, err := ps.archDetector.Detect()
    if err != nil {
        return fmt.Errorf("failed to detect architecture: %w", err)
    }
    ps.arch = arch
    ps.logf("  ✓ Detected architecture: %s", arch)

    // Check basic dependencies
    depChecker := NewDependencyChecker(ps.skipOptionalDeps)
    if missing, err := depChecker.CheckAll(); err != nil {
        ps.logf("  ❌ Missing dependencies:")
        for _, dep := range missing {
            ps.logf("     - %s: %s", dep.Name, dep.InstallHint)
        }
        return err
    }
    ps.logf("  ✓ Basic dependencies available")

    return nil
}

// Phase2ProvisionEnvironment sets up users and filesystems
func (ps *ProductionSetup) Phase2ProvisionEnvironment() error {
    ps.logf("Phase 2: Provisioning environment...")

    // Create debros user
    if !ps.userProvisioner.UserExists() {
        if err := ps.userProvisioner.CreateUser(); err != nil {
            return fmt.Errorf("failed to create debros user: %w", err)
        }
        ps.logf("  ✓ Created 'debros' user")
    } else {
        ps.logf("  ✓ 'debros' user already exists")
    }

    // Set up sudoers access if invoked via sudo
    sudoUser := os.Getenv("SUDO_USER")
    if sudoUser != "" {
        if err := ps.userProvisioner.SetupSudoersAccess(sudoUser); err != nil {
            ps.logf("  ⚠️ Failed to setup sudoers: %v", err)
        } else {
            ps.logf("  ✓ Sudoers access configured")
        }
    }

    // Create directory structure
    if err := ps.fsProvisioner.EnsureDirectoryStructure(); err != nil {
        return fmt.Errorf("failed to create directory structure: %w", err)
    }
    ps.logf("  ✓ Directory structure created")

    // Fix ownership
    if err := ps.fsProvisioner.FixOwnership(); err != nil {
        return fmt.Errorf("failed to fix ownership: %w", err)
    }
    ps.logf("  ✓ Ownership fixed")

    return nil
}
// Phase2bInstallBinaries installs external binaries and DeBros components
|
||||
func (ps *ProductionSetup) Phase2bInstallBinaries() error {
|
||||
ps.logf("Phase 2b: Installing binaries...")
|
||||
|
||||
// Install system dependencies
|
||||
if err := ps.binaryInstaller.InstallSystemDependencies(); err != nil {
|
||||
ps.logf(" ⚠️ System dependencies warning: %v", err)
|
||||
}
|
||||
|
||||
// Install Go if not present
|
||||
if err := ps.binaryInstaller.InstallGo(); err != nil {
|
||||
return fmt.Errorf("failed to install Go: %w", err)
|
||||
}
|
||||
|
||||
// Install binaries
|
||||
if err := ps.binaryInstaller.InstallRQLite(); err != nil {
|
||||
ps.logf(" ⚠️ RQLite install warning: %v", err)
|
||||
}
|
||||
|
||||
if err := ps.binaryInstaller.InstallIPFS(); err != nil {
|
||||
ps.logf(" ⚠️ IPFS install warning: %v", err)
|
||||
}
|
||||
|
||||
if err := ps.binaryInstaller.InstallIPFSCluster(); err != nil {
|
||||
ps.logf(" ⚠️ IPFS Cluster install warning: %v", err)
|
||||
}
|
||||
|
||||
if err := ps.binaryInstaller.InstallOlric(); err != nil {
|
||||
ps.logf(" ⚠️ Olric install warning: %v", err)
|
||||
}
|
||||
|
||||
// Install DeBros binaries
|
||||
if err := ps.binaryInstaller.InstallDeBrosBinaries(ps.branch, ps.debrosHome); err != nil {
|
||||
return fmt.Errorf("failed to install DeBros binaries: %w", err)
|
||||
}
|
||||
|
||||
ps.logf(" ✓ All binaries installed")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Phase2cInitializeServices initializes service repositories and configurations
|
||||
func (ps *ProductionSetup) Phase2cInitializeServices(nodeType string) error {
|
||||
ps.logf("Phase 2c: Initializing services...")
|
||||
|
||||
// Get cluster secret for IPFS
|
||||
clusterSecret, err := os.ReadFile(ps.debrosDir + "/secrets/cluster-secret")
|
||||
if err != nil {
|
||||
clusterSecret = []byte("")
|
||||
}
|
||||
|
||||
// Initialize IPFS repo
|
||||
ipfsRepoPath := ps.debrosDir + "/data/ipfs"
|
||||
if err := ps.binaryInstaller.InitializeIPFSRepo(nodeType, ipfsRepoPath, ps.debrosDir+"/secrets/swarm.key"); err != nil {
|
||||
ps.logf(" ⚠️ IPFS initialization warning: %v", err)
|
||||
}
|
||||
|
||||
// Initialize IPFS Cluster config
|
||||
clusterPath := ps.debrosDir + "/data/ipfs-cluster"
|
||||
ipfsAPIPort := 4501
|
||||
if err := ps.binaryInstaller.InitializeIPFSClusterConfig(nodeType, clusterPath, string(clusterSecret), ipfsAPIPort); err != nil {
|
||||
ps.logf(" ⚠️ IPFS Cluster initialization warning: %v", err)
|
||||
}
|
||||
|
||||
// Initialize RQLite data directory
|
||||
rqliteDataDir := ps.debrosDir + "/data/rqlite"
|
||||
if err := ps.binaryInstaller.InitializeRQLiteDataDir(nodeType, rqliteDataDir); err != nil {
|
||||
ps.logf(" ⚠️ RQLite initialization warning: %v", err)
|
||||
}
|
||||
|
||||
ps.logf(" ✓ Services initialized")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Phase3GenerateSecrets generates shared secrets and keys
|
||||
func (ps *ProductionSetup) Phase3GenerateSecrets(isBootstrap bool) error {
|
||||
ps.logf("Phase 3: Generating secrets...")
|
||||
|
||||
// Cluster secret
|
||||
if _, err := ps.secretGenerator.EnsureClusterSecret(); err != nil {
|
||||
return fmt.Errorf("failed to ensure cluster secret: %w", err)
|
||||
}
|
||||
ps.logf(" ✓ Cluster secret ensured")
|
||||
|
||||
// Swarm key
|
||||
if _, err := ps.secretGenerator.EnsureSwarmKey(); err != nil {
|
||||
return fmt.Errorf("failed to ensure swarm key: %w", err)
|
||||
}
|
||||
ps.logf(" ✓ IPFS swarm key ensured")
|
||||
|
||||
// Node identity
|
||||
nodeType := "node"
|
||||
if isBootstrap {
|
||||
nodeType = "bootstrap"
|
||||
}
|
||||
|
||||
peerID, err := ps.secretGenerator.EnsureNodeIdentity(nodeType)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to ensure node identity: %w", err)
|
||||
}
|
||||
ps.logf(" ✓ Node identity ensured (Peer ID: %s)", peerID.String())
|
||||
|
||||
return nil
|
||||
}
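
// After Phase3, the secrets directory should look roughly like this (the two
// file paths are confirmed by Phase2cInitializeServices above; the identity
// file name is an assumption):
//
//	<debrosDir>/secrets/cluster-secret   # shared IPFS Cluster secret
//	<debrosDir>/secrets/swarm.key        # private IPFS swarm key
//	<debrosDir>/secrets/<nodeType>.key   # libp2p node identity (hypothetical name)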

// Phase4GenerateConfigs generates node, gateway, and service configs
func (ps *ProductionSetup) Phase4GenerateConfigs(isBootstrap bool, bootstrapPeers []string, vpsIP string, enableHTTPS bool, domain string) error {
	ps.logf("Phase 4: Generating configurations...")

	// Node config
	nodeConfig, err := ps.configGenerator.GenerateNodeConfig(isBootstrap, bootstrapPeers, vpsIP)
	if err != nil {
		return fmt.Errorf("failed to generate node config: %w", err)
	}

	var configFile string
	if isBootstrap {
		configFile = "bootstrap.yaml"
	} else {
		configFile = "node.yaml"
	}

	if err := ps.secretGenerator.SaveConfig(configFile, nodeConfig); err != nil {
		return fmt.Errorf("failed to save node config: %w", err)
	}
	ps.logf("  ✓ Node config generated: %s", configFile)

	// Gateway config
	olricServers := []string{"127.0.0.1:3320"}
	gatewayConfig, err := ps.configGenerator.GenerateGatewayConfig(bootstrapPeers, enableHTTPS, domain, olricServers)
	if err != nil {
		return fmt.Errorf("failed to generate gateway config: %w", err)
	}

	if err := ps.secretGenerator.SaveConfig("gateway.yaml", gatewayConfig); err != nil {
		return fmt.Errorf("failed to save gateway config: %w", err)
	}
	ps.logf("  ✓ Gateway config generated")

	// Olric config
	olricConfig, err := ps.configGenerator.GenerateOlricConfig("localhost", 3320, 3322)
	if err != nil {
		return fmt.Errorf("failed to generate olric config: %w", err)
	}

	// Create olric config directory
	olricConfigDir := ps.debrosDir + "/configs/olric"
	if err := os.MkdirAll(olricConfigDir, 0755); err != nil {
		return fmt.Errorf("failed to create olric config directory: %w", err)
	}

	olricConfigPath := olricConfigDir + "/config.yaml"
	if err := os.WriteFile(olricConfigPath, []byte(olricConfig), 0644); err != nil {
		return fmt.Errorf("failed to save olric config: %w", err)
	}
	exec.Command("chown", "debros:debros", olricConfigPath).Run()
	ps.logf("  ✓ Olric config generated")

	return nil
}

// Phase5CreateSystemdServices creates and enables systemd units
func (ps *ProductionSetup) Phase5CreateSystemdServices(nodeType string) error {
	ps.logf("Phase 5: Creating systemd services...")

	// IPFS service
	ipfsUnit := ps.serviceGenerator.GenerateIPFSService(nodeType)
	unitName := fmt.Sprintf("debros-ipfs-%s.service", nodeType)
	if err := ps.serviceController.WriteServiceUnit(unitName, ipfsUnit); err != nil {
		return fmt.Errorf("failed to write IPFS service: %w", err)
	}
	ps.logf("  ✓ IPFS service created: %s", unitName)

	// IPFS Cluster service
	clusterUnit := ps.serviceGenerator.GenerateIPFSClusterService(nodeType)
	clusterUnitName := fmt.Sprintf("debros-ipfs-cluster-%s.service", nodeType)
	if err := ps.serviceController.WriteServiceUnit(clusterUnitName, clusterUnit); err != nil {
		return fmt.Errorf("failed to write IPFS Cluster service: %w", err)
	}
	ps.logf("  ✓ IPFS Cluster service created: %s", clusterUnitName)

	// RQLite service (only for bootstrap in single-node, or conditionally)
	rqliteUnit := ps.serviceGenerator.GenerateRQLiteService(nodeType, 5001, 7001, "")
	rqliteUnitName := fmt.Sprintf("debros-rqlite-%s.service", nodeType)
	if err := ps.serviceController.WriteServiceUnit(rqliteUnitName, rqliteUnit); err != nil {
		return fmt.Errorf("failed to write RQLite service: %w", err)
	}
	ps.logf("  ✓ RQLite service created: %s", rqliteUnitName)

	// Olric service
	olricUnit := ps.serviceGenerator.GenerateOlricService()
	if err := ps.serviceController.WriteServiceUnit("debros-olric.service", olricUnit); err != nil {
		return fmt.Errorf("failed to write Olric service: %w", err)
	}
	ps.logf("  ✓ Olric service created")

	// Node service
	nodeUnit := ps.serviceGenerator.GenerateNodeService(nodeType)
	nodeUnitName := fmt.Sprintf("debros-node-%s.service", nodeType)
	if err := ps.serviceController.WriteServiceUnit(nodeUnitName, nodeUnit); err != nil {
		return fmt.Errorf("failed to write Node service: %w", err)
	}
	ps.logf("  ✓ Node service created: %s", nodeUnitName)

	// Gateway service (optional, only on specific nodes)
	gatewayUnit := ps.serviceGenerator.GenerateGatewayService(nodeType)
	if err := ps.serviceController.WriteServiceUnit("debros-gateway.service", gatewayUnit); err != nil {
		return fmt.Errorf("failed to write Gateway service: %w", err)
	}
	ps.logf("  ✓ Gateway service created")

	// Reload systemd daemon
	if err := ps.serviceController.DaemonReload(); err != nil {
		return fmt.Errorf("failed to reload systemd: %w", err)
	}
	ps.logf("  ✓ Systemd daemon reloaded")

	// Enable services
	services := []string{unitName, clusterUnitName, rqliteUnitName, "debros-olric.service", nodeUnitName, "debros-gateway.service"}
	for _, svc := range services {
		if err := ps.serviceController.EnableService(svc); err != nil {
			ps.logf("  ⚠️  Failed to enable %s: %v", svc, err)
		} else {
			ps.logf("  ✓ Service enabled: %s", svc)
		}
	}

	// Start services in dependency order
	ps.logf("  Starting services...")

	// Start infrastructure first (IPFS, RQLite, Olric)
	infraServices := []string{unitName, rqliteUnitName, "debros-olric.service"}
	for _, svc := range infraServices {
		if err := ps.serviceController.StartService(svc); err != nil {
			ps.logf("  ⚠️  Failed to start %s: %v", svc, err)
		} else {
			ps.logf("    - %s started", svc)
		}
	}

	// Wait a moment for infrastructure to stabilize
	exec.Command("sleep", "2").Run()

	// Start IPFS Cluster
	if err := ps.serviceController.StartService(clusterUnitName); err != nil {
		ps.logf("  ⚠️  Failed to start %s: %v", clusterUnitName, err)
	} else {
		ps.logf("    - %s started", clusterUnitName)
	}

	// Start application services
	appServices := []string{nodeUnitName, "debros-gateway.service"}
	for _, svc := range appServices {
		if err := ps.serviceController.StartService(svc); err != nil {
			ps.logf("  ⚠️  Failed to start %s: %v", svc, err)
		} else {
			ps.logf("    - %s started", svc)
		}
	}

	ps.logf("  ✓ All services started")
	return nil
}
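
// For reference, the units generated above encode this start-up ordering via
// their After=/Requires= directives (see the generators in services.go):
//
//	debros-ipfs-<type> → debros-ipfs-cluster-<type> → debros-node-<type> → debros-gateway
//
// debros-rqlite-<type> and debros-olric only depend on network-online.target,
// which is why Phase5 starts them alongside IPFS before bringing up the cluster.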

// LogSetupComplete logs completion information
func (ps *ProductionSetup) LogSetupComplete(peerID string) {
	ps.logf("\n" + strings.Repeat("=", 70))
	ps.logf("Setup Complete!")
	ps.logf(strings.Repeat("=", 70))
	ps.logf("\nNode Peer ID: %s", peerID)
	ps.logf("\nService Management:")
	ps.logf("  systemctl status debros-ipfs-bootstrap")
	ps.logf("  journalctl -u debros-node-bootstrap -f")
	ps.logf("  sudo tail -f %s/logs/node.log", ps.debrosDir)
	ps.logf("\nStart All Services:")
	ps.logf("  systemctl start debros-ipfs-bootstrap debros-ipfs-cluster-bootstrap debros-rqlite-bootstrap debros-olric debros-node-bootstrap debros-gateway")
	ps.logf("\nVerify Installation:")
	ps.logf("  curl http://localhost:6001/health")
	ps.logf("  curl http://localhost:5001/status\n")
}

208
pkg/environments/production/provisioner.go
Normal file
@ -0,0 +1,208 @@
package production

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

// FilesystemProvisioner manages directory creation and permissions
type FilesystemProvisioner struct {
	debrosHome string
	debrosDir  string
	logWriter  interface{} // Can be io.Writer for logging
}

// NewFilesystemProvisioner creates a new provisioner
func NewFilesystemProvisioner(debrosHome string) *FilesystemProvisioner {
	return &FilesystemProvisioner{
		debrosHome: debrosHome,
		debrosDir:  filepath.Join(debrosHome, ".debros"),
	}
}

// EnsureDirectoryStructure creates all required directories
func (fp *FilesystemProvisioner) EnsureDirectoryStructure() error {
	dirs := []string{
		fp.debrosDir,
		filepath.Join(fp.debrosDir, "configs"),
		filepath.Join(fp.debrosDir, "secrets"),
		filepath.Join(fp.debrosDir, "data"),
		filepath.Join(fp.debrosDir, "data", "bootstrap", "ipfs", "repo"),
		filepath.Join(fp.debrosDir, "data", "bootstrap", "ipfs-cluster"),
		filepath.Join(fp.debrosDir, "data", "bootstrap", "rqlite"),
		filepath.Join(fp.debrosDir, "data", "node", "ipfs", "repo"),
		filepath.Join(fp.debrosDir, "data", "node", "ipfs-cluster"),
		filepath.Join(fp.debrosDir, "data", "node", "rqlite"),
		filepath.Join(fp.debrosDir, "logs"),
		filepath.Join(fp.debrosDir, "tls-cache"),
		filepath.Join(fp.debrosDir, "backups"),
		filepath.Join(fp.debrosHome, "bin"),
		filepath.Join(fp.debrosHome, "src"),
	}

	for _, dir := range dirs {
		if err := os.MkdirAll(dir, 0755); err != nil {
			return fmt.Errorf("failed to create directory %s: %w", dir, err)
		}
	}

	return nil
}

// FixOwnership changes ownership of .debros directory to debros user
func (fp *FilesystemProvisioner) FixOwnership() error {
	cmd := exec.Command("chown", "-R", "debros:debros", fp.debrosDir)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to set ownership for %s: %w", fp.debrosDir, err)
	}

	// Also fix home directory ownership
	cmd = exec.Command("chown", "debros:debros", fp.debrosHome)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to set ownership for %s: %w", fp.debrosHome, err)
	}

	// Fix bin directory
	binDir := filepath.Join(fp.debrosHome, "bin")
	cmd = exec.Command("chown", "-R", "debros:debros", binDir)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to set ownership for %s: %w", binDir, err)
	}

	return nil
}

// UserProvisioner manages system user creation and sudoers setup
type UserProvisioner struct {
	username string
	home     string
	shell    string
}

// NewUserProvisioner creates a new user provisioner
func NewUserProvisioner(username, home, shell string) *UserProvisioner {
	if shell == "" {
		shell = "/bin/bash"
	}
	return &UserProvisioner{
		username: username,
		home:     home,
		shell:    shell,
	}
}

// UserExists checks if the system user exists
func (up *UserProvisioner) UserExists() bool {
	cmd := exec.Command("id", up.username)
	return cmd.Run() == nil
}

// CreateUser creates the system user
func (up *UserProvisioner) CreateUser() error {
	if up.UserExists() {
		return nil // User already exists
	}

	cmd := exec.Command("useradd", "-r", "-m", "-s", up.shell, "-d", up.home, up.username)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to create user %s: %w", up.username, err)
	}

	return nil
}

// SetupSudoersAccess creates a sudoers rule for the invoking user
func (up *UserProvisioner) SetupSudoersAccess(invokerUser string) error {
	if invokerUser == "" {
		return nil // Skip if no invoker
	}

	sudoersRule := fmt.Sprintf("%s ALL=(debros) NOPASSWD: ALL\n", invokerUser)
	sudoersFile := "/etc/sudoers.d/debros-access"

	// Check if rule already exists
	if existing, err := os.ReadFile(sudoersFile); err == nil {
		if strings.Contains(string(existing), invokerUser) {
			return nil // Rule already set
		}
	}

	// Write sudoers rule
	if err := os.WriteFile(sudoersFile, []byte(sudoersRule), 0440); err != nil {
		return fmt.Errorf("failed to create sudoers rule: %w", err)
	}

	// Validate sudoers file
	cmd := exec.Command("visudo", "-c", "-f", sudoersFile)
	if err := cmd.Run(); err != nil {
		os.Remove(sudoersFile) // Clean up on validation failure
		return fmt.Errorf("sudoers rule validation failed: %w", err)
	}

	return nil
}
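
// Example: if the installer is invoked as `sudo bash install.sh` by user
// "alice" (hypothetical), SUDO_USER is "alice" and the rule written to
// /etc/sudoers.d/debros-access is:
//
//	alice ALL=(debros) NOPASSWD: ALL
//
// i.e. alice may run commands as the debros service user without a password.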

// StateDetector checks for existing production state
type StateDetector struct {
	debrosDir string
}

// NewStateDetector creates a state detector
func NewStateDetector(debrosDir string) *StateDetector {
	return &StateDetector{
		debrosDir: debrosDir,
	}
}

// IsConfigured checks if basic configs exist
func (sd *StateDetector) IsConfigured() bool {
	nodeConfig := filepath.Join(sd.debrosDir, "configs", "node.yaml")
	gatewayConfig := filepath.Join(sd.debrosDir, "configs", "gateway.yaml")
	_, err1 := os.Stat(nodeConfig)
	_, err2 := os.Stat(gatewayConfig)
	return err1 == nil || err2 == nil
}

// HasSecrets checks if cluster secret and swarm key exist
func (sd *StateDetector) HasSecrets() bool {
	clusterSecret := filepath.Join(sd.debrosDir, "secrets", "cluster-secret")
	swarmKey := filepath.Join(sd.debrosDir, "secrets", "swarm.key")
	_, err1 := os.Stat(clusterSecret)
	_, err2 := os.Stat(swarmKey)
	return err1 == nil && err2 == nil
}

// HasIPFSData checks if the IPFS repo is initialized
func (sd *StateDetector) HasIPFSData() bool {
	ipfsRepoPath := filepath.Join(sd.debrosDir, "data", "bootstrap", "ipfs", "repo", "config")
	_, err := os.Stat(ipfsRepoPath)
	return err == nil
}

// HasRQLiteData checks if RQLite data exists
func (sd *StateDetector) HasRQLiteData() bool {
	rqliteDataPath := filepath.Join(sd.debrosDir, "data", "bootstrap", "rqlite")
	info, err := os.Stat(rqliteDataPath)
	return err == nil && info.IsDir()
}

// CheckBinaryInstallation checks if required binaries are in PATH
func (sd *StateDetector) CheckBinaryInstallation() error {
	binaries := []string{"ipfs", "ipfs-cluster-service", "rqlited", "olric-server"}
	var missing []string

	for _, bin := range binaries {
		if _, err := exec.LookPath(bin); err != nil {
			missing = append(missing, bin)
		}
	}

	if len(missing) > 0 {
		return fmt.Errorf("missing binaries: %s", strings.Join(missing, ", "))
	}

	return nil
}
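
// Typical use during an idempotent re-run (a sketch; the real call sites live
// in the setup flow, and the path is illustrative):
//
//	sd := NewStateDetector("/home/debros/.debros")
//	if sd.IsConfigured() && sd.HasSecrets() {
//		// skip secret/config generation and only verify binaries
//		_ = sd.CheckBinaryInstallation()
//	}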

333
pkg/environments/production/services.go
Normal file
@ -0,0 +1,333 @@
package production

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

// SystemdServiceGenerator generates systemd unit files
type SystemdServiceGenerator struct {
	debrosHome string
	debrosDir  string
}

// NewSystemdServiceGenerator creates a new service generator
func NewSystemdServiceGenerator(debrosHome, debrosDir string) *SystemdServiceGenerator {
	return &SystemdServiceGenerator{
		debrosHome: debrosHome,
		debrosDir:  debrosDir,
	}
}

// GenerateIPFSService generates the IPFS daemon systemd unit
func (ssg *SystemdServiceGenerator) GenerateIPFSService(nodeType string) string {
	var ipfsRepoPath string
	if nodeType == "bootstrap" {
		ipfsRepoPath = filepath.Join(ssg.debrosDir, "data", "bootstrap", "ipfs", "repo")
	} else {
		ipfsRepoPath = filepath.Join(ssg.debrosDir, "data", "node", "ipfs", "repo")
	}

	return fmt.Sprintf(`[Unit]
Description=IPFS Daemon (%s)
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
User=debros
Group=debros
Environment=HOME=%s
Environment=IPFS_PATH=%s
ExecStartPre=/bin/bash -c 'if [ -f %s/secrets/swarm.key ] && [ ! -f %s/swarm.key ]; then cp %s/secrets/swarm.key %s/swarm.key && chmod 600 %s/swarm.key; fi'
ExecStart=/usr/bin/ipfs daemon --enable-pubsub-experiment --repo-dir=%s
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=ipfs-%s

NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%s

[Install]
WantedBy=multi-user.target
`, nodeType, ssg.debrosHome, ipfsRepoPath, ssg.debrosDir, ipfsRepoPath, ssg.debrosDir, ipfsRepoPath, ipfsRepoPath, ipfsRepoPath, nodeType, ssg.debrosDir)
}

// GenerateIPFSClusterService generates the IPFS Cluster systemd unit
func (ssg *SystemdServiceGenerator) GenerateIPFSClusterService(nodeType string) string {
	var clusterPath string
	if nodeType == "bootstrap" {
		clusterPath = filepath.Join(ssg.debrosDir, "data", "bootstrap", "ipfs-cluster")
	} else {
		clusterPath = filepath.Join(ssg.debrosDir, "data", "node", "ipfs-cluster")
	}

	return fmt.Sprintf(`[Unit]
Description=IPFS Cluster Service (%s)
After=debros-ipfs-%s.service
Wants=debros-ipfs-%s.service
Requires=debros-ipfs-%s.service

[Service]
Type=simple
User=debros
Group=debros
WorkingDirectory=%s
Environment=HOME=%s
Environment=CLUSTER_PATH=%s
ExecStart=/usr/local/bin/ipfs-cluster-service daemon --config %s/service.json
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=ipfs-cluster-%s

NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%s

[Install]
WantedBy=multi-user.target
`, nodeType, nodeType, nodeType, nodeType, ssg.debrosHome, ssg.debrosHome, clusterPath, clusterPath, nodeType, ssg.debrosDir)
}

// GenerateRQLiteService generates the RQLite systemd unit
func (ssg *SystemdServiceGenerator) GenerateRQLiteService(nodeType string, httpPort, raftPort int, joinAddr string) string {
	var dataDir string
	if nodeType == "bootstrap" {
		dataDir = filepath.Join(ssg.debrosDir, "data", "bootstrap", "rqlite")
	} else {
		dataDir = filepath.Join(ssg.debrosDir, "data", "node", "rqlite")
	}

	args := fmt.Sprintf(
		`-http-addr 0.0.0.0:%d -http-adv-addr 127.0.0.1:%d -raft-adv-addr 127.0.0.1:%d -raft-addr 0.0.0.0:%d`,
		httpPort, httpPort, raftPort, raftPort,
	)

	if joinAddr != "" {
		args += fmt.Sprintf(` -join %s -join-attempts 30 -join-interval 10s`, joinAddr)
	}

	args += fmt.Sprintf(` %s`, dataDir)

	return fmt.Sprintf(`[Unit]
Description=RQLite Database (%s)
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
User=debros
Group=debros
Environment=HOME=%s
ExecStart=/usr/local/bin/rqlited %s
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=rqlite-%s

NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%s

[Install]
WantedBy=multi-user.target
`, nodeType, ssg.debrosHome, args, nodeType, ssg.debrosDir)
}
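
// Illustration: GenerateRQLiteService("bootstrap", 5001, 7001, "") with a
// debrosDir of /home/debros/.debros (path assumed) renders this ExecStart
// (wrapped here for readability; the actual unit has it on one line):
//
//	ExecStart=/usr/local/bin/rqlited -http-addr 0.0.0.0:5001 -http-adv-addr 127.0.0.1:5001 \
//	    -raft-adv-addr 127.0.0.1:7001 -raft-addr 0.0.0.0:7001 /home/debros/.debros/data/bootstrap/rqlite
//
// A non-empty joinAddr additionally appends -join <addr> -join-attempts 30 -join-interval 10s.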

// GenerateOlricService generates the Olric systemd unit
func (ssg *SystemdServiceGenerator) GenerateOlricService() string {
	olricConfigPath := filepath.Join(ssg.debrosDir, "configs", "olric", "config.yaml")

	return fmt.Sprintf(`[Unit]
Description=Olric Cache Server
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
User=debros
Group=debros
Environment=HOME=%s
Environment=OLRIC_SERVER_CONFIG=%s
ExecStart=/usr/local/bin/olric-server
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=olric

NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%s

[Install]
WantedBy=multi-user.target
`, ssg.debrosHome, olricConfigPath, ssg.debrosDir)
}

// GenerateNodeService generates the DeBros Node systemd unit
func (ssg *SystemdServiceGenerator) GenerateNodeService(nodeType string) string {
	var configFile string
	if nodeType == "bootstrap" {
		configFile = "bootstrap.yaml"
	} else {
		configFile = "node.yaml"
	}

	return fmt.Sprintf(`[Unit]
Description=DeBros Network Node (%s)
After=debros-ipfs-cluster-%s.service
Wants=debros-ipfs-cluster-%s.service
Requires=debros-ipfs-cluster-%s.service

[Service]
Type=simple
User=debros
Group=debros
WorkingDirectory=%s
Environment=HOME=%s
ExecStart=%s/bin/node --config %s/configs/%s
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=debros-node-%s

NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%s

[Install]
WantedBy=multi-user.target
`, nodeType, nodeType, nodeType, nodeType, ssg.debrosHome, ssg.debrosHome, ssg.debrosHome, ssg.debrosDir, configFile, nodeType, ssg.debrosDir)
}

// GenerateGatewayService generates the DeBros Gateway systemd unit
func (ssg *SystemdServiceGenerator) GenerateGatewayService(nodeType string) string {
	nodeService := fmt.Sprintf("debros-node-%s.service", nodeType)
	return fmt.Sprintf(`[Unit]
Description=DeBros Gateway
After=%s
Wants=%s

[Service]
Type=simple
User=debros
Group=debros
WorkingDirectory=%s
Environment=HOME=%s
ExecStart=%s/bin/gateway --config %s/configs/gateway.yaml
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=debros-gateway

AmbientCapabilities=CAP_NET_BIND_SERVICE
CapabilityBoundingSet=CAP_NET_BIND_SERVICE

NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%s

[Install]
WantedBy=multi-user.target
`, nodeService, nodeService, ssg.debrosHome, ssg.debrosHome, ssg.debrosHome, ssg.debrosDir, ssg.debrosDir)
}

// SystemdController manages systemd service operations
type SystemdController struct {
	systemdDir string
}

// NewSystemdController creates a new controller
func NewSystemdController() *SystemdController {
	return &SystemdController{
		systemdDir: "/etc/systemd/system",
	}
}

// WriteServiceUnit writes a systemd unit file
func (sc *SystemdController) WriteServiceUnit(name string, content string) error {
	unitPath := filepath.Join(sc.systemdDir, name)
	if err := os.WriteFile(unitPath, []byte(content), 0644); err != nil {
		return fmt.Errorf("failed to write unit file %s: %w", name, err)
	}
	return nil
}

// DaemonReload reloads the systemd daemon
func (sc *SystemdController) DaemonReload() error {
	cmd := exec.Command("systemctl", "daemon-reload")
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to reload systemd daemon: %w", err)
	}
	return nil
}

// EnableService enables a service to start on boot
func (sc *SystemdController) EnableService(name string) error {
	cmd := exec.Command("systemctl", "enable", name)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to enable service %s: %w", name, err)
	}
	return nil
}

// StartService starts a service immediately
func (sc *SystemdController) StartService(name string) error {
	cmd := exec.Command("systemctl", "start", name)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to start service %s: %w", name, err)
	}
	return nil
}

// RestartService restarts a service
func (sc *SystemdController) RestartService(name string) error {
	cmd := exec.Command("systemctl", "restart", name)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to restart service %s: %w", name, err)
	}
	return nil
}

// StopService stops a service
func (sc *SystemdController) StopService(name string) error {
	cmd := exec.Command("systemctl", "stop", name)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to stop service %s: %w", name, err)
	}
	return nil
}

// StatusService gets the status of a service
func (sc *SystemdController) StatusService(name string) (bool, error) {
	cmd := exec.Command("systemctl", "is-active", "--quiet", name)
	err := cmd.Run()
	if err == nil {
		return true, nil
	}

	// Check for "inactive" vs actual error
	if strings.Contains(err.Error(), "exit status 3") {
		return false, nil // Service is inactive
	}

	return false, fmt.Errorf("failed to check service status %s: %w", name, err)
}
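
// Sketch of the intended call sequence (mirrors Phase5CreateSystemdServices;
// error handling elided for brevity):
//
//	sc := NewSystemdController()
//	_ = sc.WriteServiceUnit("debros-olric.service", unit)
//	_ = sc.DaemonReload()
//	_ = sc.EnableService("debros-olric.service")
//	_ = sc.StartService("debros-olric.service")
//	active, _ := sc.StatusService("debros-olric.service")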

41
pkg/environments/templates/bootstrap.yaml
Normal file
@ -0,0 +1,41 @@
node:
  id: "{{.NodeID}}"
  type: "bootstrap"
  listen_addresses:
    - "/ip4/0.0.0.0/tcp/{{.P2PPort}}"
  data_dir: "{{.DataDir}}"
  max_connections: 50

database:
  data_dir: "{{.DataDir}}/rqlite"
  replication_factor: 3
  shard_count: 16
  max_database_size: 1073741824
  backup_interval: "24h"
  rqlite_port: { { .RQLiteHTTPPort } }
  rqlite_raft_port: { { .RQLiteRaftPort } }
  rqlite_join_address: ""
  cluster_sync_interval: "30s"
  peer_inactivity_limit: "24h"
  min_cluster_size: 1
ipfs:
  cluster_api_url: "http://localhost:{{.ClusterAPIPort}}"
  api_url: "http://localhost:{{.IPFSAPIPort}}"
  timeout: "60s"
  replication_factor: 3
  enable_encryption: true

discovery:
  bootstrap_peers: []
  discovery_interval: "15s"
  bootstrap_port: { { .P2PPort } }
  http_adv_address: "localhost:{{.RQLiteHTTPPort}}"
  raft_adv_address: "localhost:{{.RQLiteRaftPort}}"
  node_namespace: "default"

security:
  enable_tls: false

logging:
  level: "info"
  format: "console"

19
pkg/environments/templates/gateway.yaml
Normal file
@ -0,0 +1,19 @@
listen_addr: ":{{.ListenPort}}"
client_namespace: "default"
rqlite_dsn: "{{.RQLiteDSN}}"
bootstrap_peers:
{{range .BootstrapPeers}} - "{{.}}"
{{end}}
enable_https: {{.EnableHTTPS}}
{{if .EnableHTTPS}}domain_name: "{{.DomainName}}"
tls_cache_dir: "{{.TLSCacheDir}}"
{{end}}
olric_servers:
{{range .OlricServers}} - "{{.}}"
{{end}}
olric_timeout: "10s"
ipfs_cluster_api_url: "http://localhost:{{.ClusterAPIPort}}"
ipfs_api_url: "http://localhost:{{.IPFSAPIPort}}"
ipfs_timeout: "60s"
ipfs_replication_factor: 3

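# Example rendering with illustrative values (one bootstrap peer, HTTPS off):
#
#   listen_addr: ":6001"
#   client_namespace: "default"
#   bootstrap_peers:
#    - "/ip4/1.2.3.4/tcp/4001/p2p/Qm..."
#   enable_https: false
#   olric_servers:
#    - "127.0.0.1:3320"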
44
pkg/environments/templates/node.yaml
Normal file
@ -0,0 +1,44 @@
node:
  id: "{{.NodeID}}"
  type: "node"
  listen_addresses:
    - "/ip4/0.0.0.0/tcp/{{.P2PPort}}"
  data_dir: "{{.DataDir}}"
  max_connections: 50

database:
  data_dir: "{{.DataDir}}/rqlite"
  replication_factor: 3
  shard_count: 16
  max_database_size: 1073741824
  backup_interval: "24h"
  rqlite_port: {{.RQLiteHTTPPort}}
  rqlite_raft_port: {{.RQLiteRaftPort}}
  rqlite_join_address: "{{.RQLiteJoinAddress}}"
  cluster_sync_interval: "30s"
  peer_inactivity_limit: "24h"
  min_cluster_size: 1
ipfs:
  cluster_api_url: "http://localhost:{{.ClusterAPIPort}}"
  api_url: "http://localhost:{{.IPFSAPIPort}}"
  timeout: "60s"
  replication_factor: 3
  enable_encryption: true

discovery:
  bootstrap_peers:
{{range .BootstrapPeers}} - "{{.}}"
{{end}}
  discovery_interval: "15s"
  bootstrap_port: {{.P2PPort}}
  http_adv_address: "localhost:{{.RQLiteHTTPPort}}"
  raft_adv_address: "localhost:{{.RQLiteRaftPort}}"
  node_namespace: "default"

security:
  enable_tls: false

logging:
  level: "info"
  format: "console"

8
pkg/environments/templates/olric.yaml
Normal file
@ -0,0 +1,8 @@
server:
  bindAddr: "{{.BindAddr}}"
  bindPort: { { .HTTPPort } }

memberlist:
  environment: local
  bindAddr: "{{.BindAddr}}"
  bindPort: { { .MemberlistPort } }
191
pkg/environments/templates/render.go
Normal file
@ -0,0 +1,191 @@
package templates

import (
	"bytes"
	"embed"
	"fmt"
	"regexp"
	"text/template"
)

//go:embed *.yaml *.service
var templatesFS embed.FS

// BootstrapConfigData holds parameters for bootstrap.yaml rendering
type BootstrapConfigData struct {
	NodeID         string
	P2PPort        int
	DataDir        string
	RQLiteHTTPPort int
	RQLiteRaftPort int
	ClusterAPIPort int
	IPFSAPIPort    int // Default: 4501
}

// NodeConfigData holds parameters for node.yaml rendering
type NodeConfigData struct {
	NodeID            string
	P2PPort           int
	DataDir           string
	RQLiteHTTPPort    int
	RQLiteRaftPort    int
	RQLiteJoinAddress string
	BootstrapPeers    []string
	ClusterAPIPort    int
	IPFSAPIPort       int // Default: 4501+
}

// GatewayConfigData holds parameters for gateway.yaml rendering
type GatewayConfigData struct {
	ListenPort     int
	BootstrapPeers []string
	OlricServers   []string
	ClusterAPIPort int
	IPFSAPIPort    int // Default: 4501
	EnableHTTPS    bool
	DomainName     string
	TLSCacheDir    string
	RQLiteDSN      string
}

// OlricConfigData holds parameters for olric.yaml rendering
type OlricConfigData struct {
	BindAddr       string
	HTTPPort       int
	MemberlistPort int
}

// SystemdIPFSData holds parameters for systemd IPFS service rendering
type SystemdIPFSData struct {
	NodeType     string
	HomeDir      string
	IPFSRepoPath string
	SecretsDir   string
	DebrosDir    string
}

// SystemdIPFSClusterData holds parameters for systemd IPFS Cluster service rendering
type SystemdIPFSClusterData struct {
	NodeType    string
	HomeDir     string
	ClusterPath string
	DebrosDir   string
}

// SystemdRQLiteData holds parameters for systemd RQLite service rendering
type SystemdRQLiteData struct {
	NodeType  string
	HomeDir   string
	HTTPPort  int
	RaftPort  int
	DataDir   string
	JoinAddr  string
	DebrosDir string
}

// SystemdOlricData holds parameters for systemd Olric service rendering
type SystemdOlricData struct {
	HomeDir    string
	ConfigPath string
	DebrosDir  string
}

// SystemdNodeData holds parameters for systemd Node service rendering
type SystemdNodeData struct {
	NodeType   string
	HomeDir    string
	ConfigFile string
	DebrosDir  string
}

// SystemdGatewayData holds parameters for systemd Gateway service rendering
type SystemdGatewayData struct {
	HomeDir   string
	DebrosDir string
}

// RenderBootstrapConfig renders the bootstrap config template with the given data
func RenderBootstrapConfig(data BootstrapConfigData) (string, error) {
	return renderTemplate("bootstrap.yaml", data)
}

// RenderNodeConfig renders the node config template with the given data
func RenderNodeConfig(data NodeConfigData) (string, error) {
	return renderTemplate("node.yaml", data)
}

// RenderGatewayConfig renders the gateway config template with the given data
func RenderGatewayConfig(data GatewayConfigData) (string, error) {
	return renderTemplate("gateway.yaml", data)
}

// RenderOlricConfig renders the olric config template with the given data
func RenderOlricConfig(data OlricConfigData) (string, error) {
	return renderTemplate("olric.yaml", data)
}

// RenderIPFSService renders the IPFS systemd service template
func RenderIPFSService(data SystemdIPFSData) (string, error) {
	return renderTemplate("systemd_ipfs.service", data)
}

// RenderIPFSClusterService renders the IPFS Cluster systemd service template
func RenderIPFSClusterService(data SystemdIPFSClusterData) (string, error) {
	return renderTemplate("systemd_ipfs_cluster.service", data)
}

// RenderRQLiteService renders the RQLite systemd service template
func RenderRQLiteService(data SystemdRQLiteData) (string, error) {
	return renderTemplate("systemd_rqlite.service", data)
}

// RenderOlricService renders the Olric systemd service template
func RenderOlricService(data SystemdOlricData) (string, error) {
	return renderTemplate("systemd_olric.service", data)
}

// RenderNodeService renders the DeBros Node systemd service template
func RenderNodeService(data SystemdNodeData) (string, error) {
	return renderTemplate("systemd_node.service", data)
}

// RenderGatewayService renders the DeBros Gateway systemd service template
func RenderGatewayService(data SystemdGatewayData) (string, error) {
	return renderTemplate("systemd_gateway.service", data)
}

// normalizeTemplate normalizes template placeholders from spaced format { { .Var } } to {{.Var}}
func normalizeTemplate(content string) string {
	// Match patterns like { { .Variable } }, { {.Variable} }, { { .Variable} }, etc.
	// and convert them to {{.Variable}}. The regex specifically matches Go
	// template variables (starting with a dot).
	re := regexp.MustCompile(`\{\s*\{\s*(\.\S+)\s*\}\s*\}`)
	normalized := re.ReplaceAllString(content, "{{$1}}")
	return normalized
}
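
// Worked example (values hypothetical):
//
//	normalizeTemplate(`rqlite_port: { { .RQLiteHTTPPort } }`)
//	// => `rqlite_port: {{.RQLiteHTTPPort}}`
//
// An already-canonical {{.Var}} placeholder also matches the pattern but is
// rewritten to itself, so canonical templates pass through unchanged.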

// renderTemplate is a helper that renders any template from the embedded FS
func renderTemplate(name string, data interface{}) (string, error) {
	// Read template content
	tmplBytes, err := templatesFS.ReadFile(name)
	if err != nil {
		return "", fmt.Errorf("failed to read template %s: %w", name, err)
	}

	// Normalize template content to handle both { { .Var } } and {{.Var}} formats
	normalizedContent := normalizeTemplate(string(tmplBytes))

	// Parse normalized template
	tmpl, err := template.New(name).Parse(normalizedContent)
	if err != nil {
		return "", fmt.Errorf("failed to parse template %s: %w", name, err)
	}

	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, data); err != nil {
		return "", fmt.Errorf("failed to render template %s: %w", name, err)
	}

	return buf.String(), nil
}

166
pkg/environments/templates/render_test.go
Normal file
@ -0,0 +1,166 @@
package templates

import (
	"strings"
	"testing"
)

func TestRenderBootstrapConfig(t *testing.T) {
	data := BootstrapConfigData{
		NodeID:         "bootstrap",
		P2PPort:        4001,
		DataDir:        "/home/debros/.debros/bootstrap",
		RQLiteHTTPPort: 5001,
		RQLiteRaftPort: 7001,
		ClusterAPIPort: 9094,
		IPFSAPIPort:    5001,
	}

	result, err := RenderBootstrapConfig(data)
	if err != nil {
		t.Fatalf("RenderBootstrapConfig failed: %v", err)
	}

	// Check for required fields
	checks := []string{
		"id: \"bootstrap\"",
		"type: \"bootstrap\"",
		"tcp/4001",
		"rqlite_port: 5001",
		"rqlite_raft_port: 7001",
		"cluster_api_url: \"http://localhost:9094\"",
		"api_url: \"http://localhost:5001\"",
	}

	for _, check := range checks {
		if !strings.Contains(result, check) {
			t.Errorf("Bootstrap config missing: %s", check)
		}
	}
}

func TestRenderNodeConfig(t *testing.T) {
	bootstrapMultiaddr := "/ip4/127.0.0.1/tcp/4001/p2p/Qm1234567890"
	data := NodeConfigData{
		NodeID:            "node2",
		P2PPort:           4002,
		DataDir:           "/home/debros/.debros/node2",
		RQLiteHTTPPort:    5002,
		RQLiteRaftPort:    7002,
		RQLiteJoinAddress: "localhost:5001",
		BootstrapPeers:    []string{bootstrapMultiaddr},
		ClusterAPIPort:    9104,
		IPFSAPIPort:       5002,
	}

	result, err := RenderNodeConfig(data)
	if err != nil {
		t.Fatalf("RenderNodeConfig failed: %v", err)
	}

	// Check for required fields
	checks := []string{
		"id: \"node2\"",
		"type: \"node\"",
		"tcp/4002",
		"rqlite_port: 5002",
		"rqlite_raft_port: 7002",
		"rqlite_join_address: \"localhost:5001\"",
		bootstrapMultiaddr,
		"cluster_api_url: \"http://localhost:9104\"",
	}

	for _, check := range checks {
		if !strings.Contains(result, check) {
			t.Errorf("Node config missing: %s", check)
		}
	}
}

func TestRenderGatewayConfig(t *testing.T) {
	bootstrapMultiaddr := "/ip4/127.0.0.1/tcp/4001/p2p/Qm1234567890"
	data := GatewayConfigData{
		ListenPort:     6001,
		BootstrapPeers: []string{bootstrapMultiaddr},
		OlricServers:   []string{"127.0.0.1:3320"},
		ClusterAPIPort: 9094,
		IPFSAPIPort:    5001,
	}

	result, err := RenderGatewayConfig(data)
	if err != nil {
		t.Fatalf("RenderGatewayConfig failed: %v", err)
	}

	// Check for required fields
	checks := []string{
		"listen_addr: \":6001\"",
		bootstrapMultiaddr,
		"127.0.0.1:3320",
		"ipfs_cluster_api_url: \"http://localhost:9094\"",
		"ipfs_api_url: \"http://localhost:5001\"",
	}

	for _, check := range checks {
		if !strings.Contains(result, check) {
			t.Errorf("Gateway config missing: %s", check)
		}
	}
}

func TestRenderOlricConfig(t *testing.T) {
	data := OlricConfigData{
		BindAddr:       "127.0.0.1",
		HTTPPort:       3320,
		MemberlistPort: 3322,
	}

	result, err := RenderOlricConfig(data)
	if err != nil {
		t.Fatalf("RenderOlricConfig failed: %v", err)
	}

	// Check for required fields
	checks := []string{
		"bindAddr: \"127.0.0.1\"",
		"bindPort: 3320",
		"memberlist",
		"bindPort: 3322",
	}

	for _, check := range checks {
		if !strings.Contains(result, check) {
			t.Errorf("Olric config missing: %s", check)
		}
	}
}

func TestRenderWithMultipleBootstrapPeers(t *testing.T) {
	peers := []string{
		"/ip4/127.0.0.1/tcp/4001/p2p/Qm1111",
		"/ip4/127.0.0.1/tcp/4002/p2p/Qm2222",
	}

	data := NodeConfigData{
		NodeID:            "node-test",
		P2PPort:           4002,
		DataDir:           "/test/data",
		RQLiteHTTPPort:    5002,
		RQLiteRaftPort:    7002,
		RQLiteJoinAddress: "localhost:5001",
		BootstrapPeers:    peers,
		ClusterAPIPort:    9104,
		IPFSAPIPort:       5002,
	}

	result, err := RenderNodeConfig(data)
	if err != nil {
		t.Fatalf("RenderNodeConfig with multiple peers failed: %v", err)
	}

	for _, peer := range peers {
		if !strings.Contains(result, peer) {
			t.Errorf("Bootstrap peer missing: %s", peer)
		}
	}
}
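
// A minimal illustrative check (a sketch, not part of the suite above) that
// the spaced placeholder form used by bootstrap.yaml and olric.yaml
// normalizes to canonical Go template syntax:
func TestNormalizeSpacedPlaceholders(t *testing.T) {
	got := normalizeTemplate(`bindPort: { { .HTTPPort } }`)
	if got != `bindPort: {{.HTTPPort}}` {
		t.Errorf("normalizeTemplate produced %q", got)
	}
}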

29
pkg/environments/templates/systemd_gateway.service
Normal file
@ -0,0 +1,29 @@
[Unit]
Description=DeBros Gateway
After=debros-node-node.service
Wants=debros-node-node.service

[Service]
Type=simple
User=debros
Group=debros
WorkingDirectory={{.HomeDir}}
Environment=HOME={{.HomeDir}}
ExecStart={{.HomeDir}}/bin/gateway --config {{.DebrosDir}}/configs/gateway.yaml
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=debros-gateway

AmbientCapabilities=CAP_NET_BIND_SERVICE
CapabilityBoundingSet=CAP_NET_BIND_SERVICE

NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}

[Install]
WantedBy=multi-user.target

27
pkg/environments/templates/systemd_ipfs.service
Normal file
@ -0,0 +1,27 @@
[Unit]
Description=IPFS Daemon ({{.NodeType}})
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
User=debros
Group=debros
Environment=HOME={{.HomeDir}}
Environment=IPFS_PATH={{.IPFSRepoPath}}
ExecStartPre=/bin/bash -c 'if [ -f {{.SecretsDir}}/swarm.key ] && [ ! -f {{.IPFSRepoPath}}/swarm.key ]; then cp {{.SecretsDir}}/swarm.key {{.IPFSRepoPath}}/swarm.key && chmod 600 {{.IPFSRepoPath}}/swarm.key; fi'
ExecStart=/usr/bin/ipfs daemon --enable-pubsub-experiment --repo-dir={{.IPFSRepoPath}}
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=ipfs-{{.NodeType}}

NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}

[Install]
WantedBy=multi-user.target

28
pkg/environments/templates/systemd_ipfs_cluster.service
Normal file
@ -0,0 +1,28 @@
[Unit]
Description=IPFS Cluster Service ({{.NodeType}})
After=debros-ipfs-{{.NodeType}}.service
Wants=debros-ipfs-{{.NodeType}}.service
Requires=debros-ipfs-{{.NodeType}}.service

[Service]
Type=simple
User=debros
Group=debros
WorkingDirectory={{.HomeDir}}
Environment=HOME={{.HomeDir}}
Environment=CLUSTER_PATH={{.ClusterPath}}
ExecStart=/usr/local/bin/ipfs-cluster-service daemon --config {{.ClusterPath}}/service.json
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=ipfs-cluster-{{.NodeType}}

NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}

[Install]
WantedBy=multi-user.target

27
pkg/environments/templates/systemd_node.service
Normal file
@ -0,0 +1,27 @@
[Unit]
Description=DeBros Network Node ({{.NodeType}})
After=debros-ipfs-cluster-{{.NodeType}}.service
Wants=debros-ipfs-cluster-{{.NodeType}}.service
Requires=debros-ipfs-cluster-{{.NodeType}}.service

[Service]
Type=simple
User=debros
Group=debros
WorkingDirectory={{.HomeDir}}
Environment=HOME={{.HomeDir}}
ExecStart={{.HomeDir}}/bin/node --config {{.DebrosDir}}/configs/{{.ConfigFile}}
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=debros-node-{{.NodeType}}

NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}

[Install]
WantedBy=multi-user.target

26
pkg/environments/templates/systemd_olric.service
Normal file
@ -0,0 +1,26 @@
[Unit]
Description=Olric Cache Server
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
User=debros
Group=debros
Environment=HOME={{.HomeDir}}
Environment=OLRIC_SERVER_CONFIG={{.ConfigPath}}
ExecStart=/usr/local/bin/olric-server
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=olric

NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}

[Install]
WantedBy=multi-user.target

25
pkg/environments/templates/systemd_rqlite.service
Normal file
@ -0,0 +1,25 @@
[Unit]
Description=RQLite Database ({{.NodeType}})
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
User=debros
Group=debros
Environment=HOME={{.HomeDir}}
ExecStart=/usr/local/bin/rqlited -http-addr 0.0.0.0:{{.HTTPPort}} -http-adv-addr 127.0.0.1:{{.HTTPPort}} -raft-adv-addr 127.0.0.1:{{.RaftPort}} -raft-addr 0.0.0.0:{{.RaftPort}}{{if .JoinAddr}} -join {{.JoinAddr}} -join-attempts 30 -join-interval 10s{{end}} {{.DataDir}}
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=rqlite-{{.NodeType}}

NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}

[Install]
WantedBy=multi-user.target

@ -1,17 +1,21 @@
|
||||
#!/bin/bash
|
||||
|
||||
# DeBros Network Installation Script
|
||||
# Downloads network-cli from GitHub releases and runs interactive setup
|
||||
# Downloads network-cli from GitHub releases and runs the new 'network-cli prod install' flow
|
||||
#
|
||||
# Supported: Ubuntu 18.04+, Debian 10+
|
||||
# Supported: Ubuntu 20.04+, Debian 11+
|
||||
#
|
||||
# Usage:
|
||||
# curl -fsSL https://install.debros.network | bash
|
||||
# OR
|
||||
# bash scripts/install-debros-network.sh
|
||||
# OR with specific flags:
|
||||
# bash scripts/install-debros-network.sh --bootstrap
|
||||
# bash scripts/install-debros-network.sh --vps-ip 1.2.3.4 --peers /ip4/1.2.3.4/tcp/4001/p2p/Qm...
|
||||
# bash scripts/install-debros-network.sh --domain example.com
|
||||
|
||||
set -e
|
||||
trap 'error "An error occurred. Installation aborted."; execute_traps; exit 1' ERR
|
||||
trap 'error "An error occurred. Installation aborted."; exit 1' ERR
|
||||
|
||||
# Color codes
|
||||
RED='\033[0;31m'
|
||||
@ -26,58 +30,11 @@ GITHUB_REPO="DeBrosOfficial/network"
|
||||
GITHUB_API="https://api.github.com/repos/$GITHUB_REPO"
|
||||
INSTALL_DIR="/usr/local/bin"
|
||||
|
||||
# Upgrade detection flags
|
||||
PREVIOUS_INSTALL=false
|
||||
SETUP_EXECUTED=false
|
||||
PREVIOUS_VERSION=""
|
||||
LATEST_VERSION=""
|
||||
VERSION_CHANGED=false
|
||||
|
||||
# Cleanup handlers (for proper trap stacking)
|
||||
declare -a CLEANUP_HANDLERS
|
||||
|
||||
log() { echo -e "${CYAN}[$(date '+%Y-%m-%d %H:%M:%S')]${NOCOLOR} $1"; }
|
||||
error() { echo -e "${RED}[ERROR]${NOCOLOR} $1" >&2; }
|
||||
success() { echo -e "${GREEN}[SUCCESS]${NOCOLOR} $1"; }
|
||||
warning() { echo -e "${YELLOW}[WARNING]${NOCOLOR} $1" >&2; }
|
||||
|
||||
# Stack-based trap cleanup
|
||||
push_trap() {
|
||||
local handler="$1"
|
||||
local signal="${2:-EXIT}"
|
||||
CLEANUP_HANDLERS+=("$handler")
|
||||
}
|
||||
|
||||
execute_traps() {
|
||||
for ((i=${#CLEANUP_HANDLERS[@]}-1; i>=0; i--)); do
|
||||
eval "${CLEANUP_HANDLERS[$i]}"
|
||||
done
|
||||
}
|
||||
|
||||
# REQUIRE INTERACTIVE MODE
|
||||
if [ ! -t 0 ]; then
|
||||
error "This script requires an interactive terminal."
|
||||
echo -e ""
|
||||
echo -e "${YELLOW}Please run this script directly:${NOCOLOR}"
|
||||
echo -e "${CYAN} bash <(curl -fsSL https://install.debros.network)${NOCOLOR}"
|
||||
echo -e ""
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if running as root
|
||||
if [[ $EUID -eq 0 ]]; then
|
||||
error "This script should NOT be run as root"
|
||||
echo -e "${YELLOW}Run as a regular user with sudo privileges:${NOCOLOR}"
|
||||
echo -e "${CYAN} bash $0${NOCOLOR}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check for sudo
|
||||
if ! command -v sudo &>/dev/null; then
|
||||
error "sudo command not found. Please ensure you have sudo privileges."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
display_banner() {
|
||||
echo -e "${BLUE}========================================================================${NOCOLOR}"
|
||||
echo -e "${CYAN}
|
||||
@ -88,7 +45,7 @@ display_banner() {
|
||||
|____/ \\___|____/|_| \\___/|___/ |_| \\_|\\___|\\__| \\_/\\_/ \\___/|_| |_|\\_\\
|
||||
${NOCOLOR}"
|
||||
echo -e "${BLUE}========================================================================${NOCOLOR}"
|
||||
echo -e "${GREEN} Quick Install Script ${NOCOLOR}"
|
||||
echo -e "${GREEN} Production Installation ${NOCOLOR}"
|
||||
echo -e "${BLUE}========================================================================${NOCOLOR}"
|
||||
}
|
||||
|
||||
@ -102,15 +59,13 @@ detect_os() {
|
||||
OS=$ID
|
||||
VERSION=$VERSION_ID
|
||||
|
||||
# Only support Debian and Ubuntu
|
||||
# Support Debian and Ubuntu
|
||||
case $OS in
|
||||
ubuntu|debian)
|
||||
log "Detected OS: $OS ${VERSION:-unknown}"
|
||||
;;
|
||||
*)
|
||||
error "Unsupported operating system: $OS"
|
||||
echo -e "${YELLOW}This script only supports Ubuntu 18.04+ and Debian 10+${NOCOLOR}"
|
||||
exit 1
|
||||
warning "Unsupported operating system: $OS (may not work)"
|
||||
;;
|
||||
esac
|
||||
}
|
||||
@@ -133,37 +88,22 @@ check_architecture() {
    log "Architecture: $ARCH (using $GITHUB_ARCH)"
}

check_dependencies() {
    log "Checking required tools..."

    local missing_deps=()

    for cmd in curl tar; do
        if ! command -v $cmd &>/dev/null; then
            missing_deps+=("$cmd")
check_root() {
    if [[ $EUID -ne 0 ]]; then
        error "This script must be run as root"
        echo -e "${YELLOW}Please run with sudo:${NOCOLOR}"
        echo -e "${CYAN}  sudo bash <(curl -fsSL https://install.debros.network)${NOCOLOR}"
        exit 1
    fi
    done

    if [ ${#missing_deps[@]} -gt 0 ]; then
        log "Installing missing dependencies: ${missing_deps[*]}"
        sudo apt update
        sudo apt install -y "${missing_deps[@]}"
    fi

    success "All required tools available"
}

get_latest_release() {
    log "Fetching latest release information..."
    log "Fetching latest release..."

    # Check if jq is available for robust JSON parsing
    if command -v jq &>/dev/null; then
        # Use jq for structured JSON parsing
        LATEST_RELEASE=$(curl -fsSL -H "Accept: application/vnd.github+json" "$GITHUB_API/releases" | \
            jq -r '.[] | select(.prerelease == false and .draft == false) | .tag_name' | head -1)
    else
        # Fallback to grep-based parsing
        log "Note: jq not available, using basic parsing (consider installing jq for robustness)"
        LATEST_RELEASE=$(curl -fsSL "$GITHUB_API/releases" | \
            grep -v "prerelease.*true" | \
            grep -v "draft.*true" | \
@@ -173,574 +113,58 @@ get_latest_release() {
    fi

    if [ -z "$LATEST_RELEASE" ]; then
        error "Could not determine latest release"
        error "Could not determine latest release version"
        exit 1
    fi

    log "Latest release: $LATEST_RELEASE"
}

download_and_install() {
    log "Downloading network-cli..."
download_and_install_cli() {
    BINARY_NAME="network-cli_${LATEST_RELEASE#v}_linux_${GITHUB_ARCH}"
    DOWNLOAD_URL="$GITHUB_REPO/releases/download/$LATEST_RELEASE/$BINARY_NAME"

    # Construct download URL
    DOWNLOAD_URL="https://github.com/$GITHUB_REPO/releases/download/$LATEST_RELEASE/debros-network_${LATEST_RELEASE#v}_linux_${GITHUB_ARCH}.tar.gz"
    CHECKSUM_URL="https://github.com/$GITHUB_REPO/releases/download/$LATEST_RELEASE/checksums.txt"

    # Create temporary directory
    TEMP_DIR=$(mktemp -d)
    push_trap "rm -rf $TEMP_DIR" EXIT

    # Download
    log "Downloading from: $DOWNLOAD_URL"
    if ! curl -fsSL -o "$TEMP_DIR/network-cli.tar.gz" "$DOWNLOAD_URL"; then
    log "Downloading network-cli from GitHub releases..."
    if ! curl -fsSL -o /tmp/network-cli "https://github.com/$DOWNLOAD_URL"; then
        error "Failed to download network-cli"
        exit 1
    fi

    # Try to download and verify checksum
    CHECKSUM_FILE="$TEMP_DIR/checksums.txt"
    if curl -fsSL -o "$CHECKSUM_FILE" "$CHECKSUM_URL" 2>/dev/null; then
        log "Verifying checksum..."
        cd "$TEMP_DIR"
        if command -v sha256sum &>/dev/null; then
            if sha256sum -c "$CHECKSUM_FILE" --ignore-missing >/dev/null 2>&1; then
                success "Checksum verified"
            else
                warning "Checksum verification failed (continuing anyway)"
            fi
        else
            log "sha256sum not available, skipping checksum verification"
        fi
    else
        log "Checksums not available for this release (continuing without verification)"
    fi
    chmod +x /tmp/network-cli

    # Extract
    log "Extracting network-cli..."
    cd "$TEMP_DIR"
    tar xzf network-cli.tar.gz

    # Install
    log "Installing to $INSTALL_DIR..."
    sudo cp network-cli "$INSTALL_DIR/"
    sudo chmod +x "$INSTALL_DIR/network-cli"
    log "Installing network-cli to $INSTALL_DIR..."
    mv /tmp/network-cli "$INSTALL_DIR/network-cli"

    success "network-cli installed successfully"
}

check_existing_installation() {
    if command -v network-cli &>/dev/null 2>&1; then
        PREVIOUS_INSTALL=true
        PREVIOUS_VERSION=$(network-cli version 2>/dev/null | head -n1 || echo "unknown")
        echo -e ""
        echo -e "${YELLOW}⚠️  Existing installation detected: ${PREVIOUS_VERSION}${NOCOLOR}"
        echo -e ""

        # Version will be compared after fetching latest release
        # If they match, we skip the service stop/restart to minimize downtime
    else
        log "No previous installation detected - performing fresh install"
    fi
}

compare_versions() {
    # Compare previous and latest versions
    if [ "$PREVIOUS_INSTALL" = true ] && [ ! -z "$PREVIOUS_VERSION" ] && [ ! -z "$LATEST_VERSION" ]; then
        if [ "$PREVIOUS_VERSION" = "$LATEST_VERSION" ]; then
            VERSION_CHANGED=false
            log "Installed version ($PREVIOUS_VERSION) matches latest release ($LATEST_VERSION)"
            log "Skipping service restart - no upgrade needed"
            return 0
        else
            VERSION_CHANGED=true
            log "Version change detected: $PREVIOUS_VERSION → $LATEST_VERSION"
            log "Services will be stopped before updating."
            echo -e ""

            # Check if services are running
            if sudo network-cli service status all >/dev/null 2>&1; then
                log "Stopping DeBros services before upgrade..."
                log "Note: Anon (if running) will not be stopped as it may be managed separately"
                if sudo network-cli service stop all; then
                    success "DeBros services stopped successfully"
                else
                    warning "Failed to stop some services (continuing anyway)"
                fi
            else
                log "DeBros services already stopped or not running"
            fi
        fi
    fi
}

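compare_versions only needs an equality test, since any mismatch takes the stop-and-upgrade path. If ordered comparison were ever required (for example, to refuse downgrades), one common shell idiom is sort -V; a sketch under that assumption:

# Hypothetical ordering check - not part of this script
highest=$(printf '%s\n%s\n' "$PREVIOUS_VERSION" "$LATEST_VERSION" | sort -V | tail -n1)
if [ "$highest" = "$PREVIOUS_VERSION" ] && [ "$PREVIOUS_VERSION" != "$LATEST_VERSION" ]; then
    warning "Installed version is newer than the latest release - skipping downgrade"
fi
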
verify_installation() {
    if command -v network-cli &>/dev/null; then
        INSTALLED_VERSION=$(network-cli version 2>/dev/null || echo "unknown")
        success "network-cli is ready: $INSTALLED_VERSION"
        return 0
    else
        error "network-cli not found in PATH"
        return 1
    fi
}

# Check if port 9050 is in use (Anon SOCKS port)
is_anon_running() {
    # Check if port 9050 is listening
    if command -v ss &>/dev/null; then
        if ss -tlnp 2>/dev/null | grep -q ":9050"; then
            return 0
        fi
    elif command -v netstat &>/dev/null; then
        if netstat -tlnp 2>/dev/null | grep -q ":9050"; then
            return 0
        fi
    elif command -v lsof &>/dev/null; then
        # Use non-interactive sudo (-n) so a password prompt can't hang the check
        if sudo -n lsof -i :9050 >/dev/null 2>&1; then
            return 0
        fi
    fi

    # Fallback: assume Anon is not running if we can't determine
    return 1
}

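Each probe above depends on an external tool (ss, netstat, or lsof). If none are present, the function silently assumes Anon is down; a dependency-free extra branch could use bash's built-in /dev/tcp, which tests whether anything accepts connections rather than parsing listen tables. A sketch, offered as an assumption rather than part of the script:

# Hypothetical fallback branch: bash can open TCP sockets via /dev/tcp
if (exec 3<>/dev/tcp/127.0.0.1/9050) 2>/dev/null; then
    return 0    # something accepted a connection on port 9050
fi
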
install_anon() {
    echo -e ""
    echo -e "${BLUE}========================================${NOCOLOR}"
    echo -e "${GREEN}Step 1.5: Install Anyone Relay (Anon)${NOCOLOR}"
    echo -e "${BLUE}========================================${NOCOLOR}"
    echo -e ""

    log "Checking Anyone relay (Anon) status..."

    # Check if Anon is already running on port 9050
    if is_anon_running; then
        success "Anon is already running on port 9050"
        log "Skipping Anon installation - using existing instance"
        configure_anon_logs
        configure_firewall_for_anon
        return 0
    fi

    # Check if anon binary is already installed
    if command -v anon &>/dev/null; then
        success "Anon binary already installed"
        log "Anon is installed but not running. You can start it manually if needed."
        configure_anon_logs
        configure_firewall_for_anon
        return 0
    fi

    log "Installing Anyone relay for anonymous networking..."

    # Install via APT (official method from docs.anyone.io)
    log "Adding Anyone APT repository..."

    # Add GPG key
    if ! curl -fsSL https://deb.anyone.io/gpg.key | sudo gpg --dearmor -o /usr/share/keyrings/anyone-archive-keyring.gpg 2>/dev/null; then
        warning "Failed to add Anyone GPG key"
        log "You can manually install later with:"
        log "  curl -fsSL https://deb.anyone.io/gpg.key | sudo gpg --dearmor -o /usr/share/keyrings/anyone-archive-keyring.gpg"
        log "  echo 'deb [signed-by=/usr/share/keyrings/anyone-archive-keyring.gpg] https://deb.anyone.io/ anyone main' | sudo tee /etc/apt/sources.list.d/anyone.list"
        log "  sudo apt update && sudo apt install -y anon"
        return 1
    fi

    # Add repository
    echo "deb [signed-by=/usr/share/keyrings/anyone-archive-keyring.gpg] https://deb.anyone.io/ anyone main" | sudo tee /etc/apt/sources.list.d/anyone.list >/dev/null

    # Preseed terms acceptance to avoid interactive prompt
    log "Pre-accepting Anon terms and conditions..."
    # Try multiple debconf question formats
    echo "anon anon/terms boolean true" | sudo debconf-set-selections
    echo "anon anon/terms seen true" | sudo debconf-set-selections
    # Also try with select/string format
    echo "anon anon/terms select true" | sudo debconf-set-selections || true

    # Query debconf to verify the question exists and set it properly
    # Some packages use different question formats
    sudo debconf-get-selections | grep -i anon || true

    # Create anonrc directory and file with AgreeToTerms before installation
    # This ensures terms are accepted even if the post-install script checks the file
    sudo mkdir -p /etc/anon
    if [ ! -f /etc/anon/anonrc ]; then
        echo "AgreeToTerms 1" | sudo tee /etc/anon/anonrc >/dev/null
    fi

    # Also create a terms-agreement file if Anon checks for it
    # Check multiple possible locations where Anon might look for terms acceptance
    sudo mkdir -p /var/lib/anon
    echo "agreed" | sudo tee /var/lib/anon/terms-agreement >/dev/null 2>&1 || true
    sudo mkdir -p /usr/share/anon
    echo "agreed" | sudo tee /usr/share/anon/terms-agreement >/dev/null 2>&1 || true
    # Also create near the GPG keyring directory (as the user suggested)
    sudo mkdir -p /usr/share/keyrings/anon
    echo "agreed" | sudo tee /usr/share/keyrings/anon/terms-agreement >/dev/null 2>&1 || true
    # Create in the keyring directory itself as a marker file
    echo "agreed" | sudo tee /usr/share/keyrings/anyone-terms-agreed >/dev/null 2>&1 || true

    # Update and install with non-interactive frontend
    log "Installing Anon package..."
    sudo apt update -qq

    # Use DEBIAN_FRONTEND=noninteractive and set debconf values directly via apt-get options
    # This is more reliable than just debconf-set-selections
    if ! sudo DEBIAN_FRONTEND=noninteractive \
        apt-get install -y \
        -o Dpkg::Options::="--force-confdef" \
        -o Dpkg::Options::="--force-confold" \
        anon; then
        warning "Anon installation failed"
        return 1
    fi

    # Verify installation
    if ! command -v anon &>/dev/null; then
        warning "Anon installation may have failed"
        return 1
    fi

    success "Anon installed successfully"

    # Configure with sensible defaults
    configure_anon_defaults

    # Configure log directory
    configure_anon_logs

    # Configure firewall if present
    configure_firewall_for_anon

    # Enable and start service
    log "Enabling Anon service..."
    sudo systemctl enable anon 2>/dev/null || true
    sudo systemctl start anon 2>/dev/null || true

    if systemctl is-active --quiet anon; then
        success "Anon service is running"
    else
        warning "Anon service may not be running. Check: sudo systemctl status anon"
    fi

    return 0
}

configure_anon_defaults() {
    log "Configuring Anon with default settings..."

    HOSTNAME=$(hostname -s 2>/dev/null || echo "debros-node")

    # Create or update anonrc with our defaults
    if [ -f /etc/anon/anonrc ]; then
        # Backup existing config
        sudo cp /etc/anon/anonrc /etc/anon/anonrc.bak 2>/dev/null || true

        # Update key settings if not already set
        if ! grep -q "^Nickname" /etc/anon/anonrc; then
            echo "Nickname ${HOSTNAME}" | sudo tee -a /etc/anon/anonrc >/dev/null
        fi

        if ! grep -q "^ControlPort" /etc/anon/anonrc; then
            echo "ControlPort 9051" | sudo tee -a /etc/anon/anonrc >/dev/null
        fi

        if ! grep -q "^SocksPort" /etc/anon/anonrc; then
            echo "SocksPort 9050" | sudo tee -a /etc/anon/anonrc >/dev/null
        fi

        # Auto-accept terms in config file
        if ! grep -q "^AgreeToTerms" /etc/anon/anonrc; then
            echo "AgreeToTerms 1" | sudo tee -a /etc/anon/anonrc >/dev/null
        fi

        log "  Nickname: ${HOSTNAME}"
        log "  ORPort: 9001 (default)"
        log "  ControlPort: 9051"
        log "  SOCKSPort: 9050"
        log "  AgreeToTerms: 1 (auto-accepted)"
    fi
}

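For reference, a fresh host that went through the preseeding step and these defaults would end up with an anonrc roughly like the following (an illustrative sketch, not captured output; the Nickname value depends on the host and ORPort stays at the package default):

AgreeToTerms 1
Nickname debros-node
ControlPort 9051
SocksPort 9050
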
configure_anon_logs() {
    log "Configuring Anon logs..."

    # Create log directory
    sudo mkdir -p /home/debros/.debros/logs/anon

    # Change ownership to debian-anon (the user anon runs as)
    sudo chown -R debian-anon:debian-anon /home/debros/.debros/logs/anon 2>/dev/null || true

    # Update anonrc to point logs to our directory
    if [ -f /etc/anon/anonrc ]; then
        sudo sed -i.bak 's|Log notice file.*|Log notice file /home/debros/.debros/logs/anon/notices.log|g' /etc/anon/anonrc
        success "Anon logs configured to /home/debros/.debros/logs/anon"
    fi
}

configure_firewall_for_anon() {
    log "Checking firewall configuration..."

    # Check for UFW
    if command -v ufw &>/dev/null && sudo ufw status | grep -q "Status: active"; then
        log "UFW detected and active, adding Anon ports..."
        sudo ufw allow 9001/tcp comment 'Anon ORPort' 2>/dev/null || true
        sudo ufw allow 9051/tcp comment 'Anon ControlPort' 2>/dev/null || true
        success "UFW rules added for Anon"
        return 0
    fi

    # Check for firewalld
    if command -v firewall-cmd &>/dev/null && sudo firewall-cmd --state 2>/dev/null | grep -q "running"; then
        log "firewalld detected and active, adding Anon ports..."
        sudo firewall-cmd --permanent --add-port=9001/tcp 2>/dev/null || true
        sudo firewall-cmd --permanent --add-port=9051/tcp 2>/dev/null || true
        sudo firewall-cmd --reload 2>/dev/null || true
        success "firewalld rules added for Anon"
        return 0
    fi

    # Check for iptables
    if command -v iptables &>/dev/null; then
        # Check if iptables has any rules (indicating it's in use)
        if sudo iptables -L -n | grep -q "Chain INPUT"; then
            log "iptables detected, adding Anon ports..."
            sudo iptables -A INPUT -p tcp --dport 9001 -j ACCEPT -m comment --comment "Anon ORPort" 2>/dev/null || true
            sudo iptables -A INPUT -p tcp --dport 9051 -j ACCEPT -m comment --comment "Anon ControlPort" 2>/dev/null || true

            # Try to save rules if iptables-persistent is available
            if command -v netfilter-persistent &>/dev/null; then
                sudo netfilter-persistent save 2>/dev/null || true
            elif command -v iptables-save &>/dev/null; then
                sudo iptables-save | sudo tee /etc/iptables/rules.v4 >/dev/null 2>&1 || true
            fi
            success "iptables rules added for Anon"
            return 0
        fi
    fi

    log "No active firewall detected, skipping firewall configuration"
}

configure_firewall_for_olric() {
    log "Checking firewall configuration for Olric..."

    # Check for UFW
    if command -v ufw &>/dev/null && sudo ufw status | grep -q "Status: active"; then
        log "UFW detected and active, adding Olric ports..."
        sudo ufw allow 3320/tcp comment 'Olric HTTP API' 2>/dev/null || true
        sudo ufw allow 3322/tcp comment 'Olric Memberlist' 2>/dev/null || true
        success "UFW rules added for Olric"
        return 0
    fi

    # Check for firewalld
    if command -v firewall-cmd &>/dev/null && sudo firewall-cmd --state 2>/dev/null | grep -q "running"; then
        log "firewalld detected and active, adding Olric ports..."
        sudo firewall-cmd --permanent --add-port=3320/tcp 2>/dev/null || true
        sudo firewall-cmd --permanent --add-port=3322/tcp 2>/dev/null || true
        sudo firewall-cmd --reload 2>/dev/null || true
        success "firewalld rules added for Olric"
        return 0
    fi

    # Check for iptables
    if command -v iptables &>/dev/null; then
        # Check if iptables has any rules (indicating it's in use)
        if sudo iptables -L -n | grep -q "Chain INPUT"; then
            log "iptables detected, adding Olric ports..."
            sudo iptables -A INPUT -p tcp --dport 3320 -j ACCEPT -m comment --comment "Olric HTTP API" 2>/dev/null || true
            sudo iptables -A INPUT -p tcp --dport 3322 -j ACCEPT -m comment --comment "Olric Memberlist" 2>/dev/null || true

            # Try to save rules if iptables-persistent is available
            if command -v netfilter-persistent &>/dev/null; then
                sudo netfilter-persistent save 2>/dev/null || true
            elif command -v iptables-save &>/dev/null; then
                sudo iptables-save | sudo tee /etc/iptables/rules.v4 >/dev/null 2>&1 || true
            fi
            success "iptables rules added for Olric"
            return 0
        fi
    fi

    log "No active firewall detected for Olric, skipping firewall configuration"
}

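The two firewall functions are identical except for the ports and rule comments. A consolidation could factor the detection into one shared helper; the sketch below is a hypothetical refactor, not code from this commit:

# Hypothetical shared helper: open one TCP port on whichever firewall is active
allow_tcp_port() {
    local port="$1" comment="$2"
    if command -v ufw &>/dev/null && sudo ufw status | grep -q "Status: active"; then
        sudo ufw allow "${port}/tcp" comment "$comment" 2>/dev/null || true
    elif command -v firewall-cmd &>/dev/null && sudo firewall-cmd --state 2>/dev/null | grep -q "running"; then
        sudo firewall-cmd --permanent --add-port="${port}/tcp" 2>/dev/null || true
        sudo firewall-cmd --reload 2>/dev/null || true
    elif command -v iptables &>/dev/null; then
        sudo iptables -A INPUT -p tcp --dport "$port" -j ACCEPT -m comment --comment "$comment" 2>/dev/null || true
    fi
}
# Usage: allow_tcp_port 9001 'Anon ORPort'; allow_tcp_port 3320 'Olric HTTP API'
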
run_setup() {
    echo -e ""
    echo -e "${BLUE}========================================${NOCOLOR}"
    echo -e "${GREEN}Step 2: Run Interactive Setup${NOCOLOR}"
    echo -e "${BLUE}========================================${NOCOLOR}"
    echo -e ""

    log "The setup command will:"
    log "  • Create system user and directories"
    log "  • Install dependencies (RQLite, etc.)"
    log "  • Build DeBros binaries"
    log "  • Configure network settings"
    log "  • Create and start systemd services"
    echo -e ""

    echo -e "${YELLOW}Ready to run setup? This will prompt for configuration details.${NOCOLOR}"
    echo -n "Continue? (yes/no): "
    read -r CONTINUE_SETUP

    if [[ "$CONTINUE_SETUP" != "yes" && "$CONTINUE_SETUP" != "y" ]]; then
        echo -e ""
        success "network-cli installed successfully!"
        echo -e ""
        echo -e "${CYAN}To complete setup later, run:${NOCOLOR}"
        echo -e "${GREEN}  sudo network-cli setup${NOCOLOR}"
        echo -e ""
        return 0
    fi

    echo -e ""
    log "Running setup (requires sudo)..."
    SETUP_EXECUTED=true
    sudo network-cli setup
}

perform_health_check() {
    echo -e ""
    echo -e "${BLUE}========================================${NOCOLOR}"
    log "Performing post-install health checks..."
    echo -e "${BLUE}========================================${NOCOLOR}"
    echo -e ""

    local health_ok=true

    # Give services a moment to start if they were just restarted
    sleep 2

    # Check gateway health
    if curl -sf http://localhost:6001/health >/dev/null 2>&1; then
        success "Gateway health check passed"
    else
        warning "Gateway health check failed - check logs with: sudo network-cli service logs gateway"
        health_ok=false
    fi

    # Check if node is running (may not respond immediately)
    if sudo network-cli service status node >/dev/null 2>&1; then
        success "Node service is running"
    else
        warning "Node service is not running - check with: sudo network-cli service status node"
        health_ok=false
    fi

    echo -e ""
    if [ "$health_ok" = true ]; then
        success "All health checks passed!"
    else
        warning "Some health checks failed - review logs and start services if needed"
    fi
    echo -e ""
}

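The fixed two-second sleep can be tight on slower hosts, where the gateway may still be starting when curl fires. A polling variant, an assumption rather than what the script currently does, would retry before declaring failure:

# Hypothetical retry loop: poll the gateway for up to ~30 seconds
for _ in $(seq 1 15); do
    curl -sf http://localhost:6001/health >/dev/null 2>&1 && break
    sleep 2
done
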
show_completion() {
    echo -e ""
    echo -e "${BLUE}========================================================================${NOCOLOR}"
    success "DeBros Network installation complete!"
    echo -e "${BLUE}========================================================================${NOCOLOR}"
    echo -e ""
    echo -e "${GREEN}Next Steps:${NOCOLOR}"
    echo -e "  • Verify installation: ${CYAN}curl http://localhost:6001/health${NOCOLOR}"
    echo -e "  • Check services: ${CYAN}sudo network-cli service status all${NOCOLOR}"
    echo -e "  • View logs: ${CYAN}sudo network-cli service logs node --follow${NOCOLOR}"
    echo -e "  • Authenticate: ${CYAN}network-cli auth login${NOCOLOR}"
    echo -e ""
    echo -e "${CYAN}Environment Management:${NOCOLOR}"
    echo -e "  • Switch to devnet: ${CYAN}network-cli devnet enable${NOCOLOR}"
    echo -e "  • Switch to testnet: ${CYAN}network-cli testnet enable${NOCOLOR}"
    echo -e "  • Show environment: ${CYAN}network-cli env current${NOCOLOR}"
    echo -e ""
    echo -e "${CYAN}Anyone Relay (Anon):${NOCOLOR}"
    echo -e "  • Check Anon status: ${CYAN}sudo systemctl status anon${NOCOLOR}"
    echo -e "  • View Anon logs: ${CYAN}sudo tail -f /home/debros/.debros/logs/anon/notices.log${NOCOLOR}"
    echo -e "  • Proxy endpoint: ${CYAN}POST http://localhost:6001/v1/proxy/anon${NOCOLOR}"
    echo -e ""
    echo -e "${CYAN}🔐 Shared Secrets (for adding more nodes):${NOCOLOR}"
    echo -e "  • Swarm key: ${CYAN}cat /home/debros/.debros/swarm.key${NOCOLOR}"
    echo -e "  • Cluster secret: ${CYAN}sudo cat /home/debros/.debros/cluster-secret${NOCOLOR}"
    echo -e "  • Copy these to bootstrap node before setting up secondary nodes${NOCOLOR}"
    echo -e ""
    echo -e "${CYAN}Documentation: https://docs.debros.io${NOCOLOR}"
    echo -e ""
}

main() {
    display_banner

    echo -e ""
    log "Starting DeBros Network installation..."
    echo -e ""

    # Check for existing installation and stop services if needed
    check_existing_installation

    detect_os
    check_architecture
    check_dependencies

    echo -e ""
    echo -e "${BLUE}========================================${NOCOLOR}"
    echo -e "${GREEN}Step 1: Install network-cli${NOCOLOR}"
    echo -e "${BLUE}========================================${NOCOLOR}"
    echo -e ""

    get_latest_release
    LATEST_VERSION="$LATEST_RELEASE"

    # Compare versions and determine if upgrade is needed
    compare_versions

    download_and_install

    # Verify installation
    if ! verify_installation; then
        exit 1
    fi

    # Install Anon (optional but recommended)
    install_anon || warning "Anon installation skipped or failed"

    # Run setup
    run_setup

    # If this was an upgrade and setup wasn't run, restart services
    if [ "$PREVIOUS_INSTALL" = true ] && [ "$VERSION_CHANGED" = true ] && [ "$SETUP_EXECUTED" = false ]; then
        echo -e ""
        log "Restarting services that were stopped earlier..."

        # Check services individually and provide detailed feedback
        failed_services=()
        if ! sudo network-cli service start all 2>&1 | tee /tmp/service-start.log; then
            # Parse which services failed
            while IFS= read -r line; do
                if [[ $line =~ "Failed to start" ]]; then
                    service_name=$(echo "$line" | grep -oP '(?<=Failed to start\s)\S+(?=:)' || echo "unknown")
                    failed_services+=("$service_name")
                fi
            done < /tmp/service-start.log

            if [ ${#failed_services[@]} -gt 0 ]; then
                error "Failed to restart: ${failed_services[*]}"
                error "Please check service status: sudo network-cli service status all"
            fi
        else
            success "Services restarted successfully"
        fi
    fi

    # Post-install health check
    perform_health_check

    # Show completion message
    show_completion
}

main "$@"
# Main flow
display_banner

# Check prerequisites
check_root
detect_os
check_architecture

# Download and install
get_latest_release
download_and_install_cli

# Show next steps
echo ""
echo -e "${GREEN}Installation complete!${NOCOLOR}"
echo ""
echo -e "${CYAN}Next, run the production setup:${NOCOLOR}"
echo ""
echo "Bootstrap node (first node):"
echo -e "  ${BLUE}sudo network-cli prod install --bootstrap${NOCOLOR}"
echo ""
echo "Secondary node (join existing cluster):"
echo -e "  ${BLUE}sudo network-cli prod install --vps-ip <bootstrap_ip> --peers <multiaddr>${NOCOLOR}"
echo ""
echo "With HTTPS/domain:"
echo -e "  ${BLUE}sudo network-cli prod install --bootstrap --domain example.com${NOCOLOR}"
echo ""
echo "For more help:"
echo -e "  ${BLUE}network-cli prod --help${NOCOLOR}"
echo ""