Renamed debros to orama

This commit is contained in:
anonpenguin23 2026-02-14 14:14:04 +02:00
parent ba4e2688e4
commit 29d255676f
116 changed files with 2482 additions and 937 deletions

View File

@ -57,9 +57,9 @@ jobs:
mkdir -p build/usr/local/bin mkdir -p build/usr/local/bin
go build -ldflags "$LDFLAGS" -o build/usr/local/bin/orama cmd/cli/main.go go build -ldflags "$LDFLAGS" -o build/usr/local/bin/orama cmd/cli/main.go
go build -ldflags "$LDFLAGS" -o build/usr/local/bin/debros-node cmd/node/main.go go build -ldflags "$LDFLAGS" -o build/usr/local/bin/orama-node cmd/node/main.go
# Build the entire gateway package so helper files (e.g., config parsing) are included # Build the entire gateway package so helper files (e.g., config parsing) are included
go build -ldflags "$LDFLAGS" -o build/usr/local/bin/debros-gateway ./cmd/gateway go build -ldflags "$LDFLAGS" -o build/usr/local/bin/orama-gateway ./cmd/gateway
- name: Create Debian package structure - name: Create Debian package structure
run: | run: |

2
.gitignore vendored
View File

@ -104,7 +104,7 @@ website/
terms-agreement terms-agreement
cli ./cli
./inspector ./inspector
results/ results/

View File

@ -2,7 +2,7 @@
# Builds and releases orama (CLI) and orama-node binaries # Builds and releases orama (CLI) and orama-node binaries
# Publishes to: GitHub Releases, Homebrew, and apt (.deb packages) # Publishes to: GitHub Releases, Homebrew, and apt (.deb packages)
project_name: debros-network project_name: orama-network
env: env:
- GO111MODULE=on - GO111MODULE=on
@ -75,7 +75,7 @@ nfpms:
- orama - orama
vendor: DeBros vendor: DeBros
homepage: https://github.com/DeBrosOfficial/network homepage: https://github.com/DeBrosOfficial/network
maintainer: DeBros <support@debros.io> maintainer: DeBros <support@orama.io>
description: CLI tool for the Orama decentralized network description: CLI tool for the Orama decentralized network
license: MIT license: MIT
formats: formats:
@ -97,7 +97,7 @@ nfpms:
- orama-node - orama-node
vendor: DeBros vendor: DeBros
homepage: https://github.com/DeBrosOfficial/network homepage: https://github.com/DeBrosOfficial/network
maintainer: DeBros <support@debros.io> maintainer: DeBros <support@orama.io>
description: Node daemon for the Orama decentralized network description: Node daemon for the Orama decentralized network
license: MIT license: MIT
formats: formats:

View File

@ -32,7 +32,7 @@ This Code applies within all project spaces and when an individual is officially
## Enforcement ## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the maintainers at: security@debros.io Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the maintainers at: security@orama.io
All complaints will be reviewed and investigated promptly and fairly. All complaints will be reviewed and investigated promptly and fairly.

View File

@ -81,47 +81,16 @@ build: deps
go build -ldflags "$(LDFLAGS) -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildVersion=$(VERSION)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildCommit=$(COMMIT)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildTime=$(DATE)'" -o bin/gateway ./cmd/gateway go build -ldflags "$(LDFLAGS) -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildVersion=$(VERSION)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildCommit=$(COMMIT)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildTime=$(DATE)'" -o bin/gateway ./cmd/gateway
@echo "Build complete! Run ./bin/orama version" @echo "Build complete! Run ./bin/orama version"
# Cross-compile all binaries for Linux (used with --pre-built flag on VPS) # Cross-compile CLI for Linux (only binary needed locally; VPS builds everything else from source)
# Builds: DeBros binaries + Olric + CoreDNS (with rqlite plugin) + Caddy (with orama DNS module)
build-linux: deps build-linux: deps
@echo "Cross-compiling all binaries for linux/amd64 (version=$(VERSION))..." @echo "Cross-compiling CLI for linux/amd64 (version=$(VERSION))..."
@mkdir -p bin-linux @mkdir -p bin-linux
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/identity ./cmd/identity
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/orama-node ./cmd/node
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/orama cmd/cli/main.go GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/orama cmd/cli/main.go
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/rqlite-mcp ./cmd/rqlite-mcp @echo "✓ CLI built at bin-linux/orama"
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX) -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildVersion=$(VERSION)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildCommit=$(COMMIT)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildTime=$(DATE)'" -trimpath -o bin-linux/gateway ./cmd/gateway
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/orama-cli ./cmd/cli
@echo "Building Olric for linux/amd64..."
GOOS=linux GOARCH=amd64 go build -ldflags "-s -w" -trimpath -o bin-linux/olric-server github.com/olric-data/olric/cmd/olric-server
@echo "Building IPFS Cluster Service for linux/amd64..."
GOOS=linux GOARCH=amd64 GOBIN=$(CURDIR)/bin-linux go install -ldflags "-s -w" -trimpath github.com/ipfs-cluster/ipfs-cluster/cmd/ipfs-cluster-service@latest
@echo "✓ All Linux binaries built in bin-linux/"
@echo "" @echo ""
@echo "Next steps:" @echo "Next steps:"
@echo " 1. Build CoreDNS: make build-linux-coredns" @echo " ./scripts/generate-source-archive.sh"
@echo " 2. Build Caddy: make build-linux-caddy" @echo " ./bin/orama install --vps-ip <ip> --nameserver --domain ..."
@echo " 3. Or build all: make build-linux-all"
# Build CoreDNS with rqlite plugin for Linux
build-linux-coredns:
@bash scripts/build-linux-coredns.sh
# Build Caddy with orama DNS module for Linux
build-linux-caddy:
@bash scripts/build-linux-caddy.sh
# Build everything for Linux (all binaries + CoreDNS + Caddy)
build-linux-all: build-linux build-linux-coredns build-linux-caddy
@echo ""
@echo "✅ All Linux binaries ready in bin-linux/:"
@ls -la bin-linux/
@echo ""
@echo "Deploy to VPS:"
@echo " scp bin-linux/* ubuntu@<ip>:/home/debros/bin/"
@echo " scp bin-linux/coredns ubuntu@<ip>:/usr/local/bin/coredns"
@echo " scp bin-linux/caddy ubuntu@<ip>:/usr/bin/caddy"
@echo " sudo orama install --pre-built --no-pull ..."
# Install git hooks # Install git hooks
install-hooks: install-hooks:

View File

@ -360,13 +360,13 @@ All configuration lives in `~/.orama/`:
```bash ```bash
# Check status # Check status
systemctl status debros-node systemctl status orama-node
# View logs # View logs
journalctl -u debros-node -f journalctl -u orama-node -f
# Check log files # Check log files
tail -f /home/debros/.orama/logs/node.log tail -f /home/orama/.orama/logs/node.log
``` ```
### Port Conflicts ### Port Conflicts
@ -398,7 +398,7 @@ rqlite -H localhost -p 5001
```bash ```bash
# Production reset (⚠️ DESTROYS DATA) # Production reset (⚠️ DESTROYS DATA)
sudo orama uninstall sudo orama uninstall
sudo rm -rf /home/debros/.orama sudo rm -rf /home/orama/.orama
sudo orama install sudo orama install
``` ```

2
debian/control vendored
View File

@ -4,7 +4,7 @@ Section: net
Priority: optional Priority: optional
Architecture: amd64 Architecture: amd64
Depends: libc6 Depends: libc6
Maintainer: DeBros Team <dev@debros.io> Maintainer: DeBros Team <dev@orama.io>
Description: Orama Network - Distributed P2P Database System Description: Orama Network - Distributed P2P Database System
Orama is a distributed peer-to-peer network that combines Orama is a distributed peer-to-peer network that combines
RQLite for distributed SQL, IPFS for content-addressed storage, RQLite for distributed SQL, IPFS for content-addressed storage,

View File

@ -8,11 +8,11 @@ Run this as root or with sudo on the target VPS:
```bash ```bash
# 1. Stop and disable all services # 1. Stop and disable all services
sudo systemctl stop debros-node debros-ipfs debros-ipfs-cluster debros-olric debros-anyone-relay debros-anyone-client coredns caddy 2>/dev/null sudo systemctl stop orama-node orama-ipfs orama-ipfs-cluster orama-olric orama-anyone-relay orama-anyone-client coredns caddy 2>/dev/null
sudo systemctl disable debros-node debros-ipfs debros-ipfs-cluster debros-olric debros-anyone-relay debros-anyone-client coredns caddy 2>/dev/null sudo systemctl disable orama-node orama-ipfs orama-ipfs-cluster orama-olric orama-anyone-relay orama-anyone-client coredns caddy 2>/dev/null
# 2. Remove systemd service files # 2. Remove systemd service files
sudo rm -f /etc/systemd/system/debros-*.service sudo rm -f /etc/systemd/system/orama-*.service
sudo rm -f /etc/systemd/system/coredns.service sudo rm -f /etc/systemd/system/coredns.service
sudo rm -f /etc/systemd/system/caddy.service sudo rm -f /etc/systemd/system/caddy.service
sudo systemctl daemon-reload sudo systemctl daemon-reload
@ -31,14 +31,14 @@ sudo ufw --force reset
sudo ufw allow 22/tcp sudo ufw allow 22/tcp
sudo ufw --force enable sudo ufw --force enable
# 5. Remove debros user and home directory # 5. Remove orama user and home directory
sudo userdel -r debros 2>/dev/null sudo userdel -r orama 2>/dev/null
sudo rm -rf /home/debros sudo rm -rf /home/orama
# 6. Remove sudoers files # 6. Remove sudoers files
sudo rm -f /etc/sudoers.d/debros-access sudo rm -f /etc/sudoers.d/orama-access
sudo rm -f /etc/sudoers.d/debros-deployments sudo rm -f /etc/sudoers.d/orama-deployments
sudo rm -f /etc/sudoers.d/debros-wireguard sudo rm -f /etc/sudoers.d/orama-wireguard
# 7. Remove CoreDNS config # 7. Remove CoreDNS config
sudo rm -rf /etc/coredns sudo rm -rf /etc/coredns
@ -62,17 +62,17 @@ echo "Node cleaned. Ready for fresh install."
| Category | Paths | | Category | Paths |
|----------|-------| |----------|-------|
| **User** | `debros` system user and `/home/debros/` | | **User** | `orama` system user and `/home/orama/` |
| **App data** | `/home/debros/.orama/` (configs, secrets, logs, IPFS, RQLite, Olric) | | **App data** | `/home/orama/.orama/` (configs, secrets, logs, IPFS, RQLite, Olric) |
| **Source code** | `/home/debros/src/` | | **Source code** | `/home/orama/src/` |
| **Binaries** | `/home/debros/bin/orama-node`, `/home/debros/bin/gateway` | | **Binaries** | `/home/orama/bin/orama-node`, `/home/orama/bin/gateway` |
| **Systemd** | `debros-*.service`, `coredns.service`, `caddy.service`, `orama-deploy-*.service` | | **Systemd** | `orama-*.service`, `coredns.service`, `caddy.service`, `orama-deploy-*.service` |
| **WireGuard** | `/etc/wireguard/wg0.conf`, `wg-quick@wg0` systemd unit | | **WireGuard** | `/etc/wireguard/wg0.conf`, `wg-quick@wg0` systemd unit |
| **Firewall** | All UFW rules (reset to default + SSH only) | | **Firewall** | All UFW rules (reset to default + SSH only) |
| **Sudoers** | `/etc/sudoers.d/debros-*` | | **Sudoers** | `/etc/sudoers.d/orama-*` |
| **CoreDNS** | `/etc/coredns/Corefile` | | **CoreDNS** | `/etc/coredns/Corefile` |
| **Caddy** | `/etc/caddy/Caddyfile`, `/var/lib/caddy/` (TLS certs) | | **Caddy** | `/etc/caddy/Caddyfile`, `/var/lib/caddy/` (TLS certs) |
| **Anyone Relay** | `debros-anyone-relay.service`, `debros-anyone-client.service` | | **Anyone Relay** | `orama-anyone-relay.service`, `orama-anyone-client.service` |
| **Temp files** | `/tmp/orama`, `/tmp/network-source.*`, build dirs | | **Temp files** | `/tmp/orama`, `/tmp/network-source.*`, build dirs |
## What This Does NOT Remove ## What This Does NOT Remove
@ -121,18 +121,18 @@ for entry in "${NODES[@]}"; do
IFS=: read -r userhost pass <<< "$entry" IFS=: read -r userhost pass <<< "$entry"
echo "Cleaning $userhost..." echo "Cleaning $userhost..."
sshpass -p "$pass" ssh -o StrictHostKeyChecking=no "$userhost" 'bash -s' << 'CLEAN' sshpass -p "$pass" ssh -o StrictHostKeyChecking=no "$userhost" 'bash -s' << 'CLEAN'
sudo systemctl stop debros-node debros-ipfs debros-ipfs-cluster debros-olric debros-anyone-relay debros-anyone-client coredns caddy 2>/dev/null sudo systemctl stop orama-node orama-ipfs orama-ipfs-cluster orama-olric orama-anyone-relay orama-anyone-client coredns caddy 2>/dev/null
sudo systemctl disable debros-node debros-ipfs debros-ipfs-cluster debros-olric debros-anyone-relay debros-anyone-client coredns caddy 2>/dev/null sudo systemctl disable orama-node orama-ipfs orama-ipfs-cluster orama-olric orama-anyone-relay orama-anyone-client coredns caddy 2>/dev/null
sudo rm -f /etc/systemd/system/debros-*.service /etc/systemd/system/coredns.service /etc/systemd/system/caddy.service /etc/systemd/system/orama-deploy-*.service sudo rm -f /etc/systemd/system/orama-*.service /etc/systemd/system/coredns.service /etc/systemd/system/caddy.service /etc/systemd/system/orama-deploy-*.service
sudo systemctl daemon-reload sudo systemctl daemon-reload
sudo systemctl stop wg-quick@wg0 2>/dev/null sudo systemctl stop wg-quick@wg0 2>/dev/null
sudo wg-quick down wg0 2>/dev/null sudo wg-quick down wg0 2>/dev/null
sudo systemctl disable wg-quick@wg0 2>/dev/null sudo systemctl disable wg-quick@wg0 2>/dev/null
sudo rm -f /etc/wireguard/wg0.conf sudo rm -f /etc/wireguard/wg0.conf
sudo ufw --force reset && sudo ufw allow 22/tcp && sudo ufw --force enable sudo ufw --force reset && sudo ufw allow 22/tcp && sudo ufw --force enable
sudo userdel -r debros 2>/dev/null sudo userdel -r orama 2>/dev/null
sudo rm -rf /home/debros sudo rm -rf /home/orama
sudo rm -f /etc/sudoers.d/debros-access /etc/sudoers.d/debros-deployments /etc/sudoers.d/debros-wireguard sudo rm -f /etc/sudoers.d/orama-access /etc/sudoers.d/orama-deployments /etc/sudoers.d/orama-wireguard
sudo rm -rf /etc/coredns /etc/caddy /var/lib/caddy sudo rm -rf /etc/coredns /etc/caddy /var/lib/caddy
sudo rm -f /tmp/orama /tmp/network-source.tar.gz sudo rm -f /tmp/orama /tmp/network-source.tar.gz
sudo rm -rf /tmp/network-extract /tmp/coredns-build /tmp/caddy-build sudo rm -rf /tmp/network-extract /tmp/coredns-build /tmp/caddy-build

View File

@ -41,7 +41,7 @@ You can find peer public keys with `wg show wg0`.
Check the Olric config on each node: Check the Olric config on each node:
```bash ```bash
cat /home/debros/.orama/data/namespaces/<name>/configs/olric-*.yaml cat /home/orama/.orama/data/namespaces/<name>/configs/olric-*.yaml
``` ```
If `bindAddr` is `0.0.0.0`, the node will try to bind to IPv6 on dual-stack hosts, breaking memberlist gossip. If `bindAddr` is `0.0.0.0`, the node will try to bind to IPv6 on dual-stack hosts, breaking memberlist gossip.
@ -53,7 +53,7 @@ This was fixed in code (BindAddr validation in `SpawnOlric`), so new namespaces
### Check 3: Olric logs show "Failed UDP ping" constantly ### Check 3: Olric logs show "Failed UDP ping" constantly
```bash ```bash
journalctl -u debros-namespace-olric@<name>.service --no-pager -n 30 journalctl -u orama-namespace-olric@<name>.service --no-pager -n 30
``` ```
If every UDP ping fails but TCP stream connections succeed, it's the WireGuard packet loss issue (see Check 1). If every UDP ping fails but TCP stream connections succeed, it's the WireGuard packet loss issue (see Check 1).
@ -69,7 +69,7 @@ If every UDP ping fails but TCP stream connections succeed, it's the WireGuard p
**Fix:** Edit the gateway config manually: **Fix:** Edit the gateway config manually:
```bash ```bash
vim /home/debros/.orama/data/namespaces/<name>/configs/gateway-*.yaml vim /home/orama/.orama/data/namespaces/<name>/configs/gateway-*.yaml
``` ```
Add/fix: Add/fix:
@ -95,7 +95,7 @@ This was fixed in code, so new namespaces get the correct config.
**Check:** **Check:**
```bash ```bash
ls /home/debros/.orama/data/namespaces/<name>/cluster-state.json ls /home/orama/.orama/data/namespaces/<name>/cluster-state.json
``` ```
If the file doesn't exist, the node can't restore the namespace. If the file doesn't exist, the node can't restore the namespace.
@ -119,14 +119,14 @@ This was fixed in code — `ProvisionCluster` now saves state to all nodes (incl
**Symptom:** After `orama upgrade --restart` or `orama prod restart`, namespace gateway/olric/rqlite services don't start. **Symptom:** After `orama upgrade --restart` or `orama prod restart`, namespace gateway/olric/rqlite services don't start.
**Cause:** `orama prod stop` disables systemd template services (`debros-namespace-gateway@<name>.service`). They have `PartOf=debros-node.service`, but that only propagates restart to **enabled** services. **Cause:** `orama prod stop` disables systemd template services (`orama-namespace-gateway@<name>.service`). They have `PartOf=orama-node.service`, but that only propagates restart to **enabled** services.
**Fix:** Re-enable the services before restarting: **Fix:** Re-enable the services before restarting:
```bash ```bash
systemctl enable debros-namespace-rqlite@<name>.service systemctl enable orama-namespace-rqlite@<name>.service
systemctl enable debros-namespace-olric@<name>.service systemctl enable orama-namespace-olric@<name>.service
systemctl enable debros-namespace-gateway@<name>.service systemctl enable orama-namespace-gateway@<name>.service
sudo orama prod restart sudo orama prod restart
``` ```
@ -153,8 +153,8 @@ ssh -n user@host 'command'
## General Debugging Tips ## General Debugging Tips
- **Always use `sudo orama prod restart`** instead of raw `systemctl` commands - **Always use `sudo orama prod restart`** instead of raw `systemctl` commands
- **Namespace data lives at:** `/home/debros/.orama/data/namespaces/<name>/` - **Namespace data lives at:** `/home/orama/.orama/data/namespaces/<name>/`
- **Check service logs:** `journalctl -u debros-namespace-olric@<name>.service --no-pager -n 50` - **Check service logs:** `journalctl -u orama-namespace-olric@<name>.service --no-pager -n 50`
- **Check WireGuard:** `wg show wg0` — look for recent handshakes and transfer bytes - **Check WireGuard:** `wg show wg0` — look for recent handshakes and transfer bytes
- **Check gateway health:** `curl http://localhost:<port>/v1/health` from the node itself - **Check gateway health:** `curl http://localhost:<port>/v1/health` from the node itself
- **Node IPs:** Check `scripts/remote-nodes.conf` for credentials, `wg show wg0` for WG IPs - **Node IPs:** Check `scripts/remote-nodes.conf` for credentials, `wg show wg0` for WG IPs

View File

@ -369,7 +369,7 @@ orama db create my-database
# Output: # Output:
# ✅ Database created: my-database # ✅ Database created: my-database
# Home Node: node-abc123 # Home Node: node-abc123
# File Path: /home/debros/.orama/data/sqlite/your-namespace/my-database.db # File Path: /home/orama/.orama/data/sqlite/your-namespace/my-database.db
``` ```
### Executing Queries ### Executing Queries
@ -588,7 +588,7 @@ func main() {
// DATABASE_NAME env var is automatically set by Orama // DATABASE_NAME env var is automatically set by Orama
dbPath := os.Getenv("DATABASE_PATH") dbPath := os.Getenv("DATABASE_PATH")
if dbPath == "" { if dbPath == "" {
dbPath = "/home/debros/.orama/data/sqlite/" + os.Getenv("NAMESPACE") + "/myapp-db.db" dbPath = "/home/orama/.orama/data/sqlite/" + os.Getenv("NAMESPACE") + "/myapp-db.db"
} }
var err error var err error

View File

@ -41,7 +41,7 @@ Install nodes **one at a time**, waiting for each to complete before starting th
```bash ```bash
# SSH: <user>@<ns1-ip> # SSH: <user>@<ns1-ip>
sudo orama install --no-pull --pre-built \ sudo orama install \
--vps-ip <ns1-ip> \ --vps-ip <ns1-ip> \
--domain <your-domain.com> \ --domain <your-domain.com> \
--base-domain <your-domain.com> \ --base-domain <your-domain.com> \
@ -58,7 +58,7 @@ orama invite --expiry 24h
```bash ```bash
# SSH: <user>@<ns2-ip> # SSH: <user>@<ns2-ip>
sudo orama install --no-pull --pre-built \ sudo orama install \
--join http://<ns1-ip> --token <TOKEN> \ --join http://<ns1-ip> --token <TOKEN> \
--vps-ip <ns2-ip> \ --vps-ip <ns2-ip> \
--domain <your-domain.com> \ --domain <your-domain.com> \
@ -77,7 +77,7 @@ sudo orama install --no-pull --pre-built \
```bash ```bash
# SSH: <user>@<ns3-ip> # SSH: <user>@<ns3-ip>
sudo orama install --no-pull --pre-built \ sudo orama install \
--join http://<ns1-ip> --token <TOKEN> \ --join http://<ns1-ip> --token <TOKEN> \
--vps-ip <ns3-ip> \ --vps-ip <ns3-ip> \
--domain <your-domain.com> \ --domain <your-domain.com> \
@ -96,7 +96,7 @@ sudo orama install --no-pull --pre-built \
```bash ```bash
# SSH: <user>@<node4-ip> # SSH: <user>@<node4-ip>
sudo orama install --no-pull --pre-built \ sudo orama install \
--join http://<ns1-ip> --token <TOKEN> \ --join http://<ns1-ip> --token <TOKEN> \
--vps-ip <node4-ip> \ --vps-ip <node4-ip> \
--domain node4.<your-domain.com> \ --domain node4.<your-domain.com> \
@ -115,7 +115,7 @@ sudo orama install --no-pull --pre-built \
```bash ```bash
# SSH: <user>@<node5-ip> # SSH: <user>@<node5-ip>
sudo orama install --no-pull --pre-built \ sudo orama install \
--join http://<ns1-ip> --token <TOKEN> \ --join http://<ns1-ip> --token <TOKEN> \
--vps-ip <node5-ip> \ --vps-ip <node5-ip> \
--domain node5.<your-domain.com> \ --domain node5.<your-domain.com> \
@ -134,7 +134,7 @@ sudo orama install --no-pull --pre-built \
```bash ```bash
# SSH: <user>@<node6-ip> # SSH: <user>@<node6-ip>
sudo orama install --no-pull --pre-built \ sudo orama install \
--join http://<ns1-ip> --token <TOKEN> \ --join http://<ns1-ip> --token <TOKEN> \
--vps-ip <node6-ip> \ --vps-ip <node6-ip> \
--domain node6.<your-domain.com> \ --domain node6.<your-domain.com> \
@ -155,5 +155,5 @@ curl -s http://localhost:5001/status | jq -r '.store.raft.state, .store.raft.num
curl -s http://localhost:6001/health curl -s http://localhost:6001/health
# Check Anyone relay (on nodes with relays) # Check Anyone relay (on nodes with relays)
systemctl status debros-anyone-relay systemctl status orama-anyone-relay
``` ```

View File

@ -28,83 +28,29 @@ make test
## Deploying to VPS ## Deploying to VPS
There are two deployment workflows: **development** (fast iteration, no git required) and **production** (via git). Source is always deployed via SCP (no git on VPS). The CLI is the only binary cross-compiled locally; everything else is built from source on the VPS.
### Development Deployment (Fast Iteration) ### Deploy Workflow
Use this when iterating quickly — no need to commit or push to git.
```bash ```bash
# 1. Build the CLI for Linux # 1. Cross-compile the CLI for Linux
GOOS=linux GOARCH=amd64 go build -o orama-cli-linux ./cmd/cli make build-linux
# 2. Generate a source archive (excludes .git, node_modules, bin/, etc.) # 2. Generate a source archive (includes CLI binary + full source)
./scripts/generate-source-archive.sh ./scripts/generate-source-archive.sh
# Creates: /tmp/network-source.tar.gz # Creates: /tmp/network-source.tar.gz
# 3. Copy CLI and source to the VPS # 3. Install on a new VPS (handles SCP, extract, and remote install automatically)
sshpass -p '<password>' scp -o StrictHostKeyChecking=no orama-cli-linux ubuntu@<ip>:/tmp/orama ./bin/orama install --vps-ip <ip> --nameserver --domain <domain> --base-domain <domain>
sshpass -p '<password>' scp -o StrictHostKeyChecking=no /tmp/network-source.tar.gz ubuntu@<ip>:/tmp/
# 4. On the VPS: extract source and install the CLI # Or upgrade an existing VPS
ssh ubuntu@<ip> ./bin/orama upgrade --restart
sudo rm -rf /home/debros/src && sudo mkdir -p /home/debros/src
sudo tar xzf /tmp/network-source.tar.gz -C /home/debros/src
sudo chown -R debros:debros /home/debros/src
sudo mv /tmp/orama /usr/local/bin/orama && sudo chmod +x /usr/local/bin/orama
# 5. Upgrade using local source (skips git pull)
sudo orama upgrade --no-pull --restart
``` ```
### Development Deployment with Pre-Built Binaries (Fastest) The `orama install` command automatically:
1. Uploads the source archive via SCP
Cross-compile everything locally and skip all Go compilation on the VPS. This is significantly faster because your local machine compiles much faster than the VPS. 2. Extracts source to `/home/orama/src` and installs the CLI to `/usr/local/bin/orama`
3. Runs `orama install` on the VPS which builds all binaries from source (Go, CoreDNS, Caddy, Olric, etc.)
```bash
# 1. Cross-compile all binaries for Linux (DeBros + Olric + CoreDNS + Caddy)
make build-linux-all
# Outputs everything to bin-linux/
# 2. Generate a single deploy archive (source + pre-built binaries)
./scripts/generate-source-archive.sh
# Creates: /tmp/network-source.tar.gz (includes bin-linux/ if present)
# 3. Copy the single archive to the VPS
sshpass -p '<password>' scp -o StrictHostKeyChecking=no /tmp/network-source.tar.gz ubuntu@<ip>:/tmp/
# 4. Extract and install everything on the VPS
sshpass -p '<password>' ssh -o StrictHostKeyChecking=no ubuntu@<ip> \
'sudo bash -s' < scripts/extract-deploy.sh
# 5. Install/upgrade with --pre-built (skips ALL Go compilation on VPS)
sudo orama install --no-pull --pre-built --vps-ip <ip> ...
# or
sudo orama upgrade --no-pull --pre-built --restart
```
**What `--pre-built` skips:** Go installation, `make build`, Olric `go install`, CoreDNS build, Caddy/xcaddy build.
**What `--pre-built` still runs:** apt dependencies, RQLite/IPFS/IPFS Cluster downloads (pre-built binary downloads, fast), Anyone relay setup, config generation, systemd service creation.
### Production Deployment (Via Git)
For production releases — pulls source from GitHub on the VPS.
```bash
# 1. Commit and push your changes
git push origin <branch>
# 2. Build the CLI for Linux
GOOS=linux GOARCH=amd64 go build -o orama-cli-linux ./cmd/cli
# 3. Deploy the CLI to the VPS
sshpass -p '<password>' scp orama-cli-linux ubuntu@<ip>:/tmp/orama
ssh ubuntu@<ip> "sudo mv /tmp/orama /usr/local/bin/orama && sudo chmod +x /usr/local/bin/orama"
# 4. Run upgrade (downloads source from GitHub)
ssh ubuntu@<ip> "sudo orama upgrade --branch <branch> --restart"
```
### Upgrading a Multi-Node Cluster (CRITICAL) ### Upgrading a Multi-Node Cluster (CRITICAL)
@ -115,10 +61,10 @@ ssh ubuntu@<ip> "sudo orama upgrade --branch <branch> --restart"
Always upgrade nodes **one at a time**, waiting for each to rejoin before proceeding: Always upgrade nodes **one at a time**, waiting for each to rejoin before proceeding:
```bash ```bash
# 1. Build locally # 1. Build CLI + generate archive
make build-linux-all make build-linux
./scripts/generate-source-archive.sh ./scripts/generate-source-archive.sh
# Creates: /tmp/network-source.tar.gz (includes bin-linux/) # Creates: /tmp/network-source.tar.gz
# 2. Upload to ONE node first (the "hub" node) # 2. Upload to ONE node first (the "hub" node)
sshpass -p '<password>' scp /tmp/network-source.tar.gz ubuntu@<hub-ip>:/tmp/ sshpass -p '<password>' scp /tmp/network-source.tar.gz ubuntu@<hub-ip>:/tmp/
@ -139,8 +85,7 @@ done
ssh ubuntu@<any-node> 'curl -s http://localhost:5001/status | jq -r .store.raft.state' ssh ubuntu@<any-node> 'curl -s http://localhost:5001/status | jq -r .store.raft.state'
# 6. Upgrade FOLLOWER nodes one at a time # 6. Upgrade FOLLOWER nodes one at a time
# First stop services, then upgrade, which restarts them ssh ubuntu@<follower-ip> 'sudo orama prod stop && sudo orama upgrade --restart'
ssh ubuntu@<follower-ip> 'sudo orama prod stop && sudo orama upgrade --no-pull --pre-built --restart'
# Wait for rejoin before proceeding to next node # Wait for rejoin before proceeding to next node
ssh ubuntu@<leader-ip> 'curl -s http://localhost:5001/status | jq -r .store.raft.num_peers' ssh ubuntu@<leader-ip> 'curl -s http://localhost:5001/status | jq -r .store.raft.num_peers'
@ -149,7 +94,7 @@ ssh ubuntu@<leader-ip> 'curl -s http://localhost:5001/status | jq -r .store.raft
# Repeat for each follower... # Repeat for each follower...
# 7. Upgrade the LEADER node last # 7. Upgrade the LEADER node last
ssh ubuntu@<leader-ip> 'sudo orama prod stop && sudo orama upgrade --no-pull --pre-built --restart' ssh ubuntu@<leader-ip> 'sudo orama prod stop && sudo orama upgrade --restart'
``` ```
#### What NOT to Do #### What NOT to Do
@ -157,7 +102,7 @@ ssh ubuntu@<leader-ip> 'sudo orama prod stop && sudo orama upgrade --no-pull --p
- **DON'T** stop all nodes, replace binaries, then start all nodes - **DON'T** stop all nodes, replace binaries, then start all nodes
- **DON'T** run `orama upgrade --restart` on multiple nodes in parallel - **DON'T** run `orama upgrade --restart` on multiple nodes in parallel
- **DON'T** clear RQLite data directories unless doing a full cluster rebuild - **DON'T** clear RQLite data directories unless doing a full cluster rebuild
- **DON'T** use `systemctl stop debros-node` on multiple nodes simultaneously - **DON'T** use `systemctl stop orama-node` on multiple nodes simultaneously
#### Recovery from Cluster Split #### Recovery from Cluster Split
@ -168,8 +113,8 @@ If nodes get stuck in "Candidate" state or show "leader not found" errors:
3. On each other node, clear RQLite data and restart: 3. On each other node, clear RQLite data and restart:
```bash ```bash
sudo orama prod stop sudo orama prod stop
sudo rm -rf /home/debros/.orama/data/rqlite sudo rm -rf /home/orama/.orama/data/rqlite
sudo systemctl start debros-node sudo systemctl start orama-node
``` ```
4. The node should automatically rejoin using its configured `rqlite_join_address` 4. The node should automatically rejoin using its configured `rqlite_join_address`
@ -180,7 +125,7 @@ ps aux | grep rqlited
``` ```
If `-join` is missing, the node bootstrapped standalone. You'll need to either: If `-join` is missing, the node bootstrapped standalone. You'll need to either:
- Restart debros-node (it should detect empty data and use join) - Restart orama-node (it should detect empty data and use join)
- Or do a full cluster rebuild from CLEAN_NODE.md - Or do a full cluster rebuild from CLEAN_NODE.md
### Deploying to Multiple Nodes ### Deploying to Multiple Nodes
@ -201,9 +146,6 @@ To deploy to all nodes, repeat steps 3-5 (dev) or 3-4 (production) for each VPS
| `--nameserver` | Configure this node as a nameserver (CoreDNS + Caddy) | | `--nameserver` | Configure this node as a nameserver (CoreDNS + Caddy) |
| `--join <url>` | Join existing cluster via HTTPS URL (e.g., `https://node1.example.com`) | | `--join <url>` | Join existing cluster via HTTPS URL (e.g., `https://node1.example.com`) |
| `--token <token>` | Invite token for joining (from `orama invite` on existing node) | | `--token <token>` | Invite token for joining (from `orama invite` on existing node) |
| `--branch <branch>` | Git branch to use (default: main) |
| `--no-pull` | Skip git clone/pull, use existing `/home/debros/src` |
| `--pre-built` | Skip all Go compilation, use pre-built binaries already on disk (see above) |
| `--force` | Force reconfiguration even if already installed | | `--force` | Force reconfiguration even if already installed |
| `--skip-firewall` | Skip UFW firewall setup | | `--skip-firewall` | Skip UFW firewall setup |
| `--skip-checks` | Skip minimum resource checks (RAM/CPU) | | `--skip-checks` | Skip minimum resource checks (RAM/CPU) |
@ -234,9 +176,6 @@ To deploy to all nodes, repeat steps 3-5 (dev) or 3-4 (production) for each VPS
| Flag | Description | | Flag | Description |
|------|-------------| |------|-------------|
| `--branch <branch>` | Git branch to pull from |
| `--no-pull` | Skip git pull, use existing source |
| `--pre-built` | Skip all Go compilation, use pre-built binaries already on disk |
| `--restart` | Restart all services after upgrade | | `--restart` | Restart all services after upgrade |
| `--anyone-relay` | Enable Anyone relay (same flags as install) | | `--anyone-relay` | Enable Anyone relay (same flags as install) |
| `--anyone-bandwidth <pct>` | Limit relay to N% of VPS bandwidth (default: 30, 0=unlimited) | | `--anyone-bandwidth <pct>` | Limit relay to N% of VPS bandwidth (default: 30, 0=unlimited) |
@ -247,7 +186,7 @@ To deploy to all nodes, repeat steps 3-5 (dev) or 3-4 (production) for each VPS
Use these commands to manage services on production nodes: Use these commands to manage services on production nodes:
```bash ```bash
# Stop all services (debros-node, coredns, caddy) # Stop all services (orama-node, coredns, caddy)
sudo orama prod stop sudo orama prod stop
# Start all services # Start all services
@ -318,12 +257,7 @@ Before running `orama install` on a VPS, ensure:
sudo systemctl stop ipfs sudo systemctl stop ipfs
``` ```
3. **Ensure `make` is installed.** Required for building CoreDNS and Caddy from source: 3. **Stop any service on port 53** (for nameserver nodes). The installer handles `systemd-resolved` automatically, but other DNS services (like `bind9` or `dnsmasq`) must be stopped manually.
```bash
sudo apt-get install -y make
```
4. **Stop any service on port 53** (for nameserver nodes). The installer handles `systemd-resolved` automatically, but other DNS services (like `bind9` or `dnsmasq`) must be stopped manually.
## Recovering from Failed Joins ## Recovering from Failed Joins

View File

@ -62,7 +62,7 @@ Multiple subsystems can be combined: `--subsystem rqlite,olric,dns`
| **ipfs** | Daemon active, cluster active, swarm peer count, cluster peer count, cluster errors, repo usage %, swarm key present, bootstrap list empty, cross-node version consistency | | **ipfs** | Daemon active, cluster active, swarm peer count, cluster peer count, cluster errors, repo usage %, swarm key present, bootstrap list empty, cross-node version consistency |
| **dns** | CoreDNS active, Caddy active, ports (53/80/443), memory, restart count, log errors, Corefile exists, SOA/NS/wildcard/base-A resolution, TLS cert expiry, cross-node nameserver availability | | **dns** | CoreDNS active, Caddy active, ports (53/80/443), memory, restart count, log errors, Corefile exists, SOA/NS/wildcard/base-A resolution, TLS cert expiry, cross-node nameserver availability |
| **wireguard** | Interface up, service active, correct 10.0.0.x IP, listen port 51820, peer count vs expected, MTU 1420, config exists + permissions 600, peer handshakes (fresh/stale/never), peer traffic, catch-all route detection, cross-node peer count + MTU consistency | | **wireguard** | Interface up, service active, correct 10.0.0.x IP, listen port 51820, peer count vs expected, MTU 1420, config exists + permissions 600, peer handshakes (fresh/stale/never), peer traffic, catch-all route detection, cross-node peer count + MTU consistency |
| **system** | Core services (debros-node, rqlite, olric, ipfs, ipfs-cluster, wg-quick), nameserver services (coredns, caddy), failed systemd units, memory/disk/inode usage, load average, OOM kills, swap, UFW active, process user (debros), panic count, expected ports | | **system** | Core services (orama-node, rqlite, olric, ipfs, ipfs-cluster, wg-quick), nameserver services (coredns, caddy), failed systemd units, memory/disk/inode usage, load average, OOM kills, swap, UFW active, process user (orama), panic count, expected ports |
| **network** | Internet reachability, default route, WireGuard route, TCP connection count, TIME_WAIT count, TCP retransmission rate, WireGuard mesh ping (all peers) | | **network** | Internet reachability, default route, WireGuard route, TCP connection count, TIME_WAIT count, TCP retransmission rate, WireGuard mesh ping (all peers) |
| **namespace** | Per-namespace: RQLite up + raft state + readyz, Olric memberlist, Gateway HTTP health. Cross-namespace: all-healthy check, RQLite quorum per namespace | | **namespace** | Per-namespace: RQLite up + raft state + readyz, Olric memberlist, Gateway HTTP health. Cross-namespace: all-healthy check, RQLite quorum per namespace |

View File

@ -297,7 +297,7 @@ func GetRQLiteNodes() []string {
// queryAPIKeyFromRQLite queries the SQLite database directly for an API key // queryAPIKeyFromRQLite queries the SQLite database directly for an API key
func queryAPIKeyFromRQLite() (string, error) { func queryAPIKeyFromRQLite() (string, error) {
// 1. Check environment variable first // 1. Check environment variable first
if envKey := os.Getenv("DEBROS_API_KEY"); envKey != "" { if envKey := os.Getenv("ORAMA_API_KEY"); envKey != "" {
return envKey, nil return envKey, nil
} }
@ -424,7 +424,7 @@ func GetAPIKey() string {
cacheMutex.RUnlock() cacheMutex.RUnlock()
// 1. Check env var // 1. Check env var
if envKey := os.Getenv("DEBROS_API_KEY"); envKey != "" { if envKey := os.Getenv("ORAMA_API_KEY"); envKey != "" {
cacheMutex.Lock() cacheMutex.Lock()
apiKeyCache = envKey apiKeyCache = envKey
cacheMutex.Unlock() cacheMutex.Unlock()
@ -1188,8 +1188,8 @@ type E2ETestEnv struct {
GatewayURL string GatewayURL string
APIKey string APIKey string
Namespace string Namespace string
BaseDomain string // Domain for deployment routing (e.g., "dbrs.space") BaseDomain string // Domain for deployment routing (e.g., "dbrs.space")
Config *E2EConfig // Full E2E configuration (for production tests) Config *E2EConfig // Full E2E configuration (for production tests)
HTTPClient *http.Client HTTPClient *http.Client
SkipCleanup bool SkipCleanup bool
} }

View File

@ -57,16 +57,16 @@ func TestDomainRouting_BasicRouting(t *testing.T) {
t.Logf("✓ Standard domain routing works: %s", domain) t.Logf("✓ Standard domain routing works: %s", domain)
}) })
t.Run("Non-debros domain passes through", func(t *testing.T) { t.Run("Non-orama domain passes through", func(t *testing.T) {
// Request with non-debros domain should not route to deployment // Request with non-orama domain should not route to deployment
resp := e2e.TestDeploymentWithHostHeader(t, env, "example.com", "/") resp := e2e.TestDeploymentWithHostHeader(t, env, "example.com", "/")
defer resp.Body.Close() defer resp.Body.Close()
// Should either return 404 or pass to default handler // Should either return 404 or pass to default handler
assert.NotEqual(t, http.StatusOK, resp.StatusCode, assert.NotEqual(t, http.StatusOK, resp.StatusCode,
"Non-debros domain should not route to deployment") "Non-orama domain should not route to deployment")
t.Logf("✓ Non-debros domains correctly pass through (status: %d)", resp.StatusCode) t.Logf("✓ Non-orama domains correctly pass through (status: %d)", resp.StatusCode)
}) })
t.Run("API paths bypass domain routing", func(t *testing.T) { t.Run("API paths bypass domain routing", func(t *testing.T) {

View File

@ -118,7 +118,7 @@ func TestNetwork_ProxyAnonSuccess(t *testing.T) {
Body: map[string]interface{}{ Body: map[string]interface{}{
"url": "https://httpbin.org/get", "url": "https://httpbin.org/get",
"method": "GET", "method": "GET",
"headers": map[string]string{"User-Agent": "DeBros-E2E-Test/1.0"}, "headers": map[string]string{"User-Agent": "Orama-E2E-Test/1.0"},
}, },
} }
@ -178,7 +178,7 @@ func TestNetwork_ProxyAnonPostRequest(t *testing.T) {
Body: map[string]interface{}{ Body: map[string]interface{}{
"url": "https://httpbin.org/post", "url": "https://httpbin.org/post",
"method": "POST", "method": "POST",
"headers": map[string]string{"User-Agent": "DeBros-E2E-Test/1.0"}, "headers": map[string]string{"User-Agent": "Orama-E2E-Test/1.0"},
"body": "test_data", "body": "test_data",
}, },
} }

View File

@ -1,4 +1,4 @@
-- DeBros Gateway - Initial database schema (SQLite/RQLite dialect) -- Orama Gateway - Initial database schema (SQLite/RQLite dialect)
-- This file scaffolds core tables used by the HTTP gateway for auth, observability, and namespacing. -- This file scaffolds core tables used by the HTTP gateway for auth, observability, and namespacing.
-- Apply via your migration tooling or manual execution in RQLite. -- Apply via your migration tooling or manual execution in RQLite.

View File

@ -1,4 +1,4 @@
-- DeBros Gateway - Core schema (Phase 2) -- Orama Gateway - Core schema (Phase 2)
-- Adds apps, nonces, subscriptions, refresh_tokens, audit_events, namespace_ownership -- Adds apps, nonces, subscriptions, refresh_tokens, audit_events, namespace_ownership
-- SQLite/RQLite dialect -- SQLite/RQLite dialect

View File

@ -1,4 +1,4 @@
-- DeBros Gateway - Wallet to API Key linkage (Phase 3) -- Orama Gateway - Wallet to API Key linkage (Phase 3)
-- Ensures one API key per (namespace, wallet) and enables lookup -- Ensures one API key per (namespace, wallet) and enables lookup
BEGIN; BEGIN;

View File

@ -169,10 +169,10 @@ func (creds *Credentials) UpdateLastUsed() {
// GetDefaultGatewayURL returns the default gateway URL from environment config, env vars, or fallback // GetDefaultGatewayURL returns the default gateway URL from environment config, env vars, or fallback
func GetDefaultGatewayURL() string { func GetDefaultGatewayURL() string {
// Check environment variables first (for backwards compatibility) // Check environment variables first (for backwards compatibility)
if envURL := os.Getenv("DEBROS_GATEWAY_URL"); envURL != "" { if envURL := os.Getenv("ORAMA_GATEWAY_URL"); envURL != "" {
return envURL return envURL
} }
if envURL := os.Getenv("DEBROS_GATEWAY"); envURL != "" { if envURL := os.Getenv("ORAMA_GATEWAY"); envURL != "" {
return envURL return envURL
} }

View File

@ -225,7 +225,7 @@ func (as *AuthServer) handleHealth(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(map[string]string{ json.NewEncoder(w).Encode(map[string]string{
"status": "ok", "status": "ok",
"server": "debros-auth-callback", "server": "orama-auth-callback",
}) })
} }

View File

@ -115,8 +115,8 @@ func (cm *CertificateManager) generateCACertificate() ([]byte, []byte, error) {
template := x509.Certificate{ template := x509.Certificate{
SerialNumber: big.NewInt(1), SerialNumber: big.NewInt(1),
Subject: pkix.Name{ Subject: pkix.Name{
CommonName: "DeBros Network Root CA", CommonName: "Orama Network Root CA",
Organization: []string{"DeBros"}, Organization: []string{"Orama"},
}, },
NotBefore: time.Now(), NotBefore: time.Now(),
NotAfter: time.Now().AddDate(10, 0, 0), // 10 year validity NotAfter: time.Now().AddDate(10, 0, 0), // 10 year validity

View File

@ -66,7 +66,7 @@ func showAuthHelp() {
fmt.Printf(" orama auth whoami # Check who you're logged in as\n") fmt.Printf(" orama auth whoami # Check who you're logged in as\n")
fmt.Printf(" orama auth logout # Clear all stored credentials\n\n") fmt.Printf(" orama auth logout # Clear all stored credentials\n\n")
fmt.Printf("Environment Variables:\n") fmt.Printf("Environment Variables:\n")
fmt.Printf(" DEBROS_GATEWAY_URL - Gateway URL (overrides environment config)\n\n") fmt.Printf(" ORAMA_GATEWAY_URL - Gateway URL (overrides environment config)\n\n")
fmt.Printf("Authentication Flow (RootWallet):\n") fmt.Printf("Authentication Flow (RootWallet):\n")
fmt.Printf(" 1. Run 'orama auth login'\n") fmt.Printf(" 1. Run 'orama auth login'\n")
fmt.Printf(" 2. Your wallet address is read from RootWallet automatically\n") fmt.Printf(" 2. Your wallet address is read from RootWallet automatically\n")
@ -295,7 +295,7 @@ func handleAuthStatus() {
// Uses the active environment or allows entering a custom domain // Uses the active environment or allows entering a custom domain
func promptForGatewayURL() string { func promptForGatewayURL() string {
// Check environment variable first (allows override without prompting) // Check environment variable first (allows override without prompting)
if url := os.Getenv("DEBROS_GATEWAY_URL"); url != "" { if url := os.Getenv("ORAMA_GATEWAY_URL"); url != "" {
return url return url
} }
@ -346,7 +346,7 @@ func promptForGatewayURL() string {
// Used by other commands that don't need interactive node selection // Used by other commands that don't need interactive node selection
func getGatewayURL() string { func getGatewayURL() string {
// Check environment variable first (for backwards compatibility) // Check environment variable first (for backwards compatibility)
if url := os.Getenv("DEBROS_GATEWAY_URL"); url != "" { if url := os.Getenv("ORAMA_GATEWAY_URL"); url != "" {
return url return url
} }

View File

@ -0,0 +1,80 @@
package cluster
import (
"fmt"
"os"
)
// HandleCommand handles cluster subcommands.
// With no arguments it prints usage; otherwise it dispatches the first
// argument as a subcommand and passes the remainder through unchanged.
func HandleCommand(args []string) {
	if len(args) == 0 {
		ShowHelp()
		return
	}

	cmd, rest := args[0], args[1:]
	switch cmd {
	case "status":
		HandleStatus(rest)
	case "health":
		HandleHealth(rest)
	case "rqlite":
		HandleRQLite(rest)
	case "watch":
		HandleWatch(rest)
	case "help":
		ShowHelp()
	default:
		fmt.Fprintf(os.Stderr, "Unknown cluster subcommand: %s\n", cmd)
		ShowHelp()
		os.Exit(1)
	}
}
// hasFlag reports whether flag appears anywhere in args.
func hasFlag(args []string, flag string) bool {
	for i := range args {
		if args[i] == flag {
			return true
		}
	}
	return false
}
// getFlagValue returns the value of a flag from the args slice.
// Returns empty string if the flag is not found or has no value.
func getFlagValue(args []string, flag string) string {
	// Stop one short of the end: a flag in the final slot has no value.
	for i := 0; i+1 < len(args); i++ {
		if args[i] == flag {
			return args[i+1]
		}
	}
	return ""
}
// ShowHelp displays help information for cluster commands.
// Output goes to stdout. Keep this text in sync with the subcommands
// dispatched by HandleCommand.
func ShowHelp() {
	fmt.Printf("Cluster Management Commands\n\n")
	fmt.Printf("Usage: orama cluster <subcommand> [options]\n\n")
	fmt.Printf("Subcommands:\n")
	fmt.Printf(" status - Show cluster node status (RQLite + Olric)\n")
	fmt.Printf(" Options:\n")
	// --all is advertised but HandleStatus currently prints a
	// not-yet-implemented notice for it (see status.go).
	fmt.Printf(" --all - SSH into all nodes from remote-nodes.conf (TODO)\n")
	fmt.Printf(" health - Run cluster health checks\n")
	fmt.Printf(" rqlite <subcommand> - RQLite-specific commands\n")
	fmt.Printf(" status - Show detailed Raft state for local node\n")
	fmt.Printf(" voters - Show current voter list\n")
	fmt.Printf(" backup [--output FILE] - Trigger manual backup\n")
	fmt.Printf(" watch - Live cluster status monitor\n")
	fmt.Printf(" Options:\n")
	fmt.Printf(" --interval SECONDS - Refresh interval (default: 10)\n\n")
	fmt.Printf("Examples:\n")
	fmt.Printf(" orama cluster status\n")
	fmt.Printf(" orama cluster health\n")
	fmt.Printf(" orama cluster rqlite status\n")
	fmt.Printf(" orama cluster rqlite voters\n")
	fmt.Printf(" orama cluster rqlite backup --output /tmp/backup.db\n")
	fmt.Printf(" orama cluster watch --interval 5\n")
}

244
pkg/cli/cluster/health.go Normal file
View File

@ -0,0 +1,244 @@
package cluster
import (
"fmt"
"os"
)
// checkResult represents the outcome of a single health check.
type checkResult struct {
	Name   string // human-readable name of the check
	Status string // "PASS", "FAIL", "WARN"
	Detail string // explanation printed next to the status
}
// HandleHealth handles the "orama cluster health" command.
//
// It runs a series of checks against the local RQLite HTTP API (reachability,
// Raft state, leader presence, replication lag, cluster membership, and
// quorum), prints the results as a table via printHealthResults, and exits
// with status 1 if any check reports FAIL.
//
// args is currently unused but kept for signature consistency with the other
// cluster subcommand handlers.
func HandleHealth(args []string) {
	fmt.Printf("Cluster Health Check\n")
	fmt.Printf("====================\n\n")

	var results []checkResult

	// Check 1: RQLite reachable. Every later check depends on this, so on
	// failure we print what we have and exit immediately.
	status, err := queryRQLiteStatus()
	if err != nil {
		results = append(results, checkResult{
			Name:   "RQLite reachable",
			Status: "FAIL",
			Detail: fmt.Sprintf("Cannot connect to RQLite: %v", err),
		})
		printHealthResults(results)
		// os.Exit never returns; the unreachable "return" that followed it
		// has been removed.
		os.Exit(1)
	}
	results = append(results, checkResult{
		Name:   "RQLite reachable",
		Status: "PASS",
		Detail: fmt.Sprintf("HTTP API responding on %s", status.HTTP.Address),
	})

	// Check 2: Raft state is leader or follower (not candidate or shutdown).
	raftState := status.Store.Raft.State
	switch raftState {
	case "Leader", "Follower":
		results = append(results, checkResult{
			Name:   "Raft state healthy",
			Status: "PASS",
			Detail: fmt.Sprintf("Node is %s", raftState),
		})
	case "Candidate":
		results = append(results, checkResult{
			Name:   "Raft state healthy",
			Status: "WARN",
			Detail: "Node is Candidate (election in progress)",
		})
	default:
		results = append(results, checkResult{
			Name:   "Raft state healthy",
			Status: "FAIL",
			Detail: fmt.Sprintf("Node is in unexpected state: %s", raftState),
		})
	}

	// Check 3: Leader exists.
	if status.Store.Raft.Leader != "" {
		results = append(results, checkResult{
			Name:   "Leader exists",
			Status: "PASS",
			Detail: fmt.Sprintf("Leader: %s", status.Store.Raft.Leader),
		})
	} else {
		results = append(results, checkResult{
			Name:   "Leader exists",
			Status: "FAIL",
			Detail: "No leader detected in Raft cluster",
		})
	}

	// Check 4: Applied index is advancing (applied >= commit means caught up).
	if status.Store.Raft.AppliedIndex >= status.Store.Raft.CommitIndex {
		results = append(results, checkResult{
			Name:   "Log replication",
			Status: "PASS",
			Detail: fmt.Sprintf("Applied index (%d) >= commit index (%d)",
				status.Store.Raft.AppliedIndex, status.Store.Raft.CommitIndex),
		})
	} else {
		lag := status.Store.Raft.CommitIndex - status.Store.Raft.AppliedIndex
		// Small lag is normal during writes; a large backlog suggests the
		// state machine has stalled.
		severity := "WARN"
		if lag > 1000 {
			severity = "FAIL"
		}
		results = append(results, checkResult{
			Name:   "Log replication",
			Status: severity,
			Detail: fmt.Sprintf("Applied index (%d) behind commit index (%d) by %d entries",
				status.Store.Raft.AppliedIndex, status.Store.Raft.CommitIndex, lag),
		})
	}

	// Check 5: Query nodes to validate cluster membership.
	nodes, err := queryRQLiteNodes(true)
	if err != nil {
		results = append(results, checkResult{
			Name:   "Cluster nodes reachable",
			Status: "FAIL",
			Detail: fmt.Sprintf("Cannot query /nodes: %v", err),
		})
	} else {
		totalNodes := len(nodes)
		voters := 0
		nonVoters := 0
		reachable := 0
		leaders := 0
		for _, node := range nodes {
			if node.Voter {
				voters++
			} else {
				nonVoters++
			}
			if node.Reachable {
				reachable++
			}
			if node.Leader {
				leaders++
			}
		}

		// Check 5a: Node count.
		results = append(results, checkResult{
			Name:   "Cluster membership",
			Status: "PASS",
			Detail: fmt.Sprintf("%d nodes (%d voters, %d non-voters)", totalNodes, voters, nonVoters),
		})

		// Check 5b: All nodes reachable.
		if reachable == totalNodes {
			results = append(results, checkResult{
				Name:   "All nodes reachable",
				Status: "PASS",
				Detail: fmt.Sprintf("%d/%d nodes reachable", reachable, totalNodes),
			})
		} else {
			unreachable := totalNodes - reachable
			results = append(results, checkResult{
				Name:   "All nodes reachable",
				Status: "WARN",
				Detail: fmt.Sprintf("%d/%d nodes reachable (%d unreachable)", reachable, totalNodes, unreachable),
			})
		}

		// Check 5c: Exactly one leader.
		if leaders == 1 {
			results = append(results, checkResult{
				Name:   "Single leader",
				Status: "PASS",
				Detail: "Exactly 1 leader in cluster",
			})
		} else if leaders == 0 {
			results = append(results, checkResult{
				Name:   "Single leader",
				Status: "FAIL",
				Detail: "No leader found among nodes",
			})
		} else {
			results = append(results, checkResult{
				Name:   "Single leader",
				Status: "FAIL",
				Detail: fmt.Sprintf("Multiple leaders detected: %d (split-brain?)", leaders),
			})
		}

		// Check 5d: Quorum check (majority of voters must be reachable).
		quorum := (voters / 2) + 1
		reachableVoters := 0
		for _, node := range nodes {
			if node.Voter && node.Reachable {
				reachableVoters++
			}
		}
		if reachableVoters >= quorum {
			results = append(results, checkResult{
				Name:   "Quorum healthy",
				Status: "PASS",
				Detail: fmt.Sprintf("%d/%d voters reachable (quorum requires %d)", reachableVoters, voters, quorum),
			})
		} else {
			results = append(results, checkResult{
				Name:   "Quorum healthy",
				Status: "FAIL",
				Detail: fmt.Sprintf("%d/%d voters reachable (quorum requires %d)", reachableVoters, voters, quorum),
			})
		}
	}

	printHealthResults(results)

	// Exit with non-zero if any failures.
	for _, r := range results {
		if r.Status == "FAIL" {
			os.Exit(1)
		}
	}
}
// printHealthResults prints the health check results in a formatted table,
// followed by a one-line summary of pass/fail/warn counts.
func printHealthResults(results []checkResult) {
	// Widest check name, used to align the detail column.
	widest := 0
	for _, r := range results {
		if l := len(r.Name); l > widest {
			widest = l
		}
	}

	for _, r := range results {
		// The status string doubles as the row indicator; anything other
		// than the three known statuses renders as a blank indicator.
		label := " "
		if r.Status == "PASS" || r.Status == "FAIL" || r.Status == "WARN" {
			label = r.Status
		}
		fmt.Printf(" [%s] %-*s %s\n", label, widest, r.Name, r.Detail)
	}
	fmt.Println()

	// Summary counts.
	var pass, fail, warn int
	for _, r := range results {
		switch r.Status {
		case "PASS":
			pass++
		case "FAIL":
			fail++
		case "WARN":
			warn++
		}
	}
	fmt.Printf("Summary: %d passed, %d failed, %d warnings\n", pass, fail, warn)
}

187
pkg/cli/cluster/rqlite.go Normal file
View File

@ -0,0 +1,187 @@
package cluster
import (
"fmt"
"io"
"net/http"
"os"
"strings"
"time"
)
// HandleRQLite handles the "orama cluster rqlite" subcommand group.
// With no arguments it prints rqlite-specific usage; otherwise it dispatches
// the first argument and forwards the rest to the chosen handler.
func HandleRQLite(args []string) {
	if len(args) == 0 {
		showRQLiteHelp()
		return
	}

	cmd, rest := args[0], args[1:]
	switch cmd {
	case "status":
		handleRQLiteStatus()
	case "voters":
		handleRQLiteVoters()
	case "backup":
		handleRQLiteBackup(rest)
	case "help":
		showRQLiteHelp()
	default:
		fmt.Fprintf(os.Stderr, "Unknown rqlite subcommand: %s\n", cmd)
		showRQLiteHelp()
		os.Exit(1)
	}
}
// handleRQLiteStatus shows detailed Raft state for the local node.
// Exits with status 1 if the local RQLite HTTP API cannot be queried.
func handleRQLiteStatus() {
	fmt.Printf("RQLite Raft Status\n")
	fmt.Printf("==================\n\n")

	status, err := queryRQLiteStatus()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}

	raft := status.Store.Raft

	fmt.Printf("Node Configuration\n")
	fmt.Printf(" Node ID: %s\n", status.Store.NodeID)
	fmt.Printf(" Raft Address: %s\n", status.Store.Address)
	fmt.Printf(" HTTP Address: %s\n", status.HTTP.Address)
	fmt.Printf(" Data Directory: %s\n", status.Store.Dir)
	fmt.Println()

	fmt.Printf("Raft State\n")
	fmt.Printf(" State: %s\n", strings.ToUpper(raft.State))
	fmt.Printf(" Current Term: %d\n", raft.Term)
	fmt.Printf(" Applied Index: %d\n", raft.AppliedIndex)
	fmt.Printf(" Commit Index: %d\n", raft.CommitIndex)
	fmt.Printf(" Leader: %s\n", raft.Leader)

	// Applied caught up to commit means no replication lag.
	if raft.AppliedIndex >= raft.CommitIndex {
		fmt.Printf(" Replication Lag: none (fully caught up)\n")
	} else {
		fmt.Printf(" Replication Lag: %d entries behind\n", raft.CommitIndex-raft.AppliedIndex)
	}

	if status.Node.Uptime != "" {
		fmt.Printf(" Uptime: %s\n", status.Node.Uptime)
	}
	fmt.Println()
}
// handleRQLiteVoters shows the current voter list from /nodes.
// Prints one row per cluster node plus voter totals and the quorum size.
func handleRQLiteVoters() {
	fmt.Printf("RQLite Cluster Voters\n")
	fmt.Printf("=====================\n\n")

	nodes, err := queryRQLiteNodes(true)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}

	yesNo := func(b bool) string {
		if b {
			return "yes"
		}
		return "no"
	}

	const rowFmt = "%-20s %-30s %-8s %-10s %-10s\n"
	fmt.Printf(rowFmt, "NODE ID", "ADDRESS", "ROLE", "LEADER", "REACHABLE")
	fmt.Printf(rowFmt,
		strings.Repeat("-", 20),
		strings.Repeat("-", 30),
		strings.Repeat("-", 8),
		strings.Repeat("-", 10),
		strings.Repeat("-", 10))

	voters, nonVoters := 0, 0
	for id, node := range nodes {
		// Truncate long IDs so the column stays aligned.
		displayID := id
		if len(displayID) > 20 {
			displayID = displayID[:17] + "..."
		}
		role := "non-voter"
		if node.Voter {
			role = "voter"
			voters++
		} else {
			nonVoters++
		}
		fmt.Printf(rowFmt, displayID, node.Address, role, yesNo(node.Leader), yesNo(node.Reachable))
	}

	fmt.Printf("\nTotal: %d voters, %d non-voters\n", voters, nonVoters)
	quorum := (voters / 2) + 1
	fmt.Printf("Quorum requirement: %d/%d voters\n", quorum, voters)
}
// handleRQLiteBackup triggers a manual backup via the RQLite backup endpoint
// and writes the response body to --output (or a timestamped default file).
// Exits with status 1 on any connection, HTTP, or file error.
func handleRQLiteBackup(args []string) {
	outputFile := getFlagValue(args, "--output")
	if outputFile == "" {
		outputFile = fmt.Sprintf("rqlite-backup-%s.db", time.Now().Format("20060102-150405"))
	}

	fmt.Printf("RQLite Backup\n")
	fmt.Printf("=============\n\n")
	fmt.Printf("Requesting backup from %s/db/backup ...\n", rqliteBaseURL)

	// Generous timeout: a full database snapshot can take a while to stream.
	client := &http.Client{Timeout: 60 * time.Second}
	resp, err := client.Get(rqliteBaseURL + "/db/backup")
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: cannot connect to RQLite: %v\n", err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		fmt.Fprintf(os.Stderr, "Error: backup request returned HTTP %d: %s\n", resp.StatusCode, string(body))
		os.Exit(1)
	}

	outFile, err := os.Create(outputFile)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: cannot create output file: %v\n", err)
		os.Exit(1)
	}

	written, err := io.Copy(outFile, resp.Body)
	if err != nil {
		outFile.Close() // best-effort cleanup; the copy error is the primary failure
		fmt.Fprintf(os.Stderr, "Error: failed to write backup: %v\n", err)
		os.Exit(1)
	}

	// Check Close explicitly rather than deferring it unchecked: a failed
	// close can mean buffered data was never flushed, silently leaving a
	// truncated backup on disk.
	if err := outFile.Close(); err != nil {
		fmt.Fprintf(os.Stderr, "Error: failed to finalize backup file: %v\n", err)
		os.Exit(1)
	}

	fmt.Printf("Backup saved to: %s (%d bytes)\n", outputFile, written)
}
// showRQLiteHelp displays help for rqlite subcommands.
// Output goes to stdout. Keep this text in sync with the subcommands
// dispatched by HandleRQLite.
func showRQLiteHelp() {
	fmt.Printf("RQLite Commands\n\n")
	fmt.Printf("Usage: orama cluster rqlite <subcommand> [options]\n\n")
	fmt.Printf("Subcommands:\n")
	fmt.Printf(" status - Show detailed Raft state for local node\n")
	fmt.Printf(" voters - Show current voter list from cluster\n")
	fmt.Printf(" backup - Trigger manual database backup\n")
	fmt.Printf(" Options:\n")
	fmt.Printf(" --output FILE - Output file path (default: rqlite-backup-<timestamp>.db)\n\n")
	fmt.Printf("Examples:\n")
	fmt.Printf(" orama cluster rqlite status\n")
	fmt.Printf(" orama cluster rqlite voters\n")
	fmt.Printf(" orama cluster rqlite backup --output /tmp/backup.db\n")
}

248
pkg/cli/cluster/status.go Normal file
View File

@ -0,0 +1,248 @@
package cluster
import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"sort"
	"strings"
	"time"
)
// Connection settings for the local RQLite HTTP API used by all cluster
// subcommands.
const (
	// NOTE(review): assumes every node exposes the RQLite HTTP API on
	// localhost:5001 — confirm against the node configuration.
	rqliteBaseURL = "http://localhost:5001"
	// httpTimeout bounds each /status and /nodes request.
	httpTimeout = 10 * time.Second
)
// rqliteStatus represents the relevant fields from the RQLite /status endpoint.
// Only the subset of the /status payload used by the CLI is modeled; unknown
// fields are ignored by json.Unmarshal.
type rqliteStatus struct {
	Store struct {
		Raft struct {
			State        string `json:"state"`         // Raft role, e.g. "Leader" or "Follower"
			AppliedIndex uint64 `json:"applied_index"` // last log index applied to the state machine
			CommitIndex  uint64 `json:"commit_index"`  // highest log index known to be committed
			Term         uint64 `json:"current_term"`  // current Raft election term
			Leader       string `json:"leader"`        // leader address (empty when no leader)
		} `json:"raft"`
		Dir     string `json:"dir"`     // on-disk data directory
		NodeID  string `json:"node_id"` // this node's Raft ID
		Address string `json:"addr"`    // Raft (inter-node) address
	} `json:"store"`
	HTTP struct {
		Address string `json:"addr"` // HTTP API listen address
	} `json:"http"`
	Node struct {
		Uptime string `json:"uptime"` // human-readable process uptime (may be empty)
	} `json:"node"`
}
// rqliteNode represents a node from the /nodes endpoint.
type rqliteNode struct {
	ID        string  `json:"id"`        // Raft node ID
	Address   string  `json:"addr"`      // node address
	Leader    bool    `json:"leader"`    // true if this node is the current leader
	Voter     bool    `json:"voter"`     // true if the node is a voting member
	Reachable bool    `json:"reachable"` // true if the node responded to the contact check
	Time      float64 `json:"time"`      // contact latency; displayed as seconds ("%.3fs") by callers
	TimeS     string  `json:"time_s"`    // pre-formatted latency string, preferred when non-empty
}
// HandleStatus handles the "orama cluster status" command.
// It prints local RQLite status, a table of cluster nodes, and a best-effort
// Olric status. Each section degrades independently if its query fails.
func HandleStatus(args []string) {
	if hasFlag(args, "--all") {
		fmt.Printf("Remote node aggregation via SSH is not yet implemented.\n")
		fmt.Printf("Currently showing local node status only.\n\n")
	}

	fmt.Printf("Cluster Status\n")
	fmt.Printf("==============\n\n")

	// Local RQLite status; a failure here is reported but does not stop
	// the remaining sections.
	if status, err := queryRQLiteStatus(); err != nil {
		fmt.Fprintf(os.Stderr, "Error querying RQLite status: %v\n", err)
		fmt.Printf("RQLite may not be running on this node.\n\n")
	} else {
		printLocalStatus(status)
	}

	// Cluster membership table (voters and non-voters).
	if nodes, err := queryRQLiteNodes(true); err != nil {
		fmt.Fprintf(os.Stderr, "Error querying RQLite nodes: %v\n", err)
	} else {
		printNodesTable(nodes)
	}

	// Olric status is best-effort and never fails the command.
	printOlricStatus()
}
// queryRQLiteStatus queries the local RQLite /status endpoint and decodes
// the fields modeled by rqliteStatus.
func queryRQLiteStatus() (*rqliteStatus, error) {
	httpClient := &http.Client{Timeout: httpTimeout}
	resp, err := httpClient.Get(rqliteBaseURL + "/status")
	if err != nil {
		return nil, fmt.Errorf("connect to RQLite: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode)
	}

	raw, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read response: %w", err)
	}

	out := &rqliteStatus{}
	if err := json.Unmarshal(raw, out); err != nil {
		return nil, fmt.Errorf("parse response: %w", err)
	}
	return out, nil
}
// queryRQLiteNodes queries the local RQLite /nodes endpoint.
// If includeNonVoters is true, appends ?nonvoters to the query so read-only
// replicas are included in the returned map (keyed by node ID).
func queryRQLiteNodes(includeNonVoters bool) (map[string]*rqliteNode, error) {
	endpoint := rqliteBaseURL + "/nodes"
	if includeNonVoters {
		endpoint += "?nonvoters"
	}

	httpClient := &http.Client{Timeout: httpTimeout}
	resp, err := httpClient.Get(endpoint)
	if err != nil {
		return nil, fmt.Errorf("connect to RQLite: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode)
	}

	raw, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read response: %w", err)
	}

	var nodes map[string]*rqliteNode
	if err := json.Unmarshal(raw, &nodes); err != nil {
		return nil, fmt.Errorf("parse response: %w", err)
	}
	return nodes, nil
}
// printLocalStatus prints the local node's RQLite status.
func printLocalStatus(s *rqliteStatus) {
	raft := s.Store.Raft

	fmt.Printf("Local Node\n")
	fmt.Printf(" Node ID: %s\n", s.Store.NodeID)
	fmt.Printf(" Raft Address: %s\n", s.Store.Address)
	fmt.Printf(" HTTP Address: %s\n", s.HTTP.Address)
	fmt.Printf(" Raft State: %s\n", strings.ToUpper(raft.State))
	fmt.Printf(" Raft Term: %d\n", raft.Term)
	fmt.Printf(" Applied Index: %d\n", raft.AppliedIndex)
	fmt.Printf(" Commit Index: %d\n", raft.CommitIndex)
	fmt.Printf(" Leader: %s\n", raft.Leader)
	if s.Node.Uptime != "" {
		fmt.Printf(" Uptime: %s\n", s.Node.Uptime)
	}
	fmt.Println()
}
// printNodesTable prints a formatted table of all cluster nodes.
//
// Node IDs are sorted before printing: Go map iteration order is random, so
// without sorting the rows would shuffle on every invocation, making the
// output hard to scan or diff.
func printNodesTable(nodes map[string]*rqliteNode) {
	if len(nodes) == 0 {
		fmt.Printf("No nodes found in cluster.\n\n")
		return
	}

	fmt.Printf("Cluster Nodes (%d total)\n", len(nodes))
	fmt.Printf("%-20s %-30s %-8s %-10s %-10s %-12s\n",
		"NODE ID", "ADDRESS", "VOTER", "LEADER", "REACHABLE", "LATENCY")
	fmt.Printf("%-20s %-30s %-8s %-10s %-10s %-12s\n",
		strings.Repeat("-", 20),
		strings.Repeat("-", 30),
		strings.Repeat("-", 8),
		strings.Repeat("-", 10),
		strings.Repeat("-", 10),
		strings.Repeat("-", 12))

	// Deterministic row order.
	ids := make([]string, 0, len(nodes))
	for id := range nodes {
		ids = append(ids, id)
	}
	sort.Strings(ids)

	for _, id := range ids {
		node := nodes[id]
		// Truncate long IDs so the column stays aligned.
		nodeID := id
		if len(nodeID) > 20 {
			nodeID = nodeID[:17] + "..."
		}
		voter := "no"
		if node.Voter {
			voter = "yes"
		}
		leader := "no"
		if node.Leader {
			leader = "yes"
		}
		reachable := "no"
		if node.Reachable {
			reachable = "yes"
		}
		// Prefer the server's pre-formatted latency string when present.
		latency := "-"
		if node.TimeS != "" {
			latency = node.TimeS
		} else if node.Time > 0 {
			latency = fmt.Sprintf("%.3fs", node.Time)
		}
		fmt.Printf("%-20s %-30s %-8s %-10s %-10s %-12s\n",
			nodeID, node.Address, voter, leader, reachable, latency)
	}
	fmt.Println()
}
// printOlricStatus attempts to query the local Olric status endpoint.
// This is strictly best-effort: any failure is reported inline and the
// function returns without error.
func printOlricStatus() {
	httpClient := &http.Client{Timeout: 5 * time.Second}
	resp, err := httpClient.Get("http://localhost:3320/")
	if err != nil {
		fmt.Printf("Olric: not reachable on localhost:3320 (%v)\n\n", err)
		return
	}
	defer resp.Body.Close()

	payload, err := io.ReadAll(resp.Body)
	if err != nil {
		fmt.Printf("Olric: reachable but could not read response\n\n")
		return
	}

	if resp.StatusCode != http.StatusOK {
		fmt.Printf("Olric: reachable but returned HTTP %d\n", resp.StatusCode)
		fmt.Println()
		return
	}

	fmt.Printf("Olric: reachable (HTTP %d)\n", resp.StatusCode)
	// Try to parse as JSON for a nicer display; fall back to raw text.
	var fields map[string]interface{}
	if json.Unmarshal(payload, &fields) == nil {
		for key, val := range fields {
			fmt.Printf(" %s: %v\n", key, val)
		}
	} else {
		trimmed := strings.TrimSpace(string(payload))
		if len(trimmed) > 200 {
			trimmed = trimmed[:200] + "..."
		}
		if trimmed != "" {
			fmt.Printf(" Response: %s\n", trimmed)
		}
	}
	fmt.Println()
}

136
pkg/cli/cluster/watch.go Normal file
View File

@ -0,0 +1,136 @@
package cluster
import (
"fmt"
"os"
"os/signal"
"strconv"
"strings"
"syscall"
"time"
)
// HandleWatch handles the "orama cluster watch" command.
// It polls RQLite status and nodes at a configurable interval and reprints a
// summary until the process receives SIGINT or SIGTERM.
func HandleWatch(args []string) {
	refresh := 10 * time.Second
	if raw := getFlagValue(args, "--interval"); raw != "" {
		secs, err := strconv.Atoi(raw)
		if err != nil || secs < 1 {
			fmt.Fprintf(os.Stderr, "Error: --interval must be a positive integer (seconds)\n")
			os.Exit(1)
		}
		refresh = time.Duration(secs) * time.Second
	}

	// Clean exit on Ctrl+C / service stop.
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, syscall.SIGINT, syscall.SIGTERM)

	fmt.Printf("Watching cluster status (interval: %s, Ctrl+C to exit)\n\n", refresh)
	renderWatchScreen()

	ticker := time.NewTicker(refresh)
	defer ticker.Stop()

	for {
		select {
		case <-stop:
			fmt.Printf("\nWatch stopped.\n")
			return
		case <-ticker.C:
			renderWatchScreen()
		}
	}
}
// renderWatchScreen clears the terminal and prints a summary of cluster state.
//
// Called once per tick from HandleWatch. All data comes from the local RQLite
// HTTP API (queryRQLiteStatus / queryRQLiteNodes); query failures are shown
// inline so the watch loop keeps running.
func renderWatchScreen() {
	// Clear screen using ANSI escape codes
	fmt.Print("\033[2J\033[H")

	now := time.Now().Format("2006-01-02 15:04:05")
	fmt.Printf("Cluster Watch [%s]\n", now)
	fmt.Printf("=======================================\n\n")

	// Query RQLite status
	status, err := queryRQLiteStatus()
	if err != nil {
		fmt.Printf("RQLite: UNREACHABLE (%v)\n\n", err)
	} else {
		fmt.Printf("Local Node: %s\n", status.Store.NodeID)
		fmt.Printf(" State: %-10s Term: %-6d Applied: %-8d Commit: %-8d\n",
			strings.ToUpper(status.Store.Raft.State),
			status.Store.Raft.Term,
			status.Store.Raft.AppliedIndex,
			status.Store.Raft.CommitIndex)
		fmt.Printf(" Leader: %s\n", status.Store.Raft.Leader)
		if status.Node.Uptime != "" {
			fmt.Printf(" Uptime: %s\n", status.Node.Uptime)
		}
		fmt.Println()
	}

	// Query nodes
	nodes, err := queryRQLiteNodes(true)
	if err != nil {
		fmt.Printf("Nodes: UNAVAILABLE (%v)\n\n", err)
	} else {
		total := len(nodes)
		voters := 0
		reachable := 0
		for _, n := range nodes {
			if n.Voter {
				voters++
			}
			if n.Reachable {
				reachable++
			}
		}
		fmt.Printf("Cluster: %d nodes (%d voters), %d/%d reachable\n\n",
			total, voters, reachable, total)

		// Compact table
		fmt.Printf("%-18s %-28s %-7s %-7s %-7s\n",
			"ID", "ADDRESS", "VOTER", "LEADER", "UP")
		fmt.Printf("%-18s %-28s %-7s %-7s %-7s\n",
			strings.Repeat("-", 18),
			strings.Repeat("-", 28),
			strings.Repeat("-", 7),
			strings.Repeat("-", 7),
			strings.Repeat("-", 7))
		// NOTE(review): map iteration order is random, so rows may reorder
		// between refreshes — consider sorting IDs for a stable display.
		for id, node := range nodes {
			// Truncate long IDs to keep columns aligned.
			nodeID := id
			if len(nodeID) > 18 {
				nodeID = nodeID[:15] + "..."
			}
			voter := " "
			if node.Voter {
				voter = "yes"
			}
			leader := " "
			if node.Leader {
				leader = "yes"
			}
			up := "no"
			if node.Reachable {
				up = "yes"
			}
			fmt.Printf("%-18s %-28s %-7s %-7s %-7s\n",
				nodeID, node.Address, voter, leader, up)
		}
	}

	fmt.Printf("\nPress Ctrl+C to exit\n")
}

View File

@ -0,0 +1,10 @@
package cli
import (
"github.com/DeBrosOfficial/network/pkg/cli/cluster"
)
// HandleClusterCommand handles cluster management commands.
// It is a thin shim that forwards the raw argument list to the cluster
// subpackage's dispatcher; all parsing and subcommand routing happens there.
func HandleClusterCommand(args []string) {
	cluster.HandleCommand(args)
}

View File

@ -9,7 +9,7 @@ import (
"github.com/DeBrosOfficial/network/pkg/config" "github.com/DeBrosOfficial/network/pkg/config"
) )
// Environment represents a DeBros network environment // Environment represents an Orama network environment
type Environment struct { type Environment struct {
Name string `json:"name"` Name string `json:"name"`
GatewayURL string `json:"gateway_url"` GatewayURL string `json:"gateway_url"`

View File

@ -89,15 +89,11 @@ func ShowHelp() {
fmt.Printf(" --ipfs-addrs ADDRS - IPFS swarm addresses (auto-discovered)\n") fmt.Printf(" --ipfs-addrs ADDRS - IPFS swarm addresses (auto-discovered)\n")
fmt.Printf(" --ipfs-cluster-peer ID - IPFS Cluster peer ID (auto-discovered)\n") fmt.Printf(" --ipfs-cluster-peer ID - IPFS Cluster peer ID (auto-discovered)\n")
fmt.Printf(" --ipfs-cluster-addrs ADDRS - IPFS Cluster addresses (auto-discovered)\n") fmt.Printf(" --ipfs-cluster-addrs ADDRS - IPFS Cluster addresses (auto-discovered)\n")
fmt.Printf(" --branch BRANCH - Git branch to use (main or nightly, default: main)\n")
fmt.Printf(" --no-pull - Skip git clone/pull, use existing /home/debros/src\n")
fmt.Printf(" --ignore-resource-checks - Skip disk/RAM/CPU prerequisite validation\n") fmt.Printf(" --ignore-resource-checks - Skip disk/RAM/CPU prerequisite validation\n")
fmt.Printf(" --dry-run - Show what would be done without making changes\n") fmt.Printf(" --dry-run - Show what would be done without making changes\n")
fmt.Printf(" upgrade - Upgrade existing installation (requires root/sudo)\n") fmt.Printf(" upgrade - Upgrade existing installation (requires root/sudo)\n")
fmt.Printf(" Options:\n") fmt.Printf(" Options:\n")
fmt.Printf(" --restart - Automatically restart services after upgrade\n") fmt.Printf(" --restart - Automatically restart services after upgrade\n")
fmt.Printf(" --branch BRANCH - Git branch to use (main or nightly)\n")
fmt.Printf(" --no-pull - Skip git clone/pull, use existing source\n")
fmt.Printf(" migrate - Migrate from old unified setup (requires root/sudo)\n") fmt.Printf(" migrate - Migrate from old unified setup (requires root/sudo)\n")
fmt.Printf(" Options:\n") fmt.Printf(" Options:\n")
fmt.Printf(" --dry-run - Show what would be migrated without making changes\n") fmt.Printf(" --dry-run - Show what would be migrated without making changes\n")

View File

@ -30,7 +30,7 @@ type Orchestrator struct {
// NewOrchestrator creates a new install orchestrator // NewOrchestrator creates a new install orchestrator
func NewOrchestrator(flags *Flags) (*Orchestrator, error) { func NewOrchestrator(flags *Flags) (*Orchestrator, error) {
oramaHome := "/home/debros" oramaHome := "/home/orama"
oramaDir := oramaHome + "/.orama" oramaDir := oramaHome + "/.orama"
// Normalize peers // Normalize peers
@ -547,9 +547,9 @@ func (o *Orchestrator) installNamespaceTemplates() error {
systemdDir := "/etc/systemd/system" systemdDir := "/etc/systemd/system"
templates := []string{ templates := []string{
"debros-namespace-rqlite@.service", "orama-namespace-rqlite@.service",
"debros-namespace-olric@.service", "orama-namespace-olric@.service",
"debros-namespace-gateway@.service", "orama-namespace-gateway@.service",
} }
installedCount := 0 installedCount := 0

View File

@ -0,0 +1,215 @@
package install
import (
"fmt"
"os"
"strconv"
"strings"
"github.com/DeBrosOfficial/network/pkg/inspector"
)
// RemoteOrchestrator orchestrates a remote install via SSH.
// It uploads the source archive, extracts it on the VPS, and runs
// the actual install command remotely.
type RemoteOrchestrator struct {
	flags   *Flags         // parsed CLI flags, forwarded verbatim to the remote install
	node    inspector.Node // SSH target: user, host, and key or password
	archive string         // local path of the source tarball to upload
}
// NewRemoteOrchestrator creates a new remote orchestrator.
// It resolves SSH credentials and checks prerequisites.
func NewRemoteOrchestrator(flags *Flags) (*RemoteOrchestrator, error) {
	if flags.VpsIP == "" {
		return nil, fmt.Errorf("--vps-ip is required\nExample: orama install --vps-ip 1.2.3.4 --nameserver --domain orama-testnet.network")
	}

	// The source archive must have been generated locally beforehand.
	if _, statErr := os.Stat(sourceArchivePath); os.IsNotExist(statErr) {
		return nil, fmt.Errorf("source archive not found at %s\nRun: make build-linux && ./scripts/generate-source-archive.sh", sourceArchivePath)
	}

	// Look up (or interactively prompt for) SSH credentials for the target.
	node, err := resolveSSHCredentials(flags.VpsIP)
	if err != nil {
		return nil, fmt.Errorf("failed to resolve SSH credentials: %w", err)
	}

	orch := &RemoteOrchestrator{
		flags:   flags,
		node:    node,
		archive: sourceArchivePath,
	}
	return orch, nil
}
// Execute runs the full remote install process: upload the source archive,
// extract it on the VPS, then run the install command remotely.
func (r *RemoteOrchestrator) Execute() error {
	fmt.Printf("Installing on %s via SSH (%s@%s)...\n\n", r.flags.VpsIP, r.node.User, r.node.Host)

	fmt.Printf("Uploading source archive...\n")
	if err := r.uploadArchive(); err != nil {
		return fmt.Errorf("upload failed: %w", err)
	}
	fmt.Printf(" Done.\n\n")

	fmt.Printf("Extracting on VPS...\n")
	if err := r.extractOnVPS(); err != nil {
		return fmt.Errorf("extract failed: %w", err)
	}
	fmt.Printf(" Done.\n\n")

	// Install output streams straight to the terminal; no trailing "Done"
	// banner since the remote command prints its own completion messages.
	fmt.Printf("Running install on VPS...\n\n")
	return r.runRemoteInstall()
}
// uploadArchive copies the source archive to the VPS.
// The remote path is fixed: extractOnVPS expects the tarball at
// /tmp/network-source.tar.gz.
func (r *RemoteOrchestrator) uploadArchive() error {
	return uploadFile(r.node, r.archive, "/tmp/network-source.tar.gz")
}
// extractOnVPS runs the extraction steps on the VPS over a single SSH
// invocation: it creates the orama service user if missing, unpacks the
// uploaded archive into /home/orama/src, and installs the pre-built CLI
// binary into /usr/local/bin.
func (r *RemoteOrchestrator) extractOnVPS() error {
	// Extract source archive and install only the CLI binary.
	// All other binaries are built from source on the VPS during install.
	extractCmd := r.sudoPrefix() + "bash -c '" +
		`ARCHIVE="/tmp/network-source.tar.gz" && ` +
		`SRC_DIR="/home/orama/src" && ` +
		`BIN_DIR="/home/orama/bin" && ` +
		// Create the orama user if it does not exist yet (id exits non-zero).
		`id -u orama &>/dev/null || useradd -m -s /bin/bash orama && ` +
		// Start from a clean source tree on every deploy.
		`rm -rf "$SRC_DIR" && mkdir -p "$SRC_DIR" "$BIN_DIR" && ` +
		`tar xzf "$ARCHIVE" -C "$SRC_DIR" && ` +
		`chown -R orama:orama "$SRC_DIR" && ` +
		// Install pre-built CLI binary (only binary cross-compiled locally)
		`if [ -f "$SRC_DIR/bin-linux/orama" ]; then ` +
		`cp "$SRC_DIR/bin-linux/orama" /usr/local/bin/orama && ` +
		`chmod +x /usr/local/bin/orama; fi && ` +
		`chown -R orama:orama "$BIN_DIR" && ` +
		`echo "Extract complete."` +
		"'"
	return runSSHStreaming(r.node, extractCmd)
}
// runRemoteInstall executes `orama install` on the VPS, streaming its
// output to the local terminal.
func (r *RemoteOrchestrator) runRemoteInstall() error {
	return runSSHStreaming(r.node, r.buildRemoteCommand())
}
// buildRemoteCommand constructs the `sudo orama install` command string
// with all flags passed through.
//
// Only flags that differ from their zero/default value are forwarded, so
// the remote install behaves exactly like the local invocation. The final
// string is assembled by joinShellArgs, which quotes arguments containing
// shell metacharacters.
func (r *RemoteOrchestrator) buildRemoteCommand() string {
	var args []string
	// Non-root SSH users need sudo to install.
	if r.node.User != "root" {
		args = append(args, "sudo")
	}
	args = append(args, "orama", "install")
	args = append(args, "--vps-ip", r.flags.VpsIP)
	if r.flags.Domain != "" {
		args = append(args, "--domain", r.flags.Domain)
	}
	if r.flags.BaseDomain != "" {
		args = append(args, "--base-domain", r.flags.BaseDomain)
	}
	if r.flags.Nameserver {
		args = append(args, "--nameserver")
	}
	if r.flags.JoinAddress != "" {
		args = append(args, "--join", r.flags.JoinAddress)
	}
	if r.flags.Token != "" {
		args = append(args, "--token", r.flags.Token)
	}
	if r.flags.Force {
		args = append(args, "--force")
	}
	if r.flags.SkipChecks {
		args = append(args, "--skip-checks")
	}
	if r.flags.SkipFirewall {
		args = append(args, "--skip-firewall")
	}
	if r.flags.DryRun {
		args = append(args, "--dry-run")
	}
	// Anyone relay flags
	if r.flags.AnyoneRelay {
		args = append(args, "--anyone-relay")
	}
	if r.flags.AnyoneClient {
		args = append(args, "--anyone-client")
	}
	if r.flags.AnyoneExit {
		args = append(args, "--anyone-exit")
	}
	if r.flags.AnyoneMigrate {
		args = append(args, "--anyone-migrate")
	}
	if r.flags.AnyoneNickname != "" {
		args = append(args, "--anyone-nickname", r.flags.AnyoneNickname)
	}
	if r.flags.AnyoneContact != "" {
		args = append(args, "--anyone-contact", r.flags.AnyoneContact)
	}
	if r.flags.AnyoneWallet != "" {
		args = append(args, "--anyone-wallet", r.flags.AnyoneWallet)
	}
	// NOTE(review): 9001, 30 and 0 below look like the flag defaults, so the
	// flag is only forwarded when the user overrode it — confirm against the
	// flag definitions.
	if r.flags.AnyoneORPort != 9001 {
		args = append(args, "--anyone-orport", strconv.Itoa(r.flags.AnyoneORPort))
	}
	if r.flags.AnyoneFamily != "" {
		args = append(args, "--anyone-family", r.flags.AnyoneFamily)
	}
	if r.flags.AnyoneBandwidth != 30 {
		args = append(args, "--anyone-bandwidth", strconv.Itoa(r.flags.AnyoneBandwidth))
	}
	if r.flags.AnyoneAccounting != 0 {
		args = append(args, "--anyone-accounting", strconv.Itoa(r.flags.AnyoneAccounting))
	}
	return joinShellArgs(args)
}
// sudoPrefix returns "sudo " for non-root SSH users, empty for root.
func (r *RemoteOrchestrator) sudoPrefix() string {
	prefix := "sudo "
	if r.node.User == "root" {
		prefix = ""
	}
	return prefix
}
// joinShellArgs joins arguments into one shell command string, wrapping any
// argument that contains shell-special characters in single quotes.
//
// Embedded single quotes are escaped with the standard '\'' sequence (close
// quote, escaped literal quote, reopen quote) — a quote character can never
// appear inside a single-quoted shell string directly, so the previous
// behavior of wrapping without escaping produced a broken command line for
// arguments such as contact strings containing an apostrophe.
func joinShellArgs(args []string) string {
	parts := make([]string, 0, len(args))
	for _, a := range args {
		if needsQuoting(a) || strings.Contains(a, "'") {
			escaped := strings.ReplaceAll(a, "'", `'\''`)
			parts = append(parts, "'"+escaped+"'")
		} else {
			parts = append(parts, a)
		}
	}
	return strings.Join(parts, " ")
}
// needsQuoting returns true if the string contains characters that need
// shell quoting. Besides the classic metacharacters this includes single
// quotes and whitespace characters (tab, newline): an unquoted argument
// containing any of them would be word-split or corrupt the remote command
// line built by joinShellArgs.
func needsQuoting(s string) bool {
	for _, c := range s {
		switch c {
		case ' ', '\t', '\n', '\'', '$', '!', '&', '(', ')', '<', '>', '|', ';', '"', '`', '\\', '#', '^', '*', '?', '{', '}', '[', ']', '~':
			return true
		}
	}
	return false
}

View File

@ -0,0 +1,142 @@
package install
import (
"bufio"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/DeBrosOfficial/network/pkg/inspector"
)
const sourceArchivePath = "/tmp/network-source.tar.gz"
// resolveSSHCredentials finds SSH credentials for the given VPS IP.
// First checks remote-nodes.conf, then prompts interactively.
func resolveSSHCredentials(vpsIP string) (inspector.Node, error) {
	confPath := findRemoteNodesConf()
	if confPath == "" {
		// No config file found — ask the user.
		return promptSSHCredentials(vpsIP), nil
	}

	nodes, err := inspector.LoadNodes(confPath)
	if err != nil {
		// Unreadable config falls back to the interactive prompt.
		return promptSSHCredentials(vpsIP), nil
	}

	for _, n := range nodes {
		if n.Host != vpsIP {
			continue
		}
		// Expand a leading "~" in the key path to the user's home directory.
		if strings.HasPrefix(n.SSHKey, "~") {
			home, _ := os.UserHomeDir()
			n.SSHKey = filepath.Join(home, n.SSHKey[1:])
		}
		return n, nil
	}

	// IP not present in the config — prompt interactively.
	return promptSSHCredentials(vpsIP), nil
}
// findRemoteNodesConf searches a few known locations for the
// remote-nodes.conf file and returns the first one that exists,
// or the empty string when none do.
func findRemoteNodesConf() string {
	for _, candidate := range []string{
		"scripts/remote-nodes.conf",
		"../scripts/remote-nodes.conf",
		"network/scripts/remote-nodes.conf",
	} {
		if _, err := os.Stat(candidate); err == nil {
			return candidate
		}
	}
	return ""
}
// promptSSHCredentials asks the user for SSH credentials interactively.
// The user defaults to "ubuntu" when the answer is blank.
func promptSSHCredentials(vpsIP string) inspector.Node {
	in := bufio.NewReader(os.Stdin)
	readLine := func() string {
		s, _ := in.ReadString('\n')
		return strings.TrimSpace(s)
	}

	fmt.Printf("\nSSH credentials for %s\n", vpsIP)

	fmt.Print(" SSH user (default: ubuntu): ")
	user := readLine()
	if user == "" {
		user = "ubuntu"
	}

	fmt.Print(" SSH password: ")
	password := readLine()

	return inspector.Node{
		User:     user,
		Host:     vpsIP,
		Password: password,
	}
}
// uploadFile copies a local file to a remote host via SCP. It uses the
// node's SSH key when one is configured and falls back to sshpass-driven
// password authentication otherwise.
func uploadFile(node inspector.Node, localPath, remotePath string) error {
	dest := fmt.Sprintf("%s@%s:%s", node.User, node.Host, remotePath)

	// Options shared by both authentication modes.
	common := []string{
		"-o", "StrictHostKeyChecking=no",
		"-o", "ConnectTimeout=10",
	}

	var cmd *exec.Cmd
	if node.SSHKey == "" {
		// Password auth requires the sshpass wrapper to feed the password.
		if _, err := exec.LookPath("sshpass"); err != nil {
			return fmt.Errorf("sshpass not found — install it: brew install hudochenkov/sshpass/sshpass")
		}
		args := append([]string{"-p", node.Password, "scp"}, common...)
		args = append(args, localPath, dest)
		cmd = exec.Command("sshpass", args...)
	} else {
		args := append(append([]string{}, common...), "-i", node.SSHKey, localPath, dest)
		cmd = exec.Command("scp", args...)
	}

	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("SCP failed: %w", err)
	}
	return nil
}
// runSSHStreaming executes a command on a remote host via SSH, streaming
// stdout/stderr to the local terminal in real-time. Stdin is forwarded so
// remote sudo can prompt for a password.
func runSSHStreaming(node inspector.Node, command string) error {
	target := fmt.Sprintf("%s@%s", node.User, node.Host)
	opts := []string{
		"-o", "StrictHostKeyChecking=no",
		"-o", "ConnectTimeout=10",
	}

	var cmd *exec.Cmd
	if node.SSHKey == "" {
		// Password auth goes through sshpass.
		args := append([]string{"-p", node.Password, "ssh"}, opts...)
		args = append(args, target, command)
		cmd = exec.Command("sshpass", args...)
	} else {
		args := append(append([]string{}, opts...), "-i", node.SSHKey, target, command)
		cmd = exec.Command("ssh", args...)
	}

	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.Stdin = os.Stdin // Allow password prompts from remote sudo
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("SSH command failed: %w", err)
	}
	return nil
}

View File

@ -68,7 +68,7 @@ func Handle(args []string) {
// readNodeDomain reads the domain from the node config file // readNodeDomain reads the domain from the node config file
func readNodeDomain() (string, error) { func readNodeDomain() (string, error) {
configPath := "/home/debros/.orama/configs/node.yaml" configPath := "/home/orama/.orama/configs/node.yaml"
data, err := os.ReadFile(configPath) data, err := os.ReadFile(configPath)
if err != nil { if err != nil {
return "", fmt.Errorf("read config: %w", err) return "", fmt.Errorf("read config: %w", err)

View File

@ -0,0 +1,143 @@
package lifecycle
import (
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"time"
"github.com/DeBrosOfficial/network/pkg/cli/utils"
)
// HandlePostUpgrade brings the node back online after an upgrade:
//  1. Resets failed + unmasks + enables all services
//  2. Starts services in dependency order
//  3. Waits for global RQLite to be ready
//  4. Waits for each namespace RQLite to be ready
//  5. Removes maintenance flag
//
// Must be run as root; exits the process with status 1 otherwise.
// systemctl invocations are deliberately best-effort: their errors are
// ignored so one misbehaving unit cannot block the rest of the recovery.
func HandlePostUpgrade() {
	if os.Geteuid() != 0 {
		fmt.Fprintf(os.Stderr, "Error: post-upgrade must be run as root (use sudo)\n")
		os.Exit(1)
	}
	fmt.Printf("Post-upgrade: bringing node back online...\n")
	// 1. Get all services
	services := utils.GetProductionServices()
	if len(services) == 0 {
		fmt.Printf(" Warning: no Orama services found\n")
		return
	}
	// Reset failed state (best-effort; error intentionally ignored)
	resetArgs := []string{"reset-failed"}
	resetArgs = append(resetArgs, services...)
	exec.Command("systemctl", resetArgs...).Run()
	// Unmask and enable all services so they can start now and on boot
	for _, svc := range services {
		masked, err := utils.IsServiceMasked(svc)
		if err == nil && masked {
			exec.Command("systemctl", "unmask", svc).Run()
		}
		enabled, err := utils.IsServiceEnabled(svc)
		if err == nil && !enabled {
			exec.Command("systemctl", "enable", svc).Run()
		}
	}
	fmt.Printf(" Services reset and enabled\n")
	// 2. Start services in dependency order
	fmt.Printf(" Starting services...\n")
	utils.StartServicesOrdered(services, "start")
	fmt.Printf(" Services started\n")
	// 3. Wait for global RQLite (port 5001) to be ready
	fmt.Printf(" Waiting for global RQLite (port 5001)...\n")
	if err := waitForRQLiteReady(5001, 120*time.Second); err != nil {
		fmt.Printf(" Warning: global RQLite not ready: %v\n", err)
	} else {
		fmt.Printf(" Global RQLite ready\n")
	}
	// 4. Wait for each namespace RQLite with a global timeout of 5 minutes
	nsPorts := getNamespaceRQLitePorts()
	if len(nsPorts) > 0 {
		fmt.Printf(" Waiting for %d namespace RQLite instances...\n", len(nsPorts))
		globalDeadline := time.Now().Add(5 * time.Minute)
		healthy := 0
		failed := 0
		for ns, port := range nsPorts {
			remaining := time.Until(globalDeadline)
			if remaining <= 0 {
				// Budget exhausted: count every namespace not yet checked as failed.
				fmt.Printf(" Warning: global timeout reached, skipping remaining namespaces\n")
				failed += len(nsPorts) - healthy - failed
				break
			}
			// Cap the per-namespace wait by the remaining global budget.
			timeout := 90 * time.Second
			if remaining < timeout {
				timeout = remaining
			}
			fmt.Printf(" Waiting for namespace '%s' (port %d)...\n", ns, port)
			if err := waitForRQLiteReady(port, timeout); err != nil {
				fmt.Printf(" Warning: namespace '%s' RQLite not ready: %v\n", ns, err)
				failed++
			} else {
				fmt.Printf(" Namespace '%s' ready\n", ns)
				healthy++
			}
		}
		fmt.Printf(" Namespace RQLite: %d healthy, %d failed\n", healthy, failed)
	}
	// 5. Remove maintenance flag (a missing flag is not an error)
	if err := os.Remove(maintenanceFlagPath); err != nil && !os.IsNotExist(err) {
		fmt.Printf(" Warning: failed to remove maintenance flag: %v\n", err)
	} else {
		fmt.Printf(" Maintenance flag removed\n")
	}
	fmt.Printf("Post-upgrade complete. Node is back online.\n")
}
// waitForRQLiteReady polls an RQLite instance's /status endpoint until it
// reports Leader or Follower state, or the timeout expires.
func waitForRQLiteReady(port int, timeout time.Duration) error {
	const pollDelay = 2 * time.Second

	deadline := time.Now().Add(timeout)
	client := &http.Client{Timeout: 2 * time.Second}
	url := fmt.Sprintf("http://localhost:%d/status", port)

	// Minimal shape of the /status payload we care about.
	type statusDoc struct {
		Store struct {
			Raft struct {
				State string `json:"state"`
			} `json:"raft"`
		} `json:"store"`
	}

	for time.Now().Before(deadline) {
		resp, err := client.Get(url)
		if err != nil {
			// Not listening yet — retry after a short pause.
			time.Sleep(pollDelay)
			continue
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()

		var doc statusDoc
		if json.Unmarshal(body, &doc) == nil {
			switch doc.Store.Raft.State {
			case "Leader", "Follower":
				return nil
			}
		}
		time.Sleep(pollDelay)
	}
	return fmt.Errorf("timeout after %s waiting for RQLite on port %d", timeout, port)
}

View File

@ -0,0 +1,132 @@
package lifecycle
import (
"bufio"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/DeBrosOfficial/network/pkg/rqlite"
"go.uber.org/zap"
)
const (
maintenanceFlagPath = "/home/orama/.orama/maintenance.flag"
)
// HandlePreUpgrade prepares the node for a safe rolling upgrade:
//  1. Checks quorum safety
//  2. Writes maintenance flag
//  3. Transfers leadership on global RQLite (port 5001) if leader
//  4. Transfers leadership on each namespace RQLite
//  5. Waits 15s for metadata propagation (H5 fix)
//
// Must be run as root; exits with status 1 when not root or when the quorum
// check reports that stopping this node would break the cluster. Flag and
// leadership-transfer failures are warnings only — the upgrade proceeds.
func HandlePreUpgrade() {
	if os.Geteuid() != 0 {
		fmt.Fprintf(os.Stderr, "Error: pre-upgrade must be run as root (use sudo)\n")
		os.Exit(1)
	}
	fmt.Printf("Pre-upgrade: preparing node for safe restart...\n")
	// 1. Check quorum safety (non-empty return means stopping is unsafe)
	if warning := checkQuorumSafety(); warning != "" {
		fmt.Fprintf(os.Stderr, " UNSAFE: %s\n", warning)
		fmt.Fprintf(os.Stderr, " Aborting pre-upgrade. Use 'orama stop --force' to override.\n")
		os.Exit(1)
	}
	fmt.Printf(" Quorum check passed\n")
	// 2. Write maintenance flag (timestamped so operators can see when it was set)
	if err := os.MkdirAll(filepath.Dir(maintenanceFlagPath), 0755); err != nil {
		fmt.Fprintf(os.Stderr, " Warning: failed to create flag directory: %v\n", err)
	}
	if err := os.WriteFile(maintenanceFlagPath, []byte(time.Now().Format(time.RFC3339)), 0644); err != nil {
		fmt.Fprintf(os.Stderr, " Warning: failed to write maintenance flag: %v\n", err)
	} else {
		fmt.Printf(" Maintenance flag written\n")
	}
	// 3. Transfer leadership on global RQLite (port 5001)
	logger, _ := zap.NewProduction()
	defer logger.Sync()
	fmt.Printf(" Checking global RQLite leadership (port 5001)...\n")
	if err := rqlite.TransferLeadership(5001, logger); err != nil {
		fmt.Printf(" Warning: global leadership transfer: %v\n", err)
	} else {
		fmt.Printf(" Global RQLite leadership handled\n")
	}
	// 4. Transfer leadership on each namespace RQLite
	nsPorts := getNamespaceRQLitePorts()
	for ns, port := range nsPorts {
		fmt.Printf(" Checking namespace '%s' RQLite leadership (port %d)...\n", ns, port)
		if err := rqlite.TransferLeadership(port, logger); err != nil {
			fmt.Printf(" Warning: namespace '%s' leadership transfer: %v\n", ns, err)
		} else {
			fmt.Printf(" Namespace '%s' RQLite leadership handled\n", ns)
		}
	}
	// 5. Wait for metadata propagation (H5 fix: 15s, not 3s)
	// The peer exchange cycle is 30s, but we force-triggered metadata updates
	// via leadership transfer. 15s is sufficient for at least one exchange cycle.
	fmt.Printf(" Waiting 15s for metadata propagation...\n")
	time.Sleep(15 * time.Second)
	fmt.Printf("Pre-upgrade complete. Node is ready for restart.\n")
}
// getNamespaceRQLitePorts scans namespace env files to find RQLite HTTP ports.
// Returns map of namespace_name → HTTP port.
func getNamespaceRQLitePorts() map[string]int {
	const namespacesDir = "/home/orama/.orama/data/namespaces"

	ports := map[string]int{}
	entries, err := os.ReadDir(namespacesDir)
	if err != nil {
		// A missing directory means no namespaces are provisioned.
		return ports
	}

	for _, e := range entries {
		if !e.IsDir() {
			continue
		}
		name := e.Name()
		envFile := filepath.Join(namespacesDir, name, "rqlite.env")
		if p := parseHTTPPortFromEnv(envFile); p > 0 {
			ports[name] = p
		}
	}
	return ports
}
// parseHTTPPortFromEnv reads an env file and extracts the HTTP port from
// the HTTP_ADDR=0.0.0.0:PORT line.
func parseHTTPPortFromEnv(envFile string) int {
f, err := os.Open(envFile)
if err != nil {
return 0
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
if strings.HasPrefix(line, "HTTP_ADDR=") {
addr := strings.TrimPrefix(line, "HTTP_ADDR=")
// Format: 0.0.0.0:PORT
if idx := strings.LastIndex(addr, ":"); idx >= 0 {
if port, err := strconv.Atoi(addr[idx+1:]); err == nil {
return port
}
}
}
}
return 0
}

View File

@ -0,0 +1,145 @@
package lifecycle
import (
"encoding/json"
"fmt"
"io"
"net/http"
"time"
)
// checkQuorumSafety queries local RQLite to determine if stopping this node
// would break quorum. Returns a warning message if unsafe, empty string if safe.
//
// NOTE(review): getLocalRQLiteStatus hard-codes "voter" to true, so whenever
// RQLite is reachable this node is treated as a voter — confirm that is
// intended. The math below also assumes this node is counted among the
// reachable voters returned by /nodes.
func checkQuorumSafety() string {
	// Query local RQLite status to check if we're a voter
	status, err := getLocalRQLiteStatus()
	if err != nil {
		// RQLite may not be running — safe to stop
		return ""
	}
	raftState, _ := status["state"].(string)
	isVoter, _ := status["voter"].(bool)
	// If we're not a voter, stopping is always safe for quorum
	if !isVoter {
		return ""
	}
	// Query /nodes to count reachable voters
	nodes, err := getLocalRQLiteNodes()
	if err != nil {
		return fmt.Sprintf("Cannot verify quorum safety (failed to query nodes: %v). This node is a %s voter.", err, raftState)
	}
	// Tally voters and how many of them are currently reachable.
	reachableVoters := 0
	totalVoters := 0
	for _, node := range nodes {
		voter, _ := node["voter"].(bool)
		reachable, _ := node["reachable"].(bool)
		if voter {
			totalVoters++
			if reachable {
				reachableVoters++
			}
		}
	}
	// After removing this voter, remaining voters must form quorum:
	// quorum = (totalVoters / 2) + 1, so we need reachableVoters - 1 >= quorum
	remainingVoters := reachableVoters - 1
	quorumNeeded := (totalVoters-1)/2 + 1
	if remainingVoters < quorumNeeded {
		role := raftState
		if role == "Leader" {
			role = "the LEADER"
		}
		return fmt.Sprintf(
			"Stopping this node (%s, %s) would break RQLite quorum (%d/%d reachable voters would remain, need %d).",
			role, "voter", remainingVoters, totalVoters-1, quorumNeeded)
	}
	if raftState == "Leader" {
		// Not quorum-breaking but warn about leadership
		fmt.Printf(" Note: This node is the RQLite leader. Leadership will transfer on shutdown.\n")
	}
	return ""
}
// getLocalRQLiteStatus queries the local RQLite /status endpoint and
// returns a flat map with the raft "state" plus a "voter" flag.
func getLocalRQLiteStatus() (map[string]interface{}, error) {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get("http://localhost:5001/status")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	payload, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	var doc map[string]interface{}
	if err := json.Unmarshal(payload, &doc); err != nil {
		return nil, err
	}

	// Walk the nested store.raft object; bail out when either level is absent.
	store, _ := doc["store"].(map[string]interface{})
	if store == nil {
		return nil, fmt.Errorf("no store in status")
	}
	raft, _ := store["raft"].(map[string]interface{})
	if raft == nil {
		return nil, fmt.Errorf("no raft in status")
	}

	// /status does not expose a per-node voter flag; assume voter if we got here.
	return map[string]interface{}{
		"state": raft["state"],
		"voter": true,
	}, nil
}
// getLocalRQLiteNodes queries the local RQLite /nodes endpoint (including
// non-voters) and flattens the node_id → info map into a slice.
func getLocalRQLiteNodes() ([]map[string]interface{}, error) {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get("http://localhost:5001/nodes?nonvoters&timeout=3s")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	raw, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	// RQLite /nodes returns a JSON object keyed by node ID.
	byID := map[string]map[string]interface{}{}
	if err := json.Unmarshal(raw, &byID); err != nil {
		return nil, err
	}

	var nodes []map[string]interface{}
	for _, info := range byID {
		nodes = append(nodes, info)
	}
	return nodes, nil
}
// containsService reports whether name is present in the service list.
func containsService(services []string, name string) bool {
	for i := range services {
		if services[i] == name {
			return true
		}
	}
	return false
}

View File

@ -35,22 +35,22 @@ func HandleRestartWithFlags(force bool) {
} }
} }
fmt.Printf("Restarting all DeBros production services...\n") fmt.Printf("Restarting all Orama production services...\n")
services := utils.GetProductionServices() services := utils.GetProductionServices()
if len(services) == 0 { if len(services) == 0 {
fmt.Printf(" No DeBros services found\n") fmt.Printf(" No Orama services found\n")
return return
} }
// Ordered stop: gateway first, then node (RQLite), then supporting services // Ordered stop: gateway first, then node (RQLite), then supporting services
fmt.Printf("\n Stopping services (ordered)...\n") fmt.Printf("\n Stopping services (ordered)...\n")
shutdownOrder := [][]string{ shutdownOrder := [][]string{
{"debros-gateway"}, {"orama-gateway"},
{"debros-node"}, {"orama-node"},
{"debros-olric"}, {"orama-olric"},
{"debros-ipfs-cluster", "debros-ipfs"}, {"orama-ipfs-cluster", "orama-ipfs"},
{"debros-anyone-relay", "anyone-client"}, {"orama-anyone-relay", "anyone-client"},
{"coredns", "caddy"}, {"coredns", "caddy"},
} }

View File

@ -16,11 +16,11 @@ func HandleStart() {
os.Exit(1) os.Exit(1)
} }
fmt.Printf("Starting all DeBros production services...\n") fmt.Printf("Starting all Orama production services...\n")
services := utils.GetProductionServices() services := utils.GetProductionServices()
if len(services) == 0 { if len(services) == 0 {
fmt.Printf(" ⚠️ No DeBros services found\n") fmt.Printf(" ⚠️ No Orama services found\n")
return return
} }

View File

@ -36,7 +36,7 @@ func HandleStopWithFlags(force bool) {
} }
} }
fmt.Printf("Stopping all DeBros production services...\n") fmt.Printf("Stopping all Orama production services...\n")
// First, stop all namespace services // First, stop all namespace services
fmt.Printf("\n Stopping namespace services...\n") fmt.Printf("\n Stopping namespace services...\n")
@ -44,7 +44,7 @@ func HandleStopWithFlags(force bool) {
services := utils.GetProductionServices() services := utils.GetProductionServices()
if len(services) == 0 { if len(services) == 0 {
fmt.Printf(" No DeBros services found\n") fmt.Printf(" No Orama services found\n")
return return
} }
@ -53,12 +53,12 @@ func HandleStopWithFlags(force bool) {
// Ordered shutdown: gateway first, then node (RQLite), then supporting services // Ordered shutdown: gateway first, then node (RQLite), then supporting services
// This ensures we stop accepting requests before shutting down the database // This ensures we stop accepting requests before shutting down the database
shutdownOrder := [][]string{ shutdownOrder := [][]string{
{"debros-gateway"}, // 1. Stop accepting new requests {"orama-gateway"}, // 1. Stop accepting new requests
{"debros-node"}, // 2. Stop node (includes RQLite with leadership transfer) {"orama-node"}, // 2. Stop node (includes RQLite with leadership transfer)
{"debros-olric"}, // 3. Stop cache {"orama-olric"}, // 3. Stop cache
{"debros-ipfs-cluster", "debros-ipfs"}, // 4. Stop storage {"orama-ipfs-cluster", "orama-ipfs"}, // 4. Stop storage
{"debros-anyone-relay", "anyone-client"}, // 5. Stop privacy relay {"orama-anyone-relay", "anyone-client"}, // 5. Stop privacy relay
{"coredns", "caddy"}, // 6. Stop DNS/TLS last {"coredns", "caddy"}, // 6. Stop DNS/TLS last
} }
// First, disable all services to prevent auto-restart // First, disable all services to prevent auto-restart
@ -157,7 +157,7 @@ func HandleStopWithFlags(force bool) {
if hadError { if hadError {
fmt.Fprintf(os.Stderr, "\n⚠ Some services may still be restarting due to Restart=always\n") fmt.Fprintf(os.Stderr, "\n⚠ Some services may still be restarting due to Restart=always\n")
fmt.Fprintf(os.Stderr, " Check status with: systemctl list-units 'debros-*'\n") fmt.Fprintf(os.Stderr, " Check status with: systemctl list-units 'orama-*'\n")
fmt.Fprintf(os.Stderr, " If services are still restarting, they may need manual intervention\n") fmt.Fprintf(os.Stderr, " If services are still restarting, they may need manual intervention\n")
} else { } else {
fmt.Printf("\n✅ All services stopped and disabled (will not auto-start on boot)\n") fmt.Printf("\n✅ All services stopped and disabled (will not auto-start on boot)\n")
@ -168,7 +168,7 @@ func HandleStopWithFlags(force bool) {
// stopAllNamespaceServices stops all running namespace services // stopAllNamespaceServices stops all running namespace services
func stopAllNamespaceServices() { func stopAllNamespaceServices() {
// Find all running namespace services using systemctl list-units // Find all running namespace services using systemctl list-units
cmd := exec.Command("systemctl", "list-units", "--type=service", "--all", "--no-pager", "--no-legend", "debros-namespace-*@*.service") cmd := exec.Command("systemctl", "list-units", "--type=service", "--all", "--no-pager", "--no-legend", "orama-namespace-*@*.service")
output, err := cmd.Output() output, err := cmd.Output()
if err != nil { if err != nil {
fmt.Printf(" ⚠️ Warning: Failed to list namespace services: %v\n", err) fmt.Printf(" ⚠️ Warning: Failed to list namespace services: %v\n", err)
@ -181,7 +181,7 @@ func stopAllNamespaceServices() {
fields := strings.Fields(line) fields := strings.Fields(line)
if len(fields) > 0 { if len(fields) > 0 {
serviceName := fields[0] serviceName := fields[0]
if strings.HasPrefix(serviceName, "debros-namespace-") { if strings.HasPrefix(serviceName, "orama-namespace-") {
namespaceServices = append(namespaceServices, serviceName) namespaceServices = append(namespaceServices, serviceName)
} }
} }

View File

@ -27,7 +27,7 @@ func Handle(args []string) {
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "❌ %v\n", err) fmt.Fprintf(os.Stderr, "❌ %v\n", err)
fmt.Fprintf(os.Stderr, "\nAvailable service aliases: node, ipfs, cluster, gateway, olric\n") fmt.Fprintf(os.Stderr, "\nAvailable service aliases: node, ipfs, cluster, gateway, olric\n")
fmt.Fprintf(os.Stderr, "Or use full service name like: debros-node\n") fmt.Fprintf(os.Stderr, "Or use full service name like: orama-node\n")
os.Exit(1) os.Exit(1)
} }
@ -51,7 +51,7 @@ func showUsage() {
fmt.Fprintf(os.Stderr, "\nService aliases:\n") fmt.Fprintf(os.Stderr, "\nService aliases:\n")
fmt.Fprintf(os.Stderr, " node, ipfs, cluster, gateway, olric\n") fmt.Fprintf(os.Stderr, " node, ipfs, cluster, gateway, olric\n")
fmt.Fprintf(os.Stderr, "\nOr use full service name:\n") fmt.Fprintf(os.Stderr, "\nOr use full service name:\n")
fmt.Fprintf(os.Stderr, " debros-node, debros-gateway, etc.\n") fmt.Fprintf(os.Stderr, " orama-node, orama-gateway, etc.\n")
} }
func handleMultipleServices(serviceNames []string, serviceAlias string, follow bool) { func handleMultipleServices(serviceNames []string, serviceAlias string, follow bool) {

View File

@ -28,7 +28,7 @@ func Handle(args []string) {
os.Exit(1) os.Exit(1)
} }
oramaDir := "/home/debros/.orama" oramaDir := "/home/orama/.orama"
fmt.Printf("🔄 Checking for installations to migrate...\n\n") fmt.Printf("🔄 Checking for installations to migrate...\n\n")
@ -70,9 +70,9 @@ func Handle(args []string) {
func stopOldServices() { func stopOldServices() {
oldServices := []string{ oldServices := []string{
"debros-ipfs", "orama-ipfs",
"debros-ipfs-cluster", "orama-ipfs-cluster",
"debros-node", "orama-node",
} }
fmt.Printf("\n Stopping old services...\n") fmt.Printf("\n Stopping old services...\n")
@ -141,9 +141,9 @@ func migrateConfigFiles(oramaDir string) {
func removeOldServices() { func removeOldServices() {
oldServices := []string{ oldServices := []string{
"debros-ipfs", "orama-ipfs",
"debros-ipfs-cluster", "orama-ipfs-cluster",
"debros-node", "orama-node",
} }
fmt.Printf("\n Removing old service files...\n") fmt.Printf("\n Removing old service files...\n")

View File

@ -24,9 +24,9 @@ func (v *Validator) CheckNeedsMigration() bool {
} }
oldServices := []string{ oldServices := []string{
"debros-ipfs", "orama-ipfs",
"debros-ipfs-cluster", "orama-ipfs-cluster",
"debros-node", "orama-node",
} }
oldConfigs := []string{ oldConfigs := []string{

View File

@ -13,21 +13,21 @@ func Handle() {
// Unified service names (no bootstrap/node distinction) // Unified service names (no bootstrap/node distinction)
serviceNames := []string{ serviceNames := []string{
"debros-ipfs", "orama-ipfs",
"debros-ipfs-cluster", "orama-ipfs-cluster",
// Note: RQLite is managed by node process, not as separate service // Note: RQLite is managed by node process, not as separate service
"debros-olric", "orama-olric",
"debros-node", "orama-node",
"debros-gateway", "orama-gateway",
} }
// Friendly descriptions // Friendly descriptions
descriptions := map[string]string{ descriptions := map[string]string{
"debros-ipfs": "IPFS Daemon", "orama-ipfs": "IPFS Daemon",
"debros-ipfs-cluster": "IPFS Cluster", "orama-ipfs-cluster": "IPFS Cluster",
"debros-olric": "Olric Cache Server", "orama-olric": "Olric Cache Server",
"debros-node": "DeBros Node (includes RQLite)", "orama-node": "Orama Node (includes RQLite)",
"debros-gateway": "DeBros Gateway", "orama-gateway": "Orama Gateway",
} }
fmt.Printf("Services:\n") fmt.Printf("Services:\n")
@ -47,7 +47,7 @@ func Handle() {
} }
fmt.Printf("\nDirectories:\n") fmt.Printf("\nDirectories:\n")
oramaDir := "/home/debros/.orama" oramaDir := "/home/orama/.orama"
if _, err := os.Stat(oramaDir); err == nil { if _, err := os.Stat(oramaDir); err == nil {
fmt.Printf(" ✅ %s exists\n", oramaDir) fmt.Printf(" ✅ %s exists\n", oramaDir)
} else { } else {

View File

@ -16,8 +16,8 @@ func Handle() {
os.Exit(1) os.Exit(1)
} }
fmt.Printf("⚠️ This will stop and remove all DeBros production services\n") fmt.Printf("⚠️ This will stop and remove all Orama production services\n")
fmt.Printf("⚠️ Configuration and data will be preserved in /home/debros/.orama\n\n") fmt.Printf("⚠️ Configuration and data will be preserved in /home/orama/.orama\n\n")
fmt.Printf("Continue? (yes/no): ") fmt.Printf("Continue? (yes/no): ")
reader := bufio.NewReader(os.Stdin) reader := bufio.NewReader(os.Stdin)
@ -30,12 +30,12 @@ func Handle() {
} }
services := []string{ services := []string{
"debros-gateway", "orama-gateway",
"debros-node", "orama-node",
"debros-olric", "orama-olric",
"debros-ipfs-cluster", "orama-ipfs-cluster",
"debros-ipfs", "orama-ipfs",
"debros-anyone-client", "orama-anyone-client",
} }
fmt.Printf("Stopping services...\n") fmt.Printf("Stopping services...\n")
@ -48,6 +48,6 @@ func Handle() {
exec.Command("systemctl", "daemon-reload").Run() exec.Command("systemctl", "daemon-reload").Run()
fmt.Printf("✅ Services uninstalled\n") fmt.Printf("✅ Services uninstalled\n")
fmt.Printf(" Configuration and data preserved in /home/debros/.orama\n") fmt.Printf(" Configuration and data preserved in /home/orama/.orama\n")
fmt.Printf(" To remove all data: rm -rf /home/debros/.orama\n\n") fmt.Printf(" To remove all data: rm -rf /home/orama/.orama\n\n")
} }

View File

@ -26,7 +26,7 @@ type Orchestrator struct {
// NewOrchestrator creates a new upgrade orchestrator // NewOrchestrator creates a new upgrade orchestrator
func NewOrchestrator(flags *Flags) *Orchestrator { func NewOrchestrator(flags *Flags) *Orchestrator {
oramaHome := "/home/debros" oramaHome := "/home/orama"
oramaDir := oramaHome + "/.orama" oramaDir := oramaHome + "/.orama"
// Load existing preferences // Load existing preferences
@ -187,7 +187,7 @@ func (o *Orchestrator) Execute() error {
fmt.Printf(" To apply changes, restart services:\n") fmt.Printf(" To apply changes, restart services:\n")
fmt.Printf(" sudo systemctl daemon-reload\n") fmt.Printf(" sudo systemctl daemon-reload\n")
fmt.Printf(" sudo systemctl restart debros-*\n") fmt.Printf(" sudo systemctl restart orama-*\n")
fmt.Printf("\n") fmt.Printf("\n")
return nil return nil
@ -357,7 +357,7 @@ func (o *Orchestrator) stopServices() error {
fmt.Printf("\n⏹ Stopping all services before upgrade...\n") fmt.Printf("\n⏹ Stopping all services before upgrade...\n")
serviceController := production.NewSystemdController() serviceController := production.NewSystemdController()
// First, stop all namespace services (debros-namespace-*@*.service) // First, stop all namespace services (orama-namespace-*@*.service)
fmt.Printf(" Stopping namespace services...\n") fmt.Printf(" Stopping namespace services...\n")
if err := o.stopAllNamespaceServices(serviceController); err != nil { if err := o.stopAllNamespaceServices(serviceController); err != nil {
fmt.Printf(" ⚠️ Warning: Failed to stop namespace services: %v\n", err) fmt.Printf(" ⚠️ Warning: Failed to stop namespace services: %v\n", err)
@ -365,15 +365,15 @@ func (o *Orchestrator) stopServices() error {
// Stop services in reverse dependency order // Stop services in reverse dependency order
services := []string{ services := []string{
"caddy.service", // Depends on node "caddy.service", // Depends on node
"coredns.service", // Depends on node "coredns.service", // Depends on node
"debros-gateway.service", // Legacy "orama-gateway.service", // Legacy
"debros-node.service", // Depends on cluster, olric "orama-node.service", // Depends on cluster, olric
"debros-ipfs-cluster.service", // Depends on IPFS "orama-ipfs-cluster.service", // Depends on IPFS
"debros-ipfs.service", // Base IPFS "orama-ipfs.service", // Base IPFS
"debros-olric.service", // Independent "orama-olric.service", // Independent
"debros-anyone-client.service", // Client mode "orama-anyone-client.service", // Client mode
"debros-anyone-relay.service", // Relay mode "orama-anyone-relay.service", // Relay mode
} }
for _, svc := range services { for _, svc := range services {
unitPath := filepath.Join("/etc/systemd/system", svc) unitPath := filepath.Join("/etc/systemd/system", svc)
@ -393,7 +393,7 @@ func (o *Orchestrator) stopServices() error {
// stopAllNamespaceServices stops all running namespace services // stopAllNamespaceServices stops all running namespace services
func (o *Orchestrator) stopAllNamespaceServices(serviceController *production.SystemdController) error { func (o *Orchestrator) stopAllNamespaceServices(serviceController *production.SystemdController) error {
// Find all running namespace services using systemctl list-units // Find all running namespace services using systemctl list-units
cmd := exec.Command("systemctl", "list-units", "--type=service", "--state=running", "--no-pager", "--no-legend", "debros-namespace-*@*.service") cmd := exec.Command("systemctl", "list-units", "--type=service", "--state=running", "--no-pager", "--no-legend", "orama-namespace-*@*.service")
output, err := cmd.Output() output, err := cmd.Output()
if err != nil { if err != nil {
return fmt.Errorf("failed to list namespace services: %w", err) return fmt.Errorf("failed to list namespace services: %w", err)
@ -405,7 +405,7 @@ func (o *Orchestrator) stopAllNamespaceServices(serviceController *production.Sy
fields := strings.Fields(line) fields := strings.Fields(line)
if len(fields) > 0 { if len(fields) > 0 {
serviceName := fields[0] serviceName := fields[0]
if strings.HasPrefix(serviceName, "debros-namespace-") { if strings.HasPrefix(serviceName, "orama-namespace-") {
if err := serviceController.StopService(serviceName); err != nil { if err := serviceController.StopService(serviceName); err != nil {
fmt.Printf(" ⚠️ Warning: Failed to stop %s: %v\n", serviceName, err) fmt.Printf(" ⚠️ Warning: Failed to stop %s: %v\n", serviceName, err)
} else { } else {
@ -428,9 +428,9 @@ func (o *Orchestrator) installNamespaceTemplates() error {
systemdDir := "/etc/systemd/system" systemdDir := "/etc/systemd/system"
templates := []string{ templates := []string{
"debros-namespace-rqlite@.service", "orama-namespace-rqlite@.service",
"debros-namespace-olric@.service", "orama-namespace-olric@.service",
"debros-namespace-gateway@.service", "orama-namespace-gateway@.service",
} }
installedCount := 0 installedCount := 0
@ -636,7 +636,7 @@ func (o *Orchestrator) restartServices() error {
services := utils.GetProductionServices() services := utils.GetProductionServices()
// Re-enable all services BEFORE restarting them. // Re-enable all services BEFORE restarting them.
// orama prod stop disables services, and debros-node's PartOf= dependency // orama prod stop disables services, and orama-node's PartOf= dependency
// won't propagate restart to disabled services. We must re-enable first // won't propagate restart to disabled services. We must re-enable first
// so that all services restart with the updated binary. // so that all services restart with the updated binary.
for _, svc := range services { for _, svc := range services {
@ -664,13 +664,13 @@ func (o *Orchestrator) restartServices() error {
// Define the order for rolling restart - node service first (contains RQLite) // Define the order for rolling restart - node service first (contains RQLite)
// This ensures the cluster can reform before other services start // This ensures the cluster can reform before other services start
priorityOrder := []string{ priorityOrder := []string{
"debros-node", // Start node first - contains RQLite cluster "orama-node", // Start node first - contains RQLite cluster
"debros-olric", // Distributed cache "orama-olric", // Distributed cache
"debros-ipfs", // IPFS daemon "orama-ipfs", // IPFS daemon
"debros-ipfs-cluster", // IPFS cluster "orama-ipfs-cluster", // IPFS cluster
"debros-gateway", // Gateway (legacy) "orama-gateway", // Gateway (legacy)
"coredns", // DNS server "coredns", // DNS server
"caddy", // Reverse proxy "caddy", // Reverse proxy
} }
// Restart services in priority order with health checks // Restart services in priority order with health checks
@ -685,7 +685,7 @@ func (o *Orchestrator) restartServices() error {
fmt.Printf(" ✓ Started %s\n", svc) fmt.Printf(" ✓ Started %s\n", svc)
// For the node service, wait for RQLite cluster health // For the node service, wait for RQLite cluster health
if svc == "debros-node" { if svc == "orama-node" {
fmt.Printf(" Waiting for RQLite cluster to become healthy...\n") fmt.Printf(" Waiting for RQLite cluster to become healthy...\n")
if err := o.waitForClusterHealth(2 * time.Minute); err != nil { if err := o.waitForClusterHealth(2 * time.Minute); err != nil {
fmt.Printf(" ⚠️ Cluster health check warning: %v\n", err) fmt.Printf(" ⚠️ Cluster health check warning: %v\n", err)
@ -793,7 +793,7 @@ func (o *Orchestrator) waitForClusterHealth(timeout time.Duration) error {
// by looking for the systemd service file or the anonrc config file. // by looking for the systemd service file or the anonrc config file.
func detectAnyoneRelay(oramaDir string) bool { func detectAnyoneRelay(oramaDir string) bool {
// Check if systemd service file exists // Check if systemd service file exists
	if _, err := os.Stat("/etc/systemd/system/debros-anyone-relay.service"); err == nil { if _, err := os.Stat("/etc/systemd/system/orama-anyone-relay.service"); err == nil { // NOTE(review): an un-migrated install still has the legacy debros-anyone-relay.service unit — consider also checking the old path so relay operators aren't mis-detected during upgrade.
return true return true
} }
// Check if anonrc config exists // Check if anonrc config exists

View File

@ -77,7 +77,7 @@ func ShowDryRunSummaryWithRelay(vpsIP, domain, branch string, peers []string, jo
} else { } else {
fmt.Printf(" - anyone-client (npm)\n") fmt.Printf(" - anyone-client (npm)\n")
} }
fmt.Printf(" - DeBros binaries (built from %s branch)\n", branch) fmt.Printf(" - Orama binaries (built from %s branch)\n", branch)
fmt.Printf("\n🔐 Secrets that would be generated:\n") fmt.Printf("\n🔐 Secrets that would be generated:\n")
fmt.Printf(" - Cluster secret (64-hex)\n") fmt.Printf(" - Cluster secret (64-hex)\n")
@ -89,14 +89,14 @@ func ShowDryRunSummaryWithRelay(vpsIP, domain, branch string, peers []string, jo
fmt.Printf(" - %s/configs/olric/config.yaml\n", oramaDir) fmt.Printf(" - %s/configs/olric/config.yaml\n", oramaDir)
fmt.Printf("\n⚙ Systemd services that would be created:\n") fmt.Printf("\n⚙ Systemd services that would be created:\n")
fmt.Printf(" - debros-ipfs.service\n") fmt.Printf(" - orama-ipfs.service\n")
fmt.Printf(" - debros-ipfs-cluster.service\n") fmt.Printf(" - orama-ipfs-cluster.service\n")
fmt.Printf(" - debros-olric.service\n") fmt.Printf(" - orama-olric.service\n")
fmt.Printf(" - debros-node.service (includes embedded gateway + RQLite)\n") fmt.Printf(" - orama-node.service (includes embedded gateway + RQLite)\n")
if relayInfo != nil && relayInfo.Enabled { if relayInfo != nil && relayInfo.Enabled {
fmt.Printf(" - debros-anyone-relay.service (relay operator mode)\n") fmt.Printf(" - orama-anyone-relay.service (relay operator mode)\n")
} else { } else {
fmt.Printf(" - debros-anyone-client.service\n") fmt.Printf(" - orama-anyone-client.service\n")
} }
fmt.Printf("\n🌐 Ports that would be used:\n") fmt.Printf("\n🌐 Ports that would be used:\n")

View File

@ -23,23 +23,23 @@ type PortSpec struct {
} }
var ServicePorts = map[string][]PortSpec{ var ServicePorts = map[string][]PortSpec{
"debros-gateway": { "orama-gateway": {
{Name: "Gateway API", Port: constants.GatewayAPIPort}, {Name: "Gateway API", Port: constants.GatewayAPIPort},
}, },
"debros-olric": { "orama-olric": {
{Name: "Olric HTTP", Port: constants.OlricHTTPPort}, {Name: "Olric HTTP", Port: constants.OlricHTTPPort},
{Name: "Olric Memberlist", Port: constants.OlricMemberlistPort}, {Name: "Olric Memberlist", Port: constants.OlricMemberlistPort},
}, },
"debros-node": { "orama-node": {
{Name: "RQLite HTTP", Port: constants.RQLiteHTTPPort}, {Name: "RQLite HTTP", Port: constants.RQLiteHTTPPort},
{Name: "RQLite Raft", Port: constants.RQLiteRaftPort}, {Name: "RQLite Raft", Port: constants.RQLiteRaftPort},
}, },
"debros-ipfs": { "orama-ipfs": {
{Name: "IPFS API", Port: 4501}, {Name: "IPFS API", Port: 4501},
{Name: "IPFS Gateway", Port: 8080}, {Name: "IPFS Gateway", Port: 8080},
{Name: "IPFS Swarm", Port: 4101}, {Name: "IPFS Swarm", Port: 4101},
}, },
"debros-ipfs-cluster": { "orama-ipfs-cluster": {
{Name: "IPFS Cluster API", Port: 9094}, {Name: "IPFS Cluster API", Port: 9094},
}, },
} }
@ -63,13 +63,13 @@ func DefaultPorts() []PortSpec {
func ResolveServiceName(alias string) ([]string, error) { func ResolveServiceName(alias string) ([]string, error) {
// Service alias mapping (unified - no bootstrap/node distinction) // Service alias mapping (unified - no bootstrap/node distinction)
aliases := map[string][]string{ aliases := map[string][]string{
"node": {"debros-node"}, "node": {"orama-node"},
"ipfs": {"debros-ipfs"}, "ipfs": {"orama-ipfs"},
"cluster": {"debros-ipfs-cluster"}, "cluster": {"orama-ipfs-cluster"},
"ipfs-cluster": {"debros-ipfs-cluster"}, "ipfs-cluster": {"orama-ipfs-cluster"},
"gateway": {"debros-gateway"}, "gateway": {"orama-gateway"},
"olric": {"debros-olric"}, "olric": {"orama-olric"},
"rqlite": {"debros-node"}, // RQLite logs are in node logs "rqlite": {"orama-node"}, // RQLite logs are in node logs
} }
// Check if it's an alias // Check if it's an alias
@ -153,18 +153,18 @@ func IsServiceMasked(service string) (bool, error) {
return false, nil return false, nil
} }
// GetProductionServices returns a list of all DeBros production service names that exist, // GetProductionServices returns a list of all Orama production service names that exist,
// including both global services and namespace-specific services // including both global services and namespace-specific services
func GetProductionServices() []string { func GetProductionServices() []string {
// Global/default service names // Global/default service names
globalServices := []string{ globalServices := []string{
"debros-gateway", "orama-gateway",
"debros-node", "orama-node",
"debros-olric", "orama-olric",
"debros-ipfs-cluster", "orama-ipfs-cluster",
"debros-ipfs", "orama-ipfs",
"debros-anyone-client", "orama-anyone-client",
"debros-anyone-relay", "orama-anyone-relay",
} }
var existing []string var existing []string
@ -179,10 +179,10 @@ func GetProductionServices() []string {
// Discover namespace service instances from the namespaces data directory. // Discover namespace service instances from the namespaces data directory.
// We can't rely on scanning /etc/systemd/system because that only contains // We can't rely on scanning /etc/systemd/system because that only contains
// template files (e.g. debros-namespace-gateway@.service) with no instance name. // template files (e.g. orama-namespace-gateway@.service) with no instance name.
// Restarting a template without an instance is a no-op. // Restarting a template without an instance is a no-op.
// Instead, scan the data directory where each subdirectory is a provisioned namespace. // Instead, scan the data directory where each subdirectory is a provisioned namespace.
namespacesDir := "/home/debros/.orama/data/namespaces" namespacesDir := "/home/orama/.orama/data/namespaces"
nsEntries, err := os.ReadDir(namespacesDir) nsEntries, err := os.ReadDir(namespacesDir)
if err == nil { if err == nil {
serviceTypes := []string{"rqlite", "olric", "gateway"} serviceTypes := []string{"rqlite", "olric", "gateway"}
@ -195,7 +195,7 @@ func GetProductionServices() []string {
// Only add if the env file exists (service was provisioned) // Only add if the env file exists (service was provisioned)
envFile := filepath.Join(namespacesDir, ns, svcType+".env") envFile := filepath.Join(namespacesDir, ns, svcType+".env")
if _, err := os.Stat(envFile); err == nil { if _, err := os.Stat(envFile); err == nil {
svcName := fmt.Sprintf("debros-namespace-%s@%s", svcType, ns) svcName := fmt.Sprintf("orama-namespace-%s@%s", svcType, ns)
existing = append(existing, svcName) existing = append(existing, svcName)
} }
} }
@ -304,7 +304,7 @@ func StartServicesOrdered(services []string, action string) {
for _, svc := range services { for _, svc := range services {
matched := false matched := false
for _, svcType := range NamespaceServiceOrder { for _, svcType := range NamespaceServiceOrder {
prefix := "debros-namespace-" + svcType + "@" prefix := "orama-namespace-" + svcType + "@"
if strings.HasPrefix(svc, prefix) { if strings.HasPrefix(svc, prefix) {
nsServices[svcType] = append(nsServices[svcType], svc) nsServices[svcType] = append(nsServices[svcType], svc)
matched = true matched = true
@ -348,4 +348,3 @@ func StartServicesOrdered(services []string, action string) {
} }
} }
} }

View File

@ -13,7 +13,7 @@ import (
// These can be overridden by environment variables or config. // These can be overridden by environment variables or config.
func DefaultBootstrapPeers() []string { func DefaultBootstrapPeers() []string {
// Check environment variable first // Check environment variable first
	if envPeers := os.Getenv("DEBROS_BOOTSTRAP_PEERS"); envPeers != "" { if envPeers := os.Getenv("ORAMA_BOOTSTRAP_PEERS"); envPeers != "" { // NOTE(review): deployments still exporting DEBROS_BOOTSTRAP_PEERS will silently fall back to defaults — consider reading the legacy variable as a fallback during the transition.
peers := splitCSVOrSpace(envPeers) peers := splitCSVOrSpace(envPeers)
// Filter out empty strings // Filter out empty strings
result := make([]string, 0, len(peers)) result := make([]string, 0, len(peers))

View File

@ -8,11 +8,11 @@ import (
) )
func TestDefaultBootstrapPeersNonEmpty(t *testing.T) { func TestDefaultBootstrapPeersNonEmpty(t *testing.T) {
old := os.Getenv("DEBROS_BOOTSTRAP_PEERS") old := os.Getenv("ORAMA_BOOTSTRAP_PEERS")
t.Cleanup(func() { os.Setenv("DEBROS_BOOTSTRAP_PEERS", old) }) t.Cleanup(func() { os.Setenv("ORAMA_BOOTSTRAP_PEERS", old) })
// Set a valid peer // Set a valid peer
validPeer := "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj" validPeer := "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"
_ = os.Setenv("DEBROS_BOOTSTRAP_PEERS", validPeer) _ = os.Setenv("ORAMA_BOOTSTRAP_PEERS", validPeer)
peers := DefaultBootstrapPeers() peers := DefaultBootstrapPeers()
if len(peers) == 0 { if len(peers) == 0 {
t.Fatalf("expected non-empty default peers") t.Fatalf("expected non-empty default peers")

View File

@ -27,7 +27,7 @@ type DatabaseConfig struct {
RaftElectionTimeout time.Duration `yaml:"raft_election_timeout"` // default: 5s RaftElectionTimeout time.Duration `yaml:"raft_election_timeout"` // default: 5s
RaftHeartbeatTimeout time.Duration `yaml:"raft_heartbeat_timeout"` // default: 2s RaftHeartbeatTimeout time.Duration `yaml:"raft_heartbeat_timeout"` // default: 2s
RaftApplyTimeout time.Duration `yaml:"raft_apply_timeout"` // default: 30s RaftApplyTimeout time.Duration `yaml:"raft_apply_timeout"` // default: 30s
RaftLeaderLeaseTimeout time.Duration `yaml:"raft_leader_lease_timeout"` // default: 5s RaftLeaderLeaseTimeout time.Duration `yaml:"raft_leader_lease_timeout"` // default: 2s (must be <= heartbeat timeout)
// Dynamic discovery configuration (always enabled) // Dynamic discovery configuration (always enabled)
ClusterSyncInterval time.Duration `yaml:"cluster_sync_interval"` // default: 30s ClusterSyncInterval time.Duration `yaml:"cluster_sync_interval"` // default: 30s

View File

@ -20,7 +20,7 @@ func ExpandPath(path string) (string, error) {
return path, nil return path, nil
} }
// ConfigDir returns the path to the DeBros config directory (~/.orama). // ConfigDir returns the path to the Orama config directory (~/.orama).
func ConfigDir() (string, error) { func ConfigDir() (string, error) {
home, err := os.UserHomeDir() home, err := os.UserHomeDir()
if err != nil { if err != nil {

View File

@ -119,7 +119,7 @@ sudo chmod +x /usr/local/bin/coredns
# 2. Create directories # 2. Create directories
sudo mkdir -p /etc/coredns sudo mkdir -p /etc/coredns
sudo mkdir -p /var/lib/coredns sudo mkdir -p /var/lib/coredns
sudo chown debros:debros /var/lib/coredns sudo chown orama:orama /var/lib/coredns
# 3. Copy configuration # 3. Copy configuration
sudo cp configs/coredns/Corefile /etc/coredns/ sudo cp configs/coredns/Corefile /etc/coredns/
@ -425,8 +425,8 @@ Adjust in `client.go` if needed for higher load.
```bash ```bash
# Increase file descriptor limit # Increase file descriptor limit
# Add to /etc/security/limits.conf: # Add to /etc/security/limits.conf:
debros soft nofile 65536 orama soft nofile 65536
debros hard nofile 65536 orama hard nofile 65536
``` ```
## Next Steps ## Next Steps

View File

@ -20,7 +20,7 @@ import (
// Manager manages deployment processes via systemd (Linux) or direct process spawning (macOS/other) // Manager manages deployment processes via systemd (Linux) or direct process spawning (macOS/other)
type Manager struct { type Manager struct {
logger *zap.Logger logger *zap.Logger
useSystemd bool useSystemd bool
// For non-systemd mode: track running processes // For non-systemd mode: track running processes
@ -310,8 +310,8 @@ After=network.target
[Service] [Service]
Type=simple Type=simple
User=debros User=orama
Group=debros Group=orama
WorkingDirectory={{.WorkDir}} WorkingDirectory={{.WorkDir}}
{{range .Env}}Environment="{{.}}" {{range .Env}}Environment="{{.}}"
@ -373,7 +373,7 @@ WantedBy=multi-user.target
return err return err
} }
// Use sudo tee to write to systemd directory (debros user needs sudo access) // Use sudo tee to write to systemd directory (orama user needs sudo access)
cmd := exec.Command("sudo", "tee", serviceFile) cmd := exec.Command("sudo", "tee", serviceFile)
cmd.Stdin = &buf cmd.Stdin = &buf
output, err := cmd.CombinedOutput() output, err := cmd.CombinedOutput()

View File

@ -91,7 +91,7 @@ func TestGetStartCommand(t *testing.T) {
// On macOS (test environment), useSystemd will be false, so node/npm use short paths. // On macOS (test environment), useSystemd will be false, so node/npm use short paths.
// We explicitly set it to test both modes. // We explicitly set it to test both modes.
workDir := "/home/debros/deployments/alice/myapp" workDir := "/home/orama/deployments/alice/myapp"
tests := []struct { tests := []struct {
name string name string
@ -227,9 +227,9 @@ func TestMapRestartPolicy(t *testing.T) {
func TestParseSystemctlShow(t *testing.T) { func TestParseSystemctlShow(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
input string input string
want map[string]string want map[string]string
}{ }{
{ {
name: "typical output", name: "typical output",

View File

@ -18,7 +18,7 @@ import (
) )
// Protocol ID for peer exchange // Protocol ID for peer exchange
const PeerExchangeProtocol = "/debros/peer-exchange/1.0.0" const PeerExchangeProtocol = "/orama/peer-exchange/1.0.0" // NOTE(review): changing the libp2p protocol ID breaks peer exchange with nodes still announcing the old "/debros/..." ID — confirm a lockstep upgrade, or register handlers for both IDs during the transition.
// libp2pPort is the standard port used for libp2p peer connections. // libp2pPort is the standard port used for libp2p peer connections.
// Filtering on this port prevents cross-connecting with IPFS (4101) or IPFS Cluster (9096/9098). // Filtering on this port prevents cross-connecting with IPFS (4101) or IPFS Cluster (9096/9098).

View File

@ -307,14 +307,14 @@ func ensureSecretFilePermissions(secretPath string) error {
return fmt.Errorf("failed to set permissions on %s: %w", secretPath, err) return fmt.Errorf("failed to set permissions on %s: %w", secretPath, err)
} }
if usr, err := user.Lookup("debros"); err == nil { if usr, err := user.Lookup("orama"); err == nil {
uid, err := strconv.Atoi(usr.Uid) uid, err := strconv.Atoi(usr.Uid)
if err != nil { if err != nil {
return fmt.Errorf("failed to parse debros UID: %w", err) return fmt.Errorf("failed to parse orama UID: %w", err)
} }
gid, err := strconv.Atoi(usr.Gid) gid, err := strconv.Atoi(usr.Gid)
if err != nil { if err != nil {
return fmt.Errorf("failed to parse debros GID: %w", err) return fmt.Errorf("failed to parse orama GID: %w", err)
} }
if err := os.Chown(secretPath, uid, gid); err != nil { if err := os.Chown(secretPath, uid, gid); err != nil {
return fmt.Errorf("failed to change ownership of %s: %w", secretPath, err) return fmt.Errorf("failed to change ownership of %s: %w", secretPath, err)
@ -439,8 +439,8 @@ func (sg *SecretGenerator) SaveConfig(filename string, content string) error {
} }
// Fix ownership // Fix ownership
if err := exec.Command("chown", "debros:debros", configPath).Run(); err != nil { if err := exec.Command("chown", "orama:orama", configPath).Run(); err != nil {
fmt.Printf("Warning: failed to chown %s to debros:debros: %v\n", configPath, err) fmt.Printf("Warning: failed to chown %s to orama:orama: %v\n", configPath, err)
} }
return nil return nil

View File

@ -27,7 +27,7 @@ type BinaryInstaller struct {
// NewBinaryInstaller creates a new binary installer // NewBinaryInstaller creates a new binary installer
func NewBinaryInstaller(arch string, logWriter io.Writer) *BinaryInstaller { func NewBinaryInstaller(arch string, logWriter io.Writer) *BinaryInstaller {
oramaHome := "/home/debros" oramaHome := "/home/orama"
return &BinaryInstaller{ return &BinaryInstaller{
arch: arch, arch: arch,
logWriter: logWriter, logWriter: logWriter,
@ -72,7 +72,7 @@ func (bi *BinaryInstaller) ResolveBinaryPath(binary string, extraPaths ...string
return installers.ResolveBinaryPath(binary, extraPaths...) return installers.ResolveBinaryPath(binary, extraPaths...)
} }
// InstallDeBrosBinaries builds DeBros binaries from source // InstallDeBrosBinaries builds Orama binaries from source
func (bi *BinaryInstaller) InstallDeBrosBinaries(oramaHome string) error { func (bi *BinaryInstaller) InstallDeBrosBinaries(oramaHome string) error {
return bi.gateway.InstallDeBrosBinaries(oramaHome) return bi.gateway.InstallDeBrosBinaries(oramaHome)
} }

View File

@ -194,7 +194,7 @@ func (ari *AnyoneRelayInstaller) Install() error {
os.Remove(installScript) os.Remove(installScript)
// Stop and disable the default 'anon' systemd service that the apt package // Stop and disable the default 'anon' systemd service that the apt package
// auto-enables. We use our own 'debros-anyone-relay' service instead. // auto-enables. We use our own 'orama-anyone-relay' service instead.
exec.Command("systemctl", "stop", "anon").Run() exec.Command("systemctl", "stop", "anon").Run()
exec.Command("systemctl", "disable", "anon").Run() exec.Command("systemctl", "disable", "anon").Run()

View File

@ -77,7 +77,7 @@ func (ci *CaddyInstaller) Install() error {
if _, err := exec.LookPath("xcaddy"); err != nil { if _, err := exec.LookPath("xcaddy"); err != nil {
fmt.Fprintf(ci.logWriter, " Installing xcaddy...\n") fmt.Fprintf(ci.logWriter, " Installing xcaddy...\n")
cmd := exec.Command("go", "install", xcaddyRepo) cmd := exec.Command("go", "install", xcaddyRepo)
		cmd.Env = append(os.Environ(), "PATH="+goPath, "GOBIN=/usr/local/bin") cmd.Env = append(os.Environ(), "PATH="+goPath, "GOBIN=/usr/local/bin", "GOPROXY=https://proxy.golang.org|direct", "GONOSUMDB=*") // NOTE(review): GONOSUMDB=* disables checksum-database verification for ALL modules (supply-chain risk); prefer scoping it (or GOPRIVATE) to the specific private module paths that need it.
if output, err := cmd.CombinedOutput(); err != nil { if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to install xcaddy: %w\n%s", err, string(output)) return fmt.Errorf("failed to install xcaddy: %w\n%s", err, string(output))
} }
@ -105,7 +105,7 @@ func (ci *CaddyInstaller) Install() error {
// Run go mod tidy // Run go mod tidy
tidyCmd := exec.Command("go", "mod", "tidy") tidyCmd := exec.Command("go", "mod", "tidy")
tidyCmd.Dir = moduleDir tidyCmd.Dir = moduleDir
tidyCmd.Env = append(os.Environ(), "PATH="+goPath) tidyCmd.Env = append(os.Environ(), "PATH="+goPath, "GOPROXY=https://proxy.golang.org|direct", "GONOSUMDB=*")
if output, err := tidyCmd.CombinedOutput(); err != nil { if output, err := tidyCmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to run go mod tidy: %w\n%s", err, string(output)) return fmt.Errorf("failed to run go mod tidy: %w\n%s", err, string(output))
} }
@ -122,7 +122,7 @@ func (ci *CaddyInstaller) Install() error {
"--with", "github.com/DeBrosOfficial/caddy-dns-orama="+moduleDir, "--with", "github.com/DeBrosOfficial/caddy-dns-orama="+moduleDir,
"--output", filepath.Join(buildDir, "caddy")) "--output", filepath.Join(buildDir, "caddy"))
buildCmd.Dir = buildDir buildCmd.Dir = buildDir
buildCmd.Env = append(os.Environ(), "PATH="+goPath) buildCmd.Env = append(os.Environ(), "PATH="+goPath, "GOPROXY=https://proxy.golang.org|direct", "GONOSUMDB=*")
if output, err := buildCmd.CombinedOutput(); err != nil { if output, err := buildCmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to build Caddy: %w\n%s", err, string(output)) return fmt.Errorf("failed to build Caddy: %w\n%s", err, string(output))
} }

View File

@ -171,21 +171,21 @@ func (ci *CoreDNSInstaller) Install() error {
getCmd := exec.Command("go", "get", "github.com/miekg/dns@latest") getCmd := exec.Command("go", "get", "github.com/miekg/dns@latest")
getCmd.Dir = buildDir getCmd.Dir = buildDir
getCmd.Env = append(os.Environ(), "PATH="+goPath) getCmd.Env = append(os.Environ(), "PATH="+goPath, "GOPROXY=https://proxy.golang.org|direct", "GONOSUMDB=*")
if output, err := getCmd.CombinedOutput(); err != nil { if output, err := getCmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to get miekg/dns: %w\n%s", err, string(output)) return fmt.Errorf("failed to get miekg/dns: %w\n%s", err, string(output))
} }
getCmd = exec.Command("go", "get", "go.uber.org/zap@latest") getCmd = exec.Command("go", "get", "go.uber.org/zap@latest")
getCmd.Dir = buildDir getCmd.Dir = buildDir
getCmd.Env = append(os.Environ(), "PATH="+goPath) getCmd.Env = append(os.Environ(), "PATH="+goPath, "GOPROXY=https://proxy.golang.org|direct", "GONOSUMDB=*")
if output, err := getCmd.CombinedOutput(); err != nil { if output, err := getCmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to get zap: %w\n%s", err, string(output)) return fmt.Errorf("failed to get zap: %w\n%s", err, string(output))
} }
tidyCmd := exec.Command("go", "mod", "tidy") tidyCmd := exec.Command("go", "mod", "tidy")
tidyCmd.Dir = buildDir tidyCmd.Dir = buildDir
tidyCmd.Env = append(os.Environ(), "PATH="+goPath) tidyCmd.Env = append(os.Environ(), "PATH="+goPath, "GOPROXY=https://proxy.golang.org|direct", "GONOSUMDB=*")
if output, err := tidyCmd.CombinedOutput(); err != nil { if output, err := tidyCmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to run go mod tidy: %w\n%s", err, string(output)) return fmt.Errorf("failed to run go mod tidy: %w\n%s", err, string(output))
} }
@ -194,7 +194,7 @@ func (ci *CoreDNSInstaller) Install() error {
fmt.Fprintf(ci.logWriter, " Generating plugin code...\n") fmt.Fprintf(ci.logWriter, " Generating plugin code...\n")
genCmd := exec.Command("go", "generate") genCmd := exec.Command("go", "generate")
genCmd.Dir = buildDir genCmd.Dir = buildDir
genCmd.Env = append(os.Environ(), "PATH="+goPath) genCmd.Env = append(os.Environ(), "PATH="+goPath, "GOPROXY=https://proxy.golang.org|direct", "GONOSUMDB=*")
if output, err := genCmd.CombinedOutput(); err != nil { if output, err := genCmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to generate: %w\n%s", err, string(output)) return fmt.Errorf("failed to generate: %w\n%s", err, string(output))
} }
@ -203,7 +203,7 @@ func (ci *CoreDNSInstaller) Install() error {
fmt.Fprintf(ci.logWriter, " Building CoreDNS binary...\n") fmt.Fprintf(ci.logWriter, " Building CoreDNS binary...\n")
buildCmd := exec.Command("go", "build", "-o", "coredns") buildCmd := exec.Command("go", "build", "-o", "coredns")
buildCmd.Dir = buildDir buildCmd.Dir = buildDir
buildCmd.Env = append(os.Environ(), "PATH="+goPath, "CGO_ENABLED=0") buildCmd.Env = append(os.Environ(), "PATH="+goPath, "CGO_ENABLED=0", "GOPROXY=https://proxy.golang.org|direct", "GONOSUMDB=*")
if output, err := buildCmd.CombinedOutput(); err != nil { if output, err := buildCmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to build CoreDNS: %w\n%s", err, string(output)) return fmt.Errorf("failed to build CoreDNS: %w\n%s", err, string(output))
} }

View File

@ -9,7 +9,7 @@ import (
"strings" "strings"
) )
// GatewayInstaller handles DeBros binary installation (including gateway) // GatewayInstaller handles Orama binary installation (including gateway)
type GatewayInstaller struct { type GatewayInstaller struct {
*BaseInstaller *BaseInstaller
} }
@ -27,7 +27,7 @@ func (gi *GatewayInstaller) IsInstalled() bool {
return false // Always build to ensure latest version return false // Always build to ensure latest version
} }
// Install clones and builds DeBros binaries // Install clones and builds Orama binaries
func (gi *GatewayInstaller) Install() error { func (gi *GatewayInstaller) Install() error {
// This is a placeholder - actual installation is handled by InstallDeBrosBinaries // This is a placeholder - actual installation is handled by InstallDeBrosBinaries
return nil return nil
@ -39,10 +39,10 @@ func (gi *GatewayInstaller) Configure() error {
return nil return nil
} }
// InstallDeBrosBinaries builds DeBros binaries from source at /home/debros/src. // InstallDeBrosBinaries builds Orama binaries from source at /home/orama/src.
// Source must already be present (uploaded via SCP archive). // Source must already be present (uploaded via SCP archive).
func (gi *GatewayInstaller) InstallDeBrosBinaries(oramaHome string) error { func (gi *GatewayInstaller) InstallDeBrosBinaries(oramaHome string) error {
fmt.Fprintf(gi.logWriter, " Building DeBros binaries...\n") fmt.Fprintf(gi.logWriter, " Building Orama binaries...\n")
srcDir := filepath.Join(oramaHome, "src") srcDir := filepath.Join(oramaHome, "src")
binDir := filepath.Join(oramaHome, "bin") binDir := filepath.Join(oramaHome, "bin")
@ -64,7 +64,7 @@ func (gi *GatewayInstaller) InstallDeBrosBinaries(oramaHome string) error {
fmt.Fprintf(gi.logWriter, " Building binaries...\n") fmt.Fprintf(gi.logWriter, " Building binaries...\n")
cmd := exec.Command("make", "build") cmd := exec.Command("make", "build")
cmd.Dir = srcDir cmd.Dir = srcDir
cmd.Env = append(os.Environ(), "HOME="+oramaHome, "PATH="+os.Getenv("PATH")+":/usr/local/go/bin") cmd.Env = append(os.Environ(), "HOME="+oramaHome, "PATH="+os.Getenv("PATH")+":/usr/local/go/bin", "GOPROXY=https://proxy.golang.org|direct", "GONOSUMDB=*")
if output, err := cmd.CombinedOutput(); err != nil { if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to build: %v\n%s", err, string(output)) return fmt.Errorf("failed to build: %v\n%s", err, string(output))
} }
@ -117,7 +117,7 @@ func (gi *GatewayInstaller) InstallDeBrosBinaries(oramaHome string) error {
if err := exec.Command("chmod", "-R", "755", binDir).Run(); err != nil { if err := exec.Command("chmod", "-R", "755", binDir).Run(); err != nil {
fmt.Fprintf(gi.logWriter, " ⚠️ Warning: failed to chmod bin directory: %v\n", err) fmt.Fprintf(gi.logWriter, " ⚠️ Warning: failed to chmod bin directory: %v\n", err)
} }
if err := exec.Command("chown", "-R", "debros:debros", binDir).Run(); err != nil { if err := exec.Command("chown", "-R", "orama:orama", binDir).Run(); err != nil {
fmt.Fprintf(gi.logWriter, " ⚠️ Warning: failed to chown bin directory: %v\n", err) fmt.Fprintf(gi.logWriter, " ⚠️ Warning: failed to chown bin directory: %v\n", err)
} }
@ -132,26 +132,29 @@ func (gi *GatewayInstaller) InstallDeBrosBinaries(oramaHome string) error {
} }
} }
fmt.Fprintf(gi.logWriter, " ✓ DeBros binaries installed\n") fmt.Fprintf(gi.logWriter, " ✓ Orama binaries installed\n")
return nil return nil
} }
// InstallGo downloads and installs Go toolchain // InstallGo downloads and installs Go toolchain
func (gi *GatewayInstaller) InstallGo() error { func (gi *GatewayInstaller) InstallGo() error {
requiredVersion := "1.22.5" requiredVersion := "1.24.6"
if goPath, err := exec.LookPath("go"); err == nil { if goPath, err := exec.LookPath("go"); err == nil {
// Check version - upgrade if too old // Check version - upgrade if too old
out, _ := exec.Command(goPath, "version").Output() out, _ := exec.Command(goPath, "version").Output()
if strings.Contains(string(out), "go"+requiredVersion) || strings.Contains(string(out), "go1.23") || strings.Contains(string(out), "go1.24") { if strings.Contains(string(out), "go"+requiredVersion) {
fmt.Fprintf(gi.logWriter, " ✓ Go already installed (%s)\n", strings.TrimSpace(string(out))) fmt.Fprintf(gi.logWriter, " ✓ Go already installed (%s)\n", strings.TrimSpace(string(out)))
return nil return nil
} }
fmt.Fprintf(gi.logWriter, " Upgrading Go (current: %s, need >= %s)...\n", strings.TrimSpace(string(out)), requiredVersion) fmt.Fprintf(gi.logWriter, " Upgrading Go (current: %s, need %s)...\n", strings.TrimSpace(string(out)), requiredVersion)
os.RemoveAll("/usr/local/go") os.RemoveAll("/usr/local/go")
} else { } else {
fmt.Fprintf(gi.logWriter, " Installing Go...\n") fmt.Fprintf(gi.logWriter, " Installing Go...\n")
} }
// Always remove old Go installation to avoid mixing versions
os.RemoveAll("/usr/local/go")
goTarball := fmt.Sprintf("go%s.linux-%s.tar.gz", requiredVersion, gi.arch) goTarball := fmt.Sprintf("go%s.linux-%s.tar.gz", requiredVersion, gi.arch)
goURL := fmt.Sprintf("https://go.dev/dl/%s", goTarball) goURL := fmt.Sprintf("https://go.dev/dl/%s", goTarball)
@ -214,12 +217,12 @@ func (gi *GatewayInstaller) InstallAnyoneClient() error {
fmt.Fprintf(gi.logWriter, " Initializing NPM cache...\n") fmt.Fprintf(gi.logWriter, " Initializing NPM cache...\n")
// Create nested cache directories with proper permissions // Create nested cache directories with proper permissions
debrosHome := "/home/debros" oramaHome := "/home/orama"
npmCacheDirs := []string{ npmCacheDirs := []string{
filepath.Join(debrosHome, ".npm"), filepath.Join(oramaHome, ".npm"),
filepath.Join(debrosHome, ".npm", "_cacache"), filepath.Join(oramaHome, ".npm", "_cacache"),
filepath.Join(debrosHome, ".npm", "_cacache", "tmp"), filepath.Join(oramaHome, ".npm", "_cacache", "tmp"),
filepath.Join(debrosHome, ".npm", "_logs"), filepath.Join(oramaHome, ".npm", "_logs"),
} }
for _, dir := range npmCacheDirs { for _, dir := range npmCacheDirs {
@ -227,8 +230,8 @@ func (gi *GatewayInstaller) InstallAnyoneClient() error {
fmt.Fprintf(gi.logWriter, " ⚠️ Failed to create %s: %v\n", dir, err) fmt.Fprintf(gi.logWriter, " ⚠️ Failed to create %s: %v\n", dir, err)
continue continue
} }
// Fix ownership to debros user (sequential to avoid race conditions) // Fix ownership to orama user (sequential to avoid race conditions)
if err := exec.Command("chown", "debros:debros", dir).Run(); err != nil { if err := exec.Command("chown", "orama:orama", dir).Run(); err != nil {
fmt.Fprintf(gi.logWriter, " ⚠️ Warning: failed to chown %s: %v\n", dir, err) fmt.Fprintf(gi.logWriter, " ⚠️ Warning: failed to chown %s: %v\n", dir, err)
} }
if err := exec.Command("chmod", "700", dir).Run(); err != nil { if err := exec.Command("chmod", "700", dir).Run(); err != nil {
@ -236,14 +239,14 @@ func (gi *GatewayInstaller) InstallAnyoneClient() error {
} }
} }
// Recursively fix ownership of entire .npm directory to ensure all nested files are owned by debros // Recursively fix ownership of entire .npm directory to ensure all nested files are owned by orama
if err := exec.Command("chown", "-R", "debros:debros", filepath.Join(debrosHome, ".npm")).Run(); err != nil { if err := exec.Command("chown", "-R", "orama:orama", filepath.Join(oramaHome, ".npm")).Run(); err != nil {
fmt.Fprintf(gi.logWriter, " ⚠️ Warning: failed to chown .npm directory: %v\n", err) fmt.Fprintf(gi.logWriter, " ⚠️ Warning: failed to chown .npm directory: %v\n", err)
} }
// Run npm cache verify as debros user with proper environment // Run npm cache verify as orama user with proper environment
cacheInitCmd := exec.Command("sudo", "-u", "debros", "npm", "cache", "verify", "--silent") cacheInitCmd := exec.Command("sudo", "-u", "orama", "npm", "cache", "verify", "--silent")
cacheInitCmd.Env = append(os.Environ(), "HOME="+debrosHome) cacheInitCmd.Env = append(os.Environ(), "HOME="+oramaHome)
if err := cacheInitCmd.Run(); err != nil { if err := cacheInitCmd.Run(); err != nil {
fmt.Fprintf(gi.logWriter, " ⚠️ NPM cache verify warning: %v (continuing anyway)\n", err) fmt.Fprintf(gi.logWriter, " ⚠️ NPM cache verify warning: %v (continuing anyway)\n", err)
} }
@ -255,11 +258,11 @@ func (gi *GatewayInstaller) InstallAnyoneClient() error {
} }
// Create terms-agreement file to bypass interactive prompt when running as a service // Create terms-agreement file to bypass interactive prompt when running as a service
termsFile := filepath.Join(debrosHome, "terms-agreement") termsFile := filepath.Join(oramaHome, "terms-agreement")
if err := os.WriteFile(termsFile, []byte("agreed"), 0644); err != nil { if err := os.WriteFile(termsFile, []byte("agreed"), 0644); err != nil {
fmt.Fprintf(gi.logWriter, " ⚠️ Warning: failed to create terms-agreement: %v\n", err) fmt.Fprintf(gi.logWriter, " ⚠️ Warning: failed to create terms-agreement: %v\n", err)
} else { } else {
if err := exec.Command("chown", "debros:debros", termsFile).Run(); err != nil { if err := exec.Command("chown", "orama:orama", termsFile).Run(); err != nil {
fmt.Fprintf(gi.logWriter, " ⚠️ Warning: failed to chown terms-agreement: %v\n", err) fmt.Fprintf(gi.logWriter, " ⚠️ Warning: failed to chown terms-agreement: %v\n", err)
} }
} }

View File

@ -217,7 +217,7 @@ func (ii *IPFSInstaller) InitializeRepo(ipfsRepoPath string, swarmKeyPath string
} }
// Fix ownership (best-effort, don't fail if it doesn't work) // Fix ownership (best-effort, don't fail if it doesn't work)
if err := exec.Command("chown", "-R", "debros:debros", ipfsRepoPath).Run(); err != nil { if err := exec.Command("chown", "-R", "orama:orama", ipfsRepoPath).Run(); err != nil {
fmt.Fprintf(ii.logWriter, " ⚠️ Warning: failed to chown IPFS repo: %v\n", err) fmt.Fprintf(ii.logWriter, " ⚠️ Warning: failed to chown IPFS repo: %v\n", err)
} }

View File

@ -43,7 +43,7 @@ func (ici *IPFSClusterInstaller) Install() error {
} }
cmd := exec.Command("go", "install", "github.com/ipfs-cluster/ipfs-cluster/cmd/ipfs-cluster-service@latest") cmd := exec.Command("go", "install", "github.com/ipfs-cluster/ipfs-cluster/cmd/ipfs-cluster-service@latest")
cmd.Env = append(os.Environ(), "GOBIN=/usr/local/bin") cmd.Env = append(os.Environ(), "GOBIN=/usr/local/bin", "GOPROXY=https://proxy.golang.org|direct", "GONOSUMDB=*")
if err := cmd.Run(); err != nil { if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to install IPFS Cluster: %w", err) return fmt.Errorf("failed to install IPFS Cluster: %w", err)
} }
@ -77,7 +77,7 @@ func (ici *IPFSClusterInstaller) InitializeConfig(clusterPath, clusterSecret str
} }
// Fix ownership before running init (best-effort) // Fix ownership before running init (best-effort)
if err := exec.Command("chown", "-R", "debros:debros", clusterPath).Run(); err != nil { if err := exec.Command("chown", "-R", "orama:orama", clusterPath).Run(); err != nil {
fmt.Fprintf(ici.logWriter, " ⚠️ Warning: failed to chown cluster path before init: %v\n", err) fmt.Fprintf(ici.logWriter, " ⚠️ Warning: failed to chown cluster path before init: %v\n", err)
} }
@ -120,7 +120,7 @@ func (ici *IPFSClusterInstaller) InitializeConfig(clusterPath, clusterSecret str
} }
// Fix ownership again after updates (best-effort) // Fix ownership again after updates (best-effort)
if err := exec.Command("chown", "-R", "debros:debros", clusterPath).Run(); err != nil { if err := exec.Command("chown", "-R", "orama:orama", clusterPath).Run(); err != nil {
fmt.Fprintf(ici.logWriter, " ⚠️ Warning: failed to chown cluster path after updates: %v\n", err) fmt.Fprintf(ici.logWriter, " ⚠️ Warning: failed to chown cluster path after updates: %v\n", err)
} }

View File

@ -42,7 +42,7 @@ func (oi *OlricInstaller) Install() error {
} }
cmd := exec.Command("go", "install", fmt.Sprintf("github.com/olric-data/olric/cmd/olric-server@%s", oi.version)) cmd := exec.Command("go", "install", fmt.Sprintf("github.com/olric-data/olric/cmd/olric-server@%s", oi.version))
cmd.Env = append(os.Environ(), "GOBIN=/usr/local/bin") cmd.Env = append(os.Environ(), "GOBIN=/usr/local/bin", "GOPROXY=https://proxy.golang.org|direct", "GONOSUMDB=*")
if err := cmd.Run(); err != nil { if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to install Olric: %w", err) return fmt.Errorf("failed to install Olric: %w", err)
} }

View File

@ -79,7 +79,7 @@ func (ri *RQLiteInstaller) InitializeDataDir(dataDir string) error {
return fmt.Errorf("failed to create RQLite data directory: %w", err) return fmt.Errorf("failed to create RQLite data directory: %w", err)
} }
if err := exec.Command("chown", "-R", "debros:debros", dataDir).Run(); err != nil { if err := exec.Command("chown", "-R", "orama:orama", dataDir).Run(); err != nil {
fmt.Fprintf(ri.logWriter, " ⚠️ Warning: failed to chown RQLite data dir: %v\n", err) fmt.Fprintf(ri.logWriter, " ⚠️ Warning: failed to chown RQLite data dir: %v\n", err)
} }
return nil return nil

View File

@ -30,13 +30,13 @@ type AnyoneRelayConfig struct {
type ProductionSetup struct { type ProductionSetup struct {
osInfo *OSInfo osInfo *OSInfo
arch string arch string
oramaHome string oramaHome string
oramaDir string oramaDir string
logWriter io.Writer logWriter io.Writer
forceReconfigure bool forceReconfigure bool
skipOptionalDeps bool skipOptionalDeps bool
skipResourceChecks bool skipResourceChecks bool
isNameserver bool // Whether this node is a nameserver (runs CoreDNS + Caddy) isNameserver bool // Whether this node is a nameserver (runs CoreDNS + Caddy)
isAnyoneClient bool // Whether this node runs Anyone as client-only (SOCKS5 proxy) isAnyoneClient bool // Whether this node runs Anyone as client-only (SOCKS5 proxy)
anyoneRelayConfig *AnyoneRelayConfig // Configuration for Anyone relay mode anyoneRelayConfig *AnyoneRelayConfig // Configuration for Anyone relay mode
privChecker *PrivilegeChecker privChecker *PrivilegeChecker
@ -73,12 +73,12 @@ func ReadBranchPreference(oramaDir string) string {
func SaveBranchPreference(oramaDir, branch string) error { func SaveBranchPreference(oramaDir, branch string) error {
branchFile := filepath.Join(oramaDir, ".branch") branchFile := filepath.Join(oramaDir, ".branch")
if err := os.MkdirAll(oramaDir, 0755); err != nil { if err := os.MkdirAll(oramaDir, 0755); err != nil {
return fmt.Errorf("failed to create debros directory: %w", err) return fmt.Errorf("failed to create orama directory: %w", err)
} }
if err := os.WriteFile(branchFile, []byte(branch), 0644); err != nil { if err := os.WriteFile(branchFile, []byte(branch), 0644); err != nil {
return fmt.Errorf("failed to save branch preference: %w", err) return fmt.Errorf("failed to save branch preference: %w", err)
} }
exec.Command("chown", "debros:debros", branchFile).Run() exec.Command("chown", "orama:orama", branchFile).Run()
return nil return nil
} }
@ -88,8 +88,8 @@ func NewProductionSetup(oramaHome string, logWriter io.Writer, forceReconfigure
arch, _ := (&ArchitectureDetector{}).Detect() arch, _ := (&ArchitectureDetector{}).Detect()
return &ProductionSetup{ return &ProductionSetup{
oramaHome: oramaHome, oramaHome: oramaHome,
oramaDir: oramaDir, oramaDir: oramaDir,
logWriter: logWriter, logWriter: logWriter,
forceReconfigure: forceReconfigure, forceReconfigure: forceReconfigure,
arch: arch, arch: arch,
@ -100,7 +100,7 @@ func NewProductionSetup(oramaHome string, logWriter io.Writer, forceReconfigure
resourceChecker: NewResourceChecker(), resourceChecker: NewResourceChecker(),
portChecker: NewPortChecker(), portChecker: NewPortChecker(),
fsProvisioner: NewFilesystemProvisioner(oramaHome), fsProvisioner: NewFilesystemProvisioner(oramaHome),
userProvisioner: NewUserProvisioner("debros", oramaHome, "/bin/bash"), userProvisioner: NewUserProvisioner("orama", oramaHome, "/bin/bash"),
stateDetector: NewStateDetector(oramaDir), stateDetector: NewStateDetector(oramaDir),
configGenerator: NewConfigGenerator(oramaDir), configGenerator: NewConfigGenerator(oramaDir),
secretGenerator: NewSecretGenerator(oramaDir), secretGenerator: NewSecretGenerator(oramaDir),
@ -231,14 +231,14 @@ func (ps *ProductionSetup) Phase1CheckPrerequisites() error {
func (ps *ProductionSetup) Phase2ProvisionEnvironment() error { func (ps *ProductionSetup) Phase2ProvisionEnvironment() error {
ps.logf("Phase 2: Provisioning environment...") ps.logf("Phase 2: Provisioning environment...")
// Create debros user // Create orama user
if !ps.userProvisioner.UserExists() { if !ps.userProvisioner.UserExists() {
if err := ps.userProvisioner.CreateUser(); err != nil { if err := ps.userProvisioner.CreateUser(); err != nil {
return fmt.Errorf("failed to create debros user: %w", err) return fmt.Errorf("failed to create orama user: %w", err)
} }
ps.logf(" ✓ Created 'debros' user") ps.logf(" ✓ Created 'orama' user")
} else { } else {
ps.logf(" ✓ 'debros' user already exists") ps.logf(" ✓ 'orama' user already exists")
} }
// Set up sudoers access if invoked via sudo // Set up sudoers access if invoked via sudo
@ -251,21 +251,21 @@ func (ps *ProductionSetup) Phase2ProvisionEnvironment() error {
} }
} }
// Set up deployment sudoers (allows debros user to manage orama-deploy-* services) // Set up deployment sudoers (allows orama user to manage orama-deploy-* services)
if err := ps.userProvisioner.SetupDeploymentSudoers(); err != nil { if err := ps.userProvisioner.SetupDeploymentSudoers(); err != nil {
ps.logf(" ⚠️ Failed to setup deployment sudoers: %v", err) ps.logf(" ⚠️ Failed to setup deployment sudoers: %v", err)
} else { } else {
ps.logf(" ✓ Deployment sudoers configured") ps.logf(" ✓ Deployment sudoers configured")
} }
// Set up namespace sudoers (allows debros user to manage debros-namespace-* services) // Set up namespace sudoers (allows orama user to manage orama-namespace-* services)
if err := ps.userProvisioner.SetupNamespaceSudoers(); err != nil { if err := ps.userProvisioner.SetupNamespaceSudoers(); err != nil {
ps.logf(" ⚠️ Failed to setup namespace sudoers: %v", err) ps.logf(" ⚠️ Failed to setup namespace sudoers: %v", err)
} else { } else {
ps.logf(" ✓ Namespace sudoers configured") ps.logf(" ✓ Namespace sudoers configured")
} }
// Set up WireGuard sudoers (allows debros user to manage WG peers) // Set up WireGuard sudoers (allows orama user to manage WG peers)
if err := ps.userProvisioner.SetupWireGuardSudoers(); err != nil { if err := ps.userProvisioner.SetupWireGuardSudoers(); err != nil {
ps.logf(" ⚠️ Failed to setup wireguard sudoers: %v", err) ps.logf(" ⚠️ Failed to setup wireguard sudoers: %v", err)
} else { } else {
@ -287,7 +287,7 @@ func (ps *ProductionSetup) Phase2ProvisionEnvironment() error {
return nil return nil
} }
// Phase2bInstallBinaries installs external binaries and DeBros components // Phase2bInstallBinaries installs external binaries and Orama components
func (ps *ProductionSetup) Phase2bInstallBinaries() error { func (ps *ProductionSetup) Phase2bInstallBinaries() error {
ps.logf("Phase 2b: Installing binaries...") ps.logf("Phase 2b: Installing binaries...")
@ -305,9 +305,9 @@ func (ps *ProductionSetup) Phase2bInstallBinaries() error {
ps.logf(" ⚠️ Olric install warning: %v", err) ps.logf(" ⚠️ Olric install warning: %v", err)
} }
// Install DeBros binaries (source must be at /home/debros/src via SCP) // Install Orama binaries (source must be at /home/orama/src via SCP)
if err := ps.binaryInstaller.InstallDeBrosBinaries(ps.oramaHome); err != nil { if err := ps.binaryInstaller.InstallDeBrosBinaries(ps.oramaHome); err != nil {
return fmt.Errorf("failed to install DeBros binaries: %w", err) return fmt.Errorf("failed to install Orama binaries: %w", err)
} }
// Install CoreDNS only for nameserver nodes // Install CoreDNS only for nameserver nodes
@ -471,7 +471,7 @@ func (ps *ProductionSetup) Phase2cInitializeServices(peerAddresses []string, vps
} }
// Ensure all directories and files created during service initialization have correct ownership // Ensure all directories and files created during service initialization have correct ownership
// This is critical because directories/files created as root need to be owned by debros user // This is critical because directories/files created as root need to be owned by orama user
if err := ps.fsProvisioner.FixOwnership(); err != nil { if err := ps.fsProvisioner.FixOwnership(); err != nil {
return fmt.Errorf("failed to fix ownership after service initialization: %w", err) return fmt.Errorf("failed to fix ownership after service initialization: %w", err)
} }
@ -564,7 +564,7 @@ func (ps *ProductionSetup) Phase4GenerateConfigs(peerAddresses []string, vpsIP s
if err := os.WriteFile(olricConfigPath, []byte(olricConfig), 0644); err != nil { if err := os.WriteFile(olricConfigPath, []byte(olricConfig), 0644); err != nil {
return fmt.Errorf("failed to save olric config: %w", err) return fmt.Errorf("failed to save olric config: %w", err)
} }
exec.Command("chown", "debros:debros", olricConfigPath).Run() exec.Command("chown", "orama:orama", olricConfigPath).Run()
ps.logf(" ✓ Olric config generated") ps.logf(" ✓ Olric config generated")
// Configure CoreDNS (if baseDomain is provided - this is the zone name) // Configure CoreDNS (if baseDomain is provided - this is the zone name)
@ -633,44 +633,44 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(enableHTTPS bool) error {
// IPFS service (unified - no bootstrap/node distinction) // IPFS service (unified - no bootstrap/node distinction)
ipfsUnit := ps.serviceGenerator.GenerateIPFSService(ipfsBinary) ipfsUnit := ps.serviceGenerator.GenerateIPFSService(ipfsBinary)
if err := ps.serviceController.WriteServiceUnit("debros-ipfs.service", ipfsUnit); err != nil { if err := ps.serviceController.WriteServiceUnit("orama-ipfs.service", ipfsUnit); err != nil {
return fmt.Errorf("failed to write IPFS service: %w", err) return fmt.Errorf("failed to write IPFS service: %w", err)
} }
ps.logf(" ✓ IPFS service created: debros-ipfs.service") ps.logf(" ✓ IPFS service created: orama-ipfs.service")
// IPFS Cluster service // IPFS Cluster service
clusterUnit := ps.serviceGenerator.GenerateIPFSClusterService(clusterBinary) clusterUnit := ps.serviceGenerator.GenerateIPFSClusterService(clusterBinary)
if err := ps.serviceController.WriteServiceUnit("debros-ipfs-cluster.service", clusterUnit); err != nil { if err := ps.serviceController.WriteServiceUnit("orama-ipfs-cluster.service", clusterUnit); err != nil {
return fmt.Errorf("failed to write IPFS Cluster service: %w", err) return fmt.Errorf("failed to write IPFS Cluster service: %w", err)
} }
ps.logf(" ✓ IPFS Cluster service created: debros-ipfs-cluster.service") ps.logf(" ✓ IPFS Cluster service created: orama-ipfs-cluster.service")
// RQLite is managed internally by each node - no separate systemd service needed // RQLite is managed internally by each node - no separate systemd service needed
// Olric service // Olric service
olricUnit := ps.serviceGenerator.GenerateOlricService(olricBinary) olricUnit := ps.serviceGenerator.GenerateOlricService(olricBinary)
if err := ps.serviceController.WriteServiceUnit("debros-olric.service", olricUnit); err != nil { if err := ps.serviceController.WriteServiceUnit("orama-olric.service", olricUnit); err != nil {
return fmt.Errorf("failed to write Olric service: %w", err) return fmt.Errorf("failed to write Olric service: %w", err)
} }
ps.logf(" ✓ Olric service created") ps.logf(" ✓ Olric service created")
// Node service (unified - includes embedded gateway) // Node service (unified - includes embedded gateway)
nodeUnit := ps.serviceGenerator.GenerateNodeService() nodeUnit := ps.serviceGenerator.GenerateNodeService()
if err := ps.serviceController.WriteServiceUnit("debros-node.service", nodeUnit); err != nil { if err := ps.serviceController.WriteServiceUnit("orama-node.service", nodeUnit); err != nil {
return fmt.Errorf("failed to write Node service: %w", err) return fmt.Errorf("failed to write Node service: %w", err)
} }
ps.logf(" ✓ Node service created: debros-node.service (with embedded gateway)") ps.logf(" ✓ Node service created: orama-node.service (with embedded gateway)")
// Anyone Relay service (only created when --anyone-relay flag is used) // Anyone Relay service (only created when --anyone-relay flag is used)
if ps.IsAnyoneRelay() { if ps.IsAnyoneRelay() {
anyoneUnit := ps.serviceGenerator.GenerateAnyoneRelayService() anyoneUnit := ps.serviceGenerator.GenerateAnyoneRelayService()
if err := ps.serviceController.WriteServiceUnit("debros-anyone-relay.service", anyoneUnit); err != nil { if err := ps.serviceController.WriteServiceUnit("orama-anyone-relay.service", anyoneUnit); err != nil {
return fmt.Errorf("failed to write Anyone Relay service: %w", err) return fmt.Errorf("failed to write Anyone Relay service: %w", err)
} }
ps.logf(" ✓ Anyone Relay service created (operator mode, ORPort: %d)", ps.anyoneRelayConfig.ORPort) ps.logf(" ✓ Anyone Relay service created (operator mode, ORPort: %d)", ps.anyoneRelayConfig.ORPort)
} else if ps.IsAnyoneClient() { } else if ps.IsAnyoneClient() {
anyoneUnit := ps.serviceGenerator.GenerateAnyoneRelayService() anyoneUnit := ps.serviceGenerator.GenerateAnyoneRelayService()
if err := ps.serviceController.WriteServiceUnit("debros-anyone-relay.service", anyoneUnit); err != nil { if err := ps.serviceController.WriteServiceUnit("orama-anyone-relay.service", anyoneUnit); err != nil {
return fmt.Errorf("failed to write Anyone client service: %w", err) return fmt.Errorf("failed to write Anyone client service: %w", err)
} }
ps.logf(" ✓ Anyone client service created (SocksPort 9050)") ps.logf(" ✓ Anyone client service created (SocksPort 9050)")
@ -712,13 +712,13 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(enableHTTPS bool) error {
ps.logf(" ✓ Systemd daemon reloaded") ps.logf(" ✓ Systemd daemon reloaded")
// Enable services (unified names - no bootstrap/node distinction) // Enable services (unified names - no bootstrap/node distinction)
// Note: debros-gateway.service is no longer needed - each node has an embedded gateway // Note: orama-gateway.service is no longer needed - each node has an embedded gateway
// Note: debros-rqlite.service is NOT created - RQLite is managed by each node internally // Note: orama-rqlite.service is NOT created - RQLite is managed by each node internally
services := []string{"debros-ipfs.service", "debros-ipfs-cluster.service", "debros-olric.service", "debros-node.service"} services := []string{"orama-ipfs.service", "orama-ipfs-cluster.service", "orama-olric.service", "orama-node.service"}
// Add Anyone service if configured (relay or client) // Add Anyone service if configured (relay or client)
if ps.IsAnyoneRelay() || ps.IsAnyoneClient() { if ps.IsAnyoneRelay() || ps.IsAnyoneClient() {
services = append(services, "debros-anyone-relay.service") services = append(services, "orama-anyone-relay.service")
} }
// Add CoreDNS only for nameserver nodes // Add CoreDNS only for nameserver nodes
@ -744,7 +744,7 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(enableHTTPS bool) error {
ps.logf(" Starting services...") ps.logf(" Starting services...")
// Start infrastructure first (IPFS, Olric, Anyone) - RQLite is managed internally by each node // Start infrastructure first (IPFS, Olric, Anyone) - RQLite is managed internally by each node
infraServices := []string{"debros-ipfs.service", "debros-olric.service"} infraServices := []string{"orama-ipfs.service", "orama-olric.service"}
// Add Anyone service if configured (relay or client) // Add Anyone service if configured (relay or client)
if ps.IsAnyoneRelay() { if ps.IsAnyoneRelay() {
@ -754,12 +754,12 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(enableHTTPS bool) error {
} }
if ps.portChecker.IsPortInUse(orPort) { if ps.portChecker.IsPortInUse(orPort) {
ps.logf(" ORPort %d is already in use (existing anon relay running)", orPort) ps.logf(" ORPort %d is already in use (existing anon relay running)", orPort)
ps.logf(" Skipping debros-anyone-relay startup - using existing service") ps.logf(" Skipping orama-anyone-relay startup - using existing service")
} else { } else {
infraServices = append(infraServices, "debros-anyone-relay.service") infraServices = append(infraServices, "orama-anyone-relay.service")
} }
} else if ps.IsAnyoneClient() { } else if ps.IsAnyoneClient() {
infraServices = append(infraServices, "debros-anyone-relay.service") infraServices = append(infraServices, "orama-anyone-relay.service")
} }
for _, svc := range infraServices { for _, svc := range infraServices {
@ -774,17 +774,17 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(enableHTTPS bool) error {
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
// Start IPFS Cluster // Start IPFS Cluster
if err := ps.serviceController.RestartService("debros-ipfs-cluster.service"); err != nil { if err := ps.serviceController.RestartService("orama-ipfs-cluster.service"); err != nil {
ps.logf(" ⚠️ Failed to start debros-ipfs-cluster.service: %v", err) ps.logf(" ⚠️ Failed to start orama-ipfs-cluster.service: %v", err)
} else { } else {
ps.logf(" - debros-ipfs-cluster.service started") ps.logf(" - orama-ipfs-cluster.service started")
} }
// Start node service (gateway is embedded in node, no separate service needed) // Start node service (gateway is embedded in node, no separate service needed)
if err := ps.serviceController.RestartService("debros-node.service"); err != nil { if err := ps.serviceController.RestartService("orama-node.service"); err != nil {
ps.logf(" ⚠️ Failed to start debros-node.service: %v", err) ps.logf(" ⚠️ Failed to start orama-node.service: %v", err)
} else { } else {
ps.logf(" - debros-node.service started (with embedded gateway)") ps.logf(" - orama-node.service started (with embedded gateway)")
} }
// Start CoreDNS (nameserver nodes only) // Start CoreDNS (nameserver nodes only)
@ -798,7 +798,7 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(enableHTTPS bool) error {
} }
} }
// Start Caddy on ALL nodes (any node may host namespaces and need TLS) // Start Caddy on ALL nodes (any node may host namespaces and need TLS)
// Caddy depends on debros-node.service (gateway on :6001), so start after node // Caddy depends on orama-node.service (gateway on :6001), so start after node
if _, err := os.Stat("/usr/bin/caddy"); err == nil { if _, err := os.Stat("/usr/bin/caddy"); err == nil {
if err := ps.serviceController.RestartService("caddy.service"); err != nil { if err := ps.serviceController.RestartService("caddy.service"); err != nil {
ps.logf(" ⚠️ Failed to start caddy.service: %v", err) ps.logf(" ⚠️ Failed to start caddy.service: %v", err)
@ -955,8 +955,8 @@ func (ps *ProductionSetup) LogSetupComplete(peerID string) {
ps.logf(strings.Repeat("=", 70)) ps.logf(strings.Repeat("=", 70))
ps.logf("\nNode Peer ID: %s", peerID) ps.logf("\nNode Peer ID: %s", peerID)
ps.logf("\nService Management:") ps.logf("\nService Management:")
ps.logf(" systemctl status debros-ipfs") ps.logf(" systemctl status orama-ipfs")
ps.logf(" journalctl -u debros-node -f") ps.logf(" journalctl -u orama-node -f")
ps.logf(" tail -f %s/logs/node.log", ps.oramaDir) ps.logf(" tail -f %s/logs/node.log", ps.oramaDir)
ps.logf("\nLog Files:") ps.logf("\nLog Files:")
ps.logf(" %s/logs/ipfs.log", ps.oramaDir) ps.logf(" %s/logs/ipfs.log", ps.oramaDir)
@ -969,7 +969,7 @@ func (ps *ProductionSetup) LogSetupComplete(peerID string) {
if ps.IsAnyoneRelay() { if ps.IsAnyoneRelay() {
ps.logf(" /var/log/anon/notices.log (Anyone Relay)") ps.logf(" /var/log/anon/notices.log (Anyone Relay)")
ps.logf("\nStart All Services:") ps.logf("\nStart All Services:")
ps.logf(" systemctl start debros-ipfs debros-ipfs-cluster debros-olric debros-anyone-relay debros-node") ps.logf(" systemctl start orama-ipfs orama-ipfs-cluster orama-olric orama-anyone-relay orama-node")
ps.logf("\nAnyone Relay Operator:") ps.logf("\nAnyone Relay Operator:")
ps.logf(" ORPort: %d", ps.anyoneRelayConfig.ORPort) ps.logf(" ORPort: %d", ps.anyoneRelayConfig.ORPort)
ps.logf(" Wallet: %s", ps.anyoneRelayConfig.Wallet) ps.logf(" Wallet: %s", ps.anyoneRelayConfig.Wallet)
@ -978,7 +978,7 @@ func (ps *ProductionSetup) LogSetupComplete(peerID string) {
ps.logf(" IMPORTANT: You need 100 $ANYONE tokens in your wallet to receive rewards") ps.logf(" IMPORTANT: You need 100 $ANYONE tokens in your wallet to receive rewards")
} else { } else {
ps.logf("\nStart All Services:") ps.logf("\nStart All Services:")
ps.logf(" systemctl start debros-ipfs debros-ipfs-cluster debros-olric debros-node") ps.logf(" systemctl start orama-ipfs orama-ipfs-cluster orama-olric orama-node")
} }
ps.logf("\nVerify Installation:") ps.logf("\nVerify Installation:")

View File

@ -12,7 +12,7 @@ import (
type FilesystemProvisioner struct { type FilesystemProvisioner struct {
oramaHome string oramaHome string
oramaDir string oramaDir string
logWriter interface{} // Can be io.Writer for logging logWriter interface{} // Can be io.Writer for logging
} }
// NewFilesystemProvisioner creates a new provisioner // NewFilesystemProvisioner creates a new provisioner
@ -81,30 +81,30 @@ func (fp *FilesystemProvisioner) EnsureDirectoryStructure() error {
return nil return nil
} }
// FixOwnership changes ownership of .orama directory to debros user // FixOwnership changes ownership of .orama directory to orama user
func (fp *FilesystemProvisioner) FixOwnership() error { func (fp *FilesystemProvisioner) FixOwnership() error {
// Fix entire .orama directory recursively (includes all data, configs, logs, etc.) // Fix entire .orama directory recursively (includes all data, configs, logs, etc.)
cmd := exec.Command("chown", "-R", "debros:debros", fp.oramaDir) cmd := exec.Command("chown", "-R", "orama:orama", fp.oramaDir)
if output, err := cmd.CombinedOutput(); err != nil { if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", fp.oramaDir, err, string(output)) return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", fp.oramaDir, err, string(output))
} }
// Also fix home directory ownership // Also fix home directory ownership
cmd = exec.Command("chown", "debros:debros", fp.oramaHome) cmd = exec.Command("chown", "orama:orama", fp.oramaHome)
if output, err := cmd.CombinedOutput(); err != nil { if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", fp.oramaHome, err, string(output)) return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", fp.oramaHome, err, string(output))
} }
// Fix bin directory // Fix bin directory
binDir := filepath.Join(fp.oramaHome, "bin") binDir := filepath.Join(fp.oramaHome, "bin")
cmd = exec.Command("chown", "-R", "debros:debros", binDir) cmd = exec.Command("chown", "-R", "orama:orama", binDir)
if output, err := cmd.CombinedOutput(); err != nil { if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", binDir, err, string(output)) return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", binDir, err, string(output))
} }
// Fix npm cache directory // Fix npm cache directory
npmDir := filepath.Join(fp.oramaHome, ".npm") npmDir := filepath.Join(fp.oramaHome, ".npm")
cmd = exec.Command("chown", "-R", "debros:debros", npmDir) cmd = exec.Command("chown", "-R", "orama:orama", npmDir)
if output, err := cmd.CombinedOutput(); err != nil { if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", npmDir, err, string(output)) return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", npmDir, err, string(output))
} }
@ -157,8 +157,8 @@ func (up *UserProvisioner) SetupSudoersAccess(invokerUser string) error {
return nil // Skip if no invoker return nil // Skip if no invoker
} }
sudoersRule := fmt.Sprintf("%s ALL=(debros) NOPASSWD: ALL\n", invokerUser) sudoersRule := fmt.Sprintf("%s ALL=(orama) NOPASSWD: ALL\n", invokerUser)
sudoersFile := "/etc/sudoers.d/debros-access" sudoersFile := "/etc/sudoers.d/orama-access"
// Check if rule already exists // Check if rule already exists
if existing, err := os.ReadFile(sudoersFile); err == nil { if existing, err := os.ReadFile(sudoersFile); err == nil {
@ -182,31 +182,31 @@ func (up *UserProvisioner) SetupSudoersAccess(invokerUser string) error {
return nil return nil
} }
// SetupDeploymentSudoers configures the debros user with permissions needed for // SetupDeploymentSudoers configures the orama user with permissions needed for
// managing user deployments via systemd services. // managing user deployments via systemd services.
func (up *UserProvisioner) SetupDeploymentSudoers() error { func (up *UserProvisioner) SetupDeploymentSudoers() error {
sudoersFile := "/etc/sudoers.d/debros-deployments" sudoersFile := "/etc/sudoers.d/orama-deployments"
// Check if already configured // Check if already configured
if _, err := os.Stat(sudoersFile); err == nil { if _, err := os.Stat(sudoersFile); err == nil {
return nil // Already configured return nil // Already configured
} }
sudoersContent := `# DeBros Network - Deployment Management Permissions sudoersContent := `# Orama Network - Deployment Management Permissions
# Allows debros user to manage systemd services for user deployments # Allows orama user to manage systemd services for user deployments
# Systemd service management for orama-deploy-* services # Systemd service management for orama-deploy-* services
debros ALL=(ALL) NOPASSWD: /usr/bin/systemctl daemon-reload orama ALL=(ALL) NOPASSWD: /usr/bin/systemctl daemon-reload
debros ALL=(ALL) NOPASSWD: /usr/bin/systemctl start orama-deploy-* orama ALL=(ALL) NOPASSWD: /usr/bin/systemctl start orama-deploy-*
debros ALL=(ALL) NOPASSWD: /usr/bin/systemctl stop orama-deploy-* orama ALL=(ALL) NOPASSWD: /usr/bin/systemctl stop orama-deploy-*
debros ALL=(ALL) NOPASSWD: /usr/bin/systemctl restart orama-deploy-* orama ALL=(ALL) NOPASSWD: /usr/bin/systemctl restart orama-deploy-*
debros ALL=(ALL) NOPASSWD: /usr/bin/systemctl enable orama-deploy-* orama ALL=(ALL) NOPASSWD: /usr/bin/systemctl enable orama-deploy-*
debros ALL=(ALL) NOPASSWD: /usr/bin/systemctl disable orama-deploy-* orama ALL=(ALL) NOPASSWD: /usr/bin/systemctl disable orama-deploy-*
debros ALL=(ALL) NOPASSWD: /usr/bin/systemctl status orama-deploy-* orama ALL=(ALL) NOPASSWD: /usr/bin/systemctl status orama-deploy-*
# Service file management (tee to write, rm to remove) # Service file management (tee to write, rm to remove)
debros ALL=(ALL) NOPASSWD: /usr/bin/tee /etc/systemd/system/orama-deploy-*.service orama ALL=(ALL) NOPASSWD: /usr/bin/tee /etc/systemd/system/orama-deploy-*.service
debros ALL=(ALL) NOPASSWD: /bin/rm -f /etc/systemd/system/orama-deploy-*.service orama ALL=(ALL) NOPASSWD: /bin/rm -f /etc/systemd/system/orama-deploy-*.service
` `
// Write sudoers rule // Write sudoers rule
@ -224,36 +224,36 @@ debros ALL=(ALL) NOPASSWD: /bin/rm -f /etc/systemd/system/orama-deploy-*.service
return nil return nil
} }
// SetupNamespaceSudoers configures the debros user with permissions needed for // SetupNamespaceSudoers configures the orama user with permissions needed for
// managing namespace cluster services via systemd. // managing namespace cluster services via systemd.
func (up *UserProvisioner) SetupNamespaceSudoers() error { func (up *UserProvisioner) SetupNamespaceSudoers() error {
sudoersFile := "/etc/sudoers.d/debros-namespaces" sudoersFile := "/etc/sudoers.d/orama-namespaces"
// Check if already configured // Check if already configured
if _, err := os.Stat(sudoersFile); err == nil { if _, err := os.Stat(sudoersFile); err == nil {
return nil // Already configured return nil // Already configured
} }
sudoersContent := `# DeBros Network - Namespace Cluster Management Permissions sudoersContent := `# Orama Network - Namespace Cluster Management Permissions
# Allows debros user to manage systemd services for namespace clusters # Allows orama user to manage systemd services for namespace clusters
# Systemd service management for debros-namespace-* services # Systemd service management for orama-namespace-* services
debros ALL=(ALL) NOPASSWD: /usr/bin/systemctl daemon-reload orama ALL=(ALL) NOPASSWD: /usr/bin/systemctl daemon-reload
debros ALL=(ALL) NOPASSWD: /usr/bin/systemctl start debros-namespace-* orama ALL=(ALL) NOPASSWD: /usr/bin/systemctl start orama-namespace-*
debros ALL=(ALL) NOPASSWD: /usr/bin/systemctl stop debros-namespace-* orama ALL=(ALL) NOPASSWD: /usr/bin/systemctl stop orama-namespace-*
debros ALL=(ALL) NOPASSWD: /usr/bin/systemctl restart debros-namespace-* orama ALL=(ALL) NOPASSWD: /usr/bin/systemctl restart orama-namespace-*
debros ALL=(ALL) NOPASSWD: /usr/bin/systemctl enable debros-namespace-* orama ALL=(ALL) NOPASSWD: /usr/bin/systemctl enable orama-namespace-*
debros ALL=(ALL) NOPASSWD: /usr/bin/systemctl disable debros-namespace-* orama ALL=(ALL) NOPASSWD: /usr/bin/systemctl disable orama-namespace-*
debros ALL=(ALL) NOPASSWD: /usr/bin/systemctl status debros-namespace-* orama ALL=(ALL) NOPASSWD: /usr/bin/systemctl status orama-namespace-*
debros ALL=(ALL) NOPASSWD: /usr/bin/systemctl is-active debros-namespace-* orama ALL=(ALL) NOPASSWD: /usr/bin/systemctl is-active orama-namespace-*
# Service file management (tee to write, rm to remove) # Service file management (tee to write, rm to remove)
debros ALL=(ALL) NOPASSWD: /usr/bin/tee /etc/systemd/system/debros-namespace-*.service orama ALL=(ALL) NOPASSWD: /usr/bin/tee /etc/systemd/system/orama-namespace-*.service
debros ALL=(ALL) NOPASSWD: /bin/rm -f /etc/systemd/system/debros-namespace-*.service orama ALL=(ALL) NOPASSWD: /bin/rm -f /etc/systemd/system/orama-namespace-*.service
# Environment file management for namespace services # Environment file management for namespace services
debros ALL=(ALL) NOPASSWD: /usr/bin/tee /home/debros/.orama/namespace/*/env/* orama ALL=(ALL) NOPASSWD: /usr/bin/tee /home/orama/.orama/namespace/*/env/*
debros ALL=(ALL) NOPASSWD: /usr/bin/mkdir -p /home/debros/.orama/namespace/*/env orama ALL=(ALL) NOPASSWD: /usr/bin/mkdir -p /home/orama/.orama/namespace/*/env
` `
// Write sudoers rule // Write sudoers rule
@ -271,17 +271,17 @@ debros ALL=(ALL) NOPASSWD: /usr/bin/mkdir -p /home/debros/.orama/namespace/*/env
return nil return nil
} }
// SetupWireGuardSudoers configures the debros user with permissions to manage WireGuard // SetupWireGuardSudoers configures the orama user with permissions to manage WireGuard
func (up *UserProvisioner) SetupWireGuardSudoers() error { func (up *UserProvisioner) SetupWireGuardSudoers() error {
sudoersFile := "/etc/sudoers.d/debros-wireguard" sudoersFile := "/etc/sudoers.d/orama-wireguard"
sudoersContent := `# DeBros Network - WireGuard Management Permissions sudoersContent := `# Orama Network - WireGuard Management Permissions
# Allows debros user to manage WireGuard peers # Allows orama user to manage WireGuard peers
debros ALL=(ALL) NOPASSWD: /usr/bin/wg set wg0 * orama ALL=(ALL) NOPASSWD: /usr/bin/wg set wg0 *
debros ALL=(ALL) NOPASSWD: /usr/bin/wg show wg0 orama ALL=(ALL) NOPASSWD: /usr/bin/wg show wg0
debros ALL=(ALL) NOPASSWD: /usr/bin/wg showconf wg0 orama ALL=(ALL) NOPASSWD: /usr/bin/wg showconf wg0
debros ALL=(ALL) NOPASSWD: /usr/bin/tee /etc/wireguard/wg0.conf orama ALL=(ALL) NOPASSWD: /usr/bin/tee /etc/wireguard/wg0.conf
` `
// Write sudoers rule (always overwrite to ensure latest) // Write sudoers rule (always overwrite to ensure latest)

View File

@ -34,8 +34,8 @@ Wants=network-online.target
[Service] [Service]
Type=simple Type=simple
User=debros User=orama
Group=debros Group=orama
Environment=HOME=%[1]s Environment=HOME=%[1]s
Environment=IPFS_PATH=%[2]s Environment=IPFS_PATH=%[2]s
ExecStartPre=/bin/bash -c 'if [ -f %[3]s/secrets/swarm.key ] && [ ! -f %[2]s/swarm.key ]; then cp %[3]s/secrets/swarm.key %[2]s/swarm.key && chmod 600 %[2]s/swarm.key; fi' ExecStartPre=/bin/bash -c 'if [ -f %[3]s/secrets/swarm.key ] && [ ! -f %[2]s/swarm.key ]; then cp %[3]s/secrets/swarm.key %[2]s/swarm.key && chmod 600 %[2]s/swarm.key; fi'
@ -44,7 +44,7 @@ Restart=always
RestartSec=5 RestartSec=5
StandardOutput=append:%[4]s StandardOutput=append:%[4]s
StandardError=append:%[4]s StandardError=append:%[4]s
SyslogIdentifier=debros-ipfs SyslogIdentifier=orama-ipfs
NoNewPrivileges=yes NoNewPrivileges=yes
PrivateTmp=yes PrivateTmp=yes
@ -80,14 +80,14 @@ func (ssg *SystemdServiceGenerator) GenerateIPFSClusterService(clusterBinary str
return fmt.Sprintf(`[Unit] return fmt.Sprintf(`[Unit]
Description=IPFS Cluster Service Description=IPFS Cluster Service
After=debros-ipfs.service After=orama-ipfs.service
Wants=debros-ipfs.service Wants=orama-ipfs.service
Requires=debros-ipfs.service Requires=orama-ipfs.service
[Service] [Service]
Type=simple Type=simple
User=debros User=orama
Group=debros Group=orama
WorkingDirectory=%[1]s WorkingDirectory=%[1]s
Environment=HOME=%[1]s Environment=HOME=%[1]s
Environment=IPFS_CLUSTER_PATH=%[2]s Environment=IPFS_CLUSTER_PATH=%[2]s
@ -99,7 +99,7 @@ Restart=always
RestartSec=5 RestartSec=5
StandardOutput=append:%[3]s StandardOutput=append:%[3]s
StandardError=append:%[3]s StandardError=append:%[3]s
SyslogIdentifier=debros-ipfs-cluster SyslogIdentifier=orama-ipfs-cluster
NoNewPrivileges=yes NoNewPrivileges=yes
PrivateTmp=yes PrivateTmp=yes
@ -150,15 +150,15 @@ Wants=network-online.target
[Service] [Service]
Type=simple Type=simple
User=debros User=orama
Group=debros Group=orama
Environment=HOME=%[1]s Environment=HOME=%[1]s
ExecStart=%[5]s %[2]s ExecStart=%[5]s %[2]s
Restart=always Restart=always
RestartSec=5 RestartSec=5
StandardOutput=append:%[3]s StandardOutput=append:%[3]s
StandardError=append:%[3]s StandardError=append:%[3]s
SyslogIdentifier=debros-rqlite SyslogIdentifier=orama-rqlite
NoNewPrivileges=yes NoNewPrivileges=yes
PrivateTmp=yes PrivateTmp=yes
@ -191,8 +191,8 @@ Wants=network-online.target
[Service] [Service]
Type=simple Type=simple
User=debros User=orama
Group=debros Group=orama
Environment=HOME=%[1]s Environment=HOME=%[1]s
Environment=OLRIC_SERVER_CONFIG=%[2]s Environment=OLRIC_SERVER_CONFIG=%[2]s
ExecStart=%[5]s ExecStart=%[5]s
@ -222,7 +222,7 @@ WantedBy=multi-user.target
`, ssg.oramaHome, olricConfigPath, logFile, ssg.oramaDir, olricBinary) `, ssg.oramaHome, olricConfigPath, logFile, ssg.oramaDir, olricBinary)
} }
// GenerateNodeService generates the DeBros Node systemd unit // GenerateNodeService generates the Orama Node systemd unit
func (ssg *SystemdServiceGenerator) GenerateNodeService() string { func (ssg *SystemdServiceGenerator) GenerateNodeService() string {
configFile := "node.yaml" configFile := "node.yaml"
logFile := filepath.Join(ssg.oramaDir, "logs", "node.log") logFile := filepath.Join(ssg.oramaDir, "logs", "node.log")
@ -230,15 +230,15 @@ func (ssg *SystemdServiceGenerator) GenerateNodeService() string {
// Use absolute paths directly as they will be resolved by systemd at runtime // Use absolute paths directly as they will be resolved by systemd at runtime
return fmt.Sprintf(`[Unit] return fmt.Sprintf(`[Unit]
Description=DeBros Network Node Description=Orama Network Node
After=debros-ipfs-cluster.service debros-olric.service wg-quick@wg0.service After=orama-ipfs-cluster.service orama-olric.service wg-quick@wg0.service
Wants=debros-ipfs-cluster.service debros-olric.service Wants=orama-ipfs-cluster.service orama-olric.service
Requires=wg-quick@wg0.service Requires=wg-quick@wg0.service
[Service] [Service]
Type=simple Type=simple
User=debros User=orama
Group=debros Group=orama
WorkingDirectory=%[1]s WorkingDirectory=%[1]s
Environment=HOME=%[1]s Environment=HOME=%[1]s
ExecStart=%[1]s/bin/orama-node --config %[2]s/configs/%[3]s ExecStart=%[1]s/bin/orama-node --config %[2]s/configs/%[3]s
@ -246,7 +246,7 @@ Restart=always
RestartSec=5 RestartSec=5
StandardOutput=append:%[4]s StandardOutput=append:%[4]s
StandardError=append:%[4]s StandardError=append:%[4]s
SyslogIdentifier=debros-node SyslogIdentifier=orama-node
NoNewPrivileges=yes NoNewPrivileges=yes
PrivateTmp=yes PrivateTmp=yes
@ -269,18 +269,18 @@ WantedBy=multi-user.target
`, ssg.oramaHome, ssg.oramaDir, configFile, logFile) `, ssg.oramaHome, ssg.oramaDir, configFile, logFile)
} }
// GenerateGatewayService generates the DeBros Gateway systemd unit // GenerateGatewayService generates the Orama Gateway systemd unit
func (ssg *SystemdServiceGenerator) GenerateGatewayService() string { func (ssg *SystemdServiceGenerator) GenerateGatewayService() string {
logFile := filepath.Join(ssg.oramaDir, "logs", "gateway.log") logFile := filepath.Join(ssg.oramaDir, "logs", "gateway.log")
return fmt.Sprintf(`[Unit] return fmt.Sprintf(`[Unit]
Description=DeBros Gateway Description=Orama Gateway
After=debros-node.service debros-olric.service After=orama-node.service orama-olric.service
Wants=debros-node.service debros-olric.service Wants=orama-node.service orama-olric.service
[Service] [Service]
Type=simple Type=simple
User=debros User=orama
Group=debros Group=orama
WorkingDirectory=%[1]s WorkingDirectory=%[1]s
Environment=HOME=%[1]s Environment=HOME=%[1]s
ExecStart=%[1]s/bin/gateway --config %[2]s/data/gateway.yaml ExecStart=%[1]s/bin/gateway --config %[2]s/data/gateway.yaml
@ -288,7 +288,7 @@ Restart=always
RestartSec=5 RestartSec=5
StandardOutput=append:%[3]s StandardOutput=append:%[3]s
StandardError=append:%[3]s StandardError=append:%[3]s
SyslogIdentifier=debros-gateway SyslogIdentifier=orama-gateway
AmbientCapabilities=CAP_NET_BIND_SERVICE AmbientCapabilities=CAP_NET_BIND_SERVICE
CapabilityBoundingSet=CAP_NET_BIND_SERVICE CapabilityBoundingSet=CAP_NET_BIND_SERVICE
@ -325,8 +325,8 @@ Wants=network-online.target
[Service] [Service]
Type=simple Type=simple
User=debros User=orama
Group=debros Group=orama
Environment=HOME=%[1]s Environment=HOME=%[1]s
Environment=PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/lib/node_modules/.bin Environment=PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/lib/node_modules/.bin
WorkingDirectory=%[1]s WorkingDirectory=%[1]s
@ -400,8 +400,8 @@ func (ssg *SystemdServiceGenerator) GenerateCoreDNSService() string {
return `[Unit] return `[Unit]
Description=CoreDNS DNS Server with RQLite backend Description=CoreDNS DNS Server with RQLite backend
Documentation=https://coredns.io Documentation=https://coredns.io
After=network-online.target debros-node.service After=network-online.target orama-node.service
Wants=network-online.target debros-node.service Wants=network-online.target orama-node.service
[Service] [Service]
Type=simple Type=simple
@ -429,9 +429,9 @@ func (ssg *SystemdServiceGenerator) GenerateCaddyService() string {
return `[Unit] return `[Unit]
Description=Caddy HTTP/2 Server Description=Caddy HTTP/2 Server
Documentation=https://caddyserver.com/docs/ Documentation=https://caddyserver.com/docs/
After=network-online.target debros-node.service coredns.service After=network-online.target orama-node.service coredns.service
Wants=network-online.target Wants=network-online.target
Wants=debros-node.service Wants=orama-node.service
[Service] [Service]
Type=simple Type=simple

View File

@ -47,8 +47,8 @@ func TestGenerateRQLiteService(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
ssg := &SystemdServiceGenerator{ ssg := &SystemdServiceGenerator{
oramaHome: "/home/debros", oramaHome: "/home/orama",
oramaDir: "/home/debros/.orama", oramaDir: "/home/orama/.orama",
} }
unit := ssg.GenerateRQLiteService("/usr/local/bin/rqlited", 5001, 7001, tt.joinAddr, tt.advertiseIP) unit := ssg.GenerateRQLiteService("/usr/local/bin/rqlited", 5001, 7001, tt.joinAddr, tt.advertiseIP)
@ -81,8 +81,8 @@ func TestGenerateRQLiteService(t *testing.T) {
// TestGenerateRQLiteServiceArgs verifies the ExecStart command arguments // TestGenerateRQLiteServiceArgs verifies the ExecStart command arguments
func TestGenerateRQLiteServiceArgs(t *testing.T) { func TestGenerateRQLiteServiceArgs(t *testing.T) {
ssg := &SystemdServiceGenerator{ ssg := &SystemdServiceGenerator{
oramaHome: "/home/debros", oramaHome: "/home/orama",
oramaDir: "/home/debros/.orama", oramaDir: "/home/orama/.orama",
} }
unit := ssg.GenerateRQLiteService("/usr/local/bin/rqlited", 5001, 7001, "10.0.0.1:7001", "10.0.0.2") unit := ssg.GenerateRQLiteService("/usr/local/bin/rqlited", 5001, 7001, "10.0.0.1:7001", "10.0.0.2")

View File

@ -145,7 +145,7 @@ func (wp *WireGuardProvisioner) WriteConfig() error {
} }
} }
// Fallback to sudo tee (for non-root, e.g. debros user) // Fallback to sudo tee (for non-root, e.g. orama user)
cmd := exec.Command("sudo", "tee", confPath) cmd := exec.Command("sudo", "tee", confPath)
cmd.Stdin = strings.NewReader(content) cmd.Stdin = strings.NewReader(content)
if output, err := cmd.CombinedOutput(); err != nil { if output, err := cmd.CombinedOutput(); err != nil {

View File

@ -17,8 +17,8 @@ type NodeConfigData struct {
P2PPort int P2PPort int
DataDir string DataDir string
RQLiteHTTPPort int RQLiteHTTPPort int
RQLiteRaftPort int // External Raft port for advertisement (7001 for SNI) RQLiteRaftPort int // External Raft port for advertisement (7001 for SNI)
RQLiteRaftInternalPort int // Internal Raft port for local binding (7002 when SNI enabled) RQLiteRaftInternalPort int // Internal Raft port for local binding (7002 when SNI enabled)
RQLiteJoinAddress string // Optional: join address for joining existing cluster RQLiteJoinAddress string // Optional: join address for joining existing cluster
BootstrapPeers []string // List of peer multiaddrs to connect to BootstrapPeers []string // List of peer multiaddrs to connect to
ClusterAPIPort int ClusterAPIPort int
@ -58,13 +58,13 @@ type GatewayConfigData struct {
// OlricConfigData holds parameters for olric.yaml rendering // OlricConfigData holds parameters for olric.yaml rendering
type OlricConfigData struct { type OlricConfigData struct {
ServerBindAddr string // HTTP API bind address (127.0.0.1 for security) ServerBindAddr string // HTTP API bind address (127.0.0.1 for security)
HTTPPort int HTTPPort int
MemberlistBindAddr string // Memberlist bind address (WG IP for clustering) MemberlistBindAddr string // Memberlist bind address (WG IP for clustering)
MemberlistPort int MemberlistPort int
MemberlistEnvironment string // "local", "lan", or "wan" MemberlistEnvironment string // "local", "lan", or "wan"
MemberlistAdvertiseAddr string // Advertise address (WG IP) so other nodes can reach us MemberlistAdvertiseAddr string // Advertise address (WG IP) so other nodes can reach us
Peers []string // Seed peers for memberlist (host:port) Peers []string // Seed peers for memberlist (host:port)
} }
// SystemdIPFSData holds parameters for systemd IPFS service rendering // SystemdIPFSData holds parameters for systemd IPFS service rendering
@ -72,33 +72,33 @@ type SystemdIPFSData struct {
HomeDir string HomeDir string
IPFSRepoPath string IPFSRepoPath string
SecretsDir string SecretsDir string
OramaDir string OramaDir string
} }
// SystemdIPFSClusterData holds parameters for systemd IPFS Cluster service rendering // SystemdIPFSClusterData holds parameters for systemd IPFS Cluster service rendering
type SystemdIPFSClusterData struct { type SystemdIPFSClusterData struct {
HomeDir string HomeDir string
ClusterPath string ClusterPath string
OramaDir string OramaDir string
} }
// SystemdOlricData holds parameters for systemd Olric service rendering // SystemdOlricData holds parameters for systemd Olric service rendering
type SystemdOlricData struct { type SystemdOlricData struct {
HomeDir string HomeDir string
ConfigPath string ConfigPath string
OramaDir string OramaDir string
} }
// SystemdNodeData holds parameters for systemd Node service rendering // SystemdNodeData holds parameters for systemd Node service rendering
type SystemdNodeData struct { type SystemdNodeData struct {
HomeDir string HomeDir string
ConfigFile string ConfigFile string
OramaDir string OramaDir string
} }
// SystemdGatewayData holds parameters for systemd Gateway service rendering // SystemdGatewayData holds parameters for systemd Gateway service rendering
type SystemdGatewayData struct { type SystemdGatewayData struct {
HomeDir string HomeDir string
OramaDir string OramaDir string
} }
@ -132,12 +132,12 @@ func RenderOlricService(data SystemdOlricData) (string, error) {
return renderTemplate("systemd_olric.service", data) return renderTemplate("systemd_olric.service", data)
} }
// RenderNodeService renders the DeBros Node systemd service template // RenderNodeService renders the Orama Node systemd service template
func RenderNodeService(data SystemdNodeData) (string, error) { func RenderNodeService(data SystemdNodeData) (string, error) {
return renderTemplate("systemd_node.service", data) return renderTemplate("systemd_node.service", data)
} }
// RenderGatewayService renders the DeBros Gateway systemd service template // RenderGatewayService renders the Orama Gateway systemd service template
func RenderGatewayService(data SystemdGatewayData) (string, error) { func RenderGatewayService(data SystemdGatewayData) (string, error) {
return renderTemplate("systemd_gateway.service", data) return renderTemplate("systemd_gateway.service", data)
} }

View File

@ -10,7 +10,7 @@ func TestRenderNodeConfig(t *testing.T) {
data := NodeConfigData{ data := NodeConfigData{
NodeID: "node2", NodeID: "node2",
P2PPort: 4002, P2PPort: 4002,
DataDir: "/home/debros/.orama/node2", DataDir: "/home/orama/.orama/node2",
RQLiteHTTPPort: 5002, RQLiteHTTPPort: 5002,
RQLiteRaftPort: 7002, RQLiteRaftPort: 7002,
RQLiteJoinAddress: "localhost:5001", RQLiteJoinAddress: "localhost:5001",

View File

@ -1,12 +1,12 @@
[Unit] [Unit]
Description=DeBros Gateway Description=Orama Gateway
After=debros-node.service After=orama-node.service
Wants=debros-node.service Wants=orama-node.service
[Service] [Service]
Type=simple Type=simple
User=debros User=orama
Group=debros Group=orama
WorkingDirectory={{.HomeDir}} WorkingDirectory={{.HomeDir}}
Environment=HOME={{.HomeDir}} Environment=HOME={{.HomeDir}}
ExecStart={{.HomeDir}}/bin/gateway --config {{.OramaDir}}/data/gateway.yaml ExecStart={{.HomeDir}}/bin/gateway --config {{.OramaDir}}/data/gateway.yaml
@ -14,7 +14,7 @@ Restart=always
RestartSec=5 RestartSec=5
StandardOutput=journal StandardOutput=journal
StandardError=journal StandardError=journal
SyslogIdentifier=debros-gateway SyslogIdentifier=orama-gateway
AmbientCapabilities=CAP_NET_BIND_SERVICE AmbientCapabilities=CAP_NET_BIND_SERVICE
CapabilityBoundingSet=CAP_NET_BIND_SERVICE CapabilityBoundingSet=CAP_NET_BIND_SERVICE

View File

@ -5,8 +5,8 @@ Wants=network-online.target
[Service] [Service]
Type=simple Type=simple
User=debros User=orama
Group=debros Group=orama
Environment=HOME={{.HomeDir}} Environment=HOME={{.HomeDir}}
Environment=IPFS_PATH={{.IPFSRepoPath}} Environment=IPFS_PATH={{.IPFSRepoPath}}
ExecStartPre=/bin/bash -c 'if [ -f {{.SecretsDir}}/swarm.key ] && [ ! -f {{.IPFSRepoPath}}/swarm.key ]; then cp {{.SecretsDir}}/swarm.key {{.IPFSRepoPath}}/swarm.key && chmod 600 {{.IPFSRepoPath}}/swarm.key; fi' ExecStartPre=/bin/bash -c 'if [ -f {{.SecretsDir}}/swarm.key ] && [ ! -f {{.IPFSRepoPath}}/swarm.key ]; then cp {{.SecretsDir}}/swarm.key {{.IPFSRepoPath}}/swarm.key && chmod 600 {{.IPFSRepoPath}}/swarm.key; fi'

View File

@ -1,13 +1,13 @@
[Unit] [Unit]
Description=IPFS Cluster Service ({{.NodeType}}) Description=IPFS Cluster Service ({{.NodeType}})
After=debros-ipfs-{{.NodeType}}.service After=orama-ipfs-{{.NodeType}}.service
Wants=debros-ipfs-{{.NodeType}}.service Wants=orama-ipfs-{{.NodeType}}.service
Requires=debros-ipfs-{{.NodeType}}.service Requires=orama-ipfs-{{.NodeType}}.service
[Service] [Service]
Type=simple Type=simple
User=debros User=orama
Group=debros Group=orama
WorkingDirectory={{.HomeDir}} WorkingDirectory={{.HomeDir}}
Environment=HOME={{.HomeDir}} Environment=HOME={{.HomeDir}}
Environment=CLUSTER_PATH={{.ClusterPath}} Environment=CLUSTER_PATH={{.ClusterPath}}

View File

@ -1,13 +1,13 @@
[Unit] [Unit]
Description=DeBros Network Node ({{.NodeType}}) Description=Orama Network Node ({{.NodeType}})
After=debros-ipfs-cluster-{{.NodeType}}.service After=orama-ipfs-cluster-{{.NodeType}}.service
Wants=debros-ipfs-cluster-{{.NodeType}}.service Wants=orama-ipfs-cluster-{{.NodeType}}.service
Requires=debros-ipfs-cluster-{{.NodeType}}.service Requires=orama-ipfs-cluster-{{.NodeType}}.service
[Service] [Service]
Type=simple Type=simple
User=debros User=orama
Group=debros Group=orama
WorkingDirectory={{.HomeDir}} WorkingDirectory={{.HomeDir}}
Environment=HOME={{.HomeDir}} Environment=HOME={{.HomeDir}}
ExecStart={{.HomeDir}}/bin/orama-node --config {{.OramaDir}}/configs/{{.ConfigFile}} ExecStart={{.HomeDir}}/bin/orama-node --config {{.OramaDir}}/configs/{{.ConfigFile}}
@ -15,7 +15,7 @@ Restart=always
RestartSec=5 RestartSec=5
StandardOutput=journal StandardOutput=journal
StandardError=journal StandardError=journal
SyslogIdentifier=debros-node-{{.NodeType}} SyslogIdentifier=orama-node-{{.NodeType}}
NoNewPrivileges=yes NoNewPrivileges=yes
PrivateTmp=yes PrivateTmp=yes

View File

@ -5,8 +5,8 @@ Wants=network-online.target
[Service] [Service]
Type=simple Type=simple
User=debros User=orama
Group=debros Group=orama
Environment=HOME={{.HomeDir}} Environment=HOME={{.HomeDir}}
Environment=OLRIC_SERVER_CONFIG={{.ConfigPath}} Environment=OLRIC_SERVER_CONFIG={{.ConfigPath}}
ExecStart=/usr/local/bin/olric-server ExecStart=/usr/local/bin/olric-server

View File

@ -127,7 +127,7 @@ func (g *Gateway) anonProxyHandler(w http.ResponseWriter, r *http.Request) {
// Set default User-Agent if not provided // Set default User-Agent if not provided
if proxyReq.Header.Get("User-Agent") == "" { if proxyReq.Header.Get("User-Agent") == "" {
proxyReq.Header.Set("User-Agent", "DeBros-Gateway/1.0") proxyReq.Header.Set("User-Agent", "Orama-Gateway/1.0")
} }
// Log the proxy request // Log the proxy request

View File

@ -157,7 +157,7 @@ func (s *Service) ParseAndVerifyJWT(token string) (*JWTClaims, error) {
return nil, errors.New("invalid claims json") return nil, errors.New("invalid claims json")
} }
// Validate issuer // Validate issuer
if claims.Iss != "debros-gateway" { if claims.Iss != "orama-gateway" {
return nil, errors.New("invalid issuer") return nil, errors.New("invalid issuer")
} }
// Validate registered claims // Validate registered claims
@ -199,7 +199,7 @@ func (s *Service) generateEdDSAJWT(ns, subject string, ttl time.Duration) (strin
now := time.Now().UTC() now := time.Now().UTC()
exp := now.Add(ttl) exp := now.Add(ttl)
payload := map[string]any{ payload := map[string]any{
"iss": "debros-gateway", "iss": "orama-gateway",
"sub": subject, "sub": subject,
"aud": "gateway", "aud": "gateway",
"iat": now.Unix(), "iat": now.Unix(),
@ -229,7 +229,7 @@ func (s *Service) generateRSAJWT(ns, subject string, ttl time.Duration) (string,
now := time.Now().UTC() now := time.Now().UTC()
exp := now.Add(ttl) exp := now.Add(ttl)
payload := map[string]any{ payload := map[string]any{
"iss": "debros-gateway", "iss": "orama-gateway",
"sub": subject, "sub": subject,
"aud": "gateway", "aud": "gateway",
"iat": now.Unix(), "iat": now.Unix(),

View File

@ -138,8 +138,8 @@ func TestJWTFlow(t *testing.T) {
t.Errorf("expected namespace %s, got %s", ns, claims.Namespace) t.Errorf("expected namespace %s, got %s", ns, claims.Namespace)
} }
if claims.Iss != "debros-gateway" { if claims.Iss != "orama-gateway" {
t.Errorf("expected issuer debros-gateway, got %s", claims.Iss) t.Errorf("expected issuer orama-gateway, got %s", claims.Iss)
} }
} }
@ -257,7 +257,7 @@ func TestAlgorithmConfusion_Rejected(t *testing.T) {
header := map[string]string{"alg": "none", "typ": "JWT"} header := map[string]string{"alg": "none", "typ": "JWT"}
hb, _ := json.Marshal(header) hb, _ := json.Marshal(header)
payload := map[string]any{ payload := map[string]any{
"iss": "debros-gateway", "sub": "attacker", "aud": "gateway", "iss": "orama-gateway", "sub": "attacker", "aud": "gateway",
"iat": time.Now().Unix(), "nbf": time.Now().Unix(), "iat": time.Now().Unix(), "nbf": time.Now().Unix(),
"exp": time.Now().Add(time.Hour).Unix(), "namespace": "test-ns", "exp": time.Now().Add(time.Hour).Unix(), "namespace": "test-ns",
} }
@ -275,7 +275,7 @@ func TestAlgorithmConfusion_Rejected(t *testing.T) {
header := map[string]string{"alg": "HS256", "typ": "JWT", "kid": s.keyID} header := map[string]string{"alg": "HS256", "typ": "JWT", "kid": s.keyID}
hb, _ := json.Marshal(header) hb, _ := json.Marshal(header)
payload := map[string]any{ payload := map[string]any{
"iss": "debros-gateway", "sub": "attacker", "aud": "gateway", "iss": "orama-gateway", "sub": "attacker", "aud": "gateway",
"iat": time.Now().Unix(), "nbf": time.Now().Unix(), "iat": time.Now().Unix(), "nbf": time.Now().Unix(),
"exp": time.Now().Add(time.Hour).Unix(), "namespace": "test-ns", "exp": time.Now().Add(time.Hour).Unix(), "namespace": "test-ns",
} }
@ -295,7 +295,7 @@ func TestAlgorithmConfusion_Rejected(t *testing.T) {
header := map[string]string{"alg": "RS256", "typ": "JWT", "kid": s.edKeyID} header := map[string]string{"alg": "RS256", "typ": "JWT", "kid": s.edKeyID}
hb, _ := json.Marshal(header) hb, _ := json.Marshal(header)
payload := map[string]any{ payload := map[string]any{
"iss": "debros-gateway", "sub": "attacker", "aud": "gateway", "iss": "orama-gateway", "sub": "attacker", "aud": "gateway",
"iat": time.Now().Unix(), "nbf": time.Now().Unix(), "iat": time.Now().Unix(), "nbf": time.Now().Unix(),
"exp": time.Now().Add(time.Hour).Unix(), "namespace": "test-ns", "exp": time.Now().Add(time.Hour).Unix(), "namespace": "test-ns",
} }
@ -321,7 +321,7 @@ func TestAlgorithmConfusion_Rejected(t *testing.T) {
header := map[string]string{"alg": "RS256", "typ": "JWT", "kid": "unknown-kid-123"} header := map[string]string{"alg": "RS256", "typ": "JWT", "kid": "unknown-kid-123"}
hb, _ := json.Marshal(header) hb, _ := json.Marshal(header)
payload := map[string]any{ payload := map[string]any{
"iss": "debros-gateway", "sub": "attacker", "aud": "gateway", "iss": "orama-gateway", "sub": "attacker", "aud": "gateway",
"iat": time.Now().Unix(), "nbf": time.Now().Unix(), "iat": time.Now().Unix(), "nbf": time.Now().Unix(),
"exp": time.Now().Add(time.Hour).Unix(), "namespace": "test-ns", "exp": time.Now().Add(time.Hour).Unix(), "namespace": "test-ns",
} }
@ -341,7 +341,7 @@ func TestAlgorithmConfusion_Rejected(t *testing.T) {
header := map[string]string{"alg": "EdDSA", "typ": "JWT"} header := map[string]string{"alg": "EdDSA", "typ": "JWT"}
hb, _ := json.Marshal(header) hb, _ := json.Marshal(header)
payload := map[string]any{ payload := map[string]any{
"iss": "debros-gateway", "sub": "attacker", "aud": "gateway", "iss": "orama-gateway", "sub": "attacker", "aud": "gateway",
"iat": time.Now().Unix(), "nbf": time.Now().Unix(), "iat": time.Now().Unix(), "nbf": time.Now().Unix(),
"exp": time.Now().Add(time.Hour).Unix(), "namespace": "test-ns", "exp": time.Now().Add(time.Hour).Unix(), "namespace": "test-ns",
} }

View File

@ -65,7 +65,7 @@ type PeerInfo struct {
type Handler struct { type Handler struct {
logger *zap.Logger logger *zap.Logger
rqliteClient rqlite.Client rqliteClient rqlite.Client
oramaDir string // e.g., /home/debros/.orama oramaDir string // e.g., /home/orama/.orama
} }
// NewHandler creates a new join handler // NewHandler creates a new join handler

View File

@ -59,7 +59,7 @@ func NewHTTPSGateway(logger *logging.ColoredLogger, cfg *config.HTTPGatewayConfi
// Use Let's Encrypt STAGING (consistent with SNI gateway) // Use Let's Encrypt STAGING (consistent with SNI gateway)
cacheDir := cfg.HTTPS.CacheDir cacheDir := cfg.HTTPS.CacheDir
if cacheDir == "" { if cacheDir == "" {
cacheDir = "/home/debros/.orama/tls-cache" cacheDir = "/home/orama/.orama/tls-cache"
} }
// Use Let's Encrypt STAGING - provides higher rate limits for testing/development // Use Let's Encrypt STAGING - provides higher rate limits for testing/development

View File

@ -32,7 +32,7 @@ func TestJWTGenerateAndParse(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("verify err: %v", err) t.Fatalf("verify err: %v", err)
} }
if claims.Namespace != "ns1" || claims.Sub != "subj" || claims.Aud != "gateway" || claims.Iss != "debros-gateway" { if claims.Namespace != "ns1" || claims.Sub != "subj" || claims.Aud != "gateway" || claims.Iss != "orama-gateway" {
t.Fatalf("unexpected claims: %+v", claims) t.Fatalf("unexpected claims: %+v", claims)
} }
} }

View File

@ -28,7 +28,7 @@ func TestExtractAPIKey(t *testing.T) {
} }
} }
// TestDomainRoutingMiddleware_NonDebrosNetwork tests that non-debros domains pass through // TestDomainRoutingMiddleware_NonDebrosNetwork tests that non-orama domains pass through
func TestDomainRoutingMiddleware_NonDebrosNetwork(t *testing.T) { func TestDomainRoutingMiddleware_NonDebrosNetwork(t *testing.T) {
nextCalled := false nextCalled := false
next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@ -46,7 +46,7 @@ func TestDomainRoutingMiddleware_NonDebrosNetwork(t *testing.T) {
middleware.ServeHTTP(rr, req) middleware.ServeHTTP(rr, req)
if !nextCalled { if !nextCalled {
t.Error("Expected next handler to be called for non-debros domain") t.Error("Expected next handler to be called for non-orama domain")
} }
if rr.Code != http.StatusOK { if rr.Code != http.StatusOK {
@ -379,10 +379,10 @@ func TestSecurityHeadersMiddleware(t *testing.T) {
expected := map[string]string{ expected := map[string]string{
"X-Content-Type-Options": "nosniff", "X-Content-Type-Options": "nosniff",
"X-Frame-Options": "DENY", "X-Frame-Options": "DENY",
"X-Xss-Protection": "0", "X-Xss-Protection": "0",
"Referrer-Policy": "strict-origin-when-cross-origin", "Referrer-Policy": "strict-origin-when-cross-origin",
"Permissions-Policy": "camera=(), microphone=(), geolocation=()", "Permissions-Policy": "camera=(), microphone=(), geolocation=()",
} }
for header, want := range expected { for header, want := range expected {
got := rr.Header().Get(header) got := rr.Header().Get(header)

View File

@ -42,7 +42,7 @@ func checkAnyonePerNode(nd *inspector.NodeData) []inspector.CheckResult {
if a.RelayActive { if a.RelayActive {
r = append(r, inspector.Pass("anyone.relay_active", "Anyone relay service active", anyoneSub, node, r = append(r, inspector.Pass("anyone.relay_active", "Anyone relay service active", anyoneSub, node,
"debros-anyone-relay is active", inspector.High)) "orama-anyone-relay is active", inspector.High))
} }
// --- Client-mode checks --- // --- Client-mode checks ---
@ -133,7 +133,7 @@ func checkAnyonePerNode(nd *inspector.NodeData) []inspector.CheckResult {
// --- Legacy client checks (if also running client service) --- // --- Legacy client checks (if also running client service) ---
if a.ClientActive { if a.ClientActive {
r = append(r, inspector.Pass("anyone.client_active", "Anyone client service active", anyoneSub, node, r = append(r, inspector.Pass("anyone.client_active", "Anyone client service active", anyoneSub, node,
"debros-anyone-client is active", inspector.High)) "orama-anyone-client is active", inspector.High))
if a.SocksListening { if a.SocksListening {
r = append(r, inspector.Pass("anyone.socks_listening", "SOCKS5 port 9050 listening", anyoneSub, node, r = append(r, inspector.Pass("anyone.socks_listening", "SOCKS5 port 9050 listening", anyoneSub, node,

View File

@ -55,7 +55,7 @@ func TestCheckAnyone_HealthyRelay(t *testing.T) {
func TestCheckAnyone_HealthyClient(t *testing.T) { func TestCheckAnyone_HealthyClient(t *testing.T) {
nd := makeNodeData("1.1.1.1", "nameserver") nd := makeNodeData("1.1.1.1", "nameserver")
nd.Anyone = &inspector.AnyoneData{ nd.Anyone = &inspector.AnyoneData{
RelayActive: true, // service is debros-anyone-relay for both modes RelayActive: true, // service is orama-anyone-relay for both modes
Mode: "client", Mode: "client",
SocksListening: true, SocksListening: true,
ControlListening: true, ControlListening: true,

View File

@ -36,20 +36,20 @@ func checkIPFSPerNode(nd *inspector.NodeData, data *inspector.ClusterData) []ins
// 3.1 IPFS daemon running // 3.1 IPFS daemon running
if ipfs.DaemonActive { if ipfs.DaemonActive {
r = append(r, inspector.Pass("ipfs.daemon_active", "IPFS daemon active", ipfsSub, node, r = append(r, inspector.Pass("ipfs.daemon_active", "IPFS daemon active", ipfsSub, node,
"debros-ipfs is active", inspector.Critical)) "orama-ipfs is active", inspector.Critical))
} else { } else {
r = append(r, inspector.Fail("ipfs.daemon_active", "IPFS daemon active", ipfsSub, node, r = append(r, inspector.Fail("ipfs.daemon_active", "IPFS daemon active", ipfsSub, node,
"debros-ipfs is not active", inspector.Critical)) "orama-ipfs is not active", inspector.Critical))
return r return r
} }
// 3.2 IPFS Cluster running // 3.2 IPFS Cluster running
if ipfs.ClusterActive { if ipfs.ClusterActive {
r = append(r, inspector.Pass("ipfs.cluster_active", "IPFS Cluster active", ipfsSub, node, r = append(r, inspector.Pass("ipfs.cluster_active", "IPFS Cluster active", ipfsSub, node,
"debros-ipfs-cluster is active", inspector.Critical)) "orama-ipfs-cluster is active", inspector.Critical))
} else { } else {
r = append(r, inspector.Fail("ipfs.cluster_active", "IPFS Cluster active", ipfsSub, node, r = append(r, inspector.Fail("ipfs.cluster_active", "IPFS Cluster active", ipfsSub, node,
"debros-ipfs-cluster is not active", inspector.Critical)) "orama-ipfs-cluster is not active", inspector.Critical))
} }
// 3.6 Swarm peer count // 3.6 Swarm peer count

View File

@ -36,10 +36,10 @@ func checkOlricPerNode(nd *inspector.NodeData) []inspector.CheckResult {
// 2.1 Service active // 2.1 Service active
if ol.ServiceActive { if ol.ServiceActive {
r = append(r, inspector.Pass("olric.service_active", "Olric service active", olricSub, node, r = append(r, inspector.Pass("olric.service_active", "Olric service active", olricSub, node,
"debros-olric is active", inspector.Critical)) "orama-olric is active", inspector.Critical))
} else { } else {
r = append(r, inspector.Fail("olric.service_active", "Olric service active", olricSub, node, r = append(r, inspector.Fail("olric.service_active", "Olric service active", olricSub, node,
"debros-olric is not active", inspector.Critical)) "orama-olric is not active", inspector.Critical))
return r return r
} }

View File

@ -34,7 +34,7 @@ func checkSystemPerNode(nd *inspector.NodeData) []inspector.CheckResult {
node := nd.Node.Name() node := nd.Node.Name()
// 6.1 Core services active // 6.1 Core services active
coreServices := []string{"debros-node", "debros-olric", "debros-ipfs", "debros-ipfs-cluster"} coreServices := []string{"orama-node", "orama-olric", "orama-ipfs", "orama-ipfs-cluster"}
for _, svc := range coreServices { for _, svc := range coreServices {
status, ok := sys.Services[svc] status, ok := sys.Services[svc]
if !ok { if !ok {
@ -51,7 +51,7 @@ func checkSystemPerNode(nd *inspector.NodeData) []inspector.CheckResult {
} }
// 6.2 Anyone relay/client services (only check if installed, don't fail if absent) // 6.2 Anyone relay/client services (only check if installed, don't fail if absent)
for _, svc := range []string{"debros-anyone-relay", "debros-anyone-client"} { for _, svc := range []string{"orama-anyone-relay", "orama-anyone-client"} {
status, ok := sys.Services[svc] status, ok := sys.Services[svc]
if !ok || status == "inactive" { if !ok || status == "inactive" {
continue // not installed or intentionally stopped continue // not installed or intentionally stopped
@ -94,21 +94,21 @@ func checkSystemPerNode(nd *inspector.NodeData) []inspector.CheckResult {
} }
} }
// 6.6 Failed systemd units (only debros-related units count as failures) // 6.6 Failed systemd units (only orama-related units count as failures)
var debrosUnits, externalUnits []string var oramaUnits, externalUnits []string
for _, u := range sys.FailedUnits { for _, u := range sys.FailedUnits {
if strings.HasPrefix(u, "debros-") || u == "wg-quick@wg0.service" || u == "caddy.service" || u == "coredns.service" { if strings.HasPrefix(u, "orama-") || u == "wg-quick@wg0.service" || u == "caddy.service" || u == "coredns.service" {
debrosUnits = append(debrosUnits, u) oramaUnits = append(oramaUnits, u)
} else { } else {
externalUnits = append(externalUnits, u) externalUnits = append(externalUnits, u)
} }
} }
if len(debrosUnits) > 0 { if len(oramaUnits) > 0 {
r = append(r, inspector.Fail("system.no_failed_units", "No failed debros systemd units", systemSub, node, r = append(r, inspector.Fail("system.no_failed_units", "No failed orama systemd units", systemSub, node,
fmt.Sprintf("failed: %s", strings.Join(debrosUnits, ", ")), inspector.High)) fmt.Sprintf("failed: %s", strings.Join(oramaUnits, ", ")), inspector.High))
} else { } else {
r = append(r, inspector.Pass("system.no_failed_units", "No failed debros systemd units", systemSub, node, r = append(r, inspector.Pass("system.no_failed_units", "No failed orama systemd units", systemSub, node,
"no failed debros units", inspector.High)) "no failed orama units", inspector.High))
} }
if len(externalUnits) > 0 { if len(externalUnits) > 0 {
r = append(r, inspector.Warn("system.external_failed_units", "External systemd units healthy", systemSub, node, r = append(r, inspector.Warn("system.external_failed_units", "External systemd units healthy", systemSub, node,
@ -217,15 +217,15 @@ func checkSystemPerNode(nd *inspector.NodeData) []inspector.CheckResult {
// 6.23 Process user // 6.23 Process user
if sys.ProcessUser != "" && sys.ProcessUser != "unknown" { if sys.ProcessUser != "" && sys.ProcessUser != "unknown" {
if sys.ProcessUser == "debros" { if sys.ProcessUser == "orama" {
r = append(r, inspector.Pass("system.process_user", "debros-node runs as correct user", systemSub, node, r = append(r, inspector.Pass("system.process_user", "orama-node runs as correct user", systemSub, node,
"user=debros", inspector.High)) "user=orama", inspector.High))
} else if sys.ProcessUser == "root" { } else if sys.ProcessUser == "root" {
r = append(r, inspector.Warn("system.process_user", "debros-node runs as correct user", systemSub, node, r = append(r, inspector.Warn("system.process_user", "orama-node runs as correct user", systemSub, node,
"user=root (should be debros)", inspector.High)) "user=root (should be orama)", inspector.High))
} else { } else {
r = append(r, inspector.Warn("system.process_user", "debros-node runs as correct user", systemSub, node, r = append(r, inspector.Warn("system.process_user", "orama-node runs as correct user", systemSub, node,
fmt.Sprintf("user=%s (expected debros)", sys.ProcessUser), inspector.Medium)) fmt.Sprintf("user=%s (expected orama)", sys.ProcessUser), inspector.Medium))
} }
} }

View File

@ -10,11 +10,11 @@ func TestCheckSystem_HealthyNode(t *testing.T) {
nd := makeNodeData("1.1.1.1", "node") nd := makeNodeData("1.1.1.1", "node")
nd.System = &inspector.SystemData{ nd.System = &inspector.SystemData{
Services: map[string]string{ Services: map[string]string{
"debros-node": "active", "orama-node": "active",
"debros-olric": "active", "orama-olric": "active",
"debros-ipfs": "active", "orama-ipfs": "active",
"debros-ipfs-cluster": "active", "orama-ipfs-cluster": "active",
"wg-quick@wg0": "active", "wg-quick@wg0": "active",
}, },
FailedUnits: nil, FailedUnits: nil,
MemTotalMB: 8192, MemTotalMB: 8192,
@ -31,17 +31,17 @@ func TestCheckSystem_HealthyNode(t *testing.T) {
InodePct: 10, InodePct: 10,
ListeningPorts: []int{5001, 3322, 6001, 4501}, ListeningPorts: []int{5001, 3322, 6001, 4501},
UFWActive: true, UFWActive: true,
ProcessUser: "debros", ProcessUser: "orama",
PanicCount: 0, PanicCount: 0,
} }
data := makeCluster(map[string]*inspector.NodeData{"1.1.1.1": nd}) data := makeCluster(map[string]*inspector.NodeData{"1.1.1.1": nd})
results := CheckSystem(data) results := CheckSystem(data)
expectStatus(t, results, "system.svc_debros_node", inspector.StatusPass) expectStatus(t, results, "system.svc_orama_node", inspector.StatusPass)
expectStatus(t, results, "system.svc_debros_olric", inspector.StatusPass) expectStatus(t, results, "system.svc_orama_olric", inspector.StatusPass)
expectStatus(t, results, "system.svc_debros_ipfs", inspector.StatusPass) expectStatus(t, results, "system.svc_orama_ipfs", inspector.StatusPass)
expectStatus(t, results, "system.svc_debros_ipfs_cluster", inspector.StatusPass) expectStatus(t, results, "system.svc_orama_ipfs_cluster", inspector.StatusPass)
expectStatus(t, results, "system.svc_wg", inspector.StatusPass) expectStatus(t, results, "system.svc_wg", inspector.StatusPass)
expectStatus(t, results, "system.no_failed_units", inspector.StatusPass) expectStatus(t, results, "system.no_failed_units", inspector.StatusPass)
expectStatus(t, results, "system.memory", inspector.StatusPass) expectStatus(t, results, "system.memory", inspector.StatusPass)
@ -63,30 +63,30 @@ func TestCheckSystem_ServiceInactive(t *testing.T) {
nd := makeNodeData("1.1.1.1", "node") nd := makeNodeData("1.1.1.1", "node")
nd.System = &inspector.SystemData{ nd.System = &inspector.SystemData{
Services: map[string]string{ Services: map[string]string{
"debros-node": "active", "orama-node": "active",
"debros-olric": "inactive", "orama-olric": "inactive",
"debros-ipfs": "active", "orama-ipfs": "active",
"debros-ipfs-cluster": "failed", "orama-ipfs-cluster": "failed",
}, },
} }
data := makeCluster(map[string]*inspector.NodeData{"1.1.1.1": nd}) data := makeCluster(map[string]*inspector.NodeData{"1.1.1.1": nd})
results := CheckSystem(data) results := CheckSystem(data)
expectStatus(t, results, "system.svc_debros_node", inspector.StatusPass) expectStatus(t, results, "system.svc_orama_node", inspector.StatusPass)
expectStatus(t, results, "system.svc_debros_olric", inspector.StatusFail) expectStatus(t, results, "system.svc_orama_olric", inspector.StatusFail)
expectStatus(t, results, "system.svc_debros_ipfs_cluster", inspector.StatusFail) expectStatus(t, results, "system.svc_orama_ipfs_cluster", inspector.StatusFail)
} }
func TestCheckSystem_NameserverServices(t *testing.T) { func TestCheckSystem_NameserverServices(t *testing.T) {
nd := makeNodeData("5.5.5.5", "nameserver-ns1") nd := makeNodeData("5.5.5.5", "nameserver-ns1")
nd.System = &inspector.SystemData{ nd.System = &inspector.SystemData{
Services: map[string]string{ Services: map[string]string{
"debros-node": "active", "orama-node": "active",
"debros-olric": "active", "orama-olric": "active",
"debros-ipfs": "active", "orama-ipfs": "active",
"debros-ipfs-cluster": "active", "orama-ipfs-cluster": "active",
"coredns": "active", "coredns": "active",
"caddy": "active", "caddy": "active",
}, },
} }
data := makeCluster(map[string]*inspector.NodeData{"5.5.5.5": nd}) data := makeCluster(map[string]*inspector.NodeData{"5.5.5.5": nd})
@ -99,10 +99,10 @@ func TestCheckSystem_NameserverServicesNotCheckedOnRegularNode(t *testing.T) {
nd := makeNodeData("1.1.1.1", "node") nd := makeNodeData("1.1.1.1", "node")
nd.System = &inspector.SystemData{ nd.System = &inspector.SystemData{
Services: map[string]string{ Services: map[string]string{
"debros-node": "active", "orama-node": "active",
"debros-olric": "active", "orama-olric": "active",
"debros-ipfs": "active", "orama-ipfs": "active",
"debros-ipfs-cluster": "active", "orama-ipfs-cluster": "active",
}, },
} }
data := makeCluster(map[string]*inspector.NodeData{"1.1.1.1": nd}) data := makeCluster(map[string]*inspector.NodeData{"1.1.1.1": nd})
@ -116,7 +116,7 @@ func TestCheckSystem_FailedUnits_Debros(t *testing.T) {
nd := makeNodeData("1.1.1.1", "node") nd := makeNodeData("1.1.1.1", "node")
nd.System = &inspector.SystemData{ nd.System = &inspector.SystemData{
Services: map[string]string{}, Services: map[string]string{},
FailedUnits: []string{"debros-node.service"}, FailedUnits: []string{"orama-node.service"},
} }
data := makeCluster(map[string]*inspector.NodeData{"1.1.1.1": nd}) data := makeCluster(map[string]*inspector.NodeData{"1.1.1.1": nd})
results := CheckSystem(data) results := CheckSystem(data)
@ -142,9 +142,9 @@ func TestCheckSystem_Memory(t *testing.T) {
total int total int
status inspector.Status status inspector.Status
}{ }{
{"healthy", 4000, 8000, inspector.StatusPass}, // 50% {"healthy", 4000, 8000, inspector.StatusPass}, // 50%
{"elevated", 7000, 8000, inspector.StatusWarn}, // 87.5% {"elevated", 7000, 8000, inspector.StatusWarn}, // 87.5%
{"critical", 7500, 8000, inspector.StatusFail}, // 93.75% {"critical", 7500, 8000, inspector.StatusFail}, // 93.75%
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
@ -248,7 +248,7 @@ func TestCheckSystem_ProcessUser(t *testing.T) {
user string user string
status inspector.Status status inspector.Status
}{ }{
{"correct", "debros", inspector.StatusPass}, {"correct", "orama", inspector.StatusPass},
{"root", "root", inspector.StatusWarn}, {"root", "root", inspector.StatusWarn},
{"other", "ubuntu", inspector.StatusWarn}, {"other", "ubuntu", inspector.StatusWarn},
} }

View File

@ -46,53 +46,53 @@ type NamespaceData struct {
// RQLiteData holds parsed RQLite status from a single node. // RQLiteData holds parsed RQLite status from a single node.
type RQLiteData struct { type RQLiteData struct {
Responsive bool Responsive bool
StatusRaw string // raw JSON from /status StatusRaw string // raw JSON from /status
NodesRaw string // raw JSON from /nodes?nonvoters NodesRaw string // raw JSON from /nodes?nonvoters
ReadyzRaw string // raw response from /readyz ReadyzRaw string // raw response from /readyz
DebugRaw string // raw JSON from /debug/vars DebugRaw string // raw JSON from /debug/vars
Status *RQLiteStatus // parsed /status Status *RQLiteStatus // parsed /status
Nodes map[string]*RQLiteNode // parsed /nodes Nodes map[string]*RQLiteNode // parsed /nodes
Readyz *RQLiteReadyz // parsed /readyz Readyz *RQLiteReadyz // parsed /readyz
DebugVars *RQLiteDebugVars // parsed /debug/vars DebugVars *RQLiteDebugVars // parsed /debug/vars
StrongRead bool // SELECT 1 with level=strong succeeded StrongRead bool // SELECT 1 with level=strong succeeded
} }
// RQLiteDebugVars holds metrics from /debug/vars. // RQLiteDebugVars holds metrics from /debug/vars.
type RQLiteDebugVars struct { type RQLiteDebugVars struct {
QueryErrors uint64 QueryErrors uint64
ExecuteErrors uint64 ExecuteErrors uint64
RemoteExecErrors uint64 RemoteExecErrors uint64
LeaderNotFound uint64 LeaderNotFound uint64
SnapshotErrors uint64 SnapshotErrors uint64
ClientRetries uint64 ClientRetries uint64
ClientTimeouts uint64 ClientTimeouts uint64
} }
// RQLiteStatus holds parsed fields from /status. // RQLiteStatus holds parsed fields from /status.
type RQLiteStatus struct { type RQLiteStatus struct {
RaftState string // Leader, Follower, Candidate, Shutdown RaftState string // Leader, Follower, Candidate, Shutdown
LeaderNodeID string // store.leader.node_id LeaderNodeID string // store.leader.node_id
LeaderAddr string // store.leader.addr LeaderAddr string // store.leader.addr
NodeID string // store.node_id NodeID string // store.node_id
Term uint64 // store.raft.term (current_term) Term uint64 // store.raft.term (current_term)
AppliedIndex uint64 // store.raft.applied_index AppliedIndex uint64 // store.raft.applied_index
CommitIndex uint64 // store.raft.commit_index CommitIndex uint64 // store.raft.commit_index
FsmPending uint64 // store.raft.fsm_pending FsmPending uint64 // store.raft.fsm_pending
LastContact string // store.raft.last_contact (followers only) LastContact string // store.raft.last_contact (followers only)
LastLogIndex uint64 // store.raft.last_log_index LastLogIndex uint64 // store.raft.last_log_index
LastLogTerm uint64 // store.raft.last_log_term LastLogTerm uint64 // store.raft.last_log_term
NumPeers int // store.raft.num_peers (string in JSON) NumPeers int // store.raft.num_peers (string in JSON)
LastSnapshot uint64 // store.raft.last_snapshot_index LastSnapshot uint64 // store.raft.last_snapshot_index
Voter bool // store.raft.voter Voter bool // store.raft.voter
DBSize int64 // store.sqlite3.db_size DBSize int64 // store.sqlite3.db_size
DBSizeFriendly string // store.sqlite3.db_size_friendly DBSizeFriendly string // store.sqlite3.db_size_friendly
DBAppliedIndex uint64 // store.db_applied_index DBAppliedIndex uint64 // store.db_applied_index
FsmIndex uint64 // store.fsm_index FsmIndex uint64 // store.fsm_index
Uptime string // http.uptime Uptime string // http.uptime
Version string // build.version Version string // build.version
GoVersion string // runtime.GOARCH + runtime.version GoVersion string // runtime.GOARCH + runtime.version
Goroutines int // runtime.num_goroutine Goroutines int // runtime.num_goroutine
HeapAlloc uint64 // runtime.memory.heap_alloc (bytes) HeapAlloc uint64 // runtime.memory.heap_alloc (bytes)
} }
// RQLiteNode holds parsed fields from /nodes response per node. // RQLiteNode holds parsed fields from /nodes response per node.
@ -107,11 +107,11 @@ type RQLiteNode struct {
// RQLiteReadyz holds parsed readiness state. // RQLiteReadyz holds parsed readiness state.
type RQLiteReadyz struct { type RQLiteReadyz struct {
Ready bool Ready bool
Store string // "ready" or error Store string // "ready" or error
Leader string // "ready" or error Leader string // "ready" or error
Node string // "ready" or error Node string // "ready" or error
RawBody string RawBody string
} }
// OlricData holds parsed Olric status from a single node. // OlricData holds parsed Olric status from a single node.
@ -130,17 +130,17 @@ type OlricData struct {
// IPFSData holds parsed IPFS status from a single node. // IPFSData holds parsed IPFS status from a single node.
type IPFSData struct { type IPFSData struct {
DaemonActive bool DaemonActive bool
ClusterActive bool ClusterActive bool
SwarmPeerCount int SwarmPeerCount int
ClusterPeerCount int ClusterPeerCount int
RepoSizeBytes int64 RepoSizeBytes int64
RepoMaxBytes int64 RepoMaxBytes int64
KuboVersion string KuboVersion string
ClusterVersion string ClusterVersion string
ClusterErrors int // peers reporting errors ClusterErrors int // peers reporting errors
HasSwarmKey bool HasSwarmKey bool
BootstrapEmpty bool // true if bootstrap list is empty (private swarm) BootstrapEmpty bool // true if bootstrap list is empty (private swarm)
} }
// DNSData holds parsed DNS/CoreDNS status from a nameserver node. // DNSData holds parsed DNS/CoreDNS status from a nameserver node.
@ -154,29 +154,29 @@ type DNSData struct {
CoreDNSRestarts int CoreDNSRestarts int
LogErrors int // error count in recent CoreDNS logs LogErrors int // error count in recent CoreDNS logs
// Resolution tests (dig results) // Resolution tests (dig results)
SOAResolves bool SOAResolves bool
NSResolves bool NSResolves bool
NSRecordCount int NSRecordCount int
WildcardResolves bool WildcardResolves bool
BaseAResolves bool BaseAResolves bool
// TLS // TLS
BaseTLSDaysLeft int // -1 = failed to check BaseTLSDaysLeft int // -1 = failed to check
WildTLSDaysLeft int // -1 = failed to check WildTLSDaysLeft int // -1 = failed to check
// Corefile // Corefile
CorefileExists bool CorefileExists bool
} }
// WireGuardData holds parsed WireGuard status from a node. // WireGuardData holds parsed WireGuard status from a node.
type WireGuardData struct { type WireGuardData struct {
InterfaceUp bool InterfaceUp bool
ServiceActive bool ServiceActive bool
WgIP string WgIP string
PeerCount int PeerCount int
Peers []WGPeer Peers []WGPeer
MTU int MTU int
ListenPort int ListenPort int
ConfigExists bool ConfigExists bool
ConfigPerms string // e.g. "600" ConfigPerms string // e.g. "600"
} }
// WGPeer holds parsed data for a single WireGuard peer. // WGPeer holds parsed data for a single WireGuard peer.
@ -192,26 +192,26 @@ type WGPeer struct {
// SystemData holds parsed system-level data from a node. // SystemData holds parsed system-level data from a node.
type SystemData struct { type SystemData struct {
Services map[string]string // service name → status Services map[string]string // service name → status
FailedUnits []string // systemd units in failed state FailedUnits []string // systemd units in failed state
MemTotalMB int MemTotalMB int
MemUsedMB int MemUsedMB int
MemFreeMB int MemFreeMB int
DiskTotalGB string DiskTotalGB string
DiskUsedGB string DiskUsedGB string
DiskAvailGB string DiskAvailGB string
DiskUsePct int DiskUsePct int
UptimeRaw string UptimeRaw string
LoadAvg string LoadAvg string
CPUCount int CPUCount int
OOMKills int OOMKills int
SwapUsedMB int SwapUsedMB int
SwapTotalMB int SwapTotalMB int
InodePct int // inode usage percentage InodePct int // inode usage percentage
ListeningPorts []int // ports from ss -tlnp ListeningPorts []int // ports from ss -tlnp
UFWActive bool UFWActive bool
ProcessUser string // user running debros-node (e.g. "debros") ProcessUser string // user running orama-node (e.g. "orama")
PanicCount int // panic/fatal in recent logs PanicCount int // panic/fatal in recent logs
} }
// NetworkData holds parsed network-level data from a node. // NetworkData holds parsed network-level data from a node.
@ -227,17 +227,17 @@ type NetworkData struct {
// AnyoneData holds parsed Anyone relay/client status from a node. // AnyoneData holds parsed Anyone relay/client status from a node.
type AnyoneData struct { type AnyoneData struct {
RelayActive bool // debros-anyone-relay systemd service active RelayActive bool // orama-anyone-relay systemd service active
ClientActive bool // debros-anyone-client systemd service active ClientActive bool // orama-anyone-client systemd service active
Mode string // "relay" or "client" (from anonrc ORPort presence) Mode string // "relay" or "client" (from anonrc ORPort presence)
ORPortListening bool // port 9001 bound locally ORPortListening bool // port 9001 bound locally
SocksListening bool // port 9050 bound locally (client SOCKS5) SocksListening bool // port 9050 bound locally (client SOCKS5)
ControlListening bool // port 9051 bound locally (control port) ControlListening bool // port 9051 bound locally (control port)
Bootstrapped bool // relay has bootstrapped to 100% Bootstrapped bool // relay has bootstrapped to 100%
BootstrapPct int // bootstrap percentage (0-100) BootstrapPct int // bootstrap percentage (0-100)
Fingerprint string // relay fingerprint Fingerprint string // relay fingerprint
Nickname string // relay nickname Nickname string // relay nickname
UptimeStr string // uptime from control port UptimeStr string // uptime from control port
ORPortReachable map[string]bool // host IP → whether we can TCP connect to their 9001 from this node ORPortReachable map[string]bool // host IP → whether we can TCP connect to their 9001 from this node
} }
@ -567,17 +567,17 @@ func collectOlric(ctx context.Context, node Node) *OlricData {
cmd := ` cmd := `
SEP="===INSPECTOR_SEP===" SEP="===INSPECTOR_SEP==="
echo "$SEP" echo "$SEP"
systemctl is-active debros-olric 2>/dev/null systemctl is-active orama-olric 2>/dev/null
echo "$SEP" echo "$SEP"
ss -tlnp 2>/dev/null | grep ':3322 ' | head -1 ss -tlnp 2>/dev/null | grep ':3322 ' | head -1
echo "$SEP" echo "$SEP"
journalctl -u debros-olric --no-pager -n 200 --since "1 hour ago" 2>/dev/null | grep -ciE '(error|ERR)' || echo 0 journalctl -u orama-olric --no-pager -n 200 --since "1 hour ago" 2>/dev/null | grep -ciE '(error|ERR)' || echo 0
echo "$SEP" echo "$SEP"
journalctl -u debros-olric --no-pager -n 200 --since "1 hour ago" 2>/dev/null | grep -ciE '(suspect|marking.*(failed|dead))' || echo 0 journalctl -u orama-olric --no-pager -n 200 --since "1 hour ago" 2>/dev/null | grep -ciE '(suspect|marking.*(failed|dead))' || echo 0
echo "$SEP" echo "$SEP"
journalctl -u debros-olric --no-pager -n 200 --since "1 hour ago" 2>/dev/null | grep -ciE '(memberlist.*(join|leave))' || echo 0 journalctl -u orama-olric --no-pager -n 200 --since "1 hour ago" 2>/dev/null | grep -ciE '(memberlist.*(join|leave))' || echo 0
echo "$SEP" echo "$SEP"
systemctl show debros-olric --property=NRestarts 2>/dev/null | cut -d= -f2 systemctl show orama-olric --property=NRestarts 2>/dev/null | cut -d= -f2
echo "$SEP" echo "$SEP"
ps -C olric-server -o rss= 2>/dev/null | head -1 || echo 0 ps -C olric-server -o rss= 2>/dev/null | head -1 || echo 0
` `
@ -611,9 +611,9 @@ func collectIPFS(ctx context.Context, node Node) *IPFSData {
cmd := ` cmd := `
SEP="===INSPECTOR_SEP===" SEP="===INSPECTOR_SEP==="
echo "$SEP" echo "$SEP"
systemctl is-active debros-ipfs 2>/dev/null systemctl is-active orama-ipfs 2>/dev/null
echo "$SEP" echo "$SEP"
systemctl is-active debros-ipfs-cluster 2>/dev/null systemctl is-active orama-ipfs-cluster 2>/dev/null
echo "$SEP" echo "$SEP"
curl -sf -X POST 'http://localhost:4501/api/v0/swarm/peers' 2>/dev/null | python3 -c "import sys,json; d=json.load(sys.stdin); print(len(d.get('Peers') or []))" 2>/dev/null || echo -1 curl -sf -X POST 'http://localhost:4501/api/v0/swarm/peers' 2>/dev/null | python3 -c "import sys,json; d=json.load(sys.stdin); print(len(d.get('Peers') or []))" 2>/dev/null || echo -1
echo "$SEP" echo "$SEP"
@ -625,7 +625,7 @@ curl -sf -X POST 'http://localhost:4501/api/v0/version' 2>/dev/null | python3 -c
echo "$SEP" echo "$SEP"
curl -sf 'http://localhost:9094/id' 2>/dev/null | python3 -c "import sys,json; print(json.load(sys.stdin).get('version',''))" 2>/dev/null || echo unknown curl -sf 'http://localhost:9094/id' 2>/dev/null | python3 -c "import sys,json; print(json.load(sys.stdin).get('version',''))" 2>/dev/null || echo unknown
echo "$SEP" echo "$SEP"
sudo test -f /home/debros/.orama/data/ipfs/repo/swarm.key && echo yes || echo no sudo test -f /home/orama/.orama/data/ipfs/repo/swarm.key && echo yes || echo no
echo "$SEP" echo "$SEP"
curl -sf -X POST 'http://localhost:4501/api/v0/bootstrap/list' 2>/dev/null | python3 -c "import sys,json; peers=json.load(sys.stdin).get('Peers',[]); print(len(peers))" 2>/dev/null || echo -1 curl -sf -X POST 'http://localhost:4501/api/v0/bootstrap/list' 2>/dev/null | python3 -c "import sys,json; peers=json.load(sys.stdin).get('Peers',[]); print(len(peers))" 2>/dev/null || echo -1
` `
@ -887,8 +887,8 @@ func collectSystem(ctx context.Context, node Node) *SystemData {
} }
services := []string{ services := []string{
"debros-node", "debros-ipfs", "debros-ipfs-cluster", "orama-node", "orama-ipfs", "orama-ipfs-cluster",
"debros-olric", "debros-anyone-relay", "debros-anyone-client", "orama-olric", "orama-anyone-relay", "orama-anyone-client",
"coredns", "caddy", "wg-quick@wg0", "coredns", "caddy", "wg-quick@wg0",
} }
@ -918,9 +918,9 @@ func collectSystem(ctx context.Context, node Node) *SystemData {
cmd += ` && echo "$SEP"` cmd += ` && echo "$SEP"`
cmd += ` && sudo ufw status 2>/dev/null | head -1` cmd += ` && sudo ufw status 2>/dev/null | head -1`
cmd += ` && echo "$SEP"` cmd += ` && echo "$SEP"`
cmd += ` && ps -C debros-node -o user= 2>/dev/null | head -1 || echo unknown` cmd += ` && ps -C orama-node -o user= 2>/dev/null | head -1 || echo unknown`
cmd += ` && echo "$SEP"` cmd += ` && echo "$SEP"`
cmd += ` && journalctl -u debros-node --no-pager -n 500 --since "1 hour ago" 2>/dev/null | grep -ciE '(panic|fatal)' || echo 0` cmd += ` && journalctl -u orama-node --no-pager -n 500 --since "1 hour ago" 2>/dev/null | grep -ciE '(panic|fatal)' || echo 0`
res := RunSSH(ctx, node, cmd) res := RunSSH(ctx, node, cmd)
if !res.OK() && res.Stdout == "" { if !res.OK() && res.Stdout == "" {
@ -1152,9 +1152,9 @@ func collectAnyone(ctx context.Context, node Node) *AnyoneData {
cmd := ` cmd := `
SEP="===INSPECTOR_SEP===" SEP="===INSPECTOR_SEP==="
echo "$SEP" echo "$SEP"
systemctl is-active debros-anyone-relay 2>/dev/null || echo inactive systemctl is-active orama-anyone-relay 2>/dev/null || echo inactive
echo "$SEP" echo "$SEP"
systemctl is-active debros-anyone-client 2>/dev/null || echo inactive systemctl is-active orama-anyone-client 2>/dev/null || echo inactive
echo "$SEP" echo "$SEP"
ss -tlnp 2>/dev/null | grep -q ':9001 ' && echo yes || echo no ss -tlnp 2>/dev/null | grep -q ':9001 ' && echo yes || echo no
echo "$SEP" echo "$SEP"
@ -1289,11 +1289,11 @@ func collectAnyoneReachability(ctx context.Context, data *ClusterData) {
} }
func collectNamespaces(ctx context.Context, node Node) []NamespaceData { func collectNamespaces(ctx context.Context, node Node) []NamespaceData {
// Detect namespace services: debros-namespace-gateway@<name>.service // Detect namespace services: orama-namespace-gateway@<name>.service
cmd := ` cmd := `
SEP="===INSPECTOR_SEP===" SEP="===INSPECTOR_SEP==="
echo "$SEP" echo "$SEP"
systemctl list-units --type=service --all --no-pager --no-legend 'debros-namespace-gateway@*.service' 2>/dev/null | awk '{print $1}' | sed 's/debros-namespace-gateway@//;s/\.service//' systemctl list-units --type=service --all --no-pager --no-legend 'orama-namespace-gateway@*.service' 2>/dev/null | awk '{print $1}' | sed 's/orama-namespace-gateway@//;s/\.service//'
echo "$SEP" echo "$SEP"
` `
res := RunSSH(ctx, node, cmd) res := RunSSH(ctx, node, cmd)
@ -1327,7 +1327,7 @@ echo "$SEP"
nsCmd += fmt.Sprintf(` nsCmd += fmt.Sprintf(`
echo "NS_START:%s" echo "NS_START:%s"
# Get gateway port from systemd or default discovery # Get gateway port from systemd or default discovery
GWPORT=$(ss -tlnp 2>/dev/null | grep 'debros-namespace-gateway@%s' | grep -oP ':\K[0-9]+' | head -1) GWPORT=$(ss -tlnp 2>/dev/null | grep 'orama-namespace-gateway@%s' | grep -oP ':\K[0-9]+' | head -1)
echo "GW_PORT:${GWPORT:-0}" echo "GW_PORT:${GWPORT:-0}"
# Try common namespace port ranges (10000-10099) # Try common namespace port ranges (10000-10099)
for BASE in $(seq 10000 5 10099); do for BASE in $(seq 10000 5 10099); do

View File

@ -21,7 +21,7 @@ type DiscoveryResult struct {
// DiscoverPeerFromDomain queries an existing node to get its peer ID and IPFS info // DiscoverPeerFromDomain queries an existing node to get its peer ID and IPFS info
// Tries HTTPS first, then falls back to HTTP // Tries HTTPS first, then falls back to HTTP
// Respects DEBROS_TRUSTED_TLS_DOMAINS and DEBROS_CA_CERT_PATH environment variables for certificate verification // Respects ORAMA_TRUSTED_TLS_DOMAINS and ORAMA_CA_CERT_PATH environment variables for certificate verification
func DiscoverPeerFromDomain(domain string) (*DiscoveryResult, error) { func DiscoverPeerFromDomain(domain string) (*DiscoveryResult, error) {
// Use centralized TLS configuration that respects CA certificates and trusted domains // Use centralized TLS configuration that respects CA certificates and trusted domains
client := tlsutil.NewHTTPClientForDomain(10*time.Second, domain) client := tlsutil.NewHTTPClientForDomain(10*time.Second, domain)

View File

@ -113,7 +113,7 @@ func (cm *ClusterConfigManager) EnsureConfig() error {
cfg.Cluster.Peername = nodeName cfg.Cluster.Peername = nodeName
cfg.Cluster.Secret = cm.secret cfg.Cluster.Secret = cm.secret
cfg.Cluster.ListenMultiaddress = []string{fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", clusterListenPort)} cfg.Cluster.ListenMultiaddress = []string{fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", clusterListenPort)}
cfg.Consensus.CRDT.ClusterName = "debros-cluster" cfg.Consensus.CRDT.ClusterName = "orama-cluster"
cfg.Consensus.CRDT.TrustedPeers = []string{"*"} cfg.Consensus.CRDT.TrustedPeers = []string{"*"}
cfg.API.RestAPI.HTTPListenMultiaddress = fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", restAPIPort) cfg.API.RestAPI.HTTPListenMultiaddress = fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", restAPIPort)
cfg.API.IPFSProxy.ListenMultiaddress = fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", proxyPort) cfg.API.IPFSProxy.ListenMultiaddress = fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", proxyPort)

View File

@ -86,9 +86,9 @@ func (is *InstanceSpawner) SpawnInstance(ctx context.Context, cfg InstanceConfig
// Raft tuning — match the global node's tuning for consistency // Raft tuning — match the global node's tuning for consistency
args = append(args, args = append(args,
"-raft-election-timeout", "5s", "-raft-election-timeout", "5s",
"-raft-heartbeat-timeout", "2s", "-raft-timeout", "2s",
"-raft-apply-timeout", "30s", "-raft-apply-timeout", "30s",
"-raft-leader-lease-timeout", "5s", "-raft-leader-lease-timeout", "2s",
) )
// Add join addresses if not the leader (must be before data directory) // Add join addresses if not the leader (must be before data directory)

View File

@ -128,11 +128,11 @@ func (r *RQLiteManager) launchProcess(ctx context.Context, rqliteDataDir string)
} }
raftLeaderLease := r.config.RaftLeaderLeaseTimeout raftLeaderLease := r.config.RaftLeaderLeaseTimeout
if raftLeaderLease == 0 { if raftLeaderLease == 0 {
raftLeaderLease = 5 * time.Second raftLeaderLease = 2 * time.Second
} }
args = append(args, args = append(args,
"-raft-election-timeout", raftElection.String(), "-raft-election-timeout", raftElection.String(),
"-raft-heartbeat-timeout", raftHeartbeat.String(), "-raft-timeout", raftHeartbeat.String(),
"-raft-apply-timeout", raftApply.String(), "-raft-apply-timeout", raftApply.String(),
"-raft-leader-lease-timeout", raftLeaderLease.String(), "-raft-leader-lease-timeout", raftLeaderLease.String(),
) )

View File

@ -17,7 +17,7 @@ func (r *RQLiteManager) rqliteDataDirPath() (string, error) {
} }
func (r *RQLiteManager) resolveMigrationsDir() (string, error) { func (r *RQLiteManager) resolveMigrationsDir() (string, error) {
productionPath := "/home/debros/src/migrations" productionPath := "/home/orama/src/migrations"
if _, err := os.Stat(productionPath); err == nil { if _, err := os.Stat(productionPath); err == nil {
return productionPath, nil return productionPath, nil
} }
@ -55,4 +55,3 @@ func (r *RQLiteManager) exponentialBackoff(attempt int, baseDelay time.Duration,
} }
return delay return delay
} }

View File

@ -37,7 +37,7 @@ func NewManager(namespaceBase string, logger *zap.Logger) *Manager {
// serviceName returns the systemd service name for a namespace and service type // serviceName returns the systemd service name for a namespace and service type
func (m *Manager) serviceName(namespace string, serviceType ServiceType) string { func (m *Manager) serviceName(namespace string, serviceType ServiceType) string {
return fmt.Sprintf("debros-namespace-%s@%s.service", serviceType, namespace) return fmt.Sprintf("orama-namespace-%s@%s.service", serviceType, namespace)
} }
// StartService starts a namespace service // StartService starts a namespace service
@ -228,7 +228,7 @@ func (m *Manager) StartAllNamespaceServices(namespace string) error {
// ListNamespaceServices returns all namespace services currently registered in systemd // ListNamespaceServices returns all namespace services currently registered in systemd
func (m *Manager) ListNamespaceServices() ([]string, error) { func (m *Manager) ListNamespaceServices() ([]string, error) {
cmd := exec.Command("sudo", "-n", "systemctl", "list-units", "--all", "--no-legend", "debros-namespace-*@*.service") cmd := exec.Command("sudo", "-n", "systemctl", "list-units", "--all", "--no-legend", "orama-namespace-*@*.service")
output, err := cmd.CombinedOutput() output, err := cmd.CombinedOutput()
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to list namespace services: %w; output: %s", err, string(output)) return nil, fmt.Errorf("failed to list namespace services: %w; output: %s", err, string(output))
@ -343,9 +343,9 @@ func (m *Manager) InstallTemplateUnits(sourceDir string) error {
m.logger.Info("Installing systemd template units", zap.String("source", sourceDir)) m.logger.Info("Installing systemd template units", zap.String("source", sourceDir))
templates := []string{ templates := []string{
"debros-namespace-rqlite@.service", "orama-namespace-rqlite@.service",
"debros-namespace-olric@.service", "orama-namespace-olric@.service",
"debros-namespace-gateway@.service", "orama-namespace-gateway@.service",
} }
for _, template := range templates { for _, template := range templates {

View File

@ -29,7 +29,7 @@ func init() {
trustedDomains = append(trustedDomains, defaultTrustedDomains...) trustedDomains = append(trustedDomains, defaultTrustedDomains...)
// Add any additional domains from environment // Add any additional domains from environment
domains := os.Getenv("DEBROS_TRUSTED_TLS_DOMAINS") domains := os.Getenv("ORAMA_TRUSTED_TLS_DOMAINS")
if domains != "" { if domains != "" {
for _, d := range strings.Split(domains, ",") { for _, d := range strings.Split(domains, ",") {
d = strings.TrimSpace(d) d = strings.TrimSpace(d)
@ -40,9 +40,9 @@ func init() {
} }
// Try to load CA certificate // Try to load CA certificate
caCertPath := os.Getenv("DEBROS_CA_CERT_PATH") caCertPath := os.Getenv("ORAMA_CA_CERT_PATH")
if caCertPath == "" { if caCertPath == "" {
caCertPath = "/etc/debros/ca.crt" caCertPath = "/etc/orama/ca.crt"
} }
if caCertData, err := os.ReadFile(caCertPath); err == nil { if caCertData, err := os.ReadFile(caCertPath); err == nil {

Some files were not shown because too many files have changed in this diff Show More