Compare commits

..

No commits in common. "main" and "v0.101.3-nightly" have entirely different histories.

1216 changed files with 14507 additions and 138846 deletions

View File

@ -0,0 +1,6 @@
# THIS IS AUTOGENERATED. DO NOT EDIT MANUALLY
version = 1
name = "network"
[setup]
script = "export MCP_BEARER_TOKEN=\"<REDACTED — token removed; rotate this credential and load it from the environment or a secrets manager instead of committing it>\""

View File

@ -8,7 +8,7 @@ NOCOLOR='\033[0m'
# Run tests before push
echo -e "\n${CYAN}Running tests...${NOCOLOR}"
cd "$(git rev-parse --show-toplevel)/core" && go test ./...
go test ./... # Runs all tests in your repo
status=$?
if [ $status -ne 0 ]; then
echo -e "${RED}Push aborted: some tests failed.${NOCOLOR}"

View File

@ -1,91 +0,0 @@
name: Bug Report
description: Report a bug in Orama Network
labels: ["bug"]
body:
- type: markdown
attributes:
value: |
Thanks for reporting a bug! Please fill out the sections below.
**Security issues:** If this is a security vulnerability, do NOT open an issue. Email security@orama.io instead.
- type: input
id: version
attributes:
label: Orama version
description: "Run `orama version` to find this"
placeholder: "v0.18.0-beta"
validations:
required: true
- type: dropdown
id: component
attributes:
label: Component
options:
- Gateway / API
- CLI (orama command)
- WireGuard / Networking
- RQLite / Storage
- Olric / Caching
- IPFS / Pinning
- CoreDNS
- OramaOS
- Other
validations:
required: true
- type: textarea
id: description
attributes:
label: Description
description: A clear description of the bug
validations:
required: true
- type: textarea
id: steps
attributes:
label: Steps to reproduce
description: Minimal steps to reproduce the behavior
placeholder: |
1. Run `orama ...`
2. See error
validations:
required: true
- type: textarea
id: expected
attributes:
label: Expected behavior
description: What you expected to happen
validations:
required: true
- type: textarea
id: actual
attributes:
label: Actual behavior
description: What actually happened (include error messages and logs if any)
validations:
required: true
- type: textarea
id: environment
attributes:
label: Environment
description: OS, Go version, deployment environment, etc.
placeholder: |
- OS: Ubuntu 22.04
- Go: 1.23
- Environment: sandbox
validations:
required: false
- type: textarea
id: context
attributes:
label: Additional context
description: Logs, screenshots, monitor reports, or anything else that might help
validations:
required: false

View File

@ -1,49 +0,0 @@
name: Feature Request
description: Suggest a new feature or improvement
labels: ["enhancement"]
body:
- type: markdown
attributes:
value: |
Thanks for the suggestion! Please describe what you'd like to see.
- type: dropdown
id: component
attributes:
label: Component
options:
- Gateway / API
- CLI (orama command)
- WireGuard / Networking
- RQLite / Storage
- Olric / Caching
- IPFS / Pinning
- CoreDNS
- OramaOS
- Other
validations:
required: true
- type: textarea
id: problem
attributes:
label: Problem
description: What problem does this solve? Why do you need it?
validations:
required: true
- type: textarea
id: solution
attributes:
label: Proposed solution
description: How do you think this should work?
validations:
required: true
- type: textarea
id: alternatives
attributes:
label: Alternatives considered
description: Any workarounds or alternative approaches you've thought of
validations:
required: false

View File

@ -1,31 +0,0 @@
## Summary
<!-- What does this PR do? Keep it to 1-3 bullet points. -->
## Motivation
<!-- Why is this change needed? Link to an issue if applicable. -->
## Test plan
<!-- How did you verify this works? -->
- [ ] `make test` passes
- [ ] Tested on sandbox/staging environment
## Distributed system impact
<!-- Does this change affect any of the following? If yes, explain. -->
- [ ] Raft quorum / RQLite
- [ ] WireGuard mesh / networking
- [ ] Olric gossip / caching
- [ ] Service startup ordering
- [ ] Rolling upgrade compatibility
## Checklist
- [ ] Tests added for new functionality or bug fix
- [ ] No debug code (`fmt.Println`, `log.Println`) left behind
- [ ] Docs updated (if user-facing behavior changed)
- [ ] Errors wrapped with context (`fmt.Errorf("...: %w", err)`)

View File

@ -1,80 +0,0 @@
name: Publish SDK to npm
on:
workflow_dispatch:
inputs:
version:
description: "Version to publish (e.g., 1.0.0). Leave empty to use package.json version."
required: false
dry-run:
description: "Dry run (don't actually publish)"
type: boolean
default: false
permissions:
contents: write
jobs:
publish:
name: Build & Publish @debros/orama
runs-on: ubuntu-latest
defaults:
run:
working-directory: sdk
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "20"
registry-url: "https://registry.npmjs.org"
- name: Install pnpm
uses: pnpm/action-setup@v4
with:
version: 9
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Bump version
if: inputs.version != ''
run: npm version ${{ inputs.version }} --no-git-tag-version
- name: Typecheck
run: pnpm typecheck
- name: Build
run: pnpm build
- name: Run unit tests
run: pnpm vitest run tests/unit
- name: Publish (dry run)
if: inputs.dry-run == true
run: npm publish --access public --dry-run
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: Publish
if: inputs.dry-run == false
run: npm publish --access public
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: Get published version
if: inputs.dry-run == false
id: version
run: echo "version=$(node -p "require('./package.json').version")" >> $GITHUB_OUTPUT
- name: Create git tag
if: inputs.dry-run == false
working-directory: .
run: |
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
git tag "sdk/v${{ steps.version.outputs.version }}"
git push origin "sdk/v${{ steps.version.outputs.version }}"

View File

@ -28,8 +28,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: "1.24"
cache-dependency-path: core/go.sum
go-version: "1.23"
- name: Get version
id: version
@ -47,7 +46,6 @@ jobs:
uses: docker/setup-qemu-action@v3
- name: Build binary
working-directory: core
env:
GOARCH: ${{ matrix.arch }}
CGO_ENABLED: 0
@ -59,9 +57,9 @@ jobs:
mkdir -p build/usr/local/bin
go build -ldflags "$LDFLAGS" -o build/usr/local/bin/orama cmd/cli/main.go
go build -ldflags "$LDFLAGS" -o build/usr/local/bin/orama-node cmd/node/main.go
go build -ldflags "$LDFLAGS" -o build/usr/local/bin/debros-node cmd/node/main.go
# Build the entire gateway package so helper files (e.g., config parsing) are included
go build -ldflags "$LDFLAGS" -o build/usr/local/bin/orama-gateway ./cmd/gateway
go build -ldflags "$LDFLAGS" -o build/usr/local/bin/debros-gateway ./cmd/gateway
- name: Create Debian package structure
run: |
@ -73,7 +71,7 @@ jobs:
mkdir -p ${PKG_NAME}/usr/local/bin
# Copy binaries
cp core/build/usr/local/bin/* ${PKG_NAME}/usr/local/bin/
cp build/usr/local/bin/* ${PKG_NAME}/usr/local/bin/
chmod 755 ${PKG_NAME}/usr/local/bin/*
# Create control file

View File

@ -23,8 +23,8 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: '1.24'
cache-dependency-path: core/go.sum
go-version: '1.21'
cache: true
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v5

155
.gitignore vendored
View File

@ -1,90 +1,103 @@
# === Global ===
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/
# Go workspace file
go.work
# Built binaries
bin/
dist/
# IDE and editor files
.vscode/
.idea/
*.swp
*.swo
*~
# OS generated files
.DS_Store
.codex/
redeploy-6.sh
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db
*.swp
*.swo
*~
# IDE
.vscode/
.idea/
.cursor/
# Log files
*.log
# Environment & credentials
# Environment variables
.env
.env.*
!.env.example
.mcp.json
.claude/
.codex/
.env.local
.env.*.local
# === Core (Go) ===
core/phantom-auth/
core/bin/
core/bin-linux/
core/dist/
core/orama-cli-linux
core/keys_backup/
core/.gocache/
core/configs/
core/data/*
core/tmp/
core/temp/
core/results/
core/rnd/
core/vps.txt
core/coverage.txt
core/coverage.html
core/profile.out
core/e2e/config.yaml
core/scripts/remote-nodes.conf
# E2E test config (contains production credentials)
e2e/config.yaml
# Go build artifacts
*.exe
*.exe~
*.dll
*.so
*.dylib
*.test
*.out
# Temporary files
tmp/
temp/
*.tmp
# Coverage reports
coverage.txt
coverage.html
profile.out
# Build artifacts
*.deb
*.rpm
*.tar.gz
*.zip
go.work
# Logs
*.log
# Databases
*.db
# === Website ===
website/node_modules/
website/dist/
website/invest-api/invest-api
website/invest-api/*.db
website/invest-api/*.db-shm
website/invest-api/*.db-wal
# === SDK (TypeScript) ===
sdk/node_modules/
sdk/dist/
sdk/coverage/
# === Vault (Zig) ===
vault/.zig-cache/
vault/zig-out/
# === OS ===
os/output/
# === Local development ===
.dev/
# Local development files
.local/
local/
data/*
./bootstrap
./node
data/bootstrap/rqlite/
.env.*
configs/
.dev/
.gocache/
.claude/
.mcp.json
.cursor/
# Remote node credentials
scripts/remote-nodes.conf
orama-cli-linux
rnd/
keys_backup/
vps.txt
bin-linux/
website/

View File

@ -2,20 +2,18 @@
# Builds and releases orama (CLI) and orama-node binaries
# Publishes to: GitHub Releases, Homebrew, and apt (.deb packages)
project_name: orama-network
project_name: debros-network
env:
- GO111MODULE=on
before:
hooks:
- cmd: go mod tidy
dir: core
- go mod tidy
builds:
# orama CLI binary
- id: orama
dir: core
main: ./cmd/cli
binary: orama
goos:
@ -33,7 +31,6 @@ builds:
# orama-node binary (Linux only for apt)
- id: orama-node
dir: core
main: ./cmd/node
binary: orama-node
goos:
@ -78,7 +75,7 @@ nfpms:
- orama
vendor: DeBros
homepage: https://github.com/DeBrosOfficial/network
maintainer: DeBros <dev@debros.io>
maintainer: DeBros <support@debros.io>
description: CLI tool for the Orama decentralized network
license: MIT
formats:
@ -87,7 +84,7 @@ nfpms:
section: utils
priority: optional
contents:
- src: ./core/README.md
- src: ./README.md
dst: /usr/share/doc/orama/README.md
deb:
lintian_overrides:
@ -100,7 +97,7 @@ nfpms:
- orama-node
vendor: DeBros
homepage: https://github.com/DeBrosOfficial/network
maintainer: DeBros <dev@debros.io>
maintainer: DeBros <support@debros.io>
description: Node daemon for the Orama decentralized network
license: MIT
formats:
@ -109,7 +106,7 @@ nfpms:
section: net
priority: optional
contents:
- src: ./core/README.md
- src: ./README.md
dst: /usr/share/doc/orama-node/README.md
deb:
lintian_overrides:

View File

@ -32,7 +32,7 @@ This Code applies within all project spaces and when an individual is officially
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the maintainers at: security@orama.io
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the maintainers at: security@debros.io
All complaints will be reviewed and investigated promptly and fairly.

View File

@ -1,78 +1,47 @@
# Contributing to Orama Network
# Contributing to DeBros Network
Thanks for helping improve the network! This monorepo contains multiple projects — pick the one relevant to your contribution.
Thanks for helping improve the network! This guide covers setup, local dev, tests, and PR guidelines.
## Repository Structure
## Requirements
| Package | Language | Build |
|---------|----------|-------|
| `core/` | Go 1.24+ | `make core-build` |
| `website/` | TypeScript (pnpm) | `make website-build` |
| `vault/` | Zig 0.14+ | `make vault-build` |
| `os/` | Go + Buildroot | `make os-build` |
- Go 1.22+ (1.23 recommended)
- RQLite (optional for local runs; the Makefile starts nodes with embedded setup)
- Make (optional)
## Setup
```bash
git clone https://github.com/DeBrosOfficial/network.git
cd network
```
### Core (Go)
```bash
cd core
make deps
make build
make test
```
### Website
## Build, Test, Lint
- Build: `make build`
- Test: `make test`
- Format/Vet: `make fmt vet` (or `make lint`)
```
Useful CLI commands:
```bash
cd website
pnpm install
pnpm dev
```
./bin/orama health
./bin/orama peers
./bin/orama status
```
### Vault (Zig)
## Versioning
```bash
cd vault
zig build
zig build test
```
- The CLI reports its version via `orama version`.
- Releases are tagged (e.g., `v0.18.0-beta`) and published via GoReleaser.
## Pull Requests
1. Fork and create a topic branch from `main`.
2. Ensure `make test` passes for affected packages.
3. Include tests for new functionality or bug fixes.
4. Keep PRs focused — one concern per PR.
5. Write a clear description: motivation, approach, and how you tested it.
6. Update docs if you're changing user-facing behavior.
## Code Style
### Go (core/, os/)
- Follow standard Go conventions
- Run `make lint` before submitting
- Wrap errors with context: `fmt.Errorf("failed to X: %w", err)`
- No magic values — use named constants
### TypeScript (website/)
- TypeScript strict mode
- Follow existing patterns in the codebase
### Zig (vault/)
- Follow standard Zig conventions
- Run `zig build test` before submitting
## Security
If you find a security vulnerability, **do not open a public issue**. Email security@debros.io instead.
1. Fork and create a topic branch.
2. Ensure `make build test` passes; include tests for new functionality.
3. Keep PRs focused and well-described (motivation, approach, testing).
4. Update README/docs for behavior changes.
Thank you for contributing!

272
Makefile
View File

@ -1,66 +1,244 @@
# Orama Monorepo
# Delegates to sub-project Makefiles
TEST?=./...
.PHONY: help build test clean
.PHONY: test
test:
@echo Running tests...
go test -v $(TEST)
# === Core (Go network) ===
.PHONY: core core-build core-test core-clean core-lint
core: core-build
# Gateway-focused E2E tests assume gateway and nodes are already running
# Auto-discovers configuration from ~/.orama and queries database for API key
# No environment variables required
.PHONY: test-e2e test-e2e-deployments test-e2e-fullstack test-e2e-https test-e2e-quick test-e2e-local test-e2e-prod test-e2e-shared test-e2e-cluster test-e2e-integration test-e2e-production
core-build:
$(MAKE) -C core build
# Check if gateway is running (helper)
.PHONY: check-gateway
check-gateway:
@if ! curl -sf http://localhost:6001/v1/health > /dev/null 2>&1; then \
echo "❌ Gateway not running on localhost:6001"; \
echo ""; \
echo "To run tests locally:"; \
echo " 1. Start the dev environment: make dev"; \
echo " 2. Wait for all services to start (~30 seconds)"; \
echo " 3. Run tests: make test-e2e-local"; \
echo ""; \
echo "To run tests against production:"; \
echo " ORAMA_GATEWAY_URL=https://dbrs.space make test-e2e"; \
exit 1; \
fi
@echo "✅ Gateway is running"
core-test:
$(MAKE) -C core test
# Local E2E tests - checks gateway first
test-e2e-local: check-gateway
@echo "Running E2E tests against local dev environment..."
go test -v -tags e2e -timeout 30m ./e2e/...
core-lint:
$(MAKE) -C core lint
# Production E2E tests - includes production-only tests
test-e2e-prod:
@if [ -z "$$ORAMA_GATEWAY_URL" ]; then \
echo "❌ ORAMA_GATEWAY_URL not set"; \
echo "Usage: ORAMA_GATEWAY_URL=https://dbrs.space make test-e2e-prod"; \
exit 1; \
fi
@echo "Running E2E tests (including production-only) against $$ORAMA_GATEWAY_URL..."
go test -v -tags "e2e production" -timeout 30m ./e2e/...
core-clean:
$(MAKE) -C core clean
# Generic e2e target (works with both local and production)
test-e2e:
@echo "Running comprehensive E2E tests..."
@echo "Auto-discovering configuration from ~/.orama..."
@echo "Tip: Use 'make test-e2e-local' for local or 'make test-e2e-prod' for production"
go test -v -tags e2e -timeout 30m ./e2e/...
# === Website ===
.PHONY: website website-dev website-build
website-dev:
cd website && pnpm dev
test-e2e-deployments:
@echo "Running deployment E2E tests..."
go test -v -tags e2e -timeout 15m ./e2e/deployments/...
website-build:
cd website && pnpm build
test-e2e-fullstack:
@echo "Running fullstack E2E tests..."
go test -v -tags e2e -timeout 20m -run "TestFullStack" ./e2e/...
# === SDK (TypeScript) ===
.PHONY: sdk sdk-build sdk-test
sdk: sdk-build
test-e2e-https:
@echo "Running HTTPS/external access E2E tests..."
go test -v -tags e2e -timeout 10m -run "TestHTTPS" ./e2e/...
sdk-build:
cd sdk && pnpm install && pnpm build
test-e2e-shared:
@echo "Running shared E2E tests..."
go test -v -tags e2e -timeout 10m ./e2e/shared/...
sdk-test:
cd sdk && pnpm test
test-e2e-cluster:
@echo "Running cluster E2E tests..."
go test -v -tags e2e -timeout 15m ./e2e/cluster/...
# === Vault (Zig) ===
.PHONY: vault vault-build vault-test
vault-build:
cd vault && zig build
test-e2e-integration:
@echo "Running integration E2E tests..."
go test -v -tags e2e -timeout 20m ./e2e/integration/...
vault-test:
cd vault && zig build test
test-e2e-production:
@echo "Running production-only E2E tests..."
go test -v -tags "e2e production" -timeout 15m ./e2e/production/...
# === OS ===
.PHONY: os os-build
os-build:
$(MAKE) -C os
test-e2e-quick:
@echo "Running quick E2E smoke tests..."
go test -v -tags e2e -timeout 5m -run "TestStatic|TestHealth" ./e2e/...
# === Aggregate ===
build: core-build
test: core-test
clean: core-clean
# Network - Distributed P2P Database System
# Makefile for development and build tasks
.PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks kill
VERSION := 0.101.3
COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)'
LDFLAGS_LINUX := -s -w $(LDFLAGS)
# Build targets
build: deps
@echo "Building network executables (version=$(VERSION))..."
@mkdir -p bin
go build -ldflags "$(LDFLAGS)" -o bin/identity ./cmd/identity
go build -ldflags "$(LDFLAGS)" -o bin/orama-node ./cmd/node
go build -ldflags "$(LDFLAGS)" -o bin/orama cmd/cli/main.go
go build -ldflags "$(LDFLAGS)" -o bin/rqlite-mcp ./cmd/rqlite-mcp
# Inject gateway build metadata via pkg path variables
go build -ldflags "$(LDFLAGS) -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildVersion=$(VERSION)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildCommit=$(COMMIT)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildTime=$(DATE)'" -o bin/gateway ./cmd/gateway
@echo "Build complete! Run ./bin/orama version"
# Cross-compile all binaries for Linux (used with --pre-built flag on VPS)
# Builds: DeBros binaries + Olric + CoreDNS (with rqlite plugin) + Caddy (with orama DNS module)
build-linux: deps
@echo "Cross-compiling all binaries for linux/amd64 (version=$(VERSION))..."
@mkdir -p bin-linux
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/identity ./cmd/identity
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/orama-node ./cmd/node
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/orama cmd/cli/main.go
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/rqlite-mcp ./cmd/rqlite-mcp
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX) -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildVersion=$(VERSION)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildCommit=$(COMMIT)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildTime=$(DATE)'" -trimpath -o bin-linux/gateway ./cmd/gateway
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/orama-cli ./cmd/cli
@echo "Building Olric for linux/amd64..."
GOOS=linux GOARCH=amd64 go build -ldflags "-s -w" -trimpath -o bin-linux/olric-server github.com/olric-data/olric/cmd/olric-server
@echo "✓ All Linux binaries built in bin-linux/"
@echo ""
@echo "Next steps:"
@echo " 1. Build CoreDNS: make build-linux-coredns"
@echo " 2. Build Caddy: make build-linux-caddy"
@echo " 3. Or build all: make build-linux-all"
# Build CoreDNS with rqlite plugin for Linux
build-linux-coredns:
@bash scripts/build-linux-coredns.sh
# Build Caddy with orama DNS module for Linux
build-linux-caddy:
@bash scripts/build-linux-caddy.sh
# Build everything for Linux (all binaries + CoreDNS + Caddy)
build-linux-all: build-linux build-linux-coredns build-linux-caddy
@echo ""
@echo "✅ All Linux binaries ready in bin-linux/:"
@ls -la bin-linux/
@echo ""
@echo "Deploy to VPS:"
@echo " scp bin-linux/* ubuntu@<ip>:/home/debros/bin/"
@echo " scp bin-linux/coredns ubuntu@<ip>:/usr/local/bin/coredns"
@echo " scp bin-linux/caddy ubuntu@<ip>:/usr/bin/caddy"
@echo " sudo orama install --pre-built --no-pull ..."
# Install git hooks
install-hooks:
@echo "Installing git hooks..."
@bash scripts/install-hooks.sh
# Clean build artifacts
clean:
@echo "Cleaning build artifacts..."
rm -rf bin/
rm -rf data/
@echo "Clean complete!"
# Run bootstrap node (auto-selects identity and data dir)
run-node:
@echo "Starting node..."
@echo "Config: ~/.orama/node.yaml"
go run ./cmd/orama-node --config node.yaml
# Run second node - requires join address
run-node2:
@echo "Starting second node..."
@echo "Config: ~/.orama/node2.yaml"
go run ./cmd/orama-node --config node2.yaml
# Run third node - requires join address
run-node3:
@echo "Starting third node..."
@echo "Config: ~/.orama/node3.yaml"
go run ./cmd/orama-node --config node3.yaml
# Run gateway HTTP server
run-gateway:
@echo "Starting gateway HTTP server..."
@echo "Note: Config must be in ~/.orama/data/gateway.yaml"
go run ./cmd/orama-gateway
# Development environment target
# Uses orama dev up to start full stack with dependency and port checking
dev: build
@./bin/orama dev up
# Graceful shutdown of all dev services
stop:
@if [ -f ./bin/orama ]; then \
./bin/orama dev down || true; \
fi
@bash scripts/dev-kill-all.sh
# Force kill all processes (immediate termination)
kill:
@bash scripts/dev-kill-all.sh
# Help
help:
@echo "Orama Monorepo"
@echo "Available targets:"
@echo " build - Build all executables"
@echo " clean - Clean build artifacts"
@echo " test - Run unit tests"
@echo ""
@echo " Core (Go): make core-build | core-test | core-lint | core-clean"
@echo " Website: make website-dev | website-build"
@echo " Vault (Zig): make vault-build | vault-test"
@echo " OS: make os-build"
@echo "Local Development (Recommended):"
@echo " make dev - Start full development stack with one command"
@echo " - Checks dependencies and available ports"
@echo " - Generates configs and starts all services"
@echo " - Validates cluster health"
@echo " make stop - Gracefully stop all development services"
@echo " make kill - Force kill all development services (use if stop fails)"
@echo ""
@echo " Aggregate: make build | test | clean (delegates to core)"
@echo "E2E Testing:"
@echo " make test-e2e-local - Run E2E tests against local dev (checks gateway first)"
@echo " make test-e2e-prod - Run all E2E tests incl. production-only (needs ORAMA_GATEWAY_URL)"
@echo " make test-e2e-shared - Run shared E2E tests (cache, storage, pubsub, auth)"
@echo " make test-e2e-cluster - Run cluster E2E tests (libp2p, olric, rqlite, namespace)"
@echo " make test-e2e-integration - Run integration E2E tests (fullstack, persistence, concurrency)"
@echo " make test-e2e-deployments - Run deployment E2E tests"
@echo " make test-e2e-production - Run production-only E2E tests (DNS, HTTPS, cross-node)"
@echo " make test-e2e-quick - Quick smoke tests (static deploys, health checks)"
@echo " make test-e2e - Generic E2E tests (auto-discovers config)"
@echo ""
@echo " Example production test:"
@echo " ORAMA_GATEWAY_URL=https://dbrs.space make test-e2e-prod"
@echo ""
@echo "Development Management (via orama):"
@echo " ./bin/orama dev status - Show status of all dev services"
@echo " ./bin/orama dev logs <component> [--follow]"
@echo ""
@echo "Individual Node Targets (advanced):"
@echo " run-node - Start first node directly"
@echo " run-node2 - Start second node directly"
@echo " run-node3 - Start third node directly"
@echo " run-gateway - Start HTTP gateway directly"
@echo ""
@echo "Maintenance:"
@echo " deps - Download dependencies"
@echo " tidy - Tidy dependencies"
@echo " fmt - Format code"
@echo " vet - Vet code"
@echo " lint - Lint code (fmt + vet)"
@echo " help - Show this help"

575
README.md
View File

@ -1,50 +1,557 @@
# Orama Network
# Orama Network - Distributed P2P Platform
A decentralized infrastructure platform combining distributed SQL, IPFS storage, caching, serverless WASM execution, and privacy relay — all managed through a unified API gateway.
A high-performance API Gateway and distributed platform built in Go. Provides a unified HTTP/HTTPS API for distributed SQL (RQLite), distributed caching (Olric), decentralized storage (IPFS), pub/sub messaging, and serverless WebAssembly execution.
## Packages
**Architecture:** Modular Gateway / Edge Proxy following SOLID principles
| Package | Language | Description |
|---------|----------|-------------|
| [core/](core/) | Go | API gateway, distributed node, CLI, and client SDK |
| [sdk/](sdk/) | TypeScript | `@debros/orama` — JavaScript/TypeScript SDK ([npm](https://www.npmjs.com/package/@debros/orama)) |
| [website/](website/) | TypeScript | Marketing website and invest portal |
| [vault/](vault/) | Zig | Distributed secrets vault (Shamir's Secret Sharing) |
| [os/](os/) | Go + Buildroot | OramaOS — hardened minimal Linux for network nodes |
## Features
- **🔐 Authentication** - Wallet signatures, API keys, JWT tokens
- **💾 Storage** - IPFS-based decentralized file storage with encryption
- **⚡ Cache** - Distributed cache with Olric (in-memory key-value)
- **🗄️ Database** - RQLite distributed SQL with Raft consensus + Per-namespace SQLite databases
- **📡 Pub/Sub** - Real-time messaging via LibP2P and WebSocket
- **⚙️ Serverless** - WebAssembly function execution with host functions
- **🌐 HTTP Gateway** - Unified REST API with automatic HTTPS (Let's Encrypt)
- **📦 Client SDK** - Type-safe Go SDK for all services
- **🚀 App Deployments** - Deploy React, Next.js, Go, Node.js apps with automatic domains
- **🗄️ SQLite Databases** - Per-namespace isolated databases with IPFS backups
## Application Deployments
Deploy full-stack applications with automatic domain assignment and namespace isolation.
### Deploy a React App
```bash
# Build your app
cd my-react-app
npm run build
# Deploy to Orama Network
orama deploy static ./dist --name my-app
# Your app is now live at: https://my-app.orama.network
```
### Deploy Next.js with SSR
```bash
cd my-nextjs-app
# Ensure next.config.js has: output: 'standalone'
npm run build
orama deploy nextjs . --name my-nextjs --ssr
# Live at: https://my-nextjs.orama.network
```
### Deploy Go Backend
```bash
# Build for Linux (name binary 'app' for auto-detection)
GOOS=linux GOARCH=amd64 go build -o app main.go
# Deploy (must implement /health endpoint)
orama deploy go ./app --name my-api
# API live at: https://my-api.orama.network
```
### Create SQLite Database
```bash
# Create database
orama db create my-database
# Create schema
orama db query my-database "CREATE TABLE users (id INT, name TEXT)"
# Insert data
orama db query my-database "INSERT INTO users VALUES (1, 'Alice')"
# Query data
orama db query my-database "SELECT * FROM users"
# Backup to IPFS
orama db backup my-database
```
### Full-Stack Example
Deploy a complete app with React frontend, Go backend, and SQLite database:
```bash
# 1. Create database
orama db create myapp-db
orama db query myapp-db "CREATE TABLE users (id INT PRIMARY KEY, name TEXT)"
# 2. Deploy Go backend (connects to database)
GOOS=linux GOARCH=amd64 go build -o api main.go
orama deploy go ./api --name myapp-api
# 3. Deploy React frontend (calls backend API)
cd frontend && npm run build
orama deploy static ./dist --name myapp
# Access:
# Frontend: https://myapp.orama.network
# Backend: https://myapp-api.orama.network
```
**📖 Full Guide**: See [Deployment Guide](docs/DEPLOYMENT_GUIDE.md) for complete documentation, examples, and best practices.
## Quick Start
### Local Development
```bash
# Build the core network binaries
make core-build
# Build the project
make build
# Run tests
make core-test
# Start website dev server
make website-dev
# Build vault
make vault-build
# Start 5-node development cluster
make dev
```
The cluster automatically performs health checks before declaring success.
### Stop Development Environment
```bash
make stop
```
## Testing Services
After running `make dev`, test service health using these curl requests:
### Node Unified Gateways
Each node is accessible via a single unified gateway port:
```bash
# Node-1 (port 6001)
curl http://localhost:6001/health
# Node-2 (port 6002)
curl http://localhost:6002/health
# Node-3 (port 6003)
curl http://localhost:6003/health
# Node-4 (port 6004)
curl http://localhost:6004/health
# Node-5 (port 6005)
curl http://localhost:6005/health
```
## Network Architecture
### Unified Gateway Ports
```
Node-1: localhost:6001 → /rqlite/http, /rqlite/raft, /cluster, /ipfs/api
Node-2: localhost:6002 → Same routes
Node-3: localhost:6003 → Same routes
Node-4: localhost:6004 → Same routes
Node-5: localhost:6005 → Same routes
```
### Direct Service Ports (for debugging)
```
RQLite HTTP: 5001, 5002, 5003, 5004, 5005 (one per node)
RQLite Raft: 7001, 7002, 7003, 7004, 7005
IPFS API: 4501, 4502, 4503, 4504, 4505
IPFS Swarm: 4101, 4102, 4103, 4104, 4105
Cluster API: 9094, 9104, 9114, 9124, 9134
Internal Gateway: 6000
Olric Cache: 3320
Anon SOCKS: 9050
```
## Development Commands
```bash
# Start full cluster (5 nodes + gateway)
make dev
# Check service status
orama dev status
# View logs
orama dev logs node-1 # Node-1 logs
orama dev logs node-1 --follow # Follow logs in real-time
orama dev logs gateway --follow # Gateway logs
# Stop all services
orama stop
# Build binaries
make build
```
## CLI Commands
### Authentication
```bash
orama auth login # Authenticate with wallet
orama auth status # Check authentication
orama auth logout # Clear credentials
```
### Application Deployments
```bash
# Deploy applications
orama deploy static <path> --name myapp # React, Vue, static sites
orama deploy nextjs <path> --name myapp --ssr # Next.js with SSR (requires output: 'standalone')
orama deploy go <path> --name myapp # Go binaries (must have /health endpoint)
orama deploy nodejs <path> --name myapp # Node.js apps (must have /health endpoint)
# Manage deployments
orama deployments list # List all deployments
orama deployments get <name> # Get deployment details
orama deployments logs <name> --follow # View logs
orama deployments delete <name> # Delete deployment
orama deployments rollback <name> --version 1 # Rollback to version
```
### SQLite Databases
```bash
orama db create <name> # Create database
orama db query <name> "SELECT * FROM t" # Execute SQL query
orama db list # List all databases
orama db backup <name> # Backup to IPFS
orama db backups <name> # List backups
```
### Network Status
```bash
orama health # Cluster health check
orama peers # List connected peers
orama status # Network status
```
### RQLite Operations
```bash
orama query "SELECT * FROM users"
orama query "CREATE TABLE users (id INTEGER PRIMARY KEY)"
orama transaction --file ops.json
```
### Pub/Sub
```bash
orama pubsub publish <topic> <message>
orama pubsub subscribe <topic> 30s
orama pubsub topics
```
## Serverless Functions (WASM)
Orama supports high-performance serverless function execution using WebAssembly (WASM). Functions are isolated, secure, and can interact with network services like the distributed cache.
### 1. Build Functions
Functions must be compiled to WASM. We recommend using [TinyGo](https://tinygo.org/).
```bash
# Build example functions to examples/functions/bin/
./examples/functions/build.sh
```
### 2. Deployment
Deploy your compiled `.wasm` file to the network via the Gateway.
```bash
# Deploy a function
curl -X POST http://localhost:6001/v1/functions \
-H "Authorization: Bearer <your_api_key>" \
-F "name=hello-world" \
-F "namespace=default" \
-F "wasm=@./examples/functions/bin/hello.wasm"
```
### 3. Invocation
Trigger your function with a JSON payload. The function receives the payload via `stdin` and returns its response via `stdout`.
```bash
# Invoke via HTTP
curl -X POST http://localhost:6001/v1/functions/hello-world/invoke \
-H "Authorization: Bearer <your_api_key>" \
-H "Content-Type: application/json" \
-d '{"name": "Developer"}'
```
### 4. Management
```bash
# List all functions in a namespace (quote the URL so the shell does not interpret the '?')
curl "http://localhost:6001/v1/functions?namespace=default"
# Delete a function
curl -X DELETE "http://localhost:6001/v1/functions/hello-world?namespace=default"
```
## Production Deployment
### Prerequisites
- Ubuntu 22.04+ or Debian 12+
- `amd64` or `arm64` architecture
- 4GB RAM, 50GB SSD, 2 CPU cores
### Required Ports
**External (must be open in firewall):**
- **80** - HTTP (ACME/Let's Encrypt certificate challenges)
- **443** - HTTPS (Main gateway API endpoint)
- **4101** - IPFS Swarm (peer connections)
- **7001** - RQLite Raft (cluster consensus)
**Internal (bound to localhost, no firewall needed):**
- 4501 - IPFS API
- 5001 - RQLite HTTP API
- 6001 - Unified Gateway
- 8080 - IPFS Gateway
- 9050 - Anyone SOCKS5 proxy
- 9094 - IPFS Cluster API
- 3320/3322 - Olric Cache
**Anyone Relay Mode (optional, for earning rewards):**
- 9001 - Anyone ORPort (relay traffic, must be open externally)
### Anyone Network Integration
Orama Network integrates with the [Anyone Protocol](https://anyone.io) for anonymous routing. By default, nodes run as **clients** (consuming the network). Optionally, you can run as a **relay operator** to earn rewards.
**Client Mode (Default):**
- Routes traffic through Anyone network for anonymity
- SOCKS5 proxy on localhost:9050
- No rewards, just consumes network
**Relay Mode (Earn Rewards):**
- Provide bandwidth to the Anyone network
- Earn $ANYONE tokens as a relay operator
- Requires 100 $ANYONE tokens in your wallet
- Requires ORPort (9001) open to the internet
```bash
# Install as relay operator (earn rewards)
sudo orama install --vps-ip <IP> --domain <domain> \
--anyone-relay \
--anyone-nickname "MyRelay" \
--anyone-contact "operator@email.com" \
--anyone-wallet "0x1234...abcd"
# With exit relay (legal implications apply)
sudo orama install --vps-ip <IP> --domain <domain> \
--anyone-relay \
--anyone-exit \
--anyone-nickname "MyExitRelay" \
--anyone-contact "operator@email.com" \
--anyone-wallet "0x1234...abcd"
# Migrate existing Anyone installation
sudo orama install --vps-ip <IP> --domain <domain> \
--anyone-relay \
--anyone-migrate \
--anyone-nickname "MyRelay" \
--anyone-contact "operator@email.com" \
--anyone-wallet "0x1234...abcd"
```
**Important:** After installation, register your relay at [dashboard.anyone.io](https://dashboard.anyone.io) to start earning rewards.
### Installation
**macOS (Homebrew):**
```bash
brew install DeBrosOfficial/tap/orama
```
**Linux (Debian/Ubuntu):**
```bash
# Download and install the latest .deb package
curl -sL https://github.com/DeBrosOfficial/network/releases/latest/download/orama_$(curl -s https://api.github.com/repos/DeBrosOfficial/network/releases/latest | grep tag_name | cut -d '"' -f 4 | tr -d 'v')_linux_amd64.deb -o orama.deb
sudo dpkg -i orama.deb
```
**From Source:**
```bash
go install github.com/DeBrosOfficial/network/cmd/cli@latest
```
**Setup (after installation):**
```bash
sudo orama install --interactive
```
### Service Management
```bash
# Status
orama status
# Control services
sudo orama start
sudo orama stop
sudo orama restart
# View logs
orama logs node --follow
orama logs gateway --follow
orama logs ipfs --follow
```
### Upgrade
```bash
# Upgrade to latest version
sudo orama upgrade --interactive
```
## Configuration
All configuration lives in `~/.orama/`:
- `configs/node.yaml` - Node configuration
- `configs/gateway.yaml` - Gateway configuration
- `configs/olric.yaml` - Cache configuration
- `secrets/` - Keys and certificates
- `data/` - Service data directories
## Troubleshooting
### Services Not Starting
```bash
# Check status
systemctl status debros-node
# View logs
journalctl -u debros-node -f
# Check log files
tail -f /home/debros/.orama/logs/node.log
```
### Port Conflicts
```bash
# Check what's using specific ports
sudo lsof -i :443 # HTTPS Gateway
sudo lsof -i :7001 # TCP/SNI Gateway
sudo lsof -i :6001 # Internal Gateway
```
### RQLite Cluster Issues
```bash
# Connect to RQLite CLI
rqlite -H localhost -p 5001
# Check cluster status
.nodes
.status
.ready
# Check consistency level
.consistency
```
### Reset Installation
```bash
# Production reset (⚠️ DESTROYS DATA)
sudo orama uninstall
sudo rm -rf /home/debros/.orama
sudo orama install
```
## HTTP Gateway API
### Main Gateway Endpoints
- `GET /health` - Health status
- `GET /v1/status` - Full status
- `GET /v1/version` - Version info
- `POST /v1/rqlite/exec` - Execute SQL
- `POST /v1/rqlite/query` - Query database
- `GET /v1/rqlite/schema` - Get schema
- `POST /v1/pubsub/publish` - Publish message
- `GET /v1/pubsub/topics` - List topics
- `GET /v1/pubsub/ws?topic=<name>` - WebSocket subscribe
- `POST /v1/functions` - Deploy function (multipart/form-data)
- `POST /v1/functions/{name}/invoke` - Invoke function
- `GET /v1/functions` - List functions
- `DELETE /v1/functions/{name}` - Delete function
- `GET /v1/functions/{name}/logs` - Get function logs
See `openapi/gateway.yaml` for complete API specification.
## Documentation
| Document | Description |
|----------|-------------|
| [Architecture](core/docs/ARCHITECTURE.md) | System architecture and design patterns |
| [Deployment Guide](core/docs/DEPLOYMENT_GUIDE.md) | Deploy apps, databases, and domains |
| [Dev & Deploy](core/docs/DEV_DEPLOY.md) | Building, deploying to VPS, rolling upgrades |
| [Security](core/docs/SECURITY.md) | Security hardening and threat model |
| [Monitoring](core/docs/MONITORING.md) | Cluster health monitoring |
| [Client SDK](core/docs/CLIENT_SDK.md) | Go SDK documentation |
| [Serverless](core/docs/SERVERLESS.md) | WASM serverless functions |
| [Common Problems](core/docs/COMMON_PROBLEMS.md) | Troubleshooting known issues |
- **[Deployment Guide](docs/DEPLOYMENT_GUIDE.md)** - Deploy React, Next.js, Go apps and manage databases
- **[Architecture Guide](docs/ARCHITECTURE.md)** - System architecture and design patterns
- **[Client SDK](docs/CLIENT_SDK.md)** - Go SDK documentation and examples
- **[Gateway API](docs/GATEWAY_API.md)** - Complete HTTP API reference
- **[Security Deployment](docs/SECURITY_DEPLOYMENT_GUIDE.md)** - Production security hardening
- **[Testing Plan](docs/TESTING_PLAN.md)** - Comprehensive testing strategy and implementation
## Resources
- [RQLite Documentation](https://rqlite.io/docs/)
- [IPFS Documentation](https://docs.ipfs.tech/)
- [LibP2P Documentation](https://docs.libp2p.io/)
- [WebAssembly](https://webassembly.org/)
- [GitHub Repository](https://github.com/DeBrosOfficial/network)
- [Issue Tracker](https://github.com/DeBrosOfficial/network/issues)
## Project Structure
```
network/
├── cmd/ # Binary entry points
│ ├── cli/ # CLI tool
│ ├── gateway/ # HTTP Gateway
│ ├── node/ # P2P Node
│ └── rqlite-mcp/ # RQLite MCP server
├── pkg/ # Core packages
│ ├── gateway/ # Gateway implementation
│ │ └── handlers/ # HTTP handlers by domain
│ ├── client/ # Go SDK
│ ├── serverless/ # WASM engine
│ ├── rqlite/ # Database ORM
│ ├── contracts/ # Interface definitions
│ ├── httputil/ # HTTP utilities
│ └── errors/ # Error handling
├── docs/ # Documentation
├── e2e/ # End-to-end tests
└── examples/ # Example code
```
## Contributing
See [CONTRIBUTING.md](CONTRIBUTING.md) for setup, development, and PR guidelines.
Contributions are welcome! This project follows:
- **SOLID Principles** - Single responsibility, open/closed, etc.
- **DRY Principle** - Don't repeat yourself
- **Clean Architecture** - Clear separation of concerns
- **Test Coverage** - Unit and E2E tests required
## License
[AGPL-3.0](LICENSE)
See our architecture docs for design patterns and guidelines.

211
cmd/cli/main.go Normal file
View File

@ -0,0 +1,211 @@
package main
import (
"fmt"
"os"
"time"
"github.com/DeBrosOfficial/network/pkg/cli"
)
// Global CLI settings shared by all command handlers; mutated in place by
// parseGlobalFlags before dispatch.
var (
	timeout = 30 * time.Second // operation timeout (-t/--timeout)
	format  = "table"          // output format (-f/--format): "table" or "json"
)

// version metadata populated via -ldflags at build time
var (
	version = "dev" // release version (e.g. "0.120.0"); "dev" for local builds
	commit  = ""    // short git commit hash, empty when unknown
	date    = ""    // build timestamp, empty when unknown
)
// main dispatches the first CLI argument to the matching command handler.
// Production commands without the legacy "prod" prefix are forwarded to the
// prod handler with the command name re-inserted as the first argument.
func main() {
	if len(os.Args) < 2 {
		showHelp()
		return
	}

	command := os.Args[1]
	args := os.Args[2:]

	// Parse global flags (-f/--format, -t/--timeout) before dispatching.
	parseGlobalFlags(args)

	// Direct production commands (new simplified interface): each forwards
	// to the prod handler as "orama prod <command> ..." would.
	prodCommands := map[string]bool{
		"invite":    true,
		"install":   true,
		"upgrade":   true,
		"migrate":   true,
		"status":    true,
		"start":     true,
		"stop":      true,
		"restart":   true,
		"logs":      true,
		"uninstall": true,
	}

	switch {
	case command == "version":
		printVersion()
	case command == "dev":
		// Development environment commands
		cli.HandleDevCommand(args)
	case command == "prod":
		// Production environment commands (legacy with 'prod' prefix)
		cli.HandleProdCommand(args)
	case prodCommands[command]:
		cli.HandleProdCommand(append([]string{command}, args...))
	case command == "auth":
		// Authentication commands
		cli.HandleAuthCommand(args)
	case command == "deploy":
		// Deployment commands
		cli.HandleDeployCommand(args)
	case command == "deployments":
		cli.HandleDeploymentsCommand(args)
	case command == "db":
		// Database commands
		cli.HandleDBCommand(args)
	case command == "namespace":
		// Namespace management
		cli.HandleNamespaceCommand(args)
	case command == "env":
		// Environment management
		cli.HandleEnvCommand(args)
	case command == "help" || command == "--help" || command == "-h":
		showHelp()
	default:
		fmt.Fprintf(os.Stderr, "Unknown command: %s\n", command)
		showHelp()
		os.Exit(1)
	}
}

// printVersion writes the build metadata injected via -ldflags, e.g.
// "orama 0.120.0 (commit abc1234) built 2024-01-01T00:00:00Z".
func printVersion() {
	fmt.Printf("orama %s", version)
	if commit != "" {
		fmt.Printf(" (commit %s)", commit)
	}
	if date != "" {
		fmt.Printf(" built %s", date)
	}
	fmt.Println()
}
// parseGlobalFlags scans args for the global CLI flags and updates the
// package-level settings:
//
//	-f, --format <format>     output format ("table" or "json")
//	-t, --timeout <duration>  operation timeout (Go duration, e.g. "45s")
//
// Arguments that are not global flags are left for the command handlers.
// If a flag is given more than once, the last occurrence wins.
//
// Fixes over the previous version: a flag's value token is now consumed so
// it cannot be re-parsed as another flag, and an unparsable timeout value is
// reported instead of being silently ignored.
func parseGlobalFlags(args []string) {
	for i := 0; i < len(args); i++ {
		switch args[i] {
		case "-f", "--format":
			if i+1 < len(args) {
				format = args[i+1]
				i++ // consume the value token
			}
		case "-t", "--timeout":
			if i+1 < len(args) {
				if d, err := time.ParseDuration(args[i+1]); err == nil {
					timeout = d
				} else {
					// Surface bad input instead of silently keeping the default.
					fmt.Fprintf(os.Stderr, "Warning: invalid timeout %q, keeping %s\n", args[i+1], timeout)
				}
				i++ // consume the value token
			}
		}
	}
}
// showHelp prints the top-level usage text for the orama CLI to stdout.
// Every line below is a fixed literal with no format verbs, so fmt.Print is
// used instead of fmt.Printf.
func showHelp() {
	fmt.Print("Orama CLI - Distributed P2P Network Management Tool\n\n")
	fmt.Print("Usage: orama <command> [args...]\n\n")
	fmt.Print("💻 Local Development:\n")
	fmt.Print(" dev up - Start full local dev environment\n")
	fmt.Print(" dev down - Stop all dev services\n")
	fmt.Print(" dev status - Show status of dev services\n")
	fmt.Print(" dev logs <component> - View dev component logs\n")
	fmt.Print(" dev help - Show dev command help\n\n")
	fmt.Print("🚀 Production Deployment:\n")
	fmt.Print(" install - Install production node (requires root/sudo)\n")
	fmt.Print(" upgrade - Upgrade existing installation\n")
	fmt.Print(" status - Show production service status\n")
	fmt.Print(" start - Start all production services (requires root/sudo)\n")
	fmt.Print(" stop - Stop all production services (requires root/sudo)\n")
	fmt.Print(" restart - Restart all production services (requires root/sudo)\n")
	fmt.Print(" logs <service> - View production service logs\n")
	fmt.Print(" uninstall - Remove production services (requires root/sudo)\n\n")
	fmt.Print("🔐 Authentication:\n")
	fmt.Print(" auth login - Authenticate with wallet\n")
	fmt.Print(" auth logout - Clear stored credentials\n")
	fmt.Print(" auth whoami - Show current authentication\n")
	fmt.Print(" auth status - Show detailed auth info\n")
	fmt.Print(" auth help - Show auth command help\n\n")
	fmt.Print("📦 Deployments:\n")
	fmt.Print(" deploy static <path> - Deploy a static site (React, Vue, etc.)\n")
	fmt.Print(" deploy nextjs <path> - Deploy a Next.js application\n")
	fmt.Print(" deploy go <path> - Deploy a Go backend\n")
	fmt.Print(" deploy nodejs <path> - Deploy a Node.js backend\n")
	fmt.Print(" deployments list - List all deployments\n")
	fmt.Print(" deployments get <name> - Get deployment details\n")
	fmt.Print(" deployments logs <name> - View deployment logs\n")
	fmt.Print(" deployments delete <name> - Delete a deployment\n")
	fmt.Print(" deployments rollback <name> - Rollback to previous version\n\n")
	fmt.Print("🗄️ Databases:\n")
	fmt.Print(" db create <name> - Create a SQLite database\n")
	fmt.Print(" db query <name> \"<sql>\" - Execute SQL query\n")
	fmt.Print(" db list - List all databases\n")
	fmt.Print(" db backup <name> - Backup database to IPFS\n")
	fmt.Print(" db backups <name> - List database backups\n\n")
	fmt.Print("🏢 Namespaces:\n")
	fmt.Print(" namespace delete - Delete current namespace and all resources\n\n")
	fmt.Print("🌍 Environments:\n")
	fmt.Print(" env list - List all environments\n")
	fmt.Print(" env current - Show current environment\n")
	fmt.Print(" env switch <name> - Switch to environment\n\n")
	fmt.Print("Global Flags:\n")
	fmt.Print(" -f, --format <format> - Output format: table, json (default: table)\n")
	fmt.Print(" -t, --timeout <duration> - Operation timeout (default: 30s)\n")
	fmt.Print(" --help, -h - Show this help message\n\n")
	fmt.Print("Examples:\n")
	fmt.Print(" # Deploy a React app\n")
	fmt.Print(" cd my-react-app && npm run build\n")
	fmt.Print(" orama deploy static ./dist --name my-app\n\n")
	fmt.Print(" # Deploy a Next.js app with SSR\n")
	fmt.Print(" cd my-nextjs-app && npm run build\n")
	fmt.Print(" orama deploy nextjs . --name my-nextjs --ssr\n\n")
	fmt.Print(" # Create and use a database\n")
	fmt.Print(" orama db create my-db\n")
	fmt.Print(" orama db query my-db \"CREATE TABLE users (id INT, name TEXT)\"\n")
	fmt.Print(" orama db query my-db \"INSERT INTO users VALUES (1, 'Alice')\"\n\n")
	fmt.Print(" # Manage deployments\n")
	fmt.Print(" orama deployments list\n")
	fmt.Print(" orama deployments get my-app\n")
	fmt.Print(" orama deployments logs my-app --follow\n\n")
	fmt.Print(" # First node (creates new cluster)\n")
	fmt.Print(" sudo orama install --vps-ip 203.0.113.1 --domain node-1.orama.network\n\n")
	fmt.Print(" # Service management\n")
	fmt.Print(" orama status\n")
	fmt.Print(" orama logs node --follow\n")
}

View File

@ -14,6 +14,10 @@ import (
"go.uber.org/zap"
)
// For transition, alias main.GatewayConfig to pkg/gateway.Config
// server.go will be removed; this keeps compatibility until then.
type GatewayConfig = gateway.Config
func getEnvDefault(key, def string) string {
if v := os.Getenv(key); strings.TrimSpace(v) != "" {
return v
@ -69,13 +73,6 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
}
// Load YAML
type yamlWebRTCCfg struct {
Enabled bool `yaml:"enabled"`
SFUPort int `yaml:"sfu_port"`
TURNDomain string `yaml:"turn_domain"`
TURNSecret string `yaml:"turn_secret"`
}
type yamlCfg struct {
ListenAddr string `yaml:"listen_addr"`
ClientNamespace string `yaml:"client_namespace"`
@ -91,7 +88,6 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
IPFSAPIURL string `yaml:"ipfs_api_url"`
IPFSTimeout string `yaml:"ipfs_timeout"`
IPFSReplicationFactor int `yaml:"ipfs_replication_factor"`
WebRTC yamlWebRTCCfg `yaml:"webrtc"`
}
data, err := os.ReadFile(configPath)
@ -200,18 +196,6 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
cfg.IPFSReplicationFactor = y.IPFSReplicationFactor
}
// WebRTC configuration
cfg.WebRTCEnabled = y.WebRTC.Enabled
if y.WebRTC.SFUPort > 0 {
cfg.SFUPort = y.WebRTC.SFUPort
}
if v := strings.TrimSpace(y.WebRTC.TURNDomain); v != "" {
cfg.TURNDomain = v
}
if v := strings.TrimSpace(y.WebRTC.TURNSecret); v != "" {
cfg.TURNSecret = v
}
// Validate configuration
if errs := cfg.ValidateConfig(); len(errs) > 0 {
fmt.Fprintf(os.Stderr, "\nGateway configuration errors (%d):\n", len(errs))

View File

@ -66,25 +66,15 @@ func main() {
// Create HTTP server for ACME challenge (port 80)
httpServer := &http.Server{
Addr: ":80",
Handler: manager.HTTPHandler(nil), // Redirects all HTTP traffic to HTTPS except ACME challenge
ReadHeaderTimeout: 10 * time.Second,
ReadTimeout: 60 * time.Second,
WriteTimeout: 120 * time.Second,
IdleTimeout: 120 * time.Second,
MaxHeaderBytes: 1 << 20, // 1MB
Addr: ":80",
Handler: manager.HTTPHandler(nil), // Redirects all HTTP traffic to HTTPS except ACME challenge
}
// Create HTTPS server (port 443)
httpsServer := &http.Server{
Addr: ":443",
Handler: gw.Routes(),
TLSConfig: manager.TLSConfig(),
ReadHeaderTimeout: 10 * time.Second,
ReadTimeout: 60 * time.Second,
WriteTimeout: 120 * time.Second,
IdleTimeout: 120 * time.Second,
MaxHeaderBytes: 1 << 20, // 1MB
Addr: ":443",
Handler: gw.Routes(),
TLSConfig: manager.TLSConfig(),
}
// Start HTTP server for ACME challenge
@ -171,13 +161,8 @@ func main() {
// Standard HTTP server (no HTTPS)
server := &http.Server{
Addr: cfg.ListenAddr,
Handler: gw.Routes(),
ReadHeaderTimeout: 10 * time.Second,
ReadTimeout: 60 * time.Second,
WriteTimeout: 120 * time.Second,
IdleTimeout: 120 * time.Second,
MaxHeaderBytes: 1 << 20, // 1MB
Addr: cfg.ListenAddr,
Handler: gw.Routes(),
}
// Try to bind listener explicitly so binding failures are visible immediately.

326
cmd/rqlite-mcp/main.go Normal file
View File

@ -0,0 +1,326 @@
package main
import (
"bufio"
"encoding/json"
"fmt"
"log"
"os"
"strings"
"time"
"github.com/rqlite/gorqlite"
)
// MCP JSON-RPC types

// JSONRPCRequest is a single JSON-RPC 2.0 request read from stdin. A request
// with no ID is a notification and receives no response.
type JSONRPCRequest struct {
	JSONRPC string          `json:"jsonrpc"`          // protocol version, "2.0"
	ID      any             `json:"id,omitempty"`     // request correlation ID; nil for notifications
	Method  string          `json:"method"`           // e.g. "initialize", "tools/list", "tools/call"
	Params  json.RawMessage `json:"params,omitempty"` // kept raw; decoded per method
}

// JSONRPCResponse is the reply written to stdout for a request. Exactly one
// of Result or Error is expected to be set.
type JSONRPCResponse struct {
	JSONRPC string         `json:"jsonrpc"`
	ID      any            `json:"id"`
	Result  any            `json:"result,omitempty"`
	Error   *ResponseError `json:"error,omitempty"`
}

// ResponseError carries a JSON-RPC error code/message pair
// (e.g. -32700 parse error, -32601 method not found).
type ResponseError struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
}
// Tool definition

// Tool describes one callable tool advertised via "tools/list".
// InputSchema is a JSON Schema object describing the tool's arguments.
type Tool struct {
	Name        string `json:"name"`
	Description string `json:"description"`
	InputSchema any    `json:"inputSchema"`
}

// Tool call types

// CallToolRequest is the params payload of a "tools/call" request;
// Arguments is decoded per tool.
type CallToolRequest struct {
	Name      string          `json:"name"`
	Arguments json.RawMessage `json:"arguments"`
}

// TextContent is a single text item in a tool result (Type is "text").
type TextContent struct {
	Type string `json:"type"`
	Text string `json:"text"`
}

// CallToolResult is the result of a tool invocation. Tool failures are
// reported in-band with IsError=true rather than as JSON-RPC errors.
type CallToolResult struct {
	Content []TextContent `json:"content"`
	IsError bool          `json:"isError,omitempty"`
}
// MCPServer bridges MCP JSON-RPC tool calls to a single rqlite connection.
type MCPServer struct {
	conn *gorqlite.Connection // shared connection; gorqlite handles request serialization
}
// NewMCPServer opens a gorqlite connection to the given rqlite URL and wraps
// it in an MCPServer. Cluster discovery is disabled to avoid /nodes timeouts
// from unreachable peers.
func NewMCPServer(rqliteURL string) (*MCPServer, error) {
	// Append the discovery opt-out with the correct query separator.
	sep := "?"
	if strings.Contains(rqliteURL, "?") {
		sep = "&"
	}
	conn, err := gorqlite.Open(rqliteURL + sep + "disableClusterDiscovery=true")
	if err != nil {
		return nil, err
	}
	return &MCPServer{conn: conn}, nil
}
// handleRequest dispatches one JSON-RPC request to the matching MCP method
// and returns the response to write to stdout. For the "notifications/
// initialized" notification it returns the zero JSONRPCResponse, which the
// caller (main) filters out before writing.
func (s *MCPServer) handleRequest(req JSONRPCRequest) JSONRPCResponse {
	var resp JSONRPCResponse
	resp.JSONRPC = "2.0"
	resp.ID = req.ID
	// Debug logging disabled to prevent excessive disk writes
	// log.Printf("Received method: %s", req.Method)
	switch req.Method {
	case "initialize":
		// MCP handshake: advertise the protocol version and that this
		// server exposes only tools (no resources/prompts capabilities).
		resp.Result = map[string]any{
			"protocolVersion": "2024-11-05",
			"capabilities": map[string]any{
				"tools": map[string]any{},
			},
			"serverInfo": map[string]any{
				"name":    "rqlite-mcp",
				"version": "0.1.0",
			},
		}
	case "notifications/initialized":
		// This is a notification, no response needed
		return JSONRPCResponse{}
	case "tools/list":
		// Debug logging disabled to prevent excessive disk writes
		// Static catalog of the three tools; InputSchema values are plain
		// JSON Schema objects clients use to validate arguments.
		tools := []Tool{
			{
				Name:        "list_tables",
				Description: "List all tables in the Rqlite database",
				InputSchema: map[string]any{
					"type":       "object",
					"properties": map[string]any{},
				},
			},
			{
				Name:        "query",
				Description: "Run a SELECT query on the Rqlite database",
				InputSchema: map[string]any{
					"type": "object",
					"properties": map[string]any{
						"sql": map[string]any{
							"type":        "string",
							"description": "The SQL SELECT query to run",
						},
					},
					"required": []string{"sql"},
				},
			},
			{
				Name:        "execute",
				Description: "Run an INSERT, UPDATE, or DELETE statement on the Rqlite database",
				InputSchema: map[string]any{
					"type": "object",
					"properties": map[string]any{
						"sql": map[string]any{
							"type":        "string",
							"description": "The SQL statement (INSERT, UPDATE, DELETE) to run",
						},
					},
					"required": []string{"sql"},
				},
			},
		}
		resp.Result = map[string]any{"tools": tools}
	case "tools/call":
		var callReq CallToolRequest
		if err := json.Unmarshal(req.Params, &callReq); err != nil {
			resp.Error = &ResponseError{Code: -32700, Message: "Parse error"}
			return resp
		}
		// Tool-level failures are reported inside the result (IsError),
		// not as a JSON-RPC error.
		resp.Result = s.handleToolCall(callReq)
	default:
		// Debug logging disabled to prevent excessive disk writes
		resp.Error = &ResponseError{Code: -32601, Message: "Method not found"}
	}
	return resp
}
// handleToolCall executes one of the registered tools against rqlite and
// renders the outcome as MCP text content. Failures are returned in-band via
// errorResult (IsError=true) rather than as JSON-RPC errors.
func (s *MCPServer) handleToolCall(req CallToolRequest) CallToolResult {
	// Debug logging disabled to prevent excessive disk writes
	// log.Printf("Tool call: %s", req.Name)
	switch req.Name {
	case "list_tables":
		rows, err := s.conn.QueryOne("SELECT name FROM sqlite_master WHERE type='table' ORDER BY name")
		if err != nil {
			return errorResult(fmt.Sprintf("Error listing tables: %v", err))
		}
		var tables []string
		for rows.Next() {
			// Rows that fail to decode are skipped rather than aborting the listing.
			slice, err := rows.Slice()
			if err == nil && len(slice) > 0 {
				tables = append(tables, fmt.Sprint(slice[0]))
			}
		}
		if len(tables) == 0 {
			return textResult("No tables found")
		}
		return textResult(strings.Join(tables, "\n"))
	case "query":
		var args struct {
			SQL string `json:"sql"` // the SELECT statement to run
		}
		if err := json.Unmarshal(req.Arguments, &args); err != nil {
			return errorResult(fmt.Sprintf("Invalid arguments: %v", err))
		}
		// Debug logging disabled to prevent excessive disk writes
		rows, err := s.conn.QueryOne(args.SQL)
		if err != nil {
			return errorResult(fmt.Sprintf("Query error: %v", err))
		}
		// Render as a " | "-separated table with a dashed separator line
		// (separator width is a rough estimate: 10 chars per column).
		var result strings.Builder
		cols := rows.Columns()
		result.WriteString(strings.Join(cols, " | ") + "\n")
		result.WriteString(strings.Repeat("-", len(cols)*10) + "\n")
		rowCount := 0
		for rows.Next() {
			vals, err := rows.Slice()
			if err != nil {
				// Skip undecodable rows; rowCount only counts rendered rows.
				continue
			}
			rowCount++
			for i, v := range vals {
				if i > 0 {
					result.WriteString(" | ")
				}
				result.WriteString(fmt.Sprint(v))
			}
			result.WriteString("\n")
		}
		result.WriteString(fmt.Sprintf("\n(%d rows)", rowCount))
		return textResult(result.String())
	case "execute":
		var args struct {
			SQL string `json:"sql"` // the INSERT/UPDATE/DELETE statement to run
		}
		if err := json.Unmarshal(req.Arguments, &args); err != nil {
			return errorResult(fmt.Sprintf("Invalid arguments: %v", err))
		}
		// Debug logging disabled to prevent excessive disk writes
		res, err := s.conn.WriteOne(args.SQL)
		if err != nil {
			return errorResult(fmt.Sprintf("Execution error: %v", err))
		}
		return textResult(fmt.Sprintf("Rows affected: %d", res.RowsAffected))
	default:
		return errorResult(fmt.Sprintf("Unknown tool: %s", req.Name))
	}
}
// textResult wraps plain text in a successful (non-error) CallToolResult.
func textResult(text string) CallToolResult {
	item := TextContent{Type: "text", Text: text}
	return CallToolResult{Content: []TextContent{item}}
}
// errorResult wraps an error message in a CallToolResult flagged with
// IsError, the MCP convention for reporting tool failures in-band.
func errorResult(text string) CallToolResult {
	item := TextContent{Type: "text", Text: text}
	return CallToolResult{
		Content: []TextContent{item},
		IsError: true,
	}
}
// main wires the MCP server to a stdio JSON-RPC transport: it connects to
// rqlite (retrying while the database starts up), then reads one JSON-RPC
// request per line from stdin and writes one JSON response per line to
// stdout. All logging goes to stderr so stdout stays a clean protocol stream.
//
// Fixes over the previous version: the scanner's token limit is raised so
// large tool calls (e.g. bulk INSERTs) are not silently dropped at bufio's
// default 64KB cap, and a terminal scanner error is now logged instead of
// being swallowed by an empty if-body.
func main() {
	// Log to stderr so stdout is clean for JSON-RPC
	log.SetOutput(os.Stderr)

	rqliteURL := "http://localhost:5001"
	if u := os.Getenv("RQLITE_URL"); u != "" {
		rqliteURL = u
	}

	var server *MCPServer
	var err error

	// Retry connecting to rqlite; it may still be starting up.
	maxRetries := 30
	for i := 0; i < maxRetries; i++ {
		server, err = NewMCPServer(rqliteURL)
		if err == nil {
			break
		}
		if i%5 == 0 {
			log.Printf("Waiting for Rqlite at %s... (%d/%d)", rqliteURL, i+1, maxRetries)
		}
		time.Sleep(1 * time.Second)
	}
	if err != nil {
		log.Fatalf("Failed to connect to Rqlite after %d retries: %v", maxRetries, err)
	}

	log.Printf("MCP Rqlite server started (stdio transport)")
	log.Printf("Connected to Rqlite at %s", rqliteURL)

	// Read JSON-RPC requests from stdin, write responses to stdout.
	scanner := bufio.NewScanner(os.Stdin)
	// Raise the per-line limit (default 64KB) so oversized requests do not
	// terminate the loop with bufio.ErrTooLong.
	scanner.Buffer(make([]byte, 0, 64*1024), 10*1024*1024)
	for scanner.Scan() {
		line := scanner.Text()
		if line == "" {
			continue
		}

		var req JSONRPCRequest
		if err := json.Unmarshal([]byte(line), &req); err != nil {
			// Malformed line: without a parsed ID there is nothing to
			// correlate an error response with, so skip it.
			continue
		}

		resp := server.handleRequest(req)

		// Don't send response for notifications (no ID)
		if req.ID == nil && strings.HasPrefix(req.Method, "notifications/") {
			continue
		}

		respData, err := json.Marshal(resp)
		if err != nil {
			// Responses are built from marshalable values; skip on the
			// off chance marshaling fails rather than crashing the server.
			continue
		}
		fmt.Println(string(respData))
	}
	// Log why the read loop ended (read failure, oversized line) so dropped
	// input can be diagnosed; a clean EOF yields a nil error.
	if err := scanner.Err(); err != nil {
		log.Printf("stdin scanner error: %v", err)
	}
}

View File

@ -1,8 +0,0 @@
# OpenRouter API Key for changelog generation
# Get your API key from https://openrouter.ai/keys
OPENROUTER_API_KEY=your-api-key-here
# ZeroSSL API Key for TLS certificates (alternative to Let's Encrypt)
# Get your free API key from https://app.zerossl.com/developer
# If not set, Caddy will use Let's Encrypt as the default CA
ZEROSSL_API_KEY=

View File

@ -1,181 +0,0 @@
# Network - Distributed P2P Database System build/test Makefile.
# NOTE(review): this file reads as two concatenated sections (E2E test
# targets first, then the build/deploy section with its own header below) —
# confirm intended ordering. Recipe lines use tabs, as make requires.

TEST?=./...

.PHONY: test
test:
	@echo Running tests...
	go test -v $(TEST)

# Gateway-focused E2E tests assume gateway and nodes are already running
# Auto-discovers configuration from ~/.orama and queries database for API key
# No environment variables required
.PHONY: test-e2e test-e2e-deployments test-e2e-fullstack test-e2e-https test-e2e-quick test-e2e-prod test-e2e-shared test-e2e-cluster test-e2e-integration test-e2e-production

# Production E2E tests - includes production-only tests
test-e2e-prod:
	@if [ -z "$$ORAMA_GATEWAY_URL" ]; then \
		echo "❌ ORAMA_GATEWAY_URL not set"; \
		echo "Usage: ORAMA_GATEWAY_URL=https://dbrs.space make test-e2e-prod"; \
		exit 1; \
	fi
	@echo "Running E2E tests (including production-only) against $$ORAMA_GATEWAY_URL..."
	go test -v -tags "e2e production" -timeout 30m ./e2e/...

# Generic e2e target
test-e2e:
	@echo "Running comprehensive E2E tests..."
	@echo "Auto-discovering configuration from ~/.orama..."
	go test -v -tags e2e -timeout 30m ./e2e/...

test-e2e-deployments:
	@echo "Running deployment E2E tests..."
	go test -v -tags e2e -timeout 15m ./e2e/deployments/...

test-e2e-fullstack:
	@echo "Running fullstack E2E tests..."
	go test -v -tags e2e -timeout 20m -run "TestFullStack" ./e2e/...

test-e2e-https:
	@echo "Running HTTPS/external access E2E tests..."
	go test -v -tags e2e -timeout 10m -run "TestHTTPS" ./e2e/...

test-e2e-shared:
	@echo "Running shared E2E tests..."
	go test -v -tags e2e -timeout 10m ./e2e/shared/...

test-e2e-cluster:
	@echo "Running cluster E2E tests..."
	go test -v -tags e2e -timeout 15m ./e2e/cluster/...

test-e2e-integration:
	@echo "Running integration E2E tests..."
	go test -v -tags e2e -timeout 20m ./e2e/integration/...

test-e2e-production:
	@echo "Running production-only E2E tests..."
	go test -v -tags "e2e production" -timeout 15m ./e2e/production/...

test-e2e-quick:
	@echo "Running quick E2E smoke tests..."
	go test -v -tags e2e -timeout 5m -run "TestStatic|TestHealth" ./e2e/...

# Network - Distributed P2P Database System
# Makefile for development and build tasks

.PHONY: build clean test deps tidy fmt vet lint install-hooks push-devnet push-testnet rollout-devnet rollout-testnet release

# Version metadata injected into binaries via -ldflags (see cmd/*/main.go).
VERSION := 0.120.0
COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)'
# Linux builds additionally strip symbols/DWARF (-s -w) to shrink binaries.
LDFLAGS_LINUX := -s -w $(LDFLAGS)

# Build targets
build: deps
	@echo "Building network executables (version=$(VERSION))..."
	@mkdir -p bin
	go build -ldflags "$(LDFLAGS)" -o bin/identity ./cmd/identity
	go build -ldflags "$(LDFLAGS)" -o bin/orama-node ./cmd/node
	go build -ldflags "$(LDFLAGS)" -o bin/orama ./cmd/cli/
	# Inject gateway build metadata via pkg path variables
	go build -ldflags "$(LDFLAGS) -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildVersion=$(VERSION)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildCommit=$(COMMIT)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildTime=$(DATE)'" -o bin/gateway ./cmd/gateway
	go build -ldflags "$(LDFLAGS)" -o bin/sfu ./cmd/sfu
	go build -ldflags "$(LDFLAGS)" -o bin/turn ./cmd/turn
	@echo "Build complete! Run ./bin/orama version"

# Cross-compile CLI for Linux (only binary needed locally; VPS builds everything else from source)
build-linux: deps
	@echo "Cross-compiling CLI for linux/amd64 (version=$(VERSION))..."
	@mkdir -p bin-linux
	GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/orama ./cmd/cli/
	@echo "✓ CLI built at bin-linux/orama"
	@echo ""
	@echo "Prefer 'make build-archive' for full pre-built binary archive."

# Build pre-compiled binary archive for deployment (all binaries + deps)
build-archive: deps
	@echo "Building binary archive (version=$(VERSION))..."
	go build -ldflags "$(LDFLAGS)" -o bin/orama ./cmd/cli/
	./bin/orama build --output /tmp/orama-$(VERSION)-linux-amd64.tar.gz

# Install git hooks
install-hooks:
	@echo "Installing git hooks..."
	@bash scripts/install-hooks.sh

# Install orama CLI to ~/.local/bin and configure PATH
install: build
	@bash scripts/install.sh

# Clean build artifacts
clean:
	@echo "Cleaning build artifacts..."
	rm -rf bin/
	rm -rf data/
	@echo "Clean complete!"

# Push binary archive to devnet nodes (fanout distribution)
push-devnet:
	./bin/orama node push --env devnet

# Push binary archive to testnet nodes (fanout distribution)
push-testnet:
	./bin/orama node push --env testnet

# Full rollout to devnet (build + push + rolling upgrade)
rollout-devnet:
	./bin/orama node rollout --env devnet --yes

# Full rollout to testnet (build + push + rolling upgrade)
rollout-testnet:
	./bin/orama node rollout --env testnet --yes

# Interactive release workflow (tag + push)
release:
	@bash scripts/release.sh

# Check health of all nodes in an environment
# Usage: make health ENV=devnet
health:
	@if [ -z "$(ENV)" ]; then \
		echo "Usage: make health ENV=devnet|testnet"; \
		exit 1; \
	fi
	./bin/orama monitor report --env $(ENV)

# Help
help:
	@echo "Available targets:"
	@echo " build - Build all executables"
	@echo " install - Build and install 'orama' CLI to ~/.local/bin"
	@echo " clean - Clean build artifacts"
	@echo " test - Run unit tests"
	@echo ""
	@echo "E2E Testing:"
	@echo " make test-e2e-prod - Run all E2E tests incl. production-only (needs ORAMA_GATEWAY_URL)"
	@echo " make test-e2e-shared - Run shared E2E tests (cache, storage, pubsub, auth)"
	@echo " make test-e2e-cluster - Run cluster E2E tests (libp2p, olric, rqlite, namespace)"
	@echo " make test-e2e-integration - Run integration E2E tests (fullstack, persistence, concurrency)"
	@echo " make test-e2e-deployments - Run deployment E2E tests"
	@echo " make test-e2e-production - Run production-only E2E tests (DNS, HTTPS, cross-node)"
	@echo " make test-e2e-quick - Quick smoke tests (static deploys, health checks)"
	@echo " make test-e2e - Generic E2E tests (auto-discovers config)"
	@echo ""
	@echo " Example:"
	@echo " ORAMA_GATEWAY_URL=https://orama-devnet.network make test-e2e-prod"
	@echo ""
	@echo "Deployment:"
	@echo " make build-archive - Build pre-compiled binary archive for deployment"
	@echo " make push-devnet - Push binary archive to devnet nodes"
	@echo " make push-testnet - Push binary archive to testnet nodes"
	@echo " make rollout-devnet - Full rollout: build + push + rolling upgrade (devnet)"
	@echo " make rollout-testnet - Full rollout: build + push + rolling upgrade (testnet)"
	@echo " make health ENV=devnet - Check health of all nodes in an environment"
	@echo " make release - Interactive release workflow (tag + push)"
	@echo ""
	@echo "Maintenance:"
	@echo " deps - Download dependencies"
	@echo " tidy - Tidy dependencies"
	@echo " fmt - Format code"
	@echo " vet - Vet code"
	@echo " lint - Lint code (fmt + vet)"
	@echo " help - Show this help"
View File

@ -1,5 +0,0 @@
package main

// main is a thin entry point: runCLI (defined elsewhere in this package)
// builds the cobra root command tree and executes it.
func main() {
	runCLI()
}

View File

@ -1,103 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/spf13/cobra"
// Command groups
"github.com/DeBrosOfficial/network/pkg/cli/cmd/app"
"github.com/DeBrosOfficial/network/pkg/cli/cmd/authcmd"
"github.com/DeBrosOfficial/network/pkg/cli/cmd/buildcmd"
"github.com/DeBrosOfficial/network/pkg/cli/cmd/dbcmd"
deploycmd "github.com/DeBrosOfficial/network/pkg/cli/cmd/deploy"
"github.com/DeBrosOfficial/network/pkg/cli/cmd/envcmd"
"github.com/DeBrosOfficial/network/pkg/cli/cmd/functioncmd"
"github.com/DeBrosOfficial/network/pkg/cli/cmd/inspectcmd"
"github.com/DeBrosOfficial/network/pkg/cli/cmd/monitorcmd"
"github.com/DeBrosOfficial/network/pkg/cli/cmd/namespacecmd"
"github.com/DeBrosOfficial/network/pkg/cli/cmd/node"
"github.com/DeBrosOfficial/network/pkg/cli/cmd/sandboxcmd"
)
// version metadata populated via -ldflags at build time.
// Must match Makefile: -X 'main.version=...' -X 'main.commit=...' -X 'main.date=...'
var (
	version = "dev"
	commit  = ""
	date    = ""
)

// newRootCmd constructs the root "orama" command, wires up the
// "version" subcommand, and attaches every command group.
func newRootCmd() *cobra.Command {
	root := &cobra.Command{
		Use:   "orama",
		Short: "Orama CLI - Distributed P2P Network Management Tool",
		Long: `Orama CLI is a tool for managing nodes, deploying applications,
and interacting with the Orama distributed network.`,
		SilenceUsage:  true,
		SilenceErrors: true,
	}

	// "version" prints the build metadata injected via -ldflags;
	// commit/date are only shown when they were actually set.
	versionCmd := &cobra.Command{
		Use:   "version",
		Short: "Show version information",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Printf("orama %s", version)
			if commit != "" {
				fmt.Printf(" (commit %s)", commit)
			}
			if date != "" {
				fmt.Printf(" built %s", date)
			}
			fmt.Println()
		},
	}
	root.AddCommand(versionCmd)

	// Attach all command groups in their original registration order.
	groups := []*cobra.Command{
		node.Cmd,         // node operator commands (was "prod")
		deploycmd.Cmd,    // deploy command (top-level, upsert)
		app.Cmd,          // app management (was "deployments")
		dbcmd.Cmd,        // database commands
		namespacecmd.Cmd, // namespace commands
		envcmd.Cmd,       // environment commands
		authcmd.Cmd,      // auth commands
		inspectcmd.Cmd,   // inspect command
		monitorcmd.Cmd,   // monitor command
		functioncmd.Cmd,  // serverless function commands
		buildcmd.Cmd,     // build command (cross-compile binary archive)
		sandboxcmd.Cmd,   // sandbox command (ephemeral Hetzner Cloud clusters)
	}
	for _, g := range groups {
		root.AddCommand(g)
	}

	return root
}
// runCLI builds the full command tree and executes it, printing the
// error to stderr and exiting with status 1 when a command fails.
func runCLI() {
	if err := newRootCmd().Execute(); err != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}
}

View File

@ -1,11 +0,0 @@
package main
import (
"os"
"github.com/DeBrosOfficial/network/pkg/cli"
)
// main forwards all CLI arguments (minus the program name) to the
// shared inspect command handler.
func main() {
	args := os.Args[1:]
	cli.HandleInspectCommand(args)
}

View File

@ -1,118 +0,0 @@
package main
import (
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/DeBrosOfficial/network/pkg/config"
"github.com/DeBrosOfficial/network/pkg/logging"
"github.com/DeBrosOfficial/network/pkg/sfu"
"go.uber.org/zap"
)
// newSFUServer creates a new SFU server from config and logger.
// Wrapper to keep main.go clean and avoid importing sfu in main.
//
// The config and any construction error are passed through unchanged
// from sfu.NewServer.
func newSFUServer(cfg *sfu.Config, logger *zap.Logger) (*sfu.Server, error) {
	return sfu.NewServer(cfg, logger)
}
// parseSFUConfig resolves the SFU config file path from the -config
// flag (an absolute path, or a filename under ~/.orama; defaults to
// sfu.yaml), strictly decodes the YAML, validates the resulting
// config, and returns it. Every failure is logged and terminates the
// process with exit code 1.
func parseSFUConfig(logger *logging.ColoredLogger) *sfu.Config {
	configFlag := flag.String("config", "", "Config file path (absolute path or filename in ~/.orama)")
	flag.Parse()

	// Resolve the effective config path: an empty flag falls back to
	// the default filename; relative names resolve under ~/.orama.
	name := *configFlag
	if name == "" {
		name = "sfu.yaml"
	}
	configPath := name
	if !filepath.IsAbs(name) {
		var err error
		configPath, err = config.DefaultPath(name)
		if err != nil {
			logger.ComponentError(logging.ComponentSFU, "Failed to determine config path", zap.Error(err))
			fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
			os.Exit(1)
		}
	}

	// Local mirror of the YAML schema so DecodeStrict rejects unknown keys.
	type yamlTURNServer struct {
		Host   string `yaml:"host"`
		Port   int    `yaml:"port"`
		Secure bool   `yaml:"secure"`
	}
	type yamlCfg struct {
		ListenAddr        string           `yaml:"listen_addr"`
		Namespace         string           `yaml:"namespace"`
		MediaPortStart    int              `yaml:"media_port_start"`
		MediaPortEnd      int              `yaml:"media_port_end"`
		TURNServers       []yamlTURNServer `yaml:"turn_servers"`
		TURNSecret        string           `yaml:"turn_secret"`
		TURNCredentialTTL int              `yaml:"turn_credential_ttl"`
		RQLiteDSN         string           `yaml:"rqlite_dsn"`
	}

	data, err := os.ReadFile(configPath)
	if err != nil {
		logger.ComponentError(logging.ComponentSFU, "Config file not found",
			zap.String("path", configPath), zap.Error(err))
		fmt.Fprintf(os.Stderr, "\nConfig file not found at %s\n", configPath)
		os.Exit(1)
	}

	var y yamlCfg
	if err := config.DecodeStrict(strings.NewReader(string(data)), &y); err != nil {
		logger.ComponentError(logging.ComponentSFU, "Failed to parse SFU config", zap.Error(err))
		fmt.Fprintf(os.Stderr, "Configuration parse error: %v\n", err)
		os.Exit(1)
	}

	// Map the YAML TURN entries onto the SFU config type.
	var turnServers []sfu.TURNServerConfig
	for _, ts := range y.TURNServers {
		turnServers = append(turnServers, sfu.TURNServerConfig{
			Host:   ts.Host,
			Port:   ts.Port,
			Secure: ts.Secure,
		})
	}

	cfg := &sfu.Config{
		ListenAddr:        y.ListenAddr,
		Namespace:         y.Namespace,
		MediaPortStart:    y.MediaPortStart,
		MediaPortEnd:      y.MediaPortEnd,
		TURNServers:       turnServers,
		TURNSecret:        y.TURNSecret,
		TURNCredentialTTL: y.TURNCredentialTTL,
		RQLiteDSN:         y.RQLiteDSN,
	}

	if errs := cfg.Validate(); len(errs) > 0 {
		fmt.Fprintf(os.Stderr, "\nSFU configuration errors (%d):\n", len(errs))
		for _, e := range errs {
			fmt.Fprintf(os.Stderr, " - %s\n", e)
		}
		fmt.Fprintf(os.Stderr, "\nPlease fix the configuration and try again.\n")
		os.Exit(1)
	}

	logger.ComponentInfo(logging.ComponentSFU, "Loaded SFU configuration",
		zap.String("path", configPath),
		zap.String("listen_addr", cfg.ListenAddr),
		zap.String("namespace", cfg.Namespace),
		zap.Int("media_ports", cfg.MediaPortEnd-cfg.MediaPortStart),
		zap.Int("turn_servers", len(cfg.TURNServers)),
	)
	return cfg
}

View File

@ -1,61 +0,0 @@
package main
import (
"errors"
"net/http"
"os"
"os/signal"
"syscall"
"time"
"github.com/DeBrosOfficial/network/pkg/logging"
"go.uber.org/zap"
)
// version metadata populated via -ldflags at build time.
var (
	version = "dev"
	commit  = "unknown"
)

// main boots the SFU server, then blocks until SIGINT/SIGTERM and
// performs a graceful drain before closing the server.
func main() {
	logger, err := logging.NewColoredLogger(logging.ComponentSFU, true)
	if err != nil {
		panic(err)
	}
	logger.ComponentInfo(logging.ComponentSFU, "Starting SFU server",
		zap.String("version", version),
		zap.String("commit", commit))

	cfg := parseSFUConfig(logger)

	server, err := newSFUServer(cfg, logger.Logger)
	if err != nil {
		logger.ComponentError(logging.ComponentSFU, "Failed to create SFU server", zap.Error(err))
		os.Exit(1)
	}

	// Serve in the background. http.ErrServerClosed is the normal
	// shutdown path and is not treated as an error.
	go func() {
		serveErr := server.ListenAndServe()
		if serveErr != nil && !errors.Is(serveErr, http.ErrServerClosed) {
			logger.ComponentError(logging.ComponentSFU, "SFU server error", zap.Error(serveErr))
			os.Exit(1)
		}
	}()

	// Block until a termination signal arrives.
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM)
	sig := <-sigCh
	logger.ComponentInfo(logging.ComponentSFU, "Shutdown signal received", zap.String("signal", sig.String()))

	// Graceful drain: notify peers and wait before tearing down.
	server.Drain(30 * time.Second)
	if err := server.Close(); err != nil {
		logger.ComponentError(logging.ComponentSFU, "Error during shutdown", zap.Error(err))
	}
	logger.ComponentInfo(logging.ComponentSFU, "SFU server shutdown complete")
}

View File

@ -1,100 +0,0 @@
package main
import (
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/DeBrosOfficial/network/pkg/config"
"github.com/DeBrosOfficial/network/pkg/logging"
"github.com/DeBrosOfficial/network/pkg/turn"
"go.uber.org/zap"
)
// parseTURNConfig resolves the TURN config file path from the -config
// flag (an absolute path, or a filename under ~/.orama; defaults to
// turn.yaml), strictly decodes the YAML, validates the resulting
// config, and returns it. Every failure is logged and terminates the
// process with exit code 1.
func parseTURNConfig(logger *logging.ColoredLogger) *turn.Config {
	configFlag := flag.String("config", "", "Config file path (absolute path or filename in ~/.orama)")
	flag.Parse()

	// Resolve the effective config path: an empty flag falls back to
	// the default filename; relative names resolve under ~/.orama.
	name := *configFlag
	if name == "" {
		name = "turn.yaml"
	}
	configPath := name
	if !filepath.IsAbs(name) {
		var err error
		configPath, err = config.DefaultPath(name)
		if err != nil {
			logger.ComponentError(logging.ComponentTURN, "Failed to determine config path", zap.Error(err))
			fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
			os.Exit(1)
		}
	}

	// Local mirror of the YAML schema so DecodeStrict rejects unknown keys.
	type yamlCfg struct {
		ListenAddr      string `yaml:"listen_addr"`
		TURNSListenAddr string `yaml:"turns_listen_addr"`
		PublicIP        string `yaml:"public_ip"`
		Realm           string `yaml:"realm"`
		AuthSecret      string `yaml:"auth_secret"`
		RelayPortStart  int    `yaml:"relay_port_start"`
		RelayPortEnd    int    `yaml:"relay_port_end"`
		Namespace       string `yaml:"namespace"`
		TLSCertPath     string `yaml:"tls_cert_path"`
		TLSKeyPath      string `yaml:"tls_key_path"`
	}

	data, err := os.ReadFile(configPath)
	if err != nil {
		logger.ComponentError(logging.ComponentTURN, "Config file not found",
			zap.String("path", configPath), zap.Error(err))
		fmt.Fprintf(os.Stderr, "\nConfig file not found at %s\n", configPath)
		os.Exit(1)
	}

	var y yamlCfg
	if err := config.DecodeStrict(strings.NewReader(string(data)), &y); err != nil {
		logger.ComponentError(logging.ComponentTURN, "Failed to parse TURN config", zap.Error(err))
		fmt.Fprintf(os.Stderr, "Configuration parse error: %v\n", err)
		os.Exit(1)
	}

	cfg := &turn.Config{
		ListenAddr:      y.ListenAddr,
		TURNSListenAddr: y.TURNSListenAddr,
		PublicIP:        y.PublicIP,
		Realm:           y.Realm,
		AuthSecret:      y.AuthSecret,
		RelayPortStart:  y.RelayPortStart,
		RelayPortEnd:    y.RelayPortEnd,
		Namespace:       y.Namespace,
		TLSCertPath:     y.TLSCertPath,
		TLSKeyPath:      y.TLSKeyPath,
	}

	if errs := cfg.Validate(); len(errs) > 0 {
		fmt.Fprintf(os.Stderr, "\nTURN configuration errors (%d):\n", len(errs))
		for _, e := range errs {
			fmt.Fprintf(os.Stderr, " - %s\n", e)
		}
		fmt.Fprintf(os.Stderr, "\nPlease fix the configuration and try again.\n")
		os.Exit(1)
	}

	logger.ComponentInfo(logging.ComponentTURN, "Loaded TURN configuration",
		zap.String("path", configPath),
		zap.String("listen_addr", cfg.ListenAddr),
		zap.String("namespace", cfg.Namespace),
		zap.String("realm", cfg.Realm),
	)
	return cfg
}

View File

@ -1,48 +0,0 @@
package main
import (
"os"
"os/signal"
"syscall"
"github.com/DeBrosOfficial/network/pkg/logging"
"github.com/DeBrosOfficial/network/pkg/turn"
"go.uber.org/zap"
)
// version metadata populated via -ldflags at build time.
var (
	version = "dev"
	commit  = "unknown"
)

// main boots the TURN server, then blocks until SIGINT/SIGTERM and
// closes it down.
func main() {
	logger, err := logging.NewColoredLogger(logging.ComponentTURN, true)
	if err != nil {
		panic(err)
	}
	logger.ComponentInfo(logging.ComponentTURN, "Starting TURN server",
		zap.String("version", version),
		zap.String("commit", commit))

	cfg := parseTURNConfig(logger)

	server, err := turn.NewServer(cfg, logger.Logger)
	if err != nil {
		logger.ComponentError(logging.ComponentTURN, "Failed to start TURN server", zap.Error(err))
		os.Exit(1)
	}

	// Block until a termination signal arrives.
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM)
	sig := <-sigCh
	logger.ComponentInfo(logging.ComponentTURN, "Shutdown signal received", zap.String("signal", sig.String()))

	if err := server.Close(); err != nil {
		logger.ComponentError(logging.ComponentTURN, "Error during shutdown", zap.Error(err))
	}
	logger.ComponentInfo(logging.ComponentTURN, "TURN server shutdown complete")
}

View File

@ -1,217 +0,0 @@
# Common Problems & Solutions
Troubleshooting guide for known issues in the Orama Network.
---
## 1. Namespace Gateway: "Olric unavailable"
**Symptom:** `ns-<name>.orama-devnet.network/v1/health` returns `"olric": {"status": "unavailable"}`.
**Cause:** The Olric memberlist gossip between namespace nodes is broken. Olric uses UDP pings for health checks — if those fail, the cluster can't bootstrap and the gateway reports Olric as unavailable.
### Check 1: WireGuard packet loss between nodes
SSH into each node and ping the other namespace nodes over WireGuard:
```bash
ping -c 10 -W 2 10.0.0.X # replace with the WG IP of each peer
```
If you see packet loss over WireGuard but **not** over the public IP (`ping <public-ip>`), the WireGuard peer session is corrupted.
**Fix — Reset the WireGuard peer on both sides:**
```bash
# On Node A — replace <pubkey> and <endpoint> with Node B's values
wg set wg0 peer <NodeB-pubkey> remove
wg set wg0 peer <NodeB-pubkey> endpoint <NodeB-public-ip>:51820 allowed-ips <NodeB-wg-ip>/32 persistent-keepalive 25
# On Node B — same but with Node A's values
wg set wg0 peer <NodeA-pubkey> remove
wg set wg0 peer <NodeA-pubkey> endpoint <NodeA-public-ip>:51820 allowed-ips <NodeA-wg-ip>/32 persistent-keepalive 25
```
Then restart services: `sudo orama node restart`
You can find peer public keys with `wg show wg0`.
### Check 2: Olric bound to 0.0.0.0 instead of WireGuard IP
Check the Olric config on each node:
```bash
cat /opt/orama/.orama/data/namespaces/<name>/configs/olric-*.yaml
```
If `bindAddr` is `0.0.0.0`, the node will try to bind to IPv6 on dual-stack hosts, breaking memberlist gossip.
**Fix:** Edit the YAML to use the node's WireGuard IP (run `ip addr show wg0` to find it), then restart: `sudo orama node restart`
This was fixed in code (BindAddr validation in `SpawnOlric`), so new namespaces won't have this issue.
### Check 3: Olric logs show "Failed UDP ping" constantly
```bash
journalctl -u orama-namespace-olric@<name>.service --no-pager -n 30
```
If every UDP ping fails but TCP stream connections succeed, it's the WireGuard packet loss issue (see Check 1).
---
## 2. Namespace Gateway: Missing config fields
**Symptom:** Gateway config YAML is missing `global_rqlite_dsn`, has `olric_timeout: 0s`, or `olric_servers` only lists `localhost`.
**Cause:** Before the spawn handler fix, `spawnGatewayRemote()` didn't send `global_rqlite_dsn` or `olric_timeout` to remote nodes.
**Fix:** Edit the gateway config manually:
```bash
vim /opt/orama/.orama/data/namespaces/<name>/configs/gateway-*.yaml
```
Add/fix:
```yaml
global_rqlite_dsn: "http://10.0.0.X:10001"
olric_timeout: 30s
olric_servers:
- "10.0.0.X:10002"
- "10.0.0.Y:10002"
- "10.0.0.Z:10002"
```
Then: `sudo orama node restart`
This was fixed in code, so new namespaces get the correct config.
---
## 3. Namespace not restoring after restart (missing cluster-state.json)
**Symptom:** After `orama node restart`, the namespace services don't come back because `RestoreLocalClustersFromDisk` has no state file.
**Check:**
```bash
ls /opt/orama/.orama/data/namespaces/<name>/cluster-state.json
```
If the file doesn't exist, the node can't restore the namespace.
**Fix:** Create the file manually from another node that has it, or reconstruct it. The format is:
```json
{
"namespace": "<name>",
"rqlite": { "http_port": 10001, "raft_port": 10000, ... },
"olric": { "http_port": 10002, "memberlist_port": 10003, ... },
"gateway": { "http_port": 10004, ... }
}
```
This was fixed in code — `ProvisionCluster` now saves state to all nodes (including remote ones via the `save-cluster-state` spawn action).
---
## 4. Namespace gateway processes not restarting after upgrade
**Symptom:** After `orama upgrade --restart` or `orama node restart`, namespace gateway/olric/rqlite services don't start.
**Cause:** `orama node stop` disables systemd template services (`orama-namespace-gateway@<name>.service`). They have `PartOf=orama-node.service`, but that only propagates restart to **enabled** services.
**Fix:** Re-enable the services before restarting:
```bash
systemctl enable orama-namespace-rqlite@<name>.service
systemctl enable orama-namespace-olric@<name>.service
systemctl enable orama-namespace-gateway@<name>.service
sudo orama node restart
```
This was fixed in code — the upgrade orchestrator now re-enables `@` services before restarting.
---
## 5. SSH commands eating stdin inside heredocs
**Symptom:** When running a script that SSHes into multiple nodes inside a heredoc (`<<'EOS'`), only the first SSH command runs — the rest are silently skipped.
**Cause:** `ssh` reads from stdin, consuming the rest of the heredoc.
**Fix:** Add `-n` flag to all `ssh` calls inside heredocs:
```bash
ssh -n user@host 'command'
```
`scp` is not affected (doesn't read stdin).
---
## 6. RQLite returns 401 Unauthorized
**Symptom:** RQLite queries fail with HTTP 401 after security hardening.
**Cause:** RQLite now requires basic auth. The client isn't sending credentials.
**Fix:** Ensure the RQLite client is configured with the credentials from `/opt/orama/.orama/secrets/rqlite-auth.json`. The central RQLite client wrapper (`pkg/rqlite/client.go`) handles this automatically. If using a standalone client (e.g., CoreDNS plugin), ensure it's also configured.
---
## 7. Olric cluster split after upgrade
**Symptom:** Olric nodes can't gossip after enabling memberlist encryption.
**Cause:** Olric memberlist encryption is all-or-nothing. Nodes with encryption can't communicate with nodes without it.
**Fix:** All nodes must be restarted simultaneously when enabling Olric encryption. The cache will be lost (it rebuilds from DB). This is expected — Olric is a cache, not persistent storage.
---
## 8. OramaOS: LUKS unlock fails
**Symptom:** OramaOS node can't reconstruct its LUKS key after reboot.
**Cause:** Not enough peer vault-guardians are online to meet the Shamir threshold (K = max(3, N/3)).
**Fix:** Ensure enough cluster nodes are online and reachable over WireGuard. The agent retries with exponential backoff. For genesis nodes before 5+ peers exist, use:
```bash
orama node unlock --genesis --node-ip <wg-ip>
```
---
## 9. OramaOS: Enrollment timeout
**Symptom:** `orama node enroll` hangs or times out.
**Cause:** The OramaOS node's port 9999 isn't reachable, or the Gateway can't reach the node's WebSocket.
**Fix:** Check that port 9999 is open in your VPS provider's external firewall (Hetzner firewall, AWS security groups, etc.). OramaOS opens it internally, but provider-level firewalls must be configured separately.
---
## 10. Binary signature verification fails
**Symptom:** `orama node install` rejects the binary archive with a signature error.
**Cause:** The archive was tampered with, or the manifest.sig file is missing/corrupted.
**Fix:** Rebuild the archive with `orama build` and re-sign with `make sign` (in the orama-os repo). Ensure you're using the rootwallet that matches the embedded signer address.
---
## General Debugging Tips
- **Always use `sudo orama node restart`** instead of raw `systemctl` commands
- **Namespace data lives at:** `/opt/orama/.orama/data/namespaces/<name>/`
- **Check service logs:** `journalctl -u orama-namespace-olric@<name>.service --no-pager -n 50`
- **Check WireGuard:** `wg show wg0` — look for recent handshakes and transfer bytes
- **Check gateway health:** `curl http://localhost:<port>/v1/health` from the node itself
- **Node IPs:** Check `scripts/remote-nodes.conf` for credentials, `wg show wg0` for WG IPs
- **OramaOS nodes:** No SSH access — use Gateway API endpoints (`/v1/node/status`, `/v1/node/logs`) for diagnostics

View File

@ -1,213 +0,0 @@
# Inspector
The inspector is a cluster health check tool that SSHs into every node, collects subsystem data in parallel, runs deterministic checks, and optionally sends failures to an AI model for root-cause analysis.
## Pipeline
```
Collect (parallel SSH) → Check (deterministic Go) → Report (table/JSON) → Analyze (optional AI)
```
1. **Collect** — SSH into every node in parallel, run diagnostic commands, parse results into structured data.
2. **Check** — Run pure Go check functions against the collected data. Each check produces a pass/fail/warn/skip result with a severity level.
3. **Report** — Print results as a table (default) or JSON. Failures sort first, grouped by subsystem.
4. **Analyze** — If `--ai` is enabled and there are failures or warnings, send them to an LLM via OpenRouter for root-cause analysis.
## Quick Start
```bash
# Inspect all subsystems on devnet
orama inspect --env devnet
# Inspect only RQLite
orama inspect --env devnet --subsystem rqlite
# JSON output
orama inspect --env devnet --format json
# With AI analysis
orama inspect --env devnet --ai
```
## Usage
```
orama inspect [flags]
```
| Flag | Default | Description |
|------|---------|-------------|
| `--config` | `scripts/remote-nodes.conf` | Path to node configuration file |
| `--env` | *(required)* | Environment to inspect (`devnet`, `testnet`) |
| `--subsystem` | `all` | Comma-separated subsystems to inspect |
| `--format` | `table` | Output format: `table` or `json` |
| `--timeout` | `30s` | SSH command timeout per node |
| `--verbose` | `false` | Print collection progress |
| `--ai` | `false` | Enable AI analysis of failures |
| `--model` | `moonshotai/kimi-k2.5` | OpenRouter model for AI analysis |
| `--api-key` | `$OPENROUTER_API_KEY` | OpenRouter API key |
### Subsystem Names
`rqlite`, `olric`, `ipfs`, `dns`, `wireguard` (alias: `wg`), `system`, `network`, `namespace`
Multiple subsystems can be combined: `--subsystem rqlite,olric,dns`
## Subsystems
| Subsystem | What It Checks |
|-----------|---------------|
| **rqlite** | Raft state, leader election, readyz, commit/applied gap, FSM pending, strong reads, debug vars (query errors, leader_not_found, snapshots), cross-node leader agreement, term consistency, applied index convergence, quorum, version match |
| **olric** | Service active, memberlist up, restart count, memory usage, log analysis (suspects, flapping, errors), cross-node memberlist consistency |
| **ipfs** | Daemon active, cluster active, swarm peer count, cluster peer count, cluster errors, repo usage %, swarm key present, bootstrap list empty, cross-node version consistency |
| **dns** | CoreDNS active, Caddy active, ports (53/80/443), memory, restart count, log errors, Corefile exists, SOA/NS/wildcard/base-A resolution, TLS cert expiry, cross-node nameserver availability |
| **wireguard** | Interface up, service active, correct 10.0.0.x IP, listen port 51820, peer count vs expected, MTU 1420, config exists + permissions 600, peer handshakes (fresh/stale/never), peer traffic, catch-all route detection, cross-node peer count + MTU consistency |
| **system** | Core services (orama-node, rqlite, olric, ipfs, ipfs-cluster, wg-quick), nameserver services (coredns, caddy), failed systemd units, memory/disk/inode usage, load average, OOM kills, swap, UFW active, process user (orama), panic count, expected ports |
| **network** | Internet reachability, default route, WireGuard route, TCP connection count, TIME_WAIT count, TCP retransmission rate, WireGuard mesh ping (all peers) |
| **namespace** | Per-namespace: RQLite up + raft state + readyz, Olric memberlist, Gateway HTTP health. Cross-namespace: all-healthy check, RQLite quorum per namespace |
## Severity Levels
| Level | When Used |
|-------|-----------|
| **CRITICAL** | Service completely down. Raft quorum lost, RQLite unresponsive, no leader. |
| **HIGH** | Service degraded. Olric down, gateway not responding, IPFS swarm key missing. |
| **MEDIUM** | Non-ideal but functional. Stale handshakes, elevated memory, log suspects. |
| **LOW** | Informational. Non-standard MTU, port mismatch, version skew. |
## Check Statuses
| Status | Meaning |
|--------|---------|
| **pass** | Check passed. |
| **fail** | Check failed — action needed. |
| **warn** | Degraded — monitor or investigate. |
| **skip** | Check could not run (insufficient data). |
## Output Formats
### Table (default)
```
Inspecting 14 devnet nodes...
## RQLITE
----------------------------------------------------------------------
OK [CRITICAL] RQLite responding (ubuntu@10.0.0.1)
responsive=true version=v8.36.16
FAIL [CRITICAL] Cluster has exactly one leader
leaders=0 (NO LEADER)
...
======================================================================
Summary: 800 passed, 12 failed, 31 warnings, 0 skipped (4.2s)
```
Failures sort first, then warnings, then passes. Within each group, higher severity checks appear first.
### JSON (`--format json`)
```json
{
"summary": {
"passed": 800,
"failed": 12,
"warned": 31,
"skipped": 0,
"total": 843,
"duration_seconds": 4.2
},
"checks": [
{
"id": "rqlite.responsive",
"name": "RQLite responding",
"subsystem": "rqlite",
"severity": 3,
"status": "pass",
"message": "responsive=true version=v8.36.16",
"node": "ubuntu@10.0.0.1"
}
]
}
```
## AI Analysis
When `--ai` is enabled, failures and warnings are sent to an LLM via OpenRouter for root-cause analysis.
```bash
# Use default model (kimi-k2.5)
orama inspect --env devnet --ai
# Use a different model
orama inspect --env devnet --ai --model openai/gpt-4o
# Pass API key directly
orama inspect --env devnet --ai --api-key sk-or-...
```
The API key can be set via:
1. `--api-key` flag
2. `OPENROUTER_API_KEY` environment variable
3. `.env` file in the current directory
The AI receives the full check results plus cluster metadata and returns a structured analysis with likely root causes and suggested fixes.
## Exit Codes
| Code | Meaning |
|------|---------|
| `0` | All checks passed (or only warnings). |
| `1` | At least one check failed. |
## Configuration
The inspector reads node definitions from a pipe-delimited config file (default: `scripts/remote-nodes.conf`).
### Format
```
# environment|user@host|role
devnet|ubuntu@1.2.3.4|node
devnet|ubuntu@5.6.7.8|nameserver-ns1
```
| Field | Description |
|-------|-------------|
| `environment` | Cluster name (`devnet`, `testnet`) |
| `user@host` | SSH credentials |
| `role` | `node` or `nameserver-ns1`, `nameserver-ns2`, etc. |
SSH keys are resolved from rootwallet (`rw vault ssh get <host>/<user> --priv`).
Blank lines and lines starting with `#` are ignored.
### Node Roles
- **`node`** — Regular cluster node. Runs RQLite, Olric, IPFS, WireGuard, namespaces.
- **`nameserver-*`** — DNS nameserver. Runs CoreDNS + Caddy in addition to base services. System checks verify nameserver-specific services.
## Examples
```bash
# Full cluster inspection
orama inspect --env devnet
# Check only networking
orama inspect --env devnet --subsystem wireguard,network
# Quick RQLite health check
orama inspect --env devnet --subsystem rqlite
# Verbose mode (shows collection progress)
orama inspect --env devnet --verbose
# JSON for scripting / piping
orama inspect --env devnet --format json | jq '.checks[] | select(.status == "fail")'
# AI-assisted debugging
orama inspect --env devnet --ai --model anthropic/claude-sonnet-4
# Custom config file
orama inspect --config /path/to/nodes.conf --env testnet
```

View File

@ -1,278 +0,0 @@
# Monitoring
Real-time cluster health monitoring via SSH. The system has two parts:
1. **`orama node report`** — Runs on each VPS node, collects all local health data, outputs JSON
2. **`orama monitor`** — Runs on your local machine, SSHes into nodes, aggregates results, displays via TUI or tables
## Architecture
```
Developer Machine VPS Nodes (via SSH)
┌──────────────────┐ ┌────────────────────┐
│ orama monitor │ ──SSH──────────>│ orama node report │
│ (TUI / tables) │ <──JSON─────── │ (local collector) │
│ │ └────────────────────┘
│ CollectOnce() │ ──SSH──────────>│ orama node report │
│ DeriveAlerts() │ <──JSON─────── │ (local collector) │
│ Render() │ └────────────────────┘
└──────────────────┘
```
Each node runs `orama node report --json` locally (no SSH to other nodes), collecting data via `os/exec` and `net/http` to localhost services. The monitor SSHes into all nodes in parallel, collects reports, then runs cross-node analysis to detect cluster-wide issues.
## Quick Start
```bash
# Interactive TUI (auto-refreshes every 30s)
orama monitor --env testnet
# Cluster overview table
orama monitor cluster --env testnet
# Alerts only
orama monitor alerts --env testnet
# Full JSON report (pipe to jq or feed to LLM)
orama monitor report --env testnet
```
## `orama monitor` — Local Orchestrator
### Usage
```
orama monitor [subcommand] --env <environment> [flags]
```
Without a subcommand, launches the interactive TUI.
### Global Flags
| Flag | Default | Description |
|------|---------|-------------|
| `--env` | *(required)* | Environment: `devnet`, `testnet`, `mainnet` |
| `--json` | `false` | Machine-readable JSON output (for one-shot subcommands) |
| `--node` | | Filter to a specific node host/IP |
| `--config` | `scripts/remote-nodes.conf` | Path to node configuration file |
### Subcommands
| Subcommand | Description |
|------------|-------------|
| `live` | Interactive TUI monitor (default when no subcommand) |
| `cluster` | Cluster overview: all nodes, roles, RQLite state, WG peers |
| `node` | Per-node health details (system, services, WG, DNS) |
| `service` | Service status matrix across all nodes |
| `mesh` | WireGuard mesh connectivity and peer details |
| `dns` | DNS health: CoreDNS, Caddy, TLS cert expiry, resolution |
| `namespaces` | Namespace health across nodes |
| `alerts` | Active alerts and warnings sorted by severity |
| `report` | Full JSON dump optimized for LLM consumption |
### Examples
```bash
# Cluster overview
orama monitor cluster --env testnet
# Cluster overview as JSON
orama monitor cluster --env testnet --json
# Alerts for all nodes
orama monitor alerts --env testnet
# Single-node deep dive
orama monitor node --env testnet --node 51.195.109.238
# Services for one node
orama monitor service --env testnet --node 51.195.109.238
# WireGuard mesh details
orama monitor mesh --env testnet
# DNS health
orama monitor dns --env testnet
# Namespace health
orama monitor namespaces --env testnet
# Full report for LLM analysis
orama monitor report --env testnet | jq .
# Single-node report
orama monitor report --env testnet --node 51.195.109.238
# Custom config file
orama monitor cluster --config /path/to/nodes.conf --env devnet
```
### Interactive TUI
The `live` subcommand (default) launches a full-screen terminal UI:
**Tabs:** Overview | Nodes | Services | WG Mesh | DNS | Namespaces | Alerts
**Key Bindings:**
| Key | Action |
|-----|--------|
| `Tab` / `Shift+Tab` | Switch tabs |
| `j` / `k` or `↑` / `↓` | Scroll content |
| `r` | Force refresh |
| `q` / `Ctrl+C` | Quit |
The TUI auto-refreshes every 30 seconds. A spinner shows during data collection. Colors indicate health: green = healthy, red = critical, yellow = warning.
### LLM Report Format
`orama monitor report` outputs structured JSON designed for AI consumption:
```json
{
"meta": {
"environment": "testnet",
"collected_at": "2026-02-16T12:00:00Z",
"duration_seconds": 3.2,
"node_count": 3,
"healthy_count": 3
},
"summary": {
"rqlite_leader": "10.0.0.1",
"rqlite_voters": "3/3",
"rqlite_raft_term": 42,
"wg_mesh_status": "all connected",
"service_health": "all nominal",
"critical_alerts": 0,
"warning_alerts": 1,
"info_alerts": 0
},
"alerts": [...],
"nodes": [
{
"host": "51.195.109.238",
"status": "healthy",
"collection_ms": 526,
"report": { ... }
}
]
}
```
## `orama node report` — VPS-Side Collector
Runs locally on a VPS node. Collects all system and service data in parallel and outputs a single JSON blob. Requires root privileges.
### Usage
```bash
# On a VPS node
sudo orama node report --json
```
### What It Collects
| Section | Data |
|---------|------|
| **system** | CPU count, load average, memory/disk/swap usage, OOM kills, kernel version, uptime, clock time |
| **services** | Systemd service states (active, restarts, memory, CPU, restart loop detection) for 10 core services |
| **rqlite** | Raft state, leader, term, applied/commit index, peers, strong read test, readyz, debug vars |
| **olric** | Service state, memberlist, member count, restarts, memory, log analysis |
| **ipfs** | Daemon/cluster state, swarm/cluster peers, repo size, versions, swarm key |
| **gateway** | HTTP health check, subsystem status |
| **wireguard** | Interface state, WG IP, peers, handshake ages, MTU, config permissions |
| **dns** | CoreDNS/Caddy state, port bindings, resolution tests, TLS cert expiry |
| **anyone** | Relay/client state, bootstrap progress, fingerprint |
| **network** | Internet reachability, TCP stats, retransmission rate, listening ports, UFW rules |
| **processes** | Zombie count, orphan orama processes, panic/fatal count in logs |
| **namespaces** | Per-namespace service probes (RQLite, Olric, Gateway) |
### Performance
All 12 collectors run in parallel with goroutines. Typical collection time is **< 1 second** per node. HTTP timeouts are 3 seconds, command timeouts are 4 seconds.
### Output Schema
```json
{
"timestamp": "2026-02-16T12:00:00Z",
"hostname": "ns1",
"version": "0.107.0",
"collect_ms": 526,
"errors": [],
"system": { "cpu_count": 4, "load_avg_1": 0.1, "mem_total_mb": 7937, ... },
"services": { "services": [...], "failed_units": [] },
"rqlite": { "responsive": true, "raft_state": "Leader", "term": 42, ... },
"olric": { "service_active": true, "memberlist_up": true, ... },
"ipfs": { "daemon_active": true, "swarm_peers": 2, ... },
"gateway": { "responsive": true, "http_status": 200, ... },
"wireguard": { "interface_up": true, "wg_ip": "10.0.0.1", "peers": [...], ... },
"dns": { "coredns_active": true, "caddy_active": true, "base_tls_days_left": 88, ... },
"anyone": { "relay_active": true, "bootstrapped": true, ... },
"network": { "internet_reachable": true, "ufw_active": true, ... },
"processes": { "zombie_count": 0, "orphan_count": 0, "panic_count": 0, ... },
"namespaces": []
}
```
## Alert Detection
Alerts are derived from cross-node analysis of all collected reports. Each alert has a severity level and identifies the affected subsystem and node.
### Alert Severities
| Severity | Examples |
|----------|----------|
| **critical** | SSH collection failed (node unreachable), no RQLite leader, split brain, RQLite unresponsive, WireGuard interface down, WG peer never handshaked, OOM kills, service failed, UFW inactive |
| **warning** | Strong read failed, memory > 90%, disk > 85%, stale WG handshake (> 3min), Raft term inconsistency, applied index lag > 100, restart loop detected, TLS cert < 14 days, DNS down, namespace gateway down, Anyone not bootstrapped, clock skew > 5s, binary version mismatch, internet unreachable, high TCP retransmission |
| **info** | Zombie processes, orphan orama processes, swap usage > 30% |
### Cross-Node Checks
These checks compare data across all nodes:
- **RQLite Leader**: Exactly one leader exists (no split brain)
- **Leader Agreement**: All nodes agree on the same leader address
- **Raft Term Consistency**: Term values within 1 of each other
- **Applied Index Lag**: Followers within 100 entries of the leader
- **WireGuard Peer Symmetry**: Each node has N-1 peers
- **Clock Skew**: Node clocks within 5 seconds of each other
- **Binary Version**: All nodes running the same version
- **WebRTC SFU Coverage**: SFU running on expected nodes (3/3) per namespace
- **WebRTC TURN Redundancy**: TURN running on expected nodes (2/3) per namespace
### Per-Node Checks
- **RQLite**: Responsive, ready, strong read
- **WireGuard**: Interface up, handshake freshness
- **System**: Memory, disk, load, OOM kills, swap
- **Services**: Systemd state, restart loops
- **DNS**: CoreDNS/Caddy up, TLS cert expiry, SOA resolution
- **Anyone**: Bootstrap progress
- **Processes**: Zombies, orphans, panics in logs
- **Namespaces**: Gateway and RQLite per namespace
- **WebRTC**: SFU and TURN service health (when provisioned)
- **Network**: UFW, internet reachability, TCP retransmission
## Monitor vs Inspector
Both tools check cluster health, but they serve different purposes:
| | `orama monitor` | `orama inspect` |
|---|---|---|
| **Data source** | `orama node report --json` (single SSH call per node) | 15+ SSH commands per node per subsystem |
| **Speed** | ~3-5s for full cluster | ~4-10s for full cluster |
| **Output** | TUI, tables, JSON | Tables, JSON |
| **Focus** | Real-time monitoring, alert detection | Deep diagnostic checks with pass/fail/warn |
| **AI support** | `report` subcommand for LLM input | `--ai` flag for inline analysis |
| **Use case** | "Is anything wrong right now?" | "What exactly is wrong and why?" |
Use `monitor` for day-to-day health checks and the interactive TUI. Use `inspect` for deep diagnostics when something is already known to be broken.
## Configuration
Uses the same `scripts/remote-nodes.conf` as the inspector. See [INSPECTOR.md](INSPECTOR.md#configuration) for format details.
## Prerequisites
Nodes must have the `orama` CLI installed (via `orama node install` or `upload-source.sh`). The monitor runs `sudo orama node report --json` over SSH, so the binary must be at `/usr/local/bin/orama` on each node.

View File

@ -1,233 +0,0 @@
# OramaOS Deployment Guide
OramaOS is a custom minimal Linux image built with Buildroot. It replaces the standard Ubuntu-based node deployment for mainnet, devnet, and testnet environments. Sandbox clusters remain on Ubuntu for development convenience.
## What is OramaOS?
OramaOS is a locked-down operating system designed specifically for Orama node operators. Key properties:
- **No SSH, no shell** — operators cannot access the filesystem or run commands on the machine
- **LUKS full-disk encryption** — the data partition is encrypted; the key is split via Shamir's Secret Sharing across peer nodes
- **Read-only rootfs** — the OS image uses SquashFS with dm-verity integrity verification
- **A/B partition updates** — signed OS images are applied atomically with automatic rollback on failure
- **Service sandboxing** — each service runs in its own Linux namespace with seccomp syscall filtering
- **Signed binaries** — all updates are cryptographically signed with the Orama rootwallet
## Architecture
```
Partition Layout:
/dev/sda1 — ESP (EFI System Partition, systemd-boot)
/dev/sda2 — rootfs-A (SquashFS, read-only, dm-verity)
/dev/sda3 — rootfs-B (standby, for A/B updates)
/dev/sda4 — data (LUKS2 encrypted, ext4)
Boot Flow:
systemd-boot → dm-verity rootfs → orama-agent → WireGuard → services
```
The **orama-agent** is the only root process. It manages:
- Boot sequence and LUKS key reconstruction
- WireGuard tunnel setup
- Service lifecycle (start, stop, restart in sandboxed namespaces)
- Command reception from the Gateway over WireGuard
- OS updates (download, verify signature, A/B swap, reboot)
## Enrollment Flow
OramaOS nodes join the cluster through an enrollment process (different from the Ubuntu `orama node install` flow):
### Step 1: Flash OramaOS to VPS
Download the OramaOS image and flash it to your VPS:
```bash
# Download image (URL provided upon acceptance)
wget https://releases.orama.network/oramaos-v1.0.0-amd64.qcow2
# Flash to VPS (provider-specific — Hetzner, Vultr, etc.)
# Most providers support uploading custom images via their dashboard
```
### Step 2: First Boot — Enrollment Mode
On first boot, the agent:
1. Generates a random 8-character registration code
2. Starts a temporary HTTP server on port 9999
3. Opens an outbound WebSocket to the Gateway
4. Waits for enrollment to complete
The registration code is displayed on the VPS console (if available) and served at `http://<vps-ip>:9999/`.
### Step 3: Run Enrollment from CLI
On your local machine (where you have the `orama` CLI and rootwallet):
```bash
# Generate an invite token on any existing cluster node
orama node invite --expiry 24h
# Enroll the OramaOS node
orama node enroll --node-ip <vps-public-ip> --token <invite-token> --gateway <gateway-url>
```
The enrollment command:
1. Fetches the registration code from the node (port 9999)
2. Sends the code + invite token to the Gateway
3. Gateway validates everything, assigns a WireGuard IP, and pushes config to the node
4. Node configures WireGuard, formats the LUKS-encrypted data partition
5. LUKS key is split via Shamir and distributed to peer vault-guardians
6. Services start in sandboxed namespaces
7. Port 9999 closes permanently
### Step 4: Verify
```bash
# Check the node is online and healthy
orama monitor report --env <env>
```
## Genesis Node
The first OramaOS node in a cluster is the **genesis node**. It has a special boot path because there are no peers yet for Shamir key distribution:
1. Genesis generates a LUKS key and encrypts the data partition
2. The LUKS key is encrypted with a rootwallet-derived key and stored on the unencrypted rootfs
3. On reboot (before enough peers exist), the operator must manually unlock:
```bash
orama node unlock --genesis --node-ip <wg-ip>
```
This command:
1. Fetches the encrypted genesis key from the node
2. Decrypts it using the rootwallet (`rw decrypt`)
3. Sends the decrypted LUKS key to the agent over WireGuard
Once 5+ peers have joined, the genesis node distributes Shamir shares to peers, deletes the local encrypted key, and transitions to normal Shamir-based unlock. After this transition, `orama node unlock` is no longer needed.
## Normal Reboot (Shamir Unlock)
When an enrolled OramaOS node reboots:
1. Agent starts, brings up WireGuard
2. Contacts peer vault-guardians over WireGuard
3. Fetches K Shamir shares (K = threshold, typically `max(3, N/3)`)
4. Reconstructs LUKS key via Lagrange interpolation over GF(256)
5. Decrypts and mounts data partition
6. Starts all services
7. Zeros key from memory
If not enough peers are available, the agent enters a degraded "waiting for peers" state and retries with exponential backoff (1s, 2s, 4s, 8s, 16s, max 5 retries per cycle).
## Node Management
Since OramaOS has no SSH, all management happens through the Gateway API:
```bash
# Check node status
curl "https://gateway.example.com/v1/node/status?node_id=<id>"
# Send a command (e.g., restart a service)
curl -X POST "https://gateway.example.com/v1/node/command?node_id=<id>" \
-H "Content-Type: application/json" \
-d '{"action":"restart","service":"rqlite"}'
# View logs
curl "https://gateway.example.com/v1/node/logs?node_id=<id>&service=gateway&lines=100"
# Graceful node departure
curl -X POST "https://gateway.example.com/v1/node/leave" \
-H "Content-Type: application/json" \
-d '{"node_id":"<id>"}'
```
The Gateway proxies these requests to the agent over WireGuard (port 9998). The agent is never directly accessible from the public internet.
## OS Updates
OramaOS uses an A/B partition scheme for atomic, rollback-safe updates:
1. Agent periodically checks for new versions
2. Downloads the signed image (P2P over WireGuard between nodes)
3. Verifies the rootwallet EVM signature against the embedded public key
4. Writes to the standby partition (if running from A, writes to B)
5. Sets systemd-boot to boot from B with `tries_left=3`
6. Reboots
7. If B boots successfully (agent starts, WG connects, services healthy): marks B as "good"
8. If B fails 3 times: systemd-boot automatically falls back to A
No operator intervention is needed for updates. Failed updates are automatically rolled back.
## Service Sandboxing
Each service on OramaOS runs in an isolated environment:
- **Mount namespace** — each service only sees its own data directory as writable; everything else is read-only
- **UTS namespace** — isolated hostname
- **Dedicated UID/GID** — each service runs as a different user (not root)
- **Seccomp filtering** — per-service syscall allowlist (initially in audit mode, then enforce mode)
Services and their sandbox profiles:
| Service | Writable Path | Extra Syscalls |
|---------|--------------|----------------|
| RQLite | `/opt/orama/.orama/data/rqlite` | fsync, fdatasync (Raft + SQLite WAL) |
| Olric | `/opt/orama/.orama/data/olric` | sendmmsg, recvmmsg (gossip) |
| IPFS | `/opt/orama/.orama/data/ipfs` | sendfile, splice (data transfer) |
| Gateway | `/opt/orama/.orama/data/gateway` | sendfile, splice (HTTP) |
| CoreDNS | `/opt/orama/.orama/data/coredns` | sendmmsg, recvmmsg (DNS) |
## OramaOS vs Ubuntu Deployment
| Feature | Ubuntu | OramaOS |
|---------|--------|---------|
| SSH access | Yes | No |
| Shell access | Yes | No |
| Disk encryption | No | LUKS2 (Shamir) |
| OS updates | Manual (`orama node upgrade`) | Automatic (signed, A/B) |
| Service isolation | systemd only | Namespaces + seccomp |
| Rootfs integrity | None | dm-verity |
| Binary signing | Optional | Required |
| Operator data access | Full | None |
| Environments | All (including sandbox) | Mainnet, devnet, testnet |
## Cleaning / Factory Reset
OramaOS nodes cannot be cleaned with the standard `orama node clean` command (no SSH access). Instead:
- **Graceful departure:** `orama node leave` via the Gateway API — stops services, redistributes Shamir shares, removes WG peer
- **Factory reset:** Reflash the OramaOS image on the VPS via the hosting provider's dashboard
- **Data is unrecoverable:** Since the LUKS key is distributed across peers, reflashing destroys all data permanently
## Troubleshooting
### Node stuck in enrollment mode
The node boots but enrollment never completes.
**Check:** Can you reach `http://<vps-ip>:9999/` from your machine? If not, the VPS firewall may be blocking port 9999.
**Fix:** Ensure port 9999 is open in the VPS provider's firewall. OramaOS opens it automatically via its internal firewall, but external provider firewalls (Hetzner, AWS security groups) must be configured separately.
### LUKS unlock fails (not enough peers)
After reboot, the node can't reconstruct its LUKS key.
**Check:** How many peer nodes are online? The node needs at least K peers (threshold) to be reachable over WireGuard.
**Fix:** Ensure enough cluster nodes are online. If this is the genesis node and fewer than 5 peers exist, use:
```bash
orama node unlock --genesis --node-ip <wg-ip>
```
### Update failed, node rolled back
The node applied an update but reverted to the previous version.
**Check:** The agent logs will show why the new partition failed to boot (accessible via `GET /v1/node/logs?service=agent`).
**Common causes:** Corrupted download (signature verification should catch this), hardware issue, or incompatible configuration.
### Services not starting after reboot
The node rebooted and LUKS unlocked, but services are unhealthy.
**Check:** `GET /v1/node/status` — which services are down?
**Fix:** Try restarting the specific service via `POST /v1/node/command` with `{"action":"restart","service":"<name>"}`. If the issue persists, check service logs.

View File

@ -1,208 +0,0 @@
# Sandbox: Ephemeral Hetzner Cloud Clusters
Spin up temporary 5-node Orama clusters on Hetzner Cloud for development and testing. Total cost: ~€0.04/hour.
## Quick Start
```bash
# One-time setup (API key, domain, floating IPs, SSH key)
orama sandbox setup
# Create a cluster (~8 minutes)
orama sandbox create --name my-feature
# Check health
orama sandbox status
# SSH into a node
orama sandbox ssh 1
# Deploy code changes
orama sandbox rollout
# Tear it down
orama sandbox destroy
```
## Prerequisites
### 1. Hetzner Cloud Account
Create a project at [console.hetzner.cloud](https://console.hetzner.cloud) and generate an API token with read/write permissions under **Security > API Tokens**.
### 2. Domain with Glue Records
You need a domain (or subdomain) that points to Hetzner Floating IPs. The `orama sandbox setup` wizard will guide you through this.
**Example:** Using `sbx.dbrs.space`
At your domain registrar:
1. Create glue records (Personal DNS Servers):
- `ns1.sbx.dbrs.space``<floating-ip-1>`
- `ns2.sbx.dbrs.space``<floating-ip-2>`
2. Set custom nameservers for `sbx.dbrs.space`:
- `ns1.sbx.dbrs.space`
- `ns2.sbx.dbrs.space`
DNS propagation can take up to 48 hours.
### 3. Binary Archive
Build the binary archive before creating a cluster:
```bash
orama build
```
This creates `/tmp/orama-<version>-linux-amd64.tar.gz` with all pre-compiled binaries.
## Setup
Run the interactive setup wizard:
```bash
orama sandbox setup
```
This will:
1. Prompt for your Hetzner API token and validate it
2. Ask for your sandbox domain
3. Create or reuse 2 Hetzner Floating IPs (~$0.005/hr each)
4. Create a firewall with sandbox rules
5. Create a rootwallet SSH entry (`sandbox/root`) if it doesn't exist
6. Upload the wallet-derived public key to Hetzner
7. Display DNS configuration instructions
Config is saved to `~/.orama/sandbox.yaml`.
## Commands
### `orama sandbox create [--name <name>]`
Creates a new 5-node cluster. If `--name` is omitted, a random name is generated (e.g., "swift-falcon").
**Cluster layout:**
- Nodes 1-2: Nameservers (CoreDNS + Caddy + all services)
- Nodes 3-5: Regular nodes (all services except CoreDNS)
**Phases:**
1. Provision 5 CX22 servers on Hetzner (parallel, ~90s)
2. Assign floating IPs to nameserver nodes (~10s)
3. Upload binary archive to all nodes (parallel, ~60s)
4. Install genesis node + generate invite tokens (~120s)
5. Join remaining 4 nodes (serial with health checks, ~180s)
6. Verify cluster health (~15s)
**One sandbox at a time.** Since the floating IPs are shared, only one sandbox can own the nameservers. Destroy the active sandbox before creating a new one.
### `orama sandbox destroy [--name <name>] [--force]`
Tears down a cluster:
1. Unassigns floating IPs
2. Deletes all 5 servers (parallel)
3. Removes state file
Use `--force` to skip confirmation.
### `orama sandbox list`
Lists all sandboxes with their status. Also checks Hetzner for orphaned servers that don't have a corresponding state file.
### `orama sandbox status [--name <name>]`
Shows per-node health including:
- Service status (active/inactive)
- RQLite role (Leader/Follower)
- Cluster summary (commit index, voter count)
### `orama sandbox rollout [--name <name>]`
Deploys code changes:
1. Uses the latest binary archive from `/tmp/` (run `orama build` first)
2. Pushes to all nodes
3. Rolling upgrade: followers first, leader last, 15s between nodes
### `orama sandbox ssh <node-number>`
Opens an interactive SSH session to a sandbox node (1-5).
```bash
orama sandbox ssh 1 # SSH into node 1 (genesis/ns1)
orama sandbox ssh 3 # SSH into node 3 (regular node)
```
## Architecture
### Floating IPs
Hetzner Floating IPs are persistent IPv4 addresses that can be reassigned between servers. They solve the DNS chicken-and-egg problem:
- Glue records at the registrar point to 2 Floating IPs (configured once)
- Each new sandbox assigns the Floating IPs to its nameserver nodes
- DNS works instantly — no propagation delay between clusters
### SSH Authentication
Sandbox uses a rootwallet-derived SSH key (`sandbox/root` vault entry), the same mechanism as production. The wallet must be unlocked (`rw unlock`) before running sandbox commands that use SSH. The public key is uploaded to Hetzner during setup and injected into every server at creation time.
### Server Naming
Servers: `sbx-<name>-<N>` (e.g., `sbx-swift-falcon-1` through `sbx-swift-falcon-5`)
### State Files
Sandbox state is stored at `~/.orama/sandboxes/<name>.yaml`. This tracks server IDs, IPs, roles, and cluster status.
## Cost
| Resource | Cost | Qty | Total |
|----------|------|-----|-------|
| CX22 (2 vCPU, 4GB) | €0.006/hr | 5 | €0.03/hr |
| Floating IPv4 | €0.005/hr | 2 | €0.01/hr |
| **Total** | | | **~€0.04/hr** |
Servers are billed per hour. Floating IPs are billed as long as they exist (even unassigned). Destroy the sandbox when not in use to save on server costs.
## Troubleshooting
### "sandbox not configured"
Run `orama sandbox setup` first.
### "no binary archive found"
Run `orama build` to create the binary archive.
### "sandbox X is already active"
Only one sandbox can be active at a time. Destroy it first:
```bash
orama sandbox destroy --name <name>
```
### Server creation fails
Check:
- Hetzner API token is valid and has read/write permissions
- You haven't hit Hetzner's server limit (default: 10 per project)
- The selected location has CX22 capacity
### Genesis install fails
SSH into the node to debug:
```bash
orama sandbox ssh 1
journalctl -u orama-node -f
```
The sandbox will be left in "error" state. You can destroy and recreate it.
### DNS not resolving
1. Verify glue records are configured at your registrar
2. Check propagation: `dig NS sbx.dbrs.space @8.8.8.8`
3. Propagation can take 24-48 hours for new domains
### Orphaned servers
If `orama sandbox list` shows orphaned servers, delete them manually at [console.hetzner.cloud](https://console.hetzner.cloud). Sandbox servers are labeled `orama-sandbox=<name>` for easy identification.

View File

@ -1,194 +0,0 @@
# Security Hardening
This document describes all security measures applied to the Orama Network, covering both Phase 1 (service hardening on existing Ubuntu nodes) and Phase 2 (OramaOS locked-down image).
## Phase 1: Service Hardening
These measures apply to all nodes (Ubuntu and OramaOS).
### Network Isolation
**CIDR Validation (Step 1.1)**
- WireGuard subnet restricted to `10.0.0.0/24` across all components: firewall rules, rate limiter, auth module, and WireGuard PostUp/PostDown iptables rules
- Prevents other tenants on shared VPS providers from bypassing the firewall via overlapping `10.x.x.x` ranges
**IPv6 Disabled (Step 1.2)**
- IPv6 disabled system-wide via sysctl: `net.ipv6.conf.all.disable_ipv6=1`
- Prevents services bound to `0.0.0.0` from being reachable via IPv6 (which had no firewall rules)
### Authentication
**Internal Endpoint Auth (Step 1.3)**
- `/v1/internal/wg/peers` and `/v1/internal/wg/peer/remove` now require cluster secret validation
- Peer removal additionally validates the request originates from a WireGuard subnet IP
**RQLite Authentication (Step 1.7)**
- RQLite runs with `-auth` flag pointing to a credentials file
- All RQLite HTTP requests include `Authorization: Basic <base64>` headers
- Credentials generated at cluster genesis, distributed to joining nodes via join response
- Both the central RQLite client wrapper and the standalone CoreDNS RQLite client send auth
**Olric Gossip Encryption (Step 1.8)**
- Olric memberlist uses a 32-byte encryption key for all gossip traffic
- Key generated at genesis, distributed via join response
- Prevents rogue nodes from joining the gossip ring and poisoning caches
- Note: encryption is all-or-nothing (coordinated restart required when enabling)
**IPFS Cluster TrustedPeers (Step 1.9)**
- IPFS Cluster `TrustedPeers` populated with actual cluster peer IDs (was `["*"]`)
- New peers added to TrustedPeers on all existing nodes during join
- Prevents unauthorized peers from controlling IPFS pinning
**Vault V1 Auth Enforcement (Step 1.14)**
- V1 push/pull endpoints require a valid session token when vault-guardian is configured
- Previously, auth was optional for backward compatibility — any WG peer could read/overwrite Shamir shares
### Token & Key Storage
**Refresh Token Hashing (Step 1.5)**
- Refresh tokens stored as SHA-256 hashes in RQLite (never plaintext)
- On lookup: hash the incoming token, query by hash
- On revocation: hash before revoking (both single-token and by-subject)
- Existing tokens invalidated on upgrade (users re-authenticate)
**API Key Hashing (Step 1.6)**
- API keys stored as HMAC-SHA256 hashes using a server-side secret
- HMAC secret generated at cluster genesis, stored in `~/.orama/secrets/api-key-hmac-secret`
- On lookup: compute HMAC, query by hash — fast enough for every request (unlike bcrypt)
- In-memory cache uses raw key as cache key (never persisted)
- During rolling upgrade: dual lookup (HMAC first, then raw as fallback) until all nodes upgraded
**TURN Secret Encryption (Step 1.15)**
- TURN shared secrets encrypted at rest in RQLite using AES-256-GCM
- Encryption key derived via HKDF from the cluster secret with purpose string `"turn-encryption"`
### TLS & Transport
**InsecureSkipVerify Fix (Step 1.10)**
- During node join, TLS verification uses TOFU (Trust On First Use)
- Invite token output includes the CA certificate fingerprint (SHA-256)
- Joining node verifies the server cert fingerprint matches before proceeding
- After join: CA cert stored locally for future connections
**WebSocket Origin Validation (Step 1.4)**
- All WebSocket upgraders validate the `Origin` header against the node's configured domain
- Non-browser clients (no Origin header) are still allowed
- Prevents cross-site WebSocket hijacking attacks
### Process Isolation
**Dedicated User (Step 1.11)**
- All services run as the `orama` user (not root)
- Caddy and CoreDNS get `AmbientCapabilities=CAP_NET_BIND_SERVICE` for ports 80/443 and 53
- WireGuard stays as root (kernel netlink requires it)
- vault-guardian already had proper hardening
**systemd Hardening (Step 1.12)**
- All service units include:
```ini
ProtectSystem=strict
ProtectHome=yes
NoNewPrivileges=yes
PrivateDevices=yes
ProtectKernelTunables=yes
ProtectKernelModules=yes
RestrictNamespaces=yes
ReadWritePaths=/opt/orama/.orama
```
- Applied to both template files (`pkg/environments/templates/`) and hardcoded unit generators (`pkg/environments/production/services.go`)
### Supply Chain
**Binary Signing (Step 1.13)**
- Build archives include `manifest.sig` — a rootwallet EVM signature of the manifest hash
- During install, the signature is verified against the embedded Orama public key
- Unsigned or tampered archives are rejected
## Phase 2: OramaOS
These measures apply only to OramaOS nodes (mainnet, devnet, testnet).
### Immutable OS
- **Read-only rootfs** — SquashFS with dm-verity integrity verification
- **No shell**`/bin/sh` symlinked to `/bin/false`, no bash/ash/ssh
- **No SSH** — OpenSSH not included in the image
- **Minimal packages** — only what's needed for systemd, cryptsetup, and the agent
### Full-Disk Encryption
- **LUKS2** with AES-XTS-Plain64 on the data partition
- **Shamir's Secret Sharing** over GF(256) — LUKS key split across peer vault-guardians
- **Adaptive threshold** — K = max(3, N/3) where N is the number of peers
- **Key zeroing** — LUKS key wiped from memory immediately after use
- **Malicious share detection** — fetch K+1 shares when possible, verify consistency
### Service Sandboxing
Each service runs in isolated Linux namespaces:
- **CLONE_NEWNS** — mount namespace (filesystem isolation)
- **CLONE_NEWUTS** — hostname namespace
- **Dedicated UID/GID** — each service has its own user
- **Seccomp filtering** — per-service syscall allowlist
Note: CLONE_NEWPID is intentionally omitted — it makes services PID 1 in their namespace, which changes signal semantics (SIGTERM ignored by default for PID 1).
### Signed Updates
- A/B partition scheme with systemd-boot and boot counting (`tries_left=3`)
- All updates signed with rootwallet EVM signature (secp256k1 + keccak256)
- Signer address: `0xb5d8a496c8b2412990d7D467E17727fdF5954afC`
- P2P distribution over WireGuard between nodes
- Automatic rollback on 3 consecutive boot failures
### Zero Operator Access
- Operators cannot read data on the machine (LUKS encrypted, no shell)
- Management only through Gateway API → agent over WireGuard
- All commands are logged and auditable
- No root access, no console access, no file system access
## Rollout Strategy
### Phase 1 Batches
```
Batch 1 (zero-risk, no restart):
- CIDR fix
- IPv6 disable
- Internal endpoint auth
- WebSocket origin check
Batch 2 (medium-risk, restart needed):
- Hash refresh tokens
- Hash API keys
- Binary signing
- Vault V1 auth enforcement
- TURN secret encryption
Batch 3 (high-risk, coordinated rollout):
- RQLite auth (followers first, leader last)
- Olric encryption (simultaneous restart)
- IPFS Cluster TrustedPeers
Batch 4 (infrastructure changes):
- InsecureSkipVerify fix
- Dedicated user
- systemd hardening
```
### Phase 2
1. Build and test OramaOS image in QEMU
2. Deploy to sandbox cluster alongside Ubuntu nodes
3. Verify interop and stability
4. Gradual migration: testnet → devnet → mainnet (one node at a time, maintaining Raft quorum)
## Verification
All changes verified on sandbox cluster before production deployment:
- `make test` — all unit tests pass
- `orama monitor report --env sandbox` — full cluster health
- Manual endpoint testing (e.g., curl without auth → 401)
- Security-specific checks (IPv6 listeners, RQLite auth, binary signatures)

View File

@ -1,374 +0,0 @@
# Serverless Functions
Orama Network runs serverless functions as sandboxed WebAssembly (WASM) modules. Functions are written in Go, compiled to WASM with TinyGo, and executed in an isolated wazero runtime with configurable memory limits and timeouts.
Functions receive input via **stdin** (JSON) and return output via **stdout** (JSON). They can also access Orama services — database, cache, storage, secrets, PubSub, and HTTP — through **host functions** injected by the runtime.
## Quick Start
```bash
# 1. Scaffold a new function
orama function init my-function
# 2. Edit your handler
cd my-function
# edit function.go
# 3. Build to WASM
orama function build
# 4. Deploy
orama function deploy
# 5. Invoke
orama function invoke my-function --data '{"name": "World"}'
# 6. View logs
orama function logs my-function
```
## Project Structure
```
my-function/
├── function.go # Handler code
└── function.yaml # Configuration
```
### function.yaml
```yaml
name: my-function # Required. Letters, digits, hyphens, underscores.
public: false # Allow unauthenticated invocation (default: false)
memory: 64 # Memory limit in MB (1-256, default: 64)
timeout: 30 # Execution timeout in seconds (1-300, default: 30)
retry:
count: 0 # Retry attempts on failure (default: 0)
delay: 5 # Seconds between retries (default: 5)
env: # Environment variables (accessible via get_env)
MY_VAR: "value"
```
### function.go (minimal)
```go
package main
import (
	"encoding/json"
	"io"
	"os"
)
// main reads a JSON payload from stdin, processes it, and writes a JSON
// response to stdout — the standard Orama function contract.
func main() {
	// io.ReadAll replaces the manual read loop: it handles short reads
	// correctly and surfaces read errors instead of discarding them.
	input, err := io.ReadAll(os.Stdin)
	if err != nil {
		os.Stderr.WriteString("read stdin: " + err.Error())
		os.Exit(1)
	}

	// Report malformed input instead of silently ignoring the decode
	// error — functions are documented to receive JSON on stdin.
	var payload map[string]interface{}
	if err := json.Unmarshal(input, &payload); err != nil {
		os.Stderr.WriteString("invalid JSON input: " + err.Error())
		os.Exit(1)
	}

	// Process and return JSON output via stdout.
	response := map[string]interface{}{
		"result": "Hello!",
	}
	output, err := json.Marshal(response)
	if err != nil {
		os.Stderr.WriteString("encode response: " + err.Error())
		os.Exit(1)
	}
	os.Stdout.Write(output)
}
```
### Building
Functions are compiled to WASM using [TinyGo](https://tinygo.org/):
```bash
# Using the CLI (recommended)
orama function build
# Or manually
tinygo build -o function.wasm -target wasi function.go
```
## Host Functions API
Host functions let your WASM code interact with Orama services. They are imported from the `"env"` or `"host"` module (both work) and use a pointer/length ABI for string parameters.
All host functions are registered at runtime by the engine. They are available to every function without additional configuration.
### Context
| Function | Description |
|----------|-------------|
| `get_caller_wallet()` → string | Wallet address of the caller (from JWT) |
| `get_request_id()` → string | Unique invocation ID |
| `get_env(key)` → string | Environment variable from function.yaml |
| `get_secret(name)` → string | Decrypted secret value (see [Managing Secrets](#managing-secrets)) |
### Database (RQLite)
| Function | Description |
|----------|-------------|
| `db_query(sql, argsJSON)` → JSON | Execute SELECT query. Args as JSON array. Returns JSON array of row objects. |
| `db_execute(sql, argsJSON)` → int | Execute INSERT/UPDATE/DELETE. Returns affected row count. |
Example query from WASM:
```
db_query("SELECT push_token, device_type FROM devices WHERE user_id = ?", '["user123"]')
→ [{"push_token": "abc...", "device_type": "ios"}]
```
### Cache (Olric Distributed Cache)
| Function | Description |
|----------|-------------|
| `cache_get(key)` → bytes | Get cached value by key. Returns empty on miss. |
| `cache_set(key, value, ttl)` | Store value with TTL in seconds. |
| `cache_incr(key)` → int64 | Atomically increment by 1 (init to 0 if missing). |
| `cache_incr_by(key, delta)` → int64 | Atomically increment by delta. |
### HTTP
| Function | Description |
|----------|-------------|
| `http_fetch(method, url, headersJSON, body)` → JSON | Make outbound HTTP request. Headers as JSON object. Returns `{"status": 200, "headers": {...}, "body": "..."}`. Timeout: 30s. |
### PubSub
| Function | Description |
|----------|-------------|
| `pubsub_publish(topic, dataJSON)` → bool | Publish message to a PubSub topic. Returns true on success. |
### Logging
| Function | Description |
|----------|-------------|
| `log_info(message)` | Log info-level message (captured in invocation logs). |
| `log_error(message)` | Log error-level message. |
## Managing Secrets
Secrets are encrypted at rest (AES-256-GCM) and scoped to your namespace. Functions read them via `get_secret("name")` at runtime.
### CLI Commands
```bash
# Set a secret (inline value)
orama function secrets set APNS_KEY_ID "ABC123DEF"
# Set a secret from a file (useful for PEM keys, certificates)
orama function secrets set APNS_AUTH_KEY --from-file ./AuthKey_ABC123.p8
# List all secret names (values are never shown)
orama function secrets list
# Delete a secret
orama function secrets delete APNS_KEY_ID
# Delete without confirmation
orama function secrets delete APNS_KEY_ID --force
```
### How It Works
1. **You set secrets** via the CLI → encrypted and stored in the database
2. **Functions read secrets** at runtime via `get_secret("name")` → decrypted on demand
3. **Namespace isolation** → each namespace has its own secret store; functions in namespace A cannot read secrets from namespace B
## PubSub Triggers
Triggers let functions react to events automatically. When a message is published to a PubSub topic, all functions with a trigger on that topic are invoked asynchronously.
### CLI Commands
```bash
# Add a trigger: invoke "call-push-handler" when messages hit "calls:invite"
orama function triggers add call-push-handler --topic calls:invite
# List triggers for a function
orama function triggers list call-push-handler
# Delete a trigger
orama function triggers delete call-push-handler <trigger-id>
```
### Trigger Event Payload
When triggered via PubSub, the function receives this JSON via stdin:
```json
{
"topic": "calls:invite",
"data": { ... },
"namespace": "my-namespace",
"trigger_depth": 1,
"timestamp": 1708972800
}
```
### Depth Limiting
To prevent infinite loops (function A publishes to topic → triggers function A again), trigger depth is tracked. Maximum depth is **5**. If a function's output triggers another function, `trigger_depth` increments. At depth 5, no further triggers fire.
## Function Lifecycle
### Versioning
Each deploy creates a new version. The WASM binary is stored in **IPFS** (content-addressed) and metadata is stored in **RQLite**.
```bash
# List versions
orama function versions my-function
# Invoke a specific version
curl -X POST .../v1/functions/my-function@2/invoke
```
### Invocation Logging
Every invocation is logged with: request ID, duration, status (success/error/timeout), input/output size, and any `log_info`/`log_error` messages.
```bash
orama function logs my-function
```
## CLI Reference
| Command | Description |
|---------|-------------|
| `orama function init <name>` | Scaffold a new function project |
| `orama function build [dir]` | Compile Go to WASM |
| `orama function deploy [dir]` | Deploy WASM to the network |
| `orama function invoke <name> --data <json>` | Invoke a function |
| `orama function list` | List deployed functions |
| `orama function get <name>` | Get function details |
| `orama function delete <name>` | Delete a function |
| `orama function logs <name>` | View invocation logs |
| `orama function versions <name>` | List function versions |
| `orama function secrets set <name> <value>` | Set an encrypted secret |
| `orama function secrets list` | List secret names |
| `orama function secrets delete <name>` | Delete a secret |
| `orama function triggers add <fn> --topic <t>` | Add PubSub trigger |
| `orama function triggers list <fn>` | List triggers |
| `orama function triggers delete <fn> <id>` | Delete a trigger |
## HTTP API Reference
| Method | Endpoint | Description |
|--------|----------|-------------|
| POST | `/v1/functions` | Deploy function (multipart/form-data) |
| GET | `/v1/functions` | List functions |
| GET | `/v1/functions/{name}` | Get function info |
| DELETE | `/v1/functions/{name}` | Delete function |
| POST | `/v1/functions/{name}/invoke` | Invoke function |
| GET | `/v1/functions/{name}/versions` | List versions |
| GET | `/v1/functions/{name}/logs` | Get logs |
| WS | `/v1/functions/{name}/ws` | WebSocket invoke (streaming) |
| PUT | `/v1/functions/secrets` | Set a secret |
| GET | `/v1/functions/secrets` | List secret names |
| DELETE | `/v1/functions/secrets/{name}` | Delete a secret |
| POST | `/v1/functions/{name}/triggers` | Add PubSub trigger |
| GET | `/v1/functions/{name}/triggers` | List triggers |
| DELETE | `/v1/functions/{name}/triggers/{id}` | Delete trigger |
| POST | `/v1/invoke/{namespace}/{name}` | Direct invoke (alt endpoint) |
## Example: Call Push Handler
A real-world function that sends VoIP push notifications when a call invite is published to PubSub:
```yaml
# function.yaml
name: call-push-handler
memory: 128
timeout: 30
```
```go
// function.go — triggered by PubSub on "calls:invite"
package main
import (
"encoding/json"
"os"
)
// This function:
// 1. Receives a call invite event from PubSub trigger
// 2. Queries the database for the callee's device info
// 3. Reads push notification credentials from secrets
// 4. Sends a push notification via http_fetch
func main() {
// Read PubSub trigger event from stdin
var input []byte
buf := make([]byte, 4096)
for {
n, err := os.Stdin.Read(buf)
if n > 0 {
input = append(input, buf[:n]...)
}
if err != nil {
break
}
}
// Parse the trigger event wrapper
var event struct {
Topic string `json:"topic"`
Data json.RawMessage `json:"data"`
}
json.Unmarshal(input, &event)
// Parse the actual call invite data
var invite struct {
CalleeID string `json:"calleeId"`
CallerName string `json:"callerName"`
CallType string `json:"callType"`
}
json.Unmarshal(event.Data, &invite)
// At this point, the function would use host functions:
//
// 1. db_query("SELECT push_token, device_type FROM devices WHERE user_id = ?",
// json.Marshal([]string{invite.CalleeID}))
//
// 2. get_secret("FCM_SERVER_KEY") for Android push
// get_secret("APNS_KEY_PEM") for iOS push
//
// 3. http_fetch("POST", "https://fcm.googleapis.com/v1/...", headers, body)
//
// 4. log_info("Push sent to " + invite.CalleeID)
//
// Note: Host functions use the WASM ABI (pointer/length).
// A Go SDK for ergonomic access is planned.
response := map[string]interface{}{
"status": "sent",
"callee": invite.CalleeID,
}
output, _ := json.Marshal(response)
os.Stdout.Write(output)
}
```
Deploy and wire the trigger:
```bash
orama function build
orama function deploy
# Set push notification secrets
orama function secrets set FCM_SERVER_KEY "your-fcm-key"
orama function secrets set APNS_KEY_PEM --from-file ./AuthKey.p8
orama function secrets set APNS_KEY_ID "ABC123"
orama function secrets set APNS_TEAM_ID "TEAM456"
# Wire the PubSub trigger
orama function triggers add call-push-handler --topic calls:invite
```

View File

@ -1,291 +0,0 @@
# WebRTC Integration
Real-time voice, video, and data channels for Orama Network namespaces.
## Architecture
```
Client A Client B
│ │
│ 1. Get TURN credentials (REST) │
│ 2. Connect WebSocket (signaling) │
│ 3. Exchange SDP/ICE via SFU │
│ │
▼ ▼
┌──────────┐ UDP relay ┌──────────┐
│ TURN │◄──────────────────►│ TURN │
│ Server │ (public IPs) │ Server │
│ Node 1 │ │ Node 2 │
└────┬─────┘ └────┬─────┘
│ WireGuard │ WireGuard
▼ ▼
┌──────────────────────────────────────────┐
│ SFU Servers (3 nodes) │
│ - WebSocket signaling (WireGuard only) │
│ - Pion WebRTC (RTP forwarding) │
│ - Room management │
│ - Track publish/subscribe │
└──────────────────────────────────────────┘
```
**Key design decisions:**
- **TURN-shielded**: SFU binds only to WireGuard IPs. All client media flows through TURN relay.
- **`iceTransportPolicy: relay`** enforced server-side — no direct peer connections.
- **Opt-in per namespace** via `orama namespace enable webrtc`.
- **SFU on all 3 nodes**, **TURN on 2 of 3 nodes** (redundancy without over-provisioning).
- **Separate port allocation** from existing namespace services.
## Prerequisites
- Namespace must be provisioned with a ready cluster (RQLite + Olric + Gateway running).
- Command must be run on a cluster node (uses internal gateway endpoint).
## Enable / Disable
```bash
# Enable WebRTC for a namespace
orama namespace enable webrtc --namespace myapp
# Check status
orama namespace webrtc-status --namespace myapp
# Disable WebRTC (stops services, deallocates ports, removes DNS)
orama namespace disable webrtc --namespace myapp
```
### What happens on enable:
1. Generates a per-namespace TURN shared secret (32 bytes, crypto/rand)
2. Inserts `namespace_webrtc_config` DB record
3. Allocates WebRTC port blocks on each node (SFU signaling + media range, TURN relay range)
4. Spawns TURN on 2 nodes (selected by capacity)
5. Spawns SFU on all 3 nodes
6. Creates DNS A records: `turn.ns-{name}.{baseDomain}` pointing to TURN node public IPs
7. Updates cluster state on all nodes (for cold-boot restoration)
### What happens on disable:
1. Stops SFU on all 3 nodes
2. Stops TURN on 2 nodes
3. Deallocates all WebRTC ports
4. Deletes TURN DNS records
5. Cleans up DB records (`namespace_webrtc_config`, `webrtc_rooms`)
6. Updates cluster state
## Client Integration (JavaScript)
### Authentication
All WebRTC endpoints require authentication. Use one of:
```
# Option A: API Key via header (recommended)
X-API-Key: <your-namespace-api-key>
# Option B: API Key via Authorization header
Authorization: ApiKey <your-namespace-api-key>
# Option C: JWT Bearer token
Authorization: Bearer <jwt>
```
### 1. Get TURN Credentials
```javascript
const response = await fetch('https://ns-myapp.orama-devnet.network/v1/webrtc/turn/credentials', {
method: 'POST',
headers: { 'X-API-Key': apiKey }
});
const { uris, username, password, ttl } = await response.json();
// uris: [
// "turn:turn.ns-myapp.orama-devnet.network:3478?transport=udp",
// "turn:turn.ns-myapp.orama-devnet.network:3478?transport=tcp",
// "turns:turn.ns-myapp.orama-devnet.network:5349"
// ]
// username: "{expiry_unix}:{namespace}"
// password: HMAC-SHA1 derived (base64)
// ttl: 600 (seconds)
```
### 2. Create PeerConnection
```javascript
const pc = new RTCPeerConnection({
iceServers: [{ urls: uris, username, credential: password }],
iceTransportPolicy: 'relay' // enforced by SFU
});
```
### 3. Connect Signaling WebSocket
```javascript
const ws = new WebSocket(
`wss://ns-myapp.orama-devnet.network/v1/webrtc/signal?room=${roomId}&api_key=${apiKey}`
);
ws.onmessage = (event) => {
const msg = JSON.parse(event.data);
switch (msg.type) {
case 'offer': handleOffer(msg); break;
case 'answer': handleAnswer(msg); break;
case 'ice-candidate': handleICE(msg); break;
case 'peer-joined': handleJoin(msg); break;
case 'peer-left': handleLeave(msg); break;
case 'turn-credentials':
case 'refresh-credentials':
updateTURN(msg); // SFU sends refreshed creds at 80% TTL
break;
case 'server-draining':
reconnect(); // SFU shutting down, reconnect to another node
break;
}
};
```
### 4. Room Management (REST)
```javascript
const headers = { 'X-API-Key': apiKey, 'Content-Type': 'application/json' };
// Create room
await fetch('/v1/webrtc/rooms', {
method: 'POST',
headers,
body: JSON.stringify({ room_id: 'my-room' })
});
// List rooms
const rooms = await fetch('/v1/webrtc/rooms', { headers });
// Close room
await fetch('/v1/webrtc/rooms?room_id=my-room', {
method: 'DELETE',
headers
});
```
## API Reference
### REST Endpoints
| Method | Path | Auth | Description |
|--------|------|------|-------------|
| POST | `/v1/webrtc/turn/credentials` | JWT/API key | Get TURN relay credentials |
| GET/WS | `/v1/webrtc/signal` | JWT/API key | WebSocket signaling |
| GET | `/v1/webrtc/rooms` | JWT/API key | List rooms |
| POST | `/v1/webrtc/rooms` | JWT/API key (owner) | Create room |
| DELETE | `/v1/webrtc/rooms` | JWT/API key (owner) | Close room |
### Signaling Messages
| Type | Direction | Description |
|------|-----------|-------------|
| `join` | Client → SFU | Join room |
| `offer` | Client ↔ SFU | SDP offer |
| `answer` | Client ↔ SFU | SDP answer |
| `ice-candidate` | Client ↔ SFU | ICE candidate |
| `leave` | Client → SFU | Leave room |
| `peer-joined` | SFU → Client | New peer notification |
| `peer-left` | SFU → Client | Peer departure |
| `turn-credentials` | SFU → Client | Initial TURN credentials |
| `refresh-credentials` | SFU → Client | Refreshed credentials (at 80% TTL) |
| `server-draining` | SFU → Client | SFU shutting down |
## Port Allocation
WebRTC uses a **separate port allocation system** from the core namespace ports:
| Service | Port Range | Protocol | Per Namespace |
|---------|-----------|----------|---------------|
| SFU signaling | 30000-30099 | TCP (WireGuard only) | 1 port |
| SFU media (RTP) | 20000-29999 | UDP (WireGuard only) | 500 ports |
| TURN listen | 3478 | UDP + TCP | fixed |
| TURNS (TLS) | 5349 | TCP | fixed |
| TURN relay | 49152-65535 | UDP | 800 ports |
## TURN Credential Protocol
- Credentials use HMAC-SHA1 with a per-namespace shared secret
- Username format: `{expiry_unix}:{namespace}`
- Password: `base64(HMAC-SHA1(shared_secret, username))`
- Default TTL: 600 seconds (10 minutes)
- SFU proactively sends `refresh-credentials` at 80% of TTL (8 minutes)
- Clients should update ICE servers on receiving refresh
## TURNS TLS Certificate
TURNS (port 5349) uses TLS. Certificate provisioning:
1. **Let's Encrypt (primary)**: On TURN spawn, the TURN domain is added to the local Caddy instance's Caddyfile. Caddy provisions a Let's Encrypt cert via DNS-01 ACME challenge (using the orama DNS provider). TURN reads the cert from Caddy's storage.
2. **Self-signed (fallback)**: If Caddy cert provisioning fails (timeout, Caddy not running), a self-signed cert is generated with the node's public IP as SAN.
Caddy auto-renews Let's Encrypt certs at ~60 days. TURN picks up renewed certs on restart.
## Monitoring
```bash
# Check WebRTC status
orama namespace webrtc-status --namespace myapp
# Monitor report includes SFU/TURN status
orama monitor report --env devnet
# Inspector checks WebRTC health
orama inspector --env devnet
```
The monitoring report includes per-namespace `sfu_up` and `turn_up` fields. The inspector runs cross-node checks to verify SFU coverage (3 nodes) and TURN redundancy (2 nodes).
## Debugging
```bash
# SFU logs
journalctl -u orama-namespace-sfu@myapp -f
# TURN logs
journalctl -u orama-namespace-turn@myapp -f
# Check service status
systemctl status orama-namespace-sfu@myapp
systemctl status orama-namespace-turn@myapp
```
## Security Model
- **Forced relay**: `iceTransportPolicy: relay` enforced server-side. Clients cannot bypass TURN.
- **HMAC credentials**: Per-namespace TURN shared secret. Credentials expire after 10 minutes.
- **Namespace isolation**: Each namespace has its own TURN secret, port ranges, and rooms.
- **Authentication required**: All WebRTC endpoints require API key or JWT (`X-API-Key` header, `Authorization: ApiKey`, or `Authorization: Bearer`).
- **Room management**: Creating/closing rooms requires namespace ownership.
- **SFU on WireGuard only**: SFU binds to 10.0.0.x, never 0.0.0.0. Only reachable via TURN relay.
- **Permissions-Policy**: `camera=(self), microphone=(self)` — only same-origin can access media devices.
## Firewall
When WebRTC is enabled, the following ports are opened via UFW on TURN nodes:
| Port | Protocol | Purpose |
|------|----------|---------|
| 3478 | UDP | TURN standard |
| 3478 | TCP | TURN TCP fallback (for clients behind UDP-blocking firewalls) |
| 5349 | TCP | TURNS — TURN over TLS (encrypted, works through strict firewalls/DPI) |
| 49152-65535 | UDP | TURN relay range (allocated per namespace) |
SFU ports are NOT opened in the firewall — they are WireGuard-internal only.
## Database Tables
| Table | Purpose |
|-------|---------|
| `namespace_webrtc_config` | Per-namespace WebRTC config (enabled, TURN secret, node counts) |
| `webrtc_rooms` | Room-to-SFU-node affinity |
| `webrtc_port_allocations` | SFU/TURN port tracking |
## Cold Boot Recovery
On node restart, the cluster state file (`cluster_state.json`) includes `has_sfu`, `has_turn`, and port allocation data. The restore process:
1. Core services restore first: RQLite → Olric → Gateway
2. If `has_turn` is set: fetches TURN shared secret from DB, spawns TURN
3. If `has_sfu` is set: fetches WebRTC config from DB, spawns SFU with TURN server list
If the DB is unavailable during restore, SFU/TURN restoration is skipped with a warning log. They will be restored on the next successful DB connection.

View File

@ -1,241 +0,0 @@
//go:build e2e
package shared_test
import (
"bytes"
"encoding/json"
"net/http"
"strings"
"testing"
"time"
e2e "github.com/DeBrosOfficial/network/e2e"
)
// turnCredentialsResponse is the expected response from the TURN credentials endpoint.
//
// NOTE(review): the field names asserted here ("urls", "credential") differ from
// the client integration docs, which show "uris" and "password" — confirm which
// shape the gateway actually emits and reconcile the two.
type turnCredentialsResponse struct {
	URLs       []string `json:"urls"`       // TURN/TURNS URIs for RTCPeerConnection iceServers
	Username   string   `json:"username"`   // per docs: "{expiry_unix}:{namespace}" (time-limited)
	Credential string   `json:"credential"` // per docs: HMAC-SHA1 of username with the namespace shared secret (base64)
	TTL        int      `json:"ttl"`        // credential lifetime in seconds
}
// TestWebRTC_TURNCredentials_RequiresAuth verifies that the TURN credentials
// endpoint rejects requests that carry no authentication.
func TestWebRTC_TURNCredentials_RequiresAuth(t *testing.T) {
	e2e.SkipIfMissingGateway(t)

	endpoint := e2e.GetGatewayURL() + "/v1/webrtc/turn/credentials"
	httpClient := e2e.NewHTTPClient(10 * time.Second)

	// Deliberately no Authorization / X-API-Key header.
	req, err := http.NewRequest(http.MethodPost, endpoint, nil)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}

	resp, err := httpClient.Do(req)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	if got := resp.StatusCode; got != http.StatusUnauthorized {
		t.Fatalf("expected 401 Unauthorized, got %d", got)
	}
}
// TestWebRTC_TURNCredentials_ValidResponse verifies that authenticated requests to the
// TURN credentials endpoint return a valid credential structure.
func TestWebRTC_TURNCredentials_ValidResponse(t *testing.T) {
	e2e.SkipIfMissingGateway(t)

	apiKey := e2e.GetAPIKey()
	if apiKey == "" {
		t.Skip("no API key configured")
	}

	endpoint := e2e.GetGatewayURL() + "/v1/webrtc/turn/credentials"
	httpClient := e2e.NewHTTPClient(10 * time.Second)

	req, err := http.NewRequest(http.MethodPost, endpoint, nil)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}
	req.Header.Set("Authorization", "Bearer "+apiKey)

	resp, err := httpClient.Do(req)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		t.Fatalf("expected 200 OK, got %d", resp.StatusCode)
	}

	var creds turnCredentialsResponse
	if err := json.NewDecoder(resp.Body).Decode(&creds); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}

	// Every field of the credential payload must be populated.
	switch {
	case len(creds.URLs) == 0:
		t.Fatal("expected at least one TURN URL")
	case creds.Username == "":
		t.Fatal("expected non-empty username")
	case creds.Credential == "":
		t.Fatal("expected non-empty credential")
	case creds.TTL <= 0:
		t.Fatalf("expected positive TTL, got %d", creds.TTL)
	}
}
// TestWebRTC_Rooms_RequiresAuth verifies that the rooms endpoint rejects unauthenticated requests.
func TestWebRTC_Rooms_RequiresAuth(t *testing.T) {
	e2e.SkipIfMissingGateway(t)

	// No credentials attached: the gateway must refuse the request.
	req, err := http.NewRequest(http.MethodGet, e2e.GetGatewayURL()+"/v1/webrtc/rooms", nil)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}

	resp, err := e2e.NewHTTPClient(10 * time.Second).Do(req)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	if got := resp.StatusCode; got != http.StatusUnauthorized {
		t.Fatalf("expected 401 Unauthorized, got %d", got)
	}
}
// TestWebRTC_Signal_RequiresAuth verifies that the signaling WebSocket rejects
// unauthenticated connections.
func TestWebRTC_Signal_RequiresAuth(t *testing.T) {
	e2e.SkipIfMissingGateway(t)

	// A plain (non-upgraded) HTTP GET is sufficient here: without auth the
	// gateway should answer 401 before any WebSocket upgrade takes place.
	endpoint := e2e.GetGatewayURL() + "/v1/webrtc/signal?room=test-room"
	req, err := http.NewRequest(http.MethodGet, endpoint, nil)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}

	resp, err := e2e.NewHTTPClient(10 * time.Second).Do(req)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	if got := resp.StatusCode; got != http.StatusUnauthorized {
		t.Fatalf("expected 401, got %d", got)
	}
}
// TestWebRTC_Rooms_CreateAndList verifies room creation and listing with proper auth.
//
// Fixes over the previous version:
//   - The DELETE cleanup is registered via defer immediately after a successful
//     create, so the room is removed even when the list step fails with
//     t.Fatalf (Fatalf runs deferred calls via runtime.Goexit). Previously a
//     failing list left a stale room behind on every run.
//   - The json.Marshal error is no longer silently discarded.
func TestWebRTC_Rooms_CreateAndList(t *testing.T) {
	e2e.SkipIfMissingGateway(t)
	gatewayURL := e2e.GetGatewayURL()
	apiKey := e2e.GetAPIKey()
	if apiKey == "" {
		t.Skip("no API key configured")
	}
	client := e2e.NewHTTPClient(10 * time.Second)
	roomID := e2e.GenerateUniqueID("e2e-webrtc-room")

	// Create room.
	createBody, err := json.Marshal(map[string]string{"room_id": roomID})
	if err != nil {
		t.Fatalf("failed to marshal create body: %v", err)
	}
	req, err := http.NewRequest("POST", gatewayURL+"/v1/webrtc/rooms", bytes.NewReader(createBody))
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}
	req.Header.Set("Authorization", "Bearer "+apiKey)
	req.Header.Set("Content-Type", "application/json")
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("create room failed: %v", err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
		t.Fatalf("expected 200/201, got %d", resp.StatusCode)
	}

	// Clean up: delete the room no matter how the rest of the test ends.
	// Cleanup failures are reported with Errorf (not Fatalf) so they do not
	// mask the primary test result.
	defer func() {
		delReq, err := http.NewRequest("DELETE", gatewayURL+"/v1/webrtc/rooms?room_id="+roomID, nil)
		if err != nil {
			t.Errorf("failed to create delete request: %v", err)
			return
		}
		delReq.Header.Set("Authorization", "Bearer "+apiKey)
		delResp, err := client.Do(delReq)
		if err != nil {
			t.Errorf("delete room failed: %v", err)
			return
		}
		delResp.Body.Close()
	}()

	// List rooms.
	req, err = http.NewRequest("GET", gatewayURL+"/v1/webrtc/rooms", nil)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}
	req.Header.Set("Authorization", "Bearer "+apiKey)
	resp, err = client.Do(req)
	if err != nil {
		t.Fatalf("list rooms failed: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("expected 200, got %d", resp.StatusCode)
	}
}
// TestWebRTC_PermissionsPolicy verifies the Permissions-Policy header allows camera and microphone.
func TestWebRTC_PermissionsPolicy(t *testing.T) {
	e2e.SkipIfMissingGateway(t)

	apiKey := e2e.GetAPIKey()
	if apiKey == "" {
		t.Skip("no API key configured")
	}

	req, err := http.NewRequest(http.MethodGet, e2e.GetGatewayURL()+"/v1/webrtc/rooms", nil)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}
	req.Header.Set("Authorization", "Bearer "+apiKey)

	resp, err := e2e.NewHTTPClient(10 * time.Second).Do(req)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	policy := resp.Header.Get("Permissions-Policy")
	if policy == "" {
		t.Skip("Permissions-Policy header not set")
	}

	// Both media directives must be present and restricted to same-origin.
	for _, directive := range []string{"camera=(self)", "microphone=(self)"} {
		if !strings.Contains(policy, directive) {
			t.Errorf("Permissions-Policy missing %s, got: %s", directive, policy)
		}
	}
}

View File

@ -1,182 +0,0 @@
module github.com/DeBrosOfficial/network
go 1.24.6
require (
github.com/charmbracelet/bubbles v0.20.0
github.com/charmbracelet/bubbletea v1.2.4
github.com/charmbracelet/lipgloss v1.0.0
github.com/coredns/caddy v1.1.4
github.com/coredns/coredns v1.12.1
github.com/ethereum/go-ethereum v1.13.14
github.com/go-chi/chi/v5 v5.2.3
github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674
github.com/libp2p/go-libp2p v0.41.1
github.com/libp2p/go-libp2p-pubsub v0.14.2
github.com/mackerelio/go-osstat v0.2.6
github.com/mattn/go-sqlite3 v1.14.32
github.com/mdp/qrterminal/v3 v3.2.1
github.com/miekg/dns v1.1.70
github.com/multiformats/go-multiaddr v0.16.0
github.com/olric-data/olric v0.7.0
github.com/pion/interceptor v0.1.40
github.com/pion/rtcp v1.2.15
github.com/pion/turn/v4 v4.0.2
github.com/pion/webrtc/v4 v4.1.2
github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8
github.com/spf13/cobra v1.10.2
github.com/stretchr/testify v1.11.1
github.com/tetratelabs/wazero v1.11.0
go.uber.org/zap v1.27.0
golang.org/x/crypto v0.47.0
golang.org/x/net v0.49.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/RoaringBitmap/roaring v1.9.4 // indirect
github.com/apparentlymart/go-cidr v1.1.0 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/atotto/clipboard v0.1.4 // indirect
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.22.0 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
github.com/buraksezer/consistent v0.10.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/charmbracelet/x/ansi v0.4.5 // indirect
github.com/charmbracelet/x/term v0.2.1 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/elastic/gosigar v0.14.3 // indirect
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
github.com/flynn/noise v1.1.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/pprof v0.0.0-20250208200701-d0013a598941 // indirect
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-metrics v0.5.4 // indirect
github.com/hashicorp/go-msgpack/v2 v2.1.3 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-sockaddr v1.0.7 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hashicorp/logutils v1.0.0 // indirect
github.com/hashicorp/memberlist v0.5.3 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/ipfs/go-cid v0.5.0 // indirect
github.com/ipfs/go-log/v2 v2.6.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
github.com/koron/go-ssdp v0.0.6 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/libp2p/go-flow-metrics v0.2.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
github.com/libp2p/go-msgio v0.3.0 // indirect
github.com/libp2p/go-netroute v0.3.0 // indirect
github.com/libp2p/go-reuseport v0.4.0 // indirect
github.com/libp2p/go-yamux/v5 v5.0.1 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-localereader v0.0.1 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/mschoch/smat v0.2.0 // indirect
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
github.com/muesli/cancelreader v0.2.2 // indirect
github.com/muesli/termenv v0.15.2 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multicodec v0.9.1 // indirect
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-multistream v0.6.1 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/onsi/ginkgo/v2 v2.22.2 // indirect
github.com/opencontainers/runtime-spec v1.2.0 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/pion/datachannel v1.5.10 // indirect
github.com/pion/dtls/v2 v2.2.12 // indirect
github.com/pion/dtls/v3 v3.0.6 // indirect
github.com/pion/ice/v4 v4.0.10 // indirect
github.com/pion/logging v0.2.3 // indirect
github.com/pion/mdns/v2 v2.0.7 // indirect
github.com/pion/randutil v0.1.0 // indirect
github.com/pion/rtp v1.8.19 // indirect
github.com/pion/sctp v1.8.39 // indirect
github.com/pion/sdp/v3 v3.0.13 // indirect
github.com/pion/srtp/v3 v3.0.6 // indirect
github.com/pion/stun v0.6.1 // indirect
github.com/pion/stun/v3 v3.0.0 // indirect
github.com/pion/transport/v2 v2.2.10 // indirect
github.com/pion/transport/v3 v3.0.7 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.23.0 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.67.5 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/quic-go/quic-go v0.50.1 // indirect
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/redis/go-redis/v9 v9.8.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/pflag v1.0.9 // indirect
github.com/tidwall/btree v1.7.0 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/redcon v1.6.2 // indirect
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/wlynxg/anet v0.0.5 // indirect
go.uber.org/dig v1.19.0 // indirect
go.uber.org/fx v1.24.0 // indirect
go.uber.org/mock v0.6.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 // indirect
golang.org/x/mod v0.31.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.40.0 // indirect
golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc // indirect
golang.org/x/term v0.39.0 // indirect
golang.org/x/text v0.33.0 // indirect
golang.org/x/time v0.14.0 // indirect
golang.org/x/tools v0.40.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b // indirect
google.golang.org/grpc v1.78.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
rsc.io/qr v0.2.0 // indirect
)

View File

@ -1,19 +0,0 @@
-- Migration 016: Node health events for failure detection
-- Tracks peer-to-peer health observations for quorum-based dead node detection
BEGIN;
CREATE TABLE IF NOT EXISTS node_health_events (
id INTEGER PRIMARY KEY AUTOINCREMENT,
observer_id TEXT NOT NULL, -- node that detected the failure
target_id TEXT NOT NULL, -- node that is suspect/dead
status TEXT NOT NULL, -- 'suspect', 'dead', 'recovered'
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);
-- Supports quorum queries of the form "how many observers report target X with status Y".
CREATE INDEX IF NOT EXISTS idx_nhe_target_status ON node_health_events(target_id, status);
-- Supports age-based windowing/pruning of old observations.
CREATE INDEX IF NOT EXISTS idx_nhe_created_at ON node_health_events(created_at);
INSERT OR IGNORE INTO schema_migrations(version) VALUES (16);
COMMIT;

View File

@ -1,21 +0,0 @@
-- Migration 017: Phantom auth sessions for QR code + deep link authentication
-- Stores session state for the CLI-to-phone relay pattern via the gateway
BEGIN;
CREATE TABLE IF NOT EXISTS phantom_auth_sessions (
id TEXT PRIMARY KEY,
namespace TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'pending', -- session lifecycle state; new rows start 'pending'
wallet TEXT, -- populated once the phone side completes authentication
api_key TEXT, -- NOTE(review): stored as-is in this table; confirm whether it should be hashed at rest
error_message TEXT, -- populated when the session fails
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
expires_at TIMESTAMP NOT NULL -- absolute expiry; enforcement is up to the application, not this schema
);
-- Supports polling and cleanup queries filtered by lifecycle state.
CREATE INDEX IF NOT EXISTS idx_phantom_sessions_status ON phantom_auth_sessions(status);
INSERT OR IGNORE INTO schema_migrations(version) VALUES (17);
COMMIT;

View File

@ -1,96 +0,0 @@
-- Migration 018: WebRTC Services (SFU + TURN) for Namespace Clusters
-- Adds per-namespace WebRTC configuration, room tracking, and port allocation
-- WebRTC is opt-in: enabled via `orama namespace enable webrtc`
BEGIN;
-- Per-namespace WebRTC configuration
-- One row per namespace that has WebRTC enabled
CREATE TABLE IF NOT EXISTS namespace_webrtc_config (
id TEXT PRIMARY KEY, -- UUID
namespace_cluster_id TEXT NOT NULL UNIQUE, -- FK to namespace_clusters
namespace_name TEXT NOT NULL, -- Cached for easier lookups
enabled INTEGER NOT NULL DEFAULT 1, -- 1 = enabled, 0 = disabled
-- TURN authentication
turn_shared_secret TEXT NOT NULL, -- HMAC-SHA1 shared secret (base64, 32 bytes)
turn_credential_ttl INTEGER NOT NULL DEFAULT 600, -- Credential TTL in seconds (default: 10 min)
-- Service topology
sfu_node_count INTEGER NOT NULL DEFAULT 3, -- SFU instances (all 3 nodes)
turn_node_count INTEGER NOT NULL DEFAULT 2, -- TURN instances (2 of 3 nodes for HA)
-- Metadata
enabled_by TEXT NOT NULL, -- Wallet address that enabled WebRTC
enabled_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
disabled_at TIMESTAMP,
FOREIGN KEY (namespace_cluster_id) REFERENCES namespace_clusters(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_webrtc_config_namespace ON namespace_webrtc_config(namespace_name);
CREATE INDEX IF NOT EXISTS idx_webrtc_config_cluster ON namespace_webrtc_config(namespace_cluster_id);
-- WebRTC room tracking
-- Tracks active rooms and their SFU node affinity
CREATE TABLE IF NOT EXISTS webrtc_rooms (
id TEXT PRIMARY KEY, -- UUID
namespace_cluster_id TEXT NOT NULL, -- FK to namespace_clusters
namespace_name TEXT NOT NULL, -- Cached for easier lookups
room_id TEXT NOT NULL, -- Application-defined room identifier
-- SFU affinity
sfu_node_id TEXT NOT NULL, -- Node hosting this room's SFU
sfu_internal_ip TEXT NOT NULL, -- WireGuard IP of SFU node
sfu_signaling_port INTEGER NOT NULL, -- SFU WebSocket signaling port
-- Room state
participant_count INTEGER NOT NULL DEFAULT 0,
max_participants INTEGER NOT NULL DEFAULT 100,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
last_activity TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-- Prevent duplicate rooms within a namespace
UNIQUE(namespace_cluster_id, room_id),
FOREIGN KEY (namespace_cluster_id) REFERENCES namespace_clusters(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_webrtc_rooms_namespace ON webrtc_rooms(namespace_name);
CREATE INDEX IF NOT EXISTS idx_webrtc_rooms_node ON webrtc_rooms(sfu_node_id);
CREATE INDEX IF NOT EXISTS idx_webrtc_rooms_activity ON webrtc_rooms(last_activity);
-- WebRTC port allocations
-- Separate from namespace_port_allocations to avoid breaking existing port blocks
-- Each namespace gets SFU + TURN ports on each node where those services run
CREATE TABLE IF NOT EXISTS webrtc_port_allocations (
id TEXT PRIMARY KEY, -- UUID
node_id TEXT NOT NULL, -- Physical node ID
namespace_cluster_id TEXT NOT NULL, -- FK to namespace_clusters
service_type TEXT NOT NULL, -- 'sfu' or 'turn'
-- NOTE(review): the sfu_* and turn_* column families below are nullable and
-- mutually exclusive per row depending on service_type; a CHECK constraint
-- could enforce that invariant at the schema level.
-- SFU ports (when service_type = 'sfu')
sfu_signaling_port INTEGER, -- WebSocket signaling port
sfu_media_port_start INTEGER, -- Start of RTP media port range
sfu_media_port_end INTEGER, -- End of RTP media port range
-- TURN ports (when service_type = 'turn')
turn_listen_port INTEGER, -- TURN listener port (3478)
turn_tls_port INTEGER, -- TURN TLS port (443/UDP)
turn_relay_port_start INTEGER, -- Start of relay port range
turn_relay_port_end INTEGER, -- End of relay port range
allocated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-- Prevent overlapping allocations
UNIQUE(node_id, namespace_cluster_id, service_type),
FOREIGN KEY (namespace_cluster_id) REFERENCES namespace_clusters(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_webrtc_ports_node ON webrtc_port_allocations(node_id);
CREATE INDEX IF NOT EXISTS idx_webrtc_ports_cluster ON webrtc_port_allocations(namespace_cluster_id);
CREATE INDEX IF NOT EXISTS idx_webrtc_ports_type ON webrtc_port_allocations(service_type);
-- Mark migration as applied
INSERT OR IGNORE INTO schema_migrations(version) VALUES (18);
COMMIT;

View File

@ -1,4 +0,0 @@
-- Invalidate all existing refresh tokens.
-- Tokens were stored in plaintext; the application now stores SHA-256 hashes.
-- Users will need to re-authenticate (tokens have 30-day expiry anyway).
-- NOTE(review): unlike the numbered migrations, this script has no BEGIN/COMMIT
-- wrapper and records nothing in schema_migrations — confirm the migration
-- runner supplies its own transaction and version tracking for this file.
UPDATE refresh_tokens SET revoked_at = datetime('now') WHERE revoked_at IS NULL;

View File

@ -1,350 +0,0 @@
package auth
import (
"encoding/hex"
"os"
"strings"
"testing"
)
// ---------------------------------------------------------------------------
// extractDomainFromURL
// ---------------------------------------------------------------------------
// TestExtractDomainFromURL verifies host extraction from URL-ish strings:
// scheme, port, path, query string, fragment, and userinfo are all stripped,
// bare domains (with or without a port) pass through, and an empty input
// yields an empty result.
func TestExtractDomainFromURL(t *testing.T) {
	tests := []struct {
		name  string
		input string
		want  string
	}{
		{
			name:  "https with domain only",
			input: "https://example.com",
			want:  "example.com",
		},
		{
			name:  "http with port and path",
			input: "http://example.com:8080/path",
			want:  "example.com",
		},
		{
			name:  "https with subdomain and path",
			input: "https://sub.domain.com/api/v1",
			want:  "sub.domain.com",
		},
		{
			name:  "no scheme bare domain",
			input: "example.com",
			want:  "example.com",
		},
		{
			name:  "https with IP and port",
			input: "https://192.168.1.1:443",
			want:  "192.168.1.1",
		},
		{
			name:  "empty string",
			input: "",
			want:  "",
		},
		{
			name:  "bare domain no scheme",
			input: "gateway.orama.network",
			want:  "gateway.orama.network",
		},
		{
			name:  "https with query params",
			input: "https://example.com?foo=bar",
			want:  "example.com",
		},
		{
			name:  "https with path and query params",
			input: "https://example.com/page?q=1&r=2",
			want:  "example.com",
		},
		{
			name:  "bare domain with port",
			input: "example.com:9090",
			want:  "example.com",
		},
		{
			name:  "https with fragment",
			input: "https://example.com/page#section",
			want:  "example.com",
		},
		{
			name:  "https with user info",
			input: "https://user:pass@example.com/path",
			want:  "example.com",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := extractDomainFromURL(tt.input)
			if got != tt.want {
				t.Errorf("extractDomainFromURL(%q) = %q, want %q", tt.input, got, tt.want)
			}
		})
	}
}
// ---------------------------------------------------------------------------
// ValidateWalletAddress
// ---------------------------------------------------------------------------
// TestValidateWalletAddress exercises ValidateWalletAddress: exactly 40 hex
// characters (any case), with or without a 0x/0X prefix, are accepted;
// anything shorter, longer, empty, or containing non-hex characters is not.
func TestValidateWalletAddress(t *testing.T) {
	const hex40 = "aabbccddee1122334455aabbccddee1122334455"

	cases := []struct {
		label string
		in    string
		valid bool
	}{
		{"valid 40 char hex with 0x prefix", "0x" + hex40, true},
		{"valid 40 char hex without prefix", hex40, true},
		{"valid uppercase hex with 0x prefix", "0x" + strings.ToUpper(hex40), true},
		{"too short", "0xaabbccdd", false},
		{"too long", "0x" + hex40 + "ff", false},
		{"non hex characters", "0x" + "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz", false},
		{"empty string", "", false},
		{"just 0x prefix", "0x", false},
		{"39 hex chars with 0x prefix", "0x" + hex40[:39], false},
		{"41 hex chars with 0x prefix", "0x" + hex40 + "a", false},
		{"mixed case hex is valid", "0xAaBbCcDdEe1122334455aAbBcCdDeE1122334455", true},
	}

	for _, tc := range cases {
		t.Run(tc.label, func(t *testing.T) {
			if got := ValidateWalletAddress(tc.in); got != tc.valid {
				t.Errorf("ValidateWalletAddress(%q) = %v, want %v", tc.in, got, tc.valid)
			}
		})
	}
}
// ---------------------------------------------------------------------------
// FormatWalletAddress
// ---------------------------------------------------------------------------
// TestFormatWalletAddress verifies normalization: the result is always
// lowercase and always carries a single lowercase "0x" prefix, whatever the
// case or prefix of the input (including the empty string).
func TestFormatWalletAddress(t *testing.T) {
	const canonical = "0xaabbccddee1122334455aabbccddee1122334455"

	cases := []struct {
		label string
		in    string
		out   string
	}{
		{"already lowercase with 0x", "0xaabbccddee1122334455aabbccddee1122334455", canonical},
		{"uppercase gets lowercased", "0xAABBCCDDEE1122334455AABBCCDDEE1122334455", canonical},
		{"without 0x prefix gets it added", "aabbccddee1122334455aabbccddee1122334455", canonical},
		{"0X uppercase prefix gets normalized", "0XAABBCCDDEE1122334455AABBCCDDEE1122334455", canonical},
		{"mixed case gets normalized", "0xAaBbCcDdEe1122334455AaBbCcDdEe1122334455", canonical},
		{"empty string gets 0x prefix", "", "0x"},
		{"just 0x stays as 0x", "0x", "0x"},
	}

	for _, tc := range cases {
		t.Run(tc.label, func(t *testing.T) {
			if got := FormatWalletAddress(tc.in); got != tc.out {
				t.Errorf("FormatWalletAddress(%q) = %q, want %q", tc.in, got, tc.out)
			}
		})
	}
}
// ---------------------------------------------------------------------------
// GenerateRandomString
// ---------------------------------------------------------------------------
// TestGenerateRandomString checks GenerateRandomString's contract: the result
// has exactly the requested length, consecutive calls differ (randomness
// smoke test), the output alphabet is hexadecimal, and the edge lengths
// 0 and 1 behave sensibly.
func TestGenerateRandomString(t *testing.T) {
	t.Run("returns correct length", func(t *testing.T) {
		lengths := []int{8, 16, 32, 64}
		for _, l := range lengths {
			s, err := GenerateRandomString(l)
			if err != nil {
				t.Fatalf("GenerateRandomString(%d) returned error: %v", l, err)
			}
			if len(s) != l {
				t.Errorf("GenerateRandomString(%d) returned string of length %d, want %d", l, len(s), l)
			}
		}
	})
	t.Run("two calls produce different values", func(t *testing.T) {
		// With 32 hex chars (128 bits) a collision between two calls is
		// astronomically unlikely, so equality indicates a broken generator.
		s1, err := GenerateRandomString(32)
		if err != nil {
			t.Fatalf("first call returned error: %v", err)
		}
		s2, err := GenerateRandomString(32)
		if err != nil {
			t.Fatalf("second call returned error: %v", err)
		}
		if s1 == s2 {
			t.Errorf("two calls to GenerateRandomString(32) produced the same value: %q", s1)
		}
	})
	t.Run("returns hex characters only", func(t *testing.T) {
		s, err := GenerateRandomString(32)
		if err != nil {
			t.Fatalf("GenerateRandomString(32) returned error: %v", err)
		}
		// hex.DecodeString requires even-length input; pad if needed
		// ("0" is itself a hex digit, so padding cannot cause a false failure).
		toDecode := s
		if len(toDecode)%2 != 0 {
			toDecode = toDecode + "0"
		}
		if _, err := hex.DecodeString(toDecode); err != nil {
			t.Errorf("GenerateRandomString(32) returned non-hex string: %q, err: %v", s, err)
		}
	})
	t.Run("length zero returns empty string", func(t *testing.T) {
		s, err := GenerateRandomString(0)
		if err != nil {
			t.Fatalf("GenerateRandomString(0) returned error: %v", err)
		}
		if s != "" {
			t.Errorf("GenerateRandomString(0) = %q, want empty string", s)
		}
	})
	t.Run("length one returns single hex char", func(t *testing.T) {
		s, err := GenerateRandomString(1)
		if err != nil {
			t.Fatalf("GenerateRandomString(1) returned error: %v", err)
		}
		if len(s) != 1 {
			t.Errorf("GenerateRandomString(1) returned string of length %d, want 1", len(s))
		}
		// Must be a valid hex character
		const hexChars = "0123456789abcdef"
		if !strings.Contains(hexChars, s) {
			t.Errorf("GenerateRandomString(1) = %q, not a valid hex character", s)
		}
	})
}
// ---------------------------------------------------------------------------
// phantomAuthURL
// ---------------------------------------------------------------------------
// TestPhantomAuthURL verifies phantomAuthURL's env-var override behavior:
// the built-in default is returned when ORAMA_PHANTOM_AUTH_URL is unset, a
// set value wins, and trailing slashes on the override are stripped.
//
// t.Setenv is used instead of raw os.Setenv/os.Unsetenv so the original
// process environment is automatically restored after each subtest — the
// previous version permanently unset the variable, which could leak state
// into other tests in the package.
func TestPhantomAuthURL(t *testing.T) {
	t.Run("returns default when env var not set", func(t *testing.T) {
		// t.Setenv registers restoration of the prior value; the explicit
		// Unsetenv afterwards makes the variable truly absent for this subtest.
		t.Setenv("ORAMA_PHANTOM_AUTH_URL", "")
		os.Unsetenv("ORAMA_PHANTOM_AUTH_URL")
		if got := phantomAuthURL(); got != defaultPhantomAuthURL {
			t.Errorf("phantomAuthURL() = %q, want default %q", got, defaultPhantomAuthURL)
		}
	})
	t.Run("returns custom URL when env var is set", func(t *testing.T) {
		custom := "https://custom-phantom.example.com"
		t.Setenv("ORAMA_PHANTOM_AUTH_URL", custom)
		if got := phantomAuthURL(); got != custom {
			t.Errorf("phantomAuthURL() = %q, want %q", got, custom)
		}
	})
	t.Run("trailing slash stripped from env var", func(t *testing.T) {
		t.Setenv("ORAMA_PHANTOM_AUTH_URL", "https://custom-phantom.example.com/")
		want := "https://custom-phantom.example.com"
		if got := phantomAuthURL(); got != want {
			t.Errorf("phantomAuthURL() = %q, want %q (trailing slash should be stripped)", got, want)
		}
	})
	t.Run("multiple trailing slashes stripped from env var", func(t *testing.T) {
		t.Setenv("ORAMA_PHANTOM_AUTH_URL", "https://custom-phantom.example.com///")
		want := "https://custom-phantom.example.com"
		if got := phantomAuthURL(); got != want {
			t.Errorf("phantomAuthURL() = %q, want %q (trailing slashes should be stripped)", got, want)
		}
	})
}

View File

@ -1,22 +0,0 @@
package auth
import "net"
// WireGuardSubnet is the internal WireGuard mesh CIDR.
const WireGuardSubnet = "10.0.0.0/24"

// wireGuardNet is WireGuardSubnet parsed once at package init. The previous
// implementation re-parsed the constant CIDR (and silently discarded the
// error) on every call; parsing a compile-time constant can only fail due to
// a programmer error, so fail loudly at startup instead.
var wireGuardNet = func() *net.IPNet {
	_, n, err := net.ParseCIDR(WireGuardSubnet)
	if err != nil {
		panic("auth: invalid WireGuardSubnet constant: " + err.Error())
	}
	return n
}()

// IsWireGuardPeer checks whether remoteAddr (host:port format) originates
// from the WireGuard mesh subnet. This provides cryptographic peer
// authentication since WireGuard validates keys at the tunnel layer.
//
// It returns false for addresses without a port, for unparseable hosts,
// and for any IP outside WireGuardSubnet.
func IsWireGuardPeer(remoteAddr string) bool {
	host, _, err := net.SplitHostPort(remoteAddr)
	if err != nil {
		return false
	}
	ip := net.ParseIP(host)
	if ip == nil {
		return false
	}
	return wireGuardNet.Contains(ip)
}

View File

@ -1,214 +0,0 @@
package auth
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"strings"
"time"
"github.com/DeBrosOfficial/network/pkg/tlsutil"
qrterminal "github.com/mdp/qrterminal/v3"
)
// defaultPhantomAuthURL is the default Phantom auth React app URL (deployed on Orama devnet).
// Override with ORAMA_PHANTOM_AUTH_URL environment variable.
const defaultPhantomAuthURL = "https://phantom-auth-y0w9aa.orama-devnet.network"
// phantomAuthURL returns the Phantom auth URL, preferring the environment variable.
func phantomAuthURL() string {
if u := os.Getenv("ORAMA_PHANTOM_AUTH_URL"); u != "" {
return strings.TrimRight(u, "/")
}
return defaultPhantomAuthURL
}
// PhantomSession represents a phantom auth session from the gateway.
type PhantomSession struct {
	SessionID string `json:"session_id"` // opaque identifier, embedded in the auth URL and used for polling
	ExpiresAt string `json:"expires_at"` // session expiry timestamp as returned by the gateway
}

// PhantomSessionStatus represents the polled status of a phantom auth session.
type PhantomSessionStatus struct {
	SessionID string `json:"session_id"`
	Status    string `json:"status"`    // "pending", "completed", "failed", or "expired" (handled in pollPhantomSession)
	Wallet    string `json:"wallet"`    // authenticated wallet address; set once Status is "completed"
	APIKey    string `json:"api_key"`   // issued API key; set once Status is "completed"
	Namespace string `json:"namespace"` // namespace the session was created for
	Error     string `json:"error"`     // failure reason when Status is "failed"
}
// PerformPhantomAuthentication runs the Phantom Solana auth flow:
//  1. Prompt for namespace (only if not supplied)
//  2. Create session via gateway
//  3. Display QR code in terminal
//  4. Poll for completion (up to 5 minutes, see pollPhantomSession)
//  5. Return credentials
//
// The returned Credentials have Namespace and NamespaceURL filled in from the
// supplied/entered namespace and the gateway's domain. An error is returned
// if the user's input cannot be read, the session cannot be created, or
// authentication fails/expires/times out.
func PerformPhantomAuthentication(gatewayURL, namespace string) (*Credentials, error) {
	reader := bufio.NewReader(os.Stdin)
	fmt.Println("\n🟣 Phantom Wallet Authentication (Solana)")
	fmt.Println("==========================================")
	fmt.Println("Requires an NFT from the authorized collection.")
	// Prompt for namespace if empty
	if namespace == "" {
		for {
			fmt.Print("Enter namespace (required): ")
			nsInput, err := reader.ReadString('\n')
			if err != nil {
				return nil, fmt.Errorf("failed to read namespace: %w", err)
			}
			namespace = strings.TrimSpace(nsInput)
			if namespace != "" {
				break
			}
			fmt.Println("Namespace cannot be empty.")
		}
	}
	// The HTTP client is pinned to the gateway's domain for TLS purposes.
	domain := extractDomainFromURL(gatewayURL)
	client := tlsutil.NewHTTPClientForDomain(30*time.Second, domain)
	// 1. Create phantom session
	fmt.Println("\nCreating authentication session...")
	session, err := createPhantomSession(client, gatewayURL, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to create session: %w", err)
	}
	// 2. Build auth URL and display QR code
	// The phone opens this URL; the session ID ties the phone-side completion
	// back to this CLI via the gateway.
	authURL := fmt.Sprintf("%s/?session=%s&gateway=%s&namespace=%s",
		phantomAuthURL(), session.SessionID, url.QueryEscape(gatewayURL), url.QueryEscape(namespace))
	fmt.Println("\nScan this QR code with your phone to authenticate:")
	fmt.Println()
	qrterminal.GenerateWithConfig(authURL, qrterminal.Config{
		Level:     qrterminal.M,
		Writer:    os.Stdout,
		BlackChar: qrterminal.BLACK,
		WhiteChar: qrterminal.WHITE,
		QuietZone: 1,
	})
	fmt.Println()
	fmt.Printf("Or open this URL on your phone:\n%s\n\n", authURL)
	fmt.Println("Waiting for authentication... (timeout: 5 minutes)")
	// 3. Poll for completion
	creds, err := pollPhantomSession(client, gatewayURL, session.SessionID)
	if err != nil {
		return nil, err
	}
	// Set namespace and build namespace URL
	// The "default" namespace maps to the bare gateway domain; any other
	// namespace gets an ns-<name>.<domain> subdomain.
	creds.Namespace = namespace
	if domain := extractDomainFromURL(gatewayURL); domain != "" {
		if namespace == "default" {
			creds.NamespaceURL = fmt.Sprintf("https://%s", domain)
		} else {
			creds.NamespaceURL = fmt.Sprintf("https://ns-%s.%s", namespace, domain)
		}
	}
	fmt.Printf("\n🎉 Authentication successful!\n")
	// Only the first 8 characters of the API key are echoed to the terminal.
	truncatedKey := creds.APIKey
	if len(truncatedKey) > 8 {
		truncatedKey = truncatedKey[:8] + "..."
	}
	fmt.Printf("📝 API Key: %s\n", truncatedKey)
	return creds, nil
}
// createPhantomSession asks the gateway to open a new phantom auth session
// for the given namespace and returns the session descriptor. Any non-200
// response is surfaced as an error containing the gateway's status and body.
func createPhantomSession(client *http.Client, gatewayURL, namespace string) (*PhantomSession, error) {
	payload, err := json.Marshal(map[string]string{"namespace": namespace})
	if err != nil {
		return nil, err
	}

	resp, err := client.Post(gatewayURL+"/v1/auth/phantom/session", "application/json", bytes.NewReader(payload))
	if err != nil {
		return nil, fmt.Errorf("failed to call gateway: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("gateway returned status %d: %s", resp.StatusCode, string(body))
	}

	session := &PhantomSession{}
	if err := json.NewDecoder(resp.Body).Decode(session); err != nil {
		return nil, fmt.Errorf("failed to decode response: %w", err)
	}
	return session, nil
}
// pollPhantomSession polls the gateway for session completion.
//
// It issues GET /v1/auth/phantom/session/<id> every 2 seconds for up to
// 5 minutes, showing a spinner while the session is still "pending".
// Terminal outcomes:
//   - "completed": returns Credentials built from the response
//   - "failed":    returns an error carrying the gateway-supplied reason
//   - "expired":   returns a session-expired error
//
// Transport and decode errors are treated as transient and retried.
// NOTE(review): the HTTP status code is never checked here; a non-200
// response whose body decodes with an unrecognized Status is silently
// retried until the 5-minute timeout — confirm that is the intent.
func pollPhantomSession(client *http.Client, gatewayURL, sessionID string) (*Credentials, error) {
	pollInterval := 2 * time.Second
	maxDuration := 5 * time.Minute
	deadline := time.Now().Add(maxDuration)
	spinnerChars := []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}
	spinnerIdx := 0
	for time.Now().Before(deadline) {
		resp, err := client.Get(gatewayURL + "/v1/auth/phantom/session/" + sessionID)
		if err != nil {
			time.Sleep(pollInterval)
			continue
		}
		var status PhantomSessionStatus
		if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
			resp.Body.Close()
			time.Sleep(pollInterval)
			continue
		}
		resp.Body.Close()
		switch status.Status {
		case "completed":
			fmt.Printf("\r✅ Authenticated!                                    \n")
			return &Credentials{
				APIKey:   status.APIKey,
				Wallet:   status.Wallet,
				UserID:   status.Wallet,
				IssuedAt: time.Now(),
			}, nil
		case "failed":
			fmt.Printf("\r❌ Authentication failed                             \n")
			errMsg := status.Error
			if errMsg == "" {
				errMsg = "unknown error"
			}
			return nil, fmt.Errorf("authentication failed: %s", errMsg)
		case "expired":
			fmt.Printf("\r⏰ Session expired                                   \n")
			return nil, fmt.Errorf("authentication session expired")
		case "pending":
			// Overwrite the current line in place with the next spinner frame.
			fmt.Printf("\r%s Waiting for phone authentication... ", spinnerChars[spinnerIdx%len(spinnerChars)])
			spinnerIdx++
		}
		time.Sleep(pollInterval)
	}
	fmt.Printf("\r⏰ Timeout                                           \n")
	return nil, fmt.Errorf("authentication timed out after 5 minutes")
}

View File

@ -1,290 +0,0 @@
package auth
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"strings"
"time"
"github.com/DeBrosOfficial/network/pkg/tlsutil"
)
// IsRootWalletInstalled reports whether the `rw` CLI can be found in PATH.
func IsRootWalletInstalled() bool {
	if _, err := exec.LookPath("rw"); err != nil {
		return false
	}
	return true
}
// getRootWalletAddress asks the RootWallet CLI for the EVM address stored in
// its keystore. rw's stderr is passed through so its prompts and error output
// remain visible to the user.
func getRootWalletAddress() (string, error) {
	cmd := exec.Command("rw", "address", "--chain", "evm")
	cmd.Stderr = os.Stderr
	raw, err := cmd.Output()
	if err != nil {
		return "", fmt.Errorf("failed to get address from rw: %w", err)
	}
	if addr := strings.TrimSpace(string(raw)); addr != "" {
		return addr, nil
	}
	return "", fmt.Errorf("rw returned empty address — run 'rw init' first")
}
// signWithRootWallet signs a message using RootWallet's EVM key.
// Stdin is wired through so the user can enter their password if the
// RootWallet session has expired; stderr passes through for prompts.
func signWithRootWallet(message string) (string, error) {
	cmd := exec.Command("rw", "sign", message, "--chain", "evm")
	cmd.Stdin = os.Stdin
	cmd.Stderr = os.Stderr
	raw, err := cmd.Output()
	if err != nil {
		return "", fmt.Errorf("failed to sign with rw: %w", err)
	}
	if sig := strings.TrimSpace(string(raw)); sig != "" {
		return sig, nil
	}
	return "", fmt.Errorf("rw returned empty signature")
}
// PerformRootWalletAuthentication performs a challenge-response authentication
// flow using the RootWallet CLI to sign a gateway-issued nonce.
//
// Steps: read the wallet address from `rw`, prompt for a namespace if one was
// not supplied, request a nonce from the gateway, sign it with `rw`, then
// verify the signature with the gateway. If verification returns a
// provisioning poll URL (202), the namespace cluster is polled until ready;
// a poll failure is reported but does not fail the overall authentication,
// since credentials are already issued at that point.
func PerformRootWalletAuthentication(gatewayURL, namespace string) (*Credentials, error) {
	reader := bufio.NewReader(os.Stdin)
	fmt.Println("\n🔐 RootWallet Authentication")
	fmt.Println("=============================")
	// 1. Get wallet address from RootWallet
	fmt.Println("⏳ Reading wallet address from RootWallet...")
	wallet, err := getRootWalletAddress()
	if err != nil {
		return nil, fmt.Errorf("failed to get wallet address: %w", err)
	}
	// Guard against rw emitting something that is not a 40-hex-char address.
	if !ValidateWalletAddress(wallet) {
		return nil, fmt.Errorf("invalid wallet address from rw: %s", wallet)
	}
	fmt.Printf("✅ Wallet: %s\n", wallet)
	// 2. Prompt for namespace if not provided
	if namespace == "" {
		for {
			fmt.Print("Enter namespace (required): ")
			nsInput, err := reader.ReadString('\n')
			if err != nil {
				return nil, fmt.Errorf("failed to read namespace: %w", err)
			}
			namespace = strings.TrimSpace(nsInput)
			if namespace != "" {
				break
			}
			fmt.Println("⚠️  Namespace cannot be empty. Please enter a namespace.")
		}
	}
	fmt.Printf("✅ Namespace: %s\n", namespace)
	// 3. Request challenge nonce from gateway
	fmt.Println("⏳ Requesting authentication challenge...")
	// The HTTP client is pinned to the gateway's domain for TLS purposes.
	domain := extractDomainFromURL(gatewayURL)
	client := tlsutil.NewHTTPClientForDomain(30*time.Second, domain)
	nonce, err := requestChallenge(client, gatewayURL, wallet, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get challenge: %w", err)
	}
	// 4. Sign the nonce with RootWallet
	fmt.Println("⏳ Signing challenge with RootWallet...")
	signature, err := signWithRootWallet(nonce)
	if err != nil {
		return nil, fmt.Errorf("failed to sign challenge: %w", err)
	}
	fmt.Println("✅ Challenge signed")
	// 5. Verify signature with gateway
	fmt.Println("⏳ Verifying signature with gateway...")
	creds, err := verifySignature(client, gatewayURL, wallet, nonce, signature, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to verify signature: %w", err)
	}
	// If namespace cluster is being provisioned, poll until ready.
	// A poll failure is deliberately non-fatal: credentials are valid and the
	// cluster may finish provisioning in the background.
	if creds.ProvisioningPollURL != "" {
		fmt.Println("⏳ Provisioning namespace cluster...")
		pollErr := pollNamespaceProvisioning(client, gatewayURL, creds.ProvisioningPollURL)
		if pollErr != nil {
			fmt.Printf("⚠️  Provisioning poll failed: %v\n", pollErr)
			fmt.Println("   Credentials are saved. Cluster may still be provisioning in background.")
		} else {
			fmt.Println("✅ Namespace cluster ready!")
		}
	}
	fmt.Printf("\n🎉 Authentication successful!\n")
	fmt.Printf("🏢 Namespace: %s\n", creds.Namespace)
	return creds, nil
}
// requestChallenge sends POST /v1/auth/challenge and returns the nonce
func requestChallenge(client *http.Client, gatewayURL, wallet, namespace string) (string, error) {
reqBody := map[string]string{
"wallet": wallet,
"namespace": namespace,
}
payload, err := json.Marshal(reqBody)
if err != nil {
return "", fmt.Errorf("failed to marshal request: %w", err)
}
resp, err := client.Post(gatewayURL+"/v1/auth/challenge", "application/json", bytes.NewReader(payload))
if err != nil {
return "", fmt.Errorf("failed to call gateway: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
return "", fmt.Errorf("gateway returned status %d: %s", resp.StatusCode, string(body))
}
var result struct {
Nonce string `json:"nonce"`
Wallet string `json:"wallet"`
Namespace string `json:"namespace"`
ExpiresAt string `json:"expires_at"`
}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return "", fmt.Errorf("failed to decode response: %w", err)
}
if result.Nonce == "" {
return "", fmt.Errorf("no nonce in challenge response")
}
return result.Nonce, nil
}
// verifySignature sends POST /v1/auth/verify and returns credentials.
//
// Both 200 (namespace already provisioned) and 202 (cluster provisioning in
// progress) are treated as success; on 202 the gateway's poll_url is recorded
// in Credentials.ProvisioningPollURL so the caller can wait for readiness.
// An api_key must be present in either case.
func verifySignature(client *http.Client, gatewayURL, wallet, nonce, signature, namespace string) (*Credentials, error) {
	reqBody := map[string]string{
		"wallet":     wallet,
		"nonce":      nonce,
		"signature":  signature,
		"namespace":  namespace,
		"chain_type": "ETH",
	}
	payload, err := json.Marshal(reqBody)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}
	resp, err := client.Post(gatewayURL+"/v1/auth/verify", "application/json", bytes.NewReader(payload))
	if err != nil {
		return nil, fmt.Errorf("failed to call gateway: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted {
		body, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("gateway returned status %d: %s", resp.StatusCode, string(body))
	}
	var result struct {
		AccessToken  string `json:"access_token"`
		RefreshToken string `json:"refresh_token"`
		ExpiresIn    int    `json:"expires_in"`
		Subject      string `json:"subject"`
		Namespace    string `json:"namespace"`
		APIKey       string `json:"api_key"`
		// Provisioning fields (202 Accepted)
		Status  string `json:"status"`
		PollURL string `json:"poll_url"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return nil, fmt.Errorf("failed to decode response: %w", err)
	}
	if result.APIKey == "" {
		return nil, fmt.Errorf("no api_key in verify response")
	}
	// Build namespace gateway URL: "default" maps to the bare gateway domain,
	// anything else to an ns-<name>.<domain> subdomain.
	namespaceURL := ""
	if d := extractDomainFromURL(gatewayURL); d != "" {
		if namespace == "default" {
			namespaceURL = fmt.Sprintf("https://%s", d)
		} else {
			namespaceURL = fmt.Sprintf("https://ns-%s.%s", namespace, d)
		}
	}
	creds := &Credentials{
		APIKey:       result.APIKey,
		RefreshToken: result.RefreshToken,
		Namespace:    result.Namespace,
		UserID:       result.Subject,
		Wallet:       result.Subject,
		IssuedAt:     time.Now(),
		NamespaceURL: namespaceURL,
	}
	// If 202, namespace cluster is being provisioned — set poll URL
	if resp.StatusCode == http.StatusAccepted && result.PollURL != "" {
		creds.ProvisioningPollURL = result.PollURL
	}
	// Note: result.ExpiresIn is the JWT access token lifetime (15min),
	// NOT the API key lifetime. Don't set ExpiresAt — the API key is permanent.
	return creds, nil
}
// pollNamespaceProvisioning polls the namespace status endpoint every 5s
// until the cluster reports "ready", reports failure, or 120s elapse.
// Transient network and decode errors are retried on the next tick; a dot is
// printed per non-terminal poll as a progress indicator.
func pollNamespaceProvisioning(client *http.Client, gatewayURL, pollPath string) error {
	const (
		pollEvery = 5 * time.Second
		maxWait   = 120 * time.Second
	)

	pollURL := gatewayURL + pollPath
	deadline := time.After(maxWait)
	ticker := time.NewTicker(pollEvery)
	defer ticker.Stop()

	for {
		select {
		case <-deadline:
			return fmt.Errorf("timed out after 120s waiting for namespace cluster")
		case <-ticker.C:
			resp, err := client.Get(pollURL)
			if err != nil {
				continue // transient network error — retry on next tick
			}
			var body struct {
				Status string `json:"status"`
			}
			decErr := json.NewDecoder(resp.Body).Decode(&body)
			resp.Body.Close()
			if decErr != nil {
				continue
			}
			switch body.Status {
			case "ready":
				return nil
			case "failed", "error":
				return fmt.Errorf("namespace provisioning failed")
			}
			// Still "provisioning" (or unknown) — show progress and keep polling.
			fmt.Print(".")
		}
	}
}

View File

@ -1,318 +0,0 @@
package build
import (
"archive/tar"
"compress/gzip"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
)
// Manifest describes the contents of a binary archive.
type Manifest struct {
	Version   string            `json:"version"` // release version string
	Commit    string            `json:"commit"`  // git commit the archive was built from
	Date      string            `json:"date"`    // build date
	Arch      string            `json:"arch"`    // target architecture (printed as linux/<arch> in summaries)
	Checksums map[string]string `json:"checksums"` // filename -> sha256
}
// generateManifest creates the manifest with SHA256 checksums of all binaries.
// Only regular files directly inside b.binDir are hashed; subdirectories are
// skipped. Version/commit/date/arch metadata is copied from the builder.
func (b *Builder) generateManifest() (*Manifest, error) {
	m := &Manifest{
		Version:   b.version,
		Commit:    b.commit,
		Date:      b.date,
		Arch:      b.flags.Arch,
		Checksums: make(map[string]string),
	}
	entries, err := os.ReadDir(b.binDir)
	if err != nil {
		return nil, err
	}
	for _, entry := range entries {
		if entry.IsDir() {
			continue // only top-level binaries are checksummed
		}
		path := filepath.Join(b.binDir, entry.Name())
		hash, err := sha256File(path)
		if err != nil {
			return nil, fmt.Errorf("failed to hash %s: %w", entry.Name(), err)
		}
		m.Checksums[entry.Name()] = hash
	}
	return m, nil
}
// createArchive creates the tar.gz archive from the build directory.
//
// Archive layout:
//   bin/           all built binaries
//   systemd/       unit templates (only if present in tmpDir)
//   packages/      extra packages (only if present in tmpDir)
//   manifest.json  version/commit/arch plus per-binary sha256 checksums
//   manifest.sig   detached signature (only if signManifest ran first)
func (b *Builder) createArchive(outputPath string, manifest *Manifest) error {
	fmt.Printf("\nCreating archive: %s\n", outputPath)
	// Write manifest.json to tmpDir
	manifestData, err := json.MarshalIndent(manifest, "", " ")
	if err != nil {
		return err
	}
	if err := os.WriteFile(filepath.Join(b.tmpDir, "manifest.json"), manifestData, 0644); err != nil {
		return err
	}
	// Create output file
	f, err := os.Create(outputPath)
	if err != nil {
		return err
	}
	defer f.Close()
	// Deferred closes run LIFO: tar writer, then gzip, then the file — so the
	// tar and gzip trailers are flushed in the correct order at return.
	gw := gzip.NewWriter(f)
	defer gw.Close()
	tw := tar.NewWriter(gw)
	defer tw.Close()
	// Add bin/ directory
	if err := addDirToTar(tw, b.binDir, "bin"); err != nil {
		return err
	}
	// Add systemd/ directory
	systemdDir := filepath.Join(b.tmpDir, "systemd")
	if _, err := os.Stat(systemdDir); err == nil {
		if err := addDirToTar(tw, systemdDir, "systemd"); err != nil {
			return err
		}
	}
	// Add packages/ directory if it exists
	packagesDir := filepath.Join(b.tmpDir, "packages")
	if _, err := os.Stat(packagesDir); err == nil {
		if err := addDirToTar(tw, packagesDir, "packages"); err != nil {
			return err
		}
	}
	// Add manifest.json
	if err := addFileToTar(tw, filepath.Join(b.tmpDir, "manifest.json"), "manifest.json"); err != nil {
		return err
	}
	// Add manifest.sig if it exists (created by --sign)
	sigPath := filepath.Join(b.tmpDir, "manifest.sig")
	if _, err := os.Stat(sigPath); err == nil {
		if err := addFileToTar(tw, sigPath, "manifest.sig"); err != nil {
			return err
		}
	}
	// Print summary
	fmt.Printf(" bin/: %d binaries\n", len(manifest.Checksums))
	fmt.Printf(" systemd/: namespace templates\n")
	fmt.Printf(" manifest: v%s (%s) linux/%s\n", manifest.Version, manifest.Commit, manifest.Arch)
	// NOTE(review): f.Stat() runs before the deferred tar/gzip writers flush,
	// so the printed size can slightly undercount the final file size.
	info, err := f.Stat()
	if err == nil {
		fmt.Printf(" size: %s\n", formatBytes(info.Size()))
	}
	return nil
}
// signManifest signs the manifest hash using rootwallet CLI.
// Produces manifest.sig containing the hex-encoded EVM signature.
//
// The manifest is serialized with json.Marshal, SHA-256 hashed, and the hex
// digest is signed via `rw sign <hash> --chain evm`. The signature file is
// picked up by createArchive if it exists.
func (b *Builder) signManifest(manifest *Manifest) error {
	fmt.Printf("\nSigning manifest with rootwallet...\n")
	// Serialize manifest deterministically: json.Marshal emits struct fields
	// in declaration order and sorts map keys, so the same manifest always
	// produces the same bytes (and therefore the same hash).
	manifestData, err := json.Marshal(manifest)
	if err != nil {
		return fmt.Errorf("failed to marshal manifest: %w", err)
	}
	// Hash the manifest JSON
	hash := sha256.Sum256(manifestData)
	hashHex := hex.EncodeToString(hash[:])
	// Call rw sign <hash> --chain evm
	cmd := exec.Command("rw", "sign", hashHex, "--chain", "evm")
	var stdout, stderr strings.Builder
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("rw sign failed: %w\n%s", err, stderr.String())
	}
	signature := strings.TrimSpace(stdout.String())
	if signature == "" {
		return fmt.Errorf("rw sign produced empty signature")
	}
	// Write signature file
	sigPath := filepath.Join(b.tmpDir, "manifest.sig")
	if err := os.WriteFile(sigPath, []byte(signature), 0644); err != nil {
		return fmt.Errorf("failed to write manifest.sig: %w", err)
	}
	fmt.Printf(" Manifest signed (SHA256: %s...)\n", hashHex[:16])
	return nil
}
// addDirToTar adds all files in a directory to the tar archive under the
// given prefix. Directories become explicit entries with mode 0755; regular
// files delegate to addFileToTar.
//
// Fix: entry names are normalized with filepath.ToSlash — the tar format
// requires forward-slash separated names, while filepath.Join uses the host
// OS separator (backslashes on Windows would corrupt entry names).
func addDirToTar(tw *tar.Writer, srcDir, prefix string) error {
	return filepath.Walk(srcDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// Calculate path relative to the directory being archived.
		relPath, err := filepath.Rel(srcDir, path)
		if err != nil {
			return err
		}
		// Tar archives always use '/' as the separator, regardless of OS.
		tarPath := filepath.ToSlash(filepath.Join(prefix, relPath))
		if info.IsDir() {
			header := &tar.Header{
				Name:     tarPath + "/",
				Mode:     0755,
				Typeflag: tar.TypeDir,
			}
			return tw.WriteHeader(header)
		}
		return addFileToTar(tw, path, tarPath)
	})
}
// addFileToTar adds a single file to the tar archive.
func addFileToTar(tw *tar.Writer, srcPath, tarPath string) error {
f, err := os.Open(srcPath)
if err != nil {
return err
}
defer f.Close()
info, err := f.Stat()
if err != nil {
return err
}
header := &tar.Header{
Name: tarPath,
Size: info.Size(),
Mode: int64(info.Mode()),
}
if err := tw.WriteHeader(header); err != nil {
return err
}
_, err = io.Copy(tw, f)
return err
}
// sha256File computes the SHA256 hash of a file.
func sha256File(path string) (string, error) {
f, err := os.Open(path)
if err != nil {
return "", err
}
defer f.Close()
h := sha256.New()
if _, err := io.Copy(h, f); err != nil {
return "", err
}
return hex.EncodeToString(h.Sum(nil)), nil
}
// downloadFile downloads a URL to a local file path.
func downloadFile(url, destPath string) error {
client := &http.Client{Timeout: 5 * time.Minute}
resp, err := client.Get(url)
if err != nil {
return fmt.Errorf("failed to download %s: %w", url, err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("download %s returned status %d", url, resp.StatusCode)
}
f, err := os.Create(destPath)
if err != nil {
return err
}
defer f.Close()
_, err = io.Copy(f, resp.Body)
return err
}
// extractFileFromTarball extracts a single file from a tar.gz archive.
func extractFileFromTarball(tarPath, targetFile, destPath string) error {
f, err := os.Open(tarPath)
if err != nil {
return err
}
defer f.Close()
gr, err := gzip.NewReader(f)
if err != nil {
return err
}
defer gr.Close()
tr := tar.NewReader(gr)
for {
header, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
// Match the target file (strip leading ./ if present)
name := strings.TrimPrefix(header.Name, "./")
if name == targetFile {
out, err := os.OpenFile(destPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755)
if err != nil {
return err
}
defer out.Close()
if _, err := io.Copy(out, tr); err != nil {
return err
}
return nil
}
}
return fmt.Errorf("file %s not found in archive %s", targetFile, tarPath)
}
// formatBytes renders a byte count as a human-readable binary-unit string,
// e.g. "512 B", "1.5 KB", "2.0 MB". Units step by 1024.
func formatBytes(b int64) string {
	const unit = int64(1024)
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}
	const suffixes = "KMGTPE"
	value := float64(b)
	idx := 0
	// Divide down (each division by 1024 is exact in float64) until the
	// remaining value is below one more unit step.
	for value >= float64(unit*unit) && idx < len(suffixes)-1 {
		value /= float64(unit)
		idx++
	}
	return fmt.Sprintf("%.1f %cB", value/float64(unit), suffixes[idx])
}

View File

@ -1,829 +0,0 @@
package build
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/DeBrosOfficial/network/pkg/constants"
)
// oramaBinary defines a binary to cross-compile from the project source.
// One entry per command package; see buildOramaBinaries for how entries
// are compiled.
type oramaBinary struct {
	Name    string // output binary name, written into the staging bin/ directory
	Package string // Go package path relative to the project root (e.g. "./cmd/cli/")
	// ExtraLDFlags, when non-empty, is used INSTEAD of the standard ldflags
	// for this binary (it replaces them rather than extending them).
	ExtraLDFlags string
}
// Builder orchestrates the entire build process: cross-compiling first-party
// binaries, building third-party components, downloading prebuilt tools, and
// packaging everything into a (optionally signed) tar.gz archive.
type Builder struct {
	flags      *Flags
	projectDir string // project root containing go.mod and cmd/cli
	tmpDir     string // scratch directory; removed when Build returns
	binDir     string // tmpDir/bin — destination for all built binaries
	version    string // from the Makefile VERSION variable, or "dev"
	commit     string // short git commit hash, or "unknown"
	date       string // build timestamp, UTC, "2006-01-02T15:04:05Z" layout
}
// NewBuilder creates a new Builder for the given build flags. The remaining
// fields are populated by Build itself.
func NewBuilder(flags *Flags) *Builder {
	b := &Builder{}
	b.flags = flags
	return b
}
// Build runs the full build pipeline: it cross-compiles all first-party and
// third-party binaries, downloads prebuilt tools, stages systemd templates,
// generates (and optionally signs) a manifest, and packs everything into a
// tar.gz archive. Steps run strictly in order — later steps consume files
// that earlier steps leave in b.binDir / b.tmpDir.
//
// All intermediate artifacts live in a temp directory removed on return;
// only the final archive survives (default /tmp/orama-<version>-linux-<arch>.tar.gz,
// overridable via --output).
//
// NOTE(review): the step banners printed by the helpers say [n/8] although
// the pipeline below has 12 steps — the numbering is stale.
func (b *Builder) Build() error {
	start := time.Now()
	// Find project root (directory containing go.mod and cmd/cli).
	projectDir, err := findProjectRoot()
	if err != nil {
		return err
	}
	b.projectDir = projectDir
	// Resolve build metadata that gets baked into binaries via ldflags.
	b.version = b.readVersion()
	b.commit = b.readCommit()
	b.date = time.Now().UTC().Format("2006-01-02T15:04:05Z")
	// Create temp build directory (scratch area; removed on return).
	b.tmpDir, err = os.MkdirTemp("", "orama-build-*")
	if err != nil {
		return fmt.Errorf("failed to create temp dir: %w", err)
	}
	defer os.RemoveAll(b.tmpDir)
	b.binDir = filepath.Join(b.tmpDir, "bin")
	if err := os.MkdirAll(b.binDir, 0755); err != nil {
		return fmt.Errorf("failed to create bin dir: %w", err)
	}
	fmt.Printf("Building orama %s for linux/%s\n", b.version, b.flags.Arch)
	fmt.Printf("Project: %s\n\n", b.projectDir)
	// Step 1: Cross-compile Orama binaries
	if err := b.buildOramaBinaries(); err != nil {
		return fmt.Errorf("failed to build orama binaries: %w", err)
	}
	// Step 2: Cross-compile Vault Guardian (Zig)
	if err := b.buildVaultGuardian(); err != nil {
		return fmt.Errorf("failed to build vault-guardian: %w", err)
	}
	// Step 3: Cross-compile Olric
	if err := b.buildOlric(); err != nil {
		return fmt.Errorf("failed to build olric: %w", err)
	}
	// Step 4: Cross-compile IPFS Cluster
	if err := b.buildIPFSCluster(); err != nil {
		return fmt.Errorf("failed to build ipfs-cluster: %w", err)
	}
	// Step 5: Build CoreDNS with RQLite plugin
	if err := b.buildCoreDNS(); err != nil {
		return fmt.Errorf("failed to build coredns: %w", err)
	}
	// Step 6: Build Caddy with Orama DNS module
	if err := b.buildCaddy(); err != nil {
		return fmt.Errorf("failed to build caddy: %w", err)
	}
	// Step 7: Download pre-built IPFS Kubo
	if err := b.downloadIPFS(); err != nil {
		return fmt.Errorf("failed to download ipfs: %w", err)
	}
	// Step 8: Download pre-built RQLite
	if err := b.downloadRQLite(); err != nil {
		return fmt.Errorf("failed to download rqlite: %w", err)
	}
	// Step 9: Copy systemd templates
	if err := b.copySystemdTemplates(); err != nil {
		return fmt.Errorf("failed to copy systemd templates: %w", err)
	}
	// Step 10: Generate manifest (checksums of everything staged in bin/)
	manifest, err := b.generateManifest()
	if err != nil {
		return fmt.Errorf("failed to generate manifest: %w", err)
	}
	// Step 11: Sign manifest (optional; requires the rw CLI)
	if b.flags.Sign {
		if err := b.signManifest(manifest); err != nil {
			return fmt.Errorf("failed to sign manifest: %w", err)
		}
	}
	// Step 12: Create archive at the requested (or default) output path.
	outputPath := b.flags.Output
	if outputPath == "" {
		outputPath = fmt.Sprintf("/tmp/orama-%s-linux-%s.tar.gz", b.version, b.flags.Arch)
	}
	if err := b.createArchive(outputPath, manifest); err != nil {
		return fmt.Errorf("failed to create archive: %w", err)
	}
	elapsed := time.Since(start).Round(time.Second)
	fmt.Printf("\nBuild complete in %s\n", elapsed)
	fmt.Printf("Archive: %s\n", outputPath)
	return nil
}
// buildOramaBinaries cross-compiles every first-party Orama binary into the
// staging bin directory, stamping version/commit/date via -ldflags. The
// gateway binary additionally receives its package-level build metadata.
func (b *Builder) buildOramaBinaries() error {
	fmt.Println("[1/8] Cross-compiling Orama binaries...")
	ldflags := fmt.Sprintf("-s -w -X 'main.version=%s' -X 'main.commit=%s' -X 'main.date=%s'",
		b.version, b.commit, b.date)
	gatewayLDFlags := fmt.Sprintf("%s -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildVersion=%s' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildCommit=%s' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildTime=%s'",
		ldflags, b.version, b.commit, b.date)
	targets := []oramaBinary{
		{Name: "orama", Package: "./cmd/cli/"},
		{Name: "orama-node", Package: "./cmd/node/"},
		{Name: "gateway", Package: "./cmd/gateway/", ExtraLDFlags: gatewayLDFlags},
		{Name: "identity", Package: "./cmd/identity/"},
		{Name: "sfu", Package: "./cmd/sfu/"},
		{Name: "turn", Package: "./cmd/turn/"},
	}
	for _, target := range targets {
		// ExtraLDFlags, when set, replaces the standard flag set entirely.
		effective := ldflags
		if target.ExtraLDFlags != "" {
			effective = target.ExtraLDFlags
		}
		outPath := filepath.Join(b.binDir, target.Name)
		buildCmd := exec.Command("go", "build",
			"-ldflags", effective,
			"-trimpath",
			"-o", outPath,
			target.Package)
		buildCmd.Dir = b.projectDir
		buildCmd.Env = b.crossEnv()
		buildCmd.Stdout = os.Stdout
		buildCmd.Stderr = os.Stderr
		if b.flags.Verbose {
			fmt.Printf(" go build -o %s %s\n", target.Name, target.Package)
		}
		if err := buildCmd.Run(); err != nil {
			return fmt.Errorf("failed to build %s: %w", target.Name, err)
		}
		fmt.Printf(" ✓ %s\n", target.Name)
	}
	return nil
}
// buildVaultGuardian cross-compiles the Vault Guardian binary with Zig and
// copies it into the staging bin directory. The vault source is expected in
// an `orama-vault` checkout that is a sibling of this project's root.
func (b *Builder) buildVaultGuardian() error {
	fmt.Println("[2/8] Cross-compiling Vault Guardian (Zig)...")
	// Ensure zig is available
	if _, err := exec.LookPath("zig"); err != nil {
		return fmt.Errorf("zig not found in PATH — install from https://ziglang.org/download/")
	}
	// Vault source is sibling to orama project; build.zig is the marker file.
	vaultDir := filepath.Join(b.projectDir, "..", "orama-vault")
	if _, err := os.Stat(filepath.Join(vaultDir, "build.zig")); err != nil {
		return fmt.Errorf("vault source not found at %s — expected orama-vault as sibling directory: %w", vaultDir, err)
	}
	// Map Go arch to Zig target triple. The musl targets produce a fully
	// static binary.
	var zigTarget string
	switch b.flags.Arch {
	case "amd64":
		zigTarget = "x86_64-linux-musl"
	case "arm64":
		zigTarget = "aarch64-linux-musl"
	default:
		return fmt.Errorf("unsupported architecture for vault: %s", b.flags.Arch)
	}
	if b.flags.Verbose {
		fmt.Printf(" zig build -Dtarget=%s -Doptimize=ReleaseSafe\n", zigTarget)
	}
	cmd := exec.Command("zig", "build",
		fmt.Sprintf("-Dtarget=%s", zigTarget),
		"-Doptimize=ReleaseSafe")
	cmd.Dir = vaultDir
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("zig build failed: %w", err)
	}
	// Copy output binary to build bin dir (zig writes it to zig-out/bin/).
	src := filepath.Join(vaultDir, "zig-out", "bin", "vault-guardian")
	dst := filepath.Join(b.binDir, "vault-guardian")
	if err := copyFile(src, dst); err != nil {
		return fmt.Errorf("failed to copy vault-guardian binary: %w", err)
	}
	fmt.Println(" ✓ vault-guardian")
	return nil
}
// copyFile copies a file from src to dst, preserving executable permissions.
func copyFile(src, dst string) error {
srcFile, err := os.Open(src)
if err != nil {
return err
}
defer srcFile.Close()
dstFile, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755)
if err != nil {
return err
}
defer dstFile.Close()
if _, err := srcFile.WriteTo(dstFile); err != nil {
return err
}
return nil
}
// buildOlric cross-compiles the olric-server binary at the version pinned in
// constants.OlricVersion.
//
// `go install` cannot cross-compile into a custom output location, so a
// throwaway module is created, the olric command is added with `go get`,
// and the binary is built with `go build -o` under the cross-compile env.
//
// Fix: the final package argument was wrapped in fmt.Sprintf with no format
// verbs (flagged by go vet / staticcheck S1039); it is now a plain literal.
func (b *Builder) buildOlric() error {
	fmt.Printf("[3/8] Cross-compiling Olric %s...\n", constants.OlricVersion)
	tmpDir, err := os.MkdirTemp("", "olric-build-*")
	if err != nil {
		return fmt.Errorf("create temp dir: %w", err)
	}
	defer os.RemoveAll(tmpDir)
	modInit := exec.Command("go", "mod", "init", "olric-build")
	modInit.Dir = tmpDir
	modInit.Stderr = os.Stderr
	if err := modInit.Run(); err != nil {
		return fmt.Errorf("go mod init: %w", err)
	}
	modGet := exec.Command("go", "get",
		fmt.Sprintf("github.com/olric-data/olric/cmd/olric-server@%s", constants.OlricVersion))
	modGet.Dir = tmpDir
	modGet.Env = append(os.Environ(),
		"GOPROXY=https://proxy.golang.org|direct",
		"GONOSUMDB=*")
	modGet.Stderr = os.Stderr
	if err := modGet.Run(); err != nil {
		return fmt.Errorf("go get olric: %w", err)
	}
	cmd := exec.Command("go", "build",
		"-ldflags", "-s -w",
		"-trimpath",
		"-o", filepath.Join(b.binDir, "olric-server"),
		"github.com/olric-data/olric/cmd/olric-server")
	cmd.Dir = tmpDir
	cmd.Env = append(b.crossEnv(),
		"GOPROXY=https://proxy.golang.org|direct",
		"GONOSUMDB=*")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return err
	}
	fmt.Println(" ✓ olric-server")
	return nil
}
// buildIPFSCluster cross-compiles ipfs-cluster-service at the version pinned
// in constants.IPFSClusterVersion. A throwaway module is used because
// `go install` cannot cross-compile into a custom output location.
func (b *Builder) buildIPFSCluster() error {
	fmt.Printf("[4/8] Cross-compiling IPFS Cluster %s...\n", constants.IPFSClusterVersion)
	workDir, err := os.MkdirTemp("", "ipfs-cluster-build-*")
	if err != nil {
		return fmt.Errorf("create temp dir: %w", err)
	}
	defer os.RemoveAll(workDir)
	initCmd := exec.Command("go", "mod", "init", "ipfs-cluster-build")
	initCmd.Dir = workDir
	initCmd.Stderr = os.Stderr
	if err := initCmd.Run(); err != nil {
		return fmt.Errorf("go mod init: %w", err)
	}
	pinned := fmt.Sprintf("github.com/ipfs-cluster/ipfs-cluster/cmd/ipfs-cluster-service@%s", constants.IPFSClusterVersion)
	getCmd := exec.Command("go", "get", pinned)
	getCmd.Dir = workDir
	getCmd.Env = append(os.Environ(),
		"GOPROXY=https://proxy.golang.org|direct",
		"GONOSUMDB=*")
	getCmd.Stderr = os.Stderr
	if err := getCmd.Run(); err != nil {
		return fmt.Errorf("go get ipfs-cluster: %w", err)
	}
	buildCmd := exec.Command("go", "build",
		"-ldflags", "-s -w",
		"-trimpath",
		"-o", filepath.Join(b.binDir, "ipfs-cluster-service"),
		"github.com/ipfs-cluster/ipfs-cluster/cmd/ipfs-cluster-service")
	buildCmd.Dir = workDir
	buildCmd.Env = append(b.crossEnv(),
		"GOPROXY=https://proxy.golang.org|direct",
		"GONOSUMDB=*")
	buildCmd.Stdout = os.Stdout
	buildCmd.Stderr = os.Stderr
	if err := buildCmd.Run(); err != nil {
		return err
	}
	fmt.Println(" ✓ ipfs-cluster-service")
	return nil
}
// buildCoreDNS builds CoreDNS from source at the pinned version with this
// project's RQLite plugin compiled in. The process: clone the release tag,
// drop the local plugin sources into plugin/rqlite, overwrite plugin.cfg so
// the chain includes rqlite, fetch the plugin's extra dependencies, run
// `go generate` to regenerate the plugin registry, then cross-compile a
// static binary for the target arch.
func (b *Builder) buildCoreDNS() error {
	fmt.Printf("[5/8] Building CoreDNS %s with RQLite plugin...\n", constants.CoreDNSVersion)
	buildDir := filepath.Join(b.tmpDir, "coredns-build")
	// Clone CoreDNS at the release tag (shallow — history not needed).
	fmt.Println(" Cloning CoreDNS...")
	cmd := exec.Command("git", "clone", "--depth", "1",
		"--branch", "v"+constants.CoreDNSVersion,
		"https://github.com/coredns/coredns.git", buildDir)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to clone coredns: %w", err)
	}
	// Copy the RQLite plugin's .go sources from this repo into the clone.
	pluginSrc := filepath.Join(b.projectDir, "pkg", "coredns", "rqlite")
	pluginDst := filepath.Join(buildDir, "plugin", "rqlite")
	if err := os.MkdirAll(pluginDst, 0755); err != nil {
		return err
	}
	entries, err := os.ReadDir(pluginSrc)
	if err != nil {
		return fmt.Errorf("failed to read rqlite plugin source at %s: %w", pluginSrc, err)
	}
	for _, entry := range entries {
		// Only flat .go files are copied; subdirectories are skipped.
		if entry.IsDir() || filepath.Ext(entry.Name()) != ".go" {
			continue
		}
		data, err := os.ReadFile(filepath.Join(pluginSrc, entry.Name()))
		if err != nil {
			return err
		}
		if err := os.WriteFile(filepath.Join(pluginDst, entry.Name()), data, 0644); err != nil {
			return err
		}
	}
	// Write plugin.cfg (same as build-linux-coredns.sh). The order of this
	// list is CoreDNS's plugin execution order; rqlite is appended last.
	pluginCfg := `metadata:metadata
cancel:cancel
tls:tls
reload:reload
nsid:nsid
bufsize:bufsize
root:root
bind:bind
debug:debug
trace:trace
ready:ready
health:health
pprof:pprof
prometheus:metrics
errors:errors
log:log
dnstap:dnstap
local:local
dns64:dns64
acl:acl
any:any
chaos:chaos
loadbalance:loadbalance
cache:cache
rewrite:rewrite
header:header
dnssec:dnssec
autopath:autopath
minimal:minimal
template:template
transfer:transfer
hosts:hosts
file:file
auto:auto
secondary:secondary
loop:loop
forward:forward
grpc:grpc
erratic:erratic
whoami:whoami
on:github.com/coredns/caddy/onevent
sign:sign
view:view
rqlite:rqlite
`
	if err := os.WriteFile(filepath.Join(buildDir, "plugin.cfg"), []byte(pluginCfg), 0644); err != nil {
		return err
	}
	// Fetch the extra modules the rqlite plugin imports.
	fmt.Println(" Adding dependencies...")
	goPath := os.Getenv("PATH")
	baseEnv := append(os.Environ(),
		"PATH="+goPath,
		"GOPROXY=https://proxy.golang.org|direct",
		"GONOSUMDB=*")
	for _, dep := range []string{"github.com/miekg/dns@latest", "go.uber.org/zap@latest"} {
		cmd := exec.Command("go", "get", dep)
		cmd.Dir = buildDir
		cmd.Env = baseEnv
		cmd.Stderr = os.Stderr
		if err := cmd.Run(); err != nil {
			return fmt.Errorf("failed to get %s: %w", dep, err)
		}
	}
	cmd = exec.Command("go", "mod", "tidy")
	cmd.Dir = buildDir
	cmd.Env = baseEnv
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("go mod tidy failed: %w", err)
	}
	// Regenerate the plugin registration code from plugin.cfg.
	fmt.Println(" Generating plugin code...")
	cmd = exec.Command("go", "generate")
	cmd.Dir = buildDir
	cmd.Env = baseEnv
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("go generate failed: %w", err)
	}
	// Cross-compile a static linux binary for the configured arch.
	fmt.Println(" Building binary...")
	cmd = exec.Command("go", "build",
		"-ldflags", "-s -w",
		"-trimpath",
		"-o", filepath.Join(b.binDir, "coredns"))
	cmd.Dir = buildDir
	cmd.Env = append(baseEnv,
		"GOOS=linux",
		fmt.Sprintf("GOARCH=%s", b.flags.Arch),
		"CGO_ENABLED=0")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("build failed: %w", err)
	}
	fmt.Println(" ✓ coredns")
	return nil
}
// buildCaddy builds Caddy at the pinned version with the Orama DNS provider
// module compiled in via xcaddy. The provider module is synthesized on the
// fly: a go.mod plus a provider.go generated by generateCaddyProviderCode.
func (b *Builder) buildCaddy() error {
	fmt.Printf("[6/8] Building Caddy %s with Orama DNS module...\n", constants.CaddyVersion)
	// Ensure xcaddy is available
	if _, err := exec.LookPath("xcaddy"); err != nil {
		return fmt.Errorf("xcaddy not found in PATH — install with: go install github.com/caddyserver/xcaddy/cmd/xcaddy@latest")
	}
	moduleDir := filepath.Join(b.tmpDir, "caddy-dns-orama")
	if err := os.MkdirAll(moduleDir, 0755); err != nil {
		return err
	}
	// Write go.mod for the synthetic provider module.
	// NOTE(review): CaddyVersion[2:] assumes the constant has the form
	// "2.x.y" so that `v2.%s` reassembles the full version — confirm the
	// constant's format before changing it.
	goMod := fmt.Sprintf(`module github.com/DeBrosOfficial/caddy-dns-orama
go 1.22
require (
github.com/caddyserver/caddy/v2 v2.%s
github.com/libdns/libdns v1.1.0
)
`, constants.CaddyVersion[2:])
	if err := os.WriteFile(filepath.Join(moduleDir, "go.mod"), []byte(goMod), 0644); err != nil {
		return err
	}
	// Write provider.go — read from the caddy installer's generated code
	// We inline the same provider code used by the VPS-side caddy installer
	providerCode := generateCaddyProviderCode()
	if err := os.WriteFile(filepath.Join(moduleDir, "provider.go"), []byte(providerCode), 0644); err != nil {
		return err
	}
	// go mod tidy resolves the provider module's dependency graph.
	cmd := exec.Command("go", "mod", "tidy")
	cmd.Dir = moduleDir
	cmd.Env = append(os.Environ(),
		"GOPROXY=https://proxy.golang.org|direct",
		"GONOSUMDB=*")
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("go mod tidy failed: %w", err)
	}
	// Build with xcaddy, replacing the module path with the local directory.
	fmt.Println(" Building binary...")
	cmd = exec.Command("xcaddy", "build",
		"v"+constants.CaddyVersion,
		"--with", "github.com/DeBrosOfficial/caddy-dns-orama="+moduleDir,
		"--output", filepath.Join(b.binDir, "caddy"))
	cmd.Env = append(os.Environ(),
		"GOOS=linux",
		fmt.Sprintf("GOARCH=%s", b.flags.Arch),
		"GOPROXY=https://proxy.golang.org|direct",
		"GONOSUMDB=*")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("xcaddy build failed: %w", err)
	}
	fmt.Println(" ✓ caddy")
	return nil
}
// downloadIPFS fetches the prebuilt IPFS Kubo release tarball from
// dist.ipfs.tech and extracts the ipfs binary into the staging bin dir.
func (b *Builder) downloadIPFS() error {
	fmt.Printf("[7/8] Downloading IPFS Kubo %s...\n", constants.IPFSKuboVersion)
	tarball := fmt.Sprintf("kubo_%s_linux-%s.tar.gz", constants.IPFSKuboVersion, b.flags.Arch)
	url := fmt.Sprintf("https://dist.ipfs.tech/kubo/%s/%s", constants.IPFSKuboVersion, tarball)
	archivePath := filepath.Join(b.tmpDir, tarball)
	if err := downloadFile(url, archivePath); err != nil {
		return err
	}
	// The Kubo tarball stores the binary at kubo/ipfs.
	if err := extractFileFromTarball(archivePath, "kubo/ipfs", filepath.Join(b.binDir, "ipfs")); err != nil {
		return err
	}
	fmt.Println(" ✓ ipfs")
	return nil
}
// downloadRQLite fetches the prebuilt RQLite release tarball from GitHub and
// extracts the rqlited binary into the staging bin dir.
func (b *Builder) downloadRQLite() error {
	fmt.Printf("[8/8] Downloading RQLite %s...\n", constants.RQLiteVersion)
	tarball := fmt.Sprintf("rqlite-v%s-linux-%s.tar.gz", constants.RQLiteVersion, b.flags.Arch)
	url := fmt.Sprintf("https://github.com/rqlite/rqlite/releases/download/v%s/%s", constants.RQLiteVersion, tarball)
	archivePath := filepath.Join(b.tmpDir, tarball)
	if err := downloadFile(url, archivePath); err != nil {
		return err
	}
	// The release tarball nests the binary under a versioned directory.
	nested := fmt.Sprintf("rqlite-v%s-linux-%s", constants.RQLiteVersion, b.flags.Arch)
	if err := extractFileFromTarball(archivePath, nested+"/rqlited", filepath.Join(b.binDir, "rqlited")); err != nil {
		return err
	}
	fmt.Println(" ✓ rqlited")
	return nil
}
// copySystemdTemplates copies every *.service unit file from the project's
// systemd/ directory into the staging area for inclusion in the archive.
func (b *Builder) copySystemdTemplates() error {
	srcDir := filepath.Join(b.projectDir, "systemd")
	dstDir := filepath.Join(b.tmpDir, "systemd")
	if err := os.MkdirAll(dstDir, 0755); err != nil {
		return err
	}
	entries, err := os.ReadDir(srcDir)
	if err != nil {
		return fmt.Errorf("failed to read systemd dir: %w", err)
	}
	for _, e := range entries {
		name := e.Name()
		// Only flat .service files are staged; anything else is skipped.
		if e.IsDir() || !strings.HasSuffix(name, ".service") {
			continue
		}
		content, err := os.ReadFile(filepath.Join(srcDir, name))
		if err != nil {
			return err
		}
		if err := os.WriteFile(filepath.Join(dstDir, name), content, 0644); err != nil {
			return err
		}
	}
	return nil
}
// crossEnv returns the current process environment augmented for static
// Linux cross-compilation at the configured target architecture.
func (b *Builder) crossEnv() []string {
	overrides := []string{
		"GOOS=linux",
		"GOARCH=" + b.flags.Arch,
		"CGO_ENABLED=0", // static binaries; no C toolchain required
	}
	return append(os.Environ(), overrides...)
}
// readVersion extracts the VERSION variable from the project Makefile,
// returning "dev" when the Makefile is missing or defines no VERSION.
//
// Fix: the previous implementation matched ANY variable whose name merely
// started with "VERSION" (e.g. VERSION_SUFFIX) and only understood ":="
// assignments. The key must now be exactly "VERSION", and plain "="
// assignments are accepted as well.
func (b *Builder) readVersion() string {
	data, err := os.ReadFile(filepath.Join(b.projectDir, "Makefile"))
	if err != nil {
		return "dev"
	}
	for _, line := range strings.Split(string(data), "\n") {
		line = strings.TrimSpace(line)
		if !strings.HasPrefix(line, "VERSION") {
			continue
		}
		// Try ":=" first so "VERSION := x" doesn't split on the "=" alone.
		for _, sep := range []string{":=", "="} {
			parts := strings.SplitN(line, sep, 2)
			if len(parts) == 2 && strings.TrimSpace(parts[0]) == "VERSION" {
				return strings.TrimSpace(parts[1])
			}
		}
	}
	return "dev"
}
// readCommit returns the short git commit hash of the project checkout, or
// "unknown" when git is unavailable or the directory is not a repository.
func (b *Builder) readCommit() string {
	gitCmd := exec.Command("git", "rev-parse", "--short", "HEAD")
	gitCmd.Dir = b.projectDir
	out, err := gitCmd.Output()
	if err != nil {
		return "unknown"
	}
	return strings.TrimSpace(string(out))
}
// generateCaddyProviderCode returns the Caddy DNS provider Go source as a
// string. buildCaddy writes it to provider.go in a synthetic module that is
// then compiled into Caddy with xcaddy. This is the same code used by the
// VPS-side caddy installer; the provider implements libdns record append/
// delete by POSTing DNS-01 challenge values to the gateway's internal ACME
// API (present/cleanup endpoints).
//
// The returned string is verbatim Go source — do not edit it casually; it
// must compile inside the generated module against the caddy/libdns
// versions pinned in buildCaddy's go.mod.
func generateCaddyProviderCode() string {
	return `// Package orama implements a DNS provider for Caddy that uses the Orama Network
// gateway's internal ACME API for DNS-01 challenge validation.
package orama
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/libdns/libdns"
)
func init() {
caddy.RegisterModule(Provider{})
}
// Provider wraps the Orama DNS provider for Caddy.
type Provider struct {
// Endpoint is the URL of the Orama gateway's ACME API
// Default: http://localhost:6001/v1/internal/acme
Endpoint string ` + "`json:\"endpoint,omitempty\"`" + `
}
// CaddyModule returns the Caddy module information.
func (Provider) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "dns.providers.orama",
New: func() caddy.Module { return new(Provider) },
}
}
// Provision sets up the module.
func (p *Provider) Provision(ctx caddy.Context) error {
if p.Endpoint == "" {
p.Endpoint = "http://localhost:6001/v1/internal/acme"
}
return nil
}
// UnmarshalCaddyfile parses the Caddyfile configuration.
func (p *Provider) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
for d.Next() {
for d.NextBlock(0) {
switch d.Val() {
case "endpoint":
if !d.NextArg() {
return d.ArgErr()
}
p.Endpoint = d.Val()
default:
return d.Errf("unrecognized option: %s", d.Val())
}
}
}
return nil
}
// AppendRecords adds records to the zone.
func (p *Provider) AppendRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {
var added []libdns.Record
for _, rec := range records {
rr := rec.RR()
if rr.Type != "TXT" {
continue
}
fqdn := rr.Name + "." + zone
payload := map[string]string{"fqdn": fqdn, "value": rr.Data}
body, err := json.Marshal(payload)
if err != nil {
return added, fmt.Errorf("failed to marshal request: %w", err)
}
req, err := http.NewRequestWithContext(ctx, "POST", p.Endpoint+"/present", bytes.NewReader(body))
if err != nil {
return added, fmt.Errorf("failed to create request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
client := &http.Client{Timeout: 30 * time.Second}
resp, err := client.Do(req)
if err != nil {
return added, fmt.Errorf("failed to present challenge: %w", err)
}
resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return added, fmt.Errorf("present failed with status %d", resp.StatusCode)
}
added = append(added, rec)
}
return added, nil
}
// DeleteRecords removes records from the zone.
func (p *Provider) DeleteRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {
var deleted []libdns.Record
for _, rec := range records {
rr := rec.RR()
if rr.Type != "TXT" {
continue
}
fqdn := rr.Name + "." + zone
payload := map[string]string{"fqdn": fqdn, "value": rr.Data}
body, err := json.Marshal(payload)
if err != nil {
return deleted, fmt.Errorf("failed to marshal request: %w", err)
}
req, err := http.NewRequestWithContext(ctx, "POST", p.Endpoint+"/cleanup", bytes.NewReader(body))
if err != nil {
return deleted, fmt.Errorf("failed to create request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
client := &http.Client{Timeout: 30 * time.Second}
resp, err := client.Do(req)
if err != nil {
return deleted, fmt.Errorf("failed to cleanup challenge: %w", err)
}
resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return deleted, fmt.Errorf("cleanup failed with status %d", resp.StatusCode)
}
deleted = append(deleted, rec)
}
return deleted, nil
}
// GetRecords returns the records in the zone. Not used for ACME.
func (p *Provider) GetRecords(ctx context.Context, zone string) ([]libdns.Record, error) {
return nil, nil
}
// SetRecords sets the records in the zone. Not used for ACME.
func (p *Provider) SetRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {
return nil, nil
}
// Interface guards
var (
_ caddy.Module = (*Provider)(nil)
_ caddy.Provisioner = (*Provider)(nil)
_ caddyfile.Unmarshaler = (*Provider)(nil)
_ libdns.RecordAppender = (*Provider)(nil)
_ libdns.RecordDeleter = (*Provider)(nil)
_ libdns.RecordGetter = (*Provider)(nil)
_ libdns.RecordSetter = (*Provider)(nil)
)
`
}

View File

@ -1,82 +0,0 @@
package build
import (
"flag"
"fmt"
"os"
"path/filepath"
"runtime"
)
// Flags represents build command flags parsed from the CLI arguments.
type Flags struct {
	Arch    string // target GOARCH: "amd64" or "arm64" (default "amd64")
	Output  string // output archive path; empty means /tmp/orama-<version>-linux-<arch>.tar.gz
	Verbose bool   // print each go build invocation
	Sign    bool   // Sign the archive manifest with rootwallet (requires `rw` in PATH)
}
// Handle is the entry point for the build command. It parses args, runs the
// full build pipeline, and exits the process with status 1 on any error.
// A -h/--help request (flag.ErrHelp) returns silently after usage is shown.
func Handle(args []string) {
	flags, err := parseFlags(args)
	if err != nil {
		if err == flag.ErrHelp {
			return
		}
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}
	if err := NewBuilder(flags).Build(); err != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}
}
// parseFlags parses the build subcommand's arguments into a Flags value.
// Usage/parse errors are reported on stderr by the FlagSet itself.
func parseFlags(args []string) (*Flags, error) {
	var parsed Flags
	fs := flag.NewFlagSet("build", flag.ContinueOnError)
	fs.SetOutput(os.Stderr)
	fs.StringVar(&parsed.Arch, "arch", "amd64", "Target architecture (amd64, arm64)")
	fs.StringVar(&parsed.Output, "output", "", "Output archive path (default: /tmp/orama-<version>-linux-<arch>.tar.gz)")
	fs.BoolVar(&parsed.Verbose, "verbose", false, "Verbose output")
	fs.BoolVar(&parsed.Sign, "sign", false, "Sign the manifest with rootwallet (requires rw in PATH)")
	if err := fs.Parse(args); err != nil {
		return nil, err
	}
	return &parsed, nil
}
// findProjectRoot walks up from the current directory looking for a go.mod
// that belongs to the network project, identified by the presence of a
// cmd/cli directory alongside it.
func findProjectRoot() (string, error) {
	dir, err := os.Getwd()
	if err != nil {
		return "", err
	}
	for {
		_, modErr := os.Stat(filepath.Join(dir, "go.mod"))
		_, cliErr := os.Stat(filepath.Join(dir, "cmd", "cli"))
		if modErr == nil && cliErr == nil {
			return dir, nil
		}
		parent := filepath.Dir(dir)
		if parent == dir {
			// Reached the filesystem root without finding the project.
			return "", fmt.Errorf("could not find project root (no go.mod with cmd/cli found)")
		}
		dir = parent
	}
}
// detectHostArch returns the host architecture in Go naming convention
// (e.g. "amd64", "arm64"), as reported by the runtime package.
func detectHostArch() string {
	return runtime.GOARCH
}

View File

@ -1,80 +0,0 @@
package cluster
import (
"fmt"
"os"
)
// HandleCommand dispatches "orama cluster" subcommands to their handlers.
// With no arguments or "help" it prints usage; an unknown subcommand prints
// an error plus usage and exits with status 1.
func HandleCommand(args []string) {
	if len(args) == 0 {
		ShowHelp()
		return
	}
	subcommand, subargs := args[0], args[1:]
	// Dispatch table keyed by subcommand name.
	handlers := map[string]func([]string){
		"status": HandleStatus,
		"health": HandleHealth,
		"rqlite": HandleRQLite,
		"watch":  HandleWatch,
	}
	if handler, ok := handlers[subcommand]; ok {
		handler(subargs)
		return
	}
	if subcommand == "help" {
		ShowHelp()
		return
	}
	fmt.Fprintf(os.Stderr, "Unknown cluster subcommand: %s\n", subcommand)
	ShowHelp()
	os.Exit(1)
}
// hasFlag reports whether flag appears verbatim anywhere in args.
func hasFlag(args []string, flag string) bool {
	for i := range args {
		if args[i] == flag {
			return true
		}
	}
	return false
}
// getFlagValue returns the argument immediately following flag in args.
// It returns "" when the flag is absent or is the final argument (no value).
func getFlagValue(args []string, flag string) string {
	// Stop one short of the end: a trailing flag has no value.
	for i := 0; i+1 < len(args); i++ {
		if args[i] == flag {
			return args[i+1]
		}
	}
	return ""
}
// ShowHelp displays help information for cluster commands on stdout.
// Keep this text in sync with the subcommands dispatched by HandleCommand.
func ShowHelp() {
	fmt.Printf("Cluster Management Commands\n\n")
	fmt.Printf("Usage: orama cluster <subcommand> [options]\n\n")
	fmt.Printf("Subcommands:\n")
	fmt.Printf(" status - Show cluster node status (RQLite + Olric)\n")
	fmt.Printf(" Options:\n")
	fmt.Printf(" --all - SSH into all nodes from nodes.conf (TODO)\n")
	fmt.Printf(" health - Run cluster health checks\n")
	fmt.Printf(" rqlite <subcommand> - RQLite-specific commands\n")
	fmt.Printf(" status - Show detailed Raft state for local node\n")
	fmt.Printf(" voters - Show current voter list\n")
	fmt.Printf(" backup [--output FILE] - Trigger manual backup\n")
	fmt.Printf(" watch - Live cluster status monitor\n")
	fmt.Printf(" Options:\n")
	fmt.Printf(" --interval SECONDS - Refresh interval (default: 10)\n\n")
	fmt.Printf("Examples:\n")
	fmt.Printf(" orama cluster status\n")
	fmt.Printf(" orama cluster health\n")
	fmt.Printf(" orama cluster rqlite status\n")
	fmt.Printf(" orama cluster rqlite voters\n")
	fmt.Printf(" orama cluster rqlite backup --output /tmp/backup.db\n")
	fmt.Printf(" orama cluster watch --interval 5\n")
}

View File

@ -1,244 +0,0 @@
package cluster
import (
"fmt"
"os"
)
// checkResult represents the outcome of a single health check, as printed
// by the "orama cluster health" report.
type checkResult struct {
	Name   string // human-readable check name, e.g. "RQLite reachable"
	Status string // "PASS", "FAIL", "WARN"
	Detail string // one-line explanation shown next to the status
}
// HandleHealth handles the "orama cluster health" command.
func HandleHealth(args []string) {
fmt.Printf("Cluster Health Check\n")
fmt.Printf("====================\n\n")
var results []checkResult
// Check 1: RQLite reachable
status, err := queryRQLiteStatus()
if err != nil {
results = append(results, checkResult{
Name: "RQLite reachable",
Status: "FAIL",
Detail: fmt.Sprintf("Cannot connect to RQLite: %v", err),
})
printHealthResults(results)
os.Exit(1)
return
}
results = append(results, checkResult{
Name: "RQLite reachable",
Status: "PASS",
Detail: fmt.Sprintf("HTTP API responding on %s", status.HTTP.Address),
})
// Check 2: Raft state is leader or follower (not candidate or shutdown)
raftState := status.Store.Raft.State
switch raftState {
case "Leader", "Follower":
results = append(results, checkResult{
Name: "Raft state healthy",
Status: "PASS",
Detail: fmt.Sprintf("Node is %s", raftState),
})
case "Candidate":
results = append(results, checkResult{
Name: "Raft state healthy",
Status: "WARN",
Detail: "Node is Candidate (election in progress)",
})
default:
results = append(results, checkResult{
Name: "Raft state healthy",
Status: "FAIL",
Detail: fmt.Sprintf("Node is in unexpected state: %s", raftState),
})
}
// Check 3: Leader exists
if status.Store.Raft.Leader != "" {
results = append(results, checkResult{
Name: "Leader exists",
Status: "PASS",
Detail: fmt.Sprintf("Leader: %s", status.Store.Raft.Leader),
})
} else {
results = append(results, checkResult{
Name: "Leader exists",
Status: "FAIL",
Detail: "No leader detected in Raft cluster",
})
}
// Check 4: Applied index is advancing (commit == applied means caught up)
if status.Store.Raft.AppliedIndex >= status.Store.Raft.CommitIndex {
results = append(results, checkResult{
Name: "Log replication",
Status: "PASS",
Detail: fmt.Sprintf("Applied index (%d) >= commit index (%d)",
status.Store.Raft.AppliedIndex, status.Store.Raft.CommitIndex),
})
} else {
lag := status.Store.Raft.CommitIndex - status.Store.Raft.AppliedIndex
severity := "WARN"
if lag > 1000 {
severity = "FAIL"
}
results = append(results, checkResult{
Name: "Log replication",
Status: severity,
Detail: fmt.Sprintf("Applied index (%d) behind commit index (%d) by %d entries",
status.Store.Raft.AppliedIndex, status.Store.Raft.CommitIndex, lag),
})
}
// Check 5: Query nodes to validate cluster membership
nodes, err := queryRQLiteNodes(true)
if err != nil {
results = append(results, checkResult{
Name: "Cluster nodes reachable",
Status: "FAIL",
Detail: fmt.Sprintf("Cannot query /nodes: %v", err),
})
} else {
totalNodes := len(nodes)
voters := 0
nonVoters := 0
reachable := 0
leaders := 0
for _, node := range nodes {
if node.Voter {
voters++
} else {
nonVoters++
}
if node.Reachable {
reachable++
}
if node.Leader {
leaders++
}
}
// Check 5a: Node count
results = append(results, checkResult{
Name: "Cluster membership",
Status: "PASS",
Detail: fmt.Sprintf("%d nodes (%d voters, %d non-voters)", totalNodes, voters, nonVoters),
})
// Check 5b: All nodes reachable
if reachable == totalNodes {
results = append(results, checkResult{
Name: "All nodes reachable",
Status: "PASS",
Detail: fmt.Sprintf("%d/%d nodes reachable", reachable, totalNodes),
})
} else {
unreachable := totalNodes - reachable
results = append(results, checkResult{
Name: "All nodes reachable",
Status: "WARN",
Detail: fmt.Sprintf("%d/%d nodes reachable (%d unreachable)", reachable, totalNodes, unreachable),
})
}
// Check 5c: Exactly one leader
if leaders == 1 {
results = append(results, checkResult{
Name: "Single leader",
Status: "PASS",
Detail: "Exactly 1 leader in cluster",
})
} else if leaders == 0 {
results = append(results, checkResult{
Name: "Single leader",
Status: "FAIL",
Detail: "No leader found among nodes",
})
} else {
results = append(results, checkResult{
Name: "Single leader",
Status: "FAIL",
Detail: fmt.Sprintf("Multiple leaders detected: %d (split-brain?)", leaders),
})
}
// Check 5d: Quorum check (majority of voters must be reachable)
quorum := (voters / 2) + 1
reachableVoters := 0
for _, node := range nodes {
if node.Voter && node.Reachable {
reachableVoters++
}
}
if reachableVoters >= quorum {
results = append(results, checkResult{
Name: "Quorum healthy",
Status: "PASS",
Detail: fmt.Sprintf("%d/%d voters reachable (quorum requires %d)", reachableVoters, voters, quorum),
})
} else {
results = append(results, checkResult{
Name: "Quorum healthy",
Status: "FAIL",
Detail: fmt.Sprintf("%d/%d voters reachable (quorum requires %d)", reachableVoters, voters, quorum),
})
}
}
printHealthResults(results)
// Exit with non-zero if any failures
for _, r := range results {
if r.Status == "FAIL" {
os.Exit(1)
}
}
}
// printHealthResults prints the health check results in a formatted table.
//
// Each row shows a status indicator, the check name (padded to the widest
// name for alignment), and a detail message, followed by a one-line
// pass/fail/warn summary.
func printHealthResults(results []checkResult) {
	// Widest check name determines the padding of the name column.
	nameWidth := 0
	for _, res := range results {
		if n := len(res.Name); n > nameWidth {
			nameWidth = n
		}
	}

	for _, res := range results {
		// The three known statuses are their own indicator text; any
		// unknown status renders as blank padding.
		indicator := " "
		switch res.Status {
		case "PASS", "FAIL", "WARN":
			indicator = res.Status
		}
		fmt.Printf(" [%s] %-*s %s\n", indicator, nameWidth, res.Name, res.Detail)
	}
	fmt.Println()

	// Tally counts per status for the summary line.
	var pass, fail, warn int
	for _, res := range results {
		switch res.Status {
		case "PASS":
			pass++
		case "FAIL":
			fail++
		case "WARN":
			warn++
		}
	}
	fmt.Printf("Summary: %d passed, %d failed, %d warnings\n", pass, fail, warn)
}

View File

@ -1,187 +0,0 @@
package cluster
import (
	"fmt"
	"io"
	"net/http"
	"os"
	"sort"
	"strings"
	"time"
)
// HandleRQLite handles the "orama cluster rqlite" subcommand group.
//
// With no arguments (or "help") it prints usage; otherwise it dispatches
// to the matching subcommand handler and exits non-zero on an unknown one.
func HandleRQLite(args []string) {
	if len(args) == 0 {
		showRQLiteHelp()
		return
	}

	cmd, rest := args[0], args[1:]
	switch cmd {
	case "status":
		handleRQLiteStatus()
	case "voters":
		handleRQLiteVoters()
	case "backup":
		handleRQLiteBackup(rest)
	case "help":
		showRQLiteHelp()
	default:
		fmt.Fprintf(os.Stderr, "Unknown rqlite subcommand: %s\n", cmd)
		showRQLiteHelp()
		os.Exit(1)
	}
}
// handleRQLiteStatus shows detailed Raft state for the local node.
//
// It queries the local RQLite /status endpoint and prints node identity,
// Raft progress (state, term, indices, leader), replication lag, and
// uptime when reported. Exits with status 1 if RQLite is unreachable.
func handleRQLiteStatus() {
	fmt.Printf("RQLite Raft Status\n")
	fmt.Printf("==================\n\n")
	status, err := queryRQLiteStatus()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}
	// Static node identity and endpoints.
	fmt.Printf("Node Configuration\n")
	fmt.Printf(" Node ID: %s\n", status.Store.NodeID)
	fmt.Printf(" Raft Address: %s\n", status.Store.Address)
	fmt.Printf(" HTTP Address: %s\n", status.HTTP.Address)
	fmt.Printf(" Data Directory: %s\n", status.Store.Dir)
	fmt.Println()
	// Dynamic Raft consensus state.
	fmt.Printf("Raft State\n")
	fmt.Printf(" State: %s\n", strings.ToUpper(status.Store.Raft.State))
	fmt.Printf(" Current Term: %d\n", status.Store.Raft.Term)
	fmt.Printf(" Applied Index: %d\n", status.Store.Raft.AppliedIndex)
	fmt.Printf(" Commit Index: %d\n", status.Store.Raft.CommitIndex)
	fmt.Printf(" Leader: %s\n", status.Store.Raft.Leader)
	// Applied < commit means log entries are committed but not yet applied
	// to the local state machine.
	if status.Store.Raft.AppliedIndex < status.Store.Raft.CommitIndex {
		lag := status.Store.Raft.CommitIndex - status.Store.Raft.AppliedIndex
		fmt.Printf(" Replication Lag: %d entries behind\n", lag)
	} else {
		fmt.Printf(" Replication Lag: none (fully caught up)\n")
	}
	// Uptime may be absent; print only when set.
	if status.Node.Uptime != "" {
		fmt.Printf(" Uptime: %s\n", status.Node.Uptime)
	}
	fmt.Println()
}
// handleRQLiteVoters shows the current voter list from /nodes.
//
// Nodes are printed in ascending node-ID order so repeated invocations
// produce stable output (Go map iteration order is randomized, so the
// previous direct range over the map shuffled rows on every run).
// Exits with status 1 if the /nodes endpoint is unreachable.
func handleRQLiteVoters() {
	fmt.Printf("RQLite Cluster Voters\n")
	fmt.Printf("=====================\n\n")
	nodes, err := queryRQLiteNodes(true)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}

	// Sort node IDs for deterministic table output.
	ids := make([]string, 0, len(nodes))
	for id := range nodes {
		ids = append(ids, id)
	}
	sort.Strings(ids)

	voters := 0
	nonVoters := 0
	fmt.Printf("%-20s %-30s %-8s %-10s %-10s\n",
		"NODE ID", "ADDRESS", "ROLE", "LEADER", "REACHABLE")
	fmt.Printf("%-20s %-30s %-8s %-10s %-10s\n",
		strings.Repeat("-", 20),
		strings.Repeat("-", 30),
		strings.Repeat("-", 8),
		strings.Repeat("-", 10),
		strings.Repeat("-", 10))
	for _, id := range ids {
		node := nodes[id]
		// Truncate long IDs so the column width holds.
		nodeID := id
		if len(nodeID) > 20 {
			nodeID = nodeID[:17] + "..."
		}
		role := "non-voter"
		if node.Voter {
			role = "voter"
			voters++
		} else {
			nonVoters++
		}
		leader := "no"
		if node.Leader {
			leader = "yes"
		}
		reachable := "no"
		if node.Reachable {
			reachable = "yes"
		}
		fmt.Printf("%-20s %-30s %-8s %-10s %-10s\n",
			nodeID, node.Address, role, leader, reachable)
	}
	fmt.Printf("\nTotal: %d voters, %d non-voters\n", voters, nonVoters)
	// Raft quorum = strict majority of voting members.
	quorum := (voters / 2) + 1
	fmt.Printf("Quorum requirement: %d/%d voters\n", quorum, voters)
}
// handleRQLiteBackup triggers a manual backup via the RQLite backup endpoint.
//
// The destination path comes from --output, defaulting to a timestamped
// file name. Any failure (connect, HTTP status, file create, copy) is
// reported to stderr and terminates the process with status 1.
func handleRQLiteBackup(args []string) {
	dest := getFlagValue(args, "--output")
	if dest == "" {
		dest = fmt.Sprintf("rqlite-backup-%s.db", time.Now().Format("20060102-150405"))
	}

	fmt.Printf("RQLite Backup\n")
	fmt.Printf("=============\n\n")
	fmt.Printf("Requesting backup from %s/db/backup ...\n", rqliteBaseURL)

	// Backups can be large, so this request gets a generous 60s timeout.
	httpClient := &http.Client{Timeout: 60 * time.Second}
	resp, err := httpClient.Get(rqliteBaseURL + "/db/backup")
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: cannot connect to RQLite: %v\n", err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		msg, _ := io.ReadAll(resp.Body)
		fmt.Fprintf(os.Stderr, "Error: backup request returned HTTP %d: %s\n", resp.StatusCode, string(msg))
		os.Exit(1)
	}

	out, err := os.Create(dest)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: cannot create output file: %v\n", err)
		os.Exit(1)
	}
	defer out.Close()

	// Stream the body straight to disk instead of buffering it in memory.
	written, err := io.Copy(out, resp.Body)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: failed to write backup: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("Backup saved to: %s (%d bytes)\n", dest, written)
}
// showRQLiteHelp displays help for rqlite subcommands.
//
// Printed when no subcommand, "help", or an unknown subcommand is given.
func showRQLiteHelp() {
	fmt.Printf("RQLite Commands\n\n")
	fmt.Printf("Usage: orama cluster rqlite <subcommand> [options]\n\n")
	fmt.Printf("Subcommands:\n")
	fmt.Printf(" status - Show detailed Raft state for local node\n")
	fmt.Printf(" voters - Show current voter list from cluster\n")
	fmt.Printf(" backup - Trigger manual database backup\n")
	fmt.Printf(" Options:\n")
	fmt.Printf(" --output FILE - Output file path (default: rqlite-backup-<timestamp>.db)\n\n")
	fmt.Printf("Examples:\n")
	fmt.Printf(" orama cluster rqlite status\n")
	fmt.Printf(" orama cluster rqlite voters\n")
	fmt.Printf(" orama cluster rqlite backup --output /tmp/backup.db\n")
}

View File

@ -1,248 +0,0 @@
package cluster
import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"sort"
	"strings"
	"time"
)
const (
	// rqliteBaseURL is the local RQLite HTTP API endpoint used by all
	// status/nodes/backup helpers in this package.
	rqliteBaseURL = "http://localhost:5001"
	// httpTimeout bounds the status and nodes HTTP requests.
	httpTimeout = 10 * time.Second
)
// rqliteStatus represents the relevant fields from the RQLite /status
// endpoint. Only the subset of the payload this package reads is modeled.
type rqliteStatus struct {
	// Store mirrors the "store" object: Raft progress plus node identity.
	Store struct {
		Raft struct {
			State        string `json:"state"`         // e.g. "Leader", "Follower", "Candidate"
			AppliedIndex uint64 `json:"applied_index"` // last log index applied locally
			CommitIndex  uint64 `json:"commit_index"`  // highest committed log index
			Term         uint64 `json:"current_term"`
			Leader       string `json:"leader"` // empty when no leader is known
		} `json:"raft"`
		Dir     string `json:"dir"` // data directory path
		NodeID  string `json:"node_id"`
		Address string `json:"addr"` // Raft (inter-node) address
	} `json:"store"`
	// HTTP mirrors the "http" object (client-facing API address).
	HTTP struct {
		Address string `json:"addr"`
	} `json:"http"`
	// Node mirrors the "node" object; Uptime may be empty.
	Node struct {
		Uptime string `json:"uptime"`
	} `json:"node"`
}
// rqliteNode represents a node from the /nodes endpoint.
type rqliteNode struct {
	ID        string  `json:"id"`
	Address   string  `json:"addr"`
	Leader    bool    `json:"leader"`
	Voter     bool    `json:"voter"`
	Reachable bool    `json:"reachable"`
	Time      float64 `json:"time"`   // latency value; rendered as "%.3fs" when TimeS is absent
	TimeS     string  `json:"time_s"` // pre-formatted latency string, preferred for display
}
// HandleStatus handles the "orama cluster status" command.
//
// It prints local RQLite status, the cluster node table, and a
// best-effort Olric status. Each section degrades independently when
// its backing service is unreachable.
func HandleStatus(args []string) {
	// --all (SSH aggregation) is accepted but not implemented yet.
	if hasFlag(args, "--all") {
		fmt.Printf("Remote node aggregation via SSH is not yet implemented.\n")
		fmt.Printf("Currently showing local node status only.\n\n")
	}

	fmt.Printf("Cluster Status\n")
	fmt.Printf("==============\n\n")

	// Local RQLite status section.
	if status, err := queryRQLiteStatus(); err != nil {
		fmt.Fprintf(os.Stderr, "Error querying RQLite status: %v\n", err)
		fmt.Printf("RQLite may not be running on this node.\n\n")
	} else {
		printLocalStatus(status)
	}

	// Cluster membership table (voters and non-voters).
	if nodes, err := queryRQLiteNodes(true); err != nil {
		fmt.Fprintf(os.Stderr, "Error querying RQLite nodes: %v\n", err)
	} else {
		printNodesTable(nodes)
	}

	// Olric section is best-effort and never fails the command.
	printOlricStatus()
}
// queryRQLiteStatus queries the local RQLite /status endpoint and decodes
// the JSON payload into an rqliteStatus. Errors are wrapped with the step
// that failed (connect, status code, read, parse).
func queryRQLiteStatus() (*rqliteStatus, error) {
	httpClient := &http.Client{Timeout: httpTimeout}

	resp, err := httpClient.Get(rqliteBaseURL + "/status")
	if err != nil {
		return nil, fmt.Errorf("connect to RQLite: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode)
	}

	raw, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read response: %w", err)
	}

	out := &rqliteStatus{}
	if err := json.Unmarshal(raw, out); err != nil {
		return nil, fmt.Errorf("parse response: %w", err)
	}
	return out, nil
}
// queryRQLiteNodes queries the local RQLite /nodes endpoint and returns
// the cluster membership keyed by node ID.
// If includeNonVoters is true, appends ?nonvoters to the query so
// read-only replicas are included.
func queryRQLiteNodes(includeNonVoters bool) (map[string]*rqliteNode, error) {
	endpoint := rqliteBaseURL + "/nodes"
	if includeNonVoters {
		endpoint += "?nonvoters"
	}

	httpClient := &http.Client{Timeout: httpTimeout}
	resp, err := httpClient.Get(endpoint)
	if err != nil {
		return nil, fmt.Errorf("connect to RQLite: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode)
	}

	raw, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read response: %w", err)
	}

	var nodesByID map[string]*rqliteNode
	if err := json.Unmarshal(raw, &nodesByID); err != nil {
		return nil, fmt.Errorf("parse response: %w", err)
	}
	return nodesByID, nil
}
// printLocalStatus prints the local node's RQLite status.
//
// Output covers node identity, addresses, Raft progress, and uptime
// (when reported by the node).
func printLocalStatus(s *rqliteStatus) {
	fmt.Printf("Local Node\n")
	fmt.Printf(" Node ID: %s\n", s.Store.NodeID)
	fmt.Printf(" Raft Address: %s\n", s.Store.Address)
	fmt.Printf(" HTTP Address: %s\n", s.HTTP.Address)
	fmt.Printf(" Raft State: %s\n", strings.ToUpper(s.Store.Raft.State))
	fmt.Printf(" Raft Term: %d\n", s.Store.Raft.Term)
	fmt.Printf(" Applied Index: %d\n", s.Store.Raft.AppliedIndex)
	fmt.Printf(" Commit Index: %d\n", s.Store.Raft.CommitIndex)
	fmt.Printf(" Leader: %s\n", s.Store.Raft.Leader)
	// Uptime may be absent; print only when set.
	if s.Node.Uptime != "" {
		fmt.Printf(" Uptime: %s\n", s.Node.Uptime)
	}
	fmt.Println()
}
// printNodesTable prints a formatted table of all cluster nodes.
//
// Node IDs are sorted before printing so the table is deterministic
// across runs (Go map iteration order is randomized, so the previous
// direct range over the map reordered rows on every invocation).
func printNodesTable(nodes map[string]*rqliteNode) {
	if len(nodes) == 0 {
		fmt.Printf("No nodes found in cluster.\n\n")
		return
	}
	fmt.Printf("Cluster Nodes (%d total)\n", len(nodes))
	fmt.Printf("%-20s %-30s %-8s %-10s %-10s %-12s\n",
		"NODE ID", "ADDRESS", "VOTER", "LEADER", "REACHABLE", "LATENCY")
	fmt.Printf("%-20s %-30s %-8s %-10s %-10s %-12s\n",
		strings.Repeat("-", 20),
		strings.Repeat("-", 30),
		strings.Repeat("-", 8),
		strings.Repeat("-", 10),
		strings.Repeat("-", 10),
		strings.Repeat("-", 12))

	// Deterministic row order: sort the node IDs.
	ids := make([]string, 0, len(nodes))
	for id := range nodes {
		ids = append(ids, id)
	}
	sort.Strings(ids)

	for _, id := range ids {
		node := nodes[id]
		// Truncate long IDs to keep the column aligned.
		nodeID := id
		if len(nodeID) > 20 {
			nodeID = nodeID[:17] + "..."
		}
		voter := "no"
		if node.Voter {
			voter = "yes"
		}
		leader := "no"
		if node.Leader {
			leader = "yes"
		}
		reachable := "no"
		if node.Reachable {
			reachable = "yes"
		}
		// Prefer the pre-formatted latency string; fall back to the raw
		// value, then to a dash when neither is present.
		latency := "-"
		if node.TimeS != "" {
			latency = node.TimeS
		} else if node.Time > 0 {
			latency = fmt.Sprintf("%.3fs", node.Time)
		}
		fmt.Printf("%-20s %-30s %-8s %-10s %-10s %-12s\n",
			nodeID, node.Address, voter, leader, reachable, latency)
	}
	fmt.Println()
}
// printOlricStatus attempts to query the local Olric status endpoint.
//
// Best-effort: connection or read failures are reported but never abort
// the command. When the response is JSON, keys are printed in sorted
// order so output is deterministic (ranging over the decoded map would
// otherwise print keys in random order each run).
func printOlricStatus() {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get("http://localhost:3320/")
	if err != nil {
		fmt.Printf("Olric: not reachable on localhost:3320 (%v)\n\n", err)
		return
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		fmt.Printf("Olric: reachable but could not read response\n\n")
		return
	}
	if resp.StatusCode == http.StatusOK {
		fmt.Printf("Olric: reachable (HTTP %d)\n", resp.StatusCode)
		// Try to parse as JSON for a nicer display.
		var data map[string]interface{}
		if err := json.Unmarshal(body, &data); err == nil {
			// Sort keys for deterministic output.
			keys := make([]string, 0, len(data))
			for k := range data {
				keys = append(keys, k)
			}
			sort.Strings(keys)
			for _, k := range keys {
				fmt.Printf(" %s: %v\n", k, data[k])
			}
		} else {
			// Not JSON, print raw (truncated to 200 chars).
			raw := strings.TrimSpace(string(body))
			if len(raw) > 200 {
				raw = raw[:200] + "..."
			}
			if raw != "" {
				fmt.Printf(" Response: %s\n", raw)
			}
		}
	} else {
		fmt.Printf("Olric: reachable but returned HTTP %d\n", resp.StatusCode)
	}
	fmt.Println()
}

View File

@ -1,136 +0,0 @@
package cluster
import (
	"fmt"
	"os"
	"os/signal"
	"sort"
	"strconv"
	"strings"
	"syscall"
	"time"
)
// HandleWatch handles the "orama cluster watch" command.
// It polls RQLite status and nodes at a configurable interval and
// reprints a summary until interrupted.
func HandleWatch(args []string) {
	pollEvery := 10 * time.Second

	// Honor an optional --interval flag given in whole seconds (>= 1).
	if raw := getFlagValue(args, "--interval"); raw != "" {
		secs, err := strconv.Atoi(raw)
		if err != nil || secs < 1 {
			fmt.Fprintf(os.Stderr, "Error: --interval must be a positive integer (seconds)\n")
			os.Exit(1)
		}
		pollEvery = time.Duration(secs) * time.Second
	}

	// Exit cleanly on Ctrl+C / SIGTERM.
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)

	fmt.Printf("Watching cluster status (interval: %s, Ctrl+C to exit)\n\n", pollEvery)

	// Paint once immediately, then refresh on each tick.
	renderWatchScreen()

	ticker := time.NewTicker(pollEvery)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			renderWatchScreen()
		case <-sigCh:
			fmt.Printf("\nWatch stopped.\n")
			return
		}
	}
}
// renderWatchScreen clears the terminal and prints a summary of cluster state.
//
// Node rows are printed in sorted node-ID order so successive refreshes
// keep each node in a stable position (Go map iteration order is
// randomized, so the previous direct range made rows jump around
// between refreshes of a live screen).
func renderWatchScreen() {
	// Clear screen using ANSI escape codes.
	fmt.Print("\033[2J\033[H")
	now := time.Now().Format("2006-01-02 15:04:05")
	fmt.Printf("Cluster Watch [%s]\n", now)
	fmt.Printf("=======================================\n\n")
	// Query RQLite status; degrade gracefully when unreachable.
	status, err := queryRQLiteStatus()
	if err != nil {
		fmt.Printf("RQLite: UNREACHABLE (%v)\n\n", err)
	} else {
		fmt.Printf("Local Node: %s\n", status.Store.NodeID)
		fmt.Printf(" State: %-10s Term: %-6d Applied: %-8d Commit: %-8d\n",
			strings.ToUpper(status.Store.Raft.State),
			status.Store.Raft.Term,
			status.Store.Raft.AppliedIndex,
			status.Store.Raft.CommitIndex)
		fmt.Printf(" Leader: %s\n", status.Store.Raft.Leader)
		if status.Node.Uptime != "" {
			fmt.Printf(" Uptime: %s\n", status.Node.Uptime)
		}
		fmt.Println()
	}
	// Query nodes (including non-voters).
	nodes, err := queryRQLiteNodes(true)
	if err != nil {
		fmt.Printf("Nodes: UNAVAILABLE (%v)\n\n", err)
	} else {
		total := len(nodes)
		voters := 0
		reachable := 0
		for _, n := range nodes {
			if n.Voter {
				voters++
			}
			if n.Reachable {
				reachable++
			}
		}
		fmt.Printf("Cluster: %d nodes (%d voters), %d/%d reachable\n\n",
			total, voters, reachable, total)
		// Compact table header.
		fmt.Printf("%-18s %-28s %-7s %-7s %-7s\n",
			"ID", "ADDRESS", "VOTER", "LEADER", "UP")
		fmt.Printf("%-18s %-28s %-7s %-7s %-7s\n",
			strings.Repeat("-", 18),
			strings.Repeat("-", 28),
			strings.Repeat("-", 7),
			strings.Repeat("-", 7),
			strings.Repeat("-", 7))
		// Stable row order across refreshes: sort IDs before rendering.
		ids := make([]string, 0, len(nodes))
		for id := range nodes {
			ids = append(ids, id)
		}
		sort.Strings(ids)
		for _, id := range ids {
			node := nodes[id]
			nodeID := id
			if len(nodeID) > 18 {
				nodeID = nodeID[:15] + "..."
			}
			voter := " "
			if node.Voter {
				voter = "yes"
			}
			leader := " "
			if node.Leader {
				leader = "yes"
			}
			up := "no"
			if node.Reachable {
				up = "yes"
			}
			fmt.Printf("%-18s %-28s %-7s %-7s %-7s\n",
				nodeID, node.Address, voter, leader, up)
		}
	}
	fmt.Printf("\nPress Ctrl+C to exit\n")
}

View File

@ -1,23 +0,0 @@
package app
import (
"github.com/DeBrosOfficial/network/pkg/cli/deployments"
"github.com/spf13/cobra"
)
// Cmd is the root command for managing deployed applications (was "deployments").
var Cmd = &cobra.Command{
	Use:     "app",
	Aliases: []string{"apps"},
	Short:   "Manage deployed applications",
	Long:    `List, get, delete, rollback, and view logs/stats for your deployed applications.`,
}

// init attaches the deployment subcommands to the root "app" command.
func init() {
	Cmd.AddCommand(deployments.ListCmd)
	Cmd.AddCommand(deployments.GetCmd)
	Cmd.AddCommand(deployments.DeleteCmd)
	Cmd.AddCommand(deployments.RollbackCmd)
	Cmd.AddCommand(deployments.LogsCmd)
	Cmd.AddCommand(deployments.StatsCmd)
}

View File

@ -1,72 +0,0 @@
package authcmd
import (
"github.com/DeBrosOfficial/network/pkg/cli"
"github.com/spf13/cobra"
)
// Cmd is the root command for authentication.
var Cmd = &cobra.Command{
	Use:   "auth",
	Short: "Authentication management",
	Long: `Manage authentication with the Orama network.
Supports RootWallet (EVM) and Phantom (Solana) authentication methods.`,
}

// loginCmd authenticates with a wallet. Flag parsing is disabled so all
// arguments (including flags) are forwarded verbatim to the legacy
// cli.HandleAuthCommand handler.
var loginCmd = &cobra.Command{
	Use:   "login",
	Short: "Authenticate with wallet",
	Run: func(cmd *cobra.Command, args []string) {
		cli.HandleAuthCommand(append([]string{"login"}, args...))
	},
	DisableFlagParsing: true,
}

// logoutCmd clears stored credentials.
var logoutCmd = &cobra.Command{
	Use:   "logout",
	Short: "Clear stored credentials",
	Run: func(cmd *cobra.Command, args []string) {
		cli.HandleAuthCommand([]string{"logout"})
	},
}

// whoamiCmd shows the current authentication status.
var whoamiCmd = &cobra.Command{
	Use:   "whoami",
	Short: "Show current authentication status",
	Run: func(cmd *cobra.Command, args []string) {
		cli.HandleAuthCommand([]string{"whoami"})
	},
}

// statusCmd shows detailed authentication info.
var statusCmd = &cobra.Command{
	Use:   "status",
	Short: "Show detailed authentication info",
	Run: func(cmd *cobra.Command, args []string) {
		cli.HandleAuthCommand([]string{"status"})
	},
}

// listCmd lists all stored credentials.
var listCmd = &cobra.Command{
	Use:   "list",
	Short: "List all stored credentials",
	Run: func(cmd *cobra.Command, args []string) {
		cli.HandleAuthCommand([]string{"list"})
	},
}

// switchCmd switches between stored credentials.
var switchCmd = &cobra.Command{
	Use:   "switch",
	Short: "Switch between stored credentials",
	Run: func(cmd *cobra.Command, args []string) {
		cli.HandleAuthCommand([]string{"switch"})
	},
}

// init attaches all auth subcommands to the root command.
func init() {
	Cmd.AddCommand(loginCmd)
	Cmd.AddCommand(logoutCmd)
	Cmd.AddCommand(whoamiCmd)
	Cmd.AddCommand(statusCmd)
	Cmd.AddCommand(listCmd)
	Cmd.AddCommand(switchCmd)
}

View File

@ -1,24 +0,0 @@
package buildcmd
import (
"github.com/DeBrosOfficial/network/pkg/cli/build"
"github.com/spf13/cobra"
)
// Cmd is the top-level build command.
//
// Flag parsing is disabled: every argument is forwarded untouched to
// build.Handle, which performs its own parsing.
var Cmd = &cobra.Command{
	Use:   "build",
	Short: "Build pre-compiled binary archive for deployment",
	Long: `Cross-compile all Orama binaries and dependencies for Linux,
then package them into a deployment archive. The archive includes:
- Orama binaries (CLI, node, gateway, identity, SFU, TURN)
- Olric, IPFS Kubo, IPFS Cluster, RQLite, CoreDNS, Caddy
- Systemd namespace templates
- manifest.json with checksums
The resulting archive can be pushed to nodes with 'orama node push'.`,
	Run: func(cmd *cobra.Command, args []string) {
		build.Handle(args)
	},
	DisableFlagParsing: true,
}

View File

@ -1,74 +0,0 @@
package cluster
import (
origCluster "github.com/DeBrosOfficial/network/pkg/cli/cluster"
"github.com/spf13/cobra"
)
// Cmd is the root command for cluster operations (flattened from cluster rqlite).
var Cmd = &cobra.Command{
	Use:   "cluster",
	Short: "Cluster management and diagnostics",
	Long: `View cluster status, run health checks, manage RQLite Raft state,
and monitor the cluster in real-time.`,
}

// statusSubCmd shows cluster node status (RQLite + Olric).
var statusSubCmd = &cobra.Command{
	Use:   "status",
	Short: "Show cluster node status (RQLite + Olric)",
	Run: func(cmd *cobra.Command, args []string) {
		origCluster.HandleStatus(args)
	},
}

// healthSubCmd runs the cluster health checks.
var healthSubCmd = &cobra.Command{
	Use:   "health",
	Short: "Run cluster health checks",
	Run: func(cmd *cobra.Command, args []string) {
		origCluster.HandleHealth(args)
	},
}

// watchSubCmd starts the live status monitor; flag parsing is disabled
// so options like --interval reach the legacy handler untouched.
var watchSubCmd = &cobra.Command{
	Use:   "watch",
	Short: "Live cluster status monitor",
	Run: func(cmd *cobra.Command, args []string) {
		origCluster.HandleWatch(args)
	},
	DisableFlagParsing: true,
}

// Flattened rqlite commands (was cluster rqlite <cmd>).

// raftStatusCmd maps "raft-status" to the legacy "rqlite status" handler.
var raftStatusCmd = &cobra.Command{
	Use:   "raft-status",
	Short: "Show detailed Raft state for local node",
	Run: func(cmd *cobra.Command, args []string) {
		origCluster.HandleRQLite([]string{"status"})
	},
}

// votersCmd maps "voters" to the legacy "rqlite voters" handler.
var votersCmd = &cobra.Command{
	Use:   "voters",
	Short: "Show current voter list",
	Run: func(cmd *cobra.Command, args []string) {
		origCluster.HandleRQLite([]string{"voters"})
	},
}

// backupCmd maps "backup" to the legacy "rqlite backup" handler; flag
// parsing is disabled so --output passes through verbatim.
var backupCmd = &cobra.Command{
	Use:   "backup",
	Short: "Trigger manual RQLite backup",
	Run: func(cmd *cobra.Command, args []string) {
		origCluster.HandleRQLite(append([]string{"backup"}, args...))
	},
	DisableFlagParsing: true,
}

// init attaches all cluster subcommands to the root command.
func init() {
	Cmd.AddCommand(statusSubCmd)
	Cmd.AddCommand(healthSubCmd)
	Cmd.AddCommand(watchSubCmd)
	Cmd.AddCommand(raftStatusCmd)
	Cmd.AddCommand(votersCmd)
	Cmd.AddCommand(backupCmd)
}

View File

@ -1,21 +0,0 @@
package dbcmd
import (
"github.com/DeBrosOfficial/network/pkg/cli/db"
"github.com/spf13/cobra"
)
// Cmd is the root command for database operations.
var Cmd = &cobra.Command{
	Use:   "db",
	Short: "Manage SQLite databases",
	Long:  `Create and manage per-namespace SQLite databases.`,
}

// init attaches the database subcommands (create/query/list/backup/backups).
func init() {
	Cmd.AddCommand(db.CreateCmd)
	Cmd.AddCommand(db.QueryCmd)
	Cmd.AddCommand(db.ListCmd)
	Cmd.AddCommand(db.BackupCmd)
	Cmd.AddCommand(db.BackupsCmd)
}

View File

@ -1,21 +0,0 @@
package deploy
import (
"github.com/DeBrosOfficial/network/pkg/cli/deployments"
"github.com/spf13/cobra"
)
// Cmd is the top-level deploy command (upsert: create or update).
var Cmd = &cobra.Command{
	Use:   "deploy",
	Short: "Deploy applications to the Orama network",
	Long: `Deploy static sites, Next.js apps, Go backends, and Node.js backends.
If a deployment with the same name exists, it will be updated.`,
}

// init attaches one deploy subcommand per supported application type.
func init() {
	Cmd.AddCommand(deployments.DeployStaticCmd)
	Cmd.AddCommand(deployments.DeployNextJSCmd)
	Cmd.AddCommand(deployments.DeployGoCmd)
	Cmd.AddCommand(deployments.DeployNodeJSCmd)
}

View File

@ -1,66 +0,0 @@
package envcmd
import (
"github.com/DeBrosOfficial/network/pkg/cli"
"github.com/spf13/cobra"
)
// Cmd is the root command for environment management.
var Cmd = &cobra.Command{
	Use:   "env",
	Short: "Manage environments",
	Long: `List, switch, add, and remove Orama network environments.
Available default environments: production, devnet, testnet.`,
}

// listCmd lists all available environments.
var listCmd = &cobra.Command{
	Use:   "list",
	Short: "List all available environments",
	Run: func(cmd *cobra.Command, args []string) {
		cli.HandleEnvCommand([]string{"list"})
	},
}

// currentCmd shows the active environment.
var currentCmd = &cobra.Command{
	Use:   "current",
	Short: "Show current active environment",
	Run: func(cmd *cobra.Command, args []string) {
		cli.HandleEnvCommand([]string{"current"})
	},
}

// useCmd switches to a different environment; "switch" is kept as an
// alias and is also the verb forwarded to the legacy handler.
var useCmd = &cobra.Command{
	Use:     "use <name>",
	Aliases: []string{"switch"},
	Short:   "Switch to a different environment",
	Args:    cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		cli.HandleEnvCommand(append([]string{"switch"}, args...))
	},
}

// addCmd registers a custom environment (name + gateway URL, optional description).
var addCmd = &cobra.Command{
	Use:   "add <name> <gateway_url> [description]",
	Short: "Add a custom environment",
	Args:  cobra.MinimumNArgs(2),
	Run: func(cmd *cobra.Command, args []string) {
		cli.HandleEnvCommand(append([]string{"add"}, args...))
	},
}

// removeCmd removes an environment by name.
var removeCmd = &cobra.Command{
	Use:   "remove <name>",
	Short: "Remove an environment",
	Args:  cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		cli.HandleEnvCommand(append([]string{"remove"}, args...))
	},
}

// init attaches all env subcommands to the root command.
func init() {
	Cmd.AddCommand(listCmd)
	Cmd.AddCommand(currentCmd)
	Cmd.AddCommand(useCmd)
	Cmd.AddCommand(addCmd)
	Cmd.AddCommand(removeCmd)
}

View File

@ -1,38 +0,0 @@
package functioncmd
import (
"github.com/DeBrosOfficial/network/pkg/cli/functions"
"github.com/spf13/cobra"
)
// Cmd is the top-level function command.
var Cmd = &cobra.Command{
	Use:   "function",
	Short: "Manage serverless functions",
	Long: `Deploy, invoke, and manage serverless functions on the Orama Network.
A function is a folder containing:
function.go your handler code (uses the fn SDK)
function.yaml configuration (name, memory, timeout, etc.)
Quick start:
orama function init my-function
cd my-function
orama function build
orama function deploy
orama function invoke my-function --data '{"name": "World"}'`,
}

// init attaches the function lifecycle subcommands (init/build/deploy/
// invoke) and the management subcommands (list/get/delete/logs/versions/
// secrets/triggers).
func init() {
	Cmd.AddCommand(functions.InitCmd)
	Cmd.AddCommand(functions.BuildCmd)
	Cmd.AddCommand(functions.DeployCmd)
	Cmd.AddCommand(functions.InvokeCmd)
	Cmd.AddCommand(functions.ListCmd)
	Cmd.AddCommand(functions.GetCmd)
	Cmd.AddCommand(functions.DeleteCmd)
	Cmd.AddCommand(functions.LogsCmd)
	Cmd.AddCommand(functions.VersionsCmd)
	Cmd.AddCommand(functions.SecretsCmd)
	Cmd.AddCommand(functions.TriggersCmd)
}

View File

@ -1,18 +0,0 @@
package inspectcmd
import (
"github.com/DeBrosOfficial/network/pkg/cli"
"github.com/spf13/cobra"
)
// Cmd is the inspect command for SSH-based cluster inspection.
//
// Flag parsing is disabled so every option reaches the legacy
// cli.HandleInspectCommand handler untouched.
var Cmd = &cobra.Command{
	Use:   "inspect",
	Short: "Inspect cluster health via SSH",
	Long: `SSH into cluster nodes and run health checks.
Supports AI-powered failure analysis and result export.`,
	Run: func(cmd *cobra.Command, args []string) {
		cli.HandleInspectCommand(args)
	},
	DisableFlagParsing: true, // Pass all flags through to existing handler
}

View File

@ -1,200 +0,0 @@
package monitorcmd
import (
"context"
"os"
"time"
"github.com/DeBrosOfficial/network/pkg/cli/monitor"
"github.com/DeBrosOfficial/network/pkg/cli/monitor/display"
"github.com/DeBrosOfficial/network/pkg/cli/monitor/tui"
"github.com/spf13/cobra"
)
// Cmd is the root monitor command.
//
// Without a subcommand it launches the interactive TUI (runLive).
var Cmd = &cobra.Command{
	Use:   "monitor",
	Short: "Monitor cluster health from your local machine",
	Long: `SSH into cluster nodes and display real-time health data.
Runs 'orama node report --json' on each node and aggregates results.
Without a subcommand, launches the interactive TUI.`,
	RunE: runLive,
}

// Shared persistent flags.
var (
	flagEnv    string // --env: target environment (required)
	flagJSON   bool   // --json: machine-readable output for one-shot subcommands
	flagNode   string // --node: restrict collection to a single host/IP
	flagConfig string // --config: path to nodes.conf
)

// init registers the persistent flags, marks --env required, and
// attaches every monitor subcommand.
func init() {
	Cmd.PersistentFlags().StringVar(&flagEnv, "env", "", "Environment: devnet, testnet, mainnet (required)")
	Cmd.PersistentFlags().BoolVar(&flagJSON, "json", false, "Machine-readable JSON output")
	Cmd.PersistentFlags().StringVar(&flagNode, "node", "", "Filter to specific node host/IP")
	Cmd.PersistentFlags().StringVar(&flagConfig, "config", "scripts/nodes.conf", "Path to nodes.conf")
	Cmd.MarkPersistentFlagRequired("env")
	Cmd.AddCommand(liveCmd)
	Cmd.AddCommand(clusterCmd)
	Cmd.AddCommand(nodeCmd)
	Cmd.AddCommand(serviceCmd)
	Cmd.AddCommand(meshCmd)
	Cmd.AddCommand(dnsCmd)
	Cmd.AddCommand(namespacesCmd)
	Cmd.AddCommand(alertsCmd)
	Cmd.AddCommand(reportCmd)
}
// ---------------------------------------------------------------------------
// Subcommands
// ---------------------------------------------------------------------------
// liveCmd launches the interactive TUI monitor (same as running the
// root "monitor" command with no subcommand).
var liveCmd = &cobra.Command{
	Use:   "live",
	Short: "Interactive TUI monitor",
	RunE:  runLive,
}

// Each one-shot subcommand below follows the same pattern: collect a
// single cluster snapshot, then render it either as JSON (--json) or
// as a human-readable table.

// clusterCmd prints a one-shot cluster overview.
var clusterCmd = &cobra.Command{
	Use:   "cluster",
	Short: "Cluster overview (one-shot)",
	RunE: func(cmd *cobra.Command, args []string) error {
		snap, err := collectSnapshot()
		if err != nil {
			return err
		}
		if flagJSON {
			return display.ClusterJSON(snap, os.Stdout)
		}
		return display.ClusterTable(snap, os.Stdout)
	},
}

// nodeCmd prints one-shot per-node health details.
var nodeCmd = &cobra.Command{
	Use:   "node",
	Short: "Per-node health details (one-shot)",
	RunE: func(cmd *cobra.Command, args []string) error {
		snap, err := collectSnapshot()
		if err != nil {
			return err
		}
		if flagJSON {
			return display.NodeJSON(snap, os.Stdout)
		}
		return display.NodeTable(snap, os.Stdout)
	},
}

// serviceCmd prints one-shot service status across the cluster.
var serviceCmd = &cobra.Command{
	Use:   "service",
	Short: "Service status across the cluster (one-shot)",
	RunE: func(cmd *cobra.Command, args []string) error {
		snap, err := collectSnapshot()
		if err != nil {
			return err
		}
		if flagJSON {
			return display.ServiceJSON(snap, os.Stdout)
		}
		return display.ServiceTable(snap, os.Stdout)
	},
}

// meshCmd prints one-shot mesh connectivity status.
var meshCmd = &cobra.Command{
	Use:   "mesh",
	Short: "Mesh connectivity status (one-shot)",
	RunE: func(cmd *cobra.Command, args []string) error {
		snap, err := collectSnapshot()
		if err != nil {
			return err
		}
		if flagJSON {
			return display.MeshJSON(snap, os.Stdout)
		}
		return display.MeshTable(snap, os.Stdout)
	},
}

// dnsCmd prints a one-shot DNS health overview.
var dnsCmd = &cobra.Command{
	Use:   "dns",
	Short: "DNS health overview (one-shot)",
	RunE: func(cmd *cobra.Command, args []string) error {
		snap, err := collectSnapshot()
		if err != nil {
			return err
		}
		if flagJSON {
			return display.DNSJSON(snap, os.Stdout)
		}
		return display.DNSTable(snap, os.Stdout)
	},
}

// namespacesCmd prints a one-shot namespace usage summary.
var namespacesCmd = &cobra.Command{
	Use:   "namespaces",
	Short: "Namespace usage summary (one-shot)",
	RunE: func(cmd *cobra.Command, args []string) error {
		snap, err := collectSnapshot()
		if err != nil {
			return err
		}
		if flagJSON {
			return display.NamespacesJSON(snap, os.Stdout)
		}
		return display.NamespacesTable(snap, os.Stdout)
	},
}

// alertsCmd prints one-shot active alerts and warnings.
var alertsCmd = &cobra.Command{
	Use:   "alerts",
	Short: "Active alerts and warnings (one-shot)",
	RunE: func(cmd *cobra.Command, args []string) error {
		snap, err := collectSnapshot()
		if err != nil {
			return err
		}
		if flagJSON {
			return display.AlertsJSON(snap, os.Stdout)
		}
		return display.AlertsTable(snap, os.Stdout)
	},
}

// reportCmd emits the full cluster report; output is always JSON.
var reportCmd = &cobra.Command{
	Use:   "report",
	Short: "Full cluster report (JSON)",
	RunE: func(cmd *cobra.Command, args []string) error {
		snap, err := collectSnapshot()
		if err != nil {
			return err
		}
		return display.FullReport(snap, os.Stdout)
	},
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
// collectSnapshot gathers a single cluster snapshot using the shared
// persistent flag values.
func collectSnapshot() (*monitor.ClusterSnapshot, error) {
	cfg := newConfig()
	return monitor.CollectOnce(context.Background(), cfg)
}

// newConfig builds a CollectorConfig from the persistent flag values,
// with a fixed 30-second collection timeout.
func newConfig() monitor.CollectorConfig {
	return monitor.CollectorConfig{
		ConfigPath: flagConfig,
		Env:        flagEnv,
		NodeFilter: flagNode,
		Timeout:    30 * time.Second,
	}
}

// runLive starts the interactive TUI monitor; it backs both the root
// command and the "live" subcommand.
func runLive(cmd *cobra.Command, args []string) error {
	cfg := newConfig()
	return tui.Run(cfg)
}

View File

@ -1,103 +0,0 @@
package namespacecmd
import (
"github.com/DeBrosOfficial/network/pkg/cli"
"github.com/spf13/cobra"
)
// Cmd is the root command for namespace management.
var Cmd = &cobra.Command{
Use: "namespace",
Aliases: []string{"ns"},
Short: "Manage namespaces",
Long: `List, delete, and repair namespaces on the Orama network.`,
}
var deleteCmd = &cobra.Command{
Use: "delete",
Short: "Delete the current namespace and all its resources",
Run: func(cmd *cobra.Command, args []string) {
forceFlag, _ := cmd.Flags().GetBool("force")
var cliArgs []string
cliArgs = append(cliArgs, "delete")
if forceFlag {
cliArgs = append(cliArgs, "--force")
}
cli.HandleNamespaceCommand(cliArgs)
},
}
var listCmd = &cobra.Command{
Use: "list",
Aliases: []string{"ls"},
Short: "List namespaces owned by the current wallet",
Run: func(cmd *cobra.Command, args []string) {
cli.HandleNamespaceCommand([]string{"list"})
},
}
var repairCmd = &cobra.Command{
Use: "repair <namespace>",
Short: "Repair an under-provisioned namespace cluster",
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
cli.HandleNamespaceCommand(append([]string{"repair"}, args...))
},
}
// enableCmd turns on a feature for a namespace, forwarding the feature name
// and optional --namespace flag to the shared CLI handler.
var enableCmd = &cobra.Command{
	Use:   "enable <feature>",
	Short: "Enable a feature for a namespace",
	Long:  "Enable a feature for a namespace. Supported features: webrtc",
	Args:  cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		forward := []string{"enable", args[0]}
		if namespace, _ := cmd.Flags().GetString("namespace"); namespace != "" {
			forward = append(forward, "--namespace", namespace)
		}
		cli.HandleNamespaceCommand(forward)
	},
}
// disableCmd turns off a feature for a namespace, forwarding the feature
// name and optional --namespace flag to the shared CLI handler.
var disableCmd = &cobra.Command{
	Use:   "disable <feature>",
	Short: "Disable a feature for a namespace",
	Long:  "Disable a feature for a namespace. Supported features: webrtc",
	Args:  cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		forward := []string{"disable", args[0]}
		if namespace, _ := cmd.Flags().GetString("namespace"); namespace != "" {
			forward = append(forward, "--namespace", namespace)
		}
		cli.HandleNamespaceCommand(forward)
	},
}
// webrtcStatusCmd reports the WebRTC service status for a namespace,
// forwarding the optional --namespace flag to the shared CLI handler.
var webrtcStatusCmd = &cobra.Command{
	Use:   "webrtc-status",
	Short: "Show WebRTC service status for a namespace",
	Run: func(cmd *cobra.Command, args []string) {
		forward := []string{"webrtc-status"}
		if namespace, _ := cmd.Flags().GetString("namespace"); namespace != "" {
			forward = append(forward, "--namespace", namespace)
		}
		cli.HandleNamespaceCommand(forward)
	},
}
// init declares the per-command flags and registers all namespace
// subcommands on the root Cmd.
func init() {
	deleteCmd.Flags().Bool("force", false, "Skip confirmation prompt")
	enableCmd.Flags().String("namespace", "", "Namespace name")
	disableCmd.Flags().String("namespace", "", "Namespace name")
	webrtcStatusCmd.Flags().String("namespace", "", "Namespace name")
	Cmd.AddCommand(listCmd)
	Cmd.AddCommand(deleteCmd)
	Cmd.AddCommand(repairCmd)
	Cmd.AddCommand(enableCmd)
	Cmd.AddCommand(disableCmd)
	Cmd.AddCommand(webrtcStatusCmd)
}

View File

@ -1,219 +0,0 @@
package namespacecmd
import (
"bufio"
"crypto/tls"
"fmt"
"io"
"net/http"
"os"
"strings"
"github.com/DeBrosOfficial/network/pkg/auth"
"github.com/spf13/cobra"
)
// rqliteCmd groups the export/import subcommands for the namespace's
// internal RQLite database.
var rqliteCmd = &cobra.Command{
	Use:   "rqlite",
	Short: "Manage the namespace's internal RQLite database",
	Long:  "Export and import the namespace's internal RQLite database (stores deployments, DNS records, API keys, etc.).",
}
// rqliteExportCmd downloads a SQLite snapshot of the namespace database;
// the work is done by rqliteExport.
var rqliteExportCmd = &cobra.Command{
	Use:   "export",
	Short: "Export the namespace's RQLite database to a local SQLite file",
	Long:  "Downloads a consistent SQLite snapshot of the namespace's internal RQLite database.",
	RunE:  rqliteExport,
}
// rqliteImportCmd replaces the namespace database with an uploaded SQLite
// file; the destructive work (with confirmation) is done by rqliteImport.
var rqliteImportCmd = &cobra.Command{
	Use:   "import",
	Short: "Import a SQLite dump into the namespace's RQLite (DESTRUCTIVE)",
	Long: `Replaces the namespace's entire RQLite database with the contents of the provided SQLite file.
WARNING: This is a destructive operation. All existing data in the namespace's RQLite
(deployments, DNS records, API keys, etc.) will be replaced with the imported file.`,
	RunE: rqliteImport,
}
// init declares the export/import flags and attaches the rqlite command
// tree to the namespace root command.
func init() {
	rqliteExportCmd.Flags().StringP("output", "o", "", "Output file path (default: rqlite-export.db)")
	rqliteImportCmd.Flags().StringP("input", "i", "", "Input SQLite file path")
	// --input is mandatory; cobra rejects the command without it.
	_ = rqliteImportCmd.MarkFlagRequired("input")
	rqliteCmd.AddCommand(rqliteExportCmd)
	rqliteCmd.AddCommand(rqliteImportCmd)
	Cmd.AddCommand(rqliteCmd)
}
// rqliteExport downloads a consistent SQLite snapshot of the namespace's
// internal RQLite database and writes it to a local file.
//
// The destination comes from --output (default "rqlite-export.db"). The
// request is authenticated with the bearer token from ORAMA_TOKEN or the
// stored credentials. On any write failure the partial file is removed.
func rqliteExport(cmd *cobra.Command, args []string) error {
	output, _ := cmd.Flags().GetString("output")
	if output == "" {
		output = "rqlite-export.db"
	}
	apiURL := nsRQLiteAPIURL()
	token, err := nsRQLiteAuthToken()
	if err != nil {
		return err
	}
	url := apiURL + "/v1/rqlite/export"
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Authorization", "Bearer "+token)
	// No client timeout: exports can be large and stream for a long time.
	// TLS verification is skipped; NOTE(review): presumably the gateway
	// serves a self-signed certificate — confirm this is intended.
	client := &http.Client{
		Timeout: 0,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	fmt.Printf("Exporting RQLite database to %s...\n", output)
	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("failed to connect to gateway: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("export failed (HTTP %d): %s", resp.StatusCode, string(body))
	}
	outFile, err := os.Create(output)
	if err != nil {
		return fmt.Errorf("failed to create output file: %w", err)
	}
	written, err := io.Copy(outFile, resp.Body)
	if err != nil {
		// Close before removing so the partial file can be deleted on all
		// platforms (Windows cannot remove an open file).
		outFile.Close()
		os.Remove(output)
		return fmt.Errorf("failed to write export file: %w", err)
	}
	// A Close error on a freshly written file can mean lost data (deferred
	// flush failure), so it must not be silently dropped.
	if err := outFile.Close(); err != nil {
		os.Remove(output)
		return fmt.Errorf("failed to finalize export file: %w", err)
	}
	fmt.Printf("Export complete: %s (%d bytes)\n", output, written)
	return nil
}
// rqliteImport replaces the namespace's entire RQLite database with the
// contents of the SQLite file given via --input (required).
//
// Safety: the user must re-type the namespace name on stdin before the
// upload proceeds. The request streams the file body with a bearer token
// over a TLS connection with verification disabled.
func rqliteImport(cmd *cobra.Command, args []string) error {
	input, _ := cmd.Flags().GetString("input")
	info, err := os.Stat(input)
	if err != nil {
		return fmt.Errorf("cannot access input file: %w", err)
	}
	if info.IsDir() {
		return fmt.Errorf("input path is a directory, not a file")
	}
	// Resolve the current namespace from stored credentials so the
	// confirmation prompt names the exact database being replaced.
	store, err := auth.LoadEnhancedCredentials()
	if err != nil {
		return fmt.Errorf("failed to load credentials: %w", err)
	}
	gatewayURL := auth.GetDefaultGatewayURL()
	creds := store.GetDefaultCredential(gatewayURL)
	if creds == nil || !creds.IsValid() {
		return fmt.Errorf("not authenticated. Run 'orama auth login' first")
	}
	namespace := creds.Namespace
	if namespace == "" {
		namespace = "default"
	}
	fmt.Printf("WARNING: This will REPLACE the entire RQLite database for namespace '%s'.\n", namespace)
	fmt.Printf("All existing data (deployments, DNS records, API keys, etc.) will be lost.\n")
	fmt.Printf("Importing from: %s (%d bytes)\n\n", input, info.Size())
	fmt.Printf("Type the namespace name '%s' to confirm: ", namespace)
	scanner := bufio.NewScanner(os.Stdin)
	scanner.Scan()
	// On stdin EOF, Text() is "" and the comparison below aborts safely.
	confirmation := strings.TrimSpace(scanner.Text())
	if confirmation != namespace {
		return fmt.Errorf("aborted - namespace name did not match")
	}
	apiURL := nsRQLiteAPIURL()
	token, err := nsRQLiteAuthToken()
	if err != nil {
		return err
	}
	file, err := os.Open(input)
	if err != nil {
		return fmt.Errorf("failed to open input file: %w", err)
	}
	defer file.Close()
	url := apiURL + "/v1/rqlite/import"
	// The open file is the request body, so the upload streams without
	// loading the whole dump into memory.
	req, err := http.NewRequest(http.MethodPost, url, file)
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Authorization", "Bearer "+token)
	req.Header.Set("Content-Type", "application/octet-stream")
	// Declare the size up front so the transport sends Content-Length
	// rather than chunked encoding.
	req.ContentLength = info.Size()
	// No timeout: large imports may take a long time. TLS verification is
	// skipped; NOTE(review): presumably the gateway serves a self-signed
	// certificate — confirm this is intended.
	client := &http.Client{
		Timeout: 0,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	fmt.Printf("Importing database...\n")
	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("failed to connect to gateway: %w", err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("import failed (HTTP %d): %s", resp.StatusCode, string(body))
	}
	fmt.Printf("Import complete. The namespace '%s' RQLite database has been replaced.\n", namespace)
	return nil
}
// nsRQLiteAPIURL returns the gateway base URL for RQLite operations.
// ORAMA_API_URL, when set, overrides the configured default gateway.
func nsRQLiteAPIURL() string {
	override := os.Getenv("ORAMA_API_URL")
	if override != "" {
		return override
	}
	return auth.GetDefaultGatewayURL()
}
// nsRQLiteAuthToken resolves the bearer token for RQLite requests.
// ORAMA_TOKEN, when set, takes precedence; otherwise the stored credential
// for the default gateway is used, with distinct errors for missing versus
// expired credentials.
func nsRQLiteAuthToken() (string, error) {
	if override := os.Getenv("ORAMA_TOKEN"); override != "" {
		return override, nil
	}
	store, err := auth.LoadEnhancedCredentials()
	if err != nil {
		return "", fmt.Errorf("failed to load credentials: %w", err)
	}
	gatewayURL := auth.GetDefaultGatewayURL()
	creds := store.GetDefaultCredential(gatewayURL)
	switch {
	case creds == nil:
		return "", fmt.Errorf("no credentials found for %s. Run 'orama auth login' to authenticate", gatewayURL)
	case !creds.IsValid():
		return "", fmt.Errorf("credentials expired for %s. Run 'orama auth login' to re-authenticate", gatewayURL)
	}
	return creds.APIKey, nil
}

View File

@ -1,25 +0,0 @@
package node
import (
"github.com/DeBrosOfficial/network/pkg/cli/production/clean"
"github.com/spf13/cobra"
)
// cleanCmd wipes Orama state from remote nodes so they can be reinstalled.
// Flag parsing is disabled so all arguments pass through verbatim to the
// legacy clean handler.
var cleanCmd = &cobra.Command{
	Use:   "clean",
	Short: "Clean (wipe) remote nodes for reinstallation",
	Long: `Remove all Orama data, services, and configuration from remote nodes.
Anyone relay keys at /var/lib/anon/ are preserved.
This is a DESTRUCTIVE operation. Use --force to skip confirmation.
Examples:
orama node clean --env testnet # Clean all testnet nodes
orama node clean --env testnet --node 1.2.3.4 # Clean specific node
orama node clean --env testnet --nuclear # Also remove shared binaries
orama node clean --env testnet --force # Skip confirmation`,
	Run: func(cmd *cobra.Command, args []string) {
		clean.Handle(args)
	},
	DisableFlagParsing: true,
}

View File

@ -1,177 +0,0 @@
package node
import (
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"os"
"os/exec"
"strings"
"time"
"github.com/DeBrosOfficial/network/pkg/cli/utils"
"github.com/spf13/cobra"
)
// doctorCmd runs the local diagnostic checks implemented by runDoctor.
var doctorCmd = &cobra.Command{
	Use:   "doctor",
	Short: "Diagnose common node issues",
	Long: `Run a series of diagnostic checks on this node to identify
common issues with services, connectivity, disk space, and more.`,
	RunE: runDoctor,
}
// check is a single diagnostic result produced and rendered by runDoctor.
type check struct {
	Name   string // human-readable check name shown in the report
	Status string // PASS, FAIL, WARN
	Detail string // free-form explanation shown next to the status
}
// runDoctor executes a fixed sequence of local diagnostics (services,
// RQLite/Olric/Gateway HTTP endpoints, disk usage, DNS, port conflicts),
// prints an aligned PASS/FAIL/WARN table, and exits with status 1 if any
// check failed.
func runDoctor(cmd *cobra.Command, args []string) error {
	fmt.Println("Node Doctor")
	fmt.Println("===========")
	fmt.Println()
	var checks []check
	// 1. Check if services exist
	services := utils.GetProductionServices()
	if len(services) == 0 {
		checks = append(checks, check{"Services installed", "FAIL", "No Orama services found. Run 'orama node install' first."})
	} else {
		checks = append(checks, check{"Services installed", "PASS", fmt.Sprintf("%d services found", len(services))})
	}
	// 2. Check each service status
	running := 0
	stopped := 0
	for _, svc := range services {
		// IsServiceActive errors are ignored; an errored probe counts
		// the service as stopped.
		active, _ := utils.IsServiceActive(svc)
		if active {
			running++
		} else {
			stopped++
		}
	}
	// With no services at all, neither branch fires and no row is added.
	if stopped > 0 {
		checks = append(checks, check{"Services running", "WARN", fmt.Sprintf("%d running, %d stopped", running, stopped)})
	} else if running > 0 {
		checks = append(checks, check{"Services running", "PASS", fmt.Sprintf("All %d services running", running)})
	}
	// 3. Check RQLite health (local HTTP API)
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get("http://localhost:5001/status")
	if err != nil {
		checks = append(checks, check{"RQLite reachable", "FAIL", fmt.Sprintf("Cannot connect: %v", err)})
	} else {
		resp.Body.Close()
		if resp.StatusCode == http.StatusOK {
			checks = append(checks, check{"RQLite reachable", "PASS", "HTTP API responding on :5001"})
		} else {
			checks = append(checks, check{"RQLite reachable", "WARN", fmt.Sprintf("HTTP %d", resp.StatusCode)})
		}
	}
	// 4. Check Olric health (any response counts as reachable)
	resp, err = client.Get("http://localhost:3320/")
	if err != nil {
		checks = append(checks, check{"Olric reachable", "FAIL", fmt.Sprintf("Cannot connect: %v", err)})
	} else {
		resp.Body.Close()
		checks = append(checks, check{"Olric reachable", "PASS", "Responding on :3320"})
	}
	// 5. Check Gateway health; if the body is JSON with a "status" field,
	// surface that value in the report.
	resp, err = client.Get("http://localhost:8443/health")
	if err != nil {
		checks = append(checks, check{"Gateway reachable", "FAIL", fmt.Sprintf("Cannot connect: %v", err)})
	} else {
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		if resp.StatusCode == http.StatusOK {
			var health map[string]interface{}
			if json.Unmarshal(body, &health) == nil {
				if s, ok := health["status"].(string); ok {
					checks = append(checks, check{"Gateway reachable", "PASS", fmt.Sprintf("Status: %s", s)})
				} else {
					checks = append(checks, check{"Gateway reachable", "PASS", "Responding"})
				}
			} else {
				checks = append(checks, check{"Gateway reachable", "PASS", "Responding"})
			}
		} else {
			checks = append(checks, check{"Gateway reachable", "WARN", fmt.Sprintf("HTTP %d", resp.StatusCode)})
		}
	}
	// 6. Check disk space; silently skipped if df fails or output is odd.
	out, err := exec.Command("df", "-h", "/opt/orama").Output()
	if err == nil {
		lines := strings.Split(string(out), "\n")
		if len(lines) > 1 {
			fields := strings.Fields(lines[1])
			if len(fields) >= 5 {
				usePercent := fields[4]
				checks = append(checks, check{"Disk space (/opt/orama)", "PASS", fmt.Sprintf("Usage: %s (available: %s)", usePercent, fields[3])})
			}
		}
	}
	// 7. Check DNS resolution (basic)
	_, err = net.LookupHost("orama-devnet.network")
	if err != nil {
		checks = append(checks, check{"DNS resolution", "WARN", fmt.Sprintf("Cannot resolve orama-devnet.network: %v", err)})
	} else {
		checks = append(checks, check{"DNS resolution", "PASS", "orama-devnet.network resolves"})
	}
	// 8. Check if ports are conflicting (only for stopped services): if we
	// can bind the port ourselves, nothing else holds it.
	ports, err := utils.CollectPortsForServices(services, true)
	if err == nil && len(ports) > 0 {
		var conflicts []string
		for _, spec := range ports {
			ln, err := net.Listen("tcp", fmt.Sprintf("0.0.0.0:%d", spec.Port))
			if err != nil {
				conflicts = append(conflicts, fmt.Sprintf("%s (:%d)", spec.Name, spec.Port))
			} else {
				ln.Close()
			}
		}
		if len(conflicts) > 0 {
			checks = append(checks, check{"Port conflicts", "WARN", fmt.Sprintf("Ports in use: %s", strings.Join(conflicts, ", "))})
		} else {
			checks = append(checks, check{"Port conflicts", "PASS", "No conflicts detected"})
		}
	}
	// Print results, aligning the name column to the longest check name.
	maxName := 0
	for _, c := range checks {
		if len(c.Name) > maxName {
			maxName = len(c.Name)
		}
	}
	pass, fail, warn := 0, 0, 0
	for _, c := range checks {
		fmt.Printf(" [%s] %-*s %s\n", c.Status, maxName, c.Name, c.Detail)
		switch c.Status {
		case "PASS":
			pass++
		case "FAIL":
			fail++
		case "WARN":
			warn++
		}
	}
	fmt.Printf("\nSummary: %d passed, %d failed, %d warnings\n", pass, fail, warn)
	// Non-zero exit signals failure to scripts and CI.
	if fail > 0 {
		os.Exit(1)
	}
	return nil
}

View File

@ -1,26 +0,0 @@
package node
import (
"github.com/DeBrosOfficial/network/pkg/cli/production/enroll"
"github.com/spf13/cobra"
)
// enrollCmd enrolls a freshly booted OramaOS node into the cluster.
// Flag parsing is disabled so all arguments pass through to the enroll
// package verbatim.
var enrollCmd = &cobra.Command{
	Use:   "enroll",
	Short: "Enroll an OramaOS node into the cluster",
	Long: `Enroll a freshly booted OramaOS node into the cluster.
The OramaOS node displays a registration code on port 9999. Provide this code
along with an invite token to complete enrollment. The Gateway pushes cluster
configuration (WireGuard, secrets, peer list) to the node.
Usage:
orama node enroll --node-ip <ip> --code <code> --token <invite-token> --env <environment>
The node must be reachable over the public internet on port 9999 (enrollment only).
After enrollment, port 9999 is permanently closed and all communication goes over WireGuard.`,
	Run: func(cmd *cobra.Command, args []string) {
		enroll.Handle(args)
	},
	DisableFlagParsing: true,
}

View File

@ -1,18 +0,0 @@
package node
import (
"github.com/DeBrosOfficial/network/pkg/cli/production/install"
"github.com/spf13/cobra"
)
// installCmd installs and configures a production node on this machine,
// delegating to the install package with raw arguments.
var installCmd = &cobra.Command{
	Use:   "install",
	Short: "Install production node (requires sudo)",
	Long: `Install and configure an Orama production node on this machine.
For the first node, this creates a new cluster. For subsequent nodes,
use --join and --token to join an existing cluster.`,
	Run: func(cmd *cobra.Command, args []string) {
		install.Handle(args)
	},
	DisableFlagParsing: true, // Pass flags through to existing handler
}

View File

@ -1,18 +0,0 @@
package node
import (
"github.com/DeBrosOfficial/network/pkg/cli/production/invite"
"github.com/spf13/cobra"
)
// inviteCmd manages invite tokens for joining the cluster; with no
// subcommand it creates a new token. Raw args are passed through.
var inviteCmd = &cobra.Command{
	Use:   "invite",
	Short: "Manage invite tokens for joining the cluster",
	Long: `Generate invite tokens that allow new nodes to join the cluster.
Running without a subcommand creates a new token (same as 'invite create').`,
	Run: func(cmd *cobra.Command, args []string) {
		// Default behavior: create a new invite token
		invite.Handle(args)
	},
	DisableFlagParsing: true,
}

View File

@ -1,45 +0,0 @@
package node
import (
"github.com/DeBrosOfficial/network/pkg/cli/production/lifecycle"
"github.com/spf13/cobra"
)
// forceFlag is not referenced anywhere in this file; stopCmd and restartCmd
// read their "force" flag via cmd.Flags() instead.
// NOTE(review): appears unused — confirm no other file in this package
// needs it, then remove.
var forceFlag bool

// startCmd starts all production services via the lifecycle package.
var startCmd = &cobra.Command{
	Use:   "start",
	Short: "Start all production services (requires sudo)",
	Run: func(cmd *cobra.Command, args []string) {
		lifecycle.HandleStart()
	},
}
// stopCmd stops all production services in dependency order; --force
// bypasses the quorum safety check.
var stopCmd = &cobra.Command{
	Use:   "stop",
	Short: "Stop all production services (requires sudo)",
	Long: `Stop all Orama services in dependency order and disable auto-start.
Includes namespace services, global services, and supporting services.
Use --force to bypass quorum safety check.`,
	Run: func(cmd *cobra.Command, args []string) {
		bypassQuorum, _ := cmd.Flags().GetBool("force")
		lifecycle.HandleStopWithFlags(bypassQuorum)
	},
}
// restartCmd restarts all production services; --force bypasses the quorum
// safety check.
var restartCmd = &cobra.Command{
	Use:   "restart",
	Short: "Restart all production services (requires sudo)",
	Long: `Restart all Orama services. Stops in dependency order then restarts.
Includes explicit namespace service restart.
Use --force to bypass quorum safety check.`,
	Run: func(cmd *cobra.Command, args []string) {
		bypassQuorum, _ := cmd.Flags().GetBool("force")
		lifecycle.HandleRestartWithFlags(bypassQuorum)
	},
}
// init declares the --force flag on the stop and restart commands.
func init() {
	stopCmd.Flags().Bool("force", false, "Bypass quorum safety check")
	restartCmd.Flags().Bool("force", false, "Bypass quorum safety check")
}

View File

@ -1,17 +0,0 @@
package node
import (
"github.com/DeBrosOfficial/network/pkg/cli/production/logs"
"github.com/spf13/cobra"
)
// logsCmd streams logs for a production service, passing raw args through
// to the logs package.
var logsCmd = &cobra.Command{
	Use:   "logs <service>",
	Short: "View production service logs",
	Long: `Stream logs for a specific Orama production service.
Service aliases: node, ipfs, cluster, gateway, olric`,
	Run: func(cmd *cobra.Command, args []string) {
		logs.Handle(args)
	},
	DisableFlagParsing: true,
}

View File

@ -1,15 +0,0 @@
package node
import (
"github.com/DeBrosOfficial/network/pkg/cli/production/migrate"
"github.com/spf13/cobra"
)
// migrateCmd migrates a node from the old unified setup, passing raw args
// through to the migrate package.
var migrateCmd = &cobra.Command{
	Use:   "migrate",
	Short: "Migrate from old unified setup (requires sudo)",
	Run: func(cmd *cobra.Command, args []string) {
		migrate.Handle(args)
	},
	DisableFlagParsing: true,
}

View File

@ -1,35 +0,0 @@
package node
import (
"github.com/spf13/cobra"
)
// Cmd is the root command for node operator commands (was "prod").
// All subcommands are registered in this package's init function.
var Cmd = &cobra.Command{
	Use:   "node",
	Short: "Node operator commands (requires sudo for most operations)",
	Long: `Manage the Orama node running on this machine.
Includes install, upgrade, start/stop/restart, status, logs, and more.
Most commands require root privileges (sudo).`,
}
// init wires every node operator subcommand onto the root Cmd.
func init() {
	Cmd.AddCommand(installCmd)
	Cmd.AddCommand(uninstallCmd)
	Cmd.AddCommand(upgradeCmd)
	Cmd.AddCommand(startCmd)
	Cmd.AddCommand(stopCmd)
	Cmd.AddCommand(restartCmd)
	Cmd.AddCommand(statusCmd)
	Cmd.AddCommand(logsCmd)
	Cmd.AddCommand(inviteCmd)
	Cmd.AddCommand(migrateCmd)
	Cmd.AddCommand(doctorCmd)
	Cmd.AddCommand(reportCmd)
	Cmd.AddCommand(pushCmd)
	Cmd.AddCommand(rolloutCmd)
	Cmd.AddCommand(cleanCmd)
	Cmd.AddCommand(recoverRaftCmd)
	Cmd.AddCommand(enrollCmd)
	Cmd.AddCommand(unlockCmd)
}

View File

@ -1,24 +0,0 @@
package node
import (
"github.com/DeBrosOfficial/network/pkg/cli/production/push"
"github.com/spf13/cobra"
)
// pushCmd uploads a pre-built binary archive to remote nodes, passing raw
// args through to the push package.
var pushCmd = &cobra.Command{
	Use:   "push",
	Short: "Push binary archive to remote nodes",
	Long: `Upload a pre-built binary archive to remote nodes.
By default, uses fanout distribution: uploads to one hub node,
then distributes to all others via server-to-server SCP.
Examples:
orama node push --env devnet # Fanout to all devnet nodes
orama node push --env testnet --node 1.2.3.4 # Single node
orama node push --env testnet --direct # Sequential upload to each node`,
	Run: func(cmd *cobra.Command, args []string) {
		push.Handle(args)
	},
	DisableFlagParsing: true,
}

View File

@ -1,31 +0,0 @@
package node
import (
"github.com/DeBrosOfficial/network/pkg/cli/production/recover"
"github.com/spf13/cobra"
)
// recoverRaftCmd recovers the RQLite Raft cluster from split-brain,
// passing raw args through to the recover package.
var recoverRaftCmd = &cobra.Command{
	Use:   "recover-raft",
	Short: "Recover RQLite cluster from split-brain",
	Long: `Recover the RQLite Raft cluster from split-brain failure.
Strategy:
1. Stop orama-node on ALL nodes simultaneously
2. Backup and delete raft/ on non-leader nodes
3. Start leader node, wait for Leader state
4. Start remaining nodes in batches
5. Verify cluster health
The --leader flag must point to the node with the highest commit index.
This is a DESTRUCTIVE operation. Use --force to skip confirmation.
Examples:
orama node recover-raft --env testnet --leader 1.2.3.4
orama node recover-raft --env devnet --leader 1.2.3.4 --force`,
	Run: func(cmd *cobra.Command, args []string) {
		recover.Handle(args)
	},
	DisableFlagParsing: true,
}

View File

@ -1,22 +0,0 @@
package node
import (
"github.com/DeBrosOfficial/network/pkg/cli/production/report"
"github.com/spf13/cobra"
)
// reportCmd emits this node's full health data as JSON, for consumption by
// 'orama monitor' over SSH.
var reportCmd = &cobra.Command{
	Use:   "report",
	Short: "Output comprehensive node health data as JSON",
	Long: `Collect all system and service data from this node and output
as a single JSON blob. Designed to be called by 'orama monitor' over SSH.
Requires root privileges for full data collection.`,
	RunE: func(cmd *cobra.Command, args []string) error {
		asJSON, _ := cmd.Flags().GetBool("json")
		return report.Handle(asJSON, "")
	},
}
// init declares the --json flag (defaults to true) on the report command.
func init() {
	reportCmd.Flags().Bool("json", true, "Output as JSON (default)")
}

View File

@ -1,22 +0,0 @@
package node
import (
"github.com/DeBrosOfficial/network/pkg/cli/production/rollout"
"github.com/spf13/cobra"
)
// rolloutCmd runs the full deployment pipeline (build, push, rolling
// upgrade), passing raw args through to the rollout package.
var rolloutCmd = &cobra.Command{
	Use:   "rollout",
	Short: "Build, push, and rolling upgrade all nodes in an environment",
	Long: `Full deployment pipeline: build binary archive locally, push to all nodes,
then perform a rolling upgrade (one node at a time).
Examples:
orama node rollout --env testnet # Full: build + push + rolling upgrade
orama node rollout --env testnet --no-build # Skip build, use existing archive
orama node rollout --env testnet --yes # Skip confirmation`,
	Run: func(cmd *cobra.Command, args []string) {
		rollout.Handle(args)
	},
	DisableFlagParsing: true,
}

View File

@ -1,14 +0,0 @@
package node
import (
"github.com/DeBrosOfficial/network/pkg/cli/production/status"
"github.com/spf13/cobra"
)
// statusCmd prints the status of all production services on this node.
var statusCmd = &cobra.Command{
	Use:   "status",
	Short: "Show production service status",
	Run: func(cmd *cobra.Command, args []string) {
		status.Handle()
	},
}

View File

@ -1,14 +0,0 @@
package node
import (
"github.com/DeBrosOfficial/network/pkg/cli/production/uninstall"
"github.com/spf13/cobra"
)
// uninstallCmd removes the production services from this node.
var uninstallCmd = &cobra.Command{
	Use:   "uninstall",
	Short: "Remove production services (requires sudo)",
	Run: func(cmd *cobra.Command, args []string) {
		uninstall.Handle()
	},
}

View File

@ -1,26 +0,0 @@
package node
import (
"github.com/DeBrosOfficial/network/pkg/cli/production/unlock"
"github.com/spf13/cobra"
)
// unlockCmd manually unlocks a genesis OramaOS node, passing raw args
// through to the unlock package.
var unlockCmd = &cobra.Command{
	Use:   "unlock",
	Short: "Unlock an OramaOS genesis node",
	Long: `Manually unlock a genesis OramaOS node that cannot reconstruct its LUKS key
via Shamir shares (not enough peers online).
This is only needed for the genesis node before enough peers have joined for
Shamir-based unlock. Once 5+ peers exist, the genesis node transitions to
normal Shamir unlock and this command is no longer needed.
Usage:
orama node unlock --genesis --node-ip <wg-ip>
The node must be reachable over WireGuard on port 9998.`,
	Run: func(cmd *cobra.Command, args []string) {
		unlock.Handle(args)
	},
	DisableFlagParsing: true,
}

View File

@ -1,17 +0,0 @@
package node
import (
"github.com/DeBrosOfficial/network/pkg/cli/production/upgrade"
"github.com/spf13/cobra"
)
// upgradeCmd upgrades the node binary (optionally restarting services),
// passing raw args through to the upgrade package.
var upgradeCmd = &cobra.Command{
	Use:   "upgrade",
	Short: "Upgrade existing installation (requires sudo)",
	Long: `Upgrade the Orama node binary and optionally restart services.
Uses rolling restart with quorum safety to ensure zero downtime.`,
	Run: func(cmd *cobra.Command, args []string) {
		upgrade.Handle(args)
	},
	DisableFlagParsing: true,
}

View File

@ -1,140 +0,0 @@
package sandboxcmd
import (
	"fmt"
	"os"
	"strconv"

	"github.com/DeBrosOfficial/network/pkg/cli/sandbox"
	"github.com/spf13/cobra"
)
// Cmd is the root command for sandbox operations (ephemeral Hetzner Cloud
// test clusters). Subcommands are registered in init.
var Cmd = &cobra.Command{
	Use:   "sandbox",
	Short: "Manage ephemeral Hetzner Cloud clusters for testing",
	Long: `Spin up temporary 5-node Orama clusters on Hetzner Cloud for development and testing.
Setup (one-time):
orama sandbox setup
Usage:
orama sandbox create [--name <name>] Create a new 5-node cluster
orama sandbox destroy [--name <name>] Tear down a cluster
orama sandbox list List active sandboxes
orama sandbox status [--name <name>] Show cluster health
orama sandbox rollout [--name <name>] Build + push + rolling upgrade
orama sandbox ssh <node-number> SSH into a sandbox node (1-5)
orama sandbox reset Delete all infra and config to start fresh`,
}
// setupCmd runs the one-time interactive sandbox setup.
var setupCmd = &cobra.Command{
	Use:   "setup",
	Short: "Interactive setup: Hetzner API key, domain, floating IPs, SSH key",
	RunE: func(cmd *cobra.Command, args []string) error {
		return sandbox.Setup()
	},
}
// createCmd provisions a new 5-node sandbox cluster, optionally named via
// --name.
var createCmd = &cobra.Command{
	Use:   "create",
	Short: "Create a new 5-node sandbox cluster (~5 min)",
	RunE: func(cmd *cobra.Command, args []string) error {
		clusterName, _ := cmd.Flags().GetString("name")
		return sandbox.Create(clusterName)
	},
}
// destroyCmd tears down a sandbox cluster; --force skips confirmation.
var destroyCmd = &cobra.Command{
	Use:   "destroy",
	Short: "Destroy a sandbox cluster and release resources",
	RunE: func(cmd *cobra.Command, args []string) error {
		clusterName, _ := cmd.Flags().GetString("name")
		skipConfirm, _ := cmd.Flags().GetBool("force")
		return sandbox.Destroy(clusterName, skipConfirm)
	},
}
// listCmd prints the active sandbox clusters.
var listCmd = &cobra.Command{
	Use:   "list",
	Short: "List active sandbox clusters",
	RunE: func(cmd *cobra.Command, args []string) error {
		return sandbox.List()
	},
}
// statusCmd shows the health report for a sandbox cluster (the active one
// unless --name is given).
var statusCmd = &cobra.Command{
	Use:   "status",
	Short: "Show cluster health report",
	RunE: func(cmd *cobra.Command, args []string) error {
		clusterName, _ := cmd.Flags().GetString("name")
		return sandbox.Status(clusterName)
	},
}
// rolloutCmd builds, pushes, and rolling-upgrades the sandbox cluster;
// --anyone-client enables the Anyone SOCKS5 proxy on all nodes.
var rolloutCmd = &cobra.Command{
	Use:   "rollout",
	Short: "Build + push + rolling upgrade to sandbox cluster",
	RunE: func(cmd *cobra.Command, args []string) error {
		clusterName, _ := cmd.Flags().GetString("name")
		useAnyone, _ := cmd.Flags().GetBool("anyone-client")
		flags := sandbox.RolloutFlags{AnyoneClient: useAnyone}
		return sandbox.Rollout(clusterName, flags)
	},
}
// resetCmd deletes all sandbox infrastructure and local configuration.
var resetCmd = &cobra.Command{
	Use:   "reset",
	Short: "Delete all sandbox infrastructure and config to start fresh",
	Long: `Deletes floating IPs, firewall, and SSH key from Hetzner Cloud,
then removes the local config (~/.orama/sandbox.yaml) and SSH keys.
Use this when you need to switch datacenter locations (floating IPs are
location-bound) or to completely start over with sandbox setup.`,
	RunE: func(cmd *cobra.Command, args []string) error {
		return sandbox.Reset()
	},
}
// sshCmd opens an interactive SSH session to one of the sandbox nodes,
// identified by its 1-based node number.
var sshCmd = &cobra.Command{
	Use:   "ssh <node-number>",
	Short: "SSH into a sandbox node (1-5)",
	Args:  cobra.ExactArgs(1),
	RunE: func(cmd *cobra.Command, args []string) error {
		name, _ := cmd.Flags().GetString("name")
		// strconv.Atoi rejects trailing garbage (e.g. "1abc"), which the
		// previous fmt.Sscanf("%d") parse silently accepted.
		nodeNum, err := strconv.Atoi(args[0])
		if err != nil {
			fmt.Fprintf(os.Stderr, "Invalid node number: %s (expected 1-5)\n", args[0])
			os.Exit(1)
		}
		return sandbox.SSHInto(name, nodeNum)
	},
}
// init declares the per-command flags and registers all sandbox
// subcommands on the root Cmd.
func init() {
	// create flags
	createCmd.Flags().String("name", "", "Sandbox name (random if not specified)")
	// destroy flags
	destroyCmd.Flags().String("name", "", "Sandbox name (uses active if not specified)")
	destroyCmd.Flags().Bool("force", false, "Skip confirmation")
	// status flags
	statusCmd.Flags().String("name", "", "Sandbox name (uses active if not specified)")
	// rollout flags
	rolloutCmd.Flags().String("name", "", "Sandbox name (uses active if not specified)")
	rolloutCmd.Flags().Bool("anyone-client", false, "Enable Anyone client (SOCKS5 proxy) on all nodes")
	// ssh flags
	sshCmd.Flags().String("name", "", "Sandbox name (uses active if not specified)")
	Cmd.AddCommand(setupCmd)
	Cmd.AddCommand(createCmd)
	Cmd.AddCommand(destroyCmd)
	Cmd.AddCommand(listCmd)
	Cmd.AddCommand(statusCmd)
	Cmd.AddCommand(rolloutCmd)
	Cmd.AddCommand(sshCmd)
	Cmd.AddCommand(resetCmd)
}

View File

@ -1,79 +0,0 @@
package functions
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"github.com/spf13/cobra"
)
// BuildCmd compiles a function to WASM using TinyGo.
// The optional positional argument selects the function directory
// (defaults to the current directory).
var BuildCmd = &cobra.Command{
	Use:   "build [directory]",
	Short: "Build a function to WASM using TinyGo",
	Long: `Compiles function.go in the given directory (or current directory) to a WASM binary.
Requires TinyGo to be installed (https://tinygo.org/getting-started/install/).`,
	Args: cobra.MaximumNArgs(1),
	RunE: runBuild,
}
// runBuild is the cobra entry point for BuildCmd: it resolves the optional
// directory argument and delegates to buildFunction, discarding the output
// path.
func runBuild(cmd *cobra.Command, args []string) error {
	var dir string
	if len(args) > 0 {
		dir = args[0]
	}
	_, err := buildFunction(dir)
	return err
}
// buildFunction compiles the function in dir and returns the path to the
// WASM output.
//
// It requires function.go and function.yaml in the resolved directory and a
// tinygo binary on PATH. The compiled artifact is validated and removed
// again if invalid.
func buildFunction(dir string) (string, error) {
	absDir, err := ResolveFunctionDir(dir)
	if err != nil {
		return "", err
	}
	// Verify function.go exists
	goFile := filepath.Join(absDir, "function.go")
	if _, err := os.Stat(goFile); os.IsNotExist(err) {
		return "", fmt.Errorf("function.go not found in %s", absDir)
	}
	// Verify function.yaml exists
	if _, err := os.Stat(filepath.Join(absDir, "function.yaml")); os.IsNotExist(err) {
		return "", fmt.Errorf("function.yaml not found in %s", absDir)
	}
	// Check TinyGo is installed
	tinygoPath, err := exec.LookPath("tinygo")
	if err != nil {
		return "", fmt.Errorf("tinygo not found in PATH. Install it: https://tinygo.org/getting-started/install/")
	}
	outputPath := filepath.Join(absDir, "function.wasm")
	fmt.Printf("Building %s...\n", absDir)
	// Run tinygo build. NOTE(review): newer TinyGo releases renamed the
	// "wasi" target to "wasip1" — confirm the supported TinyGo version.
	buildCmd := exec.Command(tinygoPath, "build", "-o", outputPath, "-target", "wasi", ".")
	buildCmd.Dir = absDir
	buildCmd.Stdout = os.Stdout
	buildCmd.Stderr = os.Stderr
	if err := buildCmd.Run(); err != nil {
		return "", fmt.Errorf("tinygo build failed: %w", err)
	}
	// Validate output
	if err := ValidateWASMFile(outputPath); err != nil {
		os.Remove(outputPath)
		return "", fmt.Errorf("build produced invalid WASM: %w", err)
	}
	// Check the stat error instead of ignoring it: a nil info would panic
	// on info.Size() below.
	info, err := os.Stat(outputPath)
	if err != nil {
		return "", fmt.Errorf("build succeeded but cannot stat output: %w", err)
	}
	fmt.Printf("Built %s (%d bytes)\n", outputPath, info.Size())
	return outputPath, nil
}

View File

@ -1,53 +0,0 @@
package functions
import (
"bufio"
"fmt"
"os"
"strings"
"github.com/spf13/cobra"
)
// deleteForce is bound to the --force/-f flag; when set, runDelete skips
// the interactive confirmation prompt.
var deleteForce bool

// DeleteCmd deletes a deployed function.
var DeleteCmd = &cobra.Command{
	Use:   "delete <name>",
	Short: "Delete a deployed function",
	Long:  "Deletes a function from the Orama Network. This action cannot be undone.",
	Args:  cobra.ExactArgs(1),
	RunE:  runDelete,
}

// init binds the --force flag to deleteForce.
func init() {
	DeleteCmd.Flags().BoolVarP(&deleteForce, "force", "f", false, "Skip confirmation prompt")
}
// runDelete deletes the named function after an interactive y/yes
// confirmation (skipped when --force is set) and prints the server's
// message, or a default one.
func runDelete(cmd *cobra.Command, args []string) error {
	name := args[0]
	if !deleteForce {
		fmt.Printf("Are you sure you want to delete function %q? This cannot be undone. [y/N] ", name)
		stdin := bufio.NewReader(os.Stdin)
		line, _ := stdin.ReadString('\n')
		switch strings.TrimSpace(strings.ToLower(line)) {
		case "y", "yes":
			// Confirmed; fall through to the delete call.
		default:
			fmt.Println("Cancelled.")
			return nil
		}
	}
	result, err := apiDelete("/v1/functions/" + name)
	if err != nil {
		return err
	}
	msg, ok := result["message"]
	if !ok {
		fmt.Printf("Function %q deleted.\n", name)
		return nil
	}
	fmt.Println(msg)
	return nil
}

View File

@ -1,89 +0,0 @@
package functions
import (
"fmt"
"os"
"path/filepath"
"github.com/spf13/cobra"
)
// DeployCmd deploys a function to the Orama Network.
// The optional positional argument selects the function directory
// (defaults to the current directory).
var DeployCmd = &cobra.Command{
	Use:   "deploy [directory]",
	Short: "Deploy a function to the Orama Network",
	Long: `Deploys the function in the given directory (or current directory).
If no .wasm file exists, it will be built automatically using TinyGo.
Reads configuration from function.yaml.`,
	Args: cobra.MaximumNArgs(1),
	RunE: runDeploy,
}
// runDeploy deploys the function in the given (or current) directory:
// loads function.yaml, builds function.wasm via TinyGo when missing (or
// validates an existing one), uploads it, and prints a summary of the
// gateway's response.
func runDeploy(cmd *cobra.Command, args []string) error {
	dir := ""
	if len(args) > 0 {
		dir = args[0]
	}
	absDir, err := ResolveFunctionDir(dir)
	if err != nil {
		return err
	}
	// Load configuration
	cfg, err := LoadConfig(absDir)
	if err != nil {
		return err
	}
	wasmPath := filepath.Join(absDir, "function.wasm")
	// Auto-build if no WASM file exists
	if _, err := os.Stat(wasmPath); os.IsNotExist(err) {
		fmt.Printf("No function.wasm found, building...\n\n")
		built, err := buildFunction(dir)
		if err != nil {
			return err
		}
		wasmPath = built
		fmt.Println()
	} else {
		// Validate existing WASM
		if err := ValidateWASMFile(wasmPath); err != nil {
			return fmt.Errorf("existing function.wasm is invalid: %w\nRun 'orama function build' to rebuild", err)
		}
	}
	fmt.Printf("Deploying function %q...\n", cfg.Name)
	result, err := uploadWASMFunction(wasmPath, cfg)
	if err != nil {
		return err
	}
	fmt.Printf("\nFunction deployed successfully!\n\n")
	if msg, ok := result["message"]; ok {
		fmt.Printf(" %s\n", msg)
	}
	// Print whichever details the gateway included in its response; each
	// field is optional.
	if fn, ok := result["function"].(map[string]interface{}); ok {
		if id, ok := fn["id"]; ok {
			fmt.Printf(" ID: %s\n", id)
		}
		fmt.Printf(" Name: %s\n", cfg.Name)
		if v, ok := fn["version"]; ok {
			fmt.Printf(" Version: %v\n", v)
		}
		if wc, ok := fn["wasm_cid"]; ok {
			fmt.Printf(" WASM CID: %s\n", wc)
		}
		if st, ok := fn["status"]; ok {
			fmt.Printf(" Status: %s\n", st)
		}
	}
	fmt.Printf("\nInvoke with:\n")
	fmt.Printf(" orama function invoke %s --data '{\"name\": \"World\"}'\n", cfg.Name)
	return nil
}

View File

@ -1,35 +0,0 @@
package functions
import (
"encoding/json"
"fmt"
"github.com/spf13/cobra"
)
// GetCmd shows details of a deployed function.
// The API response is printed verbatim as indented JSON (see runGet).
var GetCmd = &cobra.Command{
	Use:   "get <name>",
	Short: "Get details of a deployed function",
	Long:  "Retrieves and displays detailed information about a specific function.",
	Args:  cobra.ExactArgs(1),
	RunE:  runGet,
}
// runGet fetches /v1/functions/<name> and pretty-prints the JSON response.
func runGet(cmd *cobra.Command, args []string) error {
	result, err := apiGet("/v1/functions/" + args[0])
	if err != nil {
		return err
	}
	// Pretty-print the result
	pretty, err := json.MarshalIndent(result, "", " ")
	if err != nil {
		return fmt.Errorf("failed to format response: %w", err)
	}
	fmt.Println(string(pretty))
	return nil
}

View File

@ -1,260 +0,0 @@
package functions
import (
"bytes"
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
"os"
"path/filepath"
"regexp"
"strconv"
"github.com/DeBrosOfficial/network/pkg/cli/shared"
"gopkg.in/yaml.v3"
)
// FunctionConfig represents the function.yaml configuration.
type FunctionConfig struct {
	Name    string            `yaml:"name"`    // required; must match validNameRegex
	Public  bool              `yaml:"public"`  // sent to the API as "is_public"
	Memory  int               `yaml:"memory"`  // memory limit in MB (default 64, valid 1-256)
	Timeout int               `yaml:"timeout"` // execution timeout in seconds (default 30, valid 1-300)
	Retry   RetryConfig       `yaml:"retry"`   // retry behavior on failure
	Env     map[string]string `yaml:"env"`     // sent to the API inside the "metadata" field as "env_vars"
}

// RetryConfig holds retry settings.
type RetryConfig struct {
	Count int `yaml:"count"` // number of retries; 0 means no retries
	Delay int `yaml:"delay"` // delay between retries in seconds (default 5)
}
// wasmMagicBytes is the WASM binary magic number: \0asm
// (the 4-byte prefix every valid .wasm file must start with).
var wasmMagicBytes = []byte{0x00, 0x61, 0x73, 0x6d}

// validNameRegex validates function names (alphanumeric, hyphens, underscores).
// Names must start with a letter; used both by LoadConfig and "init".
var validNameRegex = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9_-]*$`)
// LoadConfig reads and parses a function.yaml from the given directory.
//
// Unset fields receive defaults (memory 64 MB, timeout 30 s, retry delay
// 5 s), then the result is validated: name is required and must match
// validNameRegex, memory must be 1-256 MB, timeout 1-300 s.
//
// NOTE(review): YAML zero values are indistinguishable from unset fields
// here, so an explicit `memory: 0`, `timeout: 0`, or `delay: 0` is silently
// replaced by the default — confirm this is intended.
func LoadConfig(dir string) (*FunctionConfig, error) {
	path := filepath.Join(dir, "function.yaml")
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("failed to read function.yaml: %w", err)
	}
	var cfg FunctionConfig
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		return nil, fmt.Errorf("failed to parse function.yaml: %w", err)
	}
	// Apply defaults
	if cfg.Memory == 0 {
		cfg.Memory = 64
	}
	if cfg.Timeout == 0 {
		cfg.Timeout = 30
	}
	if cfg.Retry.Delay == 0 {
		cfg.Retry.Delay = 5
	}
	// Validate
	if cfg.Name == "" {
		return nil, fmt.Errorf("function.yaml: 'name' is required")
	}
	if !validNameRegex.MatchString(cfg.Name) {
		return nil, fmt.Errorf("function.yaml: 'name' must start with a letter and contain only letters, digits, hyphens, or underscores")
	}
	if cfg.Memory < 1 || cfg.Memory > 256 {
		return nil, fmt.Errorf("function.yaml: 'memory' must be between 1 and 256 MB (got %d)", cfg.Memory)
	}
	if cfg.Timeout < 1 || cfg.Timeout > 300 {
		return nil, fmt.Errorf("function.yaml: 'timeout' must be between 1 and 300 seconds (got %d)", cfg.Timeout)
	}
	return &cfg, nil
}
// ValidateWASM checks that the given bytes are a valid WASM binary (magic number check).
// Inputs shorter than 8 bytes are rejected outright; otherwise the standard
// "\0asm" 4-byte prefix must be present.
func ValidateWASM(data []byte) error {
	if n := len(data); n < 8 {
		return fmt.Errorf("file too small to be a valid WASM binary (%d bytes)", n)
	}
	// "\x00asm" is the WASM magic number (0x00 0x61 0x73 0x6d).
	if !bytes.HasPrefix(data, []byte("\x00asm")) {
		return fmt.Errorf("file is not a valid WASM binary (bad magic bytes)")
	}
	return nil
}
// ValidateWASMFile checks that the file at the given path is a valid WASM binary.
// Only the 8-byte header is read, so large binaries are never loaded into memory.
//
// Fix over the previous version: a single f.Read call may legally return
// fewer than 8 bytes, and an empty file surfaced as a bare "EOF" read error.
// io.ReadFull retries short reads, and EOF/ErrUnexpectedEOF are treated as
// "file shorter than 8 bytes", which ValidateWASM reports with a clear
// size message.
func ValidateWASMFile(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return fmt.Errorf("failed to open WASM file: %w", err)
	}
	defer f.Close()
	header := make([]byte, 8)
	n, err := io.ReadFull(f, header)
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		// A genuine I/O failure, not just a short file.
		return fmt.Errorf("failed to read WASM file: %w", err)
	}
	return ValidateWASM(header[:n])
}
// apiRequest performs an authenticated HTTP request to the gateway API.
// endpoint is appended to the configured gateway base URL; body and
// contentType are optional (pass nil / "" for body-less requests). The
// caller owns the response and must close resp.Body.
//
// NOTE(review): http.DefaultClient has no timeout, so a hung gateway blocks
// the CLI indefinitely — consider a client with an explicit timeout.
func apiRequest(method, endpoint string, body io.Reader, contentType string) (*http.Response, error) {
	apiURL := shared.GetAPIURL()
	url := apiURL + endpoint
	req, err := http.NewRequest(method, url, body)
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	if contentType != "" {
		req.Header.Set("Content-Type", contentType)
	}
	// Bearer token comes from the CLI's stored auth state.
	token, err := shared.GetAuthToken()
	if err != nil {
		return nil, fmt.Errorf("authentication required: %w", err)
	}
	req.Header.Set("Authorization", "Bearer "+token)
	return http.DefaultClient.Do(req)
}
// apiGet performs an authenticated GET request and returns the parsed JSON response.
// Any non-200 status is turned into an error carrying the status code and body.
func apiGet(endpoint string) (map[string]interface{}, error) {
	resp, err := apiRequest("GET", endpoint, nil, "")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	payload, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("API error (%d): %s", resp.StatusCode, string(payload))
	}

	parsed := map[string]interface{}{}
	if err := json.Unmarshal(payload, &parsed); err != nil {
		return nil, fmt.Errorf("failed to parse response: %w", err)
	}
	return parsed, nil
}
// apiDelete performs an authenticated DELETE request and returns the parsed JSON response.
// Any non-200 status is turned into an error carrying the status code and body.
func apiDelete(endpoint string) (map[string]interface{}, error) {
	resp, err := apiRequest("DELETE", endpoint, nil, "")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	payload, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("API error (%d): %s", resp.StatusCode, string(payload))
	}

	parsed := map[string]interface{}{}
	if err := json.Unmarshal(payload, &parsed); err != nil {
		return nil, fmt.Errorf("failed to parse response: %w", err)
	}
	return parsed, nil
}
// uploadWASMFunction uploads a WASM file to the deploy endpoint via multipart/form-data.
//
// Config fields are sent as individual form fields; env vars, when present,
// are wrapped in a JSON "metadata" field under the key "env_vars", and the
// binary itself is streamed into the "wasm" form file. Both 200 and 201 are
// treated as success; the decoded JSON response is returned.
func uploadWASMFunction(wasmPath string, cfg *FunctionConfig) (map[string]interface{}, error) {
	wasmFile, err := os.Open(wasmPath)
	if err != nil {
		return nil, fmt.Errorf("failed to open WASM file: %w", err)
	}
	defer wasmFile.Close()
	// The whole multipart body is assembled in memory before sending.
	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	// Add form fields. WriteField errors are ignored: writes into a
	// bytes.Buffer cannot fail.
	writer.WriteField("name", cfg.Name)
	writer.WriteField("is_public", strconv.FormatBool(cfg.Public))
	writer.WriteField("memory_limit_mb", strconv.Itoa(cfg.Memory))
	writer.WriteField("timeout_seconds", strconv.Itoa(cfg.Timeout))
	writer.WriteField("retry_count", strconv.Itoa(cfg.Retry.Count))
	writer.WriteField("retry_delay_seconds", strconv.Itoa(cfg.Retry.Delay))
	// Add env vars as metadata JSON (marshal of map[string]string cannot fail).
	if len(cfg.Env) > 0 {
		metadata, _ := json.Marshal(map[string]interface{}{
			"env_vars": cfg.Env,
		})
		writer.WriteField("metadata", string(metadata))
	}
	// Add WASM file
	part, err := writer.CreateFormFile("wasm", filepath.Base(wasmPath))
	if err != nil {
		return nil, fmt.Errorf("failed to create form file: %w", err)
	}
	if _, err := io.Copy(part, wasmFile); err != nil {
		return nil, fmt.Errorf("failed to write WASM data: %w", err)
	}
	// Close writes the terminating multipart boundary into the buffer.
	writer.Close()
	resp, err := apiRequest("POST", "/v1/functions", body, writer.FormDataContentType())
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	respBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response: %w", err)
	}
	if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("deploy failed (%d): %s", resp.StatusCode, string(respBody))
	}
	var result map[string]interface{}
	if err := json.Unmarshal(respBody, &result); err != nil {
		return nil, fmt.Errorf("failed to parse response: %w", err)
	}
	return result, nil
}
// ResolveFunctionDir resolves and validates a function directory.
// An empty dir means the current working directory. The returned path is
// absolute; an error is returned when the path is missing or not a directory.
func ResolveFunctionDir(dir string) (string, error) {
	target := dir
	if target == "" {
		target = "."
	}
	abs, err := filepath.Abs(target)
	if err != nil {
		return "", fmt.Errorf("failed to resolve path: %w", err)
	}
	switch info, statErr := os.Stat(abs); {
	case statErr != nil:
		return "", fmt.Errorf("directory does not exist: %w", statErr)
	case !info.IsDir():
		return "", fmt.Errorf("%s is not a directory", abs)
	}
	return abs, nil
}

View File

@ -1,84 +0,0 @@
package functions
import (
"fmt"
"os"
"path/filepath"
"github.com/spf13/cobra"
)
// InitCmd scaffolds a new function project.
var InitCmd = &cobra.Command{
	Use:   "init <name>",
	Short: "Create a new serverless function project",
	Long:  "Scaffolds a new directory with function.go and function.yaml templates.",
	Args:  cobra.ExactArgs(1),
	RunE:  runInit,
}

// runInit creates ./<name>/ containing a default function.yaml and a
// "Hello, World" function.go template, then prints next-step hints.
// It refuses to overwrite an existing directory.
func runInit(cmd *cobra.Command, args []string) error {
	name := args[0]
	// The project name doubles as the function name, so it must satisfy the
	// same naming rules enforced by LoadConfig.
	if !validNameRegex.MatchString(name) {
		return fmt.Errorf("invalid function name %q: must start with a letter and contain only letters, digits, hyphens, or underscores", name)
	}
	dir := filepath.Join(".", name)
	if _, err := os.Stat(dir); err == nil {
		return fmt.Errorf("directory %q already exists", name)
	}
	if err := os.MkdirAll(dir, 0o755); err != nil {
		return fmt.Errorf("failed to create directory: %w", err)
	}
	// Write function.yaml (defaults mirror LoadConfig's defaults).
	yamlContent := fmt.Sprintf(`name: %s
public: false
memory: 64
timeout: 30
retry:
  count: 0
  delay: 5
`, name)
	if err := os.WriteFile(filepath.Join(dir, "function.yaml"), []byte(yamlContent), 0o644); err != nil {
		return fmt.Errorf("failed to write function.yaml: %w", err)
	}
	// Write function.go. The `+"`"+` splices embed backticks (struct tags)
	// inside the raw-string template.
	// NOTE(review): Sprintf has no format verbs or args here; plain string
	// concatenation would do.
	goContent := fmt.Sprintf(`package main

import "github.com/DeBrosOfficial/network/sdk/fn"

func main() {
	fn.Run(func(input []byte) ([]byte, error) {
		var req struct {
			Name string `+"`"+`json:"name"`+"`"+`
		}
		fn.ParseJSON(input, &req)
		if req.Name == "" {
			req.Name = "World"
		}
		return fn.JSON(map[string]string{
			"greeting": "Hello, " + req.Name + "!",
		})
	})
}
`)
	if err := os.WriteFile(filepath.Join(dir, "function.go"), []byte(goContent), 0o644); err != nil {
		return fmt.Errorf("failed to write function.go: %w", err)
	}
	fmt.Printf("Created function project: %s/\n", name)
	fmt.Printf(" %s/function.yaml — configuration\n", name)
	fmt.Printf(" %s/function.go — handler code\n\n", name)
	fmt.Printf("Next steps:\n")
	fmt.Printf(" cd %s\n", name)
	fmt.Printf(" orama function build\n")
	fmt.Printf(" orama function deploy\n")
	return nil
}

View File

@ -1,58 +0,0 @@
package functions
import (
"bytes"
"fmt"
"io"
"net/http"
"github.com/spf13/cobra"
)
// invokeData holds the --data flag: the JSON payload sent to the function.
var invokeData string

// InvokeCmd invokes a deployed function.
var InvokeCmd = &cobra.Command{
	Use:   "invoke <name>",
	Short: "Invoke a deployed function",
	Long:  "Sends a request to invoke the named function with optional JSON payload.",
	Args:  cobra.ExactArgs(1),
	RunE:  runInvoke,
}

// init registers the --data flag (defaults to an empty JSON object).
func init() {
	InvokeCmd.Flags().StringVar(&invokeData, "data", "{}", "JSON payload to send to the function")
}
// runInvoke POSTs the --data payload to the function's invoke endpoint and
// prints the gateway's timing headers plus the raw response body.
func runInvoke(cmd *cobra.Command, args []string) error {
	name := args[0]
	fmt.Printf("Invoking function %q...\n\n", name)

	resp, err := apiRequest("POST", "/v1/functions/"+name+"/invoke", bytes.NewBufferString(invokeData), "application/json")
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	out, readErr := io.ReadAll(resp.Body)
	if readErr != nil {
		return fmt.Errorf("failed to read response: %w", readErr)
	}
	// Surface tracing/timing headers when the gateway provides them.
	if id := resp.Header.Get("X-Request-ID"); id != "" {
		fmt.Printf("Request ID: %s\n", id)
	}
	if ms := resp.Header.Get("X-Duration-Ms"); ms != "" {
		fmt.Printf("Duration: %s ms\n", ms)
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("invocation failed (%d): %s", resp.StatusCode, string(out))
	}
	fmt.Printf("\nOutput:\n%s\n", string(out))
	return nil
}

View File

@ -1,80 +0,0 @@
package functions
import (
"fmt"
"os"
"text/tabwriter"
"github.com/spf13/cobra"
)
// ListCmd lists all deployed functions.
// Output is a tab-aligned table of name/version/status/limits (see runList).
var ListCmd = &cobra.Command{
	Use:   "list",
	Short: "List deployed functions",
	Long:  "Lists all functions deployed in the current namespace.",
	Args:  cobra.NoArgs,
	RunE:  runList,
}
// runList prints a table of every function in the namespace, or a notice
// when nothing is deployed.
func runList(cmd *cobra.Command, args []string) error {
	result, err := apiGet("/v1/functions")
	if err != nil {
		return err
	}
	items, ok := result["functions"].([]interface{})
	if !ok || len(items) == 0 {
		fmt.Println("No functions deployed.")
		return nil
	}

	tw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
	fmt.Fprintln(tw, "NAME\tVERSION\tSTATUS\tMEMORY\tTIMEOUT\tPUBLIC")
	fmt.Fprintln(tw, "----\t-------\t------\t------\t-------\t------")
	for _, item := range items {
		entry, ok := item.(map[string]interface{})
		if !ok {
			continue
		}
		pub := "no"
		if valBool(entry, "is_public") {
			pub = "yes"
		}
		fmt.Fprintf(tw, "%s\t%d\t%s\t%dMB\t%ds\t%s\n",
			valStr(entry, "name"),
			valNum(entry, "version"),
			valStr(entry, "status"),
			valNum(entry, "memory_limit_mb"),
			valNum(entry, "timeout_seconds"),
			pub)
	}
	tw.Flush()
	fmt.Printf("\nTotal: %d function(s)\n", len(items))
	return nil
}
// valStr returns m[key] rendered with %v, or "" when the key is absent.
func valStr(m map[string]interface{}, key string) string {
	v, ok := m[key]
	if !ok {
		return ""
	}
	return fmt.Sprintf("%v", v)
}

// valNum returns m[key] as an int when it is a JSON number (float64), else 0.
func valNum(m map[string]interface{}, key string) int {
	f, ok := m[key].(float64)
	if !ok {
		return 0
	}
	return int(f)
}

// valBool returns m[key] when it is a bool, or false otherwise.
func valBool(m map[string]interface{}, key string) bool {
	b, _ := m[key].(bool)
	return b
}

View File

@ -1,57 +0,0 @@
package functions
import (
"fmt"
"strconv"
"github.com/spf13/cobra"
)
// logsLimit holds the --limit flag: maximum number of log entries to fetch.
var logsLimit int

// LogsCmd retrieves function execution logs.
var LogsCmd = &cobra.Command{
	Use:   "logs <name>",
	Short: "Get execution logs for a function",
	Long:  "Retrieves the most recent execution logs for a deployed function.",
	Args:  cobra.ExactArgs(1),
	RunE:  runLogs,
}

// init registers the --limit flag (default 50).
func init() {
	LogsCmd.Flags().IntVar(&logsLimit, "limit", 50, "Maximum number of log entries to retrieve")
}
// runLogs fetches and prints recent execution logs for the named function,
// appending ?limit=N when the --limit flag is positive.
func runLogs(cmd *cobra.Command, args []string) error {
	name := args[0]

	endpoint := "/v1/functions/" + name + "/logs"
	if logsLimit > 0 {
		endpoint += "?limit=" + strconv.Itoa(logsLimit)
	}
	result, err := apiGet(endpoint)
	if err != nil {
		return err
	}

	entries, ok := result["logs"].([]interface{})
	if !ok || len(entries) == 0 {
		fmt.Printf("No logs found for function %q.\n", name)
		return nil
	}
	for _, raw := range entries {
		rec, ok := raw.(map[string]interface{})
		if !ok {
			continue
		}
		fmt.Printf("[%s] %s: %s\n",
			valStr(rec, "timestamp"),
			valStr(rec, "level"),
			valStr(rec, "message"))
	}
	fmt.Printf("\nShowing %d log(s)\n", len(entries))
	return nil
}

View File

@ -1,156 +0,0 @@
package functions
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"os"
"strings"
"github.com/spf13/cobra"
)
var (
	secretsDeleteForce bool   // --force: skip the delete confirmation prompt
	secretsFromFile    string // --from-file: read the secret value from this file
)

// SecretsCmd is the parent command for secrets management.
var SecretsCmd = &cobra.Command{
	Use:   "secrets",
	Short: "Manage function secrets",
	Long: `Set, list, and delete encrypted secrets for your serverless functions.
Functions access secrets at runtime via the get_secret() host function.
Secrets are scoped to your namespace and encrypted at rest with AES-256-GCM.
Examples:
  orama function secrets set API_KEY "sk-abc123"
  orama function secrets set CERT_PEM --from-file ./cert.pem
  orama function secrets list
  orama function secrets delete API_KEY`,
}

// SecretsSetCmd stores an encrypted secret.
var SecretsSetCmd = &cobra.Command{
	Use:   "set <name> [value]",
	Short: "Set a secret",
	Long:  `Stores an encrypted secret. Functions access it via get_secret("name"). If --from-file is used, value is read from the file instead.`,
	Args:  cobra.RangeArgs(1, 2),
	RunE:  runSecretsSet,
}

// SecretsListCmd lists secret names.
var SecretsListCmd = &cobra.Command{
	Use:   "list",
	Short: "List secret names",
	Long:  "Lists all secret names in the current namespace. Values are never shown.",
	Args:  cobra.NoArgs,
	RunE:  runSecretsList,
}

// SecretsDeleteCmd deletes a secret.
var SecretsDeleteCmd = &cobra.Command{
	Use:   "delete <name>",
	Short: "Delete a secret",
	Long:  "Permanently deletes a secret. Functions will no longer be able to access it.",
	Args:  cobra.ExactArgs(1),
	RunE:  runSecretsDelete,
}

// init wires the subcommands under "secrets" and registers their flags.
func init() {
	SecretsCmd.AddCommand(SecretsSetCmd)
	SecretsCmd.AddCommand(SecretsListCmd)
	SecretsCmd.AddCommand(SecretsDeleteCmd)
	SecretsSetCmd.Flags().StringVar(&secretsFromFile, "from-file", "", "Read secret value from a file")
	SecretsDeleteCmd.Flags().BoolVarP(&secretsDeleteForce, "force", "f", false, "Skip confirmation prompt")
}
// runSecretsSet stores a secret, taking the value either from the second
// positional argument or from --from-file (the file wins when both are set).
func runSecretsSet(cmd *cobra.Command, args []string) error {
	name := args[0]

	var value string
	switch {
	case secretsFromFile != "":
		data, err := os.ReadFile(secretsFromFile)
		if err != nil {
			return fmt.Errorf("failed to read file %s: %w", secretsFromFile, err)
		}
		value = string(data)
	case len(args) >= 2:
		value = args[1]
	default:
		return fmt.Errorf("secret value required: provide as argument or use --from-file")
	}

	// Marshal of map[string]string cannot fail.
	payload, _ := json.Marshal(map[string]string{
		"name":  name,
		"value": value,
	})
	resp, err := apiRequest("PUT", "/v1/functions/secrets", bytes.NewReader(payload), "application/json")
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	raw, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("failed to read response: %w", err)
	}
	if resp.StatusCode != 200 {
		return fmt.Errorf("API error (%d): %s", resp.StatusCode, string(raw))
	}
	fmt.Printf("Secret %q set successfully.\n", name)
	return nil
}
// runSecretsList prints the names (never the values) of all stored secrets.
func runSecretsList(cmd *cobra.Command, args []string) error {
	result, err := apiGet("/v1/functions/secrets")
	if err != nil {
		return err
	}
	names, _ := result["secrets"].([]interface{})
	if len(names) == 0 {
		fmt.Println("No secrets found.")
		return nil
	}
	fmt.Printf("Secrets (%d):\n", len(names))
	for _, n := range names {
		fmt.Printf(" %s\n", n)
	}
	return nil
}
// runSecretsDelete removes a secret, prompting for confirmation on stdin
// unless --force was given.
func runSecretsDelete(cmd *cobra.Command, args []string) error {
	name := args[0]
	if !secretsDeleteForce {
		fmt.Printf("Are you sure you want to delete secret %q? [y/N] ", name)
		line, _ := bufio.NewReader(os.Stdin).ReadString('\n')
		switch strings.TrimSpace(strings.ToLower(line)) {
		case "y", "yes":
			// confirmed; fall through to the delete
		default:
			fmt.Println("Cancelled.")
			return nil
		}
	}
	result, err := apiDelete("/v1/functions/secrets/" + name)
	if err != nil {
		return err
	}
	if msg, ok := result["message"]; ok {
		fmt.Println(msg)
	} else {
		fmt.Printf("Secret %q deleted.\n", name)
	}
	return nil
}

View File

@ -1,151 +0,0 @@
package functions
import (
"bytes"
"encoding/json"
"fmt"
"io"
"text/tabwriter"
"github.com/spf13/cobra"
)
// triggerTopic holds the --topic flag for "triggers add".
var triggerTopic string

// TriggersCmd is the parent command for trigger management.
var TriggersCmd = &cobra.Command{
	Use:   "triggers",
	Short: "Manage function PubSub triggers",
	Long: `Add, list, and delete PubSub triggers for your serverless functions.
When a message is published to a topic, all functions with a trigger on
that topic are automatically invoked with the message as input.
Examples:
  orama function triggers add my-function --topic calls:invite
  orama function triggers list my-function
  orama function triggers delete my-function <trigger-id>`,
}

// TriggersAddCmd adds a PubSub trigger to a function.
var TriggersAddCmd = &cobra.Command{
	Use:   "add <function-name>",
	Short: "Add a PubSub trigger",
	Long:  "Registers a PubSub trigger so the function is invoked when a message is published to the topic.",
	Args:  cobra.ExactArgs(1),
	RunE:  runTriggersAdd,
}

// TriggersListCmd lists triggers for a function.
var TriggersListCmd = &cobra.Command{
	Use:   "list <function-name>",
	Short: "List triggers for a function",
	Args:  cobra.ExactArgs(1),
	RunE:  runTriggersList,
}

// TriggersDeleteCmd deletes a trigger.
var TriggersDeleteCmd = &cobra.Command{
	Use:   "delete <function-name> <trigger-id>",
	Short: "Delete a trigger",
	Args:  cobra.ExactArgs(2),
	RunE:  runTriggersDelete,
}

// init wires the subcommands under "triggers" and marks --topic required.
func init() {
	TriggersCmd.AddCommand(TriggersAddCmd)
	TriggersCmd.AddCommand(TriggersListCmd)
	TriggersCmd.AddCommand(TriggersDeleteCmd)
	TriggersAddCmd.Flags().StringVar(&triggerTopic, "topic", "", "PubSub topic to trigger on (required)")
	TriggersAddCmd.MarkFlagRequired("topic")
}
// runTriggersAdd registers a PubSub trigger (--topic) on the named function
// and prints the new trigger's id. Both 200 and 201 count as success.
func runTriggersAdd(cmd *cobra.Command, args []string) error {
	funcName := args[0]

	// Marshal of map[string]string cannot fail.
	payload, _ := json.Marshal(map[string]string{"topic": triggerTopic})
	resp, err := apiRequest("POST", "/v1/functions/"+funcName+"/triggers", bytes.NewReader(payload), "application/json")
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	raw, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("failed to read response: %w", err)
	}
	if resp.StatusCode != 201 && resp.StatusCode != 200 {
		return fmt.Errorf("API error (%d): %s", resp.StatusCode, string(raw))
	}
	parsed := map[string]interface{}{}
	if err := json.Unmarshal(raw, &parsed); err != nil {
		return fmt.Errorf("failed to parse response: %w", err)
	}
	fmt.Printf("Trigger added: %s → %s (id: %s)\n", triggerTopic, funcName, parsed["trigger_id"])
	return nil
}
// runTriggersList prints a table of the function's triggers. The API may
// return either exported (ID/Topic/Enabled) or lowercase JSON keys, so both
// spellings are checked.
func runTriggersList(cmd *cobra.Command, args []string) error {
	funcName := args[0]
	result, err := apiGet("/v1/functions/" + funcName + "/triggers")
	if err != nil {
		return err
	}
	triggers, _ := result["triggers"].([]interface{})
	if len(triggers) == 0 {
		fmt.Printf("No triggers for function %q.\n", funcName)
		return nil
	}

	tw := tabwriter.NewWriter(cmd.OutOrStdout(), 0, 0, 2, ' ', 0)
	fmt.Fprintln(tw, "ID\tTOPIC\tENABLED")
	for _, raw := range triggers {
		rec, ok := raw.(map[string]interface{})
		if !ok {
			continue
		}
		// Prefer the exported key; fall back to the lowercase one.
		pick := func(a, b string) string {
			if s, _ := rec[a].(string); s != "" {
				return s
			}
			s, _ := rec[b].(string)
			return s
		}
		enabled := true
		if e, ok := rec["Enabled"].(bool); ok {
			enabled = e
		} else if e, ok := rec["enabled"].(bool); ok {
			enabled = e
		}
		fmt.Fprintf(tw, "%s\t%s\t%v\n", pick("ID", "id"), pick("Topic", "topic"), enabled)
	}
	tw.Flush()
	return nil
}
// runTriggersDelete removes the given trigger from the named function and
// prints the API's message (or a default confirmation).
func runTriggersDelete(cmd *cobra.Command, args []string) error {
	funcName, triggerID := args[0], args[1]
	result, err := apiDelete("/v1/functions/" + funcName + "/triggers/" + triggerID)
	if err != nil {
		return err
	}
	msg, ok := result["message"]
	if !ok {
		fmt.Println("Trigger deleted.")
		return nil
	}
	fmt.Println(msg)
	return nil
}

View File

@ -1,54 +0,0 @@
package functions
import (
"fmt"
"os"
"text/tabwriter"
"github.com/spf13/cobra"
)
// VersionsCmd lists all versions of a function.
// Output is a tab-aligned table of version/CID/status/creation time.
var VersionsCmd = &cobra.Command{
	Use:   "versions <name>",
	Short: "List all versions of a function",
	Long:  "Shows all deployed versions of a specific function.",
	Args:  cobra.ExactArgs(1),
	RunE:  runVersions,
}
// runVersions prints a table of every deployed version of the named function.
func runVersions(cmd *cobra.Command, args []string) error {
	name := args[0]
	result, err := apiGet("/v1/functions/" + name + "/versions")
	if err != nil {
		return err
	}
	rows, ok := result["versions"].([]interface{})
	if !ok || len(rows) == 0 {
		fmt.Printf("No versions found for function %q.\n", name)
		return nil
	}

	tw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
	fmt.Fprintln(tw, "VERSION\tWASM CID\tSTATUS\tCREATED")
	fmt.Fprintln(tw, "-------\t--------\t------\t-------")
	for _, raw := range rows {
		rec, ok := raw.(map[string]interface{})
		if !ok {
			continue
		}
		fmt.Fprintf(tw, "%d\t%s\t%s\t%s\n",
			valNum(rec, "version"),
			valStr(rec, "wasm_cid"),
			valStr(rec, "status"),
			valStr(rec, "created_at"))
	}
	tw.Flush()
	fmt.Printf("\nTotal: %d version(s)\n", len(rows))
	return nil
}

View File

@ -1,198 +0,0 @@
package cli
import (
"bufio"
"context"
"flag"
"fmt"
"os"
"strings"
"time"
"github.com/DeBrosOfficial/network/pkg/cli/remotessh"
"github.com/DeBrosOfficial/network/pkg/inspector"
// Import checks package so init() registers the checkers
_ "github.com/DeBrosOfficial/network/pkg/inspector/checks"
)
// loadDotEnv loads key=value pairs from a .env file into os environment.
// Only sets vars that are not already set (env takes precedence over file).
func loadDotEnv(path string) {
f, err := os.Open(path)
if err != nil {
return // .env is optional
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
if line == "" || strings.HasPrefix(line, "#") {
continue
}
eq := strings.IndexByte(line, '=')
if eq < 1 {
continue
}
key := line[:eq]
value := line[eq+1:]
// Only set if not already in environment
if os.Getenv(key) == "" {
os.Setenv(key, value)
}
}
}
// HandleInspectCommand handles the "orama inspect" command.
//
// Flow: parse flags → load and filter nodes.conf by --env → prepare SSH keys
// → collect data over SSH → run checks → print table/JSON → optional AI
// analysis of failures → optional markdown output via --output. Exits with
// status 1 on flag/config errors or when any check fails.
func HandleInspectCommand(args []string) {
	// Load .env file from current directory (only sets unset vars)
	loadDotEnv(".env")
	fs := flag.NewFlagSet("inspect", flag.ExitOnError)
	configPath := fs.String("config", "scripts/nodes.conf", "Path to nodes.conf")
	env := fs.String("env", "", "Environment to inspect (devnet, testnet)")
	subsystem := fs.String("subsystem", "all", "Subsystem to inspect (rqlite,olric,ipfs,dns,wg,system,network,anyone,all)")
	format := fs.String("format", "table", "Output format (table, json)")
	timeout := fs.Duration("timeout", 30*time.Second, "SSH command timeout")
	verbose := fs.Bool("verbose", false, "Verbose output")
	// Output flags
	outputDir := fs.String("output", "", "Save results to directory as markdown (e.g., ./results)")
	// AI flags
	aiEnabled := fs.Bool("ai", false, "Enable AI analysis of failures")
	aiModel := fs.String("model", "moonshotai/kimi-k2.5", "OpenRouter model for AI analysis")
	aiAPIKey := fs.String("api-key", "", "OpenRouter API key (or OPENROUTER_API_KEY env)")
	fs.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: orama inspect [flags]\n\n")
		fmt.Fprintf(os.Stderr, "Inspect cluster health by SSHing into nodes and running checks.\n\n")
		fmt.Fprintf(os.Stderr, "Flags:\n")
		fs.PrintDefaults()
		fmt.Fprintf(os.Stderr, "\nExamples:\n")
		fmt.Fprintf(os.Stderr, "  orama inspect --env devnet\n")
		fmt.Fprintf(os.Stderr, "  orama inspect --env devnet --subsystem rqlite\n")
		fmt.Fprintf(os.Stderr, "  orama inspect --env devnet --ai\n")
		fmt.Fprintf(os.Stderr, "  orama inspect --env devnet --ai --model openai/gpt-4o\n")
		fmt.Fprintf(os.Stderr, "  orama inspect --env devnet --ai --output ./results\n")
	}
	if err := fs.Parse(args); err != nil {
		os.Exit(1)
	}
	if *env == "" {
		fmt.Fprintf(os.Stderr, "Error: --env is required (devnet, testnet)\n")
		os.Exit(1)
	}
	// Load nodes
	nodes, err := inspector.LoadNodes(*configPath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error loading config: %v\n", err)
		os.Exit(1)
	}
	// Filter by environment
	nodes = inspector.FilterByEnv(nodes, *env)
	if len(nodes) == 0 {
		fmt.Fprintf(os.Stderr, "Error: no nodes found for environment %q\n", *env)
		os.Exit(1)
	}
	// Prepare wallet-derived SSH keys; cleanup removes the temp key material.
	cleanup, err := remotessh.PrepareNodeKeys(nodes)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error preparing SSH keys: %v\n", err)
		os.Exit(1)
	}
	defer cleanup()
	// Parse subsystems ("all" means an empty filter list = every subsystem).
	var subsystems []string
	if *subsystem != "all" {
		subsystems = strings.Split(*subsystem, ",")
	}
	fmt.Printf("Inspecting %d %s nodes", len(nodes), *env)
	if len(subsystems) > 0 {
		fmt.Printf(" [%s]", strings.Join(subsystems, ","))
	}
	if *aiEnabled {
		fmt.Printf(" (AI: %s)", *aiModel)
	}
	fmt.Printf("...\n\n")
	// Phase 1: Collect. Overall context gets extra headroom beyond the
	// per-command SSH timeout.
	ctx, cancel := context.WithTimeout(context.Background(), *timeout+10*time.Second)
	defer cancel()
	if *verbose {
		// timeout is a *time.Duration; fmt invokes Duration's String method
		// through the pointer, so this prints e.g. "30s", not an address.
		fmt.Printf("Collecting data from %d nodes (timeout: %s)...\n", len(nodes), timeout)
	}
	data := inspector.Collect(ctx, nodes, subsystems, *verbose)
	if *verbose {
		fmt.Printf("Collection complete in %.1fs\n\n", data.Duration.Seconds())
	}
	// Phase 2: Check
	results := inspector.RunChecks(data, subsystems)
	// Phase 3: Report
	switch *format {
	case "json":
		inspector.PrintJSON(results, os.Stdout)
	default:
		inspector.PrintTable(results, os.Stdout)
	}
	// Phase 4: AI Analysis (if enabled and there are failures or warnings)
	var analysis *inspector.AnalysisResult
	if *aiEnabled {
		issues := results.FailuresAndWarnings()
		if len(issues) == 0 {
			fmt.Printf("\nAll checks passed — no AI analysis needed.\n")
		} else if *outputDir != "" {
			// Per-group AI analysis for file output
			groups := inspector.GroupFailures(results)
			fmt.Printf("\nAnalyzing %d unique issues with %s...\n", len(groups), *aiModel)
			var err error
			analysis, err = inspector.AnalyzeGroups(groups, results, data, *aiModel, *aiAPIKey)
			if err != nil {
				fmt.Fprintf(os.Stderr, "\nAI analysis failed: %v\n", err)
			} else {
				inspector.PrintAnalysis(analysis, os.Stdout)
			}
		} else {
			// Per-subsystem AI analysis for terminal output
			subs := map[string]bool{}
			for _, c := range issues {
				subs[c.Subsystem] = true
			}
			fmt.Printf("\nAnalyzing %d issues across %d subsystems with %s...\n", len(issues), len(subs), *aiModel)
			var err error
			analysis, err = inspector.Analyze(results, data, *aiModel, *aiAPIKey)
			if err != nil {
				fmt.Fprintf(os.Stderr, "\nAI analysis failed: %v\n", err)
			} else {
				inspector.PrintAnalysis(analysis, os.Stdout)
			}
		}
	}
	// Phase 5: Write results to disk (if --output is set). analysis may be
	// nil here; WriteResults receives whatever the AI phase produced.
	if *outputDir != "" {
		outPath, err := inspector.WriteResults(*outputDir, *env, results, data, analysis)
		if err != nil {
			fmt.Fprintf(os.Stderr, "\nError writing results: %v\n", err)
		} else {
			fmt.Printf("\nResults saved to %s\n", outPath)
		}
	}
	// Exit with non-zero if any failures
	if failures := results.Failures(); len(failures) > 0 {
		os.Exit(1)
	}
}

View File

@ -1,903 +0,0 @@
package monitor
import (
"fmt"
"strings"
"github.com/DeBrosOfficial/network/pkg/cli/production/report"
)
// AlertSeverity represents the severity of an alert.
type AlertSeverity string

// Severity levels, from most to least urgent.
const (
	AlertCritical AlertSeverity = "critical"
	AlertWarning  AlertSeverity = "warning"
	AlertInfo     AlertSeverity = "info"
)

// Alert represents a detected issue.
type Alert struct {
	Severity  AlertSeverity `json:"severity"`
	Subsystem string        `json:"subsystem"` // e.g. "rqlite", "ssh"
	Node      string        `json:"node"`      // node host, or "cluster" for cross-node issues
	Message   string        `json:"message"`
}

// joiningGraceSec is the grace period (in seconds) after a node starts during
// which unreachability alerts from other nodes are downgraded to info.
const joiningGraceSec = 300

// nodeContext carries per-node metadata needed for context-aware alerting.
type nodeContext struct {
	host         string
	role         string // "node", "nameserver-ns1", etc.
	isNameserver bool   // role has the "nameserver" prefix
	isJoining    bool   // orama-node active_since_sec < joiningGraceSec
	uptimeSec    int    // orama-node active_since_sec
}
// buildNodeContexts builds a map of WG IP -> nodeContext for all healthy nodes.
// Each context is also indexed by the node's host (public IP or hostname), so
// lookups work from either identifier. Nodes without a report are skipped.
func buildNodeContexts(snap *ClusterSnapshot) map[string]*nodeContext {
	ctxMap := make(map[string]*nodeContext)
	for _, cs := range snap.Nodes {
		if cs.Report == nil {
			continue
		}
		r := cs.Report
		host := nodeHost(r)
		nc := &nodeContext{
			host:         host,
			role:         cs.Node.Role,
			isNameserver: strings.HasPrefix(cs.Node.Role, "nameserver"),
		}
		// Determine uptime from the orama-node service; a node active for
		// less than joiningGraceSec is treated as still joining.
		if r.Services != nil {
			for _, svc := range r.Services.Services {
				if svc.Name == "orama-node" && svc.ActiveState == "active" {
					nc.uptimeSec = int(svc.ActiveSinceSec)
					nc.isJoining = svc.ActiveSinceSec < joiningGraceSec
					break
				}
			}
		}
		ctxMap[host] = nc
		// Also index by WG IP for cross-node RQLite unreachability lookups
		if r.WireGuard != nil && r.WireGuard.WgIP != "" {
			ctxMap[r.WireGuard.WgIP] = nc
		}
	}
	return ctxMap
}
// DeriveAlerts scans a ClusterSnapshot and produces alerts.
//
// It first reports collection failures, then — when at least one healthy
// report exists — runs cluster-wide consistency checks followed by per-node
// checks on every healthy report. Returns nil when nothing is wrong.
func DeriveAlerts(snap *ClusterSnapshot) []Alert {
	var alerts []Alert
	// Collection failures: nodes whose data could not be gathered at all.
	for _, cs := range snap.Nodes {
		if cs.Error != nil {
			alerts = append(alerts, Alert{
				Severity:  AlertCritical,
				Subsystem: "ssh",
				Node:      cs.Node.Host,
				Message:   fmt.Sprintf("Collection failed: %v", cs.Error),
			})
		}
	}
	reports := snap.Healthy()
	if len(reports) == 0 {
		// Nothing else can be checked without at least one report.
		return alerts
	}
	// Build context map for role/uptime-aware alerting
	nodeCtxMap := buildNodeContexts(snap)
	// Cross-node checks
	alerts = append(alerts, checkRQLiteLeader(reports)...)
	alerts = append(alerts, checkRQLiteQuorum(reports)...)
	alerts = append(alerts, checkRaftTermConsistency(reports)...)
	alerts = append(alerts, checkAppliedIndexLag(reports)...)
	alerts = append(alerts, checkWGPeerSymmetry(reports)...)
	alerts = append(alerts, checkClockSkew(reports)...)
	alerts = append(alerts, checkBinaryVersion(reports)...)
	alerts = append(alerts, checkOlricMemberConsistency(reports)...)
	alerts = append(alerts, checkIPFSSwarmConsistency(reports)...)
	alerts = append(alerts, checkIPFSClusterConsistency(reports)...)
	// Per-node checks
	for _, r := range reports {
		host := nodeHost(r)
		// nc exists for every healthy report (see buildNodeContexts).
		nc := nodeCtxMap[host]
		alerts = append(alerts, checkNodeRQLite(r, host, nodeCtxMap)...)
		alerts = append(alerts, checkNodeWireGuard(r, host)...)
		alerts = append(alerts, checkNodeSystem(r, host)...)
		alerts = append(alerts, checkNodeServices(r, host, nc)...)
		alerts = append(alerts, checkNodeDNS(r, host, nc)...)
		alerts = append(alerts, checkNodeAnyone(r, host)...)
		alerts = append(alerts, checkNodeProcesses(r, host)...)
		alerts = append(alerts, checkNodeNamespaces(r, host)...)
		alerts = append(alerts, checkNodeNetwork(r, host)...)
		alerts = append(alerts, checkNodeOlric(r, host)...)
		alerts = append(alerts, checkNodeIPFS(r, host)...)
		alerts = append(alerts, checkNodeGateway(r, host)...)
	}
	return alerts
}
// nodeHost returns the label used to attribute alerts to a node:
// its public IP when known, otherwise its hostname.
func nodeHost(r *report.NodeReport) string {
	if ip := r.PublicIP; ip != "" {
		return ip
	}
	return r.Hostname
}
// ---------------------------------------------------------------------------
// Cross-node checks
// ---------------------------------------------------------------------------
// checkRQLiteLeader verifies exactly one RQLite leader exists and that
// all nodes agree on the leader's address.
func checkRQLiteLeader(reports []*report.NodeReport) []Alert {
	leaderCount := 0
	seenLeaderAddrs := make(map[string]bool)
	for _, r := range reports {
		rq := r.RQLite
		if rq == nil {
			continue
		}
		if rq.RaftState == "Leader" {
			leaderCount++
		}
		if rq.LeaderAddr != "" {
			seenLeaderAddrs[rq.LeaderAddr] = true
		}
	}

	var alerts []Alert
	switch {
	case leaderCount == 0:
		alerts = append(alerts, Alert{
			Severity:  AlertCritical,
			Subsystem: "rqlite",
			Node:      "cluster",
			Message:   "No RQLite leader found",
		})
	case leaderCount > 1:
		alerts = append(alerts, Alert{
			Severity:  AlertCritical,
			Subsystem: "rqlite",
			Node:      "cluster",
			Message:   fmt.Sprintf("Split brain: %d leaders detected", leaderCount),
		})
	}
	if len(seenLeaderAddrs) > 1 {
		alerts = append(alerts, Alert{
			Severity:  AlertWarning,
			Subsystem: "rqlite",
			Node:      "cluster",
			Message:   fmt.Sprintf("Leader disagreement: nodes report %d different leader addresses", len(seenLeaderAddrs)),
		})
	}
	return alerts
}
// checkRQLiteQuorum alerts when the number of reachable voters is below
// (critical) or exactly at (warning) the quorum threshold for the
// estimated total voter count.
func checkRQLiteQuorum(reports []*report.NodeReport) []Alert {
	voters, responsive, unresponsive := 0, 0, 0
	for _, r := range reports {
		rq := r.RQLite
		if rq == nil {
			continue
		}
		if !rq.Responsive {
			unresponsive++
			continue
		}
		responsive++
		if rq.Voter {
			voters++
		}
	}
	if responsive == 0 {
		return nil // no rqlite data at all
	}

	// Conservatively assume every unresponsive node was a voter when
	// estimating the cluster's total voter count.
	totalVoters := voters + unresponsive
	if totalVoters < 2 {
		return nil // single-node cluster, no quorum concept
	}

	quorum := totalVoters/2 + 1
	switch {
	case voters < quorum:
		return []Alert{{AlertCritical, "rqlite", "cluster",
			fmt.Sprintf("Quorum lost: only %d/%d voters reachable (need %d)", voters, totalVoters, quorum)}}
	case voters == quorum:
		return []Alert{{AlertWarning, "rqlite", "cluster",
			fmt.Sprintf("Quorum fragile: exactly %d/%d voters reachable (one more failure = quorum loss)", voters, totalVoters)}}
	}
	return nil
}
// checkRaftTermConsistency warns when the raft term spread across
// responsive RQLite nodes exceeds one.
func checkRaftTermConsistency(reports []*report.NodeReport) []Alert {
	var lo, hi uint64
	seen := false
	for _, r := range reports {
		if r.RQLite == nil || !r.RQLite.Responsive {
			continue
		}
		term := r.RQLite.Term
		if !seen {
			lo, hi = term, term
			seen = true
			continue
		}
		if term < lo {
			lo = term
		}
		if term > hi {
			hi = term
		}
	}
	if hi-lo > 1 {
		return []Alert{{AlertWarning, "rqlite", "cluster",
			fmt.Sprintf("Raft term inconsistency: min=%d, max=%d (delta=%d)", lo, hi, hi-lo)}}
	}
	return nil
}
// checkAppliedIndexLag warns about responsive nodes whose applied raft
// index trails the cluster-wide maximum by more than 100 entries.
func checkAppliedIndexLag(reports []*report.NodeReport) []Alert {
	var highest uint64
	for _, r := range reports {
		if r.RQLite != nil && r.RQLite.Applied > highest {
			highest = r.RQLite.Applied
		}
	}
	var alerts []Alert
	for _, r := range reports {
		rq := r.RQLite
		if rq == nil || !rq.Responsive {
			continue
		}
		if behind := highest - rq.Applied; behind > 100 {
			alerts = append(alerts, Alert{AlertWarning, "rqlite", nodeHost(r),
				fmt.Sprintf("Applied index lag: %d behind leader (local=%d, max=%d)", behind, rq.Applied, highest)})
		}
	}
	return alerts
}
// checkWGPeerSymmetry verifies every node with an up WireGuard interface
// sees all other such nodes as peers (distinct public keys).
func checkWGPeerSymmetry(reports []*report.NodeReport) []Alert {
	type wgNode struct {
		host  string
		peers int
	}
	var nodes []wgNode
	for _, r := range reports {
		wg := r.WireGuard
		if wg == nil || !wg.InterfaceUp {
			continue
		}
		distinct := make(map[string]bool)
		for _, p := range wg.Peers {
			distinct[p.PublicKey] = true
		}
		nodes = append(nodes, wgNode{host: nodeHost(r), peers: len(distinct)})
	}

	expected := len(nodes) - 1
	var alerts []Alert
	for _, n := range nodes {
		if n.peers < expected {
			alerts = append(alerts, Alert{AlertCritical, "wireguard", n.host,
				fmt.Sprintf("WG peer count mismatch: has %d peers, expected %d", n.peers, expected)})
		}
	}
	return alerts
}
// checkClockSkew warns when the reported wall clocks of any two nodes
// differ by more than five seconds.
func checkClockSkew(reports []*report.NodeReport) []Alert {
	type sample struct {
		host string
		unix int64
	}
	var samples []sample
	for _, r := range reports {
		if r.System != nil && r.System.TimeUnix > 0 {
			samples = append(samples, sample{nodeHost(r), r.System.TimeUnix})
		}
	}
	if len(samples) < 2 {
		return nil
	}
	earliest, latest := samples[0], samples[0]
	for _, s := range samples[1:] {
		if s.unix < earliest.unix {
			earliest = s
		}
		if s.unix > latest.unix {
			latest = s
		}
	}
	if skew := latest.unix - earliest.unix; skew > 5 {
		return []Alert{{AlertWarning, "system", "cluster",
			fmt.Sprintf("Clock skew: %ds between %s and %s", skew, earliest.host, latest.host)}}
	}
	return nil
}
// checkBinaryVersion warns when nodes report differing orama binary
// versions. The alert message lists versions in sorted order so that
// repeated runs over the same snapshot produce an identical message
// (important for alert deduplication and diffing).
func checkBinaryVersion(reports []*report.NodeReport) []Alert {
	versions := map[string][]string{} // version -> list of hosts (in report order)
	for _, r := range reports {
		v := r.Version
		if v == "" {
			v = "unknown"
		}
		versions[v] = append(versions[v], nodeHost(r))
	}
	if len(versions) <= 1 {
		return nil
	}
	// Go map iteration order is randomized, which previously made the
	// message nondeterministic. Sort the version keys; a tiny insertion
	// sort avoids depending on the sort package being imported.
	keys := make([]string, 0, len(versions))
	for v := range versions {
		keys = append(keys, v)
	}
	for i := 1; i < len(keys); i++ {
		for j := i; j > 0 && keys[j] < keys[j-1]; j-- {
			keys[j], keys[j-1] = keys[j-1], keys[j]
		}
	}
	msg := "Binary version mismatch:"
	for _, v := range keys {
		msg += fmt.Sprintf(" %s=%v", v, versions[v])
	}
	return []Alert{{AlertWarning, "system", "cluster", msg}}
}
// checkOlricMemberConsistency warns when an active Olric node reports
// fewer cluster members than the number of nodes running Olric. Nodes
// reporting a zero member count are skipped.
func checkOlricMemberConsistency(reports []*report.NodeReport) []Alert {
	expected := 0
	for _, r := range reports {
		if r.Olric != nil && r.Olric.ServiceActive {
			expected++
		}
	}
	if expected < 2 {
		return nil
	}
	var alerts []Alert
	for _, r := range reports {
		o := r.Olric
		if o == nil || !o.ServiceActive || o.MemberCount == 0 {
			continue
		}
		if o.MemberCount < expected {
			alerts = append(alerts, Alert{AlertWarning, "olric", nodeHost(r),
				fmt.Sprintf("Olric member count: %d (expected %d active nodes)", o.MemberCount, expected)})
		}
	}
	return alerts
}
// checkIPFSSwarmConsistency flags active IPFS daemons that see fewer
// swarm peers than the other active daemons; a node with zero peers is
// treated as isolated (critical).
func checkIPFSSwarmConsistency(reports []*report.NodeReport) []Alert {
	active := 0
	for _, r := range reports {
		if r.IPFS != nil && r.IPFS.DaemonActive {
			active++
		}
	}
	if active < 2 {
		return nil
	}
	want := active - 1
	var alerts []Alert
	for _, r := range reports {
		node := r.IPFS
		if node == nil || !node.DaemonActive {
			continue
		}
		switch {
		case node.SwarmPeerCount == 0:
			alerts = append(alerts, Alert{AlertCritical, "ipfs", nodeHost(r),
				"IPFS node isolated: 0 swarm peers"})
		case node.SwarmPeerCount < want:
			alerts = append(alerts, Alert{AlertWarning, "ipfs", nodeHost(r),
				fmt.Sprintf("IPFS swarm peers: %d (expected %d)", node.SwarmPeerCount, want)})
		}
	}
	return alerts
}
// checkIPFSClusterConsistency warns when an active IPFS cluster peer
// reports fewer cluster peers than the number of active cluster nodes.
func checkIPFSClusterConsistency(reports []*report.NodeReport) []Alert {
	active := 0
	for _, r := range reports {
		if r.IPFS != nil && r.IPFS.ClusterActive {
			active++
		}
	}
	if active < 2 {
		return nil
	}
	var alerts []Alert
	for _, r := range reports {
		node := r.IPFS
		if node == nil || !node.ClusterActive {
			continue
		}
		if node.ClusterPeerCount < active {
			alerts = append(alerts, Alert{AlertWarning, "ipfs", nodeHost(r),
				fmt.Sprintf("IPFS cluster peers: %d (expected %d)", node.ClusterPeerCount, active)})
		}
	}
	return alerts
}
// ---------------------------------------------------------------------------
// Per-node checks
// ---------------------------------------------------------------------------
// checkNodeRQLite derives per-node RQLite alerts for a single report.
//
// host is the alert label for this node (see nodeHost). nodeCtxMap is
// keyed by WireGuard IP (see buildNodeContexts) and is used to soften
// "unreachable node" alerts for peers that only recently joined.
// An unresponsive node short-circuits with a single critical alert.
func checkNodeRQLite(r *report.NodeReport, host string, nodeCtxMap map[string]*nodeContext) []Alert {
	if r.RQLite == nil {
		return nil
	}
	var alerts []Alert
	if !r.RQLite.Responsive {
		alerts = append(alerts, Alert{AlertCritical, "rqlite", host, "RQLite not responding"})
		return alerts // no point checking further
	}
	if !r.RQLite.Ready {
		alerts = append(alerts, Alert{AlertWarning, "rqlite", host, "RQLite not ready (/readyz failed)"})
	}
	if !r.RQLite.StrongRead {
		alerts = append(alerts, Alert{AlertWarning, "rqlite", host, "Strong read failed"})
	}
	// Raft state anomalies: Candidate means an election is in progress
	// (transient, warning); Shutdown means the raft layer has stopped.
	if r.RQLite.RaftState == "Candidate" {
		alerts = append(alerts, Alert{AlertWarning, "rqlite", host, "RQLite in election (Candidate state)"})
	}
	if r.RQLite.RaftState == "Shutdown" {
		alerts = append(alerts, Alert{AlertCritical, "rqlite", host, "RQLite in Shutdown state"})
	}
	// FSM backlog: entries waiting to be applied to the state machine.
	if r.RQLite.FsmPending > 10 {
		alerts = append(alerts, Alert{AlertWarning, "rqlite", host,
			fmt.Sprintf("RQLite FSM backlog: %d entries pending", r.RQLite.FsmPending)})
	}
	// Commit-applied gap (per-node, distinct from cross-node applied index lag)
	if r.RQLite.Commit > 0 && r.RQLite.Applied > 0 && r.RQLite.Commit > r.RQLite.Applied {
		gap := r.RQLite.Commit - r.RQLite.Applied
		if gap > 100 {
			alerts = append(alerts, Alert{AlertWarning, "rqlite", host,
				fmt.Sprintf("RQLite commit-applied gap: %d (commit=%d, applied=%d)", gap, r.RQLite.Commit, r.RQLite.Applied)})
		}
	}
	// Resource pressure inside the rqlite process itself.
	if r.RQLite.Goroutines > 1000 {
		alerts = append(alerts, Alert{AlertWarning, "rqlite", host,
			fmt.Sprintf("RQLite goroutine count high: %d", r.RQLite.Goroutines)})
	}
	if r.RQLite.HeapMB > 1000 {
		alerts = append(alerts, Alert{AlertWarning, "rqlite", host,
			fmt.Sprintf("RQLite heap memory high: %dMB", r.RQLite.HeapMB)})
	}
	// Cluster partition detection: check if this node reports other nodes as unreachable.
	// If the unreachable node recently joined (< 5 min), downgrade to info — probes
	// may not have succeeded yet and this is expected transient behavior.
	for nodeAddr, info := range r.RQLite.Nodes {
		if !info.Reachable {
			// nodeAddr is like "10.0.0.4:7001" — extract the IP to look up context
			targetIP := strings.Split(nodeAddr, ":")[0]
			if targetCtx, ok := nodeCtxMap[targetIP]; ok && targetCtx.isJoining {
				alerts = append(alerts, Alert{AlertInfo, "rqlite", host,
					fmt.Sprintf("Node %s recently joined (%ds ago), probe pending for %s",
						targetCtx.host, targetCtx.uptimeSec, nodeAddr)})
			} else {
				alerts = append(alerts, Alert{AlertCritical, "rqlite", host,
					fmt.Sprintf("RQLite reports node %s unreachable (cluster partition)", nodeAddr)})
			}
		}
	}
	// Debug vars: counters scraped from rqlite's expvar endpoint.
	// Leader/snapshot problems warrant warnings; query/execute errors
	// are informational only.
	if dv := r.RQLite.DebugVars; dv != nil {
		if dv.LeaderNotFound > 0 {
			alerts = append(alerts, Alert{AlertWarning, "rqlite", host,
				fmt.Sprintf("RQLite leader_not_found errors: %d", dv.LeaderNotFound)})
		}
		if dv.SnapshotErrors > 0 {
			alerts = append(alerts, Alert{AlertWarning, "rqlite", host,
				fmt.Sprintf("RQLite snapshot errors: %d", dv.SnapshotErrors)})
		}
		totalQueryErrors := dv.QueryErrors + dv.ExecuteErrors
		if totalQueryErrors > 0 {
			alerts = append(alerts, Alert{AlertInfo, "rqlite", host,
				fmt.Sprintf("RQLite query/execute errors: %d", totalQueryErrors)})
		}
	}
	return alerts
}
// checkNodeWireGuard reports a down interface, peers that have never
// completed a handshake, and peers whose last handshake is stale (>180s).
// The two per-peer conditions are mutually exclusive: the stale check
// requires a nonzero handshake timestamp.
func checkNodeWireGuard(r *report.NodeReport, host string) []Alert {
	wg := r.WireGuard
	if wg == nil {
		return nil
	}
	if !wg.InterfaceUp {
		return []Alert{{AlertCritical, "wireguard", host, "WireGuard interface down"}}
	}
	var alerts []Alert
	for _, peer := range wg.Peers {
		switch {
		case peer.LatestHandshake == 0:
			alerts = append(alerts, Alert{AlertCritical, "wireguard", host,
				fmt.Sprintf("WG peer %s has never handshaked", truncateKey(peer.PublicKey))})
		case peer.HandshakeAgeSec > 180:
			alerts = append(alerts, Alert{AlertWarning, "wireguard", host,
				fmt.Sprintf("Stale WG handshake with peer %s: %ds ago", truncateKey(peer.PublicKey), peer.HandshakeAgeSec)})
		}
	}
	return alerts
}
// checkNodeSystem emits resource-pressure alerts for one node: memory,
// disk, OOM kills, swap, load average, and inode usage.
func checkNodeSystem(r *report.NodeReport, host string) []Alert {
	sys := r.System
	if sys == nil {
		return nil
	}
	var alerts []Alert
	warn := func(msg string) { alerts = append(alerts, Alert{AlertWarning, "system", host, msg}) }
	crit := func(msg string) { alerts = append(alerts, Alert{AlertCritical, "system", host, msg}) }
	info := func(msg string) { alerts = append(alerts, Alert{AlertInfo, "system", host, msg}) }

	if sys.MemUsePct > 90 {
		warn(fmt.Sprintf("Memory at %d%%", sys.MemUsePct))
	}
	if sys.DiskUsePct > 85 {
		warn(fmt.Sprintf("Disk at %d%%", sys.DiskUsePct))
	}
	if sys.OOMKills > 0 {
		crit(fmt.Sprintf("%d OOM kills detected", sys.OOMKills))
	}
	if sys.SwapUsedMB > 0 && sys.SwapTotalMB > 0 {
		if pct := sys.SwapUsedMB * 100 / sys.SwapTotalMB; pct > 30 {
			info(fmt.Sprintf("Swap usage at %d%%", pct))
		}
	}
	// Load relative to core count; >2x is sustained overload.
	if sys.CPUCount > 0 {
		if ratio := sys.LoadAvg1 / float64(sys.CPUCount); ratio > 2.0 {
			warn(fmt.Sprintf("High load: %.1f (%.1fx CPU count)", sys.LoadAvg1, ratio))
		}
	}
	// Inode exhaustion: critical above 95%, warning above 90%.
	switch {
	case sys.InodePct > 95:
		crit(fmt.Sprintf("Inode exhaustion imminent: %d%%", sys.InodePct))
	case sys.InodePct > 90:
		warn(fmt.Sprintf("Inode usage at %d%%", sys.InodePct))
	}
	return alerts
}
// checkNodeServices alerts on failed or unexpectedly-inactive orama
// services, restart loops, and any failed systemd units. Services whose
// inactivity is expected for this node's role/mode are skipped via
// shouldSkipServiceAlert.
func checkNodeServices(r *report.NodeReport, host string, nc *nodeContext) []Alert {
	if r.Services == nil {
		return nil
	}
	var alerts []Alert
	for _, svc := range r.Services.Services {
		if shouldSkipServiceAlert(svc.Name, svc.ActiveState, r, nc) {
			continue
		}
		switch state := svc.ActiveState; {
		case state == "failed":
			alerts = append(alerts, Alert{AlertCritical, "service", host,
				fmt.Sprintf("Service %s is FAILED", svc.Name)})
		case state != "active" && state != "" && state != "unknown":
			alerts = append(alerts, Alert{AlertWarning, "service", host,
				fmt.Sprintf("Service %s is %s", svc.Name, state)})
		}
		if svc.RestartLoopRisk {
			alerts = append(alerts, Alert{AlertCritical, "service", host,
				fmt.Sprintf("Service %s restart loop: %d restarts, active for %ds", svc.Name, svc.NRestarts, svc.ActiveSinceSec)})
		}
	}
	for _, unit := range r.Services.FailedUnits {
		alerts = append(alerts, Alert{AlertWarning, "service", host,
			fmt.Sprintf("Failed systemd unit: %s", unit)})
	}
	return alerts
}
// shouldSkipServiceAlert returns true if this service being inactive is
// expected given the node's role and anyone mode. Active and failed
// states are never skipped: active raises no alert anyway, and failed
// must always alert.
func shouldSkipServiceAlert(svcName, state string, r *report.NodeReport, nc *nodeContext) bool {
	if state == "active" || state == "failed" {
		return false
	}
	switch svcName {
	case "coredns":
		// CoreDNS is only expected on nameserver nodes.
		if nc == nil || !nc.isNameserver {
			return true
		}
	case "orama-anyone-client", "orama-anyone-relay":
		// With no anyone section at all, neither anyone service runs.
		if r.Anyone == nil {
			return true
		}
		// Otherwise only the service matching the configured mode runs.
		if svcName == "orama-anyone-client" && r.Anyone.Mode == "relay" {
			return true
		}
		if svcName == "orama-anyone-relay" && r.Anyone.Mode == "client" {
			return true
		}
	}
	return false
}
// checkNodeDNS derives DNS/TLS alerts for one node. Checks that only
// make sense on nameserver nodes (CoreDNS, cert expiry, name resolution,
// port 53) are gated on nc.isNameserver; Caddy is checked on every node
// since any node can host namespaces.
func checkNodeDNS(r *report.NodeReport, host string, nc *nodeContext) []Alert {
	if r.DNS == nil {
		return nil
	}
	isNameserver := nc != nil && nc.isNameserver
	var alerts []Alert
	// CoreDNS: only check on nameserver nodes
	if isNameserver && !r.DNS.CoreDNSActive {
		alerts = append(alerts, Alert{AlertCritical, "dns", host, "CoreDNS is down"})
	}
	// Caddy: check on all nodes (any node can host namespaces)
	if !r.DNS.CaddyActive {
		alerts = append(alerts, Alert{AlertCritical, "dns", host, "Caddy is down"})
	}
	// TLS cert expiry: only meaningful on nameserver nodes that have public domains.
	// NOTE(review): the >= 0 guards appear to treat negative DaysLeft as
	// "no data" rather than "expired" — confirm against the collector.
	if isNameserver {
		if r.DNS.BaseTLSDaysLeft >= 0 && r.DNS.BaseTLSDaysLeft < 14 {
			alerts = append(alerts, Alert{AlertWarning, "dns", host,
				fmt.Sprintf("Base TLS cert expires in %d days", r.DNS.BaseTLSDaysLeft)})
		}
		if r.DNS.WildTLSDaysLeft >= 0 && r.DNS.WildTLSDaysLeft < 14 {
			alerts = append(alerts, Alert{AlertWarning, "dns", host,
				fmt.Sprintf("Wildcard TLS cert expires in %d days", r.DNS.WildTLSDaysLeft)})
		}
	}
	// DNS resolution checks: only on nameserver nodes with CoreDNS running
	if isNameserver && r.DNS.CoreDNSActive {
		if !r.DNS.SOAResolves {
			alerts = append(alerts, Alert{AlertWarning, "dns", host, "SOA record not resolving"})
		}
		if !r.DNS.WildcardResolves {
			alerts = append(alerts, Alert{AlertWarning, "dns", host, "Wildcard DNS not resolving"})
		}
		if !r.DNS.BaseAResolves {
			alerts = append(alerts, Alert{AlertWarning, "dns", host, "Base domain A record not resolving"})
		}
		if !r.DNS.NSResolves {
			alerts = append(alerts, Alert{AlertWarning, "dns", host, "NS records not resolving"})
		}
		// Service active but not listening is a config/bind failure, hence critical.
		if !r.DNS.Port53Bound {
			alerts = append(alerts, Alert{AlertCritical, "dns", host, "CoreDNS active but port 53 not bound"})
		}
	}
	if r.DNS.CaddyActive && !r.DNS.Port443Bound {
		alerts = append(alerts, Alert{AlertCritical, "dns", host, "Caddy active but port 443 not bound"})
	}
	return alerts
}
// checkNodeAnyone warns when an anyone relay or client is running but
// has not finished bootstrapping.
func checkNodeAnyone(r *report.NodeReport, host string) []Alert {
	a := r.Anyone
	if a == nil {
		return nil
	}
	running := a.RelayActive || a.ClientActive
	if running && !a.Bootstrapped {
		return []Alert{{AlertWarning, "anyone", host,
			fmt.Sprintf("Anyone bootstrap at %d%%", a.BootstrapPct)}}
	}
	return nil
}
// checkNodeProcesses surfaces zombie/orphan process counts (info) and
// recent panic/fatal lines from orama-node logs (critical).
func checkNodeProcesses(r *report.NodeReport, host string) []Alert {
	procs := r.Processes
	if procs == nil {
		return nil
	}
	var alerts []Alert
	if procs.ZombieCount > 0 {
		alerts = append(alerts, Alert{AlertInfo, "system", host,
			fmt.Sprintf("%d zombie processes", procs.ZombieCount)})
	}
	if procs.OrphanCount > 0 {
		alerts = append(alerts, Alert{AlertInfo, "system", host,
			fmt.Sprintf("%d orphan orama processes", procs.OrphanCount)})
	}
	if procs.PanicCount > 0 {
		alerts = append(alerts, Alert{AlertCritical, "system", host,
			fmt.Sprintf("%d panic/fatal in orama-node logs (1h)", procs.PanicCount)})
	}
	return alerts
}
// checkNodeNamespaces warns for each namespace whose gateway or RQLite
// instance is down on this node.
func checkNodeNamespaces(r *report.NodeReport, host string) []Alert {
	var alerts []Alert
	for _, ns := range r.Namespaces {
		checks := []struct {
			up  bool
			msg string
		}{
			{ns.GatewayUp, fmt.Sprintf("Namespace %s gateway down", ns.Name)},
			{ns.RQLiteUp, fmt.Sprintf("Namespace %s RQLite down", ns.Name)},
		}
		for _, c := range checks {
			if !c.up {
				alerts = append(alerts, Alert{AlertWarning, "namespace", host, c.msg})
			}
		}
	}
	return alerts
}
// checkNodeNetwork alerts on firewall state, internet reachability, TCP
// retransmission rate, and internal service ports exposed through UFW.
func checkNodeNetwork(r *report.NodeReport, host string) []Alert {
	nw := r.Network
	if nw == nil {
		return nil
	}
	var alerts []Alert
	if !nw.UFWActive {
		alerts = append(alerts, Alert{AlertCritical, "network", host, "UFW firewall is inactive"})
	}
	if !nw.InternetReachable {
		alerts = append(alerts, Alert{AlertWarning, "network", host, "Internet not reachable (ping 8.8.8.8 failed)"})
	}
	if nw.TCPRetransRate > 5.0 {
		alerts = append(alerts, Alert{AlertWarning, "network", host,
			fmt.Sprintf("High TCP retransmission rate: %.1f%%", nw.TCPRetransRate)})
	}
	// Internal-only ports: 5001 (RQLite), 6001 (Gateway), 3320 (Olric),
	// 4501 (IPFS API). Flag ALLOW rules that mention them unless the rule
	// is restricted to the 10.0.0.x WireGuard subnet.
	internalPorts := []string{"5001", "6001", "3320", "4501"}
	for _, rule := range nw.UFWRules {
		if !strings.Contains(strings.ToLower(rule), "allow") {
			continue // deny/reject rules are not an exposure
		}
		for _, port := range internalPorts {
			if strings.Contains(rule, port) && !strings.Contains(rule, "10.0.0.") {
				alerts = append(alerts, Alert{AlertCritical, "network", host,
					fmt.Sprintf("Internal port %s exposed in UFW: %s", port, strings.TrimSpace(rule))})
			}
		}
	}
	return alerts
}
// checkNodeOlric alerts on Olric service and memberlist availability,
// noisy log patterns (suspects, flapping, errors), excessive restarts,
// and high process memory.
func checkNodeOlric(r *report.NodeReport, host string) []Alert {
	o := r.Olric
	if o == nil {
		return nil
	}
	if !o.ServiceActive {
		return []Alert{{AlertCritical, "olric", host, "Olric service down"}}
	}
	var alerts []Alert
	if !o.MemberlistUp {
		alerts = append(alerts, Alert{AlertCritical, "olric", host, "Olric memberlist port down"})
	}
	// Threshold-driven warnings, evaluated in a fixed order.
	warnings := []struct {
		trigger bool
		msg     string
	}{
		{o.LogSuspects > 0, fmt.Sprintf("Olric member suspects: %d in last hour", o.LogSuspects)},
		{o.LogFlapping > 5, fmt.Sprintf("Olric members flapping: %d join/leave events in last hour", o.LogFlapping)},
		{o.LogErrors > 20, fmt.Sprintf("High Olric error rate: %d errors in last hour", o.LogErrors)},
		{o.RestartCount > 3, fmt.Sprintf("Olric excessive restarts: %d", o.RestartCount)},
		{o.ProcessMemMB > 500, fmt.Sprintf("Olric high memory: %dMB", o.ProcessMemMB)},
	}
	for _, w := range warnings {
		if w.trigger {
			alerts = append(alerts, Alert{AlertWarning, "olric", host, w.msg})
		}
	}
	return alerts
}
// checkNodeIPFS alerts on IPFS daemon/cluster availability, private-swarm
// configuration, repo capacity, and cluster peer errors.
func checkNodeIPFS(r *report.NodeReport, host string) []Alert {
	node := r.IPFS
	if node == nil {
		return nil
	}
	var alerts []Alert
	if !node.DaemonActive {
		alerts = append(alerts, Alert{AlertCritical, "ipfs", host, "IPFS daemon down"})
	}
	if !node.ClusterActive {
		alerts = append(alerts, Alert{AlertCritical, "ipfs", host, "IPFS cluster down"})
	}
	// Swarm-level data is only meaningful while the daemon runs.
	if node.DaemonActive {
		if node.SwarmPeerCount == 0 {
			alerts = append(alerts, Alert{AlertCritical, "ipfs", host, "IPFS isolated: no swarm peers"})
		}
		if !node.HasSwarmKey {
			alerts = append(alerts, Alert{AlertCritical, "ipfs", host,
				"IPFS swarm key missing (private network compromised)"})
		}
		if !node.BootstrapEmpty {
			alerts = append(alerts, Alert{AlertWarning, "ipfs", host,
				"IPFS bootstrap list not empty (should be empty for private swarm)"})
		}
	}
	// Repo capacity: critical above 95%, warning above 90%.
	switch {
	case node.RepoUsePct > 95:
		alerts = append(alerts, Alert{AlertCritical, "ipfs", host,
			fmt.Sprintf("IPFS repo nearly full: %d%%", node.RepoUsePct)})
	case node.RepoUsePct > 90:
		alerts = append(alerts, Alert{AlertWarning, "ipfs", host,
			fmt.Sprintf("IPFS repo at %d%%", node.RepoUsePct)})
	}
	if node.ClusterErrors > 0 {
		alerts = append(alerts, Alert{AlertWarning, "ipfs", host,
			fmt.Sprintf("IPFS cluster peer errors: %d", node.ClusterErrors)})
	}
	return alerts
}
// checkNodeGateway alerts when the gateway is unresponsive, returns a
// non-200 health status, or reports unhealthy subsystems.
func checkNodeGateway(r *report.NodeReport, host string) []Alert {
	gw := r.Gateway
	if gw == nil {
		return nil
	}
	if !gw.Responsive {
		return []Alert{{AlertCritical, "gateway", host, "Gateway not responding"}}
	}
	var alerts []Alert
	if gw.HTTPStatus != 200 {
		alerts = append(alerts, Alert{AlertWarning, "gateway", host,
			fmt.Sprintf("Gateway health check returned HTTP %d", gw.HTTPStatus)})
	}
	for name, sub := range gw.Subsystems {
		if sub.Status == "ok" || sub.Status == "" {
			continue
		}
		msg := fmt.Sprintf("Gateway subsystem %s: status=%s", name, sub.Status)
		if sub.Error != "" {
			msg += fmt.Sprintf(" error=%s", sub.Error)
		}
		alerts = append(alerts, Alert{AlertWarning, "gateway", host, msg})
	}
	return alerts
}
// truncateKey shortens a WireGuard public key to its first eight
// characters plus an ellipsis, for compact alert messages. Keys of
// eight characters or fewer are returned unchanged.
func truncateKey(key string) string {
	if len(key) <= 8 {
		return key
	}
	return key[:8] + "..."
}

Some files were not shown because too many files have changed in this diff Show More