mirror of
https://github.com/DeBrosOfficial/orama.git
synced 2026-03-27 09:04:13 +00:00
Compare commits
84 Commits
v0.106.0-n
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
82c477266d | ||
|
|
169be97026 | ||
|
|
4b7c342c77 | ||
|
|
7d5ccc0678 | ||
|
|
1ca779880b | ||
|
|
3b779cd5a0 | ||
|
|
b94fd1efcd | ||
|
|
abcc23c4f3 | ||
|
|
ebaf37e9d0 | ||
|
|
7c165b9579 | ||
|
|
c536e45d0f | ||
|
|
655bd92178 | ||
|
|
211c0275d3 | ||
|
|
5456d57aeb | ||
|
|
8ea4499052 | ||
|
|
6657c90e36 | ||
|
|
0764ac287e | ||
|
|
c4fd1878a7 | ||
|
|
3d70f92ed5 | ||
|
|
fa826f0d00 | ||
|
|
733b059681 | ||
|
|
78d876e71b | ||
|
|
6468019136 | ||
|
|
e2b6f7d721 | ||
|
|
fd87eec476 | ||
|
|
a0468461ab | ||
|
|
2f5718146a | ||
|
|
f26676db2c | ||
|
|
fade8f89ed | ||
|
|
ed4e490463 | ||
|
|
6898f47e2e | ||
|
|
f0d2621199 | ||
|
|
c6998b6ac2 | ||
|
|
45a8285ae8 | ||
|
|
80e26f33fb | ||
|
|
25495448ed | ||
|
|
1882876922 | ||
|
|
7227e5ceb9 | ||
|
|
7f1c592235 | ||
|
|
72fb5f1a5a | ||
|
|
2fecebc0c2 | ||
|
|
85eb98ed34 | ||
|
|
714a986a78 | ||
|
|
bcfdabb32d | ||
|
|
3597c61cfc | ||
|
|
552fde428e | ||
|
|
ca86becf85 | ||
|
|
bfff2a241b | ||
|
|
3e9ef5ac6c | ||
|
|
f1dc3014fc | ||
|
|
19463b8621 | ||
|
|
a79ae41dd5 | ||
|
|
e4d51676cc | ||
|
|
e6f828d6f1 | ||
|
|
8ee606bfb1 | ||
|
|
58ea896cb0 | ||
|
|
d256a83fb7 | ||
|
|
c731486454 | ||
|
|
8cabe48f7d | ||
|
|
c499b2d76e | ||
|
|
4ebf558719 | ||
|
|
2b0bfaaa12 | ||
|
|
a71b979036 | ||
|
|
106c2df4d2 | ||
|
|
40600c3557 | ||
|
|
aa2da83969 | ||
|
|
bb98418ac9 | ||
|
|
b58e1d80ee | ||
|
|
4f1709e136 | ||
|
|
83804422c4 | ||
|
|
8aef779fcd | ||
|
|
0b5b6e68e3 | ||
|
|
f889c2e358 | ||
|
|
1e38fc2861 | ||
|
|
88ba08fcba | ||
|
|
865a4f3434 | ||
|
|
7163aad850 | ||
|
|
25a167f9b4 | ||
|
|
bc9cbb3627 | ||
|
|
ef8002bf13 | ||
|
|
29d255676f | ||
|
|
ba4e2688e4 | ||
|
|
749d5ed5e7 | ||
|
|
ade6241357 |
91
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
Normal file
91
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
Normal file
@ -0,0 +1,91 @@
|
||||
name: Bug Report
|
||||
description: Report a bug in Orama Network
|
||||
labels: ["bug"]
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Thanks for reporting a bug! Please fill out the sections below.
|
||||
|
||||
**Security issues:** If this is a security vulnerability, do NOT open an issue. Email security@orama.io instead.
|
||||
|
||||
- type: input
|
||||
id: version
|
||||
attributes:
|
||||
label: Orama version
|
||||
description: "Run `orama version` to find this"
|
||||
placeholder: "v0.18.0-beta"
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: dropdown
|
||||
id: component
|
||||
attributes:
|
||||
label: Component
|
||||
options:
|
||||
- Gateway / API
|
||||
- CLI (orama command)
|
||||
- WireGuard / Networking
|
||||
- RQLite / Storage
|
||||
- Olric / Caching
|
||||
- IPFS / Pinning
|
||||
- CoreDNS
|
||||
- OramaOS
|
||||
- Other
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: description
|
||||
attributes:
|
||||
label: Description
|
||||
description: A clear description of the bug
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: steps
|
||||
attributes:
|
||||
label: Steps to reproduce
|
||||
description: Minimal steps to reproduce the behavior
|
||||
placeholder: |
|
||||
1. Run `orama ...`
|
||||
2. See error
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: expected
|
||||
attributes:
|
||||
label: Expected behavior
|
||||
description: What you expected to happen
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: actual
|
||||
attributes:
|
||||
label: Actual behavior
|
||||
description: What actually happened (include error messages and logs if any)
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: environment
|
||||
attributes:
|
||||
label: Environment
|
||||
description: OS, Go version, deployment environment, etc.
|
||||
placeholder: |
|
||||
- OS: Ubuntu 22.04
|
||||
- Go: 1.23
|
||||
- Environment: sandbox
|
||||
validations:
|
||||
required: false
|
||||
|
||||
- type: textarea
|
||||
id: context
|
||||
attributes:
|
||||
label: Additional context
|
||||
description: Logs, screenshots, monitor reports, or anything else that might help
|
||||
validations:
|
||||
required: false
|
||||
49
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
Normal file
49
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
name: Feature Request
|
||||
description: Suggest a new feature or improvement
|
||||
labels: ["enhancement"]
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Thanks for the suggestion! Please describe what you'd like to see.
|
||||
|
||||
- type: dropdown
|
||||
id: component
|
||||
attributes:
|
||||
label: Component
|
||||
options:
|
||||
- Gateway / API
|
||||
- CLI (orama command)
|
||||
- WireGuard / Networking
|
||||
- RQLite / Storage
|
||||
- Olric / Caching
|
||||
- IPFS / Pinning
|
||||
- CoreDNS
|
||||
- OramaOS
|
||||
- Other
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: problem
|
||||
attributes:
|
||||
label: Problem
|
||||
description: What problem does this solve? Why do you need it?
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: solution
|
||||
attributes:
|
||||
label: Proposed solution
|
||||
description: How do you think this should work?
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: alternatives
|
||||
attributes:
|
||||
label: Alternatives considered
|
||||
description: Any workarounds or alternative approaches you've thought of
|
||||
validations:
|
||||
required: false
|
||||
31
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
31
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
@ -0,0 +1,31 @@
|
||||
## Summary
|
||||
|
||||
<!-- What does this PR do? Keep it to 1-3 bullet points. -->
|
||||
|
||||
## Motivation
|
||||
|
||||
<!-- Why is this change needed? Link to an issue if applicable. -->
|
||||
|
||||
## Test plan
|
||||
|
||||
<!-- How did you verify this works? -->
|
||||
|
||||
- [ ] `make test` passes
|
||||
- [ ] Tested on sandbox/staging environment
|
||||
|
||||
## Distributed system impact
|
||||
|
||||
<!-- Does this change affect any of the following? If yes, explain. -->
|
||||
|
||||
- [ ] Raft quorum / RQLite
|
||||
- [ ] WireGuard mesh / networking
|
||||
- [ ] Olric gossip / caching
|
||||
- [ ] Service startup ordering
|
||||
- [ ] Rolling upgrade compatibility
|
||||
|
||||
## Checklist
|
||||
|
||||
- [ ] Tests added for new functionality or bug fix
|
||||
- [ ] No debug code (`fmt.Println`, `log.Println`) left behind
|
||||
- [ ] Docs updated (if user-facing behavior changed)
|
||||
- [ ] Errors wrapped with context (`fmt.Errorf("...: %w", err)`)
|
||||
80
.github/workflows/publish-sdk.yml
vendored
Normal file
80
.github/workflows/publish-sdk.yml
vendored
Normal file
@ -0,0 +1,80 @@
|
||||
name: Publish SDK to npm
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
version:
|
||||
description: "Version to publish (e.g., 1.0.0). Leave empty to use package.json version."
|
||||
required: false
|
||||
dry-run:
|
||||
description: "Dry run (don't actually publish)"
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
jobs:
|
||||
publish:
|
||||
name: Build & Publish @debros/orama
|
||||
runs-on: ubuntu-latest
|
||||
defaults:
|
||||
run:
|
||||
working-directory: sdk
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: "20"
|
||||
registry-url: "https://registry.npmjs.org"
|
||||
|
||||
- name: Install pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 9
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Bump version
|
||||
if: inputs.version != ''
|
||||
run: npm version ${{ inputs.version }} --no-git-tag-version
|
||||
|
||||
- name: Typecheck
|
||||
run: pnpm typecheck
|
||||
|
||||
- name: Build
|
||||
run: pnpm build
|
||||
|
||||
- name: Run unit tests
|
||||
run: pnpm vitest run tests/unit
|
||||
|
||||
- name: Publish (dry run)
|
||||
if: inputs.dry-run == true
|
||||
run: npm publish --access public --dry-run
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
|
||||
|
||||
- name: Publish
|
||||
if: inputs.dry-run == false
|
||||
run: npm publish --access public
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
|
||||
|
||||
- name: Get published version
|
||||
if: inputs.dry-run == false
|
||||
id: version
|
||||
run: echo "version=$(node -p "require('./package.json').version")" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Create git tag
|
||||
if: inputs.dry-run == false
|
||||
working-directory: .
|
||||
run: |
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
git tag "sdk/v${{ steps.version.outputs.version }}"
|
||||
git push origin "sdk/v${{ steps.version.outputs.version }}"
|
||||
10
.github/workflows/release-apt.yml
vendored
10
.github/workflows/release-apt.yml
vendored
@ -28,7 +28,8 @@ jobs:
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: "1.23"
|
||||
go-version: "1.24"
|
||||
cache-dependency-path: core/go.sum
|
||||
|
||||
- name: Get version
|
||||
id: version
|
||||
@ -46,6 +47,7 @@ jobs:
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Build binary
|
||||
working-directory: core
|
||||
env:
|
||||
GOARCH: ${{ matrix.arch }}
|
||||
CGO_ENABLED: 0
|
||||
@ -57,9 +59,9 @@ jobs:
|
||||
|
||||
mkdir -p build/usr/local/bin
|
||||
go build -ldflags "$LDFLAGS" -o build/usr/local/bin/orama cmd/cli/main.go
|
||||
go build -ldflags "$LDFLAGS" -o build/usr/local/bin/debros-node cmd/node/main.go
|
||||
go build -ldflags "$LDFLAGS" -o build/usr/local/bin/orama-node cmd/node/main.go
|
||||
# Build the entire gateway package so helper files (e.g., config parsing) are included
|
||||
go build -ldflags "$LDFLAGS" -o build/usr/local/bin/debros-gateway ./cmd/gateway
|
||||
go build -ldflags "$LDFLAGS" -o build/usr/local/bin/orama-gateway ./cmd/gateway
|
||||
|
||||
- name: Create Debian package structure
|
||||
run: |
|
||||
@ -71,7 +73,7 @@ jobs:
|
||||
mkdir -p ${PKG_NAME}/usr/local/bin
|
||||
|
||||
# Copy binaries
|
||||
cp build/usr/local/bin/* ${PKG_NAME}/usr/local/bin/
|
||||
cp core/build/usr/local/bin/* ${PKG_NAME}/usr/local/bin/
|
||||
chmod 755 ${PKG_NAME}/usr/local/bin/*
|
||||
|
||||
# Create control file
|
||||
|
||||
4
.github/workflows/release.yaml
vendored
4
.github/workflows/release.yaml
vendored
@ -23,8 +23,8 @@ jobs:
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.21'
|
||||
cache: true
|
||||
go-version: '1.24'
|
||||
cache-dependency-path: core/go.sum
|
||||
|
||||
- name: Run GoReleaser
|
||||
uses: goreleaser/goreleaser-action@v5
|
||||
|
||||
164
.gitignore
vendored
164
.gitignore
vendored
@ -1,112 +1,90 @@
|
||||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, built with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
# Dependency directories (remove the comment below to include it)
|
||||
# vendor/
|
||||
|
||||
# Go workspace file
|
||||
go.work
|
||||
|
||||
# Built binaries
|
||||
bin/
|
||||
dist/
|
||||
|
||||
# IDE and editor files
|
||||
.vscode/
|
||||
.idea/
|
||||
*.swp
|
||||
*.swo
|
||||
*~
|
||||
|
||||
# OS generated files
|
||||
# === Global ===
|
||||
.DS_Store
|
||||
.codex/
|
||||
redeploy-6.sh
|
||||
.DS_Store?
|
||||
._*
|
||||
.Spotlight-V100
|
||||
.Trashes
|
||||
ehthumbs.db
|
||||
Thumbs.db
|
||||
*.swp
|
||||
*.swo
|
||||
*~
|
||||
|
||||
# Log files
|
||||
*.log
|
||||
# IDE
|
||||
.vscode/
|
||||
.idea/
|
||||
.cursor/
|
||||
|
||||
# Environment variables
|
||||
# Environment & credentials
|
||||
.env
|
||||
.env.local
|
||||
.env.*.local
|
||||
.env.*
|
||||
!.env.example
|
||||
.mcp.json
|
||||
.claude/
|
||||
.codex/
|
||||
|
||||
# E2E test config (contains production credentials)
|
||||
e2e/config.yaml
|
||||
# === Core (Go) ===
|
||||
core/phantom-auth/
|
||||
core/bin/
|
||||
core/bin-linux/
|
||||
core/dist/
|
||||
core/orama-cli-linux
|
||||
core/keys_backup/
|
||||
core/.gocache/
|
||||
core/configs/
|
||||
core/data/*
|
||||
core/tmp/
|
||||
core/temp/
|
||||
core/results/
|
||||
core/rnd/
|
||||
core/vps.txt
|
||||
core/coverage.txt
|
||||
core/coverage.html
|
||||
core/profile.out
|
||||
core/e2e/config.yaml
|
||||
core/scripts/remote-nodes.conf
|
||||
|
||||
# Temporary files
|
||||
tmp/
|
||||
temp/
|
||||
*.tmp
|
||||
|
||||
# Coverage reports
|
||||
coverage.txt
|
||||
coverage.html
|
||||
profile.out
|
||||
|
||||
# Build artifacts
|
||||
# Go build artifacts
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
*.test
|
||||
*.out
|
||||
*.deb
|
||||
*.rpm
|
||||
*.tar.gz
|
||||
*.zip
|
||||
go.work
|
||||
|
||||
# Local development files
|
||||
# Logs
|
||||
*.log
|
||||
|
||||
# Databases
|
||||
*.db
|
||||
|
||||
# === Website ===
|
||||
website/node_modules/
|
||||
website/dist/
|
||||
website/invest-api/invest-api
|
||||
website/invest-api/*.db
|
||||
website/invest-api/*.db-shm
|
||||
website/invest-api/*.db-wal
|
||||
|
||||
# === SDK (TypeScript) ===
|
||||
sdk/node_modules/
|
||||
sdk/dist/
|
||||
sdk/coverage/
|
||||
|
||||
# === Vault (Zig) ===
|
||||
vault/.zig-cache/
|
||||
vault/zig-out/
|
||||
|
||||
# === OS ===
|
||||
os/output/
|
||||
|
||||
# === Local development ===
|
||||
.dev/
|
||||
.local/
|
||||
local/
|
||||
|
||||
data/*
|
||||
./bootstrap
|
||||
./node
|
||||
data/bootstrap/rqlite/
|
||||
|
||||
.env.*
|
||||
|
||||
configs/
|
||||
|
||||
.dev/
|
||||
|
||||
.gocache/
|
||||
|
||||
.claude/
|
||||
.mcp.json
|
||||
.cursor/
|
||||
|
||||
# Remote node credentials
|
||||
scripts/remote-nodes.conf
|
||||
|
||||
orama-cli-linux
|
||||
|
||||
rnd/
|
||||
|
||||
keys_backup/
|
||||
|
||||
vps.txt
|
||||
|
||||
bin-linux/
|
||||
|
||||
website/
|
||||
|
||||
terms-agreement
|
||||
|
||||
cli
|
||||
./inspector
|
||||
|
||||
results/
|
||||
|
||||
phantom-auth/
|
||||
@ -2,18 +2,20 @@
|
||||
# Builds and releases orama (CLI) and orama-node binaries
|
||||
# Publishes to: GitHub Releases, Homebrew, and apt (.deb packages)
|
||||
|
||||
project_name: debros-network
|
||||
project_name: orama-network
|
||||
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
|
||||
before:
|
||||
hooks:
|
||||
- go mod tidy
|
||||
- cmd: go mod tidy
|
||||
dir: core
|
||||
|
||||
builds:
|
||||
# orama CLI binary
|
||||
- id: orama
|
||||
dir: core
|
||||
main: ./cmd/cli
|
||||
binary: orama
|
||||
goos:
|
||||
@ -31,6 +33,7 @@ builds:
|
||||
|
||||
# orama-node binary (Linux only for apt)
|
||||
- id: orama-node
|
||||
dir: core
|
||||
main: ./cmd/node
|
||||
binary: orama-node
|
||||
goos:
|
||||
@ -75,7 +78,7 @@ nfpms:
|
||||
- orama
|
||||
vendor: DeBros
|
||||
homepage: https://github.com/DeBrosOfficial/network
|
||||
maintainer: DeBros <support@debros.io>
|
||||
maintainer: DeBros <dev@debros.io>
|
||||
description: CLI tool for the Orama decentralized network
|
||||
license: MIT
|
||||
formats:
|
||||
@ -84,7 +87,7 @@ nfpms:
|
||||
section: utils
|
||||
priority: optional
|
||||
contents:
|
||||
- src: ./README.md
|
||||
- src: ./core/README.md
|
||||
dst: /usr/share/doc/orama/README.md
|
||||
deb:
|
||||
lintian_overrides:
|
||||
@ -97,7 +100,7 @@ nfpms:
|
||||
- orama-node
|
||||
vendor: DeBros
|
||||
homepage: https://github.com/DeBrosOfficial/network
|
||||
maintainer: DeBros <support@debros.io>
|
||||
maintainer: DeBros <dev@debros.io>
|
||||
description: Node daemon for the Orama decentralized network
|
||||
license: MIT
|
||||
formats:
|
||||
@ -106,7 +109,7 @@ nfpms:
|
||||
section: net
|
||||
priority: optional
|
||||
contents:
|
||||
- src: ./README.md
|
||||
- src: ./core/README.md
|
||||
dst: /usr/share/doc/orama-node/README.md
|
||||
deb:
|
||||
lintian_overrides:
|
||||
|
||||
@ -32,7 +32,7 @@ This Code applies within all project spaces and when an individual is officially
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the maintainers at: security@debros.io
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the maintainers at: security@orama.io
|
||||
|
||||
All complaints will be reviewed and investigated promptly and fairly.
|
||||
|
||||
|
||||
@ -1,47 +1,78 @@
|
||||
# Contributing to DeBros Network
|
||||
# Contributing to Orama Network
|
||||
|
||||
Thanks for helping improve the network! This guide covers setup, local dev, tests, and PR guidelines.
|
||||
Thanks for helping improve the network! This monorepo contains multiple projects — pick the one relevant to your contribution.
|
||||
|
||||
## Requirements
|
||||
## Repository Structure
|
||||
|
||||
- Go 1.22+ (1.23 recommended)
|
||||
- RQLite (optional for local runs; the Makefile starts nodes with embedded setup)
|
||||
- Make (optional)
|
||||
| Package | Language | Build |
|
||||
|---------|----------|-------|
|
||||
| `core/` | Go 1.24+ | `make core-build` |
|
||||
| `website/` | TypeScript (pnpm) | `make website-build` |
|
||||
| `vault/` | Zig 0.14+ | `make vault-build` |
|
||||
| `os/` | Go + Buildroot | `make os-build` |
|
||||
|
||||
## Setup
|
||||
|
||||
```bash
|
||||
git clone https://github.com/DeBrosOfficial/network.git
|
||||
cd network
|
||||
make deps
|
||||
```
|
||||
|
||||
## Build, Test, Lint
|
||||
|
||||
- Build: `make build`
|
||||
- Test: `make test`
|
||||
- Format/Vet: `make fmt vet` (or `make lint`)
|
||||
|
||||
````
|
||||
|
||||
Useful CLI commands:
|
||||
### Core (Go)
|
||||
|
||||
```bash
|
||||
./bin/orama health
|
||||
./bin/orama peers
|
||||
./bin/orama status
|
||||
````
|
||||
cd core
|
||||
make deps
|
||||
make build
|
||||
make test
|
||||
```
|
||||
|
||||
## Versioning
|
||||
### Website
|
||||
|
||||
- The CLI reports its version via `orama version`.
|
||||
- Releases are tagged (e.g., `v0.18.0-beta`) and published via GoReleaser.
|
||||
```bash
|
||||
cd website
|
||||
pnpm install
|
||||
pnpm dev
|
||||
```
|
||||
|
||||
### Vault (Zig)
|
||||
|
||||
```bash
|
||||
cd vault
|
||||
zig build
|
||||
zig build test
|
||||
```
|
||||
|
||||
## Pull Requests
|
||||
|
||||
1. Fork and create a topic branch.
|
||||
2. Ensure `make build test` passes; include tests for new functionality.
|
||||
3. Keep PRs focused and well-described (motivation, approach, testing).
|
||||
4. Update README/docs for behavior changes.
|
||||
1. Fork and create a topic branch from `main`.
|
||||
2. Ensure `make test` passes for affected packages.
|
||||
3. Include tests for new functionality or bug fixes.
|
||||
4. Keep PRs focused — one concern per PR.
|
||||
5. Write a clear description: motivation, approach, and how you tested it.
|
||||
6. Update docs if you're changing user-facing behavior.
|
||||
|
||||
## Code Style
|
||||
|
||||
### Go (core/, os/)
|
||||
|
||||
- Follow standard Go conventions
|
||||
- Run `make lint` before submitting
|
||||
- Wrap errors with context: `fmt.Errorf("failed to X: %w", err)`
|
||||
- No magic values — use named constants
|
||||
|
||||
### TypeScript (website/)
|
||||
|
||||
- TypeScript strict mode
|
||||
- Follow existing patterns in the codebase
|
||||
|
||||
### Zig (vault/)
|
||||
|
||||
- Follow standard Zig conventions
|
||||
- Run `zig build test` before submitting
|
||||
|
||||
## Security
|
||||
|
||||
If you find a security vulnerability, **do not open a public issue**. Email security@debros.io instead.
|
||||
|
||||
Thank you for contributing!
|
||||
|
||||
234
Makefile
234
Makefile
@ -1,206 +1,66 @@
|
||||
TEST?=./...
|
||||
# Orama Monorepo
|
||||
# Delegates to sub-project Makefiles
|
||||
|
||||
.PHONY: test
|
||||
test:
|
||||
@echo Running tests...
|
||||
go test -v $(TEST)
|
||||
.PHONY: help build test clean
|
||||
|
||||
# Gateway-focused E2E tests assume gateway and nodes are already running
|
||||
# Auto-discovers configuration from ~/.orama and queries database for API key
|
||||
# No environment variables required
|
||||
.PHONY: test-e2e test-e2e-deployments test-e2e-fullstack test-e2e-https test-e2e-quick test-e2e-prod test-e2e-shared test-e2e-cluster test-e2e-integration test-e2e-production
|
||||
# === Core (Go network) ===
|
||||
.PHONY: core core-build core-test core-clean core-lint
|
||||
core: core-build
|
||||
|
||||
# Production E2E tests - includes production-only tests
|
||||
test-e2e-prod:
|
||||
@if [ -z "$$ORAMA_GATEWAY_URL" ]; then \
|
||||
echo "❌ ORAMA_GATEWAY_URL not set"; \
|
||||
echo "Usage: ORAMA_GATEWAY_URL=https://dbrs.space make test-e2e-prod"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "Running E2E tests (including production-only) against $$ORAMA_GATEWAY_URL..."
|
||||
go test -v -tags "e2e production" -timeout 30m ./e2e/...
|
||||
core-build:
|
||||
$(MAKE) -C core build
|
||||
|
||||
# Generic e2e target
|
||||
test-e2e:
|
||||
@echo "Running comprehensive E2E tests..."
|
||||
@echo "Auto-discovering configuration from ~/.orama..."
|
||||
go test -v -tags e2e -timeout 30m ./e2e/...
|
||||
core-test:
|
||||
$(MAKE) -C core test
|
||||
|
||||
test-e2e-deployments:
|
||||
@echo "Running deployment E2E tests..."
|
||||
go test -v -tags e2e -timeout 15m ./e2e/deployments/...
|
||||
core-lint:
|
||||
$(MAKE) -C core lint
|
||||
|
||||
test-e2e-fullstack:
|
||||
@echo "Running fullstack E2E tests..."
|
||||
go test -v -tags e2e -timeout 20m -run "TestFullStack" ./e2e/...
|
||||
core-clean:
|
||||
$(MAKE) -C core clean
|
||||
|
||||
test-e2e-https:
|
||||
@echo "Running HTTPS/external access E2E tests..."
|
||||
go test -v -tags e2e -timeout 10m -run "TestHTTPS" ./e2e/...
|
||||
# === Website ===
|
||||
.PHONY: website website-dev website-build
|
||||
website-dev:
|
||||
cd website && pnpm dev
|
||||
|
||||
test-e2e-shared:
|
||||
@echo "Running shared E2E tests..."
|
||||
go test -v -tags e2e -timeout 10m ./e2e/shared/...
|
||||
website-build:
|
||||
cd website && pnpm build
|
||||
|
||||
test-e2e-cluster:
|
||||
@echo "Running cluster E2E tests..."
|
||||
go test -v -tags e2e -timeout 15m ./e2e/cluster/...
|
||||
# === SDK (TypeScript) ===
|
||||
.PHONY: sdk sdk-build sdk-test
|
||||
sdk: sdk-build
|
||||
|
||||
test-e2e-integration:
|
||||
@echo "Running integration E2E tests..."
|
||||
go test -v -tags e2e -timeout 20m ./e2e/integration/...
|
||||
sdk-build:
|
||||
cd sdk && pnpm install && pnpm build
|
||||
|
||||
test-e2e-production:
|
||||
@echo "Running production-only E2E tests..."
|
||||
go test -v -tags "e2e production" -timeout 15m ./e2e/production/...
|
||||
sdk-test:
|
||||
cd sdk && pnpm test
|
||||
|
||||
test-e2e-quick:
|
||||
@echo "Running quick E2E smoke tests..."
|
||||
go test -v -tags e2e -timeout 5m -run "TestStatic|TestHealth" ./e2e/...
|
||||
# === Vault (Zig) ===
|
||||
.PHONY: vault vault-build vault-test
|
||||
vault-build:
|
||||
cd vault && zig build
|
||||
|
||||
# Network - Distributed P2P Database System
|
||||
# Makefile for development and build tasks
|
||||
vault-test:
|
||||
cd vault && zig build test
|
||||
|
||||
.PHONY: build clean test deps tidy fmt vet lint install-hooks redeploy-devnet redeploy-testnet release health
|
||||
# === OS ===
|
||||
.PHONY: os os-build
|
||||
os-build:
|
||||
$(MAKE) -C os
|
||||
|
||||
VERSION := 0.106.0
|
||||
COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
|
||||
DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
|
||||
LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)'
|
||||
LDFLAGS_LINUX := -s -w $(LDFLAGS)
|
||||
# === Aggregate ===
|
||||
build: core-build
|
||||
test: core-test
|
||||
clean: core-clean
|
||||
|
||||
# Build targets
|
||||
build: deps
|
||||
@echo "Building network executables (version=$(VERSION))..."
|
||||
@mkdir -p bin
|
||||
go build -ldflags "$(LDFLAGS)" -o bin/identity ./cmd/identity
|
||||
go build -ldflags "$(LDFLAGS)" -o bin/orama-node ./cmd/node
|
||||
go build -ldflags "$(LDFLAGS)" -o bin/orama cmd/cli/main.go
|
||||
go build -ldflags "$(LDFLAGS)" -o bin/rqlite-mcp ./cmd/rqlite-mcp
|
||||
# Inject gateway build metadata via pkg path variables
|
||||
go build -ldflags "$(LDFLAGS) -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildVersion=$(VERSION)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildCommit=$(COMMIT)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildTime=$(DATE)'" -o bin/gateway ./cmd/gateway
|
||||
@echo "Build complete! Run ./bin/orama version"
|
||||
|
||||
# Cross-compile all binaries for Linux (used with --pre-built flag on VPS)
|
||||
# Builds: DeBros binaries + Olric + CoreDNS (with rqlite plugin) + Caddy (with orama DNS module)
|
||||
build-linux: deps
|
||||
@echo "Cross-compiling all binaries for linux/amd64 (version=$(VERSION))..."
|
||||
@mkdir -p bin-linux
|
||||
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/identity ./cmd/identity
|
||||
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/orama-node ./cmd/node
|
||||
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/orama cmd/cli/main.go
|
||||
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/rqlite-mcp ./cmd/rqlite-mcp
|
||||
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX) -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildVersion=$(VERSION)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildCommit=$(COMMIT)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildTime=$(DATE)'" -trimpath -o bin-linux/gateway ./cmd/gateway
|
||||
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/orama-cli ./cmd/cli
|
||||
@echo "Building Olric for linux/amd64..."
|
||||
GOOS=linux GOARCH=amd64 go build -ldflags "-s -w" -trimpath -o bin-linux/olric-server github.com/olric-data/olric/cmd/olric-server
|
||||
@echo "✓ All Linux binaries built in bin-linux/"
|
||||
@echo ""
|
||||
@echo "Next steps:"
|
||||
@echo " 1. Build CoreDNS: make build-linux-coredns"
|
||||
@echo " 2. Build Caddy: make build-linux-caddy"
|
||||
@echo " 3. Or build all: make build-linux-all"
|
||||
|
||||
# Build CoreDNS with rqlite plugin for Linux
|
||||
build-linux-coredns:
|
||||
@bash scripts/build-linux-coredns.sh
|
||||
|
||||
# Build Caddy with orama DNS module for Linux
|
||||
build-linux-caddy:
|
||||
@bash scripts/build-linux-caddy.sh
|
||||
|
||||
# Build everything for Linux (all binaries + CoreDNS + Caddy)
|
||||
build-linux-all: build-linux build-linux-coredns build-linux-caddy
|
||||
@echo ""
|
||||
@echo "✅ All Linux binaries ready in bin-linux/:"
|
||||
@ls -la bin-linux/
|
||||
@echo ""
|
||||
@echo "Deploy to VPS:"
|
||||
@echo " scp bin-linux/* ubuntu@<ip>:/home/debros/bin/"
|
||||
@echo " scp bin-linux/coredns ubuntu@<ip>:/usr/local/bin/coredns"
|
||||
@echo " scp bin-linux/caddy ubuntu@<ip>:/usr/bin/caddy"
|
||||
@echo " sudo orama install --pre-built --no-pull ..."
|
||||
|
||||
# Install git hooks
|
||||
install-hooks:
|
||||
@echo "Installing git hooks..."
|
||||
@bash scripts/install-hooks.sh
|
||||
|
||||
# Clean build artifacts
|
||||
clean:
|
||||
@echo "Cleaning build artifacts..."
|
||||
rm -rf bin/
|
||||
rm -rf data/
|
||||
@echo "Clean complete!"
|
||||
|
||||
# Deploy to devnet (build + rolling upgrade all nodes)
|
||||
redeploy-devnet:
|
||||
@bash scripts/redeploy.sh --devnet
|
||||
|
||||
# Deploy to devnet without rebuilding
|
||||
redeploy-devnet-quick:
|
||||
@bash scripts/redeploy.sh --devnet --no-build
|
||||
|
||||
# Deploy to testnet (build + rolling upgrade all nodes)
|
||||
redeploy-testnet:
|
||||
@bash scripts/redeploy.sh --testnet
|
||||
|
||||
# Deploy to testnet without rebuilding
|
||||
redeploy-testnet-quick:
|
||||
@bash scripts/redeploy.sh --testnet --no-build
|
||||
|
||||
# Interactive release workflow (tag + push)
|
||||
release:
|
||||
@bash scripts/release.sh
|
||||
|
||||
# Check health of all nodes in an environment
|
||||
# Usage: make health ENV=devnet
|
||||
health:
|
||||
@if [ -z "$(ENV)" ]; then \
|
||||
echo "Usage: make health ENV=devnet|testnet"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@while IFS='|' read -r env host pass role key; do \
|
||||
[ -z "$$env" ] && continue; \
|
||||
case "$$env" in \#*) continue;; esac; \
|
||||
env="$$(echo "$$env" | xargs)"; \
|
||||
[ "$$env" != "$(ENV)" ] && continue; \
|
||||
role="$$(echo "$$role" | xargs)"; \
|
||||
bash scripts/check-node-health.sh "$$host" "$$pass" "$$host ($$role)"; \
|
||||
done < scripts/remote-nodes.conf
|
||||
|
||||
# Help
|
||||
help:
|
||||
@echo "Available targets:"
|
||||
@echo " build - Build all executables"
|
||||
@echo " clean - Clean build artifacts"
|
||||
@echo " test - Run unit tests"
|
||||
@echo "Orama Monorepo"
|
||||
@echo ""
|
||||
@echo "E2E Testing:"
|
||||
@echo " make test-e2e-prod - Run all E2E tests incl. production-only (needs ORAMA_GATEWAY_URL)"
|
||||
@echo " make test-e2e-shared - Run shared E2E tests (cache, storage, pubsub, auth)"
|
||||
@echo " make test-e2e-cluster - Run cluster E2E tests (libp2p, olric, rqlite, namespace)"
|
||||
@echo " make test-e2e-integration - Run integration E2E tests (fullstack, persistence, concurrency)"
|
||||
@echo " make test-e2e-deployments - Run deployment E2E tests"
|
||||
@echo " make test-e2e-production - Run production-only E2E tests (DNS, HTTPS, cross-node)"
|
||||
@echo " make test-e2e-quick - Quick smoke tests (static deploys, health checks)"
|
||||
@echo " make test-e2e - Generic E2E tests (auto-discovers config)"
|
||||
@echo " Core (Go): make core-build | core-test | core-lint | core-clean"
|
||||
@echo " Website: make website-dev | website-build"
|
||||
@echo " Vault (Zig): make vault-build | vault-test"
|
||||
@echo " OS: make os-build"
|
||||
@echo ""
|
||||
@echo " Example:"
|
||||
@echo " ORAMA_GATEWAY_URL=https://orama-devnet.network make test-e2e-prod"
|
||||
@echo ""
|
||||
@echo "Deployment:"
|
||||
@echo " make redeploy-devnet - Build + rolling deploy to all devnet nodes"
|
||||
@echo " make redeploy-devnet-quick - Deploy to devnet without rebuilding"
|
||||
@echo " make redeploy-testnet - Build + rolling deploy to all testnet nodes"
|
||||
@echo " make redeploy-testnet-quick- Deploy to testnet without rebuilding"
|
||||
@echo " make health ENV=devnet - Check health of all nodes in an environment"
|
||||
@echo " make release - Interactive release workflow (tag + push)"
|
||||
@echo ""
|
||||
@echo "Maintenance:"
|
||||
@echo " deps - Download dependencies"
|
||||
@echo " tidy - Tidy dependencies"
|
||||
@echo " fmt - Format code"
|
||||
@echo " vet - Vet code"
|
||||
@echo " lint - Lint code (fmt + vet)"
|
||||
@echo " help - Show this help"
|
||||
@echo " Aggregate: make build | test | clean (delegates to core)"
|
||||
|
||||
495
README.md
495
README.md
@ -1,475 +1,50 @@
|
||||
# Orama Network - Distributed P2P Platform
|
||||
# Orama Network
|
||||
|
||||
A high-performance API Gateway and distributed platform built in Go. Provides a unified HTTP/HTTPS API for distributed SQL (RQLite), distributed caching (Olric), decentralized storage (IPFS), pub/sub messaging, and serverless WebAssembly execution.
|
||||
A decentralized infrastructure platform combining distributed SQL, IPFS storage, caching, serverless WASM execution, and privacy relay — all managed through a unified API gateway.
|
||||
|
||||
**Architecture:** Modular Gateway / Edge Proxy following SOLID principles
|
||||
## Packages
|
||||
|
||||
## Features
|
||||
|
||||
- **🔐 Authentication** - Wallet signatures, API keys, JWT tokens
|
||||
- **💾 Storage** - IPFS-based decentralized file storage with encryption
|
||||
- **⚡ Cache** - Distributed cache with Olric (in-memory key-value)
|
||||
- **🗄️ Database** - RQLite distributed SQL with Raft consensus + Per-namespace SQLite databases
|
||||
- **📡 Pub/Sub** - Real-time messaging via LibP2P and WebSocket
|
||||
- **⚙️ Serverless** - WebAssembly function execution with host functions
|
||||
- **🌐 HTTP Gateway** - Unified REST API with automatic HTTPS (Let's Encrypt)
|
||||
- **📦 Client SDK** - Type-safe Go SDK for all services
|
||||
- **🚀 App Deployments** - Deploy React, Next.js, Go, Node.js apps with automatic domains
|
||||
- **🗄️ SQLite Databases** - Per-namespace isolated databases with IPFS backups
|
||||
|
||||
## Application Deployments
|
||||
|
||||
Deploy full-stack applications with automatic domain assignment and namespace isolation.
|
||||
|
||||
### Deploy a React App
|
||||
|
||||
```bash
|
||||
# Build your app
|
||||
cd my-react-app
|
||||
npm run build
|
||||
|
||||
# Deploy to Orama Network
|
||||
orama deploy static ./dist --name my-app
|
||||
|
||||
# Your app is now live at: https://my-app.orama.network
|
||||
```
|
||||
|
||||
### Deploy Next.js with SSR
|
||||
|
||||
```bash
|
||||
cd my-nextjs-app
|
||||
|
||||
# Ensure next.config.js has: output: 'standalone'
|
||||
npm run build
|
||||
orama deploy nextjs . --name my-nextjs --ssr
|
||||
|
||||
# Live at: https://my-nextjs.orama.network
|
||||
```
|
||||
|
||||
### Deploy Go Backend
|
||||
|
||||
```bash
|
||||
# Build for Linux (name binary 'app' for auto-detection)
|
||||
GOOS=linux GOARCH=amd64 go build -o app main.go
|
||||
|
||||
# Deploy (must implement /health endpoint)
|
||||
orama deploy go ./app --name my-api
|
||||
|
||||
# API live at: https://my-api.orama.network
|
||||
```
|
||||
|
||||
### Create SQLite Database
|
||||
|
||||
```bash
|
||||
# Create database
|
||||
orama db create my-database
|
||||
|
||||
# Create schema
|
||||
orama db query my-database "CREATE TABLE users (id INT, name TEXT)"
|
||||
|
||||
# Insert data
|
||||
orama db query my-database "INSERT INTO users VALUES (1, 'Alice')"
|
||||
|
||||
# Query data
|
||||
orama db query my-database "SELECT * FROM users"
|
||||
|
||||
# Backup to IPFS
|
||||
orama db backup my-database
|
||||
```
|
||||
|
||||
### Full-Stack Example
|
||||
|
||||
Deploy a complete app with React frontend, Go backend, and SQLite database:
|
||||
|
||||
```bash
|
||||
# 1. Create database
|
||||
orama db create myapp-db
|
||||
orama db query myapp-db "CREATE TABLE users (id INT PRIMARY KEY, name TEXT)"
|
||||
|
||||
# 2. Deploy Go backend (connects to database)
|
||||
GOOS=linux GOARCH=amd64 go build -o api main.go
|
||||
orama deploy go ./api --name myapp-api
|
||||
|
||||
# 3. Deploy React frontend (calls backend API)
|
||||
cd frontend && npm run build
|
||||
orama deploy static ./dist --name myapp
|
||||
|
||||
# Access:
|
||||
# Frontend: https://myapp.orama.network
|
||||
# Backend: https://myapp-api.orama.network
|
||||
```
|
||||
|
||||
**📖 Full Guide**: See [Deployment Guide](docs/DEPLOYMENT_GUIDE.md) for complete documentation, examples, and best practices.
|
||||
| Package | Language | Description |
|
||||
|---------|----------|-------------|
|
||||
| [core/](core/) | Go | API gateway, distributed node, CLI, and client SDK |
|
||||
| [sdk/](sdk/) | TypeScript | `@debros/orama` — JavaScript/TypeScript SDK ([npm](https://www.npmjs.com/package/@debros/orama)) |
|
||||
| [website/](website/) | TypeScript | Marketing website and invest portal |
|
||||
| [vault/](vault/) | Zig | Distributed secrets vault (Shamir's Secret Sharing) |
|
||||
| [os/](os/) | Go + Buildroot | OramaOS — hardened minimal Linux for network nodes |
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Building
|
||||
|
||||
```bash
|
||||
# Build all binaries
|
||||
make build
|
||||
# Build the core network binaries
|
||||
make core-build
|
||||
|
||||
# Run tests
|
||||
make core-test
|
||||
|
||||
# Start website dev server
|
||||
make website-dev
|
||||
|
||||
# Build vault
|
||||
make vault-build
|
||||
```
|
||||
|
||||
## CLI Commands
|
||||
|
||||
### Authentication
|
||||
|
||||
```bash
|
||||
orama auth login # Authenticate with wallet
|
||||
orama auth status # Check authentication
|
||||
orama auth logout # Clear credentials
|
||||
```
|
||||
|
||||
### Application Deployments
|
||||
|
||||
```bash
|
||||
# Deploy applications
|
||||
orama deploy static <path> --name myapp # React, Vue, static sites
|
||||
orama deploy nextjs <path> --name myapp --ssr # Next.js with SSR (requires output: 'standalone')
|
||||
orama deploy go <path> --name myapp # Go binaries (must have /health endpoint)
|
||||
orama deploy nodejs <path> --name myapp # Node.js apps (must have /health endpoint)
|
||||
|
||||
# Manage deployments
|
||||
orama deployments list # List all deployments
|
||||
orama deployments get <name> # Get deployment details
|
||||
orama deployments logs <name> --follow # View logs
|
||||
orama deployments delete <name> # Delete deployment
|
||||
orama deployments rollback <name> --version 1 # Rollback to version
|
||||
```
|
||||
|
||||
### SQLite Databases
|
||||
|
||||
```bash
|
||||
orama db create <name> # Create database
|
||||
orama db query <name> "SELECT * FROM t" # Execute SQL query
|
||||
orama db list # List all databases
|
||||
orama db backup <name> # Backup to IPFS
|
||||
orama db backups <name> # List backups
|
||||
```
|
||||
|
||||
### Network Status
|
||||
|
||||
```bash
|
||||
orama health # Cluster health check
|
||||
orama peers # List connected peers
|
||||
orama status # Network status
|
||||
```
|
||||
|
||||
### RQLite Operations
|
||||
|
||||
```bash
|
||||
orama query "SELECT * FROM users"
|
||||
orama query "CREATE TABLE users (id INTEGER PRIMARY KEY)"
|
||||
orama transaction --file ops.json
|
||||
```
|
||||
|
||||
### Pub/Sub
|
||||
|
||||
```bash
|
||||
orama pubsub publish <topic> <message>
|
||||
orama pubsub subscribe <topic> 30s
|
||||
orama pubsub topics
|
||||
```
|
||||
|
||||
## Serverless Functions (WASM)
|
||||
|
||||
Orama supports high-performance serverless function execution using WebAssembly (WASM). Functions are isolated, secure, and can interact with network services like the distributed cache.
|
||||
|
||||
### 1. Build Functions
|
||||
|
||||
Functions must be compiled to WASM. We recommend using [TinyGo](https://tinygo.org/).
|
||||
|
||||
```bash
|
||||
# Build example functions to examples/functions/bin/
|
||||
./examples/functions/build.sh
|
||||
```
|
||||
|
||||
### 2. Deployment
|
||||
|
||||
Deploy your compiled `.wasm` file to the network via the Gateway.
|
||||
|
||||
```bash
|
||||
# Deploy a function
|
||||
curl -X POST https://your-node.example.com/v1/functions \
|
||||
-H "Authorization: Bearer <your_api_key>" \
|
||||
-F "name=hello-world" \
|
||||
-F "namespace=default" \
|
||||
-F "wasm=@./examples/functions/bin/hello.wasm"
|
||||
```
|
||||
|
||||
### 3. Invocation
|
||||
|
||||
Trigger your function with a JSON payload. The function receives the payload via `stdin` and returns its response via `stdout`.
|
||||
|
||||
```bash
|
||||
# Invoke via HTTP
|
||||
curl -X POST https://your-node.example.com/v1/functions/hello-world/invoke \
|
||||
-H "Authorization: Bearer <your_api_key>" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"name": "Developer"}'
|
||||
```
|
||||
|
||||
### 4. Management
|
||||
|
||||
```bash
|
||||
# List all functions in a namespace
|
||||
curl https://your-node.example.com/v1/functions?namespace=default
|
||||
|
||||
# Delete a function
|
||||
curl -X DELETE https://your-node.example.com/v1/functions/hello-world?namespace=default
|
||||
```
|
||||
|
||||
## Production Deployment
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Ubuntu 22.04+ or Debian 12+
|
||||
- `amd64` or `arm64` architecture
|
||||
- 4GB RAM, 50GB SSD, 2 CPU cores
|
||||
|
||||
### Required Ports
|
||||
|
||||
**External (must be open in firewall):**
|
||||
|
||||
- **80** - HTTP (ACME/Let's Encrypt certificate challenges)
|
||||
- **443** - HTTPS (Main gateway API endpoint)
|
||||
- **4101** - IPFS Swarm (peer connections)
|
||||
- **7001** - RQLite Raft (cluster consensus)
|
||||
|
||||
**Internal (bound to localhost, no firewall needed):**
|
||||
|
||||
- 4501 - IPFS API
|
||||
- 5001 - RQLite HTTP API
|
||||
- 6001 - Unified Gateway
|
||||
- 8080 - IPFS Gateway
|
||||
- 9050 - Anyone SOCKS5 proxy
|
||||
- 9094 - IPFS Cluster API
|
||||
- 3320/3322 - Olric Cache
|
||||
|
||||
**Anyone Relay Mode (optional, for earning rewards):**
|
||||
|
||||
- 9001 - Anyone ORPort (relay traffic, must be open externally)
|
||||
|
||||
### Anyone Network Integration
|
||||
|
||||
Orama Network integrates with the [Anyone Protocol](https://anyone.io) for anonymous routing. By default, nodes run as **clients** (consuming the network). Optionally, you can run as a **relay operator** to earn rewards.
|
||||
|
||||
**Client Mode (Default):**
|
||||
- Routes traffic through Anyone network for anonymity
|
||||
- SOCKS5 proxy on localhost:9050
|
||||
- No rewards, just consumes network
|
||||
|
||||
**Relay Mode (Earn Rewards):**
|
||||
- Provide bandwidth to the Anyone network
|
||||
- Earn $ANYONE tokens as a relay operator
|
||||
- Requires 100 $ANYONE tokens in your wallet
|
||||
- Requires ORPort (9001) open to the internet
|
||||
|
||||
```bash
|
||||
# Install as relay operator (earn rewards)
|
||||
sudo orama install --vps-ip <IP> --domain <domain> \
|
||||
--anyone-relay \
|
||||
--anyone-nickname "MyRelay" \
|
||||
--anyone-contact "operator@email.com" \
|
||||
--anyone-wallet "0x1234...abcd"
|
||||
|
||||
# With exit relay (legal implications apply)
|
||||
sudo orama install --vps-ip <IP> --domain <domain> \
|
||||
--anyone-relay \
|
||||
--anyone-exit \
|
||||
--anyone-nickname "MyExitRelay" \
|
||||
--anyone-contact "operator@email.com" \
|
||||
--anyone-wallet "0x1234...abcd"
|
||||
|
||||
# Migrate existing Anyone installation
|
||||
sudo orama install --vps-ip <IP> --domain <domain> \
|
||||
--anyone-relay \
|
||||
--anyone-migrate \
|
||||
--anyone-nickname "MyRelay" \
|
||||
--anyone-contact "operator@email.com" \
|
||||
--anyone-wallet "0x1234...abcd"
|
||||
```
|
||||
|
||||
**Important:** After installation, register your relay at [dashboard.anyone.io](https://dashboard.anyone.io) to start earning rewards.
|
||||
|
||||
### Installation
|
||||
|
||||
**macOS (Homebrew):**
|
||||
|
||||
```bash
|
||||
brew install DeBrosOfficial/tap/orama
|
||||
```
|
||||
|
||||
**Linux (Debian/Ubuntu):**
|
||||
|
||||
```bash
|
||||
# Download and install the latest .deb package
|
||||
curl -sL https://github.com/DeBrosOfficial/network/releases/latest/download/orama_$(curl -s https://api.github.com/repos/DeBrosOfficial/network/releases/latest | grep tag_name | cut -d '"' -f 4 | tr -d 'v')_linux_amd64.deb -o orama.deb
|
||||
sudo dpkg -i orama.deb
|
||||
```
|
||||
|
||||
**From Source:**
|
||||
|
||||
```bash
|
||||
go install github.com/DeBrosOfficial/network/cmd/cli@latest
|
||||
```
|
||||
|
||||
**Setup (after installation):**
|
||||
|
||||
```bash
|
||||
sudo orama install --interactive
|
||||
```
|
||||
|
||||
### Service Management
|
||||
|
||||
```bash
|
||||
# Status
|
||||
orama status
|
||||
|
||||
# Control services
|
||||
sudo orama start
|
||||
sudo orama stop
|
||||
sudo orama restart
|
||||
|
||||
# View logs
|
||||
orama logs node --follow
|
||||
orama logs gateway --follow
|
||||
orama logs ipfs --follow
|
||||
```
|
||||
|
||||
### Upgrade
|
||||
|
||||
```bash
|
||||
# Upgrade to latest version
|
||||
sudo orama upgrade --interactive
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
All configuration lives in `~/.orama/`:
|
||||
|
||||
- `configs/node.yaml` - Node configuration
|
||||
- `configs/gateway.yaml` - Gateway configuration
|
||||
- `configs/olric.yaml` - Cache configuration
|
||||
- `secrets/` - Keys and certificates
|
||||
- `data/` - Service data directories
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Services Not Starting
|
||||
|
||||
```bash
|
||||
# Check status
|
||||
systemctl status debros-node
|
||||
|
||||
# View logs
|
||||
journalctl -u debros-node -f
|
||||
|
||||
# Check log files
|
||||
tail -f /home/debros/.orama/logs/node.log
|
||||
```
|
||||
|
||||
### Port Conflicts
|
||||
|
||||
```bash
|
||||
# Check what's using specific ports
|
||||
sudo lsof -i :443 # HTTPS Gateway
|
||||
sudo lsof -i :7001 # TCP/SNI Gateway
|
||||
sudo lsof -i :6001 # Internal Gateway
|
||||
```
|
||||
|
||||
### RQLite Cluster Issues
|
||||
|
||||
```bash
|
||||
# Connect to RQLite CLI
|
||||
rqlite -H localhost -p 5001
|
||||
|
||||
# Check cluster status
|
||||
.nodes
|
||||
.status
|
||||
.ready
|
||||
|
||||
# Check consistency level
|
||||
.consistency
|
||||
```
|
||||
|
||||
### Reset Installation
|
||||
|
||||
```bash
|
||||
# Production reset (⚠️ DESTROYS DATA)
|
||||
sudo orama uninstall
|
||||
sudo rm -rf /home/debros/.orama
|
||||
sudo orama install
|
||||
```
|
||||
|
||||
## HTTP Gateway API
|
||||
|
||||
### Main Gateway Endpoints
|
||||
|
||||
- `GET /health` - Health status
|
||||
- `GET /v1/status` - Full status
|
||||
- `GET /v1/version` - Version info
|
||||
- `POST /v1/rqlite/exec` - Execute SQL
|
||||
- `POST /v1/rqlite/query` - Query database
|
||||
- `GET /v1/rqlite/schema` - Get schema
|
||||
- `POST /v1/pubsub/publish` - Publish message
|
||||
- `GET /v1/pubsub/topics` - List topics
|
||||
- `GET /v1/pubsub/ws?topic=<name>` - WebSocket subscribe
|
||||
- `POST /v1/functions` - Deploy function (multipart/form-data)
|
||||
- `POST /v1/functions/{name}/invoke` - Invoke function
|
||||
- `GET /v1/functions` - List functions
|
||||
- `DELETE /v1/functions/{name}` - Delete function
|
||||
- `GET /v1/functions/{name}/logs` - Get function logs
|
||||
|
||||
See `openapi/gateway.yaml` for complete API specification.
|
||||
|
||||
## Documentation
|
||||
|
||||
- **[Deployment Guide](docs/DEPLOYMENT_GUIDE.md)** - Deploy React, Next.js, Go apps and manage databases
|
||||
- **[Architecture Guide](docs/ARCHITECTURE.md)** - System architecture and design patterns
|
||||
- **[Client SDK](docs/CLIENT_SDK.md)** - Go SDK documentation and examples
|
||||
- **[Gateway API](docs/GATEWAY_API.md)** - Complete HTTP API reference
|
||||
- **[Security Deployment](docs/SECURITY_DEPLOYMENT_GUIDE.md)** - Production security hardening
|
||||
- **[Testing Plan](docs/TESTING_PLAN.md)** - Comprehensive testing strategy and implementation
|
||||
|
||||
## Resources
|
||||
|
||||
- [RQLite Documentation](https://rqlite.io/docs/)
|
||||
- [IPFS Documentation](https://docs.ipfs.tech/)
|
||||
- [LibP2P Documentation](https://docs.libp2p.io/)
|
||||
- [WebAssembly](https://webassembly.org/)
|
||||
- [GitHub Repository](https://github.com/DeBrosOfficial/network)
|
||||
- [Issue Tracker](https://github.com/DeBrosOfficial/network/issues)
|
||||
|
||||
## Project Structure
|
||||
|
||||
```
|
||||
network/
|
||||
├── cmd/ # Binary entry points
|
||||
│ ├── cli/ # CLI tool
|
||||
│ ├── gateway/ # HTTP Gateway
|
||||
│ ├── node/ # P2P Node
|
||||
│ └── rqlite-mcp/ # RQLite MCP server
|
||||
├── pkg/ # Core packages
|
||||
│ ├── gateway/ # Gateway implementation
|
||||
│ │ └── handlers/ # HTTP handlers by domain
|
||||
│ ├── client/ # Go SDK
|
||||
│ ├── serverless/ # WASM engine
|
||||
│ ├── rqlite/ # Database ORM
|
||||
│ ├── contracts/ # Interface definitions
|
||||
│ ├── httputil/ # HTTP utilities
|
||||
│ └── errors/ # Error handling
|
||||
├── docs/ # Documentation
|
||||
├── e2e/ # End-to-end tests
|
||||
└── examples/ # Example code
|
||||
```
|
||||
| Document | Description |
|
||||
|----------|-------------|
|
||||
| [Architecture](core/docs/ARCHITECTURE.md) | System architecture and design patterns |
|
||||
| [Deployment Guide](core/docs/DEPLOYMENT_GUIDE.md) | Deploy apps, databases, and domains |
|
||||
| [Dev & Deploy](core/docs/DEV_DEPLOY.md) | Building, deploying to VPS, rolling upgrades |
|
||||
| [Security](core/docs/SECURITY.md) | Security hardening and threat model |
|
||||
| [Monitoring](core/docs/MONITORING.md) | Cluster health monitoring |
|
||||
| [Client SDK](core/docs/CLIENT_SDK.md) | Go SDK documentation |
|
||||
| [Serverless](core/docs/SERVERLESS.md) | WASM serverless functions |
|
||||
| [Common Problems](core/docs/COMMON_PROBLEMS.md) | Troubleshooting known issues |
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions are welcome! This project follows:
|
||||
- **SOLID Principles** - Single responsibility, open/closed, etc.
|
||||
- **DRY Principle** - Don't repeat yourself
|
||||
- **Clean Architecture** - Clear separation of concerns
|
||||
- **Test Coverage** - Unit and E2E tests required
|
||||
See [CONTRIBUTING.md](CONTRIBUTING.md) for setup, development, and PR guidelines.
|
||||
|
||||
See our architecture docs for design patterns and guidelines.
|
||||
## License
|
||||
|
||||
[AGPL-3.0](LICENSE)
|
||||
|
||||
223
cmd/cli/main.go
223
cmd/cli/main.go
@ -1,223 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/cli"
|
||||
)
|
||||
|
||||
var (
|
||||
timeout = 30 * time.Second
|
||||
format = "table"
|
||||
)
|
||||
|
||||
// version metadata populated via -ldflags at build time
|
||||
var (
|
||||
version = "dev"
|
||||
commit = ""
|
||||
date = ""
|
||||
)
|
||||
|
||||
func main() {
|
||||
if len(os.Args) < 2 {
|
||||
showHelp()
|
||||
return
|
||||
}
|
||||
|
||||
command := os.Args[1]
|
||||
args := os.Args[2:]
|
||||
|
||||
// Parse global flags
|
||||
parseGlobalFlags(args)
|
||||
|
||||
switch command {
|
||||
case "version":
|
||||
fmt.Printf("orama %s", version)
|
||||
if commit != "" {
|
||||
fmt.Printf(" (commit %s)", commit)
|
||||
}
|
||||
if date != "" {
|
||||
fmt.Printf(" built %s", date)
|
||||
}
|
||||
fmt.Println()
|
||||
return
|
||||
|
||||
// Production environment commands (legacy with 'prod' prefix)
|
||||
case "prod":
|
||||
cli.HandleProdCommand(args)
|
||||
|
||||
// Direct production commands (new simplified interface)
|
||||
case "invite":
|
||||
cli.HandleProdCommand(append([]string{"invite"}, args...))
|
||||
case "install":
|
||||
cli.HandleProdCommand(append([]string{"install"}, args...))
|
||||
case "upgrade":
|
||||
cli.HandleProdCommand(append([]string{"upgrade"}, args...))
|
||||
case "migrate":
|
||||
cli.HandleProdCommand(append([]string{"migrate"}, args...))
|
||||
case "status":
|
||||
cli.HandleProdCommand(append([]string{"status"}, args...))
|
||||
case "start":
|
||||
cli.HandleProdCommand(append([]string{"start"}, args...))
|
||||
case "stop":
|
||||
cli.HandleProdCommand(append([]string{"stop"}, args...))
|
||||
case "restart":
|
||||
cli.HandleProdCommand(append([]string{"restart"}, args...))
|
||||
case "logs":
|
||||
cli.HandleProdCommand(append([]string{"logs"}, args...))
|
||||
case "uninstall":
|
||||
cli.HandleProdCommand(append([]string{"uninstall"}, args...))
|
||||
|
||||
// Authentication commands
|
||||
case "auth":
|
||||
cli.HandleAuthCommand(args)
|
||||
|
||||
// Deployment commands
|
||||
case "deploy":
|
||||
cli.HandleDeployCommand(args)
|
||||
case "deployments":
|
||||
cli.HandleDeploymentsCommand(args)
|
||||
|
||||
// Database commands
|
||||
case "db":
|
||||
cli.HandleDBCommand(args)
|
||||
|
||||
// Cluster management
|
||||
case "cluster":
|
||||
cli.HandleClusterCommand(args)
|
||||
|
||||
// Cluster inspection
|
||||
case "inspect":
|
||||
cli.HandleInspectCommand(args)
|
||||
|
||||
// Namespace management
|
||||
case "namespace":
|
||||
cli.HandleNamespaceCommand(args)
|
||||
|
||||
// Environment management
|
||||
case "env":
|
||||
cli.HandleEnvCommand(args)
|
||||
|
||||
// Help
|
||||
case "help", "--help", "-h":
|
||||
showHelp()
|
||||
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "Unknown command: %s\n", command)
|
||||
showHelp()
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func parseGlobalFlags(args []string) {
|
||||
for i, arg := range args {
|
||||
switch arg {
|
||||
case "-f", "--format":
|
||||
if i+1 < len(args) {
|
||||
format = args[i+1]
|
||||
}
|
||||
case "-t", "--timeout":
|
||||
if i+1 < len(args) {
|
||||
if d, err := time.ParseDuration(args[i+1]); err == nil {
|
||||
timeout = d
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func showHelp() {
|
||||
fmt.Printf("Orama CLI - Distributed P2P Network Management Tool\n\n")
|
||||
fmt.Printf("Usage: orama <command> [args...]\n\n")
|
||||
|
||||
fmt.Printf("🚀 Production Deployment:\n")
|
||||
fmt.Printf(" install - Install production node (requires root/sudo)\n")
|
||||
fmt.Printf(" upgrade - Upgrade existing installation\n")
|
||||
fmt.Printf(" status - Show production service status\n")
|
||||
fmt.Printf(" start - Start all production services (requires root/sudo)\n")
|
||||
fmt.Printf(" stop - Stop all production services (requires root/sudo)\n")
|
||||
fmt.Printf(" restart - Restart all production services (requires root/sudo)\n")
|
||||
fmt.Printf(" logs <service> - View production service logs\n")
|
||||
fmt.Printf(" uninstall - Remove production services (requires root/sudo)\n\n")
|
||||
|
||||
fmt.Printf("🔐 Authentication:\n")
|
||||
fmt.Printf(" auth login - Authenticate with wallet\n")
|
||||
fmt.Printf(" auth logout - Clear stored credentials\n")
|
||||
fmt.Printf(" auth whoami - Show current authentication\n")
|
||||
fmt.Printf(" auth status - Show detailed auth info\n")
|
||||
fmt.Printf(" auth help - Show auth command help\n\n")
|
||||
|
||||
fmt.Printf("📦 Deployments:\n")
|
||||
fmt.Printf(" deploy static <path> - Deploy a static site (React, Vue, etc.)\n")
|
||||
fmt.Printf(" deploy nextjs <path> - Deploy a Next.js application\n")
|
||||
fmt.Printf(" deploy go <path> - Deploy a Go backend\n")
|
||||
fmt.Printf(" deploy nodejs <path> - Deploy a Node.js backend\n")
|
||||
fmt.Printf(" deployments list - List all deployments\n")
|
||||
fmt.Printf(" deployments get <name> - Get deployment details\n")
|
||||
fmt.Printf(" deployments logs <name> - View deployment logs\n")
|
||||
fmt.Printf(" deployments delete <name> - Delete a deployment\n")
|
||||
fmt.Printf(" deployments rollback <name> - Rollback to previous version\n\n")
|
||||
|
||||
fmt.Printf("🗄️ Databases:\n")
|
||||
fmt.Printf(" db create <name> - Create a SQLite database\n")
|
||||
fmt.Printf(" db query <name> \"<sql>\" - Execute SQL query\n")
|
||||
fmt.Printf(" db list - List all databases\n")
|
||||
fmt.Printf(" db backup <name> - Backup database to IPFS\n")
|
||||
fmt.Printf(" db backups <name> - List database backups\n\n")
|
||||
|
||||
fmt.Printf("🏢 Namespaces:\n")
|
||||
fmt.Printf(" namespace delete - Delete current namespace and all resources\n")
|
||||
fmt.Printf(" namespace repair <name> - Repair under-provisioned cluster (add missing nodes)\n\n")
|
||||
|
||||
fmt.Printf("🔧 Cluster Management:\n")
|
||||
fmt.Printf(" cluster status - Show cluster node status\n")
|
||||
fmt.Printf(" cluster health - Run cluster health checks\n")
|
||||
fmt.Printf(" cluster rqlite status - Show detailed Raft state\n")
|
||||
fmt.Printf(" cluster rqlite voters - Show voter list\n")
|
||||
fmt.Printf(" cluster rqlite backup - Trigger manual backup\n")
|
||||
fmt.Printf(" cluster watch - Live cluster status monitor\n\n")
|
||||
|
||||
fmt.Printf("🔍 Cluster Inspection:\n")
|
||||
fmt.Printf(" inspect - Inspect cluster health via SSH\n")
|
||||
fmt.Printf(" inspect --env devnet - Inspect devnet nodes\n")
|
||||
fmt.Printf(" inspect --subsystem rqlite - Inspect only RQLite subsystem\n")
|
||||
fmt.Printf(" inspect --format json - Output as JSON\n\n")
|
||||
|
||||
fmt.Printf("🌍 Environments:\n")
|
||||
fmt.Printf(" env list - List all environments\n")
|
||||
fmt.Printf(" env current - Show current environment\n")
|
||||
fmt.Printf(" env switch <name> - Switch to environment\n\n")
|
||||
|
||||
fmt.Printf("Global Flags:\n")
|
||||
fmt.Printf(" -f, --format <format> - Output format: table, json (default: table)\n")
|
||||
fmt.Printf(" -t, --timeout <duration> - Operation timeout (default: 30s)\n")
|
||||
fmt.Printf(" --help, -h - Show this help message\n\n")
|
||||
|
||||
fmt.Printf("Examples:\n")
|
||||
fmt.Printf(" # Deploy a React app\n")
|
||||
fmt.Printf(" cd my-react-app && npm run build\n")
|
||||
fmt.Printf(" orama deploy static ./dist --name my-app\n\n")
|
||||
|
||||
fmt.Printf(" # Deploy a Next.js app with SSR\n")
|
||||
fmt.Printf(" cd my-nextjs-app && npm run build\n")
|
||||
fmt.Printf(" orama deploy nextjs . --name my-nextjs --ssr\n\n")
|
||||
|
||||
fmt.Printf(" # Create and use a database\n")
|
||||
fmt.Printf(" orama db create my-db\n")
|
||||
fmt.Printf(" orama db query my-db \"CREATE TABLE users (id INT, name TEXT)\"\n")
|
||||
fmt.Printf(" orama db query my-db \"INSERT INTO users VALUES (1, 'Alice')\"\n\n")
|
||||
|
||||
fmt.Printf(" # Manage deployments\n")
|
||||
fmt.Printf(" orama deployments list\n")
|
||||
fmt.Printf(" orama deployments get my-app\n")
|
||||
fmt.Printf(" orama deployments logs my-app --follow\n\n")
|
||||
|
||||
fmt.Printf(" # First node (creates new cluster)\n")
|
||||
fmt.Printf(" sudo orama install --vps-ip 203.0.113.1 --domain node-1.orama.network\n\n")
|
||||
|
||||
fmt.Printf(" # Service management\n")
|
||||
fmt.Printf(" orama status\n")
|
||||
fmt.Printf(" orama logs node --follow\n")
|
||||
}
|
||||
@ -1,326 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rqlite/gorqlite"
|
||||
)
|
||||
|
||||
// MCP JSON-RPC types
|
||||
type JSONRPCRequest struct {
|
||||
JSONRPC string `json:"jsonrpc"`
|
||||
ID any `json:"id,omitempty"`
|
||||
Method string `json:"method"`
|
||||
Params json.RawMessage `json:"params,omitempty"`
|
||||
}
|
||||
|
||||
type JSONRPCResponse struct {
|
||||
JSONRPC string `json:"jsonrpc"`
|
||||
ID any `json:"id"`
|
||||
Result any `json:"result,omitempty"`
|
||||
Error *ResponseError `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
type ResponseError struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
// Tool definition
|
||||
type Tool struct {
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description"`
|
||||
InputSchema any `json:"inputSchema"`
|
||||
}
|
||||
|
||||
// Tool call types
|
||||
type CallToolRequest struct {
|
||||
Name string `json:"name"`
|
||||
Arguments json.RawMessage `json:"arguments"`
|
||||
}
|
||||
|
||||
type TextContent struct {
|
||||
Type string `json:"type"`
|
||||
Text string `json:"text"`
|
||||
}
|
||||
|
||||
type CallToolResult struct {
|
||||
Content []TextContent `json:"content"`
|
||||
IsError bool `json:"isError,omitempty"`
|
||||
}
|
||||
|
||||
type MCPServer struct {
|
||||
conn *gorqlite.Connection
|
||||
}
|
||||
|
||||
func NewMCPServer(rqliteURL string) (*MCPServer, error) {
|
||||
// Disable gorqlite cluster discovery to avoid /nodes timeouts from unreachable peers
|
||||
if strings.Contains(rqliteURL, "?") {
|
||||
rqliteURL += "&disableClusterDiscovery=true"
|
||||
} else {
|
||||
rqliteURL += "?disableClusterDiscovery=true"
|
||||
}
|
||||
conn, err := gorqlite.Open(rqliteURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &MCPServer{
|
||||
conn: conn,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *MCPServer) handleRequest(req JSONRPCRequest) JSONRPCResponse {
|
||||
var resp JSONRPCResponse
|
||||
resp.JSONRPC = "2.0"
|
||||
resp.ID = req.ID
|
||||
|
||||
// Debug logging disabled to prevent excessive disk writes
|
||||
// log.Printf("Received method: %s", req.Method)
|
||||
|
||||
switch req.Method {
|
||||
case "initialize":
|
||||
resp.Result = map[string]any{
|
||||
"protocolVersion": "2024-11-05",
|
||||
"capabilities": map[string]any{
|
||||
"tools": map[string]any{},
|
||||
},
|
||||
"serverInfo": map[string]any{
|
||||
"name": "rqlite-mcp",
|
||||
"version": "0.1.0",
|
||||
},
|
||||
}
|
||||
|
||||
case "notifications/initialized":
|
||||
// This is a notification, no response needed
|
||||
return JSONRPCResponse{}
|
||||
|
||||
case "tools/list":
|
||||
// Debug logging disabled to prevent excessive disk writes
|
||||
tools := []Tool{
|
||||
{
|
||||
Name: "list_tables",
|
||||
Description: "List all tables in the Rqlite database",
|
||||
InputSchema: map[string]any{
|
||||
"type": "object",
|
||||
"properties": map[string]any{},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "query",
|
||||
Description: "Run a SELECT query on the Rqlite database",
|
||||
InputSchema: map[string]any{
|
||||
"type": "object",
|
||||
"properties": map[string]any{
|
||||
"sql": map[string]any{
|
||||
"type": "string",
|
||||
"description": "The SQL SELECT query to run",
|
||||
},
|
||||
},
|
||||
"required": []string{"sql"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "execute",
|
||||
Description: "Run an INSERT, UPDATE, or DELETE statement on the Rqlite database",
|
||||
InputSchema: map[string]any{
|
||||
"type": "object",
|
||||
"properties": map[string]any{
|
||||
"sql": map[string]any{
|
||||
"type": "string",
|
||||
"description": "The SQL statement (INSERT, UPDATE, DELETE) to run",
|
||||
},
|
||||
},
|
||||
"required": []string{"sql"},
|
||||
},
|
||||
},
|
||||
}
|
||||
resp.Result = map[string]any{"tools": tools}
|
||||
|
||||
case "tools/call":
|
||||
var callReq CallToolRequest
|
||||
if err := json.Unmarshal(req.Params, &callReq); err != nil {
|
||||
resp.Error = &ResponseError{Code: -32700, Message: "Parse error"}
|
||||
return resp
|
||||
}
|
||||
resp.Result = s.handleToolCall(callReq)
|
||||
|
||||
default:
|
||||
// Debug logging disabled to prevent excessive disk writes
|
||||
resp.Error = &ResponseError{Code: -32601, Message: "Method not found"}
|
||||
}
|
||||
|
||||
return resp
|
||||
}
|
||||
|
||||
func (s *MCPServer) handleToolCall(req CallToolRequest) CallToolResult {
|
||||
// Debug logging disabled to prevent excessive disk writes
|
||||
// log.Printf("Tool call: %s", req.Name)
|
||||
|
||||
switch req.Name {
|
||||
case "list_tables":
|
||||
rows, err := s.conn.QueryOne("SELECT name FROM sqlite_master WHERE type='table' ORDER BY name")
|
||||
if err != nil {
|
||||
return errorResult(fmt.Sprintf("Error listing tables: %v", err))
|
||||
}
|
||||
var tables []string
|
||||
for rows.Next() {
|
||||
slice, err := rows.Slice()
|
||||
if err == nil && len(slice) > 0 {
|
||||
tables = append(tables, fmt.Sprint(slice[0]))
|
||||
}
|
||||
}
|
||||
if len(tables) == 0 {
|
||||
return textResult("No tables found")
|
||||
}
|
||||
return textResult(strings.Join(tables, "\n"))
|
||||
|
||||
case "query":
|
||||
var args struct {
|
||||
SQL string `json:"sql"`
|
||||
}
|
||||
if err := json.Unmarshal(req.Arguments, &args); err != nil {
|
||||
return errorResult(fmt.Sprintf("Invalid arguments: %v", err))
|
||||
}
|
||||
// Debug logging disabled to prevent excessive disk writes
|
||||
rows, err := s.conn.QueryOne(args.SQL)
|
||||
if err != nil {
|
||||
return errorResult(fmt.Sprintf("Query error: %v", err))
|
||||
}
|
||||
|
||||
var result strings.Builder
|
||||
cols := rows.Columns()
|
||||
result.WriteString(strings.Join(cols, " | ") + "\n")
|
||||
result.WriteString(strings.Repeat("-", len(cols)*10) + "\n")
|
||||
|
||||
rowCount := 0
|
||||
for rows.Next() {
|
||||
vals, err := rows.Slice()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
rowCount++
|
||||
for i, v := range vals {
|
||||
if i > 0 {
|
||||
result.WriteString(" | ")
|
||||
}
|
||||
result.WriteString(fmt.Sprint(v))
|
||||
}
|
||||
result.WriteString("\n")
|
||||
}
|
||||
result.WriteString(fmt.Sprintf("\n(%d rows)", rowCount))
|
||||
return textResult(result.String())
|
||||
|
||||
case "execute":
|
||||
var args struct {
|
||||
SQL string `json:"sql"`
|
||||
}
|
||||
if err := json.Unmarshal(req.Arguments, &args); err != nil {
|
||||
return errorResult(fmt.Sprintf("Invalid arguments: %v", err))
|
||||
}
|
||||
// Debug logging disabled to prevent excessive disk writes
|
||||
res, err := s.conn.WriteOne(args.SQL)
|
||||
if err != nil {
|
||||
return errorResult(fmt.Sprintf("Execution error: %v", err))
|
||||
}
|
||||
return textResult(fmt.Sprintf("Rows affected: %d", res.RowsAffected))
|
||||
|
||||
default:
|
||||
return errorResult(fmt.Sprintf("Unknown tool: %s", req.Name))
|
||||
}
|
||||
}
|
||||
|
||||
func textResult(text string) CallToolResult {
|
||||
return CallToolResult{
|
||||
Content: []TextContent{
|
||||
{
|
||||
Type: "text",
|
||||
Text: text,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func errorResult(text string) CallToolResult {
|
||||
return CallToolResult{
|
||||
Content: []TextContent{
|
||||
{
|
||||
Type: "text",
|
||||
Text: text,
|
||||
},
|
||||
},
|
||||
IsError: true,
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Log to stderr so stdout is clean for JSON-RPC
|
||||
log.SetOutput(os.Stderr)
|
||||
|
||||
rqliteURL := "http://localhost:5001"
|
||||
if u := os.Getenv("RQLITE_URL"); u != "" {
|
||||
rqliteURL = u
|
||||
}
|
||||
|
||||
var server *MCPServer
|
||||
var err error
|
||||
|
||||
// Retry connecting to rqlite
|
||||
maxRetries := 30
|
||||
for i := 0; i < maxRetries; i++ {
|
||||
server, err = NewMCPServer(rqliteURL)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
if i%5 == 0 {
|
||||
log.Printf("Waiting for Rqlite at %s... (%d/%d)", rqliteURL, i+1, maxRetries)
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to connect to Rqlite after %d retries: %v", maxRetries, err)
|
||||
}
|
||||
|
||||
log.Printf("MCP Rqlite server started (stdio transport)")
|
||||
log.Printf("Connected to Rqlite at %s", rqliteURL)
|
||||
|
||||
// Read JSON-RPC requests from stdin, write responses to stdout
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
var req JSONRPCRequest
|
||||
if err := json.Unmarshal([]byte(line), &req); err != nil {
|
||||
// Debug logging disabled to prevent excessive disk writes
|
||||
continue
|
||||
}
|
||||
|
||||
resp := server.handleRequest(req)
|
||||
|
||||
// Don't send response for notifications (no ID)
|
||||
if req.ID == nil && strings.HasPrefix(req.Method, "notifications/") {
|
||||
continue
|
||||
}
|
||||
|
||||
respData, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
// Debug logging disabled to prevent excessive disk writes
|
||||
continue
|
||||
}
|
||||
|
||||
fmt.Println(string(respData))
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
// Debug logging disabled to prevent excessive disk writes
|
||||
}
|
||||
}
|
||||
8
core/.env.example
Normal file
8
core/.env.example
Normal file
@ -0,0 +1,8 @@
|
||||
# OpenRouter API Key for changelog generation
|
||||
# Get your API key from https://openrouter.ai/keys
|
||||
OPENROUTER_API_KEY=your-api-key-here
|
||||
|
||||
# ZeroSSL API Key for TLS certificates (alternative to Let's Encrypt)
|
||||
# Get your free API key from https://app.zerossl.com/developer
|
||||
# If not set, Caddy will use Let's Encrypt as the default CA
|
||||
ZEROSSL_API_KEY=
|
||||
@ -8,7 +8,7 @@ NOCOLOR='\033[0m'
|
||||
|
||||
# Run tests before push
|
||||
echo -e "\n${CYAN}Running tests...${NOCOLOR}"
|
||||
go test ./... # Runs all tests in your repo
|
||||
cd "$(git rev-parse --show-toplevel)/core" && go test ./...
|
||||
status=$?
|
||||
if [ $status -ne 0 ]; then
|
||||
echo -e "${RED}Push aborted: some tests failed.${NOCOLOR}"
|
||||
181
core/Makefile
Normal file
181
core/Makefile
Normal file
@ -0,0 +1,181 @@
|
||||
TEST?=./...
|
||||
|
||||
.PHONY: test
|
||||
test:
|
||||
@echo Running tests...
|
||||
go test -v $(TEST)
|
||||
|
||||
# Gateway-focused E2E tests assume gateway and nodes are already running
|
||||
# Auto-discovers configuration from ~/.orama and queries database for API key
|
||||
# No environment variables required
|
||||
.PHONY: test-e2e test-e2e-deployments test-e2e-fullstack test-e2e-https test-e2e-quick test-e2e-prod test-e2e-shared test-e2e-cluster test-e2e-integration test-e2e-production
|
||||
|
||||
# Production E2E tests - includes production-only tests
|
||||
test-e2e-prod:
|
||||
@if [ -z "$$ORAMA_GATEWAY_URL" ]; then \
|
||||
echo "❌ ORAMA_GATEWAY_URL not set"; \
|
||||
echo "Usage: ORAMA_GATEWAY_URL=https://dbrs.space make test-e2e-prod"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "Running E2E tests (including production-only) against $$ORAMA_GATEWAY_URL..."
|
||||
go test -v -tags "e2e production" -timeout 30m ./e2e/...
|
||||
|
||||
# Generic e2e target
|
||||
test-e2e:
|
||||
@echo "Running comprehensive E2E tests..."
|
||||
@echo "Auto-discovering configuration from ~/.orama..."
|
||||
go test -v -tags e2e -timeout 30m ./e2e/...
|
||||
|
||||
test-e2e-deployments:
|
||||
@echo "Running deployment E2E tests..."
|
||||
go test -v -tags e2e -timeout 15m ./e2e/deployments/...
|
||||
|
||||
test-e2e-fullstack:
|
||||
@echo "Running fullstack E2E tests..."
|
||||
go test -v -tags e2e -timeout 20m -run "TestFullStack" ./e2e/...
|
||||
|
||||
test-e2e-https:
|
||||
@echo "Running HTTPS/external access E2E tests..."
|
||||
go test -v -tags e2e -timeout 10m -run "TestHTTPS" ./e2e/...
|
||||
|
||||
test-e2e-shared:
|
||||
@echo "Running shared E2E tests..."
|
||||
go test -v -tags e2e -timeout 10m ./e2e/shared/...
|
||||
|
||||
test-e2e-cluster:
|
||||
@echo "Running cluster E2E tests..."
|
||||
go test -v -tags e2e -timeout 15m ./e2e/cluster/...
|
||||
|
||||
test-e2e-integration:
|
||||
@echo "Running integration E2E tests..."
|
||||
go test -v -tags e2e -timeout 20m ./e2e/integration/...
|
||||
|
||||
test-e2e-production:
|
||||
@echo "Running production-only E2E tests..."
|
||||
go test -v -tags "e2e production" -timeout 15m ./e2e/production/...
|
||||
|
||||
test-e2e-quick:
|
||||
@echo "Running quick E2E smoke tests..."
|
||||
go test -v -tags e2e -timeout 5m -run "TestStatic|TestHealth" ./e2e/...
|
||||
|
||||
# Network - Distributed P2P Database System
|
||||
# Makefile for development and build tasks
|
||||
|
||||
.PHONY: build clean test deps tidy fmt vet lint install-hooks push-devnet push-testnet rollout-devnet rollout-testnet release
|
||||
|
||||
VERSION := 0.120.0
|
||||
COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
|
||||
DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
|
||||
LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)'
|
||||
LDFLAGS_LINUX := -s -w $(LDFLAGS)
|
||||
|
||||
# Build targets
|
||||
build: deps
|
||||
@echo "Building network executables (version=$(VERSION))..."
|
||||
@mkdir -p bin
|
||||
go build -ldflags "$(LDFLAGS)" -o bin/identity ./cmd/identity
|
||||
go build -ldflags "$(LDFLAGS)" -o bin/orama-node ./cmd/node
|
||||
go build -ldflags "$(LDFLAGS)" -o bin/orama ./cmd/cli/
|
||||
# Inject gateway build metadata via pkg path variables
|
||||
go build -ldflags "$(LDFLAGS) -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildVersion=$(VERSION)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildCommit=$(COMMIT)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildTime=$(DATE)'" -o bin/gateway ./cmd/gateway
|
||||
go build -ldflags "$(LDFLAGS)" -o bin/sfu ./cmd/sfu
|
||||
go build -ldflags "$(LDFLAGS)" -o bin/turn ./cmd/turn
|
||||
@echo "Build complete! Run ./bin/orama version"
|
||||
|
||||
# Cross-compile CLI for Linux (only binary needed locally; VPS builds everything else from source)
|
||||
build-linux: deps
|
||||
@echo "Cross-compiling CLI for linux/amd64 (version=$(VERSION))..."
|
||||
@mkdir -p bin-linux
|
||||
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/orama ./cmd/cli/
|
||||
@echo "✓ CLI built at bin-linux/orama"
|
||||
@echo ""
|
||||
@echo "Prefer 'make build-archive' for full pre-built binary archive."
|
||||
|
||||
# Build pre-compiled binary archive for deployment (all binaries + deps)
|
||||
build-archive: deps
|
||||
@echo "Building binary archive (version=$(VERSION))..."
|
||||
go build -ldflags "$(LDFLAGS)" -o bin/orama ./cmd/cli/
|
||||
./bin/orama build --output /tmp/orama-$(VERSION)-linux-amd64.tar.gz
|
||||
|
||||
# Install git hooks
|
||||
install-hooks:
|
||||
@echo "Installing git hooks..."
|
||||
@bash scripts/install-hooks.sh
|
||||
|
||||
# Install orama CLI to ~/.local/bin and configure PATH
|
||||
install: build
|
||||
@bash scripts/install.sh
|
||||
|
||||
# Clean build artifacts
|
||||
clean:
|
||||
@echo "Cleaning build artifacts..."
|
||||
rm -rf bin/
|
||||
rm -rf data/
|
||||
@echo "Clean complete!"
|
||||
|
||||
# Push binary archive to devnet nodes (fanout distribution)
|
||||
push-devnet:
|
||||
./bin/orama node push --env devnet
|
||||
|
||||
# Push binary archive to testnet nodes (fanout distribution)
|
||||
push-testnet:
|
||||
./bin/orama node push --env testnet
|
||||
|
||||
# Full rollout to devnet (build + push + rolling upgrade)
|
||||
rollout-devnet:
|
||||
./bin/orama node rollout --env devnet --yes
|
||||
|
||||
# Full rollout to testnet (build + push + rolling upgrade)
|
||||
rollout-testnet:
|
||||
./bin/orama node rollout --env testnet --yes
|
||||
|
||||
# Interactive release workflow (tag + push)
|
||||
release:
|
||||
@bash scripts/release.sh
|
||||
|
||||
# Check health of all nodes in an environment
|
||||
# Usage: make health ENV=devnet
|
||||
health:
|
||||
@if [ -z "$(ENV)" ]; then \
|
||||
echo "Usage: make health ENV=devnet|testnet"; \
|
||||
exit 1; \
|
||||
fi
|
||||
./bin/orama monitor report --env $(ENV)
|
||||
|
||||
# Help
|
||||
help:
|
||||
@echo "Available targets:"
|
||||
@echo " build - Build all executables"
|
||||
@echo " install - Build and install 'orama' CLI to ~/.local/bin"
|
||||
@echo " clean - Clean build artifacts"
|
||||
@echo " test - Run unit tests"
|
||||
@echo ""
|
||||
@echo "E2E Testing:"
|
||||
@echo " make test-e2e-prod - Run all E2E tests incl. production-only (needs ORAMA_GATEWAY_URL)"
|
||||
@echo " make test-e2e-shared - Run shared E2E tests (cache, storage, pubsub, auth)"
|
||||
@echo " make test-e2e-cluster - Run cluster E2E tests (libp2p, olric, rqlite, namespace)"
|
||||
@echo " make test-e2e-integration - Run integration E2E tests (fullstack, persistence, concurrency)"
|
||||
@echo " make test-e2e-deployments - Run deployment E2E tests"
|
||||
@echo " make test-e2e-production - Run production-only E2E tests (DNS, HTTPS, cross-node)"
|
||||
@echo " make test-e2e-quick - Quick smoke tests (static deploys, health checks)"
|
||||
@echo " make test-e2e - Generic E2E tests (auto-discovers config)"
|
||||
@echo ""
|
||||
@echo " Example:"
|
||||
@echo " ORAMA_GATEWAY_URL=https://orama-devnet.network make test-e2e-prod"
|
||||
@echo ""
|
||||
@echo "Deployment:"
|
||||
@echo " make build-archive - Build pre-compiled binary archive for deployment"
|
||||
@echo " make push-devnet - Push binary archive to devnet nodes"
|
||||
@echo " make push-testnet - Push binary archive to testnet nodes"
|
||||
@echo " make rollout-devnet - Full rollout: build + push + rolling upgrade (devnet)"
|
||||
@echo " make rollout-testnet - Full rollout: build + push + rolling upgrade (testnet)"
|
||||
@echo " make health ENV=devnet - Check health of all nodes in an environment"
|
||||
@echo " make release - Interactive release workflow (tag + push)"
|
||||
@echo ""
|
||||
@echo "Maintenance:"
|
||||
@echo " deps - Download dependencies"
|
||||
@echo " tidy - Tidy dependencies"
|
||||
@echo " fmt - Format code"
|
||||
@echo " vet - Vet code"
|
||||
@echo " lint - Lint code (fmt + vet)"
|
||||
@echo " help - Show this help"
|
||||
5
core/cmd/cli/main.go
Normal file
5
core/cmd/cli/main.go
Normal file
@ -0,0 +1,5 @@
|
||||
package main
|
||||
|
||||
func main() {
|
||||
runCLI()
|
||||
}
|
||||
103
core/cmd/cli/root.go
Normal file
103
core/cmd/cli/root.go
Normal file
@ -0,0 +1,103 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
// Command groups
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/cmd/app"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/cmd/authcmd"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/cmd/buildcmd"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/cmd/dbcmd"
|
||||
deploycmd "github.com/DeBrosOfficial/network/pkg/cli/cmd/deploy"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/cmd/envcmd"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/cmd/functioncmd"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/cmd/inspectcmd"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/cmd/monitorcmd"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/cmd/namespacecmd"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/cmd/node"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/cmd/sandboxcmd"
|
||||
)
|
||||
|
||||
// version metadata populated via -ldflags at build time
|
||||
// Must match Makefile: -X 'main.version=...' -X 'main.commit=...' -X 'main.date=...'
|
||||
var (
|
||||
version = "dev"
|
||||
commit = ""
|
||||
date = ""
|
||||
)
|
||||
|
||||
func newRootCmd() *cobra.Command {
|
||||
rootCmd := &cobra.Command{
|
||||
Use: "orama",
|
||||
Short: "Orama CLI - Distributed P2P Network Management Tool",
|
||||
Long: `Orama CLI is a tool for managing nodes, deploying applications,
|
||||
and interacting with the Orama distributed network.`,
|
||||
SilenceUsage: true,
|
||||
SilenceErrors: true,
|
||||
}
|
||||
|
||||
// Version command
|
||||
rootCmd.AddCommand(&cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Show version information",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
fmt.Printf("orama %s", version)
|
||||
if commit != "" {
|
||||
fmt.Printf(" (commit %s)", commit)
|
||||
}
|
||||
if date != "" {
|
||||
fmt.Printf(" built %s", date)
|
||||
}
|
||||
fmt.Println()
|
||||
},
|
||||
})
|
||||
|
||||
// Node operator commands (was "prod")
|
||||
rootCmd.AddCommand(node.Cmd)
|
||||
|
||||
// Deploy command (top-level, upsert)
|
||||
rootCmd.AddCommand(deploycmd.Cmd)
|
||||
|
||||
// App management (was "deployments")
|
||||
rootCmd.AddCommand(app.Cmd)
|
||||
|
||||
// Database commands
|
||||
rootCmd.AddCommand(dbcmd.Cmd)
|
||||
|
||||
// Namespace commands
|
||||
rootCmd.AddCommand(namespacecmd.Cmd)
|
||||
|
||||
// Environment commands
|
||||
rootCmd.AddCommand(envcmd.Cmd)
|
||||
|
||||
// Auth commands
|
||||
rootCmd.AddCommand(authcmd.Cmd)
|
||||
|
||||
// Inspect command
|
||||
rootCmd.AddCommand(inspectcmd.Cmd)
|
||||
|
||||
// Monitor command
|
||||
rootCmd.AddCommand(monitorcmd.Cmd)
|
||||
|
||||
// Serverless function commands
|
||||
rootCmd.AddCommand(functioncmd.Cmd)
|
||||
|
||||
// Build command (cross-compile binary archive)
|
||||
rootCmd.AddCommand(buildcmd.Cmd)
|
||||
|
||||
// Sandbox command (ephemeral Hetzner Cloud clusters)
|
||||
rootCmd.AddCommand(sandboxcmd.Cmd)
|
||||
|
||||
return rootCmd
|
||||
}
|
||||
|
||||
func runCLI() {
|
||||
rootCmd := newRootCmd()
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
@ -69,6 +69,13 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
|
||||
}
|
||||
|
||||
// Load YAML
|
||||
type yamlWebRTCCfg struct {
|
||||
Enabled bool `yaml:"enabled"`
|
||||
SFUPort int `yaml:"sfu_port"`
|
||||
TURNDomain string `yaml:"turn_domain"`
|
||||
TURNSecret string `yaml:"turn_secret"`
|
||||
}
|
||||
|
||||
type yamlCfg struct {
|
||||
ListenAddr string `yaml:"listen_addr"`
|
||||
ClientNamespace string `yaml:"client_namespace"`
|
||||
@ -84,6 +91,7 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
|
||||
IPFSAPIURL string `yaml:"ipfs_api_url"`
|
||||
IPFSTimeout string `yaml:"ipfs_timeout"`
|
||||
IPFSReplicationFactor int `yaml:"ipfs_replication_factor"`
|
||||
WebRTC yamlWebRTCCfg `yaml:"webrtc"`
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(configPath)
|
||||
@ -192,6 +200,18 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
|
||||
cfg.IPFSReplicationFactor = y.IPFSReplicationFactor
|
||||
}
|
||||
|
||||
// WebRTC configuration
|
||||
cfg.WebRTCEnabled = y.WebRTC.Enabled
|
||||
if y.WebRTC.SFUPort > 0 {
|
||||
cfg.SFUPort = y.WebRTC.SFUPort
|
||||
}
|
||||
if v := strings.TrimSpace(y.WebRTC.TURNDomain); v != "" {
|
||||
cfg.TURNDomain = v
|
||||
}
|
||||
if v := strings.TrimSpace(y.WebRTC.TURNSecret); v != "" {
|
||||
cfg.TURNSecret = v
|
||||
}
|
||||
|
||||
// Validate configuration
|
||||
if errs := cfg.ValidateConfig(); len(errs) > 0 {
|
||||
fmt.Fprintf(os.Stderr, "\nGateway configuration errors (%d):\n", len(errs))
|
||||
118
core/cmd/sfu/config.go
Normal file
118
core/cmd/sfu/config.go
Normal file
@ -0,0 +1,118 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/config"
|
||||
"github.com/DeBrosOfficial/network/pkg/logging"
|
||||
"github.com/DeBrosOfficial/network/pkg/sfu"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// newSFUServer creates a new SFU server from config and logger.
|
||||
// Wrapper to keep main.go clean and avoid importing sfu in main.
|
||||
func newSFUServer(cfg *sfu.Config, logger *zap.Logger) (*sfu.Server, error) {
|
||||
return sfu.NewServer(cfg, logger)
|
||||
}
|
||||
|
||||
func parseSFUConfig(logger *logging.ColoredLogger) *sfu.Config {
|
||||
configFlag := flag.String("config", "", "Config file path (absolute path or filename in ~/.orama)")
|
||||
flag.Parse()
|
||||
|
||||
var configPath string
|
||||
var err error
|
||||
if *configFlag != "" {
|
||||
if filepath.IsAbs(*configFlag) {
|
||||
configPath = *configFlag
|
||||
} else {
|
||||
configPath, err = config.DefaultPath(*configFlag)
|
||||
if err != nil {
|
||||
logger.ComponentError(logging.ComponentSFU, "Failed to determine config path", zap.Error(err))
|
||||
fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
configPath, err = config.DefaultPath("sfu.yaml")
|
||||
if err != nil {
|
||||
logger.ComponentError(logging.ComponentSFU, "Failed to determine config path", zap.Error(err))
|
||||
fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
type yamlTURNServer struct {
|
||||
Host string `yaml:"host"`
|
||||
Port int `yaml:"port"`
|
||||
Secure bool `yaml:"secure"`
|
||||
}
|
||||
|
||||
type yamlCfg struct {
|
||||
ListenAddr string `yaml:"listen_addr"`
|
||||
Namespace string `yaml:"namespace"`
|
||||
MediaPortStart int `yaml:"media_port_start"`
|
||||
MediaPortEnd int `yaml:"media_port_end"`
|
||||
TURNServers []yamlTURNServer `yaml:"turn_servers"`
|
||||
TURNSecret string `yaml:"turn_secret"`
|
||||
TURNCredentialTTL int `yaml:"turn_credential_ttl"`
|
||||
RQLiteDSN string `yaml:"rqlite_dsn"`
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(configPath)
|
||||
if err != nil {
|
||||
logger.ComponentError(logging.ComponentSFU, "Config file not found",
|
||||
zap.String("path", configPath), zap.Error(err))
|
||||
fmt.Fprintf(os.Stderr, "\nConfig file not found at %s\n", configPath)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
var y yamlCfg
|
||||
if err := config.DecodeStrict(strings.NewReader(string(data)), &y); err != nil {
|
||||
logger.ComponentError(logging.ComponentSFU, "Failed to parse SFU config", zap.Error(err))
|
||||
fmt.Fprintf(os.Stderr, "Configuration parse error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
var turnServers []sfu.TURNServerConfig
|
||||
for _, ts := range y.TURNServers {
|
||||
turnServers = append(turnServers, sfu.TURNServerConfig{
|
||||
Host: ts.Host,
|
||||
Port: ts.Port,
|
||||
Secure: ts.Secure,
|
||||
})
|
||||
}
|
||||
|
||||
cfg := &sfu.Config{
|
||||
ListenAddr: y.ListenAddr,
|
||||
Namespace: y.Namespace,
|
||||
MediaPortStart: y.MediaPortStart,
|
||||
MediaPortEnd: y.MediaPortEnd,
|
||||
TURNServers: turnServers,
|
||||
TURNSecret: y.TURNSecret,
|
||||
TURNCredentialTTL: y.TURNCredentialTTL,
|
||||
RQLiteDSN: y.RQLiteDSN,
|
||||
}
|
||||
|
||||
if errs := cfg.Validate(); len(errs) > 0 {
|
||||
fmt.Fprintf(os.Stderr, "\nSFU configuration errors (%d):\n", len(errs))
|
||||
for _, e := range errs {
|
||||
fmt.Fprintf(os.Stderr, " - %s\n", e)
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "\nPlease fix the configuration and try again.\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
logger.ComponentInfo(logging.ComponentSFU, "Loaded SFU configuration",
|
||||
zap.String("path", configPath),
|
||||
zap.String("listen_addr", cfg.ListenAddr),
|
||||
zap.String("namespace", cfg.Namespace),
|
||||
zap.Int("media_ports", cfg.MediaPortEnd-cfg.MediaPortStart),
|
||||
zap.Int("turn_servers", len(cfg.TURNServers)),
|
||||
)
|
||||
|
||||
return cfg
|
||||
}
|
||||
61
core/cmd/sfu/main.go
Normal file
61
core/cmd/sfu/main.go
Normal file
@ -0,0 +1,61 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/logging"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var (
|
||||
version = "dev"
|
||||
commit = "unknown"
|
||||
)
|
||||
|
||||
func main() {
|
||||
logger, err := logging.NewColoredLogger(logging.ComponentSFU, true)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
logger.ComponentInfo(logging.ComponentSFU, "Starting SFU server",
|
||||
zap.String("version", version),
|
||||
zap.String("commit", commit))
|
||||
|
||||
cfg := parseSFUConfig(logger)
|
||||
|
||||
server, err := newSFUServer(cfg, logger.Logger)
|
||||
if err != nil {
|
||||
logger.ComponentError(logging.ComponentSFU, "Failed to create SFU server", zap.Error(err))
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Start HTTP server in background
|
||||
go func() {
|
||||
if err := server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||
logger.ComponentError(logging.ComponentSFU, "SFU server error", zap.Error(err))
|
||||
os.Exit(1)
|
||||
}
|
||||
}()
|
||||
|
||||
// Wait for termination signal
|
||||
quit := make(chan os.Signal, 1)
|
||||
signal.Notify(quit, os.Interrupt, syscall.SIGTERM)
|
||||
sig := <-quit
|
||||
|
||||
logger.ComponentInfo(logging.ComponentSFU, "Shutdown signal received", zap.String("signal", sig.String()))
|
||||
|
||||
// Graceful drain: notify peers and wait
|
||||
server.Drain(30 * time.Second)
|
||||
|
||||
if err := server.Close(); err != nil {
|
||||
logger.ComponentError(logging.ComponentSFU, "Error during shutdown", zap.Error(err))
|
||||
}
|
||||
|
||||
logger.ComponentInfo(logging.ComponentSFU, "SFU server shutdown complete")
|
||||
}
|
||||
100
core/cmd/turn/config.go
Normal file
100
core/cmd/turn/config.go
Normal file
@ -0,0 +1,100 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/config"
|
||||
"github.com/DeBrosOfficial/network/pkg/logging"
|
||||
"github.com/DeBrosOfficial/network/pkg/turn"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func parseTURNConfig(logger *logging.ColoredLogger) *turn.Config {
|
||||
configFlag := flag.String("config", "", "Config file path (absolute path or filename in ~/.orama)")
|
||||
flag.Parse()
|
||||
|
||||
var configPath string
|
||||
var err error
|
||||
if *configFlag != "" {
|
||||
if filepath.IsAbs(*configFlag) {
|
||||
configPath = *configFlag
|
||||
} else {
|
||||
configPath, err = config.DefaultPath(*configFlag)
|
||||
if err != nil {
|
||||
logger.ComponentError(logging.ComponentTURN, "Failed to determine config path", zap.Error(err))
|
||||
fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
configPath, err = config.DefaultPath("turn.yaml")
|
||||
if err != nil {
|
||||
logger.ComponentError(logging.ComponentTURN, "Failed to determine config path", zap.Error(err))
|
||||
fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
type yamlCfg struct {
|
||||
ListenAddr string `yaml:"listen_addr"`
|
||||
TURNSListenAddr string `yaml:"turns_listen_addr"`
|
||||
PublicIP string `yaml:"public_ip"`
|
||||
Realm string `yaml:"realm"`
|
||||
AuthSecret string `yaml:"auth_secret"`
|
||||
RelayPortStart int `yaml:"relay_port_start"`
|
||||
RelayPortEnd int `yaml:"relay_port_end"`
|
||||
Namespace string `yaml:"namespace"`
|
||||
TLSCertPath string `yaml:"tls_cert_path"`
|
||||
TLSKeyPath string `yaml:"tls_key_path"`
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(configPath)
|
||||
if err != nil {
|
||||
logger.ComponentError(logging.ComponentTURN, "Config file not found",
|
||||
zap.String("path", configPath), zap.Error(err))
|
||||
fmt.Fprintf(os.Stderr, "\nConfig file not found at %s\n", configPath)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
var y yamlCfg
|
||||
if err := config.DecodeStrict(strings.NewReader(string(data)), &y); err != nil {
|
||||
logger.ComponentError(logging.ComponentTURN, "Failed to parse TURN config", zap.Error(err))
|
||||
fmt.Fprintf(os.Stderr, "Configuration parse error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
cfg := &turn.Config{
|
||||
ListenAddr: y.ListenAddr,
|
||||
TURNSListenAddr: y.TURNSListenAddr,
|
||||
PublicIP: y.PublicIP,
|
||||
Realm: y.Realm,
|
||||
AuthSecret: y.AuthSecret,
|
||||
RelayPortStart: y.RelayPortStart,
|
||||
RelayPortEnd: y.RelayPortEnd,
|
||||
Namespace: y.Namespace,
|
||||
TLSCertPath: y.TLSCertPath,
|
||||
TLSKeyPath: y.TLSKeyPath,
|
||||
}
|
||||
|
||||
if errs := cfg.Validate(); len(errs) > 0 {
|
||||
fmt.Fprintf(os.Stderr, "\nTURN configuration errors (%d):\n", len(errs))
|
||||
for _, e := range errs {
|
||||
fmt.Fprintf(os.Stderr, " - %s\n", e)
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "\nPlease fix the configuration and try again.\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
logger.ComponentInfo(logging.ComponentTURN, "Loaded TURN configuration",
|
||||
zap.String("path", configPath),
|
||||
zap.String("listen_addr", cfg.ListenAddr),
|
||||
zap.String("namespace", cfg.Namespace),
|
||||
zap.String("realm", cfg.Realm),
|
||||
)
|
||||
|
||||
return cfg
|
||||
}
|
||||
48
core/cmd/turn/main.go
Normal file
48
core/cmd/turn/main.go
Normal file
@ -0,0 +1,48 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/logging"
|
||||
"github.com/DeBrosOfficial/network/pkg/turn"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var (
|
||||
version = "dev"
|
||||
commit = "unknown"
|
||||
)
|
||||
|
||||
func main() {
|
||||
logger, err := logging.NewColoredLogger(logging.ComponentTURN, true)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
logger.ComponentInfo(logging.ComponentTURN, "Starting TURN server",
|
||||
zap.String("version", version),
|
||||
zap.String("commit", commit))
|
||||
|
||||
cfg := parseTURNConfig(logger)
|
||||
|
||||
server, err := turn.NewServer(cfg, logger.Logger)
|
||||
if err != nil {
|
||||
logger.ComponentError(logging.ComponentTURN, "Failed to start TURN server", zap.Error(err))
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Wait for termination signal
|
||||
quit := make(chan os.Signal, 1)
|
||||
signal.Notify(quit, os.Interrupt, syscall.SIGTERM)
|
||||
sig := <-quit
|
||||
|
||||
logger.ComponentInfo(logging.ComponentTURN, "Shutdown signal received", zap.String("signal", sig.String()))
|
||||
|
||||
if err := server.Close(); err != nil {
|
||||
logger.ComponentError(logging.ComponentTURN, "Error during shutdown", zap.Error(err))
|
||||
}
|
||||
|
||||
logger.ComponentInfo(logging.ComponentTURN, "TURN server shutdown complete")
|
||||
}
|
||||
@ -4,7 +4,7 @@ Section: net
|
||||
Priority: optional
|
||||
Architecture: amd64
|
||||
Depends: libc6
|
||||
Maintainer: DeBros Team <dev@debros.io>
|
||||
Maintainer: DeBros Team <dev@orama.io>
|
||||
Description: Orama Network - Distributed P2P Database System
|
||||
Orama is a distributed peer-to-peer network that combines
|
||||
RQLite for distributed SQL, IPFS for content-addressed storage,
|
||||
@ -357,11 +357,36 @@ Function Invocation:
|
||||
|
||||
All inter-node communication is encrypted via a WireGuard VPN mesh:
|
||||
|
||||
- **WireGuard IPs:** Each node gets a private IP (10.0.0.x) used for all cluster traffic
|
||||
- **WireGuard IPs:** Each node gets a private IP (10.0.0.x/24) used for all cluster traffic
|
||||
- **UFW Firewall:** Only public ports are exposed: 22 (SSH), 53 (DNS, nameservers only), 80/443 (HTTP/HTTPS), 51820 (WireGuard UDP)
|
||||
- **IPv6 disabled:** System-wide via sysctl to prevent bypass of IPv4 firewall rules
|
||||
- **Internal services** (RQLite 5001/7001, IPFS 4001/4501, Olric 3320/3322, Gateway 6001) are only accessible via WireGuard or localhost
|
||||
- **Invite tokens:** Single-use, time-limited tokens for secure node joining. No shared secrets on the CLI
|
||||
- **Join flow:** New nodes authenticate via HTTPS (443), establish WireGuard tunnel, then join all services over the encrypted mesh
|
||||
- **Join flow:** New nodes authenticate via HTTPS (443) with TOFU certificate pinning, establish WireGuard tunnel, then join all services over the encrypted mesh
|
||||
|
||||
### Service Authentication
|
||||
|
||||
- **RQLite:** HTTP basic auth on all queries/executions — credentials generated at genesis, distributed via join response
|
||||
- **Olric:** Memberlist gossip encrypted with a shared 32-byte key
|
||||
- **IPFS Cluster:** TrustedPeers restricted to known cluster peer IDs (not `*`)
|
||||
- **Internal endpoints:** `/v1/internal/wg/peers` and `/v1/internal/wg/peer/remove` require cluster secret
|
||||
- **Vault:** V1 push/pull endpoints require session token authentication when guardian is configured
|
||||
- **WebSockets:** Origin header validated against the node's configured domain
|
||||
|
||||
### Token & Key Security
|
||||
|
||||
- **Refresh tokens:** Stored as SHA-256 hashes (never plaintext)
|
||||
- **API keys:** Stored as HMAC-SHA256 hashes with a server-side secret
|
||||
- **TURN secrets:** Encrypted at rest with AES-256-GCM (key derived from cluster secret)
|
||||
- **Binary signing:** Build archives signed with rootwallet EVM signature, verified on install
|
||||
|
||||
### Process Isolation
|
||||
|
||||
- **Dedicated user:** All services run as `orama` user (not root)
|
||||
- **systemd hardening:** `ProtectSystem=strict`, `NoNewPrivileges=yes`, `PrivateDevices=yes`, etc.
|
||||
- **Capabilities:** Caddy and CoreDNS get `CAP_NET_BIND_SERVICE` for privileged ports
|
||||
|
||||
See [SECURITY.md](SECURITY.md) for the full security hardening reference.
|
||||
|
||||
### TLS/HTTPS
|
||||
|
||||
@ -474,6 +499,61 @@ configured, use the IP over HTTP port 80 (`http://<ip>`) which goes through Cadd
|
||||
|
||||
Planned containerization with Docker Compose and Kubernetes support.
|
||||
|
||||
## WebRTC (Voice/Video/Data)
|
||||
|
||||
Namespaces can opt in to WebRTC support for real-time voice, video, and data channels.
|
||||
|
||||
### Components
|
||||
|
||||
- **SFU (Selective Forwarding Unit)** — Pion WebRTC server that handles signaling (WebSocket), SDP negotiation, and RTP forwarding. Runs on all 3 cluster nodes, binds only to WireGuard IPs.
|
||||
- **TURN Server** — Pion TURN relay that provides NAT traversal. Runs on 2 of 3 nodes for redundancy. Public-facing (UDP 3478, 443, relay range 49152-65535).
|
||||
|
||||
### Security Model
|
||||
|
||||
- **TURN-shielded**: SFU binds only to WireGuard (10.0.0.x), never 0.0.0.0. All client media flows through TURN relay.
|
||||
- **Forced relay**: `iceTransportPolicy: relay` enforced server-side — no direct peer connections.
|
||||
- **HMAC credentials**: Per-namespace TURN shared secret with 10-minute TTL.
|
||||
- **Namespace isolation**: Each namespace has its own TURN secret, port ranges, and rooms.
|
||||
|
||||
### Port Allocation
|
||||
|
||||
WebRTC uses a separate port allocation system from core namespace services:
|
||||
|
||||
| Service | Port Range |
|
||||
|---------|-----------|
|
||||
| SFU signaling | 30000-30099 |
|
||||
| SFU media (RTP) | 20000-29999 |
|
||||
| TURN listen | 3478/udp (standard) |
|
||||
| TURN TLS | 443/udp |
|
||||
| TURN relay | 49152-65535/udp |
|
||||
|
||||
See [docs/WEBRTC.md](WEBRTC.md) for full details including client integration, API reference, and debugging.
|
||||
|
||||
## OramaOS
|
||||
|
||||
For mainnet, devnet, and testnet environments, nodes run **OramaOS** — a custom minimal Linux image built with Buildroot.
|
||||
|
||||
**Key properties:**
|
||||
- No SSH, no shell — operators cannot access the filesystem
|
||||
- LUKS full-disk encryption with Shamir key distribution across peers
|
||||
- Read-only rootfs (SquashFS + dm-verity)
|
||||
- A/B partition updates with cryptographic signature verification
|
||||
- Service sandboxing via Linux namespaces + seccomp
|
||||
- Single root process: the **orama-agent**
|
||||
|
||||
**The orama-agent manages:**
|
||||
- Boot sequence and LUKS key reconstruction
|
||||
- WireGuard tunnel setup
|
||||
- Service lifecycle in sandboxed namespaces
|
||||
- Command reception from Gateway over WireGuard (port 9998)
|
||||
- OS updates (download, verify, A/B swap, reboot with rollback)
|
||||
|
||||
**Node enrollment:** OramaOS nodes join via `orama node enroll` instead of `orama node install`. The enrollment flow uses a registration code + invite token + wallet verification.
|
||||
|
||||
See [ORAMAOS_DEPLOYMENT.md](ORAMAOS_DEPLOYMENT.md) for the full deployment guide.
|
||||
|
||||
Sandbox clusters remain on Ubuntu for development convenience.
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
1. **GraphQL Support** - GraphQL gateway alongside REST
|
||||
@ -2,17 +2,25 @@
|
||||
|
||||
How to completely remove all Orama Network state from a VPS so it can be reinstalled fresh.
|
||||
|
||||
> **OramaOS nodes:** This guide applies to Ubuntu-based nodes only. OramaOS has no SSH or shell access. To remove an OramaOS node: use `POST /v1/node/leave` via the Gateway API for graceful departure, or reflash the OramaOS image via your VPS provider's dashboard for a factory reset. See [ORAMAOS_DEPLOYMENT.md](ORAMAOS_DEPLOYMENT.md) for details.
|
||||
|
||||
## Quick Clean (Copy-Paste)
|
||||
|
||||
Run this as root or with sudo on the target VPS:
|
||||
|
||||
```bash
|
||||
# 1. Stop and disable all services
|
||||
sudo systemctl stop debros-node debros-ipfs debros-ipfs-cluster debros-olric debros-anyone-relay debros-anyone-client coredns caddy 2>/dev/null
|
||||
sudo systemctl disable debros-node debros-ipfs debros-ipfs-cluster debros-olric debros-anyone-relay debros-anyone-client coredns caddy 2>/dev/null
|
||||
sudo systemctl stop orama-node orama-ipfs orama-ipfs-cluster orama-olric orama-anyone-relay orama-anyone-client coredns caddy 2>/dev/null
|
||||
sudo systemctl disable orama-node orama-ipfs orama-ipfs-cluster orama-olric orama-anyone-relay orama-anyone-client coredns caddy 2>/dev/null
|
||||
|
||||
# 1b. Kill leftover processes (binaries may run outside systemd)
|
||||
sudo pkill -f orama-node 2>/dev/null; sudo pkill -f ipfs-cluster-service 2>/dev/null
|
||||
sudo pkill -f "ipfs daemon" 2>/dev/null; sudo pkill -f olric-server 2>/dev/null
|
||||
sudo pkill -f rqlited 2>/dev/null; sudo pkill -f coredns 2>/dev/null
|
||||
sleep 1
|
||||
|
||||
# 2. Remove systemd service files
|
||||
sudo rm -f /etc/systemd/system/debros-*.service
|
||||
sudo rm -f /etc/systemd/system/orama-*.service
|
||||
sudo rm -f /etc/systemd/system/coredns.service
|
||||
sudo rm -f /etc/systemd/system/caddy.service
|
||||
sudo systemctl daemon-reload
|
||||
@ -31,14 +39,15 @@ sudo ufw --force reset
|
||||
sudo ufw allow 22/tcp
|
||||
sudo ufw --force enable
|
||||
|
||||
# 5. Remove debros user and home directory
|
||||
sudo userdel -r debros 2>/dev/null
|
||||
sudo rm -rf /home/debros
|
||||
# 5. Remove orama data directory
|
||||
sudo rm -rf /opt/orama
|
||||
|
||||
# 6. Remove sudoers files
|
||||
sudo rm -f /etc/sudoers.d/debros-access
|
||||
sudo rm -f /etc/sudoers.d/debros-deployments
|
||||
sudo rm -f /etc/sudoers.d/debros-wireguard
|
||||
# 6. Remove legacy orama user (if exists from old installs)
|
||||
sudo userdel -r orama 2>/dev/null
|
||||
sudo rm -rf /home/orama
|
||||
sudo rm -f /etc/sudoers.d/orama-access
|
||||
sudo rm -f /etc/sudoers.d/orama-deployments
|
||||
sudo rm -f /etc/sudoers.d/orama-wireguard
|
||||
|
||||
# 7. Remove CoreDNS config
|
||||
sudo rm -rf /etc/coredns
|
||||
@ -62,17 +71,16 @@ echo "Node cleaned. Ready for fresh install."
|
||||
|
||||
| Category | Paths |
|
||||
|----------|-------|
|
||||
| **User** | `debros` system user and `/home/debros/` |
|
||||
| **App data** | `/home/debros/.orama/` (configs, secrets, logs, IPFS, RQLite, Olric) |
|
||||
| **Source code** | `/home/debros/src/` |
|
||||
| **Binaries** | `/home/debros/bin/orama-node`, `/home/debros/bin/gateway` |
|
||||
| **Systemd** | `debros-*.service`, `coredns.service`, `caddy.service`, `orama-deploy-*.service` |
|
||||
| **App data** | `/opt/orama/.orama/` (configs, secrets, logs, IPFS, RQLite, Olric) |
|
||||
| **Source code** | `/opt/orama/src/` |
|
||||
| **Binaries** | `/opt/orama/bin/orama-node`, `/opt/orama/bin/gateway` |
|
||||
| **Systemd** | `orama-*.service`, `coredns.service`, `caddy.service`, `orama-deploy-*.service` |
|
||||
| **WireGuard** | `/etc/wireguard/wg0.conf`, `wg-quick@wg0` systemd unit |
|
||||
| **Firewall** | All UFW rules (reset to default + SSH only) |
|
||||
| **Sudoers** | `/etc/sudoers.d/debros-*` |
|
||||
| **Legacy** | `orama` user, `/etc/sudoers.d/orama-*` (old installs only) |
|
||||
| **CoreDNS** | `/etc/coredns/Corefile` |
|
||||
| **Caddy** | `/etc/caddy/Caddyfile`, `/var/lib/caddy/` (TLS certs) |
|
||||
| **Anyone Relay** | `debros-anyone-relay.service`, `debros-anyone-client.service` |
|
||||
| **Anyone Relay** | `orama-anyone-relay.service`, `orama-anyone-client.service` |
|
||||
| **Temp files** | `/tmp/orama`, `/tmp/network-source.*`, build dirs |
|
||||
|
||||
## What This Does NOT Remove
|
||||
@ -121,18 +129,19 @@ for entry in "${NODES[@]}"; do
|
||||
IFS=: read -r userhost pass <<< "$entry"
|
||||
echo "Cleaning $userhost..."
|
||||
sshpass -p "$pass" ssh -o StrictHostKeyChecking=no "$userhost" 'bash -s' << 'CLEAN'
|
||||
sudo systemctl stop debros-node debros-ipfs debros-ipfs-cluster debros-olric debros-anyone-relay debros-anyone-client coredns caddy 2>/dev/null
|
||||
sudo systemctl disable debros-node debros-ipfs debros-ipfs-cluster debros-olric debros-anyone-relay debros-anyone-client coredns caddy 2>/dev/null
|
||||
sudo rm -f /etc/systemd/system/debros-*.service /etc/systemd/system/coredns.service /etc/systemd/system/caddy.service /etc/systemd/system/orama-deploy-*.service
|
||||
sudo systemctl stop orama-node orama-ipfs orama-ipfs-cluster orama-olric orama-anyone-relay orama-anyone-client coredns caddy 2>/dev/null
|
||||
sudo systemctl disable orama-node orama-ipfs orama-ipfs-cluster orama-olric orama-anyone-relay orama-anyone-client coredns caddy 2>/dev/null
|
||||
sudo rm -f /etc/systemd/system/orama-*.service /etc/systemd/system/coredns.service /etc/systemd/system/caddy.service /etc/systemd/system/orama-deploy-*.service
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl stop wg-quick@wg0 2>/dev/null
|
||||
sudo wg-quick down wg0 2>/dev/null
|
||||
sudo systemctl disable wg-quick@wg0 2>/dev/null
|
||||
sudo rm -f /etc/wireguard/wg0.conf
|
||||
sudo ufw --force reset && sudo ufw allow 22/tcp && sudo ufw --force enable
|
||||
sudo userdel -r debros 2>/dev/null
|
||||
sudo rm -rf /home/debros
|
||||
sudo rm -f /etc/sudoers.d/debros-access /etc/sudoers.d/debros-deployments /etc/sudoers.d/debros-wireguard
|
||||
sudo rm -rf /opt/orama
|
||||
sudo userdel -r orama 2>/dev/null
|
||||
sudo rm -rf /home/orama
|
||||
sudo rm -f /etc/sudoers.d/orama-access /etc/sudoers.d/orama-deployments /etc/sudoers.d/orama-wireguard
|
||||
sudo rm -rf /etc/coredns /etc/caddy /var/lib/caddy
|
||||
sudo rm -f /tmp/orama /tmp/network-source.tar.gz
|
||||
sudo rm -rf /tmp/network-extract /tmp/coredns-build /tmp/caddy-build
|
||||
@ -32,7 +32,7 @@ wg set wg0 peer <NodeA-pubkey> remove
|
||||
wg set wg0 peer <NodeA-pubkey> endpoint <NodeA-public-ip>:51820 allowed-ips <NodeA-wg-ip>/32 persistent-keepalive 25
|
||||
```
|
||||
|
||||
Then restart services: `sudo orama prod restart`
|
||||
Then restart services: `sudo orama node restart`
|
||||
|
||||
You can find peer public keys with `wg show wg0`.
|
||||
|
||||
@ -41,19 +41,19 @@ You can find peer public keys with `wg show wg0`.
|
||||
Check the Olric config on each node:
|
||||
|
||||
```bash
|
||||
cat /home/debros/.orama/data/namespaces/<name>/configs/olric-*.yaml
|
||||
cat /opt/orama/.orama/data/namespaces/<name>/configs/olric-*.yaml
|
||||
```
|
||||
|
||||
If `bindAddr` is `0.0.0.0`, the node will try to bind to IPv6 on dual-stack hosts, breaking memberlist gossip.
|
||||
|
||||
**Fix:** Edit the YAML to use the node's WireGuard IP (run `ip addr show wg0` to find it), then restart: `sudo orama prod restart`
|
||||
**Fix:** Edit the YAML to use the node's WireGuard IP (run `ip addr show wg0` to find it), then restart: `sudo orama node restart`
|
||||
|
||||
This was fixed in code (BindAddr validation in `SpawnOlric`), so new namespaces won't have this issue.
|
||||
|
||||
### Check 3: Olric logs show "Failed UDP ping" constantly
|
||||
|
||||
```bash
|
||||
journalctl -u debros-namespace-olric@<name>.service --no-pager -n 30
|
||||
journalctl -u orama-namespace-olric@<name>.service --no-pager -n 30
|
||||
```
|
||||
|
||||
If every UDP ping fails but TCP stream connections succeed, it's the WireGuard packet loss issue (see Check 1).
|
||||
@ -69,7 +69,7 @@ If every UDP ping fails but TCP stream connections succeed, it's the WireGuard p
|
||||
**Fix:** Edit the gateway config manually:
|
||||
|
||||
```bash
|
||||
vim /home/debros/.orama/data/namespaces/<name>/configs/gateway-*.yaml
|
||||
vim /opt/orama/.orama/data/namespaces/<name>/configs/gateway-*.yaml
|
||||
```
|
||||
|
||||
Add/fix:
|
||||
@ -82,7 +82,7 @@ olric_servers:
|
||||
- "10.0.0.Z:10002"
|
||||
```
|
||||
|
||||
Then: `sudo orama prod restart`
|
||||
Then: `sudo orama node restart`
|
||||
|
||||
This was fixed in code, so new namespaces get the correct config.
|
||||
|
||||
@ -90,12 +90,12 @@ This was fixed in code, so new namespaces get the correct config.
|
||||
|
||||
## 3. Namespace not restoring after restart (missing cluster-state.json)
|
||||
|
||||
**Symptom:** After `orama prod restart`, the namespace services don't come back because `RestoreLocalClustersFromDisk` has no state file.
|
||||
**Symptom:** After `orama node restart`, the namespace services don't come back because `RestoreLocalClustersFromDisk` has no state file.
|
||||
|
||||
**Check:**
|
||||
|
||||
```bash
|
||||
ls /home/debros/.orama/data/namespaces/<name>/cluster-state.json
|
||||
ls /opt/orama/.orama/data/namespaces/<name>/cluster-state.json
|
||||
```
|
||||
|
||||
If the file doesn't exist, the node can't restore the namespace.
|
||||
@ -117,17 +117,17 @@ This was fixed in code — `ProvisionCluster` now saves state to all nodes (incl
|
||||
|
||||
## 4. Namespace gateway processes not restarting after upgrade
|
||||
|
||||
**Symptom:** After `orama upgrade --restart` or `orama prod restart`, namespace gateway/olric/rqlite services don't start.
|
||||
**Symptom:** After `orama upgrade --restart` or `orama node restart`, namespace gateway/olric/rqlite services don't start.
|
||||
|
||||
**Cause:** `orama prod stop` disables systemd template services (`debros-namespace-gateway@<name>.service`). They have `PartOf=debros-node.service`, but that only propagates restart to **enabled** services.
|
||||
**Cause:** `orama node stop` disables systemd template services (`orama-namespace-gateway@<name>.service`). They have `PartOf=orama-node.service`, but that only propagates restart to **enabled** services.
|
||||
|
||||
**Fix:** Re-enable the services before restarting:
|
||||
|
||||
```bash
|
||||
systemctl enable debros-namespace-rqlite@<name>.service
|
||||
systemctl enable debros-namespace-olric@<name>.service
|
||||
systemctl enable debros-namespace-gateway@<name>.service
|
||||
sudo orama prod restart
|
||||
systemctl enable orama-namespace-rqlite@<name>.service
|
||||
systemctl enable orama-namespace-olric@<name>.service
|
||||
systemctl enable orama-namespace-gateway@<name>.service
|
||||
sudo orama node restart
|
||||
```
|
||||
|
||||
This was fixed in code — the upgrade orchestrator now re-enables `@` services before restarting.
|
||||
@ -150,11 +150,68 @@ ssh -n user@host 'command'
|
||||
|
||||
---
|
||||
|
||||
---
|
||||
|
||||
## 6. RQLite returns 401 Unauthorized
|
||||
|
||||
**Symptom:** RQLite queries fail with HTTP 401 after security hardening.
|
||||
|
||||
**Cause:** RQLite now requires basic auth. The client isn't sending credentials.
|
||||
|
||||
**Fix:** Ensure the RQLite client is configured with the credentials from `/opt/orama/.orama/secrets/rqlite-auth.json`. The central RQLite client wrapper (`pkg/rqlite/client.go`) handles this automatically. If using a standalone client (e.g., CoreDNS plugin), ensure it's also configured.
|
||||
|
||||
---
|
||||
|
||||
## 7. Olric cluster split after upgrade
|
||||
|
||||
**Symptom:** Olric nodes can't gossip after enabling memberlist encryption.
|
||||
|
||||
**Cause:** Olric memberlist encryption is all-or-nothing. Nodes with encryption can't communicate with nodes without it.
|
||||
|
||||
**Fix:** All nodes must be restarted simultaneously when enabling Olric encryption. The cache will be lost (it rebuilds from DB). This is expected — Olric is a cache, not persistent storage.
|
||||
|
||||
---
|
||||
|
||||
## 8. OramaOS: LUKS unlock fails
|
||||
|
||||
**Symptom:** OramaOS node can't reconstruct its LUKS key after reboot.
|
||||
|
||||
**Cause:** Not enough peer vault-guardians are online to meet the Shamir threshold (K = max(3, N/3)).
|
||||
|
||||
**Fix:** Ensure enough cluster nodes are online and reachable over WireGuard. The agent retries with exponential backoff. For genesis nodes before 5+ peers exist, use:
|
||||
|
||||
```bash
|
||||
orama node unlock --genesis --node-ip <wg-ip>
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 9. OramaOS: Enrollment timeout
|
||||
|
||||
**Symptom:** `orama node enroll` hangs or times out.
|
||||
|
||||
**Cause:** The OramaOS node's port 9999 isn't reachable, or the Gateway can't reach the node's WebSocket.
|
||||
|
||||
**Fix:** Check that port 9999 is open in your VPS provider's external firewall (Hetzner firewall, AWS security groups, etc.). OramaOS opens it internally, but provider-level firewalls must be configured separately.
|
||||
|
||||
---
|
||||
|
||||
## 10. Binary signature verification fails
|
||||
|
||||
**Symptom:** `orama node install` rejects the binary archive with a signature error.
|
||||
|
||||
**Cause:** The archive was tampered with, or the manifest.sig file is missing/corrupted.
|
||||
|
||||
**Fix:** Rebuild the archive with `orama build` and re-sign with `make sign` (in the orama-os repo). Ensure you're using the rootwallet that matches the embedded signer address.
|
||||
|
||||
---
|
||||
|
||||
## General Debugging Tips
|
||||
|
||||
- **Always use `sudo orama prod restart`** instead of raw `systemctl` commands
|
||||
- **Namespace data lives at:** `/home/debros/.orama/data/namespaces/<name>/`
|
||||
- **Check service logs:** `journalctl -u debros-namespace-olric@<name>.service --no-pager -n 50`
|
||||
- **Always use `sudo orama node restart`** instead of raw `systemctl` commands
|
||||
- **Namespace data lives at:** `/opt/orama/.orama/data/namespaces/<name>/`
|
||||
- **Check service logs:** `journalctl -u orama-namespace-olric@<name>.service --no-pager -n 50`
|
||||
- **Check WireGuard:** `wg show wg0` — look for recent handshakes and transfer bytes
|
||||
- **Check gateway health:** `curl http://localhost:<port>/v1/health` from the node itself
|
||||
- **Node IPs:** Check `scripts/remote-nodes.conf` for credentials, `wg show wg0` for WG IPs
|
||||
- **OramaOS nodes:** No SSH access — use Gateway API endpoints (`/v1/node/status`, `/v1/node/logs`) for diagnostics
|
||||
@ -163,7 +163,7 @@ orama deploy nextjs ./nextjs.tar.gz --name my-nextjs --ssr
|
||||
# URLs:
|
||||
# • https://my-nextjs.orama.network
|
||||
#
|
||||
# ⚠️ Note: SSR deployment may take a minute to start. Check status with: orama deployments get my-nextjs
|
||||
# ⚠️ Note: SSR deployment may take a minute to start. Check status with: orama app get my-nextjs
|
||||
```
|
||||
|
||||
### What Happens Behind the Scenes
|
||||
@ -369,7 +369,7 @@ orama db create my-database
|
||||
# Output:
|
||||
# ✅ Database created: my-database
|
||||
# Home Node: node-abc123
|
||||
# File Path: /home/debros/.orama/data/sqlite/your-namespace/my-database.db
|
||||
# File Path: /opt/orama/.orama/data/sqlite/your-namespace/my-database.db
|
||||
```
|
||||
|
||||
### Executing Queries
|
||||
@ -588,7 +588,7 @@ func main() {
|
||||
// DATABASE_NAME env var is automatically set by Orama
|
||||
dbPath := os.Getenv("DATABASE_PATH")
|
||||
if dbPath == "" {
|
||||
dbPath = "/home/debros/.orama/data/sqlite/" + os.Getenv("NAMESPACE") + "/myapp-db.db"
|
||||
dbPath = "/opt/orama/.orama/data/sqlite/" + os.Getenv("NAMESPACE") + "/myapp-db.db"
|
||||
}
|
||||
|
||||
var err error
|
||||
@ -795,7 +795,7 @@ Open your browser to:
|
||||
### List All Deployments
|
||||
|
||||
```bash
|
||||
orama deployments list
|
||||
orama app list
|
||||
|
||||
# Output:
|
||||
# NAME TYPE STATUS VERSION CREATED
|
||||
@ -809,7 +809,7 @@ orama deployments list
|
||||
### Get Deployment Details
|
||||
|
||||
```bash
|
||||
orama deployments get my-react-app
|
||||
orama app get my-react-app
|
||||
|
||||
# Output:
|
||||
# Deployment: my-react-app
|
||||
@ -835,17 +835,17 @@ orama deployments get my-react-app
|
||||
|
||||
```bash
|
||||
# View last 100 lines
|
||||
orama deployments logs my-nextjs
|
||||
orama app logs my-nextjs
|
||||
|
||||
# Follow logs in real-time
|
||||
orama deployments logs my-nextjs --follow
|
||||
orama app logs my-nextjs --follow
|
||||
```
|
||||
|
||||
### Rollback to Previous Version
|
||||
|
||||
```bash
|
||||
# Rollback to version 1
|
||||
orama deployments rollback my-nextjs --version 1
|
||||
orama app rollback my-nextjs --version 1
|
||||
|
||||
# Output:
|
||||
# ⚠️ Rolling back 'my-nextjs' to version 1. Continue? (y/N): y
|
||||
@ -862,7 +862,7 @@ orama deployments rollback my-nextjs --version 1
|
||||
### Delete Deployment
|
||||
|
||||
```bash
|
||||
orama deployments delete my-old-app
|
||||
orama app delete my-old-app
|
||||
|
||||
# Output:
|
||||
# ⚠️ Are you sure you want to delete deployment 'my-old-app'? (y/N): y
|
||||
@ -872,6 +872,57 @@ orama deployments delete my-old-app
|
||||
|
||||
---
|
||||
|
||||
## WebRTC (Voice/Video/Data)
|
||||
|
||||
Namespaces can enable WebRTC support for real-time communication (voice calls, video calls, data channels).
|
||||
|
||||
### Enable WebRTC
|
||||
|
||||
```bash
|
||||
# Enable WebRTC for a namespace (must be run on a cluster node)
|
||||
orama namespace enable webrtc --namespace myapp
|
||||
|
||||
# Check WebRTC status
|
||||
orama namespace webrtc-status --namespace myapp
|
||||
```
|
||||
|
||||
This provisions SFU servers on all 3 nodes and TURN relay servers on 2 nodes, allocates port blocks, creates DNS records, and opens firewall ports.
|
||||
|
||||
### Disable WebRTC
|
||||
|
||||
```bash
|
||||
orama namespace disable webrtc --namespace myapp
|
||||
```
|
||||
|
||||
Stops all SFU/TURN services, deallocates ports, removes DNS records, and closes firewall ports.
|
||||
|
||||
### Client Integration
|
||||
|
||||
```javascript
|
||||
// 1. Get TURN credentials
|
||||
const creds = await fetch('https://ns-myapp.orama.network/v1/webrtc/turn/credentials', {
|
||||
method: 'POST',
|
||||
headers: { 'Authorization': `Bearer ${jwt}` }
|
||||
});
|
||||
const { urls, username, credential, ttl } = await creds.json();
|
||||
|
||||
// 2. Create PeerConnection (forced relay)
|
||||
const pc = new RTCPeerConnection({
|
||||
iceServers: [{ urls, username, credential }],
|
||||
iceTransportPolicy: 'relay'
|
||||
});
|
||||
|
||||
// 3. Connect signaling WebSocket
|
||||
const ws = new WebSocket(
|
||||
`wss://ns-myapp.orama.network/v1/webrtc/signal?room=${roomId}`,
|
||||
['Bearer', jwt]
|
||||
);
|
||||
```
|
||||
|
||||
See [docs/WEBRTC.md](WEBRTC.md) for the full API reference, room management, credential protocol, and debugging guide.
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Deployment Issues
|
||||
@ -880,10 +931,10 @@ orama deployments delete my-old-app
|
||||
|
||||
```bash
|
||||
# Check deployment details
|
||||
orama deployments get my-app
|
||||
orama app get my-app
|
||||
|
||||
# View logs for errors
|
||||
orama deployments logs my-app
|
||||
orama app logs my-app
|
||||
|
||||
# Common issues:
|
||||
# - Binary not compiled for Linux (GOOS=linux GOARCH=amd64)
|
||||
@ -896,7 +947,7 @@ orama deployments logs my-app
|
||||
|
||||
```bash
|
||||
# 1. Check deployment status
|
||||
orama deployments get my-app
|
||||
orama app get my-app
|
||||
|
||||
# 2. Verify DNS (may take up to 10 seconds to propagate)
|
||||
dig my-app.orama.network
|
||||
@ -980,7 +1031,7 @@ orama auth status
|
||||
|
||||
- **Explore the API**: See `/docs/GATEWAY_API.md` for HTTP API details
|
||||
- **Advanced Features**: Custom domains, load balancing, autoscaling (coming soon)
|
||||
- **Production Deployment**: Install nodes with `orama install` for production clusters
|
||||
- **Production Deployment**: Install nodes with `orama node install` for production clusters
|
||||
- **Client SDK**: Use the Go/JS SDK for programmatic deployments
|
||||
|
||||
---
|
||||
@ -41,7 +41,7 @@ Install nodes **one at a time**, waiting for each to complete before starting th
|
||||
```bash
|
||||
# SSH: <user>@<ns1-ip>
|
||||
|
||||
sudo orama install --no-pull --pre-built \
|
||||
sudo orama node install \
|
||||
--vps-ip <ns1-ip> \
|
||||
--domain <your-domain.com> \
|
||||
--base-domain <your-domain.com> \
|
||||
@ -50,7 +50,7 @@ sudo orama install --no-pull --pre-built \
|
||||
|
||||
After ns1 is installed, generate invite tokens:
|
||||
```bash
|
||||
orama invite --expiry 24h
|
||||
sudo orama node invite --expiry 24h
|
||||
```
|
||||
|
||||
## ns2 - Nameserver + Relay
|
||||
@ -58,7 +58,7 @@ orama invite --expiry 24h
|
||||
```bash
|
||||
# SSH: <user>@<ns2-ip>
|
||||
|
||||
sudo orama install --no-pull --pre-built \
|
||||
sudo orama node install \
|
||||
--join http://<ns1-ip> --token <TOKEN> \
|
||||
--vps-ip <ns2-ip> \
|
||||
--domain <your-domain.com> \
|
||||
@ -68,8 +68,7 @@ sudo orama install --no-pull --pre-built \
|
||||
--anyone-nickname <relay-name> \
|
||||
--anyone-wallet <wallet-address> \
|
||||
--anyone-contact "<contact-info>" \
|
||||
--anyone-family "<fingerprint1>,<fingerprint2>,..." \
|
||||
--anyone-bandwidth 30
|
||||
--anyone-family "<fingerprint1>,<fingerprint2>,..."
|
||||
```
|
||||
|
||||
## ns3 - Nameserver + Relay
|
||||
@ -77,7 +76,7 @@ sudo orama install --no-pull --pre-built \
|
||||
```bash
|
||||
# SSH: <user>@<ns3-ip>
|
||||
|
||||
sudo orama install --no-pull --pre-built \
|
||||
sudo orama node install \
|
||||
--join http://<ns1-ip> --token <TOKEN> \
|
||||
--vps-ip <ns3-ip> \
|
||||
--domain <your-domain.com> \
|
||||
@ -87,27 +86,25 @@ sudo orama install --no-pull --pre-built \
|
||||
--anyone-nickname <relay-name> \
|
||||
--anyone-wallet <wallet-address> \
|
||||
--anyone-contact "<contact-info>" \
|
||||
--anyone-family "<fingerprint1>,<fingerprint2>,..." \
|
||||
--anyone-bandwidth 30
|
||||
--anyone-family "<fingerprint1>,<fingerprint2>,..."
|
||||
```
|
||||
|
||||
## node4 - Non-Nameserver + Relay
|
||||
|
||||
Domain is auto-generated (e.g., `node-a3f8k2.<your-domain.com>`). No `--domain` flag needed.
|
||||
|
||||
```bash
|
||||
# SSH: <user>@<node4-ip>
|
||||
|
||||
sudo orama install --no-pull --pre-built \
|
||||
sudo orama node install \
|
||||
--join http://<ns1-ip> --token <TOKEN> \
|
||||
--vps-ip <node4-ip> \
|
||||
--domain node4.<your-domain.com> \
|
||||
--base-domain <your-domain.com> \
|
||||
--skip-checks \
|
||||
--anyone-relay --anyone-migrate \
|
||||
--anyone-nickname <relay-name> \
|
||||
--anyone-wallet <wallet-address> \
|
||||
--anyone-contact "<contact-info>" \
|
||||
--anyone-family "<fingerprint1>,<fingerprint2>,..." \
|
||||
--anyone-bandwidth 30
|
||||
--anyone-family "<fingerprint1>,<fingerprint2>,..."
|
||||
```
|
||||
|
||||
## node5 - Non-Nameserver + Relay
|
||||
@ -115,18 +112,15 @@ sudo orama install --no-pull --pre-built \
|
||||
```bash
|
||||
# SSH: <user>@<node5-ip>
|
||||
|
||||
sudo orama install --no-pull --pre-built \
|
||||
sudo orama node install \
|
||||
--join http://<ns1-ip> --token <TOKEN> \
|
||||
--vps-ip <node5-ip> \
|
||||
--domain node5.<your-domain.com> \
|
||||
--base-domain <your-domain.com> \
|
||||
--skip-checks \
|
||||
--anyone-relay --anyone-migrate \
|
||||
--anyone-nickname <relay-name> \
|
||||
--anyone-wallet <wallet-address> \
|
||||
--anyone-contact "<contact-info>" \
|
||||
--anyone-family "<fingerprint1>,<fingerprint2>,..." \
|
||||
--anyone-bandwidth 30
|
||||
--anyone-family "<fingerprint1>,<fingerprint2>,..."
|
||||
```
|
||||
|
||||
## node6 - Non-Nameserver (No Anyone Relay)
|
||||
@ -134,12 +128,10 @@ sudo orama install --no-pull --pre-built \
|
||||
```bash
|
||||
# SSH: <user>@<node6-ip>
|
||||
|
||||
sudo orama install --no-pull --pre-built \
|
||||
sudo orama node install \
|
||||
--join http://<ns1-ip> --token <TOKEN> \
|
||||
--vps-ip <node6-ip> \
|
||||
--domain node6.<your-domain.com> \
|
||||
--base-domain <your-domain.com> \
|
||||
--skip-checks
|
||||
--base-domain <your-domain.com>
|
||||
```
|
||||
|
||||
## Verification
|
||||
@ -147,13 +139,14 @@ sudo orama install --no-pull --pre-built \
|
||||
After all nodes are installed, verify cluster health:
|
||||
|
||||
```bash
|
||||
# Check RQLite cluster (from any node)
|
||||
# Full cluster report (from local machine)
|
||||
./bin/orama monitor report --env devnet
|
||||
|
||||
# Single node health
|
||||
./bin/orama monitor report --env devnet --node <ip>
|
||||
|
||||
# Or manually from any VPS:
|
||||
curl -s http://localhost:5001/status | jq -r '.store.raft.state, .store.raft.num_peers'
|
||||
# Should show: Leader (on one node) and N-1 peers
|
||||
|
||||
# Check gateway health
|
||||
curl -s http://localhost:6001/health
|
||||
|
||||
# Check Anyone relay (on nodes with relays)
|
||||
systemctl status debros-anyone-relay
|
||||
systemctl status orama-anyone-relay
|
||||
```
|
||||
@ -17,7 +17,6 @@ make build
|
||||
# bin/orama — the CLI
|
||||
# bin/gateway — standalone gateway (optional)
|
||||
# bin/identity — identity tool
|
||||
# bin/rqlite-mcp — RQLite MCP server
|
||||
```
|
||||
|
||||
## Running Tests
|
||||
@ -28,182 +27,122 @@ make test
|
||||
|
||||
## Deploying to VPS
|
||||
|
||||
There are two deployment workflows: **development** (fast iteration, no git required) and **production** (via git).
|
||||
All binaries are pre-compiled locally and shipped as a binary archive. Zero compilation on the VPS.
|
||||
|
||||
### Development Deployment (Fast Iteration)
|
||||
|
||||
Use this when iterating quickly — no need to commit or push to git.
|
||||
### Deploy Workflow
|
||||
|
||||
```bash
|
||||
# 1. Build the CLI for Linux
|
||||
GOOS=linux GOARCH=amd64 go build -o orama-cli-linux ./cmd/cli
|
||||
# One-command: build + push + rolling upgrade
|
||||
orama node rollout --env testnet
|
||||
|
||||
# 2. Generate a source archive (excludes .git, node_modules, bin/, etc.)
|
||||
./scripts/generate-source-archive.sh
|
||||
# Creates: /tmp/network-source.tar.gz
|
||||
# Or step by step:
|
||||
|
||||
# 3. Copy CLI and source to the VPS
|
||||
sshpass -p '<password>' scp -o StrictHostKeyChecking=no orama-cli-linux ubuntu@<ip>:/tmp/orama
|
||||
sshpass -p '<password>' scp -o StrictHostKeyChecking=no /tmp/network-source.tar.gz ubuntu@<ip>:/tmp/
|
||||
# 1. Build binary archive (cross-compiles all binaries for linux/amd64)
|
||||
orama build
|
||||
# Creates: /tmp/orama-<version>-linux-amd64.tar.gz
|
||||
|
||||
# 4. On the VPS: extract source and install the CLI
|
||||
ssh ubuntu@<ip>
|
||||
sudo rm -rf /home/debros/src && sudo mkdir -p /home/debros/src
|
||||
sudo tar xzf /tmp/network-source.tar.gz -C /home/debros/src
|
||||
sudo chown -R debros:debros /home/debros/src
|
||||
sudo mv /tmp/orama /usr/local/bin/orama && sudo chmod +x /usr/local/bin/orama
|
||||
# 2. Push archive to all nodes (fanout via hub node)
|
||||
orama node push --env testnet
|
||||
|
||||
# 5. Upgrade using local source (skips git pull)
|
||||
sudo orama upgrade --no-pull --restart
|
||||
# 3. Rolling upgrade (one node at a time, followers first, leader last)
|
||||
orama node upgrade --env testnet
|
||||
```
|
||||
|
||||
### Development Deployment with Pre-Built Binaries (Fastest)
|
||||
|
||||
Cross-compile everything locally and skip all Go compilation on the VPS. This is significantly faster because your local machine compiles much faster than the VPS.
|
||||
### Fresh Node Install
|
||||
|
||||
```bash
|
||||
# 1. Cross-compile all binaries for Linux (DeBros + Olric + CoreDNS + Caddy)
|
||||
make build-linux-all
|
||||
# Outputs everything to bin-linux/
|
||||
# Build the archive first (if not already built)
|
||||
orama build
|
||||
|
||||
# 2. Generate a single deploy archive (source + pre-built binaries)
|
||||
./scripts/generate-source-archive.sh
|
||||
# Creates: /tmp/network-source.tar.gz (includes bin-linux/ if present)
|
||||
|
||||
# 3. Copy the single archive to the VPS
|
||||
sshpass -p '<password>' scp -o StrictHostKeyChecking=no /tmp/network-source.tar.gz ubuntu@<ip>:/tmp/
|
||||
|
||||
# 4. Extract and install everything on the VPS
|
||||
sshpass -p '<password>' ssh -o StrictHostKeyChecking=no ubuntu@<ip> \
|
||||
'sudo bash -s' < scripts/extract-deploy.sh
|
||||
|
||||
# 5. Install/upgrade with --pre-built (skips ALL Go compilation on VPS)
|
||||
sudo orama install --no-pull --pre-built --vps-ip <ip> ...
|
||||
# or
|
||||
sudo orama upgrade --no-pull --pre-built --restart
|
||||
# Install on a new VPS (auto-uploads binary archive, zero compilation)
|
||||
orama node install --vps-ip <ip> --nameserver --domain <domain> --base-domain <domain>
|
||||
```
|
||||
|
||||
**What `--pre-built` skips:** Go installation, `make build`, Olric `go install`, CoreDNS build, Caddy/xcaddy build.
|
||||
|
||||
**What `--pre-built` still runs:** apt dependencies, RQLite/IPFS/IPFS Cluster downloads (pre-built binary downloads, fast), Anyone relay setup, config generation, systemd service creation.
|
||||
|
||||
### Production Deployment (Via Git)
|
||||
|
||||
For production releases — pulls source from GitHub on the VPS.
|
||||
|
||||
```bash
|
||||
# 1. Commit and push your changes
|
||||
git push origin <branch>
|
||||
|
||||
# 2. Build the CLI for Linux
|
||||
GOOS=linux GOARCH=amd64 go build -o orama-cli-linux ./cmd/cli
|
||||
|
||||
# 3. Deploy the CLI to the VPS
|
||||
sshpass -p '<password>' scp orama-cli-linux ubuntu@<ip>:/tmp/orama
|
||||
ssh ubuntu@<ip> "sudo mv /tmp/orama /usr/local/bin/orama && sudo chmod +x /usr/local/bin/orama"
|
||||
|
||||
# 4. Run upgrade (downloads source from GitHub)
|
||||
ssh ubuntu@<ip> "sudo orama upgrade --branch <branch> --restart"
|
||||
```
|
||||
The installer auto-detects the binary archive at `/opt/orama/manifest.json` and copies pre-built binaries instead of compiling from source.
|
||||
|
||||
### Upgrading a Multi-Node Cluster (CRITICAL)
|
||||
|
||||
**NEVER restart all nodes simultaneously.** RQLite uses Raft consensus and requires a majority (quorum) to function. Restarting all nodes at once can cause cluster splits where nodes elect different leaders or form isolated clusters.
|
||||
**NEVER restart all nodes simultaneously.** RQLite uses Raft consensus and requires a majority (quorum) to function.
|
||||
|
||||
#### Safe Upgrade Procedure (Rolling Restart)
|
||||
|
||||
Always upgrade nodes **one at a time**, waiting for each to rejoin before proceeding:
|
||||
#### Safe Upgrade Procedure
|
||||
|
||||
```bash
|
||||
# 1. Build locally
|
||||
make build-linux-all
|
||||
./scripts/generate-source-archive.sh
|
||||
# Creates: /tmp/network-source.tar.gz (includes bin-linux/)
|
||||
# Full rollout (build + push + rolling upgrade, one command)
|
||||
orama node rollout --env testnet
|
||||
|
||||
# 2. Upload to ONE node first (the "hub" node)
|
||||
sshpass -p '<password>' scp /tmp/network-source.tar.gz ubuntu@<hub-ip>:/tmp/
|
||||
# Or with more control:
|
||||
orama node push --env testnet # Push archive to all nodes
|
||||
orama node upgrade --env testnet # Rolling upgrade (auto-detects leader)
|
||||
orama node upgrade --env testnet --node 1.2.3.4 # Single node only
|
||||
orama node upgrade --env testnet --delay 60 # 60s between nodes
|
||||
```
|
||||
|
||||
# 3. Fan out from hub to all other nodes (server-to-server is faster)
|
||||
ssh ubuntu@<hub-ip>
|
||||
for ip in <ip2> <ip3> <ip4> <ip5> <ip6>; do
|
||||
scp /tmp/network-source.tar.gz ubuntu@$ip:/tmp/
|
||||
done
|
||||
exit
|
||||
The rolling upgrade automatically:
|
||||
1. Upgrades **follower** nodes first
|
||||
2. Upgrades the **leader** last
|
||||
3. Waits a configurable delay between nodes (default: 30s)
|
||||
|
||||
# 4. Extract on ALL nodes (can be done in parallel, no restart yet)
|
||||
for ip in <ip1> <ip2> <ip3> <ip4> <ip5> <ip6>; do
|
||||
ssh ubuntu@$ip 'sudo bash -s' < scripts/extract-deploy.sh
|
||||
done
|
||||
|
||||
# 5. Find the RQLite leader (upgrade this one LAST)
|
||||
ssh ubuntu@<any-node> 'curl -s http://localhost:5001/status | jq -r .store.raft.state'
|
||||
|
||||
# 6. Upgrade FOLLOWER nodes one at a time
|
||||
# First stop services, then upgrade, which restarts them
|
||||
ssh ubuntu@<follower-ip> 'sudo orama prod stop && sudo orama upgrade --no-pull --pre-built --restart'
|
||||
|
||||
# Wait for rejoin before proceeding to next node
|
||||
ssh ubuntu@<leader-ip> 'curl -s http://localhost:5001/status | jq -r .store.raft.num_peers'
|
||||
# Should show expected number of peers (N-1)
|
||||
|
||||
# Repeat for each follower...
|
||||
|
||||
# 7. Upgrade the LEADER node last
|
||||
ssh ubuntu@<leader-ip> 'sudo orama prod stop && sudo orama upgrade --no-pull --pre-built --restart'
|
||||
After each node, verify health:
|
||||
```bash
|
||||
orama monitor report --env testnet
|
||||
```
|
||||
|
||||
#### What NOT to Do
|
||||
|
||||
- **DON'T** stop all nodes, replace binaries, then start all nodes
|
||||
- **DON'T** run `orama upgrade --restart` on multiple nodes in parallel
|
||||
- **DON'T** run `orama node upgrade --restart` on multiple nodes in parallel
|
||||
- **DON'T** clear RQLite data directories unless doing a full cluster rebuild
|
||||
- **DON'T** use `systemctl stop debros-node` on multiple nodes simultaneously
|
||||
- **DON'T** use `systemctl stop orama-node` on multiple nodes simultaneously
|
||||
|
||||
#### Recovery from Cluster Split
|
||||
|
||||
If nodes get stuck in "Candidate" state or show "leader not found" errors:
|
||||
|
||||
1. Identify which node has the most recent data (usually the old leader)
|
||||
2. Keep that node running as the new leader
|
||||
3. On each other node, clear RQLite data and restart:
|
||||
```bash
|
||||
sudo orama prod stop
|
||||
sudo rm -rf /home/debros/.orama/data/rqlite
|
||||
sudo systemctl start debros-node
|
||||
```
|
||||
4. The node should automatically rejoin using its configured `rqlite_join_address`
|
||||
|
||||
If automatic rejoin fails, the node may have started without the `-join` flag. Check:
|
||||
```bash
|
||||
ps aux | grep rqlited
|
||||
# Should include: -join 10.0.0.1:7001 (or similar)
|
||||
# Recover the Raft cluster (specify the node with highest commit index as leader)
|
||||
orama node recover-raft --env testnet --leader 1.2.3.4
|
||||
```
|
||||
|
||||
If `-join` is missing, the node bootstrapped standalone. You'll need to either:
|
||||
- Restart debros-node (it should detect empty data and use join)
|
||||
- Or do a full cluster rebuild from CLEAN_NODE.md
|
||||
This will:
|
||||
1. Stop orama-node on ALL nodes
|
||||
2. Backup + delete raft/ on non-leader nodes
|
||||
3. Start the leader, wait for Leader state
|
||||
4. Start remaining nodes in batches
|
||||
5. Verify cluster health
|
||||
|
||||
### Deploying to Multiple Nodes
|
||||
### Cleaning Nodes for Reinstallation
|
||||
|
||||
To deploy to all nodes, repeat steps 3-5 (dev) or 3-4 (production) for each VPS IP.
|
||||
```bash
|
||||
# Wipe all data and services (preserves Anyone relay keys)
|
||||
orama node clean --env testnet --force
|
||||
|
||||
**Important:** When using `--restart`, do nodes one at a time (see "Upgrading a Multi-Node Cluster" above).
|
||||
# Also remove shared binaries (rqlited, ipfs, caddy, etc.)
|
||||
orama node clean --env testnet --nuclear --force
|
||||
|
||||
# Single node only
|
||||
orama node clean --env testnet --node 1.2.3.4 --force
|
||||
```
|
||||
|
||||
### Push Options
|
||||
|
||||
```bash
|
||||
orama node push --env devnet # Fanout via hub (default, fastest)
|
||||
orama node push --env testnet --node 1.2.3.4 # Single node
|
||||
orama node push --env testnet --direct # Sequential, no fanout
|
||||
```
|
||||
|
||||
### CLI Flags Reference
|
||||
|
||||
#### `orama install`
|
||||
#### `orama node install`
|
||||
|
||||
| Flag | Description |
|
||||
|------|-------------|
|
||||
| `--vps-ip <ip>` | VPS public IP address (required) |
|
||||
| `--domain <domain>` | Domain for HTTPS certificates. Nameserver nodes use the base domain (e.g., `example.com`); non-nameserver nodes use a subdomain (e.g., `node-4.example.com`) |
|
||||
| `--domain <domain>` | Domain for HTTPS certificates. Required for nameserver nodes (use the base domain, e.g., `example.com`). Auto-generated for non-nameserver nodes if omitted (e.g., `node-a3f8k2.example.com`) |
|
||||
| `--base-domain <domain>` | Base domain for deployment routing (e.g., example.com) |
|
||||
| `--nameserver` | Configure this node as a nameserver (CoreDNS + Caddy) |
|
||||
| `--join <url>` | Join existing cluster via HTTPS URL (e.g., `https://node1.example.com`) |
|
||||
| `--token <token>` | Invite token for joining (from `orama invite` on existing node) |
|
||||
| `--branch <branch>` | Git branch to use (default: main) |
|
||||
| `--no-pull` | Skip git clone/pull, use existing `/home/debros/src` |
|
||||
| `--pre-built` | Skip all Go compilation, use pre-built binaries already on disk (see above) |
|
||||
| `--token <token>` | Invite token for joining (from `orama node invite` on existing node) |
|
||||
| `--force` | Force reconfiguration even if already installed |
|
||||
| `--skip-firewall` | Skip UFW firewall setup |
|
||||
| `--skip-checks` | Skip minimum resource checks (RAM/CPU) |
|
||||
@ -218,7 +157,7 @@ To deploy to all nodes, repeat steps 3-5 (dev) or 3-4 (production) for each VPS
|
||||
| `--anyone-bandwidth <pct>` | Limit relay to N% of VPS bandwidth (default: 30, 0=unlimited). Runs a speedtest during install to measure available bandwidth |
|
||||
| `--anyone-accounting <GB>` | Monthly data cap for relay in GB (0=unlimited) |
|
||||
|
||||
#### `orama invite`
|
||||
#### `orama node invite`
|
||||
|
||||
| Flag | Description |
|
||||
|------|-------------|
|
||||
@ -230,54 +169,132 @@ To deploy to all nodes, repeat steps 3-5 (dev) or 3-4 (production) for each VPS
|
||||
- **Expiry is checked in UTC.** RQLite uses `datetime('now')` which is always UTC. If your local timezone differs, account for the offset when choosing expiry durations.
|
||||
- **Use longer expiry for multi-node deployments.** When deploying multiple nodes, use `--expiry 24h` to avoid tokens expiring mid-deployment.
|
||||
|
||||
#### `orama upgrade`
|
||||
#### `orama node upgrade`
|
||||
|
||||
| Flag | Description |
|
||||
|------|-------------|
|
||||
| `--branch <branch>` | Git branch to pull from |
|
||||
| `--no-pull` | Skip git pull, use existing source |
|
||||
| `--pre-built` | Skip all Go compilation, use pre-built binaries already on disk |
|
||||
| `--restart` | Restart all services after upgrade |
|
||||
| `--restart` | Restart all services after upgrade (local mode) |
|
||||
| `--env <env>` | Target environment for remote rolling upgrade |
|
||||
| `--node <ip>` | Upgrade a single node only |
|
||||
| `--delay <seconds>` | Delay between nodes during rolling upgrade (default: 30) |
|
||||
| `--anyone-relay` | Enable Anyone relay (same flags as install) |
|
||||
| `--anyone-bandwidth <pct>` | Limit relay to N% of VPS bandwidth (default: 30, 0=unlimited) |
|
||||
| `--anyone-accounting <GB>` | Monthly data cap for relay in GB (0=unlimited) |
|
||||
|
||||
#### `orama prod` (Service Management)
|
||||
#### `orama build`
|
||||
|
||||
| Flag | Description |
|
||||
|------|-------------|
|
||||
| `--arch <arch>` | Target architecture (default: amd64) |
|
||||
| `--output <path>` | Output archive path |
|
||||
| `--verbose` | Verbose build output |
|
||||
|
||||
#### `orama node push`
|
||||
|
||||
| Flag | Description |
|
||||
|------|-------------|
|
||||
| `--env <env>` | Target environment (required) |
|
||||
| `--node <ip>` | Push to a single node only |
|
||||
| `--direct` | Sequential upload (no hub fanout) |
|
||||
|
||||
#### `orama node rollout`
|
||||
|
||||
| Flag | Description |
|
||||
|------|-------------|
|
||||
| `--env <env>` | Target environment (required) |
|
||||
| `--no-build` | Skip the build step |
|
||||
| `--yes` | Skip confirmation |
|
||||
| `--delay <seconds>` | Delay between nodes (default: 30) |
|
||||
|
||||
#### `orama node clean`
|
||||
|
||||
| Flag | Description |
|
||||
|------|-------------|
|
||||
| `--env <env>` | Target environment (required) |
|
||||
| `--node <ip>` | Clean a single node only |
|
||||
| `--nuclear` | Also remove shared binaries |
|
||||
| `--force` | Skip confirmation (DESTRUCTIVE) |
|
||||
|
||||
#### `orama node recover-raft`
|
||||
|
||||
| Flag | Description |
|
||||
|------|-------------|
|
||||
| `--env <env>` | Target environment (required) |
|
||||
| `--leader <ip>` | Leader node IP — highest commit index (required) |
|
||||
| `--force` | Skip confirmation (DESTRUCTIVE) |
|
||||
|
||||
#### `orama node` (Service Management)
|
||||
|
||||
Use these commands to manage services on production nodes:
|
||||
|
||||
```bash
|
||||
# Stop all services (debros-node, coredns, caddy)
|
||||
sudo orama prod stop
|
||||
# Stop all services (orama-node, coredns, caddy)
|
||||
sudo orama node stop
|
||||
|
||||
# Start all services
|
||||
sudo orama prod start
|
||||
sudo orama node start
|
||||
|
||||
# Restart all services
|
||||
sudo orama prod restart
|
||||
sudo orama node restart
|
||||
|
||||
# Check service status
|
||||
sudo orama prod status
|
||||
sudo orama node status
|
||||
|
||||
# Diagnose common issues
|
||||
sudo orama node doctor
|
||||
```
|
||||
|
||||
**Note:** Always use `orama prod stop` instead of manually running `systemctl stop`. The CLI ensures all related services (including CoreDNS and Caddy on nameserver nodes) are handled correctly.
|
||||
**Note:** Always use `orama node stop` instead of manually running `systemctl stop`. The CLI ensures all related services (including CoreDNS and Caddy on nameserver nodes) are handled correctly.
|
||||
|
||||
#### `orama node report`
|
||||
|
||||
Outputs comprehensive health data as JSON. Used by `orama monitor` over SSH:
|
||||
|
||||
```bash
|
||||
sudo orama node report --json
|
||||
```
|
||||
|
||||
See [MONITORING.md](MONITORING.md) for full details.
|
||||
|
||||
#### `orama monitor`
|
||||
|
||||
Real-time cluster monitoring from your local machine:
|
||||
|
||||
```bash
|
||||
# Interactive TUI
|
||||
orama monitor --env testnet
|
||||
|
||||
# Cluster overview
|
||||
orama monitor cluster --env testnet
|
||||
|
||||
# Alerts only
|
||||
orama monitor alerts --env testnet
|
||||
|
||||
# Full JSON for LLM analysis
|
||||
orama monitor report --env testnet
|
||||
```
|
||||
|
||||
See [MONITORING.md](MONITORING.md) for all subcommands and flags.
|
||||
|
||||
### Node Join Flow
|
||||
|
||||
```bash
|
||||
# 1. Genesis node (first node, creates cluster)
|
||||
# Nameserver nodes use the base domain as --domain
|
||||
sudo orama install --vps-ip 1.2.3.4 --domain example.com \
|
||||
sudo orama node install --vps-ip 1.2.3.4 --domain example.com \
|
||||
--base-domain example.com --nameserver
|
||||
|
||||
# 2. On genesis node, generate an invite
|
||||
orama invite
|
||||
# Output: sudo orama install --join https://example.com --token <TOKEN> --vps-ip <IP>
|
||||
orama node invite --expiry 24h
|
||||
# Output: sudo orama node install --join https://example.com --token <TOKEN> --vps-ip <IP>
|
||||
|
||||
# 3. On the new node, run the printed command
|
||||
# Nameserver nodes use the base domain; non-nameserver nodes use subdomains (e.g., node-4.example.com)
|
||||
sudo orama install --join https://example.com --token abc123... \
|
||||
# 3a. Join as nameserver (requires --domain set to base domain)
|
||||
sudo orama node install --join http://1.2.3.4 --token abc123... \
|
||||
--vps-ip 5.6.7.8 --domain example.com --base-domain example.com --nameserver
|
||||
|
||||
# 3b. Join as regular node (domain auto-generated, no --domain needed)
|
||||
sudo orama node install --join http://1.2.3.4 --token abc123... \
|
||||
--vps-ip 5.6.7.8 --base-domain example.com
|
||||
```
|
||||
|
||||
The join flow establishes a WireGuard VPN tunnel before starting cluster services.
|
||||
@ -293,7 +310,7 @@ node's IP so that `node1.example.com` resolves publicly.
|
||||
**If DNS is not yet configured**, you can use the genesis node's public IP with HTTP as a fallback:
|
||||
|
||||
```bash
|
||||
sudo orama install --join http://1.2.3.4 --vps-ip 5.6.7.8 --token abc123... --nameserver
|
||||
sudo orama node install --join http://1.2.3.4 --vps-ip 5.6.7.8 --token abc123... --nameserver
|
||||
```
|
||||
|
||||
This works because Caddy's `:80` block proxies all HTTP traffic to the gateway. However, once DNS
|
||||
@ -303,9 +320,37 @@ is properly configured, always use the HTTPS domain URL.
|
||||
UFW from external access. The join request goes through Caddy on port 80 (HTTP) or 443 (HTTPS),
|
||||
which proxies to the gateway internally.
|
||||
|
||||
## Pre-Install Checklist
|
||||
## OramaOS Enrollment
|
||||
|
||||
Before running `orama install` on a VPS, ensure:
|
||||
For OramaOS nodes (mainnet, devnet, testnet), use the enrollment flow instead of `orama node install`:
|
||||
|
||||
```bash
|
||||
# 1. Flash OramaOS image to VPS (via provider dashboard)
|
||||
# 2. Generate invite token on existing cluster node
|
||||
orama node invite --expiry 24h
|
||||
|
||||
# 3. Enroll the OramaOS node
|
||||
orama node enroll --node-ip <vps-public-ip> --token <invite-token> --gateway <gateway-url>
|
||||
|
||||
# 4. For genesis node reboots (before 5+ peers exist)
|
||||
orama node unlock --genesis --node-ip <wg-ip>
|
||||
```
|
||||
|
||||
OramaOS nodes have no SSH access. All management happens through the Gateway API:
|
||||
|
||||
```bash
|
||||
# Status, logs, commands — all via Gateway proxy
|
||||
curl "https://gateway.example.com/v1/node/status?node_id=<id>"
|
||||
curl "https://gateway.example.com/v1/node/logs?node_id=<id>&service=gateway"
|
||||
```
|
||||
|
||||
See [ORAMAOS_DEPLOYMENT.md](ORAMAOS_DEPLOYMENT.md) for the full guide.
|
||||
|
||||
**Note:** `orama node clean` does not work on OramaOS nodes (no SSH). Use `orama node leave` for graceful departure, or reflash the image for a factory reset.
|
||||
|
||||
## Pre-Install Checklist (Ubuntu Only)
|
||||
|
||||
Before running `orama node install` on a VPS, ensure:
|
||||
|
||||
1. **Stop Docker if running.** Docker commonly binds ports 4001 and 8080 which conflict with IPFS. The installer checks for port conflicts and shows which process is using each port, but it's easier to stop Docker first:
|
||||
```bash
|
||||
@ -318,12 +363,7 @@ Before running `orama install` on a VPS, ensure:
|
||||
sudo systemctl stop ipfs
|
||||
```
|
||||
|
||||
3. **Ensure `make` is installed.** Required for building CoreDNS and Caddy from source:
|
||||
```bash
|
||||
sudo apt-get install -y make
|
||||
```
|
||||
|
||||
4. **Stop any service on port 53** (for nameserver nodes). The installer handles `systemd-resolved` automatically, but other DNS services (like `bind9` or `dnsmasq`) must be stopped manually.
|
||||
3. **Stop any service on port 53** (for nameserver nodes). The installer handles `systemd-resolved` automatically, but other DNS services (like `bind9` or `dnsmasq`) must be stopped manually.
|
||||
|
||||
## Recovering from Failed Joins
|
||||
|
||||
@ -62,7 +62,7 @@ Multiple subsystems can be combined: `--subsystem rqlite,olric,dns`
|
||||
| **ipfs** | Daemon active, cluster active, swarm peer count, cluster peer count, cluster errors, repo usage %, swarm key present, bootstrap list empty, cross-node version consistency |
|
||||
| **dns** | CoreDNS active, Caddy active, ports (53/80/443), memory, restart count, log errors, Corefile exists, SOA/NS/wildcard/base-A resolution, TLS cert expiry, cross-node nameserver availability |
|
||||
| **wireguard** | Interface up, service active, correct 10.0.0.x IP, listen port 51820, peer count vs expected, MTU 1420, config exists + permissions 600, peer handshakes (fresh/stale/never), peer traffic, catch-all route detection, cross-node peer count + MTU consistency |
|
||||
| **system** | Core services (debros-node, rqlite, olric, ipfs, ipfs-cluster, wg-quick), nameserver services (coredns, caddy), failed systemd units, memory/disk/inode usage, load average, OOM kills, swap, UFW active, process user (debros), panic count, expected ports |
|
||||
| **system** | Core services (orama-node, rqlite, olric, ipfs, ipfs-cluster, wg-quick), nameserver services (coredns, caddy), failed systemd units, memory/disk/inode usage, load average, OOM kills, swap, UFW active, process user (orama), panic count, expected ports |
|
||||
| **network** | Internet reachability, default route, WireGuard route, TCP connection count, TIME_WAIT count, TCP retransmission rate, WireGuard mesh ping (all peers) |
|
||||
| **namespace** | Per-namespace: RQLite up + raft state + readyz, Olric memberlist, Gateway HTTP health. Cross-namespace: all-healthy check, RQLite quorum per namespace |
|
||||
|
||||
@ -167,18 +167,18 @@ The inspector reads node definitions from a pipe-delimited config file (default:
|
||||
### Format
|
||||
|
||||
```
|
||||
# environment|user@host|password|role|ssh_key
|
||||
devnet|ubuntu@1.2.3.4|mypassword|node|
|
||||
devnet|ubuntu@5.6.7.8|mypassword|nameserver-ns1|/path/to/key
|
||||
# environment|user@host|role
|
||||
devnet|ubuntu@1.2.3.4|node
|
||||
devnet|ubuntu@5.6.7.8|nameserver-ns1
|
||||
```
|
||||
|
||||
| Field | Description |
|
||||
|-------|-------------|
|
||||
| `environment` | Cluster name (`devnet`, `testnet`) |
|
||||
| `user@host` | SSH credentials |
|
||||
| `password` | SSH password |
|
||||
| `role` | `node` or `nameserver-ns1`, `nameserver-ns2`, etc. |
|
||||
| `ssh_key` | Optional path to SSH private key |
|
||||
|
||||
SSH keys are resolved from rootwallet (`rw vault ssh get <host>/<user> --priv`).
|
||||
|
||||
Blank lines and lines starting with `#` are ignored.
|
||||
|
||||
278
core/docs/MONITORING.md
Normal file
278
core/docs/MONITORING.md
Normal file
@ -0,0 +1,278 @@
|
||||
# Monitoring
|
||||
|
||||
Real-time cluster health monitoring via SSH. The system has two parts:
|
||||
|
||||
1. **`orama node report`** — Runs on each VPS node, collects all local health data, outputs JSON
|
||||
2. **`orama monitor`** — Runs on your local machine, SSHes into nodes, aggregates results, displays via TUI or tables
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
Developer Machine VPS Nodes (via SSH)
|
||||
┌──────────────────┐ ┌────────────────────┐
|
||||
│ orama monitor │ ──SSH──────────>│ orama node report │
|
||||
│ (TUI / tables) │ <──JSON─────── │ (local collector) │
|
||||
│ │ └────────────────────┘
|
||||
│ CollectOnce() │ ──SSH──────────>│ orama node report │
|
||||
│ DeriveAlerts() │ <──JSON─────── │ (local collector) │
|
||||
│ Render() │ └────────────────────┘
|
||||
└──────────────────┘
|
||||
```
|
||||
|
||||
Each node runs `orama node report --json` locally (no SSH to other nodes), collecting data via `os/exec` and `net/http` to localhost services. The monitor SSHes into all nodes in parallel, collects reports, then runs cross-node analysis to detect cluster-wide issues.
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# Interactive TUI (auto-refreshes every 30s)
|
||||
orama monitor --env testnet
|
||||
|
||||
# Cluster overview table
|
||||
orama monitor cluster --env testnet
|
||||
|
||||
# Alerts only
|
||||
orama monitor alerts --env testnet
|
||||
|
||||
# Full JSON report (pipe to jq or feed to LLM)
|
||||
orama monitor report --env testnet
|
||||
```
|
||||
|
||||
## `orama monitor` — Local Orchestrator
|
||||
|
||||
### Usage
|
||||
|
||||
```
|
||||
orama monitor [subcommand] --env <environment> [flags]
|
||||
```
|
||||
|
||||
Without a subcommand, launches the interactive TUI.
|
||||
|
||||
### Global Flags
|
||||
|
||||
| Flag | Default | Description |
|
||||
|------|---------|-------------|
|
||||
| `--env` | *(required)* | Environment: `devnet`, `testnet`, `mainnet` |
|
||||
| `--json` | `false` | Machine-readable JSON output (for one-shot subcommands) |
|
||||
| `--node` | | Filter to a specific node host/IP |
|
||||
| `--config` | `scripts/remote-nodes.conf` | Path to node configuration file |
|
||||
|
||||
### Subcommands
|
||||
|
||||
| Subcommand | Description |
|
||||
|------------|-------------|
|
||||
| `live` | Interactive TUI monitor (default when no subcommand) |
|
||||
| `cluster` | Cluster overview: all nodes, roles, RQLite state, WG peers |
|
||||
| `node` | Per-node health details (system, services, WG, DNS) |
|
||||
| `service` | Service status matrix across all nodes |
|
||||
| `mesh` | WireGuard mesh connectivity and peer details |
|
||||
| `dns` | DNS health: CoreDNS, Caddy, TLS cert expiry, resolution |
|
||||
| `namespaces` | Namespace health across nodes |
|
||||
| `alerts` | Active alerts and warnings sorted by severity |
|
||||
| `report` | Full JSON dump optimized for LLM consumption |
|
||||
|
||||
### Examples
|
||||
|
||||
```bash
|
||||
# Cluster overview
|
||||
orama monitor cluster --env testnet
|
||||
|
||||
# Cluster overview as JSON
|
||||
orama monitor cluster --env testnet --json
|
||||
|
||||
# Alerts for all nodes
|
||||
orama monitor alerts --env testnet
|
||||
|
||||
# Single-node deep dive
|
||||
orama monitor node --env testnet --node 51.195.109.238
|
||||
|
||||
# Services for one node
|
||||
orama monitor service --env testnet --node 51.195.109.238
|
||||
|
||||
# WireGuard mesh details
|
||||
orama monitor mesh --env testnet
|
||||
|
||||
# DNS health
|
||||
orama monitor dns --env testnet
|
||||
|
||||
# Namespace health
|
||||
orama monitor namespaces --env testnet
|
||||
|
||||
# Full report for LLM analysis
|
||||
orama monitor report --env testnet | jq .
|
||||
|
||||
# Single-node report
|
||||
orama monitor report --env testnet --node 51.195.109.238
|
||||
|
||||
# Custom config file
|
||||
orama monitor cluster --config /path/to/nodes.conf --env devnet
|
||||
```
|
||||
|
||||
### Interactive TUI
|
||||
|
||||
The `live` subcommand (default) launches a full-screen terminal UI:
|
||||
|
||||
**Tabs:** Overview | Nodes | Services | WG Mesh | DNS | Namespaces | Alerts
|
||||
|
||||
**Key Bindings:**
|
||||
|
||||
| Key | Action |
|
||||
|-----|--------|
|
||||
| `Tab` / `Shift+Tab` | Switch tabs |
|
||||
| `j` / `k` or `↑` / `↓` | Scroll content |
|
||||
| `r` | Force refresh |
|
||||
| `q` / `Ctrl+C` | Quit |
|
||||
|
||||
The TUI auto-refreshes every 30 seconds. A spinner shows during data collection. Colors indicate health: green = healthy, red = critical, yellow = warning.
|
||||
|
||||
### LLM Report Format
|
||||
|
||||
`orama monitor report` outputs structured JSON designed for AI consumption:
|
||||
|
||||
```json
|
||||
{
|
||||
"meta": {
|
||||
"environment": "testnet",
|
||||
"collected_at": "2026-02-16T12:00:00Z",
|
||||
"duration_seconds": 3.2,
|
||||
"node_count": 3,
|
||||
"healthy_count": 3
|
||||
},
|
||||
"summary": {
|
||||
"rqlite_leader": "10.0.0.1",
|
||||
"rqlite_voters": "3/3",
|
||||
"rqlite_raft_term": 42,
|
||||
"wg_mesh_status": "all connected",
|
||||
"service_health": "all nominal",
|
||||
"critical_alerts": 0,
|
||||
"warning_alerts": 1,
|
||||
"info_alerts": 0
|
||||
},
|
||||
"alerts": [...],
|
||||
"nodes": [
|
||||
{
|
||||
"host": "51.195.109.238",
|
||||
"status": "healthy",
|
||||
"collection_ms": 526,
|
||||
"report": { ... }
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## `orama node report` — VPS-Side Collector
|
||||
|
||||
Runs locally on a VPS node. Collects all system and service data in parallel and outputs a single JSON blob. Requires root privileges.
|
||||
|
||||
### Usage
|
||||
|
||||
```bash
|
||||
# On a VPS node
|
||||
sudo orama node report --json
|
||||
```
|
||||
|
||||
### What It Collects
|
||||
|
||||
| Section | Data |
|
||||
|---------|------|
|
||||
| **system** | CPU count, load average, memory/disk/swap usage, OOM kills, kernel version, uptime, clock time |
|
||||
| **services** | Systemd service states (active, restarts, memory, CPU, restart loop detection) for 10 core services |
|
||||
| **rqlite** | Raft state, leader, term, applied/commit index, peers, strong read test, readyz, debug vars |
|
||||
| **olric** | Service state, memberlist, member count, restarts, memory, log analysis |
|
||||
| **ipfs** | Daemon/cluster state, swarm/cluster peers, repo size, versions, swarm key |
|
||||
| **gateway** | HTTP health check, subsystem status |
|
||||
| **wireguard** | Interface state, WG IP, peers, handshake ages, MTU, config permissions |
|
||||
| **dns** | CoreDNS/Caddy state, port bindings, resolution tests, TLS cert expiry |
|
||||
| **anyone** | Relay/client state, bootstrap progress, fingerprint |
|
||||
| **network** | Internet reachability, TCP stats, retransmission rate, listening ports, UFW rules |
|
||||
| **processes** | Zombie count, orphan orama processes, panic/fatal count in logs |
|
||||
| **namespaces** | Per-namespace service probes (RQLite, Olric, Gateway) |
|
||||
|
||||
### Performance
|
||||
|
||||
All 12 collectors run in parallel with goroutines. Typical collection time is **< 1 second** per node. HTTP timeouts are 3 seconds, command timeouts are 4 seconds.
|
||||
|
||||
### Output Schema
|
||||
|
||||
```json
|
||||
{
|
||||
"timestamp": "2026-02-16T12:00:00Z",
|
||||
"hostname": "ns1",
|
||||
"version": "0.107.0",
|
||||
"collect_ms": 526,
|
||||
"errors": [],
|
||||
"system": { "cpu_count": 4, "load_avg_1": 0.1, "mem_total_mb": 7937, ... },
|
||||
"services": { "services": [...], "failed_units": [] },
|
||||
"rqlite": { "responsive": true, "raft_state": "Leader", "term": 42, ... },
|
||||
"olric": { "service_active": true, "memberlist_up": true, ... },
|
||||
"ipfs": { "daemon_active": true, "swarm_peers": 2, ... },
|
||||
"gateway": { "responsive": true, "http_status": 200, ... },
|
||||
"wireguard": { "interface_up": true, "wg_ip": "10.0.0.1", "peers": [...], ... },
|
||||
"dns": { "coredns_active": true, "caddy_active": true, "base_tls_days_left": 88, ... },
|
||||
"anyone": { "relay_active": true, "bootstrapped": true, ... },
|
||||
"network": { "internet_reachable": true, "ufw_active": true, ... },
|
||||
"processes": { "zombie_count": 0, "orphan_count": 0, "panic_count": 0, ... },
|
||||
"namespaces": []
|
||||
}
|
||||
```
|
||||
|
||||
## Alert Detection
|
||||
|
||||
Alerts are derived from cross-node analysis of all collected reports. Each alert has a severity level and identifies the affected subsystem and node.
|
||||
|
||||
### Alert Severities
|
||||
|
||||
| Severity | Examples |
|
||||
|----------|----------|
|
||||
| **critical** | SSH collection failed (node unreachable), no RQLite leader, split brain, RQLite unresponsive, WireGuard interface down, WG peer never handshaked, OOM kills, service failed, UFW inactive |
|
||||
| **warning** | Strong read failed, memory > 90%, disk > 85%, stale WG handshake (> 3min), Raft term inconsistency, applied index lag > 100, restart loop detected, TLS cert < 14 days, DNS down, namespace gateway down, Anyone not bootstrapped, clock skew > 5s, binary version mismatch, internet unreachable, high TCP retransmission |
|
||||
| **info** | Zombie processes, orphan orama processes, swap usage > 30% |
|
||||
|
||||
### Cross-Node Checks
|
||||
|
||||
These checks compare data across all nodes:
|
||||
|
||||
- **RQLite Leader**: Exactly one leader exists (no split brain)
|
||||
- **Leader Agreement**: All nodes agree on the same leader address
|
||||
- **Raft Term Consistency**: Term values within 1 of each other
|
||||
- **Applied Index Lag**: Followers within 100 entries of the leader
|
||||
- **WireGuard Peer Symmetry**: Each node has N-1 peers
|
||||
- **Clock Skew**: Node clocks within 5 seconds of each other
|
||||
- **Binary Version**: All nodes running the same version
|
||||
- **WebRTC SFU Coverage**: SFU running on expected nodes (3/3) per namespace
|
||||
- **WebRTC TURN Redundancy**: TURN running on expected nodes (2/3) per namespace
|
||||
|
||||
### Per-Node Checks
|
||||
|
||||
- **RQLite**: Responsive, ready, strong read
|
||||
- **WireGuard**: Interface up, handshake freshness
|
||||
- **System**: Memory, disk, load, OOM kills, swap
|
||||
- **Services**: Systemd state, restart loops
|
||||
- **DNS**: CoreDNS/Caddy up, TLS cert expiry, SOA resolution
|
||||
- **Anyone**: Bootstrap progress
|
||||
- **Processes**: Zombies, orphans, panics in logs
|
||||
- **Namespaces**: Gateway and RQLite per namespace
|
||||
- **WebRTC**: SFU and TURN service health (when provisioned)
|
||||
- **Network**: UFW, internet reachability, TCP retransmission
|
||||
|
||||
## Monitor vs Inspector
|
||||
|
||||
Both tools check cluster health, but they serve different purposes:
|
||||
|
||||
| | `orama monitor` | `orama inspect` |
|
||||
|---|---|---|
|
||||
| **Data source** | `orama node report --json` (single SSH call per node) | 15+ SSH commands per node per subsystem |
|
||||
| **Speed** | ~3-5s for full cluster | ~4-10s for full cluster |
|
||||
| **Output** | TUI, tables, JSON | Tables, JSON |
|
||||
| **Focus** | Real-time monitoring, alert detection | Deep diagnostic checks with pass/fail/warn |
|
||||
| **AI support** | `report` subcommand for LLM input | `--ai` flag for inline analysis |
|
||||
| **Use case** | "Is anything wrong right now?" | "What exactly is wrong and why?" |
|
||||
|
||||
Use `monitor` for day-to-day health checks and the interactive TUI. Use `inspect` for deep diagnostics when something is already known to be broken.
|
||||
|
||||
## Configuration
|
||||
|
||||
Uses the same `scripts/remote-nodes.conf` as the inspector. See [INSPECTOR.md](INSPECTOR.md#configuration) for format details.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Nodes must have the `orama` CLI installed (via `orama node install` or `upload-source.sh`). The monitor runs `sudo orama node report --json` over SSH, so the binary must be at `/usr/local/bin/orama` on each node.
|
||||
233
core/docs/ORAMAOS_DEPLOYMENT.md
Normal file
233
core/docs/ORAMAOS_DEPLOYMENT.md
Normal file
@ -0,0 +1,233 @@
|
||||
# OramaOS Deployment Guide
|
||||
|
||||
OramaOS is a custom minimal Linux image built with Buildroot. It replaces the standard Ubuntu-based node deployment for mainnet, devnet, and testnet environments. Sandbox clusters remain on Ubuntu for development convenience.
|
||||
|
||||
## What is OramaOS?
|
||||
|
||||
OramaOS is a locked-down operating system designed specifically for Orama node operators. Key properties:
|
||||
|
||||
- **No SSH, no shell** — operators cannot access the filesystem or run commands on the machine
|
||||
- **LUKS full-disk encryption** — the data partition is encrypted; the key is split via Shamir's Secret Sharing across peer nodes
|
||||
- **Read-only rootfs** — the OS image uses SquashFS with dm-verity integrity verification
|
||||
- **A/B partition updates** — signed OS images are applied atomically with automatic rollback on failure
|
||||
- **Service sandboxing** — each service runs in its own Linux namespace with seccomp syscall filtering
|
||||
- **Signed binaries** — all updates are cryptographically signed with the Orama rootwallet
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
Partition Layout:
|
||||
/dev/sda1 — ESP (EFI System Partition, systemd-boot)
|
||||
/dev/sda2 — rootfs-A (SquashFS, read-only, dm-verity)
|
||||
/dev/sda3 — rootfs-B (standby, for A/B updates)
|
||||
/dev/sda4 — data (LUKS2 encrypted, ext4)
|
||||
|
||||
Boot Flow:
|
||||
systemd-boot → dm-verity rootfs → orama-agent → WireGuard → services
|
||||
```
|
||||
|
||||
The **orama-agent** is the only root process. It manages:
|
||||
- Boot sequence and LUKS key reconstruction
|
||||
- WireGuard tunnel setup
|
||||
- Service lifecycle (start, stop, restart in sandboxed namespaces)
|
||||
- Command reception from the Gateway over WireGuard
|
||||
- OS updates (download, verify signature, A/B swap, reboot)
|
||||
|
||||
## Enrollment Flow
|
||||
|
||||
OramaOS nodes join the cluster through an enrollment process (different from the Ubuntu `orama node install` flow):
|
||||
|
||||
### Step 1: Flash OramaOS to VPS
|
||||
|
||||
Download the OramaOS image and flash it to your VPS:
|
||||
|
||||
```bash
|
||||
# Download image (URL provided upon acceptance)
|
||||
wget https://releases.orama.network/oramaos-v1.0.0-amd64.qcow2
|
||||
|
||||
# Flash to VPS (provider-specific — Hetzner, Vultr, etc.)
|
||||
# Most providers support uploading custom images via their dashboard
|
||||
```
|
||||
|
||||
### Step 2: First Boot — Enrollment Mode
|
||||
|
||||
On first boot, the agent:
|
||||
1. Generates a random 8-character registration code
|
||||
2. Starts a temporary HTTP server on port 9999
|
||||
3. Opens an outbound WebSocket to the Gateway
|
||||
4. Waits for enrollment to complete
|
||||
|
||||
The registration code is displayed on the VPS console (if available) and served at `http://<vps-ip>:9999/`.
|
||||
|
||||
### Step 3: Run Enrollment from CLI
|
||||
|
||||
On your local machine (where you have the `orama` CLI and rootwallet):
|
||||
|
||||
```bash
|
||||
# Generate an invite token on any existing cluster node
|
||||
orama node invite --expiry 24h
|
||||
|
||||
# Enroll the OramaOS node
|
||||
orama node enroll --node-ip <vps-public-ip> --token <invite-token> --gateway <gateway-url>
|
||||
```
|
||||
|
||||
The enrollment command:
|
||||
1. Fetches the registration code from the node (port 9999)
|
||||
2. Sends the code + invite token to the Gateway
|
||||
3. Gateway validates everything, assigns a WireGuard IP, and pushes config to the node
|
||||
4. Node configures WireGuard, formats the LUKS-encrypted data partition
|
||||
5. LUKS key is split via Shamir and distributed to peer vault-guardians
|
||||
6. Services start in sandboxed namespaces
|
||||
7. Port 9999 closes permanently
|
||||
|
||||
### Step 4: Verify
|
||||
|
||||
```bash
|
||||
# Check the node is online and healthy
|
||||
orama monitor report --env <env>
|
||||
```
|
||||
|
||||
## Genesis Node
|
||||
|
||||
The first OramaOS node in a cluster is the **genesis node**. It has a special boot path because there are no peers yet for Shamir key distribution:
|
||||
|
||||
1. Genesis generates a LUKS key and encrypts the data partition
|
||||
2. The LUKS key is encrypted with a rootwallet-derived key and stored on the unencrypted rootfs
|
||||
3. On reboot (before enough peers exist), the operator must manually unlock:
|
||||
|
||||
```bash
|
||||
orama node unlock --genesis --node-ip <wg-ip>
|
||||
```
|
||||
|
||||
This command:
|
||||
1. Fetches the encrypted genesis key from the node
|
||||
2. Decrypts it using the rootwallet (`rw decrypt`)
|
||||
3. Sends the decrypted LUKS key to the agent over WireGuard
|
||||
|
||||
Once 5+ peers have joined, the genesis node distributes Shamir shares to peers, deletes the local encrypted key, and transitions to normal Shamir-based unlock. After this transition, `orama node unlock` is no longer needed.
|
||||
|
||||
## Normal Reboot (Shamir Unlock)
|
||||
|
||||
When an enrolled OramaOS node reboots:
|
||||
|
||||
1. Agent starts, brings up WireGuard
|
||||
2. Contacts peer vault-guardians over WireGuard
|
||||
3. Fetches K Shamir shares (K = threshold, typically `max(3, N/3)`)
|
||||
4. Reconstructs LUKS key via Lagrange interpolation over GF(256)
|
||||
5. Decrypts and mounts data partition
|
||||
6. Starts all services
|
||||
7. Zeros key from memory
|
||||
|
||||
If not enough peers are available, the agent enters a degraded "waiting for peers" state and retries with exponential backoff (1s, 2s, 4s, 8s, 16s, max 5 retries per cycle).
|
||||
|
||||
## Node Management
|
||||
|
||||
Since OramaOS has no SSH, all management happens through the Gateway API:
|
||||
|
||||
```bash
|
||||
# Check node status
|
||||
curl "https://gateway.example.com/v1/node/status?node_id=<id>"
|
||||
|
||||
# Send a command (e.g., restart a service)
|
||||
curl -X POST "https://gateway.example.com/v1/node/command?node_id=<id>" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"action":"restart","service":"rqlite"}'
|
||||
|
||||
# View logs
|
||||
curl "https://gateway.example.com/v1/node/logs?node_id=<id>&service=gateway&lines=100"
|
||||
|
||||
# Graceful node departure
|
||||
curl -X POST "https://gateway.example.com/v1/node/leave" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"node_id":"<id>"}'
|
||||
```
|
||||
|
||||
The Gateway proxies these requests to the agent over WireGuard (port 9998). The agent is never directly accessible from the public internet.
|
||||
|
||||
## OS Updates
|
||||
|
||||
OramaOS uses an A/B partition scheme for atomic, rollback-safe updates:
|
||||
|
||||
1. Agent periodically checks for new versions
|
||||
2. Downloads the signed image (P2P over WireGuard between nodes)
|
||||
3. Verifies the rootwallet EVM signature against the embedded public key
|
||||
4. Writes to the standby partition (if running from A, writes to B)
|
||||
5. Sets systemd-boot to boot from B with `tries_left=3`
|
||||
6. Reboots
|
||||
7. If B boots successfully (agent starts, WG connects, services healthy): marks B as "good"
|
||||
8. If B fails 3 times: systemd-boot automatically falls back to A
|
||||
|
||||
No operator intervention is needed for updates. Failed updates are automatically rolled back.
|
||||
|
||||
## Service Sandboxing
|
||||
|
||||
Each service on OramaOS runs in an isolated environment:
|
||||
|
||||
- **Mount namespace** — each service only sees its own data directory as writable; everything else is read-only
|
||||
- **UTS namespace** — isolated hostname
|
||||
- **Dedicated UID/GID** — each service runs as a different user (not root)
|
||||
- **Seccomp filtering** — per-service syscall allowlist (initially in audit mode, then enforce mode)
|
||||
|
||||
Services and their sandbox profiles:
|
||||
| Service | Writable Path | Extra Syscalls |
|
||||
|---------|--------------|----------------|
|
||||
| RQLite | `/opt/orama/.orama/data/rqlite` | fsync, fdatasync (Raft + SQLite WAL) |
|
||||
| Olric | `/opt/orama/.orama/data/olric` | sendmmsg, recvmmsg (gossip) |
|
||||
| IPFS | `/opt/orama/.orama/data/ipfs` | sendfile, splice (data transfer) |
|
||||
| Gateway | `/opt/orama/.orama/data/gateway` | sendfile, splice (HTTP) |
|
||||
| CoreDNS | `/opt/orama/.orama/data/coredns` | sendmmsg, recvmmsg (DNS) |
|
||||
|
||||
## OramaOS vs Ubuntu Deployment
|
||||
|
||||
| Feature | Ubuntu | OramaOS |
|
||||
|---------|--------|---------|
|
||||
| SSH access | Yes | No |
|
||||
| Shell access | Yes | No |
|
||||
| Disk encryption | No | LUKS2 (Shamir) |
|
||||
| OS updates | Manual (`orama node upgrade`) | Automatic (signed, A/B) |
|
||||
| Service isolation | systemd only | Namespaces + seccomp |
|
||||
| Rootfs integrity | None | dm-verity |
|
||||
| Binary signing | Optional | Required |
|
||||
| Operator data access | Full | None |
|
||||
| Environments | All (including sandbox) | Mainnet, devnet, testnet |
|
||||
|
||||
## Cleaning / Factory Reset
|
||||
|
||||
OramaOS nodes cannot be cleaned with the standard `orama node clean` command (no SSH access). Instead:
|
||||
|
||||
- **Graceful departure:** `orama node leave` via the Gateway API — stops services, redistributes Shamir shares, removes WG peer
|
||||
- **Factory reset:** Reflash the OramaOS image on the VPS via the hosting provider's dashboard
|
||||
- **Data is unrecoverable:** Since the LUKS key is distributed across peers, reflashing destroys all data permanently
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Node stuck in enrollment mode
|
||||
The node boots but enrollment never completes.
|
||||
|
||||
**Check:** Can you reach `http://<vps-ip>:9999/` from your machine? If not, the VPS firewall may be blocking port 9999.
|
||||
|
||||
**Fix:** Ensure port 9999 is open in the VPS provider's firewall. OramaOS opens it automatically via its internal firewall, but external provider firewalls (Hetzner, AWS security groups) must be configured separately.
|
||||
|
||||
### LUKS unlock fails (not enough peers)
|
||||
After reboot, the node can't reconstruct its LUKS key.
|
||||
|
||||
**Check:** How many peer nodes are online? The node needs at least K peers (threshold) to be reachable over WireGuard.
|
||||
|
||||
**Fix:** Ensure enough cluster nodes are online. If this is the genesis node and fewer than 5 peers exist, use:
|
||||
```bash
|
||||
orama node unlock --genesis --node-ip <wg-ip>
|
||||
```
|
||||
|
||||
### Update failed, node rolled back
|
||||
The node applied an update but reverted to the previous version.
|
||||
|
||||
**Check:** The agent logs will show why the new partition failed to boot (accessible via `GET /v1/node/logs?service=agent`).
|
||||
|
||||
**Common causes:** Corrupted download (signature verification should catch this), hardware issue, or incompatible configuration.
|
||||
|
||||
### Services not starting after reboot
|
||||
The node rebooted and LUKS unlocked, but services are unhealthy.
|
||||
|
||||
**Check:** `GET /v1/node/status` — which services are down?
|
||||
|
||||
**Fix:** Try restarting the specific service via `POST /v1/node/command` with `{"action":"restart","service":"<name>"}`. If the issue persists, check service logs.
|
||||
208
core/docs/SANDBOX.md
Normal file
208
core/docs/SANDBOX.md
Normal file
@ -0,0 +1,208 @@
|
||||
# Sandbox: Ephemeral Hetzner Cloud Clusters
|
||||
|
||||
Spin up temporary 5-node Orama clusters on Hetzner Cloud for development and testing. Total cost: ~€0.04/hour.
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# One-time setup (API key, domain, floating IPs, SSH key)
|
||||
orama sandbox setup
|
||||
|
||||
# Create a cluster (~5 minutes)
|
||||
orama sandbox create --name my-feature
|
||||
|
||||
# Check health
|
||||
orama sandbox status
|
||||
|
||||
# SSH into a node
|
||||
orama sandbox ssh 1
|
||||
|
||||
# Deploy code changes
|
||||
orama sandbox rollout
|
||||
|
||||
# Tear it down
|
||||
orama sandbox destroy
|
||||
```
|
||||
|
||||
## Prerequisites
|
||||
|
||||
### 1. Hetzner Cloud Account
|
||||
|
||||
Create a project at [console.hetzner.cloud](https://console.hetzner.cloud) and generate an API token with read/write permissions under **Security > API Tokens**.
|
||||
|
||||
### 2. Domain with Glue Records
|
||||
|
||||
You need a domain (or subdomain) that points to Hetzner Floating IPs. The `orama sandbox setup` wizard will guide you through this.
|
||||
|
||||
**Example:** Using `sbx.dbrs.space`
|
||||
|
||||
At your domain registrar:
|
||||
1. Create glue records (Personal DNS Servers):
|
||||
- `ns1.sbx.dbrs.space` → `<floating-ip-1>`
|
||||
- `ns2.sbx.dbrs.space` → `<floating-ip-2>`
|
||||
2. Set custom nameservers for `sbx.dbrs.space`:
|
||||
- `ns1.sbx.dbrs.space`
|
||||
- `ns2.sbx.dbrs.space`
|
||||
|
||||
DNS propagation can take up to 48 hours.
|
||||
|
||||
### 3. Binary Archive
|
||||
|
||||
Build the binary archive before creating a cluster:
|
||||
|
||||
```bash
|
||||
orama build
|
||||
```
|
||||
|
||||
This creates `/tmp/orama-<version>-linux-amd64.tar.gz` with all pre-compiled binaries.
|
||||
|
||||
## Setup
|
||||
|
||||
Run the interactive setup wizard:
|
||||
|
||||
```bash
|
||||
orama sandbox setup
|
||||
```
|
||||
|
||||
This will:
|
||||
1. Prompt for your Hetzner API token and validate it
|
||||
2. Ask for your sandbox domain
|
||||
3. Create or reuse 2 Hetzner Floating IPs (~€0.005/hr each)
|
||||
4. Create a firewall with sandbox rules
|
||||
5. Create a rootwallet SSH entry (`sandbox/root`) if it doesn't exist
|
||||
6. Upload the wallet-derived public key to Hetzner
|
||||
7. Display DNS configuration instructions
|
||||
|
||||
Config is saved to `~/.orama/sandbox.yaml`.
|
||||
|
||||
## Commands
|
||||
|
||||
### `orama sandbox create [--name <name>]`
|
||||
|
||||
Creates a new 5-node cluster. If `--name` is omitted, a random name is generated (e.g., "swift-falcon").
|
||||
|
||||
**Cluster layout:**
|
||||
- Nodes 1-2: Nameservers (CoreDNS + Caddy + all services)
|
||||
- Nodes 3-5: Regular nodes (all services except CoreDNS)
|
||||
|
||||
**Phases:**
|
||||
1. Provision 5 CX22 servers on Hetzner (parallel, ~90s)
|
||||
2. Assign floating IPs to nameserver nodes (~10s)
|
||||
3. Upload binary archive to all nodes (parallel, ~60s)
|
||||
4. Install genesis node + generate invite tokens (~120s)
|
||||
5. Join remaining 4 nodes (serial with health checks, ~180s)
|
||||
6. Verify cluster health (~15s)
|
||||
|
||||
**One sandbox at a time.** Since the floating IPs are shared, only one sandbox can own the nameservers. Destroy the active sandbox before creating a new one.
|
||||
|
||||
### `orama sandbox destroy [--name <name>] [--force]`
|
||||
|
||||
Tears down a cluster:
|
||||
1. Unassigns floating IPs
|
||||
2. Deletes all 5 servers (parallel)
|
||||
3. Removes state file
|
||||
|
||||
Use `--force` to skip confirmation.
|
||||
|
||||
### `orama sandbox list`
|
||||
|
||||
Lists all sandboxes with their status. Also checks Hetzner for orphaned servers that don't have a corresponding state file.
|
||||
|
||||
### `orama sandbox status [--name <name>]`
|
||||
|
||||
Shows per-node health including:
|
||||
- Service status (active/inactive)
|
||||
- RQLite role (Leader/Follower)
|
||||
- Cluster summary (commit index, voter count)
|
||||
|
||||
### `orama sandbox rollout [--name <name>]`
|
||||
|
||||
Deploys code changes:
|
||||
1. Uses the latest binary archive from `/tmp/` (run `orama build` first)
|
||||
2. Pushes to all nodes
|
||||
3. Rolling upgrade: followers first, leader last, 15s between nodes
|
||||
|
||||
### `orama sandbox ssh <node-number>`
|
||||
|
||||
Opens an interactive SSH session to a sandbox node (1-5).
|
||||
|
||||
```bash
|
||||
orama sandbox ssh 1 # SSH into node 1 (genesis/ns1)
|
||||
orama sandbox ssh 3 # SSH into node 3 (regular node)
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
### Floating IPs
|
||||
|
||||
Hetzner Floating IPs are persistent IPv4 addresses that can be reassigned between servers. They solve the DNS chicken-and-egg problem:
|
||||
|
||||
- Glue records at the registrar point to 2 Floating IPs (configured once)
|
||||
- Each new sandbox assigns the Floating IPs to its nameserver nodes
|
||||
- DNS works instantly — no propagation delay between clusters
|
||||
|
||||
### SSH Authentication
|
||||
|
||||
Sandbox uses a rootwallet-derived SSH key (`sandbox/root` vault entry), the same mechanism as production. The wallet must be unlocked (`rw unlock`) before running sandbox commands that use SSH. The public key is uploaded to Hetzner during setup and injected into every server at creation time.
|
||||
|
||||
### Server Naming
|
||||
|
||||
Servers: `sbx-<name>-<N>` (e.g., `sbx-swift-falcon-1` through `sbx-swift-falcon-5`)
|
||||
|
||||
### State Files
|
||||
|
||||
Sandbox state is stored at `~/.orama/sandboxes/<name>.yaml`. This tracks server IDs, IPs, roles, and cluster status.
|
||||
|
||||
## Cost
|
||||
|
||||
| Resource | Cost | Qty | Total |
|
||||
|----------|------|-----|-------|
|
||||
| CX22 (2 vCPU, 4GB) | €0.006/hr | 5 | €0.03/hr |
|
||||
| Floating IPv4 | €0.005/hr | 2 | €0.01/hr |
|
||||
| **Total** | | | **~€0.04/hr** |
|
||||
|
||||
Servers are billed per hour. Floating IPs are billed as long as they exist (even unassigned). Destroy the sandbox when not in use to save on server costs.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "sandbox not configured"
|
||||
|
||||
Run `orama sandbox setup` first.
|
||||
|
||||
### "no binary archive found"
|
||||
|
||||
Run `orama build` to create the binary archive.
|
||||
|
||||
### "sandbox X is already active"
|
||||
|
||||
Only one sandbox can be active at a time. Destroy it first:
|
||||
```bash
|
||||
orama sandbox destroy --name <name>
|
||||
```
|
||||
|
||||
### Server creation fails
|
||||
|
||||
Check:
|
||||
- Hetzner API token is valid and has read/write permissions
|
||||
- You haven't hit Hetzner's server limit (default: 10 per project)
|
||||
- The selected location has CX22 capacity
|
||||
|
||||
### Genesis install fails
|
||||
|
||||
SSH into the node to debug:
|
||||
```bash
|
||||
orama sandbox ssh 1
|
||||
journalctl -u orama-node -f
|
||||
```
|
||||
|
||||
The sandbox will be left in "error" state. You can destroy and recreate it.
|
||||
|
||||
### DNS not resolving
|
||||
|
||||
1. Verify glue records are configured at your registrar
|
||||
2. Check propagation: `dig NS sbx.dbrs.space @8.8.8.8`
|
||||
3. Propagation can take 24-48 hours for new domains
|
||||
|
||||
### Orphaned servers
|
||||
|
||||
If `orama sandbox list` shows orphaned servers, delete them manually at [console.hetzner.cloud](https://console.hetzner.cloud). Sandbox servers are labeled `orama-sandbox=<name>` for easy identification.
|
||||
194
core/docs/SECURITY.md
Normal file
194
core/docs/SECURITY.md
Normal file
@ -0,0 +1,194 @@
|
||||
# Security Hardening
|
||||
|
||||
This document describes all security measures applied to the Orama Network, covering both Phase 1 (service hardening on existing Ubuntu nodes) and Phase 2 (OramaOS locked-down image).
|
||||
|
||||
## Phase 1: Service Hardening
|
||||
|
||||
These measures apply to all nodes (Ubuntu and OramaOS).
|
||||
|
||||
### Network Isolation
|
||||
|
||||
**CIDR Validation (Step 1.1)**
|
||||
- WireGuard subnet restricted to `10.0.0.0/24` across all components: firewall rules, rate limiter, auth module, and WireGuard PostUp/PostDown iptables rules
|
||||
- Prevents other tenants on shared VPS providers from bypassing the firewall via overlapping `10.x.x.x` ranges
|
||||
|
||||
**IPv6 Disabled (Step 1.2)**
|
||||
- IPv6 disabled system-wide via sysctl: `net.ipv6.conf.all.disable_ipv6=1`
|
||||
- Prevents services bound to `0.0.0.0` from being reachable via IPv6 (which had no firewall rules)
|
||||
|
||||
### Authentication
|
||||
|
||||
**Internal Endpoint Auth (Step 1.3)**
|
||||
- `/v1/internal/wg/peers` and `/v1/internal/wg/peer/remove` now require cluster secret validation
|
||||
- Peer removal additionally validates the request originates from a WireGuard subnet IP
|
||||
|
||||
**RQLite Authentication (Step 1.7)**
|
||||
- RQLite runs with `-auth` flag pointing to a credentials file
|
||||
- All RQLite HTTP requests include `Authorization: Basic <base64>` headers
|
||||
- Credentials generated at cluster genesis, distributed to joining nodes via join response
|
||||
- Both the central RQLite client wrapper and the standalone CoreDNS RQLite client send auth
|
||||
|
||||
**Olric Gossip Encryption (Step 1.8)**
|
||||
- Olric memberlist uses a 32-byte encryption key for all gossip traffic
|
||||
- Key generated at genesis, distributed via join response
|
||||
- Prevents rogue nodes from joining the gossip ring and poisoning caches
|
||||
- Note: encryption is all-or-nothing (coordinated restart required when enabling)
|
||||
|
||||
**IPFS Cluster TrustedPeers (Step 1.9)**
|
||||
- IPFS Cluster `TrustedPeers` populated with actual cluster peer IDs (was `["*"]`)
|
||||
- New peers added to TrustedPeers on all existing nodes during join
|
||||
- Prevents unauthorized peers from controlling IPFS pinning
|
||||
|
||||
**Vault V1 Auth Enforcement (Step 1.14)**
|
||||
- V1 push/pull endpoints require a valid session token when vault-guardian is configured
|
||||
- Previously, auth was optional for backward compatibility — any WG peer could read/overwrite Shamir shares
|
||||
|
||||
### Token & Key Storage
|
||||
|
||||
**Refresh Token Hashing (Step 1.5)**
|
||||
- Refresh tokens stored as SHA-256 hashes in RQLite (never plaintext)
|
||||
- On lookup: hash the incoming token, query by hash
|
||||
- On revocation: hash before revoking (both single-token and by-subject)
|
||||
- Existing tokens invalidated on upgrade (users re-authenticate)
|
||||
|
||||
**API Key Hashing (Step 1.6)**
|
||||
- API keys stored as HMAC-SHA256 hashes using a server-side secret
|
||||
- HMAC secret generated at cluster genesis, stored in `~/.orama/secrets/api-key-hmac-secret`
|
||||
- On lookup: compute HMAC, query by hash — fast enough for every request (unlike bcrypt)
|
||||
- In-memory cache uses raw key as cache key (never persisted)
|
||||
- During rolling upgrade: dual lookup (HMAC first, then raw as fallback) until all nodes upgraded
|
||||
|
||||
**TURN Secret Encryption (Step 1.15)**
|
||||
- TURN shared secrets encrypted at rest in RQLite using AES-256-GCM
|
||||
- Encryption key derived via HKDF from the cluster secret with purpose string `"turn-encryption"`
|
||||
|
||||
### TLS & Transport
|
||||
|
||||
**InsecureSkipVerify Fix (Step 1.10)**
|
||||
- During node join, TLS verification uses TOFU (Trust On First Use)
|
||||
- Invite token output includes the CA certificate fingerprint (SHA-256)
|
||||
- Joining node verifies the server cert fingerprint matches before proceeding
|
||||
- After join: CA cert stored locally for future connections
|
||||
|
||||
**WebSocket Origin Validation (Step 1.4)**
|
||||
- All WebSocket upgraders validate the `Origin` header against the node's configured domain
|
||||
- Non-browser clients (no Origin header) are still allowed
|
||||
- Prevents cross-site WebSocket hijacking attacks
|
||||
|
||||
### Process Isolation
|
||||
|
||||
**Dedicated User (Step 1.11)**
|
||||
- All services run as the `orama` user (not root)
|
||||
- Caddy and CoreDNS get `AmbientCapabilities=CAP_NET_BIND_SERVICE` for ports 80/443 and 53
|
||||
- WireGuard stays as root (kernel netlink requires it)
|
||||
- vault-guardian already had proper hardening
|
||||
|
||||
**systemd Hardening (Step 1.12)**
|
||||
- All service units include:
|
||||
```ini
|
||||
ProtectSystem=strict
|
||||
ProtectHome=yes
|
||||
NoNewPrivileges=yes
|
||||
PrivateDevices=yes
|
||||
ProtectKernelTunables=yes
|
||||
ProtectKernelModules=yes
|
||||
RestrictNamespaces=yes
|
||||
ReadWritePaths=/opt/orama/.orama
|
||||
```
|
||||
- Applied to both template files (`pkg/environments/templates/`) and hardcoded unit generators (`pkg/environments/production/services.go`)
|
||||
|
||||
### Supply Chain
|
||||
|
||||
**Binary Signing (Step 1.13)**
|
||||
- Build archives include `manifest.sig` — a rootwallet EVM signature of the manifest hash
|
||||
- During install, the signature is verified against the embedded Orama public key
|
||||
- Unsigned or tampered archives are rejected
|
||||
|
||||
## Phase 2: OramaOS
|
||||
|
||||
These measures apply only to OramaOS nodes (mainnet, devnet, testnet).
|
||||
|
||||
### Immutable OS
|
||||
|
||||
- **Read-only rootfs** — SquashFS with dm-verity integrity verification
|
||||
- **No shell** — `/bin/sh` symlinked to `/bin/false`, no bash/ash/ssh
|
||||
- **No SSH** — OpenSSH not included in the image
|
||||
- **Minimal packages** — only what's needed for systemd, cryptsetup, and the agent
|
||||
|
||||
### Full-Disk Encryption
|
||||
|
||||
- **LUKS2** with AES-XTS-Plain64 on the data partition
|
||||
- **Shamir's Secret Sharing** over GF(256) — LUKS key split across peer vault-guardians
|
||||
- **Adaptive threshold** — K = max(3, N/3) where N is the number of peers
|
||||
- **Key zeroing** — LUKS key wiped from memory immediately after use
|
||||
- **Malicious share detection** — fetch K+1 shares when possible, verify consistency
|
||||
|
||||
### Service Sandboxing
|
||||
|
||||
Each service runs in isolated Linux namespaces:
|
||||
- **CLONE_NEWNS** — mount namespace (filesystem isolation)
|
||||
- **CLONE_NEWUTS** — hostname namespace
|
||||
- **Dedicated UID/GID** — each service has its own user
|
||||
- **Seccomp filtering** — per-service syscall allowlist
|
||||
|
||||
Note: CLONE_NEWPID is intentionally omitted — it makes services PID 1 in their namespace, which changes signal semantics (SIGTERM ignored by default for PID 1).
|
||||
|
||||
### Signed Updates
|
||||
|
||||
- A/B partition scheme with systemd-boot and boot counting (`tries_left=3`)
|
||||
- All updates signed with rootwallet EVM signature (secp256k1 + keccak256)
|
||||
- Signer address: `0xb5d8a496c8b2412990d7D467E17727fdF5954afC`
|
||||
- P2P distribution over WireGuard between nodes
|
||||
- Automatic rollback on 3 consecutive boot failures
|
||||
|
||||
### Zero Operator Access
|
||||
|
||||
- Operators cannot read data on the machine (LUKS encrypted, no shell)
|
||||
- Management only through Gateway API → agent over WireGuard
|
||||
- All commands are logged and auditable
|
||||
- No root access, no console access, no file system access
|
||||
|
||||
## Rollout Strategy
|
||||
|
||||
### Phase 1 Batches
|
||||
|
||||
```
|
||||
Batch 1 (zero-risk, no restart):
|
||||
- CIDR fix
|
||||
- IPv6 disable
|
||||
- Internal endpoint auth
|
||||
- WebSocket origin check
|
||||
|
||||
Batch 2 (medium-risk, restart needed):
|
||||
- Hash refresh tokens
|
||||
- Hash API keys
|
||||
- Binary signing
|
||||
- Vault V1 auth enforcement
|
||||
- TURN secret encryption
|
||||
|
||||
Batch 3 (high-risk, coordinated rollout):
|
||||
- RQLite auth (followers first, leader last)
|
||||
- Olric encryption (simultaneous restart)
|
||||
- IPFS Cluster TrustedPeers
|
||||
|
||||
Batch 4 (infrastructure changes):
|
||||
- InsecureSkipVerify fix
|
||||
- Dedicated user
|
||||
- systemd hardening
|
||||
```
|
||||
|
||||
### Phase 2
|
||||
|
||||
1. Build and test OramaOS image in QEMU
|
||||
2. Deploy to sandbox cluster alongside Ubuntu nodes
|
||||
3. Verify interop and stability
|
||||
4. Gradual migration: testnet → devnet → mainnet (one node at a time, maintaining Raft quorum)
|
||||
|
||||
## Verification
|
||||
|
||||
All changes verified on sandbox cluster before production deployment:
|
||||
|
||||
- `make test` — all unit tests pass
|
||||
- `orama monitor report --env sandbox` — full cluster health
|
||||
- Manual endpoint testing (e.g., curl without auth → 401)
|
||||
- Security-specific checks (IPv6 listeners, RQLite auth, binary signatures)
|
||||
374
core/docs/SERVERLESS.md
Normal file
374
core/docs/SERVERLESS.md
Normal file
@ -0,0 +1,374 @@
|
||||
# Serverless Functions
|
||||
|
||||
Orama Network runs serverless functions as sandboxed WebAssembly (WASM) modules. Functions are written in Go, compiled to WASM with TinyGo, and executed in an isolated wazero runtime with configurable memory limits and timeouts.
|
||||
|
||||
Functions receive input via **stdin** (JSON) and return output via **stdout** (JSON). They can also access Orama services — database, cache, storage, secrets, PubSub, and HTTP — through **host functions** injected by the runtime.
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# 1. Scaffold a new function
|
||||
orama function init my-function
|
||||
|
||||
# 2. Edit your handler
|
||||
cd my-function
|
||||
# edit function.go
|
||||
|
||||
# 3. Build to WASM
|
||||
orama function build
|
||||
|
||||
# 4. Deploy
|
||||
orama function deploy
|
||||
|
||||
# 5. Invoke
|
||||
orama function invoke my-function --data '{"name": "World"}'
|
||||
|
||||
# 6. View logs
|
||||
orama function logs my-function
|
||||
```
|
||||
|
||||
## Project Structure
|
||||
|
||||
```
|
||||
my-function/
|
||||
├── function.go # Handler code
|
||||
└── function.yaml # Configuration
|
||||
```
|
||||
|
||||
### function.yaml
|
||||
|
||||
```yaml
|
||||
name: my-function # Required. Letters, digits, hyphens, underscores.
|
||||
public: false # Allow unauthenticated invocation (default: false)
|
||||
memory: 64 # Memory limit in MB (1-256, default: 64)
|
||||
timeout: 30 # Execution timeout in seconds (1-300, default: 30)
|
||||
retry:
|
||||
count: 0 # Retry attempts on failure (default: 0)
|
||||
delay: 5 # Seconds between retries (default: 5)
|
||||
env: # Environment variables (accessible via get_env)
|
||||
MY_VAR: "value"
|
||||
```
|
||||
|
||||
### function.go (minimal)
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Read JSON input from stdin
|
||||
var input []byte
|
||||
buf := make([]byte, 4096)
|
||||
for {
|
||||
n, err := os.Stdin.Read(buf)
|
||||
if n > 0 {
|
||||
input = append(input, buf[:n]...)
|
||||
}
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
var payload map[string]interface{}
|
||||
json.Unmarshal(input, &payload)
|
||||
|
||||
// Process and return JSON output via stdout
|
||||
response := map[string]interface{}{
|
||||
"result": "Hello!",
|
||||
}
|
||||
output, _ := json.Marshal(response)
|
||||
os.Stdout.Write(output)
|
||||
}
|
||||
```
|
||||
|
||||
### Building
|
||||
|
||||
Functions are compiled to WASM using [TinyGo](https://tinygo.org/):
|
||||
|
||||
```bash
|
||||
# Using the CLI (recommended)
|
||||
orama function build
|
||||
|
||||
# Or manually
|
||||
tinygo build -o function.wasm -target wasi function.go
|
||||
```
|
||||
|
||||
## Host Functions API
|
||||
|
||||
Host functions let your WASM code interact with Orama services. They are imported from the `"env"` or `"host"` module (both work) and use a pointer/length ABI for string parameters.
|
||||
|
||||
All host functions are registered at runtime by the engine. They are available to every function without additional configuration.
|
||||
|
||||
### Context
|
||||
|
||||
| Function | Description |
|
||||
|----------|-------------|
|
||||
| `get_caller_wallet()` → string | Wallet address of the caller (from JWT) |
|
||||
| `get_request_id()` → string | Unique invocation ID |
|
||||
| `get_env(key)` → string | Environment variable from function.yaml |
|
||||
| `get_secret(name)` → string | Decrypted secret value (see [Managing Secrets](#managing-secrets)) |
|
||||
|
||||
### Database (RQLite)
|
||||
|
||||
| Function | Description |
|
||||
|----------|-------------|
|
||||
| `db_query(sql, argsJSON)` → JSON | Execute SELECT query. Args as JSON array. Returns JSON array of row objects. |
|
||||
| `db_execute(sql, argsJSON)` → int | Execute INSERT/UPDATE/DELETE. Returns affected row count. |
|
||||
|
||||
Example query from WASM:
|
||||
```
|
||||
db_query("SELECT push_token, device_type FROM devices WHERE user_id = ?", '["user123"]')
|
||||
→ [{"push_token": "abc...", "device_type": "ios"}]
|
||||
```
|
||||
|
||||
### Cache (Olric Distributed Cache)
|
||||
|
||||
| Function | Description |
|
||||
|----------|-------------|
|
||||
| `cache_get(key)` → bytes | Get cached value by key. Returns empty on miss. |
|
||||
| `cache_set(key, value, ttl)` | Store value with TTL in seconds. |
|
||||
| `cache_incr(key)` → int64 | Atomically increment by 1 (init to 0 if missing). |
|
||||
| `cache_incr_by(key, delta)` → int64 | Atomically increment by delta. |
|
||||
|
||||
### HTTP
|
||||
|
||||
| Function | Description |
|
||||
|----------|-------------|
|
||||
| `http_fetch(method, url, headersJSON, body)` → JSON | Make outbound HTTP request. Headers as JSON object. Returns `{"status": 200, "headers": {...}, "body": "..."}`. Timeout: 30s. |
|
||||
|
||||
### PubSub
|
||||
|
||||
| Function | Description |
|
||||
|----------|-------------|
|
||||
| `pubsub_publish(topic, dataJSON)` → bool | Publish message to a PubSub topic. Returns true on success. |
|
||||
|
||||
### Logging
|
||||
|
||||
| Function | Description |
|
||||
|----------|-------------|
|
||||
| `log_info(message)` | Log info-level message (captured in invocation logs). |
|
||||
| `log_error(message)` | Log error-level message. |
|
||||
|
||||
## Managing Secrets
|
||||
|
||||
Secrets are encrypted at rest (AES-256-GCM) and scoped to your namespace. Functions read them via `get_secret("name")` at runtime.
|
||||
|
||||
### CLI Commands
|
||||
|
||||
```bash
|
||||
# Set a secret (inline value)
|
||||
orama function secrets set APNS_KEY_ID "ABC123DEF"
|
||||
|
||||
# Set a secret from a file (useful for PEM keys, certificates)
|
||||
orama function secrets set APNS_AUTH_KEY --from-file ./AuthKey_ABC123.p8
|
||||
|
||||
# List all secret names (values are never shown)
|
||||
orama function secrets list
|
||||
|
||||
# Delete a secret
|
||||
orama function secrets delete APNS_KEY_ID
|
||||
|
||||
# Delete without confirmation
|
||||
orama function secrets delete APNS_KEY_ID --force
|
||||
```
|
||||
|
||||
### How It Works
|
||||
|
||||
1. **You set secrets** via the CLI → encrypted and stored in the database
|
||||
2. **Functions read secrets** at runtime via `get_secret("name")` → decrypted on demand
|
||||
3. **Namespace isolation** → each namespace has its own secret store; functions in namespace A cannot read secrets from namespace B
|
||||
|
||||
## PubSub Triggers
|
||||
|
||||
Triggers let functions react to events automatically. When a message is published to a PubSub topic, all functions with a trigger on that topic are invoked asynchronously.
|
||||
|
||||
### CLI Commands
|
||||
|
||||
```bash
|
||||
# Add a trigger: invoke "call-push-handler" when messages hit "calls:invite"
|
||||
orama function triggers add call-push-handler --topic calls:invite
|
||||
|
||||
# List triggers for a function
|
||||
orama function triggers list call-push-handler
|
||||
|
||||
# Delete a trigger
|
||||
orama function triggers delete call-push-handler <trigger-id>
|
||||
```
|
||||
|
||||
### Trigger Event Payload
|
||||
|
||||
When triggered via PubSub, the function receives this JSON via stdin:
|
||||
|
||||
```json
|
||||
{
|
||||
"topic": "calls:invite",
|
||||
"data": { ... },
|
||||
"namespace": "my-namespace",
|
||||
"trigger_depth": 1,
|
||||
"timestamp": 1708972800
|
||||
}
|
||||
```
|
||||
|
||||
### Depth Limiting
|
||||
|
||||
To prevent infinite loops (function A publishes to topic → triggers function A again), trigger depth is tracked. Maximum depth is **5**. If a function's output triggers another function, `trigger_depth` increments. At depth 5, no further triggers fire.
|
||||
|
||||
## Function Lifecycle
|
||||
|
||||
### Versioning
|
||||
|
||||
Each deploy creates a new version. The WASM binary is stored in **IPFS** (content-addressed) and metadata is stored in **RQLite**.
|
||||
|
||||
```bash
|
||||
# List versions
|
||||
orama function versions my-function
|
||||
|
||||
# Invoke a specific version
|
||||
curl -X POST .../v1/functions/my-function@2/invoke
|
||||
```
|
||||
|
||||
### Invocation Logging
|
||||
|
||||
Every invocation is logged with: request ID, duration, status (success/error/timeout), input/output size, and any `log_info`/`log_error` messages.
|
||||
|
||||
```bash
|
||||
orama function logs my-function
|
||||
```
|
||||
|
||||
## CLI Reference
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `orama function init <name>` | Scaffold a new function project |
|
||||
| `orama function build [dir]` | Compile Go to WASM |
|
||||
| `orama function deploy [dir]` | Deploy WASM to the network |
|
||||
| `orama function invoke <name> --data <json>` | Invoke a function |
|
||||
| `orama function list` | List deployed functions |
|
||||
| `orama function get <name>` | Get function details |
|
||||
| `orama function delete <name>` | Delete a function |
|
||||
| `orama function logs <name>` | View invocation logs |
|
||||
| `orama function versions <name>` | List function versions |
|
||||
| `orama function secrets set <name> <value>` | Set an encrypted secret |
|
||||
| `orama function secrets list` | List secret names |
|
||||
| `orama function secrets delete <name>` | Delete a secret |
|
||||
| `orama function triggers add <fn> --topic <t>` | Add PubSub trigger |
|
||||
| `orama function triggers list <fn>` | List triggers |
|
||||
| `orama function triggers delete <fn> <id>` | Delete a trigger |
|
||||
|
||||
## HTTP API Reference
|
||||
|
||||
| Method | Endpoint | Description |
|
||||
|--------|----------|-------------|
|
||||
| POST | `/v1/functions` | Deploy function (multipart/form-data) |
|
||||
| GET | `/v1/functions` | List functions |
|
||||
| GET | `/v1/functions/{name}` | Get function info |
|
||||
| DELETE | `/v1/functions/{name}` | Delete function |
|
||||
| POST | `/v1/functions/{name}/invoke` | Invoke function |
|
||||
| GET | `/v1/functions/{name}/versions` | List versions |
|
||||
| GET | `/v1/functions/{name}/logs` | Get logs |
|
||||
| WS | `/v1/functions/{name}/ws` | WebSocket invoke (streaming) |
|
||||
| PUT | `/v1/functions/secrets` | Set a secret |
|
||||
| GET | `/v1/functions/secrets` | List secret names |
|
||||
| DELETE | `/v1/functions/secrets/{name}` | Delete a secret |
|
||||
| POST | `/v1/functions/{name}/triggers` | Add PubSub trigger |
|
||||
| GET | `/v1/functions/{name}/triggers` | List triggers |
|
||||
| DELETE | `/v1/functions/{name}/triggers/{id}` | Delete trigger |
|
||||
| POST | `/v1/invoke/{namespace}/{name}` | Direct invoke (alt endpoint) |
|
||||
|
||||
## Example: Call Push Handler
|
||||
|
||||
A real-world function that sends VoIP push notifications when a call invite is published to PubSub:
|
||||
|
||||
```yaml
|
||||
# function.yaml
|
||||
name: call-push-handler
|
||||
memory: 128
|
||||
timeout: 30
|
||||
```
|
||||
|
||||
```go
|
||||
// function.go — triggered by PubSub on "calls:invite"
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
)
|
||||
|
||||
// This function:
|
||||
// 1. Receives a call invite event from PubSub trigger
|
||||
// 2. Queries the database for the callee's device info
|
||||
// 3. Reads push notification credentials from secrets
|
||||
// 4. Sends a push notification via http_fetch
|
||||
|
||||
func main() {
|
||||
// Read PubSub trigger event from stdin
|
||||
var input []byte
|
||||
buf := make([]byte, 4096)
|
||||
for {
|
||||
n, err := os.Stdin.Read(buf)
|
||||
if n > 0 {
|
||||
input = append(input, buf[:n]...)
|
||||
}
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Parse the trigger event wrapper
|
||||
var event struct {
|
||||
Topic string `json:"topic"`
|
||||
Data json.RawMessage `json:"data"`
|
||||
}
|
||||
json.Unmarshal(input, &event)
|
||||
|
||||
// Parse the actual call invite data
|
||||
var invite struct {
|
||||
CalleeID string `json:"calleeId"`
|
||||
CallerName string `json:"callerName"`
|
||||
CallType string `json:"callType"`
|
||||
}
|
||||
json.Unmarshal(event.Data, &invite)
|
||||
|
||||
// At this point, the function would use host functions:
|
||||
//
|
||||
// 1. db_query("SELECT push_token, device_type FROM devices WHERE user_id = ?",
|
||||
// json.Marshal([]string{invite.CalleeID}))
|
||||
//
|
||||
// 2. get_secret("FCM_SERVER_KEY") for Android push
|
||||
// get_secret("APNS_KEY_PEM") for iOS push
|
||||
//
|
||||
// 3. http_fetch("POST", "https://fcm.googleapis.com/v1/...", headers, body)
|
||||
//
|
||||
// 4. log_info("Push sent to " + invite.CalleeID)
|
||||
//
|
||||
// Note: Host functions use the WASM ABI (pointer/length).
|
||||
// A Go SDK for ergonomic access is planned.
|
||||
|
||||
response := map[string]interface{}{
|
||||
"status": "sent",
|
||||
"callee": invite.CalleeID,
|
||||
}
|
||||
output, _ := json.Marshal(response)
|
||||
os.Stdout.Write(output)
|
||||
}
|
||||
```
|
||||
|
||||
Deploy and wire the trigger:
|
||||
```bash
|
||||
orama function build
|
||||
orama function deploy
|
||||
|
||||
# Set push notification secrets
|
||||
orama function secrets set FCM_SERVER_KEY "your-fcm-key"
|
||||
orama function secrets set APNS_KEY_PEM --from-file ./AuthKey.p8
|
||||
orama function secrets set APNS_KEY_ID "ABC123"
|
||||
orama function secrets set APNS_TEAM_ID "TEAM456"
|
||||
|
||||
# Wire the PubSub trigger
|
||||
orama function triggers add call-push-handler --topic calls:invite
|
||||
```
|
||||
291
core/docs/WEBRTC.md
Normal file
291
core/docs/WEBRTC.md
Normal file
@ -0,0 +1,291 @@
|
||||
# WebRTC Integration
|
||||
|
||||
Real-time voice, video, and data channels for Orama Network namespaces.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
Client A Client B
|
||||
│ │
|
||||
│ 1. Get TURN credentials (REST) │
|
||||
│ 2. Connect WebSocket (signaling) │
|
||||
│ 3. Exchange SDP/ICE via SFU │
|
||||
│ │
|
||||
▼ ▼
|
||||
┌──────────┐ UDP relay ┌──────────┐
|
||||
│ TURN │◄──────────────────►│ TURN │
|
||||
│ Server │ (public IPs) │ Server │
|
||||
│ Node 1 │ │ Node 2 │
|
||||
└────┬─────┘ └────┬─────┘
|
||||
│ WireGuard │ WireGuard
|
||||
▼ ▼
|
||||
┌──────────────────────────────────────────┐
|
||||
│ SFU Servers (3 nodes) │
|
||||
│ - WebSocket signaling (WireGuard only) │
|
||||
│ - Pion WebRTC (RTP forwarding) │
|
||||
│ - Room management │
|
||||
│ - Track publish/subscribe │
|
||||
└──────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Key design decisions:**
|
||||
- **TURN-shielded**: SFU binds only to WireGuard IPs. All client media flows through TURN relay.
|
||||
- **`iceTransportPolicy: relay`** enforced server-side — no direct peer connections.
|
||||
- **Opt-in per namespace** via `orama namespace enable webrtc`.
|
||||
- **SFU on all 3 nodes**, **TURN on 2 of 3 nodes** (redundancy without over-provisioning).
|
||||
- **Separate port allocation** from existing namespace services.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Namespace must be provisioned with a ready cluster (RQLite + Olric + Gateway running).
|
||||
- Command must be run on a cluster node (uses internal gateway endpoint).
|
||||
|
||||
## Enable / Disable
|
||||
|
||||
```bash
|
||||
# Enable WebRTC for a namespace
|
||||
orama namespace enable webrtc --namespace myapp
|
||||
|
||||
# Check status
|
||||
orama namespace webrtc-status --namespace myapp
|
||||
|
||||
# Disable WebRTC (stops services, deallocates ports, removes DNS)
|
||||
orama namespace disable webrtc --namespace myapp
|
||||
```
|
||||
|
||||
### What happens on enable:
|
||||
1. Generates a per-namespace TURN shared secret (32 bytes, crypto/rand)
|
||||
2. Inserts `namespace_webrtc_config` DB record
|
||||
3. Allocates WebRTC port blocks on each node (SFU signaling + media range, TURN relay range)
|
||||
4. Spawns TURN on 2 nodes (selected by capacity)
|
||||
5. Spawns SFU on all 3 nodes
|
||||
6. Creates DNS A records: `turn.ns-{name}.{baseDomain}` pointing to TURN node public IPs
|
||||
7. Updates cluster state on all nodes (for cold-boot restoration)
|
||||
|
||||
### What happens on disable:
|
||||
1. Stops SFU on all 3 nodes
|
||||
2. Stops TURN on 2 nodes
|
||||
3. Deallocates all WebRTC ports
|
||||
4. Deletes TURN DNS records
|
||||
5. Cleans up DB records (`namespace_webrtc_config`, `webrtc_rooms`)
|
||||
6. Updates cluster state
|
||||
|
||||
## Client Integration (JavaScript)
|
||||
|
||||
### Authentication
|
||||
|
||||
All WebRTC endpoints require authentication. Use one of:
|
||||
|
||||
```
|
||||
# Option A: API Key via header (recommended)
|
||||
X-API-Key: <your-namespace-api-key>
|
||||
|
||||
# Option B: API Key via Authorization header
|
||||
Authorization: ApiKey <your-namespace-api-key>
|
||||
|
||||
# Option C: JWT Bearer token
|
||||
Authorization: Bearer <jwt>
|
||||
```
|
||||
|
||||
### 1. Get TURN Credentials
|
||||
|
||||
```javascript
|
||||
const response = await fetch('https://ns-myapp.orama-devnet.network/v1/webrtc/turn/credentials', {
|
||||
method: 'POST',
|
||||
headers: { 'X-API-Key': apiKey }
|
||||
});
|
||||
|
||||
const { uris, username, password, ttl } = await response.json();
|
||||
// uris: [
|
||||
// "turn:turn.ns-myapp.orama-devnet.network:3478?transport=udp",
|
||||
// "turn:turn.ns-myapp.orama-devnet.network:3478?transport=tcp",
|
||||
// "turns:turn.ns-myapp.orama-devnet.network:5349"
|
||||
// ]
|
||||
// username: "{expiry_unix}:{namespace}"
|
||||
// password: HMAC-SHA1 derived (base64)
|
||||
// ttl: 600 (seconds)
|
||||
```
|
||||
|
||||
### 2. Create PeerConnection
|
||||
|
||||
```javascript
|
||||
const pc = new RTCPeerConnection({
|
||||
iceServers: [{ urls: uris, username, credential: password }],
|
||||
iceTransportPolicy: 'relay' // enforced by SFU
|
||||
});
|
||||
```
|
||||
|
||||
### 3. Connect Signaling WebSocket
|
||||
|
||||
```javascript
|
||||
const ws = new WebSocket(
|
||||
`wss://ns-myapp.orama-devnet.network/v1/webrtc/signal?room=${roomId}&api_key=${apiKey}`
|
||||
);
|
||||
|
||||
ws.onmessage = (event) => {
|
||||
const msg = JSON.parse(event.data);
|
||||
switch (msg.type) {
|
||||
case 'offer': handleOffer(msg); break;
|
||||
case 'answer': handleAnswer(msg); break;
|
||||
case 'ice-candidate': handleICE(msg); break;
|
||||
case 'peer-joined': handleJoin(msg); break;
|
||||
case 'peer-left': handleLeave(msg); break;
|
||||
case 'turn-credentials':
|
||||
case 'refresh-credentials':
|
||||
updateTURN(msg); // SFU sends refreshed creds at 80% TTL
|
||||
break;
|
||||
case 'server-draining':
|
||||
reconnect(); // SFU shutting down, reconnect to another node
|
||||
break;
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
### 4. Room Management (REST)
|
||||
|
||||
```javascript
|
||||
const headers = { 'X-API-Key': apiKey, 'Content-Type': 'application/json' };
|
||||
|
||||
// Create room
|
||||
await fetch('/v1/webrtc/rooms', {
|
||||
method: 'POST',
|
||||
headers,
|
||||
body: JSON.stringify({ room_id: 'my-room' })
|
||||
});
|
||||
|
||||
// List rooms
|
||||
const rooms = await fetch('/v1/webrtc/rooms', { headers });
|
||||
|
||||
// Close room
|
||||
await fetch('/v1/webrtc/rooms?room_id=my-room', {
|
||||
method: 'DELETE',
|
||||
headers
|
||||
});
|
||||
```
|
||||
|
||||
## API Reference
|
||||
|
||||
### REST Endpoints
|
||||
|
||||
| Method | Path | Auth | Description |
|
||||
|--------|------|------|-------------|
|
||||
| POST | `/v1/webrtc/turn/credentials` | JWT/API key | Get TURN relay credentials |
|
||||
| GET/WS | `/v1/webrtc/signal` | JWT/API key | WebSocket signaling |
|
||||
| GET | `/v1/webrtc/rooms` | JWT/API key | List rooms |
|
||||
| POST | `/v1/webrtc/rooms` | JWT/API key (owner) | Create room |
|
||||
| DELETE | `/v1/webrtc/rooms` | JWT/API key (owner) | Close room |
|
||||
|
||||
### Signaling Messages
|
||||
|
||||
| Type | Direction | Description |
|
||||
|------|-----------|-------------|
|
||||
| `join` | Client → SFU | Join room |
|
||||
| `offer` | Client ↔ SFU | SDP offer |
|
||||
| `answer` | Client ↔ SFU | SDP answer |
|
||||
| `ice-candidate` | Client ↔ SFU | ICE candidate |
|
||||
| `leave` | Client → SFU | Leave room |
|
||||
| `peer-joined` | SFU → Client | New peer notification |
|
||||
| `peer-left` | SFU → Client | Peer departure |
|
||||
| `turn-credentials` | SFU → Client | Initial TURN credentials |
|
||||
| `refresh-credentials` | SFU → Client | Refreshed credentials (at 80% TTL) |
|
||||
| `server-draining` | SFU → Client | SFU shutting down |
|
||||
|
||||
## Port Allocation
|
||||
|
||||
WebRTC uses a **separate port allocation system** from the core namespace ports:
|
||||
|
||||
| Service | Port Range | Protocol | Per Namespace |
|
||||
|---------|-----------|----------|---------------|
|
||||
| SFU signaling | 30000-30099 | TCP (WireGuard only) | 1 port |
|
||||
| SFU media (RTP) | 20000-29999 | UDP (WireGuard only) | 500 ports |
|
||||
| TURN listen | 3478 | UDP + TCP | fixed |
|
||||
| TURNS (TLS) | 5349 | TCP | fixed |
|
||||
| TURN relay | 49152-65535 | UDP | 800 ports |
|
||||
|
||||
## TURN Credential Protocol
|
||||
|
||||
- Credentials use HMAC-SHA1 with a per-namespace shared secret
|
||||
- Username format: `{expiry_unix}:{namespace}`
|
||||
- Password: `base64(HMAC-SHA1(shared_secret, username))`
|
||||
- Default TTL: 600 seconds (10 minutes)
|
||||
- SFU proactively sends `refresh-credentials` at 80% of TTL (8 minutes)
|
||||
- Clients should update ICE servers on receiving refresh
|
||||
|
||||
## TURNS TLS Certificate
|
||||
|
||||
TURNS (port 5349) uses TLS. Certificate provisioning:
|
||||
|
||||
1. **Let's Encrypt (primary)**: On TURN spawn, the TURN domain is added to the local Caddy instance's Caddyfile. Caddy provisions a Let's Encrypt cert via DNS-01 ACME challenge (using the orama DNS provider). TURN reads the cert from Caddy's storage.
|
||||
2. **Self-signed (fallback)**: If Caddy cert provisioning fails (timeout, Caddy not running), a self-signed cert is generated with the node's public IP as SAN.
|
||||
|
||||
Caddy auto-renews Let's Encrypt certs at ~60 days. TURN picks up renewed certs on restart.
|
||||
|
||||
## Monitoring
|
||||
|
||||
```bash
|
||||
# Check WebRTC status
|
||||
orama namespace webrtc-status --namespace myapp
|
||||
|
||||
# Monitor report includes SFU/TURN status
|
||||
orama monitor report --env devnet
|
||||
|
||||
# Inspector checks WebRTC health
|
||||
orama inspector --env devnet
|
||||
```
|
||||
|
||||
The monitoring report includes per-namespace `sfu_up` and `turn_up` fields. The inspector runs cross-node checks to verify SFU coverage (3 nodes) and TURN redundancy (2 nodes).
|
||||
|
||||
## Debugging
|
||||
|
||||
```bash
|
||||
# SFU logs
|
||||
journalctl -u orama-namespace-sfu@myapp -f
|
||||
|
||||
# TURN logs
|
||||
journalctl -u orama-namespace-turn@myapp -f
|
||||
|
||||
# Check service status
|
||||
systemctl status orama-namespace-sfu@myapp
|
||||
systemctl status orama-namespace-turn@myapp
|
||||
```
|
||||
|
||||
## Security Model
|
||||
|
||||
- **Forced relay**: `iceTransportPolicy: relay` enforced server-side. Clients cannot bypass TURN.
|
||||
- **HMAC credentials**: Per-namespace TURN shared secret. Credentials expire after 10 minutes.
|
||||
- **Namespace isolation**: Each namespace has its own TURN secret, port ranges, and rooms.
|
||||
- **Authentication required**: All WebRTC endpoints require API key or JWT (`X-API-Key` header, `Authorization: ApiKey`, or `Authorization: Bearer`).
|
||||
- **Room management**: Creating/closing rooms requires namespace ownership.
|
||||
- **SFU on WireGuard only**: SFU binds to 10.0.0.x, never 0.0.0.0. Only reachable via TURN relay.
|
||||
- **Permissions-Policy**: `camera=(self), microphone=(self)` — only same-origin can access media devices.
|
||||
|
||||
## Firewall
|
||||
|
||||
When WebRTC is enabled, the following ports are opened via UFW on TURN nodes:
|
||||
|
||||
| Port | Protocol | Purpose |
|
||||
|------|----------|---------|
|
||||
| 3478 | UDP | TURN standard |
|
||||
| 3478 | TCP | TURN TCP fallback (for clients behind UDP-blocking firewalls) |
|
||||
| 5349 | TCP | TURNS — TURN over TLS (encrypted, works through strict firewalls/DPI) |
|
||||
| 49152-65535 | UDP | TURN relay range (allocated per namespace) |
|
||||
|
||||
SFU ports are NOT opened in the firewall — they are WireGuard-internal only.
|
||||
|
||||
## Database Tables
|
||||
|
||||
| Table | Purpose |
|
||||
|-------|---------|
|
||||
| `namespace_webrtc_config` | Per-namespace WebRTC config (enabled, TURN secret, node counts) |
|
||||
| `webrtc_rooms` | Room-to-SFU-node affinity |
|
||||
| `webrtc_port_allocations` | SFU/TURN port tracking |
|
||||
|
||||
## Cold Boot Recovery
|
||||
|
||||
On node restart, the cluster state file (`cluster_state.json`) includes `has_sfu`, `has_turn`, and port allocation data. The restore process:
|
||||
|
||||
1. Core services restore first: RQLite → Olric → Gateway
|
||||
2. If `has_turn` is set: fetches TURN shared secret from DB, spawns TURN
|
||||
3. If `has_sfu` is set: fetches WebRTC config from DB, spawns SFU with TURN server list
|
||||
|
||||
If the DB is unavailable during restore, SFU/TURN restoration is skipped with a warning log. They will be restored on the next successful DB connection.
|
||||
@ -297,7 +297,7 @@ func GetRQLiteNodes() []string {
|
||||
// queryAPIKeyFromRQLite queries the SQLite database directly for an API key
|
||||
func queryAPIKeyFromRQLite() (string, error) {
|
||||
// 1. Check environment variable first
|
||||
if envKey := os.Getenv("DEBROS_API_KEY"); envKey != "" {
|
||||
if envKey := os.Getenv("ORAMA_API_KEY"); envKey != "" {
|
||||
return envKey, nil
|
||||
}
|
||||
|
||||
@ -424,7 +424,7 @@ func GetAPIKey() string {
|
||||
cacheMutex.RUnlock()
|
||||
|
||||
// 1. Check env var
|
||||
if envKey := os.Getenv("DEBROS_API_KEY"); envKey != "" {
|
||||
if envKey := os.Getenv("ORAMA_API_KEY"); envKey != "" {
|
||||
cacheMutex.Lock()
|
||||
apiKeyCache = envKey
|
||||
cacheMutex.Unlock()
|
||||
@ -57,16 +57,16 @@ func TestDomainRouting_BasicRouting(t *testing.T) {
|
||||
t.Logf("✓ Standard domain routing works: %s", domain)
|
||||
})
|
||||
|
||||
t.Run("Non-debros domain passes through", func(t *testing.T) {
|
||||
// Request with non-debros domain should not route to deployment
|
||||
t.Run("Non-orama domain passes through", func(t *testing.T) {
|
||||
// Request with non-orama domain should not route to deployment
|
||||
resp := e2e.TestDeploymentWithHostHeader(t, env, "example.com", "/")
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Should either return 404 or pass to default handler
|
||||
assert.NotEqual(t, http.StatusOK, resp.StatusCode,
|
||||
"Non-debros domain should not route to deployment")
|
||||
"Non-orama domain should not route to deployment")
|
||||
|
||||
t.Logf("✓ Non-debros domains correctly pass through (status: %d)", resp.StatusCode)
|
||||
t.Logf("✓ Non-orama domains correctly pass through (status: %d)", resp.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("API paths bypass domain routing", func(t *testing.T) {
|
||||
@ -118,7 +118,7 @@ func TestNetwork_ProxyAnonSuccess(t *testing.T) {
|
||||
Body: map[string]interface{}{
|
||||
"url": "https://httpbin.org/get",
|
||||
"method": "GET",
|
||||
"headers": map[string]string{"User-Agent": "DeBros-E2E-Test/1.0"},
|
||||
"headers": map[string]string{"User-Agent": "Orama-E2E-Test/1.0"},
|
||||
},
|
||||
}
|
||||
|
||||
@ -178,7 +178,7 @@ func TestNetwork_ProxyAnonPostRequest(t *testing.T) {
|
||||
Body: map[string]interface{}{
|
||||
"url": "https://httpbin.org/post",
|
||||
"method": "POST",
|
||||
"headers": map[string]string{"User-Agent": "DeBros-E2E-Test/1.0"},
|
||||
"headers": map[string]string{"User-Agent": "Orama-E2E-Test/1.0"},
|
||||
"body": "test_data",
|
||||
},
|
||||
}
|
||||
241
core/e2e/shared/webrtc_test.go
Normal file
241
core/e2e/shared/webrtc_test.go
Normal file
@ -0,0 +1,241 @@
|
||||
//go:build e2e
|
||||
|
||||
package shared_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
e2e "github.com/DeBrosOfficial/network/e2e"
|
||||
)
|
||||
|
||||
// turnCredentialsResponse is the expected response from the TURN credentials endpoint.
|
||||
type turnCredentialsResponse struct {
|
||||
URLs []string `json:"urls"`
|
||||
Username string `json:"username"`
|
||||
Credential string `json:"credential"`
|
||||
TTL int `json:"ttl"`
|
||||
}
|
||||
|
||||
// TestWebRTC_TURNCredentials_RequiresAuth verifies that the TURN credentials endpoint
|
||||
// rejects unauthenticated requests.
|
||||
func TestWebRTC_TURNCredentials_RequiresAuth(t *testing.T) {
|
||||
e2e.SkipIfMissingGateway(t)
|
||||
|
||||
gatewayURL := e2e.GetGatewayURL()
|
||||
client := e2e.NewHTTPClient(10 * time.Second)
|
||||
|
||||
req, err := http.NewRequest("POST", gatewayURL+"/v1/webrtc/turn/credentials", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusUnauthorized {
|
||||
t.Fatalf("expected 401 Unauthorized, got %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
// TestWebRTC_TURNCredentials_ValidResponse verifies that authenticated requests to the
|
||||
// TURN credentials endpoint return a valid credential structure.
|
||||
func TestWebRTC_TURNCredentials_ValidResponse(t *testing.T) {
|
||||
e2e.SkipIfMissingGateway(t)
|
||||
|
||||
gatewayURL := e2e.GetGatewayURL()
|
||||
apiKey := e2e.GetAPIKey()
|
||||
if apiKey == "" {
|
||||
t.Skip("no API key configured")
|
||||
}
|
||||
client := e2e.NewHTTPClient(10 * time.Second)
|
||||
|
||||
req, err := http.NewRequest("POST", gatewayURL+"/v1/webrtc/turn/credentials", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("expected 200 OK, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
var creds turnCredentialsResponse
|
||||
if err := json.NewDecoder(resp.Body).Decode(&creds); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if len(creds.URLs) == 0 {
|
||||
t.Fatal("expected at least one TURN URL")
|
||||
}
|
||||
if creds.Username == "" {
|
||||
t.Fatal("expected non-empty username")
|
||||
}
|
||||
if creds.Credential == "" {
|
||||
t.Fatal("expected non-empty credential")
|
||||
}
|
||||
if creds.TTL <= 0 {
|
||||
t.Fatalf("expected positive TTL, got %d", creds.TTL)
|
||||
}
|
||||
}
|
||||
|
||||
// TestWebRTC_Rooms_RequiresAuth verifies that the rooms endpoint rejects unauthenticated requests.
|
||||
func TestWebRTC_Rooms_RequiresAuth(t *testing.T) {
|
||||
e2e.SkipIfMissingGateway(t)
|
||||
|
||||
gatewayURL := e2e.GetGatewayURL()
|
||||
client := e2e.NewHTTPClient(10 * time.Second)
|
||||
|
||||
req, err := http.NewRequest("GET", gatewayURL+"/v1/webrtc/rooms", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusUnauthorized {
|
||||
t.Fatalf("expected 401 Unauthorized, got %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
// TestWebRTC_Signal_RequiresAuth verifies that the signaling WebSocket rejects
|
||||
// unauthenticated connections.
|
||||
func TestWebRTC_Signal_RequiresAuth(t *testing.T) {
|
||||
e2e.SkipIfMissingGateway(t)
|
||||
|
||||
gatewayURL := e2e.GetGatewayURL()
|
||||
client := e2e.NewHTTPClient(10 * time.Second)
|
||||
|
||||
// Use regular HTTP GET to the signal endpoint — without auth it should return 401
|
||||
// before WebSocket upgrade
|
||||
req, err := http.NewRequest("GET", gatewayURL+"/v1/webrtc/signal?room=test-room", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusUnauthorized {
|
||||
t.Fatalf("expected 401, got %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
// TestWebRTC_Rooms_CreateAndList verifies room creation and listing with proper auth.
|
||||
func TestWebRTC_Rooms_CreateAndList(t *testing.T) {
|
||||
e2e.SkipIfMissingGateway(t)
|
||||
|
||||
gatewayURL := e2e.GetGatewayURL()
|
||||
apiKey := e2e.GetAPIKey()
|
||||
if apiKey == "" {
|
||||
t.Skip("no API key configured")
|
||||
}
|
||||
client := e2e.NewHTTPClient(10 * time.Second)
|
||||
|
||||
roomID := e2e.GenerateUniqueID("e2e-webrtc-room")
|
||||
|
||||
// Create room
|
||||
createBody, _ := json.Marshal(map[string]string{"room_id": roomID})
|
||||
req, err := http.NewRequest("POST", gatewayURL+"/v1/webrtc/rooms", bytes.NewReader(createBody))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("create room failed: %v", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
|
||||
t.Fatalf("expected 200/201, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
// List rooms
|
||||
req, err = http.NewRequest("GET", gatewayURL+"/v1/webrtc/rooms", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||
|
||||
resp, err = client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("list rooms failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
// Clean up: delete room
|
||||
req, err = http.NewRequest("DELETE", gatewayURL+"/v1/webrtc/rooms?room_id="+roomID, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||
|
||||
resp2, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("delete room failed: %v", err)
|
||||
}
|
||||
resp2.Body.Close()
|
||||
}
|
||||
|
||||
// TestWebRTC_PermissionsPolicy verifies the Permissions-Policy header allows camera and microphone.
|
||||
func TestWebRTC_PermissionsPolicy(t *testing.T) {
|
||||
e2e.SkipIfMissingGateway(t)
|
||||
|
||||
gatewayURL := e2e.GetGatewayURL()
|
||||
apiKey := e2e.GetAPIKey()
|
||||
if apiKey == "" {
|
||||
t.Skip("no API key configured")
|
||||
}
|
||||
client := e2e.NewHTTPClient(10 * time.Second)
|
||||
|
||||
req, err := http.NewRequest("GET", gatewayURL+"/v1/webrtc/rooms", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
pp := resp.Header.Get("Permissions-Policy")
|
||||
if pp == "" {
|
||||
t.Skip("Permissions-Policy header not set")
|
||||
}
|
||||
|
||||
if !strings.Contains(pp, "camera=(self)") {
|
||||
t.Errorf("Permissions-Policy missing camera=(self), got: %s", pp)
|
||||
}
|
||||
if !strings.Contains(pp, "microphone=(self)") {
|
||||
t.Errorf("Permissions-Policy missing microphone=(self), got: %s", pp)
|
||||
}
|
||||
}
|
||||
@ -20,6 +20,10 @@ require (
|
||||
github.com/miekg/dns v1.1.70
|
||||
github.com/multiformats/go-multiaddr v0.16.0
|
||||
github.com/olric-data/olric v0.7.0
|
||||
github.com/pion/interceptor v0.1.40
|
||||
github.com/pion/rtcp v1.2.15
|
||||
github.com/pion/turn/v4 v4.0.2
|
||||
github.com/pion/webrtc/v4 v4.1.2
|
||||
github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8
|
||||
github.com/spf13/cobra v1.10.2
|
||||
github.com/stretchr/testify v1.11.1
|
||||
@ -123,11 +127,9 @@ require (
|
||||
github.com/pion/dtls/v2 v2.2.12 // indirect
|
||||
github.com/pion/dtls/v3 v3.0.6 // indirect
|
||||
github.com/pion/ice/v4 v4.0.10 // indirect
|
||||
github.com/pion/interceptor v0.1.40 // indirect
|
||||
github.com/pion/logging v0.2.3 // indirect
|
||||
github.com/pion/mdns/v2 v2.0.7 // indirect
|
||||
github.com/pion/randutil v0.1.0 // indirect
|
||||
github.com/pion/rtcp v1.2.15 // indirect
|
||||
github.com/pion/rtp v1.8.19 // indirect
|
||||
github.com/pion/sctp v1.8.39 // indirect
|
||||
github.com/pion/sdp/v3 v3.0.13 // indirect
|
||||
@ -136,8 +138,6 @@ require (
|
||||
github.com/pion/stun/v3 v3.0.0 // indirect
|
||||
github.com/pion/transport/v2 v2.2.10 // indirect
|
||||
github.com/pion/transport/v3 v3.0.7 // indirect
|
||||
github.com/pion/turn/v4 v4.0.2 // indirect
|
||||
github.com/pion/webrtc/v4 v4.1.2 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_golang v1.23.0 // indirect
|
||||
@ -1,4 +1,4 @@
|
||||
-- DeBros Gateway - Initial database schema (SQLite/RQLite dialect)
|
||||
-- Orama Gateway - Initial database schema (SQLite/RQLite dialect)
|
||||
-- This file scaffolds core tables used by the HTTP gateway for auth, observability, and namespacing.
|
||||
-- Apply via your migration tooling or manual execution in RQLite.
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
-- DeBros Gateway - Core schema (Phase 2)
|
||||
-- Orama Gateway - Core schema (Phase 2)
|
||||
-- Adds apps, nonces, subscriptions, refresh_tokens, audit_events, namespace_ownership
|
||||
-- SQLite/RQLite dialect
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
-- DeBros Gateway - Wallet to API Key linkage (Phase 3)
|
||||
-- Orama Gateway - Wallet to API Key linkage (Phase 3)
|
||||
-- Ensures one API key per (namespace, wallet) and enables lookup
|
||||
|
||||
BEGIN;
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user