mirror of
https://github.com/DeBrosOfficial/orama.git
synced 2026-05-01 05:04:13 +00:00
feat(cli): add node management and rollout commands
- implement `nodes`, `rollout`, `ssh`, and `status` commands - add `migrate-conf` utility to register existing nodes with the gateway - update database schema to support operator wallet tracking for nodes
This commit is contained in:
parent
2017fcb432
commit
8d7d1c6621
@ -18,7 +18,11 @@ import (
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/cmd/monitorcmd"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/cmd/namespacecmd"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/cmd/node"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/cmd/nodescmd"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/cmd/rolloutcmd"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/cmd/sandboxcmd"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/cmd/sshcmd"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/cmd/statuscmd"
|
||||
)
|
||||
|
||||
// version metadata populated via -ldflags at build time
|
||||
@ -91,6 +95,12 @@ and interacting with the Orama distributed network.`,
|
||||
// Sandbox command (ephemeral Hetzner Cloud clusters)
|
||||
rootCmd.AddCommand(sandboxcmd.Cmd)
|
||||
|
||||
// Unified node management commands
|
||||
rootCmd.AddCommand(nodescmd.Cmd)
|
||||
rootCmd.AddCommand(rolloutcmd.Cmd)
|
||||
rootCmd.AddCommand(statuscmd.Cmd)
|
||||
rootCmd.AddCommand(sshcmd.Cmd)
|
||||
|
||||
return rootCmd
|
||||
}
|
||||
|
||||
|
||||
14
core/migrations/020_node_operators.sql
Normal file
14
core/migrations/020_node_operators.sql
Normal file
@ -0,0 +1,14 @@
|
||||
-- Add operator wallet tracking to nodes.
-- operator_wallet links nodes to the wallet that provisioned them.

-- NOTE(review): ALTER TABLE ADD COLUMN is not idempotent — re-running this
-- file will fail; assumes the migration runner applies each file exactly once.
ALTER TABLE dns_nodes ADD COLUMN operator_wallet TEXT;
ALTER TABLE dns_nodes ADD COLUMN environment TEXT DEFAULT 'production';
ALTER TABLE dns_nodes ADD COLUMN ssh_user TEXT DEFAULT 'root';
ALTER TABLE dns_nodes ADD COLUMN role TEXT DEFAULT 'node';

-- Indexes for the common lookups: "nodes owned by wallet X" and
-- "nodes in environment Y".
CREATE INDEX IF NOT EXISTS idx_dns_nodes_operator ON dns_nodes(operator_wallet);
CREATE INDEX IF NOT EXISTS idx_dns_nodes_environment ON dns_nodes(environment);

-- Propagate ownership to related tables so peers and invite tokens can be
-- attributed to an operator as well.
ALTER TABLE wireguard_peers ADD COLUMN operator_wallet TEXT;

ALTER TABLE invite_tokens ADD COLUMN operator_wallet TEXT;
|
||||
116
core/pkg/cli/cmd/node/migrate_conf.go
Normal file
116
core/pkg/cli/cmd/node/migrate_conf.go
Normal file
@ -0,0 +1,116 @@
|
||||
package node
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/auth"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/remotessh"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// migrateConfEnv holds the --env flag value for migrate-conf; empty means
// "use the active environment".
var migrateConfEnv string
|
||||
|
||||
// migrateConfCmd is a one-time migration helper: it reads every node for an
// environment from the legacy nodes.conf file and POSTs each one to the
// gateway's operator/node/register endpoint, associating it with the
// caller's stored API key (wallet). Per-node failures are reported to
// stderr but do not abort the remaining registrations.
var migrateConfCmd = &cobra.Command{
	Use:   "migrate-conf",
	Short: "Register nodes.conf nodes with your wallet",
	Long: `One-time migration: reads nodes from nodes.conf for an environment
and registers each with your wallet via the gateway API. After migration,
these nodes will appear in 'orama nodes' output.

Requires: orama auth login (for API authentication)`,
	RunE: func(cmd *cobra.Command, args []string) error {
		// Default to the active environment when --env is not given.
		env := migrateConfEnv
		if env == "" {
			active, err := cli.GetActiveEnvironment()
			if err != nil {
				return fmt.Errorf("failed to get active environment: %w", err)
			}
			env = active.Name
		}

		// Load nodes from nodes.conf
		nodes, err := remotessh.LoadEnvNodes(env)
		if err != nil {
			return fmt.Errorf("failed to load nodes.conf: %w", err)
		}

		// Get gateway URL
		envConfig, err := cli.GetEnvironmentByName(env)
		if err != nil {
			return fmt.Errorf("environment %q not configured: %w", env, err)
		}

		// Load stored credentials
		store, err := auth.LoadEnhancedCredentials()
		if err != nil {
			return fmt.Errorf("failed to load credentials: %w", err)
		}
		creds := store.GetDefaultCredential(envConfig.GatewayURL)
		if creds == nil || creds.APIKey == "" {
			return fmt.Errorf("no credentials for %s — run 'orama auth login' first", envConfig.GatewayURL)
		}

		// Nothing to migrate: succeed quietly rather than erroring.
		if len(nodes) == 0 {
			fmt.Printf("No nodes found for environment %q in nodes.conf\n", env)
			return nil
		}

		fmt.Printf("Migrating %d node(s) from nodes.conf to %s...\n\n", len(nodes), env)

		httpClient := &http.Client{Timeout: 10 * time.Second}
		registered := 0

		// Best-effort loop: each node is registered independently.
		for _, n := range nodes {
			body := map[string]string{
				"ip_address":  n.Host,
				"environment": env,
				"role":        n.Role,
				"ssh_user":    n.User,
			}
			// map[string]string cannot fail to marshal; error safely ignored.
			payload, _ := json.Marshal(body)

			req, err := http.NewRequest(http.MethodPost,
				envConfig.GatewayURL+"/v1/operator/node/register",
				bytes.NewReader(payload))
			if err != nil {
				fmt.Fprintf(cmd.ErrOrStderr(), " %s: failed to create request: %v\n", n.Host, err)
				continue
			}
			req.Header.Set("Content-Type", "application/json")
			req.Header.Set("X-API-Key", creds.APIKey)

			resp, err := httpClient.Do(req)
			if err != nil {
				fmt.Fprintf(cmd.ErrOrStderr(), " %s: request failed: %v\n", n.Host, err)
				continue
			}
			// Read a bounded amount of the body for error reporting, then
			// close promptly — no defer, since we are inside a loop.
			respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 4096))
			resp.Body.Close()

			if resp.StatusCode == http.StatusOK {
				fmt.Printf(" %s (%s): registered\n", n.Host, n.Role)
				registered++
			} else if resp.StatusCode == http.StatusNotFound {
				fmt.Printf(" %s: not found in cluster (node may not have joined yet)\n", n.Host)
			} else {
				fmt.Fprintf(cmd.ErrOrStderr(), " %s: HTTP %d: %s\n", n.Host, resp.StatusCode, string(respBody))
			}
		}

		fmt.Printf("\n%d/%d nodes registered with your wallet\n", registered, len(nodes))
		if registered < len(nodes) {
			fmt.Println("Nodes not found may need to join the cluster first, then re-run this command.")
		}
		return nil
	},
}
|
||||
|
||||
// init wires the --env flag onto the migrate-conf subcommand.
func init() {
	migrateConfCmd.Flags().StringVar(&migrateConfEnv, "env", "", "Environment to migrate (default: active)")
}
|
||||
@ -32,4 +32,5 @@ func init() {
|
||||
Cmd.AddCommand(recoverRaftCmd)
|
||||
Cmd.AddCommand(enrollCmd)
|
||||
Cmd.AddCommand(unlockCmd)
|
||||
Cmd.AddCommand(migrateConfCmd)
|
||||
}
|
||||
|
||||
58
core/pkg/cli/cmd/nodescmd/nodes.go
Normal file
58
core/pkg/cli/cmd/nodescmd/nodes.go
Normal file
@ -0,0 +1,58 @@
|
||||
package nodescmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/cli"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/noderesolver"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// envFlag holds the --env flag; empty selects the active environment.
var envFlag string
|
||||
|
||||
// Cmd is the top-level "nodes" command — lists operator's nodes.
|
||||
var Cmd = &cobra.Command{
|
||||
Use: "nodes",
|
||||
Short: "List your nodes across environments",
|
||||
Long: `List all nodes owned by your wallet. Queries the network API
|
||||
with your stored credentials, falling back to nodes.conf.
|
||||
|
||||
Requires: orama auth login (for API-based resolution)`,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
env := envFlag
|
||||
if env == "" {
|
||||
active, err := cli.GetActiveEnvironment()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get active environment: %w", err)
|
||||
}
|
||||
env = active.Name
|
||||
}
|
||||
|
||||
nodes, err := noderesolver.ResolveNodes(env)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to resolve nodes: %w", err)
|
||||
}
|
||||
|
||||
if len(nodes) == 0 {
|
||||
fmt.Printf("No nodes found for environment %q\n", env)
|
||||
fmt.Println("Register nodes with: orama node setup <ip> --env", env)
|
||||
return nil
|
||||
}
|
||||
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 4, 2, ' ', 0)
|
||||
fmt.Fprintf(w, "IP\tROLE\tUSER\tENVIRONMENT\n")
|
||||
for _, n := range nodes {
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", n.Host, n.Role, n.User, n.Environment)
|
||||
}
|
||||
w.Flush()
|
||||
|
||||
fmt.Printf("\n%d node(s) in %s\n", len(nodes), env)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// init wires the --env flag onto the nodes command.
func init() {
	Cmd.Flags().StringVar(&envFlag, "env", "", "Filter by environment (default: active environment)")
}
|
||||
234
core/pkg/cli/cmd/rolloutcmd/rollout.go
Normal file
234
core/pkg/cli/cmd/rolloutcmd/rollout.go
Normal file
@ -0,0 +1,234 @@
|
||||
package rolloutcmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/cli"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/noderesolver"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/remotessh"
|
||||
"github.com/DeBrosOfficial/network/pkg/inspector"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Command-line flags for "rollout".
var (
	envFlag  string // --env: target environment (empty = active)
	delaySec int    // --delay: seconds to pause between node upgrades
)
|
||||
|
||||
// Cmd is the top-level "rollout" command — build + push + rolling upgrade.
//
// Flow: resolve nodes → prepare SSH keys → locate the newest /tmp archive →
// upload+extract it on every node → upgrade followers one at a time (with a
// configurable pause between them) → upgrade the RQLite leader last.
var Cmd = &cobra.Command{
	Use:   "rollout",
	Short: "Rolling upgrade of your nodes",
	Long: `Build, push, and perform a rolling upgrade on all your nodes in an environment.
Upgrades followers first, leader last, with health checks between each node.`,
	RunE: func(cmd *cobra.Command, args []string) error {
		// Default to the active environment when --env is not set.
		env := envFlag
		if env == "" {
			active, err := cli.GetActiveEnvironment()
			if err != nil {
				return fmt.Errorf("failed to get active environment: %w", err)
			}
			env = active.Name
		}

		nodes, err := noderesolver.ResolveNodes(env)
		if err != nil {
			return fmt.Errorf("failed to resolve nodes: %w", err)
		}
		if len(nodes) == 0 {
			return fmt.Errorf("no nodes found for environment %q", env)
		}

		// Materialize SSH keys for every node; cleanup removes the
		// temporary key files when the rollout finishes.
		cleanup, err := remotessh.PrepareNodeKeys(nodes)
		if err != nil {
			return fmt.Errorf("failed to prepare SSH keys: %w", err)
		}
		defer cleanup()

		fmt.Printf("Rolling out to %d node(s) in %s\n\n", len(nodes), env)

		// Step 1: Find archive
		archivePath := findNewestArchive()
		if archivePath == "" {
			return fmt.Errorf("no binary archive found in /tmp/ (run `orama build` first)")
		}
		info, err := os.Stat(archivePath)
		if err != nil {
			return fmt.Errorf("stat archive %s: %w", archivePath, err)
		}
		fmt.Printf("Archive: %s (%s)\n\n", filepath.Base(archivePath), formatBytes(info.Size()))

		// Step 2: Push archive to all nodes
		fmt.Println("Pushing archive to all nodes...")
		if err := pushArchive(nodes, archivePath); err != nil {
			return err
		}

		// Step 3: Rolling upgrade — followers first, leader last
		fmt.Println("\nRolling upgrade (followers first, leader last)...")

		leaderIdx := findLeaderIndex(nodes)
		if leaderIdx < 0 {
			// Leader detection is best-effort; without it we fall back to
			// upgrading in list order.
			fmt.Fprintf(os.Stderr, " Warning: could not detect RQLite leader, upgrading in order\n")
		}

		// Determine SSH options based on environment
		var sshOpts []remotessh.SSHOption
		if env == "sandbox" {
			sshOpts = append(sshOpts, remotessh.WithNoHostKeyCheck())
		}

		delay := time.Duration(delaySec) * time.Second

		// Upgrade non-leaders first
		count := 0
		for i := range nodes {
			if i == leaderIdx {
				continue
			}
			count++
			if err := upgradeNode(nodes[i], count, len(nodes), sshOpts); err != nil {
				return err
			}
			// Pause between nodes (but not after the final one) to let the
			// cluster settle before the next member goes down.
			if count < len(nodes) {
				fmt.Printf(" Waiting %s before next node...\n", delay)
				time.Sleep(delay)
			}
		}

		// Upgrade leader last
		if leaderIdx >= 0 {
			count++
			if err := upgradeNode(nodes[leaderIdx], count, len(nodes), sshOpts); err != nil {
				return err
			}
		}

		fmt.Printf("\nRollout complete for %s (%d nodes)\n", env, len(nodes))
		return nil
	},
}
|
||||
|
||||
// init registers the rollout command's flags.
func init() {
	Cmd.Flags().StringVar(&envFlag, "env", "", "Environment (default: active)")
	Cmd.Flags().IntVar(&delaySec, "delay", 30, "Seconds to wait between node upgrades")
}
|
||||
|
||||
// findLeaderIndex returns the index of the RQLite leader, or -1 if unknown.
|
||||
func findLeaderIndex(nodes []inspector.Node) int {
|
||||
for i, n := range nodes {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
result := inspector.RunSSH(ctx, n, "curl -sf http://localhost:5001/status 2>/dev/null | grep -o '\"state\":\"[^\"]*\"'")
|
||||
cancel()
|
||||
if result.OK() && strings.Contains(result.Stdout, "Leader") {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// upgradeNode performs orama node upgrade --restart on a single node.
|
||||
func upgradeNode(node inspector.Node, current, total int, sshOpts []remotessh.SSHOption) error {
|
||||
fmt.Printf(" [%d/%d] Upgrading %s...\n", current, total, node.Host)
|
||||
|
||||
// Pre-replace orama CLI binary to avoid ETXTBSY
|
||||
preReplace := "rm -f /usr/local/bin/orama && cp /opt/orama/bin/orama /usr/local/bin/orama"
|
||||
if err := remotessh.RunSSHStreaming(node, preReplace, sshOpts...); err != nil {
|
||||
return fmt.Errorf("pre-replace orama binary on %s: %w", node.Host, err)
|
||||
}
|
||||
|
||||
if err := remotessh.RunSSHStreaming(node, "orama node upgrade --restart", sshOpts...); err != nil {
|
||||
return fmt.Errorf("upgrade %s: %w", node.Host, err)
|
||||
}
|
||||
|
||||
// Wait for health
|
||||
fmt.Printf(" Checking health...")
|
||||
if err := waitForHealth(node, 2*time.Minute); err != nil {
|
||||
fmt.Printf(" WARN: %v\n", err)
|
||||
} else {
|
||||
fmt.Println(" OK")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// pushArchive uploads the archive to the first node, then fans out server-to-server.
|
||||
func pushArchive(nodes []inspector.Node, archivePath string) error {
|
||||
if len(nodes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
remotePath := "/tmp/" + filepath.Base(archivePath)
|
||||
|
||||
// Upload to first node
|
||||
hub := nodes[0]
|
||||
fmt.Printf(" Uploading to %s...\n", hub.Host)
|
||||
if err := remotessh.UploadFile(hub, archivePath, remotePath); err != nil {
|
||||
return fmt.Errorf("upload to %s: %w", hub.Host, err)
|
||||
}
|
||||
|
||||
// Extract on hub
|
||||
extractCmd := fmt.Sprintf("mkdir -p /opt/orama && tar xzf %s -C /opt/orama && rm -f %s", remotePath, remotePath)
|
||||
if err := remotessh.RunSSHStreaming(hub, extractCmd); err != nil {
|
||||
return fmt.Errorf("extract on %s: %w", hub.Host, err)
|
||||
}
|
||||
|
||||
// For remaining nodes, upload directly and extract
|
||||
for _, n := range nodes[1:] {
|
||||
fmt.Printf(" Uploading to %s...\n", n.Host)
|
||||
if err := remotessh.UploadFile(n, archivePath, remotePath); err != nil {
|
||||
return fmt.Errorf("upload to %s: %w", n.Host, err)
|
||||
}
|
||||
if err := remotessh.RunSSHStreaming(n, extractCmd); err != nil {
|
||||
return fmt.Errorf("extract on %s: %w", n.Host, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// waitForHealth polls RQLite health on a node until it reaches Leader or Follower state.
|
||||
func waitForHealth(node inspector.Node, timeout time.Duration) error {
|
||||
deadline := time.Now().Add(timeout)
|
||||
for time.Now().Before(deadline) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
result := inspector.RunSSH(ctx, node, "curl -sf http://localhost:5001/status 2>/dev/null | grep -o '\"state\":\"[^\"]*\"'")
|
||||
cancel()
|
||||
if result.OK() && (strings.Contains(result.Stdout, "Leader") || strings.Contains(result.Stdout, "Follower")) {
|
||||
return nil
|
||||
}
|
||||
time.Sleep(3 * time.Second)
|
||||
}
|
||||
return fmt.Errorf("timed out waiting for healthy state on %s", node.Host)
|
||||
}
|
||||
|
||||
// findNewestArchive finds the newest orama binary archive in /tmp/.
|
||||
func findNewestArchive() string {
|
||||
matches, err := filepath.Glob("/tmp/orama-*-linux-*.tar.gz")
|
||||
if err != nil || len(matches) == 0 {
|
||||
return ""
|
||||
}
|
||||
sort.Slice(matches, func(i, j int) bool {
|
||||
fi, _ := os.Stat(matches[i])
|
||||
fj, _ := os.Stat(matches[j])
|
||||
if fi == nil || fj == nil {
|
||||
return false
|
||||
}
|
||||
return fi.ModTime().After(fj.ModTime())
|
||||
})
|
||||
return matches[0]
|
||||
}
|
||||
|
||||
// formatBytes renders a byte count for human display: "N B" below 1 KiB,
// whole-number "N KB" below 1 MiB, and one-decimal "X.Y MB" otherwise.
//
// Fix: the original rendered any size under 1 KiB as "0 KB"; a dedicated
// bytes branch now reports the exact count.
func formatBytes(b int64) string {
	const (
		kb = 1024
		mb = 1024 * 1024
	)
	switch {
	case b >= mb:
		return fmt.Sprintf("%.1f MB", float64(b)/float64(mb))
	case b >= kb:
		return fmt.Sprintf("%d KB", b/kb)
	default:
		return fmt.Sprintf("%d B", b)
	}
}
|
||||
87
core/pkg/cli/cmd/sshcmd/ssh.go
Normal file
87
core/pkg/cli/cmd/sshcmd/ssh.go
Normal file
@ -0,0 +1,87 @@
|
||||
package sshcmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/cli"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/noderesolver"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/remotessh"
|
||||
"github.com/DeBrosOfficial/network/pkg/inspector"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// envFlag holds the --env flag; empty selects the active environment.
var envFlag string
|
||||
|
||||
// Cmd is the top-level "ssh" command — SSH into any node by IP or hostname.
//
// The target is first matched against the resolved node list (exact Host
// match); unmatched targets fall back to a direct SSH attempt as root.
var Cmd = &cobra.Command{
	Use:   "ssh <ip-or-hostname>",
	Short: "SSH into a node",
	Long: `SSH into a node by IP address or hostname.
Resolves the SSH key from rootwallet automatically.`,
	Args: cobra.ExactArgs(1),
	RunE: func(cmd *cobra.Command, args []string) error {
		target := args[0]

		// Default to the active environment when --env is not set.
		env := envFlag
		if env == "" {
			active, err := cli.GetActiveEnvironment()
			if err != nil {
				return fmt.Errorf("failed to get active environment: %w", err)
			}
			env = active.Name
		}

		// Resolve nodes to find the target
		nodes, err := noderesolver.ResolveNodes(env)
		if err != nil {
			return fmt.Errorf("failed to resolve nodes: %w", err)
		}

		// Match by IP
		for _, n := range nodes {
			if n.Host == target {
				return sshInto(n)
			}
		}

		// Not found — try direct SSH with default vault target.
		// Assumes root login; the vault target mirrors "<host>/<user>".
		fmt.Printf("Node %q not found in %s nodes, attempting direct SSH...\n", target, env)
		return sshInto(inspector.Node{
			Host:        target,
			User:        "root",
			VaultTarget: target + "/root",
		})
	},
}
|
||||
|
||||
// init wires the --env flag onto the ssh command.
func init() {
	Cmd.Flags().StringVar(&envFlag, "env", "", "Environment to search (default: active)")
}
|
||||
|
||||
func sshInto(node inspector.Node) error {
|
||||
nodes := []inspector.Node{node}
|
||||
cleanup, err := remotessh.PrepareNodeKeys(nodes)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to resolve SSH key: %w", err)
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
keyPath := nodes[0].SSHKey
|
||||
|
||||
sshBin, err := exec.LookPath("ssh")
|
||||
if err != nil {
|
||||
return fmt.Errorf("ssh not found in PATH: %w", err)
|
||||
}
|
||||
|
||||
sshCmd := exec.Command(sshBin,
|
||||
"-i", keyPath,
|
||||
"-o", "StrictHostKeyChecking=accept-new",
|
||||
fmt.Sprintf("%s@%s", node.User, node.Host),
|
||||
)
|
||||
sshCmd.Stdin = os.Stdin
|
||||
sshCmd.Stdout = os.Stdout
|
||||
sshCmd.Stderr = os.Stderr
|
||||
return sshCmd.Run()
|
||||
}
|
||||
143
core/pkg/cli/cmd/statuscmd/status.go
Normal file
143
core/pkg/cli/cmd/statuscmd/status.go
Normal file
@ -0,0 +1,143 @@
|
||||
package statuscmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/cli"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/noderesolver"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/remotessh"
|
||||
"github.com/DeBrosOfficial/network/pkg/inspector"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Command-line flags for "status".
var (
	envFlag  string // --env: target environment (empty = active)
	jsonFlag bool   // --json: emit machine-readable JSON instead of a table
)
|
||||
|
||||
// Cmd is the top-level "status" command — health check for operator's nodes.
//
// All nodes are probed concurrently over SSH (30s budget each); a node is
// "healthy" when its gateway is responsive and RQLite reports a settled
// raft state (Leader or Follower).
var Cmd = &cobra.Command{
	Use:   "status",
	Short: "Show health status of your nodes",
	Long: `Check the health of all your nodes in an environment.
SSHes into each node and runs orama node report to collect health data.`,
	RunE: func(cmd *cobra.Command, args []string) error {
		// Default to the active environment when --env is not set.
		env := envFlag
		if env == "" {
			active, err := cli.GetActiveEnvironment()
			if err != nil {
				return fmt.Errorf("failed to get active environment: %w", err)
			}
			env = active.Name
		}

		nodes, err := noderesolver.ResolveNodes(env)
		if err != nil {
			return fmt.Errorf("failed to resolve nodes: %w", err)
		}

		if len(nodes) == 0 {
			fmt.Printf("No nodes found for environment %q\n", env)
			return nil
		}

		// Materialize SSH keys; cleanup removes the temp key files.
		cleanup, err := remotessh.PrepareNodeKeys(nodes)
		if err != nil {
			return fmt.Errorf("failed to prepare SSH keys: %w", err)
		}
		defer cleanup()

		fmt.Printf("Checking %d node(s) in %s...\n\n", len(nodes), env)

		// nodeResult is the per-node outcome and the --json output shape.
		type nodeResult struct {
			Host   string `json:"host"`
			Role   string `json:"role"`
			Status string `json:"status"`
			Error  string `json:"error,omitempty"`
		}

		// Each goroutine writes only its own index of results, so no
		// additional synchronization beyond the WaitGroup is needed.
		results := make([]nodeResult, len(nodes))
		var wg sync.WaitGroup

		for i, n := range nodes {
			wg.Add(1)
			go func(idx int, node inspector.Node) {
				defer wg.Done()

				ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
				defer cancel()

				result := inspector.RunSSH(ctx, node, "sudo orama node report --json")
				nr := nodeResult{Host: node.Host, Role: node.Role}

				if !result.OK() {
					nr.Status = "unreachable"
					nr.Error = fmt.Sprintf("SSH failed (exit %d)", result.ExitCode)
					if result.Stderr != "" {
						// Prefer stderr detail, truncated for table display.
						nr.Error = result.Stderr
						if len(nr.Error) > 100 {
							nr.Error = nr.Error[:100] + "..."
						}
					}
					results[idx] = nr
					return
				}

				// Decode only the fields needed for the health verdict.
				var report struct {
					Gateway struct {
						Responsive bool `json:"responsive"`
					} `json:"gateway"`
					RQLite struct {
						RaftState string `json:"raft_state"`
					} `json:"rqlite"`
				}
				if err := json.Unmarshal([]byte(result.Stdout), &report); err != nil {
					nr.Status = "unknown"
					nr.Error = "failed to parse report"
					results[idx] = nr
					return
				}

				// Healthy = gateway responding AND RQLite in a settled raft state.
				if report.Gateway.Responsive && (report.RQLite.RaftState == "Leader" || report.RQLite.RaftState == "Follower") {
					nr.Status = "healthy"
				} else {
					nr.Status = "degraded"
				}
				results[idx] = nr
			}(i, n)
		}
		wg.Wait()

		if jsonFlag {
			enc := json.NewEncoder(os.Stdout)
			enc.SetIndent("", " ")
			return enc.Encode(results)
		}

		w := tabwriter.NewWriter(os.Stdout, 0, 4, 2, ' ', 0)
		fmt.Fprintf(w, "IP\tROLE\tSTATUS\tDETAILS\n")
		healthy := 0
		for _, r := range results {
			details := r.Error
			if r.Status == "healthy" {
				healthy++
			}
			fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", r.Host, r.Role, r.Status, details)
		}
		w.Flush()

		fmt.Printf("\n%d/%d nodes healthy\n", healthy, len(results))
		return nil
	},
}
|
||||
|
||||
// init registers the status command's flags.
func init() {
	Cmd.Flags().StringVar(&envFlag, "env", "", "Environment (default: active)")
	Cmd.Flags().BoolVar(&jsonFlag, "json", false, "Output as JSON")
}
|
||||
156
core/pkg/cli/noderesolver/resolver.go
Normal file
156
core/pkg/cli/noderesolver/resolver.go
Normal file
@ -0,0 +1,156 @@
|
||||
// Package noderesolver provides unified node discovery for the orama CLI.
|
||||
//
|
||||
// It resolves operator-owned nodes by querying the network's gateway API
|
||||
// (primary) or falling back to the legacy nodes.conf file.
|
||||
package noderesolver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/auth"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli"
|
||||
"github.com/DeBrosOfficial/network/pkg/cli/remotessh"
|
||||
"github.com/DeBrosOfficial/network/pkg/inspector"
|
||||
)
|
||||
|
||||
// httpClient is the shared HTTP client for API calls; its 10s timeout
// bounds every gateway request made by this package.
var httpClient = &http.Client{Timeout: 10 * time.Second}
|
||||
|
||||
// ResolveNodes returns the operator's nodes for a given environment.
|
||||
// It first tries the network API (GET /v1/operator/nodes), then falls
|
||||
// back to nodes.conf if the API is unreachable or returns no results.
|
||||
func ResolveNodes(env string) ([]inspector.Node, error) {
|
||||
nodes, err := resolveFromNetwork(env)
|
||||
if err == nil && len(nodes) > 0 {
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
// Fallback to nodes.conf
|
||||
confNodes, confErr := remotessh.LoadEnvNodes(env)
|
||||
if confErr != nil {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("network API: %w; nodes.conf: %v", err, confErr)
|
||||
}
|
||||
return nil, confErr
|
||||
}
|
||||
return confNodes, nil
|
||||
}
|
||||
|
||||
// ResolveNodesNetworkOnly queries only the network API without nodes.conf fallback.
// Useful when the caller must distinguish "registered with the gateway" from
// "merely listed in the local nodes.conf".
func ResolveNodesNetworkOnly(env string) ([]inspector.Node, error) {
	return resolveFromNetwork(env)
}
|
||||
|
||||
// resolveFromNetwork queries the gateway API for operator-owned nodes.
|
||||
func resolveFromNetwork(env string) ([]inspector.Node, error) {
|
||||
// 1. Get gateway URL for the environment
|
||||
gatewayURL, err := gatewayURLForEnv(env)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to resolve gateway URL: %w", err)
|
||||
}
|
||||
|
||||
// 2. Load stored credentials for this gateway
|
||||
apiKey, err := loadAPIKey(gatewayURL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("no credentials for %s: %w (run 'orama auth login' first)", gatewayURL, err)
|
||||
}
|
||||
|
||||
return resolveFromNetworkWithURL(gatewayURL, apiKey, env)
|
||||
}
|
||||
|
||||
// resolveFromNetworkWithURL queries a specific gateway URL with an API key
// and maps the JSON response into inspector.Node values.
//
// NOTE(review): the previous comment said "Exported for testing", but this
// function is unexported — it is exercised directly by tests in this
// package, which point gatewayURL at an httptest server.
func resolveFromNetworkWithURL(gatewayURL, apiKey, env string) ([]inspector.Node, error) {
	endpoint := fmt.Sprintf("%s/v1/operator/nodes?env=%s", gatewayURL, url.QueryEscape(env))
	req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, endpoint, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("X-API-Key", apiKey)

	resp, err := httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to reach gateway: %w", err)
	}
	defer resp.Body.Close()

	// Cap the read at 1 MiB to bound memory on a misbehaving gateway.
	body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20))
	if err != nil {
		return nil, fmt.Errorf("failed to read response: %w", err)
	}

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("gateway returned HTTP %d: %s", resp.StatusCode, string(body))
	}

	var result struct {
		Nodes []struct {
			ID          string `json:"id"`
			IPAddress   string `json:"ip_address"`
			InternalIP  string `json:"internal_ip"`
			Environment string `json:"environment"`
			Role        string `json:"role"`
			SSHUser     string `json:"ssh_user"`
			Status      string `json:"status"`
		} `json:"nodes"`
	}
	if err := json.Unmarshal(body, &result); err != nil {
		return nil, fmt.Errorf("failed to parse response: %w", err)
	}

	nodes := make([]inspector.Node, 0, len(result.Nodes))
	for _, n := range result.Nodes {
		// Default the SSH user to root when the gateway omits it; the
		// vault target mirrors "<host>/<user>".
		user := n.SSHUser
		if user == "" {
			user = "root"
		}
		nodes = append(nodes, inspector.Node{
			Environment: n.Environment,
			User:        user,
			Host:        n.IPAddress,
			Role:        n.Role,
			VaultTarget: fmt.Sprintf("%s/%s", n.IPAddress, user),
		})
	}

	return nodes, nil
}
|
||||
|
||||
// gatewayURLForEnv returns the gateway URL for a given environment name.
|
||||
// If env is empty, uses the active environment.
|
||||
func gatewayURLForEnv(env string) (string, error) {
|
||||
if env == "" {
|
||||
e, err := cli.GetActiveEnvironment()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return e.GatewayURL, nil
|
||||
}
|
||||
|
||||
e, err := cli.GetEnvironmentByName(env)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return e.GatewayURL, nil
|
||||
}
|
||||
|
||||
// loadAPIKey loads the stored API key for a gateway URL.
|
||||
func loadAPIKey(gatewayURL string) (string, error) {
|
||||
store, err := auth.LoadEnhancedCredentials()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to load credentials: %w", err)
|
||||
}
|
||||
|
||||
creds := store.GetDefaultCredential(gatewayURL)
|
||||
if creds == nil || creds.APIKey == "" {
|
||||
return "", fmt.Errorf("no credentials found for %s", gatewayURL)
|
||||
}
|
||||
|
||||
return creds.APIKey, nil
|
||||
}
|
||||
152
core/pkg/cli/noderesolver/resolver_test.go
Normal file
152
core/pkg/cli/noderesolver/resolver_test.go
Normal file
@ -0,0 +1,152 @@
|
||||
package noderesolver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestGatewayURLForEnv_knownEnv resolves a named environment to a URL.
//
// NOTE(review): this depends on a "devnet" environment existing in the
// CLI's environment configuration on the machine running the tests —
// confirm CI provisions it, otherwise the test is environment-sensitive.
func TestGatewayURLForEnv_knownEnv(t *testing.T) {
	url, err := gatewayURLForEnv("devnet")
	if err != nil {
		t.Fatalf("gatewayURLForEnv(devnet): %v", err)
	}
	if url == "" {
		t.Error("expected non-empty gateway URL for devnet")
	}
}
|
||||
|
||||
// TestGatewayURLForEnv_unknownEnv verifies that an unknown environment
// name yields an error rather than an empty URL.
func TestGatewayURLForEnv_unknownEnv(t *testing.T) {
	_, err := gatewayURLForEnv("nonexistent")
	if err == nil {
		t.Error("expected error for unknown environment")
	}
}
|
||||
|
||||
// TestResolveFromMockServer_happyPath exercises the full decode path: a
// mock gateway returns two nodes and the resolver must map them onto
// inspector.Node (host, role, user, vault target, environment).
func TestResolveFromMockServer_happyPath(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// The resolver must hit the operator nodes endpoint...
		if r.URL.Path != "/v1/operator/nodes" {
			http.Error(w, "not found", http.StatusNotFound)
			return
		}
		// ...and authenticate via the X-API-Key header.
		if r.Header.Get("X-API-Key") != "test-key" {
			http.Error(w, "unauthorized", http.StatusUnauthorized)
			return
		}

		// Echo the requested env back so the assertions below also prove
		// it was forwarded as a query parameter.
		env := r.URL.Query().Get("env")
		resp := map[string]interface{}{
			"nodes": []map[string]string{
				{"id": "node-1", "ip_address": "1.2.3.4", "environment": env, "role": "nameserver", "ssh_user": "root", "status": "active"},
				{"id": "node-2", "ip_address": "5.6.7.8", "environment": env, "role": "node", "ssh_user": "ubuntu", "status": "active"},
			},
		}
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(resp)
	}))
	defer server.Close()

	nodes, err := resolveFromNetworkWithURL(server.URL, "test-key", "devnet")
	if err != nil {
		t.Fatalf("resolveFromNetworkWithURL: %v", err)
	}

	if len(nodes) != 2 {
		t.Fatalf("expected 2 nodes, got %d", len(nodes))
	}

	if nodes[0].Host != "1.2.3.4" {
		t.Errorf("node 0 host = %q, want %q", nodes[0].Host, "1.2.3.4")
	}
	if nodes[0].Role != "nameserver" {
		t.Errorf("node 0 role = %q, want %q", nodes[0].Role, "nameserver")
	}
	if nodes[0].VaultTarget != "1.2.3.4/root" {
		t.Errorf("node 0 vault target = %q, want %q", nodes[0].VaultTarget, "1.2.3.4/root")
	}
	if nodes[0].Environment != "devnet" {
		t.Errorf("node 0 environment = %q, want %q", nodes[0].Environment, "devnet")
	}
	if nodes[1].User != "ubuntu" {
		t.Errorf("node 1 user = %q, want %q", nodes[1].User, "ubuntu")
	}
	if nodes[1].VaultTarget != "5.6.7.8/ubuntu" {
		t.Errorf("node 1 vault target = %q, want %q", nodes[1].VaultTarget, "5.6.7.8/ubuntu")
	}
}
|
||||
|
||||
func TestResolveFromMockServer_emptySSHUser(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
resp := map[string]interface{}{
|
||||
"nodes": []map[string]string{
|
||||
{"id": "node-1", "ip_address": "1.2.3.4", "environment": "devnet", "role": "node", "ssh_user": "", "status": "active"},
|
||||
},
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(resp)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
nodes, err := resolveFromNetworkWithURL(server.URL, "key", "devnet")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if len(nodes) != 1 {
|
||||
t.Fatalf("expected 1 node, got %d", len(nodes))
|
||||
}
|
||||
if nodes[0].User != "root" {
|
||||
t.Errorf("user = %q, want %q (default)", nodes[0].User, "root")
|
||||
}
|
||||
if nodes[0].VaultTarget != "1.2.3.4/root" {
|
||||
t.Errorf("vault target = %q, want %q", nodes[0].VaultTarget, "1.2.3.4/root")
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveFromMockServer_unauthorized(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
http.Error(w, `{"error":"unauthorized"}`, http.StatusUnauthorized)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
_, err := resolveFromNetworkWithURL(server.URL, "bad-key", "devnet")
|
||||
if err == nil {
|
||||
t.Error("expected error for unauthorized request")
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveFromMockServer_emptyNodes(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{"nodes": []interface{}{}})
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
nodes, err := resolveFromNetworkWithURL(server.URL, "key", "devnet")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if len(nodes) != 0 {
|
||||
t.Errorf("expected 0 nodes, got %d", len(nodes))
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveFromMockServer_malformedJSON(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(`<html>not json</html>`))
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
_, err := resolveFromNetworkWithURL(server.URL, "key", "devnet")
|
||||
if err == nil {
|
||||
t.Error("expected error for malformed JSON response")
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveFromMockServer_serverDown(t *testing.T) {
|
||||
_, err := resolveFromNetworkWithURL("http://127.0.0.1:1", "key", "devnet")
|
||||
if err == nil {
|
||||
t.Error("expected error for unreachable server")
|
||||
}
|
||||
}
|
||||
@ -154,6 +154,9 @@ func Create(name string) error {
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to switch to sandbox environment: %v\n", err)
|
||||
}
|
||||
|
||||
// Tag all nodes with operator wallet for unified node management
|
||||
registerNodesWithOperator(state, sshKeyPath)
|
||||
|
||||
printCreateSummary(cfg, state)
|
||||
return nil
|
||||
}
|
||||
@ -643,6 +646,36 @@ func printCreateSummary(cfg *Config, state *SandboxState) {
|
||||
fmt.Println("Destroy: orama sandbox destroy")
|
||||
}
|
||||
|
||||
// registerNodesWithOperator tags all sandbox nodes with the operator's wallet
|
||||
// via a direct RQLite UPDATE on the genesis node. This enables `orama nodes`
|
||||
// to discover sandbox nodes alongside production nodes.
|
||||
func registerNodesWithOperator(state *SandboxState, sshKeyPath string) {
|
||||
client := rwagent.New(os.Getenv("RW_AGENT_SOCK"))
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
addrData, err := client.GetAddress(ctx, "evm")
|
||||
if err != nil || addrData == nil || addrData.Address == "" {
|
||||
fmt.Fprintf(os.Stderr, "Warning: could not get operator wallet, nodes not tagged: %v\n", err)
|
||||
return
|
||||
}
|
||||
wallet := addrData.Address
|
||||
|
||||
if len(state.Servers) == 0 {
|
||||
return
|
||||
}
|
||||
genesis := state.Servers[0]
|
||||
|
||||
node := inspector.Node{User: "root", Host: genesis.IP, SSHKey: sshKeyPath}
|
||||
// Use RQLite's parameterized query to avoid any injection risk.
|
||||
// The JSON payload has the wallet as a parameter, not interpolated into SQL.
|
||||
payload := fmt.Sprintf(`[["UPDATE dns_nodes SET operator_wallet = ?, environment = 'sandbox' WHERE operator_wallet IS NULL OR operator_wallet = ''", %q]]`, wallet)
|
||||
cmd := fmt.Sprintf(`curl -sf -X POST http://localhost:5001/db/execute -H 'Content-Type: application/json' -d '%s'`, payload)
|
||||
if _, err := runSSHOutput(node, cmd); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to tag nodes with operator wallet: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
// cleanupFailedCreate deletes any servers that were created during a failed provision.
|
||||
func cleanupFailedCreate(client *HetznerClient, state *SandboxState) {
|
||||
if len(state.Servers) == 0 {
|
||||
|
||||
@ -4,6 +4,7 @@ import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
@ -107,10 +108,25 @@ func Destroy(name string, force bool) error {
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to remove sandbox environment: %v\n", err)
|
||||
}
|
||||
|
||||
// Clean up SSH known_hosts entries for destroyed server IPs.
|
||||
// This prevents "REMOTE HOST IDENTIFICATION HAS CHANGED" errors
|
||||
// when the same IPs are reused by a new sandbox.
|
||||
cleanupKnownHosts(state)
|
||||
|
||||
fmt.Printf("\nSandbox %q destroyed (%d servers deleted)\n", state.Name, len(state.Servers))
|
||||
return nil
|
||||
}
|
||||
|
||||
// cleanupKnownHosts removes SSH known_hosts entries for all sandbox server IPs.
|
||||
func cleanupKnownHosts(state *SandboxState) {
|
||||
for _, srv := range state.Servers {
|
||||
cmd := exec.Command("ssh-keygen", "-R", srv.IP)
|
||||
cmd.Stdout = nil
|
||||
cmd.Stderr = nil
|
||||
cmd.Run() // best-effort, ignore errors
|
||||
}
|
||||
}
|
||||
|
||||
// resolveSandbox finds a sandbox by name or returns the active one.
|
||||
func resolveSandbox(name string) (*SandboxState, error) {
|
||||
if name != "" {
|
||||
|
||||
@ -32,6 +32,7 @@ import (
|
||||
enrollhandlers "github.com/DeBrosOfficial/network/pkg/gateway/handlers/enroll"
|
||||
joinhandlers "github.com/DeBrosOfficial/network/pkg/gateway/handlers/join"
|
||||
webrtchandlers "github.com/DeBrosOfficial/network/pkg/gateway/handlers/webrtc"
|
||||
operatorhandlers "github.com/DeBrosOfficial/network/pkg/gateway/handlers/operator"
|
||||
vaulthandlers "github.com/DeBrosOfficial/network/pkg/gateway/handlers/vault"
|
||||
wireguardhandlers "github.com/DeBrosOfficial/network/pkg/gateway/handlers/wireguard"
|
||||
sqlitehandlers "github.com/DeBrosOfficial/network/pkg/gateway/handlers/sqlite"
|
||||
@ -168,7 +169,8 @@ type Gateway struct {
|
||||
proxyTransport *http.Transport
|
||||
|
||||
// Vault proxy handlers
|
||||
vaultHandlers *vaulthandlers.Handlers
|
||||
vaultHandlers *vaulthandlers.Handlers
|
||||
operatorHandler *operatorhandlers.Handler
|
||||
|
||||
// Namespace health state (local service probes + hourly reconciliation)
|
||||
nsHealth *namespaceHealthState
|
||||
@ -405,6 +407,7 @@ func New(logger *logging.ColoredLogger, cfg *Config) (*Gateway, error) {
|
||||
gw.joinHandler = joinhandlers.NewHandler(logger.Logger, deps.ORMClient, cfg.DataDir)
|
||||
gw.enrollHandler = enrollhandlers.NewHandler(logger.Logger, deps.ORMClient, cfg.DataDir)
|
||||
gw.vaultHandlers = vaulthandlers.NewHandlers(logger, deps.Client)
|
||||
gw.operatorHandler = operatorhandlers.NewHandler(logger.Logger, deps.ORMClient)
|
||||
}
|
||||
|
||||
// Initialize deployment system
|
||||
|
||||
@ -129,6 +129,9 @@ func (h *Handler) HandleJoin(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
// 1b. Look up the operator wallet from the consumed token (may be empty for legacy tokens)
|
||||
operatorWallet := h.tokenOperatorWallet(ctx, req.Token)
|
||||
|
||||
// 2. Clean up stale WG entries for this public IP (from previous installs).
|
||||
// This prevents ghost peers: old rows with different node_id/wg_key that
|
||||
// the sync loop would keep trying to reach.
|
||||
@ -150,8 +153,8 @@ func (h *Handler) HandleJoin(w http.ResponseWriter, r *http.Request) {
|
||||
// 4. Register WG peer in database
|
||||
nodeID := fmt.Sprintf("node-%s", wgIP) // temporary ID based on WG IP
|
||||
_, err = h.rqliteClient.Exec(ctx,
|
||||
"INSERT OR REPLACE INTO wireguard_peers (node_id, wg_ip, public_key, public_ip, wg_port) VALUES (?, ?, ?, ?, ?)",
|
||||
nodeID, wgIP, req.WGPublicKey, req.PublicIP, 51820)
|
||||
"INSERT OR REPLACE INTO wireguard_peers (node_id, wg_ip, public_key, public_ip, wg_port, operator_wallet) VALUES (?, ?, ?, ?, ?, ?)",
|
||||
nodeID, wgIP, req.WGPublicKey, req.PublicIP, 51820, operatorWallet)
|
||||
if err != nil {
|
||||
h.logger.Error("failed to register WG peer", zap.Error(err))
|
||||
http.Error(w, "failed to register peer", http.StatusInternalServerError)
|
||||
@ -307,6 +310,22 @@ func (h *Handler) consumeToken(ctx context.Context, token, usedByIP string) erro
|
||||
return nil
|
||||
}
|
||||
|
||||
// tokenOperatorWallet looks up the operator_wallet from a consumed invite token.
|
||||
// Returns empty string if the token has no operator (legacy tokens).
|
||||
func (h *Handler) tokenOperatorWallet(ctx context.Context, token string) string {
|
||||
var rows []struct {
|
||||
Wallet string `db:"operator_wallet"`
|
||||
}
|
||||
if err := h.rqliteClient.Query(ctx, &rows,
|
||||
"SELECT COALESCE(operator_wallet, '') AS operator_wallet FROM invite_tokens WHERE token = ?", token); err != nil {
|
||||
return ""
|
||||
}
|
||||
if len(rows) > 0 {
|
||||
return rows[0].Wallet
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// assignWGIP finds the next available 10.0.0.x IP by querying all peers and
|
||||
// finding the numerically highest IP. This avoids lexicographic comparison issues
|
||||
// where MAX("10.0.0.9") > MAX("10.0.0.10") in SQL string comparison.
|
||||
|
||||
50
core/pkg/gateway/handlers/operator/handler.go
Normal file
50
core/pkg/gateway/handlers/operator/handler.go
Normal file
@ -0,0 +1,50 @@
|
||||
// Package operator provides HTTP handlers for node operator management.
|
||||
//
|
||||
// Operators authenticate via wallet JWT (same auth flow as namespaces).
|
||||
// Each operator's nodes are tracked by their wallet address in the
|
||||
// dns_nodes and wireguard_peers tables.
|
||||
package operator
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/gateway/auth"
|
||||
"github.com/DeBrosOfficial/network/pkg/gateway/ctxkeys"
|
||||
"github.com/DeBrosOfficial/network/pkg/rqlite"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// Handler provides HTTP handlers for operator node management.
|
||||
type Handler struct {
|
||||
logger *zap.Logger
|
||||
rqliteClient rqlite.Client
|
||||
}
|
||||
|
||||
// NewHandler creates an operator handler.
|
||||
func NewHandler(logger *zap.Logger, rqliteClient rqlite.Client) *Handler {
|
||||
return &Handler{
|
||||
logger: logger,
|
||||
rqliteClient: rqliteClient,
|
||||
}
|
||||
}
|
||||
|
||||
// walletFromRequest extracts the operator's wallet address from the JWT
|
||||
// stored in the request context by the auth middleware.
|
||||
func walletFromRequest(r *http.Request) string {
|
||||
claims, ok := r.Context().Value(ctxkeys.JWT).(*auth.JWTClaims)
|
||||
if !ok || claims == nil {
|
||||
return ""
|
||||
}
|
||||
return claims.Sub
|
||||
}
|
||||
|
||||
func writeJSON(w http.ResponseWriter, status int, v interface{}) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(status)
|
||||
json.NewEncoder(w).Encode(v)
|
||||
}
|
||||
|
||||
func writeError(w http.ResponseWriter, status int, msg string) {
|
||||
writeJSON(w, status, map[string]string{"error": msg})
|
||||
}
|
||||
206
core/pkg/gateway/handlers/operator/handler_test.go
Normal file
206
core/pkg/gateway/handlers/operator/handler_test.go
Normal file
@ -0,0 +1,206 @@
|
||||
package operator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/gateway/auth"
|
||||
"github.com/DeBrosOfficial/network/pkg/gateway/ctxkeys"
|
||||
)
|
||||
|
||||
func TestWalletFromRequest_withClaims(t *testing.T) {
|
||||
r := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
claims := &auth.JWTClaims{Sub: "0xabc123"}
|
||||
ctx := context.WithValue(r.Context(), ctxkeys.JWT, claims)
|
||||
r = r.WithContext(ctx)
|
||||
|
||||
wallet := walletFromRequest(r)
|
||||
if wallet != "0xabc123" {
|
||||
t.Errorf("wallet = %q, want %q", wallet, "0xabc123")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWalletFromRequest_noClaims(t *testing.T) {
|
||||
r := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
|
||||
wallet := walletFromRequest(r)
|
||||
if wallet != "" {
|
||||
t.Errorf("wallet = %q, want empty", wallet)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWalletFromRequest_nilClaims(t *testing.T) {
|
||||
r := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
ctx := context.WithValue(r.Context(), ctxkeys.JWT, (*auth.JWTClaims)(nil))
|
||||
r = r.WithContext(ctx)
|
||||
|
||||
wallet := walletFromRequest(r)
|
||||
if wallet != "" {
|
||||
t.Errorf("wallet = %q, want empty", wallet)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeJSON_valid(t *testing.T) {
|
||||
body := strings.NewReader(`{"node_id":"test-node","environment":"devnet"}`)
|
||||
r := httptest.NewRequest(http.MethodPost, "/", body)
|
||||
|
||||
var req RegisterRequest
|
||||
if err := decodeJSON(r, &req); err != nil {
|
||||
t.Fatalf("decodeJSON: %v", err)
|
||||
}
|
||||
if req.NodeID != "test-node" {
|
||||
t.Errorf("NodeID = %q, want %q", req.NodeID, "test-node")
|
||||
}
|
||||
if req.Environment != "devnet" {
|
||||
t.Errorf("Environment = %q, want %q", req.Environment, "devnet")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeJSON_invalid(t *testing.T) {
|
||||
body := strings.NewReader(`not-json`)
|
||||
r := httptest.NewRequest(http.MethodPost, "/", body)
|
||||
|
||||
var req RegisterRequest
|
||||
if err := decodeJSON(r, &req); err == nil {
|
||||
t.Error("expected error for invalid JSON")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleInvite_noAuth(t *testing.T) {
|
||||
h := NewHandler(nil, nil)
|
||||
w := httptest.NewRecorder()
|
||||
r := httptest.NewRequest(http.MethodPost, "/v1/operator/invite", nil)
|
||||
|
||||
h.HandleInvite(w, r)
|
||||
|
||||
if w.Code != http.StatusUnauthorized {
|
||||
t.Errorf("status = %d, want %d", w.Code, http.StatusUnauthorized)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleInvite_wrongMethod(t *testing.T) {
|
||||
h := NewHandler(nil, nil)
|
||||
w := httptest.NewRecorder()
|
||||
r := httptest.NewRequest(http.MethodGet, "/v1/operator/invite", nil)
|
||||
|
||||
h.HandleInvite(w, r)
|
||||
|
||||
if w.Code != http.StatusMethodNotAllowed {
|
||||
t.Errorf("status = %d, want %d", w.Code, http.StatusMethodNotAllowed)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleListNodes_noAuth(t *testing.T) {
|
||||
h := NewHandler(nil, nil)
|
||||
w := httptest.NewRecorder()
|
||||
r := httptest.NewRequest(http.MethodGet, "/v1/operator/nodes", nil)
|
||||
|
||||
h.HandleListNodes(w, r)
|
||||
|
||||
if w.Code != http.StatusUnauthorized {
|
||||
t.Errorf("status = %d, want %d", w.Code, http.StatusUnauthorized)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleListNodes_wrongMethod(t *testing.T) {
|
||||
h := NewHandler(nil, nil)
|
||||
w := httptest.NewRecorder()
|
||||
r := httptest.NewRequest(http.MethodPost, "/v1/operator/nodes", nil)
|
||||
|
||||
h.HandleListNodes(w, r)
|
||||
|
||||
if w.Code != http.StatusMethodNotAllowed {
|
||||
t.Errorf("status = %d, want %d", w.Code, http.StatusMethodNotAllowed)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleRegister_noAuth(t *testing.T) {
|
||||
h := NewHandler(nil, nil)
|
||||
w := httptest.NewRecorder()
|
||||
r := httptest.NewRequest(http.MethodPost, "/v1/operator/node/register", strings.NewReader(`{"node_id":"test"}`))
|
||||
|
||||
h.HandleRegister(w, r)
|
||||
|
||||
if w.Code != http.StatusUnauthorized {
|
||||
t.Errorf("status = %d, want %d", w.Code, http.StatusUnauthorized)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleRegister_missingFields(t *testing.T) {
|
||||
h := NewHandler(nil, nil)
|
||||
w := httptest.NewRecorder()
|
||||
r := httptest.NewRequest(http.MethodPost, "/v1/operator/node/register", strings.NewReader(`{}`))
|
||||
claims := &auth.JWTClaims{Sub: "0xabc"}
|
||||
r = r.WithContext(context.WithValue(r.Context(), ctxkeys.JWT, claims))
|
||||
|
||||
h.HandleRegister(w, r)
|
||||
|
||||
if w.Code != http.StatusBadRequest {
|
||||
t.Errorf("status = %d, want %d", w.Code, http.StatusBadRequest)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleRegister_invalidEnvironment(t *testing.T) {
|
||||
h := NewHandler(nil, nil)
|
||||
w := httptest.NewRecorder()
|
||||
r := httptest.NewRequest(http.MethodPost, "/v1/operator/node/register",
|
||||
strings.NewReader(`{"node_id":"test","environment":"<script>alert(1)</script>"}`))
|
||||
claims := &auth.JWTClaims{Sub: "0xabc"}
|
||||
r = r.WithContext(context.WithValue(r.Context(), ctxkeys.JWT, claims))
|
||||
|
||||
h.HandleRegister(w, r)
|
||||
|
||||
if w.Code != http.StatusBadRequest {
|
||||
t.Errorf("status = %d, want %d", w.Code, http.StatusBadRequest)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleRegister_invalidRole(t *testing.T) {
|
||||
h := NewHandler(nil, nil)
|
||||
w := httptest.NewRecorder()
|
||||
r := httptest.NewRequest(http.MethodPost, "/v1/operator/node/register",
|
||||
strings.NewReader(`{"node_id":"test","role":"admin"}`))
|
||||
claims := &auth.JWTClaims{Sub: "0xabc"}
|
||||
r = r.WithContext(context.WithValue(r.Context(), ctxkeys.JWT, claims))
|
||||
|
||||
h.HandleRegister(w, r)
|
||||
|
||||
if w.Code != http.StatusBadRequest {
|
||||
t.Errorf("status = %d, want %d", w.Code, http.StatusBadRequest)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAllowedEnvironments(t *testing.T) {
|
||||
valid := []string{"devnet", "testnet", "sandbox", "production", "mainnet"}
|
||||
invalid := []string{"staging", "local", "<script>", ""}
|
||||
|
||||
for _, env := range valid {
|
||||
if !allowedEnvironments[env] {
|
||||
t.Errorf("expected %q to be allowed", env)
|
||||
}
|
||||
}
|
||||
for _, env := range invalid {
|
||||
if allowedEnvironments[env] {
|
||||
t.Errorf("expected %q to be disallowed", env)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAllowedRoles(t *testing.T) {
|
||||
valid := []string{"node", "nameserver", "nameserver-ns1", "nameserver-ns2", "nameserver-ns3"}
|
||||
invalid := []string{"admin", "root", ""}
|
||||
|
||||
for _, role := range valid {
|
||||
if !allowedRoles[role] {
|
||||
t.Errorf("expected %q to be allowed", role)
|
||||
}
|
||||
}
|
||||
for _, role := range invalid {
|
||||
if allowedRoles[role] {
|
||||
t.Errorf("expected %q to be disallowed", role)
|
||||
}
|
||||
}
|
||||
}
|
||||
79
core/pkg/gateway/handlers/operator/invite.go
Normal file
79
core/pkg/gateway/handlers/operator/invite.go
Normal file
@ -0,0 +1,79 @@
|
||||
package operator
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// InviteRequest is the optional body for POST /v1/operator/invite.
|
||||
type InviteRequest struct {
|
||||
ExpiryMinutes int `json:"expiry_minutes,omitempty"` // Default: 60
|
||||
}
|
||||
|
||||
// InviteResponse is returned on success.
|
||||
type InviteResponse struct {
|
||||
Token string `json:"token"`
|
||||
ExpiresAt string `json:"expires_at"`
|
||||
}
|
||||
|
||||
// HandleInvite generates an invite token tagged with the operator's wallet.
|
||||
// Requires wallet JWT authentication.
|
||||
//
|
||||
// POST /v1/operator/invite
|
||||
func (h *Handler) HandleInvite(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodPost {
|
||||
writeError(w, http.StatusMethodNotAllowed, "method not allowed")
|
||||
return
|
||||
}
|
||||
|
||||
wallet := walletFromRequest(r)
|
||||
if wallet == "" {
|
||||
writeError(w, http.StatusUnauthorized, "wallet authentication required")
|
||||
return
|
||||
}
|
||||
|
||||
// Parse optional expiry from body (default: 60min, max: 7 days).
|
||||
expiryMinutes := 60
|
||||
if r.Body != nil && r.ContentLength > 0 {
|
||||
var req InviteRequest
|
||||
if err := decodeJSON(r, &req); err == nil && req.ExpiryMinutes > 0 {
|
||||
expiryMinutes = req.ExpiryMinutes
|
||||
}
|
||||
}
|
||||
const maxExpiryMinutes = 10080 // 7 days
|
||||
if expiryMinutes > maxExpiryMinutes {
|
||||
expiryMinutes = maxExpiryMinutes
|
||||
}
|
||||
|
||||
// Generate random 32-byte token.
|
||||
tokenBytes := make([]byte, 32)
|
||||
if _, err := rand.Read(tokenBytes); err != nil {
|
||||
h.logger.Error("failed to generate invite token", zap.Error(err))
|
||||
writeError(w, http.StatusInternalServerError, "failed to generate token")
|
||||
return
|
||||
}
|
||||
token := hex.EncodeToString(tokenBytes)
|
||||
|
||||
expiresAt := time.Now().UTC().Add(time.Duration(expiryMinutes) * time.Minute)
|
||||
expiresAtStr := expiresAt.Format("2006-01-02 15:04:05")
|
||||
|
||||
ctx := r.Context()
|
||||
_, err := h.rqliteClient.Exec(ctx,
|
||||
"INSERT INTO invite_tokens (token, created_by, expires_at, operator_wallet) VALUES (?, ?, ?, ?)",
|
||||
token, fmt.Sprintf("operator:%s", wallet), expiresAtStr, wallet)
|
||||
if err != nil {
|
||||
h.logger.Error("failed to store invite token", zap.Error(err))
|
||||
writeError(w, http.StatusInternalServerError, "failed to create invite token")
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, InviteResponse{
|
||||
Token: token,
|
||||
ExpiresAt: expiresAtStr,
|
||||
})
|
||||
}
|
||||
74
core/pkg/gateway/handlers/operator/nodes.go
Normal file
74
core/pkg/gateway/handlers/operator/nodes.go
Normal file
@ -0,0 +1,74 @@
|
||||
package operator
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// NodeInfo represents a node owned by the operator.
|
||||
type NodeInfo struct {
|
||||
ID string `json:"id" db:"id"`
|
||||
IPAddress string `json:"ip_address" db:"ip_address"`
|
||||
InternalIP string `json:"internal_ip,omitempty" db:"internal_ip"`
|
||||
Environment string `json:"environment,omitempty" db:"environment"`
|
||||
Role string `json:"role,omitempty" db:"role"`
|
||||
SSHUser string `json:"ssh_user,omitempty" db:"ssh_user"`
|
||||
Status string `json:"status" db:"status"`
|
||||
Region string `json:"region,omitempty" db:"region"`
|
||||
LastSeen string `json:"last_seen,omitempty" db:"last_seen"`
|
||||
OperatorWallet string `json:"operator_wallet,omitempty" db:"operator_wallet"`
|
||||
}
|
||||
|
||||
// ListNodesResponse is returned by GET /v1/operator/nodes.
|
||||
type ListNodesResponse struct {
|
||||
Nodes []NodeInfo `json:"nodes"`
|
||||
}
|
||||
|
||||
// HandleListNodes returns all nodes owned by the authenticated operator.
|
||||
// Optionally filtered by ?env=<environment>.
|
||||
//
|
||||
// GET /v1/operator/nodes
|
||||
func (h *Handler) HandleListNodes(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodGet {
|
||||
writeError(w, http.StatusMethodNotAllowed, "method not allowed")
|
||||
return
|
||||
}
|
||||
|
||||
wallet := walletFromRequest(r)
|
||||
if wallet == "" {
|
||||
writeError(w, http.StatusUnauthorized, "wallet authentication required")
|
||||
return
|
||||
}
|
||||
|
||||
ctx := r.Context()
|
||||
envFilter := r.URL.Query().Get("env")
|
||||
|
||||
query := `SELECT id, ip_address, COALESCE(internal_ip, '') AS internal_ip,
|
||||
COALESCE(environment, 'production') AS environment,
|
||||
COALESCE(role, 'node') AS role, COALESCE(ssh_user, 'root') AS ssh_user,
|
||||
status, COALESCE(region, '') AS region, COALESCE(last_seen, '') AS last_seen,
|
||||
COALESCE(operator_wallet, '') AS operator_wallet
|
||||
FROM dns_nodes WHERE operator_wallet = ?`
|
||||
args := []interface{}{wallet}
|
||||
|
||||
if envFilter != "" {
|
||||
query += " AND environment = ?"
|
||||
args = append(args, envFilter)
|
||||
}
|
||||
|
||||
query += " ORDER BY environment, ip_address"
|
||||
|
||||
var nodes []NodeInfo
|
||||
if err := h.rqliteClient.Query(ctx, &nodes, query, args...); err != nil {
|
||||
h.logger.Error("failed to query operator nodes", zap.Error(err))
|
||||
writeError(w, http.StatusInternalServerError, "failed to query nodes")
|
||||
return
|
||||
}
|
||||
|
||||
if nodes == nil {
|
||||
nodes = []NodeInfo{}
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, ListNodesResponse{Nodes: nodes})
|
||||
}
|
||||
138
core/pkg/gateway/handlers/operator/register.go
Normal file
138
core/pkg/gateway/handlers/operator/register.go
Normal file
@ -0,0 +1,138 @@
|
||||
package operator
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// RegisterRequest is the body for POST /v1/operator/node/register.
|
||||
type RegisterRequest struct {
|
||||
NodeID string `json:"node_id"` // dns_nodes.id (peer ID or hostname)
|
||||
IPAddress string `json:"ip_address,omitempty"` // Public IP (alternative lookup key)
|
||||
Environment string `json:"environment,omitempty"` // e.g., "devnet", "sandbox"
|
||||
Role string `json:"role,omitempty"` // e.g., "node", "nameserver"
|
||||
SSHUser string `json:"ssh_user,omitempty"` // SSH user (default: "root")
|
||||
}
|
||||
|
||||
var (
|
||||
allowedEnvironments = map[string]bool{
|
||||
"production": true, "devnet": true, "testnet": true, "sandbox": true, "mainnet": true,
|
||||
}
|
||||
allowedRoles = map[string]bool{
|
||||
"node": true, "nameserver": true, "nameserver-ns1": true, "nameserver-ns2": true, "nameserver-ns3": true,
|
||||
}
|
||||
)
|
||||
|
||||
// HandleRegister tags an existing node with the operator's wallet.
|
||||
// The node must already exist in dns_nodes and be either unclaimed or
|
||||
// already owned by the requesting operator.
|
||||
//
|
||||
// POST /v1/operator/node/register
|
||||
func (h *Handler) HandleRegister(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodPost {
|
||||
writeError(w, http.StatusMethodNotAllowed, "method not allowed")
|
||||
return
|
||||
}
|
||||
|
||||
wallet := walletFromRequest(r)
|
||||
if wallet == "" {
|
||||
writeError(w, http.StatusUnauthorized, "wallet authentication required")
|
||||
return
|
||||
}
|
||||
|
||||
var req RegisterRequest
|
||||
if err := decodeJSON(r, &req); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid request body")
|
||||
return
|
||||
}
|
||||
|
||||
if req.NodeID == "" && req.IPAddress == "" {
|
||||
writeError(w, http.StatusBadRequest, "node_id or ip_address required")
|
||||
return
|
||||
}
|
||||
if req.Environment != "" && !allowedEnvironments[req.Environment] {
|
||||
writeError(w, http.StatusBadRequest, "invalid environment")
|
||||
return
|
||||
}
|
||||
if req.Role != "" && !allowedRoles[req.Role] {
|
||||
writeError(w, http.StatusBadRequest, "invalid role")
|
||||
return
|
||||
}
|
||||
|
||||
ctx := r.Context()
|
||||
|
||||
// Build the UPDATE dynamically based on what fields are provided.
|
||||
setClauses := "operator_wallet = ?"
|
||||
args := []interface{}{wallet}
|
||||
|
||||
if req.Environment != "" {
|
||||
setClauses += ", environment = ?"
|
||||
args = append(args, req.Environment)
|
||||
}
|
||||
if req.Role != "" {
|
||||
setClauses += ", role = ?"
|
||||
args = append(args, req.Role)
|
||||
}
|
||||
if req.SSHUser != "" {
|
||||
setClauses += ", ssh_user = ?"
|
||||
args = append(args, req.SSHUser)
|
||||
}
|
||||
|
||||
setClauses += ", updated_at = datetime('now')"
|
||||
|
||||
// Match by node_id or ip_address. Only allow claiming unclaimed nodes
|
||||
// or nodes already owned by this operator (prevents hijacking).
|
||||
var whereClause string
|
||||
if req.NodeID != "" {
|
||||
whereClause = "id = ? AND (operator_wallet IS NULL OR operator_wallet = '' OR operator_wallet = ?)"
|
||||
args = append(args, req.NodeID, wallet)
|
||||
} else {
|
||||
whereClause = "ip_address = ? AND (operator_wallet IS NULL OR operator_wallet = '' OR operator_wallet = ?)"
|
||||
args = append(args, req.IPAddress, wallet)
|
||||
}
|
||||
|
||||
query := "UPDATE dns_nodes SET " + setClauses + " WHERE " + whereClause
|
||||
result, err := h.rqliteClient.Exec(ctx, query, args...)
|
||||
if err != nil {
|
||||
h.logger.Error("failed to register node with operator", zap.Error(err))
|
||||
writeError(w, http.StatusInternalServerError, "failed to register node")
|
||||
return
|
||||
}
|
||||
|
||||
rowsAffected, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
h.logger.Error("failed to check rows affected", zap.Error(err))
|
||||
writeError(w, http.StatusInternalServerError, "failed to register node")
|
||||
return
|
||||
}
|
||||
if rowsAffected == 0 {
|
||||
writeError(w, http.StatusNotFound, "node not found or owned by another operator")
|
||||
return
|
||||
}
|
||||
|
||||
// Also update wireguard_peers if we can match by public_ip.
|
||||
if req.IPAddress != "" {
|
||||
if _, err := h.rqliteClient.Exec(ctx,
|
||||
"UPDATE wireguard_peers SET operator_wallet = ? WHERE public_ip = ?",
|
||||
wallet, req.IPAddress); err != nil {
|
||||
h.logger.Warn("failed to update operator_wallet on wireguard_peers", zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]string{
|
||||
"status": "registered",
|
||||
"wallet": wallet,
|
||||
"node_id": req.NodeID,
|
||||
})
|
||||
}
|
||||
|
||||
func decodeJSON(r *http.Request, v interface{}) error {
|
||||
body, err := io.ReadAll(io.LimitReader(r.Body, 4096))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return json.Unmarshal(body, v)
|
||||
}
|
||||
@ -123,6 +123,13 @@ func (g *Gateway) Routes() http.Handler {
|
||||
mux.HandleFunc("/v1/pubsub/presence", g.pubsubHandlers.PresenceHandler)
|
||||
}
|
||||
|
||||
// operator node management (wallet JWT auth via middleware)
|
||||
if g.operatorHandler != nil {
|
||||
mux.HandleFunc("/v1/operator/invite", g.operatorHandler.HandleInvite)
|
||||
mux.HandleFunc("/v1/operator/nodes", g.operatorHandler.HandleListNodes)
|
||||
mux.HandleFunc("/v1/operator/node/register", g.operatorHandler.HandleRegister)
|
||||
}
|
||||
|
||||
// vault proxy (public, rate-limited per identity within handler)
|
||||
if g.vaultHandlers != nil {
|
||||
mux.HandleFunc("/v1/vault/push", g.vaultHandlers.HandlePush)
|
||||
|
||||
@ -130,6 +130,12 @@ export const OPERATOR_DOCS: DocLink[] = [
|
||||
icon: Server,
|
||||
description: "Install and configure a node",
|
||||
},
|
||||
{
|
||||
title: "Node Management",
|
||||
slug: "operator/node-management",
|
||||
icon: LayoutDashboard,
|
||||
description: "Unified commands for managing your nodes",
|
||||
},
|
||||
{
|
||||
title: "Monitoring",
|
||||
slug: "operator/monitoring",
|
||||
|
||||
@ -61,14 +61,21 @@ orama node push --env devnet --direct
|
||||
Build and deploy in one step with automatic rolling restart:
|
||||
|
||||
```bash
|
||||
# Build and deploy to all nodes
|
||||
# Build the archive
|
||||
orama build
|
||||
|
||||
# Roll out to all your nodes (recommended — uses wallet-based node discovery)
|
||||
orama rollout --env devnet
|
||||
|
||||
# Custom delay between nodes
|
||||
orama rollout --env devnet --delay 60
|
||||
```
|
||||
|
||||
The legacy `orama node rollout` command is still available and uses `nodes.conf` directly:
|
||||
|
||||
```bash
|
||||
orama node rollout --env devnet
|
||||
|
||||
# Skip the build step (use existing archive)
|
||||
orama node rollout --env devnet --no-build
|
||||
|
||||
# Skip confirmation
|
||||
orama node rollout --env devnet --yes
|
||||
orama node rollout --env devnet --no-build --yes
|
||||
```
|
||||
|
||||
### Upgrade (Restart Only)
|
||||
|
||||
@ -156,121 +156,93 @@ The `orama node install` command configures the entire node automatically:
|
||||
|
||||
## Node Management
|
||||
|
||||
After installation, use these commands to manage your node:
|
||||
After installation, your node is automatically registered with your wallet. Use these unified commands from your local machine:
|
||||
|
||||
```bash
|
||||
# List all your nodes
|
||||
orama nodes --env devnet
|
||||
|
||||
# Check health of all your nodes
|
||||
orama status --env devnet
|
||||
|
||||
# SSH into any node
|
||||
orama ssh <IP>
|
||||
|
||||
# Rolling upgrade all your nodes
|
||||
orama rollout --env devnet
|
||||
```
|
||||
|
||||
See [Node Management](/docs/operator/node-management) for the full guide.
|
||||
|
||||
### On-Node Commands
|
||||
|
||||
These commands run directly on a VPS with `sudo`:
|
||||
|
||||
```bash
|
||||
# Check service status
|
||||
sudo orama node status
|
||||
|
||||
# Start all services
|
||||
# Start/stop/restart all services
|
||||
sudo orama node start
|
||||
|
||||
# Stop all services (masks services to prevent auto-restart)
|
||||
sudo orama node stop
|
||||
sudo orama node stop --force # bypass quorum safety check
|
||||
|
||||
# Restart all services
|
||||
sudo orama node restart
|
||||
sudo orama node restart --force # bypass quorum safety check
|
||||
|
||||
# Run diagnostic checks
|
||||
sudo orama node doctor
|
||||
|
||||
# View service logs
|
||||
sudo orama node logs <service>
|
||||
# Service aliases: node, ipfs, cluster, gateway, olric
|
||||
|
||||
# Output node health data as JSON
|
||||
sudo orama node report --json
|
||||
```
|
||||
|
||||
### Additional Node Commands
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `orama node uninstall` | Remove all Orama services from this node |
|
||||
| `orama node upgrade --restart` | Upgrade binary and restart services |
|
||||
| `orama node push --env <env>` | Push binary archive to remote nodes |
|
||||
| `orama node rollout --env <env>` | Build, push, and rolling upgrade all nodes |
|
||||
| `orama node clean --env <env>` | Wipe remote nodes for reinstallation |
|
||||
| `orama node invite --expiry <duration>` | Generate an invite token for new nodes |
|
||||
| `orama node recover-raft --leader <ip>` | Recover RQLite cluster from split-brain |
|
||||
| `orama node migrate` | Migrate from an older installation layout |
|
||||
|
||||
## Upgrading
|
||||
|
||||
Upgrades are performed via binary replacement with a rolling restart:
|
||||
Deploy new code to all your nodes with one command:
|
||||
|
||||
```bash
|
||||
# Upgrade this node (extracts new binary and restarts services)
|
||||
sudo orama node upgrade --restart
|
||||
orama build # Build the binary archive
|
||||
orama rollout --env devnet # Push + rolling upgrade (followers first, leader last)
|
||||
```
|
||||
|
||||
For multi-node clusters, upgrade one node at a time and verify health between each:
|
||||
Or upgrade a single node directly on the VPS:
|
||||
|
||||
```bash
|
||||
# Check cluster health
|
||||
orama monitor report --env devnet
|
||||
|
||||
# Check single node health
|
||||
orama monitor report --env devnet --node <IP>
|
||||
sudo orama node upgrade --restart
|
||||
```
|
||||
|
||||
See [Upgrades](/docs/operator/upgrades) for the full rolling upgrade procedure.
|
||||
|
||||
## Monitoring
|
||||
|
||||
The `orama monitor` command provides real-time cluster monitoring from your local machine:
|
||||
Quick health check of all your nodes:
|
||||
|
||||
```bash
|
||||
orama status --env devnet
|
||||
```
|
||||
|
||||
For detailed monitoring with cross-node analysis and alerts:
|
||||
|
||||
```bash
|
||||
# Interactive TUI
|
||||
orama monitor --env devnet
|
||||
|
||||
# Cluster overview (one-shot)
|
||||
orama monitor cluster --env devnet
|
||||
|
||||
# Per-node health details
|
||||
orama monitor node --env devnet
|
||||
|
||||
# Service status across the cluster
|
||||
orama monitor service --env devnet
|
||||
|
||||
# WireGuard mesh connectivity
|
||||
orama monitor mesh --env devnet
|
||||
|
||||
# DNS health
|
||||
orama monitor dns --env devnet
|
||||
|
||||
# Namespace usage summary
|
||||
orama monitor namespaces --env devnet
|
||||
|
||||
# Active alerts
|
||||
orama monitor alerts --env devnet
|
||||
|
||||
# Full cluster report
|
||||
# Full cluster report (JSON)
|
||||
orama monitor report --env devnet
|
||||
|
||||
# Active alerts only
|
||||
orama monitor alerts --env devnet
|
||||
```
|
||||
|
||||
All monitor subcommands support `--json` for machine-readable output and `--node <IP>` to filter to a specific node.
|
||||
|
||||
The `orama inspect` command provides deeper SSH-based health checks with optional AI analysis:
|
||||
|
||||
```bash
|
||||
# Inspect all subsystems
|
||||
orama inspect --env devnet
|
||||
|
||||
# Inspect specific subsystem
|
||||
orama inspect --env devnet --subsystem wg
|
||||
|
||||
# With AI analysis of failures
|
||||
orama inspect --env devnet --ai
|
||||
```
|
||||
|
||||
Available subsystems for inspect: `rqlite`, `olric`, `ipfs`, `dns`, `wg`, `system`, `network`, `anyone`, `all`.
|
||||
See [Monitoring](/docs/operator/monitoring) for the full monitoring guide.
|
||||
|
||||
## Next Steps
|
||||
|
||||
- [Node Setup](/docs/operator/node-setup) -- Detailed setup instructions
|
||||
- [Node Management](/docs/operator/node-management) -- Unified commands: `orama nodes`, `ssh`, `status`, `rollout`
|
||||
- [Node Setup](/docs/operator/node-setup) -- Detailed VPS installation instructions
|
||||
- [WireGuard](/docs/operator/wireguard) -- Mesh network documentation
|
||||
- [Nameserver](/docs/operator/nameserver) -- CoreDNS nameserver setup
|
||||
- [Monitoring](/docs/operator/monitoring) -- Health monitoring and alerting
|
||||
|
||||
@ -11,7 +11,21 @@ The monitoring system is split into two parts:
|
||||
|
||||
Each node runs `orama node report --json` locally (no SSH to other nodes), collecting data via `os/exec` and `net/http` to localhost services. The monitor aggregates everything and derives alerts from cross-node analysis.
|
||||
|
||||
## Quick Start
|
||||
## Quick Status Check
|
||||
|
||||
The fastest way to check your nodes:
|
||||
|
||||
```bash
|
||||
# Health of all your nodes (uses wallet-based node discovery)
|
||||
orama status --env devnet
|
||||
|
||||
# JSON output for automation
|
||||
orama status --env devnet --json
|
||||
```
|
||||
|
||||
## Detailed Monitoring
|
||||
|
||||
For cross-node analysis, alerts, and interactive TUI:
|
||||
|
||||
```bash
|
||||
# Interactive TUI (auto-refreshes every 30s)
|
||||
|
||||
131
website/src/docs/operator/node-management.mdx
Normal file
131
website/src/docs/operator/node-management.mdx
Normal file
@ -0,0 +1,131 @@
|
||||
# Node Management
|
||||
|
||||
The Orama CLI provides unified commands for managing all your nodes across environments. Your wallet identity (via RootWallet) links you to your nodes — the network tracks which nodes belong to which operator.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- **RootWallet** desktop app running and unlocked
|
||||
- **Authenticated** with the network: `orama auth login`
|
||||
- **Environment set**: `orama env switch devnet` (or your target environment)
|
||||
|
||||
## Listing Your Nodes
|
||||
|
||||
View all nodes registered to your wallet:
|
||||
|
||||
```bash
|
||||
# List nodes in the active environment
|
||||
orama nodes
|
||||
|
||||
# List nodes in a specific environment
|
||||
orama nodes --env devnet
|
||||
```
|
||||
|
||||
Output:
|
||||
|
||||
```
|
||||
IP ROLE USER ENVIRONMENT
|
||||
1.2.3.4 nameserver root devnet
|
||||
5.6.7.8 node root devnet
|
||||
9.10.11.12 node root devnet
|
||||
|
||||
3 node(s) in devnet
|
||||
```
|
||||
|
||||
## SSH into a Node
|
||||
|
||||
Connect to any of your nodes by IP:
|
||||
|
||||
```bash
|
||||
# SSH into a node (resolves key from RootWallet automatically)
|
||||
orama ssh 1.2.3.4
|
||||
|
||||
# Specify environment if needed
|
||||
orama ssh 5.6.7.8 --env devnet
|
||||
```
|
||||
|
||||
No manual SSH key management required — RootWallet provides the key.
|
||||
|
||||
## Rolling Upgrades
|
||||
|
||||
Deploy new code to all your nodes with a single command:
|
||||
|
||||
```bash
|
||||
# Build the archive first
|
||||
orama build
|
||||
|
||||
# Roll out to all nodes (followers first, leader last)
|
||||
orama rollout --env devnet
|
||||
|
||||
# Custom delay between nodes (default: 30 seconds)
|
||||
orama rollout --env devnet --delay 60
|
||||
```
|
||||
|
||||
The rollout command:
|
||||
1. Finds the pre-built archive in `/tmp/`
|
||||
2. Pushes it to all your nodes
|
||||
3. Detects the RQLite leader
|
||||
4. Upgrades followers first, leader last
|
||||
5. Checks health between each node
|
||||
|
||||
## Health Status
|
||||
|
||||
Check the health of all your nodes:
|
||||
|
||||
```bash
|
||||
# Quick status table
|
||||
orama status --env devnet
|
||||
|
||||
# JSON output (for automation)
|
||||
orama status --env devnet --json
|
||||
```
|
||||
|
||||
Output:
|
||||
|
||||
```
|
||||
Checking 3 node(s) in devnet...
|
||||
|
||||
IP ROLE STATUS DETAILS
|
||||
1.2.3.4 nameserver healthy
|
||||
5.6.7.8 node healthy
|
||||
9.10.11.12 node healthy
|
||||
|
||||
3/3 nodes healthy
|
||||
```
|
||||
|
||||
For detailed monitoring with alerts, use the full monitor:
|
||||
|
||||
```bash
|
||||
orama monitor report --env devnet
|
||||
```
|
||||
|
||||
## How Node Ownership Works
|
||||
|
||||
When you install a node (via `orama node install` with an invite token), the network records your wallet address as the node's operator. This happens automatically through the invite token system:
|
||||
|
||||
1. You generate an invite token (authenticated with your wallet)
|
||||
2. The new node joins using that token
|
||||
3. The network tags the node with your wallet address from the token
|
||||
|
||||
This means `orama nodes` can query the network for "all nodes owned by my wallet" — no local files to maintain.
|
||||
|
||||
## Migrating from nodes.conf
|
||||
|
||||
If you have existing nodes configured in `scripts/nodes.conf`, migrate them to the wallet-based system:
|
||||
|
||||
```bash
|
||||
orama node migrate-conf --env devnet
|
||||
```
|
||||
|
||||
This reads your nodes.conf, then registers each node with your wallet via the gateway API. After migration, nodes appear in `orama nodes` output.
|
||||
|
||||
The legacy `nodes.conf` continues to work as a fallback — you can migrate at your own pace.
|
||||
|
||||
## Command Reference
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `orama nodes [--env]` | List your nodes |
|
||||
| `orama ssh <ip> [--env]` | SSH into a node |
|
||||
| `orama status [--env] [--json]` | Health check all nodes |
|
||||
| `orama rollout [--env] [--delay]` | Rolling upgrade all nodes |
|
||||
| `orama node migrate-conf [--env]` | Migrate nodes.conf to wallet-based tracking |
|
||||
@ -2,6 +2,8 @@
|
||||
|
||||
This guide walks you through the full process of setting up an Orama node on a fresh VPS — from system requirements through installation, verification, and day-to-day lifecycle management.
|
||||
|
||||
> **Tip:** After installation, manage your nodes with the unified CLI commands: `orama nodes`, `orama ssh`, `orama status`, `orama rollout`. See [Node Management](/docs/operator/node-management) for details.
|
||||
|
||||
## System Requirements
|
||||
|
||||
Provision a VPS with the following minimum specifications:
|
||||
|
||||
@ -114,31 +114,37 @@ Do not proceed to the next node until all checks pass. If a node shows issues af
|
||||
|
||||
---
|
||||
|
||||
## Rollout Command
|
||||
## Rollout Command (Recommended)
|
||||
|
||||
The `orama node rollout` command builds the binary locally and performs a full rolling upgrade in one step. This is the preferred method for deploying code changes.
|
||||
The `orama rollout` command pushes a pre-built archive to all your nodes and performs a rolling upgrade. This is the preferred method for deploying code changes.
|
||||
|
||||
```bash
|
||||
# Build and deploy to all nodes
|
||||
orama node rollout --env testnet
|
||||
# Build the archive first
|
||||
orama build
|
||||
|
||||
# Skip the build step (use existing archive)
|
||||
orama node rollout --env testnet --no-build
|
||||
|
||||
# Skip confirmation prompt
|
||||
orama node rollout --env testnet --yes
|
||||
# Roll out to all your nodes (followers first, leader last)
|
||||
orama rollout --env devnet
|
||||
|
||||
# Custom delay between nodes
|
||||
orama node rollout --env testnet --delay 60
|
||||
orama rollout --env devnet --delay 60
|
||||
```
|
||||
|
||||
| Flag | Default | Description |
|
||||
|------|---------|-------------|
|
||||
| `--env` | *(required)* | Target environment (`devnet`, `testnet`) |
|
||||
| `--no-build` | `false` | Skip build step (use existing archive) |
|
||||
| `--yes` | `false` | Skip confirmation prompt |
|
||||
| `--env` | active env | Target environment |
|
||||
| `--delay` | `30` | Seconds between nodes during rolling upgrade |
|
||||
|
||||
The rollout command discovers your nodes via wallet-based ownership (same nodes shown by `orama nodes`). It falls back to `nodes.conf` if the network API is unavailable.
|
||||
|
||||
### Legacy Rollout
|
||||
|
||||
The `orama node rollout` command is still available and uses `nodes.conf` directly:
|
||||
|
||||
```bash
|
||||
orama node rollout --env testnet
|
||||
orama node rollout --env testnet --no-build --yes --delay 60
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Push Command
|
||||
|
||||
@ -1,8 +1,8 @@
|
||||
# Appendix E: Sample WASM Contract
|
||||
# Appendix E: Sample WASM Contracts
|
||||
|
||||
## Example: Private BTC Transfer Contract (Rust)
|
||||
|
||||
This example shows a WASM smart contract written in Rust that accepts BTC deposits and allows private withdrawals using Orama's privacy toggle.
|
||||
This example shows a WASM smart contract written in Rust that accepts BTC deposits and allows private withdrawals using Orama's privacy toggle. This contract deploys into a namespace.
|
||||
|
||||
```rust
|
||||
use orama_sdk::prelude::*;
|
||||
@ -153,45 +153,183 @@ impl BridgeWatcherAngel {
|
||||
}
|
||||
```
|
||||
|
||||
## Example: SQL Database Query from WASM
|
||||
## Example: Full-Stack App with SQL Database (Rust)
|
||||
|
||||
This example demonstrates Orama's unique namespace model — contracts have access to a real SQL database (RQLite), distributed cache (Olric), and IPFS storage. No external indexing infrastructure needed.
|
||||
|
||||
```rust
|
||||
use orama_sdk::prelude::*;
|
||||
use orama_sdk::sql::Database;
|
||||
use orama_sdk::namespace::{Database, Cache, Storage};
|
||||
|
||||
/// A full-stack marketplace contract deployed into a namespace.
|
||||
/// The namespace provides dedicated SQL, cache, and storage.
|
||||
#[orama_contract]
|
||||
pub struct Marketplace {
|
||||
db: Database,
|
||||
cache: Cache,
|
||||
storage: Storage,
|
||||
}
|
||||
|
||||
#[orama_contract]
|
||||
impl MyApp {
|
||||
/// Store user data in Orama's distributed SQL database
|
||||
pub fn create_user(&mut self, ctx: &Context, name: String, email: String) -> Result<u64> {
|
||||
let db = Database::connect("myapp")?;
|
||||
impl Marketplace {
|
||||
#[init]
|
||||
pub fn new(ctx: &Context) -> Result<Self> {
|
||||
let db = Database::connect()?;
|
||||
|
||||
let user_id = db.execute(
|
||||
"INSERT INTO users (name, email, created_at) VALUES (?, ?, ?)",
|
||||
&[&name, &email, &ctx.block_timestamp().to_string()],
|
||||
)?;
|
||||
// Create tables — real SQL, powered by the namespace's RQLite cluster
|
||||
db.execute("
|
||||
CREATE TABLE IF NOT EXISTS listings (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
seller TEXT NOT NULL,
|
||||
title TEXT NOT NULL,
|
||||
description TEXT,
|
||||
price_rays INTEGER NOT NULL,
|
||||
image_cid TEXT,
|
||||
status TEXT DEFAULT 'active',
|
||||
created_at DATETIME DEFAULT CURRENT_TIMESTAMP
|
||||
)
|
||||
", &[])?;
|
||||
|
||||
Ok(user_id)
|
||||
db.execute("
|
||||
CREATE TABLE IF NOT EXISTS purchases (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
listing_id INTEGER NOT NULL,
|
||||
buyer TEXT NOT NULL,
|
||||
price_rays INTEGER NOT NULL,
|
||||
purchased_at DATETIME DEFAULT CURRENT_TIMESTAMP,
|
||||
FOREIGN KEY (listing_id) REFERENCES listings(id)
|
||||
)
|
||||
", &[])?;
|
||||
|
||||
Ok(Self {
|
||||
db: Database::connect()?,
|
||||
cache: Cache::connect()?,
|
||||
storage: Storage::connect()?,
|
||||
})
|
||||
}
|
||||
|
||||
/// Query users from the distributed SQL database
|
||||
#[view]
|
||||
pub fn get_user(&self, user_id: u64) -> Result<User> {
|
||||
let db = Database::connect("myapp")?;
|
||||
/// List an item for sale — stores metadata in SQL, image in IPFS
|
||||
pub fn create_listing(
|
||||
&mut self,
|
||||
ctx: &Context,
|
||||
title: String,
|
||||
description: String,
|
||||
price_rays: u64,
|
||||
image_bytes: Vec<u8>,
|
||||
) -> Result<u64> {
|
||||
let seller = ctx.caller();
|
||||
|
||||
let row = db.query_one(
|
||||
"SELECT id, name, email FROM users WHERE id = ?",
|
||||
&[&user_id.to_string()],
|
||||
// Store image in the namespace's IPFS storage
|
||||
let image_cid = self.storage.put(&image_bytes)?;
|
||||
|
||||
// Insert listing into the namespace's SQL database
|
||||
let listing_id = self.db.execute(
|
||||
"INSERT INTO listings (seller, title, description, price_rays, image_cid) VALUES (?, ?, ?, ?, ?)",
|
||||
&[&seller.to_string(), &title, &description, &price_rays.to_string(), &image_cid],
|
||||
)?;
|
||||
|
||||
Ok(User {
|
||||
id: row.get("id")?,
|
||||
name: row.get("name")?,
|
||||
email: row.get("email")?,
|
||||
})
|
||||
// Invalidate the cache so the next query gets fresh data
|
||||
self.cache.delete("listings:active")?;
|
||||
|
||||
emit!(ListingCreated {
|
||||
id: listing_id,
|
||||
seller: seller,
|
||||
title: title,
|
||||
price_rays: price_rays,
|
||||
});
|
||||
|
||||
Ok(listing_id)
|
||||
}
|
||||
|
||||
/// Buy a listed item
|
||||
#[payable(ORAMA)]
|
||||
pub fn purchase(&mut self, ctx: &Context, listing_id: u64) -> Result<()> {
|
||||
let buyer = ctx.caller();
|
||||
let payment = ctx.orama_value();
|
||||
|
||||
// Query the listing from SQL
|
||||
let row = self.db.query_one(
|
||||
"SELECT seller, price_rays, status FROM listings WHERE id = ?",
|
||||
&[&listing_id.to_string()],
|
||||
)?;
|
||||
|
||||
let seller: Address = row.get("seller")?;
|
||||
let price: u64 = row.get("price_rays")?;
|
||||
let status: String = row.get("status")?;
|
||||
|
||||
if status != "active" {
|
||||
return Err(Error::Custom("Listing is not active".into()));
|
||||
}
|
||||
if payment < price {
|
||||
return Err(Error::InsufficientPayment);
|
||||
}
|
||||
|
||||
// Transfer payment to seller
|
||||
ctx.transfer_orama(seller, price)?;
|
||||
|
||||
// Update listing status in SQL
|
||||
self.db.execute(
|
||||
"UPDATE listings SET status = 'sold' WHERE id = ?",
|
||||
&[&listing_id.to_string()],
|
||||
)?;
|
||||
|
||||
// Record the purchase
|
||||
self.db.execute(
|
||||
"INSERT INTO purchases (listing_id, buyer, price_rays) VALUES (?, ?, ?)",
|
||||
&[&listing_id.to_string(), &buyer.to_string(), &price.to_string()],
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get active listings — uses cache for performance
|
||||
/// This data is also queryable via the namespace's RPC:
|
||||
/// GET /v1/contracts/{address}/listings?status=active&sort=price_rays&order=asc
|
||||
#[view]
|
||||
pub fn get_active_listings(&self, limit: u32, offset: u32) -> Result<Vec<Listing>> {
|
||||
// Check cache first
|
||||
let cache_key = format!("listings:active:{}:{}", limit, offset);
|
||||
if let Some(cached) = self.cache.get(&cache_key)? {
|
||||
return Ok(cached);
|
||||
}
|
||||
|
||||
// Cache miss — query SQL
|
||||
let rows = self.db.query(
|
||||
"SELECT id, seller, title, price_rays, image_cid, created_at
|
||||
FROM listings WHERE status = 'active'
|
||||
ORDER BY created_at DESC LIMIT ? OFFSET ?",
|
||||
&[&limit.to_string(), &offset.to_string()],
|
||||
)?;
|
||||
|
||||
let listings: Vec<Listing> = rows.iter().map(|r| Listing {
|
||||
id: r.get("id").unwrap(),
|
||||
seller: r.get("seller").unwrap(),
|
||||
title: r.get("title").unwrap(),
|
||||
price_rays: r.get("price_rays").unwrap(),
|
||||
image_cid: r.get("image_cid").unwrap(),
|
||||
}).collect();
|
||||
|
||||
// Cache for 60 seconds
|
||||
self.cache.set(&cache_key, &listings, 60)?;
|
||||
|
||||
Ok(listings)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### What This Contract Gets (From the Namespace)
|
||||
|
||||
This contract deploys into a namespace and automatically receives:
|
||||
|
||||
| Primitive | What It Does | Ethereum Equivalent |
|
||||
|---|---|---|
|
||||
| `Database::connect()` | Real SQL database (RQLite with Raft consensus across 3+ nodes) | Nothing — need The Graph or custom indexer |
|
||||
| `Cache::connect()` | Distributed cache (Olric with consistent hashing) | Nothing — need Redis or Memcached externally |
|
||||
| `Storage::connect()` | IPFS storage (content-addressed, replicated) | Need Pinata/Infura IPFS externally |
|
||||
| Namespace RPC | Rich query API with filtering, sorting, pagination | Need The Graph subgraph |
|
||||
|
||||
**The namespace IS the backend.** No external infrastructure needed.
|
||||
|
||||
## Compiling and Deploying
|
||||
|
||||
```bash
|
||||
@ -205,12 +343,17 @@ cd my-contract
|
||||
# Build to WASM
|
||||
orama build --release
|
||||
|
||||
# Deploy to Orama network (costs gas in $ORAMA)
|
||||
orama deploy --network mainnet ./target/wasm/my_contract.wasm
|
||||
# Deploy to a namespace on the Orama network
|
||||
# This creates a namespace (or uses an existing one) with dedicated
|
||||
# RQLite, Olric, IPFS, and Gateway infrastructure
|
||||
orama deploy --namespace my-marketplace --network mainnet ./target/wasm/my_contract.wasm
|
||||
|
||||
# Call a contract function
|
||||
orama call <contract-address> deposit --value 0.01btc
|
||||
orama call <contract-address> withdraw --private --to <address> --amount 0.005btc
|
||||
orama call my-marketplace::Marketplace create_listing --title "Cool NFT" --price 1000000
|
||||
orama call my-marketplace::Marketplace purchase --listing-id 1 --value 1000000rays
|
||||
|
||||
# Query via the namespace's RPC (no indexer needed)
|
||||
curl "https://my-marketplace.orama.network/v1/contracts/Marketplace/listings?status=active&sort=price_rays"
|
||||
```
|
||||
|
||||
## Supported Languages
|
||||
@ -223,4 +366,4 @@ While these examples are in Rust, any language that compiles to WebAssembly can
|
||||
- **C/C++** (via Emscripten)
|
||||
- **Python** (experimental)
|
||||
|
||||
The Orama SDK provides bindings for all supported languages, giving access to the full set of on-chain primitives (SQL, KV store, IPFS, BTC bridge, DEX, AI Marketplace). As new languages gain WebAssembly compilation support, they become available for Orama contract development automatically.
|
||||
The Orama SDK provides bindings for all supported languages, giving access to the full set of namespace primitives (SQL, KV cache, IPFS, BTC bridge, DEX, AI Marketplace). As new languages gain WebAssembly compilation support, they become available for Orama contract development automatically.
|
||||
|
||||
454
whitepaper/APPENDIX_G_TECHNICAL_ARCHITECTURE.md
Normal file
454
whitepaper/APPENDIX_G_TECHNICAL_ARCHITECTURE.md
Normal file
@ -0,0 +1,454 @@
|
||||
# Appendix G: Technical Architecture
|
||||
|
||||
This appendix covers the implementation-level technical decisions for the Orama blockchain. These choices are informed by the design principles in the main whitepaper and reflect the specific tradeoffs appropriate for Orama's unique two-layer architecture (global chain + namespaces).
|
||||
|
||||
## 1. Dual-State Account Model
|
||||
|
||||
Every address on Orama has a single account with two compartments:
|
||||
|
||||
```
|
||||
Account {
|
||||
Address [20]byte // Account identifier (secp256k1-derived, Ethereum-compatible)
|
||||
Username string // Optional, immutable once claimed, 3-32 chars [a-z0-9_-]
|
||||
Nonce uint64 // Replay protection
|
||||
|
||||
// Public compartment — visible to everyone
|
||||
OramaBalance uint64 // Balance in rays (1 $ORAMA = 1,000,000 rays)
|
||||
BTCBalance uint64 // Balance in satoshis
|
||||
CodeHash [32]byte // Hash of contract WASM bytecode (zero for non-contracts)
|
||||
StorageRoot [32]byte // Merkle root of contract KV storage
|
||||
|
||||
// Private compartment (Phase 2 — zero hashes at genesis)
|
||||
CommitmentRoot [32]byte // Merkle root of hidden value commitments
|
||||
NullifierRoot [32]byte // Merkle root of spent-tracking nullifiers
|
||||
}
|
||||
```
|
||||
|
||||
### Signatures & Addresses
|
||||
|
||||
- **Curve**: secp256k1 (same as Bitcoin and Ethereum)
|
||||
- **Signature**: 65 bytes (r, s, v) — ECDSA
|
||||
- **Address**: 20 bytes, derived from `Keccak256(public_key)[12:]` (Ethereum-compatible)
|
||||
- **Display format**: `orama:` prefix for human readability (e.g., `orama:742d35Cc...`)
|
||||
- **Compatibility**: Same keys and addresses work in RootWallet, Ethereum, and any hardware wallet
|
||||
|
||||
### Usernames
|
||||
|
||||
Orama accounts can optionally claim a **permanent, human-readable username** tied to their address. Usernames are a protocol-level feature, not a smart contract.
|
||||
|
||||
- **Optional**: Accounts can exist without a username. Claim anytime via a `ClaimUsername` transaction.
|
||||
- **Immutable**: Once claimed, a username can never be changed, transferred, or sold.
|
||||
- **Unique**: 1:1 mapping between username and address.
|
||||
- **Format**: Lowercase alphanumeric, hyphens, underscores. 3-32 characters.
|
||||
- **Free**: Only costs standard transaction gas.
|
||||
- **Resolvable**: Send to `@alex` instead of `orama:742d35Cc...`. The chain resolves the lookup natively.
|
||||
|
||||
The SMT stores a reverse-lookup leaf at `hash(username)` → `address`, enabling O(1) resolution.
|
||||
|
||||
### Phase 1 (Testnet): Public Only
|
||||
|
||||
All transactions operate on the public compartment. `CommitmentRoot` and `NullifierRoot` are zero hashes. The account structure ships with these fields from genesis — no migration or fork is needed when privacy is activated.
|
||||
|
||||
### Phase 2 (Post-PLONK Ceremony): Privacy Activation
|
||||
|
||||
The private compartment activates. Funds can be moved between compartments:
|
||||
|
||||
- **Shield** (public → private): Deduct from `OramaBalance`, add a Pedersen commitment to the account's commitment tree.
|
||||
- **Unshield** (private → public): Spend a commitment (publish its nullifier), add to `OramaBalance`. A PLONK proof verifies the commitment was valid.
|
||||
- **Private transfer**: Spend a commitment in sender's account, create a new commitment in receiver's account. PLONK proof verifies validity without revealing sender, receiver, or amount.
|
||||
|
||||
### Why Not a Separate Shielded Pool?
|
||||
|
||||
Other chains (Ethereum + Tornado Cash, Aztec) bolt privacy onto an account model via an external "shielded pool" contract. This creates:
|
||||
- UX friction (explicit deposit/withdraw from pool)
|
||||
- A privacy leak at the boundary (observers see pool deposits/withdrawals)
|
||||
- Two incompatible codepaths for wallets
|
||||
|
||||
Orama's dual-state model avoids this: privacy is native to each account. One account, two views, no external pool.
|
||||
|
||||
## 2. State Tree: Sparse Merkle Tree (SMT)
|
||||
|
||||
All state is organized in a Sparse Merkle Tree — a binary tree with 256 levels where most branches are empty (sparse). Each account lives at a leaf determined by `hash(address)`.
|
||||
|
||||
```
|
||||
Global State SMT (root goes in block header)
|
||||
│
|
||||
├── leaf[hash(Alice)] → Account data (serialized)
|
||||
├── leaf[hash(Bob)] → Account data
|
||||
├── leaf[hash(Contract)] → Account data
|
||||
│ └── StorageRoot → Contract Storage SMT
|
||||
│ ├── key1 → value1
|
||||
│ ├── key2 → value2
|
||||
│ └── ...
|
||||
└── (2^256 - N empty leaves, represented by known zero hashes)
|
||||
```
|
||||
|
||||
### Properties
|
||||
|
||||
| Property | Value |
|
||||
|---|---|
|
||||
| Depth | 256 levels (fixed) |
|
||||
| Leaf position | `SHA256(address)` |
|
||||
| Proof size | 256 × 32 bytes = 8 KB (compressible to ~1-2 KB) |
|
||||
| Non-existence proofs | Built-in (prove an account does NOT exist) |
|
||||
| Algorithm complexity | ~500 lines of Go |
|
||||
|
||||
### Why SMT Over Alternatives
|
||||
|
||||
| Tree | Used By | Pros | Cons |
|
||||
|---|---|---|---|
|
||||
| Merkle Patricia Trie | Ethereum | Battle-tested 10+ years | Complex (~3000 lines), deep trees, Ethereum is migrating away from it |
|
||||
| **Sparse Merkle Tree** | **Orama**, Celestia, Mina | Simple, fixed depth, non-existence proofs, predictable performance | Larger raw proofs (mitigated by compression) |
|
||||
| Verkle Tree | Ethereum (future) | Tiny proofs (~150 bytes) | Requires elliptic curve math, not yet shipped in production, quantum-vulnerable |
|
||||
|
||||
SMT's built-in non-existence proofs are critical for the privacy layer — proving a nullifier hasn't been spent requires proving a key does NOT exist in the nullifier tree.
|
||||
|
||||
### Nested SMTs
|
||||
|
||||
The same algorithm is used at every level of the state:
|
||||
|
||||
1. **Global State SMT** — accounts indexed by `hash(address)`, root in block header
|
||||
2. **Contract Storage SMT** — per-contract KV data, root in account's `StorageRoot`
|
||||
3. **Commitment SMT** (Phase 2) — per-account hidden commitments, root in `CommitmentRoot`
|
||||
4. **Nullifier SMT** (Phase 2) — per-account spent nullifiers, root in `NullifierRoot`
|
||||
|
||||
One data structure. One algorithm. Clean.
|
||||
|
||||
## 3. State Storage: BadgerDB
|
||||
|
||||
The underlying key-value store for all chain state is **BadgerDB** — a pure Go, high-performance, LSM-tree-based database.
|
||||
|
||||
### Why BadgerDB
|
||||
|
||||
| Requirement | BadgerDB |
|
||||
|---|---|
|
||||
| Pure Go (no CGO) | Yes — compiles cleanly on any platform including OramaOS |
|
||||
| Concurrent reads | Lock-free MVCC reads |
|
||||
| Batched writes | Transaction-based batch writes |
|
||||
| Crash safety | Write-ahead log with checksums |
|
||||
| Used in blockchain | IPFS, libp2p ecosystem (already in Orama's dependency tree) |
|
||||
| Maintenance | Actively maintained by Dgraph team |
|
||||
|
||||
### Storage Layout
|
||||
|
||||
```
|
||||
/var/lib/orama/chain/
|
||||
├── blocks/ # Block storage (BadgerDB)
|
||||
│ └── (block headers, transaction lists, receipts)
|
||||
├── state/ # Current state (BadgerDB)
|
||||
│ └── (SMT nodes, account data, contract storage)
|
||||
└── index/ # Query indexes (optional, per-node)
|
||||
└── (tx-by-address, blocks-by-height, etc.)
|
||||
```
|
||||
|
||||
The `blocks/` and `state/` databases are consensus-critical — every node computes identical content. The `index/` database is per-node, non-consensus, and supports the rich query RPC layer.
|
||||
|
||||
## 4. Serialization: Borsh
|
||||
|
||||
All data that is hashed, stored, or transmitted over the wire uses **Borsh** (Binary Object Representation Serializer for Hashing) — a deterministic binary serialization format created specifically for blockchain use.
|
||||
|
||||
### Why Borsh
|
||||
|
||||
| Requirement | Borsh | Protobuf | RLP (Ethereum) |
|
||||
|---|---|---|---|
|
||||
| Deterministic | By design | No (maps unordered) | By design |
|
||||
| Code generation | None (struct tags) | Required (.proto files) | None |
|
||||
| Schema | Implicit from struct | External .proto | None |
|
||||
| Created for | Blockchain hashing | RPC/APIs | Ethereum |
|
||||
| Compact | Yes | Yes | Yes |
|
||||
|
||||
### Example
|
||||
|
||||
```go
|
||||
type Block struct {
|
||||
Height uint64 `borsh:"height"`
|
||||
ParentHash [32]byte `borsh:"parent_hash"`
|
||||
StateRoot [32]byte `borsh:"state_root"`
|
||||
Timestamp uint64 `borsh:"timestamp"`
|
||||
ProposerAddr [20]byte `borsh:"proposer"`
|
||||
Transactions []Transaction `borsh:"transactions"`
|
||||
QC QuorumCert `borsh:"qc"`
|
||||
}
|
||||
|
||||
// Deterministic: same data always produces identical bytes
|
||||
bytes := borsh.Serialize(block)
|
||||
blockHash := sha256(bytes)
|
||||
```
|
||||
|
||||
## 5. Consensus: HotStuff BFT
|
||||
|
||||
The global chain uses a pipelined BFT protocol based on HotStuff. See Section 4 of the main whitepaper for the full protocol description.
|
||||
|
||||
### Key Implementation Details
|
||||
|
||||
**Message types:**
|
||||
|
||||
| Message | Direction | Content |
|
||||
|---|---|---|
|
||||
| `Propose` | Leader → All | Block + QC for previous round |
|
||||
| `Vote` | Validator → Next Leader | Signed block hash + validator ID |
|
||||
| `NewView` | Validator → New Leader | Timeout certificate (on leader failure) |
|
||||
|
||||
**Quorum Certificate (QC):** An aggregation of 2/3+ votes (by Effective Power) for a specific block. The QC is the proof that consensus was reached.
|
||||
|
||||
**View change:** If the current leader fails to propose within the timeout (configurable, default 4 seconds), validators send a `NewView` message to the next leader in the rotation. The next leader can immediately propose, including the timeout certificate as justification. No rounds are wasted on view-change voting.
|
||||
|
||||
**Pipelining:** Votes for block N form the QC for block N (carried in block N+1), which completes the three-chain and finalizes block N-2. This means:
|
||||
- Block produced: every 6 seconds
|
||||
- Block finalized: 18 seconds later (3 blocks)
|
||||
- No dead time between rounds
|
||||
|
||||
### P2P Transport
|
||||
|
||||
Consensus messages are Borsh-serialized and transported over LibP2P pubsub topics, running on top of the WireGuard encrypted mesh. All validator-to-validator communication is authenticated and encrypted at the network layer.
|
||||
|
||||
**Topics:**
|
||||
|
||||
| Topic | Content |
|
||||
|---|---|
|
||||
| `/orama/chain/1/blocks` | Block proposals from leaders |
|
||||
| `/orama/chain/1/votes` | HotStuff votes from validators |
|
||||
| `/orama/chain/1/txs` | New transactions (mempool gossip) |
|
||||
| `/orama/chain/1/newview` | View-change messages (leader timeout) |
|
||||
|
||||
**Message envelope:**
|
||||
```
|
||||
Message {
|
||||
Type: uint8 // Block, Vote, Transaction, NewView
|
||||
Payload: []byte // Borsh-serialized content
|
||||
Sender: [20]byte // Sender address
|
||||
Signature: [65]byte // ECDSA signature over payload
|
||||
}
|
||||
```
|
||||
|
||||
## 6. Namespace Architecture
|
||||
|
||||
Namespaces are provisioned from the global node pool. They come in tiers based on trust requirements.
|
||||
|
||||
### Namespace Tiers
|
||||
|
||||
| Tier | Name | Purpose | Blockchain Interaction |
|
||||
|---|---|---|---|
|
||||
| 0 | Cloud | App deployment (what namespaces are today) | None |
|
||||
| 1 | Secured | Smart contracts, tokens, NFTs, anything with value | State commitments to global chain, staked validators |
|
||||
| 2 | Trustless (future) | High-value protocols needing maximum security | ZK validity proofs |
|
||||
|
||||
**Tier 0** is the default. Existing namespaces are Tier 0. No changes to current behavior. Developers deploy apps just like today — no blockchain knowledge needed.
|
||||
|
||||
**Tier 1** adds verifiable state on top of the same infrastructure. It's Tier 0 plus three additions: a StateDB (BadgerDB + SMT) for cryptographic state proofs, a TxLog for replay/verification, and staked validators who submit state commitments to the global chain every epoch.
|
||||
|
||||
### Tier 0 Cluster Composition (unchanged)
|
||||
|
||||
| Service | Nodes | Purpose |
|
||||
|---|---|---|
|
||||
| RQLite | 3+ | SQL database with Raft consensus |
|
||||
| Olric | 3+ | Distributed KV cache |
|
||||
| Gateway | 3+ | HTTP/WebSocket API, WASM VM, RPC |
|
||||
| IPFS | Shared | Content-addressed storage |
|
||||
| SFU + TURN | Optional | WebRTC voice/video |
|
||||
|
||||
### Tier 1 Cluster Composition (adds verifiable state)
|
||||
|
||||
| Service | Nodes | Purpose |
|
||||
|---|---|---|
|
||||
| RQLite | 3+ | SQL database — developer queries, powers the RPC |
|
||||
| **StateDB** | **3+ (per node)** | **BadgerDB + SMT — verifiable state, produces Merkle root** |
|
||||
| **TxLog** | **3+ (per node)** | **Append-only operation log for replay/verification** |
|
||||
| Olric | 3+ | Distributed KV cache |
|
||||
| Gateway | 3+ | HTTP/WebSocket API, WASM VM, RPC |
|
||||
| IPFS | Shared | Content-addressed storage |
|
||||
| SFU + TURN | Optional | WebRTC voice/video |
|
||||
|
||||
**How the three storage layers work together in Tier 1:**
|
||||
|
||||
- **RQLite** = the read layer. Developers query with SQL. The RPC serves data from it. Fast and flexible, but not cryptographically verifiable.
|
||||
- **StateDB** = the proof layer. Stores the same data in a Sparse Merkle Tree (BadgerDB). Produces the state fingerprint (Merkle root) that gets committed to the global chain. Verifiable but not queryable with SQL.
|
||||
- **TxLog** = the replay layer. Ordered list of every state-changing operation. If someone challenges the namespace, they replay the log, compute the expected state, and compare with the submitted fingerprint.
|
||||
|
||||
Contracts write to both RQLite and StateDB atomically. They always agree on the current state.
|
||||
|
||||
### State Commitment Flow
|
||||
|
||||
The commitment mechanism strengthens across phases:
|
||||
|
||||
**Testnet: Attested Commitment**
|
||||
```
|
||||
1. Namespace executes transactions against its local state
|
||||
2. At each epoch boundary, the namespace computes a state root:
|
||||
hash(rqlite_state || olric_state || contract_storage || metadata)
|
||||
3. 2/3+ of the namespace's validators sign the state root
|
||||
4. Submit StateCommitment transaction to the global chain:
|
||||
{ namespace_id, epoch, state_root, validator_signatures }
|
||||
5. Global chain records the commitment in the namespace registry
|
||||
```
|
||||
|
||||
**Mainnet: Optimistic with Challenge Period**
|
||||
```
|
||||
1-4. Same as above
|
||||
5. State root enters "pending" state for ~100 blocks (10 minutes)
|
||||
6. During pending period, anyone can submit a fraud proof:
|
||||
→ provide the transactions + correct state root
|
||||
→ if challenge is valid, namespace validators are slashed, state root rejected
|
||||
7. After challenge period with no valid challenge → state root finalized
|
||||
```
|
||||
|
||||
**Future: ZK Validity Proofs**
|
||||
```
|
||||
1-2. Same as above
|
||||
3. Namespace generates a zero-knowledge proof that state transitions are correct
|
||||
4. Submit proof + new state root to global chain
|
||||
5. Global chain verifies proof (~5ms) → immediately finalized
|
||||
```
|
||||
|
||||
### Namespace Provisioning
|
||||
|
||||
Namespace clusters are provisioned dynamically from the global node pool. Each physical node can host multiple namespace instances (up to 20 per node, constrained by port allocation). The cluster manager handles:
|
||||
|
||||
- Node selection (balanced load distribution)
|
||||
- Port allocation (dedicated port block per namespace)
|
||||
- Service bootstrapping (RQLite → Olric → Gateway, in dependency order)
|
||||
- DNS record creation (namespace subdomain)
|
||||
- Health monitoring and recovery
|
||||
|
||||
This infrastructure already exists in the Orama Network codebase (`pkg/namespace/`).
|
||||
|
||||
## 7. Block Structure
|
||||
|
||||
```
|
||||
Block {
|
||||
// Header
|
||||
Height: uint64
|
||||
ParentHash: [32]byte
|
||||
StateRoot: [32]byte // SMT root after executing all transactions
|
||||
TransactionsRoot:[32]byte // Merkle root of transaction list
|
||||
ReceiptsRoot: [32]byte // Merkle root of execution receipts
|
||||
Timestamp: uint64 // Unix timestamp (seconds)
|
||||
ProposerAddr: [20]byte // Block proposer address
|
||||
EpochNumber: uint64 // Current epoch
|
||||
QC: QuorumCert // Quorum Certificate from previous round
|
||||
|
||||
// Body
|
||||
Transactions: []Transaction
|
||||
}
|
||||
|
||||
Transaction {
|
||||
Type: uint8 // Transfer, Stake, Unstake, DEXOrder, BridgeDeposit,
|
||||
// BridgeWithdraw, NamespaceCommit, GovernanceVote, ...
|
||||
From: [20]byte
|
||||
To: [20]byte
|
||||
Amount: uint64 // In rays or satoshis depending on asset
|
||||
Asset: uint8 // 0 = $ORAMA, 1 = BTC
|
||||
Nonce: uint64
|
||||
GasLimit: uint64
|
||||
GasTipCap: uint64 // Priority fee (EIP-1559)
|
||||
GasFeeCap: uint64 // Max total fee (EIP-1559)
|
||||
Data: []byte // Type-specific payload
|
||||
Signature: [65]byte // ECDSA signature (secp256k1)
|
||||
}
|
||||
```
|
||||
|
||||
## 8. RPC API
|
||||
|
||||
Orama uses a **REST API** as its primary interface. No JSON-RPC.
|
||||
|
||||
### Global Chain Endpoints
|
||||
|
||||
```
|
||||
GET /v1/accounts/{address_or_username}
|
||||
GET /v1/accounts/{address_or_username}/balance
|
||||
GET /v1/accounts/{address_or_username}/transactions?limit=20&sort=timestamp&order=desc
|
||||
POST /v1/transactions/send
|
||||
GET /v1/transactions/{hash}
|
||||
GET /v1/blocks/{height_or_hash}
|
||||
GET /v1/blocks/latest
|
||||
GET /v1/dex/orderbook
|
||||
GET /v1/dex/orders?owner=@alex
|
||||
POST /v1/dex/orders
|
||||
GET /v1/bridge/status
|
||||
GET /v1/validators
|
||||
GET /v1/chain/status
|
||||
```
|
||||
|
||||
### Namespace Endpoints (served by namespace gateway)
|
||||
|
||||
```
|
||||
GET /v1/namespace/{name}/contracts
|
||||
GET /v1/namespace/{name}/contracts/{address}/state/{key}
|
||||
POST /v1/namespace/{name}/contracts/{address}/call
|
||||
GET /v1/namespace/{name}/query?sql=SELECT...
|
||||
```
|
||||
|
||||
Usernames are resolvable in any endpoint: `@alex` is equivalent to `orama:742d35Cc...`.
|
||||
|
||||
All responses support filtering, sorting, and pagination via query parameters. This matches the existing Gateway pattern in `pkg/gateway/`.
|
||||
|
||||
## 9. WASM Contract Host Functions
|
||||
|
||||
These are the "system calls" available to WASM contracts running in a namespace.
|
||||
|
||||
### Database (RQLite)
|
||||
```
|
||||
orama.db.execute(query_ptr, query_len, params_ptr, params_len) → result_ptr
|
||||
orama.db.query(query_ptr, query_len, params_ptr, params_len) → result_ptr
|
||||
orama.db.query_one(query_ptr, query_len, params_ptr, params_len) → result_ptr
|
||||
```
|
||||
|
||||
### Cache (Olric)
|
||||
```
|
||||
orama.cache.get(key_ptr, key_len) → result_ptr
|
||||
orama.cache.set(key_ptr, key_len, val_ptr, val_len, ttl_seconds) → status
|
||||
orama.cache.delete(key_ptr, key_len) → status
|
||||
```
|
||||
|
||||
### Storage (IPFS)
|
||||
```
|
||||
orama.storage.put(data_ptr, data_len) → cid_ptr
|
||||
orama.storage.get(cid_ptr, cid_len) → data_ptr
|
||||
```
|
||||
|
||||
### Token Operations (global chain interaction)
|
||||
```
|
||||
orama.transfer_orama(to_ptr, amount) → status
|
||||
orama.transfer_btc(to_ptr, amount) → status
|
||||
orama.get_balance(address_ptr, asset) → amount
|
||||
```
|
||||
|
||||
### Context (read-only)
|
||||
```
|
||||
orama.ctx.caller() → address_ptr
|
||||
orama.ctx.block_height() → uint64
|
||||
orama.ctx.block_timestamp() → uint64
|
||||
orama.ctx.orama_value() → uint64
|
||||
orama.ctx.btc_value() → uint64
|
||||
```
|
||||
|
||||
### Events, Logging, Crypto
|
||||
```
|
||||
orama.emit(event_ptr, event_len) → status
|
||||
orama.log.info(msg_ptr, msg_len)
|
||||
orama.log.error(msg_ptr, msg_len)
|
||||
orama.crypto.sha256(data_ptr, data_len, out_ptr)
|
||||
orama.crypto.keccak256(data_ptr, data_len, out_ptr)
|
||||
orama.crypto.verify_signature(msg_ptr, msg_len, sig_ptr, addr_ptr) → bool
|
||||
```
|
||||
|
||||
**Design principles:**
|
||||
- No filesystem, no raw network, no non-deterministic operations
|
||||
- `block_timestamp` is the block's timestamp (deterministic), not system time
|
||||
- No `random` — use `block_hash` as a deterministic seed if needed
|
||||
- Contracts write SQL directly — no ORM or query builder in the host layer
|
||||
- Events are indexed by the namespace gateway for rich RPC queries
|
||||
|
||||
## 10. Implementation Language
|
||||
|
||||
The entire blockchain layer is implemented in **Go**, consistent with the existing Orama Network codebase. Key dependencies:
|
||||
|
||||
| Dependency | Purpose | Status |
|
||||
|---|---|---|
|
||||
| BadgerDB | State and block storage | To be added |
|
||||
| wazero | WASM contract execution | Already in project |
|
||||
| LibP2P | P2P networking, gossip | Already in project |
|
||||
| go-ethereum/crypto | secp256k1 signatures | Already in project |
|
||||
| gnark (future) | PLONK zk-SNARKs | Phase 2 |
|
||||
|
||||
The blockchain packages will be added to the existing monorepo under `pkg/chain/`.
|
||||
@ -1,6 +1,6 @@
|
||||
# Orama Network: The Eternal Decentralized Computer and Financial System
|
||||
|
||||
**Whitepaper Version 3.0**
|
||||
**Whitepaper Version 4.0**
|
||||
**Date:** March 2026
|
||||
**Author:** DeBros
|
||||
|
||||
@ -9,10 +9,11 @@
|
||||
Orama Network is a standalone Layer-1 blockchain designed to serve as humanity's eternal decentralized computer and financial system. It combines the security and scarcity of Bitcoin with the full power of a global, censorship-resistant cloud infrastructure — all in one protocol.
|
||||
|
||||
Built from first principles for a 1,000-year horizon, Orama delivers:
|
||||
- **Native BTC compatibility** from genesis (deposit, use, and withdraw BTC with Bitcoin-level security).
|
||||
- **Native BTC compatibility** from genesis (deposit, use, and withdraw BTC with progressively trust-minimized security).
|
||||
- **Namespace-based execution** — smart contracts deploy into isolated environments with dedicated SQL databases, caches, storage, and API gateways. No shared bottleneck. No external indexing infrastructure. The namespace IS the backend.
|
||||
- **Pure WASM smart contracts** so developers can write in any language they want (Rust, Go, TypeScript, C++, and any language that compiles to WebAssembly).
|
||||
- **Per-transaction public/private toggle** using PLONK zk-SNARKs for optional privacy.
|
||||
- **Hybrid consensus** (Proof-of-Stake + Proof of Contribution + Proof of Infrastructure) that gives real power to ordinary people running nodes with OramaOS.
|
||||
- **Per-transaction public/private toggle** using PLONK zk-SNARKs for optional privacy, built natively into the dual-state account model.
|
||||
- **HotStuff-based BFT consensus** with Hybrid PoS + Proof of Contribution + Proof of Infrastructure, giving real power to ordinary people running nodes with OramaOS.
|
||||
- **210 million $ORAMA** hard-capped supply with zero pre-mine — 100% of tokens are earned through mining, just like Bitcoin.
|
||||
|
||||
Orama is not an upgrade to existing chains. It is the base layer that millions of people and billions of devices will rely on for compute, storage, payments, and data ownership for centuries to come.
|
||||
@ -24,21 +25,79 @@ Centralized cloud providers control the internet's infrastructure. They can cens
|
||||
Existing Layer-1 blockchains force developers into rigid languages, expensive gas models, or centralized validator sets. Most projects also suffer from unfair token launches, infinite inflation, or governance capture.
|
||||
|
||||
Orama solves both problems at once:
|
||||
- It is the **decentralized world computer** — distributed SQL, KV store, IPFS, serverless functions, and compute — all running on a global mesh of real hardware.
|
||||
- It is the **decentralized world computer** — but unlike Ethereum's "one global computer" model where every validator executes every contract, Orama gives each application its own isolated execution environment (namespace) with dedicated databases, caches, storage, and APIs. No application can clog another. No external infrastructure needed.
|
||||
- It is the **Bitcoin-grade financial system** — BTC-only economy, native BTC bridge, scarce $ORAMA token, and per-transaction privacy.
|
||||
|
||||
## 3. Orama Network Solution & High-Level Architecture
|
||||
|
||||
Orama is a single Layer-1 chain with two tightly integrated layers that can never be separated:
|
||||
Orama is a single Layer-1 chain with a unique two-layer architecture that solves the biggest problem in blockchain: **shared resource contention**. On Ethereum, a viral game clogs the entire network. On Orama, every application gets its own isolated infrastructure.
|
||||
|
||||
1. **Immutable Financial Core** (BTC + $ORAMA economics) — designed to be unchangeable for 1,000 years.
|
||||
2. **Modular Decentralized Compute Layer** (WASM execution + primitives) — upgradable via governance but never able to break the money layer.
|
||||
### The Two Layers
|
||||
|
||||
1. **Global Chain** (Immutable Financial Core) — all nodes participate. Handles $ORAMA and BTC balances, the native DEX, the BTC bridge, staking, slashing, and the namespace registry. Designed to be unchangeable for 1,000 years.
|
||||
|
||||
2. **Namespaces** (Isolated Execution Environments) — smart contracts deploy into namespaces, not onto the global chain. Each namespace is a dedicated cluster of nodes with its own database, cache, storage, API gateway, and WASM execution engine. Namespaces periodically commit state roots to the global chain, providing cryptographic proof that their execution is correct.
|
||||
|
||||
```
|
||||
┌──────────────────────────────────────────────────────────────┐
|
||||
│ GLOBAL CHAIN (all nodes participate via HotStuff consensus) │
|
||||
│ │
|
||||
│ • $ORAMA / BTC balances • Namespace registry │
|
||||
│ • Native DEX order book • State root commitments │
|
||||
│ • BTC bridge • Staking & slashing │
|
||||
│ • Block rewards & emission • Governance │
|
||||
└─────────────┬──────────────────┬──────────────────┬──────────┘
|
||||
│ │ │
|
||||
┌────────▼───────┐ ┌───────▼────────┐ ┌──────▼─────────┐
|
||||
│ Namespace A │ │ Namespace B │ │ Namespace C │
|
||||
│ "GameFi App" │ │ "DeFi Proto" │ │ "NFT Market" │
|
||||
│ │ │ │ │ │
|
||||
│ Own DB │ │ Own DB │ │ Own DB │
|
||||
│ Own cache │ │ Own cache │ │ Own cache │
|
||||
│ Own gateway │ │ Own gateway │ │ Own gateway │
|
||||
│ Own WASM VM │ │ Own WASM VM │ │ Own WASM VM │
|
||||
│ Own IPFS │ │ Own IPFS │ │ Own IPFS │
|
||||
└────────────────┘ └────────────────┘ └────────────────┘
|
||||
```
|
||||
|
||||
### Why This Architecture
|
||||
|
||||
Every other Layer-1 blockchain uses a single global state machine — every validator executes every smart contract. This creates a fundamental bottleneck: all applications compete for the same block space, the same throughput, and the same compute. A popular NFT mint can make a DeFi protocol unusable.
|
||||
|
||||
Orama's namespace model eliminates this problem by design:
|
||||
|
||||
- **No shared bottleneck** — each namespace runs on its own cluster of nodes with dedicated resources. One namespace cannot affect another.
|
||||
- **Real developer infrastructure** — contracts in a namespace have access to a real SQL database (RQLite with Raft consensus), a real distributed cache (Olric), real IPFS storage, and a dedicated API gateway with rich query capabilities. No external indexing infrastructure needed.
|
||||
- **Natural scaling** — adding nodes to the network allows more namespaces to be provisioned. No sharding complexity, no rollup escape hatches.
|
||||
- **The global chain stays lean** — consensus only processes financial transactions and state commitments, not contract execution. This means higher throughput for the operations that matter most.
|
||||
|
||||
### Namespace Trust Model
|
||||
|
||||
Namespaces commit cryptographic state roots to the global chain every epoch. The global chain does not re-execute namespace transactions — it verifies that the committed state is correct.
|
||||
|
||||
- **Testnet & Mainnet V1**: Namespace nodes are staked validators. If they commit an incorrect state root, they are slashed. The number of nodes per namespace scales with the value at stake (minimum 3, higher for high-value protocols).
|
||||
- **Mainnet V2 (future)**: Optimistic fraud proofs — a challenge period during which anyone can prove a state root is incorrect, triggering slashing.
|
||||
- **Long-term (future)**: ZK validity proofs — namespaces generate zero-knowledge proofs that their state transitions are correct. Trustless verification without re-execution.
|
||||
|
||||
All nodes run on real hardware with OramaOS, creating true "power to the people" instead of stake-weighted whales.
|
||||
|
||||
## 4. Consensus Mechanism
|
||||
|
||||
Orama uses a **Hybrid PoS + Proof of Contribution + Proof of Infrastructure** model.
|
||||
Orama uses a **HotStuff-based BFT consensus protocol** combined with a **Hybrid PoS + Proof of Contribution + Proof of Infrastructure** validator selection model. The consensus protocol determines how blocks are produced and agreed upon. The hybrid model determines who gets to participate and how rewards are distributed.
|
||||
|
||||
### Consensus Protocol: HotStuff BFT
|
||||
|
||||
The global chain uses a pipelined BFT (Byzantine Fault Tolerant) protocol based on HotStuff. Unlike classical BFT protocols (Tendermint/PBFT) that require O(n²) messages per block — where every validator talks to every other validator — HotStuff achieves O(n) message complexity through leader-driven voting. This scales cleanly to 300+ validators today, with room to grow past 1,000.
|
||||
|
||||
**How a block is produced (every 6 seconds):**
|
||||
|
||||
1. **Leader Selection** — a block proposer is selected deterministically, weighted by Effective Power. Every node computes the same leader independently.
|
||||
2. **Propose** — the leader collects transactions from the mempool, executes them against the current state, computes the new state root, and broadcasts the block.
|
||||
3. **Vote** — all validators verify the block (re-execute transactions, confirm the state root matches) and send a signed vote to the next leader.
|
||||
4. **Aggregate** — the next leader collects votes. When 2/3+ of Effective Power has voted, a Quorum Certificate (QC) is formed.
|
||||
5. **Finalize** — the QC is included in the next block, finalizing the block from 2 rounds ago.
|
||||
|
||||
This pipelined approach means a new block is proposed every 6 seconds, with finality achieved in 18 seconds (3 blocks). There is no dead time between rounds.
|
||||
|
||||
### Effective Power Formula
|
||||
$$
|
||||
@ -58,12 +117,14 @@ A node runner with modest stake but perfect uptime, real contribution, and Orama
|
||||
### Contribution Score (weighted every 1-hour epoch)
|
||||
- Uptime: 40%
|
||||
- Bandwidth served: 30%
|
||||
- Compute/storage/SQL queries served: 20%
|
||||
- Compute/storage/namespace queries served: 20%
|
||||
- Low latency & reliability: 10%
|
||||
|
||||
**Block time**: 6 seconds (14,400 blocks per day)
|
||||
**Block capacity**: 1,000 transactions per block
|
||||
**Epoch length**: 1 hour (600 blocks per epoch)
|
||||
**Finality**: 18 seconds (3 blocks via HotStuff pipeline)
|
||||
**Epoch checkpoints**: Every epoch, an additional BFT checkpoint is signed by 2/3+ of Effective Power as an extra layer of irreversibility.
|
||||
**Minimum stake to validate**: 1,000 $ORAMA (mainnet only — see bootstrap below)
|
||||
**Slashing**:
|
||||
- Double-signing or cheating → 100% slash
|
||||
@ -72,9 +133,16 @@ A node runner with modest stake but perfect uptime, real contribution, and Orama
|
||||
|
||||
OramaOS attestation uses TPM-based remote attestation — cryptographically verified on-chain.
|
||||
|
||||
### Finality
|
||||
### Why HotStuff Over Other Protocols
|
||||
|
||||
Orama achieves finality through **BFT checkpointing**: at the end of each epoch, validators holding at least two-thirds of total Effective Power sign a checkpoint. Once a checkpoint is signed, all transactions within that epoch are irreversible. This provides 1-hour finality with cryptographic guarantees — no epoch can be reorganized once checkpointed.
|
||||
| Protocol | Message Complexity | Finality | Max Validators | Used By |
|
||||
|---|---|---|---|---|
|
||||
| PBFT / Tendermint | O(n²) | 1 block (~6s) | ~200 | Cosmos |
|
||||
| **HotStuff (Orama)** | **O(n)** | **3 blocks (~18s)** | **1,000+** | **Orama, Aptos (variant)** |
|
||||
| Nakamoto (PoW) | O(n) | ~60 min (6 blocks) | Unlimited | Bitcoin |
|
||||
| DAG-based | O(n) | 1-2 rounds | 1,000+ | Sui |
|
||||
|
||||
HotStuff gives Orama the best balance: linear scaling for 300+ validators today with room to grow to 1,000+, near-instant finality, and a clean leader rotation mechanism that integrates naturally with the Effective Power model.
|
||||
|
||||
### Staking Bootstrap
|
||||
|
||||
@ -86,17 +154,92 @@ For new node runners joining after mainnet: acquire BTC, bridge it onto Orama, p
|
||||
|
||||
## 5. Network Primitives & Execution Environment
|
||||
|
||||
**Execution VM**: Pure WebAssembly (WASM) from genesis.
|
||||
Developers can write smart contracts in **any language** that compiles to WebAssembly. No EVM, no Solidity required.
|
||||
### Dual-State Account Model
|
||||
|
||||
First-class on-chain primitives (callable directly from WASM contracts):
|
||||
- Distributed SQL database
|
||||
- Key-Value + IPFS storage
|
||||
- Serverless compute functions
|
||||
- Native BTC bridge
|
||||
- AI Marketplace (see below)
|
||||
Every address on Orama has a single account with two compartments:
|
||||
|
||||
Gas is always paid in $ORAMA. Base fee is burned. All primitives integrate seamlessly with the public/private toggle.
|
||||
```
|
||||
Account {
|
||||
address: 0xABC...
|
||||
nonce: 42
|
||||
|
||||
// Public compartment — visible to everyone
|
||||
public: {
|
||||
orama_balance: 500,000,000 rays
|
||||
btc_balance: 50,000 sats
|
||||
code_hash: 0x... // for contract accounts
|
||||
storage_root: 0x... // Merkle root of contract storage
|
||||
}
|
||||
|
||||
// Private compartment (Phase 2 — activated post-PLONK ceremony)
|
||||
private: {
|
||||
commitment_root: 0x... // Merkle root of hidden value commitments
|
||||
nullifier_root: 0x... // Merkle root of spent-tracking nullifiers
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Public transactions** operate on the public compartment — visible, fast, and cheap. **Private transactions** (Phase 2) operate on the private compartment using PLONK zk-SNARKs, with funds moved between compartments via explicit shield/unshield operations.
|
||||
|
||||
This dual-state design is unique to Orama. Unlike hybrid approaches that bolt a separate "shielded pool" onto an account model, Orama's privacy is native to each account — one account, two views, no external pool contract. See Section 6 for details.
|
||||
|
||||
### Smart Contract Execution: Namespace Model
|
||||
|
||||
**Execution VM**: Pure WebAssembly (WASM) from genesis. Developers can write smart contracts in **any language** that compiles to WebAssembly — Rust, Go, TypeScript, C++, and more. No EVM, no Solidity required.
|
||||
|
||||
Unlike other blockchains where every validator executes every contract on a single shared state machine, **Orama smart contracts deploy into namespaces** — isolated execution environments with dedicated infrastructure.
|
||||
|
||||
**What a namespace provides to a smart contract:**
|
||||
|
||||
| Primitive | Implementation | What It Gives Developers |
|
||||
|---|---|---|
|
||||
| **SQL Database** | RQLite (distributed SQL with Raft consensus) | Real SQL queries — SELECT, INSERT, UPDATE, DELETE with JOINs, indexes, and transactions |
|
||||
| **Key-Value Cache** | Olric (distributed, consistent hashing) | Sub-millisecond reads, TTL-based expiry, perfect for hot data |
|
||||
| **Object Storage** | IPFS (content-addressed, replicated) | Store files, images, metadata — addressed by content hash |
|
||||
| **API Gateway** | Dedicated HTTP/WebSocket gateway | Rich query API with filtering, pagination, sorting — no external indexer needed |
|
||||
| **WASM Runtime** | wazero (pure Go WebAssembly engine) | Sandboxed execution with host functions for all primitives |
|
||||
| **WebRTC** | Pion SFU + TURN (optional) | Real-time voice, video, and data channels |
|
||||
|
||||
**This solves Ethereum's biggest developer pain point.** On Ethereum, deploying a contract is only the beginning — you then need The Graph for indexing, Alchemy for RPC, IPFS pinning services for storage, and a separate backend for anything that requires a database. On Orama, the namespace IS your backend. Deploy a contract and you immediately have a database, cache, storage, and a queryable API.
|
||||
|
||||
### How Contract Deployment Works
|
||||
|
||||
```
|
||||
1. Developer writes a WASM contract (Rust, Go, TypeScript, etc.)
|
||||
2. Developer deploys to a namespace (creates one or uses existing)
|
||||
3. The namespace provisions a dedicated cluster:
|
||||
- 3+ nodes with RQLite, Olric, IPFS access, Gateway, WASM VM
|
||||
- Dedicated ports, DNS subdomain, isolated from other namespaces
|
||||
4. The contract runs in the namespace's WASM engine
|
||||
5. The namespace commits state roots to the global chain every epoch
|
||||
6. The global chain records the commitment — it does not re-execute
|
||||
```
|
||||
|
||||
### Rich Query RPC
|
||||
|
||||
Every namespace's API gateway serves powerful, filterable queries over contract data — eliminating the need for external indexing infrastructure:
|
||||
|
||||
```
|
||||
GET /v1/contracts/{address}/state/users?sort=created_at&order=desc&limit=20
|
||||
GET /v1/transactions?address=0xABC...&limit=50
|
||||
GET /v1/nfts?owner=0xABC...&collection=0xDEF...
|
||||
GET /v1/blocks/12345
|
||||
```
|
||||
|
||||
The node indexes contract state changes locally and serves them through the RPC. No Graph. No subgraphs. No external infrastructure. One API call.
|
||||
|
||||
### Global Chain Primitives
|
||||
|
||||
The following primitives live on the global chain (not in namespaces) because they are part of the immutable financial core:
|
||||
|
||||
- **$ORAMA and BTC transfers** — direct balance changes on the global state
|
||||
- **Native DEX order book** — the $ORAMA/BTC trading pair (see Section 9)
|
||||
- **BTC bridge** — deposit and withdrawal of BTC (see Section 7)
|
||||
- **Staking and slashing** — validator economics
|
||||
- **Governance** — on-chain voting (see Section 12)
|
||||
- **Namespace registry** — which namespaces exist, their owners, and their committed state roots
|
||||
|
||||
Gas for global chain transactions is paid in $ORAMA. Base fee is burned. All global primitives integrate seamlessly with the public/private toggle.
|
||||
|
||||
### AI Marketplace & Angels
|
||||
|
||||
@ -112,7 +255,7 @@ Orama has a native **AI Marketplace** — a protocol-level primitive for hosting
|
||||
- Pay per use in $ORAMA — transparent pricing, competitive marketplace.
|
||||
|
||||
**For Angel builders:**
|
||||
- Deploy autonomous AI agents that can interact with Orama's on-chain primitives (SQL, storage, cache, BTC bridge, DEX).
|
||||
- Deploy autonomous AI agents that can interact with Orama's namespace primitives (SQL, storage, cache) and global chain primitives (BTC bridge, DEX).
|
||||
- Angels can hold $ORAMA, execute transactions, manage data, and interact with other Angels.
|
||||
- Revenue model: builders set per-request or subscription pricing in $ORAMA.
|
||||
|
||||
@ -157,15 +300,46 @@ If someone wants to buy $ORAMA, they acquire BTC (anywhere in the world), bridge
|
||||
|
||||
### Trust-Minimized BTC Bridge
|
||||
|
||||
Orama has a **trust-minimized BTC bridge built into the protocol from genesis**.
|
||||
Orama has a **native BTC bridge built into the protocol from genesis**, designed to progressively increase its trust guarantees as the network matures.
|
||||
|
||||
- Deposit BTC → receive native BTC on Orama (1:1).
|
||||
- Use BTC to buy $ORAMA, pay for services, or use in smart contracts.
|
||||
- Withdraw back to Bitcoin mainnet with Bitcoin-level security.
|
||||
- Security model: Bitcoin light-client + zk-proofs + BitVM-style fraud proofs (1-of-N honest assumption).
|
||||
- Withdraw back to Bitcoin mainnet.
|
||||
|
||||
The bridge is further backed by a **protocol reserve** — BTC accumulated from bonding curve sales (see Section 9). This reserve provides additional collateral beyond the 1:1 deposits, ensuring the bridge remains solvent even under extreme conditions.
|
||||
|
||||
### Bridge Phases
|
||||
|
||||
The bridge security model strengthens over time, from validator-secured to cryptographically verified:
|
||||
|
||||
**Testnet → Mainnet V1: Validator Threshold Bridge**
|
||||
|
||||
The top validators by Effective Power form a threshold signature group. Deposits are detected by validators watching the Bitcoin chain; withdrawals require a supermajority (e.g., 7-of-10) of the validator group to sign the Bitcoin transaction. Validators are staked — cheating means losing their entire stake. This model is proven, ships fast, and enables the full BTC economy (DEX, bonding curve, bridge fees) from day one.
|
||||
|
||||
**Mainnet V2: Light Client + Optimistic Fraud Proofs**
|
||||
|
||||
A Bitcoin SPV (Simplified Payment Verification) light client is embedded in the Orama protocol. The chain verifies Bitcoin block headers and Merkle inclusion proofs natively — deposits are verified cryptographically with no human attestation required.
|
||||
|
||||
Withdrawals use an optimistic model: a withdrawal is posted and enters a challenge period (e.g., 24 hours). If any validator can prove the withdrawal is invalid, it is reverted and the submitter is slashed. Security assumption: **1-of-N honest** — as long as one honest watcher exists in the entire world, fraud cannot succeed.
|
||||
|
||||
**Long-term: Cryptographic Validity Proofs**
|
||||
|
||||
As BTC bridge research matures (BitVM, ZK validity proofs), the bridge upgrades to fully cryptographic verification. No challenge period — the proof IS the verification. This is the ultimate trust-minimized design.
|
||||
|
||||
**Each phase is a strict upgrade** — the bridge fee structure, user experience, and economic model remain identical. Only the trust model under the hood improves.
|
||||
|
||||
### Testing
|
||||
|
||||
The bridge code is environment-aware, connecting to different Bitcoin networks per Orama environment:
|
||||
|
||||
| Orama Environment | Bitcoin Network | BTC Source |
|
||||
|---|---|---|
|
||||
| Devnet | Bitcoin Regtest (local) | Mine instantly, unlimited test BTC |
|
||||
| Testnet | Bitcoin Testnet4 (public) | Free from faucets, zero value |
|
||||
| Mainnet | Bitcoin Mainnet | Real BTC |
|
||||
|
||||
The same bridge code runs across all environments — only the configuration changes.
|
||||
|
||||
### Bridge Fee
|
||||
|
||||
**Fee: 0.25%** of every bridge transaction (deposit or withdrawal).
|
||||
@ -218,15 +392,13 @@ $ORAMA uses a fixed block reward with a Bitcoin-style halving:
|
||||
|
||||
**Genesis fee schedule:**
|
||||
|
||||
| Operation | Cost |
|
||||
|---|---|
|
||||
| $ORAMA / BTC transfer | 1,000 rays (0.001 $ORAMA) |
|
||||
| WASM contract execution | 1,000 rays per 1M instructions |
|
||||
| SQL query | 500 rays |
|
||||
| IPFS storage | 10,000 rays per MB |
|
||||
| KV store read/write | 200 rays |
|
||||
| Private transaction (zk-SNARK) | 4× the public equivalent |
|
||||
| DEX order book trade | 1,000 rays |
|
||||
| Operation | Cost | Layer |
|
||||
|---|---|---|
|
||||
| $ORAMA / BTC transfer | 1,000 rays (0.001 $ORAMA) | Global chain |
|
||||
| DEX order book trade | 1,000 rays | Global chain |
|
||||
| Namespace state commitment | 2,000 rays | Global chain |
|
||||
| Private transaction (zk-SNARK) | 4× the public equivalent | Global chain |
|
||||
| Namespace operations (SQL, KV, IPFS, WASM) | Paid via namespace billing in $ORAMA | Namespace |
|
||||
|
||||
**Congestion multiplier:** Fees adjust dynamically based on block fullness (EIP-1559 model). When blocks are at 50% capacity (~500 transactions), the multiplier is 1×. As blocks fill toward the 1,000 transaction limit, the multiplier rises (up to 10×). When blocks are under half full, it drops below 1×. This prevents spam during peak demand and keeps fees low during normal usage.
|
||||
|
||||
@ -241,7 +413,7 @@ As usage grows and emissions shrink, $ORAMA becomes increasingly deflationary
|
||||
|
||||
## 9. Native DEX & Liquidity
|
||||
|
||||
Orama does not rely on external exchanges. The chain has its own **protocol-native exchange** built in as a first-class primitive, the same way it has native SQL, IPFS, and compute.
|
||||
Orama does not rely on external exchanges. The chain has its own **protocol-native exchange** built into the global chain as a first-class primitive.
|
||||
|
||||
### The Bootstrap Problem
|
||||
|
||||
@ -306,7 +478,7 @@ Any holder of $ORAMA can place sell orders. Any holder of BTC (bridged onto Oram
|
||||
|
||||
### Permissionless WASM DEX Contracts
|
||||
|
||||
The protocol-native order book handles the core pair: **$ORAMA/BTC**. For tokens created on Orama via WASM contracts, anyone can deploy AMMs or order books as WASM smart contracts. Custom tokens trade against $ORAMA — creating a clear asset hierarchy:
|
||||
The protocol-native order book handles the core pair: **$ORAMA/BTC**. For tokens created on Orama via WASM contracts deployed in namespaces, anyone can deploy AMMs or order books as WASM smart contracts. Custom tokens trade against $ORAMA — creating a clear asset hierarchy:
|
||||
|
||||
```
|
||||
BTC (bridged from Bitcoin mainnet)
|
||||
@ -356,11 +528,36 @@ More bridge usage → more BTC fees collected
|
||||
→ more attention on Orama → more users → more bridge usage
|
||||
```
|
||||
|
||||
## 11. Fungible Tokens & Native L2 Scaling
|
||||
## 11. Token Standards & Scaling
|
||||
|
||||
- **NFTs**: Native WASM standards with privacy support. Metadata stored on Orama IPFS/KV. Anyone can mint and trade NFTs on Orama.
|
||||
- **Fungible tokens**: Issued via WASM smart contracts. $ORAMA remains the only gas token.
|
||||
- **L2 Scaling**: Native support for optimistic and zk-rollups. L2 tokens settle finality on Orama L1. Gas on L2 can be paid in L2 token or $ORAMA.
|
||||
### OTS-1: Orama Token Standard (Fungible Tokens)
|
||||
|
||||
Fungible tokens are WASM contracts deployed in namespaces that implement the **OTS-1** interface. Designed from scratch to fix ERC-20's known problems:
|
||||
|
||||
- **No unlimited approvals** — the operator model requires an explicit spending limit. No "approve MAX_UINT" footgun that leads to wallet drains.
|
||||
- **Memo field on transfers** — attach context (invoice ID, payment reason) to any transfer.
|
||||
- **Explicit revoke** — `revoke_operator()` is a first-class operation, not `approve(addr, 0)`.
|
||||
- **Global token registry** — tokens register on the global chain so wallets and explorers discover them. Symbol uniqueness is enforced (no impersonation).
|
||||
- **Queryable** — token balances, transfer history, and holder lists are queryable via the namespace RPC. No external indexer needed.
|
||||
|
||||
$ORAMA and BTC are global chain native assets — they are not OTS-1 contracts.
|
||||
|
||||
### ONS-1: Orama NFT Standard (Non-Fungible Tokens)
|
||||
|
||||
NFTs are WASM contracts deployed in namespaces that implement the **ONS-1** interface. Designed from scratch to fix ERC-721's known problems:
|
||||
|
||||
- **On-chain metadata** — stored in the namespace's database and IPFS. No external URLs that can break. Queryable by attribute via SQL.
|
||||
- **Batch operations** — `batch_transfer` and `batch_mint` are part of the standard. Moving 50 NFTs = 1 transaction.
|
||||
- **Built-in royalties** — `royalty_info()` is mandatory, not optional. Enforced by marketplaces.
|
||||
- **Enumerable by default** — `tokens_of(owner)` is part of the standard. List someone's NFTs without an indexer.
|
||||
- **Two-level operators** — collection-wide approval OR per-token approval.
|
||||
- **Global NFT registry** — collections register on the global chain for discovery.
|
||||
|
||||
**DeBros NFTs** (100 Team + 700 Community) are a special case — they live on the global chain (not in a namespace) because they carry governance power and bridge revenue rights. They are minted once at mainnet genesis from the Solana snapshot.
|
||||
|
||||
### Scaling
|
||||
|
||||
Orama's namespace architecture provides natural horizontal scaling — each namespace is an isolated execution environment with dedicated resources. Adding nodes to the network allows more namespaces to be provisioned. No sharding, no rollup escape hatches. For extreme-scale use cases, namespaces can optionally implement optimistic or zk-rollup patterns internally, with finality settling on the global chain.
|
||||
|
||||
## 12. Governance
|
||||
|
||||
@ -444,24 +641,35 @@ Most blockchains have governance captured by whales or controlled by a handful o
|
||||
|
||||
- **51% attack**: Requires controlling a majority of Effective Power — which means real uptime, real contribution, and real stake. An attacker can't just buy tokens; they need physical infrastructure and months of contribution history. This makes attacks orders of magnitude more expensive than pure PoS chains.
|
||||
- **Nothing-at-stake**: Prevented by double-slashing — validators who sign conflicting blocks lose both their stake and their accumulated contribution score. The contribution score takes months to build, making it a meaningful deterrent.
|
||||
- **Long-range attacks**: BFT checkpoints are finalized every epoch (1 hour) by two-thirds of Effective Power. Reorganizing beyond the last checkpoint is impossible.
|
||||
- **Long-range attacks**: HotStuff BFT finalizes every block within 3 rounds (18 seconds). Additionally, epoch-level checkpoints are signed by 2/3+ of Effective Power every hour. Reorganizing beyond a finalized block is impossible.
|
||||
- **Sybil attacks**: OramaOS attestation is verified via TPM — an attacker can't fake infrastructure multipliers without the real hardware and software.
|
||||
- **Leader failure**: HotStuff's view-change mechanism handles unresponsive leaders cleanly — if the selected proposer fails to produce a block within the timeout, the next leader takes over without stalling the chain.
|
||||
|
||||
### BTC Bridge Security
|
||||
|
||||
- **Bridge deposits**: Verified via Bitcoin light-client embedded in the Orama protocol. The chain validates Bitcoin block headers and Merkle proofs natively.
|
||||
- **Bridge withdrawals**: Protected by zk-proofs + BitVM-style fraud proofs with a 1-of-N honest assumption — if even one validator is honest, fraudulent withdrawals are caught and reverted.
|
||||
The bridge security model strengthens across phases (see Section 7):
|
||||
|
||||
- **Phase 1 (testnet)**: Validator threshold signatures — staked validators sign bridge operations. Cheating means losing their entire stake.
|
||||
- **Phase 2 (mainnet)**: Bitcoin SPV light client verifies deposits cryptographically. Withdrawals use optimistic fraud proofs with a challenge period (1-of-N honest assumption).
|
||||
- **Phase 3 (future)**: Full cryptographic validity proofs — trustless verification with no challenge period.
|
||||
- **Protocol reserve**: BTC accumulated from bonding curve sales provides additional collateral beyond 1:1 deposits.
|
||||
- **Bridge halt**: If anomalous withdrawal patterns are detected (e.g., more than 10% of bridged BTC withdrawn in a single epoch), the bridge automatically pauses and requires a Tier 1 governance vote to resume.
|
||||
|
||||
### Namespace Security
|
||||
|
||||
- **Isolation**: Each namespace runs on a dedicated cluster with its own database, cache, and gateway. A compromised or malicious contract in one namespace cannot affect any other namespace or the global chain.
|
||||
- **State commitments**: Namespaces commit state roots to the global chain. Invalid state roots result in slashing of the namespace's validator nodes.
|
||||
- **Staked validators**: Namespace nodes are staked — the economic cost of attacking a namespace is proportional to the stake at risk.
|
||||
- **Scaling trust**: High-value namespaces can require more validator nodes (e.g., 5 or 10 instead of the default 3), increasing the cost of collusion.
|
||||
|
||||
### DEX & Order Book Security
|
||||
|
||||
- **Front-running prevention**: Order book transactions within the same block are processed in a randomized order, not by gas price. This eliminates MEV (Maximal Extractable Value) — block proposers cannot reorder transactions to front-run traders.
|
||||
- **Front-running prevention**: Order book transactions within the same block are processed in a randomized order (using the block hash as a deterministic seed), not by gas price. This eliminates MEV (Maximal Extractable Value) — block proposers cannot reorder transactions to front-run traders.
|
||||
- **Price manipulation**: The bonding curve provides a reference price that cannot be manipulated by wash trading on the order book.
|
||||
|
||||
### Network Security
|
||||
|
||||
- **Encrypted mesh**: All inter-node communication is encrypted via VPN tunnel. Internal services are never exposed on public IPs.
|
||||
- **Encrypted mesh**: All inter-node communication is encrypted via WireGuard VPN tunnel. Internal services (database, cache, gateways) are never exposed on public IPs.
|
||||
- **OramaOS hardening**: No SSH, read-only rootfs, service sandboxing — the attack surface per node is minimal (see Section 14).
|
||||
- **Forged attestation**: Nodes submitting fake infrastructure proofs are slashed 50% and permanently flagged.
|
||||
|
||||
@ -542,12 +750,13 @@ Orama does not launch mainnet until a minimum of **300 independent nodes** are r
|
||||
| Risk | Severity | Mitigation |
|
||||
|---|---|---|
|
||||
| **51% attack** | High | Proof of Infrastructure requires real uptime + contribution, not just stake. TPM attestation prevents fake nodes. |
|
||||
| **BTC bridge exploit** | Critical | Bitcoin light-client verification, zk-proofs, BitVM fraud proofs, automatic bridge halt on anomalous withdrawals, protocol reserve as additional collateral. |
|
||||
| **BTC bridge exploit** | Critical | Phased security model: validator threshold signatures (testnet) → Bitcoin light-client + optimistic fraud proofs (mainnet) → cryptographic validity proofs (future). Automatic bridge halt on anomalous withdrawals. Protocol reserve as additional collateral. |
|
||||
| **Governance capture** | High | NFT holders control 75% of voting power. Quadratic voting for token holders prevents whale dominance. Immutable financial core cannot be changed by any vote. |
|
||||
| **Quantum computing** | Medium | Post-quantum signature upgrade on roadmap. PLONK proof system can be upgraded to quantum-resistant circuits via universal setup. |
|
||||
| **Regulatory risk** | Medium | Fully decentralized, no single legal entity. OramaOS nodes have no remote access — even the operator can't be compelled to modify the software. |
|
||||
| **AI Marketplace abuse** | Medium | Compute nodes capped at 10% of network. Marketplace is purely opt-in. Malicious models can be flagged via governance. |
|
||||
| **Bonding curve manipulation** | Low | Curve price is mathematical (√n) — cannot be manipulated. Order book has randomized transaction ordering to prevent front-running. |
|
||||
| **Namespace isolation failure** | Medium | Each namespace runs on a dedicated cluster with separate database, cache, and gateway. State roots are committed to the global chain and verified. Namespace validators are staked — incorrect state roots trigger slashing. |
|
||||
|
||||
The protocol is designed to outlive any single person, company, or government.
|
||||
|
||||
@ -571,3 +780,4 @@ Together we build the eternal system.
|
||||
- [Appendix D: PLONK Trusted Setup Ceremony Specification](APPENDIX_D_PLONK_SETUP.md)
|
||||
- [Appendix E: Sample WASM Contract](APPENDIX_E_SAMPLE_CONTRACT.md)
|
||||
- [Appendix F: Effective Power & Slashing Math](APPENDIX_F_MATH_PROOFS.md)
|
||||
- [Appendix G: Technical Architecture](APPENDIX_G_TECHNICAL_ARCHITECTURE.md)
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user