mirror of
https://github.com/DeBrosOfficial/orama.git
synced 2026-05-01 05:04:13 +00:00
feat(cli): add node setup command
- implement automated VPS bootstrapping for Orama nodes
- add SSH key management via rootwallet
- support genesis node creation and cluster joining via invite tokens
This commit is contained in:
parent
c27faa02fa
commit
ab1be4105c
@ -33,4 +33,5 @@ func init() {
|
|||||||
Cmd.AddCommand(enrollCmd)
|
Cmd.AddCommand(enrollCmd)
|
||||||
Cmd.AddCommand(unlockCmd)
|
Cmd.AddCommand(unlockCmd)
|
||||||
Cmd.AddCommand(migrateConfCmd)
|
Cmd.AddCommand(migrateConfCmd)
|
||||||
|
Cmd.AddCommand(setupCmd)
|
||||||
}
|
}
|
||||||
|
|||||||
46
core/pkg/cli/cmd/node/setup.go
Normal file
46
core/pkg/cli/cmd/node/setup.go
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
package node
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/cli/production/setup"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
var setupOpts setup.Options
|
||||||
|
|
||||||
|
var setupCmd = &cobra.Command{
|
||||||
|
Use: "setup",
|
||||||
|
Short: "Set up a fresh VPS as an Orama node",
|
||||||
|
Long: `Bootstrap a fresh VPS into a running Orama node in one command.
|
||||||
|
|
||||||
|
Creates an SSH key in rootwallet, installs it on the VPS, uploads the binary
|
||||||
|
archive, and runs the node install. For the first node, use --genesis to
|
||||||
|
create a new cluster.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
# Genesis node (first node, creates new cluster)
|
||||||
|
orama node setup --ip 1.2.3.4 --password 'vps-pass' --env devnet \
|
||||||
|
--base-domain orama-devnet.network --role nameserver --genesis
|
||||||
|
|
||||||
|
# Join existing cluster
|
||||||
|
orama node setup --ip 5.6.7.8 --password 'vps-pass' --env devnet \
|
||||||
|
--base-domain orama-devnet.network
|
||||||
|
|
||||||
|
# Join as nameserver
|
||||||
|
orama node setup --ip 9.10.11.12 --password 'vps-pass' --env devnet \
|
||||||
|
--base-domain orama-devnet.network --role nameserver`,
|
||||||
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
return setup.Run(setupOpts)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
setupCmd.Flags().StringVar(&setupOpts.IP, "ip", "", "Public IP address of the VPS (required)")
|
||||||
|
setupCmd.Flags().StringVar(&setupOpts.Env, "env", "", "Target environment (default: active)")
|
||||||
|
setupCmd.Flags().StringVar(&setupOpts.Role, "role", "node", "Node role: node or nameserver")
|
||||||
|
setupCmd.Flags().StringVar(&setupOpts.User, "user", "root", "SSH user on the VPS")
|
||||||
|
setupCmd.Flags().StringVar(&setupOpts.Password, "password", "", "One-time password for initial SSH access")
|
||||||
|
setupCmd.Flags().StringVar(&setupOpts.BaseDomain, "base-domain", "", "Base domain for the network")
|
||||||
|
setupCmd.Flags().BoolVar(&setupOpts.Genesis, "genesis", false, "Create a new cluster (first node)")
|
||||||
|
setupCmd.Flags().BoolVar(&setupOpts.AnyoneRelay, "anyone-relay", false, "Run as Anyone relay operator")
|
||||||
|
setupCmd.MarkFlagRequired("ip")
|
||||||
|
}
|
||||||
331
core/pkg/cli/production/setup/command.go
Normal file
331
core/pkg/cli/production/setup/command.go
Normal file
@ -0,0 +1,331 @@
|
|||||||
|
// Package setup implements the "orama node setup" command — a single command
|
||||||
|
// to bootstrap a fresh VPS into a running Orama node.
|
||||||
|
//
|
||||||
|
// Flow:
|
||||||
|
// 1. Create SSH key in rootwallet vault for this node
|
||||||
|
// 2. Install the public key on the VPS (one-time password-based SSH)
|
||||||
|
// 3. Upload the binary archive
|
||||||
|
// 4. For genesis: run install without --join
|
||||||
|
// 5. For joining: request invite token via operator API, run install with --join
|
||||||
|
package setup
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/auth"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/cli"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/cli/remotessh"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/inspector"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/rwagent"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Options holds the flags for the setup command.
|
||||||
|
type Options struct {
|
||||||
|
IP string
|
||||||
|
Env string
|
||||||
|
Role string // "node" or "nameserver"
|
||||||
|
User string // SSH user (default: "root")
|
||||||
|
Password string // One-time password for initial SSH access
|
||||||
|
BaseDomain string
|
||||||
|
Genesis bool // If true, create a new cluster instead of joining
|
||||||
|
AnyoneRelay bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run executes the node setup.
|
||||||
|
func Run(opts Options) error {
|
||||||
|
if opts.IP == "" {
|
||||||
|
return fmt.Errorf("--ip is required")
|
||||||
|
}
|
||||||
|
if opts.User == "" {
|
||||||
|
opts.User = "root"
|
||||||
|
}
|
||||||
|
if opts.Role == "" {
|
||||||
|
opts.Role = "node"
|
||||||
|
}
|
||||||
|
|
||||||
|
// 1. Ensure rootwallet agent is running
|
||||||
|
fmt.Println("Checking rootwallet agent...")
|
||||||
|
agentClient := rwagent.New(os.Getenv("RW_AGENT_SOCK"))
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
status, err := agentClient.Status(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("rootwallet agent not reachable: %w (is the desktop app running?)", err)
|
||||||
|
}
|
||||||
|
if status.Locked {
|
||||||
|
return fmt.Errorf("rootwallet agent is locked — unlock it in the desktop app first")
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. Get operator wallet address
|
||||||
|
addrData, err := agentClient.GetAddress(ctx, "evm")
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to get wallet address: %w", err)
|
||||||
|
}
|
||||||
|
fmt.Printf(" Wallet: %s\n", addrData.Address)
|
||||||
|
|
||||||
|
// 3. Create SSH key in rootwallet vault for this node
|
||||||
|
vaultTarget := fmt.Sprintf("%s/%s", opts.IP, opts.User)
|
||||||
|
fmt.Printf(" Setting up SSH key for %s...\n", vaultTarget)
|
||||||
|
|
||||||
|
if err := remotessh.EnsureVaultEntry(vaultTarget); err != nil {
|
||||||
|
return fmt.Errorf("failed to create SSH key in vault: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pubKey, err := remotessh.ResolveVaultPublicKey(vaultTarget)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to get public key: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// 4. Install the public key on the VPS via password SSH
|
||||||
|
if opts.Password != "" {
|
||||||
|
fmt.Printf(" Installing SSH key on %s...\n", opts.IP)
|
||||||
|
if err := installPublicKey(opts.IP, opts.User, opts.Password, pubKey); err != nil {
|
||||||
|
return fmt.Errorf("failed to install SSH key: %w", err)
|
||||||
|
}
|
||||||
|
fmt.Println(" SSH key installed")
|
||||||
|
} else {
|
||||||
|
fmt.Println(" No --password provided, assuming SSH key is already installed")
|
||||||
|
}
|
||||||
|
|
||||||
|
// 5. Test SSH with rootwallet key
|
||||||
|
fmt.Println(" Testing SSH connection...")
|
||||||
|
node := inspector.Node{
|
||||||
|
Host: opts.IP,
|
||||||
|
User: opts.User,
|
||||||
|
VaultTarget: vaultTarget,
|
||||||
|
Environment: opts.Env,
|
||||||
|
Role: opts.Role,
|
||||||
|
}
|
||||||
|
nodes := []inspector.Node{node}
|
||||||
|
cleanup, err := remotessh.PrepareNodeKeys(nodes)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to prepare SSH key: %w", err)
|
||||||
|
}
|
||||||
|
defer cleanup()
|
||||||
|
node = nodes[0] // SSHKey is now set
|
||||||
|
|
||||||
|
testResult := inspector.RunSSH(context.Background(), node, "echo ok")
|
||||||
|
if !testResult.OK() {
|
||||||
|
return fmt.Errorf("SSH test failed: %s", testResult.Stderr)
|
||||||
|
}
|
||||||
|
fmt.Println(" SSH connection OK")
|
||||||
|
|
||||||
|
// 6. Check if binary archive needs uploading
|
||||||
|
if needsArchiveUpload(node) {
|
||||||
|
archivePath := findNewestArchive()
|
||||||
|
if archivePath == "" {
|
||||||
|
return fmt.Errorf("no binary archive found in /tmp/ (run `orama build` first)")
|
||||||
|
}
|
||||||
|
fmt.Printf(" Uploading archive (%s)...\n", filepath.Base(archivePath))
|
||||||
|
if err := remotessh.UploadFile(node, archivePath, "/tmp/archive.tar.gz"); err != nil {
|
||||||
|
return fmt.Errorf("failed to upload archive: %w", err)
|
||||||
|
}
|
||||||
|
extractCmd := "sudo bash -c 'mkdir -p /opt/orama && tar xzf /tmp/archive.tar.gz -C /opt/orama && rm -f /tmp/archive.tar.gz'"
|
||||||
|
if err := remotessh.RunSSHStreaming(node, extractCmd); err != nil {
|
||||||
|
return fmt.Errorf("failed to extract archive: %w", err)
|
||||||
|
}
|
||||||
|
fmt.Println(" Archive extracted")
|
||||||
|
} else {
|
||||||
|
fmt.Println(" Binary already present on node")
|
||||||
|
}
|
||||||
|
|
||||||
|
// 7. Build the install command
|
||||||
|
installCmd, err := buildInstallCommand(opts, node, agentClient)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to build install command: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("\n Running: %s\n\n", installCmd)
|
||||||
|
|
||||||
|
// 8. Run the install
|
||||||
|
if err := remotessh.RunSSHStreaming(node, installCmd); err != nil {
|
||||||
|
return fmt.Errorf("install failed: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("\n Node %s setup complete!\n", opts.IP)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// installPublicKey installs an SSH public key on a VPS using password authentication.
|
||||||
|
func installPublicKey(ip, user, password, pubKey string) error {
|
||||||
|
sshpassBin, err := findBinary("sshpass")
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("sshpass is required for password-based SSH key installation: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure .ssh directory exists and install the key
|
||||||
|
cmd := fmt.Sprintf(
|
||||||
|
`mkdir -p ~/.ssh && chmod 700 ~/.ssh && echo '%s' >> ~/.ssh/authorized_keys && chmod 600 ~/.ssh/authorized_keys && echo 'key installed'`,
|
||||||
|
strings.TrimSpace(pubKey),
|
||||||
|
)
|
||||||
|
|
||||||
|
args := []string{
|
||||||
|
"-p", password,
|
||||||
|
"ssh",
|
||||||
|
"-o", "StrictHostKeyChecking=no",
|
||||||
|
"-o", "ConnectTimeout=10",
|
||||||
|
fmt.Sprintf("%s@%s", user, ip),
|
||||||
|
cmd,
|
||||||
|
}
|
||||||
|
|
||||||
|
out, err := runCommand(sshpassBin, args...)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("sshpass failed: %w (%s)", err, out)
|
||||||
|
}
|
||||||
|
if !strings.Contains(out, "key installed") {
|
||||||
|
return fmt.Errorf("unexpected output: %s", out)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildInstallCommand constructs the `sudo orama node install` command.
|
||||||
|
func buildInstallCommand(opts Options, node inspector.Node, agentClient *rwagent.Client) (string, error) {
|
||||||
|
parts := []string{"sudo /opt/orama/bin/orama node install"}
|
||||||
|
parts = append(parts, "--vps-ip", opts.IP)
|
||||||
|
|
||||||
|
if opts.BaseDomain != "" {
|
||||||
|
parts = append(parts, "--base-domain", opts.BaseDomain)
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.HasPrefix(opts.Role, "nameserver") {
|
||||||
|
parts = append(parts, "--nameserver")
|
||||||
|
if opts.BaseDomain != "" {
|
||||||
|
parts = append(parts, "--domain", opts.BaseDomain)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.AnyoneRelay {
|
||||||
|
parts = append(parts, "--anyone-relay")
|
||||||
|
} else {
|
||||||
|
parts = append(parts, "--anyone-client")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !opts.Genesis {
|
||||||
|
// Get gateway URL and invite token
|
||||||
|
env := opts.Env
|
||||||
|
if env == "" {
|
||||||
|
active, err := cli.GetActiveEnvironment()
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("failed to get active environment: %w", err)
|
||||||
|
}
|
||||||
|
env = active.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
envConfig, err := cli.GetEnvironmentByName(env)
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("environment %q not found: %w", env, err)
|
||||||
|
}
|
||||||
|
gatewayURL := envConfig.GatewayURL
|
||||||
|
|
||||||
|
// Request invite token via operator API
|
||||||
|
token, err := requestInviteToken(gatewayURL)
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("failed to get invite token: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
parts = append(parts, "--join", gatewayURL, "--token", token)
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.Join(parts, " "), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// requestInviteToken calls POST /v1/operator/invite to get an invite token.
|
||||||
|
func requestInviteToken(gatewayURL string) (string, error) {
|
||||||
|
store, err := auth.LoadEnhancedCredentials()
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("failed to load credentials: %w", err)
|
||||||
|
}
|
||||||
|
creds := store.GetDefaultCredential(gatewayURL)
|
||||||
|
if creds == nil || creds.APIKey == "" {
|
||||||
|
return "", fmt.Errorf("no credentials for %s — run 'orama auth login' first", gatewayURL)
|
||||||
|
}
|
||||||
|
|
||||||
|
body, _ := json.Marshal(map[string]int{"expiry_minutes": 60})
|
||||||
|
req, err := http.NewRequest(http.MethodPost, gatewayURL+"/v1/operator/invite", bytes.NewReader(body))
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
req.Header.Set("X-API-Key", creds.APIKey)
|
||||||
|
|
||||||
|
client := &http.Client{Timeout: 15 * time.Second}
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("request failed: %w", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 4096))
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return "", fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(respBody))
|
||||||
|
}
|
||||||
|
|
||||||
|
var result struct {
|
||||||
|
Token string `json:"token"`
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal(respBody, &result); err != nil {
|
||||||
|
return "", fmt.Errorf("failed to parse response: %w", err)
|
||||||
|
}
|
||||||
|
if result.Token == "" {
|
||||||
|
return "", fmt.Errorf("empty token in response")
|
||||||
|
}
|
||||||
|
return result.Token, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// needsArchiveUpload checks if the node already has the orama binary.
|
||||||
|
func needsArchiveUpload(node inspector.Node) bool {
|
||||||
|
result := inspector.RunSSH(context.Background(), node, "/opt/orama/bin/orama version 2>/dev/null")
|
||||||
|
return !result.OK()
|
||||||
|
}
|
||||||
|
|
||||||
|
// findNewestArchive finds the newest orama binary archive in /tmp/.
|
||||||
|
func findNewestArchive() string {
|
||||||
|
matches, _ := filepath.Glob("/tmp/orama-*-linux-*.tar.gz")
|
||||||
|
if len(matches) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
sort.Slice(matches, func(i, j int) bool {
|
||||||
|
fi, _ := os.Stat(matches[i])
|
||||||
|
fj, _ := os.Stat(matches[j])
|
||||||
|
if fi == nil || fj == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return fi.ModTime().After(fj.ModTime())
|
||||||
|
})
|
||||||
|
return matches[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
func findBinary(name string) (string, error) {
|
||||||
|
paths := []string{
|
||||||
|
"/opt/homebrew/bin/" + name,
|
||||||
|
"/usr/local/bin/" + name,
|
||||||
|
"/usr/bin/" + name,
|
||||||
|
}
|
||||||
|
for _, p := range paths {
|
||||||
|
if _, err := os.Stat(p); err == nil {
|
||||||
|
return p, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", fmt.Errorf("%s not found", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func runCommand(bin string, args ...string) (string, error) {
|
||||||
|
cmd := &exec.Cmd{
|
||||||
|
Path: bin,
|
||||||
|
Args: append([]string{bin}, args...),
|
||||||
|
}
|
||||||
|
out, err := cmd.CombinedOutput()
|
||||||
|
return string(out), err
|
||||||
|
}
|
||||||
@ -7,12 +7,13 @@ const guardian_mod = @import("guardian.zig");
|
|||||||
const heartbeat = @import("peer/heartbeat.zig");
|
const heartbeat = @import("peer/heartbeat.zig");
|
||||||
const posix = std.posix;
|
const posix = std.posix;
|
||||||
|
|
||||||
/// Global shutdown flag — set by signal handlers.
|
/// Global running flag — true while the server should keep running.
|
||||||
var shutdown_flag = std.atomic.Value(bool).init(false);
|
/// Signal handlers set this to false to trigger graceful shutdown.
|
||||||
|
var running_flag = std.atomic.Value(bool).init(true);
|
||||||
|
|
||||||
fn signalHandler(sig: i32) callconv(.c) void {
|
fn signalHandler(sig: i32) callconv(.c) void {
|
||||||
_ = sig;
|
_ = sig;
|
||||||
shutdown_flag.store(true, .release);
|
running_flag.store(false, .release);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn main() !void {
|
pub fn main() !void {
|
||||||
@ -97,7 +98,7 @@ pub fn main() !void {
|
|||||||
|
|
||||||
// Start heartbeat thread
|
// Start heartbeat thread
|
||||||
var hb_thread: ?std.Thread = blk: {
|
var hb_thread: ?std.Thread = blk: {
|
||||||
break :blk std.Thread.spawn(.{}, heartbeatLoop, .{ &guardian, &shutdown_flag }) catch |err| {
|
break :blk std.Thread.spawn(.{}, heartbeatLoop, .{ &guardian, &running_flag }) catch |err| {
|
||||||
log.warn("failed to start heartbeat thread: {}, running without heartbeat", .{err});
|
log.warn("failed to start heartbeat thread: {}, running without heartbeat", .{err});
|
||||||
break :blk null;
|
break :blk null;
|
||||||
};
|
};
|
||||||
@ -113,7 +114,7 @@ pub fn main() !void {
|
|||||||
.allocator = allocator,
|
.allocator = allocator,
|
||||||
.guardian = &guardian,
|
.guardian = &guardian,
|
||||||
};
|
};
|
||||||
listener.serve(ctx, &shutdown_flag) catch |err| {
|
listener.serve(ctx, &running_flag) catch |err| {
|
||||||
log.err("server failed: {}", .{err});
|
log.err("server failed: {}", .{err});
|
||||||
std.process.exit(1);
|
std.process.exit(1);
|
||||||
};
|
};
|
||||||
|
|||||||
Loading…
x
Reference in New Issue
Block a user