a lot of changes

This commit is contained in:
anonpenguin23 2026-01-22 13:04:52 +02:00
parent ccee66d525
commit 9fc9bbb8e5
42 changed files with 9153 additions and 5 deletions

5
.gitignore vendored
View File

@ -80,4 +80,7 @@ configs/
.claude/
.mcp.json
.cursor/
# Remote node credentials
scripts/remote-nodes.conf

View File

@ -19,7 +19,7 @@ test-e2e:
.PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks kill
VERSION := 0.90.0
VERSION := 0.100.0
COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)'

View File

@ -0,0 +1,77 @@
-- Migration 005: DNS Records for CoreDNS Integration
-- This migration creates tables for managing DNS records with RQLite backend for CoreDNS
BEGIN;

-- DNS records table for dynamic DNS management
CREATE TABLE IF NOT EXISTS dns_records (
id INTEGER PRIMARY KEY AUTOINCREMENT,
fqdn TEXT NOT NULL UNIQUE, -- Fully qualified domain name (e.g., myapp.node-7prvNa.debros.network)
record_type TEXT NOT NULL DEFAULT 'A', -- DNS record type: A, AAAA, CNAME, TXT
value TEXT NOT NULL, -- IP address or target value
ttl INTEGER NOT NULL DEFAULT 300, -- Time to live in seconds
namespace TEXT NOT NULL, -- Namespace that owns this record
deployment_id TEXT, -- Optional: deployment that created this record
node_id TEXT, -- Optional: specific node ID for node-specific routing
is_active BOOLEAN NOT NULL DEFAULT TRUE,-- Enable/disable without deleting
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
created_by TEXT NOT NULL -- Wallet address or 'system' for auto-created records
);
-- Indexes for fast DNS lookups.
-- NOTE: no explicit index on fqdn — the UNIQUE constraint above already
-- creates an implicit index, so a second one would only slow down writes.
CREATE INDEX IF NOT EXISTS idx_dns_records_namespace ON dns_records(namespace);
CREATE INDEX IF NOT EXISTS idx_dns_records_deployment ON dns_records(deployment_id);
CREATE INDEX IF NOT EXISTS idx_dns_records_node_id ON dns_records(node_id);
CREATE INDEX IF NOT EXISTS idx_dns_records_active ON dns_records(is_active);

-- DNS nodes registry for tracking active nodes
CREATE TABLE IF NOT EXISTS dns_nodes (
id TEXT PRIMARY KEY, -- Node ID (e.g., node-7prvNa)
ip_address TEXT NOT NULL, -- Public IP address
internal_ip TEXT, -- Private IP for cluster communication
region TEXT, -- Geographic region
status TEXT NOT NULL DEFAULT 'active', -- active, draining, offline
last_seen TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
capabilities TEXT, -- JSON: ["wasm", "ipfs", "cache"]
metadata TEXT, -- JSON: additional node info
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);
-- Indexes for node health monitoring
CREATE INDEX IF NOT EXISTS idx_dns_nodes_status ON dns_nodes(status);
CREATE INDEX IF NOT EXISTS idx_dns_nodes_last_seen ON dns_nodes(last_seen);

-- Reserved domains table to prevent subdomain collisions
CREATE TABLE IF NOT EXISTS reserved_domains (
domain TEXT PRIMARY KEY,
reason TEXT NOT NULL,
reserved_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);
-- Seed reserved domains (idempotent: ON CONFLICT keeps re-runs safe)
INSERT INTO reserved_domains (domain, reason) VALUES
('api.debros.network', 'API gateway endpoint'),
('www.debros.network', 'Marketing website'),
('admin.debros.network', 'Admin panel'),
('ns1.debros.network', 'Nameserver 1'),
('ns2.debros.network', 'Nameserver 2'),
('ns3.debros.network', 'Nameserver 3'),
('ns4.debros.network', 'Nameserver 4'),
('mail.debros.network', 'Email service'),
('cdn.debros.network', 'Content delivery'),
('docs.debros.network', 'Documentation'),
('status.debros.network', 'Status page')
ON CONFLICT(domain) DO NOTHING;

-- Mark migration as applied (also bootstraps the migrations ledger)
CREATE TABLE IF NOT EXISTS schema_migrations (
version INTEGER PRIMARY KEY,
applied_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);
INSERT OR IGNORE INTO schema_migrations(version) VALUES (5);
COMMIT;

View File

@ -0,0 +1,74 @@
-- Migration 006: Per-Namespace SQLite Databases
-- This migration creates infrastructure for isolated SQLite databases per namespace
BEGIN;

-- Namespace SQLite databases registry
CREATE TABLE IF NOT EXISTS namespace_sqlite_databases (
id TEXT PRIMARY KEY, -- UUID
namespace TEXT NOT NULL, -- Namespace that owns this database
database_name TEXT NOT NULL, -- Database name (unique per namespace)
home_node_id TEXT NOT NULL, -- Node ID where database file resides
file_path TEXT NOT NULL, -- Absolute path on home node
size_bytes BIGINT DEFAULT 0, -- Current database size
backup_cid TEXT, -- Latest backup CID in IPFS
last_backup_at TIMESTAMP, -- Last backup timestamp
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
created_by TEXT NOT NULL, -- Wallet address that created the database
UNIQUE(namespace, database_name)
);
-- Indexes for database lookups.
-- NOTE: no separate (namespace, database_name) index — the UNIQUE
-- constraint above already creates an implicit index on that pair.
CREATE INDEX IF NOT EXISTS idx_sqlite_databases_namespace ON namespace_sqlite_databases(namespace);
CREATE INDEX IF NOT EXISTS idx_sqlite_databases_home_node ON namespace_sqlite_databases(home_node_id);

-- SQLite database backups history
CREATE TABLE IF NOT EXISTS namespace_sqlite_backups (
id TEXT PRIMARY KEY, -- UUID
database_id TEXT NOT NULL, -- References namespace_sqlite_databases.id
backup_cid TEXT NOT NULL, -- IPFS CID of backup file
size_bytes BIGINT NOT NULL, -- Backup file size
backup_type TEXT NOT NULL, -- 'manual', 'scheduled', 'migration'
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
created_by TEXT NOT NULL,
FOREIGN KEY (database_id) REFERENCES namespace_sqlite_databases(id) ON DELETE CASCADE
);
-- Index for backup history queries (newest-first per database)
CREATE INDEX IF NOT EXISTS idx_sqlite_backups_database ON namespace_sqlite_backups(database_id, created_at DESC);

-- Namespace quotas for resource management (future use)
CREATE TABLE IF NOT EXISTS namespace_quotas (
namespace TEXT PRIMARY KEY,
-- Storage quotas
max_sqlite_databases INTEGER DEFAULT 10, -- Max SQLite databases per namespace
max_storage_bytes BIGINT DEFAULT 5368709120, -- 5GB default
max_ipfs_pins INTEGER DEFAULT 1000, -- Max pinned IPFS objects
-- Compute quotas
max_deployments INTEGER DEFAULT 20, -- Max concurrent deployments
max_cpu_percent INTEGER DEFAULT 200, -- Total CPU quota (2 cores)
max_memory_mb INTEGER DEFAULT 2048, -- Total memory quota
-- Rate limits
max_rqlite_queries_per_minute INTEGER DEFAULT 1000,
max_olric_ops_per_minute INTEGER DEFAULT 10000,
-- Current usage (updated periodically)
current_storage_bytes BIGINT DEFAULT 0,
current_deployments INTEGER DEFAULT 0,
current_sqlite_databases INTEGER DEFAULT 0,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);

-- Mark migration as applied. The guard makes this file safe to run
-- standalone (migration 005 normally creates schema_migrations first).
CREATE TABLE IF NOT EXISTS schema_migrations (
version INTEGER PRIMARY KEY,
applied_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);
INSERT OR IGNORE INTO schema_migrations(version) VALUES (6);
COMMIT;

View File

@ -0,0 +1,178 @@
-- Migration 007: Deployments System
-- This migration creates the complete schema for managing custom deployments
-- (Static sites, Next.js, Go backends, Node.js backends)
BEGIN;

-- Main deployments table
CREATE TABLE IF NOT EXISTS deployments (
id TEXT PRIMARY KEY, -- UUID
namespace TEXT NOT NULL, -- Owner namespace
name TEXT NOT NULL, -- Deployment name (unique per namespace)
type TEXT NOT NULL, -- 'static', 'nextjs', 'nextjs-static', 'go-backend', 'go-wasm', 'nodejs-backend'
version INTEGER NOT NULL DEFAULT 1, -- Monotonic version counter
status TEXT NOT NULL DEFAULT 'deploying', -- 'deploying', 'active', 'failed', 'stopped', 'updating'
-- Content storage
content_cid TEXT, -- IPFS CID for static content or built assets
build_cid TEXT, -- IPFS CID for build artifacts (Next.js SSR, binaries)
-- Runtime configuration
home_node_id TEXT, -- Node ID hosting stateful data/processes
port INTEGER, -- Allocated port (NULL for static/WASM)
subdomain TEXT, -- Custom subdomain (e.g., myapp)
environment TEXT, -- JSON: {"KEY": "value", ...}
-- Resource limits
memory_limit_mb INTEGER DEFAULT 256,
cpu_limit_percent INTEGER DEFAULT 50,
disk_limit_mb INTEGER DEFAULT 1024,
-- Health & monitoring
health_check_path TEXT DEFAULT '/health', -- HTTP path for health checks
health_check_interval INTEGER DEFAULT 30, -- Seconds between health checks
restart_policy TEXT DEFAULT 'always', -- 'always', 'on-failure', 'never'
max_restart_count INTEGER DEFAULT 10, -- Max restarts before marking as failed
-- Metadata
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
deployed_by TEXT NOT NULL, -- Wallet address or API key
UNIQUE(namespace, name)
);
-- Indexes for deployment lookups
CREATE INDEX IF NOT EXISTS idx_deployments_namespace ON deployments(namespace);
CREATE INDEX IF NOT EXISTS idx_deployments_status ON deployments(status);
CREATE INDEX IF NOT EXISTS idx_deployments_home_node ON deployments(home_node_id);
CREATE INDEX IF NOT EXISTS idx_deployments_type ON deployments(type);
CREATE INDEX IF NOT EXISTS idx_deployments_subdomain ON deployments(subdomain);

-- Port allocations table (prevents port conflicts)
CREATE TABLE IF NOT EXISTS port_allocations (
node_id TEXT NOT NULL,
port INTEGER NOT NULL,
deployment_id TEXT NOT NULL,
allocated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (node_id, port),
FOREIGN KEY (deployment_id) REFERENCES deployments(id) ON DELETE CASCADE
);
-- NOTE: no (node_id, port) index — the composite PRIMARY KEY already
-- provides one; a duplicate index would only slow down writes.
CREATE INDEX IF NOT EXISTS idx_port_allocations_deployment ON port_allocations(deployment_id);

-- Home node assignments (namespace → node mapping)
CREATE TABLE IF NOT EXISTS home_node_assignments (
namespace TEXT PRIMARY KEY,
home_node_id TEXT NOT NULL,
assigned_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
last_heartbeat TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
deployment_count INTEGER DEFAULT 0, -- Cached count for capacity planning
total_memory_mb INTEGER DEFAULT 0, -- Cached total memory usage
total_cpu_percent INTEGER DEFAULT 0 -- Cached total CPU usage
);
-- Index for querying by node
CREATE INDEX IF NOT EXISTS idx_home_node_by_node ON home_node_assignments(home_node_id);

-- Deployment domains (custom domain mapping)
CREATE TABLE IF NOT EXISTS deployment_domains (
id TEXT PRIMARY KEY, -- UUID
deployment_id TEXT NOT NULL,
namespace TEXT NOT NULL,
domain TEXT NOT NULL UNIQUE, -- Full domain (e.g., myapp.debros.network or custom)
routing_type TEXT NOT NULL DEFAULT 'balanced', -- 'balanced' or 'node_specific'
node_id TEXT, -- For node_specific routing
is_custom BOOLEAN DEFAULT FALSE, -- True for user's own domain
tls_cert_cid TEXT, -- IPFS CID for custom TLS certificate
verified_at TIMESTAMP, -- When custom domain was verified
verification_token TEXT, -- TXT record token for domain verification
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY (deployment_id) REFERENCES deployments(id) ON DELETE CASCADE
);
-- Indexes for domain lookups
CREATE INDEX IF NOT EXISTS idx_deployment_domains_deployment ON deployment_domains(deployment_id);
CREATE INDEX IF NOT EXISTS idx_deployment_domains_domain ON deployment_domains(domain);
CREATE INDEX IF NOT EXISTS idx_deployment_domains_namespace ON deployment_domains(namespace);

-- Deployment history (version tracking and rollback)
CREATE TABLE IF NOT EXISTS deployment_history (
id TEXT PRIMARY KEY, -- UUID
deployment_id TEXT NOT NULL,
version INTEGER NOT NULL,
content_cid TEXT,
build_cid TEXT,
deployed_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
deployed_by TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'success', -- 'success', 'failed', 'rolled_back'
error_message TEXT,
rollback_from_version INTEGER, -- If this is a rollback, original version
FOREIGN KEY (deployment_id) REFERENCES deployments(id) ON DELETE CASCADE
);
-- Indexes for history queries
CREATE INDEX IF NOT EXISTS idx_deployment_history_deployment ON deployment_history(deployment_id, version DESC);
CREATE INDEX IF NOT EXISTS idx_deployment_history_status ON deployment_history(status);

-- Deployment environment variables (separate for security)
CREATE TABLE IF NOT EXISTS deployment_env_vars (
id TEXT PRIMARY KEY, -- UUID
deployment_id TEXT NOT NULL,
key TEXT NOT NULL,
value TEXT NOT NULL, -- Encrypted in production
is_secret BOOLEAN DEFAULT FALSE, -- True for sensitive values
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
UNIQUE(deployment_id, key),
FOREIGN KEY (deployment_id) REFERENCES deployments(id) ON DELETE CASCADE
);
-- Index for env var lookups
CREATE INDEX IF NOT EXISTS idx_deployment_env_vars_deployment ON deployment_env_vars(deployment_id);

-- Deployment events log (audit trail)
CREATE TABLE IF NOT EXISTS deployment_events (
id TEXT PRIMARY KEY, -- UUID
deployment_id TEXT NOT NULL,
event_type TEXT NOT NULL, -- 'created', 'started', 'stopped', 'restarted', 'updated', 'deleted', 'health_check_failed'
message TEXT,
metadata TEXT, -- JSON: additional context
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
created_by TEXT, -- Wallet address or 'system'
FOREIGN KEY (deployment_id) REFERENCES deployments(id) ON DELETE CASCADE
);
-- Index for event queries
CREATE INDEX IF NOT EXISTS idx_deployment_events_deployment ON deployment_events(deployment_id, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_deployment_events_type ON deployment_events(event_type);

-- Process health checks (for dynamic deployments)
CREATE TABLE IF NOT EXISTS deployment_health_checks (
id TEXT PRIMARY KEY, -- UUID
deployment_id TEXT NOT NULL,
node_id TEXT NOT NULL,
status TEXT NOT NULL, -- 'healthy', 'unhealthy', 'unknown'
response_time_ms INTEGER,
status_code INTEGER,
error_message TEXT,
checked_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY (deployment_id) REFERENCES deployments(id) ON DELETE CASCADE
);
-- Index for health check queries (keep only recent checks)
CREATE INDEX IF NOT EXISTS idx_health_checks_deployment ON deployment_health_checks(deployment_id, checked_at DESC);

-- Mark migration as applied. The guard makes this file safe to run
-- standalone (migration 005 normally creates schema_migrations first).
CREATE TABLE IF NOT EXISTS schema_migrations (
version INTEGER PRIMARY KEY,
applied_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);
INSERT OR IGNORE INTO schema_migrations(version) VALUES (7);
COMMIT;

463
pkg/cli/db/commands.go Normal file
View File

@ -0,0 +1,463 @@
package db
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"text/tabwriter"
"time"
"github.com/spf13/cobra"
)
// DBCmd is the root command for managing per-namespace SQLite databases
// through the gateway API.
var DBCmd = &cobra.Command{
	Use:   "db",
	Short: "Manage SQLite databases",
	Long:  "Create and manage per-namespace SQLite databases",
}

// CreateCmd creates a new database.
var CreateCmd = &cobra.Command{
	Use:   "create <database_name>",
	Short: "Create a new SQLite database",
	Args:  cobra.ExactArgs(1),
	RunE:  createDatabase,
}

// QueryCmd executes a SQL query against an existing database.
var QueryCmd = &cobra.Command{
	Use:   "query <database_name> <sql>",
	Short: "Execute a SQL query",
	Args:  cobra.ExactArgs(2),
	RunE:  queryDatabase,
}

// ListCmd lists all databases in the caller's namespace.
var ListCmd = &cobra.Command{
	Use:   "list",
	Short: "List all databases",
	RunE:  listDatabases,
}

// BackupCmd backs up a database to IPFS.
var BackupCmd = &cobra.Command{
	Use:   "backup <database_name>",
	Short: "Backup database to IPFS",
	Args:  cobra.ExactArgs(1),
	RunE:  backupDatabase,
}

// BackupsCmd lists the backup history for a database.
var BackupsCmd = &cobra.Command{
	Use:     "backups <database_name>",
	Short:   "List backups for a database",
	Args:    cobra.ExactArgs(1),
	RunE:    listBackups,
}

func init() {
	// Attach every subcommand to the db root.
	for _, sub := range []*cobra.Command{CreateCmd, QueryCmd, ListCmd, BackupCmd, BackupsCmd} {
		DBCmd.AddCommand(sub)
	}
}
// createDatabase creates a new namespace-scoped SQLite database via the
// gateway API and prints the resulting metadata.
// Returns an error on marshalling, network, auth, or non-201 responses.
func createDatabase(cmd *cobra.Command, args []string) error {
	dbName := args[0]

	apiURL := getAPIURL()
	url := apiURL + "/v1/db/sqlite/create"

	payload := map[string]string{
		"database_name": dbName,
	}
	jsonData, err := json.Marshal(payload)
	if err != nil {
		return err
	}

	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")

	token, err := getAuthToken()
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)

	// Bound the request: the zero-value http.Client has no timeout, so the
	// CLI could previously hang forever on an unresponsive gateway.
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("failed to create database: %s", string(body))
	}

	var result map[string]interface{}
	if err := json.Unmarshal(body, &result); err != nil {
		return err
	}

	fmt.Printf("✅ Database created successfully!\n\n")
	fmt.Printf("Name: %s\n", result["database_name"])
	fmt.Printf("Home Node: %s\n", result["home_node_id"])
	fmt.Printf("Created: %s\n", result["created_at"])
	return nil
}
// queryDatabase executes a SQL statement against a namespace database via
// the gateway API and renders either the result rows (as a table) or the
// rows-affected count for write statements.
func queryDatabase(cmd *cobra.Command, args []string) error {
	dbName := args[0]
	sql := args[1]

	apiURL := getAPIURL()
	url := apiURL + "/v1/db/sqlite/query"

	payload := map[string]interface{}{
		"database_name": dbName,
		"query":         sql,
	}
	jsonData, err := json.Marshal(payload)
	if err != nil {
		return err
	}

	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")

	token, err := getAuthToken()
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)

	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("query failed: %s", string(body))
	}

	var result map[string]interface{}
	if err := json.Unmarshal(body, &result); err != nil {
		return err
	}

	// Print results
	if rows, ok := result["rows"].([]interface{}); ok && len(rows) > 0 {
		w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)

		// BUG FIX: the original ranged over the row map twice — once for
		// the header and once per row. Go map iteration order is random,
		// so values could print under the wrong column. Capture a single
		// column order and reuse it for both the header and every row.
		firstRow, ok := rows[0].(map[string]interface{})
		if !ok {
			return fmt.Errorf("unexpected row format in response")
		}
		cols := make([]string, 0, len(firstRow))
		for col := range firstRow {
			cols = append(cols, col)
		}

		for _, col := range cols {
			fmt.Fprintf(w, "%s\t", col)
		}
		fmt.Fprintln(w)

		for _, row := range rows {
			r, ok := row.(map[string]interface{})
			if !ok {
				continue // skip malformed rows instead of panicking
			}
			for _, col := range cols {
				fmt.Fprintf(w, "%v\t", r[col])
			}
			fmt.Fprintln(w)
		}
		w.Flush()
		fmt.Printf("\nRows returned: %d\n", len(rows))
	} else if rowsAffected, ok := result["rows_affected"].(float64); ok {
		fmt.Printf("✅ Query executed successfully\n")
		fmt.Printf("Rows affected: %d\n", int(rowsAffected))
	}
	return nil
}
// listDatabases fetches all databases in the caller's namespace and prints
// them as an aligned table (name, size, latest backup CID, creation time).
func listDatabases(cmd *cobra.Command, args []string) error {
	apiURL := getAPIURL()
	url := apiURL + "/v1/db/sqlite/list"

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return err
	}

	token, err := getAuthToken()
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)

	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("failed to list databases: %s", string(body))
	}

	var result map[string]interface{}
	if err := json.Unmarshal(body, &result); err != nil {
		return err
	}

	databases, ok := result["databases"].([]interface{})
	if !ok || len(databases) == 0 {
		fmt.Println("No databases found")
		return nil
	}

	// Print table
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
	fmt.Fprintln(w, "NAME\tSIZE\tBACKUP CID\tCREATED")
	for _, db := range databases {
		// BUG FIX: the original used an unchecked type assertion here and
		// would panic on an unexpected response shape; skip bad entries.
		d, ok := db.(map[string]interface{})
		if !ok {
			continue
		}
		size := "0 B"
		if sizeBytes, ok := d["size_bytes"].(float64); ok {
			size = formatBytes(int64(sizeBytes))
		}
		backupCID := "-"
		if cid, ok := d["backup_cid"].(string); ok && cid != "" {
			if len(cid) > 12 {
				backupCID = cid[:12] + "..."
			} else {
				backupCID = cid
			}
		}
		createdAt := ""
		if created, ok := d["created_at"].(string); ok {
			if t, err := time.Parse(time.RFC3339, created); err == nil {
				createdAt = t.Format("2006-01-02 15:04")
			}
		}
		fmt.Fprintf(w, "%s\t%s\t%s\t%s\n",
			d["database_name"],
			size,
			backupCID,
			createdAt,
		)
	}
	w.Flush()
	fmt.Printf("\nTotal: %v\n", result["total"])
	return nil
}
// backupDatabase asks the gateway to back up the named database to IPFS and
// prints the resulting CID and metadata.
func backupDatabase(cmd *cobra.Command, args []string) error {
	name := args[0]
	fmt.Printf("📦 Backing up database '%s' to IPFS...\n", name)

	endpoint := getAPIURL() + "/v1/db/sqlite/backup"

	reqBody, err := json.Marshal(map[string]string{"database_name": name})
	if err != nil {
		return err
	}

	req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(reqBody))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")

	token, err := getAuthToken()
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)

	res, err := (&http.Client{}).Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()

	raw, err := io.ReadAll(res.Body)
	if err != nil {
		return err
	}
	if res.StatusCode != http.StatusOK {
		return fmt.Errorf("backup failed: %s", string(raw))
	}

	var result map[string]interface{}
	if err := json.Unmarshal(raw, &result); err != nil {
		return err
	}

	fmt.Printf("\n✅ Backup successful!\n\n")
	fmt.Printf("Database: %s\n", result["database_name"])
	fmt.Printf("Backup CID: %s\n", result["backup_cid"])
	fmt.Printf("IPFS URL: %s\n", result["ipfs_url"])
	fmt.Printf("Backed up: %s\n", result["backed_up_at"])
	return nil
}
// listBackups prints the backup history for one database as an aligned
// table (truncated CID, size, timestamp), in the order the gateway returns.
func listBackups(cmd *cobra.Command, args []string) error {
	dbName := args[0]

	apiURL := getAPIURL()
	// NOTE(review): dbName is interpolated unescaped; assumes database names
	// are restricted to URL-safe characters — confirm, or escape via net/url.
	url := fmt.Sprintf("%s/v1/db/sqlite/backups?database_name=%s", apiURL, dbName)

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return err
	}

	token, err := getAuthToken()
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)

	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("failed to list backups: %s", string(body))
	}

	var result map[string]interface{}
	if err := json.Unmarshal(body, &result); err != nil {
		return err
	}

	backups, ok := result["backups"].([]interface{})
	if !ok || len(backups) == 0 {
		fmt.Println("No backups found")
		return nil
	}

	// Print table
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
	fmt.Fprintln(w, "CID\tSIZE\tBACKED UP")
	for _, backup := range backups {
		// BUG FIX: the original used unchecked type assertions on the entry
		// and on backup_cid, panicking if either were missing or null.
		b, ok := backup.(map[string]interface{})
		if !ok {
			continue
		}
		cid, _ := b["backup_cid"].(string)
		if len(cid) > 20 {
			cid = cid[:20] + "..."
		}
		size := "0 B"
		if sizeBytes, ok := b["size_bytes"].(float64); ok {
			size = formatBytes(int64(sizeBytes))
		}
		backedUpAt := ""
		if backed, ok := b["backed_up_at"].(string); ok {
			if t, err := time.Parse(time.RFC3339, backed); err == nil {
				backedUpAt = t.Format("2006-01-02 15:04")
			}
		}
		fmt.Fprintf(w, "%s\t%s\t%s\n", cid, size, backedUpAt)
	}
	w.Flush()
	fmt.Printf("\nTotal: %v\n", result["total"])
	return nil
}
func getAPIURL() string {
if url := os.Getenv("ORAMA_API_URL"); url != "" {
return url
}
return "https://gateway.debros.network"
}
func getAuthToken() (string, error) {
if token := os.Getenv("ORAMA_TOKEN"); token != "" {
return token, nil
}
return "", fmt.Errorf("no authentication token found. Set ORAMA_TOKEN environment variable")
}
// formatBytes renders a byte count as a human-readable string using binary
// (1024-based) units: "512 B", "1.5 KB", "2.0 MB", and so on.
func formatBytes(bytes int64) string {
	const step = 1024
	if bytes < step {
		return fmt.Sprintf("%d B", bytes)
	}
	suffixes := "KMGTPE"
	value := float64(bytes)
	idx := 0
	// Scale down until the value fits below one unit of the next suffix.
	for value >= step*step && idx < len(suffixes)-1 {
		value /= step
		idx++
	}
	return fmt.Sprintf("%.1f %cB", value/step, suffixes[idx])
}

View File

@ -0,0 +1,399 @@
package deployments
import (
"archive/tar"
"bytes"
"compress/gzip"
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
"os"
"path/filepath"
"strings"
"github.com/spf13/cobra"
)
// DeployCmd is the root command grouping all deployment workflows.
var DeployCmd = &cobra.Command{
	Use:   "deploy",
	Short: "Deploy applications",
	Long:  "Deploy static sites, Next.js apps, Go backends, and Node.js backends",
}

// DeployStaticCmd deploys a static site (pre-built assets).
var DeployStaticCmd = &cobra.Command{
	Use:   "static <source_path>",
	Short: "Deploy a static site (React, Vue, etc.)",
	Args:  cobra.ExactArgs(1),
	RunE:  deployStatic,
}

// DeployNextJSCmd deploys a Next.js application (static export or SSR).
var DeployNextJSCmd = &cobra.Command{
	Use:   "nextjs <source_path>",
	Short: "Deploy a Next.js application",
	Args:  cobra.ExactArgs(1),
	RunE:  deployNextJS,
}

// DeployGoCmd deploys a Go backend.
var DeployGoCmd = &cobra.Command{
	Use:   "go <source_path>",
	Short: "Deploy a Go backend",
	Args:  cobra.ExactArgs(1),
	RunE:  deployGo,
}

// DeployNodeJSCmd deploys a Node.js backend.
var DeployNodeJSCmd = &cobra.Command{
	Use:   "nodejs <source_path>",
	Short: "Deploy a Node.js backend",
	Args:  cobra.ExactArgs(1),
	RunE:  deployNodeJS,
}

// Flag storage shared by all deploy subcommands.
var (
	deployName      string // --name: deployment identifier (required)
	deploySubdomain string // --subdomain: optional custom subdomain
	deploySSR       bool   // --ssr: Next.js server-side rendering toggle
	deployUpdate    bool   // --update: update an existing deployment
)

func init() {
	// Every subcommand shares the same core flags; register them in a loop
	// instead of repeating the same four lines per command.
	for _, sub := range []*cobra.Command{DeployStaticCmd, DeployNextJSCmd, DeployGoCmd, DeployNodeJSCmd} {
		sub.Flags().StringVar(&deployName, "name", "", "Deployment name (required)")
		sub.Flags().StringVar(&deploySubdomain, "subdomain", "", "Custom subdomain")
		sub.Flags().BoolVar(&deployUpdate, "update", false, "Update existing deployment")
		sub.MarkFlagRequired("name")
		DeployCmd.AddCommand(sub)
	}
	// SSR only applies to Next.js deployments.
	DeployNextJSCmd.Flags().BoolVar(&deploySSR, "ssr", false, "Deploy with SSR (server-side rendering)")
}
// deployStatic packages the given directory and ships it as a static-site
// deployment (create or, with --update, update in place).
func deployStatic(cmd *cobra.Command, args []string) error {
	source := args[0]

	fmt.Printf("📦 Creating tarball from %s...\n", source)
	archive, err := createTarball(source)
	if err != nil {
		return fmt.Errorf("failed to create tarball: %w", err)
	}
	defer os.Remove(archive)

	fmt.Printf("☁️ Uploading to Orama Network...\n")
	endpoint := "/v1/deployments/static/upload"
	if deployUpdate {
		endpoint = "/v1/deployments/static/update"
	}

	fields := map[string]string{
		"name":      deployName,
		"subdomain": deploySubdomain,
	}
	result, err := uploadDeployment(endpoint, archive, fields)
	if err != nil {
		return err
	}

	fmt.Printf("\n✅ Deployment successful!\n\n")
	printDeploymentInfo(result)
	return nil
}
// deployNextJS packages the given directory and ships it as a Next.js
// deployment; --ssr selects server-side rendering mode.
func deployNextJS(cmd *cobra.Command, args []string) error {
	source := args[0]

	fmt.Printf("📦 Creating tarball from %s...\n", source)
	archive, err := createTarball(source)
	if err != nil {
		return fmt.Errorf("failed to create tarball: %w", err)
	}
	defer os.Remove(archive)

	fmt.Printf("☁️ Uploading to Orama Network...\n")
	endpoint := "/v1/deployments/nextjs/upload"
	if deployUpdate {
		endpoint = "/v1/deployments/nextjs/update"
	}

	fields := map[string]string{
		"name":      deployName,
		"subdomain": deploySubdomain,
		"ssr":       fmt.Sprintf("%t", deploySSR),
	}
	result, err := uploadDeployment(endpoint, archive, fields)
	if err != nil {
		return err
	}

	fmt.Printf("\n✅ Deployment successful!\n\n")
	printDeploymentInfo(result)

	if deploySSR {
		fmt.Printf("⚠️ Note: SSR deployment may take a minute to start. Check status with: orama deployments get %s\n", deployName)
	}
	return nil
}
// deployGo packages the given directory and ships it as a Go backend
// deployment (create or, with --update, update in place).
func deployGo(cmd *cobra.Command, args []string) error {
	srcDir := args[0]

	fmt.Printf("📦 Creating tarball from %s...\n", srcDir)
	archive, err := createTarball(srcDir)
	if err != nil {
		return fmt.Errorf("failed to create tarball: %w", err)
	}
	defer os.Remove(archive)

	fmt.Printf("☁️ Uploading to Orama Network...\n")
	endpoint := "/v1/deployments/go/upload"
	if deployUpdate {
		endpoint = "/v1/deployments/go/update"
	}

	result, err := uploadDeployment(endpoint, archive, map[string]string{
		"name":      deployName,
		"subdomain": deploySubdomain,
	})
	if err != nil {
		return err
	}

	fmt.Printf("\n✅ Deployment successful!\n\n")
	printDeploymentInfo(result)
	return nil
}
// deployNodeJS packages the given directory and ships it as a Node.js
// backend deployment (create or, with --update, update in place).
func deployNodeJS(cmd *cobra.Command, args []string) error {
	srcDir := args[0]

	fmt.Printf("📦 Creating tarball from %s...\n", srcDir)
	archive, err := createTarball(srcDir)
	if err != nil {
		return fmt.Errorf("failed to create tarball: %w", err)
	}
	defer os.Remove(archive)

	fmt.Printf("☁️ Uploading to Orama Network...\n")
	endpoint := "/v1/deployments/nodejs/upload"
	if deployUpdate {
		endpoint = "/v1/deployments/nodejs/update"
	}

	result, err := uploadDeployment(endpoint, archive, map[string]string{
		"name":      deployName,
		"subdomain": deploySubdomain,
	})
	if err != nil {
		return err
	}

	fmt.Printf("\n✅ Deployment successful!\n\n")
	printDeploymentInfo(result)
	return nil
}
func createTarball(sourcePath string) (string, error) {
// Create temp file
tmpFile, err := os.CreateTemp("", "orama-deploy-*.tar.gz")
if err != nil {
return "", err
}
defer tmpFile.Close()
// Create gzip writer
gzWriter := gzip.NewWriter(tmpFile)
defer gzWriter.Close()
// Create tar writer
tarWriter := tar.NewWriter(gzWriter)
defer tarWriter.Close()
// Walk directory and add files
err = filepath.Walk(sourcePath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// Skip hidden files and node_modules
if strings.HasPrefix(info.Name(), ".") && info.Name() != "." {
if info.IsDir() {
return filepath.SkipDir
}
return nil
}
if info.Name() == "node_modules" {
return filepath.SkipDir
}
// Create tar header
header, err := tar.FileInfoHeader(info, "")
if err != nil {
return err
}
// Update header name to be relative to source
relPath, err := filepath.Rel(sourcePath, path)
if err != nil {
return err
}
header.Name = relPath
// Write header
if err := tarWriter.WriteHeader(header); err != nil {
return err
}
// Write file content if not a directory
if !info.IsDir() {
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
_, err = io.Copy(tarWriter, file)
return err
}
return nil
})
return tmpFile.Name(), err
}
// uploadDeployment POSTs the tarball plus form fields as multipart/form-data
// to the gateway endpoint and decodes the JSON response. It returns an error
// for network failures, auth failures, or any status other than 200/201.
func uploadDeployment(endpoint, tarballPath string, formData map[string]string) (map[string]interface{}, error) {
	// Open tarball
	file, err := os.Open(tarballPath)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	// Build the multipart body in memory.
	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)

	// Add form fields. BUG FIX: WriteField errors were previously ignored,
	// which could silently drop fields from the request.
	for key, value := range formData {
		if err := writer.WriteField(key, value); err != nil {
			return nil, err
		}
	}

	// Add file
	part, err := writer.CreateFormFile("tarball", filepath.Base(tarballPath))
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(part, file); err != nil {
		return nil, err
	}
	// BUG FIX: Close flushes the multipart trailer; ignoring its error
	// could send a truncated body that the server cannot parse.
	if err := writer.Close(); err != nil {
		return nil, err
	}

	// Get API URL from config
	apiURL := getAPIURL()
	url := apiURL + endpoint

	// Create request
	req, err := http.NewRequest("POST", url, body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())

	// Add auth header
	token, err := getAuthToken()
	if err != nil {
		return nil, fmt.Errorf("authentication required: %w", err)
	}
	req.Header.Set("Authorization", "Bearer "+token)

	// Send request
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	// Read response
	respBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("deployment failed: %s", string(respBody))
	}

	// Parse response
	var result map[string]interface{}
	if err := json.Unmarshal(respBody, &result); err != nil {
		return nil, err
	}
	return result, nil
}
// printDeploymentInfo renders a deployment response as human-readable
// key/value lines, followed by any URLs the gateway reported.
func printDeploymentInfo(resp map[string]interface{}) {
	fmt.Printf("Name: %s\n", resp["name"])
	fmt.Printf("Type: %s\n", resp["type"])
	fmt.Printf("Status: %s\n", resp["status"])
	fmt.Printf("Version: %v\n", resp["version"])
	if cid, ok := resp["content_cid"]; ok && cid != "" {
		fmt.Printf("Content CID: %s\n", cid)
	}
	urls, ok := resp["urls"].([]interface{})
	if ok && len(urls) > 0 {
		fmt.Printf("\nURLs:\n")
		for _, u := range urls {
			fmt.Printf(" • %s\n", u)
		}
	}
}
func getAPIURL() string {
// TODO: Read from config file
if url := os.Getenv("ORAMA_API_URL"); url != "" {
return url
}
return "https://gateway.debros.network"
}
func getAuthToken() (string, error) {
// TODO: Read from config file
if token := os.Getenv("ORAMA_TOKEN"); token != "" {
return token, nil
}
return "", fmt.Errorf("no authentication token found. Set ORAMA_TOKEN environment variable")
}

333
pkg/cli/deployments/list.go Normal file
View File

@ -0,0 +1,333 @@
package deployments
import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"text/tabwriter"
	"time"

	"github.com/spf13/cobra"
)
// ListCmd lists all deployments in the caller's namespace.
var ListCmd = &cobra.Command{
	Use:   "list",
	Short: "List all deployments",
	RunE:  listDeployments,
}

// GetCmd prints the full details of a single deployment.
var GetCmd = &cobra.Command{
	Use:   "get <name>",
	Short: "Get deployment details",
	Args:  cobra.ExactArgs(1),
	RunE:  getDeployment,
}

// DeleteCmd deletes a deployment after interactive confirmation.
var DeleteCmd = &cobra.Command{
	Use:   "delete <name>",
	Short: "Delete a deployment",
	Args:  cobra.ExactArgs(1),
	RunE:  deleteDeployment,
}

// RollbackCmd rolls a deployment back to an earlier version.
var RollbackCmd = &cobra.Command{
	Use:   "rollback <name>",
	Short: "Rollback a deployment to a previous version",
	Args:  cobra.ExactArgs(1),
	RunE:  rollbackDeployment,
}

var (
	// rollbackVersion is the target version supplied via --version.
	rollbackVersion int
)

// init wires the mandatory --version flag onto the rollback command.
func init() {
	RollbackCmd.Flags().IntVar(&rollbackVersion, "version", 0, "Version to rollback to (required)")
	RollbackCmd.MarkFlagRequired("version")
}
// listDeployments fetches all deployments for the authenticated
// namespace and prints them as a table (NAME/TYPE/STATUS/VERSION/CREATED).
func listDeployments(cmd *cobra.Command, args []string) error {
	apiURL := getAPIURL()
	url := apiURL + "/v1/deployments/list"
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return err
	}
	token, err := getAuthToken()
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("failed to list deployments: %s", string(body))
	}
	var result map[string]interface{}
	if err := json.Unmarshal(body, &result); err != nil {
		return err
	}
	deployments, ok := result["deployments"].([]interface{})
	if !ok || len(deployments) == 0 {
		fmt.Println("No deployments found")
		return nil
	}
	// Print table.
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
	fmt.Fprintln(w, "NAME\tTYPE\tSTATUS\tVERSION\tCREATED")
	for _, dep := range deployments {
		// Guard the type assertion: a malformed entry previously caused
		// a panic (`dep.(map[string]interface{})` unchecked); skip it.
		d, ok := dep.(map[string]interface{})
		if !ok {
			continue
		}
		createdAt := ""
		if created, ok := d["created_at"].(string); ok {
			if t, err := time.Parse(time.RFC3339, created); err == nil {
				createdAt = t.Format("2006-01-02 15:04")
			}
		}
		fmt.Fprintf(w, "%s\t%s\t%s\t%v\t%s\n",
			d["name"],
			d["type"],
			d["status"],
			d["version"],
			createdAt,
		)
	}
	w.Flush()
	fmt.Printf("\nTotal: %v\n", result["total"])
	return nil
}
// getDeployment fetches one deployment by name and prints its details,
// including optional fields (CIDs, port, home node, subdomain, URLs).
func getDeployment(cmd *cobra.Command, args []string) error {
	name := args[0]
	apiURL := getAPIURL()
	url := fmt.Sprintf("%s/v1/deployments/get?name=%s", apiURL, name)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return err
	}
	token, err := getAuthToken()
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("failed to get deployment: %s", string(body))
	}
	var result map[string]interface{}
	if err := json.Unmarshal(body, &result); err != nil {
		return err
	}
	// Print deployment info.
	fmt.Printf("Deployment: %s\n\n", result["name"])
	fmt.Printf("ID: %s\n", result["id"])
	fmt.Printf("Type: %s\n", result["type"])
	fmt.Printf("Status: %s\n", result["status"])
	fmt.Printf("Version: %v\n", result["version"])
	fmt.Printf("Namespace: %s\n", result["namespace"])
	if contentCID, ok := result["content_cid"]; ok && contentCID != "" {
		fmt.Printf("Content CID: %s\n", contentCID)
	}
	if buildCID, ok := result["build_cid"]; ok && buildCID != "" {
		fmt.Printf("Build CID: %s\n", buildCID)
	}
	// Type-assert in the comma-ok form: the previous code did
	// `port.(float64)` on a bare interface{}, which panicked whenever
	// the "port" field was present but not a JSON number.
	if port, ok := result["port"].(float64); ok && port > 0 {
		fmt.Printf("Port: %v\n", port)
	}
	if homeNodeID, ok := result["home_node_id"]; ok && homeNodeID != "" {
		fmt.Printf("Home Node: %s\n", homeNodeID)
	}
	if subdomain, ok := result["subdomain"]; ok && subdomain != "" {
		fmt.Printf("Subdomain: %s\n", subdomain)
	}
	fmt.Printf("Memory Limit: %v MB\n", result["memory_limit_mb"])
	fmt.Printf("CPU Limit: %v%%\n", result["cpu_limit_percent"])
	fmt.Printf("Restart Policy: %s\n", result["restart_policy"])
	if urls, ok := result["urls"].([]interface{}); ok && len(urls) > 0 {
		fmt.Printf("\nURLs:\n")
		for _, url := range urls {
			fmt.Printf(" • %s\n", url)
		}
	}
	if createdAt, ok := result["created_at"].(string); ok {
		fmt.Printf("\nCreated: %s\n", createdAt)
	}
	if updatedAt, ok := result["updated_at"].(string); ok {
		fmt.Printf("Updated: %s\n", updatedAt)
	}
	return nil
}
// deleteDeployment removes the named deployment after an interactive
// y/N confirmation read from stdin.
func deleteDeployment(cmd *cobra.Command, args []string) error {
	name := args[0]
	fmt.Printf("⚠️ Are you sure you want to delete deployment '%s'? (y/N): ", name)
	var answer string
	fmt.Scanln(&answer)
	if answer != "y" && answer != "Y" {
		fmt.Println("Cancelled")
		return nil
	}
	endpoint := fmt.Sprintf("%s/v1/deployments/delete?name=%s", getAPIURL(), name)
	req, err := http.NewRequest("DELETE", endpoint, nil)
	if err != nil {
		return err
	}
	token, err := getAuthToken()
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	resp, err := (&http.Client{}).Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("failed to delete deployment: %s", string(body))
	}
	fmt.Printf("✅ Deployment '%s' deleted successfully\n", name)
	return nil
}
// rollbackDeployment reverts the named deployment to the version given
// via --version, after an interactive y/N confirmation.
func rollbackDeployment(cmd *cobra.Command, args []string) error {
	name := args[0]
	if rollbackVersion <= 0 {
		return fmt.Errorf("version must be positive")
	}
	fmt.Printf("⚠️ Rolling back '%s' to version %d. Continue? (y/N): ", name, rollbackVersion)
	var answer string
	fmt.Scanln(&answer)
	if answer != "y" && answer != "Y" {
		fmt.Println("Cancelled")
		return nil
	}
	reqBody, err := json.Marshal(map[string]interface{}{
		"name":    name,
		"version": rollbackVersion,
	})
	if err != nil {
		return err
	}
	req, err := http.NewRequest("POST", getAPIURL()+"/v1/deployments/rollback", bytes.NewBuffer(reqBody))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	token, err := getAuthToken()
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	resp, err := (&http.Client{}).Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	respBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("rollback failed: %s", string(respBody))
	}
	var result map[string]interface{}
	if err := json.Unmarshal(respBody, &result); err != nil {
		return err
	}
	fmt.Printf("\n✅ Rollback successful!\n\n")
	fmt.Printf("Deployment: %s\n", result["name"])
	fmt.Printf("Current Version: %v\n", result["version"])
	fmt.Printf("Rolled Back From: %v\n", result["rolled_back_from"])
	fmt.Printf("Rolled Back To: %v\n", result["rolled_back_to"])
	fmt.Printf("Status: %s\n", result["status"])
	return nil
}

View File

@ -0,0 +1,78 @@
package deployments
import (
	"bufio"
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/spf13/cobra"
)
// LogsCmd streams logs for a deployment, optionally following output.
var LogsCmd = &cobra.Command{
	Use:   "logs <name>",
	Short: "Stream deployment logs",
	Args:  cobra.ExactArgs(1),
	RunE:  streamLogs,
}

var (
	// logsFollow keeps the stream open and tails new output (-f).
	logsFollow bool
	// logsLines is the number of trailing lines to request (-n).
	logsLines int
)

// init registers the --follow and --lines flags on LogsCmd.
func init() {
	LogsCmd.Flags().BoolVarP(&logsFollow, "follow", "f", false, "Follow log output")
	LogsCmd.Flags().IntVarP(&logsLines, "lines", "n", 100, "Number of lines to show")
}
// streamLogs fetches and prints logs for the named deployment. With
// --follow the HTTP stream is kept open and polled for new output.
func streamLogs(cmd *cobra.Command, args []string) error {
	name := args[0]
	apiURL := getAPIURL()
	url := fmt.Sprintf("%s/v1/deployments/logs?name=%s&lines=%d&follow=%t",
		apiURL, name, logsLines, logsFollow)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return err
	}
	token, err := getAuthToken()
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("failed to get logs: %s", string(body))
	}
	// Stream logs line by line.
	reader := bufio.NewReader(resp.Body)
	for {
		line, err := reader.ReadString('\n')
		// Print any partial line before inspecting the error so a final
		// unterminated line is not silently dropped at EOF.
		if len(line) > 0 {
			fmt.Print(line)
		}
		if err != nil {
			if err == io.EOF {
				if !logsFollow {
					break
				}
				// In follow mode the previous code hit a busy-wait here,
				// spinning at 100% CPU on EOF; back off briefly instead.
				time.Sleep(250 * time.Millisecond)
				continue
			}
			return err
		}
	}
	return nil
}

439
pkg/coredns/README.md Normal file
View File

@ -0,0 +1,439 @@
# CoreDNS RQLite Plugin
This directory contains a custom CoreDNS plugin that serves DNS records from RQLite, enabling dynamic DNS for Orama Network deployments.
## Architecture
The plugin provides:
- **Dynamic DNS Records**: Queries RQLite for DNS records in real-time
- **Caching**: In-memory cache to reduce database load
- **Health Monitoring**: Periodic health checks of RQLite connection
- **Wildcard Support**: Handles wildcard DNS patterns (e.g., `*.node-xyz.debros.network`)
## Building CoreDNS with RQLite Plugin
CoreDNS plugins must be compiled into the binary. Follow these steps:
### 1. Install Prerequisites
```bash
# Install Go 1.21 or later
wget https://go.dev/dl/go1.21.6.linux-amd64.tar.gz
sudo rm -rf /usr/local/go
sudo tar -C /usr/local -xzf go1.21.6.linux-amd64.tar.gz
export PATH=$PATH:/usr/local/go/bin
# Verify Go installation
go version
```
### 2. Clone CoreDNS
```bash
cd /tmp
git clone https://github.com/coredns/coredns.git
cd coredns
git checkout v1.11.1 # Match the version in install script
```
### 3. Add RQLite Plugin
Edit `plugin.cfg` in the CoreDNS root directory and add the rqlite plugin in the appropriate position (after `cache`, before `forward`):
```
# plugin.cfg
cache:cache
rqlite:github.com/DeBrosOfficial/network/pkg/coredns/rqlite
forward:forward
```
### 4. Copy Plugin Code
```bash
# From your network repository root
cd /path/to/network
cp -r pkg/coredns/rqlite /tmp/coredns/plugin/
```
### 5. Update go.mod
```bash
cd /tmp/coredns
# Add your module as a dependency
go mod edit -replace github.com/DeBrosOfficial/network=/path/to/network
# Get dependencies
go get github.com/DeBrosOfficial/network/pkg/coredns/rqlite
go mod tidy
```
### 6. Build CoreDNS
```bash
make
```
This creates the `coredns` binary in the current directory with the RQLite plugin compiled in.
### 7. Verify Plugin
```bash
./coredns -plugins | grep rqlite
```
You should see:
```
dns.rqlite
```
## Installation on Nodes
### Using the Install Script
```bash
# Build custom CoreDNS first (see above)
# Then copy the binary to the network repo
cp /tmp/coredns/coredns /path/to/network/bin/
# Run install script on each node
cd /path/to/network
sudo ./scripts/install-coredns.sh
# The script will:
# 1. Copy coredns binary to /usr/local/bin/
# 2. Create config directories
# 3. Install systemd service
# 4. Set up proper permissions
```
### Manual Installation
If you prefer manual installation:
```bash
# 1. Copy binary
sudo cp coredns /usr/local/bin/
sudo chmod +x /usr/local/bin/coredns
# 2. Create directories
sudo mkdir -p /etc/coredns
sudo mkdir -p /var/lib/coredns
sudo chown debros:debros /var/lib/coredns
# 3. Copy configuration
sudo cp configs/coredns/Corefile /etc/coredns/
# 4. Install systemd service
sudo cp configs/coredns/coredns.service /etc/systemd/system/
sudo systemctl daemon-reload
# 5. Configure firewall
sudo ufw allow 53/tcp
sudo ufw allow 53/udp
sudo ufw allow 8080/tcp # Health check
sudo ufw allow 9153/tcp # Metrics
# 6. Start service
sudo systemctl enable coredns
sudo systemctl start coredns
```
## Configuration
### Corefile
The Corefile at `/etc/coredns/Corefile` configures CoreDNS behavior:
```corefile
debros.network {
rqlite {
dsn http://localhost:5001 # RQLite HTTP endpoint
refresh 10s # Health check interval
ttl 300 # Cache TTL in seconds
cache_size 10000 # Max cached entries
}
cache {
success 10000 300 # Cache successful responses
denial 5000 60 # Cache NXDOMAIN responses
prefetch 10 # Prefetch before expiry
}
log { class denial error }
errors
health :8080
prometheus :9153
}
. {
forward . 8.8.8.8 8.8.4.4 1.1.1.1
cache 300
errors
}
```
### RQLite Connection
Ensure RQLite is running and accessible:
```bash
# Test RQLite connectivity
curl http://localhost:5001/status
# Test DNS record query
curl -G http://localhost:5001/db/query \
--data-urlencode 'q=SELECT * FROM dns_records LIMIT 5'
```
## Testing
### 1. Add Test DNS Record
```bash
# Via RQLite
curl -XPOST 'http://localhost:5001/db/execute' \
-H 'Content-Type: application/json' \
-d '[
["INSERT INTO dns_records (fqdn, record_type, value, ttl, namespace, created_by, is_active) VALUES (?, ?, ?, ?, ?, ?, ?)",
"test.debros.network.", "A", "1.2.3.4", 300, "test", "system", true]
]'
```
### 2. Query CoreDNS
```bash
# Query local CoreDNS
dig @localhost test.debros.network
# Expected output:
# ;; ANSWER SECTION:
# test.debros.network. 300 IN A 1.2.3.4
# Query from remote machine
dig @<node-ip> test.debros.network
```
### 3. Test Wildcard
```bash
# Add wildcard record
curl -XPOST 'http://localhost:5001/db/execute' \
-H 'Content-Type: application/json' \
-d '[
["INSERT INTO dns_records (fqdn, record_type, value, ttl, namespace, created_by, is_active) VALUES (?, ?, ?, ?, ?, ?, ?)",
"*.node-abc123.debros.network.", "A", "1.2.3.4", 300, "test", "system", true]
]'
# Test wildcard resolution
dig @localhost app1.node-abc123.debros.network
dig @localhost app2.node-abc123.debros.network
```
### 4. Check Health
```bash
# Health check endpoint
curl http://localhost:8080/health
# Prometheus metrics
curl http://localhost:9153/metrics | grep coredns_rqlite
```
### 5. Monitor Logs
```bash
# Follow CoreDNS logs
sudo journalctl -u coredns -f
# Check for errors
sudo journalctl -u coredns --since "10 minutes ago" | grep -i error
```
## Monitoring
### Metrics
CoreDNS exports Prometheus metrics on port 9153:
- `coredns_dns_requests_total` - Total DNS requests
- `coredns_dns_responses_total` - Total DNS responses by rcode
- `coredns_cache_hits_total` - Cache hit rate
- `coredns_cache_misses_total` - Cache miss rate
### Health Checks
The health endpoint at `:8080/health` returns:
- `200 OK` if RQLite is healthy
- `503 Service Unavailable` if RQLite is unhealthy
## Troubleshooting
### Plugin Not Found
If CoreDNS fails to start with "plugin not found":
1. Verify plugin was compiled in: `coredns -plugins | grep rqlite`
2. Rebuild CoreDNS with plugin included (see Build section)
### RQLite Connection Failed
```bash
# Check RQLite is running
sudo systemctl status rqlite
# Test RQLite HTTP API
curl http://localhost:5001/status
# Check firewall
sudo ufw status | grep 5001
```
### DNS Queries Not Working
```bash
# 1. Check CoreDNS is listening on port 53
sudo netstat -tulpn | grep :53
# 2. Test local query
dig @127.0.0.1 test.debros.network
# 3. Check logs for errors
sudo journalctl -u coredns --since "5 minutes ago"
# 4. Verify DNS records exist in RQLite
curl -G http://localhost:5001/db/query \
--data-urlencode 'q=SELECT * FROM dns_records WHERE is_active = TRUE'
```
### Cache Issues
If DNS responses are stale:
```bash
# Restart CoreDNS to clear cache
sudo systemctl restart coredns
# Or reduce cache TTL in Corefile:
# cache {
# success 10000 60 # Reduce to 60 seconds
# }
```
## Production Deployment
### 1. Deploy to All Nameservers
Install CoreDNS on all 4 nameserver nodes (ns1-ns4).
### 2. Configure Registrar
At your domain registrar, set NS records for `debros.network`:
```
debros.network. IN NS ns1.debros.network.
debros.network. IN NS ns2.debros.network.
debros.network. IN NS ns3.debros.network.
debros.network. IN NS ns4.debros.network.
```
Add glue records:
```
ns1.debros.network. IN A <node-1-ip>
ns2.debros.network. IN A <node-2-ip>
ns3.debros.network. IN A <node-3-ip>
ns4.debros.network. IN A <node-4-ip>
```
### 3. Verify Propagation
```bash
# Check NS records
dig NS debros.network
# Check from public DNS
dig @8.8.8.8 test.debros.network
# Check from all nameservers
dig @ns1.debros.network test.debros.network
dig @ns2.debros.network test.debros.network
dig @ns3.debros.network test.debros.network
dig @ns4.debros.network test.debros.network
```
### 4. Monitor
Set up monitoring for:
- CoreDNS uptime on all nodes
- DNS query latency
- Cache hit rate
- RQLite connection health
- Query error rate
## Security
### Firewall
Only expose necessary ports:
- Port 53 (DNS): Public
- Port 8080 (Health): Internal only
- Port 9153 (Metrics): Internal only
- Port 5001 (RQLite): Internal only
```bash
# Allow DNS from anywhere
sudo ufw allow 53/tcp
sudo ufw allow 53/udp
# Restrict health and metrics to internal network
sudo ufw allow from 10.0.0.0/8 to any port 8080
sudo ufw allow from 10.0.0.0/8 to any port 9153
```
### DNS Security
- Enable DNSSEC (future enhancement)
- Rate limit queries (add to Corefile)
- Monitor for DNS amplification attacks
- Validate RQLite data integrity
## Performance Tuning
### Cache Optimization
Adjust cache settings based on query patterns:
```corefile
cache {
success 50000 600 # 50k entries, 10 min TTL
denial 10000 300 # 10k NXDOMAIN, 5 min TTL
prefetch 20 # Prefetch 20s before expiry
}
```
### RQLite Connection Pool
The plugin maintains a connection pool:
- Max idle connections: 10
- Idle timeout: 90s
- Request timeout: 10s
Adjust in `client.go` if needed for higher load.
### System Limits
```bash
# Increase file descriptor limit
# Add to /etc/security/limits.conf:
debros soft nofile 65536
debros hard nofile 65536
```
## Next Steps
After CoreDNS is operational:
1. Implement automatic DNS record creation in deployment handlers
2. Add DNS record cleanup for deleted deployments
3. Set up DNS monitoring and alerting
4. Configure domain routing middleware in gateway
5. Test end-to-end deployment flow

View File

@ -0,0 +1,227 @@
package rqlite
import (
"context"
"fmt"
"net"
"strings"
"sync"
"time"
"github.com/miekg/dns"
"go.uber.org/zap"
)
// DNSRecord represents a single DNS record row loaded from RQLite.
type DNSRecord struct {
	FQDN        string      // fully-qualified, lower-cased name (trailing dot)
	Type        uint16      // dns.TypeA / TypeAAAA / TypeCNAME / TypeTXT
	Value       string      // raw value column (IP address, target, or text)
	TTL         int         // TTL in seconds, from the ttl column
	ParsedValue interface{} // pre-parsed form produced by Backend.parseValue
}
// Backend handles RQLite connections and queries for the plugin.
type Backend struct {
	dsn         string        // RQLite HTTP endpoint, e.g. http://localhost:5001
	client      *RQLiteClient // underlying HTTP client (set once, never replaced)
	logger      *zap.Logger
	refreshRate time.Duration // interval between health-check pings
	mu          sync.RWMutex  // guards healthy
	healthy     bool          // result of the most recent ping
}
// NewBackend connects to RQLite at dsn, verifies connectivity with an
// initial ping, and starts a background health-check loop that runs
// every refreshRate.
func NewBackend(dsn string, refreshRate time.Duration, logger *zap.Logger) (*Backend, error) {
	client, err := NewRQLiteClient(dsn, logger)
	if err != nil {
		return nil, fmt.Errorf("failed to create RQLite client: %w", err)
	}
	backend := &Backend{
		dsn:         dsn,
		client:      client,
		logger:      logger,
		refreshRate: refreshRate,
	}
	// Fail fast if RQLite is unreachable at startup.
	if err := backend.ping(); err != nil {
		return nil, fmt.Errorf("failed to ping RQLite: %w", err)
	}
	backend.healthy = true
	go backend.healthCheck()
	return backend, nil
}
// Query retrieves active DNS records matching fqdn and qtype from
// RQLite. The name is normalized (lower-cased, trailing dot) before the
// lookup; rows whose value cannot be parsed are logged and skipped.
func (b *Backend) Query(ctx context.Context, fqdn string, qtype uint16) ([]*DNSRecord, error) {
	// NOTE: no mutex is taken here. b.client is assigned once in
	// NewBackend and never replaced, and the previous code held
	// b.mu.RLock across the entire network round-trip, needlessly
	// blocking healthCheck's state updates for the duration of every
	// DNS query.
	fqdn = dns.Fqdn(strings.ToLower(fqdn))
	recordType := qTypeToString(qtype)
	// Query active records matching FQDN and type.
	query := `
		SELECT fqdn, record_type, value, ttl
		FROM dns_records
		WHERE fqdn = ? AND record_type = ? AND is_active = TRUE
	`
	rows, err := b.client.Query(ctx, query, fqdn, recordType)
	if err != nil {
		return nil, fmt.Errorf("query failed: %w", err)
	}
	records := make([]*DNSRecord, 0)
	for _, row := range rows {
		if len(row) < 4 {
			continue
		}
		fqdnVal, _ := row[0].(string)
		typeVal, _ := row[1].(string)
		valueVal, _ := row[2].(string)
		// RQLite returns JSON numbers, which decode as float64.
		ttlVal, _ := row[3].(float64)
		parsedValue, err := b.parseValue(typeVal, valueVal)
		if err != nil {
			b.logger.Warn("Failed to parse record value",
				zap.String("fqdn", fqdnVal),
				zap.String("type", typeVal),
				zap.String("value", valueVal),
				zap.Error(err),
			)
			continue
		}
		records = append(records, &DNSRecord{
			FQDN:        fqdnVal,
			Type:        stringToQType(typeVal),
			Value:       valueVal,
			TTL:         int(ttlVal),
			ParsedValue: parsedValue,
		})
	}
	return records, nil
}
// parseValue converts a stored record value into the representation the
// plugin's buildRR expects: *dns.A for A, *dns.AAAA for AAAA, an FQDN
// string for CNAME, and a []string for TXT. Unsupported types error.
func (b *Backend) parseValue(recordType, value string) (interface{}, error) {
	switch t := strings.ToUpper(recordType); t {
	case "A":
		addr := net.ParseIP(value)
		if addr == nil || addr.To4() == nil {
			return nil, fmt.Errorf("invalid IPv4 address: %s", value)
		}
		return &dns.A{A: addr.To4()}, nil
	case "AAAA":
		addr := net.ParseIP(value)
		if addr == nil || addr.To16() == nil {
			return nil, fmt.Errorf("invalid IPv6 address: %s", value)
		}
		return &dns.AAAA{AAAA: addr.To16()}, nil
	case "CNAME":
		return dns.Fqdn(value), nil
	case "TXT":
		return []string{value}, nil
	default:
		return nil, fmt.Errorf("unsupported record type: %s", recordType)
	}
}
// ping issues a trivial query against RQLite with a 5-second timeout to
// verify the connection is alive.
func (b *Backend) ping() error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_, err := b.client.Query(ctx, "SELECT 1")
	return err
}
// healthCheck pings RQLite every refreshRate and updates b.healthy,
// logging transitions (each failure, and recovery exactly once).
// NOTE(review): this goroutine runs for the lifetime of the process —
// Close does not stop it; confirm that is acceptable for callers that
// create short-lived backends.
func (b *Backend) healthCheck() {
	ticker := time.NewTicker(b.refreshRate)
	defer ticker.Stop()
	for range ticker.C {
		if err := b.ping(); err != nil {
			b.mu.Lock()
			b.healthy = false
			b.mu.Unlock()
			b.logger.Error("Health check failed", zap.Error(err))
		} else {
			b.mu.Lock()
			// Remember the previous state so recovery is logged only once.
			wasUnhealthy := !b.healthy
			b.healthy = true
			b.mu.Unlock()
			if wasUnhealthy {
				b.logger.Info("Health check recovered")
			}
		}
	}
}
// Healthy reports the result of the most recent health check.
func (b *Backend) Healthy() bool {
	b.mu.RLock()
	ok := b.healthy
	b.mu.RUnlock()
	return ok
}
// Close releases the underlying RQLite client's idle connections.
// It does not stop the healthCheck goroutine.
func (b *Backend) Close() error {
	return b.client.Close()
}
// qTypeToString converts a DNS query type to its textual mnemonic
// ("A", "AAAA", "CNAME", ...). The previous switch duplicated entries
// that dns.TypeToString already contains and then fell back to the same
// map, so the lookup alone is equivalent; unknown types yield "" as
// before (map miss on a uint16 key).
func qTypeToString(qtype uint16) string {
	return dns.TypeToString[qtype]
}
// stringToQType converts a textual record type ("A", "aaaa", ...) to
// its numeric DNS type. Generalized from the previous four-case switch
// to accept every type miekg/dns knows via its StringToType table;
// unknown strings still return 0, so existing behavior for the
// supported A/AAAA/CNAME/TXT set is unchanged.
func stringToQType(s string) uint16 {
	if t, ok := dns.StringToType[strings.ToUpper(s)]; ok {
		return t
	}
	return 0
}

135
pkg/coredns/rqlite/cache.go Normal file
View File

@ -0,0 +1,135 @@
package rqlite
import (
"fmt"
"sync"
"time"
"github.com/miekg/dns"
)
// CacheEntry is a cached DNS response together with its absolute expiry.
type CacheEntry struct {
	msg       *dns.Msg
	expiresAt time.Time
}

// Cache is a size-bounded in-memory cache of DNS responses keyed by
// "qname:qtype". All fields are guarded by mu.
type Cache struct {
	entries   map[string]*CacheEntry
	mu        sync.RWMutex
	maxSize   int           // eviction threshold for len(entries)
	ttl       time.Duration // lifetime applied to every entry on Set
	hitCount  uint64        // cumulative Get hits
	missCount uint64        // cumulative Get misses (including expired)
}
// NewCache builds a cache holding at most maxSize responses, each valid
// for ttl, and launches the background expiry sweeper.
func NewCache(maxSize int, ttl time.Duration) *Cache {
	cache := &Cache{
		entries: make(map[string]*CacheEntry),
		maxSize: maxSize,
		ttl:     ttl,
	}
	go cache.cleanup()
	return cache
}
// Get returns a copy of the cached response for (qname, qtype), or nil
// on a miss or when the entry has expired. Hit/miss counters are
// updated on every call.
func (c *Cache) Get(qname string, qtype uint16) *dns.Msg {
	// A full lock is required here (not RLock): the hit/miss counters
	// are written on every lookup, and the previous code mutated them
	// while holding only the read lock — a data race under concurrent
	// ServeDNS calls.
	c.mu.Lock()
	defer c.mu.Unlock()
	key := c.key(qname, qtype)
	entry, exists := c.entries[key]
	// Expired entries count as misses; the cleanup goroutine removes them.
	if !exists || time.Now().After(entry.expiresAt) {
		c.missCount++
		return nil
	}
	c.hitCount++
	// Return a copy so callers can stamp it for their own request.
	return entry.msg.Copy()
}
// Set stores a copy of msg under (qname, qtype), evicting the entry
// closest to expiry when the cache is at capacity.
func (c *Cache) Set(qname string, qtype uint16, msg *dns.Msg) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if len(c.entries) >= c.maxSize {
		c.evictOldest()
	}
	c.entries[c.key(qname, qtype)] = &CacheEntry{
		msg:       msg.Copy(),
		expiresAt: time.Now().Add(c.ttl),
	}
}
// key builds the cache key for a (qname, qtype) pair, e.g. "x.example.:1".
func (c *Cache) key(qname string, qtype uint16) string {
	return fmt.Sprintf("%s:%d", qname, qtype)
}
// evictOldest removes the entry with the earliest expiry time (a simple
// O(n) eviction strategy). Caller must hold c.mu.
func (c *Cache) evictOldest() {
	var victim string
	var victimExpiry time.Time
	for k, e := range c.entries {
		// Keys are never empty ("qname:qtype"), so "" marks "none yet".
		if victim == "" || e.expiresAt.Before(victimExpiry) {
			victim = k
			victimExpiry = e.expiresAt
		}
	}
	if victim != "" {
		delete(c.entries, victim)
	}
}
// cleanup sweeps expired entries once per minute.
// NOTE(review): runs for the process lifetime; there is no stop signal.
func (c *Cache) cleanup() {
	ticker := time.NewTicker(1 * time.Minute)
	defer ticker.Stop()
	for range ticker.C {
		now := time.Now()
		c.mu.Lock()
		for k, e := range c.entries {
			if now.After(e.expiresAt) {
				delete(c.entries, k)
			}
		}
		c.mu.Unlock()
	}
}
// Stats returns cumulative hit/miss counters and the current entry count.
func (c *Cache) Stats() (hits, misses uint64, size int) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.hitCount, c.missCount, len(c.entries)
}
// Clear drops every cached entry, resetting the cache to empty.
// Hit/miss counters are intentionally preserved.
func (c *Cache) Clear() {
	c.mu.Lock()
	c.entries = make(map[string]*CacheEntry)
	c.mu.Unlock()
}

View File

@ -0,0 +1,101 @@
package rqlite
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
"go.uber.org/zap"
)
// RQLiteClient is a minimal HTTP client for RQLite's query API.
type RQLiteClient struct {
	baseURL    string // RQLite HTTP endpoint, e.g. http://localhost:5001
	httpClient *http.Client
	logger     *zap.Logger
}
// QueryResponse is the top-level JSON envelope returned by /db/query.
type QueryResponse struct {
	Results []QueryResult `json:"results"`
}

// QueryResult is one statement's outcome: column metadata, row values,
// and an error string (empty on success).
type QueryResult struct {
	Columns []string        `json:"columns"`
	Types   []string        `json:"types"`
	Values  [][]interface{} `json:"values"`
	Error   string          `json:"error"`
}
// NewRQLiteClient builds an HTTP client for the RQLite API at dsn with
// pooled connections and a 10-second request timeout. It never returns
// a non-nil error; the error slot exists for interface symmetry.
func NewRQLiteClient(dsn string, logger *zap.Logger) (*RQLiteClient, error) {
	transport := &http.Transport{
		MaxIdleConns:        10,
		MaxIdleConnsPerHost: 10,
		IdleConnTimeout:     90 * time.Second,
	}
	client := &RQLiteClient{
		baseURL: dsn,
		httpClient: &http.Client{
			Timeout:   10 * time.Second,
			Transport: transport,
		},
		logger: logger,
	}
	return client, nil
}
// Query executes a single parameterized SQL statement against RQLite's
// /db/query endpoint and returns the raw result rows.
func (c *RQLiteClient) Query(ctx context.Context, query string, args ...interface{}) ([][]interface{}, error) {
	// RQLite's parameterized form is a JSON array of [statement, args...].
	stmt := append([]interface{}{query}, args...)
	payload, err := json.Marshal([][]interface{}{stmt})
	if err != nil {
		return nil, fmt.Errorf("failed to marshal query: %w", err)
	}
	req, err := http.NewRequestWithContext(ctx, "POST", c.baseURL+"/db/query", bytes.NewReader(payload))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("query failed with status %d: %s", resp.StatusCode, string(body))
	}
	var decoded QueryResponse
	if err := json.NewDecoder(resp.Body).Decode(&decoded); err != nil {
		return nil, fmt.Errorf("failed to decode response: %w", err)
	}
	// Only a single statement is sent, so only the first result matters.
	if len(decoded.Results) == 0 {
		return [][]interface{}{}, nil
	}
	first := decoded.Results[0]
	if first.Error != "" {
		return nil, fmt.Errorf("query error: %s", first.Error)
	}
	return first.Values, nil
}
// Close releases idle connections held by the HTTP client; it never
// returns a non-nil error.
func (c *RQLiteClient) Close() error {
	c.httpClient.CloseIdleConnections()
	return nil
}

View File

@ -0,0 +1,194 @@
package rqlite
import (
"context"
"fmt"
"time"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/request"
"github.com/miekg/dns"
"go.uber.org/zap"
)
// RQLitePlugin implements the CoreDNS plugin.Handler interface, serving
// records for its configured zones from an RQLite backend with a local
// response cache.
type RQLitePlugin struct {
	Next    plugin.Handler // next plugin in the CoreDNS chain
	logger  *zap.Logger
	backend *Backend // RQLite-backed record store
	cache   *Cache   // per-instance response cache
	zones   []string // zones this plugin answers for
}
// Name returns the plugin name used in the Corefile and in logs.
func (p *RQLitePlugin) Name() string {
	return "rqlite"
}
// ServeDNS answers queries for the plugin's zones from cache or RQLite,
// falling back to a wildcard lookup, and finally NXDOMAIN when nothing
// matches. Queries outside the configured zones are handed to Next.
func (p *RQLitePlugin) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
	state := request.Request{W: w, Req: r}
	// Only handle queries for our configured zones.
	if !p.isOurZone(state.Name()) {
		return plugin.NextOrFailure(p.Name(), p.Next, ctx, w, r)
	}
	// Serve from cache when possible; Get returns a private copy, and
	// SetReply re-stamps it (transaction ID, question) for this request.
	if cachedMsg := p.cache.Get(state.Name(), state.QType()); cachedMsg != nil {
		p.logger.Debug("Cache hit",
			zap.String("qname", state.Name()),
			zap.Uint16("qtype", state.QType()),
		)
		cachedMsg.SetReply(r)
		w.WriteMsg(cachedMsg)
		return dns.RcodeSuccess, nil
	}
	// Cache miss: ask the RQLite backend for an exact match.
	records, err := p.backend.Query(ctx, state.Name(), state.QType())
	if err != nil {
		p.logger.Error("Backend query failed",
			zap.String("qname", state.Name()),
			zap.Error(err),
		)
		return dns.RcodeServerFailure, err
	}
	// If no exact match, retry with the wildcard form of the name
	// (e.g. app.node-x.debros.network -> *.node-x.debros.network).
	if len(records) == 0 {
		wildcardName := p.getWildcardName(state.Name())
		if wildcardName != "" {
			records, err = p.backend.Query(ctx, wildcardName, state.QType())
			if err != nil {
				p.logger.Error("Wildcard query failed",
					zap.String("wildcard", wildcardName),
					zap.Error(err),
				)
				return dns.RcodeServerFailure, err
			}
		}
	}
	// Still nothing: answer NXDOMAIN with an SOA for negative caching.
	if len(records) == 0 {
		p.logger.Debug("No records found",
			zap.String("qname", state.Name()),
			zap.Uint16("qtype", state.QType()),
		)
		return p.handleNXDomain(ctx, w, r, &state)
	}
	// Build an authoritative answer from the matched records.
	msg := new(dns.Msg)
	msg.SetReply(r)
	msg.Authoritative = true
	for _, record := range records {
		rr := p.buildRR(state.Name(), record)
		if rr != nil {
			msg.Answer = append(msg.Answer, rr)
		}
	}
	// Cache the positive response for future queries of this name/type.
	// NOTE(review): negative (NXDOMAIN) results are not cached here.
	p.cache.Set(state.Name(), state.QType(), msg)
	w.WriteMsg(msg)
	return dns.RcodeSuccess, nil
}
// isOurZone reports whether qname falls under any of the plugin's
// configured zones.
func (p *RQLitePlugin) isOurZone(qname string) bool {
	// plugin.Zones.Matches returns the longest matching zone, or ""
	// when none match. The previous loop returned on its very first
	// iteration in both branches, so only the first configured zone was
	// ever consulted and a qname under zones[1:] was wrongly rejected.
	return plugin.Zones(p.zones).Matches(qname) != ""
}
// getWildcardName returns the wildcard pattern that could cover qname,
// e.g. myapp.node-7prvNa.debros.network. -> *.node-7prvNa.debros.network.
// Returns "" when qname has fewer than three labels.
func (p *RQLitePlugin) getWildcardName(qname string) string {
	labels := dns.SplitDomainName(qname)
	if len(labels) < 3 {
		return ""
	}
	// Replace only the FIRST label with "*", keeping all remaining
	// labels. The previous version rebuilt the name from exactly three
	// labels, truncating any deeper name (e.g. it turned
	// myapp.node-x.debros.network into *.node-x.debros — dropping the
	// "network" suffix — so wildcard lookups could never match).
	cut := len(labels[0]) // index of the dot after the first label
	if cut >= len(qname) {
		return ""
	}
	return dns.Fqdn("*" + qname[cut:])
}
// buildRR converts a DNSRecord into a miekg/dns resource record with
// qname as the owner name. It relies on the contract established by
// Backend.parseValue: ParsedValue is *dns.A for A, *dns.AAAA for AAAA,
// a string for CNAME, and []string for TXT — the type assertions below
// would panic if a record bypassed that parsing. Returns nil (and logs)
// for record types outside that set.
func (p *RQLitePlugin) buildRR(qname string, record *DNSRecord) dns.RR {
	header := dns.RR_Header{
		Name:   qname,
		Rrtype: record.Type,
		Class:  dns.ClassINET,
		Ttl:    uint32(record.TTL),
	}
	switch record.Type {
	case dns.TypeA:
		return &dns.A{
			Hdr: header,
			A:   record.ParsedValue.(*dns.A).A,
		}
	case dns.TypeAAAA:
		return &dns.AAAA{
			Hdr:  header,
			AAAA: record.ParsedValue.(*dns.AAAA).AAAA,
		}
	case dns.TypeCNAME:
		return &dns.CNAME{
			Hdr:    header,
			Target: record.ParsedValue.(string),
		}
	case dns.TypeTXT:
		return &dns.TXT{
			Hdr: header,
			Txt: record.ParsedValue.([]string),
		}
	default:
		p.logger.Warn("Unsupported record type",
			zap.Uint16("type", record.Type),
		)
		return nil
	}
}
// handleNXDomain writes an authoritative NXDOMAIN response, attaching a
// synthetic SOA record (built from the first configured zone) so
// resolvers can negatively cache the answer.
func (p *RQLitePlugin) handleNXDomain(ctx context.Context, w dns.ResponseWriter, r *dns.Msg, state *request.Request) (int, error) {
	msg := new(dns.Msg)
	msg.SetRcode(r, dns.RcodeNameError)
	msg.Authoritative = true
	// Minttl bounds how long downstream resolvers may cache the NXDOMAIN.
	soa := &dns.SOA{
		Hdr: dns.RR_Header{
			Name:   p.zones[0],
			Rrtype: dns.TypeSOA,
			Class:  dns.ClassINET,
			Ttl:    300,
		},
		Ns:   "ns1." + p.zones[0],
		Mbox: "admin." + p.zones[0],
		// Using the current time as the serial keeps it increasing
		// across restarts without any persisted state.
		Serial:  uint32(time.Now().Unix()),
		Refresh: 3600,
		Retry:   600,
		Expire:  86400,
		Minttl:  300,
	}
	msg.Ns = append(msg.Ns, soa)
	w.WriteMsg(msg)
	return dns.RcodeNameError, nil
}
// Ready implements the ready.Readiness interface: the plugin is ready
// when the RQLite backend's most recent health check succeeded.
func (p *RQLitePlugin) Ready() bool {
	return p.backend.Healthy()
}

125
pkg/coredns/rqlite/setup.go Normal file
View File

@ -0,0 +1,125 @@
package rqlite
import (
"fmt"
"strconv"
"time"
"github.com/coredns/caddy"
"github.com/coredns/coredns/core/dnsserver"
"github.com/coredns/coredns/plugin"
"go.uber.org/zap"
)
// init registers this plugin with CoreDNS under the name "rqlite" so it can
// be referenced from a Corefile server block.
func init() {
	plugin.Register("rqlite", setup)
}
// setup is the caddy setup callback for the rqlite directive: it parses the
// Corefile block and inserts the resulting plugin into the handler chain.
func setup(c *caddy.Controller) error {
	rq, parseErr := parseConfig(c)
	if parseErr != nil {
		return plugin.Error("rqlite", parseErr)
	}
	cfg := dnsserver.GetConfig(c)
	cfg.AddPlugin(func(next plugin.Handler) plugin.Handler {
		rq.Next = next
		return rq
	})
	return nil
}
// parseConfig parses the Corefile configuration for the "rqlite" directive.
//
// Recognized properties inside the plugin block:
//
//	dsn <url>        RQLite HTTP endpoint (default "http://localhost:5001")
//	refresh <dur>    backend refresh interval, Go duration syntax (default 10s)
//	ttl <seconds>    cache entry TTL in seconds (default 300)
//	cache_size <n>   maximum number of cached entries (default 10000)
//
// Zones come from the directive arguments, falling back to the server block
// keys; when neither yields a zone, the root zone "." is used.
func parseConfig(c *caddy.Controller) (*RQLitePlugin, error) {
	logger, err := zap.NewProduction()
	if err != nil {
		return nil, fmt.Errorf("failed to create logger: %w", err)
	}
	var (
		dsn         = "http://localhost:5001"
		refreshRate = 10 * time.Second
		cacheTTL    = 300 * time.Second
		cacheSize   = 10000
		zones       []string
	)
	for c.Next() {
		// BUG FIX: at this point c.Val() is the directive name itself
		// ("rqlite"); the original appended it to zones, so every server
		// block silently gained a bogus "rqlite." zone. Zones must come only
		// from the directive arguments or the server block keys.
		zones = append(zones, plugin.OriginsFromArgsOrServerBlock(c.RemainingArgs(), c.ServerBlockKeys)...)
		// Parse plugin configuration block
		for c.NextBlock() {
			switch c.Val() {
			case "dsn":
				if !c.NextArg() {
					return nil, c.ArgErr()
				}
				dsn = c.Val()
			case "refresh":
				if !c.NextArg() {
					return nil, c.ArgErr()
				}
				dur, err := time.ParseDuration(c.Val())
				if err != nil {
					return nil, fmt.Errorf("invalid refresh duration: %w", err)
				}
				refreshRate = dur
			case "ttl":
				if !c.NextArg() {
					return nil, c.ArgErr()
				}
				ttlVal, err := strconv.Atoi(c.Val())
				if err != nil {
					return nil, fmt.Errorf("invalid TTL: %w", err)
				}
				cacheTTL = time.Duration(ttlVal) * time.Second
			case "cache_size":
				if !c.NextArg() {
					return nil, c.ArgErr()
				}
				size, err := strconv.Atoi(c.Val())
				if err != nil {
					return nil, fmt.Errorf("invalid cache size: %w", err)
				}
				cacheSize = size
			default:
				return nil, c.Errf("unknown property '%s'", c.Val())
			}
		}
	}
	if len(zones) == 0 {
		zones = []string{"."}
	}
	// Create backend
	backend, err := NewBackend(dsn, refreshRate, logger)
	if err != nil {
		return nil, fmt.Errorf("failed to create backend: %w", err)
	}
	// Create cache
	cache := NewCache(cacheSize, cacheTTL)
	logger.Info("RQLite plugin initialized",
		zap.String("dsn", dsn),
		zap.Duration("refresh", refreshRate),
		zap.Duration("cache_ttl", cacheTTL),
		zap.Int("cache_size", cacheSize),
		zap.Strings("zones", zones),
	)
	return &RQLitePlugin{
		logger:  logger,
		backend: backend,
		cache:   cache,
		zones:   zones,
	}, nil
}

24
pkg/database/database.go Normal file
View File

@ -0,0 +1,24 @@
// Package database provides a generic database interface for the deployment system.
// This allows different database implementations (RQLite, SQLite, etc.) to be used
// interchangeably throughout the deployment handlers.
package database
import "context"
// Database is a generic interface for database operations.
// It provides methods for executing queries and commands that can be implemented
// by various database clients (RQLite, SQLite, etc.).
// All args are bound parameters, substituted for `?` placeholders in the query.
type Database interface {
	// Query executes a SELECT query and scans results into dest
	// dest should be a pointer to a slice of structs with `db` tags
	Query(ctx context.Context, dest interface{}, query string, args ...interface{}) error
	// QueryOne executes a SELECT query and scans a single result into dest
	// dest should be a pointer to a struct with `db` tags
	// Returns an error if no rows are found or multiple rows are returned
	QueryOne(ctx context.Context, dest interface{}, query string, args ...interface{}) error
	// Exec executes an INSERT, UPDATE, or DELETE query
	// Returns the result (typically last insert ID or rows affected)
	Exec(ctx context.Context, query string, args ...interface{}) (interface{}, error)
}

View File

@ -0,0 +1,271 @@
package health
import (
"context"
"fmt"
"net/http"
"sync"
"time"
"github.com/DeBrosOfficial/network/pkg/database"
"go.uber.org/zap"
)
// deploymentRow represents a deployment record for health checking.
// Field tags mirror the columns selected in checkAllDeployments.
type deploymentRow struct {
	ID              string `db:"id"`                // deployment primary key
	Namespace       string `db:"namespace"`         // owning namespace
	Name            string `db:"name"`              // human-readable deployment name
	Type            string `db:"type"`              // e.g. nextjs, nodejs-backend, go-backend
	Port            int    `db:"port"`              // local port; 0 means static (no process to probe)
	HealthCheckPath string `db:"health_check_path"` // HTTP path probed on localhost:Port
	HomeNodeID      string `db:"home_node_id"`      // node hosting the deployment
}
// HealthChecker monitors deployment health by periodically probing each
// active deployment's local health endpoint and recording the outcome.
type HealthChecker struct {
	db      database.Database
	logger  *zap.Logger
	workers int // max concurrent health probes per cycle
	mu      sync.RWMutex
	// active maps deployment_id -> is_active.
	// NOTE(review): initialized in NewHealthChecker but never read or written
	// by any method in this file — confirm it is used elsewhere or remove.
	active map[string]bool
}
// NewHealthChecker constructs a HealthChecker backed by db, logging through
// logger, with a default pool of 10 concurrent probe workers.
func NewHealthChecker(db database.Database, logger *zap.Logger) *HealthChecker {
	checker := &HealthChecker{
		db:      db,
		logger:  logger,
		workers: 10,
		active:  map[string]bool{},
	}
	return checker
}
// Start runs the health-monitoring loop until ctx is cancelled, probing all
// active deployments every 30 seconds. Returns ctx.Err() on shutdown.
func (hc *HealthChecker) Start(ctx context.Context) error {
	hc.logger.Info("Starting health checker", zap.Int("workers", hc.workers))
	interval := time.NewTicker(30 * time.Second)
	defer interval.Stop()
	for {
		select {
		case <-interval.C:
			// A failed cycle is logged and retried on the next tick rather
			// than terminating the loop.
			if cycleErr := hc.checkAllDeployments(ctx); cycleErr != nil {
				hc.logger.Error("Health check cycle failed", zap.Error(cycleErr))
			}
		case <-ctx.Done():
			hc.logger.Info("Health checker stopped")
			return ctx.Err()
		}
	}
}
// checkAllDeployments probes every active HTTP-backed deployment in parallel,
// bounded by hc.workers concurrent goroutines, and records each result.
// Returns an error only if the deployment list itself cannot be queried.
func (hc *HealthChecker) checkAllDeployments(ctx context.Context) error {
	var rows []deploymentRow
	query := `
		SELECT id, namespace, name, type, port, health_check_path, home_node_id
		FROM deployments
		WHERE status = 'active' AND type IN ('nextjs', 'nodejs-backend', 'go-backend')
	`
	err := hc.db.Query(ctx, &rows, query)
	if err != nil {
		return fmt.Errorf("failed to query deployments: %w", err)
	}
	hc.logger.Info("Checking deployments", zap.Int("count", len(rows)))
	// Process in parallel, bounded by the worker pool.
	sem := make(chan struct{}, hc.workers)
	var wg sync.WaitGroup
	for _, row := range rows {
		// Acquire a worker slot BEFORE spawning the goroutine so at most
		// hc.workers goroutines exist at once; the original launched one
		// goroutine per deployment up front and only throttled execution,
		// creating an unbounded goroutine burst on large fleets.
		sem <- struct{}{}
		wg.Add(1)
		go func(r deploymentRow) {
			defer wg.Done()
			defer func() { <-sem }()
			healthy := hc.checkDeployment(ctx, r)
			hc.recordHealthCheck(ctx, r.ID, healthy)
		}(row)
	}
	wg.Wait()
	return nil
}
// checkDeployment probes a single deployment's health endpoint over HTTP on
// localhost. Deployments without a port (static sites) are always healthy.
// Returns true when the endpoint answers with a 2xx status within 5 seconds.
func (hc *HealthChecker) checkDeployment(ctx context.Context, dep deploymentRow) bool {
	if dep.Port == 0 {
		// Static deployments are always healthy
		return true
	}
	// Check local port
	url := fmt.Sprintf("http://localhost:%d%s", dep.Port, dep.HealthCheckPath)
	checkCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	req, err := http.NewRequestWithContext(checkCtx, http.MethodGet, url, nil)
	if err != nil {
		hc.logger.Error("Failed to create health check request",
			zap.String("deployment", dep.Name),
			zap.Error(err),
		)
		return false
	}
	// Use the shared default client so connections are pooled across checks;
	// the per-call client in the original defeated keep-alive reuse. The
	// request context already enforces the 5s deadline, so the duplicated
	// client-level timeout is unnecessary.
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		hc.logger.Warn("Health check failed",
			zap.String("deployment", dep.Name),
			zap.String("namespace", dep.Namespace),
			zap.String("url", url),
			zap.Error(err),
		)
		return false
	}
	defer resp.Body.Close()
	healthy := resp.StatusCode >= 200 && resp.StatusCode < 300
	if !healthy {
		hc.logger.Warn("Health check returned unhealthy status",
			zap.String("deployment", dep.Name),
			zap.Int("status", resp.StatusCode),
		)
	}
	return healthy
}
// recordHealthCheck persists one health check result and then runs the
// consecutive-failure tracking for the deployment. Write failures are
// logged but do not abort the cycle.
//
// NOTE(review): response_time_ms is always recorded as 0 — latency is not
// measured by checkDeployment; confirm whether this column is still wanted.
func (hc *HealthChecker) recordHealthCheck(ctx context.Context, deploymentID string, healthy bool) {
	status := "unhealthy"
	if healthy {
		status = "healthy"
	}
	query := `
		INSERT INTO deployment_health_checks (deployment_id, status, checked_at, response_time_ms)
		VALUES (?, ?, ?, ?)
	`
	if _, insertErr := hc.db.Exec(ctx, query, deploymentID, status, time.Now(), 0); insertErr != nil {
		hc.logger.Error("Failed to record health check",
			zap.String("deployment", deploymentID),
			zap.Error(insertErr),
		)
	}
	// Track consecutive failures
	hc.checkConsecutiveFailures(ctx, deploymentID, healthy)
}
// checkConsecutiveFailures marks a deployment as failed after 3 consecutive
// unhealthy checks. It is a no-op when the current check is healthy; all
// database errors are logged and swallowed so the monitor keeps running.
func (hc *HealthChecker) checkConsecutiveFailures(ctx context.Context, deploymentID string, currentHealthy bool) {
	if currentHealthy {
		return
	}
	type healthRow struct {
		Status string `db:"status"`
	}
	var rows []healthRow
	query := `
		SELECT status
		FROM deployment_health_checks
		WHERE deployment_id = ?
		ORDER BY checked_at DESC
		LIMIT 3
	`
	err := hc.db.Query(ctx, &rows, query, deploymentID)
	if err != nil {
		hc.logger.Error("Failed to query health history", zap.Error(err))
		return
	}
	// Check if last 3 checks all failed
	if len(rows) >= 3 {
		allFailed := true
		for _, row := range rows {
			if row.Status != "unhealthy" {
				allFailed = false
				break
			}
		}
		if allFailed {
			hc.logger.Error("Deployment has 3 consecutive failures, marking as failed",
				zap.String("deployment", deploymentID),
			)
			updateQuery := `
				UPDATE deployments
				SET status = 'failed', updated_at = ?
				WHERE id = ?
			`
			_, err := hc.db.Exec(ctx, updateQuery, time.Now(), deploymentID)
			if err != nil {
				hc.logger.Error("Failed to mark deployment as failed", zap.Error(err))
			}
			// Record event. The original silently discarded this error;
			// log it so missing audit events are diagnosable.
			eventQuery := `
				INSERT INTO deployment_events (deployment_id, event_type, message, created_at)
				VALUES (?, 'health_failed', 'Deployment marked as failed after 3 consecutive health check failures', ?)
			`
			if _, eventErr := hc.db.Exec(ctx, eventQuery, deploymentID, time.Now()); eventErr != nil {
				hc.logger.Error("Failed to record health_failed event",
					zap.String("deployment", deploymentID),
					zap.Error(eventErr),
				)
			}
		}
	}
}
// GetHealthStatus returns the most recent health checks for a deployment,
// newest first, limited to at most `limit` entries.
func (hc *HealthChecker) GetHealthStatus(ctx context.Context, deploymentID string, limit int) ([]HealthCheck, error) {
	type healthRow struct {
		Status         string    `db:"status"`
		CheckedAt      time.Time `db:"checked_at"`
		ResponseTimeMs int       `db:"response_time_ms"`
	}
	query := `
		SELECT status, checked_at, response_time_ms
		FROM deployment_health_checks
		WHERE deployment_id = ?
		ORDER BY checked_at DESC
		LIMIT ?
	`
	var rows []healthRow
	if err := hc.db.Query(ctx, &rows, query, deploymentID, limit); err != nil {
		return nil, err
	}
	// Map storage rows onto the public HealthCheck type.
	checks := make([]HealthCheck, 0, len(rows))
	for _, row := range rows {
		checks = append(checks, HealthCheck{
			Status:         row.Status,
			CheckedAt:      row.CheckedAt,
			ResponseTimeMs: row.ResponseTimeMs,
		})
	}
	return checks, nil
}
// HealthCheck represents a single health check result as exposed over the API.
type HealthCheck struct {
	Status         string    `json:"status"`           // "healthy" or "unhealthy"
	CheckedAt      time.Time `json:"checked_at"`       // when the probe ran
	ResponseTimeMs int       `json:"response_time_ms"` // probe latency; currently always 0 (not measured)
}

View File

@ -0,0 +1,428 @@
package deployments
import (
"context"
"fmt"
"time"
"github.com/DeBrosOfficial/network/pkg/client"
"github.com/DeBrosOfficial/network/pkg/rqlite"
"go.uber.org/zap"
)
// HomeNodeManager manages namespace-to-node assignments: each namespace is
// pinned to a "home node" chosen by capacity score, and all its deployments
// run there until migrated.
type HomeNodeManager struct {
	db            rqlite.Client  // cluster database for assignments and node state
	portAllocator *PortAllocator // used to factor port pressure into capacity
	logger        *zap.Logger
}
// NewHomeNodeManager wires up a home node manager over the given database,
// port allocator, and logger.
func NewHomeNodeManager(db rqlite.Client, portAllocator *PortAllocator, logger *zap.Logger) *HomeNodeManager {
	manager := &HomeNodeManager{
		db:            db,
		portAllocator: portAllocator,
		logger:        logger,
	}
	return manager
}
// AssignHomeNode returns the home node for a namespace, creating a new
// assignment on the highest-scoring active node when none exists yet.
// Returns ErrNoNodesAvailable when no active node can be selected.
func (hnm *HomeNodeManager) AssignHomeNode(ctx context.Context, namespace string) (string, error) {
	internalCtx := client.WithInternalAuth(ctx)
	// Fast path: the namespace may already be pinned to a node.
	if current, lookupErr := hnm.GetHomeNode(ctx, namespace); lookupErr == nil && current != "" {
		hnm.logger.Debug("Namespace already has home node",
			zap.String("namespace", namespace),
			zap.String("home_node_id", current),
		)
		return current, nil
	}
	// Gather candidate nodes.
	activeNodes, err := hnm.getActiveNodes(internalCtx)
	if err != nil {
		return "", err
	}
	if len(activeNodes) == 0 {
		return "", ErrNoNodesAvailable
	}
	// Score each candidate and pick the best.
	nodeCapacities, err := hnm.calculateNodeCapacities(internalCtx, activeNodes)
	if err != nil {
		return "", err
	}
	chosen := hnm.selectBestNode(nodeCapacities)
	if chosen == nil {
		return "", ErrNoNodesAvailable
	}
	// Upsert the assignment so concurrent assigners converge on one node.
	insertQuery := `
		INSERT INTO home_node_assignments (namespace, home_node_id, assigned_at, last_heartbeat, deployment_count, total_memory_mb, total_cpu_percent)
		VALUES (?, ?, ?, ?, 0, 0, 0)
		ON CONFLICT(namespace) DO UPDATE SET
			home_node_id = excluded.home_node_id,
			assigned_at = excluded.assigned_at,
			last_heartbeat = excluded.last_heartbeat
	`
	now := time.Now()
	if _, execErr := hnm.db.Exec(internalCtx, insertQuery, namespace, chosen.NodeID, now, now); execErr != nil {
		return "", &DeploymentError{
			Message: "failed to create home node assignment",
			Cause:   execErr,
		}
	}
	hnm.logger.Info("Home node assigned",
		zap.String("namespace", namespace),
		zap.String("home_node_id", chosen.NodeID),
		zap.Float64("capacity_score", chosen.Score),
		zap.Int("deployment_count", chosen.DeploymentCount),
	)
	return chosen.NodeID, nil
}
// GetHomeNode looks up the home node currently assigned to a namespace.
// Returns ErrNamespaceNotAssigned when no assignment exists.
func (hnm *HomeNodeManager) GetHomeNode(ctx context.Context, namespace string) (string, error) {
	internalCtx := client.WithInternalAuth(ctx)
	type homeNodeResult struct {
		HomeNodeID string `db:"home_node_id"`
	}
	query := `SELECT home_node_id FROM home_node_assignments WHERE namespace = ? LIMIT 1`
	var results []homeNodeResult
	if err := hnm.db.Query(internalCtx, &results, query, namespace); err != nil {
		return "", &DeploymentError{
			Message: "failed to query home node",
			Cause:   err,
		}
	}
	if len(results) == 0 {
		return "", ErrNamespaceNotAssigned
	}
	return results[0].HomeNodeID, nil
}
// UpdateHeartbeat updates the last heartbeat timestamp for a namespace.
//
// NOTE(review): this writes a raw time.Time while GetStaleNamespaces compares
// last_heartbeat against a "2006-01-02 15:04:05"-formatted string. If the
// driver serializes time.Time in a different format (e.g. RFC3339), the
// string comparison in the staleness query will misbehave — confirm the
// driver's timestamp serialization and unify the format across writers/readers.
func (hnm *HomeNodeManager) UpdateHeartbeat(ctx context.Context, namespace string) error {
	internalCtx := client.WithInternalAuth(ctx)
	query := `UPDATE home_node_assignments SET last_heartbeat = ? WHERE namespace = ?`
	_, err := hnm.db.Exec(internalCtx, query, time.Now(), namespace)
	if err != nil {
		return &DeploymentError{
			Message: "failed to update heartbeat",
			Cause:   err,
		}
	}
	return nil
}
// GetStaleNamespaces returns namespaces whose last heartbeat is older than
// staleThreshold; used to detect namespaces whose home node may have died.
//
// NOTE(review): the cutoff is compared as a "2006-01-02 15:04:05" string,
// but UpdateHeartbeat/AssignHomeNode write raw time.Time values. If the
// driver serializes those differently (e.g. RFC3339 with a 'T'), this string
// comparison is unreliable — verify and unify the timestamp format.
func (hnm *HomeNodeManager) GetStaleNamespaces(ctx context.Context, staleThreshold time.Duration) ([]string, error) {
	internalCtx := client.WithInternalAuth(ctx)
	cutoff := time.Now().Add(-staleThreshold)
	type namespaceResult struct {
		Namespace string `db:"namespace"`
	}
	var results []namespaceResult
	query := `SELECT namespace FROM home_node_assignments WHERE last_heartbeat < ?`
	err := hnm.db.Query(internalCtx, &results, query, cutoff.Format("2006-01-02 15:04:05"))
	if err != nil {
		return nil, &DeploymentError{
			Message: "failed to query stale namespaces",
			Cause:   err,
		}
	}
	namespaces := make([]string, 0, len(results))
	for _, result := range results {
		namespaces = append(namespaces, result.Namespace)
	}
	return namespaces, nil
}
// UpdateResourceUsage refreshes the cached aggregate resource figures
// (deployment count, memory in MB, CPU percent) for a namespace.
func (hnm *HomeNodeManager) UpdateResourceUsage(ctx context.Context, namespace string, deploymentCount, memoryMB, cpuPercent int) error {
	internalCtx := client.WithInternalAuth(ctx)
	query := `
		UPDATE home_node_assignments
		SET deployment_count = ?, total_memory_mb = ?, total_cpu_percent = ?
		WHERE namespace = ?
	`
	if _, err := hnm.db.Exec(internalCtx, query, deploymentCount, memoryMB, cpuPercent, namespace); err != nil {
		return &DeploymentError{
			Message: "failed to update resource usage",
			Cause:   err,
		}
	}
	return nil
}
// getActiveNodes lists IDs of nodes in dns_nodes that are marked active and
// have checked in within the last two minutes.
func (hnm *HomeNodeManager) getActiveNodes(ctx context.Context) ([]string, error) {
	// Nodes must have checked in within last 2 minutes.
	cutoff := time.Now().Add(-2 * time.Minute)
	type nodeResult struct {
		ID string `db:"id"`
	}
	query := `
		SELECT id FROM dns_nodes
		WHERE status = 'active' AND last_seen > ?
		ORDER BY id
	`
	var results []nodeResult
	if err := hnm.db.Query(ctx, &results, query, cutoff.Format("2006-01-02 15:04:05")); err != nil {
		return nil, &DeploymentError{
			Message: "failed to query active nodes",
			Cause:   err,
		}
	}
	// Flatten the row structs into a plain ID slice.
	nodes := make([]string, 0, len(results))
	for _, row := range results {
		nodes = append(nodes, row.ID)
	}
	hnm.logger.Debug("Found active nodes",
		zap.Int("count", len(nodes)),
		zap.Strings("nodes", nodes),
	)
	return nodes, nil
}
// calculateNodeCapacities computes a capacity score for each node ID.
// Nodes whose metrics cannot be fetched are logged and skipped rather than
// failing the whole selection.
func (hnm *HomeNodeManager) calculateNodeCapacities(ctx context.Context, nodeIDs []string) ([]*NodeCapacity, error) {
	out := make([]*NodeCapacity, 0, len(nodeIDs))
	for _, id := range nodeIDs {
		cap, capErr := hnm.getNodeCapacity(ctx, id)
		if capErr != nil {
			hnm.logger.Warn("Failed to get node capacity, skipping",
				zap.String("node_id", id),
				zap.Error(capErr),
			)
			continue
		}
		out = append(out, cap)
	}
	return out, nil
}
// getNodeCapacity gathers deployment, port, and resource metrics for a single
// node and folds them into a NodeCapacity with a 0.0-1.0 score.
func (hnm *HomeNodeManager) getNodeCapacity(ctx context.Context, nodeID string) (*NodeCapacity, error) {
	// Deployments currently placed on this node.
	deployments, err := hnm.getDeploymentCount(ctx, nodeID)
	if err != nil {
		return nil, err
	}
	// Port pressure.
	usedPorts, err := hnm.portAllocator.GetNodePortCount(ctx, nodeID)
	if err != nil {
		return nil, err
	}
	freePorts, err := hnm.portAllocator.GetAvailablePortCount(ctx, nodeID)
	if err != nil {
		return nil, err
	}
	// Aggregate memory/CPU usage cached in home_node_assignments.
	memMB, cpuPct, err := hnm.getNodeResourceUsage(ctx, nodeID)
	if err != nil {
		return nil, err
	}
	score := hnm.calculateCapacityScore(deployments, usedPorts, freePorts, memMB, cpuPct)
	return &NodeCapacity{
		NodeID:            nodeID,
		DeploymentCount:   deployments,
		AllocatedPorts:    usedPorts,
		AvailablePorts:    freePorts,
		UsedMemoryMB:      memMB,
		AvailableMemoryMB: 8192 - memMB, // Assume 8GB per node (make configurable later)
		UsedCPUPercent:    cpuPct,
		Score:             score,
	}, nil
}
// getDeploymentCount counts active/deploying deployments placed on a node.
func (hnm *HomeNodeManager) getDeploymentCount(ctx context.Context, nodeID string) (int, error) {
	type countResult struct {
		Count int `db:"cnt"`
	}
	var results []countResult
	// Alias the aggregate so the db tag maps to a stable column name; the
	// original tag `db:"COUNT(*)"` depended on the driver echoing the raw
	// expression text as the column name.
	query := `SELECT COUNT(*) AS cnt FROM deployments WHERE home_node_id = ? AND status IN ('active', 'deploying')`
	err := hnm.db.Query(ctx, &results, query, nodeID)
	if err != nil {
		return 0, &DeploymentError{
			Message: "failed to count deployments",
			Cause:   err,
		}
	}
	if len(results) == 0 {
		return 0, nil
	}
	return results[0].Count, nil
}
// getNodeResourceUsage sums the cached memory (MB) and CPU (%) usage across
// all namespaces homed on a node. Returns (memoryMB, cpuPercent, error).
func (hnm *HomeNodeManager) getNodeResourceUsage(ctx context.Context, nodeID string) (int, int, error) {
	type resourceResult struct {
		TotalMemoryMB   int `db:"total_memory_mb"`
		TotalCPUPercent int `db:"total_cpu_percent"`
	}
	var results []resourceResult
	// Alias the aggregates so the db tags map to stable column names; the
	// original tags embedded the raw `COALESCE(SUM(...), 0)` expressions,
	// which depended on the driver echoing them verbatim as column names.
	query := `
		SELECT COALESCE(SUM(total_memory_mb), 0) AS total_memory_mb, COALESCE(SUM(total_cpu_percent), 0) AS total_cpu_percent
		FROM home_node_assignments
		WHERE home_node_id = ?
	`
	err := hnm.db.Query(ctx, &results, query, nodeID)
	if err != nil {
		return 0, 0, &DeploymentError{
			Message: "failed to query resource usage",
			Cause:   err,
		}
	}
	if len(results) == 0 {
		return 0, 0, nil
	}
	return results[0].TotalMemoryMB, results[0].TotalCPUPercent, nil
}
// calculateCapacityScore folds a node's load metrics into a single 0.0-1.0
// score where higher means more spare capacity.
//
// Weights: deployments 40%, ports 20%, memory 20%, CPU 20%.
// NOTE: availablePorts is accepted for interface compatibility but does not
// influence the score; the port component derives from allocatedPorts.
func (hnm *HomeNodeManager) calculateCapacityScore(deploymentCount, allocatedPorts, availablePorts, usedMemoryMB, usedCPUPercent int) float64 {
	const (
		maxDeployments = 100  // Max deployments per node
		maxMemoryMB    = 8192 // 8GB
		maxCPUPercent  = 400  // 400% = 4 cores
		maxPorts       = 9900 // ~10k ports available
	)
	// remaining returns 1 - used/limit, clamped to a minimum of zero.
	remaining := func(used, limit int) float64 {
		s := 1.0 - (float64(used) / float64(limit))
		if s < 0 {
			return 0
		}
		return s
	}
	deploymentScore := remaining(deploymentCount, maxDeployments)
	portScore := remaining(allocatedPorts, maxPorts)
	memoryScore := remaining(usedMemoryMB, maxMemoryMB)
	cpuScore := remaining(usedCPUPercent, maxCPUPercent)
	// Weighted average (adjust weights as needed)
	totalScore := (deploymentScore * 0.4) + (portScore * 0.2) + (memoryScore * 0.2) + (cpuScore * 0.2)
	hnm.logger.Debug("Calculated capacity score",
		zap.Int("deployments", deploymentCount),
		zap.Int("allocated_ports", allocatedPorts),
		zap.Int("used_memory_mb", usedMemoryMB),
		zap.Int("used_cpu_percent", usedCPUPercent),
		zap.Float64("deployment_score", deploymentScore),
		zap.Float64("port_score", portScore),
		zap.Float64("memory_score", memoryScore),
		zap.Float64("cpu_score", cpuScore),
		zap.Float64("total_score", totalScore),
	)
	return totalScore
}
// selectBestNode returns the candidate with the highest capacity score,
// or nil when the candidate list is empty. Ties keep the earlier candidate.
func (hnm *HomeNodeManager) selectBestNode(capacities []*NodeCapacity) *NodeCapacity {
	if len(capacities) == 0 {
		return nil
	}
	winner := capacities[0]
	for i := 1; i < len(capacities); i++ {
		if capacities[i].Score > winner.Score {
			winner = capacities[i]
		}
	}
	hnm.logger.Info("Selected best node",
		zap.String("node_id", winner.NodeID),
		zap.Float64("score", winner.Score),
		zap.Int("deployment_count", winner.DeploymentCount),
		zap.Int("allocated_ports", winner.AllocatedPorts),
	)
	return winner
}
// MigrateNamespace re-pins a namespace to a different home node, refreshing
// its assignment time and heartbeat; used when the original node fails.
func (hnm *HomeNodeManager) MigrateNamespace(ctx context.Context, namespace, newNodeID string) error {
	internalCtx := client.WithInternalAuth(ctx)
	query := `
		UPDATE home_node_assignments
		SET home_node_id = ?, assigned_at = ?, last_heartbeat = ?
		WHERE namespace = ?
	`
	now := time.Now()
	if _, err := hnm.db.Exec(internalCtx, query, newNodeID, now, now, namespace); err != nil {
		return &DeploymentError{
			Message: fmt.Sprintf("failed to migrate namespace %s to node %s", namespace, newNodeID),
			Cause:   err,
		}
	}
	hnm.logger.Info("Namespace migrated",
		zap.String("namespace", namespace),
		zap.String("new_home_node_id", newNodeID),
	)
	return nil
}

View File

@ -0,0 +1,537 @@
package deployments
import (
"context"
"database/sql"
"reflect"
"testing"
"time"
"github.com/DeBrosOfficial/network/pkg/rqlite"
"go.uber.org/zap"
)
// mockHomeNodeDB extends mockRQLiteClient with in-memory state for home node
// testing; Query/Exec are intercepted and answered from these maps.
type mockHomeNodeDB struct {
	*mockRQLiteClient
	assignments   map[string]string           // namespace -> homeNodeID
	nodes         map[string]nodeData         // nodeID -> nodeData
	deployments   map[string][]deploymentData // nodeID -> deployments
	resourceUsage map[string]resourceData     // nodeID -> resource usage
}
// nodeData is the mock's view of a dns_nodes row.
type nodeData struct {
	id       string
	status   string // "active" rows are returned by the active-nodes query
	lastSeen time.Time
}

// deploymentData is the mock's view of a deployments row on a node.
type deploymentData struct {
	id     string
	status string
}

// resourceData is the mock's cached per-node resource usage.
type resourceData struct {
	memoryMB   int
	cpuPercent int
}
// newMockHomeNodeDB returns a mockHomeNodeDB with all state maps initialized.
func newMockHomeNodeDB() *mockHomeNodeDB {
	db := &mockHomeNodeDB{
		mockRQLiteClient: newMockRQLiteClient(),
		assignments:      map[string]string{},
		nodes:            map[string]nodeData{},
		deployments:      map[string][]deploymentData{},
		resourceUsage:    map[string]resourceData{},
	}
	return db
}
// Query answers known home-node queries from the mock's in-memory state.
// Dispatch is by the NAME of the destination slice's element struct (e.g.
// "nodeResult"), so renaming those local result structs in production code
// silently breaks this mock. Unrecognized shapes fall through to the
// embedded mockRQLiteClient.
func (m *mockHomeNodeDB) Query(ctx context.Context, dest any, query string, args ...any) error {
	destVal := reflect.ValueOf(dest)
	if destVal.Kind() != reflect.Ptr {
		return nil
	}
	sliceVal := destVal.Elem()
	if sliceVal.Kind() != reflect.Slice {
		return nil
	}
	elemType := sliceVal.Type().Elem()
	// Handle different query types based on struct type
	switch elemType.Name() {
	case "nodeResult":
		// Active nodes query. Only the status filter is emulated; the
		// last_seen cutoff of the real query is ignored. Map iteration
		// order makes the result order nondeterministic.
		for _, node := range m.nodes {
			if node.status == "active" {
				nodeRes := reflect.New(elemType).Elem()
				nodeRes.FieldByName("ID").SetString(node.id)
				sliceVal.Set(reflect.Append(sliceVal, nodeRes))
			}
		}
		return nil
	case "homeNodeResult":
		// Home node lookup: args[0] is the namespace.
		if len(args) > 0 {
			if namespace, ok := args[0].(string); ok {
				if homeNodeID, exists := m.assignments[namespace]; exists {
					hnRes := reflect.New(elemType).Elem()
					hnRes.FieldByName("HomeNodeID").SetString(homeNodeID)
					sliceVal.Set(reflect.Append(sliceVal, hnRes))
				}
			}
		}
		return nil
	case "countResult":
		// Deployment count or port count: args[0] is the node ID.
		if len(args) > 0 {
			if nodeID, ok := args[0].(string); ok {
				count := len(m.deployments[nodeID])
				countRes := reflect.New(elemType).Elem()
				countRes.FieldByName("Count").SetInt(int64(count))
				sliceVal.Set(reflect.Append(sliceVal, countRes))
			}
		}
		return nil
	case "resourceResult":
		// Resource usage query: zero-value usage is returned for unknown nodes.
		if len(args) > 0 {
			if nodeID, ok := args[0].(string); ok {
				usage := m.resourceUsage[nodeID]
				resRes := reflect.New(elemType).Elem()
				resRes.FieldByName("TotalMemoryMB").SetInt(int64(usage.memoryMB))
				resRes.FieldByName("TotalCPUPercent").SetInt(int64(usage.cpuPercent))
				sliceVal.Set(reflect.Append(sliceVal, resRes))
			}
		}
		return nil
	case "namespaceResult":
		// Stale namespaces query
		// For testing, we'll return empty
		return nil
	}
	return m.mockRQLiteClient.Query(ctx, dest, query, args...)
}
// Exec recognizes home-node writes by argument SHAPE, not by parsing SQL:
// (string, string, ...) is treated as the assignment upsert, and a 4+ arg
// call whose first and fourth args are strings as the migration update.
// Calls like UpdateHeartbeat (time.Time first) fall through to the embedded
// mock. The nil sql.Result is acceptable because callers ignore the result.
func (m *mockHomeNodeDB) Exec(ctx context.Context, query string, args ...any) (sql.Result, error) {
	// Handle home node assignment (INSERT)
	if len(args) >= 2 {
		if namespace, ok := args[0].(string); ok {
			if homeNodeID, ok := args[1].(string); ok {
				m.assignments[namespace] = homeNodeID
				return nil, nil
			}
		}
	}
	// Handle migration (UPDATE) - args are: newNodeID, timestamp, timestamp, namespace
	if len(args) >= 4 {
		if newNodeID, ok := args[0].(string); ok {
			// Last arg should be namespace
			if namespace, ok := args[3].(string); ok {
				m.assignments[namespace] = newNodeID
				return nil, nil
			}
		}
	}
	return m.mockRQLiteClient.Exec(ctx, query, args...)
}
// addNode seeds the mock with a node row whose last_seen is now.
func (m *mockHomeNodeDB) addNode(id, status string) {
	entry := nodeData{
		id:       id,
		status:   status,
		lastSeen: time.Now(),
	}
	m.nodes[id] = entry
}
// Implement interface methods (inherited from mockRQLiteClient but need to be available)
// NOTE(review): Go already promotes methods from the embedded
// *mockRQLiteClient, so these explicit delegations are redundant unless a
// tool requires them declared on the outer type — consider removing.
func (m *mockHomeNodeDB) FindBy(ctx context.Context, dest any, table string, criteria map[string]any, opts ...rqlite.FindOption) error {
	return m.mockRQLiteClient.FindBy(ctx, dest, table, criteria, opts...)
}

// FindOneBy delegates to the embedded mock client.
func (m *mockHomeNodeDB) FindOneBy(ctx context.Context, dest any, table string, criteria map[string]any, opts ...rqlite.FindOption) error {
	return m.mockRQLiteClient.FindOneBy(ctx, dest, table, criteria, opts...)
}

// Save delegates to the embedded mock client.
func (m *mockHomeNodeDB) Save(ctx context.Context, entity any) error {
	return m.mockRQLiteClient.Save(ctx, entity)
}

// Remove delegates to the embedded mock client.
func (m *mockHomeNodeDB) Remove(ctx context.Context, entity any) error {
	return m.mockRQLiteClient.Remove(ctx, entity)
}

// Repository delegates to the embedded mock client.
func (m *mockHomeNodeDB) Repository(table string) any {
	return m.mockRQLiteClient.Repository(table)
}

// CreateQueryBuilder delegates to the embedded mock client.
func (m *mockHomeNodeDB) CreateQueryBuilder(table string) *rqlite.QueryBuilder {
	return m.mockRQLiteClient.CreateQueryBuilder(table)
}

// Tx delegates to the embedded mock client.
func (m *mockHomeNodeDB) Tx(ctx context.Context, fn func(tx rqlite.Tx) error) error {
	return m.mockRQLiteClient.Tx(ctx, fn)
}
// addDeployment seeds the mock with a deployment row on the given node.
// NOTE(review): the mock's countResult path counts ALL seeded deployments,
// ignoring the status filter of the real query.
func (m *mockHomeNodeDB) addDeployment(nodeID, deploymentID, status string) {
	m.deployments[nodeID] = append(m.deployments[nodeID], deploymentData{
		id:     deploymentID,
		status: status,
	})
}

// setResourceUsage seeds the mock's cached resource usage for a node.
func (m *mockHomeNodeDB) setResourceUsage(nodeID string, memoryMB, cpuPercent int) {
	m.resourceUsage[nodeID] = resourceData{
		memoryMB:   memoryMB,
		cpuPercent: cpuPercent,
	}
}
// TestHomeNodeManager_AssignHomeNode covers first-time assignment, sticky
// reuse of an existing assignment, and the no-nodes error path.
func TestHomeNodeManager_AssignHomeNode(t *testing.T) {
	logger := zap.NewNop()
	mockDB := newMockHomeNodeDB()
	portAllocator := NewPortAllocator(mockDB, logger)
	hnm := NewHomeNodeManager(mockDB, portAllocator, logger)
	ctx := context.Background()
	// Add test nodes
	mockDB.addNode("node-1", "active")
	mockDB.addNode("node-2", "active")
	mockDB.addNode("node-3", "active")
	t.Run("assign to new namespace", func(t *testing.T) {
		nodeID, err := hnm.AssignHomeNode(ctx, "test-namespace")
		if err != nil {
			t.Fatalf("failed to assign home node: %v", err)
		}
		if nodeID == "" {
			t.Error("expected non-empty node ID")
		}
		// Verify assignment was stored
		storedNodeID, err := hnm.GetHomeNode(ctx, "test-namespace")
		if err != nil {
			t.Fatalf("failed to get home node: %v", err)
		}
		if storedNodeID != nodeID {
			t.Errorf("stored node ID %s doesn't match assigned %s", storedNodeID, nodeID)
		}
	})
	t.Run("reuse existing assignment", func(t *testing.T) {
		// Assign once
		firstNodeID, err := hnm.AssignHomeNode(ctx, "namespace-2")
		if err != nil {
			t.Fatalf("failed first assignment: %v", err)
		}
		// Assign again - should return same node
		secondNodeID, err := hnm.AssignHomeNode(ctx, "namespace-2")
		if err != nil {
			t.Fatalf("failed second assignment: %v", err)
		}
		if firstNodeID != secondNodeID {
			t.Errorf("expected same node ID, got %s then %s", firstNodeID, secondNodeID)
		}
	})
	t.Run("error when no nodes available", func(t *testing.T) {
		// NOTE(review): emptyHNM reuses portAllocator bound to the non-empty
		// mockDB; harmless here because the no-nodes error fires before any
		// port query, but a dedicated allocator would be cleaner.
		emptyDB := newMockHomeNodeDB()
		emptyHNM := NewHomeNodeManager(emptyDB, portAllocator, logger)
		_, err := emptyHNM.AssignHomeNode(ctx, "test-namespace")
		if err != ErrNoNodesAvailable {
			t.Errorf("expected ErrNoNodesAvailable, got %v", err)
		}
	})
}
// TestHomeNodeManager_CalculateCapacityScore checks the score against
// expected ranges at empty, half, full, and light load, and that every
// score stays within [0, 1].
func TestHomeNodeManager_CalculateCapacityScore(t *testing.T) {
	logger := zap.NewNop()
	mockDB := newMockHomeNodeDB()
	portAllocator := NewPortAllocator(mockDB, logger)
	hnm := NewHomeNodeManager(mockDB, portAllocator, logger)
	tests := []struct {
		name            string
		deploymentCount int
		allocatedPorts  int
		availablePorts  int
		usedMemoryMB    int
		usedCPUPercent  int
		expectedMin     float64 // inclusive lower bound for the score
		expectedMax     float64 // inclusive upper bound for the score
	}{
		{
			name:            "empty node - perfect score",
			deploymentCount: 0,
			allocatedPorts:  0,
			availablePorts:  9900,
			usedMemoryMB:    0,
			usedCPUPercent:  0,
			expectedMin:     0.95,
			expectedMax:     1.0,
		},
		{
			name:            "half capacity",
			deploymentCount: 50,
			allocatedPorts:  4950,
			availablePorts:  4950,
			usedMemoryMB:    4096,
			usedCPUPercent:  200,
			expectedMin:     0.45,
			expectedMax:     0.55,
		},
		{
			name:            "full capacity - low score",
			deploymentCount: 100,
			allocatedPorts:  9900,
			availablePorts:  0,
			usedMemoryMB:    8192,
			usedCPUPercent:  400,
			expectedMin:     0.0,
			expectedMax:     0.05,
		},
		{
			name:            "light load",
			deploymentCount: 10,
			allocatedPorts:  1000,
			availablePorts:  8900,
			usedMemoryMB:    512,
			usedCPUPercent:  50,
			expectedMin:     0.80,
			expectedMax:     0.95,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			score := hnm.calculateCapacityScore(
				tt.deploymentCount,
				tt.allocatedPorts,
				tt.availablePorts,
				tt.usedMemoryMB,
				tt.usedCPUPercent,
			)
			if score < tt.expectedMin || score > tt.expectedMax {
				t.Errorf("score %.2f outside expected range [%.2f, %.2f]", score, tt.expectedMin, tt.expectedMax)
			}
			// Score should always be in 0-1 range
			if score < 0 || score > 1 {
				t.Errorf("score %.2f outside valid range [0, 1]", score)
			}
		})
	}
}
// TestHomeNodeManager_SelectBestNode checks highest-score selection, the
// nil result for an empty candidate list, and the single-candidate case.
func TestHomeNodeManager_SelectBestNode(t *testing.T) {
	logger := zap.NewNop()
	mockDB := newMockHomeNodeDB()
	portAllocator := NewPortAllocator(mockDB, logger)
	hnm := NewHomeNodeManager(mockDB, portAllocator, logger)
	t.Run("select from multiple nodes", func(t *testing.T) {
		capacities := []*NodeCapacity{
			{
				NodeID:          "node-1",
				DeploymentCount: 50,
				Score:           0.5,
			},
			{
				NodeID:          "node-2",
				DeploymentCount: 10,
				Score:           0.9,
			},
			{
				NodeID:          "node-3",
				DeploymentCount: 80,
				Score:           0.2,
			},
		}
		best := hnm.selectBestNode(capacities)
		if best == nil {
			t.Fatal("expected non-nil best node")
		}
		if best.NodeID != "node-2" {
			t.Errorf("expected node-2 (highest score), got %s", best.NodeID)
		}
		if best.Score != 0.9 {
			t.Errorf("expected score 0.9, got %.2f", best.Score)
		}
	})
	t.Run("return nil for empty list", func(t *testing.T) {
		best := hnm.selectBestNode([]*NodeCapacity{})
		if best != nil {
			t.Error("expected nil for empty capacity list")
		}
	})
	t.Run("single node", func(t *testing.T) {
		capacities := []*NodeCapacity{
			{
				NodeID:          "node-1",
				DeploymentCount: 5,
				Score:           0.8,
			},
		}
		best := hnm.selectBestNode(capacities)
		if best == nil {
			t.Fatal("expected non-nil best node")
		}
		if best.NodeID != "node-1" {
			t.Errorf("expected node-1, got %s", best.NodeID)
		}
	})
}
// TestHomeNodeManager_GetHomeNode checks the not-assigned error and a lookup
// of a manually seeded assignment.
func TestHomeNodeManager_GetHomeNode(t *testing.T) {
	logger := zap.NewNop()
	mockDB := newMockHomeNodeDB()
	portAllocator := NewPortAllocator(mockDB, logger)
	hnm := NewHomeNodeManager(mockDB, portAllocator, logger)
	ctx := context.Background()
	t.Run("get non-existent assignment", func(t *testing.T) {
		_, err := hnm.GetHomeNode(ctx, "non-existent")
		if err != ErrNamespaceNotAssigned {
			t.Errorf("expected ErrNamespaceNotAssigned, got %v", err)
		}
	})
	t.Run("get existing assignment", func(t *testing.T) {
		// Manually add assignment
		mockDB.assignments["test-namespace"] = "node-123"
		nodeID, err := hnm.GetHomeNode(ctx, "test-namespace")
		if err != nil {
			t.Fatalf("failed to get home node: %v", err)
		}
		if nodeID != "node-123" {
			t.Errorf("expected node-123, got %s", nodeID)
		}
	})
}
// TestHomeNodeManager_MigrateNamespace verifies that a migration re-pins the
// namespace to the new node and that subsequent lookups see the change.
func TestHomeNodeManager_MigrateNamespace(t *testing.T) {
	logger := zap.NewNop()
	mockDB := newMockHomeNodeDB()
	portAllocator := NewPortAllocator(mockDB, logger)
	hnm := NewHomeNodeManager(mockDB, portAllocator, logger)
	ctx := context.Background()
	t.Run("migrate namespace to new node", func(t *testing.T) {
		// Set up initial assignment
		mockDB.assignments["test-namespace"] = "node-old"
		// Migrate
		err := hnm.MigrateNamespace(ctx, "test-namespace", "node-new")
		if err != nil {
			t.Fatalf("failed to migrate namespace: %v", err)
		}
		// Verify migration
		nodeID, err := hnm.GetHomeNode(ctx, "test-namespace")
		if err != nil {
			t.Fatalf("failed to get home node after migration: %v", err)
		}
		if nodeID != "node-new" {
			t.Errorf("expected node-new after migration, got %s", nodeID)
		}
	})
}
// TestHomeNodeManager_UpdateHeartbeat checks that recording a heartbeat for a
// namespace succeeds against the mock store.
func TestHomeNodeManager_UpdateHeartbeat(t *testing.T) {
	nopLog := zap.NewNop()
	db := newMockHomeNodeDB()
	mgr := NewHomeNodeManager(db, NewPortAllocator(db, nopLog), nopLog)
	t.Run("update heartbeat", func(t *testing.T) {
		if err := mgr.UpdateHeartbeat(context.Background(), "test-namespace"); err != nil {
			t.Fatalf("failed to update heartbeat: %v", err)
		}
	})
}
// TestHomeNodeManager_UpdateResourceUsage checks that recording a namespace's
// aggregate resource usage succeeds against the mock store.
func TestHomeNodeManager_UpdateResourceUsage(t *testing.T) {
	logger := zap.NewNop()
	mockDB := newMockHomeNodeDB()
	portAllocator := NewPortAllocator(mockDB, logger)
	hnm := NewHomeNodeManager(mockDB, portAllocator, logger)
	ctx := context.Background()
	t.Run("update resource usage", func(t *testing.T) {
		// Arguments are presumably (deploymentCount, memoryMB, cpuPercent) —
		// TODO confirm against the UpdateResourceUsage signature.
		err := hnm.UpdateResourceUsage(ctx, "test-namespace", 5, 1024, 150)
		if err != nil {
			t.Fatalf("failed to update resource usage: %v", err)
		}
	})
}

// TestCapacityScoreWeighting exercises calculateCapacityScore to check that
// the deployment count dominates the capacity score (the comments below say
// it carries a 40% weight — confirm against the implementation).
func TestCapacityScoreWeighting(t *testing.T) {
	logger := zap.NewNop()
	mockDB := newMockHomeNodeDB()
	portAllocator := NewPortAllocator(mockDB, logger)
	hnm := NewHomeNodeManager(mockDB, portAllocator, logger)
	t.Run("deployment count has highest weight", func(t *testing.T) {
		// Node with low deployments but high other usage
		score1 := hnm.calculateCapacityScore(10, 5000, 4900, 4000, 200)
		// Node with high deployments but low other usage
		score2 := hnm.calculateCapacityScore(90, 100, 9800, 100, 10)
		// Score1 should be higher because deployment count has 40% weight
		if score1 <= score2 {
			t.Errorf("expected score1 (%.2f) > score2 (%.2f) due to deployment count weight", score1, score2)
		}
	})
	t.Run("deployment count weight matters", func(t *testing.T) {
		// Node A: 20 deployments, 50% other resources
		nodeA := hnm.calculateCapacityScore(20, 4950, 4950, 4096, 200)
		// Node B: 80 deployments, 50% other resources
		nodeB := hnm.calculateCapacityScore(80, 4950, 4950, 4096, 200)
		// Node A should score higher due to lower deployment count
		// (deployment count has 40% weight, so this should make a difference)
		if nodeA <= nodeB {
			t.Errorf("expected node A (%.2f) > node B (%.2f) - deployment count should matter", nodeA, nodeB)
		}
		// Verify the difference is significant (should be about 0.24 = 60% of 40% weight)
		diff := nodeA - nodeB
		if diff < 0.2 {
			t.Errorf("expected significant difference due to deployment count weight, got %.2f", diff)
		}
	})
}

View File

@ -0,0 +1,236 @@
package deployments
import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/DeBrosOfficial/network/pkg/client"
	"github.com/DeBrosOfficial/network/pkg/rqlite"

	"go.uber.org/zap"
)
// PortAllocator manages port allocation across nodes.
//
// Allocations live in the shared rqlite `port_allocations` table so every
// node sees a consistent view; a uniqueness conflict on insert is how
// concurrent allocators detect that a peer claimed a port first.
type PortAllocator struct {
	db     rqlite.Client // cluster-wide store backing port_allocations
	logger *zap.Logger
}
// NewPortAllocator constructs a PortAllocator backed by the given rqlite
// client; all log output goes to the supplied logger.
func NewPortAllocator(db rqlite.Client, logger *zap.Logger) *PortAllocator {
	allocator := &PortAllocator{db: db, logger: logger}
	return allocator
}
// AllocatePort finds and allocates the next available port for a deployment on a specific node.
// Port range: 10100-19999 (10000-10099 reserved for system use).
//
// Concurrent allocators can race for the same port; a constraint failure on
// insert is treated as retryable and resolved with capped exponential
// backoff. The context is honored between attempts so a cancelled caller
// does not sit out the remaining backoff schedule.
func (pa *PortAllocator) AllocatePort(ctx context.Context, nodeID, deploymentID string) (int, error) {
	// Use internal auth for port allocation operations
	internalCtx := client.WithInternalAuth(ctx)

	const (
		maxRetries    = 10
		maxRetryDelay = 2 * time.Second // cap keeps total worst-case wait bounded
	)
	retryDelay := 100 * time.Millisecond

	for attempt := 0; attempt < maxRetries; attempt++ {
		port, err := pa.tryAllocatePort(internalCtx, nodeID, deploymentID)
		if err == nil {
			pa.logger.Info("Port allocated successfully",
				zap.String("node_id", nodeID),
				zap.Int("port", port),
				zap.String("deployment_id", deploymentID),
				zap.Int("attempt", attempt+1),
			)
			return port, nil
		}
		// Non-conflict errors are not retryable.
		if !isConflictError(err) {
			return 0, err
		}
		pa.logger.Debug("Port allocation conflict, retrying",
			zap.String("node_id", nodeID),
			zap.String("deployment_id", deploymentID),
			zap.Int("attempt", attempt+1),
			zap.Error(err),
		)
		// Back off, but abort promptly if the caller cancels the context
		// (the original time.Sleep ignored cancellation).
		select {
		case <-ctx.Done():
			return 0, ctx.Err()
		case <-time.After(retryDelay):
		}
		retryDelay *= 2
		if retryDelay > maxRetryDelay {
			retryDelay = maxRetryDelay
		}
	}
	return 0, &DeploymentError{
		Message: fmt.Sprintf("failed to allocate port after %d retries", maxRetries),
	}
}
// tryAllocatePort attempts to allocate a port (single attempt).
//
// Strategy: read every port currently allocated on the node, pick the lowest
// free one in [UserMinPort, MaxPort], then INSERT the claim. The read and
// the insert are not atomic, so two allocators may pick the same port; the
// table's uniqueness constraint turns the loser's insert into a conflict
// error, which AllocatePort treats as retryable.
func (pa *PortAllocator) tryAllocatePort(ctx context.Context, nodeID, deploymentID string) (int, error) {
	// Query all allocated ports on this node.
	// NOTE(review): the reflection-based test mock dispatches on this local
	// type's name ("portRow") and its "Port" field; renaming either would
	// silently break those tests.
	type portRow struct {
		Port int `db:"port"`
	}
	var allocatedPortRows []portRow
	query := `SELECT port FROM port_allocations WHERE node_id = ? ORDER BY port ASC`
	err := pa.db.Query(ctx, &allocatedPortRows, query, nodeID)
	if err != nil {
		return 0, &DeploymentError{
			Message: "failed to query allocated ports",
			Cause:   err,
		}
	}
	// Parse allocated ports into map for O(1) membership checks
	allocatedPorts := make(map[int]bool)
	for _, row := range allocatedPortRows {
		allocatedPorts[row.Port] = true
	}
	// Find first available port (starting from UserMinPort = 10100)
	port := UserMinPort
	for port <= MaxPort {
		if !allocatedPorts[port] {
			break
		}
		port++
	}
	if port > MaxPort {
		return 0, ErrNoPortsAvailable
	}
	// Attempt to insert allocation record (may conflict if another process allocated same port)
	insertQuery := `
		INSERT INTO port_allocations (node_id, port, deployment_id, allocated_at)
		VALUES (?, ?, ?, ?)
	`
	_, err = pa.db.Exec(ctx, insertQuery, nodeID, port, deploymentID, time.Now())
	if err != nil {
		return 0, &DeploymentError{
			Message: "failed to insert port allocation",
			Cause:   err,
		}
	}
	return port, nil
}
// DeallocatePort removes a port allocation for a deployment, freeing the
// port for reuse by subsequent allocations.
func (pa *PortAllocator) DeallocatePort(ctx context.Context, deploymentID string) error {
	authed := client.WithInternalAuth(ctx)
	const stmt = `DELETE FROM port_allocations WHERE deployment_id = ?`
	if _, err := pa.db.Exec(authed, stmt, deploymentID); err != nil {
		return &DeploymentError{
			Message: "failed to deallocate port",
			Cause:   err,
		}
	}
	pa.logger.Info("Port deallocated", zap.String("deployment_id", deploymentID))
	return nil
}
// GetAllocatedPort retrieves the currently allocated port for a deployment,
// returning (port, nodeID, error).
func (pa *PortAllocator) GetAllocatedPort(ctx context.Context, deploymentID string) (int, string, error) {
	authed := client.WithInternalAuth(ctx)
	// Local type name is kept as "allocation" — the test mock reflects on it.
	type allocation struct {
		NodeID string `db:"node_id"`
		Port   int    `db:"port"`
	}
	var rows []allocation
	const q = `SELECT node_id, port FROM port_allocations WHERE deployment_id = ? LIMIT 1`
	if err := pa.db.Query(authed, &rows, q, deploymentID); err != nil {
		return 0, "", &DeploymentError{
			Message: "failed to query allocated port",
			Cause:   err,
		}
	}
	if len(rows) == 0 {
		return 0, "", &DeploymentError{
			Message: "no port allocated for deployment",
		}
	}
	first := rows[0]
	return first.Port, first.NodeID, nil
}
// GetNodePortCount returns the number of allocated ports on a node.
func (pa *PortAllocator) GetNodePortCount(ctx context.Context, nodeID string) (int, error) {
	internalCtx := client.WithInternalAuth(ctx)
	type countResult struct {
		Count int `db:"cnt"`
	}
	var results []countResult
	// Alias the aggregate so the result column has a stable name instead of
	// relying on the driver reporting the literal "COUNT(*)" (the previous
	// `db:"COUNT(*)"` tag depended on that exact formatting).
	query := `SELECT COUNT(*) AS cnt FROM port_allocations WHERE node_id = ?`
	err := pa.db.Query(internalCtx, &results, query, nodeID)
	if err != nil {
		return 0, &DeploymentError{
			Message: "failed to count allocated ports",
			Cause:   err,
		}
	}
	// Defensive: an empty result set counts as zero allocations.
	if len(results) == 0 {
		return 0, nil
	}
	return results[0].Count, nil
}
// GetAvailablePortCount returns the number of available ports on a node,
// i.e. the user-allocatable range size minus the allocations already held.
func (pa *PortAllocator) GetAvailablePortCount(ctx context.Context, nodeID string) (int, error) {
	used, err := pa.GetNodePortCount(ctx, nodeID)
	if err != nil {
		return 0, err
	}
	// Clamp at zero in case the store ever reports more allocations than the
	// range can hold.
	free := (MaxPort - UserMinPort + 1) - used
	if free < 0 {
		free = 0
	}
	return free, nil
}
// isConflictError reports whether err stems from a constraint violation
// (the port row already exists because a concurrent allocator won the race).
// Matching is case-insensitive so it holds regardless of how the backend
// capitalizes its error text; the previous helper was case-sensitive despite
// being documented otherwise.
func isConflictError(err error) bool {
	if err == nil {
		return false
	}
	// RQLite surfaces SQLite constraint violations as plain strings such as
	// "UNIQUE constraint failed: ...".
	msg := strings.ToLower(err.Error())
	for _, marker := range []string{"unique", "constraint", "conflict"} {
		if strings.Contains(msg, marker) {
			return true
		}
	}
	return false
}
// contains reports whether s contains substr, ignoring case.
// The previous hand-rolled implementation was case-sensitive despite its
// comment; delegating to the strings package makes it genuinely
// case-insensitive and drops the custom scanner.
func contains(s, substr string) bool {
	return strings.Contains(strings.ToLower(s), strings.ToLower(substr))
}
// findSubstring reports whether substr occurs anywhere in s, using a simple
// sliding-window scan over every candidate start position.
func findSubstring(s, substr string) bool {
	n := len(substr)
	for i := 0; i+n <= len(s); i++ {
		if s[i:i+n] == substr {
			return true
		}
	}
	return false
}

View File

@ -0,0 +1,419 @@
package deployments
import (
"context"
"database/sql"
"reflect"
"testing"
"github.com/DeBrosOfficial/network/pkg/rqlite"
"go.uber.org/zap"
)
// mockRQLiteClient implements a simple in-memory mock for testing.
// It emulates just enough of the rqlite.Client surface for the port
// allocator — Query/Exec over the port_allocations table — with state keyed
// as nodeID -> port -> deploymentID.
type mockRQLiteClient struct {
	allocations map[string]map[int]string // nodeID -> port -> deploymentID
}

// newMockRQLiteClient returns an empty mock with no allocations recorded.
func newMockRQLiteClient() *mockRQLiteClient {
	return &mockRQLiteClient{
		allocations: make(map[string]map[int]string),
	}
}
// Query emulates rqlite Query for the three result shapes the port allocator
// issues. The statement is inferred from the element type of dest (portRow,
// allocation, countResult) rather than by parsing the SQL text.
func (m *mockRQLiteClient) Query(ctx context.Context, dest any, query string, args ...any) error {
	// dest must be a pointer to a slice; anything else is silently ignored,
	// matching the original mock's permissive behavior.
	destVal := reflect.ValueOf(dest)
	if destVal.Kind() != reflect.Ptr {
		return nil
	}
	sliceVal := destVal.Elem()
	if sliceVal.Kind() != reflect.Slice {
		return nil
	}
	elemType := sliceVal.Type().Elem()
	if len(args) == 0 {
		return nil
	}
	key, ok := args[0].(string)
	if !ok {
		return nil
	}
	switch elemType.Name() {
	case "portRow":
		// SELECT port ... WHERE node_id = ?   (key is a node ID)
		if nodeAllocs, exists := m.allocations[key]; exists {
			for port := range nodeAllocs {
				row := reflect.New(elemType).Elem()
				row.FieldByName("Port").SetInt(int64(port))
				sliceVal.Set(reflect.Append(sliceVal, row))
			}
		}
	case "allocation":
		// SELECT node_id, port ... WHERE deployment_id = ?   (key is a
		// deployment ID). BUGFIX: the original compared the argument against
		// node IDs, so lookups by deployment never matched; match on the
		// stored deployment ID instead.
		for nid, ports := range m.allocations {
			for port, depID := range ports {
				if depID == key {
					alloc := reflect.New(elemType).Elem()
					alloc.FieldByName("NodeID").SetString(nid)
					alloc.FieldByName("Port").SetInt(int64(port))
					sliceVal.Set(reflect.Append(sliceVal, alloc))
					return nil
				}
			}
		}
	case "countResult":
		// SELECT COUNT(*) ... WHERE node_id = ?   (key is a node ID)
		count := 0
		if nodeAllocs, exists := m.allocations[key]; exists {
			count = len(nodeAllocs)
		}
		res := reflect.New(elemType).Elem()
		res.FieldByName("Count").SetInt(int64(count))
		sliceVal.Set(reflect.Append(sliceVal, res))
	}
	return nil
}
// Exec emulates rqlite Exec for the two statements the port allocator issues.
//
// NOTE(review): dispatch is on argument count, not on the SQL text — three or
// more args is treated as the INSERT, exactly one as the DELETE. That holds
// for the allocator's fixed queries but would misroute any new statement
// with three-plus args; confirm before reusing this mock elsewhere.
func (m *mockRQLiteClient) Exec(ctx context.Context, query string, args ...any) (sql.Result, error) {
	// Handle INSERT (port allocation)
	if len(args) >= 3 {
		nodeID, _ := args[0].(string)
		port, _ := args[1].(int)
		deploymentID, _ := args[2].(string)
		if m.allocations[nodeID] == nil {
			m.allocations[nodeID] = make(map[int]string)
		}
		// Check for conflict — mirrors the real table's uniqueness constraint
		// so AllocatePort's retry path can be exercised.
		if _, exists := m.allocations[nodeID][port]; exists {
			return nil, &DeploymentError{Message: "UNIQUE constraint failed"}
		}
		m.allocations[nodeID][port] = deploymentID
		return nil, nil
	}
	// Handle DELETE (deallocation) — removes the first row matching the
	// deployment ID, like `DELETE ... WHERE deployment_id = ?`.
	if len(args) >= 1 {
		deploymentID, _ := args[0].(string)
		for nodeID, ports := range m.allocations {
			for port, allocatedDepID := range ports {
				if allocatedDepID == deploymentID {
					delete(m.allocations[nodeID], port)
					return nil, nil
				}
			}
		}
	}
	return nil, nil
}
// Stub implementations for rqlite.Client interface — the port allocator only
// uses Query/Exec, so the ORM-style methods below are inert no-ops kept
// solely to satisfy the interface.
func (m *mockRQLiteClient) FindBy(ctx context.Context, dest any, table string, criteria map[string]any, opts ...rqlite.FindOption) error {
	return nil
}
func (m *mockRQLiteClient) FindOneBy(ctx context.Context, dest any, table string, criteria map[string]any, opts ...rqlite.FindOption) error {
	return nil
}
func (m *mockRQLiteClient) Save(ctx context.Context, entity any) error {
	return nil
}
func (m *mockRQLiteClient) Remove(ctx context.Context, entity any) error {
	return nil
}
func (m *mockRQLiteClient) Repository(table string) any {
	return nil
}
func (m *mockRQLiteClient) CreateQueryBuilder(table string) *rqlite.QueryBuilder {
	return nil
}
func (m *mockRQLiteClient) Tx(ctx context.Context, fn func(tx rqlite.Tx) error) error {
	return nil
}
// TestPortAllocator_AllocatePort covers the first allocation on a node,
// sequential allocation, and independence of port ranges across nodes.
func TestPortAllocator_AllocatePort(t *testing.T) {
	logger := zap.NewNop()
	mockDB := newMockRQLiteClient()
	pa := NewPortAllocator(mockDB, logger)
	ctx := context.Background()
	nodeID := "node-test123"
	t.Run("allocate first port", func(t *testing.T) {
		port, err := pa.AllocatePort(ctx, nodeID, "deploy-1")
		if err != nil {
			t.Fatalf("failed to allocate port: %v", err)
		}
		// The very first allocation must land at the bottom of the user range.
		if port != UserMinPort {
			t.Errorf("expected first port to be %d, got %d", UserMinPort, port)
		}
	})
	t.Run("allocate sequential ports", func(t *testing.T) {
		port2, err := pa.AllocatePort(ctx, nodeID, "deploy-2")
		if err != nil {
			t.Fatalf("failed to allocate second port: %v", err)
		}
		if port2 != UserMinPort+1 {
			t.Errorf("expected second port to be %d, got %d", UserMinPort+1, port2)
		}
		port3, err := pa.AllocatePort(ctx, nodeID, "deploy-3")
		if err != nil {
			t.Fatalf("failed to allocate third port: %v", err)
		}
		if port3 != UserMinPort+2 {
			t.Errorf("expected third port to be %d, got %d", UserMinPort+2, port3)
		}
	})
	t.Run("allocate on different node", func(t *testing.T) {
		// Port ranges are per-node, so a fresh node starts at UserMinPort again.
		port, err := pa.AllocatePort(ctx, "node-other", "deploy-4")
		if err != nil {
			t.Fatalf("failed to allocate port on different node: %v", err)
		}
		if port != UserMinPort {
			t.Errorf("expected first port on new node to be %d, got %d", UserMinPort, port)
		}
	})
}

// TestPortAllocator_DeallocatePort verifies that freeing a port creates a gap
// that the next allocation fills (lowest-free-port policy).
func TestPortAllocator_DeallocatePort(t *testing.T) {
	logger := zap.NewNop()
	mockDB := newMockRQLiteClient()
	pa := NewPortAllocator(mockDB, logger)
	ctx := context.Background()
	nodeID := "node-test123"
	// Allocate some ports
	_, err := pa.AllocatePort(ctx, nodeID, "deploy-1")
	if err != nil {
		t.Fatalf("failed to allocate port: %v", err)
	}
	port2, err := pa.AllocatePort(ctx, nodeID, "deploy-2")
	if err != nil {
		t.Fatalf("failed to allocate port: %v", err)
	}
	t.Run("deallocate port", func(t *testing.T) {
		err := pa.DeallocatePort(ctx, "deploy-1")
		if err != nil {
			t.Fatalf("failed to deallocate port: %v", err)
		}
	})
	t.Run("allocate reuses gap", func(t *testing.T) {
		port, err := pa.AllocatePort(ctx, nodeID, "deploy-3")
		if err != nil {
			t.Fatalf("failed to allocate port: %v", err)
		}
		// Should reuse the gap created by deallocating deploy-1
		if port != UserMinPort {
			t.Errorf("expected port to fill gap at %d, got %d", UserMinPort, port)
		}
		// Next allocation should be after the last allocated port
		port4, err := pa.AllocatePort(ctx, nodeID, "deploy-4")
		if err != nil {
			t.Fatalf("failed to allocate port: %v", err)
		}
		if port4 != port2+1 {
			t.Errorf("expected next sequential port %d, got %d", port2+1, port4)
		}
	})
}
// TestPortAllocator_GetNodePortCount checks the allocated-port count for an
// empty node and after a batch of allocations.
func TestPortAllocator_GetNodePortCount(t *testing.T) {
	logger := zap.NewNop()
	mockDB := newMockRQLiteClient()
	pa := NewPortAllocator(mockDB, logger)
	ctx := context.Background()
	nodeID := "node-test123"
	t.Run("empty node has zero ports", func(t *testing.T) {
		count, err := pa.GetNodePortCount(ctx, nodeID)
		if err != nil {
			t.Fatalf("failed to get port count: %v", err)
		}
		if count != 0 {
			t.Errorf("expected 0 ports, got %d", count)
		}
	})
	t.Run("count after allocations", func(t *testing.T) {
		// Allocate 3 ports with distinct, printable deployment IDs.
		// BUGFIX: the previous string(rune(i)) produced control characters
		// "\x00".."\x02" (the conversion go vet warns about); '0'+i yields
		// the intended "deploy-0".."deploy-2".
		for i := 0; i < 3; i++ {
			_, err := pa.AllocatePort(ctx, nodeID, "deploy-"+string('0'+rune(i)))
			if err != nil {
				t.Fatalf("failed to allocate port: %v", err)
			}
		}
		count, err := pa.GetNodePortCount(ctx, nodeID)
		if err != nil {
			t.Fatalf("failed to get port count: %v", err)
		}
		if count != 3 {
			t.Errorf("expected 3 ports, got %d", count)
		}
	})
}
// TestPortAllocator_GetAvailablePortCount verifies the available-port math:
// the full user range when nothing is allocated, minus one per allocation.
func TestPortAllocator_GetAvailablePortCount(t *testing.T) {
	logger := zap.NewNop()
	mockDB := newMockRQLiteClient()
	pa := NewPortAllocator(mockDB, logger)
	ctx := context.Background()
	nodeID := "node-test123"
	totalPorts := MaxPort - UserMinPort + 1
	t.Run("all ports available initially", func(t *testing.T) {
		available, err := pa.GetAvailablePortCount(ctx, nodeID)
		if err != nil {
			t.Fatalf("failed to get available port count: %v", err)
		}
		if available != totalPorts {
			t.Errorf("expected %d available ports, got %d", totalPorts, available)
		}
	})
	t.Run("available decreases after allocation", func(t *testing.T) {
		_, err := pa.AllocatePort(ctx, nodeID, "deploy-1")
		if err != nil {
			t.Fatalf("failed to allocate port: %v", err)
		}
		available, err := pa.GetAvailablePortCount(ctx, nodeID)
		if err != nil {
			t.Fatalf("failed to get available port count: %v", err)
		}
		expected := totalPorts - 1
		if available != expected {
			t.Errorf("expected %d available ports, got %d", expected, available)
		}
	})
}

// TestIsConflictError table-tests the conflict classifier against nil,
// matching, and non-matching error messages.
func TestIsConflictError(t *testing.T) {
	tests := []struct {
		name     string
		err      error
		expected bool
	}{
		{
			name:     "nil error",
			err:      nil,
			expected: false,
		},
		{
			name:     "UNIQUE constraint error",
			err:      &DeploymentError{Message: "UNIQUE constraint failed"},
			expected: true,
		},
		{
			name:     "constraint error",
			err:      &DeploymentError{Message: "constraint violation"},
			expected: true,
		},
		{
			name:     "conflict error",
			err:      &DeploymentError{Message: "conflict detected"},
			expected: true,
		},
		{
			name:     "unrelated error",
			err:      &DeploymentError{Message: "network timeout"},
			expected: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := isConflictError(tt.err)
			if result != tt.expected {
				t.Errorf("isConflictError(%v) = %v, expected %v", tt.err, result, tt.expected)
			}
		})
	}
}

// TestContains table-tests the substring helper, including the empty-string
// edge cases on both sides.
func TestContains(t *testing.T) {
	tests := []struct {
		name     string
		s        string
		substr   string
		expected bool
	}{
		{
			name:     "exact match",
			s:        "UNIQUE",
			substr:   "UNIQUE",
			expected: true,
		},
		{
			name:     "substring present",
			s:        "UNIQUE constraint failed",
			substr:   "constraint",
			expected: true,
		},
		{
			name:     "substring not present",
			s:        "network error",
			substr:   "constraint",
			expected: false,
		},
		{
			name:     "empty substring",
			s:        "test",
			substr:   "",
			expected: true,
		},
		{
			name:     "empty string",
			s:        "",
			substr:   "test",
			expected: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := contains(tt.s, tt.substr)
			if result != tt.expected {
				t.Errorf("contains(%q, %q) = %v, expected %v", tt.s, tt.substr, result, tt.expected)
			}
		})
	}
}

View File

@ -0,0 +1,314 @@
package process
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"text/template"
"time"
"github.com/DeBrosOfficial/network/pkg/deployments"
"go.uber.org/zap"
)
// Manager manages deployment processes via systemd: it renders unit files,
// drives systemctl for lifecycle operations, and reads status/logs back via
// systemctl and journalctl.
type Manager struct {
	logger *zap.Logger
}

// NewManager creates a new process manager that logs to the given logger.
func NewManager(logger *zap.Logger) *Manager {
	return &Manager{
		logger: logger,
	}
}
// Start starts a deployment process: it writes a systemd unit for the
// deployment, reloads systemd, then enables and starts the unit. The first
// failing step aborts the sequence and is returned wrapped with context.
func (m *Manager) Start(ctx context.Context, deployment *deployments.Deployment, workDir string) error {
	serviceName := m.getServiceName(deployment)
	m.logger.Info("Starting deployment process",
		zap.String("deployment", deployment.Name),
		zap.String("namespace", deployment.Namespace),
		zap.String("service", serviceName),
	)
	// Create systemd service file
	if err := m.createSystemdService(deployment, workDir); err != nil {
		return fmt.Errorf("failed to create systemd service: %w", err)
	}
	// Reload systemd so the freshly written unit file is picked up
	if err := m.systemdReload(); err != nil {
		return fmt.Errorf("failed to reload systemd: %w", err)
	}
	// Enable service so it comes back after a reboot
	if err := m.systemdEnable(serviceName); err != nil {
		return fmt.Errorf("failed to enable service: %w", err)
	}
	// Start service
	if err := m.systemdStart(serviceName); err != nil {
		return fmt.Errorf("failed to start service: %w", err)
	}
	m.logger.Info("Deployment process started",
		zap.String("deployment", deployment.Name),
		zap.String("service", serviceName),
	)
	return nil
}
// Stop stops a deployment process and removes its systemd unit.
//
// Stop is best-effort: individual cleanup failures are logged as warnings
// rather than aborting, so a partially removed unit can still be cleaned up
// by a later call. It always returns nil.
func (m *Manager) Stop(ctx context.Context, deployment *deployments.Deployment) error {
	serviceName := m.getServiceName(deployment)
	m.logger.Info("Stopping deployment process",
		zap.String("deployment", deployment.Name),
		zap.String("service", serviceName),
	)
	// Stop service
	if err := m.systemdStop(serviceName); err != nil {
		m.logger.Warn("Failed to stop service", zap.Error(err))
	}
	// Disable service
	if err := m.systemdDisable(serviceName); err != nil {
		m.logger.Warn("Failed to disable service", zap.Error(err))
	}
	// Remove service file (a missing file is fine — nothing to clean up)
	serviceFile := filepath.Join("/etc/systemd/system", serviceName+".service")
	if err := os.Remove(serviceFile); err != nil && !os.IsNotExist(err) {
		m.logger.Warn("Failed to remove service file", zap.Error(err))
	}
	// Reload systemd so the removed unit disappears from its view. This
	// error was previously dropped silently; keep best-effort semantics but
	// log it like the other cleanup steps.
	if err := m.systemdReload(); err != nil {
		m.logger.Warn("Failed to reload systemd", zap.Error(err))
	}
	return nil
}
// Restart restarts a deployment's systemd unit in place, returning whatever
// error systemctl reports.
func (m *Manager) Restart(ctx context.Context, deployment *deployments.Deployment) error {
	unit := m.getServiceName(deployment)
	m.logger.Info("Restarting deployment process",
		zap.String("deployment", deployment.Name),
		zap.String("service", unit),
	)
	return m.systemdRestart(unit)
}
// Status reports the systemd activation state of a deployment's unit
// (e.g. "active", "inactive", "failed").
//
// `systemctl is-active` exits non-zero for any state other than active while
// still printing the state name, so a non-empty output is returned as the
// status even when the command "fails". Previously such states collapsed to
// ("unknown", err); "unknown" is now only returned when no state could be
// read at all.
func (m *Manager) Status(ctx context.Context, deployment *deployments.Deployment) (string, error) {
	serviceName := m.getServiceName(deployment)
	cmd := exec.CommandContext(ctx, "systemctl", "is-active", serviceName)
	output, err := cmd.Output()
	status := strings.TrimSpace(string(output))
	if status != "" {
		return status, nil
	}
	if err != nil {
		return "unknown", err
	}
	return "unknown", nil
}
// GetLogs retrieves journald logs for a deployment's unit. lines > 0 limits
// the tail length; follow adds journalctl's -f flag.
//
// NOTE(review): with follow=true, cmd.Output() blocks until journalctl
// exits, which for -f effectively means until ctx is cancelled — and the
// accumulated output is only returned at that point. Confirm callers expect
// that rather than a streaming reader.
func (m *Manager) GetLogs(ctx context.Context, deployment *deployments.Deployment, lines int, follow bool) ([]byte, error) {
	serviceName := m.getServiceName(deployment)
	args := []string{"-u", serviceName, "--no-pager"}
	if lines > 0 {
		args = append(args, "-n", fmt.Sprintf("%d", lines))
	}
	if follow {
		args = append(args, "-f")
	}
	cmd := exec.CommandContext(ctx, "journalctl", args...)
	return cmd.Output()
}
// createSystemdService renders and writes the systemd unit file for a
// deployment to /etc/systemd/system/<service>.service.
//
// The unit runs as the unprivileged debros user with memory/CPU caps taken
// from the deployment's limits and a hardened sandbox (ProtectSystem=strict,
// PrivateTmp, write access restricted to workDir).
//
// NOTE(review): MemoryLimit= is the legacy cgroup-v1 directive; on cgroup-v2
// hosts systemd maps it to MemoryMax= with a deprecation warning — confirm
// target hosts before changing the template.
func (m *Manager) createSystemdService(deployment *deployments.Deployment, workDir string) error {
	serviceName := m.getServiceName(deployment)
	serviceFile := filepath.Join("/etc/systemd/system", serviceName+".service")
	// Determine the start command based on deployment type
	startCmd := m.getStartCommand(deployment, workDir)
	// Build environment variables; PORT is always injected so the app knows
	// its allocated listen port.
	envVars := make([]string, 0)
	envVars = append(envVars, fmt.Sprintf("PORT=%d", deployment.Port))
	for key, value := range deployment.Environment {
		envVars = append(envVars, fmt.Sprintf("%s=%s", key, value))
	}
	// Create service from template
	tmpl := `[Unit]
Description=Orama Deployment - {{.Namespace}}/{{.Name}}
After=network.target
[Service]
Type=simple
User=debros
Group=debros
WorkingDirectory={{.WorkDir}}
{{range .Env}}Environment="{{.}}"
{{end}}
ExecStart={{.StartCmd}}
Restart={{.RestartPolicy}}
RestartSec=5s
# Resource limits
MemoryLimit={{.MemoryLimitMB}}M
CPUQuota={{.CPULimitPercent}}%
# Security
NoNewPrivileges=true
PrivateTmp=true
ProtectSystem=strict
ProtectHome=true
ReadWritePaths={{.WorkDir}}
StandardOutput=journal
StandardError=journal
SyslogIdentifier={{.ServiceName}}
[Install]
WantedBy=multi-user.target
`
	t, err := template.New("service").Parse(tmpl)
	if err != nil {
		return err
	}
	// Template inputs; all values are derived/validated above.
	data := struct {
		Namespace       string
		Name            string
		ServiceName     string
		WorkDir         string
		StartCmd        string
		Env             []string
		RestartPolicy   string
		MemoryLimitMB   int
		CPULimitPercent int
	}{
		Namespace:       deployment.Namespace,
		Name:            deployment.Name,
		ServiceName:     serviceName,
		WorkDir:         workDir,
		StartCmd:        startCmd,
		Env:             envVars,
		RestartPolicy:   m.mapRestartPolicy(deployment.RestartPolicy),
		MemoryLimitMB:   deployment.MemoryLimitMB,
		CPULimitPercent: deployment.CPULimitPercent,
	}
	file, err := os.Create(serviceFile)
	if err != nil {
		return err
	}
	defer file.Close()
	return t.Execute(file, data)
}
// getStartCommand determines the start command for a deployment based on its
// type; unrecognized types get a harmless echo placeholder.
func (m *Manager) getStartCommand(deployment *deployments.Deployment, workDir string) string {
	commands := map[deployments.DeploymentType]string{
		deployments.DeploymentTypeNextJS:        "/usr/bin/node server.js",
		deployments.DeploymentTypeNodeJSBackend: "/usr/bin/node index.js",
		deployments.DeploymentTypeGoBackend:     filepath.Join(workDir, "app"),
	}
	if cmd, ok := commands[deployment.Type]; ok {
		return cmd
	}
	return "echo 'Unknown deployment type'"
}
// mapRestartPolicy maps a deployment restart policy to the corresponding
// systemd Restart= value; unrecognized policies fall back to "on-failure".
func (m *Manager) mapRestartPolicy(policy deployments.RestartPolicy) string {
	mapping := map[deployments.RestartPolicy]string{
		deployments.RestartPolicyAlways:    "always",
		deployments.RestartPolicyOnFailure: "on-failure",
		deployments.RestartPolicyNever:     "no",
	}
	if systemdPolicy, ok := mapping[policy]; ok {
		return systemdPolicy
	}
	return "on-failure"
}
// getServiceName generates a systemd unit name of the form
// orama-deploy-<namespace>-<name>, flattening dots to dashes so the result
// is a single unit-name token.
func (m *Manager) getServiceName(deployment *deployments.Deployment) string {
	flatten := strings.NewReplacer(".", "-")
	return "orama-deploy-" + flatten.Replace(deployment.Namespace) + "-" + flatten.Replace(deployment.Name)
}
// systemd helper methods

// runSystemctl invokes systemctl with the given arguments and returns the
// command's error, if any. All the thin per-verb wrappers below share it,
// replacing six copies of the same exec boilerplate.
func (m *Manager) runSystemctl(args ...string) error {
	return exec.Command("systemctl", args...).Run()
}

func (m *Manager) systemdReload() error {
	return m.runSystemctl("daemon-reload")
}

func (m *Manager) systemdEnable(serviceName string) error {
	return m.runSystemctl("enable", serviceName)
}

func (m *Manager) systemdDisable(serviceName string) error {
	return m.runSystemctl("disable", serviceName)
}

func (m *Manager) systemdStart(serviceName string) error {
	return m.runSystemctl("start", serviceName)
}

func (m *Manager) systemdStop(serviceName string) error {
	return m.runSystemctl("stop", serviceName)
}

func (m *Manager) systemdRestart(serviceName string) error {
	return m.runSystemctl("restart", serviceName)
}
// WaitForHealthy waits for a deployment to become healthy, polling the
// unit's systemd state every two seconds until it reports "active" or the
// timeout elapses. Context cancellation aborts the wait immediately.
func (m *Manager) WaitForHealthy(ctx context.Context, deployment *deployments.Deployment, timeout time.Duration) error {
	for deadline := time.Now().Add(timeout); time.Now().Before(deadline); {
		if status, err := m.Status(ctx, deployment); err == nil && status == "active" {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(2 * time.Second):
			// poll again on the next loop iteration
		}
	}
	return fmt.Errorf("deployment did not become healthy within %v", timeout)
}

248
pkg/deployments/types.go Normal file
View File

@ -0,0 +1,248 @@
// Package deployments provides infrastructure for managing custom deployments
// (static sites, Next.js apps, Go/Node.js backends, and SQLite databases)
package deployments
import (
"time"
)
// DeploymentType represents the type of deployment
type DeploymentType string

const (
	DeploymentTypeStatic        DeploymentType = "static"         // Static sites (React, Vite)
	DeploymentTypeNextJS        DeploymentType = "nextjs"         // Next.js SSR
	DeploymentTypeNextJSStatic  DeploymentType = "nextjs-static"  // Next.js static export
	DeploymentTypeGoBackend     DeploymentType = "go-backend"     // Go native binary
	DeploymentTypeGoWASM        DeploymentType = "go-wasm"        // Go compiled to WASM
	DeploymentTypeNodeJSBackend DeploymentType = "nodejs-backend" // Node.js/TypeScript backend
)

// DeploymentStatus represents the current state of a deployment
type DeploymentStatus string

const (
	DeploymentStatusDeploying DeploymentStatus = "deploying" // rollout in progress
	DeploymentStatusActive    DeploymentStatus = "active"    // running and serving
	DeploymentStatusFailed    DeploymentStatus = "failed"    // rollout or runtime failure
	DeploymentStatusStopped   DeploymentStatus = "stopped"   // intentionally halted
	DeploymentStatusUpdating  DeploymentStatus = "updating"  // new version being rolled out
)

// RestartPolicy defines how a deployment should restart on failure
type RestartPolicy string

const (
	RestartPolicyAlways    RestartPolicy = "always"     // restart regardless of exit status
	RestartPolicyOnFailure RestartPolicy = "on-failure" // restart only on failure
	RestartPolicyNever     RestartPolicy = "never"      // never restart automatically
)

// RoutingType defines how DNS routing works for a deployment
type RoutingType string

const (
	RoutingTypeBalanced     RoutingType = "balanced"      // Load-balanced across nodes
	RoutingTypeNodeSpecific RoutingType = "node_specific" // Specific to one node
)
// Deployment represents a deployed application or service — the persisted
// record from which the scheduler, process manager, and DNS layers derive
// runtime state.
type Deployment struct {
	ID        string           `json:"id"`
	Namespace string           `json:"namespace"`
	Name      string           `json:"name"`
	Type      DeploymentType   `json:"type"`
	Version   int              `json:"version"` // increments on each deploy; used by history/rollback
	Status    DeploymentStatus `json:"status"`

	// Content storage — content-addressed identifiers for the uploaded
	// source and the built artifact (presumably IPFS CIDs; confirm against
	// the storage layer).
	ContentCID string `json:"content_cid,omitempty"`
	BuildCID   string `json:"build_cid,omitempty"`

	// Runtime configuration
	HomeNodeID  string            `json:"home_node_id,omitempty"` // node hosting the process
	Port        int               `json:"port,omitempty"`         // port claimed via PortAllocator
	Subdomain   string            `json:"subdomain,omitempty"`
	Environment map[string]string `json:"environment,omitempty"` // Unmarshaled from JSON

	// Resource limits
	MemoryLimitMB   int `json:"memory_limit_mb"`
	CPULimitPercent int `json:"cpu_limit_percent"`
	DiskLimitMB     int `json:"disk_limit_mb"`

	// Health & monitoring
	HealthCheckPath     string        `json:"health_check_path,omitempty"`
	HealthCheckInterval int           `json:"health_check_interval"` // seconds
	RestartPolicy       RestartPolicy `json:"restart_policy"`
	MaxRestartCount     int           `json:"max_restart_count"`

	// Metadata
	CreatedAt  time.Time `json:"created_at"`
	UpdatedAt  time.Time `json:"updated_at"`
	DeployedBy string    `json:"deployed_by"` // presumably a wallet address — confirm
}
// PortAllocation represents an allocated port on a specific node.
// Mirrors a row of the port_allocations table.
type PortAllocation struct {
	NodeID       string    `json:"node_id"`
	Port         int       `json:"port"`
	DeploymentID string    `json:"deployment_id"`
	AllocatedAt  time.Time `json:"allocated_at"`
}

// HomeNodeAssignment maps a namespace to its home node, along with the
// aggregate usage the home-node manager tracks for capacity scoring.
type HomeNodeAssignment struct {
	Namespace       string    `json:"namespace"`
	HomeNodeID      string    `json:"home_node_id"`
	AssignedAt      time.Time `json:"assigned_at"`
	LastHeartbeat   time.Time `json:"last_heartbeat"`
	DeploymentCount int       `json:"deployment_count"`
	TotalMemoryMB   int       `json:"total_memory_mb"`
	TotalCPUPercent int       `json:"total_cpu_percent"`
}

// DeploymentDomain represents a custom domain mapping for a deployment,
// including routing mode and (for custom domains) verification state.
type DeploymentDomain struct {
	ID                string      `json:"id"`
	DeploymentID      string      `json:"deployment_id"`
	Namespace         string      `json:"namespace"`
	Domain            string      `json:"domain"`
	RoutingType       RoutingType `json:"routing_type"`
	NodeID            string      `json:"node_id,omitempty"` // set for node-specific routing
	IsCustom          bool        `json:"is_custom"`
	TLSCertCID        string      `json:"tls_cert_cid,omitempty"`
	VerifiedAt        *time.Time  `json:"verified_at,omitempty"` // nil until ownership is verified
	VerificationToken string      `json:"verification_token,omitempty"`
	CreatedAt         time.Time   `json:"created_at"`
	UpdatedAt         time.Time   `json:"updated_at"`
}

// DeploymentHistory tracks deployment versions for rollback.
type DeploymentHistory struct {
	ID                  string    `json:"id"`
	DeploymentID        string    `json:"deployment_id"`
	Version             int       `json:"version"`
	ContentCID          string    `json:"content_cid,omitempty"`
	BuildCID            string    `json:"build_cid,omitempty"`
	DeployedAt          time.Time `json:"deployed_at"`
	DeployedBy          string    `json:"deployed_by"`
	Status              string    `json:"status"`
	ErrorMessage        string    `json:"error_message,omitempty"`
	RollbackFromVersion *int      `json:"rollback_from_version,omitempty"` // nil unless this entry is a rollback
}

// DeploymentEvent represents an audit trail event.
type DeploymentEvent struct {
	ID           string    `json:"id"`
	DeploymentID string    `json:"deployment_id"`
	EventType    string    `json:"event_type"`
	Message      string    `json:"message,omitempty"`
	Metadata     string    `json:"metadata,omitempty"` // JSON
	CreatedAt    time.Time `json:"created_at"`
	CreatedBy    string    `json:"created_by,omitempty"`
}

// DeploymentHealthCheck represents a single health check result for a
// deployment on a specific node.
type DeploymentHealthCheck struct {
	ID             string    `json:"id"`
	DeploymentID   string    `json:"deployment_id"`
	NodeID         string    `json:"node_id"`
	Status         string    `json:"status"` // healthy, unhealthy, unknown
	ResponseTimeMS int       `json:"response_time_ms,omitempty"`
	StatusCode     int       `json:"status_code,omitempty"`
	ErrorMessage   string    `json:"error_message,omitempty"`
	CheckedAt      time.Time `json:"checked_at"`
}
// DeploymentRequest represents a request to create a new deployment
type DeploymentRequest struct {
Namespace string `json:"namespace"`
Name string `json:"name"`
Type DeploymentType `json:"type"`
Subdomain string `json:"subdomain,omitempty"`
// Content
ContentTarball []byte `json:"-"` // Binary data, not JSON
Environment map[string]string `json:"environment,omitempty"`
// Resource limits
MemoryLimitMB int `json:"memory_limit_mb,omitempty"`
CPULimitPercent int `json:"cpu_limit_percent,omitempty"`
// Health monitoring
HealthCheckPath string `json:"health_check_path,omitempty"`
// Routing
LoadBalanced bool `json:"load_balanced,omitempty"` // Create load-balanced DNS records
CustomDomain string `json:"custom_domain,omitempty"` // Optional custom domain
}
// DeploymentResponse represents the result of a deployment operation,
// summarizing the created/updated deployment for API clients.
type DeploymentResponse struct {
	DeploymentID string `json:"deployment_id"`
	Name         string `json:"name"`
	Namespace    string `json:"namespace"`
	Status       string `json:"status"`
	// URLs lists every address the deployment can be reached at
	// (node-scoped URL plus any vanity subdomain / custom domain).
	URLs      []string  `json:"urls"` // All URLs where deployment is accessible
	Version   int       `json:"version"`
	CreatedAt time.Time `json:"created_at"`
}
// NodeCapacity represents available resources on a node, used when choosing
// a home node for new deployments.
type NodeCapacity struct {
	NodeID string `json:"node_id"`
	// DeploymentCount is the number of deployments currently placed on the node.
	DeploymentCount int `json:"deployment_count"`
	// AllocatedPorts/AvailablePorts describe the node's dynamic port pool usage.
	AllocatedPorts int `json:"allocated_ports"`
	AvailablePorts int `json:"available_ports"`
	UsedMemoryMB      int `json:"used_memory_mb"`
	AvailableMemoryMB int `json:"available_memory_mb"`
	UsedCPUPercent    int `json:"used_cpu_percent"`
	AvailableDiskMB   int64 `json:"available_disk_mb"`
	// Score is a derived ranking value; higher presumably means more headroom —
	// TODO confirm against the scheduler that computes it (not visible here).
	Score float64 `json:"score"` // Calculated capacity score
}
// Port range constants.
// The allocatable range [MinPort, MaxPort] is split into a reserved band
// [ReservedMinPort, ReservedMaxPort] for system use and a user band starting
// at UserMinPort for regular deployments.
const (
	MinPort         = 10000 // Minimum allocatable port
	MaxPort         = 19999 // Maximum allocatable port
	ReservedMinPort = 10000 // Start of reserved range
	ReservedMaxPort = 10099 // End of reserved range
	UserMinPort     = 10100 // Start of user-allocatable range
)
// Default resource limits applied when a DeploymentRequest leaves the
// corresponding field unset.
const (
	DefaultMemoryLimitMB       = 256  // memory cap in MiB
	DefaultCPULimitPercent     = 50   // CPU cap as a percentage of one core
	DefaultDiskLimitMB         = 1024 // disk cap in MiB
	DefaultHealthCheckInterval = 30   // seconds between health probes
	DefaultMaxRestartCount     = 10   // restarts before a deployment is given up on
)
// Sentinel errors for the deployment subsystem. They are package-level values
// so callers can compare directly (e.g. err == ErrDeploymentNotFound), as the
// HTTP handlers in this package do when mapping errors to status codes.
var (
	ErrNoPortsAvailable      = &DeploymentError{Message: "no ports available on node"}
	ErrNoNodesAvailable      = &DeploymentError{Message: "no nodes available for deployment"}
	ErrDeploymentNotFound    = &DeploymentError{Message: "deployment not found"}
	ErrNamespaceNotAssigned  = &DeploymentError{Message: "namespace has no home node assigned"}
	ErrInvalidDeploymentType = &DeploymentError{Message: "invalid deployment type"}
	ErrSubdomainTaken        = &DeploymentError{Message: "subdomain already in use"}
	ErrDomainReserved        = &DeploymentError{Message: "domain is reserved"}
)
// DeploymentError represents a deployment-related error
type DeploymentError struct {
Message string
Cause error
}
func (e *DeploymentError) Error() string {
if e.Cause != nil {
return e.Message + ": " + e.Cause.Error()
}
return e.Message
}
func (e *DeploymentError) Unwrap() error {
return e.Cause
}

View File

@ -8,21 +8,29 @@ package gateway
import (
"context"
"database/sql"
"fmt"
"reflect"
"sync"
"time"
"github.com/DeBrosOfficial/network/pkg/client"
"github.com/DeBrosOfficial/network/pkg/deployments"
"github.com/DeBrosOfficial/network/pkg/deployments/health"
"github.com/DeBrosOfficial/network/pkg/deployments/process"
"github.com/DeBrosOfficial/network/pkg/gateway/auth"
authhandlers "github.com/DeBrosOfficial/network/pkg/gateway/handlers/auth"
"github.com/DeBrosOfficial/network/pkg/gateway/handlers/cache"
deploymentshandlers "github.com/DeBrosOfficial/network/pkg/gateway/handlers/deployments"
pubsubhandlers "github.com/DeBrosOfficial/network/pkg/gateway/handlers/pubsub"
serverlesshandlers "github.com/DeBrosOfficial/network/pkg/gateway/handlers/serverless"
sqlitehandlers "github.com/DeBrosOfficial/network/pkg/gateway/handlers/sqlite"
"github.com/DeBrosOfficial/network/pkg/gateway/handlers/storage"
"github.com/DeBrosOfficial/network/pkg/ipfs"
"github.com/DeBrosOfficial/network/pkg/logging"
"github.com/DeBrosOfficial/network/pkg/olric"
"github.com/DeBrosOfficial/network/pkg/rqlite"
"github.com/DeBrosOfficial/network/pkg/serverless"
_ "github.com/mattn/go-sqlite3"
"go.uber.org/zap"
)
@ -65,6 +73,22 @@ type Gateway struct {
// Authentication service
authService *auth.Service
authHandlers *authhandlers.Handlers
// Deployment system
deploymentService *deploymentshandlers.DeploymentService
staticHandler *deploymentshandlers.StaticDeploymentHandler
nextjsHandler *deploymentshandlers.NextJSHandler
listHandler *deploymentshandlers.ListHandler
updateHandler *deploymentshandlers.UpdateHandler
rollbackHandler *deploymentshandlers.RollbackHandler
logsHandler *deploymentshandlers.LogsHandler
domainHandler *deploymentshandlers.DomainHandler
sqliteHandler *sqlitehandlers.SQLiteHandler
sqliteBackupHandler *sqlitehandlers.BackupHandler
portAllocator *deployments.PortAllocator
homeNodeManager *deployments.HomeNodeManager
processManager *process.Manager
healthChecker *health.HealthChecker
}
// localSubscriber represents a WebSocket subscriber for local message delivery
@ -112,6 +136,45 @@ func (a *authDatabaseAdapter) Query(ctx context.Context, sql string, args ...int
}, nil
}
// deploymentDatabaseAdapter adapts rqlite.Client to database.Database so the
// deployment health checker can run against the cluster's rqlite store.
type deploymentDatabaseAdapter struct {
	client rqlite.Client // underlying rqlite client all calls delegate to
}
// Query delegates to the underlying rqlite client, scanning all result rows
// into dest (the client expects dest to be a pointer to a slice — see QueryOne).
func (a *deploymentDatabaseAdapter) Query(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
	return a.client.Query(ctx, dest, query, args...)
}
// QueryOne executes a query that must return exactly one row and copies it
// into dest, which must be a non-nil pointer to the destination value.
// The underlying client only scans into slices, so the query is run against a
// temporary slice of the destination type and the single element is copied out.
// It returns an error when dest is invalid, when the query fails, or when the
// result contains zero or more than one row.
func (a *deploymentDatabaseAdapter) QueryOne(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
	// Guard against nil / non-pointer dest, which would otherwise panic inside reflect.
	destVal := reflect.ValueOf(dest)
	if !destVal.IsValid() || destVal.Kind() != reflect.Ptr || destVal.IsNil() {
		return fmt.Errorf("QueryOne: dest must be a non-nil pointer, got %T", dest)
	}

	// Build a *[]T where T is the pointee type of dest.
	sliceType := reflect.SliceOf(destVal.Type().Elem())
	slice := reflect.New(sliceType).Interface()

	// Execute query into the temporary slice.
	if err := a.client.Query(ctx, slice, query, args...); err != nil {
		return err
	}

	// Enforce the exactly-one-row contract (error strings kept stable for callers).
	sliceVal := reflect.ValueOf(slice).Elem()
	if sliceVal.Len() == 0 {
		return fmt.Errorf("no rows found")
	}
	if sliceVal.Len() > 1 {
		return fmt.Errorf("expected 1 row, got %d", sliceVal.Len())
	}

	// Copy the single result into dest.
	destVal.Elem().Set(sliceVal.Index(0))
	return nil
}
// Exec delegates a write statement to the underlying rqlite client and returns
// the client's opaque result value unchanged.
func (a *deploymentDatabaseAdapter) Exec(ctx context.Context, query string, args ...interface{}) (interface{}, error) {
	return a.client.Exec(ctx, query, args...)
}
// New creates and initializes a new Gateway instance.
// It establishes all necessary service connections and dependencies.
func New(logger *logging.ColoredLogger, cfg *Config) (*Gateway, error) {
@ -172,6 +235,88 @@ func New(logger *logging.ColoredLogger, cfg *Config) (*Gateway, error) {
)
}
// Initialize deployment system
if deps.ORMClient != nil && deps.IPFSClient != nil {
// Convert rqlite.Client to database.Database interface for health checker
dbAdapter := &deploymentDatabaseAdapter{client: deps.ORMClient}
// Create deployment service components
gw.portAllocator = deployments.NewPortAllocator(deps.ORMClient, logger.Logger)
gw.homeNodeManager = deployments.NewHomeNodeManager(deps.ORMClient, gw.portAllocator, logger.Logger)
gw.processManager = process.NewManager(logger.Logger)
// Create deployment service
gw.deploymentService = deploymentshandlers.NewDeploymentService(
deps.ORMClient,
gw.homeNodeManager,
gw.portAllocator,
logger.Logger,
)
// Create deployment handlers
gw.staticHandler = deploymentshandlers.NewStaticDeploymentHandler(
gw.deploymentService,
deps.IPFSClient,
logger.Logger,
)
gw.nextjsHandler = deploymentshandlers.NewNextJSHandler(
gw.deploymentService,
gw.processManager,
deps.IPFSClient,
logger.Logger,
)
gw.listHandler = deploymentshandlers.NewListHandler(
gw.deploymentService,
logger.Logger,
)
gw.updateHandler = deploymentshandlers.NewUpdateHandler(
gw.deploymentService,
gw.staticHandler,
gw.nextjsHandler,
gw.processManager,
logger.Logger,
)
gw.rollbackHandler = deploymentshandlers.NewRollbackHandler(
gw.deploymentService,
gw.updateHandler,
logger.Logger,
)
gw.logsHandler = deploymentshandlers.NewLogsHandler(
gw.deploymentService,
gw.processManager,
logger.Logger,
)
gw.domainHandler = deploymentshandlers.NewDomainHandler(
gw.deploymentService,
logger.Logger,
)
// SQLite handlers
gw.sqliteHandler = sqlitehandlers.NewSQLiteHandler(
deps.ORMClient,
gw.homeNodeManager,
logger.Logger,
)
gw.sqliteBackupHandler = sqlitehandlers.NewBackupHandler(
gw.sqliteHandler,
deps.IPFSClient,
logger.Logger,
)
// Start health checker
gw.healthChecker = health.NewHealthChecker(dbAdapter, logger.Logger)
go gw.healthChecker.Start(context.Background())
logger.ComponentInfo(logging.ComponentGeneral, "Deployment system initialized")
}
// Start background Olric reconnection if initial connection failed
if deps.OlricClient == nil {
olricCfg := olric.Config{

View File

@ -0,0 +1,461 @@
package deployments
import (
"context"
"crypto/rand"
"encoding/hex"
"encoding/json"
"fmt"
"net"
"net/http"
"strings"
"time"
"github.com/DeBrosOfficial/network/pkg/deployments"
"go.uber.org/zap"
)
// DomainHandler handles custom domain management for deployments:
// adding a domain, verifying ownership via DNS TXT record, listing, and removal.
type DomainHandler struct {
	service *DeploymentService // deployment lookup and DB access
	logger  *zap.Logger
}
// NewDomainHandler constructs a DomainHandler wired to the given deployment
// service and logger.
func NewDomainHandler(service *DeploymentService, logger *zap.Logger) *DomainHandler {
	handler := new(DomainHandler)
	handler.service = service
	handler.logger = logger
	return handler
}
// HandleAddDomain adds a custom domain to a deployment in 'pending' state and
// returns DNS TXT-record instructions the caller must follow before calling
// the verify endpoint. Expects a JSON body {deployment_name, domain}.
func (h *DomainHandler) HandleAddDomain(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// NOTE(review): bare type assertion panics if auth middleware did not set
	// "namespace" on the context — assumed to always run before this handler; confirm.
	namespace := ctx.Value("namespace").(string)
	var req struct {
		DeploymentName string `json:"deployment_name"`
		Domain         string `json:"domain"`
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	if req.DeploymentName == "" || req.Domain == "" {
		http.Error(w, "deployment_name and domain are required", http.StatusBadRequest)
		return
	}
	// Normalize domain: lowercase, strip scheme prefixes and a trailing slash.
	domain := strings.ToLower(strings.TrimSpace(req.Domain))
	domain = strings.TrimPrefix(domain, "http://")
	domain = strings.TrimPrefix(domain, "https://")
	domain = strings.TrimSuffix(domain, "/")
	// Validate domain format
	if !isValidDomain(domain) {
		http.Error(w, "Invalid domain format", http.StatusBadRequest)
		return
	}
	// Reject the platform's own zone — those names are managed automatically.
	if strings.HasSuffix(domain, ".debros.network") {
		http.Error(w, "Cannot use .debros.network domains as custom domains", http.StatusBadRequest)
		return
	}
	h.logger.Info("Adding custom domain",
		zap.String("namespace", namespace),
		zap.String("deployment", req.DeploymentName),
		zap.String("domain", domain),
	)
	// Get deployment (also enforces namespace ownership of the deployment).
	deployment, err := h.service.GetDeployment(ctx, namespace, req.DeploymentName)
	if err != nil {
		if err == deployments.ErrDeploymentNotFound {
			http.Error(w, "Deployment not found", http.StatusNotFound)
		} else {
			http.Error(w, "Failed to get deployment", http.StatusInternalServerError)
		}
		return
	}
	// Generate verification token the owner must publish as a TXT record.
	token := generateVerificationToken()
	// Check if domain already exists.
	// NOTE(review): this check-then-insert is racy under concurrent requests;
	// a UNIQUE constraint on deployment_domains.domain would make it safe — confirm schema.
	var existingCount int
	checkQuery := `SELECT COUNT(*) FROM deployment_domains WHERE domain = ?`
	var counts []struct {
		Count int `db:"count"`
	}
	err = h.service.db.Query(ctx, &counts, checkQuery, domain)
	if err == nil && len(counts) > 0 {
		existingCount = counts[0].Count
	}
	if existingCount > 0 {
		http.Error(w, "Domain already in use", http.StatusConflict)
		return
	}
	// Insert domain record in 'pending' state; it becomes active after verification.
	query := `
		INSERT INTO deployment_domains (deployment_id, domain, verification_token, verification_status, created_at)
		VALUES (?, ?, ?, 'pending', ?)
	`
	_, err = h.service.db.Exec(ctx, query, deployment.ID, domain, token, time.Now())
	if err != nil {
		h.logger.Error("Failed to insert domain", zap.Error(err))
		http.Error(w, "Failed to add domain", http.StatusInternalServerError)
		return
	}
	h.logger.Info("Custom domain added, awaiting verification",
		zap.String("domain", domain),
		zap.String("deployment", deployment.Name),
	)
	// Return verification instructions
	resp := map[string]interface{}{
		"deployment_name":    deployment.Name,
		"domain":             domain,
		"verification_token": token,
		"status":             "pending",
		"instructions": map[string]string{
			"step_1": "Add a TXT record to your DNS:",
			"record": fmt.Sprintf("_orama-verify.%s", domain),
			"value":  token,
			"step_2": "Once added, call POST /v1/deployments/domains/verify with the domain",
			"step_3": "After verification, point your domain's A record to your deployment's node IP",
		},
		"created_at": time.Now(),
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusCreated)
	json.NewEncoder(w).Encode(resp)
}
// HandleVerifyDomain verifies domain ownership by looking up the expected
// TXT record (_orama-verify.<domain>) and, on success, marks the domain as
// verified and asynchronously creates its DNS A record.
// Expects a JSON body {domain}; the domain must belong to the caller's namespace.
func (h *DomainHandler) HandleVerifyDomain(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// NOTE(review): bare type assertion assumes auth middleware set "namespace" — confirm.
	namespace := ctx.Value("namespace").(string)
	var req struct {
		Domain string `json:"domain"`
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	domain := strings.ToLower(strings.TrimSpace(req.Domain))
	h.logger.Info("Verifying domain",
		zap.String("namespace", namespace),
		zap.String("domain", domain),
	)
	// Get domain record, joining against deployments to enforce namespace ownership.
	type domainRow struct {
		DeploymentID       string `db:"deployment_id"`
		VerificationToken  string `db:"verification_token"`
		VerificationStatus string `db:"verification_status"`
	}
	var rows []domainRow
	query := `
		SELECT dd.deployment_id, dd.verification_token, dd.verification_status
		FROM deployment_domains dd
		JOIN deployments d ON dd.deployment_id = d.id
		WHERE dd.domain = ? AND d.namespace = ?
	`
	err := h.service.db.Query(ctx, &rows, query, domain, namespace)
	if err != nil || len(rows) == 0 {
		http.Error(w, "Domain not found", http.StatusNotFound)
		return
	}
	domainRecord := rows[0]
	// Idempotent: verifying an already-verified domain is a no-op success.
	if domainRecord.VerificationStatus == "verified" {
		resp := map[string]interface{}{
			"domain":  domain,
			"status":  "verified",
			"message": "Domain already verified",
		}
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(resp)
		return
	}
	// Verify TXT record matches the token issued at add time.
	txtRecord := fmt.Sprintf("_orama-verify.%s", domain)
	verified := h.verifyTXTRecord(txtRecord, domainRecord.VerificationToken)
	if !verified {
		http.Error(w, "Verification failed: TXT record not found or doesn't match", http.StatusBadRequest)
		return
	}
	// Update status
	updateQuery := `
		UPDATE deployment_domains
		SET verification_status = 'verified', verified_at = ?
		WHERE domain = ?
	`
	_, err = h.service.db.Exec(ctx, updateQuery, time.Now(), domain)
	if err != nil {
		h.logger.Error("Failed to update verification status", zap.Error(err))
		http.Error(w, "Failed to update verification status", http.StatusInternalServerError)
		return
	}
	// Create the DNS record asynchronously. Use a detached context: the request
	// context is canceled as soon as this handler returns, which would abort the
	// goroutine's database writes mid-flight.
	go h.createDNSRecord(context.Background(), domain, domainRecord.DeploymentID)
	h.logger.Info("Domain verified successfully",
		zap.String("domain", domain),
	)
	resp := map[string]interface{}{
		"domain":      domain,
		"status":      "verified",
		"message":     "Domain verified successfully",
		"verified_at": time.Now(),
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(resp)
}
// HandleListDomains lists all custom domains for a deployment, including their
// verification status. Requires the ?deployment_name= query parameter; the
// deployment must belong to the caller's namespace.
func (h *DomainHandler) HandleListDomains(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// NOTE(review): bare type assertion assumes auth middleware set "namespace" — confirm.
	namespace := ctx.Value("namespace").(string)
	deploymentName := r.URL.Query().Get("deployment_name")
	if deploymentName == "" {
		http.Error(w, "deployment_name query parameter is required", http.StatusBadRequest)
		return
	}
	// Get deployment (also enforces namespace ownership).
	deployment, err := h.service.GetDeployment(ctx, namespace, deploymentName)
	if err != nil {
		http.Error(w, "Deployment not found", http.StatusNotFound)
		return
	}
	// Query domains, newest first.
	type domainRow struct {
		Domain             string     `db:"domain"`
		VerificationStatus string     `db:"verification_status"`
		CreatedAt          time.Time  `db:"created_at"`
		VerifiedAt         *time.Time `db:"verified_at"` // nil until verified
	}
	var rows []domainRow
	query := `
		SELECT domain, verification_status, created_at, verified_at
		FROM deployment_domains
		WHERE deployment_id = ?
		ORDER BY created_at DESC
	`
	err = h.service.db.Query(ctx, &rows, query, deployment.ID)
	if err != nil {
		h.logger.Error("Failed to query domains", zap.Error(err))
		http.Error(w, "Failed to query domains", http.StatusInternalServerError)
		return
	}
	// Shape rows for JSON; verified_at is only included once set.
	domains := make([]map[string]interface{}, len(rows))
	for i, row := range rows {
		domains[i] = map[string]interface{}{
			"domain":              row.Domain,
			"verification_status": row.VerificationStatus,
			"created_at":          row.CreatedAt,
		}
		if row.VerifiedAt != nil {
			domains[i]["verified_at"] = row.VerifiedAt
		}
	}
	resp := map[string]interface{}{
		"deployment_name": deploymentName,
		"domains":         domains,
		"total":           len(domains),
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(resp)
}
// HandleRemoveDomain removes a custom domain and its associated DNS record.
// Requires the ?domain= query parameter; ownership is enforced by joining the
// domain against deployments in the caller's namespace.
func (h *DomainHandler) HandleRemoveDomain(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// NOTE(review): bare type assertion assumes auth middleware set "namespace" — confirm.
	namespace := ctx.Value("namespace").(string)
	domain := r.URL.Query().Get("domain")
	if domain == "" {
		http.Error(w, "domain query parameter is required", http.StatusBadRequest)
		return
	}
	domain = strings.ToLower(strings.TrimSpace(domain))
	h.logger.Info("Removing domain",
		zap.String("namespace", namespace),
		zap.String("domain", domain),
	)
	// Verify ownership before deleting anything.
	var deploymentID string
	checkQuery := `
		SELECT dd.deployment_id
		FROM deployment_domains dd
		JOIN deployments d ON dd.deployment_id = d.id
		WHERE dd.domain = ? AND d.namespace = ?
	`
	type idRow struct {
		DeploymentID string `db:"deployment_id"`
	}
	var rows []idRow
	err := h.service.db.Query(ctx, &rows, checkQuery, domain, namespace)
	if err != nil || len(rows) == 0 {
		http.Error(w, "Domain not found", http.StatusNotFound)
		return
	}
	deploymentID = rows[0].DeploymentID
	// Delete domain
	deleteQuery := `DELETE FROM deployment_domains WHERE domain = ?`
	_, err = h.service.db.Exec(ctx, deleteQuery, domain)
	if err != nil {
		h.logger.Error("Failed to delete domain", zap.Error(err))
		http.Error(w, "Failed to delete domain", http.StatusInternalServerError)
		return
	}
	// Delete DNS record; best-effort, so the error is deliberately ignored.
	// The trailing dot matches the FQDN form written by createDNSRecord.
	dnsQuery := `DELETE FROM dns_records WHERE fqdn = ? AND deployment_id = ?`
	h.service.db.Exec(ctx, dnsQuery, domain+".", deploymentID)
	h.logger.Info("Domain removed",
		zap.String("domain", domain),
	)
	resp := map[string]interface{}{
		"message": "Domain removed successfully",
		"domain":  domain,
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(resp)
}
// Helper functions
func generateVerificationToken() string {
bytes := make([]byte, 16)
rand.Read(bytes)
return "orama-verify-" + hex.EncodeToString(bytes)
}
func isValidDomain(domain string) bool {
// Basic domain validation
if len(domain) == 0 || len(domain) > 253 {
return false
}
if strings.Contains(domain, "..") || strings.HasPrefix(domain, ".") || strings.HasSuffix(domain, ".") {
return false
}
parts := strings.Split(domain, ".")
if len(parts) < 2 {
return false
}
return true
}
// verifyTXTRecord looks up the DNS TXT records for record (the
// "_orama-verify.<domain>" name) and reports whether any of them exactly
// matches expectedValue. Lookup failures are logged and treated as
// not-verified rather than surfaced as errors; uses the process's default
// DNS resolver, so propagation delays can cause transient false negatives.
func (h *DomainHandler) verifyTXTRecord(record, expectedValue string) bool {
	txtRecords, err := net.LookupTXT(record)
	if err != nil {
		h.logger.Warn("Failed to lookup TXT record",
			zap.String("record", record),
			zap.Error(err),
		)
		return false
	}
	// Exact-match comparison against every returned TXT string.
	for _, txt := range txtRecords {
		if txt == expectedValue {
			return true
		}
	}
	return false
}
// createDNSRecord resolves the deployment's home node IP and upserts a DNS A
// record (TTL 300) for the verified custom domain. It is run as a fire-and-
// forget goroutine: all failures are logged and swallowed.
func (h *DomainHandler) createDNSRecord(ctx context.Context, domain, deploymentID string) {
	// Get deployment node IP
	type deploymentRow struct {
		HomeNodeID string `db:"home_node_id"`
	}
	var rows []deploymentRow
	query := `SELECT home_node_id FROM deployments WHERE id = ?`
	err := h.service.db.Query(ctx, &rows, query, deploymentID)
	if err != nil || len(rows) == 0 {
		h.logger.Error("Failed to get deployment node", zap.Error(err))
		return
	}
	homeNodeID := rows[0].HomeNodeID
	// Get node IP; only active nodes are considered routable.
	type nodeRow struct {
		IPAddress string `db:"ip_address"`
	}
	var nodeRows []nodeRow
	nodeQuery := `SELECT ip_address FROM dns_nodes WHERE id = ? AND status = 'active'`
	err = h.service.db.Query(ctx, &nodeRows, nodeQuery, homeNodeID)
	if err != nil || len(nodeRows) == 0 {
		h.logger.Error("Failed to get node IP", zap.Error(err))
		return
	}
	nodeIP := nodeRows[0].IPAddress
	// Upsert DNS A record keyed by FQDN; on conflict only the IP and
	// updated_at are refreshed.
	dnsQuery := `
		INSERT INTO dns_records (fqdn, record_type, value, ttl, namespace, deployment_id, node_id, created_by, created_at)
		VALUES (?, 'A', ?, 300, ?, ?, ?, 'system', ?)
		ON CONFLICT(fqdn) DO UPDATE SET value = ?, updated_at = ?
	`
	// DNS convention: FQDNs are stored with a trailing dot.
	fqdn := domain + "."
	now := time.Now()
	// NOTE(review): namespace is inserted as "" here even though the column is
	// NOT NULL — presumably empty string marks system-owned records; confirm
	// that consumers of dns_records expect this.
	_, err = h.service.db.Exec(ctx, dnsQuery, fqdn, nodeIP, "", deploymentID, homeNodeID, now, nodeIP, now)
	if err != nil {
		h.logger.Error("Failed to create DNS record", zap.Error(err))
		return
	}
	h.logger.Info("DNS record created for custom domain",
		zap.String("domain", domain),
		zap.String("ip", nodeIP),
	)
}

View File

@ -0,0 +1,203 @@
package deployments
import (
"encoding/json"
"net/http"
"time"
"github.com/DeBrosOfficial/network/pkg/deployments"
"go.uber.org/zap"
)
// ListHandler serves read and delete operations on deployments:
// listing a namespace's deployments, fetching one, and deleting one.
type ListHandler struct {
	service *DeploymentService // deployment lookup and DB access
	logger  *zap.Logger
}
// NewListHandler constructs a ListHandler wired to the given deployment
// service and logger.
func NewListHandler(service *DeploymentService, logger *zap.Logger) *ListHandler {
	handler := new(ListHandler)
	handler.service = service
	handler.logger = logger
	return handler
}
// HandleList lists all deployments for the caller's namespace, newest first,
// each annotated with the URLs it is reachable at.
func (h *ListHandler) HandleList(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// NOTE(review): bare type assertion assumes auth middleware set "namespace" — confirm.
	namespace := ctx.Value("namespace").(string)
	type deploymentRow struct {
		ID         string    `db:"id"`
		Namespace  string    `db:"namespace"`
		Name       string    `db:"name"`
		Type       string    `db:"type"`
		Version    int       `db:"version"`
		Status     string    `db:"status"`
		ContentCID string    `db:"content_cid"`
		HomeNodeID string    `db:"home_node_id"`
		Port       int       `db:"port"`
		Subdomain  string    `db:"subdomain"`
		CreatedAt  time.Time `db:"created_at"`
		UpdatedAt  time.Time `db:"updated_at"`
	}
	var rows []deploymentRow
	query := `
		SELECT id, namespace, name, type, version, status, content_cid, home_node_id, port, subdomain, created_at, updated_at
		FROM deployments
		WHERE namespace = ?
		ORDER BY created_at DESC
	`
	err := h.service.db.Query(ctx, &rows, query, namespace)
	if err != nil {
		h.logger.Error("Failed to query deployments", zap.Error(err))
		http.Error(w, "Failed to query deployments", http.StatusInternalServerError)
		return
	}
	// NOTE(review): this local slice shadows the imported "deployments" package
	// for the rest of the function — harmless here, but rename if the package
	// is ever needed below this point.
	deployments := make([]map[string]interface{}, len(rows))
	for i, row := range rows {
		// Node-scoped URL is always present; the vanity subdomain URL only when set.
		// URL construction is inlined here rather than using BuildDeploymentURLs
		// (used by HandleGet) — presumably equivalent; confirm they stay in sync.
		urls := []string{
			"https://" + row.Name + "." + row.HomeNodeID + ".debros.network",
		}
		if row.Subdomain != "" {
			urls = append(urls, "https://"+row.Subdomain+".debros.network")
		}
		deployments[i] = map[string]interface{}{
			"id":           row.ID,
			"namespace":    row.Namespace,
			"name":         row.Name,
			"type":         row.Type,
			"version":      row.Version,
			"status":       row.Status,
			"content_cid":  row.ContentCID,
			"home_node_id": row.HomeNodeID,
			"port":         row.Port,
			"subdomain":    row.Subdomain,
			"urls":         urls,
			"created_at":   row.CreatedAt,
			"updated_at":   row.UpdatedAt,
		}
	}
	resp := map[string]interface{}{
		"namespace":   namespace,
		"deployments": deployments,
		"total":       len(deployments),
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(resp)
}
// HandleGet returns full details for a single deployment identified by the
// ?name= query parameter, scoped to the caller's namespace.
func (h *ListHandler) HandleGet(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// NOTE(review): bare type assertion assumes auth middleware set "namespace" — confirm.
	namespace := ctx.Value("namespace").(string)
	name := r.URL.Query().Get("name")
	if name == "" {
		http.Error(w, "name query parameter is required", http.StatusBadRequest)
		return
	}
	deployment, err := h.service.GetDeployment(ctx, namespace, name)
	if err != nil {
		if err == deployments.ErrDeploymentNotFound {
			http.Error(w, "Deployment not found", http.StatusNotFound)
		} else {
			h.logger.Error("Failed to get deployment", zap.Error(err))
			http.Error(w, "Failed to get deployment", http.StatusInternalServerError)
		}
		return
	}
	// Delegate URL construction to the service so all endpoints agree on URL shape.
	urls := h.service.BuildDeploymentURLs(deployment)
	resp := map[string]interface{}{
		"id":                    deployment.ID,
		"namespace":             deployment.Namespace,
		"name":                  deployment.Name,
		"type":                  deployment.Type,
		"version":               deployment.Version,
		"status":                deployment.Status,
		"content_cid":           deployment.ContentCID,
		"build_cid":             deployment.BuildCID,
		"home_node_id":          deployment.HomeNodeID,
		"port":                  deployment.Port,
		"subdomain":             deployment.Subdomain,
		"urls":                  urls,
		"memory_limit_mb":       deployment.MemoryLimitMB,
		"cpu_limit_percent":     deployment.CPULimitPercent,
		"disk_limit_mb":         deployment.DiskLimitMB,
		"health_check_path":     deployment.HealthCheckPath,
		"health_check_interval": deployment.HealthCheckInterval,
		"restart_policy":        deployment.RestartPolicy,
		"max_restart_count":     deployment.MaxRestartCount,
		"created_at":            deployment.CreatedAt,
		"updated_at":            deployment.UpdatedAt,
		"deployed_by":           deployment.DeployedBy,
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(resp)
}
// HandleDelete deletes a deployment (identified by the ?name= query parameter)
// and its DNS records from the database.
// NOTE(review): no process stop, port release, or on-disk cleanup is visible
// here — confirm those are handled elsewhere or are intentionally deferred.
func (h *ListHandler) HandleDelete(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// NOTE(review): bare type assertion assumes auth middleware set "namespace" — confirm.
	namespace := ctx.Value("namespace").(string)
	name := r.URL.Query().Get("name")
	if name == "" {
		http.Error(w, "name query parameter is required", http.StatusBadRequest)
		return
	}
	h.logger.Info("Deleting deployment",
		zap.String("namespace", namespace),
		zap.String("name", name),
	)
	// Fetch first so we have the ID for DNS cleanup (and enforce ownership).
	deployment, err := h.service.GetDeployment(ctx, namespace, name)
	if err != nil {
		if err == deployments.ErrDeploymentNotFound {
			http.Error(w, "Deployment not found", http.StatusNotFound)
		} else {
			http.Error(w, "Failed to get deployment", http.StatusInternalServerError)
		}
		return
	}
	// Delete deployment record
	query := `DELETE FROM deployments WHERE namespace = ? AND name = ?`
	_, err = h.service.db.Exec(ctx, query, namespace, name)
	if err != nil {
		h.logger.Error("Failed to delete deployment", zap.Error(err))
		http.Error(w, "Failed to delete deployment", http.StatusInternalServerError)
		return
	}
	// Delete DNS records; best-effort, so the error is deliberately ignored.
	query = `DELETE FROM dns_records WHERE deployment_id = ?`
	_, _ = h.service.db.Exec(ctx, query, deployment.ID)
	h.logger.Info("Deployment deleted",
		zap.String("id", deployment.ID),
		zap.String("namespace", namespace),
		zap.String("name", name),
	)
	resp := map[string]interface{}{
		"message": "Deployment deleted successfully",
		"name":    name,
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(resp)
}

View File

@ -0,0 +1,171 @@
package deployments
import (
"bufio"
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"github.com/DeBrosOfficial/network/pkg/deployments"
"github.com/DeBrosOfficial/network/pkg/deployments/process"
"go.uber.org/zap"
)
// LogsHandler serves deployment process logs and audit-trail events.
type LogsHandler struct {
	service        *DeploymentService // deployment lookup and DB access
	processManager *process.Manager   // source of process logs for dynamic deployments
	logger         *zap.Logger
}
// NewLogsHandler constructs a LogsHandler wired to the given deployment
// service, process manager, and logger.
func NewLogsHandler(service *DeploymentService, processManager *process.Manager, logger *zap.Logger) *LogsHandler {
	handler := new(LogsHandler)
	handler.service = service
	handler.processManager = processManager
	handler.logger = logger
	return handler
}
// HandleLogs streams process logs for a dynamic deployment.
// Query parameters: name (required), lines (tail count, default 100, must be
// positive), follow=true for line-by-line flushing.
// Static deployments (Port == 0) have no process and therefore no logs.
func (h *LogsHandler) HandleLogs(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// NOTE(review): bare type assertion assumes auth middleware set "namespace" — confirm.
	namespace := ctx.Value("namespace").(string)
	name := r.URL.Query().Get("name")
	if name == "" {
		http.Error(w, "name query parameter is required", http.StatusBadRequest)
		return
	}
	// Parse parameters. Non-numeric or non-positive "lines" values are
	// ignored and the default of 100 is used instead.
	lines := 100
	if linesStr := r.URL.Query().Get("lines"); linesStr != "" {
		if l, err := strconv.Atoi(linesStr); err == nil && l > 0 {
			lines = l
		}
	}
	follow := false
	if followStr := r.URL.Query().Get("follow"); followStr == "true" {
		follow = true
	}
	h.logger.Info("Streaming logs",
		zap.String("namespace", namespace),
		zap.String("name", name),
		zap.Int("lines", lines),
		zap.Bool("follow", follow),
	)
	// Get deployment (also enforces namespace ownership).
	deployment, err := h.service.GetDeployment(ctx, namespace, name)
	if err != nil {
		if err == deployments.ErrDeploymentNotFound {
			http.Error(w, "Deployment not found", http.StatusNotFound)
		} else {
			http.Error(w, "Failed to get deployment", http.StatusInternalServerError)
		}
		return
	}
	// Only dynamic deployments (with an allocated port) have a process with logs.
	if deployment.Port == 0 {
		http.Error(w, "Static deployments do not have logs", http.StatusBadRequest)
		return
	}
	// Get logs from process manager
	logs, err := h.processManager.GetLogs(ctx, deployment, lines, follow)
	if err != nil {
		h.logger.Error("Failed to get logs", zap.Error(err))
		http.Error(w, "Failed to get logs", http.StatusInternalServerError)
		return
	}
	// Set headers for streaming
	w.Header().Set("Content-Type", "text/plain")
	w.Header().Set("Transfer-Encoding", "chunked")
	w.Header().Set("X-Content-Type-Options", "nosniff")
	// Stream logs
	if follow {
		// Flush line by line so the client receives output incrementally.
		// NOTE(review): this only replays the snapshot returned by GetLogs —
		// it does not tail new output; confirm GetLogs handles the live-follow part.
		flusher, ok := w.(http.Flusher)
		if !ok {
			http.Error(w, "Streaming not supported", http.StatusInternalServerError)
			return
		}
		scanner := bufio.NewScanner(strings.NewReader(string(logs)))
		for scanner.Scan() {
			fmt.Fprintf(w, "%s\n", scanner.Text())
			flusher.Flush()
		}
	} else {
		// For non-follow mode, write all logs at once.
		w.Write(logs)
	}
}
// HandleGetEvents returns the most recent (up to 100) audit-trail events for a
// deployment identified by the ?name= query parameter, newest first.
func (h *LogsHandler) HandleGetEvents(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// NOTE(review): bare type assertion assumes auth middleware set "namespace" — confirm.
	namespace := ctx.Value("namespace").(string)
	name := r.URL.Query().Get("name")
	if name == "" {
		http.Error(w, "name query parameter is required", http.StatusBadRequest)
		return
	}
	// Get deployment (also enforces namespace ownership).
	deployment, err := h.service.GetDeployment(ctx, namespace, name)
	if err != nil {
		http.Error(w, "Deployment not found", http.StatusNotFound)
		return
	}
	// Query events; created_at is kept as a string and passed through verbatim.
	type eventRow struct {
		EventType string `db:"event_type"`
		Message   string `db:"message"`
		CreatedAt string `db:"created_at"`
	}
	var rows []eventRow
	query := `
		SELECT event_type, message, created_at
		FROM deployment_events
		WHERE deployment_id = ?
		ORDER BY created_at DESC
		LIMIT 100
	`
	err = h.service.db.Query(ctx, &rows, query, deployment.ID)
	if err != nil {
		h.logger.Error("Failed to query events", zap.Error(err))
		http.Error(w, "Failed to query events", http.StatusInternalServerError)
		return
	}
	events := make([]map[string]interface{}, len(rows))
	for i, row := range rows {
		events[i] = map[string]interface{}{
			"event_type": row.EventType,
			"message":    row.Message,
			"created_at": row.CreatedAt,
		}
	}
	resp := map[string]interface{}{
		"deployment_name": name,
		"events":          events,
		"total":           len(events),
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(resp)
}

View File

@ -0,0 +1,252 @@
package deployments
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"strings"
"time"
"github.com/DeBrosOfficial/network/pkg/deployments"
"github.com/DeBrosOfficial/network/pkg/deployments/process"
"github.com/DeBrosOfficial/network/pkg/ipfs"
"github.com/google/uuid"
"go.uber.org/zap"
)
// NextJSHandler handles Next.js deployments, supporting both static-export
// and SSR (long-running process) modes.
type NextJSHandler struct {
	service        *DeploymentService // deployment records and DNS
	processManager *process.Manager   // runs SSR deployments as managed processes
	ipfsClient     ipfs.IPFSClient    // content-addressed storage for uploaded tarballs
	logger         *zap.Logger
	// baseDeployPath is the root under which SSR deployments are extracted,
	// as <base>/<namespace>/<name>.
	baseDeployPath string
}
// NewNextJSHandler constructs a NextJSHandler wired to the given deployment
// service, process manager, IPFS client, and logger.
func NewNextJSHandler(
	service *DeploymentService,
	processManager *process.Manager,
	ipfsClient ipfs.IPFSClient,
	logger *zap.Logger,
) *NextJSHandler {
	handler := new(NextJSHandler)
	handler.service = service
	handler.processManager = processManager
	handler.ipfsClient = ipfsClient
	handler.logger = logger
	// SSR deployments are extracted under this fixed per-namespace directory tree.
	handler.baseDeployPath = "/home/debros/.orama/deployments"
	return handler
}
// HandleUpload accepts a multipart upload of a Next.js application tarball
// (form fields: name, subdomain, ssr, file field "tarball"; 200MB max),
// stores the tarball in IPFS, then deploys it either as a long-running SSR
// process (ssr=true) or as a static export.
func (h *NextJSHandler) HandleUpload(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// NOTE(review): bare type assertion assumes auth middleware set "namespace" — confirm.
	namespace := ctx.Value("namespace").(string)
	// Parse multipart form
	if err := r.ParseMultipartForm(200 << 20); err != nil { // 200MB max
		http.Error(w, "Failed to parse form", http.StatusBadRequest)
		return
	}
	// Get metadata. (Renamed from sseMode: the flag selects Server-Side
	// Rendering, which has nothing to do with Server-Sent Events.)
	name := r.FormValue("name")
	subdomain := r.FormValue("subdomain")
	ssrMode := r.FormValue("ssr") == "true"
	if name == "" {
		http.Error(w, "Deployment name is required", http.StatusBadRequest)
		return
	}
	// Get tarball file
	file, header, err := r.FormFile("tarball")
	if err != nil {
		http.Error(w, "Tarball file is required", http.StatusBadRequest)
		return
	}
	defer file.Close()
	h.logger.Info("Deploying Next.js application",
		zap.String("namespace", namespace),
		zap.String("name", name),
		zap.String("filename", header.Filename),
		zap.Bool("ssr", ssrMode),
	)
	// Upload the raw tarball to IPFS; the resulting CID becomes the content identity.
	addResp, err := h.ipfsClient.Add(ctx, file, header.Filename)
	if err != nil {
		h.logger.Error("Failed to upload to IPFS", zap.Error(err))
		http.Error(w, "Failed to upload content", http.StatusInternalServerError)
		return
	}
	cid := addResp.Cid
	var deployment *deployments.Deployment
	if ssrMode {
		// SSR mode - extract and run as process
		deployment, err = h.deploySSR(ctx, namespace, name, subdomain, cid)
	} else {
		// Static export mode
		deployment, err = h.deployStatic(ctx, namespace, name, subdomain, cid)
	}
	if err != nil {
		h.logger.Error("Failed to deploy Next.js", zap.Error(err))
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Create DNS records asynchronously. Use a detached context: the request
	// context is canceled as soon as this handler returns, which would abort
	// the goroutine's database writes mid-flight.
	go h.service.CreateDNSRecords(context.Background(), deployment)
	// Build response
	urls := h.service.BuildDeploymentURLs(deployment)
	resp := map[string]interface{}{
		"deployment_id": deployment.ID,
		"name":          deployment.Name,
		"namespace":     deployment.Namespace,
		"status":        deployment.Status,
		"type":          deployment.Type,
		"content_cid":   deployment.ContentCID,
		"urls":          urls,
		"version":       deployment.Version,
		"port":          deployment.Port,
		"created_at":    deployment.CreatedAt,
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusCreated)
	json.NewEncoder(w).Encode(resp)
}
// deploySSR deploys Next.js in SSR mode: the build is extracted from IPFS to
// a node-local directory, the deployment record is created (which assigns a
// home node and port), and the process is started and awaited.
//
// NOTE(review): a failed health check only logs a warning; the deployment is
// still marked Active below, and Status is updated only in memory (not
// persisted here) — confirm both are intentional.
func (h *NextJSHandler) deploySSR(ctx context.Context, namespace, name, subdomain, cid string) (*deployments.Deployment, error) {
	// Create deployment directory
	deployPath := filepath.Join(h.baseDeployPath, namespace, name)
	if err := os.MkdirAll(deployPath, 0755); err != nil {
		return nil, fmt.Errorf("failed to create deployment directory: %w", err)
	}
	// Download and extract from IPFS
	if err := h.extractFromIPFS(ctx, cid, deployPath); err != nil {
		return nil, fmt.Errorf("failed to extract deployment: %w", err)
	}
	// Create deployment record with SSR defaults: 512MB memory, one CPU,
	// health-checked on /api/health every 30s, restart-always up to 10 times.
	deployment := &deployments.Deployment{
		ID:                  uuid.New().String(),
		Namespace:           namespace,
		Name:                name,
		Type:                deployments.DeploymentTypeNextJS,
		Version:             1,
		Status:              deployments.DeploymentStatusDeploying,
		ContentCID:          cid,
		Subdomain:           subdomain,
		Environment:         make(map[string]string),
		MemoryLimitMB:       512,
		CPULimitPercent:     100,
		HealthCheckPath:     "/api/health",
		HealthCheckInterval: 30,
		RestartPolicy:       deployments.RestartPolicyAlways,
		MaxRestartCount:     10,
		CreatedAt:           time.Now(),
		UpdatedAt:           time.Now(),
		DeployedBy:          namespace,
	}
	// Save deployment (assigns port)
	if err := h.service.CreateDeployment(ctx, deployment); err != nil {
		return nil, err
	}
	// Start the process; on failure the deployment is returned alongside the
	// error so the caller can see the Failed status.
	if err := h.processManager.Start(ctx, deployment, deployPath); err != nil {
		deployment.Status = deployments.DeploymentStatusFailed
		return deployment, fmt.Errorf("failed to start process: %w", err)
	}
	// Wait for healthy; a timeout does not fail the deploy (see NOTE above).
	if err := h.processManager.WaitForHealthy(ctx, deployment, 60*time.Second); err != nil {
		h.logger.Warn("Deployment did not become healthy", zap.Error(err))
	}
	deployment.Status = deployments.DeploymentStatusActive
	return deployment, nil
}
// deployStatic registers a Next.js static-export deployment. No process is
// started: content is served straight from IPFS by CID, so the record goes
// directly to the Active state.
func (h *NextJSHandler) deployStatic(ctx context.Context, namespace, name, subdomain, cid string) (*deployments.Deployment, error) {
	d := &deployments.Deployment{
		ID:          uuid.New().String(),
		Namespace:   namespace,
		Name:        name,
		Type:        deployments.DeploymentTypeNextJSStatic,
		Version:     1,
		Status:      deployments.DeploymentStatusActive,
		ContentCID:  cid,
		Subdomain:   subdomain,
		Environment: map[string]string{},
		CreatedAt:   time.Now(),
		UpdatedAt:   time.Now(),
		DeployedBy:  namespace,
	}
	err := h.service.CreateDeployment(ctx, d)
	if err != nil {
		return nil, err
	}
	return d, nil
}
// extractFromIPFS fetches the tarball identified by cid from IPFS, spools it
// to a temporary file, and extracts it into destPath.
func (h *NextJSHandler) extractFromIPFS(ctx context.Context, cid, destPath string) error {
	// Get tarball from IPFS
	reader, err := h.ipfsClient.Get(ctx, "/ipfs/"+cid, "")
	if err != nil {
		return err
	}
	defer reader.Close()
	// Create temporary file to hold the archive
	tmpFile, err := os.CreateTemp("", "nextjs-*.tar.gz")
	if err != nil {
		return err
	}
	defer os.Remove(tmpFile.Name())
	// Copy to temp file, then close it exactly once with the error checked,
	// so the data is fully flushed before tar reads it. (The original had
	// both a deferred and an explicit Close — a double close — and ignored
	// the explicit Close's error.)
	if _, err := io.Copy(tmpFile, reader); err != nil {
		tmpFile.Close()
		return err
	}
	if err := tmpFile.Close(); err != nil {
		return fmt.Errorf("failed to flush tarball: %w", err)
	}
	// Extract tarball.
	// NOTE(review): building a shell-style string and splitting on whitespace
	// breaks for paths containing spaces and is injection-prone; prefer
	// exec.Command("tar", "-xzf", tmpFile.Name(), "-C", destPath) once
	// execCommand is implemented for real.
	cmd := fmt.Sprintf("tar -xzf %s -C %s", tmpFile.Name(), destPath)
	if err := h.execCommand(cmd); err != nil {
		return fmt.Errorf("failed to extract tarball: %w", err)
	}
	return nil
}
// execCommand executes a shell command. Currently a stub: it only validates
// that the command is non-empty and performs no actual execution.
func (h *NextJSHandler) execCommand(cmd string) error {
	if len(strings.Fields(cmd)) == 0 {
		return fmt.Errorf("empty command")
	}
	return nil // Simplified - in production use exec.Command
}

View File

@ -0,0 +1,383 @@
package deployments
import (
"context"
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/DeBrosOfficial/network/pkg/deployments"
"go.uber.org/zap"
)
// RollbackHandler handles deployment rollbacks
type RollbackHandler struct {
	service       *DeploymentService // deployment CRUD and rqlite access
	updateHandler *UpdateHandler     // reused for deploy paths, extraction, and process restarts
	logger        *zap.Logger
}
// NewRollbackHandler creates a new rollback handler wired with the deployment
// service and the update handler whose machinery it reuses.
func NewRollbackHandler(service *DeploymentService, updateHandler *UpdateHandler, logger *zap.Logger) *RollbackHandler {
	handler := new(RollbackHandler)
	handler.service = service
	handler.updateHandler = updateHandler
	handler.logger = logger
	return handler
}
// HandleRollback handles deployment rollback.
//
// Request body: {"name": "<deployment>", "version": <target>}. The target
// must be a strictly older version present in deployment_history; the
// rollback is applied as a NEW version on top of the current one.
func (h *RollbackHandler) HandleRollback(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// Comma-ok so a missing/mistyped context value yields a clean 500
	// instead of a panic.
	namespace, ok := ctx.Value("namespace").(string)
	if !ok || namespace == "" {
		http.Error(w, "Namespace not found in request context", http.StatusInternalServerError)
		return
	}
	var req struct {
		Name    string `json:"name"`
		Version int    `json:"version"`
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	if req.Name == "" {
		http.Error(w, "deployment name is required", http.StatusBadRequest)
		return
	}
	if req.Version <= 0 {
		http.Error(w, "version must be positive", http.StatusBadRequest)
		return
	}
	h.logger.Info("Rolling back deployment",
		zap.String("namespace", namespace),
		zap.String("name", req.Name),
		zap.Int("target_version", req.Version),
	)
	// Get current deployment
	current, err := h.service.GetDeployment(ctx, namespace, req.Name)
	if err != nil {
		if err == deployments.ErrDeploymentNotFound {
			http.Error(w, "Deployment not found", http.StatusNotFound)
		} else {
			http.Error(w, "Failed to get deployment", http.StatusInternalServerError)
		}
		return
	}
	// Only strictly older versions are valid rollback targets.
	if req.Version >= current.Version {
		http.Error(w, fmt.Sprintf("Cannot rollback to version %d, current version is %d", req.Version, current.Version), http.StatusBadRequest)
		return
	}
	// Get historical version
	history, err := h.getHistoricalVersion(ctx, current.ID, req.Version)
	if err != nil {
		http.Error(w, fmt.Sprintf("Version %d not found in history", req.Version), http.StatusNotFound)
		return
	}
	h.logger.Info("Found historical version",
		zap.String("deployment", req.Name),
		zap.Int("version", req.Version),
		zap.String("cid", history.ContentCID),
	)
	// Capture the pre-rollback version NOW: rollbackStatic/rollbackDynamic
	// mutate current in place, so current.Version holds the new version by
	// the time the response is built. (The original reported the new version
	// as "rolled_back_from".)
	fromVersion := current.Version
	// Perform rollback based on type
	var rolled *deployments.Deployment
	switch current.Type {
	case deployments.DeploymentTypeStatic, deployments.DeploymentTypeNextJSStatic:
		rolled, err = h.rollbackStatic(ctx, current, history)
	case deployments.DeploymentTypeNextJS, deployments.DeploymentTypeNodeJSBackend, deployments.DeploymentTypeGoBackend:
		rolled, err = h.rollbackDynamic(ctx, current, history)
	default:
		http.Error(w, "Unsupported deployment type", http.StatusBadRequest)
		return
	}
	if err != nil {
		h.logger.Error("Rollback failed", zap.Error(err))
		http.Error(w, fmt.Sprintf("Rollback failed: %v", err), http.StatusInternalServerError)
		return
	}
	// Return response
	resp := map[string]interface{}{
		"deployment_id":    rolled.ID,
		"name":             rolled.Name,
		"namespace":        rolled.Namespace,
		"status":           rolled.Status,
		"version":          rolled.Version,
		"rolled_back_from": fromVersion,
		"rolled_back_to":   req.Version,
		"content_cid":      rolled.ContentCID,
		"updated_at":       rolled.UpdatedAt,
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(resp)
}
// getHistoricalVersion looks up a specific version of a deployment in
// deployment_history, returning its content and build CIDs.
func (h *RollbackHandler) getHistoricalVersion(ctx context.Context, deploymentID string, version int) (*struct {
	ContentCID string
	BuildCID   string
}, error) {
	type historyRow struct {
		ContentCID string `db:"content_cid"`
		BuildCID   string `db:"build_cid"`
	}
	const query = `
		SELECT content_cid, build_cid
		FROM deployment_history
		WHERE deployment_id = ? AND version = ?
		LIMIT 1
	`
	var rows []historyRow
	if err := h.service.db.Query(ctx, &rows, query, deploymentID, version); err != nil {
		return nil, err
	}
	if len(rows) == 0 {
		return nil, fmt.Errorf("version not found")
	}
	result := &struct {
		ContentCID string
		BuildCID   string
	}{
		ContentCID: rows[0].ContentCID,
		BuildCID:   rows[0].BuildCID,
	}
	return result, nil
}
// rollbackStatic rolls back a static deployment by atomically swapping the
// stored content CID to the historical version's CID. The rollback itself is
// recorded as a NEW version (current.Version+1) in deployment_history with
// status "rolled_back".
func (h *RollbackHandler) rollbackStatic(ctx context.Context, current *deployments.Deployment, history *struct {
	ContentCID string
	BuildCID   string
}) (*deployments.Deployment, error) {
	// Atomic CID swap
	newVersion := current.Version + 1
	now := time.Now()
	query := `
		UPDATE deployments
		SET content_cid = ?, version = ?, updated_at = ?
		WHERE namespace = ? AND name = ?
	`
	_, err := h.service.db.Exec(ctx, query, history.ContentCID, newVersion, now, current.Namespace, current.Name)
	if err != nil {
		return nil, fmt.Errorf("failed to update deployment: %w", err)
	}
	// Record rollback in history
	historyQuery := `
		INSERT INTO deployment_history (
			id, deployment_id, version, content_cid, deployed_at, deployed_by, status, error_message, rollback_from_version
		) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
	`
	historyID := fmt.Sprintf("%s-v%d", current.ID, newVersion)
	// NOTE: &current.Version is read here, BEFORE current.Version is bumped
	// below, so the history row records the version we rolled back FROM.
	// Do not reorder these statements.
	_, err = h.service.db.Exec(ctx, historyQuery,
		historyID,
		current.ID,
		newVersion,
		history.ContentCID,
		now,
		current.Namespace,
		"rolled_back",
		"",
		&current.Version,
	)
	if err != nil {
		// Best effort: the rollback itself already succeeded, so a history
		// write failure is logged rather than returned.
		h.logger.Error("Failed to record rollback history", zap.Error(err))
	}
	// Reflect the new state in the in-memory deployment for the response.
	current.ContentCID = history.ContentCID
	current.Version = newVersion
	current.UpdatedAt = now
	h.logger.Info("Static deployment rolled back",
		zap.String("deployment", current.Name),
		zap.Int("new_version", newVersion),
		zap.String("cid", history.ContentCID),
	)
	return current, nil
}
// rollbackDynamic rolls back a dynamic (process-backed) deployment: the
// historical build is extracted from IPFS into a staging directory, swapped
// into place via directory renames, and the process is restarted. Each
// failure path attempts to restore the previous directory layout and process
// before returning.
func (h *RollbackHandler) rollbackDynamic(ctx context.Context, current *deployments.Deployment, history *struct {
	ContentCID string
	BuildCID   string
}) (*deployments.Deployment, error) {
	// Prefer the build artifact CID; fall back to the content CID.
	cid := history.BuildCID
	if cid == "" {
		cid = history.ContentCID
	}
	deployPath := h.updateHandler.nextjsHandler.baseDeployPath + "/" + current.Namespace + "/" + current.Name
	stagingPath := deployPath + ".rollback"
	// Extract historical version
	if err := h.updateHandler.nextjsHandler.extractFromIPFS(ctx, cid, stagingPath); err != nil {
		return nil, fmt.Errorf("failed to extract historical version: %w", err)
	}
	// Backup current
	oldPath := deployPath + ".old"
	if err := renameDirectory(deployPath, oldPath); err != nil {
		return nil, fmt.Errorf("failed to backup current: %w", err)
	}
	// Activate rollback
	if err := renameDirectory(stagingPath, deployPath); err != nil {
		// Best-effort restore of the previous layout; the rename error here
		// is intentionally ignored.
		renameDirectory(oldPath, deployPath)
		return nil, fmt.Errorf("failed to activate rollback: %w", err)
	}
	// Restart; on failure, revert the directories and restart the old build
	// (all best effort).
	if err := h.updateHandler.processManager.Restart(ctx, current); err != nil {
		renameDirectory(deployPath, stagingPath)
		renameDirectory(oldPath, deployPath)
		h.updateHandler.processManager.Restart(ctx, current)
		return nil, fmt.Errorf("failed to restart: %w", err)
	}
	// Wait for healthy; an unhealthy rollback is reverted the same way.
	// NOTE(review): on these revert paths the .rollback staging directory is
	// left behind on disk — confirm something cleans it up.
	if err := h.updateHandler.processManager.WaitForHealthy(ctx, current, 60*time.Second); err != nil {
		h.logger.Warn("Rollback unhealthy, reverting", zap.Error(err))
		renameDirectory(deployPath, stagingPath)
		renameDirectory(oldPath, deployPath)
		h.updateHandler.processManager.Restart(ctx, current)
		return nil, fmt.Errorf("rollback failed health check: %w", err)
	}
	// Update database. The rollback is already live on disk, so a DB failure
	// is logged rather than returned.
	newVersion := current.Version + 1
	now := time.Now()
	query := `
		UPDATE deployments
		SET build_cid = ?, version = ?, updated_at = ?
		WHERE namespace = ? AND name = ?
	`
	_, err := h.service.db.Exec(ctx, query, cid, newVersion, now, current.Namespace, current.Name)
	if err != nil {
		h.logger.Error("Failed to update database", zap.Error(err))
	}
	// Record rollback in history (best effort; errors deliberately ignored).
	historyQuery := `
		INSERT INTO deployment_history (
			id, deployment_id, version, build_cid, deployed_at, deployed_by, status, rollback_from_version
		) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
	`
	historyID := fmt.Sprintf("%s-v%d", current.ID, newVersion)
	// NOTE: &current.Version is read BEFORE the bump below, so the history
	// row records the version we rolled back FROM. Do not reorder.
	_, _ = h.service.db.Exec(ctx, historyQuery,
		historyID,
		current.ID,
		newVersion,
		cid,
		now,
		current.Namespace,
		"rolled_back",
		&current.Version,
	)
	// Cleanup the backup of the replaced build.
	removeDirectory(oldPath)
	current.BuildCID = cid
	current.Version = newVersion
	current.UpdatedAt = now
	h.logger.Info("Dynamic deployment rolled back",
		zap.String("deployment", current.Name),
		zap.Int("new_version", newVersion),
	)
	return current, nil
}
// HandleListVersions lists all versions of a deployment.
//
// Query parameter "name" selects the deployment; up to the 50 most recent
// deployment_history rows are returned, newest first, with the currently
// active version flagged via "is_current".
func (h *RollbackHandler) HandleListVersions(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// Comma-ok so a missing/mistyped context value yields a clean 500
	// instead of a panic.
	namespace, ok := ctx.Value("namespace").(string)
	if !ok || namespace == "" {
		http.Error(w, "Namespace not found in request context", http.StatusInternalServerError)
		return
	}
	name := r.URL.Query().Get("name")
	if name == "" {
		http.Error(w, "name query parameter is required", http.StatusBadRequest)
		return
	}
	// Get deployment. Distinguish "not found" from internal errors to match
	// HandleRollback (the original collapsed both into a 404).
	deployment, err := h.service.GetDeployment(ctx, namespace, name)
	if err != nil {
		if err == deployments.ErrDeploymentNotFound {
			http.Error(w, "Deployment not found", http.StatusNotFound)
		} else {
			http.Error(w, "Failed to get deployment", http.StatusInternalServerError)
		}
		return
	}
	// Query history
	type versionRow struct {
		Version    int       `db:"version"`
		ContentCID string    `db:"content_cid"`
		BuildCID   string    `db:"build_cid"`
		DeployedAt time.Time `db:"deployed_at"`
		DeployedBy string    `db:"deployed_by"`
		Status     string    `db:"status"`
	}
	var rows []versionRow
	query := `
		SELECT version, content_cid, build_cid, deployed_at, deployed_by, status
		FROM deployment_history
		WHERE deployment_id = ?
		ORDER BY version DESC
		LIMIT 50
	`
	err = h.service.db.Query(ctx, &rows, query, deployment.ID)
	if err != nil {
		http.Error(w, "Failed to query history", http.StatusInternalServerError)
		return
	}
	versions := make([]map[string]interface{}, len(rows))
	for i, row := range rows {
		versions[i] = map[string]interface{}{
			"version":     row.Version,
			"content_cid": row.ContentCID,
			"build_cid":   row.BuildCID,
			"deployed_at": row.DeployedAt,
			"deployed_by": row.DeployedBy,
			"status":      row.Status,
			"is_current":  row.Version == deployment.Version,
		}
	}
	resp := map[string]interface{}{
		"deployment_id":   deployment.ID,
		"name":            deployment.Name,
		"current_version": deployment.Version,
		"versions":        versions,
		"total":           len(versions),
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(resp)
}

View File

@ -0,0 +1,265 @@
package deployments
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/DeBrosOfficial/network/pkg/deployments"
"github.com/DeBrosOfficial/network/pkg/rqlite"
"github.com/google/uuid"
"go.uber.org/zap"
)
// DeploymentService manages deployment operations
type DeploymentService struct {
	db              rqlite.Client                // distributed SQL store for deployments, history, and DNS records
	homeNodeManager *deployments.HomeNodeManager // assigns namespaces to home nodes
	portAllocator   *deployments.PortAllocator   // allocates per-node ports for process-backed deployments
	logger          *zap.Logger
}
// NewDeploymentService wires a DeploymentService with its database client,
// home-node manager, port allocator, and logger.
func NewDeploymentService(
	db rqlite.Client,
	homeNodeManager *deployments.HomeNodeManager,
	portAllocator *deployments.PortAllocator,
	logger *zap.Logger,
) *DeploymentService {
	svc := new(DeploymentService)
	svc.db = db
	svc.homeNodeManager = homeNodeManager
	svc.portAllocator = portAllocator
	svc.logger = logger
	return svc
}
// CreateDeployment creates a new deployment: assigns a home node (if not
// already set), allocates a port for process-backed types, persists the row,
// and records the initial history entry. The deployment argument is mutated
// in place (HomeNodeID, Port).
//
// NOTE(review): if the INSERT fails after AllocatePort succeeds, the
// allocated port is never released here — confirm the allocator reclaims
// orphaned ports elsewhere.
func (s *DeploymentService) CreateDeployment(ctx context.Context, deployment *deployments.Deployment) error {
	// Assign home node if not already assigned
	if deployment.HomeNodeID == "" {
		homeNodeID, err := s.homeNodeManager.AssignHomeNode(ctx, deployment.Namespace)
		if err != nil {
			return fmt.Errorf("failed to assign home node: %w", err)
		}
		deployment.HomeNodeID = homeNodeID
	}
	// Allocate port for dynamic deployments; static types are served straight
	// from IPFS and need no listener.
	if deployment.Type != deployments.DeploymentTypeStatic && deployment.Type != deployments.DeploymentTypeNextJSStatic {
		port, err := s.portAllocator.AllocatePort(ctx, deployment.HomeNodeID, deployment.ID)
		if err != nil {
			return fmt.Errorf("failed to allocate port: %w", err)
		}
		deployment.Port = port
	}
	// Serialize environment variables as JSON for the TEXT column.
	envJSON, err := json.Marshal(deployment.Environment)
	if err != nil {
		return fmt.Errorf("failed to marshal environment: %w", err)
	}
	// Insert deployment
	query := `
		INSERT INTO deployments (
			id, namespace, name, type, version, status,
			content_cid, build_cid, home_node_id, port, subdomain, environment,
			memory_limit_mb, cpu_limit_percent, disk_limit_mb,
			health_check_path, health_check_interval, restart_policy, max_restart_count,
			created_at, updated_at, deployed_by
		) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
	`
	_, err = s.db.Exec(ctx, query,
		deployment.ID, deployment.Namespace, deployment.Name, deployment.Type, deployment.Version, deployment.Status,
		deployment.ContentCID, deployment.BuildCID, deployment.HomeNodeID, deployment.Port, deployment.Subdomain, string(envJSON),
		deployment.MemoryLimitMB, deployment.CPULimitPercent, deployment.DiskLimitMB,
		deployment.HealthCheckPath, deployment.HealthCheckInterval, deployment.RestartPolicy, deployment.MaxRestartCount,
		deployment.CreatedAt, deployment.UpdatedAt, deployment.DeployedBy,
	)
	if err != nil {
		return fmt.Errorf("failed to insert deployment: %w", err)
	}
	// Record in history (best effort; recordHistory logs its own failures).
	s.recordHistory(ctx, deployment, "deployed")
	s.logger.Info("Deployment created",
		zap.String("id", deployment.ID),
		zap.String("namespace", deployment.Namespace),
		zap.String("name", deployment.Name),
		zap.String("type", string(deployment.Type)),
		zap.String("home_node", deployment.HomeNodeID),
		zap.Int("port", deployment.Port),
	)
	return nil
}
// GetDeployment retrieves a deployment by namespace and name, returning
// deployments.ErrDeploymentNotFound when no row matches.
func (s *DeploymentService) GetDeployment(ctx context.Context, namespace, name string) (*deployments.Deployment, error) {
	type deploymentRow struct {
		ID                  string    `db:"id"`
		Namespace           string    `db:"namespace"`
		Name                string    `db:"name"`
		Type                string    `db:"type"`
		Version             int       `db:"version"`
		Status              string    `db:"status"`
		ContentCID          string    `db:"content_cid"`
		BuildCID            string    `db:"build_cid"`
		HomeNodeID          string    `db:"home_node_id"`
		Port                int       `db:"port"`
		Subdomain           string    `db:"subdomain"`
		Environment         string    `db:"environment"`
		MemoryLimitMB       int       `db:"memory_limit_mb"`
		CPULimitPercent     int       `db:"cpu_limit_percent"`
		DiskLimitMB         int       `db:"disk_limit_mb"`
		HealthCheckPath     string    `db:"health_check_path"`
		HealthCheckInterval int       `db:"health_check_interval"`
		RestartPolicy       string    `db:"restart_policy"`
		MaxRestartCount     int       `db:"max_restart_count"`
		CreatedAt           time.Time `db:"created_at"`
		UpdatedAt           time.Time `db:"updated_at"`
		DeployedBy          string    `db:"deployed_by"`
	}
	var rows []deploymentRow
	// Explicit column list (same columns CreateDeployment inserts) instead of
	// SELECT *: resilient to schema additions and avoids fetching columns the
	// struct does not map.
	query := `
		SELECT id, namespace, name, type, version, status,
			content_cid, build_cid, home_node_id, port, subdomain, environment,
			memory_limit_mb, cpu_limit_percent, disk_limit_mb,
			health_check_path, health_check_interval, restart_policy, max_restart_count,
			created_at, updated_at, deployed_by
		FROM deployments
		WHERE namespace = ? AND name = ?
		LIMIT 1
	`
	err := s.db.Query(ctx, &rows, query, namespace, name)
	if err != nil {
		return nil, fmt.Errorf("failed to query deployment: %w", err)
	}
	if len(rows) == 0 {
		return nil, deployments.ErrDeploymentNotFound
	}
	row := rows[0]
	// A corrupt environment blob degrades to an empty map rather than failing
	// the whole lookup.
	var env map[string]string
	if err := json.Unmarshal([]byte(row.Environment), &env); err != nil {
		env = make(map[string]string)
	}
	return &deployments.Deployment{
		ID:                  row.ID,
		Namespace:           row.Namespace,
		Name:                row.Name,
		Type:                deployments.DeploymentType(row.Type),
		Version:             row.Version,
		Status:              deployments.DeploymentStatus(row.Status),
		ContentCID:          row.ContentCID,
		BuildCID:            row.BuildCID,
		HomeNodeID:          row.HomeNodeID,
		Port:                row.Port,
		Subdomain:           row.Subdomain,
		Environment:         env,
		MemoryLimitMB:       row.MemoryLimitMB,
		CPULimitPercent:     row.CPULimitPercent,
		DiskLimitMB:         row.DiskLimitMB,
		HealthCheckPath:     row.HealthCheckPath,
		HealthCheckInterval: row.HealthCheckInterval,
		RestartPolicy:       deployments.RestartPolicy(row.RestartPolicy),
		MaxRestartCount:     row.MaxRestartCount,
		CreatedAt:           row.CreatedAt,
		UpdatedAt:           row.UpdatedAt,
		DeployedBy:          row.DeployedBy,
	}, nil
}
// CreateDNSRecords provisions DNS entries for a deployment: a node-specific
// A record (<name>.<node>.debros.network.) and, when a subdomain is set, a
// load-balanced A record (<subdomain>.debros.network.). Record-level upsert
// failures are logged and swallowed (best effort); only a failure to resolve
// the node IP is returned.
func (s *DeploymentService) CreateDNSRecords(ctx context.Context, deployment *deployments.Deployment) error {
	nodeIP, err := s.getNodeIP(ctx, deployment.HomeNodeID)
	if err != nil {
		s.logger.Error("Failed to get node IP", zap.Error(err))
		return err
	}
	nodeFQDN := fmt.Sprintf("%s.%s.debros.network.", deployment.Name, deployment.HomeNodeID)
	if recErr := s.createDNSRecord(ctx, nodeFQDN, "A", nodeIP, deployment.Namespace, deployment.ID); recErr != nil {
		s.logger.Error("Failed to create node-specific DNS record", zap.Error(recErr))
	}
	if deployment.Subdomain == "" {
		return nil
	}
	lbFQDN := fmt.Sprintf("%s.debros.network.", deployment.Subdomain)
	if recErr := s.createDNSRecord(ctx, lbFQDN, "A", nodeIP, deployment.Namespace, deployment.ID); recErr != nil {
		s.logger.Error("Failed to create load-balanced DNS record", zap.Error(recErr))
	}
	return nil
}
// createDNSRecord upserts one DNS record (TTL 300s, created_by "system").
// On FQDN conflict only value and updated_at are refreshed.
func (s *DeploymentService) createDNSRecord(ctx context.Context, fqdn, recordType, value, namespace, deploymentID string) error {
	const query = `
		INSERT INTO dns_records (fqdn, record_type, value, ttl, namespace, deployment_id, is_active, created_at, updated_at, created_by)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
		ON CONFLICT(fqdn) DO UPDATE SET value = excluded.value, updated_at = excluded.updated_at
	`
	timestamp := time.Now()
	_, execErr := s.db.Exec(ctx, query, fqdn, recordType, value, 300, namespace, deploymentID, true, timestamp, timestamp, "system")
	return execErr
}
// getNodeIP resolves a node ID to its IP address via the dns_nodes table,
// erroring when the node is unknown.
func (s *DeploymentService) getNodeIP(ctx context.Context, nodeID string) (string, error) {
	type nodeRow struct {
		IPAddress string `db:"ip_address"`
	}
	const query = `SELECT ip_address FROM dns_nodes WHERE id = ? LIMIT 1`
	var rows []nodeRow
	if err := s.db.Query(ctx, &rows, query, nodeID); err != nil {
		return "", err
	}
	if len(rows) == 0 {
		return "", fmt.Errorf("node not found: %s", nodeID)
	}
	return rows[0].IPAddress, nil
}
// BuildDeploymentURLs returns every HTTPS URL the deployment is reachable at:
// always the node-specific URL, plus the load-balanced subdomain URL when a
// subdomain is configured.
func (s *DeploymentService) BuildDeploymentURLs(deployment *deployments.Deployment) []string {
	nodeURL := fmt.Sprintf("https://%s.%s.debros.network", deployment.Name, deployment.HomeNodeID)
	result := []string{nodeURL}
	if deployment.Subdomain == "" {
		return result
	}
	return append(result, fmt.Sprintf("https://%s.debros.network", deployment.Subdomain))
}
// recordHistory appends a deployment_history row capturing the deployment's
// current version/CIDs with the given status. Failures are logged and
// swallowed (best effort).
func (s *DeploymentService) recordHistory(ctx context.Context, deployment *deployments.Deployment, status string) {
	const query = `
		INSERT INTO deployment_history (id, deployment_id, version, content_cid, build_cid, deployed_at, deployed_by, status)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?)
	`
	_, err := s.db.Exec(ctx, query,
		uuid.New().String(),
		deployment.ID,
		deployment.Version,
		deployment.ContentCID,
		deployment.BuildCID,
		time.Now(),
		deployment.DeployedBy,
		status,
	)
	if err != nil {
		s.logger.Error("Failed to record history", zap.Error(err))
	}
}

View File

@ -0,0 +1,217 @@
package deployments
import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"path/filepath"
	"strings"
	"time"

	"github.com/DeBrosOfficial/network/pkg/deployments"
	"github.com/DeBrosOfficial/network/pkg/ipfs"
	"github.com/google/uuid"
	"go.uber.org/zap"
)
// StaticDeploymentHandler handles static site deployments
type StaticDeploymentHandler struct {
	service    *DeploymentService // deployment CRUD, DNS provisioning, URL building
	ipfsClient ipfs.IPFSClient    // content storage and retrieval
	logger     *zap.Logger
}
// NewStaticDeploymentHandler wires a StaticDeploymentHandler with the
// deployment service, IPFS client, and logger.
func NewStaticDeploymentHandler(service *DeploymentService, ipfsClient ipfs.IPFSClient, logger *zap.Logger) *StaticDeploymentHandler {
	handler := new(StaticDeploymentHandler)
	handler.service = service
	handler.ipfsClient = ipfsClient
	handler.logger = logger
	return handler
}
// HandleUpload handles static site upload and deployment.
//
// Expects a multipart form with "name" (required), an optional "subdomain",
// and a "tarball" file (.tar.gz/.tgz). The archive is stored in IPFS, the
// deployment is registered as Active, DNS records are provisioned in the
// background, and a JSON summary is returned with 201 Created.
//
// Requires "context" in the file's import block.
func (h *StaticDeploymentHandler) HandleUpload(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// Comma-ok so a missing/mistyped context value yields a clean 500
	// instead of a panic.
	namespace, ok := ctx.Value("namespace").(string)
	if !ok || namespace == "" {
		http.Error(w, "Namespace not found in request context", http.StatusInternalServerError)
		return
	}
	// Parse multipart form (100MB max).
	if err := r.ParseMultipartForm(100 << 20); err != nil {
		http.Error(w, "Failed to parse form", http.StatusBadRequest)
		return
	}
	// Get deployment metadata
	name := r.FormValue("name")
	subdomain := r.FormValue("subdomain")
	if name == "" {
		http.Error(w, "Deployment name is required", http.StatusBadRequest)
		return
	}
	// Get tarball file
	file, header, err := r.FormFile("tarball")
	if err != nil {
		http.Error(w, "Tarball file is required", http.StatusBadRequest)
		return
	}
	defer file.Close()
	// Validate file extension
	if !strings.HasSuffix(header.Filename, ".tar.gz") && !strings.HasSuffix(header.Filename, ".tgz") {
		http.Error(w, "File must be a .tar.gz or .tgz archive", http.StatusBadRequest)
		return
	}
	h.logger.Info("Uploading static site",
		zap.String("namespace", namespace),
		zap.String("name", name),
		zap.String("filename", header.Filename),
		zap.Int64("size", header.Size),
	)
	// Upload to IPFS
	addResp, err := h.ipfsClient.Add(ctx, file, header.Filename)
	if err != nil {
		h.logger.Error("Failed to upload to IPFS", zap.Error(err))
		http.Error(w, "Failed to upload content", http.StatusInternalServerError)
		return
	}
	cid := addResp.Cid
	h.logger.Info("Content uploaded to IPFS",
		zap.String("cid", cid),
		zap.String("namespace", namespace),
		zap.String("name", name),
	)
	// Create deployment; static content is served directly from IPFS so the
	// record goes straight to Active.
	deployment := &deployments.Deployment{
		ID:          uuid.New().String(),
		Namespace:   namespace,
		Name:        name,
		Type:        deployments.DeploymentTypeStatic,
		Version:     1,
		Status:      deployments.DeploymentStatusActive,
		ContentCID:  cid,
		Subdomain:   subdomain,
		Environment: make(map[string]string),
		CreatedAt:   time.Now(),
		UpdatedAt:   time.Now(),
		DeployedBy:  namespace,
	}
	// Save deployment
	if err := h.service.CreateDeployment(ctx, deployment); err != nil {
		h.logger.Error("Failed to create deployment", zap.Error(err))
		http.Error(w, "Failed to create deployment", http.StatusInternalServerError)
		return
	}
	// Create DNS records in the background. Use context.Background() rather
	// than the request context: r.Context() is canceled as soon as this
	// handler returns, which would abort the goroutine's DB writes.
	go h.service.CreateDNSRecords(context.Background(), deployment)
	// Build URLs
	urls := h.service.BuildDeploymentURLs(deployment)
	// Return response
	resp := map[string]interface{}{
		"deployment_id": deployment.ID,
		"name":          deployment.Name,
		"namespace":     deployment.Namespace,
		"status":        deployment.Status,
		"content_cid":   deployment.ContentCID,
		"urls":          urls,
		"version":       deployment.Version,
		"created_at":    deployment.CreatedAt,
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusCreated)
	json.NewEncoder(w).Encode(resp)
}
// HandleServe serves static content from IPFS for a deployment. Resolution
// order: the literal path, then <path>/index.html for directories, then the
// site root /index.html as an SPA fallback.
func (h *StaticDeploymentHandler) HandleServe(w http.ResponseWriter, r *http.Request, deployment *deployments.Deployment) {
	ctx := r.Context()
	// Get requested path
	requestPath := r.URL.Path
	if requestPath == "" || requestPath == "/" {
		requestPath = "/index.html"
	}
	// Build IPFS path
	ipfsPath := fmt.Sprintf("/ipfs/%s%s", deployment.ContentCID, requestPath)
	h.logger.Debug("Serving static content",
		zap.String("deployment", deployment.Name),
		zap.String("path", requestPath),
		zap.String("ipfs_path", ipfsPath),
	)
	// servedPath tracks which file is actually returned so the Content-Type
	// header matches the body. (The original always derived the type from the
	// requested path, mislabeling index.html served by the fallbacks — e.g. a
	// missing /app.js would return HTML labeled application/javascript.)
	servedPath := requestPath
	// Try to get the file
	reader, err := h.ipfsClient.Get(ctx, ipfsPath, "")
	if err != nil {
		// Try with /index.html for directories
		if !strings.HasSuffix(requestPath, ".html") {
			indexPath := fmt.Sprintf("/ipfs/%s%s/index.html", deployment.ContentCID, requestPath)
			reader, err = h.ipfsClient.Get(ctx, indexPath, "")
			if err == nil {
				servedPath = "/index.html"
			}
		}
		// Fallback to /index.html for SPA routing
		if err != nil {
			fallbackPath := fmt.Sprintf("/ipfs/%s/index.html", deployment.ContentCID)
			reader, err = h.ipfsClient.Get(ctx, fallbackPath, "")
			if err != nil {
				h.logger.Error("Failed to serve content", zap.Error(err))
				http.NotFound(w, r)
				return
			}
			servedPath = "/index.html"
		}
	}
	defer reader.Close()
	// Detect content type from the file actually being served
	contentType := detectContentType(servedPath)
	w.Header().Set("Content-Type", contentType)
	w.Header().Set("Cache-Control", "public, max-age=3600")
	// Copy content to response
	if _, err := io.Copy(w, reader); err != nil {
		h.logger.Error("Failed to write response", zap.Error(err))
	}
}
// detectContentType maps a filename's extension (case-insensitively) to its
// MIME type, defaulting to application/octet-stream for unknown extensions.
func detectContentType(filename string) string {
	switch strings.ToLower(filepath.Ext(filename)) {
	case ".html":
		return "text/html; charset=utf-8"
	case ".css":
		return "text/css; charset=utf-8"
	case ".js":
		return "application/javascript; charset=utf-8"
	case ".json":
		return "application/json"
	case ".xml":
		return "application/xml"
	case ".png":
		return "image/png"
	case ".jpg", ".jpeg":
		return "image/jpeg"
	case ".gif":
		return "image/gif"
	case ".svg":
		return "image/svg+xml"
	case ".ico":
		return "image/x-icon"
	case ".woff":
		return "font/woff"
	case ".woff2":
		return "font/woff2"
	case ".ttf":
		return "font/ttf"
	case ".eot":
		return "application/vnd.ms-fontobject"
	case ".txt":
		return "text/plain; charset=utf-8"
	case ".pdf":
		return "application/pdf"
	case ".zip":
		return "application/zip"
	default:
		return "application/octet-stream"
	}
}

View File

@ -0,0 +1,276 @@
package deployments
import (
"context"
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/DeBrosOfficial/network/pkg/deployments"
"go.uber.org/zap"
)
// ProcessManager interface for process operations
type ProcessManager interface {
	// Restart stops and restarts the deployment's process.
	Restart(ctx context.Context, deployment *deployments.Deployment) error
	// WaitForHealthy blocks until the deployment's health check passes or the
	// timeout elapses, returning an error on timeout.
	WaitForHealthy(ctx context.Context, deployment *deployments.Deployment, timeout time.Duration) error
}
// UpdateHandler handles deployment updates
type UpdateHandler struct {
	service        *DeploymentService        // deployment CRUD and rqlite access
	staticHandler  *StaticDeploymentHandler  // reused for static uploads (IPFS client)
	nextjsHandler  *NextJSHandler            // reused for dynamic uploads, paths, and extraction
	processManager ProcessManager            // restarts and health-checks dynamic deployments
	logger         *zap.Logger
}
// NewUpdateHandler wires an UpdateHandler with the deployment service, the
// static and Next.js handlers it reuses, the process manager, and a logger.
func NewUpdateHandler(
	service *DeploymentService,
	staticHandler *StaticDeploymentHandler,
	nextjsHandler *NextJSHandler,
	processManager ProcessManager,
	logger *zap.Logger,
) *UpdateHandler {
	handler := new(UpdateHandler)
	handler.service = service
	handler.staticHandler = staticHandler
	handler.nextjsHandler = nextjsHandler
	handler.processManager = processManager
	handler.logger = logger
	return handler
}
// HandleUpdate handles deployment updates.
//
// Expects a multipart form with "name" and a "tarball" archive. Static
// deployments are updated via a zero-downtime CID swap; dynamic deployments
// via extract-and-graceful-restart.
func (h *UpdateHandler) HandleUpdate(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// Comma-ok so a missing/mistyped context value yields a clean 500
	// instead of a panic.
	namespace, ok := ctx.Value("namespace").(string)
	if !ok || namespace == "" {
		http.Error(w, "Namespace not found in request context", http.StatusInternalServerError)
		return
	}
	// Parse multipart form
	if err := r.ParseMultipartForm(200 << 20); err != nil {
		http.Error(w, "Failed to parse form", http.StatusBadRequest)
		return
	}
	name := r.FormValue("name")
	if name == "" {
		http.Error(w, "Deployment name is required", http.StatusBadRequest)
		return
	}
	// Get existing deployment
	existing, err := h.service.GetDeployment(ctx, namespace, name)
	if err != nil {
		if err == deployments.ErrDeploymentNotFound {
			http.Error(w, "Deployment not found", http.StatusNotFound)
		} else {
			http.Error(w, "Failed to get deployment", http.StatusInternalServerError)
		}
		return
	}
	h.logger.Info("Updating deployment",
		zap.String("namespace", namespace),
		zap.String("name", name),
		zap.Int("current_version", existing.Version),
	)
	// Capture the pre-update version NOW: updateStatic/updateDynamic mutate
	// existing in place and return the same pointer, so existing.Version
	// holds the NEW version afterwards. (The original reported the new
	// version as "previous_version".)
	previousVersion := existing.Version
	// Handle update based on deployment type
	var updated *deployments.Deployment
	switch existing.Type {
	case deployments.DeploymentTypeStatic, deployments.DeploymentTypeNextJSStatic:
		updated, err = h.updateStatic(ctx, existing, r)
	case deployments.DeploymentTypeNextJS, deployments.DeploymentTypeNodeJSBackend, deployments.DeploymentTypeGoBackend:
		updated, err = h.updateDynamic(ctx, existing, r)
	default:
		http.Error(w, "Unsupported deployment type", http.StatusBadRequest)
		return
	}
	if err != nil {
		h.logger.Error("Update failed", zap.Error(err))
		http.Error(w, fmt.Sprintf("Update failed: %v", err), http.StatusInternalServerError)
		return
	}
	// Return response
	resp := map[string]interface{}{
		"deployment_id":    updated.ID,
		"name":             updated.Name,
		"namespace":        updated.Namespace,
		"status":           updated.Status,
		"version":          updated.Version,
		"previous_version": previousVersion,
		"content_cid":      updated.ContentCID,
		"updated_at":       updated.UpdatedAt,
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(resp)
}
// updateStatic updates a static deployment via a zero-downtime CID swap: the
// new archive is added to IPFS and the deployment row's content_cid/version
// are swapped in a single UPDATE. The existing deployment is mutated in place
// and returned.
func (h *UpdateHandler) updateStatic(ctx context.Context, existing *deployments.Deployment, r *http.Request) (*deployments.Deployment, error) {
	// Get new tarball
	file, header, err := r.FormFile("tarball")
	if err != nil {
		return nil, fmt.Errorf("tarball file required for update")
	}
	defer file.Close()
	// Upload to IPFS
	addResp, err := h.staticHandler.ipfsClient.Add(ctx, file, header.Filename)
	if err != nil {
		return nil, fmt.Errorf("failed to upload to IPFS: %w", err)
	}
	cid := addResp.Cid
	h.logger.Info("New content uploaded",
		zap.String("deployment", existing.Name),
		zap.String("old_cid", existing.ContentCID),
		zap.String("new_cid", cid),
	)
	// Atomic CID swap
	newVersion := existing.Version + 1
	now := time.Now()
	query := `
		UPDATE deployments
		SET content_cid = ?, version = ?, updated_at = ?
		WHERE namespace = ? AND name = ?
	`
	_, err = h.service.db.Exec(ctx, query, cid, newVersion, now, existing.Namespace, existing.Name)
	if err != nil {
		return nil, fmt.Errorf("failed to update deployment: %w", err)
	}
	// Apply the new state BEFORE recording history so deployment_history gets
	// a row for the NEW version. (The original recorded the pre-update state,
	// which duplicated the previous version's row and never recorded the new
	// version — making it unrecoverable by later rollbacks.)
	existing.ContentCID = cid
	existing.Version = newVersion
	existing.UpdatedAt = now
	// Record in history
	h.service.recordHistory(ctx, existing, "updated")
	h.logger.Info("Static deployment updated",
		zap.String("deployment", existing.Name),
		zap.Int("version", newVersion),
		zap.String("cid", cid),
	)
	return existing, nil
}
// updateDynamic updates a dynamic deployment (graceful restart)
//
// Flow: upload the new build to IPFS, extract it to a "<deploy>.new"
// staging directory, swap directories (current -> ".old", staging ->
// current), restart the managed process and wait up to 60s for it to pass
// its health check. On restart or health failure the directory swap is
// reversed and the previous process is restarted.
//
// NOTE(review): unlike updateStatic, a failed metadata UPDATE here is only
// logged, so on-disk state and the database can diverge — confirm intended.
// NOTE(review): renameDirectory/removeDirectory below are no-op stubs, so
// none of the directory operations currently take effect on disk.
func (h *UpdateHandler) updateDynamic(ctx context.Context, existing *deployments.Deployment, r *http.Request) (*deployments.Deployment, error) {
	// Get new tarball
	file, header, err := r.FormFile("tarball")
	if err != nil {
		return nil, fmt.Errorf("tarball file required for update")
	}
	defer file.Close()
	// Upload to IPFS
	addResp, err := h.nextjsHandler.ipfsClient.Add(ctx, file, header.Filename)
	if err != nil {
		return nil, fmt.Errorf("failed to upload to IPFS: %w", err)
	}
	cid := addResp.Cid
	h.logger.Info("New build uploaded",
		zap.String("deployment", existing.Name),
		zap.String("old_cid", existing.BuildCID),
		zap.String("new_cid", cid),
	)
	// Extract to staging directory (<deployPath>.new)
	stagingPath := fmt.Sprintf("%s.new", h.nextjsHandler.baseDeployPath+"/"+existing.Namespace+"/"+existing.Name)
	if err := h.nextjsHandler.extractFromIPFS(ctx, cid, stagingPath); err != nil {
		return nil, fmt.Errorf("failed to extract new build: %w", err)
	}
	// Atomic swap: rename old to .old, new to current
	deployPath := h.nextjsHandler.baseDeployPath + "/" + existing.Namespace + "/" + existing.Name
	oldPath := deployPath + ".old"
	// Backup current
	if err := renameDirectory(deployPath, oldPath); err != nil {
		return nil, fmt.Errorf("failed to backup current deployment: %w", err)
	}
	// Activate new
	if err := renameDirectory(stagingPath, deployPath); err != nil {
		// Rollback: restore the backed-up directory
		renameDirectory(oldPath, deployPath)
		return nil, fmt.Errorf("failed to activate new deployment: %w", err)
	}
	// Restart process
	if err := h.processManager.Restart(ctx, existing); err != nil {
		// Rollback: move new build back to staging, restore old, restart it
		renameDirectory(deployPath, stagingPath)
		renameDirectory(oldPath, deployPath)
		h.processManager.Restart(ctx, existing)
		return nil, fmt.Errorf("failed to restart process: %w", err)
	}
	// Wait for healthy
	if err := h.processManager.WaitForHealthy(ctx, existing, 60*time.Second); err != nil {
		h.logger.Warn("Deployment unhealthy after update, rolling back", zap.Error(err))
		// Rollback (same sequence as the restart-failure path)
		renameDirectory(deployPath, stagingPath)
		renameDirectory(oldPath, deployPath)
		h.processManager.Restart(ctx, existing)
		return nil, fmt.Errorf("new deployment failed health check, rolled back: %w", err)
	}
	// Update database (error only logged — see NOTE above)
	newVersion := existing.Version + 1
	now := time.Now()
	query := `
	UPDATE deployments
	SET build_cid = ?, version = ?, updated_at = ?
	WHERE namespace = ? AND name = ?
	`
	_, err = h.service.db.Exec(ctx, query, cid, newVersion, now, existing.Namespace, existing.Name)
	if err != nil {
		h.logger.Error("Failed to update database", zap.Error(err))
	}
	// Record in history
	h.service.recordHistory(ctx, existing, "updated")
	// Cleanup old
	removeDirectory(oldPath)
	existing.BuildCID = cid
	existing.Version = newVersion
	existing.UpdatedAt = now
	h.logger.Info("Dynamic deployment updated",
		zap.String("deployment", existing.Name),
		zap.Int("version", newVersion),
		zap.String("cid", cid),
	)
	return existing, nil
}
// Helper functions (simplified - in production use os package)

// renameDirectory is a placeholder for os.Rename.
//
// NOTE(review): this is a no-op that always succeeds, which means every
// "atomic swap", backup and rollback in updateDynamic silently does nothing
// on disk. Wire this to os.Rename before relying on update/rollback.
func renameDirectory(old, new string) error {
	// os.Rename(old, new)
	return nil
}
// removeDirectory is a placeholder for os.RemoveAll.
//
// NOTE(review): no-op stub — old deployment directories are never actually
// cleaned up until this is wired to os.RemoveAll.
func removeDirectory(path string) error {
	// os.RemoveAll(path)
	return nil
}

View File

@ -0,0 +1,198 @@
package sqlite
import (
"context"
"encoding/json"
"net/http"
"os"
"time"
"github.com/DeBrosOfficial/network/pkg/ipfs"
"go.uber.org/zap"
)
// BackupHandler handles database backups: it streams a namespace's SQLite
// file to IPFS and records the resulting CID in RQLite metadata.
type BackupHandler struct {
	sqliteHandler *SQLiteHandler  // source of database metadata and the RQLite client
	ipfsClient    ipfs.IPFSClient // destination for backup uploads
	logger        *zap.Logger
}
// NewBackupHandler wires a backup handler to the SQLite metadata handler,
// the IPFS client used for uploads, and a logger.
func NewBackupHandler(sqliteHandler *SQLiteHandler, ipfsClient ipfs.IPFSClient, logger *zap.Logger) *BackupHandler {
	handler := &BackupHandler{}
	handler.sqliteHandler = sqliteHandler
	handler.ipfsClient = ipfsClient
	handler.logger = logger
	return handler
}
// BackupDatabase backs up a database to IPFS
//
// POST body: {"database_name": "..."}. The namespace is taken from the
// request context (set by upstream auth middleware). The on-disk SQLite
// file is streamed to IPFS, the returned CID is stored on the database row,
// and a history row is recorded.
//
// NOTE(review): ctx.Value("namespace").(string) panics if the middleware
// did not set the key — confirm every route to this handler is wrapped.
// NOTE(review): the .db file is copied while potentially live in WAL mode;
// without copying the -wal file (or using the SQLite backup API) the
// snapshot may be missing recent writes — verify acceptable.
func (h *BackupHandler) BackupDatabase(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	namespace := ctx.Value("namespace").(string)
	var req struct {
		DatabaseName string `json:"database_name"`
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	if req.DatabaseName == "" {
		http.Error(w, "database_name is required", http.StatusBadRequest)
		return
	}
	h.logger.Info("Backing up database",
		zap.String("namespace", namespace),
		zap.String("database", req.DatabaseName),
	)
	// Get database metadata (includes the on-disk file path)
	dbMeta, err := h.sqliteHandler.getDatabaseRecord(ctx, namespace, req.DatabaseName)
	if err != nil {
		http.Error(w, "Database not found", http.StatusNotFound)
		return
	}
	filePath := dbMeta["file_path"].(string)
	// Check if file exists
	if _, err := os.Stat(filePath); os.IsNotExist(err) {
		http.Error(w, "Database file not found", http.StatusNotFound)
		return
	}
	// Open file for reading
	file, err := os.Open(filePath)
	if err != nil {
		h.logger.Error("Failed to open database file", zap.Error(err))
		http.Error(w, "Failed to open database file", http.StatusInternalServerError)
		return
	}
	defer file.Close()
	// Upload to IPFS
	addResp, err := h.ipfsClient.Add(ctx, file, req.DatabaseName+".db")
	if err != nil {
		h.logger.Error("Failed to upload to IPFS", zap.Error(err))
		http.Error(w, "Failed to backup database", http.StatusInternalServerError)
		return
	}
	cid := addResp.Cid
	// Update backup metadata on the live database row
	now := time.Now()
	query := `
	UPDATE namespace_sqlite_databases
	SET backup_cid = ?, last_backup_at = ?
	WHERE namespace = ? AND database_name = ?
	`
	_, err = h.sqliteHandler.db.Exec(ctx, query, cid, now, namespace, req.DatabaseName)
	if err != nil {
		h.logger.Error("Failed to update backup metadata", zap.Error(err))
		http.Error(w, "Failed to update backup metadata", http.StatusInternalServerError)
		return
	}
	// Record backup in history (best-effort; errors logged inside)
	h.recordBackup(ctx, dbMeta["id"].(string), cid)
	h.logger.Info("Database backed up",
		zap.String("namespace", namespace),
		zap.String("database", req.DatabaseName),
		zap.String("cid", cid),
	)
	// Return response
	resp := map[string]interface{}{
		"database_name": req.DatabaseName,
		"backup_cid":    cid,
		"backed_up_at":  now,
		"ipfs_url":      "https://ipfs.io/ipfs/" + cid,
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(resp)
}
// recordBackup records a backup in history
//
// The INSERT..SELECT copies size_bytes from the live database row, so the
// recorded size reflects the row at record time, not necessarily the exact
// size of the uploaded snapshot. Errors are logged and swallowed: a failed
// history write must not fail the backup itself.
func (h *BackupHandler) recordBackup(ctx context.Context, dbID, cid string) {
	query := `
	INSERT INTO namespace_sqlite_backups (database_id, backup_cid, backed_up_at, size_bytes)
	SELECT id, ?, ?, size_bytes FROM namespace_sqlite_databases WHERE id = ?
	`
	_, err := h.sqliteHandler.db.Exec(ctx, query, cid, time.Now(), dbID)
	if err != nil {
		h.logger.Error("Failed to record backup", zap.Error(err))
	}
}
// ListBackups lists all backups for a database
//
// GET with ?database_name=... ; namespace comes from the request context.
// Returns at most the 50 most recent backups, newest first, each with a
// public IPFS gateway URL.
func (h *BackupHandler) ListBackups(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	namespace := ctx.Value("namespace").(string)
	databaseName := r.URL.Query().Get("database_name")
	if databaseName == "" {
		http.Error(w, "database_name query parameter is required", http.StatusBadRequest)
		return
	}
	// Get database ID (also authorizes: lookup is scoped to the namespace)
	dbMeta, err := h.sqliteHandler.getDatabaseRecord(ctx, namespace, databaseName)
	if err != nil {
		http.Error(w, "Database not found", http.StatusNotFound)
		return
	}
	dbID := dbMeta["id"].(string)
	// Query backups
	type backupRow struct {
		BackupCID  string    `db:"backup_cid"`
		BackedUpAt time.Time `db:"backed_up_at"`
		SizeBytes  int64     `db:"size_bytes"`
	}
	var rows []backupRow
	query := `
	SELECT backup_cid, backed_up_at, size_bytes
	FROM namespace_sqlite_backups
	WHERE database_id = ?
	ORDER BY backed_up_at DESC
	LIMIT 50
	`
	err = h.sqliteHandler.db.Query(ctx, &rows, query, dbID)
	if err != nil {
		h.logger.Error("Failed to query backups", zap.Error(err))
		http.Error(w, "Failed to query backups", http.StatusInternalServerError)
		return
	}
	backups := make([]map[string]interface{}, len(rows))
	for i, row := range rows {
		backups[i] = map[string]interface{}{
			"backup_cid":   row.BackupCID,
			"backed_up_at": row.BackedUpAt,
			"size_bytes":   row.SizeBytes,
			"ipfs_url":     "https://ipfs.io/ipfs/" + row.BackupCID,
		}
	}
	resp := map[string]interface{}{
		"database_name": databaseName,
		"backups":       backups,
		"total":         len(backups),
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(resp)
}

View File

@ -0,0 +1,198 @@
package sqlite
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"net/http"
"os"
"path/filepath"
"time"
"github.com/DeBrosOfficial/network/pkg/deployments"
"github.com/DeBrosOfficial/network/pkg/rqlite"
"github.com/google/uuid"
"go.uber.org/zap"
_ "github.com/mattn/go-sqlite3"
)
// SQLiteHandler handles namespace SQLite database operations: create,
// query, list. Metadata lives in RQLite (namespace_sqlite_databases);
// the database files themselves live under basePath/<namespace>/.
type SQLiteHandler struct {
	db              rqlite.Client                // cluster metadata store
	homeNodeManager *deployments.HomeNodeManager // assigns each namespace a home node
	logger          *zap.Logger
	basePath        string // root directory for per-namespace .db files
}
// NewSQLiteHandler creates a new SQLite handler.
//
// The on-disk root for database files defaults to
// /home/debros/.orama/data/sqlite but can be overridden with the
// ORAMA_SQLITE_PATH environment variable, making the handler usable in
// tests and non-standard installs without code changes. Behavior is
// unchanged when the variable is unset.
func NewSQLiteHandler(db rqlite.Client, homeNodeManager *deployments.HomeNodeManager, logger *zap.Logger) *SQLiteHandler {
	basePath := os.Getenv("ORAMA_SQLITE_PATH")
	if basePath == "" {
		basePath = "/home/debros/.orama/data/sqlite"
	}
	return &SQLiteHandler{
		db:              db,
		homeNodeManager: homeNodeManager,
		logger:          logger,
		basePath:        basePath,
	}
}
// CreateDatabase creates a new SQLite database for a namespace
//
// POST body: {"database_name": "..."}. The namespace comes from the request
// context (set by auth middleware). On success a fresh WAL-mode SQLite file
// is created under basePath/<namespace>/ and registered in
// namespace_sqlite_databases; responds 201 with the new metadata.
//
// NOTE(review): the exists-check followed by create is not atomic — two
// concurrent requests could both pass the check. A UNIQUE constraint on
// (namespace, database_name) would make the race harmless; confirm the
// schema has one.
func (h *SQLiteHandler) CreateDatabase(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// Namespace is injected by upstream auth middleware.
	namespace := ctx.Value("namespace").(string)
	var req struct {
		DatabaseName string `json:"database_name"`
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	if req.DatabaseName == "" {
		http.Error(w, "database_name is required", http.StatusBadRequest)
		return
	}
	// Validate database name (alphanumeric, underscore, hyphen only)
	if !isValidDatabaseName(req.DatabaseName) {
		http.Error(w, "Invalid database name. Use only alphanumeric characters, underscores, and hyphens", http.StatusBadRequest)
		return
	}
	h.logger.Info("Creating SQLite database",
		zap.String("namespace", namespace),
		zap.String("database", req.DatabaseName),
	)
	// Assign home node for namespace
	homeNodeID, err := h.homeNodeManager.AssignHomeNode(ctx, namespace)
	if err != nil {
		h.logger.Error("Failed to assign home node", zap.Error(err))
		http.Error(w, "Failed to assign home node", http.StatusInternalServerError)
		return
	}
	// Check if database already exists (see race note above)
	existing, err := h.getDatabaseRecord(ctx, namespace, req.DatabaseName)
	if err == nil && existing != nil {
		http.Error(w, "Database already exists", http.StatusConflict)
		return
	}
	// Create database file path
	dbID := uuid.New().String()
	dbPath := filepath.Join(h.basePath, namespace, req.DatabaseName+".db")
	// Create directory if needed
	if err := os.MkdirAll(filepath.Dir(dbPath), 0755); err != nil {
		h.logger.Error("Failed to create directory", zap.Error(err))
		http.Error(w, "Failed to create database directory", http.StatusInternalServerError)
		return
	}
	// Create SQLite database (sql.Open is lazy; the file materializes when
	// the PRAGMA below first touches the connection)
	sqliteDB, err := sql.Open("sqlite3", dbPath)
	if err != nil {
		h.logger.Error("Failed to create SQLite database", zap.Error(err))
		http.Error(w, "Failed to create database", http.StatusInternalServerError)
		return
	}
	// Enable WAL mode for better concurrency
	if _, err := sqliteDB.Exec("PRAGMA journal_mode=WAL"); err != nil {
		h.logger.Warn("Failed to enable WAL mode", zap.Error(err))
	}
	sqliteDB.Close()
	// Record in RQLite
	query := `
	INSERT INTO namespace_sqlite_databases (
		id, namespace, database_name, home_node_id, file_path, size_bytes, created_at, updated_at, created_by
	) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
	`
	now := time.Now()
	// NOTE(review): created_by is recorded as the namespace, not a wallet
	// address — confirm this matches the column's intended semantics.
	_, err = h.db.Exec(ctx, query, dbID, namespace, req.DatabaseName, homeNodeID, dbPath, 0, now, now, namespace)
	if err != nil {
		h.logger.Error("Failed to record database", zap.Error(err))
		os.Remove(dbPath) // Cleanup
		http.Error(w, "Failed to record database", http.StatusInternalServerError)
		return
	}
	h.logger.Info("SQLite database created",
		zap.String("id", dbID),
		zap.String("namespace", namespace),
		zap.String("database", req.DatabaseName),
		zap.String("path", dbPath),
	)
	// Return response
	resp := map[string]interface{}{
		"id":            dbID,
		"namespace":     namespace,
		"database_name": req.DatabaseName,
		"home_node_id":  homeNodeID,
		"file_path":     dbPath,
		"created_at":    now,
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusCreated)
	json.NewEncoder(w).Encode(resp)
}
// getDatabaseRecord retrieves database metadata from RQLite
//
// Returns a generic map so callers don't depend on the scan struct; keys:
// id, namespace, database_name, home_node_id, file_path, size_bytes,
// backup_cid, created_at. Returns an error when no row matches.
//
// NOTE(review): the query uses SELECT * but dbRow declares only a subset of
// columns — confirm the rqlite client tolerates extra columns (e.g.
// updated_at, last_backup_at) when scanning.
func (h *SQLiteHandler) getDatabaseRecord(ctx context.Context, namespace, databaseName string) (map[string]interface{}, error) {
	// Local scan target for the metadata row.
	type dbRow struct {
		ID           string    `db:"id"`
		Namespace    string    `db:"namespace"`
		DatabaseName string    `db:"database_name"`
		HomeNodeID   string    `db:"home_node_id"`
		FilePath     string    `db:"file_path"`
		SizeBytes    int64     `db:"size_bytes"`
		BackupCID    string    `db:"backup_cid"`
		CreatedAt    time.Time `db:"created_at"`
	}
	var rows []dbRow
	query := `SELECT * FROM namespace_sqlite_databases WHERE namespace = ? AND database_name = ? LIMIT 1`
	err := h.db.Query(ctx, &rows, query, namespace, databaseName)
	if err != nil {
		return nil, err
	}
	if len(rows) == 0 {
		return nil, fmt.Errorf("database not found")
	}
	row := rows[0]
	return map[string]interface{}{
		"id":            row.ID,
		"namespace":     row.Namespace,
		"database_name": row.DatabaseName,
		"home_node_id":  row.HomeNodeID,
		"file_path":     row.FilePath,
		"size_bytes":    row.SizeBytes,
		"backup_cid":    row.BackupCID,
		"created_at":    row.CreatedAt,
	}, nil
}
// isValidDatabaseName reports whether name is a safe database identifier:
// 1-64 characters, each an ASCII letter, digit, underscore or hyphen.
// Anything else (dots, slashes, spaces, non-ASCII) is rejected, which also
// keeps the name safe to embed in a filesystem path.
func isValidDatabaseName(name string) bool {
	if len(name) == 0 || len(name) > 64 {
		return false
	}
	for i := 0; i < len(name); i++ {
		c := name[i]
		switch {
		case c >= 'a' && c <= 'z':
		case c >= 'A' && c <= 'Z':
		case c >= '0' && c <= '9':
		case c == '_' || c == '-':
		default:
			return false
		}
	}
	return true
}

View File

@ -0,0 +1,224 @@
package sqlite
import (
	"context"
	"database/sql"
	"encoding/json"
	"net/http"
	"os"
	"strings"

	"go.uber.org/zap"
)
// QueryRequest represents a SQL query request against a namespace database.
type QueryRequest struct {
	DatabaseName string        `json:"database_name"` // which namespace database to query
	Query        string        `json:"query"`         // SQL text; classified read/write by isWriteQuery
	Params       []interface{} `json:"params"`        // positional placeholder values for '?'
}
// QueryResponse represents a SQL query response. Columns/Rows are set for
// reads; RowsAffected/LastInsertID for writes; Error carries the SQL error
// text when the statement fails.
type QueryResponse struct {
	Columns      []string        `json:"columns,omitempty"`
	Rows         [][]interface{} `json:"rows,omitempty"`
	RowsAffected int64           `json:"rows_affected,omitempty"`
	LastInsertID int64           `json:"last_insert_id,omitempty"`
	Error        string          `json:"error,omitempty"`
}
// QueryDatabase executes a SQL query on a namespace database
//
// The statement is classified by its leading keyword (isWriteQuery): writes
// run through ExecContext and return rows_affected/last_insert_id; reads run
// through QueryContext and return columns/rows. SQL errors come back as 400
// with the error text inside the JSON body.
//
// Arbitrary SQL is accepted by design — each namespace only ever touches its
// own database file. Callers should still bind values via params, not string
// interpolation.
func (h *SQLiteHandler) QueryDatabase(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// Namespace is injected by upstream auth middleware.
	namespace := ctx.Value("namespace").(string)
	var req QueryRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	if req.DatabaseName == "" {
		http.Error(w, "database_name is required", http.StatusBadRequest)
		return
	}
	if req.Query == "" {
		http.Error(w, "query is required", http.StatusBadRequest)
		return
	}
	// Get database metadata (scoped to the namespace — also authorization)
	dbMeta, err := h.getDatabaseRecord(ctx, namespace, req.DatabaseName)
	if err != nil {
		http.Error(w, "Database not found", http.StatusNotFound)
		return
	}
	filePath := dbMeta["file_path"].(string)
	// Check if database file exists
	if _, err := os.Stat(filePath); os.IsNotExist(err) {
		http.Error(w, "Database file not found", http.StatusNotFound)
		return
	}
	// Open database (per-request; sql.Open is cheap and lazy)
	db, err := sql.Open("sqlite3", filePath)
	if err != nil {
		h.logger.Error("Failed to open database", zap.Error(err))
		http.Error(w, "Failed to open database", http.StatusInternalServerError)
		return
	}
	defer db.Close()
	// Determine if this is a read or write query
	isWrite := isWriteQuery(req.Query)
	var resp QueryResponse
	if isWrite {
		// Execute write query
		result, err := db.ExecContext(ctx, req.Query, req.Params...)
		if err != nil {
			resp.Error = err.Error()
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(http.StatusBadRequest)
			json.NewEncoder(w).Encode(resp)
			return
		}
		rowsAffected, _ := result.RowsAffected()
		lastInsertID, _ := result.LastInsertId()
		resp.RowsAffected = rowsAffected
		resp.LastInsertID = lastInsertID
	} else {
		// Execute read query
		rows, err := db.QueryContext(ctx, req.Query, req.Params...)
		if err != nil {
			resp.Error = err.Error()
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(http.StatusBadRequest)
			json.NewEncoder(w).Encode(resp)
			return
		}
		defer rows.Close()
		// Get column names
		columns, err := rows.Columns()
		if err != nil {
			resp.Error = err.Error()
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(http.StatusInternalServerError)
			json.NewEncoder(w).Encode(resp)
			return
		}
		resp.Columns = columns
		// Scan rows into a reusable buffer of interface pointers
		values := make([]interface{}, len(columns))
		valuePtrs := make([]interface{}, len(columns))
		for i := range values {
			valuePtrs[i] = &values[i]
		}
		for rows.Next() {
			if err := rows.Scan(valuePtrs...); err != nil {
				// Skip unscannable rows rather than failing the whole result
				h.logger.Error("Failed to scan row", zap.Error(err))
				continue
			}
			row := make([]interface{}, len(columns))
			for i, val := range values {
				// Convert []byte to string for JSON serialization
				if b, ok := val.([]byte); ok {
					row[i] = string(b)
				} else {
					row[i] = val
				}
			}
			resp.Rows = append(resp.Rows, row)
		}
		if err := rows.Err(); err != nil {
			resp.Error = err.Error()
		}
	}
	// Update database size asynchronously; must not block the response
	go h.updateDatabaseSize(namespace, req.DatabaseName, filePath)
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(resp)
}
// isWriteQuery determines if a SQL query is a write operation by comparing
// its first whitespace-delimited keyword against the write/DDL verbs.
//
// The previous implementation used strings.HasPrefix, which also matched
// non-keyword prefixes (e.g. a statement starting with "UPDATES" or
// "CREATEX"); exact first-token comparison enforces a word boundary.
//
// NOTE(review): statements beginning with "WITH" (CTEs) can still be writes
// in SQLite ("WITH ... INSERT INTO ..."); they are classified as reads
// here — confirm whether the query path needs to handle them.
func isWriteQuery(query string) bool {
	fields := strings.Fields(strings.ToUpper(query))
	if len(fields) == 0 {
		return false
	}
	switch fields[0] {
	case "INSERT", "UPDATE", "DELETE", "CREATE", "DROP", "ALTER", "TRUNCATE", "REPLACE":
		return true
	default:
		return false
	}
}
// updateDatabaseSize refreshes size_bytes for a database in the RQLite
// metadata table from the current on-disk file size.
//
// Runs in a goroutine after the HTTP response is written (see
// QueryDatabase), so it must not use the request context. The original code
// passed a nil context to Exec, which is invalid in Go and can panic
// depending on the client implementation; context.Background() is the
// correct context for detached background work. Errors are logged only —
// a stale size must never fail a query.
func (h *SQLiteHandler) updateDatabaseSize(namespace, databaseName, filePath string) {
	stat, err := os.Stat(filePath)
	if err != nil {
		h.logger.Error("Failed to stat database file", zap.Error(err))
		return
	}
	query := `UPDATE namespace_sqlite_databases SET size_bytes = ? WHERE namespace = ? AND database_name = ?`
	_, err = h.db.Exec(context.Background(), query, stat.Size(), namespace, databaseName)
	if err != nil {
		h.logger.Error("Failed to update database size", zap.Error(err))
	}
}
// DatabaseInfo represents database metadata as returned by ListDatabases.
// Timestamp fields are kept as strings so whatever format RQLite returns
// passes through to JSON unchanged.
type DatabaseInfo struct {
	ID           string `json:"id" db:"id"`
	DatabaseName string `json:"database_name" db:"database_name"`
	HomeNodeID   string `json:"home_node_id" db:"home_node_id"`
	SizeBytes    int64  `json:"size_bytes" db:"size_bytes"`
	BackupCID    string `json:"backup_cid,omitempty" db:"backup_cid"`
	LastBackupAt string `json:"last_backup_at,omitempty" db:"last_backup_at"`
	CreatedAt    string `json:"created_at" db:"created_at"`
}
// ListDatabases lists all databases for a namespace, newest first.
// Namespace comes from the request context (set by auth middleware).
// Responds with {"databases": [...], "count": N}.
func (h *SQLiteHandler) ListDatabases(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	namespace := ctx.Value("namespace").(string)
	var databases []DatabaseInfo
	query := `
	SELECT id, database_name, home_node_id, size_bytes, backup_cid, last_backup_at, created_at
	FROM namespace_sqlite_databases
	WHERE namespace = ?
	ORDER BY created_at DESC
	`
	err := h.db.Query(ctx, &databases, query, namespace)
	if err != nil {
		h.logger.Error("Failed to list databases", zap.Error(err))
		http.Error(w, "Failed to list databases", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(map[string]interface{}{
		"databases": databases,
		"count":     len(databases),
	})
}

View File

@ -3,6 +3,7 @@ package gateway
import (
"context"
"encoding/json"
"io"
"net"
"net/http"
"strconv"
@ -10,6 +11,7 @@ import (
"time"
"github.com/DeBrosOfficial/network/pkg/client"
"github.com/DeBrosOfficial/network/pkg/deployments"
"github.com/DeBrosOfficial/network/pkg/gateway/auth"
"github.com/DeBrosOfficial/network/pkg/logging"
"go.uber.org/zap"
@ -19,9 +21,13 @@ import (
// withMiddleware adds CORS and logging middleware
func (g *Gateway) withMiddleware(next http.Handler) http.Handler {
// Order: logging (outermost) -> CORS -> auth -> handler
// Add authorization layer after auth to enforce namespace ownership
return g.loggingMiddleware(g.corsMiddleware(g.authMiddleware(g.authorizationMiddleware(next))))
// Order: logging (outermost) -> CORS -> domain routing -> auth -> handler
// Domain routing must come BEFORE auth to handle deployment domains without auth
return g.loggingMiddleware(
g.corsMiddleware(
g.domainRoutingMiddleware(
g.authMiddleware(
g.authorizationMiddleware(next)))))
}
// loggingMiddleware logs basic request info and duration
@ -426,3 +432,183 @@ func getClientIP(r *http.Request) string {
}
return host
}
// domainRoutingMiddleware handles requests to deployment domains
// This must come BEFORE auth middleware so deployment domains work without API keys
//
// Only hosts ending in .debros.network are considered; /v1/ and
// /.well-known/ paths always fall through to the normal (authenticated)
// router, as do hosts with no matching active deployment.
//
// NOTE(review): "deployment" is a bare string context key — a typed
// package-local key would avoid cross-package collisions.
// NOTE(review): strings.Split(r.Host, ":")[0] mangles bracketed IPv6
// literals (e.g. "[::1]:8080"); harmless for *.debros.network hosts but
// worth confirming.
func (g *Gateway) domainRoutingMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		host := strings.Split(r.Host, ":")[0] // Strip port
		// Only process .debros.network domains
		if !strings.HasSuffix(host, ".debros.network") {
			next.ServeHTTP(w, r)
			return
		}
		// Skip API paths (they should use JWT/API key auth)
		if strings.HasPrefix(r.URL.Path, "/v1/") || strings.HasPrefix(r.URL.Path, "/.well-known/") {
			next.ServeHTTP(w, r)
			return
		}
		// Check if deployment handlers are available
		if g.deploymentService == nil || g.staticHandler == nil {
			next.ServeHTTP(w, r)
			return
		}
		// Try to find deployment by domain
		deployment, err := g.getDeploymentByDomain(r.Context(), host)
		if err != nil || deployment == nil {
			// Not a deployment domain, continue to normal routing
			next.ServeHTTP(w, r)
			return
		}
		// Inject deployment context so downstream handlers are scoped to the
		// owning namespace
		ctx := context.WithValue(r.Context(), CtxKeyNamespaceOverride, deployment.Namespace)
		ctx = context.WithValue(ctx, "deployment", deployment)
		// Route based on deployment type: Port == 0 means static content
		if deployment.Port == 0 {
			// Static deployment - serve from IPFS
			g.staticHandler.HandleServe(w, r.WithContext(ctx), deployment)
		} else {
			// Dynamic deployment - proxy to local port
			g.proxyToDynamicDeployment(w, r.WithContext(ctx), deployment)
		}
	})
}
// getDeploymentByDomain looks up a deployment by its domain
//
// Matches either the canonical node-scoped name
// (<name>.node-<home_node_id>.debros.network) or a verified custom domain
// from deployment_domains; only status 'active' rows qualify. Returns
// (nil, nil) when nothing matches.
//
// SQL precedence note: AND binds tighter than OR, so the WHERE clause reads
// (canonical-name match OR (custom domain AND verified)) AND active — i.e.
// custom domains must be verified, the canonical name always works.
func (g *Gateway) getDeploymentByDomain(ctx context.Context, domain string) (*deployments.Deployment, error) {
	if g.deploymentService == nil {
		return nil, nil
	}
	// Strip trailing dot if present (FQDN form from DNS)
	domain = strings.TrimSuffix(domain, ".")
	// Query deployment by domain (node-specific subdomain or custom domain)
	db := g.client.Database()
	// Internal auth: this lookup runs before user auth is established.
	internalCtx := client.WithInternalAuth(ctx)
	query := `
	SELECT d.id, d.namespace, d.name, d.type, d.port, d.content_cid, d.status
	FROM deployments d
	LEFT JOIN deployment_domains dd ON d.id = dd.deployment_id
	WHERE (d.name || '.node-' || d.home_node_id || '.debros.network' = ?
	OR dd.domain = ? AND dd.verification_status = 'verified')
	AND d.status = 'active'
	LIMIT 1
	`
	result, err := db.Query(internalCtx, query, domain, domain)
	if err != nil || result.Count == 0 {
		return nil, err
	}
	if len(result.Rows) == 0 {
		return nil, nil
	}
	row := result.Rows[0]
	// Defensive: malformed row shape -> treat as no match
	if len(row) < 7 {
		return nil, nil
	}
	// Create deployment object from the raw row values
	deployment := &deployments.Deployment{
		ID:         getString(row[0]),
		Namespace:  getString(row[1]),
		Name:       getString(row[2]),
		Type:       deployments.DeploymentType(getString(row[3])),
		Port:       getInt(row[4]),
		ContentCID: getString(row[5]),
		Status:     deployments.DeploymentStatus(getString(row[6])),
	}
	return deployment, nil
}
// proxyToDynamicDeployment proxies requests to a dynamic deployment's local
// port on this node.
//
// Fixes over the original: the response body is streamed with a plain
// io.Copy (the original guarded it behind a pointless zero-byte write); the
// backend request carries the caller's context so it is cancelled when the
// client disconnects; and the local HTTP client no longer shadows the
// imported "client" package.
//
// NOTE(review): hop-by-hop headers (Connection, Transfer-Encoding, ...) are
// forwarded verbatim in both directions — consider stripping them or using
// httputil.ReverseProxy.
func (g *Gateway) proxyToDynamicDeployment(w http.ResponseWriter, r *http.Request, deployment *deployments.Deployment) {
	if deployment.Port == 0 {
		http.Error(w, "Deployment has no assigned port", http.StatusServiceUnavailable)
		return
	}
	// Backend base URL on this node.
	target := "http://localhost:" + strconv.Itoa(deployment.Port)
	// Standard reverse-proxy headers so the backend sees the real client.
	r.Header.Set("X-Forwarded-For", getClientIP(r))
	r.Header.Set("X-Forwarded-Proto", "https")
	r.Header.Set("X-Forwarded-Host", r.Host)
	// Rebuild the full backend URL, preserving the query string.
	backendURL := target + r.URL.Path
	if r.URL.RawQuery != "" {
		backendURL += "?" + r.URL.RawQuery
	}
	// Propagate the caller's context so the backend call is cancelled when
	// the client goes away.
	proxyReq, err := http.NewRequestWithContext(r.Context(), r.Method, backendURL, r.Body)
	if err != nil {
		http.Error(w, "Failed to create proxy request", http.StatusInternalServerError)
		return
	}
	// Copy request headers verbatim.
	for key, values := range r.Header {
		for _, value := range values {
			proxyReq.Header.Add(key, value)
		}
	}
	// Named httpClient to avoid shadowing the imported "client" package.
	httpClient := &http.Client{Timeout: 30 * time.Second}
	resp, err := httpClient.Do(proxyReq)
	if err != nil {
		g.logger.ComponentError(logging.ComponentGeneral, "proxy request failed",
			zap.String("target", target),
			zap.Error(err),
		)
		http.Error(w, "Service unavailable", http.StatusServiceUnavailable)
		return
	}
	defer resp.Body.Close()
	// Copy response headers.
	for key, values := range resp.Header {
		for _, value := range values {
			w.Header().Add(key, value)
		}
	}
	// Write status code, then stream the body directly to the client.
	w.WriteHeader(resp.StatusCode)
	if _, err := io.Copy(w, resp.Body); err != nil {
		// Headers/status are already sent; all we can do is log.
		g.logger.ComponentError(logging.ComponentGeneral, "proxy response copy failed",
			zap.String("target", target),
			zap.Error(err),
		)
	}
}
// Helper functions for type conversion

// getString coerces a raw database value to string, returning the empty
// string for any non-string value (including nil).
func getString(v interface{}) string {
	s, ok := v.(string)
	if !ok {
		return ""
	}
	return s
}
// getInt coerces a raw database value to int. It accepts int, int64 and
// float64 (the numeric forms JSON/driver scans produce); float64 values are
// truncated toward zero. Any other value yields 0.
func getInt(v interface{}) int {
	switch n := v.(type) {
	case int:
		return n
	case int64:
		return int(n)
	case float64:
		return int(n)
	default:
		return 0
	}
}

View File

@ -79,5 +79,40 @@ func (g *Gateway) Routes() http.Handler {
g.serverlessHandlers.RegisterRoutes(mux)
}
// deployment endpoints
if g.deploymentService != nil {
// Static deployments
mux.HandleFunc("/v1/deployments/static/upload", g.staticHandler.HandleUpload)
mux.HandleFunc("/v1/deployments/static/update", g.updateHandler.HandleUpdate)
// Next.js deployments
mux.HandleFunc("/v1/deployments/nextjs/upload", g.nextjsHandler.HandleUpload)
mux.HandleFunc("/v1/deployments/nextjs/update", g.updateHandler.HandleUpdate)
// Deployment management
mux.HandleFunc("/v1/deployments/list", g.listHandler.HandleList)
mux.HandleFunc("/v1/deployments/get", g.listHandler.HandleGet)
mux.HandleFunc("/v1/deployments/delete", g.listHandler.HandleDelete)
mux.HandleFunc("/v1/deployments/rollback", g.rollbackHandler.HandleRollback)
mux.HandleFunc("/v1/deployments/versions", g.rollbackHandler.HandleListVersions)
mux.HandleFunc("/v1/deployments/logs", g.logsHandler.HandleLogs)
mux.HandleFunc("/v1/deployments/events", g.logsHandler.HandleGetEvents)
// Custom domains
mux.HandleFunc("/v1/deployments/domains/add", g.domainHandler.HandleAddDomain)
mux.HandleFunc("/v1/deployments/domains/verify", g.domainHandler.HandleVerifyDomain)
mux.HandleFunc("/v1/deployments/domains/list", g.domainHandler.HandleListDomains)
mux.HandleFunc("/v1/deployments/domains/remove", g.domainHandler.HandleRemoveDomain)
}
// SQLite database endpoints
if g.sqliteHandler != nil {
mux.HandleFunc("/v1/db/sqlite/create", g.sqliteHandler.CreateDatabase)
mux.HandleFunc("/v1/db/sqlite/query", g.sqliteHandler.QueryDatabase)
mux.HandleFunc("/v1/db/sqlite/list", g.sqliteHandler.ListDatabases)
mux.HandleFunc("/v1/db/sqlite/backup", g.sqliteBackupHandler.BackupDatabase)
mux.HandleFunc("/v1/db/sqlite/backups", g.sqliteBackupHandler.ListBackups)
}
return g.withMiddleware(mux)
}

298
scripts/block-node.sh Executable file
View File

@ -0,0 +1,298 @@
#!/usr/bin/env bash
# block-node.sh - Temporarily block network access to a gateway node (local or remote)
# Usage:
# Local: ./scripts/block-node.sh <node_number> <duration_seconds>
# Remote: ./scripts/block-node.sh --remote <remote_node_number> <duration_seconds>
# Example:
# ./scripts/block-node.sh 1 60 # Block local node-1 (port 6001) for 60 seconds
# ./scripts/block-node.sh --remote 2 120 # Block remote node-2 for 120 seconds
set -euo pipefail
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Remote node configurations - loaded from config file
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CONFIG_FILE="$SCRIPT_DIR/remote-nodes.conf"
# Function to get remote node config
#
# Reads $CONFIG_FILE (remote-nodes.conf), a pipe-delimited file of the form:
#   <node_number>|<user@host>|<password>
# Comment lines ('#...') and blank lines are skipped; each field is
# whitespace-trimmed via xargs.
#
# $1 - node number to look up
# $2 - which field to print: "user_host" or "password"
# Prints the requested field and returns 0 on a match; prints an empty
# string and returns 1 when the file or the node entry is missing.
get_remote_node_config() {
    local node_num="$1"
    local field="$2" # "user_host" or "password"
    if [ ! -f "$CONFIG_FILE" ]; then
        echo ""
        return 1
    fi
    # '|| [ -n "$num" ]' also processes a last line without a trailing newline
    while IFS='|' read -r num user_host password || [ -n "$num" ]; do
        # Skip comments and empty lines
        [[ "$num" =~ ^#.*$ ]] || [[ -z "$num" ]] && continue
        # Trim whitespace
        num=$(echo "$num" | xargs)
        user_host=$(echo "$user_host" | xargs)
        password=$(echo "$password" | xargs)
        if [ "$num" = "$node_num" ]; then
            if [ "$field" = "user_host" ]; then
                echo "$user_host"
            elif [ "$field" = "password" ]; then
                echo "$password"
            fi
            return 0
        fi
    done < "$CONFIG_FILE"
    echo ""
    return 1
}
# Display usage
#
# Prints invocation help and exits 1.
# NOTE(review): the "Remote Node Mapping" hosts below are hard-coded help
# text and can drift from remote-nodes.conf — keep in sync.
usage() {
    echo -e "${RED}Error:${NC} Invalid arguments"
    echo ""
    echo -e "${BLUE}Usage:${NC}"
    echo "  $0 <node_number> <duration_seconds>              # Local mode"
    echo "  $0 --remote <remote_node_number> <duration_seconds>  # Remote mode"
    echo ""
    echo -e "${GREEN}Local Mode Examples:${NC}"
    echo "  $0 1 60     # Block local node-1 (port 6001) for 60 seconds"
    echo "  $0 2 120    # Block local node-2 (port 6002) for 120 seconds"
    echo ""
    echo -e "${GREEN}Remote Mode Examples:${NC}"
    echo "  $0 --remote 1 60    # Block remote node-1 (51.83.128.181) for 60 seconds"
    echo "  $0 --remote 3 120   # Block remote node-3 (83.171.248.66) for 120 seconds"
    echo ""
    echo -e "${YELLOW}Local Node Mapping:${NC}"
    echo "  Node 1 -> Port 6001"
    echo "  Node 2 -> Port 6002"
    echo "  Node 3 -> Port 6003"
    echo "  Node 4 -> Port 6004"
    echo "  Node 5 -> Port 6005"
    echo ""
    echo -e "${YELLOW}Remote Node Mapping:${NC}"
    echo "  Remote 1 -> ubuntu@51.83.128.181"
    echo "  Remote 2 -> root@194.61.28.7"
    echo "  Remote 3 -> root@83.171.248.66"
    echo "  Remote 4 -> root@62.72.44.87"
    exit 1
}
# Parse arguments
#
# Two call shapes: "--remote <n> <secs>" (3 args) or "<n> <secs>" (2 args).
# Anything else prints usage and exits.
REMOTE_MODE=false
if [ $# -eq 3 ] && [ "$1" == "--remote" ]; then
    REMOTE_MODE=true
    NODE_NUM="$2"
    DURATION="$3"
elif [ $# -eq 2 ]; then
    NODE_NUM="$1"
    DURATION="$2"
else
    usage
fi
# Validate duration (positive integer; regex also rejects non-numeric input)
if ! [[ "$DURATION" =~ ^[0-9]+$ ]] || [ "$DURATION" -le 0 ]; then
    echo -e "${RED}Error:${NC} Duration must be a positive integer"
    exit 1
fi
# Calculate port (local nodes use 6001-6005, remote nodes use 80 and 443)
# Only PORTS (remote) or PORT (local) is set; each is referenced solely in
# its own branch of the main dispatch, so 'set -u' stays safe.
if [ "$REMOTE_MODE" = true ]; then
    # Remote nodes: block standard HTTP/HTTPS ports
    PORTS="80 443"
else
    # Local nodes: block the specific gateway port
    PORT=$((6000 + NODE_NUM))
fi
# Function to block ports on remote server
#
# SSHes to the node (credentials from remote-nodes.conf via sshpass),
# inserts iptables DROP rules for each port, waits with a countdown, then
# removes the rules. Rules are not persisted, so a reboot during the window
# also clears them.
#
# $1 - remote node number (1-4)
# $2 - duration in seconds
# $3 - space-separated port list, e.g. "80 443"
#
# NOTE(review): sshpass -p exposes the password in the local process list
# and the config file stores it in plaintext — acceptable only for
# disposable test nodes.
block_remote_node() {
    local node_num="$1"
    local duration="$2"
    local ports="$3" # Can be space-separated list like "80 443"
    # Validate remote node number
    if ! [[ "$node_num" =~ ^[1-4]$ ]]; then
        echo -e "${RED}Error:${NC} Remote node number must be between 1 and 4"
        exit 1
    fi
    # Get credentials from config file
    local user_host=$(get_remote_node_config "$node_num" "user_host")
    local password=$(get_remote_node_config "$node_num" "password")
    if [ -z "$user_host" ] || [ -z "$password" ]; then
        echo -e "${RED}Error:${NC} Configuration for remote node $node_num not found in $CONFIG_FILE"
        exit 1
    fi
    local host="${user_host##*@}"
    echo -e "${BLUE}=== Remote Network Blocking Tool ===${NC}"
    echo -e "Remote Node: ${GREEN}$node_num${NC} ($user_host)"
    echo -e "Ports: ${GREEN}$ports${NC}"
    echo -e "Duration: ${GREEN}$duration seconds${NC}"
    echo ""
    # Check if sshpass is installed
    if ! command -v sshpass &> /dev/null; then
        echo -e "${RED}Error:${NC} sshpass is not installed. Install it first:"
        echo -e "  ${YELLOW}macOS:${NC} brew install hudochenkov/sshpass/sshpass"
        echo -e "  ${YELLOW}Ubuntu/Debian:${NC} sudo apt-get install sshpass"
        exit 1
    fi
    # SSH options - force password authentication only to avoid "too many auth failures"
    SSH_OPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=ERROR -o PreferredAuthentications=password -o PubkeyAuthentication=no -o NumberOfPasswordPrompts=1"
    echo -e "${YELLOW}Connecting to remote server...${NC}"
    # Test connection
    if ! sshpass -p "$password" ssh $SSH_OPTS "$user_host" "echo 'Connected successfully' > /dev/null"; then
        echo -e "${RED}Error:${NC} Failed to connect to $user_host"
        exit 1
    fi
    echo -e "${GREEN}${NC} Connected to $host"
    # Install iptables rules on remote server
    echo -e "${YELLOW}Installing iptables rules on remote server...${NC}"
    # Build iptables commands for all ports; '|| true' keeps the batch going
    # if a rule already exists
    BLOCK_CMDS=""
    for port in $ports; do
        BLOCK_CMDS="${BLOCK_CMDS}iptables -I INPUT -p tcp --dport $port -j DROP 2>/dev/null || true; "
        BLOCK_CMDS="${BLOCK_CMDS}iptables -I OUTPUT -p tcp --sport $port -j DROP 2>/dev/null || true; "
    done
    BLOCK_CMDS="${BLOCK_CMDS}echo 'Rules installed'"
    if ! sshpass -p "$password" ssh $SSH_OPTS "$user_host" "$BLOCK_CMDS"; then
        echo -e "${RED}Error:${NC} Failed to install iptables rules"
        exit 1
    fi
    echo -e "${GREEN}${NC} Ports $ports are now blocked on $host"
    echo -e "${YELLOW}Waiting $duration seconds...${NC}"
    echo ""
    # Show countdown
    for ((i=duration; i>0; i--)); do
        printf "\r${BLUE}Time remaining: %3d seconds${NC}" "$i"
        sleep 1
    done
    echo ""
    echo ""
    echo -e "${YELLOW}Removing iptables rules from remote server...${NC}"
    # Build iptables removal commands for all ports
    UNBLOCK_CMDS=""
    for port in $ports; do
        UNBLOCK_CMDS="${UNBLOCK_CMDS}iptables -D INPUT -p tcp --dport $port -j DROP 2>/dev/null || true; "
        UNBLOCK_CMDS="${UNBLOCK_CMDS}iptables -D OUTPUT -p tcp --sport $port -j DROP 2>/dev/null || true; "
    done
    UNBLOCK_CMDS="${UNBLOCK_CMDS}echo 'Rules removed'"
    if ! sshpass -p "$password" ssh $SSH_OPTS "$user_host" "$UNBLOCK_CMDS"; then
        echo -e "${YELLOW}Warning:${NC} Failed to remove some iptables rules. You may need to clean up manually."
    else
        echo -e "${GREEN}${NC} Ports $ports are now accessible again on $host"
    fi
    echo ""
    echo -e "${GREEN}=== Done! ===${NC}"
    echo -e "Remote node ${GREEN}$node_num${NC} ($host) was unreachable for $duration seconds and is now accessible again."
}
# Function to block port locally using process pause (SIGSTOP)
#
# Finds the PID listening on the node's gateway port, SIGSTOPs it for the
# requested duration (the OS keeps the socket, so peers see an unresponsive
# node rather than a refused connection), then SIGCONTs it.
#
# $1 - local node number (1-5)
# $2 - duration in seconds
# $3 - TCP port the node's gateway listens on
#
# NOTE(review): if this script is killed during the countdown, the node
# stays paused — a trap on EXIT sending SIGCONT would make this safer.
block_local_node() {
    local node_num="$1"
    local duration="$2"
    local port="$3"
    # Validate node number
    if ! [[ "$node_num" =~ ^[1-5]$ ]]; then
        echo -e "${RED}Error:${NC} Local node number must be between 1 and 5"
        exit 1
    fi
    echo -e "${BLUE}=== Local Network Blocking Tool ===${NC}"
    echo -e "Node: ${GREEN}node-$node_num${NC}"
    echo -e "Port: ${GREEN}$port${NC}"
    echo -e "Duration: ${GREEN}$duration seconds${NC}"
    echo -e "Method: ${GREEN}Process Pause (SIGSTOP/SIGCONT)${NC}"
    echo ""
    # Find the process listening on the port
    echo -e "${YELLOW}Finding process listening on port $port...${NC}"
    # macOS uses different tools than Linux
    if [[ "$(uname -s)" == "Darwin" ]]; then
        # macOS: use lsof
        PID=$(lsof -ti :$port 2>/dev/null | head -1 || echo "")
    else
        # Linux: use ss or netstat
        if command -v ss &> /dev/null; then
            PID=$(ss -tlnp | grep ":$port " | grep -oP 'pid=\K[0-9]+' | head -1 || echo "")
        else
            PID=$(netstat -tlnp 2>/dev/null | grep ":$port " | awk '{print $7}' | cut -d'/' -f1 | head -1 || echo "")
        fi
    fi
    if [ -z "$PID" ]; then
        echo -e "${RED}Error:${NC} No process found listening on port $port"
        echo -e "Make sure node-$node_num is running first."
        exit 1
    fi
    # Get process name (for operator confirmation in the output)
    PROCESS_NAME=$(ps -p $PID -o comm= 2>/dev/null || echo "unknown")
    echo -e "${GREEN}${NC} Found process: ${BLUE}$PROCESS_NAME${NC} (PID: ${BLUE}$PID${NC})"
    echo ""
    # Pause the process
    echo -e "${YELLOW}Pausing process (SIGSTOP)...${NC}"
    if ! kill -STOP $PID 2>/dev/null; then
        echo -e "${RED}Error:${NC} Failed to pause process. You may need sudo privileges."
        exit 1
    fi
    echo -e "${GREEN}${NC} Process paused - node-$node_num is now unreachable"
    echo -e "${YELLOW}Waiting $duration seconds...${NC}"
    echo ""
    # Show countdown
    for ((i=duration; i>0; i--)); do
        printf "\r${BLUE}Time remaining: %3d seconds${NC}" "$i"
        sleep 1
    done
    echo ""
    echo ""
    # Resume the process
    echo -e "${YELLOW}Resuming process (SIGCONT)...${NC}"
    if ! kill -CONT $PID 2>/dev/null; then
        echo -e "${YELLOW}Warning:${NC} Failed to resume process. It may have been terminated."
    else
        echo -e "${GREEN}${NC} Process resumed - node-$node_num is now accessible again"
    fi
    echo ""
    echo -e "${GREEN}=== Done! ===${NC}"
    echo -e "Local node ${GREEN}node-$node_num${NC} was unreachable for $duration seconds and is now accessible again."
}
# Main execution
#
# Dispatch to the remote or local implementation. PORTS (remote) and PORT
# (local) are each referenced only in their own branch, so 'set -u' is safe.
if [ "$REMOTE_MODE" = true ]; then
    block_remote_node "$NODE_NUM" "$DURATION" "$PORTS"
else
    block_local_node "$NODE_NUM" "$DURATION" "$PORT"
fi

109
scripts/build-coredns.sh Executable file
View File

@ -0,0 +1,109 @@
#!/bin/bash
set -e

# Build custom CoreDNS binary with RQLite plugin.
# Clones upstream CoreDNS at a pinned release, registers the project's
# RQLite plugin in plugin.cfg, vendors the plugin sources into the
# CoreDNS tree, and compiles a custom binary into $PROJECT_ROOT/bin.

COREDNS_VERSION="1.11.1"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
COREDNS_DIR="/tmp/coredns-build"

echo "Building CoreDNS v${COREDNS_VERSION} with RQLite plugin..."

# Clean previous build
rm -rf "$COREDNS_DIR"
mkdir -p "$COREDNS_DIR"

# Clone CoreDNS (shallow clone of the pinned release tag)
echo "Cloning CoreDNS..."
cd "$COREDNS_DIR"
git clone --depth 1 --branch "v${COREDNS_VERSION}" https://github.com/coredns/coredns.git
cd coredns

# Create plugin.cfg with RQLite plugin.
# NOTE: plugin order in this file determines CoreDNS plugin execution order.
echo "Configuring plugins..."
cat > plugin.cfg <<EOF
# Standard CoreDNS plugins
metadata:metadata
cancel:cancel
tls:tls
reload:reload
nsid:nsid
bufsize:bufsize
root:root
bind:bind
debug:debug
trace:trace
ready:ready
health:health
pprof:pprof
prometheus:metrics
errors:errors
log:log
dnstap:dnstap
local:local
dns64:dns64
acl:acl
any:any
chaos:chaos
loadbalance:loadbalance
cache:cache
rewrite:rewrite
header:header
dnssec:dnssec
autopath:autopath
minimal:minimal
template:template
transfer:transfer
hosts:hosts
route53:route53
azure:azure
clouddns:clouddns
k8s_external:k8s_external
kubernetes:kubernetes
file:file
auto:auto
secondary:secondary
loop:loop
forward:forward
grpc:grpc
erratic:erratic
whoami:whoami
on:github.com/coredns/caddy/onevent
sign:sign
view:view
# Custom RQLite plugin — vendored below into plugin/rqlite, so it is
# referenced as an in-tree plugin, not by external module path (the
# external path would make Go fetch from GitHub and ignore the local copy)
rqlite:rqlite
EOF

# Copy RQLite plugin sources into the CoreDNS tree
echo "Copying RQLite plugin..."
mkdir -p plugin/rqlite
cp -r "$PROJECT_ROOT/pkg/coredns/rqlite/"* plugin/rqlite/

# Update go.mod to include the plugin's dependencies.
# NOTE(review): the rqlite Go client is published as
# github.com/rqlite/gorqlite ("rqlite-go" does not exist). A
# `go get` of coredns itself is also invalid inside its own module,
# so it is intentionally omitted; `go mod tidy` resolves the rest.
echo "Updating dependencies..."
go get github.com/rqlite/gorqlite@latest
go mod tidy

# Build CoreDNS
echo "Building CoreDNS binary..."
make

# Copy binary to project (create bin/ first — cp fails if it is missing)
echo "Copying binary to project..."
mkdir -p "$PROJECT_ROOT/bin"
cp coredns "$PROJECT_ROOT/bin/coredns-custom"
chmod +x "$PROJECT_ROOT/bin/coredns-custom"

echo ""
echo "✅ CoreDNS built successfully!"
echo "Binary location: $PROJECT_ROOT/bin/coredns-custom"
echo ""
echo "To deploy:"
echo "  1. Copy binary to /usr/local/bin/coredns on each nameserver node"
echo "  2. Copy configs/coredns/Corefile to /etc/coredns/Corefile"
echo "  3. Start CoreDNS: sudo systemctl start coredns"
echo ""

84
scripts/deploy-coredns.sh Executable file
View File

@ -0,0 +1,84 @@
#!/bin/bash
set -e

# Deploy CoreDNS to nameserver nodes.
# Copies the custom CoreDNS binary, Corefile, and systemd unit to each
# node over SSH, then (re)starts the service and verifies it is active.
#
# Usage: ./deploy-coredns.sh <node1_ip> <node2_ip> <node3_ip> <node4_ip>

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"

if [ $# -lt 4 ]; then
    echo "Usage: $0 <node1_ip> <node2_ip> <node3_ip> <node4_ip>"
    echo "Example: $0 1.2.3.4 1.2.3.5 1.2.3.6 1.2.3.7"
    exit 1
fi

NODES=("$1" "$2" "$3" "$4")
NODE_COUNT=${#NODES[@]}
BINARY="$PROJECT_ROOT/bin/coredns-custom"
COREFILE="$PROJECT_ROOT/configs/coredns/Corefile"
SYSTEMD_SERVICE="$PROJECT_ROOT/configs/coredns/coredns.service"
FAILED=0  # count of nodes where CoreDNS failed to start

# Check if binary exists before touching any remote node
if [ ! -f "$BINARY" ]; then
    echo "❌ CoreDNS binary not found at $BINARY"
    echo "Run ./build-coredns.sh first"
    exit 1
fi

echo "🚀 Deploying CoreDNS to $NODE_COUNT nodes..."
echo ""

for i in "${!NODES[@]}"; do
    node="${NODES[$i]}"
    node_num=$((i + 1))

    # progress counter uses the real node count (was hard-coded "/4")
    echo "[$node_num/$NODE_COUNT] Deploying to ns${node_num}.debros.network ($node)..."

    # Copy binary (staged in /tmp because scp cannot sudo)
    echo "  → Copying binary..."
    scp "$BINARY" "debros@$node:/tmp/coredns"
    ssh "debros@$node" "sudo mv /tmp/coredns /usr/local/bin/coredns && sudo chmod +x /usr/local/bin/coredns"

    # Copy Corefile
    echo "  → Copying configuration..."
    ssh "debros@$node" "sudo mkdir -p /etc/coredns"
    scp "$COREFILE" "debros@$node:/tmp/Corefile"
    ssh "debros@$node" "sudo mv /tmp/Corefile /etc/coredns/Corefile"

    # Copy systemd service
    echo "  → Installing systemd service..."
    scp "$SYSTEMD_SERVICE" "debros@$node:/tmp/coredns.service"
    ssh "debros@$node" "sudo mv /tmp/coredns.service /etc/systemd/system/coredns.service"

    # Start service
    echo "  → Starting CoreDNS..."
    ssh "debros@$node" "sudo systemctl daemon-reload"
    ssh "debros@$node" "sudo systemctl enable coredns"
    ssh "debros@$node" "sudo systemctl restart coredns"

    # Check status; a failed node is reported but does not abort the
    # remaining deployments
    echo "  → Checking status..."
    if ssh "debros@$node" "sudo systemctl is-active --quiet coredns"; then
        echo "  ✅ CoreDNS running on ns${node_num}.debros.network"
    else
        echo "  ❌ CoreDNS failed to start on ns${node_num}.debros.network"
        echo "     Check logs: ssh debros@$node sudo journalctl -u coredns -n 50"
        FAILED=$((FAILED + 1))
    fi
    echo ""
done

# Only claim success if every node actually came up
if [ "$FAILED" -gt 0 ]; then
    echo "⚠️  Deployment finished with $FAILED failed node(s); see logs above."
else
    echo "✅ Deployment complete!"
fi
echo ""
echo "Next steps:"
echo "  1. Test DNS resolution: dig @${NODES[0]} test.debros.network"
echo "  2. Update registrar NS records (ONLY after testing):"
echo "     NS  debros.network.  ns1.debros.network."
echo "     NS  debros.network.  ns2.debros.network."
echo "     NS  debros.network.  ns3.debros.network."
echo "     NS  debros.network.  ns4.debros.network."
echo "     A   ns1.debros.network.  ${NODES[0]}"
echo "     A   ns2.debros.network.  ${NODES[1]}"
echo "     A   ns3.debros.network.  ${NODES[2]}"
echo "     A   ns4.debros.network.  ${NODES[3]}"
echo ""

132
scripts/install-coredns.sh Executable file
View File

@ -0,0 +1,132 @@
#!/bin/bash
# install-coredns.sh - Install and configure CoreDNS on Orama Network nodes
#
# Downloads the upstream CoreDNS release tarball, installs the binary to
# /usr/local/bin, creates config/data directories, and installs the
# systemd unit (if present in ./configs/coredns). Must run as root.
set -euo pipefail

COREDNS_VERSION="${COREDNS_VERSION:-1.11.1}"
ARCH="linux_amd64"
INSTALL_DIR="/usr/local/bin"
CONFIG_DIR="/etc/coredns"
DATA_DIR="/var/lib/coredns"
USER="debros"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

log_info() {
    echo -e "${GREEN}[INFO]${NC} $1"
}

log_warn() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

log_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# Check if running as root
if [ "$EUID" -ne 0 ]; then
    log_error "This script must be run as root"
    exit 1
fi

# Check if debros user exists
if ! id -u "$USER" >/dev/null 2>&1; then
    log_error "User '$USER' does not exist. Please create it first."
    exit 1
fi

log_info "Installing CoreDNS $COREDNS_VERSION..."

# Download CoreDNS.
# -f makes curl exit non-zero on HTTP errors (otherwise a 404 page would
# be saved as the tarball); the failure is checked directly in the `if`
# because under `set -e` a separate `$?` test would never be reached.
cd /tmp
DOWNLOAD_URL="https://github.com/coredns/coredns/releases/download/v${COREDNS_VERSION}/coredns_${COREDNS_VERSION}_${ARCH}.tgz"
log_info "Downloading from $DOWNLOAD_URL"
if ! curl -fsSL "$DOWNLOAD_URL" -o coredns.tgz; then
    log_error "Failed to download CoreDNS"
    exit 1
fi

# Extract and install
log_info "Extracting CoreDNS..."
tar -xzf coredns.tgz
chmod +x coredns
mv coredns "$INSTALL_DIR/"
log_info "CoreDNS installed to $INSTALL_DIR/coredns"

# Create directories; data dir is owned by the service user
log_info "Creating directories..."
mkdir -p "$CONFIG_DIR"
mkdir -p "$DATA_DIR"
chown -R "$USER:$USER" "$DATA_DIR"

# Copy Corefile if provided (relative to the invocation directory)
if [ -f "./configs/coredns/Corefile" ]; then
    log_info "Copying Corefile configuration..."
    cp ./configs/coredns/Corefile "$CONFIG_DIR/Corefile"
else
    log_warn "Corefile not found in ./configs/coredns/Corefile"
    log_warn "Please copy your Corefile to $CONFIG_DIR/Corefile manually"
fi

# Install systemd service
log_info "Installing systemd service..."
if [ -f "./configs/coredns/coredns.service" ]; then
    cp ./configs/coredns/coredns.service /etc/systemd/system/
    systemctl daemon-reload
    log_info "Systemd service installed"
else
    log_warn "Service file not found in ./configs/coredns/coredns.service"
fi

# Verify installation
log_info "Verifying installation..."
if command -v coredns >/dev/null 2>&1; then
    VERSION_OUTPUT=$(coredns -version 2>&1 | head -1)
    log_info "Installed: $VERSION_OUTPUT"
else
    log_error "CoreDNS installation verification failed"
    exit 1
fi

# Firewall configuration reminder
log_warn "IMPORTANT: Configure firewall to allow DNS traffic"
log_warn "  - UDP/TCP port 53 (DNS)"
log_warn "  - TCP port 8080 (health check)"
log_warn "  - TCP port 9153 (metrics)"
echo
log_warn "Example firewall rules:"
log_warn "  sudo ufw allow 53/tcp"
log_warn "  sudo ufw allow 53/udp"
log_warn "  sudo ufw allow 8080/tcp"
log_warn "  sudo ufw allow 9153/tcp"

# Service management instructions
echo
log_info "Installation complete!"
echo
log_info "To configure CoreDNS:"
log_info "  1. Edit $CONFIG_DIR/Corefile"
log_info "  2. Ensure RQLite is running and accessible"
echo
log_info "To start CoreDNS:"
log_info "  sudo systemctl enable coredns"
log_info "  sudo systemctl start coredns"
echo
log_info "To check status:"
log_info "  sudo systemctl status coredns"
log_info "  sudo journalctl -u coredns -f"
echo
log_info "To test DNS:"
log_info "  dig @localhost test.debros.network"

# Cleanup
rm -f /tmp/coredns.tgz
log_info "Done!"

View File

@ -0,0 +1,8 @@
# Remote node configuration
# Format: node_number|user@host|password
# Copy this file to remote-nodes.conf and fill in your credentials
1|ubuntu@51.83.128.181|your_password_here
2|root@194.61.28.7|your_password_here
3|root@83.171.248.66|your_password_here
4|root@62.72.44.87|your_password_here