mirror of
https://github.com/DeBrosOfficial/orama.git
synced 2026-03-17 03:33:01 +00:00
feat(security): add manifest signing, TLS TOFU, refresh token migration
- Invalidate plaintext refresh tokens (migration 019)
- Add `--sign` flag to `orama build` for rootwallet manifest signing
- Add `--ca-fingerprint` TOFU verification for production joins/invites
- Save cluster secrets from join (RQLite auth, Olric key, IPFS peers)
- Add RQLite auth config fields
This commit is contained in:
parent
a0468461ab
commit
fd87eec476
4
migrations/019_invalidate_plaintext_refresh_tokens.sql
Normal file
4
migrations/019_invalidate_plaintext_refresh_tokens.sql
Normal file
@ -0,0 +1,4 @@
|
||||
-- Invalidate all existing refresh tokens.
|
||||
-- Tokens were stored in plaintext; the application now stores SHA-256 hashes.
|
||||
-- Users will need to re-authenticate (tokens have 30-day expiry anyway).
|
||||
UPDATE refresh_tokens SET revoked_at = datetime('now') WHERE revoked_at IS NULL;
|
||||
@ -10,6 +10,7 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
@ -106,6 +107,14 @@ func (b *Builder) createArchive(outputPath string, manifest *Manifest) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add manifest.sig if it exists (created by --sign)
|
||||
sigPath := filepath.Join(b.tmpDir, "manifest.sig")
|
||||
if _, err := os.Stat(sigPath); err == nil {
|
||||
if err := addFileToTar(tw, sigPath, "manifest.sig"); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Print summary
|
||||
fmt.Printf(" bin/: %d binaries\n", len(manifest.Checksums))
|
||||
fmt.Printf(" systemd/: namespace templates\n")
|
||||
@ -119,6 +128,46 @@ func (b *Builder) createArchive(outputPath string, manifest *Manifest) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// signManifest signs the manifest hash using rootwallet CLI.
|
||||
// Produces manifest.sig containing the hex-encoded EVM signature.
|
||||
func (b *Builder) signManifest(manifest *Manifest) error {
|
||||
fmt.Printf("\nSigning manifest with rootwallet...\n")
|
||||
|
||||
// Serialize manifest deterministically (compact JSON, sorted keys via json.Marshal)
|
||||
manifestData, err := json.Marshal(manifest)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal manifest: %w", err)
|
||||
}
|
||||
|
||||
// Hash the manifest JSON
|
||||
hash := sha256.Sum256(manifestData)
|
||||
hashHex := hex.EncodeToString(hash[:])
|
||||
|
||||
// Call rw sign <hash> --chain evm
|
||||
cmd := exec.Command("rw", "sign", hashHex, "--chain", "evm")
|
||||
var stdout, stderr strings.Builder
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("rw sign failed: %w\n%s", err, stderr.String())
|
||||
}
|
||||
|
||||
signature := strings.TrimSpace(stdout.String())
|
||||
if signature == "" {
|
||||
return fmt.Errorf("rw sign produced empty signature")
|
||||
}
|
||||
|
||||
// Write signature file
|
||||
sigPath := filepath.Join(b.tmpDir, "manifest.sig")
|
||||
if err := os.WriteFile(sigPath, []byte(signature), 0644); err != nil {
|
||||
return fmt.Errorf("failed to write manifest.sig: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf(" Manifest signed (SHA256: %s...)\n", hashHex[:16])
|
||||
return nil
|
||||
}
|
||||
|
||||
// addDirToTar adds all files in a directory to the tar archive under the given prefix.
|
||||
func addDirToTar(tw *tar.Writer, srcDir, prefix string) error {
|
||||
return filepath.Walk(srcDir, func(path string, info os.FileInfo, err error) error {
|
||||
|
||||
@ -117,7 +117,14 @@ func (b *Builder) Build() error {
|
||||
return fmt.Errorf("failed to generate manifest: %w", err)
|
||||
}
|
||||
|
||||
// Step 11: Create archive
|
||||
// Step 11: Sign manifest (optional)
|
||||
if b.flags.Sign {
|
||||
if err := b.signManifest(manifest); err != nil {
|
||||
return fmt.Errorf("failed to sign manifest: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Step 12: Create archive
|
||||
outputPath := b.flags.Output
|
||||
if outputPath == "" {
|
||||
outputPath = fmt.Sprintf("/tmp/orama-%s-linux-%s.tar.gz", b.version, b.flags.Arch)
|
||||
|
||||
@ -13,6 +13,7 @@ type Flags struct {
|
||||
Arch string
|
||||
Output string
|
||||
Verbose bool
|
||||
Sign bool // Sign the archive manifest with rootwallet
|
||||
}
|
||||
|
||||
// Handle is the entry point for the build command.
|
||||
@ -42,6 +43,7 @@ func parseFlags(args []string) (*Flags, error) {
|
||||
fs.StringVar(&flags.Arch, "arch", "amd64", "Target architecture (amd64, arm64)")
|
||||
fs.StringVar(&flags.Output, "output", "", "Output archive path (default: /tmp/orama-<version>-linux-<arch>.tar.gz)")
|
||||
fs.BoolVar(&flags.Verbose, "verbose", false, "Verbose output")
|
||||
fs.BoolVar(&flags.Sign, "sign", false, "Sign the manifest with rootwallet (requires rw in PATH)")
|
||||
|
||||
if err := fs.Parse(args); err != nil {
|
||||
return nil, err
|
||||
|
||||
@ -28,7 +28,8 @@ type Flags struct {
|
||||
IPFSClusterAddrs string
|
||||
|
||||
// Security flags
|
||||
SkipFirewall bool // Skip UFW firewall setup (for users who manage their own firewall)
|
||||
SkipFirewall bool // Skip UFW firewall setup (for users who manage their own firewall)
|
||||
CAFingerprint string // SHA-256 fingerprint of server TLS cert for TOFU verification
|
||||
|
||||
// Anyone flags
|
||||
AnyoneClient bool // Run Anyone as client-only (SOCKS5 proxy on port 9050, no relay)
|
||||
@ -74,6 +75,7 @@ func ParseFlags(args []string) (*Flags, error) {
|
||||
|
||||
// Security flags
|
||||
fs.BoolVar(&flags.SkipFirewall, "skip-firewall", false, "Skip UFW firewall setup (for users who manage their own firewall)")
|
||||
fs.StringVar(&flags.CAFingerprint, "ca-fingerprint", "", "SHA-256 fingerprint of server TLS cert (from orama invite output)")
|
||||
|
||||
// Anyone flags
|
||||
fs.BoolVar(&flags.AnyoneClient, "anyone-client", false, "Install Anyone as client-only (SOCKS5 proxy on port 9050, no relay)")
|
||||
|
||||
@ -2,8 +2,12 @@ package install
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
@ -366,12 +370,35 @@ func (o *Orchestrator) callJoinEndpoint(wgPubKey string) (*joinhandlers.JoinResp
|
||||
}
|
||||
|
||||
url := strings.TrimRight(o.flags.JoinAddress, "/") + "/v1/internal/join"
|
||||
|
||||
tlsConfig := &tls.Config{}
|
||||
if o.flags.CAFingerprint != "" {
|
||||
// TOFU: verify the server's TLS cert fingerprint matches the one from the invite
|
||||
expectedFP, err := hex.DecodeString(o.flags.CAFingerprint)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid --ca-fingerprint: must be hex-encoded SHA-256: %w", err)
|
||||
}
|
||||
tlsConfig.InsecureSkipVerify = true
|
||||
tlsConfig.VerifyPeerCertificate = func(rawCerts [][]byte, _ [][]*x509.Certificate) error {
|
||||
if len(rawCerts) == 0 {
|
||||
return fmt.Errorf("server presented no TLS certificates")
|
||||
}
|
||||
hash := sha256.Sum256(rawCerts[0])
|
||||
if !bytes.Equal(hash[:], expectedFP) {
|
||||
return fmt.Errorf("TLS certificate fingerprint mismatch: expected %s, got %x (possible MITM attack)",
|
||||
o.flags.CAFingerprint, hash[:])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
// No fingerprint provided — fall back to insecure for backward compatibility
|
||||
tlsConfig.InsecureSkipVerify = true
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Timeout: 30 * time.Second,
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: true, // Self-signed certs during initial setup
|
||||
},
|
||||
TLSClientConfig: tlsConfig,
|
||||
},
|
||||
}
|
||||
|
||||
@ -419,6 +446,40 @@ func (o *Orchestrator) saveSecretsFromJoinResponse(resp *joinhandlers.JoinRespon
|
||||
}
|
||||
}
|
||||
|
||||
// Write API key HMAC secret
|
||||
if resp.APIKeyHMACSecret != "" {
|
||||
if err := os.WriteFile(filepath.Join(secretsDir, "api-key-hmac-secret"), []byte(resp.APIKeyHMACSecret), 0600); err != nil {
|
||||
return fmt.Errorf("failed to write api-key-hmac-secret: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Write RQLite password and generate auth JSON file
|
||||
if resp.RQLitePassword != "" {
|
||||
if err := os.WriteFile(filepath.Join(secretsDir, "rqlite-password"), []byte(resp.RQLitePassword), 0600); err != nil {
|
||||
return fmt.Errorf("failed to write rqlite-password: %w", err)
|
||||
}
|
||||
// Also generate the auth JSON file that rqlited uses with -auth flag
|
||||
authJSON := fmt.Sprintf(`[{"username": "orama", "password": "%s", "perms": ["all"]}]`, resp.RQLitePassword)
|
||||
if err := os.WriteFile(filepath.Join(secretsDir, "rqlite-auth.json"), []byte(authJSON), 0600); err != nil {
|
||||
return fmt.Errorf("failed to write rqlite-auth.json: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Write Olric encryption key
|
||||
if resp.OlricEncryptionKey != "" {
|
||||
if err := os.WriteFile(filepath.Join(secretsDir, "olric-encryption-key"), []byte(resp.OlricEncryptionKey), 0600); err != nil {
|
||||
return fmt.Errorf("failed to write olric-encryption-key: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Write IPFS Cluster trusted peer IDs
|
||||
if len(resp.IPFSClusterPeerIDs) > 0 {
|
||||
content := strings.Join(resp.IPFSClusterPeerIDs, "\n") + "\n"
|
||||
if err := os.WriteFile(filepath.Join(secretsDir, "ipfs-cluster-trusted-peers"), []byte(content), 0600); err != nil {
|
||||
return fmt.Errorf("failed to write ipfs-cluster-trusted-peers: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@ -3,9 +3,12 @@ package invite
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"crypto/tls"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
@ -59,13 +62,43 @@ func Handle(args []string) {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Get TLS certificate fingerprint for TOFU verification
|
||||
certFingerprint := getTLSCertFingerprint(domain)
|
||||
|
||||
// Print the invite command
|
||||
fmt.Printf("\nInvite token created (expires in %s)\n\n", expiry)
|
||||
fmt.Printf("Run this on the new node:\n\n")
|
||||
fmt.Printf(" sudo orama install --join https://%s --token %s --vps-ip <NEW_NODE_IP> --nameserver\n\n", domain, token)
|
||||
if certFingerprint != "" {
|
||||
fmt.Printf(" sudo orama install --join https://%s --token %s --ca-fingerprint %s --vps-ip <NEW_NODE_IP> --nameserver\n\n", domain, token, certFingerprint)
|
||||
} else {
|
||||
fmt.Printf(" sudo orama install --join https://%s --token %s --vps-ip <NEW_NODE_IP> --nameserver\n\n", domain, token)
|
||||
}
|
||||
fmt.Printf("Replace <NEW_NODE_IP> with the new node's public IP address.\n")
|
||||
}
|
||||
|
||||
// getTLSCertFingerprint dials <domain>:443 and returns the hex-encoded
// SHA-256 digest of the server's leaf certificate (raw DER bytes).
// It returns the empty string on any failure so callers can degrade
// gracefully and print the invite command without a fingerprint.
func getTLSCertFingerprint(domain string) string {
	dialer := &net.Dialer{Timeout: 5 * time.Second}
	// InsecureSkipVerify is intentional here: we are capturing the cert
	// to print its fingerprint for TOFU, not trusting it.
	conn, err := tls.DialWithDialer(dialer, "tcp", domain+":443", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		return ""
	}
	defer conn.Close()

	peerCerts := conn.ConnectionState().PeerCertificates
	if len(peerCerts) == 0 {
		return ""
	}

	sum := sha256.Sum256(peerCerts[0].Raw)
	return hex.EncodeToString(sum[:])
}
|
||||
|
||||
// readNodeDomain reads the domain from the node config file
|
||||
func readNodeDomain() (string, error) {
|
||||
configPath := "/opt/orama/.orama/configs/node.yaml"
|
||||
|
||||
@ -22,6 +22,13 @@ type DatabaseConfig struct {
|
||||
NodeCACert string `yaml:"node_ca_cert"` // Path to CA certificate (optional, uses system CA if not set)
|
||||
NodeNoVerify bool `yaml:"node_no_verify"` // Skip certificate verification (for testing/self-signed certs)
|
||||
|
||||
// RQLite HTTP Basic Auth credentials.
|
||||
// When RQLiteAuthFile is set, rqlited is launched with `-auth <file>`.
|
||||
// Username/password are embedded in all client DSNs (harmless when auth not enforced).
|
||||
RQLiteUsername string `yaml:"rqlite_username"`
|
||||
RQLitePassword string `yaml:"rqlite_password"`
|
||||
RQLiteAuthFile string `yaml:"rqlite_auth_file"` // Path to RQLite auth JSON file. Empty = auth not enforced.
|
||||
|
||||
// Raft tuning (passed through to rqlited CLI flags).
|
||||
// Higher defaults than rqlited's 1s suit WireGuard latency.
|
||||
RaftElectionTimeout time.Duration `yaml:"raft_election_timeout"` // default: 5s
|
||||
|
||||
@ -31,9 +31,10 @@ type Backend struct {
|
||||
healthy bool
|
||||
}
|
||||
|
||||
// NewBackend creates a new RQLite backend
|
||||
func NewBackend(dsn string, refreshRate time.Duration, logger *zap.Logger) (*Backend, error) {
|
||||
client, err := NewRQLiteClient(dsn, logger)
|
||||
// NewBackend creates a new RQLite backend.
|
||||
// Optional username/password enable HTTP basic auth for RQLite connections.
|
||||
func NewBackend(dsn string, refreshRate time.Duration, logger *zap.Logger, username, password string) (*Backend, error) {
|
||||
client, err := NewRQLiteClient(dsn, logger, username, password)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create RQLite client: %w", err)
|
||||
}
|
||||
|
||||
@ -15,6 +15,8 @@ import (
|
||||
// RQLiteClient is a simple HTTP client for RQLite
|
||||
type RQLiteClient struct {
|
||||
baseURL string
|
||||
username string // HTTP basic auth username (empty = no auth)
|
||||
password string // HTTP basic auth password
|
||||
httpClient *http.Client
|
||||
logger *zap.Logger
|
||||
}
|
||||
@ -32,10 +34,13 @@ type QueryResult struct {
|
||||
Error string `json:"error"`
|
||||
}
|
||||
|
||||
// NewRQLiteClient creates a new RQLite HTTP client
|
||||
func NewRQLiteClient(dsn string, logger *zap.Logger) (*RQLiteClient, error) {
|
||||
// NewRQLiteClient creates a new RQLite HTTP client.
|
||||
// Optional username/password enable HTTP basic auth on all requests.
|
||||
func NewRQLiteClient(dsn string, logger *zap.Logger, username, password string) (*RQLiteClient, error) {
|
||||
return &RQLiteClient{
|
||||
baseURL: dsn,
|
||||
baseURL: dsn,
|
||||
username: username,
|
||||
password: password,
|
||||
httpClient: &http.Client{
|
||||
Timeout: 10 * time.Second,
|
||||
Transport: &http.Transport{
|
||||
@ -65,6 +70,9 @@ func (c *RQLiteClient) Query(ctx context.Context, query string, args ...interfac
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
if c.username != "" && c.password != "" {
|
||||
req.SetBasicAuth(c.username, c.password)
|
||||
}
|
||||
|
||||
resp, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
|
||||
@ -38,11 +38,13 @@ func parseConfig(c *caddy.Controller) (*RQLitePlugin, error) {
|
||||
}
|
||||
|
||||
var (
|
||||
dsn = "http://localhost:5001"
|
||||
refreshRate = 10 * time.Second
|
||||
cacheTTL = 30 * time.Second
|
||||
cacheSize = 10000
|
||||
zones []string
|
||||
dsn = "http://localhost:5001"
|
||||
refreshRate = 10 * time.Second
|
||||
cacheTTL = 30 * time.Second
|
||||
cacheSize = 10000
|
||||
rqliteUsername string
|
||||
rqlitePassword string
|
||||
zones []string
|
||||
)
|
||||
|
||||
// Parse zone arguments
|
||||
@ -90,6 +92,18 @@ func parseConfig(c *caddy.Controller) (*RQLitePlugin, error) {
|
||||
}
|
||||
cacheSize = size
|
||||
|
||||
case "username":
|
||||
if !c.NextArg() {
|
||||
return nil, c.ArgErr()
|
||||
}
|
||||
rqliteUsername = c.Val()
|
||||
|
||||
case "password":
|
||||
if !c.NextArg() {
|
||||
return nil, c.ArgErr()
|
||||
}
|
||||
rqlitePassword = c.Val()
|
||||
|
||||
default:
|
||||
return nil, c.Errf("unknown property '%s'", c.Val())
|
||||
}
|
||||
@ -101,7 +115,7 @@ func parseConfig(c *caddy.Controller) (*RQLitePlugin, error) {
|
||||
}
|
||||
|
||||
// Create backend
|
||||
backend, err := NewBackend(dsn, refreshRate, logger)
|
||||
backend, err := NewBackend(dsn, refreshRate, logger, rqliteUsername, rqlitePassword)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create backend: %w", err)
|
||||
}
|
||||
|
||||
@ -2,6 +2,7 @@ package production
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net"
|
||||
@ -239,8 +240,15 @@ func (cg *ConfigGenerator) GenerateGatewayConfig(peerAddresses []string, enableH
|
||||
return templates.RenderGatewayConfig(data)
|
||||
}
|
||||
|
||||
// GenerateOlricConfig generates Olric configuration
|
||||
// GenerateOlricConfig generates Olric configuration.
|
||||
// Reads the Olric encryption key from secrets if available.
|
||||
func (cg *ConfigGenerator) GenerateOlricConfig(serverBindAddr string, httpPort int, memberlistBindAddr string, memberlistPort int, memberlistEnv string, advertiseAddr string, peers []string) (string, error) {
|
||||
// Read encryption key from secrets if available
|
||||
encryptionKey := ""
|
||||
if data, err := os.ReadFile(filepath.Join(cg.oramaDir, "secrets", "olric-encryption-key")); err == nil {
|
||||
encryptionKey = strings.TrimSpace(string(data))
|
||||
}
|
||||
|
||||
data := templates.OlricConfigData{
|
||||
ServerBindAddr: serverBindAddr,
|
||||
HTTPPort: httpPort,
|
||||
@ -249,6 +257,7 @@ func (cg *ConfigGenerator) GenerateOlricConfig(serverBindAddr string, httpPort i
|
||||
MemberlistEnvironment: memberlistEnv,
|
||||
MemberlistAdvertiseAddr: advertiseAddr,
|
||||
Peers: peers,
|
||||
EncryptionKey: encryptionKey,
|
||||
}
|
||||
return templates.RenderOlricConfig(data)
|
||||
}
|
||||
@ -323,6 +332,137 @@ func (sg *SecretGenerator) EnsureClusterSecret() (string, error) {
|
||||
return secret, nil
|
||||
}
|
||||
|
||||
// EnsureRQLiteAuth generates the RQLite auth credentials and JSON auth file.
|
||||
// Returns (username, password). The auth JSON file is written to secrets/rqlite-auth.json.
|
||||
func (sg *SecretGenerator) EnsureRQLiteAuth() (string, string, error) {
|
||||
passwordPath := filepath.Join(sg.oramaDir, "secrets", "rqlite-password")
|
||||
authFilePath := filepath.Join(sg.oramaDir, "secrets", "rqlite-auth.json")
|
||||
secretDir := filepath.Dir(passwordPath)
|
||||
username := "orama"
|
||||
|
||||
if err := os.MkdirAll(secretDir, 0700); err != nil {
|
||||
return "", "", fmt.Errorf("failed to create secrets directory: %w", err)
|
||||
}
|
||||
if err := os.Chmod(secretDir, 0700); err != nil {
|
||||
return "", "", fmt.Errorf("failed to set secrets directory permissions: %w", err)
|
||||
}
|
||||
|
||||
// Try to read existing password
|
||||
var password string
|
||||
if data, err := os.ReadFile(passwordPath); err == nil {
|
||||
password = strings.TrimSpace(string(data))
|
||||
}
|
||||
|
||||
// Generate new password if needed
|
||||
if password == "" {
|
||||
bytes := make([]byte, 32)
|
||||
if _, err := rand.Read(bytes); err != nil {
|
||||
return "", "", fmt.Errorf("failed to generate RQLite password: %w", err)
|
||||
}
|
||||
password = hex.EncodeToString(bytes)
|
||||
|
||||
if err := os.WriteFile(passwordPath, []byte(password), 0600); err != nil {
|
||||
return "", "", fmt.Errorf("failed to save RQLite password: %w", err)
|
||||
}
|
||||
if err := ensureSecretFilePermissions(passwordPath); err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
}
|
||||
|
||||
// Always regenerate the auth JSON file to ensure consistency
|
||||
authJSON := fmt.Sprintf(`[{"username": "%s", "password": "%s", "perms": ["all"]}]`, username, password)
|
||||
if err := os.WriteFile(authFilePath, []byte(authJSON), 0600); err != nil {
|
||||
return "", "", fmt.Errorf("failed to save RQLite auth file: %w", err)
|
||||
}
|
||||
if err := ensureSecretFilePermissions(authFilePath); err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
return username, password, nil
|
||||
}
|
||||
|
||||
// EnsureOlricEncryptionKey gets or generates a 32-byte encryption key for Olric memberlist gossip.
|
||||
// The key is stored as base64 on disk and returned as base64 (what Olric expects).
|
||||
func (sg *SecretGenerator) EnsureOlricEncryptionKey() (string, error) {
|
||||
secretPath := filepath.Join(sg.oramaDir, "secrets", "olric-encryption-key")
|
||||
secretDir := filepath.Dir(secretPath)
|
||||
|
||||
if err := os.MkdirAll(secretDir, 0700); err != nil {
|
||||
return "", fmt.Errorf("failed to create secrets directory: %w", err)
|
||||
}
|
||||
if err := os.Chmod(secretDir, 0700); err != nil {
|
||||
return "", fmt.Errorf("failed to set secrets directory permissions: %w", err)
|
||||
}
|
||||
|
||||
// Try to read existing key
|
||||
if data, err := os.ReadFile(secretPath); err == nil {
|
||||
key := strings.TrimSpace(string(data))
|
||||
if key != "" {
|
||||
if err := ensureSecretFilePermissions(secretPath); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return key, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Generate new 32-byte key, base64 encoded
|
||||
keyBytes := make([]byte, 32)
|
||||
if _, err := rand.Read(keyBytes); err != nil {
|
||||
return "", fmt.Errorf("failed to generate Olric encryption key: %w", err)
|
||||
}
|
||||
key := base64.StdEncoding.EncodeToString(keyBytes)
|
||||
|
||||
if err := os.WriteFile(secretPath, []byte(key), 0600); err != nil {
|
||||
return "", fmt.Errorf("failed to save Olric encryption key: %w", err)
|
||||
}
|
||||
if err := ensureSecretFilePermissions(secretPath); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return key, nil
|
||||
}
|
||||
|
||||
// EnsureAPIKeyHMACSecret gets or generates the HMAC secret used to hash API keys.
|
||||
// The secret is a 32-byte random value stored as 64 hex characters.
|
||||
func (sg *SecretGenerator) EnsureAPIKeyHMACSecret() (string, error) {
|
||||
secretPath := filepath.Join(sg.oramaDir, "secrets", "api-key-hmac-secret")
|
||||
secretDir := filepath.Dir(secretPath)
|
||||
|
||||
if err := os.MkdirAll(secretDir, 0700); err != nil {
|
||||
return "", fmt.Errorf("failed to create secrets directory: %w", err)
|
||||
}
|
||||
if err := os.Chmod(secretDir, 0700); err != nil {
|
||||
return "", fmt.Errorf("failed to set secrets directory permissions: %w", err)
|
||||
}
|
||||
|
||||
// Try to read existing secret
|
||||
if data, err := os.ReadFile(secretPath); err == nil {
|
||||
secret := strings.TrimSpace(string(data))
|
||||
if len(secret) == 64 {
|
||||
if err := ensureSecretFilePermissions(secretPath); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return secret, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Generate new secret (32 bytes = 64 hex chars)
|
||||
bytes := make([]byte, 32)
|
||||
if _, err := rand.Read(bytes); err != nil {
|
||||
return "", fmt.Errorf("failed to generate API key HMAC secret: %w", err)
|
||||
}
|
||||
secret := hex.EncodeToString(bytes)
|
||||
|
||||
if err := os.WriteFile(secretPath, []byte(secret), 0600); err != nil {
|
||||
return "", fmt.Errorf("failed to save API key HMAC secret: %w", err)
|
||||
}
|
||||
if err := ensureSecretFilePermissions(secretPath); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return secret, nil
|
||||
}
|
||||
|
||||
func ensureSecretFilePermissions(secretPath string) error {
|
||||
if err := os.Chmod(secretPath, 0600); err != nil {
|
||||
return fmt.Errorf("failed to set permissions on %s: %w", secretPath, err)
|
||||
|
||||
@ -98,7 +98,12 @@ func (fp *FirewallProvisioner) GenerateRules() []string {
|
||||
}
|
||||
|
||||
// Allow all traffic from WireGuard subnet (inter-node encrypted traffic)
|
||||
rules = append(rules, "ufw allow from 10.0.0.0/8")
|
||||
rules = append(rules, "ufw allow from 10.0.0.0/24")
|
||||
|
||||
// Disable IPv6 — no ip6tables rules exist, so services bound to 0.0.0.0
|
||||
// may be reachable via IPv6. Disable it entirely at the kernel level.
|
||||
rules = append(rules, "sysctl -w net.ipv6.conf.all.disable_ipv6=1")
|
||||
rules = append(rules, "sysctl -w net.ipv6.conf.default.disable_ipv6=1")
|
||||
|
||||
// Enable firewall
|
||||
rules = append(rules, "ufw --force enable")
|
||||
@ -109,7 +114,7 @@ func (fp *FirewallProvisioner) GenerateRules() []string {
|
||||
// can be misclassified as "invalid" by conntrack due to reordering/jitter
|
||||
// (especially between high-latency peers), causing silent packet drops.
|
||||
// Inserting at position 1 in INPUT ensures this runs before UFW chains.
|
||||
rules = append(rules, "iptables -I INPUT 1 -i wg0 -s 10.0.0.0/8 -j ACCEPT")
|
||||
rules = append(rules, "iptables -I INPUT 1 -i wg0 -s 10.0.0.0/24 -j ACCEPT")
|
||||
|
||||
return rules
|
||||
}
|
||||
@ -130,6 +135,22 @@ func (fp *FirewallProvisioner) Setup() error {
|
||||
}
|
||||
}
|
||||
|
||||
// Persist IPv6 disable across reboots
|
||||
if err := fp.persistIPv6Disable(); err != nil {
|
||||
return fmt.Errorf("failed to persist IPv6 disable: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// persistIPv6Disable writes a sysctl config to disable IPv6 on boot.
|
||||
func (fp *FirewallProvisioner) persistIPv6Disable() error {
|
||||
content := "# Orama Network: disable IPv6 (no ip6tables rules configured)\nnet.ipv6.conf.all.disable_ipv6 = 1\nnet.ipv6.conf.default.disable_ipv6 = 1\n"
|
||||
cmd := exec.Command("tee", "/etc/sysctl.d/99-orama-disable-ipv6.conf")
|
||||
cmd.Stdin = strings.NewReader(content)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("failed to write sysctl config: %w\n%s", err, string(output))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@ -18,9 +18,11 @@ func TestFirewallProvisioner_GenerateRules_StandardNode(t *testing.T) {
|
||||
assertContainsRule(t, rules, "ufw allow 51820/udp")
|
||||
assertContainsRule(t, rules, "ufw allow 80/tcp")
|
||||
assertContainsRule(t, rules, "ufw allow 443/tcp")
|
||||
assertContainsRule(t, rules, "ufw allow from 10.0.0.0/8")
|
||||
assertContainsRule(t, rules, "ufw allow from 10.0.0.0/24")
|
||||
assertContainsRule(t, rules, "sysctl -w net.ipv6.conf.all.disable_ipv6=1")
|
||||
assertContainsRule(t, rules, "sysctl -w net.ipv6.conf.default.disable_ipv6=1")
|
||||
assertContainsRule(t, rules, "ufw --force enable")
|
||||
assertContainsRule(t, rules, "iptables -I INPUT 1 -i wg0 -s 10.0.0.0/8 -j ACCEPT")
|
||||
assertContainsRule(t, rules, "iptables -I INPUT 1 -i wg0 -s 10.0.0.0/24 -j ACCEPT")
|
||||
|
||||
// Should NOT contain DNS or Anyone relay
|
||||
for _, rule := range rules {
|
||||
@ -76,7 +78,7 @@ func TestFirewallProvisioner_GenerateRules_WireGuardSubnetAllowed(t *testing.T)
|
||||
|
||||
rules := fp.GenerateRules()
|
||||
|
||||
assertContainsRule(t, rules, "ufw allow from 10.0.0.0/8")
|
||||
assertContainsRule(t, rules, "ufw allow from 10.0.0.0/24")
|
||||
}
|
||||
|
||||
func TestFirewallProvisioner_GenerateRules_FullConfig(t *testing.T) {
|
||||
|
||||
@ -9,6 +9,7 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/constants"
|
||||
@ -323,8 +324,18 @@ rqlite:rqlite
|
||||
`
|
||||
}
|
||||
|
||||
// generateCorefile creates the CoreDNS configuration (RQLite only)
|
||||
// generateCorefile creates the CoreDNS configuration (RQLite only).
|
||||
// If RQLite credentials exist on disk, they are included in the config.
|
||||
func (ci *CoreDNSInstaller) generateCorefile(domain, rqliteDSN string) string {
|
||||
// Read RQLite credentials from secrets if available
|
||||
authBlock := ""
|
||||
if data, err := os.ReadFile("/opt/orama/.orama/secrets/rqlite-password"); err == nil {
|
||||
password := strings.TrimSpace(string(data))
|
||||
if password != "" {
|
||||
authBlock = fmt.Sprintf(" username orama\n password %s\n", password)
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Sprintf(`# CoreDNS configuration for %s
|
||||
# Uses RQLite for ALL DNS records (static + dynamic)
|
||||
# Static records (SOA, NS, A) are seeded into RQLite during installation
|
||||
@ -336,7 +347,7 @@ func (ci *CoreDNSInstaller) generateCorefile(domain, rqliteDSN string) string {
|
||||
refresh 5s
|
||||
ttl 30
|
||||
cache_size 10000
|
||||
}
|
||||
%s }
|
||||
|
||||
# Enable logging and error reporting
|
||||
log
|
||||
@ -351,7 +362,7 @@ func (ci *CoreDNSInstaller) generateCorefile(domain, rqliteDSN string) string {
|
||||
cache 300
|
||||
errors
|
||||
}
|
||||
`, domain, domain, rqliteDSN)
|
||||
`, domain, domain, rqliteDSN, authBlock)
|
||||
}
|
||||
|
||||
// seedStaticRecords inserts static zone records into RQLite (non-destructive)
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
package production
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
@ -256,6 +257,13 @@ func (ps *ProductionSetup) Phase2ProvisionEnvironment() error {
|
||||
}
|
||||
ps.logf(" ✓ Directory structure created")
|
||||
|
||||
// Create dedicated orama user for running services (non-root)
|
||||
if err := ps.fsProvisioner.EnsureOramaUser(); err != nil {
|
||||
ps.logf(" ⚠️ Could not create orama user: %v (services will run as root)", err)
|
||||
} else {
|
||||
ps.logf(" ✓ orama user ensured")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -477,6 +485,11 @@ func (ps *ProductionSetup) Phase2cInitializeServices(peerAddresses []string, vps
|
||||
return fmt.Errorf("failed to initialize IPFS Cluster: %w", err)
|
||||
}
|
||||
|
||||
// After init, save own IPFS Cluster peer ID to trusted peers file
|
||||
if err := ps.saveOwnClusterPeerID(clusterPath); err != nil {
|
||||
ps.logf(" ⚠️ Could not save IPFS Cluster peer ID to trusted peers: %v", err)
|
||||
}
|
||||
|
||||
// Initialize RQLite data directory
|
||||
rqliteDataDir := filepath.Join(dataDir, "rqlite")
|
||||
if err := ps.binaryInstaller.InitializeRQLiteDataDir(rqliteDataDir); err != nil {
|
||||
@ -487,6 +500,50 @@ func (ps *ProductionSetup) Phase2cInitializeServices(peerAddresses []string, vps
|
||||
return nil
|
||||
}
|
||||
|
||||
// saveOwnClusterPeerID reads this node's IPFS Cluster peer ID from identity.json
|
||||
// and appends it to the trusted-peers file so EnsureConfig() can use it.
|
||||
func (ps *ProductionSetup) saveOwnClusterPeerID(clusterPath string) error {
|
||||
identityPath := filepath.Join(clusterPath, "identity.json")
|
||||
data, err := os.ReadFile(identityPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read identity.json: %w", err)
|
||||
}
|
||||
|
||||
var identity struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
if err := json.Unmarshal(data, &identity); err != nil {
|
||||
return fmt.Errorf("failed to parse identity.json: %w", err)
|
||||
}
|
||||
if identity.ID == "" {
|
||||
return fmt.Errorf("peer ID not found in identity.json")
|
||||
}
|
||||
|
||||
// Read existing trusted peers
|
||||
trustedPeersPath := filepath.Join(ps.oramaDir, "secrets", "ipfs-cluster-trusted-peers")
|
||||
var existing []string
|
||||
if fileData, err := os.ReadFile(trustedPeersPath); err == nil {
|
||||
for _, line := range strings.Split(strings.TrimSpace(string(fileData)), "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if line != "" {
|
||||
if line == identity.ID {
|
||||
return nil // already present
|
||||
}
|
||||
existing = append(existing, line)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
existing = append(existing, identity.ID)
|
||||
content := strings.Join(existing, "\n") + "\n"
|
||||
if err := os.WriteFile(trustedPeersPath, []byte(content), 0600); err != nil {
|
||||
return fmt.Errorf("failed to write trusted peers file: %w", err)
|
||||
}
|
||||
|
||||
ps.logf(" ✓ IPFS Cluster peer ID saved to trusted peers: %s", identity.ID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Phase3GenerateSecrets generates shared secrets and keys
|
||||
func (ps *ProductionSetup) Phase3GenerateSecrets() error {
|
||||
ps.logf("Phase 3: Generating secrets...")
|
||||
@ -503,6 +560,24 @@ func (ps *ProductionSetup) Phase3GenerateSecrets() error {
|
||||
}
|
||||
ps.logf(" ✓ IPFS swarm key ensured")
|
||||
|
||||
// RQLite auth credentials
|
||||
if _, _, err := ps.secretGenerator.EnsureRQLiteAuth(); err != nil {
|
||||
return fmt.Errorf("failed to ensure RQLite auth: %w", err)
|
||||
}
|
||||
ps.logf(" ✓ RQLite auth credentials ensured")
|
||||
|
||||
// Olric gossip encryption key
|
||||
if _, err := ps.secretGenerator.EnsureOlricEncryptionKey(); err != nil {
|
||||
return fmt.Errorf("failed to ensure Olric encryption key: %w", err)
|
||||
}
|
||||
ps.logf(" ✓ Olric encryption key ensured")
|
||||
|
||||
// API key HMAC secret
|
||||
if _, err := ps.secretGenerator.EnsureAPIKeyHMACSecret(); err != nil {
|
||||
return fmt.Errorf("failed to ensure API key HMAC secret: %w", err)
|
||||
}
|
||||
ps.logf(" ✓ API key HMAC secret ensured")
|
||||
|
||||
// Node identity (unified architecture)
|
||||
peerID, err := ps.secretGenerator.EnsureNodeIdentity()
|
||||
if err != nil {
|
||||
|
||||
@ -13,7 +13,8 @@ const (
|
||||
OramaLogs = "/opt/orama/.orama/logs"
|
||||
|
||||
// Pre-built binary archive paths (created by `orama build`)
|
||||
OramaManifest = "/opt/orama/manifest.json"
|
||||
OramaManifest = "/opt/orama/manifest.json"
|
||||
OramaManifestSig = "/opt/orama/manifest.sig"
|
||||
OramaArchiveBin = "/opt/orama/bin" // Pre-built binaries
|
||||
OramaSystemdDir = "/opt/orama/systemd" // Namespace service templates
|
||||
OramaPackagesDir = "/opt/orama/packages" // .deb packages (e.g., anon.deb)
|
||||
|
||||
@ -1,12 +1,17 @@
|
||||
package production
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
ethcrypto "github.com/ethereum/go-ethereum/crypto"
|
||||
)
|
||||
|
||||
// PreBuiltManifest describes the contents of a pre-built binary archive.
|
||||
@ -40,6 +45,74 @@ func LoadPreBuiltManifest() (*PreBuiltManifest, error) {
|
||||
return &manifest, nil
|
||||
}
|
||||
|
||||
// OramaSignerAddress is the Ethereum address authorized to sign build archives.
// Archives signed by any other address are rejected during install.
// This is the DeBros deploy wallet — update if the signing key rotates.
//
// NOTE(review): this is still the zero-address placeholder. With this value no
// genuine signature can ever recover to the expected address, so every SIGNED
// archive will be rejected by VerifyArchiveSignature until the real
// deploy-wallet address is filled in (unsigned archives still pass).
const OramaSignerAddress = "0x0000000000000000000000000000000000000000" // TODO: set real address
|
||||
|
||||
// VerifyArchiveSignature verifies that the pre-built archive was signed by the
// authorized Orama signer. Returns nil if the signature is valid, or if no
// signature file exists (unsigned archives are allowed but logged as a warning).
//
// The signature scheme is EVM personal_sign over the hex-encoded SHA-256 of the
// JSON-marshaled manifest; the signer's address is recovered from the 65-byte
// secp256k1 signature and compared (case-insensitively) to OramaSignerAddress.
func VerifyArchiveSignature(manifest *PreBuiltManifest) error {
	sigData, err := os.ReadFile(OramaManifestSig)
	if os.IsNotExist(err) {
		return nil // unsigned archive — caller decides whether to proceed
	}
	if err != nil {
		return fmt.Errorf("failed to read manifest.sig: %w", err)
	}

	// Reproduce the same hash used during signing: SHA256 of compact JSON
	// NOTE(review): assumes the signer marshaled the manifest with this same Go
	// struct (identical field set, tags, and order). Any schema drift between
	// the signing binary and the verifying binary breaks verification — confirm
	// both sides build from the same Manifest definition.
	manifestJSON, err := json.Marshal(manifest)
	if err != nil {
		return fmt.Errorf("failed to marshal manifest: %w", err)
	}
	manifestHash := sha256.Sum256(manifestJSON)
	hashHex := hex.EncodeToString(manifestHash[:])

	// EVM personal_sign: keccak256("\x19Ethereum Signed Message:\n" + len + message)
	// The signed message is the ASCII hex string of the hash, not the raw bytes.
	msg := []byte(hashHex)
	prefix := []byte("\x19Ethereum Signed Message:\n" + fmt.Sprintf("%d", len(msg)))
	ethHash := ethcrypto.Keccak256(prefix, msg)

	// Decode signature: optional 0x prefix, then 65 hex-encoded bytes (r‖s‖v).
	sigHex := strings.TrimSpace(string(sigData))
	if strings.HasPrefix(sigHex, "0x") || strings.HasPrefix(sigHex, "0X") {
		sigHex = sigHex[2:]
	}
	sig, err := hex.DecodeString(sigHex)
	if err != nil || len(sig) != 65 {
		return fmt.Errorf("invalid signature format in manifest.sig")
	}

	// Normalize recovery ID: wallets emit v ∈ {27,28}, SigToPub expects {0,1}.
	if sig[64] >= 27 {
		sig[64] -= 27
	}

	// Recover public key from signature
	pub, err := ethcrypto.SigToPub(ethHash, sig)
	if err != nil {
		return fmt.Errorf("signature recovery failed: %w", err)
	}

	// Compare lowercase so EIP-55 checksum casing differences don't matter.
	recovered := ethcrypto.PubkeyToAddress(*pub).Hex()
	expected := strings.ToLower(OramaSignerAddress)
	got := strings.ToLower(recovered)

	if got != expected {
		return fmt.Errorf("archive signed by %s, expected %s — refusing to install", recovered, OramaSignerAddress)
	}

	return nil
}
|
||||
|
||||
// IsArchiveSigned returns true if a manifest.sig file exists alongside the manifest.
|
||||
func IsArchiveSigned() bool {
|
||||
_, err := os.Stat(OramaManifestSig)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// installFromPreBuilt installs all binaries from a pre-built archive.
|
||||
// The archive must already be extracted at /opt/orama/ with:
|
||||
// - /opt/orama/bin/ — all pre-compiled binaries
|
||||
@ -49,6 +122,16 @@ func LoadPreBuiltManifest() (*PreBuiltManifest, error) {
|
||||
func (ps *ProductionSetup) installFromPreBuilt(manifest *PreBuiltManifest) error {
|
||||
ps.logf(" Using pre-built binary archive v%s (%s) linux/%s", manifest.Version, manifest.Commit, manifest.Arch)
|
||||
|
||||
// Verify archive signature if present
|
||||
if IsArchiveSigned() {
|
||||
if err := VerifyArchiveSignature(manifest); err != nil {
|
||||
return fmt.Errorf("archive signature verification failed: %w", err)
|
||||
}
|
||||
ps.logf(" ✓ Archive signature verified")
|
||||
} else {
|
||||
ps.logf(" ⚠️ Archive is unsigned — consider using 'orama build --sign'")
|
||||
}
|
||||
|
||||
// Install minimal system dependencies (no build tools needed)
|
||||
if err := ps.installMinimalSystemDeps(); err != nil {
|
||||
ps.logf(" ⚠️ System dependencies warning: %v", err)
|
||||
|
||||
@ -83,6 +83,38 @@ func (fp *FilesystemProvisioner) EnsureDirectoryStructure() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureOramaUser creates the 'orama' system user and group for running services.
|
||||
// Sets ownership of the orama data directory to the new user.
|
||||
func (fp *FilesystemProvisioner) EnsureOramaUser() error {
|
||||
// Check if user already exists
|
||||
if err := exec.Command("id", "orama").Run(); err == nil {
|
||||
return nil // user already exists
|
||||
}
|
||||
|
||||
// Create system user with no login shell and home at /opt/orama
|
||||
cmd := exec.Command("useradd", "--system", "--no-create-home",
|
||||
"--home-dir", fp.oramaHome, "--shell", "/usr/sbin/nologin", "orama")
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("failed to create orama user: %w\n%s", err, string(output))
|
||||
}
|
||||
|
||||
// Set ownership of orama directories
|
||||
chown := exec.Command("chown", "-R", "orama:orama", fp.oramaDir)
|
||||
if output, err := chown.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("failed to chown %s: %w\n%s", fp.oramaDir, err, string(output))
|
||||
}
|
||||
|
||||
// Also chown the bin directory
|
||||
binDir := filepath.Join(fp.oramaHome, "bin")
|
||||
if _, err := os.Stat(binDir); err == nil {
|
||||
chown = exec.Command("chown", "-R", "orama:orama", binDir)
|
||||
if output, err := chown.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("failed to chown %s: %w\n%s", binDir, err, string(output))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// StateDetector checks for existing production state
|
||||
type StateDetector struct {
|
||||
|
||||
@ -8,6 +8,17 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// oramaServiceHardening contains common systemd security directives for orama services.
// It is spliced into each generated unit's [Service] section via fmt.Sprintf;
// services that need write access beyond the defaults add their own
// ReadWritePaths= lines alongside it. Keep this free of trailing newline —
// the templates place it on its own interpolated line.
const oramaServiceHardening = `User=orama
Group=orama
ProtectSystem=strict
ProtectHome=yes
NoNewPrivileges=yes
PrivateDevices=yes
ProtectKernelTunables=yes
ProtectKernelModules=yes
RestrictNamespaces=yes`
|
||||
|
||||
// SystemdServiceGenerator generates systemd unit files
|
||||
type SystemdServiceGenerator struct {
|
||||
oramaHome string
|
||||
@ -34,6 +45,8 @@ Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
%[6]s
|
||||
ReadWritePaths=%[3]s
|
||||
Environment=HOME=%[1]s
|
||||
Environment=IPFS_PATH=%[2]s
|
||||
ExecStartPre=/bin/bash -c 'if [ -f %[3]s/secrets/swarm.key ] && [ ! -f %[2]s/swarm.key ]; then cp %[3]s/secrets/swarm.key %[2]s/swarm.key && chmod 600 %[2]s/swarm.key; fi'
|
||||
@ -52,7 +65,7 @@ MemoryMax=4G
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
`, ssg.oramaHome, ipfsRepoPath, ssg.oramaDir, logFile, ipfsBinary)
|
||||
`, ssg.oramaHome, ipfsRepoPath, ssg.oramaDir, logFile, ipfsBinary, oramaServiceHardening)
|
||||
}
|
||||
|
||||
// GenerateIPFSClusterService generates the IPFS Cluster systemd unit
|
||||
@ -75,6 +88,8 @@ Requires=orama-ipfs.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
%[6]s
|
||||
ReadWritePaths=%[7]s
|
||||
WorkingDirectory=%[1]s
|
||||
Environment=HOME=%[1]s
|
||||
Environment=IPFS_CLUSTER_PATH=%[2]s
|
||||
@ -96,7 +111,7 @@ MemoryMax=2G
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
`, ssg.oramaHome, clusterPath, logFile, clusterBinary, clusterSecret)
|
||||
`, ssg.oramaHome, clusterPath, logFile, clusterBinary, clusterSecret, oramaServiceHardening, ssg.oramaDir)
|
||||
}
|
||||
|
||||
// GenerateRQLiteService generates the RQLite systemd unit
|
||||
@ -128,6 +143,8 @@ Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
%[6]s
|
||||
ReadWritePaths=%[7]s
|
||||
Environment=HOME=%[1]s
|
||||
ExecStart=%[5]s %[2]s
|
||||
Restart=always
|
||||
@ -143,7 +160,7 @@ KillMode=mixed
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
`, ssg.oramaHome, args, logFile, dataDir, rqliteBinary)
|
||||
`, ssg.oramaHome, args, logFile, dataDir, rqliteBinary, oramaServiceHardening, ssg.oramaDir)
|
||||
}
|
||||
|
||||
// GenerateOlricService generates the Olric systemd unit
|
||||
@ -158,6 +175,8 @@ Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
%[6]s
|
||||
ReadWritePaths=%[4]s
|
||||
Environment=HOME=%[1]s
|
||||
Environment=OLRIC_SERVER_CONFIG=%[2]s
|
||||
ExecStart=%[5]s
|
||||
@ -175,7 +194,7 @@ MemoryMax=4G
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
`, ssg.oramaHome, olricConfigPath, logFile, ssg.oramaDir, olricBinary)
|
||||
`, ssg.oramaHome, olricConfigPath, logFile, ssg.oramaDir, olricBinary, oramaServiceHardening)
|
||||
}
|
||||
|
||||
// GenerateNodeService generates the Orama Node systemd unit
|
||||
@ -193,6 +212,8 @@ Requires=wg-quick@wg0.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
%[5]s
|
||||
ReadWritePaths=%[2]s
|
||||
WorkingDirectory=%[1]s
|
||||
Environment=HOME=%[1]s
|
||||
ExecStart=%[1]s/bin/orama-node --config %[2]s/configs/%[3]s
|
||||
@ -211,7 +232,7 @@ OOMScoreAdjust=-500
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
`, ssg.oramaHome, ssg.oramaDir, configFile, logFile)
|
||||
`, ssg.oramaHome, ssg.oramaDir, configFile, logFile, oramaServiceHardening)
|
||||
}
|
||||
|
||||
// GenerateVaultService generates the Orama Vault Guardian systemd unit.
|
||||
@ -230,6 +251,16 @@ PartOf=orama-node.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=orama
|
||||
Group=orama
|
||||
ProtectSystem=strict
|
||||
ProtectHome=yes
|
||||
NoNewPrivileges=yes
|
||||
PrivateDevices=yes
|
||||
ProtectKernelTunables=yes
|
||||
ProtectKernelModules=yes
|
||||
RestrictNamespaces=yes
|
||||
ReadWritePaths=%[2]s
|
||||
ExecStart=%[1]s/bin/vault-guardian --config %[2]s/vault.yaml
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
@ -238,9 +269,6 @@ StandardError=append:%[3]s
|
||||
SyslogIdentifier=orama-vault
|
||||
|
||||
PrivateTmp=yes
|
||||
ProtectSystem=strict
|
||||
ReadWritePaths=%[2]s
|
||||
NoNewPrivileges=yes
|
||||
LimitMEMLOCK=67108864
|
||||
MemoryMax=512M
|
||||
TimeoutStopSec=30
|
||||
@ -261,6 +289,8 @@ Wants=orama-node.service orama-olric.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
%[4]s
|
||||
ReadWritePaths=%[2]s
|
||||
WorkingDirectory=%[1]s
|
||||
Environment=HOME=%[1]s
|
||||
ExecStart=%[1]s/bin/gateway --config %[2]s/data/gateway.yaml
|
||||
@ -278,7 +308,7 @@ MemoryMax=4G
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
`, ssg.oramaHome, ssg.oramaDir, logFile)
|
||||
`, ssg.oramaHome, ssg.oramaDir, logFile, oramaServiceHardening)
|
||||
}
|
||||
|
||||
// GenerateAnyoneClientService generates the Anyone Client SOCKS5 proxy systemd unit.
|
||||
@ -353,7 +383,7 @@ WantedBy=multi-user.target
|
||||
|
||||
// GenerateCoreDNSService generates the CoreDNS systemd unit
|
||||
func (ssg *SystemdServiceGenerator) GenerateCoreDNSService() string {
|
||||
return `[Unit]
|
||||
return fmt.Sprintf(`[Unit]
|
||||
Description=CoreDNS DNS Server with RQLite backend
|
||||
Documentation=https://coredns.io
|
||||
After=network-online.target orama-node.service
|
||||
@ -361,11 +391,16 @@ Wants=network-online.target orama-node.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
%[1]s
|
||||
ReadWritePaths=%[2]s
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||
ExecStart=/usr/local/bin/coredns -conf /etc/coredns/Corefile
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
SyslogIdentifier=coredns
|
||||
|
||||
PrivateTmp=yes
|
||||
LimitNOFILE=65536
|
||||
TimeoutStopSec=30
|
||||
KillMode=mixed
|
||||
@ -373,12 +408,12 @@ MemoryMax=1G
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
`
|
||||
`, oramaServiceHardening, ssg.oramaDir)
|
||||
}
|
||||
|
||||
// GenerateCaddyService generates the Caddy systemd unit for SSL/TLS
|
||||
func (ssg *SystemdServiceGenerator) GenerateCaddyService() string {
|
||||
return `[Unit]
|
||||
return fmt.Sprintf(`[Unit]
|
||||
Description=Caddy HTTP/2 Server
|
||||
Documentation=https://caddyserver.com/docs/
|
||||
After=network-online.target orama-node.service coredns.service
|
||||
@ -387,6 +422,10 @@ Wants=orama-node.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
%[1]s
|
||||
ReadWritePaths=%[2]s /var/lib/caddy /etc/caddy
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||
ExecStart=/usr/bin/caddy run --environ --config /etc/caddy/Caddyfile
|
||||
ExecReload=/usr/bin/caddy reload --config /etc/caddy/Caddyfile
|
||||
TimeoutStopSec=5s
|
||||
@ -401,7 +440,7 @@ MemoryMax=2G
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
`
|
||||
`, oramaServiceHardening, ssg.oramaDir)
|
||||
}
|
||||
|
||||
// SystemdController manages systemd service operations
|
||||
|
||||
@ -117,8 +117,8 @@ func (wp *WireGuardProvisioner) GenerateConfig() string {
|
||||
|
||||
// Accept all WireGuard subnet traffic before UFW's conntrack "invalid" drop.
|
||||
// Without this, packets reordered by the tunnel get silently dropped.
|
||||
sb.WriteString("PostUp = iptables -I INPUT 1 -i wg0 -s 10.0.0.0/8 -j ACCEPT\n")
|
||||
sb.WriteString("PostDown = iptables -D INPUT -i wg0 -s 10.0.0.0/8 -j ACCEPT\n")
|
||||
sb.WriteString("PostUp = iptables -I INPUT 1 -i wg0 -s 10.0.0.0/24 -j ACCEPT\n")
|
||||
sb.WriteString("PostDown = iptables -D INPUT -i wg0 -s 10.0.0.0/24 -j ACCEPT\n")
|
||||
|
||||
for _, peer := range wp.config.Peers {
|
||||
sb.WriteString("\n[Peer]\n")
|
||||
|
||||
@ -95,10 +95,10 @@ func TestWireGuardProvisioner_GenerateConfig_NoPeers(t *testing.T) {
|
||||
if !strings.Contains(config, "PrivateKey = dGVzdHByaXZhdGVrZXl0ZXN0cHJpdmF0ZWtleXM=") {
|
||||
t.Error("config should contain PrivateKey")
|
||||
}
|
||||
if !strings.Contains(config, "PostUp = iptables -I INPUT 1 -i wg0 -s 10.0.0.0/8 -j ACCEPT") {
|
||||
if !strings.Contains(config, "PostUp = iptables -I INPUT 1 -i wg0 -s 10.0.0.0/24 -j ACCEPT") {
|
||||
t.Error("config should contain PostUp iptables rule for WireGuard subnet")
|
||||
}
|
||||
if !strings.Contains(config, "PostDown = iptables -D INPUT -i wg0 -s 10.0.0.0/8 -j ACCEPT") {
|
||||
if !strings.Contains(config, "PostDown = iptables -D INPUT -i wg0 -s 10.0.0.0/24 -j ACCEPT") {
|
||||
t.Error("config should contain PostDown iptables cleanup rule")
|
||||
}
|
||||
if strings.Contains(config, "[Peer]") {
|
||||
|
||||
@ -15,3 +15,6 @@ memberlist:
|
||||
- "{{.}}"
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- if .EncryptionKey}}
|
||||
encryptionKey: "{{.EncryptionKey}}"
|
||||
{{- end}}
|
||||
|
||||
@ -65,6 +65,7 @@ type OlricConfigData struct {
|
||||
MemberlistEnvironment string // "local", "lan", or "wan"
|
||||
MemberlistAdvertiseAddr string // Advertise address (WG IP) so other nodes can reach us
|
||||
Peers []string // Seed peers for memberlist (host:port)
|
||||
EncryptionKey string // Base64-encoded 32-byte key for memberlist gossip encryption (empty = no encryption)
|
||||
}
|
||||
|
||||
// SystemdIPFSData holds parameters for systemd IPFS service rendering
|
||||
|
||||
@ -5,6 +5,16 @@ Wants=orama-node.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=orama
|
||||
Group=orama
|
||||
ProtectSystem=strict
|
||||
ProtectHome=yes
|
||||
NoNewPrivileges=yes
|
||||
PrivateDevices=yes
|
||||
ProtectKernelTunables=yes
|
||||
ProtectKernelModules=yes
|
||||
RestrictNamespaces=yes
|
||||
ReadWritePaths={{.OramaDir}}
|
||||
WorkingDirectory={{.HomeDir}}
|
||||
Environment=HOME={{.HomeDir}}
|
||||
ExecStart={{.HomeDir}}/bin/gateway --config {{.OramaDir}}/data/gateway.yaml
|
||||
|
||||
@ -5,6 +5,16 @@ Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=orama
|
||||
Group=orama
|
||||
ProtectSystem=strict
|
||||
ProtectHome=yes
|
||||
NoNewPrivileges=yes
|
||||
PrivateDevices=yes
|
||||
ProtectKernelTunables=yes
|
||||
ProtectKernelModules=yes
|
||||
RestrictNamespaces=yes
|
||||
ReadWritePaths={{.IPFSRepoPath}} {{.OramaDir}}
|
||||
Environment=HOME={{.HomeDir}}
|
||||
Environment=IPFS_PATH={{.IPFSRepoPath}}
|
||||
ExecStartPre=/bin/bash -c 'if [ -f {{.SecretsDir}}/swarm.key ] && [ ! -f {{.IPFSRepoPath}}/swarm.key ]; then cp {{.SecretsDir}}/swarm.key {{.IPFSRepoPath}}/swarm.key && chmod 600 {{.IPFSRepoPath}}/swarm.key; fi'
|
||||
|
||||
@ -6,6 +6,16 @@ Requires=orama-ipfs-{{.NodeType}}.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=orama
|
||||
Group=orama
|
||||
ProtectSystem=strict
|
||||
ProtectHome=yes
|
||||
NoNewPrivileges=yes
|
||||
PrivateDevices=yes
|
||||
ProtectKernelTunables=yes
|
||||
ProtectKernelModules=yes
|
||||
RestrictNamespaces=yes
|
||||
ReadWritePaths={{.ClusterPath}} {{.OramaDir}}
|
||||
WorkingDirectory={{.HomeDir}}
|
||||
Environment=HOME={{.HomeDir}}
|
||||
Environment=CLUSTER_PATH={{.ClusterPath}}
|
||||
|
||||
@ -6,6 +6,16 @@ Requires=orama-ipfs-cluster-{{.NodeType}}.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=orama
|
||||
Group=orama
|
||||
ProtectSystem=strict
|
||||
ProtectHome=yes
|
||||
NoNewPrivileges=yes
|
||||
PrivateDevices=yes
|
||||
ProtectKernelTunables=yes
|
||||
ProtectKernelModules=yes
|
||||
RestrictNamespaces=yes
|
||||
ReadWritePaths={{.OramaDir}}
|
||||
WorkingDirectory={{.HomeDir}}
|
||||
Environment=HOME={{.HomeDir}}
|
||||
ExecStart={{.HomeDir}}/bin/orama-node --config {{.OramaDir}}/configs/{{.ConfigFile}}
|
||||
|
||||
@ -5,6 +5,16 @@ Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=orama
|
||||
Group=orama
|
||||
ProtectSystem=strict
|
||||
ProtectHome=yes
|
||||
NoNewPrivileges=yes
|
||||
PrivateDevices=yes
|
||||
ProtectKernelTunables=yes
|
||||
ProtectKernelModules=yes
|
||||
RestrictNamespaces=yes
|
||||
ReadWritePaths={{.OramaDir}}
|
||||
Environment=HOME={{.HomeDir}}
|
||||
Environment=OLRIC_SERVER_CONFIG={{.ConfigPath}}
|
||||
ExecStart=/usr/local/bin/olric-server
|
||||
|
||||
24
pkg/gateway/auth/crypto.go
Normal file
24
pkg/gateway/auth/crypto.go
Normal file
@ -0,0 +1,24 @@
|
||||
package auth
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
)
|
||||
|
||||
// sha256Hex returns the lowercase hex-encoded SHA-256 hash of the input string.
// Used to hash refresh tokens before storage — deterministic so we can hash on
// insert and hash on lookup without storing the raw token.
func sha256Hex(s string) string {
	digest := sha256.Sum256([]byte(s))
	return hex.EncodeToString(digest[:])
}
|
||||
|
||||
// HmacSHA256Hex computes HMAC-SHA256 of data with the given secret key and
// returns the result as a lowercase hex string. Used for API key hashing —
// fast and deterministic, allowing direct DB lookup by hash.
func HmacSHA256Hex(data, secret string) string {
	m := hmac.New(sha256.New, []byte(secret))
	_, _ = m.Write([]byte(data)) // hash.Hash.Write is documented to never fail
	return hex.EncodeToString(m.Sum(nil))
}
||||
@ -24,14 +24,15 @@ import (
|
||||
|
||||
// Service handles authentication business logic
|
||||
type Service struct {
|
||||
logger *logging.ColoredLogger
|
||||
orm client.NetworkClient
|
||||
signingKey *rsa.PrivateKey
|
||||
keyID string
|
||||
edSigningKey ed25519.PrivateKey
|
||||
edKeyID string
|
||||
preferEdDSA bool
|
||||
defaultNS string
|
||||
logger *logging.ColoredLogger
|
||||
orm client.NetworkClient
|
||||
signingKey *rsa.PrivateKey
|
||||
keyID string
|
||||
edSigningKey ed25519.PrivateKey
|
||||
edKeyID string
|
||||
preferEdDSA bool
|
||||
defaultNS string
|
||||
apiKeyHMACSecret string // HMAC secret for hashing API keys before storage
|
||||
}
|
||||
|
||||
func NewService(logger *logging.ColoredLogger, orm client.NetworkClient, signingKeyPEM string, defaultNS string) (*Service, error) {
|
||||
@ -61,6 +62,21 @@ func NewService(logger *logging.ColoredLogger, orm client.NetworkClient, signing
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// SetAPIKeyHMACSecret configures the HMAC secret used to hash API keys before storage.
|
||||
// When set, API keys are stored as HMAC-SHA256(key, secret) in the database.
|
||||
func (s *Service) SetAPIKeyHMACSecret(secret string) {
|
||||
s.apiKeyHMACSecret = secret
|
||||
}
|
||||
|
||||
// HashAPIKey returns the HMAC-SHA256 hash of an API key if the HMAC secret is set,
|
||||
// or returns the raw key for backward compatibility during rolling upgrade.
|
||||
func (s *Service) HashAPIKey(key string) string {
|
||||
if s.apiKeyHMACSecret == "" {
|
||||
return key
|
||||
}
|
||||
return HmacSHA256Hex(key, s.apiKeyHMACSecret)
|
||||
}
|
||||
|
||||
// SetEdDSAKey configures an Ed25519 signing key for EdDSA JWT support.
|
||||
// When set, new tokens are signed with EdDSA; RS256 is still accepted for verification.
|
||||
func (s *Service) SetEdDSAKey(privKey ed25519.PrivateKey) {
|
||||
@ -207,9 +223,10 @@ func (s *Service) IssueTokens(ctx context.Context, wallet, namespace string) (st
|
||||
|
||||
internalCtx := client.WithInternalAuth(ctx)
|
||||
db := s.orm.Database()
|
||||
hashedRefresh := sha256Hex(refresh)
|
||||
if _, err := db.Query(internalCtx,
|
||||
"INSERT INTO refresh_tokens(namespace_id, subject, token, audience, expires_at) VALUES (?, ?, ?, ?, datetime('now', '+30 days'))",
|
||||
nsID, wallet, refresh, "gateway",
|
||||
nsID, wallet, hashedRefresh, "gateway",
|
||||
); err != nil {
|
||||
return "", "", 0, fmt.Errorf("failed to store refresh token: %w", err)
|
||||
}
|
||||
@ -227,8 +244,9 @@ func (s *Service) RefreshToken(ctx context.Context, refreshToken, namespace stri
|
||||
return "", "", 0, err
|
||||
}
|
||||
|
||||
hashedRefresh := sha256Hex(refreshToken)
|
||||
q := "SELECT subject FROM refresh_tokens WHERE namespace_id = ? AND token = ? AND revoked_at IS NULL AND (expires_at IS NULL OR expires_at > datetime('now')) LIMIT 1"
|
||||
res, err := db.Query(internalCtx, q, nsID, refreshToken)
|
||||
res, err := db.Query(internalCtx, q, nsID, hashedRefresh)
|
||||
if err != nil || res == nil || res.Count == 0 {
|
||||
return "", "", 0, fmt.Errorf("invalid or expired refresh token")
|
||||
}
|
||||
@ -262,7 +280,8 @@ func (s *Service) RevokeToken(ctx context.Context, namespace, token string, all
|
||||
}
|
||||
|
||||
if token != "" {
|
||||
_, err := db.Query(internalCtx, "UPDATE refresh_tokens SET revoked_at = datetime('now') WHERE namespace_id = ? AND token = ? AND revoked_at IS NULL", nsID, token)
|
||||
hashedToken := sha256Hex(token)
|
||||
_, err := db.Query(internalCtx, "UPDATE refresh_tokens SET revoked_at = datetime('now') WHERE namespace_id = ? AND token = ? AND revoked_at IS NULL", nsID, hashedToken)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -335,19 +354,21 @@ func (s *Service) GetOrCreateAPIKey(ctx context.Context, wallet, namespace strin
|
||||
}
|
||||
apiKey = "ak_" + base64.RawURLEncoding.EncodeToString(buf) + ":" + namespace
|
||||
|
||||
if _, err := db.Query(internalCtx, "INSERT INTO api_keys(key, name, namespace_id) VALUES (?, ?, ?)", apiKey, "", nsID); err != nil {
|
||||
// Store the HMAC hash of the key (not the raw key) if HMAC secret is configured
|
||||
hashedKey := s.HashAPIKey(apiKey)
|
||||
if _, err := db.Query(internalCtx, "INSERT INTO api_keys(key, name, namespace_id) VALUES (?, ?, ?)", hashedKey, "", nsID); err != nil {
|
||||
return "", fmt.Errorf("failed to store api key: %w", err)
|
||||
}
|
||||
|
||||
// Link wallet -> api_key
|
||||
rid, err := db.Query(internalCtx, "SELECT id FROM api_keys WHERE key = ? LIMIT 1", apiKey)
|
||||
rid, err := db.Query(internalCtx, "SELECT id FROM api_keys WHERE key = ? LIMIT 1", hashedKey)
|
||||
if err == nil && rid != nil && rid.Count > 0 && len(rid.Rows) > 0 && len(rid.Rows[0]) > 0 {
|
||||
apiKeyID := rid.Rows[0][0]
|
||||
_, _ = db.Query(internalCtx, "INSERT OR IGNORE INTO wallet_api_keys(namespace_id, wallet, api_key_id) VALUES (?, ?, ?)", nsID, strings.ToLower(wallet), apiKeyID)
|
||||
}
|
||||
|
||||
// Record ownerships
|
||||
_, _ = db.Query(internalCtx, "INSERT OR IGNORE INTO namespace_ownership(namespace_id, owner_type, owner_id) VALUES (?, 'api_key', ?)", nsID, apiKey)
|
||||
// Record ownerships — store the hash in ownership too
|
||||
_, _ = db.Query(internalCtx, "INSERT OR IGNORE INTO namespace_ownership(namespace_id, owner_type, owner_id) VALUES (?, 'api_key', ?)", nsID, hashedKey)
|
||||
_, _ = db.Query(internalCtx, "INSERT OR IGNORE INTO namespace_ownership(namespace_id, owner_type, owner_id) VALUES (?, 'wallet', ?)", nsID, wallet)
|
||||
|
||||
return apiKey, nil
|
||||
|
||||
@ -39,9 +39,18 @@ type Config struct {
|
||||
IPFSReplicationFactor int // Replication factor for pins (default: 3)
|
||||
IPFSEnableEncryption bool // Enable client-side encryption before upload (default: true, discovered from node configs)
|
||||
|
||||
// RQLite authentication (basic auth credentials embedded in DSN)
|
||||
RQLiteUsername string // RQLite HTTP basic auth username (default: "orama")
|
||||
RQLitePassword string // RQLite HTTP basic auth password
|
||||
|
||||
// WireGuard mesh configuration
|
||||
ClusterSecret string // Cluster secret for authenticating internal WireGuard peer exchange
|
||||
|
||||
// API key HMAC secret for hashing API keys before storage.
|
||||
// When set, API keys are stored as HMAC-SHA256(key, secret) in the database.
|
||||
// Loaded from ~/.orama/secrets/api-key-hmac-secret.
|
||||
APIKeyHMACSecret string
|
||||
|
||||
// WebRTC configuration (set when namespace has WebRTC enabled)
|
||||
WebRTCEnabled bool // Whether WebRTC endpoints are active on this gateway
|
||||
SFUPort int // Local SFU signaling port to proxy WebSocket connections to
|
||||
|
||||
@ -86,6 +86,7 @@ func NewDependencies(logger *logging.ColoredLogger, cfg *Config) (*Dependencies,
|
||||
if dsn == "" {
|
||||
dsn = "http://localhost:5001"
|
||||
}
|
||||
dsn = injectRQLiteAuth(dsn, cfg.RQLiteUsername, cfg.RQLitePassword)
|
||||
cliCfg.DatabaseEndpoints = []string{dsn}
|
||||
}
|
||||
|
||||
@ -136,6 +137,9 @@ func initializeRQLite(logger *logging.ColoredLogger, cfg *Config, deps *Dependen
|
||||
dsn = "http://localhost:5001"
|
||||
}
|
||||
|
||||
// Inject basic auth credentials into DSN if available
|
||||
dsn = injectRQLiteAuth(dsn, cfg.RQLiteUsername, cfg.RQLitePassword)
|
||||
|
||||
if strings.Contains(dsn, "?") {
|
||||
dsn += "&disableClusterDiscovery=true&level=none"
|
||||
} else {
|
||||
@ -483,6 +487,12 @@ func initializeServerless(logger *logging.ColoredLogger, cfg *Config, deps *Depe
|
||||
logger.ComponentInfo(logging.ComponentGeneral, "EdDSA signing key loaded; new JWTs will use EdDSA")
|
||||
}
|
||||
|
||||
// Configure API key HMAC secret if available
|
||||
if cfg.APIKeyHMACSecret != "" {
|
||||
authService.SetAPIKeyHMACSecret(cfg.APIKeyHMACSecret)
|
||||
logger.ComponentInfo(logging.ComponentGeneral, "API key HMAC secret loaded; new API keys will be hashed")
|
||||
}
|
||||
|
||||
deps.AuthService = authService
|
||||
|
||||
logger.ComponentInfo(logging.ComponentGeneral, "Serverless function engine ready",
|
||||
@ -660,3 +670,19 @@ func discoverIPFSFromNodeConfigs(logger *zap.Logger) ipfsDiscoveryResult {
|
||||
|
||||
return ipfsDiscoveryResult{}
|
||||
}
|
||||
|
||||
// injectRQLiteAuth injects HTTP basic auth credentials into a RQLite DSN URL.
// If username or password is empty, or the DSN already carries userinfo
// credentials, the DSN is returned unchanged — re-injecting into a DSN like
// "http://a:b@host:5001" would otherwise corrupt it into
// "http://user:pass@a:b@host:5001".
// Input: "http://localhost:5001" → Output: "http://orama:secret@localhost:5001"
func injectRQLiteAuth(dsn, username, password string) string {
	if username == "" || password == "" {
		return dsn
	}
	// Insert user:pass@ after the scheme (http:// or https://)
	for _, scheme := range []string{"https://", "http://"} {
		if !strings.HasPrefix(dsn, scheme) {
			continue
		}
		rest := dsn[len(scheme):]
		// Only inspect the authority component: an '@' after the first
		// '/', '?' or '#' belongs to the path/query, not to userinfo.
		hostPart := rest
		if i := strings.IndexAny(rest, "/?#"); i >= 0 {
			hostPart = rest[:i]
		}
		if strings.Contains(hostPart, "@") {
			return dsn // credentials already embedded — don't double-inject
		}
		return scheme + username + ":" + password + "@" + rest
	}
	// Unknown scheme: leave untouched rather than guess at the URL shape.
	return dsn
}
|
||||
|
||||
@ -313,7 +313,7 @@ func New(logger *logging.ColoredLogger, cfg *Config) (*Gateway, error) {
|
||||
|
||||
// Create client config for global namespace
|
||||
authCfg := client.DefaultClientConfig("default") // Use "default" namespace for global
|
||||
authCfg.DatabaseEndpoints = []string{cfg.GlobalRQLiteDSN}
|
||||
authCfg.DatabaseEndpoints = []string{injectRQLiteAuth(cfg.GlobalRQLiteDSN, cfg.RQLiteUsername, cfg.RQLitePassword)}
|
||||
if len(cfg.BootstrapPeers) > 0 {
|
||||
authCfg.BootstrapPeers = cfg.BootstrapPeers
|
||||
}
|
||||
|
||||
@ -32,14 +32,18 @@ type JoinResponse struct {
|
||||
WGPeers []WGPeerInfo `json:"wg_peers"`
|
||||
|
||||
// Secrets
|
||||
ClusterSecret string `json:"cluster_secret"`
|
||||
SwarmKey string `json:"swarm_key"`
|
||||
ClusterSecret string `json:"cluster_secret"`
|
||||
SwarmKey string `json:"swarm_key"`
|
||||
APIKeyHMACSecret string `json:"api_key_hmac_secret,omitempty"`
|
||||
RQLitePassword string `json:"rqlite_password,omitempty"`
|
||||
OlricEncryptionKey string `json:"olric_encryption_key,omitempty"`
|
||||
|
||||
// Cluster join info (all using WG IPs)
|
||||
RQLiteJoinAddress string `json:"rqlite_join_address"`
|
||||
IPFSPeer PeerInfo `json:"ipfs_peer"`
|
||||
IPFSClusterPeer PeerInfo `json:"ipfs_cluster_peer"`
|
||||
BootstrapPeers []string `json:"bootstrap_peers"`
|
||||
RQLiteJoinAddress string `json:"rqlite_join_address"`
|
||||
IPFSPeer PeerInfo `json:"ipfs_peer"`
|
||||
IPFSClusterPeer PeerInfo `json:"ipfs_cluster_peer"`
|
||||
IPFSClusterPeerIDs []string `json:"ipfs_cluster_peer_ids,omitempty"`
|
||||
BootstrapPeers []string `json:"bootstrap_peers"`
|
||||
|
||||
// Olric seed peers (WG IP:port for memberlist)
|
||||
OlricPeers []string `json:"olric_peers,omitempty"`
|
||||
@ -155,6 +159,24 @@ func (h *Handler) HandleJoin(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
// Read API key HMAC secret (optional — may not exist on older clusters)
|
||||
apiKeyHMACSecret := ""
|
||||
if data, err := os.ReadFile(h.oramaDir + "/secrets/api-key-hmac-secret"); err == nil {
|
||||
apiKeyHMACSecret = strings.TrimSpace(string(data))
|
||||
}
|
||||
|
||||
// Read RQLite password (optional — may not exist on older clusters)
|
||||
rqlitePassword := ""
|
||||
if data, err := os.ReadFile(h.oramaDir + "/secrets/rqlite-password"); err == nil {
|
||||
rqlitePassword = strings.TrimSpace(string(data))
|
||||
}
|
||||
|
||||
// Read Olric encryption key (optional — may not exist on older clusters)
|
||||
olricEncryptionKey := ""
|
||||
if data, err := os.ReadFile(h.oramaDir + "/secrets/olric-encryption-key"); err == nil {
|
||||
olricEncryptionKey = strings.TrimSpace(string(data))
|
||||
}
|
||||
|
||||
// 7. Get all WG peers
|
||||
wgPeers, err := h.getWGPeers(ctx, req.WGPublicKey)
|
||||
if err != nil {
|
||||
@ -181,6 +203,9 @@ func (h *Handler) HandleJoin(w http.ResponseWriter, r *http.Request) {
|
||||
// 11. Read base domain from config
|
||||
baseDomain := h.readBaseDomain()
|
||||
|
||||
// 12. Read IPFS Cluster trusted peer IDs
|
||||
ipfsClusterPeerIDs := h.readIPFSClusterTrustedPeers()
|
||||
|
||||
// Build Olric seed peers from all existing WG peer IPs (memberlist port 3322)
|
||||
var olricPeers []string
|
||||
for _, p := range wgPeers {
|
||||
@ -191,16 +216,20 @@ func (h *Handler) HandleJoin(w http.ResponseWriter, r *http.Request) {
|
||||
olricPeers = append(olricPeers, fmt.Sprintf("%s:3322", myWGIP))
|
||||
|
||||
resp := JoinResponse{
|
||||
WGIP: wgIP,
|
||||
WGPeers: wgPeers,
|
||||
ClusterSecret: strings.TrimSpace(string(clusterSecret)),
|
||||
SwarmKey: strings.TrimSpace(string(swarmKey)),
|
||||
RQLiteJoinAddress: fmt.Sprintf("%s:7001", myWGIP),
|
||||
IPFSPeer: ipfsPeer,
|
||||
IPFSClusterPeer: ipfsClusterPeer,
|
||||
BootstrapPeers: bootstrapPeers,
|
||||
OlricPeers: olricPeers,
|
||||
BaseDomain: baseDomain,
|
||||
WGIP: wgIP,
|
||||
WGPeers: wgPeers,
|
||||
ClusterSecret: strings.TrimSpace(string(clusterSecret)),
|
||||
SwarmKey: strings.TrimSpace(string(swarmKey)),
|
||||
APIKeyHMACSecret: apiKeyHMACSecret,
|
||||
RQLitePassword: rqlitePassword,
|
||||
OlricEncryptionKey: olricEncryptionKey,
|
||||
RQLiteJoinAddress: fmt.Sprintf("%s:7001", myWGIP),
|
||||
IPFSPeer: ipfsPeer,
|
||||
IPFSClusterPeer: ipfsClusterPeer,
|
||||
IPFSClusterPeerIDs: ipfsClusterPeerIDs,
|
||||
BootstrapPeers: bootstrapPeers,
|
||||
OlricPeers: olricPeers,
|
||||
BaseDomain: baseDomain,
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
@ -454,6 +483,22 @@ func (h *Handler) buildBootstrapPeers(myWGIP, ipfsPeerID string) []string {
|
||||
}
|
||||
}
|
||||
|
||||
// readIPFSClusterTrustedPeers reads IPFS Cluster trusted peer IDs from the secrets file
|
||||
func (h *Handler) readIPFSClusterTrustedPeers() []string {
|
||||
data, err := os.ReadFile(h.oramaDir + "/secrets/ipfs-cluster-trusted-peers")
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
var peers []string
|
||||
for _, line := range strings.Split(strings.TrimSpace(string(data)), "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if line != "" {
|
||||
peers = append(peers, line)
|
||||
}
|
||||
}
|
||||
return peers
|
||||
}
|
||||
|
||||
// readBaseDomain reads the base domain from node config
|
||||
func (h *Handler) readBaseDomain() string {
|
||||
data, err := os.ReadFile(h.oramaDir + "/configs/node.yaml")
|
||||
|
||||
@ -4,6 +4,8 @@ import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/logging"
|
||||
@ -14,8 +16,29 @@ import (
|
||||
var wsUpgrader = websocket.Upgrader{
|
||||
ReadBufferSize: 1024,
|
||||
WriteBufferSize: 1024,
|
||||
// For early development we accept any origin; tighten later.
|
||||
CheckOrigin: func(r *http.Request) bool { return true },
|
||||
CheckOrigin: checkWSOrigin,
|
||||
}
|
||||
|
||||
// checkWSOrigin validates WebSocket origins against the request's Host header.
|
||||
// Non-browser clients (no Origin) are allowed. Browser clients must match the host.
|
||||
func checkWSOrigin(r *http.Request) bool {
|
||||
origin := r.Header.Get("Origin")
|
||||
if origin == "" {
|
||||
return true
|
||||
}
|
||||
host := r.Host
|
||||
if host == "" {
|
||||
return false
|
||||
}
|
||||
if idx := strings.LastIndex(host, ":"); idx != -1 {
|
||||
host = host[:idx]
|
||||
}
|
||||
parsed, err := url.Parse(origin)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
originHost := parsed.Hostname()
|
||||
return originHost == host || strings.HasSuffix(originHost, "."+host)
|
||||
}
|
||||
|
||||
// wsClient wraps a WebSocket connection with message handling
|
||||
|
||||
@ -4,6 +4,8 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/serverless"
|
||||
@ -12,6 +14,29 @@ import (
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// checkWSOrigin validates WebSocket origins against the request's Host header.
|
||||
// Non-browser clients (no Origin) are allowed. Browser clients must match the host.
|
||||
func checkWSOrigin(r *http.Request) bool {
|
||||
origin := r.Header.Get("Origin")
|
||||
if origin == "" {
|
||||
return true
|
||||
}
|
||||
host := r.Host
|
||||
if host == "" {
|
||||
return false
|
||||
}
|
||||
// Strip port from host if present
|
||||
if idx := strings.LastIndex(host, ":"); idx != -1 {
|
||||
host = host[:idx]
|
||||
}
|
||||
parsed, err := url.Parse(origin)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
originHost := parsed.Hostname()
|
||||
return originHost == host || strings.HasSuffix(originHost, "."+host)
|
||||
}
|
||||
|
||||
// HandleWebSocket handles WebSocket connections for function streaming.
|
||||
// It upgrades HTTP connections to WebSocket and manages bi-directional communication
|
||||
// for real-time function invocation and streaming responses.
|
||||
@ -28,7 +53,7 @@ func (h *ServerlessHandlers) HandleWebSocket(w http.ResponseWriter, r *http.Requ
|
||||
|
||||
// Upgrade to WebSocket
|
||||
upgrader := websocket.Upgrader{
|
||||
CheckOrigin: func(r *http.Request) bool { return true },
|
||||
CheckOrigin: checkWSOrigin,
|
||||
}
|
||||
|
||||
conn, err := upgrader.Upgrade(w, r, nil)
|
||||
|
||||
@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/auth"
|
||||
"github.com/DeBrosOfficial/network/pkg/rqlite"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
@ -129,6 +130,11 @@ func (h *Handler) HandleListPeers(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if !h.validateInternalRequest(r) {
|
||||
http.Error(w, "unauthorized", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
|
||||
peers, err := h.ListPeers(r.Context())
|
||||
if err != nil {
|
||||
h.logger.Error("failed to list WG peers", zap.Error(err))
|
||||
@ -147,6 +153,11 @@ func (h *Handler) HandleRemovePeer(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if !h.validateInternalRequest(r) {
|
||||
http.Error(w, "unauthorized", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
|
||||
nodeID := r.URL.Query().Get("node_id")
|
||||
if nodeID == "" {
|
||||
http.Error(w, "node_id parameter required", http.StatusBadRequest)
|
||||
@ -165,6 +176,18 @@ func (h *Handler) HandleRemovePeer(w http.ResponseWriter, r *http.Request) {
|
||||
h.logger.Info("removed WireGuard peer", zap.String("node_id", nodeID))
|
||||
}
|
||||
|
||||
// validateInternalRequest checks that the request comes from a WireGuard peer
|
||||
// and includes a valid cluster secret. Both conditions must be met.
|
||||
func (h *Handler) validateInternalRequest(r *http.Request) bool {
|
||||
if !auth.IsWireGuardPeer(r.RemoteAddr) {
|
||||
return false
|
||||
}
|
||||
if h.clusterSecret == "" {
|
||||
return true
|
||||
}
|
||||
return r.Header.Get("X-Cluster-Secret") == h.clusterSecret
|
||||
}
|
||||
|
||||
// ListPeers returns all registered WireGuard peers
|
||||
func (h *Handler) ListPeers(ctx context.Context) ([]PeerRecord, error) {
|
||||
var peers []PeerRecord
|
||||
|
||||
@ -74,7 +74,11 @@ func (g *Gateway) validateAuthForNamespaceProxy(r *http.Request) (namespace stri
|
||||
// lookupAPIKeyNamespace resolves an API key to its namespace using cache and DB.
|
||||
// dbClient controls which database is queried (global vs namespace-specific).
|
||||
// Returns the namespace name or an error if the key is invalid.
|
||||
//
|
||||
// Dual lookup strategy for rolling upgrade: tries HMAC-hashed key first (new keys),
|
||||
// then falls back to raw key lookup (existing unhashed keys during transition).
|
||||
func (g *Gateway) lookupAPIKeyNamespace(ctx context.Context, key string, dbClient client.NetworkClient) (string, error) {
|
||||
// Cache uses raw key as cache key (in-memory only, never persisted)
|
||||
if g.mwCache != nil {
|
||||
if cachedNS, ok := g.mwCache.GetAPIKeyNamespace(key); ok {
|
||||
return cachedNS, nil
|
||||
@ -84,20 +88,33 @@ func (g *Gateway) lookupAPIKeyNamespace(ctx context.Context, key string, dbClien
|
||||
db := dbClient.Database()
|
||||
internalCtx := client.WithInternalAuth(ctx)
|
||||
q := "SELECT namespaces.name FROM api_keys JOIN namespaces ON api_keys.namespace_id = namespaces.id WHERE api_keys.key = ? LIMIT 1"
|
||||
res, err := db.Query(internalCtx, q, key)
|
||||
if err != nil || res == nil || res.Count == 0 || len(res.Rows) == 0 || len(res.Rows[0]) == 0 {
|
||||
return "", fmt.Errorf("invalid API key")
|
||||
|
||||
// Try HMAC-hashed lookup first (new keys stored as hashes)
|
||||
hashedKey := g.authService.HashAPIKey(key)
|
||||
res, err := db.Query(internalCtx, q, hashedKey)
|
||||
if err == nil && res != nil && res.Count > 0 && len(res.Rows) > 0 && len(res.Rows[0]) > 0 {
|
||||
if ns := getString(res.Rows[0][0]); ns != "" {
|
||||
if g.mwCache != nil {
|
||||
g.mwCache.SetAPIKeyNamespace(key, ns)
|
||||
}
|
||||
return ns, nil
|
||||
}
|
||||
}
|
||||
|
||||
ns := getString(res.Rows[0][0])
|
||||
if ns == "" {
|
||||
return "", fmt.Errorf("invalid API key")
|
||||
// Fallback: try raw key lookup (existing unhashed keys during rolling upgrade)
|
||||
if hashedKey != key {
|
||||
res, err = db.Query(internalCtx, q, key)
|
||||
if err == nil && res != nil && res.Count > 0 && len(res.Rows) > 0 && len(res.Rows[0]) > 0 {
|
||||
if ns := getString(res.Rows[0][0]); ns != "" {
|
||||
if g.mwCache != nil {
|
||||
g.mwCache.SetAPIKeyNamespace(key, ns)
|
||||
}
|
||||
return ns, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if g.mwCache != nil {
|
||||
g.mwCache.SetAPIKeyNamespace(key, ns)
|
||||
}
|
||||
return ns, nil
|
||||
return "", fmt.Errorf("invalid API key")
|
||||
}
|
||||
|
||||
// isWebSocketUpgrade checks if the request is a WebSocket upgrade request
|
||||
|
||||
@ -6,13 +6,15 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/auth"
|
||||
)
|
||||
|
||||
// wireGuardNet is the WireGuard mesh subnet, parsed once at init.
|
||||
var wireGuardNet *net.IPNet
|
||||
|
||||
func init() {
|
||||
_, wireGuardNet, _ = net.ParseCIDR("10.0.0.0/8")
|
||||
_, wireGuardNet, _ = net.ParseCIDR(auth.WireGuardSubnet)
|
||||
}
|
||||
|
||||
// RateLimiter implements a token-bucket rate limiter per client IP.
|
||||
@ -126,7 +128,7 @@ func (nrl *NamespaceRateLimiter) Allow(namespace string) bool {
|
||||
}
|
||||
|
||||
// rateLimitMiddleware returns 429 when a client exceeds the rate limit.
|
||||
// Internal traffic from the WireGuard subnet (10.0.0.0/8) is exempt.
|
||||
// Internal traffic from the WireGuard subnet is exempt.
|
||||
func (g *Gateway) rateLimitMiddleware(next http.Handler) http.Handler {
|
||||
if g.rateLimiter == nil {
|
||||
return next
|
||||
@ -170,7 +172,7 @@ func (g *Gateway) namespaceRateLimitMiddleware(next http.Handler) http.Handler {
|
||||
})
|
||||
}
|
||||
|
||||
// isInternalIP returns true if the IP is in the WireGuard 10.0.0.0/8 subnet
|
||||
// isInternalIP returns true if the IP is in the WireGuard subnet
|
||||
// or is a loopback address.
|
||||
func isInternalIP(ipStr string) bool {
|
||||
// Strip port if present
|
||||
@ -187,6 +189,5 @@ func isInternalIP(ipStr string) bool {
|
||||
if ip.IsLoopback() {
|
||||
return true
|
||||
}
|
||||
// 10.0.0.0/8 — WireGuard mesh
|
||||
return wireGuardNet.Contains(ip)
|
||||
}
|
||||
|
||||
@ -98,7 +98,9 @@ func TestIsInternalIP(t *testing.T) {
|
||||
}{
|
||||
{"10.0.0.1", true},
|
||||
{"10.0.0.254", true},
|
||||
{"10.255.255.255", true},
|
||||
{"10.0.0.255", true},
|
||||
{"10.0.1.1", false}, // outside /24 — VPS provider's internal range, not our WG mesh
|
||||
{"10.255.255.255", false}, // outside /24
|
||||
{"127.0.0.1", true},
|
||||
{"192.168.1.1", false},
|
||||
{"8.8.8.8", false},
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
package ipfs
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
@ -15,10 +16,11 @@ import (
|
||||
|
||||
// ClusterConfigManager manages IPFS Cluster configuration files
|
||||
type ClusterConfigManager struct {
|
||||
cfg *config.Config
|
||||
logger *zap.Logger
|
||||
clusterPath string
|
||||
secret string
|
||||
cfg *config.Config
|
||||
logger *zap.Logger
|
||||
clusterPath string
|
||||
secret string
|
||||
trustedPeersPath string // path to ipfs-cluster-trusted-peers file
|
||||
}
|
||||
|
||||
// NewClusterConfigManager creates a new IPFS Cluster config manager
|
||||
@ -46,12 +48,14 @@ func NewClusterConfigManager(cfg *config.Config, logger *zap.Logger) (*ClusterCo
|
||||
}
|
||||
|
||||
secretPath := filepath.Join(dataDir, "..", "cluster-secret")
|
||||
trustedPeersPath := ""
|
||||
if strings.Contains(dataDir, ".orama") {
|
||||
home, err := os.UserHomeDir()
|
||||
if err == nil {
|
||||
secretsDir := filepath.Join(home, ".orama", "secrets")
|
||||
if err := os.MkdirAll(secretsDir, 0700); err == nil {
|
||||
secretPath = filepath.Join(secretsDir, "cluster-secret")
|
||||
trustedPeersPath = filepath.Join(secretsDir, "ipfs-cluster-trusted-peers")
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -62,10 +66,11 @@ func NewClusterConfigManager(cfg *config.Config, logger *zap.Logger) (*ClusterCo
|
||||
}
|
||||
|
||||
return &ClusterConfigManager{
|
||||
cfg: cfg,
|
||||
logger: logger,
|
||||
clusterPath: clusterPath,
|
||||
secret: secret,
|
||||
cfg: cfg,
|
||||
logger: logger,
|
||||
clusterPath: clusterPath,
|
||||
secret: secret,
|
||||
trustedPeersPath: trustedPeersPath,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@ -114,7 +119,15 @@ func (cm *ClusterConfigManager) EnsureConfig() error {
|
||||
cfg.Cluster.Secret = cm.secret
|
||||
cfg.Cluster.ListenMultiaddress = []string{fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", clusterListenPort)}
|
||||
cfg.Consensus.CRDT.ClusterName = "orama-cluster"
|
||||
cfg.Consensus.CRDT.TrustedPeers = []string{"*"}
|
||||
|
||||
// Use trusted peers from file if available, otherwise fall back to "*" (open trust)
|
||||
trustedPeers := cm.loadTrustedPeersWithSelf()
|
||||
if len(trustedPeers) > 0 {
|
||||
cfg.Consensus.CRDT.TrustedPeers = trustedPeers
|
||||
} else {
|
||||
cfg.Consensus.CRDT.TrustedPeers = []string{"*"}
|
||||
}
|
||||
|
||||
cfg.API.RestAPI.HTTPListenMultiaddress = fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", restAPIPort)
|
||||
cfg.API.IPFSProxy.ListenMultiaddress = fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", proxyPort)
|
||||
cfg.API.IPFSProxy.NodeMultiaddress = fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", ipfsPort)
|
||||
@ -198,3 +211,89 @@ func (cm *ClusterConfigManager) createTemplateConfig() *ClusterServiceConfig {
|
||||
cfg.Raw = make(map[string]interface{})
|
||||
return cfg
|
||||
}
|
||||
|
||||
// readClusterPeerID reads this node's IPFS Cluster peer ID from identity.json
|
||||
func (cm *ClusterConfigManager) readClusterPeerID() (string, error) {
|
||||
identityPath := filepath.Join(cm.clusterPath, "identity.json")
|
||||
data, err := os.ReadFile(identityPath)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read identity.json: %w", err)
|
||||
}
|
||||
|
||||
var identity struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
if err := json.Unmarshal(data, &identity); err != nil {
|
||||
return "", fmt.Errorf("failed to parse identity.json: %w", err)
|
||||
}
|
||||
if identity.ID == "" {
|
||||
return "", fmt.Errorf("peer ID not found in identity.json")
|
||||
}
|
||||
return identity.ID, nil
|
||||
}
|
||||
|
||||
// loadTrustedPeers reads trusted peer IDs from the trusted-peers file (one per line)
|
||||
func (cm *ClusterConfigManager) loadTrustedPeers() []string {
|
||||
if cm.trustedPeersPath == "" {
|
||||
return nil
|
||||
}
|
||||
data, err := os.ReadFile(cm.trustedPeersPath)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
var peers []string
|
||||
for _, line := range strings.Split(strings.TrimSpace(string(data)), "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if line != "" {
|
||||
peers = append(peers, line)
|
||||
}
|
||||
}
|
||||
return peers
|
||||
}
|
||||
|
||||
// addTrustedPeer appends a peer ID to the trusted-peers file if not already present
|
||||
func (cm *ClusterConfigManager) addTrustedPeer(peerID string) error {
|
||||
if cm.trustedPeersPath == "" || peerID == "" {
|
||||
return nil
|
||||
}
|
||||
existing := cm.loadTrustedPeers()
|
||||
for _, p := range existing {
|
||||
if p == peerID {
|
||||
return nil // already present
|
||||
}
|
||||
}
|
||||
existing = append(existing, peerID)
|
||||
return os.WriteFile(cm.trustedPeersPath, []byte(strings.Join(existing, "\n")+"\n"), 0600)
|
||||
}
|
||||
|
||||
// loadTrustedPeersWithSelf loads trusted peers from file and ensures this node's
|
||||
// own peer ID is included. Returns nil if no trusted peers file exists.
|
||||
func (cm *ClusterConfigManager) loadTrustedPeersWithSelf() []string {
|
||||
peers := cm.loadTrustedPeers()
|
||||
|
||||
// Try to read own peer ID and add it
|
||||
ownID, err := cm.readClusterPeerID()
|
||||
if err != nil {
|
||||
cm.logger.Debug("Could not read own IPFS Cluster peer ID", zap.Error(err))
|
||||
return peers
|
||||
}
|
||||
|
||||
if ownID != "" {
|
||||
if err := cm.addTrustedPeer(ownID); err != nil {
|
||||
cm.logger.Warn("Failed to persist own peer ID to trusted peers file", zap.Error(err))
|
||||
}
|
||||
// Check if already in the list
|
||||
found := false
|
||||
for _, p := range peers {
|
||||
if p == ownID {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
peers = append(peers, ownID)
|
||||
}
|
||||
}
|
||||
|
||||
return peers
|
||||
}
|
||||
|
||||
@ -34,6 +34,11 @@ type ClusterManagerConfig struct {
|
||||
IPFSAPIURL string // IPFS API URL (default: "http://localhost:4501")
|
||||
IPFSTimeout time.Duration // Timeout for IPFS operations (default: 60s)
|
||||
IPFSReplicationFactor int // IPFS replication factor (default: 3)
|
||||
|
||||
// TurnEncryptionKey is a 32-byte AES-256 key for encrypting TURN shared secrets
|
||||
// in RQLite. Derived from cluster secret via HKDF(clusterSecret, "turn-encryption").
|
||||
// If nil, TURN secrets are stored in plaintext (backward compatibility).
|
||||
TurnEncryptionKey []byte
|
||||
}
|
||||
|
||||
// ClusterManager orchestrates namespace cluster provisioning and lifecycle
|
||||
@ -58,6 +63,9 @@ type ClusterManager struct {
|
||||
// Local node identity for distributed spawning
|
||||
localNodeID string
|
||||
|
||||
// AES-256 key for encrypting TURN secrets in RQLite (nil = plaintext)
|
||||
turnEncryptionKey []byte
|
||||
|
||||
// Track provisioning operations
|
||||
provisioningMu sync.RWMutex
|
||||
provisioning map[string]bool // namespace -> in progress
|
||||
@ -108,6 +116,7 @@ func NewClusterManager(
|
||||
ipfsAPIURL: ipfsAPIURL,
|
||||
ipfsTimeout: ipfsTimeout,
|
||||
ipfsReplicationFactor: ipfsReplicationFactor,
|
||||
turnEncryptionKey: cfg.TurnEncryptionKey,
|
||||
logger: logger.With(zap.String("component", "cluster-manager")),
|
||||
provisioning: make(map[string]bool),
|
||||
}
|
||||
@ -154,6 +163,7 @@ func NewClusterManagerWithComponents(
|
||||
ipfsAPIURL: ipfsAPIURL,
|
||||
ipfsTimeout: ipfsTimeout,
|
||||
ipfsReplicationFactor: ipfsReplicationFactor,
|
||||
turnEncryptionKey: cfg.TurnEncryptionKey,
|
||||
logger: logger.With(zap.String("component", "cluster-manager")),
|
||||
provisioning: make(map[string]bool),
|
||||
}
|
||||
|
||||
@ -9,6 +9,7 @@ import (
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/client"
|
||||
"github.com/DeBrosOfficial/network/pkg/gateway"
|
||||
"github.com/DeBrosOfficial/network/pkg/secrets"
|
||||
"github.com/DeBrosOfficial/network/pkg/sfu"
|
||||
"github.com/google/uuid"
|
||||
"go.uber.org/zap"
|
||||
@ -51,13 +52,23 @@ func (cm *ClusterManager) EnableWebRTC(ctx context.Context, namespaceName, enabl
|
||||
}
|
||||
turnSecret := base64.StdEncoding.EncodeToString(secretBytes)
|
||||
|
||||
// Encrypt TURN secret before storing in RQLite
|
||||
storedSecret := turnSecret
|
||||
if cm.turnEncryptionKey != nil {
|
||||
encrypted, encErr := secrets.Encrypt(turnSecret, cm.turnEncryptionKey)
|
||||
if encErr != nil {
|
||||
return fmt.Errorf("failed to encrypt TURN secret: %w", encErr)
|
||||
}
|
||||
storedSecret = encrypted
|
||||
}
|
||||
|
||||
// 4. Insert namespace_webrtc_config
|
||||
webrtcConfigID := uuid.New().String()
|
||||
_, err = cm.db.Exec(internalCtx,
|
||||
`INSERT INTO namespace_webrtc_config (id, namespace_cluster_id, namespace_name, enabled, turn_shared_secret, turn_credential_ttl, sfu_node_count, turn_node_count, enabled_by, enabled_at)
|
||||
VALUES (?, ?, ?, 1, ?, ?, ?, ?, ?, ?)`,
|
||||
webrtcConfigID, cluster.ID, namespaceName,
|
||||
turnSecret, DefaultTURNCredentialTTL,
|
||||
storedSecret, DefaultTURNCredentialTTL,
|
||||
DefaultSFUNodeCount, DefaultTURNNodeCount,
|
||||
enabledBy, time.Now(),
|
||||
)
|
||||
@ -297,6 +308,7 @@ func (cm *ClusterManager) DisableWebRTC(ctx context.Context, namespaceName strin
|
||||
}
|
||||
|
||||
// GetWebRTCConfig returns the WebRTC configuration for a namespace.
|
||||
// Transparently decrypts the TURN shared secret if it was encrypted at rest.
|
||||
func (cm *ClusterManager) GetWebRTCConfig(ctx context.Context, namespaceName string) (*WebRTCConfig, error) {
|
||||
internalCtx := client.WithInternalAuth(ctx)
|
||||
|
||||
@ -309,6 +321,16 @@ func (cm *ClusterManager) GetWebRTCConfig(ctx context.Context, namespaceName str
|
||||
if len(configs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Decrypt TURN secret if encrypted (handles plaintext passthrough for backward compat)
|
||||
if cm.turnEncryptionKey != nil && secrets.IsEncrypted(configs[0].TURNSharedSecret) {
|
||||
decrypted, decErr := secrets.Decrypt(configs[0].TURNSharedSecret, cm.turnEncryptionKey)
|
||||
if decErr != nil {
|
||||
return nil, fmt.Errorf("failed to decrypt TURN secret: %w", decErr)
|
||||
}
|
||||
configs[0].TURNSharedSecret = decrypted
|
||||
}
|
||||
|
||||
return &configs[0], nil
|
||||
}
|
||||
|
||||
|
||||
@ -6,6 +6,7 @@ import (
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/gateway"
|
||||
@ -13,6 +14,7 @@ import (
|
||||
"github.com/DeBrosOfficial/network/pkg/ipfs"
|
||||
"github.com/DeBrosOfficial/network/pkg/logging"
|
||||
"github.com/DeBrosOfficial/network/pkg/namespace"
|
||||
"github.com/DeBrosOfficial/network/pkg/secrets"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
@ -44,6 +46,18 @@ func (n *Node) startHTTPGateway(ctx context.Context) error {
|
||||
clusterSecret = string(secretBytes)
|
||||
}
|
||||
|
||||
// Read API key HMAC secret for hashing API keys before storage
|
||||
apiKeyHMACSecret := ""
|
||||
if secretBytes, err := os.ReadFile(filepath.Join(oramaDir, "secrets", "api-key-hmac-secret")); err == nil {
|
||||
apiKeyHMACSecret = strings.TrimSpace(string(secretBytes))
|
||||
}
|
||||
|
||||
// Read RQLite credentials for authenticated DB connections
|
||||
rqlitePassword := ""
|
||||
if secretBytes, err := os.ReadFile(filepath.Join(oramaDir, "secrets", "rqlite-password")); err == nil {
|
||||
rqlitePassword = strings.TrimSpace(string(secretBytes))
|
||||
}
|
||||
|
||||
gwCfg := &gateway.Config{
|
||||
ListenAddr: n.config.HTTPGateway.ListenAddr,
|
||||
ClientNamespace: n.config.HTTPGateway.ClientNamespace,
|
||||
@ -57,7 +71,10 @@ func (n *Node) startHTTPGateway(ctx context.Context) error {
|
||||
IPFSTimeout: n.config.HTTPGateway.IPFSTimeout,
|
||||
BaseDomain: n.config.HTTPGateway.BaseDomain,
|
||||
DataDir: oramaDir,
|
||||
RQLiteUsername: "orama",
|
||||
RQLitePassword: rqlitePassword,
|
||||
ClusterSecret: clusterSecret,
|
||||
APIKeyHMACSecret: apiKeyHMACSecret,
|
||||
WebRTCEnabled: n.config.HTTPGateway.WebRTC.Enabled,
|
||||
SFUPort: n.config.HTTPGateway.WebRTC.SFUPort,
|
||||
TURNDomain: n.config.HTTPGateway.WebRTC.TURNDomain,
|
||||
@ -73,6 +90,14 @@ func (n *Node) startHTTPGateway(ctx context.Context) error {
|
||||
// Wire up ClusterManager for per-namespace cluster provisioning
|
||||
if ormClient := apiGateway.GetORMClient(); ormClient != nil {
|
||||
baseDataDir := filepath.Join(os.ExpandEnv(n.config.Node.DataDir), "..", "data", "namespaces")
|
||||
// Derive TURN encryption key from cluster secret (nil if no secret available)
|
||||
var turnEncKey []byte
|
||||
if clusterSecret != "" {
|
||||
if key, keyErr := secrets.DeriveKey(clusterSecret, "turn-encryption"); keyErr == nil {
|
||||
turnEncKey = key
|
||||
}
|
||||
}
|
||||
|
||||
clusterCfg := namespace.ClusterManagerConfig{
|
||||
BaseDomain: n.config.HTTPGateway.BaseDomain,
|
||||
BaseDataDir: baseDataDir,
|
||||
@ -81,6 +106,7 @@ func (n *Node) startHTTPGateway(ctx context.Context) error {
|
||||
IPFSAPIURL: gwCfg.IPFSAPIURL,
|
||||
IPFSTimeout: gwCfg.IPFSTimeout,
|
||||
IPFSReplicationFactor: n.config.Database.IPFS.ReplicationFactor,
|
||||
TurnEncryptionKey: turnEncKey,
|
||||
}
|
||||
clusterManager := namespace.NewClusterManager(ormClient, clusterCfg, n.logger.Logger)
|
||||
clusterManager.SetLocalNodeID(gwCfg.NodePeerID)
|
||||
|
||||
@ -16,8 +16,13 @@ type RQLiteAdapter struct {
|
||||
|
||||
// NewRQLiteAdapter creates a new adapter that provides sql.DB interface for RQLite
|
||||
func NewRQLiteAdapter(manager *RQLiteManager) (*RQLiteAdapter, error) {
|
||||
// Use the gorqlite database/sql driver
|
||||
db, err := sql.Open("rqlite", fmt.Sprintf("http://localhost:%d?disableClusterDiscovery=true&level=none", manager.config.RQLitePort))
|
||||
// Build DSN with optional basic auth credentials
|
||||
dsn := fmt.Sprintf("http://localhost:%d?disableClusterDiscovery=true&level=none", manager.config.RQLitePort)
|
||||
if manager.config.RQLiteUsername != "" && manager.config.RQLitePassword != "" {
|
||||
dsn = fmt.Sprintf("http://%s:%s@localhost:%d?disableClusterDiscovery=true&level=none",
|
||||
manager.config.RQLiteUsername, manager.config.RQLitePassword, manager.config.RQLitePort)
|
||||
}
|
||||
db, err := sql.Open("rqlite", dsn)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open RQLite SQL connection: %w", err)
|
||||
}
|
||||
|
||||
@ -31,6 +31,7 @@ type InstanceConfig struct {
|
||||
JoinAddresses []string // Addresses to join (e.g., ["192.168.1.2:10001"])
|
||||
DataDir string // Data directory for this instance
|
||||
IsLeader bool // Whether this is the first node (creates cluster)
|
||||
AuthFile string // Path to RQLite auth JSON file. Empty = no auth enforcement.
|
||||
}
|
||||
|
||||
// Instance represents a running RQLite instance
|
||||
@ -91,6 +92,11 @@ func (is *InstanceSpawner) SpawnInstance(ctx context.Context, cfg InstanceConfig
|
||||
"-raft-leader-lease-timeout", "2s",
|
||||
)
|
||||
|
||||
// RQLite HTTP Basic Auth
|
||||
if cfg.AuthFile != "" {
|
||||
args = append(args, "-auth", cfg.AuthFile)
|
||||
}
|
||||
|
||||
// Add join addresses if not the leader (must be before data directory)
|
||||
if !cfg.IsLeader && len(cfg.JoinAddresses) > 0 {
|
||||
for _, addr := range cfg.JoinAddresses {
|
||||
|
||||
@ -137,6 +137,13 @@ func (r *RQLiteManager) launchProcess(ctx context.Context, rqliteDataDir string)
|
||||
"-raft-leader-lease-timeout", raftLeaderLease.String(),
|
||||
)
|
||||
|
||||
// RQLite HTTP Basic Auth — when auth file exists, enforce authentication
|
||||
if r.config.RQLiteAuthFile != "" {
|
||||
r.logger.Info("Enabling RQLite HTTP Basic Auth",
|
||||
zap.String("auth_file", r.config.RQLiteAuthFile))
|
||||
args = append(args, "-auth", r.config.RQLiteAuthFile)
|
||||
}
|
||||
|
||||
if r.config.RQLiteJoinAddress != "" && !r.hasExistingState(rqliteDataDir) {
|
||||
r.logger.Info("First-time join to RQLite cluster", zap.String("join_address", r.config.RQLiteJoinAddress))
|
||||
|
||||
|
||||
98
pkg/secrets/encrypt.go
Normal file
98
pkg/secrets/encrypt.go
Normal file
@ -0,0 +1,98 @@
|
||||
// Package secrets provides application-level encryption for sensitive data stored in RQLite.
|
||||
// Uses AES-256-GCM with HKDF key derivation from the cluster secret.
|
||||
package secrets
|
||||
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/crypto/hkdf"
|
||||
)
|
||||
|
||||
// Prefix for encrypted values to distinguish from plaintext during migration.
|
||||
const encryptedPrefix = "enc:"
|
||||
|
||||
// DeriveKey derives a 32-byte AES-256 key from the cluster secret using HKDF-SHA256.
|
||||
// The purpose string provides domain separation (e.g., "turn-encryption").
|
||||
func DeriveKey(clusterSecret, purpose string) ([]byte, error) {
|
||||
if clusterSecret == "" {
|
||||
return nil, fmt.Errorf("cluster secret is empty")
|
||||
}
|
||||
reader := hkdf.New(sha256.New, []byte(clusterSecret), nil, []byte(purpose))
|
||||
key := make([]byte, 32)
|
||||
if _, err := io.ReadFull(reader, key); err != nil {
|
||||
return nil, fmt.Errorf("HKDF key derivation failed: %w", err)
|
||||
}
|
||||
return key, nil
|
||||
}
|
||||
|
||||
// Encrypt encrypts plaintext with AES-256-GCM using the given key.
|
||||
// Returns a base64-encoded string prefixed with "enc:" for identification.
|
||||
func Encrypt(plaintext string, key []byte) (string, error) {
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to create cipher: %w", err)
|
||||
}
|
||||
|
||||
gcm, err := cipher.NewGCM(block)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to create GCM: %w", err)
|
||||
}
|
||||
|
||||
nonce := make([]byte, gcm.NonceSize())
|
||||
if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
|
||||
return "", fmt.Errorf("failed to generate nonce: %w", err)
|
||||
}
|
||||
|
||||
// nonce is prepended to ciphertext
|
||||
ciphertext := gcm.Seal(nonce, nonce, []byte(plaintext), nil)
|
||||
return encryptedPrefix + base64.StdEncoding.EncodeToString(ciphertext), nil
|
||||
}
|
||||
|
||||
// Decrypt decrypts an "enc:"-prefixed ciphertext string with AES-256-GCM.
|
||||
// If the input is not prefixed with "enc:", it is returned as-is (plaintext passthrough
|
||||
// for backward compatibility during migration).
|
||||
func Decrypt(ciphertext string, key []byte) (string, error) {
|
||||
if !strings.HasPrefix(ciphertext, encryptedPrefix) {
|
||||
return ciphertext, nil // plaintext passthrough
|
||||
}
|
||||
|
||||
data, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(ciphertext, encryptedPrefix))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to decode ciphertext: %w", err)
|
||||
}
|
||||
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to create cipher: %w", err)
|
||||
}
|
||||
|
||||
gcm, err := cipher.NewGCM(block)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to create GCM: %w", err)
|
||||
}
|
||||
|
||||
nonceSize := gcm.NonceSize()
|
||||
if len(data) < nonceSize {
|
||||
return "", fmt.Errorf("ciphertext too short")
|
||||
}
|
||||
|
||||
nonce, sealed := data[:nonceSize], data[nonceSize:]
|
||||
plaintext, err := gcm.Open(nil, nonce, sealed, nil)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("decryption failed (wrong key or corrupted data): %w", err)
|
||||
}
|
||||
|
||||
return string(plaintext), nil
|
||||
}
|
||||
|
||||
// IsEncrypted returns true if the value has the "enc:" prefix.
|
||||
func IsEncrypted(value string) bool {
|
||||
return strings.HasPrefix(value, encryptedPrefix)
|
||||
}
|
||||
Loading…
x
Reference in New Issue
Block a user