mirror of
https://github.com/DeBrosOfficial/orama.git
synced 2026-03-17 06:23:00 +00:00
Implement WireGuard peer authentication and enhance internal request validation
This commit is contained in:
parent
4f1709e136
commit
b58e1d80ee
22
pkg/auth/internal_auth.go
Normal file
22
pkg/auth/internal_auth.go
Normal file
@ -0,0 +1,22 @@
|
||||
package auth
|
||||
|
||||
import "net"
|
||||
|
||||
// WireGuardSubnet is the internal WireGuard mesh CIDR.
const WireGuardSubnet = "10.0.0.0/24"

// wireGuardNet is the parsed form of WireGuardSubnet, computed once at
// package init instead of on every call. The CIDR literal is a compile-time
// constant, so a parse failure is a programmer error; panicking here surfaces
// it immediately rather than silently returning false forever.
var wireGuardNet = func() *net.IPNet {
	_, ipNet, err := net.ParseCIDR(WireGuardSubnet)
	if err != nil {
		panic("auth: invalid WireGuardSubnet: " + err.Error())
	}
	return ipNet
}()

// IsWireGuardPeer checks whether remoteAddr (host:port format) originates
// from the WireGuard mesh subnet. This provides cryptographic peer
// authentication since WireGuard validates keys at the tunnel layer.
//
// It returns false for any address that is not in host:port form, whose
// host is not a parseable IP, or whose IP falls outside the mesh subnet.
func IsWireGuardPeer(remoteAddr string) bool {
	host, _, err := net.SplitHostPort(remoteAddr)
	if err != nil {
		return false
	}
	ip := net.ParseIP(host)
	if ip == nil {
		return false
	}
	return wireGuardNet.Contains(ip)
}
|
||||
@ -18,6 +18,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
nodeauth "github.com/DeBrosOfficial/network/pkg/auth"
|
||||
"github.com/DeBrosOfficial/network/pkg/client"
|
||||
"github.com/DeBrosOfficial/network/pkg/deployments"
|
||||
"github.com/DeBrosOfficial/network/pkg/deployments/health"
|
||||
@ -806,8 +807,8 @@ func (g *Gateway) namespaceClusterRepairHandler(w http.ResponseWriter, r *http.R
|
||||
return
|
||||
}
|
||||
|
||||
// Internal auth check
|
||||
if r.Header.Get("X-Orama-Internal-Auth") != "namespace-coordination" {
|
||||
// Internal auth check: header + WireGuard subnet verification
|
||||
if r.Header.Get("X-Orama-Internal-Auth") != "namespace-coordination" || !nodeauth.IsWireGuardPeer(r.RemoteAddr) {
|
||||
writeError(w, http.StatusUnauthorized, "unauthorized")
|
||||
return
|
||||
}
|
||||
|
||||
@ -217,14 +217,14 @@ func (h *DomainHandler) HandleVerifyDomain(w http.ResponseWriter, r *http.Reques
|
||||
return
|
||||
}
|
||||
|
||||
// Update status
|
||||
// Update status (scoped to deployment_id for defense-in-depth)
|
||||
updateQuery := `
|
||||
UPDATE deployment_domains
|
||||
SET verification_status = 'verified', verified_at = ?
|
||||
WHERE domain = ?
|
||||
WHERE domain = ? AND deployment_id = ?
|
||||
`
|
||||
|
||||
_, err = h.service.db.Exec(ctx, updateQuery, time.Now(), domain)
|
||||
_, err = h.service.db.Exec(ctx, updateQuery, time.Now(), domain, domainRecord.DeploymentID)
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to update verification status", zap.Error(err))
|
||||
http.Error(w, "Failed to update verification status", http.StatusInternalServerError)
|
||||
@ -358,9 +358,9 @@ func (h *DomainHandler) HandleRemoveDomain(w http.ResponseWriter, r *http.Reques
|
||||
}
|
||||
deploymentID = rows[0].DeploymentID
|
||||
|
||||
// Delete domain
|
||||
deleteQuery := `DELETE FROM deployment_domains WHERE domain = ?`
|
||||
_, err = h.service.db.Exec(ctx, deleteQuery, domain)
|
||||
// Delete domain (scoped to deployment_id for defense-in-depth)
|
||||
deleteQuery := `DELETE FROM deployment_domains WHERE domain = ? AND deployment_id = ?`
|
||||
_, err = h.service.db.Exec(ctx, deleteQuery, domain, deploymentID)
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to delete domain", zap.Error(err))
|
||||
http.Error(w, "Failed to delete domain", http.StatusInternalServerError)
|
||||
|
||||
@ -11,6 +11,7 @@ import (
|
||||
|
||||
"os/exec"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/auth"
|
||||
"github.com/DeBrosOfficial/network/pkg/deployments"
|
||||
"github.com/DeBrosOfficial/network/pkg/deployments/process"
|
||||
"github.com/DeBrosOfficial/network/pkg/ipfs"
|
||||
@ -422,6 +423,11 @@ func (h *ReplicaHandler) extractFromIPFS(ctx context.Context, cid, destPath stri
|
||||
}
|
||||
|
||||
// isInternalRequest checks if the request is an internal node-to-node call.
|
||||
// Requires both the static auth header AND that the request originates from
|
||||
// the WireGuard mesh subnet (cryptographic peer authentication).
|
||||
func (h *ReplicaHandler) isInternalRequest(r *http.Request) bool {
|
||||
return r.Header.Get("X-Orama-Internal-Auth") == "replica-coordination"
|
||||
if r.Header.Get("X-Orama-Internal-Auth") != "replica-coordination" {
|
||||
return false
|
||||
}
|
||||
return auth.IsWireGuardPeer(r.RemoteAddr)
|
||||
}
|
||||
|
||||
@ -7,6 +7,7 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/auth"
|
||||
"github.com/DeBrosOfficial/network/pkg/gateway"
|
||||
namespacepkg "github.com/DeBrosOfficial/network/pkg/namespace"
|
||||
"github.com/DeBrosOfficial/network/pkg/olric"
|
||||
@ -80,8 +81,8 @@ func (h *SpawnHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
// Authenticate via internal auth header
|
||||
if r.Header.Get("X-Orama-Internal-Auth") != "namespace-coordination" {
|
||||
// Authenticate via internal auth header + WireGuard subnet check
|
||||
if r.Header.Get("X-Orama-Internal-Auth") != "namespace-coordination" || !auth.IsWireGuardPeer(r.RemoteAddr) {
|
||||
http.Error(w, "unauthorized", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
@ -940,21 +940,21 @@ func (g *Gateway) handleNamespaceGatewayRequest(w http.ResponseWriter, r *http.R
|
||||
return targets[i].ip < targets[j].ip
|
||||
})
|
||||
|
||||
// Prefer local gateway if this node is part of the namespace cluster.
|
||||
// This avoids a WireGuard network hop and eliminates single-point-of-failure
|
||||
// when a remote gateway node is down.
|
||||
var selected namespaceGatewayTarget
|
||||
// Build ordered target list: local gateway first, then hash-selected, then remaining.
|
||||
// This ordering is used by the circuit breaker fallback loop below.
|
||||
orderedTargets := make([]namespaceGatewayTarget, 0, len(targets))
|
||||
localIdx := -1
|
||||
if g.localWireGuardIP != "" {
|
||||
for _, t := range targets {
|
||||
for i, t := range targets {
|
||||
if t.ip == g.localWireGuardIP {
|
||||
selected = t
|
||||
orderedTargets = append(orderedTargets, t)
|
||||
localIdx = i
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Fall back to consistent hashing for nodes not in the namespace cluster
|
||||
if selected.ip == "" {
|
||||
// Consistent hashing for affinity (keeps WS subscribe/publish on same node)
|
||||
affinityKey := namespaceName + "|" + validatedNamespace
|
||||
if apiKey := extractAPIKey(r); apiKey != "" {
|
||||
affinityKey = namespaceName + "|" + apiKey
|
||||
@ -965,8 +965,32 @@ func (g *Gateway) handleNamespaceGatewayRequest(w http.ResponseWriter, r *http.R
|
||||
}
|
||||
hasher := fnv.New32a()
|
||||
_, _ = hasher.Write([]byte(affinityKey))
|
||||
targetIdx := int(hasher.Sum32()) % len(targets)
|
||||
selected = targets[targetIdx]
|
||||
hashIdx := int(hasher.Sum32()) % len(targets)
|
||||
if hashIdx != localIdx {
|
||||
orderedTargets = append(orderedTargets, targets[hashIdx])
|
||||
}
|
||||
for i, t := range targets {
|
||||
if i != localIdx && i != hashIdx {
|
||||
orderedTargets = append(orderedTargets, t)
|
||||
}
|
||||
}
|
||||
|
||||
// Select the first target whose circuit breaker allows a request through.
|
||||
// This provides automatic failover when a namespace gateway node is down.
|
||||
var selected namespaceGatewayTarget
|
||||
var cb *CircuitBreaker
|
||||
for _, candidate := range orderedTargets {
|
||||
cbKey := "ns:" + candidate.ip
|
||||
candidateCB := g.circuitBreakers.Get(cbKey)
|
||||
if candidateCB.Allow() {
|
||||
selected = candidate
|
||||
cb = candidateCB
|
||||
break
|
||||
}
|
||||
}
|
||||
if selected.ip == "" {
|
||||
http.Error(w, "Namespace gateway unavailable (all circuits open)", http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
gatewayIP := selected.ip
|
||||
gatewayPort := selected.port
|
||||
@ -1027,14 +1051,6 @@ func (g *Gateway) handleNamespaceGatewayRequest(w http.ResponseWriter, r *http.R
|
||||
proxyReq.Header.Set(HeaderInternalAuthNamespace, validatedNamespace)
|
||||
}
|
||||
|
||||
// Circuit breaker: check if target is healthy before sending request
|
||||
cbKey := "ns:" + gatewayIP
|
||||
cb := g.circuitBreakers.Get(cbKey)
|
||||
if !cb.Allow() {
|
||||
http.Error(w, "Namespace gateway unavailable (circuit open)", http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
|
||||
// Execute proxy request using shared transport for connection pooling
|
||||
httpClient := &http.Client{Timeout: 30 * time.Second, Transport: g.proxyTransport}
|
||||
resp, err := httpClient.Do(proxyReq)
|
||||
|
||||
@ -3,9 +3,9 @@ package gateway
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@ -89,7 +89,7 @@ func (g *Gateway) probeLocalNamespaces(ctx context.Context) {
|
||||
}
|
||||
|
||||
query := `
|
||||
SELECT nc.namespace_name, npa.rqlite_http_port, npa.olric_memberlist_port, npa.gateway_http_port
|
||||
SELECT nc.namespace_name, npa.rqlite_http_port, npa.olric_http_port, npa.gateway_http_port
|
||||
FROM namespace_port_allocations npa
|
||||
JOIN namespace_clusters nc ON npa.namespace_cluster_id = nc.id
|
||||
WHERE npa.node_id = ? AND nc.status = 'ready'
|
||||
@ -117,7 +117,7 @@ func (g *Gateway) probeLocalNamespaces(ctx context.Context) {
|
||||
// Probe RQLite (HTTP on localhost)
|
||||
nsHealth.Services["rqlite"] = probeTCP("127.0.0.1", rqlitePort)
|
||||
|
||||
// Probe Olric memberlist (binds to WireGuard IP)
|
||||
// Probe Olric HTTP API (binds to WireGuard IP)
|
||||
olricHost := g.localWireGuardIP
|
||||
if olricHost == "" {
|
||||
olricHost = "127.0.0.1"
|
||||
@ -238,7 +238,7 @@ func (g *Gateway) isRQLiteLeader(ctx context.Context) bool {
|
||||
// probeTCP checks if a port is listening by attempting a TCP connection.
|
||||
func probeTCP(host string, port int) NamespaceServiceHealth {
|
||||
start := time.Now()
|
||||
addr := fmt.Sprintf("%s:%d", host, port)
|
||||
addr := net.JoinHostPort(host, strconv.Itoa(port))
|
||||
conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
|
||||
latency := time.Since(start)
|
||||
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user